Diffstat (limited to 'dom/webgpu')
-rw-r--r-- dom/webgpu/Adapter.cpp | 503
-rw-r--r-- dom/webgpu/Adapter.h | 117
-rw-r--r-- dom/webgpu/BindGroup.cpp | 34
-rw-r--r-- dom/webgpu/BindGroup.h | 33
-rw-r--r-- dom/webgpu/BindGroupLayout.cpp | 34
-rw-r--r-- dom/webgpu/BindGroupLayout.h | 34
-rw-r--r-- dom/webgpu/Buffer.cpp | 388
-rw-r--r-- dom/webgpu/Buffer.h | 95
-rw-r--r-- dom/webgpu/CanvasContext.cpp | 387
-rw-r--r-- dom/webgpu/CanvasContext.h | 119
-rw-r--r-- dom/webgpu/CommandBuffer.cpp | 44
-rw-r--r-- dom/webgpu/CommandBuffer.h | 46
-rw-r--r-- dom/webgpu/CommandEncoder.cpp | 270
-rw-r--r-- dom/webgpu/CommandEncoder.h | 109
-rw-r--r-- dom/webgpu/CompilationInfo.cpp | 33
-rw-r--r-- dom/webgpu/CompilationInfo.h | 38
-rw-r--r-- dom/webgpu/CompilationMessage.cpp | 24
-rw-r--r-- dom/webgpu/CompilationMessage.h | 53
-rw-r--r-- dom/webgpu/ComputePassEncoder.cpp | 109
-rw-r--r-- dom/webgpu/ComputePassEncoder.h | 73
-rw-r--r-- dom/webgpu/ComputePipeline.cpp | 58
-rw-r--r-- dom/webgpu/ComputePipeline.h | 41
-rw-r--r-- dom/webgpu/Device.cpp | 1058
-rw-r--r-- dom/webgpu/Device.h | 190
-rw-r--r-- dom/webgpu/DeviceLostInfo.cpp | 13
-rw-r--r-- dom/webgpu/DeviceLostInfo.h | 51
-rw-r--r-- dom/webgpu/Error.cpp | 20
-rw-r--r-- dom/webgpu/Error.h | 46
-rw-r--r-- dom/webgpu/ExternalTexture.cpp | 39
-rw-r--r-- dom/webgpu/ExternalTexture.h | 60
-rw-r--r-- dom/webgpu/ExternalTextureD3D11.cpp | 168
-rw-r--r-- dom/webgpu/ExternalTextureD3D11.h | 48
-rw-r--r-- dom/webgpu/Instance.cpp | 125
-rw-r--r-- dom/webgpu/Instance.h | 62
-rw-r--r-- dom/webgpu/InternalError.cpp | 21
-rw-r--r-- dom/webgpu/InternalError.h | 40
-rw-r--r-- dom/webgpu/ObjectModel.cpp | 41
-rw-r--r-- dom/webgpu/ObjectModel.h | 134
-rw-r--r-- dom/webgpu/OutOfMemoryError.cpp | 21
-rw-r--r-- dom/webgpu/OutOfMemoryError.h | 40
-rw-r--r-- dom/webgpu/PipelineLayout.cpp | 34
-rw-r--r-- dom/webgpu/PipelineLayout.h | 33
-rw-r--r-- dom/webgpu/QuerySet.cpp | 22
-rw-r--r-- dom/webgpu/QuerySet.h | 31
-rw-r--r-- dom/webgpu/Queue.cpp | 413
-rw-r--r-- dom/webgpu/Queue.h | 76
-rw-r--r-- dom/webgpu/RenderBundle.cpp | 35
-rw-r--r-- dom/webgpu/RenderBundle.h | 32
-rw-r--r-- dom/webgpu/RenderBundleEncoder.cpp | 211
-rw-r--r-- dom/webgpu/RenderBundleEncoder.h | 74
-rw-r--r-- dom/webgpu/RenderPassEncoder.cpp | 328
-rw-r--r-- dom/webgpu/RenderPassEncoder.h | 101
-rw-r--r-- dom/webgpu/RenderPipeline.cpp | 58
-rw-r--r-- dom/webgpu/RenderPipeline.h | 41
-rw-r--r-- dom/webgpu/Sampler.cpp | 34
-rw-r--r-- dom/webgpu/Sampler.h | 33
-rw-r--r-- dom/webgpu/ShaderModule.cpp | 47
-rw-r--r-- dom/webgpu/ShaderModule.h | 39
-rw-r--r-- dom/webgpu/SupportedFeatures.cpp | 27
-rw-r--r-- dom/webgpu/SupportedFeatures.h | 43
-rw-r--r-- dom/webgpu/SupportedLimits.cpp | 201
-rw-r--r-- dom/webgpu/SupportedLimits.h | 113
-rw-r--r-- dom/webgpu/Texture.cpp | 121
-rw-r--r-- dom/webgpu/Texture.h | 73
-rw-r--r-- dom/webgpu/TextureView.cpp | 39
-rw-r--r-- dom/webgpu/TextureView.h | 35
-rw-r--r-- dom/webgpu/Utility.cpp | 287
-rw-r--r-- dom/webgpu/Utility.h | 58
-rw-r--r-- dom/webgpu/ValidationError.cpp | 21
-rw-r--r-- dom/webgpu/ValidationError.h | 40
-rw-r--r-- dom/webgpu/crashtests/1809567.html | 72
-rw-r--r-- dom/webgpu/crashtests/crashtests.list | 1
-rw-r--r-- dom/webgpu/ipc/PWebGPU.ipdl | 100
-rw-r--r-- dom/webgpu/ipc/PWebGPUTypes.ipdlh | 26
-rw-r--r-- dom/webgpu/ipc/WebGPUChild.cpp | 270
-rw-r--r-- dom/webgpu/ipc/WebGPUChild.h | 115
-rw-r--r-- dom/webgpu/ipc/WebGPUParent.cpp | 1557
-rw-r--r-- dom/webgpu/ipc/WebGPUParent.h | 238
-rw-r--r-- dom/webgpu/ipc/WebGPUSerialize.h | 63
-rw-r--r-- dom/webgpu/ipc/WebGPUTypes.h | 82
-rw-r--r-- dom/webgpu/mochitest/mochitest-no-pref.toml | 10
-rw-r--r-- dom/webgpu/mochitest/mochitest.toml | 116
-rw-r--r-- dom/webgpu/mochitest/test_basic_canvas.worker.html | 18
-rw-r--r-- dom/webgpu/mochitest/test_basic_canvas.worker.js | 32
-rw-r--r-- dom/webgpu/mochitest/test_buffer_mapping.html | 73
-rw-r--r-- dom/webgpu/mochitest/test_buffer_mapping_invalid_device.html | 60
-rw-r--r-- dom/webgpu/mochitest/test_command_buffer_creation.html | 29
-rw-r--r-- dom/webgpu/mochitest/test_context_configure.html | 48
-rw-r--r-- dom/webgpu/mochitest/test_device_creation.html | 29
-rw-r--r-- dom/webgpu/mochitest/test_device_lost.html | 73
-rw-r--r-- dom/webgpu/mochitest/test_disabled.html | 17
-rw-r--r-- dom/webgpu/mochitest/test_double_encoder_finish.html | 36
-rw-r--r-- dom/webgpu/mochitest/test_enabled.html | 17
-rw-r--r-- dom/webgpu/mochitest/test_error_scope.html | 43
-rw-r--r-- dom/webgpu/mochitest/test_insecure_context.html | 22
-rw-r--r-- dom/webgpu/mochitest/test_navigator_gpu_not_replaceable.html | 32
-rw-r--r-- dom/webgpu/mochitest/test_queue_copyExternalImageToTexture.html | 261
-rw-r--r-- dom/webgpu/mochitest/test_queue_write.html | 50
-rw-r--r-- dom/webgpu/mochitest/test_queue_write_invalid_device.html | 44
-rw-r--r-- dom/webgpu/mochitest/test_submit_compute_empty.html | 32
-rw-r--r-- dom/webgpu/mochitest/test_submit_render_empty.html | 57
-rw-r--r-- dom/webgpu/mochitest/test_submit_render_empty.worker.html | 14
-rw-r--r-- dom/webgpu/mochitest/test_submit_render_empty.worker.js | 49
-rw-r--r-- dom/webgpu/mochitest/worker_wrapper.js | 33
-rw-r--r-- dom/webgpu/moz.build | 88
-rw-r--r-- dom/webgpu/tests/cts/README.md | 17
-rw-r--r-- dom/webgpu/tests/cts/checkout/.eslint-resolver.js | 23
-rw-r--r-- dom/webgpu/tests/cts/checkout/.eslintignore | 1
-rw-r--r-- dom/webgpu/tests/cts/checkout/.eslintrc.json | 138
-rw-r--r-- dom/webgpu/tests/cts/checkout/.github/pull_request_template.md | 21
-rw-r--r-- dom/webgpu/tests/cts/checkout/.github/workflows/pr.yml | 25
-rw-r--r-- dom/webgpu/tests/cts/checkout/.github/workflows/push.yml | 26
-rw-r--r-- dom/webgpu/tests/cts/checkout/.gitignore | 196
-rw-r--r-- dom/webgpu/tests/cts/checkout/CONTRIBUTING.md | 31
-rw-r--r-- dom/webgpu/tests/cts/checkout/Gruntfile.js | 247
-rw-r--r-- dom/webgpu/tests/cts/checkout/LICENSE.txt | 26
-rw-r--r-- dom/webgpu/tests/cts/checkout/README.md | 22
-rw-r--r-- dom/webgpu/tests/cts/checkout/babel.config.js | 21
-rw-r--r-- dom/webgpu/tests/cts/checkout/cts.code-workspace | 111
-rw-r--r-- dom/webgpu/tests/cts/checkout/docs/adding_timing_metadata.md | 163
-rw-r--r-- dom/webgpu/tests/cts/checkout/docs/build.md | 43
-rw-r--r-- dom/webgpu/tests/cts/checkout/docs/deno.md | 24
-rw-r--r-- dom/webgpu/tests/cts/checkout/docs/fp_primer.md | 871
-rw-r--r-- dom/webgpu/tests/cts/checkout/docs/helper_index.txt | 93
-rw-r--r-- dom/webgpu/tests/cts/checkout/docs/implementing.md | 97
-rw-r--r-- dom/webgpu/tests/cts/checkout/docs/intro/README.md | 99
-rw-r--r-- dom/webgpu/tests/cts/checkout/docs/intro/convert_to_issue.png | bin 0 -> 2061 bytes
-rw-r--r-- dom/webgpu/tests/cts/checkout/docs/intro/developing.md | 134
-rw-r--r-- dom/webgpu/tests/cts/checkout/docs/intro/life_of.md | 46
-rw-r--r-- dom/webgpu/tests/cts/checkout/docs/intro/plans.md | 82
-rw-r--r-- dom/webgpu/tests/cts/checkout/docs/intro/tests.md | 25
-rw-r--r-- dom/webgpu/tests/cts/checkout/docs/organization.md | 166
-rw-r--r-- dom/webgpu/tests/cts/checkout/docs/reviews.md | 70
-rw-r--r-- dom/webgpu/tests/cts/checkout/docs/terms.md | 270
-rw-r--r-- dom/webgpu/tests/cts/checkout/node.tsconfig.json | 20
-rw-r--r-- dom/webgpu/tests/cts/checkout/package-lock.json | 18083
-rw-r--r-- dom/webgpu/tests/cts/checkout/package.json | 80
-rw-r--r-- dom/webgpu/tests/cts/checkout/prettier.config.js | 8
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/framework/data_cache.ts | 197
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/framework/fixture.ts | 370
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/framework/metadata.ts | 28
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/framework/params_builder.ts | 389
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/framework/resources.ts | 110
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/framework/test_config.ts | 32
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/framework/test_group.ts | 1
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/internal/file_loader.ts | 105
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/internal/logging/log_message.ts | 44
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/internal/logging/logger.ts | 30
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/internal/logging/result.ts | 21
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/internal/logging/test_case_recorder.ts | 184
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/internal/params_utils.ts | 138
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/internal/query/compare.ts | 95
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/internal/query/encode_selectively.ts | 23
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/internal/query/json_param_value.ts | 114
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/internal/query/parseQuery.ts | 155
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/internal/query/query.ts | 262
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/internal/query/separators.ts | 14
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/internal/query/stringify_params.ts | 44
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/internal/query/validQueryPart.ts | 2
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/internal/stack.ts | 82
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/internal/test_group.ts | 754
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/internal/test_suite_listing.ts | 15
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/internal/tree.ts | 671
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/internal/util.ts | 10
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/internal/version.ts | 1
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/internal/websocket_logger.ts | 52
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/runtime/cmdline.ts | 286
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/runtime/helper/options.ts | 129
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/runtime/helper/sys.ts | 46
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/runtime/helper/test_worker-worker.ts | 48
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/runtime/helper/test_worker.ts | 49
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/runtime/server.ts | 236
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/runtime/standalone.ts | 679
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/runtime/wpt.ts | 83
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/templates/cts.https.html | 32
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/tools/.eslintrc.json | 11
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/tools/checklist.ts | 136
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/tools/crawl.ts | 167
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/tools/dev_server.ts | 214
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/tools/gen_cache.ts | 198
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/tools/gen_listings.ts | 63
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/tools/gen_wpt_cts_html.ts | 252
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/tools/image_utils.ts | 58
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/tools/merge_listing_times.ts | 177
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/tools/run_wpt_ref_tests.ts | 446
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/tools/setup-ts-in-node.js | 51
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/tools/validate.ts | 36
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/tools/version.ts | 4
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/util/collect_garbage.ts | 58
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/util/colors.ts | 127
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/util/data_tables.ts | 129
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/util/navigator_gpu.ts | 86
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/util/preprocessor.ts | 149
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/util/timeout.ts | 7
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/util/types.ts | 97
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/util/util.ts | 476
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/common/util/wpt_reftest_wait.ts | 24
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/demo/README.txt | 1
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/demo/a.spec.ts | 8
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/demo/a/README.txt | 1
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/demo/a/b.spec.ts | 6
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/demo/a/b/README.txt | 1
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/demo/a/b/c.spec.ts | 80
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/demo/a/b/d.spec.ts | 8
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/demo/file_depth_2/in_single_child_dir/r.spec.ts | 6
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/demo/json.spec.ts | 10
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/demo/subcases.spec.ts | 38
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/external/README.md | 31
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/external/petamoriken/float16/LICENSE.txt | 21
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/external/petamoriken/float16/float16.d.ts | 471
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/external/petamoriken/float16/float16.js | 1228
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/manual/README.txt | 18
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/resources/README.md | 15
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/resources/four-colors-h264-bt601-rotate-180.mp4 | bin 0 -> 16261 bytes
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/resources/four-colors-h264-bt601-rotate-270.mp4 | bin 0 -> 16261 bytes
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/resources/four-colors-h264-bt601-rotate-90.mp4 | bin 0 -> 16261 bytes
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/resources/four-colors-h264-bt601.mp4 | bin 0 -> 16261 bytes
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/resources/four-colors-theora-bt601.ogv | bin 0 -> 44488 bytes
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/resources/four-colors-vp8-bt601.webm | bin 0 -> 17910 bytes
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/resources/four-colors-vp9-bt601.webm | bin 0 -> 13116 bytes
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/resources/four-colors-vp9-bt709.webm | bin 0 -> 12584 bytes
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/resources/four-colors.png | bin 0 -> 840 bytes
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/resources/webgpu.png | bin 0 -> 33475 bytes
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/README.txt | 6
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/adapter/README.txt | 1
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/adapter/device_allocation.spec.ts | 292
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/compute/README.txt | 1
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/compute/compute_pass.spec.ts | 243
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/device/README.txt | 2
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/device/bind_group_allocation.spec.ts | 65
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/device/bind_group_layout_allocation.spec.ts | 20
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/device/buffer_allocation.spec.ts | 25
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/device/command_encoder_allocation.spec.ts | 20
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/device/compute_pipeline_allocation.spec.ts | 20
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/device/pipeline_layout_allocation.spec.ts | 20
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/device/query_set_allocation.spec.ts | 27
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/device/render_bundle_allocation.spec.ts | 20
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/device/render_pipeline_allocation.spec.ts | 20
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/device/sampler_allocation.spec.ts | 20
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/device/shader_module_allocation.spec.ts | 20
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/device/texture_allocation.spec.ts | 27
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/listing.ts | 5
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/memory/README.txt | 1
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/memory/churn.spec.ts | 17
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/memory/oom.spec.ts | 178
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/queries/README.txt | 1
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/queries/occlusion.spec.ts | 10
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/queries/resolve.spec.ts | 15
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/queries/timestamps.spec.ts | 50
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/queue/README.txt | 1
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/queue/submit.spec.ts | 102
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/render/README.txt | 3
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/render/render_pass.spec.ts | 354
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/render/vertex_buffers.spec.ts | 130
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/shaders/README.txt | 1
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/shaders/entry_points.spec.ts | 78
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/shaders/non_halting.spec.ts | 194
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/shaders/slow.spec.ts | 191
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/texture/README.txt | 1
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/stress/texture/large.spec.ts | 56
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/unittests/README.txt | 1
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/unittests/async_expectations.spec.ts | 168
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/unittests/basic.spec.ts | 35
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/unittests/check_contents.spec.ts | 71
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/unittests/conversion.spec.ts | 640
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/unittests/floating_point.spec.ts | 8238
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/unittests/getStackTrace.spec.ts | 138
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/unittests/listing.ts | 5
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/unittests/loaders_and_trees.spec.ts | 978
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/unittests/logger.spec.ts | 173
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/unittests/maths.spec.ts | 1924
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/unittests/params_builder_and_utils.spec.ts | 549
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/unittests/params_builder_toplevel.spec.ts | 112
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/unittests/preprocessor.spec.ts | 207
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/unittests/prng.spec.ts | 74
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/unittests/query_compare.spec.ts | 144
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/unittests/query_string.spec.ts | 268
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/unittests/serialization.spec.ts | 413
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/unittests/test_group.spec.ts | 437
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/unittests/test_group_test.ts | 34
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/unittests/test_query.spec.ts | 143
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/unittests/texture_ok.spec.ts | 161
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/unittests/unit_test.ts | 3
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/README.txt | 1
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/README.txt | 1
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/README.txt | 2
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/adapter/requestAdapter.spec.ts | 124
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/adapter/requestAdapterInfo.spec.ts | 54
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/adapter/requestDevice.spec.ts | 376
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/async_ordering/README.txt | 12
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/README.txt | 1
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/map.spec.ts | 510
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/map_ArrayBuffer.spec.ts | 89
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/map_detach.spec.ts | 79
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/map_oom.spec.ts | 50
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/mapping_test.ts | 39
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/threading.spec.ts | 29
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/basic.spec.ts | 98
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/clearBuffer.spec.ts | 54
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/copyBufferToBuffer.spec.ts | 108
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/copyTextureToTexture.spec.ts | 1686
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/image_copy.spec.ts | 2098
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/programmable/programmable_state_test.ts | 157
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/programmable/state_tracking.spec.ts | 306
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/queries/README.txt | 5
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/queries/occlusionQuery.spec.ts | 1033
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/render/dynamic_state.spec.ts | 19
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/render/state_tracking.spec.ts | 624
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/compute/basic.spec.ts | 162
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/compute_pipeline/entry_point_name.spec.ts | 12
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/compute_pipeline/overrides.spec.ts | 503
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/device/lost.spec.ts | 92
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/labels.spec.ts | 280
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_allocation/README.txt | 7
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_sync/buffer/buffer_sync_test.ts | 942
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_sync/buffer/multiple_buffers.spec.ts | 354
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_sync/buffer/single_buffer.spec.ts | 257
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_sync/operation_context_helper.ts | 330
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_sync/texture/same_subresource.spec.ts | 709
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_sync/texture/texture_sync_test.ts | 124
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/onSubmittedWorkDone.spec.ts | 56
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/pipeline/default_layout.spec.ts | 27
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/queue/writeBuffer.spec.ts | 235
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/reflection.spec.ts | 137
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pass/README.txt | 1
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pass/clear_value.spec.ts | 188
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pass/resolve.spec.ts | 183
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pass/storeOp.spec.ts | 354
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pass/storeop2.spec.ts | 83
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pipeline/culling_tests.spec.ts | 359
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pipeline/overrides.spec.ts | 453
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pipeline/pipeline_output_targets.spec.ts | 450
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pipeline/primitive_topology.spec.ts | 488
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pipeline/sample_mask.spec.ts | 806
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pipeline/vertex_only_render_pipeline.spec.ts | 29
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/basic.spec.ts | 353
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/color_target_state.spec.ts | 818
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/depth.spec.ts | 546
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/depth_bias.spec.ts | 352
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/depth_clip_clamp.spec.ts | 524
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/draw.spec.ts | 768
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/indirect_draw.spec.ts | 242
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/robust_access_index.spec.ts | 8
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/stencil.spec.ts | 584
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/resource_init/buffer.spec.ts | 899
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/resource_init/check_texture/by_copy.ts | 66
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/resource_init/check_texture/by_ds_test.ts | 200
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/resource_init/check_texture/by_sampling.ts | 157
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/resource_init/texture_zero.spec.ts | 645
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/sampling/anisotropy.spec.ts | 325
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/sampling/filter_mode.spec.ts | 1143
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/sampling/lod_clamp.spec.ts | 12
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/shader_module/compilation_info.spec.ts | 264
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/texture_view/format_reinterpretation.spec.ts | 358
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/texture_view/read.spec.ts | 56
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/texture_view/write.spec.ts | 54
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/threading/README.txt | 11
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/uncapturederror.spec.ts | 34
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/vertex_state/correctness.spec.ts | 1180
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/vertex_state/index_format.spec.ts | 584
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/regression/README.txt | 2
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/README.txt | 1
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/buffer/create.spec.ts | 113
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/buffer/destroy.spec.ts | 101
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/buffer/mapping.spec.ts | 1125
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/buffer/threading.spec.ts | 14
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/features/README.txt | 10
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/features/query_types.spec.ts | 76
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/features/texture_formats.spec.ts | 463
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/README.txt | 8
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/limit_utils.ts | 1089
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxBindGroups.spec.ts | 95
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxBindingsPerBindGroup.spec.ts | 75
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxBufferSize.spec.ts | 28
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxColorAttachmentBytesPerSample.spec.ts | 260
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxColorAttachments.spec.ts | 124
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxComputeInvocationsPerWorkgroup.spec.ts | 147
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxComputeWorkgroupSizeX.spec.ts | 20
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxComputeWorkgroupSizeY.spec.ts | 20
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxComputeWorkgroupSizeZ.spec.ts | 20
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxComputeWorkgroupStorageSize.spec.ts | 182
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxComputeWorkgroupsPerDimension.spec.ts | 97
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxDynamicStorageBuffersPerPipelineLayout.spec.ts | 39
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxDynamicUniformBuffersPerPipelineLayout.spec.ts | 42
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxInterStageShaderComponents.spec.ts | 151
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxInterStageShaderVariables.spec.ts | 44
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxSampledTexturesPerShaderStage.spec.ts | 144
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxSamplersPerShaderStage.spec.ts | 145
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxStorageBufferBindingSize.spec.ts | 161
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxStorageBuffersPerShaderStage.spec.ts | 174
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxStorageTexturesPerShaderStage.spec.ts | 156
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxTextureArrayLayers.spec.ts | 27
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxTextureDimension1D.spec.ts | 34
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxTextureDimension2D.spec.ts | 133
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxTextureDimension3D.spec.ts | 39
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxUniformBufferBindingSize.spec.ts | 90
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxUniformBuffersPerShaderStage.spec.ts | 144
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxVertexAttributes.spec.ts | 43
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxVertexBufferArrayStride.spec.ts | 121
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxVertexBuffers.spec.ts | 100
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/minStorageBufferOffsetAlignment.spec.ts | 183
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/minUniformBufferOffsetAlignment.spec.ts | 186
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/compute_pipeline.spec.ts | 692
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/createBindGroup.spec.ts | 1110
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/createBindGroupLayout.spec.ts | 464
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/createPipelineLayout.spec.ts | 164
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/createSampler.spec.ts | 68
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/createTexture.spec.ts | 1130
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/createView.spec.ts | 340
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/debugMarker.spec.ts | 98
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/beginComputePass.spec.ts | 147
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/beginRenderPass.spec.ts | 215
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/clearBuffer.spec.ts | 246
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/compute_pass.spec.ts | 259
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/copyBufferToBuffer.spec.ts | 326
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/copyTextureToTexture.spec.ts | 874
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/debug.spec.ts | 66
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/index_access.spec.ts | 162
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/draw.spec.ts | 877
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/dynamic_state.spec.ts | 319
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/indirect_draw.spec.ts | 202
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/render.ts | 29
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/setIndexBuffer.spec.ts | 124
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/setPipeline.spec.ts | 62
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/setVertexBuffer.spec.ts | 144
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/state_tracking.spec.ts | 184
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render_pass.spec.ts | 14
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/setBindGroup.spec.ts | 435
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/createRenderBundleEncoder.spec.ts | 259
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/encoder_open_state.spec.ts | 587
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/encoder_state.spec.ts | 250
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/programmable/pipeline_bind_group_compat.spec.ts | 777
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/queries/begin_end.spec.ts | 117
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/queries/common.ts | 37
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/queries/general.spec.ts | 152
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/queries/resolveQuerySet.spec.ts | 181
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/render_bundle.spec.ts | 258
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/error_scope.spec.ts | 291
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/getBindGroupLayout.spec.ts | 201
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/gpu_external_texture_expiration.spec.ts | 332
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/image_copy/README.txt | 32
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/image_copy/buffer_related.spec.ts | 226
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/image_copy/buffer_texture_copies.spec.ts | 453
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/image_copy/image_copy.ts | 278
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/image_copy/layout_related.spec.ts | 483
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/image_copy/texture_related.spec.ts | 534
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/layout_shader_compat.spec.ts | 14
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/query_set/create.spec.ts | 34
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/query_set/destroy.spec.ts | 33
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/README.txt | 13
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/buffer_mapped.spec.ts | 280
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/copyToTexture/CopyExternalImageToTexture.spec.ts | 816
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/destroyed/buffer.spec.ts | 296
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/destroyed/query_set.spec.ts | 63
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/destroyed/texture.spec.ts | 294
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/submit.spec.ts | 47
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/writeBuffer.spec.ts | 200
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/writeTexture.spec.ts | 110
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pass/README.txt | 1
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pass/attachment_compatibility.spec.ts | 690
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pass/render_pass_descriptor.spec.ts | 1097
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pass/resolve.spec.ts | 192
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/common.ts | 68
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/depth_stencil_state.spec.ts | 304
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/fragment_state.spec.ts | 427
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/inter_stage.spec.ts | 324
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/misc.spec.ts | 98
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/multisample_state.spec.ts | 87
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/overrides.spec.ts | 535
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/primitive_state.spec.ts | 42
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/shader_module.spec.ts | 112
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/vertex_state.spec.ts | 765
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/resource_usages/buffer/README.txt | 1
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/resource_usages/buffer/in_pass_encoder.spec.ts | 928
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/resource_usages/buffer/in_pass_misc.spec.ts | 409
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/resource_usages/texture/in_pass_encoder.spec.ts | 1395
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/resource_usages/texture/in_render_common.spec.ts | 566
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/resource_usages/texture/in_render_misc.spec.ts | 420
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/shader_module/entry_point.spec.ts | 117
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/shader_module/overrides.spec.ts | 96
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/state/device_lost/README.txt | 5
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/state/device_lost/destroy.spec.ts | 1170
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/texture/bgra8unorm_storage.spec.ts | 205
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/texture/destroy.spec.ts | 139
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/texture/float32_filterable.spec.ts | 58
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/texture/rg11b10ufloat_renderable.spec.ts | 149
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/validation_test.ts | 448
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/capability_info.ts | 792
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/compat/README.md | 3
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/encoding/cmds/copyTextureToBuffer.spec.ts | 44
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/encoding/programmable/pipeline_bind_group_compat.spec.ts | 423
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/render_pipeline/fragment_state.spec.ts | 128
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/render_pipeline/shader_module.spec.ts | 74
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/render_pipeline/vertex_state.spec.ts | 91
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/texture/createTexture.spec.ts | 41
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/texture/cubeArray.spec.ts | 26
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/compat/compatibility_test.ts | 10
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/constants.ts | 62
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/examples.spec.ts | 275
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/format_info.ts | 1273
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/gpu_test.ts | 1681
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/idl/README.txt | 7
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/idl/constants/flags.spec.ts | 79
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/idl/exposed.html.ts | 52
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/idl/exposed.http.html | 13
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/idl/exposed.https.html | 13
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/idl/idl_test.ts | 41
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/listing.ts | 5
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/listing_meta.json | 2002
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/README.txt | 1
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/README.txt | 1
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_addition.spec.ts | 154
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_comparison.spec.ts | 214
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_division.spec.ts | 154
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_matrix_addition.spec.ts | 61
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_matrix_subtraction.spec.ts | 61
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_multiplication.spec.ts | 154
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_remainder.spec.ts | 154
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_subtraction.spec.ts | 154
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/binary.ts | 21
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/bitwise.spec.ts | 303
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/bitwise_shift.spec.ts | 343
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/bool_logical.spec.ts | 187
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_addition.spec.ts | 212
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_comparison.spec.ts | 280
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_division.spec.ts | 212
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_matrix_addition.spec.ts | 101
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_matrix_matrix_multiplication.spec.ts | 114
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_matrix_scalar_multiplication.spec.ts | 161
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_matrix_subtraction.spec.ts | 101
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_matrix_vector_multiplication.spec.ts | 156
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_multiplication.spec.ts | 212
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_remainder.spec.ts | 212
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_subtraction.spec.ts | 212
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_addition.spec.ts | 194
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_comparison.spec.ts | 262
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_division.spec.ts | 194
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_matrix_addition.spec.ts | 95
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_matrix_matrix_multiplication.spec.ts | 108
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_matrix_scalar_multiplication.spec.ts | 152
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_matrix_subtraction.spec.ts | 95
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_matrix_vector_multiplication.spec.ts | 147
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_multiplication.spec.ts | 194
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_remainder.spec.ts | 194
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_subtraction.spec.ts | 194
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/i32_arithmetic.spec.ts | 738
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/i32_comparison.spec.ts | 121
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/u32_arithmetic.spec.ts | 725
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/u32_comparison.spec.ts | 121
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/abs.spec.ts | 196
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/acos.spec.ts | 78
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/acosh.spec.ts | 81
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/all.spec.ts | 92
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/any.spec.ts | 92
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/arrayLength.spec.ts | 306
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/asin.spec.ts | 78
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/asinh.spec.ts | 65
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atan.spec.ts | 80
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atan2.spec.ts | 83
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atanh.spec.ts | 87
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicAdd.spec.ts | 101
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicAnd.spec.ts | 135
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicCompareExchangeWeak.spec.ts | 742
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicExchange.spec.ts | 470
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicLoad.spec.ts | 192
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicMax.spec.ts | 101
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicMin.spec.ts | 100
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicOr.spec.ts | 131
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicStore.spec.ts | 301
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicSub.spec.ts | 101
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicXor.spec.ts | 135
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/harness.ts | 208
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/bitcast.spec.ts | 1275
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/builtin.ts | 24
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/ceil.spec.ts | 101
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/clamp.spec.ts | 195
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/cos.spec.ts | 84
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/cosh.spec.ts | 68
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/countLeadingZeros.spec.ts | 250
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/countOneBits.spec.ts | 249
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/countTrailingZeros.spec.ts | 250
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/cross.spec.ts | 113
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/degrees.spec.ts | 95
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/determinant.spec.ts | 137
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/distance.spec.ts | 241
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dot.spec.ts | 182
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dpdx.spec.ts | 23
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dpdxCoarse.spec.ts | 22
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dpdxFine.spec.ts | 21
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dpdy.spec.ts | 22
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dpdyCoarse.spec.ts | 22
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dpdyFine.spec.ts | 21
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/exp.spec.ts | 90
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/exp2.spec.ts | 90
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/extractBits.spec.ts | 337
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/faceForward.spec.ts | 256
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/firstLeadingBit.spec.ts | 350
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/firstTrailingBit.spec.ts | 250
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/floor.spec.ts | 96
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/fma.spec.ts | 113
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/fract.spec.ts | 103
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/frexp.spec.ts | 475
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/fwidth.spec.ts | 21
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/fwidthCoarse.spec.ts | 21
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/fwidthFine.spec.ts | 21
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/insertBits.spec.ts | 386
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/inversesqrt.spec.ts | 81
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/ldexp.spec.ts | 121
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/length.spec.ts | 178
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/log.spec.ts | 89
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/log2.spec.ts | 89
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/max.spec.ts | 165
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/min.spec.ts | 164
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/mix.spec.ts | 275
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/modf.spec.ts | 661
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/normalize.spec.ts | 137
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/pack2x16float.spec.ts | 88
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/pack2x16snorm.spec.ts | 55
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/pack2x16unorm.spec.ts | 55
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/pack4x8snorm.spec.ts | 60
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/pack4x8unorm.spec.ts | 60
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/pow.spec.ts | 88
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/quantizeToF16.spec.ts | 70
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/radians.spec.ts | 90
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/reflect.spec.ts | 180
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/refract.spec.ts | 253
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/reverseBits.spec.ts | 250
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/round.spec.ts | 79
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/saturate.spec.ts | 100
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/select.spec.ts | 253
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/sign.spec.ts | 109
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/sin.spec.ts | 84
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/sinh.spec.ts | 68
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/smoothstep.spec.ts | 94
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/sqrt.spec.ts | 68
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/step.spec.ts | 87
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/storageBarrier.spec.ts | 38
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/tan.spec.ts | 78
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/tanh.spec.ts | 62
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureDimension.spec.ts | 160
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureGather.spec.ts | 270
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureGatherCompare.spec.ts | 134
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureLoad.spec.ts | 185
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureNumLayers.spec.ts | 100
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureNumLevels.spec.ts | 65
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureNumSamples.spec.ts | 37
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureSample.spec.ts | 273
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureSampleBias.spec.ts | 163
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureSampleCompare.spec.ts | 145
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureSampleCompareLevel.spec.ts | 149
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureSampleGrad.spec.ts | 136
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureSampleLevel.spec.ts | 274
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureStore.spec.ts | 122
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/transpose.spec.ts | 158
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/trunc.spec.ts | 75
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/unpack2x16float.spec.ts | 48
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/unpack2x16snorm.spec.ts | 48
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/unpack2x16unorm.spec.ts | 48
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/unpack4x8snorm.spec.ts | 48
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/unpack4x8unorm.spec.ts | 48
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/utils.ts | 45
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/workgroupBarrier.spec.ts | 38
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/case_cache.ts | 200
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/expression.ts | 1436
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/af_arithmetic.spec.ts | 43
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/af_assignment.spec.ts | 112
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/bool_conversion.spec.ts | 174
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/bool_logical.spec.ts | 33
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/f16_arithmetic.spec.ts | 44
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/f16_conversion.spec.ts | 301
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/f32_arithmetic.spec.ts | 41
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/f32_conversion.spec.ts | 257
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/i32_arithmetic.spec.ts | 37
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/i32_complement.spec.ts | 37
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/i32_conversion.spec.ts | 196
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/u32_complement.spec.ts | 37
-rw-r--r-- dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/u32_conversion.spec.ts | 206
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/unary.ts15
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/float_parse.spec.ts131
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/call.spec.ts83
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/complex.spec.ts42
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/eval_order.spec.ts1007
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/for.spec.ts271
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/harness.ts312
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/if.spec.ts102
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/loop.spec.ts125
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/phony.spec.ts135
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/return.spec.ts56
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/switch.spec.ts156
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/while.spec.ts140
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/memory_model/adjacent.spec.ts272
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/memory_model/atomicity.spec.ts102
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/memory_model/barrier.spec.ts250
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/memory_model/coherence.spec.ts525
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/memory_model/memory_model_setup.ts1118
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/memory_model/weak.spec.ts429
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/padding.spec.ts406
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/robust_access.spec.ts480
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/robust_access_vertex.spec.ts607
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/shader_io/compute_builtins.spec.ts297
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/shader_io/shared_structs.spec.ts332
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/shadow.spec.ts406
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/statement/increment_decrement.spec.ts381
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/zero_init.spec.ts546
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/regression/README.txt2
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/types.ts289
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/README.txt1
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/const_assert/const_assert.spec.ts201
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/decl/const.spec.ts61
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/decl/override.spec.ts31
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/decl/ptr_spelling.spec.ts153
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/decl/util.ts163
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/decl/var_access_mode.spec.ts116
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/access/vector.spec.ts223
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/binary/bitwise_shift.spec.ts166
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/abs.spec.ts54
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/acos.spec.ts78
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/acosh.spec.ts80
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/asin.spec.ts78
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/asinh.spec.ts82
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/atan.spec.ts79
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/atan2.spec.ts106
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/atanh.spec.ts78
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/atomics.spec.ts70
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/bitcast.spec.ts393
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/ceil.spec.ts75
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/clamp.spec.ts57
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/const_override_validation.ts202
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/cos.spec.ts77
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/cosh.spec.ts78
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/degrees.spec.ts78
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/exp.spec.ts102
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/exp2.spec.ts102
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/inverseSqrt.spec.ts81
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/length.spec.ts221
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/log.spec.ts76
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/log2.spec.ts76
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/modf.spec.ts76
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/radians.spec.ts76
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/round.spec.ts84
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/saturate.spec.ts76
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/sign.spec.ts79
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/sin.spec.ts77
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/sinh.spec.ts78
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/sqrt.spec.ts81
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/tan.spec.ts83
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/functions/alias_analysis.spec.ts202
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/functions/restrictions.spec.ts757
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/align.spec.ts341
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/attribute.spec.ts87
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/binary_ops.spec.ts89
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/blankspace.spec.ts65
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/break.spec.ts84
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/builtin.spec.ts144
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/comments.spec.ts75
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/const.spec.ts57
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/const_assert.spec.ts38
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/diagnostic.spec.ts201
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/discard.spec.ts65
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/enable.spec.ts70
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/identifiers.spec.ts407
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/literal.spec.ts302
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/must_use.spec.ts269
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/pipeline_stage.spec.ts155
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/semicolon.spec.ts269
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/source.spec.ts29
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/unary_ops.spec.ts48
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/var_and_let.spec.ts106
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/binding.spec.ts140
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/builtins.spec.ts277
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/entry_point.spec.ts141
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/group.spec.ts140
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/group_and_binding.spec.ts171
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/id.spec.ts170
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/interpolate.spec.ts217
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/invariant.spec.ts99
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/locations.spec.ts382
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/size.spec.ts212
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/util.ts196
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/workgroup_size.spec.ts300
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_validation_test.ts177
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/types/alias.spec.ts123
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/types/struct.spec.ts99
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/types/vector.spec.ts78
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/uniformity/uniformity.spec.ts2444
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/shader/values.ts91
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/binary_stream.ts213
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/buffer.ts23
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/check_contents.ts272
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/color_space_conversion.ts265
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/command_buffer_maker.ts85
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/compare.ts472
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/constants.ts487
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/conversion.ts1635
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/copy_to_texture.ts192
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/create_elements.ts82
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/device_pool.ts414
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/floating_point.ts5441
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/math.ts2247
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/memory.ts25
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/pretty_diff_tables.ts51
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/prng.ts125
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/reinterpret.ts118
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/shader.ts196
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/texture.ts81
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/base.ts243
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/data_generation.ts83
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/layout.ts371
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/subresource.ts68
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texel_data.spec.ts334
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texel_data.ts980
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texel_view.ts201
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texture_ok.spec.ts159
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texture_ok.ts348
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/util/unions.ts45
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/README.txt5
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/canvas/README.txt1
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/canvas/configure.spec.ts426
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/canvas/context_creation.spec.ts47
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/canvas/getCurrentTexture.spec.ts383
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/canvas/getPreferredCanvasFormat.spec.ts19
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/canvas/readbackFromWebGPUCanvas.spec.ts481
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/ImageBitmap.spec.ts543
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/ImageData.spec.ts221
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/README.txt1
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/canvas.spec.ts841
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/image.spec.ts271
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/util.ts58
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/video.spec.ts119
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/external_texture/README.txt1
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/external_texture/video.spec.ts480
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/README.txt17
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_clear.html.ts34
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_clear.https.html12
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_colorspace.html.ts139
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_colorspace_bgra8unorm.https.html22
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_colorspace_rgba16float.https.html23
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_colorspace_rgba8unorm.https.html22
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex.html.ts772
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_bgra8unorm_copy.https.html24
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_bgra8unorm_draw.https.html24
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_rgba16float_copy.https.html24
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_rgba16float_draw.https.html24
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_rgba16float_store.https.html24
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_rgba8unorm_copy.https.html24
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_rgba8unorm_draw.https.html24
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_rgba8unorm_store.https.html24
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha.html.ts177
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_bgra8unorm_opaque_copy.https.html21
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_bgra8unorm_opaque_draw.https.html21
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_bgra8unorm_premultiplied_copy.https.html22
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_bgra8unorm_premultiplied_draw.https.html22
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba16float_opaque_copy.https.html21
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba16float_opaque_draw.https.html21
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba16float_premultiplied_copy.https.html22
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba16float_premultiplied_draw.https.html22
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba8unorm_opaque_copy.https.html21
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba8unorm_opaque_draw.https.html21
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba8unorm_premultiplied_copy.https.html22
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba8unorm_premultiplied_draw.https.html22
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_image_rendering.html.ts79
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_image_rendering.https.html15
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/create-pattern-data-url.ts23
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/gpu_ref_test.ts26
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_clear-ref.html22
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_colorspace-ref.html17
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_colorspace-ref.html.ts41
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_complex-ref.html26
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_composite_alpha_opaque-ref.html26
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_composite_alpha_premultiplied-ref.html26
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_image_rendering-ref.html25
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/resize_observer-ref.html90
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/resize_observer.html.ts150
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/resize_observer.https.html24
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/util.ts307
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/worker/worker.spec.ts35
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/worker/worker.ts83
-rw-r--r--dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/worker/worker_launcher.ts18
-rw-r--r--dom/webgpu/tests/cts/checkout/standalone/index.html453
-rw-r--r--dom/webgpu/tests/cts/checkout/standalone/third_party/jquery/LICENSE.txt9
-rw-r--r--dom/webgpu/tests/cts/checkout/standalone/third_party/jquery/jquery-3.3.1.min.js2
-rw-r--r--dom/webgpu/tests/cts/checkout/standalone/third_party/normalize.min.css1
-rw-r--r--dom/webgpu/tests/cts/checkout/standalone/webgpu-logo-notext.svg34
-rwxr-xr-xdom/webgpu/tests/cts/checkout/tools/checklist11
-rwxr-xr-xdom/webgpu/tests/cts/checkout/tools/dev_server4
-rw-r--r--dom/webgpu/tests/cts/checkout/tools/eslint-plugin-gpuweb-cts/index.js6
-rw-r--r--dom/webgpu/tests/cts/checkout/tools/eslint-plugin-gpuweb-cts/package.json8
-rw-r--r--dom/webgpu/tests/cts/checkout/tools/eslint-plugin-gpuweb-cts/tabs-anywhere.js29
-rw-r--r--dom/webgpu/tests/cts/checkout/tools/eslint-plugin-gpuweb-cts/trailing-space-anywhere.js29
-rwxr-xr-xdom/webgpu/tests/cts/checkout/tools/gen_cache4
-rwxr-xr-xdom/webgpu/tests/cts/checkout/tools/gen_listings7
-rwxr-xr-xdom/webgpu/tests/cts/checkout/tools/gen_version33
-rw-r--r--dom/webgpu/tests/cts/checkout/tools/gen_wpt_cfg_chunked2sec.json6
-rw-r--r--dom/webgpu/tests/cts/checkout/tools/gen_wpt_cfg_unchunked.json5
-rwxr-xr-xdom/webgpu/tests/cts/checkout/tools/gen_wpt_cts_html39
-rwxr-xr-xdom/webgpu/tests/cts/checkout/tools/merge_listing_times9
-rwxr-xr-xdom/webgpu/tests/cts/checkout/tools/run_deno3
-rwxr-xr-xdom/webgpu/tests/cts/checkout/tools/run_node6
-rw-r--r--dom/webgpu/tests/cts/checkout/tools/run_wpt_ref_tests4
-rwxr-xr-xdom/webgpu/tests/cts/checkout/tools/validate6
-rw-r--r--dom/webgpu/tests/cts/checkout/tools/websocket-logger/.gitignore1
-rw-r--r--dom/webgpu/tests/cts/checkout/tools/websocket-logger/README.md9
-rwxr-xr-xdom/webgpu/tests/cts/checkout/tools/websocket-logger/main.js25
-rw-r--r--dom/webgpu/tests/cts/checkout/tools/websocket-logger/package-lock.json39
-rw-r--r--dom/webgpu/tests/cts/checkout/tools/websocket-logger/package.json14
-rw-r--r--dom/webgpu/tests/cts/checkout/tsconfig.json64
-rw-r--r--dom/webgpu/tests/cts/checkout/w3c.json5
-rw-r--r--dom/webgpu/tests/cts/checkout_commit.txt1
-rw-r--r--dom/webgpu/tests/cts/vendor/Cargo.lock889
-rw-r--r--dom/webgpu/tests/cts/vendor/Cargo.toml20
-rw-r--r--dom/webgpu/tests/cts/vendor/src/fs.rs331
-rw-r--r--dom/webgpu/tests/cts/vendor/src/main.rs565
-rw-r--r--dom/webgpu/tests/cts/vendor/src/path.rs23
-rw-r--r--dom/webgpu/tests/cts/vendor/src/process.rs85
914 files changed, 200960 insertions, 0 deletions
diff --git a/dom/webgpu/Adapter.cpp b/dom/webgpu/Adapter.cpp
new file mode 100644
index 0000000000..434ba7c6fa
--- /dev/null
+++ b/dom/webgpu/Adapter.cpp
@@ -0,0 +1,503 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/BindingDeclarations.h"
+#include "mozilla/dom/WebGPUBinding.h"
+#include "Adapter.h"
+
+#include <algorithm>
+#include "Device.h"
+#include "Instance.h"
+#include "SupportedFeatures.h"
+#include "SupportedLimits.h"
+#include "ipc/WebGPUChild.h"
+#include "mozilla/dom/Promise.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+
+namespace mozilla::webgpu {
+
+bool AdapterInfo::WrapObject(JSContext* const cx,
+ JS::Handle<JSObject*> givenProto,
+ JS::MutableHandle<JSObject*> reflector) {
+ return dom::GPUAdapterInfo_Binding::Wrap(cx, this, givenProto, reflector);
+}
+
+void AdapterInfo::GetWgpuName(nsString& s) const {
+ s = mAboutSupportInfo->name;
+}
+
+uint32_t AdapterInfo::WgpuVendor() const { return mAboutSupportInfo->vendor; }
+
+uint32_t AdapterInfo::WgpuDevice() const { return mAboutSupportInfo->device; }
+
+void AdapterInfo::GetWgpuDeviceType(nsString& s) const {
+ switch (mAboutSupportInfo->device_type) {
+ case ffi::WGPUDeviceType_Cpu:
+ s.AssignLiteral("Cpu");
+ return;
+ case ffi::WGPUDeviceType_DiscreteGpu:
+ s.AssignLiteral("DiscreteGpu");
+ return;
+ case ffi::WGPUDeviceType_IntegratedGpu:
+ s.AssignLiteral("IntegratedGpu");
+ return;
+ case ffi::WGPUDeviceType_VirtualGpu:
+ s.AssignLiteral("VirtualGpu");
+ return;
+ case ffi::WGPUDeviceType_Other:
+ s.AssignLiteral("Other");
+ return;
+ case ffi::WGPUDeviceType_Sentinel:
+ break;
+ }
+ MOZ_CRASH("Bad `ffi::WGPUDeviceType`");
+}
+
+void AdapterInfo::GetWgpuDriver(nsString& s) const {
+ s = mAboutSupportInfo->driver;
+}
+
+void AdapterInfo::GetWgpuDriverInfo(nsString& s) const {
+ s = mAboutSupportInfo->driver_info;
+}
+
+void AdapterInfo::GetWgpuBackend(nsString& s) const {
+ switch (mAboutSupportInfo->backend) {
+ case ffi::WGPUBackend_Empty:
+ s.AssignLiteral("Empty");
+ return;
+ case ffi::WGPUBackend_Vulkan:
+ s.AssignLiteral("Vulkan");
+ return;
+ case ffi::WGPUBackend_Metal:
+ s.AssignLiteral("Metal");
+ return;
+ case ffi::WGPUBackend_Dx12:
+ s.AssignLiteral("Dx12");
+ return;
+ case ffi::WGPUBackend_Gl:
+ s.AssignLiteral("Gl");
+ return;
+ case ffi::WGPUBackend_BrowserWebGpu: // This should never happen, because
+ // we _are_ the browser.
+ case ffi::WGPUBackend_Sentinel:
+ break;
+ }
+ MOZ_CRASH("Bad `ffi::WGPUBackend`");
+}
+
+// -
+
+GPU_IMPL_CYCLE_COLLECTION(Adapter, mParent, mBridge, mFeatures, mLimits)
+GPU_IMPL_JS_WRAP(Adapter)
+
+static Maybe<ffi::WGPUFeatures> ToWGPUFeatures(
+ const dom::GPUFeatureName aFeature) {
+ switch (aFeature) {
+ case dom::GPUFeatureName::Depth_clip_control:
+ return Some(WGPUFeatures_DEPTH_CLIP_CONTROL);
+
+ case dom::GPUFeatureName::Depth32float_stencil8:
+ return Some(WGPUFeatures_DEPTH32FLOAT_STENCIL8);
+
+ case dom::GPUFeatureName::Texture_compression_bc:
+ return Some(WGPUFeatures_TEXTURE_COMPRESSION_BC);
+
+ case dom::GPUFeatureName::Texture_compression_etc2:
+ return Some(WGPUFeatures_TEXTURE_COMPRESSION_ETC2);
+
+ case dom::GPUFeatureName::Texture_compression_astc:
+ return Some(WGPUFeatures_TEXTURE_COMPRESSION_ASTC);
+
+ case dom::GPUFeatureName::Timestamp_query:
+ return Some(WGPUFeatures_TIMESTAMP_QUERY);
+
+ case dom::GPUFeatureName::Indirect_first_instance:
+ return Some(WGPUFeatures_INDIRECT_FIRST_INSTANCE);
+
+ case dom::GPUFeatureName::Shader_f16:
+ return Some(WGPUFeatures_SHADER_F16);
+
+ case dom::GPUFeatureName::Rg11b10ufloat_renderable:
+ return Some(WGPUFeatures_RG11B10UFLOAT_RENDERABLE);
+
+ case dom::GPUFeatureName::Bgra8unorm_storage:
+ return Some(WGPUFeatures_BGRA8UNORM_STORAGE);
+
+ case dom::GPUFeatureName::Float32_filterable:
+ return Some(WGPUFeatures_FLOAT32_FILTERABLE);
+
+ case dom::GPUFeatureName::EndGuard_:
+ break;
+ }
+ MOZ_CRASH("Bad GPUFeatureName.");
+}
+
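+// Collapses a list of requested dom::GPUFeatureName values into a single
+// ffi::WGPUFeatures bitmask, returning Nothing() if any requested feature has
+// no corresponding wgpu bit.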
+static Maybe<ffi::WGPUFeatures> MakeFeatureBits(
+ const dom::Sequence<dom::GPUFeatureName>& aFeatures) {
+ ffi::WGPUFeatures bits = 0;
+ for (const auto& feature : aFeatures) {
+ const auto bit = ToWGPUFeatures(feature);
+ if (!bit) {
+ const auto featureStr = dom::GPUFeatureNameValues::GetString(feature);
+ (void)featureStr;
+ NS_WARNING(
+ nsPrintfCString("Requested feature bit for '%s' is not implemented.",
+ featureStr.data())
+ .get());
+ return Nothing();
+ }
+ bits |= *bit;
+ }
+ return Some(bits);
+}
+
+Adapter::Adapter(Instance* const aParent, WebGPUChild* const aBridge,
+ const std::shared_ptr<ffi::WGPUAdapterInformation>& aInfo)
+ : ChildOf(aParent),
+ mBridge(aBridge),
+ mId(aInfo->id),
+ mFeatures(new SupportedFeatures(this)),
+ mLimits(new SupportedLimits(this, aInfo->limits)),
+ mInfo(aInfo) {
+ ErrorResult ignoredRv; // It's onerous to plumb this in from outside in this
+ // case, and we don't really need to.
+
+ static const auto FEATURE_BY_BIT = []() {
+ auto ret = std::unordered_map<ffi::WGPUFeatures, dom::GPUFeatureName>{};
+
+ for (const auto feature :
+ MakeEnumeratedRange(dom::GPUFeatureName::EndGuard_)) {
+ const auto bitForFeature = ToWGPUFeatures(feature);
+ if (!bitForFeature) {
+ // There are some features that don't have bits.
+ continue;
+ }
+ ret[*bitForFeature] = feature;
+ }
+
+ return ret;
+ }();
+
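+  // Walk the adapter's feature bitmask one bit at a time and translate each
+  // set bit back into a dom::GPUFeatureName via the reverse table above;
+  // unrecognized bits (e.g. wgpu-native-only features) are skipped.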
+ auto remainingFeatureBits = aInfo->features;
+ auto bitMask = decltype(remainingFeatureBits){0};
+ while (remainingFeatureBits) {
+ if (bitMask) {
+ bitMask <<= 1;
+ } else {
+ bitMask = 1;
+ }
+ const auto bit = remainingFeatureBits & bitMask;
+ remainingFeatureBits &= ~bitMask; // Clear bit.
+ if (!bit) {
+ continue;
+ }
+
+ const auto featureForBit = FEATURE_BY_BIT.find(bit);
+ if (featureForBit != FEATURE_BY_BIT.end()) {
+ mFeatures->Add(featureForBit->second, ignoredRv);
+ } else {
+      // We don't recognize that bit, but maybe it's a wgpu-native-only feature.
+ }
+ }
+}
+
+Adapter::~Adapter() { Cleanup(); }
+
+void Adapter::Cleanup() {
+ if (mValid && mBridge && mBridge->CanSend()) {
+ mValid = false;
+ mBridge->SendAdapterDrop(mId);
+ }
+}
+
+const RefPtr<SupportedFeatures>& Adapter::Features() const { return mFeatures; }
+const RefPtr<SupportedLimits>& Adapter::Limits() const { return mLimits; }
+bool Adapter::IsFallbackAdapter() const {
+ return mInfo->device_type == ffi::WGPUDeviceType::WGPUDeviceType_Cpu;
+}
+
+static std::string_view ToJsKey(const Limit limit) {
+ switch (limit) {
+ case Limit::MaxTextureDimension1D:
+ return "maxTextureDimension1D";
+ case Limit::MaxTextureDimension2D:
+ return "maxTextureDimension2D";
+ case Limit::MaxTextureDimension3D:
+ return "maxTextureDimension3D";
+ case Limit::MaxTextureArrayLayers:
+ return "maxTextureArrayLayers";
+ case Limit::MaxBindGroups:
+ return "maxBindGroups";
+ case Limit::MaxBindGroupsPlusVertexBuffers:
+ return "maxBindGroupsPlusVertexBuffers";
+ case Limit::MaxBindingsPerBindGroup:
+ return "maxBindingsPerBindGroup";
+ case Limit::MaxDynamicUniformBuffersPerPipelineLayout:
+ return "maxDynamicUniformBuffersPerPipelineLayout";
+ case Limit::MaxDynamicStorageBuffersPerPipelineLayout:
+ return "maxDynamicStorageBuffersPerPipelineLayout";
+ case Limit::MaxSampledTexturesPerShaderStage:
+ return "maxSampledTexturesPerShaderStage";
+ case Limit::MaxSamplersPerShaderStage:
+ return "maxSamplersPerShaderStage";
+ case Limit::MaxStorageBuffersPerShaderStage:
+ return "maxStorageBuffersPerShaderStage";
+ case Limit::MaxStorageTexturesPerShaderStage:
+ return "maxStorageTexturesPerShaderStage";
+ case Limit::MaxUniformBuffersPerShaderStage:
+ return "maxUniformBuffersPerShaderStage";
+ case Limit::MaxUniformBufferBindingSize:
+ return "maxUniformBufferBindingSize";
+ case Limit::MaxStorageBufferBindingSize:
+ return "maxStorageBufferBindingSize";
+ case Limit::MinUniformBufferOffsetAlignment:
+ return "minUniformBufferOffsetAlignment";
+ case Limit::MinStorageBufferOffsetAlignment:
+ return "minStorageBufferOffsetAlignment";
+ case Limit::MaxVertexBuffers:
+ return "maxVertexBuffers";
+ case Limit::MaxBufferSize:
+ return "maxBufferSize";
+ case Limit::MaxVertexAttributes:
+ return "maxVertexAttributes";
+ case Limit::MaxVertexBufferArrayStride:
+ return "maxVertexBufferArrayStride";
+ case Limit::MaxInterStageShaderComponents:
+ return "maxInterStageShaderComponents";
+ case Limit::MaxInterStageShaderVariables:
+ return "maxInterStageShaderVariables";
+ case Limit::MaxColorAttachments:
+ return "maxColorAttachments";
+ case Limit::MaxColorAttachmentBytesPerSample:
+ return "maxColorAttachmentBytesPerSample";
+ case Limit::MaxComputeWorkgroupStorageSize:
+ return "maxComputeWorkgroupStorageSize";
+ case Limit::MaxComputeInvocationsPerWorkgroup:
+ return "maxComputeInvocationsPerWorkgroup";
+ case Limit::MaxComputeWorkgroupSizeX:
+ return "maxComputeWorkgroupSizeX";
+ case Limit::MaxComputeWorkgroupSizeY:
+ return "maxComputeWorkgroupSizeY";
+ case Limit::MaxComputeWorkgroupSizeZ:
+ return "maxComputeWorkgroupSizeZ";
+ case Limit::MaxComputeWorkgroupsPerDimension:
+ return "maxComputeWorkgroupsPerDimension";
+ }
+ MOZ_CRASH("Bad Limit");
+}
+
+// -
+// String helpers
+
+static auto ToACString(const nsAString& s) { return NS_ConvertUTF16toUTF8(s); }
+
+// -
+// Adapter::RequestDevice
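+//
+// Backs `GPUAdapter.requestDevice()`. For illustration, a content-side call
+// such as
+//   await adapter.requestDevice({
+//     requiredFeatures: ["shader-f16"],
+//     requiredLimits: { maxStorageBufferBindingSize: 1 << 28 },
+//   });
+// is validated below against the adapter's supported features and limits
+// before the device request is sent over IPC to the parent process.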
+
+already_AddRefed<dom::Promise> Adapter::RequestDevice(
+ const dom::GPUDeviceDescriptor& aDesc, ErrorResult& aRv) {
+ RefPtr<dom::Promise> promise = dom::Promise::Create(GetParentObject(), aRv);
+ if (NS_WARN_IF(aRv.Failed())) {
+ return nullptr;
+ }
+
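+  // Start from the adapter's limits, then reset every entry to the WebGPU
+  // default below; any `requiredLimits` entries from the descriptor adjust
+  // from there, validated against what the adapter actually supports.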
+ ffi::WGPULimits deviceLimits = *mLimits->mFfi;
+ for (const auto limit : MakeInclusiveEnumeratedRange(Limit::_LAST)) {
+ const auto defaultValue = [&]() -> double {
+ switch (limit) {
+ // clang-format off
+ case Limit::MaxTextureDimension1D: return 8192;
+ case Limit::MaxTextureDimension2D: return 8192;
+ case Limit::MaxTextureDimension3D: return 2048;
+ case Limit::MaxTextureArrayLayers: return 256;
+ case Limit::MaxBindGroups: return 4;
+ case Limit::MaxBindGroupsPlusVertexBuffers: return 24;
+ case Limit::MaxBindingsPerBindGroup: return 1000;
+ case Limit::MaxDynamicUniformBuffersPerPipelineLayout: return 8;
+ case Limit::MaxDynamicStorageBuffersPerPipelineLayout: return 4;
+ case Limit::MaxSampledTexturesPerShaderStage: return 16;
+ case Limit::MaxSamplersPerShaderStage: return 16;
+ case Limit::MaxStorageBuffersPerShaderStage: return 8;
+ case Limit::MaxStorageTexturesPerShaderStage: return 4;
+ case Limit::MaxUniformBuffersPerShaderStage: return 12;
+ case Limit::MaxUniformBufferBindingSize: return 65536;
+ case Limit::MaxStorageBufferBindingSize: return 134217728;
+ case Limit::MinUniformBufferOffsetAlignment: return 256;
+ case Limit::MinStorageBufferOffsetAlignment: return 256;
+ case Limit::MaxVertexBuffers: return 8;
+ case Limit::MaxBufferSize: return 268435456;
+ case Limit::MaxVertexAttributes: return 16;
+ case Limit::MaxVertexBufferArrayStride: return 2048;
+ case Limit::MaxInterStageShaderComponents: return 60;
+ case Limit::MaxInterStageShaderVariables: return 16;
+ case Limit::MaxColorAttachments: return 8;
+ case Limit::MaxColorAttachmentBytesPerSample: return 32;
+ case Limit::MaxComputeWorkgroupStorageSize: return 16384;
+ case Limit::MaxComputeInvocationsPerWorkgroup: return 256;
+ case Limit::MaxComputeWorkgroupSizeX: return 256;
+ case Limit::MaxComputeWorkgroupSizeY: return 256;
+ case Limit::MaxComputeWorkgroupSizeZ: return 64;
+ case Limit::MaxComputeWorkgroupsPerDimension: return 65535;
+ // clang-format on
+ }
+ MOZ_CRASH("Bad Limit");
+ }();
+ SetLimit(&deviceLimits, limit, defaultValue);
+ }
+
+ // -
+
+ [&]() { // So that we can `return;` instead of `return promise.forget();`.
+ if (!mBridge->CanSend()) {
+ promise->MaybeRejectWithInvalidStateError(
+ "WebGPUChild cannot send, must recreate Adapter");
+ return;
+ }
+
+ // -
+ // Validate Features
+
+ for (const auto requested : aDesc.mRequiredFeatures) {
+ const bool supported = mFeatures->Features().count(requested);
+ if (!supported) {
+ const auto fstr = dom::GPUFeatureNameValues::GetString(requested);
+ const auto astr = this->LabelOrId();
+ nsPrintfCString msg(
+ "requestDevice: Feature '%s' requested must be supported by "
+ "adapter %s",
+ fstr.data(), astr.get());
+ promise->MaybeRejectWithTypeError(msg);
+ return;
+ }
+ }
+
+ // -
+ // Validate Limits
+
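+    // For "max*" limits the request may not exceed the adapter's supported
+    // value and is raised to at least the default; for "min*" limits it may
+    // not be below the supported value, must be a power of two for alignment
+    // limits, and is lowered to at most the default.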
+ if (aDesc.mRequiredLimits.WasPassed()) {
+ static const auto LIMIT_BY_JS_KEY = []() {
+ std::unordered_map<std::string_view, Limit> ret;
+ for (const auto limit : MakeInclusiveEnumeratedRange(Limit::_LAST)) {
+ const auto jsKeyU8 = ToJsKey(limit);
+ ret[jsKeyU8] = limit;
+ }
+ return ret;
+ }();
+
+ for (const auto& entry : aDesc.mRequiredLimits.Value().Entries()) {
+ const auto& keyU16 = entry.mKey;
+ const nsCString keyU8 = ToACString(keyU16);
+ const auto itr = LIMIT_BY_JS_KEY.find(keyU8.get());
+ if (itr == LIMIT_BY_JS_KEY.end()) {
+ nsPrintfCString msg("requestDevice: Limit '%s' not recognized.",
+ keyU8.get());
+ promise->MaybeRejectWithOperationError(msg);
+ return;
+ }
+
+ const auto& limit = itr->second;
+ uint64_t requestedValue = entry.mValue;
+ const auto supportedValue = GetLimit(*mLimits->mFfi, limit);
+ if (StringBeginsWith(keyU8, "max"_ns)) {
+ if (requestedValue > supportedValue) {
+ nsPrintfCString msg(
+ "requestDevice: Request for limit '%s' must be <= supported "
+ "%s, was %s.",
+ keyU8.get(), std::to_string(supportedValue).c_str(),
+ std::to_string(requestedValue).c_str());
+ promise->MaybeRejectWithOperationError(msg);
+ return;
+ }
+ // Clamp to default if lower than default
+ requestedValue =
+ std::max(requestedValue, GetLimit(deviceLimits, limit));
+ } else {
+ MOZ_ASSERT(StringBeginsWith(keyU8, "min"_ns));
+ if (requestedValue < supportedValue) {
+ nsPrintfCString msg(
+ "requestDevice: Request for limit '%s' must be >= supported "
+ "%s, was %s.",
+ keyU8.get(), std::to_string(supportedValue).c_str(),
+ std::to_string(requestedValue).c_str());
+ promise->MaybeRejectWithOperationError(msg);
+ return;
+ }
+ if (StringEndsWith(keyU8, "Alignment"_ns)) {
+ if (!IsPowerOfTwo(requestedValue)) {
+ nsPrintfCString msg(
+ "requestDevice: Request for limit '%s' must be a power of "
+ "two, "
+ "was %s.",
+ keyU8.get(), std::to_string(requestedValue).c_str());
+ promise->MaybeRejectWithOperationError(msg);
+ return;
+ }
+ }
+          // Clamp to default if higher than default
+ requestedValue =
+ std::min(requestedValue, GetLimit(deviceLimits, limit));
+ }
+
+ SetLimit(&deviceLimits, limit, requestedValue);
+ }
+ }
+
+ // -
+
+ ffi::WGPUDeviceDescriptor ffiDesc = {};
+ ffiDesc.required_features = *MakeFeatureBits(aDesc.mRequiredFeatures);
+ ffiDesc.required_limits = deviceLimits;
+ auto request = mBridge->AdapterRequestDevice(mId, ffiDesc);
+ if (!request) {
+ promise->MaybeRejectWithNotSupportedError(
+ "Unable to instantiate a Device");
+ return;
+ }
+ RefPtr<Device> device =
+ new Device(this, request->mId, ffiDesc.required_limits);
+ for (const auto& feature : aDesc.mRequiredFeatures) {
+ device->mFeatures->Add(feature, aRv);
+ }
+
+ request->mPromise->Then(
+ GetCurrentSerialEventTarget(), __func__,
+ [promise, device](bool aSuccess) {
+ if (aSuccess) {
+ promise->MaybeResolve(device);
+ } else {
+ // In this path, request->mId has an error entry in the wgpu
+ // registry, so let Device::~Device clean things up on both the
+ // child and parent side.
+ promise->MaybeRejectWithInvalidStateError(
+ "Unable to fulfill requested features and limits");
+ }
+ },
+ [promise, device](const ipc::ResponseRejectReason& aReason) {
+          // We can't be sure how far along the WebGPUParent got in handling
+          // our AdapterRequestDevice message, and we can no longer communicate
+          // with it, so clean up our client state for this Device without
+          // trying to notify the parent.
+ device->CleanupUnregisteredInParent();
+ promise->MaybeRejectWithNotSupportedError("IPC error");
+ });
+ }();
+
+ return promise.forget();
+}
+
+// -
+
+already_AddRefed<dom::Promise> Adapter::RequestAdapterInfo(
+ const dom::Sequence<nsString>& /*aUnmaskHints*/, ErrorResult& aRv) const {
+ RefPtr<dom::Promise> promise = dom::Promise::Create(GetParentObject(), aRv);
+ if (!promise) return nullptr;
+
+ auto rai = UniquePtr<AdapterInfo>{new AdapterInfo(mInfo)};
+ promise->MaybeResolve(std::move(rai));
+ return promise.forget();
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/Adapter.h b/dom/webgpu/Adapter.h
new file mode 100644
index 0000000000..4156588e8e
--- /dev/null
+++ b/dom/webgpu/Adapter.h
@@ -0,0 +1,117 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_Adapter_H_
+#define GPU_Adapter_H_
+
+#include <memory>
+
+#include "mozilla/AlreadyAddRefed.h"
+#include "mozilla/dom/NonRefcountedDOMObject.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+#include "nsPrintfCString.h"
+#include "nsString.h"
+#include "ObjectModel.h"
+
+namespace mozilla {
+class ErrorResult;
+namespace dom {
+class Promise;
+struct GPUDeviceDescriptor;
+struct GPUExtensions;
+struct GPUFeatures;
+enum class GPUFeatureName : uint8_t;
+enum class WgpuBackend : uint8_t;
+enum class WgpuDeviceType : uint8_t;
+template <typename T>
+class Sequence;
+} // namespace dom
+
+namespace webgpu {
+class Device;
+class Instance;
+class SupportedFeatures;
+class SupportedLimits;
+class WebGPUChild;
+namespace ffi {
+struct WGPUAdapterInformation;
+} // namespace ffi
+
+class AdapterInfo final : public dom::NonRefcountedDOMObject {
+ private:
+ const std::shared_ptr<ffi::WGPUAdapterInformation> mAboutSupportInfo;
+
+ public:
+ explicit AdapterInfo(
+ const std::shared_ptr<ffi::WGPUAdapterInformation>& aAboutSupportInfo)
+ : mAboutSupportInfo(aAboutSupportInfo) {}
+
+ void GetVendor(nsString& s) const { s = nsString(); }
+ void GetArchitecture(nsString& s) const { s = nsString(); }
+ void GetDevice(nsString& s) const { s = nsString(); }
+ void GetDescription(nsString& s) const { s = nsString(); }
+
+ // Non-standard field getters; see also TODO BUGZILLA LINK
+ void GetWgpuName(nsString&) const;
+ uint32_t WgpuVendor() const;
+ uint32_t WgpuDevice() const;
+ void GetWgpuDeviceType(nsString&) const;
+ void GetWgpuDriver(nsString&) const;
+ void GetWgpuDriverInfo(nsString&) const;
+ void GetWgpuBackend(nsString&) const;
+
+ bool WrapObject(JSContext*, JS::Handle<JSObject*>,
+ JS::MutableHandle<JSObject*>);
+};
+
+inline auto ToHexCString(const uint64_t v) {
+ return nsPrintfCString("0x%" PRIx64, v);
+}
+
+class Adapter final : public ObjectBase, public ChildOf<Instance> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(Adapter)
+ GPU_DECL_JS_WRAP(Adapter)
+
+ RefPtr<WebGPUChild> mBridge;
+
+ private:
+ ~Adapter();
+ void Cleanup();
+
+ const RawId mId;
+  // Can't have them as `const` right now, since we wouldn't be able
+ // to unlink them in CC unlink.
+ RefPtr<SupportedFeatures> mFeatures;
+ RefPtr<SupportedLimits> mLimits;
+
+ const std::shared_ptr<ffi::WGPUAdapterInformation> mInfo;
+
+ public:
+ Adapter(Instance* const aParent, WebGPUChild* const aBridge,
+ const std::shared_ptr<ffi::WGPUAdapterInformation>& aInfo);
+ const RefPtr<SupportedFeatures>& Features() const;
+ const RefPtr<SupportedLimits>& Limits() const;
+ bool IsFallbackAdapter() const;
+
+ nsCString LabelOrId() const {
+ nsCString ret = this->CLabel();
+ if (ret.IsEmpty()) {
+ ret = ToHexCString(mId);
+ }
+ return ret;
+ }
+
+ already_AddRefed<dom::Promise> RequestDevice(
+ const dom::GPUDeviceDescriptor& aDesc, ErrorResult& aRv);
+
+ already_AddRefed<dom::Promise> RequestAdapterInfo(
+ const dom::Sequence<nsString>& aUnmaskHints, ErrorResult& aRv) const;
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_Adapter_H_
diff --git a/dom/webgpu/BindGroup.cpp b/dom/webgpu/BindGroup.cpp
new file mode 100644
index 0000000000..7ae5b8c3bb
--- /dev/null
+++ b/dom/webgpu/BindGroup.cpp
@@ -0,0 +1,34 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "BindGroup.h"
+#include "ipc/WebGPUChild.h"
+
+#include "Device.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(BindGroup, mParent)
+GPU_IMPL_JS_WRAP(BindGroup)
+
+BindGroup::BindGroup(Device* const aParent, RawId aId)
+ : ChildOf(aParent), mId(aId) {
+ MOZ_RELEASE_ASSERT(aId);
+}
+
+BindGroup::~BindGroup() { Cleanup(); }
+
+void BindGroup::Cleanup() {
+ if (mValid && mParent) {
+ mValid = false;
+ auto bridge = mParent->GetBridge();
+ if (bridge && bridge->IsOpen()) {
+ bridge->SendBindGroupDrop(mId);
+ }
+ }
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/BindGroup.h b/dom/webgpu/BindGroup.h
new file mode 100644
index 0000000000..4f67c906f3
--- /dev/null
+++ b/dom/webgpu/BindGroup.h
@@ -0,0 +1,33 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_BindGroup_H_
+#define GPU_BindGroup_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+
+namespace mozilla::webgpu {
+
+class Device;
+
+class BindGroup final : public ObjectBase, public ChildOf<Device> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(BindGroup)
+ GPU_DECL_JS_WRAP(BindGroup)
+
+ BindGroup(Device* const aParent, RawId aId);
+
+ const RawId mId;
+
+ private:
+ ~BindGroup();
+ void Cleanup();
+};
+
+} // namespace mozilla::webgpu
+
+#endif // GPU_BindGroup_H_
diff --git a/dom/webgpu/BindGroupLayout.cpp b/dom/webgpu/BindGroupLayout.cpp
new file mode 100644
index 0000000000..cb5035b308
--- /dev/null
+++ b/dom/webgpu/BindGroupLayout.cpp
@@ -0,0 +1,34 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "BindGroupLayout.h"
+#include "ipc/WebGPUChild.h"
+
+#include "Device.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(BindGroupLayout, mParent)
+GPU_IMPL_JS_WRAP(BindGroupLayout)
+
+BindGroupLayout::BindGroupLayout(Device* const aParent, RawId aId, bool aOwning)
+ : ChildOf(aParent), mId(aId), mOwning(aOwning) {
+ MOZ_RELEASE_ASSERT(aId);
+}
+
+BindGroupLayout::~BindGroupLayout() { Cleanup(); }
+
+void BindGroupLayout::Cleanup() {
+ if (mValid && mParent) {
+ mValid = false;
+ auto bridge = mParent->GetBridge();
+ if (mOwning && bridge && bridge->IsOpen()) {
+ bridge->SendBindGroupLayoutDrop(mId);
+ }
+ }
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/BindGroupLayout.h b/dom/webgpu/BindGroupLayout.h
new file mode 100644
index 0000000000..fcd721ab5f
--- /dev/null
+++ b/dom/webgpu/BindGroupLayout.h
@@ -0,0 +1,34 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_BindGroupLayout_H_
+#define GPU_BindGroupLayout_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+
+namespace mozilla::webgpu {
+
+class Device;
+
+class BindGroupLayout final : public ObjectBase, public ChildOf<Device> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(BindGroupLayout)
+ GPU_DECL_JS_WRAP(BindGroupLayout)
+
+ BindGroupLayout(Device* const aParent, RawId aId, bool aOwning);
+
+ const RawId mId;
+ const bool mOwning;
+
+ private:
+ ~BindGroupLayout();
+ void Cleanup();
+};
+
+} // namespace mozilla::webgpu
+
+#endif // GPU_BindGroupLayout_H_
diff --git a/dom/webgpu/Buffer.cpp b/dom/webgpu/Buffer.cpp
new file mode 100644
index 0000000000..b7b689a9a0
--- /dev/null
+++ b/dom/webgpu/Buffer.cpp
@@ -0,0 +1,388 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "Buffer.h"
+
+#include "mozilla/dom/Promise.h"
+#include "mozilla/dom/ScriptSettings.h"
+#include "mozilla/HoldDropJSObjects.h"
+#include "mozilla/ipc/Shmem.h"
+#include "ipc/WebGPUChild.h"
+#include "js/ArrayBuffer.h"
+#include "js/RootingAPI.h"
+#include "nsContentUtils.h"
+#include "nsWrapperCache.h"
+#include "Device.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_JS_WRAP(Buffer)
+
+NS_IMPL_CYCLE_COLLECTION_CLASS(Buffer)
+NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(Buffer)
+ tmp->Drop();
+ NS_IMPL_CYCLE_COLLECTION_UNLINK(mParent)
+ NS_IMPL_CYCLE_COLLECTION_UNLINK_PRESERVED_WRAPPER
+NS_IMPL_CYCLE_COLLECTION_UNLINK_END
+NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(Buffer)
+ NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mParent)
+NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
+NS_IMPL_CYCLE_COLLECTION_TRACE_BEGIN(Buffer)
+ NS_IMPL_CYCLE_COLLECTION_TRACE_PRESERVED_WRAPPER
+ if (tmp->mMapped) {
+ for (uint32_t i = 0; i < tmp->mMapped->mArrayBuffers.Length(); ++i) {
+ NS_IMPL_CYCLE_COLLECTION_TRACE_JS_MEMBER_CALLBACK(
+ mMapped->mArrayBuffers[i])
+ }
+ }
+NS_IMPL_CYCLE_COLLECTION_TRACE_END
+
+Buffer::Buffer(Device* const aParent, RawId aId, BufferAddress aSize,
+ uint32_t aUsage, ipc::WritableSharedMemoryMapping&& aShmem)
+ : ChildOf(aParent), mId(aId), mSize(aSize), mUsage(aUsage) {
+ mozilla::HoldJSObjects(this);
+ mShmem =
+ std::make_shared<ipc::WritableSharedMemoryMapping>(std::move(aShmem));
+ MOZ_ASSERT(mParent);
+}
+
+Buffer::~Buffer() {
+ Drop();
+ mozilla::DropJSObjects(this);
+}
+
+already_AddRefed<Buffer> Buffer::Create(Device* aDevice, RawId aDeviceId,
+ const dom::GPUBufferDescriptor& aDesc,
+ ErrorResult& aRv) {
+ RefPtr<WebGPUChild> actor = aDevice->GetBridge();
+ RawId bufferId =
+ ffi::wgpu_client_make_buffer_id(actor->GetClient(), aDeviceId);
+
+ if (!aDevice->IsBridgeAlive()) {
+ // Create and return an invalid Buffer.
+ RefPtr<Buffer> buffer = new Buffer(aDevice, bufferId, aDesc.mSize, 0,
+ ipc::WritableSharedMemoryMapping());
+ buffer->mValid = false;
+ return buffer.forget();
+ }
+
+ auto handle = ipc::UnsafeSharedMemoryHandle();
+ auto mapping = ipc::WritableSharedMemoryMapping();
+
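+  // Mappable buffers (and buffers mapped at creation) are backed by a shared
+  // memory region so that the content process can access the mapped range
+  // directly, without copying through IPC.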
+ bool hasMapFlags = aDesc.mUsage & (dom::GPUBufferUsage_Binding::MAP_WRITE |
+ dom::GPUBufferUsage_Binding::MAP_READ);
+
+ bool allocSucceeded = false;
+ if (hasMapFlags || aDesc.mMappedAtCreation) {
+ // If shmem allocation fails, we continue and provide the parent side with
+    // an empty shmem which it will interpret as an OOM situation.
+ const auto checked = CheckedInt<size_t>(aDesc.mSize);
+ const size_t maxSize = WGPUMAX_BUFFER_SIZE;
+ if (checked.isValid()) {
+ size_t size = checked.value();
+
+ if (size > 0 && size < maxSize) {
+ auto maybeShmem = ipc::UnsafeSharedMemoryHandle::CreateAndMap(size);
+
+ if (maybeShmem.isSome()) {
+ allocSucceeded = true;
+ handle = std::move(maybeShmem.ref().first);
+ mapping = std::move(maybeShmem.ref().second);
+
+ MOZ_RELEASE_ASSERT(mapping.Size() >= size);
+
+ // zero out memory
+ memset(mapping.Bytes().data(), 0, size);
+ }
+ }
+
+ if (size == 0) {
+      // Zero-sized buffers are a special case. We don't create a shmem since
+      // allocating the memory would not make sense; however, mappable
+      // zero-sized buffers are allowed by the spec, so we just pass the null
+      // handle, which in practice deserializes into a null handle on the
+      // parent side and behaves like a zero-sized allocation.
+ allocSucceeded = true;
+ }
+ }
+ }
+
+ // If mapped at creation and the shmem allocation failed, immediately throw
+ // a range error and don't attempt to create the buffer.
+ if (aDesc.mMappedAtCreation && !allocSucceeded) {
+ aRv.ThrowRangeError("Allocation failed");
+ return nullptr;
+ }
+
+ actor->SendDeviceCreateBuffer(aDeviceId, bufferId, aDesc, std::move(handle));
+
+ RefPtr<Buffer> buffer = new Buffer(aDevice, bufferId, aDesc.mSize,
+ aDesc.mUsage, std::move(mapping));
+ buffer->SetLabel(aDesc.mLabel);
+
+ if (aDesc.mMappedAtCreation) {
+ // Mapped at creation's raison d'être is write access, since the buffer is
+ // being created and there isn't anything interesting to read in it yet.
+ bool writable = true;
+ buffer->SetMapped(0, aDesc.mSize, writable);
+ }
+
+ aDevice->TrackBuffer(buffer.get());
+
+ return buffer.forget();
+}
+
+void Buffer::Drop() {
+ if (!mValid) {
+ return;
+ }
+
+ mValid = false;
+
+ AbortMapRequest();
+
+ if (mMapped && !mMapped->mArrayBuffers.IsEmpty()) {
+ // The array buffers could live longer than us and our shmem, so make sure
+ // we clear the external buffer bindings.
+ dom::AutoJSAPI jsapi;
+ if (jsapi.Init(GetDevice().GetOwnerGlobal())) {
+ IgnoredErrorResult rv;
+ UnmapArrayBuffers(jsapi.cx(), rv);
+ }
+ }
+ mMapped.reset();
+
+ GetDevice().UntrackBuffer(this);
+
+ if (GetDevice().IsBridgeAlive()) {
+ GetDevice().GetBridge()->SendBufferDrop(mId);
+ }
+}
+
+void Buffer::SetMapped(BufferAddress aOffset, BufferAddress aSize,
+ bool aWritable) {
+ MOZ_ASSERT(!mMapped);
+ MOZ_RELEASE_ASSERT(aOffset <= mSize);
+ MOZ_RELEASE_ASSERT(aSize <= mSize - aOffset);
+
+ mMapped.emplace();
+ mMapped->mWritable = aWritable;
+ mMapped->mOffset = aOffset;
+ mMapped->mSize = aSize;
+}
+
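+// Backs `GPUBuffer.mapAsync()`. For illustration, a typical readback from JS:
+//   await buffer.mapAsync(GPUMapMode.READ);
+//   const bytes = new Uint8Array(buffer.getMappedRange());
+//   buffer.unmap();
+// The mapping itself is performed by the parent process; this method records
+// the pending request and resolves the promise when the IPC reply arrives.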
+already_AddRefed<dom::Promise> Buffer::MapAsync(
+ uint32_t aMode, uint64_t aOffset, const dom::Optional<uint64_t>& aSize,
+ ErrorResult& aRv) {
+ RefPtr<dom::Promise> promise = dom::Promise::Create(GetParentObject(), aRv);
+ if (NS_WARN_IF(aRv.Failed())) {
+ return nullptr;
+ }
+
+ if (GetDevice().IsLost()) {
+ promise->MaybeRejectWithOperationError("Device Lost");
+ return promise.forget();
+ }
+
+ if (mMapRequest) {
+ promise->MaybeRejectWithOperationError("Buffer mapping is already pending");
+ return promise.forget();
+ }
+
+ BufferAddress size = 0;
+ if (aSize.WasPassed()) {
+ size = aSize.Value();
+ } else if (aOffset <= mSize) {
+    // Default to passing the remainder of the buffer after the provided offset.
+ size = mSize - aOffset;
+ } else {
+    // The provided offset is larger than the buffer size.
+    // The parent side will handle the error; we can let the requested size be
+    // zero.
+ }
+
+ RefPtr<Buffer> self(this);
+
+ auto mappingPromise = GetDevice().GetBridge()->SendBufferMap(
+ GetDevice().mId, mId, aMode, aOffset, size);
+ MOZ_ASSERT(mappingPromise);
+
+ mMapRequest = promise;
+
+ mappingPromise->Then(
+ GetCurrentSerialEventTarget(), __func__,
+ [promise, self](BufferMapResult&& aResult) {
+ // Unmap might have been called while the result was on the way back.
+ if (promise->State() != dom::Promise::PromiseState::Pending) {
+ return;
+ }
+
+ // mValid should be true here: otherwise Unmap would have been called
+ // while marking the buffer invalid, which rejects the promise and makes
+ // the branch above early-return.
+ MOZ_RELEASE_ASSERT(self->mValid);
+
+ switch (aResult.type()) {
+ case BufferMapResult::TBufferMapSuccess: {
+ auto& success = aResult.get_BufferMapSuccess();
+ self->mMapRequest = nullptr;
+ self->SetMapped(success.offset(), success.size(),
+ success.writable());
+ promise->MaybeResolve(0);
+ break;
+ }
+ case BufferMapResult::TBufferMapError: {
+ auto& error = aResult.get_BufferMapError();
+ self->RejectMapRequest(promise, error.message());
+ break;
+ }
+ default: {
+ MOZ_CRASH("unreachable");
+ }
+ }
+ },
+ [promise](const ipc::ResponseRejectReason&) {
+ promise->MaybeRejectWithAbortError("Internal communication error!");
+ });
+
+ return promise.forget();
+}
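For reference, the size defaulting above matches mapAsync's spec behavior: an omitted size means "the rest of the buffer after the offset". A standalone sketch of just that computation (the helper name is invented for illustration):

#include <cstdint>

// Effective size of a map request when the caller may have omitted the size.
// A null requestedSize means "not passed". If the offset is past the end of
// the buffer, the size stays zero and parent-side validation reports the
// error, mirroring the code above.
uint64_t EffectiveMapSize(uint64_t bufferSize, uint64_t offset,
                          const uint64_t* requestedSize) {
  if (requestedSize) {
    return *requestedSize;
  }
  return offset <= bufferSize ? bufferSize - offset : 0;
}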
+
+static void ExternalBufferFreeCallback(void* aContents, void* aUserData) {
+ Unused << aContents;
+ auto shm = static_cast<std::shared_ptr<ipc::WritableSharedMemoryMapping>*>(
+ aUserData);
+ delete shm;
+}
+
+void Buffer::GetMappedRange(JSContext* aCx, uint64_t aOffset,
+ const dom::Optional<uint64_t>& aSize,
+ JS::Rooted<JSObject*>* aObject, ErrorResult& aRv) {
+ if (!mMapped) {
+ aRv.ThrowInvalidStateError("Buffer is not mapped");
+ return;
+ }
+
+ const auto checkedOffset = CheckedInt<size_t>(aOffset);
+ const auto checkedSize = aSize.WasPassed()
+ ? CheckedInt<size_t>(aSize.Value())
+ : CheckedInt<size_t>(mSize) - aOffset;
+ const auto checkedMinBufferSize = checkedOffset + checkedSize;
+
+ if (!checkedOffset.isValid() || !checkedSize.isValid() ||
+ !checkedMinBufferSize.isValid() || aOffset < mMapped->mOffset ||
+ checkedMinBufferSize.value() > mMapped->mOffset + mMapped->mSize) {
+ aRv.ThrowRangeError("Invalid range");
+ return;
+ }
+
+ auto offset = checkedOffset.value();
+ auto size = checkedSize.value();
+ auto span = mShmem->Bytes().Subspan(offset, size);
+
+ std::shared_ptr<ipc::WritableSharedMemoryMapping>* userData =
+ new std::shared_ptr<ipc::WritableSharedMemoryMapping>(mShmem);
+ UniquePtr<void, JS::BufferContentsDeleter> dataPtr{
+ span.data(), {&ExternalBufferFreeCallback, userData}};
+ JS::Rooted<JSObject*> arrayBuffer(
+ aCx, JS::NewExternalArrayBuffer(aCx, size, std::move(dataPtr)));
+ if (!arrayBuffer) {
+ aRv.NoteJSContextException(aCx);
+ return;
+ }
+
+ aObject->set(arrayBuffer);
+ mMapped->mArrayBuffers.AppendElement(*aObject);
+}
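The range check above amounts to verifying that [offset, offset + size) lies inside the currently mapped window without any of the additions overflowing. A standalone, overflow-free formulation of the same predicate (the function name is made up for illustration):

#include <cstdint>

// True when [offset, offset + size) fits inside the mapped window starting at
// mappedOffset with length mappedSize, written so that no addition can
// overflow a uint64_t.
bool RangeIsWithinMapping(uint64_t offset, uint64_t size,
                          uint64_t mappedOffset, uint64_t mappedSize) {
  if (offset < mappedOffset) {
    return false;
  }
  const uint64_t relativeOffset = offset - mappedOffset;
  if (relativeOffset > mappedSize) {
    return false;
  }
  return size <= mappedSize - relativeOffset;
}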
+
+void Buffer::UnmapArrayBuffers(JSContext* aCx, ErrorResult& aRv) {
+ MOZ_ASSERT(mMapped);
+
+ bool detachedArrayBuffers = true;
+ for (const auto& arrayBuffer : mMapped->mArrayBuffers) {
+ JS::Rooted<JSObject*> rooted(aCx, arrayBuffer);
+ if (!JS::DetachArrayBuffer(aCx, rooted)) {
+ detachedArrayBuffers = false;
+ }
+ }
+
+ mMapped->mArrayBuffers.Clear();
+
+ AbortMapRequest();
+
+ if (NS_WARN_IF(!detachedArrayBuffers)) {
+ aRv.NoteJSContextException(aCx);
+ return;
+ }
+}
+
+void Buffer::RejectMapRequest(dom::Promise* aPromise, nsACString& message) {
+ if (mMapRequest == aPromise) {
+ mMapRequest = nullptr;
+ }
+
+ aPromise->MaybeRejectWithOperationError(message);
+}
+
+void Buffer::AbortMapRequest() {
+ if (mMapRequest) {
+ mMapRequest->MaybeRejectWithAbortError("Buffer unmapped");
+ }
+ mMapRequest = nullptr;
+}
+
+void Buffer::Unmap(JSContext* aCx, ErrorResult& aRv) {
+ if (!mMapped) {
+ return;
+ }
+
+ UnmapArrayBuffers(aCx, aRv);
+
+ bool hasMapFlags = mUsage & (dom::GPUBufferUsage_Binding::MAP_WRITE |
+ dom::GPUBufferUsage_Binding::MAP_READ);
+
+ if (!hasMapFlags) {
+ // We get here if the buffer was mapped at creation without map flags.
+ // It won't be possible to map the buffer again so we can get rid of
+ // our shmem on this side.
+ mShmem = std::make_shared<ipc::WritableSharedMemoryMapping>();
+ }
+
+ if (!GetDevice().IsLost()) {
+ GetDevice().GetBridge()->SendBufferUnmap(GetDevice().mId, mId,
+ mMapped->mWritable);
+ }
+
+ mMapped.reset();
+}
+
+void Buffer::Destroy(JSContext* aCx, ErrorResult& aRv) {
+ if (mMapped) {
+ Unmap(aCx, aRv);
+ }
+
+ if (!GetDevice().IsLost()) {
+ GetDevice().GetBridge()->SendBufferDestroy(mId);
+ }
+ // TODO: we don't have to implement it right now, but it's used by the
+ // examples
+}
+
+dom::GPUBufferMapState Buffer::MapState() const {
+ // Implementation reference:
+ // <https://gpuweb.github.io/gpuweb/#dom-gpubuffer-mapstate>.
+
+ if (mMapped) {
+ return dom::GPUBufferMapState::Mapped;
+ }
+ if (mMapRequest) {
+ return dom::GPUBufferMapState::Pending;
+ }
+ return dom::GPUBufferMapState::Unmapped;
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/Buffer.h b/dom/webgpu/Buffer.h
new file mode 100644
index 0000000000..2f809a4768
--- /dev/null
+++ b/dom/webgpu/Buffer.h
@@ -0,0 +1,95 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_BUFFER_H_
+#define GPU_BUFFER_H_
+
+#include "js/RootingAPI.h"
+#include "mozilla/dom/Nullable.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+#include "nsTArray.h"
+#include "ObjectModel.h"
+#include "mozilla/ipc/RawShmem.h"
+#include <memory>
+
+namespace mozilla {
+class ErrorResult;
+
+namespace dom {
+struct GPUBufferDescriptor;
+template <typename T>
+class Optional;
+enum class GPUBufferMapState : uint8_t;
+} // namespace dom
+
+namespace webgpu {
+
+class Device;
+
+struct MappedInfo {
+ // True if mapping is requested for writing.
+ bool mWritable = false;
+ // Populated by `GetMappedRange`.
+ nsTArray<JS::Heap<JSObject*>> mArrayBuffers;
+ BufferAddress mOffset;
+ BufferAddress mSize;
+ MappedInfo() = default;
+ MappedInfo(const MappedInfo&) = delete;
+};
+
+class Buffer final : public ObjectBase, public ChildOf<Device> {
+ public:
+ NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_NATIVE_CLASS(Buffer)
+ NS_INLINE_DECL_CYCLE_COLLECTING_NATIVE_REFCOUNTING(Buffer)
+ GPU_DECL_JS_WRAP(Buffer)
+
+ static already_AddRefed<Buffer> Create(Device* aDevice, RawId aDeviceId,
+ const dom::GPUBufferDescriptor& aDesc,
+ ErrorResult& aRv);
+
+ already_AddRefed<dom::Promise> MapAsync(uint32_t aMode, uint64_t aOffset,
+ const dom::Optional<uint64_t>& aSize,
+ ErrorResult& aRv);
+ void GetMappedRange(JSContext* aCx, uint64_t aOffset,
+ const dom::Optional<uint64_t>& aSize,
+ JS::Rooted<JSObject*>* aObject, ErrorResult& aRv);
+ void Unmap(JSContext* aCx, ErrorResult& aRv);
+ void Destroy(JSContext* aCx, ErrorResult& aRv);
+
+ const RawId mId;
+
+ uint64_t Size() const { return mSize; }
+ uint32_t Usage() const { return mUsage; }
+ dom::GPUBufferMapState MapState() const;
+
+ private:
+ Buffer(Device* const aParent, RawId aId, BufferAddress aSize, uint32_t aUsage,
+ ipc::WritableSharedMemoryMapping&& aShmem);
+ virtual ~Buffer();
+ Device& GetDevice() { return *mParent; }
+ void Drop();
+ void UnmapArrayBuffers(JSContext* aCx, ErrorResult& aRv);
+ void RejectMapRequest(dom::Promise* aPromise, nsACString& message);
+ void AbortMapRequest();
+ void SetMapped(BufferAddress aOffset, BufferAddress aSize, bool aWritable);
+
+ // Note: we can't map a buffer whose size doesn't fit into `size_t`
+ // (which may be smaller than `BufferAddress`), but in general not all
+ // buffers are mapped.
+ const BufferAddress mSize;
+ const uint32_t mUsage;
+ nsString mLabel;
+ // Information about the currently active mapping.
+ Maybe<MappedInfo> mMapped;
+ RefPtr<dom::Promise> mMapRequest;
+ // mShmem does not point to a shared memory segment if the buffer is not
+ // mappable.
+ std::shared_ptr<ipc::WritableSharedMemoryMapping> mShmem;
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_BUFFER_H_
diff --git a/dom/webgpu/CanvasContext.cpp b/dom/webgpu/CanvasContext.cpp
new file mode 100644
index 0000000000..49f34196c4
--- /dev/null
+++ b/dom/webgpu/CanvasContext.cpp
@@ -0,0 +1,387 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "CanvasContext.h"
+#include "gfxUtils.h"
+#include "LayerUserData.h"
+#include "nsDisplayList.h"
+#include "mozilla/dom/HTMLCanvasElement.h"
+#include "mozilla/gfx/CanvasManagerChild.h"
+#include "mozilla/layers/CanvasRenderer.h"
+#include "mozilla/layers/CompositableForwarder.h"
+#include "mozilla/layers/ImageDataSerializer.h"
+#include "mozilla/layers/LayersSurfaces.h"
+#include "mozilla/layers/RenderRootStateManager.h"
+#include "mozilla/layers/WebRenderCanvasRenderer.h"
+#include "mozilla/StaticPrefs_privacy.h"
+#include "mozilla/SVGObserverUtils.h"
+#include "ipc/WebGPUChild.h"
+#include "Utility.h"
+
+namespace mozilla {
+
+inline void ImplCycleCollectionTraverse(
+ nsCycleCollectionTraversalCallback& aCallback,
+ dom::GPUCanvasConfiguration& aField, const char* aName, uint32_t aFlags) {
+ aField.TraverseForCC(aCallback, aFlags);
+}
+
+inline void ImplCycleCollectionUnlink(dom::GPUCanvasConfiguration& aField) {
+ aField.UnlinkForCC();
+}
+
+// -
+
+template <class T>
+inline void ImplCycleCollectionTraverse(
+ nsCycleCollectionTraversalCallback& aCallback,
+ const std::unique_ptr<T>& aField, const char* aName, uint32_t aFlags) {
+ if (aField) {
+ ImplCycleCollectionTraverse(aCallback, *aField, aName, aFlags);
+ }
+}
+
+template <class T>
+inline void ImplCycleCollectionUnlink(std::unique_ptr<T>& aField) {
+ aField = nullptr;
+}
+
+} // namespace mozilla
+
+// -
+
+namespace mozilla::webgpu {
+
+NS_IMPL_CYCLE_COLLECTING_ADDREF(CanvasContext)
+NS_IMPL_CYCLE_COLLECTING_RELEASE(CanvasContext)
+
+GPU_IMPL_CYCLE_COLLECTION_WRAPPERCACHE_WEAK_PTR(CanvasContext, mConfig,
+ mTexture, mBridge,
+ mCanvasElement,
+ mOffscreenCanvas)
+
+NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(CanvasContext)
+ NS_WRAPPERCACHE_INTERFACE_MAP_ENTRY
+ NS_INTERFACE_MAP_ENTRY(nsICanvasRenderingContextInternal)
+ NS_INTERFACE_MAP_ENTRY(nsISupports)
+NS_INTERFACE_MAP_END
+
+// -
+
+CanvasContext::CanvasContext() = default;
+
+CanvasContext::~CanvasContext() {
+ Cleanup();
+ RemovePostRefreshObserver();
+}
+
+void CanvasContext::Cleanup() { Unconfigure(); }
+
+JSObject* CanvasContext::WrapObject(JSContext* aCx,
+ JS::Handle<JSObject*> aGivenProto) {
+ return dom::GPUCanvasContext_Binding::Wrap(aCx, this, aGivenProto);
+}
+
+// -
+
+void CanvasContext::GetCanvas(
+ dom::OwningHTMLCanvasElementOrOffscreenCanvas& aRetVal) const {
+ if (mCanvasElement) {
+ aRetVal.SetAsHTMLCanvasElement() = mCanvasElement;
+ } else if (mOffscreenCanvas) {
+ aRetVal.SetAsOffscreenCanvas() = mOffscreenCanvas;
+ } else {
+ MOZ_CRASH(
+ "This should only happen briefly during CC Unlink, and no JS should "
+ "happen then.");
+ }
+}
+
+void CanvasContext::Configure(const dom::GPUCanvasConfiguration& aConfig) {
+ Unconfigure();
+
+ // Bug 1864904: Failures in validation should throw a TypeError, per spec.
+
+ // these formats are guaranteed by the spec
+ switch (aConfig.mFormat) {
+ case dom::GPUTextureFormat::Rgba8unorm:
+ case dom::GPUTextureFormat::Rgba8unorm_srgb:
+ mGfxFormat = gfx::SurfaceFormat::R8G8B8A8;
+ break;
+ case dom::GPUTextureFormat::Bgra8unorm:
+ case dom::GPUTextureFormat::Bgra8unorm_srgb:
+ mGfxFormat = gfx::SurfaceFormat::B8G8R8A8;
+ break;
+ default:
+ NS_WARNING("Specified swap chain format is not supported");
+ return;
+ }
+
+ mConfig.reset(new dom::GPUCanvasConfiguration(aConfig));
+ mRemoteTextureOwnerId = Some(layers::RemoteTextureOwnerId::GetNext());
+ mUseExternalTextureInSwapChain =
+ wgpu_client_use_external_texture_in_swapChain(
+ aConfig.mDevice->mId, ConvertTextureFormat(aConfig.mFormat));
+ mTexture = aConfig.mDevice->InitSwapChain(
+ mConfig.get(), mRemoteTextureOwnerId.ref(),
+ mUseExternalTextureInSwapChain, mGfxFormat, mCanvasSize);
+ if (!mTexture) {
+ Unconfigure();
+ return;
+ }
+
+ mTexture->mTargetContext = this;
+ mBridge = aConfig.mDevice->GetBridge();
+ if (mCanvasElement) {
+ mWaitingCanvasRendererInitialized = true;
+ }
+
+ ForceNewFrame();
+}
+
+void CanvasContext::Unconfigure() {
+ if (mBridge && mBridge->IsOpen() && mRemoteTextureOwnerId) {
+ mBridge->SendSwapChainDrop(
+ *mRemoteTextureOwnerId,
+ layers::ToRemoteTextureTxnType(mFwdTransactionTracker),
+ layers::ToRemoteTextureTxnId(mFwdTransactionTracker));
+ }
+ mRemoteTextureOwnerId = Nothing();
+ mFwdTransactionTracker = nullptr;
+ mBridge = nullptr;
+ mConfig = nullptr;
+ mTexture = nullptr;
+ mGfxFormat = gfx::SurfaceFormat::UNKNOWN;
+}
+
+NS_IMETHODIMP CanvasContext::SetDimensions(int32_t aWidth, int32_t aHeight) {
+ aWidth = std::max(1, aWidth);
+ aHeight = std::max(1, aHeight);
+ const auto newSize = gfx::IntSize{aWidth, aHeight};
+ if (newSize == mCanvasSize) return NS_OK; // No-op no-change resizes.
+
+ mCanvasSize = newSize;
+ if (mConfig) {
+ const auto copy = dom::GPUCanvasConfiguration{
+ *mConfig}; // Copy first: Configure() calls Unconfigure(), which nulls out mConfig.
+ Configure(copy);
+ }
+ return NS_OK;
+}
+
+RefPtr<Texture> CanvasContext::GetCurrentTexture(ErrorResult& aRv) {
+ if (!mTexture) {
+ aRv.ThrowOperationError("Canvas not configured");
+ return nullptr;
+ }
+
+ MOZ_ASSERT(mConfig);
+ MOZ_ASSERT(mRemoteTextureOwnerId.isSome());
+
+ if (mNewTextureRequested) {
+ mNewTextureRequested = false;
+
+ mTexture = mConfig->mDevice->CreateTextureForSwapChain(
+ mConfig.get(), mCanvasSize, mRemoteTextureOwnerId.ref());
+ mTexture->mTargetContext = this;
+ }
+ return mTexture;
+}
+
+void CanvasContext::MaybeQueueSwapChainPresent() {
+ MOZ_ASSERT(mTexture);
+
+ if (mTexture) {
+ mBridge->NotifyWaitForSubmit(mTexture->mId);
+ }
+
+ if (mPendingSwapChainPresent) {
+ return;
+ }
+
+ mPendingSwapChainPresent = true;
+
+ if (mWaitingCanvasRendererInitialized) {
+ return;
+ }
+
+ InvalidateCanvasContent();
+}
+
+Maybe<layers::SurfaceDescriptor> CanvasContext::SwapChainPresent() {
+ mPendingSwapChainPresent = false;
+ if (!mBridge || !mBridge->IsOpen() || mRemoteTextureOwnerId.isNothing() ||
+ !mTexture) {
+ return Nothing();
+ }
+ mLastRemoteTextureId = Some(layers::RemoteTextureId::GetNext());
+ mBridge->SwapChainPresent(mTexture->mId, *mLastRemoteTextureId,
+ *mRemoteTextureOwnerId);
+ if (mUseExternalTextureInSwapChain) {
+ mTexture->Destroy();
+ mNewTextureRequested = true;
+ }
+ return Some(layers::SurfaceDescriptorRemoteTexture(*mLastRemoteTextureId,
+ *mRemoteTextureOwnerId));
+}
+
+bool CanvasContext::UpdateWebRenderCanvasData(
+ mozilla::nsDisplayListBuilder* aBuilder, WebRenderCanvasData* aCanvasData) {
+ auto* renderer = aCanvasData->GetCanvasRenderer();
+
+ if (renderer && mRemoteTextureOwnerId.isSome() &&
+ renderer->GetRemoteTextureOwnerId() == mRemoteTextureOwnerId) {
+ return true;
+ }
+
+ renderer = aCanvasData->CreateCanvasRenderer();
+ if (!InitializeCanvasRenderer(aBuilder, renderer)) {
+ // Clear CanvasRenderer of WebRenderCanvasData
+ aCanvasData->ClearCanvasRenderer();
+ return false;
+ }
+ return true;
+}
+
+bool CanvasContext::InitializeCanvasRenderer(
+ nsDisplayListBuilder* aBuilder, layers::CanvasRenderer* aRenderer) {
+ if (mRemoteTextureOwnerId.isNothing()) {
+ return false;
+ }
+
+ layers::CanvasRendererData data;
+ data.mContext = this;
+ data.mSize = mCanvasSize;
+ data.mIsOpaque = false;
+ data.mRemoteTextureOwnerId = mRemoteTextureOwnerId;
+
+ aRenderer->Initialize(data);
+ aRenderer->SetDirty();
+
+ if (mWaitingCanvasRendererInitialized) {
+ InvalidateCanvasContent();
+ }
+ mWaitingCanvasRendererInitialized = false;
+
+ return true;
+}
+
+mozilla::UniquePtr<uint8_t[]> CanvasContext::GetImageBuffer(
+ int32_t* out_format, gfx::IntSize* out_imageSize) {
+ *out_format = 0;
+ *out_imageSize = {};
+
+ gfxAlphaType any;
+ RefPtr<gfx::SourceSurface> snapshot = GetSurfaceSnapshot(&any);
+ if (!snapshot) {
+ return nullptr;
+ }
+
+ RefPtr<gfx::DataSourceSurface> dataSurface = snapshot->GetDataSurface();
+ *out_imageSize = dataSurface->GetSize();
+
+ if (ShouldResistFingerprinting(RFPTarget::CanvasRandomization)) {
+ return gfxUtils::GetImageBufferWithRandomNoise(
+ dataSurface,
+ /* aIsAlphaPremultiplied */ true, GetCookieJarSettings(), &*out_format);
+ }
+
+ return gfxUtils::GetImageBuffer(dataSurface, /* aIsAlphaPremultiplied */ true,
+ &*out_format);
+}
+
+NS_IMETHODIMP CanvasContext::GetInputStream(const char* aMimeType,
+ const nsAString& aEncoderOptions,
+ nsIInputStream** aStream) {
+ gfxAlphaType any;
+ RefPtr<gfx::SourceSurface> snapshot = GetSurfaceSnapshot(&any);
+ if (!snapshot) {
+ return NS_ERROR_FAILURE;
+ }
+
+ RefPtr<gfx::DataSourceSurface> dataSurface = snapshot->GetDataSurface();
+
+ if (ShouldResistFingerprinting(RFPTarget::CanvasRandomization)) {
+ return gfxUtils::GetInputStreamWithRandomNoise(
+ dataSurface, /* aIsAlphaPremultiplied */ true, aMimeType,
+ aEncoderOptions, GetCookieJarSettings(), aStream);
+ }
+
+ return gfxUtils::GetInputStream(dataSurface, /* aIsAlphaPremultiplied */ true,
+ aMimeType, aEncoderOptions, aStream);
+}
+
+already_AddRefed<mozilla::gfx::SourceSurface> CanvasContext::GetSurfaceSnapshot(
+ gfxAlphaType* aOutAlphaType) {
+ if (aOutAlphaType) {
+ *aOutAlphaType = gfxAlphaType::Premult;
+ }
+
+ auto* const cm = gfx::CanvasManagerChild::Get();
+ if (!cm) {
+ return nullptr;
+ }
+
+ if (!mBridge || !mBridge->IsOpen() || mRemoteTextureOwnerId.isNothing()) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(mRemoteTextureOwnerId.isSome());
+ return cm->GetSnapshot(cm->Id(), mBridge->Id(), mRemoteTextureOwnerId,
+ mGfxFormat, /* aPremultiply */ false,
+ /* aYFlip */ false);
+}
+
+Maybe<layers::SurfaceDescriptor> CanvasContext::GetFrontBuffer(
+ WebGLFramebufferJS*, const bool) {
+ if (mPendingSwapChainPresent) {
+ auto desc = SwapChainPresent();
+ MOZ_ASSERT(!mPendingSwapChainPresent);
+ return desc;
+ }
+ return Nothing();
+}
+
+already_AddRefed<layers::FwdTransactionTracker>
+CanvasContext::UseCompositableForwarder(
+ layers::CompositableForwarder* aForwarder) {
+ return layers::FwdTransactionTracker::GetOrCreate(mFwdTransactionTracker);
+}
+
+void CanvasContext::ForceNewFrame() {
+ if (!mCanvasElement && !mOffscreenCanvas) {
+ return;
+ }
+
+ // Force a new frame to be built, which will execute the
+ // `CanvasContextType::WebGPU` switch case in `CreateWebRenderCommands` and
+ // populate the WR user data.
+ if (mCanvasElement) {
+ mCanvasElement->InvalidateCanvas();
+ } else if (mOffscreenCanvas) {
+ dom::OffscreenCanvasDisplayData data;
+ data.mSize = mCanvasSize;
+ data.mIsOpaque = false;
+ data.mOwnerId = mRemoteTextureOwnerId;
+ mOffscreenCanvas->UpdateDisplayData(data);
+ }
+}
+
+void CanvasContext::InvalidateCanvasContent() {
+ if (!mCanvasElement && !mOffscreenCanvas) {
+ MOZ_ASSERT_UNREACHABLE("unexpected to be called");
+ return;
+ }
+
+ if (mCanvasElement) {
+ SVGObserverUtils::InvalidateDirectRenderingObservers(mCanvasElement);
+ mCanvasElement->InvalidateCanvasContent(nullptr);
+ } else if (mOffscreenCanvas) {
+ mOffscreenCanvas->QueueCommitToCompositor();
+ }
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/CanvasContext.h b/dom/webgpu/CanvasContext.h
new file mode 100644
index 0000000000..58ef04e861
--- /dev/null
+++ b/dom/webgpu/CanvasContext.h
@@ -0,0 +1,119 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_CanvasContext_H_
+#define GPU_CanvasContext_H_
+
+#include "nsICanvasRenderingContextInternal.h"
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+#include "mozilla/layers/LayersTypes.h"
+#include "mozilla/webrender/WebRenderAPI.h"
+
+namespace mozilla {
+namespace dom {
+class OwningHTMLCanvasElementOrOffscreenCanvas;
+class Promise;
+struct GPUCanvasConfiguration;
+enum class GPUTextureFormat : uint8_t;
+} // namespace dom
+namespace webgpu {
+class Adapter;
+class Texture;
+
+class CanvasContext final : public nsICanvasRenderingContextInternal,
+ public nsWrapperCache {
+ private:
+ virtual ~CanvasContext();
+ void Cleanup();
+
+ public:
+ // nsISupports interface + CC
+ NS_DECL_CYCLE_COLLECTING_ISUPPORTS
+ NS_DECL_CYCLE_COLLECTION_WRAPPERCACHE_CLASS(CanvasContext)
+
+ CanvasContext();
+
+ JSObject* WrapObject(JSContext* aCx,
+ JS::Handle<JSObject*> aGivenProto) override;
+
+ public: // nsICanvasRenderingContextInternal
+ int32_t GetWidth() override { return mCanvasSize.width; }
+ int32_t GetHeight() override { return mCanvasSize.height; }
+
+ NS_IMETHOD SetDimensions(int32_t aWidth, int32_t aHeight) override;
+ NS_IMETHOD InitializeWithDrawTarget(
+ nsIDocShell* aShell, NotNull<gfx::DrawTarget*> aTarget) override {
+ return NS_OK;
+ }
+
+ bool UpdateWebRenderCanvasData(mozilla::nsDisplayListBuilder* aBuilder,
+ WebRenderCanvasData* aCanvasData) override;
+
+ bool InitializeCanvasRenderer(nsDisplayListBuilder* aBuilder,
+ layers::CanvasRenderer* aRenderer) override;
+ mozilla::UniquePtr<uint8_t[]> GetImageBuffer(
+ int32_t* out_format, gfx::IntSize* out_imageSize) override;
+ NS_IMETHOD GetInputStream(const char* aMimeType,
+ const nsAString& aEncoderOptions,
+ nsIInputStream** aStream) override;
+ already_AddRefed<mozilla::gfx::SourceSurface> GetSurfaceSnapshot(
+ gfxAlphaType* aOutAlphaType) override;
+
+ void SetOpaqueValueFromOpaqueAttr(bool aOpaqueAttrValue) override {}
+ bool GetIsOpaque() override { return true; }
+
+ void ResetBitmap() override { Unconfigure(); }
+
+ void MarkContextClean() override {}
+
+ NS_IMETHOD Redraw(const gfxRect& aDirty) override { return NS_OK; }
+
+ void DidRefresh() override {}
+
+ void MarkContextCleanForFrameCapture() override {}
+ Watchable<FrameCaptureState>* GetFrameCaptureState() override {
+ return nullptr;
+ }
+
+ Maybe<layers::SurfaceDescriptor> GetFrontBuffer(WebGLFramebufferJS*,
+ const bool) override;
+
+ already_AddRefed<layers::FwdTransactionTracker> UseCompositableForwarder(
+ layers::CompositableForwarder* aForwarder) override;
+
+ public:
+ void GetCanvas(dom::OwningHTMLCanvasElementOrOffscreenCanvas&) const;
+
+ void Configure(const dom::GPUCanvasConfiguration& aConfig);
+ void Unconfigure();
+
+ RefPtr<Texture> GetCurrentTexture(ErrorResult& aRv);
+ void MaybeQueueSwapChainPresent();
+ Maybe<layers::SurfaceDescriptor> SwapChainPresent();
+ void ForceNewFrame();
+ void InvalidateCanvasContent();
+
+ private:
+ gfx::IntSize mCanvasSize;
+ std::unique_ptr<dom::GPUCanvasConfiguration> mConfig;
+ bool mPendingSwapChainPresent = false;
+ bool mWaitingCanvasRendererInitialized = false;
+
+ RefPtr<WebGPUChild> mBridge;
+ RefPtr<Texture> mTexture;
+ gfx::SurfaceFormat mGfxFormat = gfx::SurfaceFormat::R8G8B8A8;
+
+ Maybe<layers::RemoteTextureId> mLastRemoteTextureId;
+ Maybe<layers::RemoteTextureOwnerId> mRemoteTextureOwnerId;
+ RefPtr<layers::FwdTransactionTracker> mFwdTransactionTracker;
+ bool mUseExternalTextureInSwapChain = false;
+ bool mNewTextureRequested = false;
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_CanvasContext_H_
diff --git a/dom/webgpu/CommandBuffer.cpp b/dom/webgpu/CommandBuffer.cpp
new file mode 100644
index 0000000000..ff9bbd8d5d
--- /dev/null
+++ b/dom/webgpu/CommandBuffer.cpp
@@ -0,0 +1,44 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "CommandBuffer.h"
+#include "CommandEncoder.h"
+#include "ipc/WebGPUChild.h"
+
+#include "mozilla/webgpu/CanvasContext.h"
+#include "Device.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(CommandBuffer, mParent)
+GPU_IMPL_JS_WRAP(CommandBuffer)
+
+CommandBuffer::CommandBuffer(Device* const aParent, RawId aId,
+ nsTArray<WeakPtr<CanvasContext>>&& aTargetContexts,
+ RefPtr<CommandEncoder>&& aEncoder)
+ : ChildOf(aParent), mId(aId), mTargetContexts(std::move(aTargetContexts)) {
+ mEncoder = std::move(aEncoder);
+ MOZ_RELEASE_ASSERT(aId);
+}
+
+CommandBuffer::~CommandBuffer() {}
+
+void CommandBuffer::Cleanup() { mEncoder = nullptr; }
+
+Maybe<RawId> CommandBuffer::Commit() {
+ if (!mValid) {
+ return Nothing();
+ }
+ mValid = false;
+ for (const auto& targetContext : mTargetContexts) {
+ if (targetContext) {
+ targetContext->MaybeQueueSwapChainPresent();
+ }
+ }
+ return Some(mId);
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/CommandBuffer.h b/dom/webgpu/CommandBuffer.h
new file mode 100644
index 0000000000..b9c2495fb7
--- /dev/null
+++ b/dom/webgpu/CommandBuffer.h
@@ -0,0 +1,46 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_CommandBuffer_H_
+#define GPU_CommandBuffer_H_
+
+#include "mozilla/WeakPtr.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+
+namespace mozilla::webgpu {
+
+class CanvasContext;
+class Device;
+
+class CommandBuffer final : public ObjectBase, public ChildOf<Device> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(CommandBuffer)
+ GPU_DECL_JS_WRAP(CommandBuffer)
+
+ CommandBuffer(Device* const aParent, RawId aId,
+ nsTArray<WeakPtr<CanvasContext>>&& aTargetContexts,
+ RefPtr<CommandEncoder>&& aEncoder);
+
+ Maybe<RawId> Commit();
+
+ private:
+ CommandBuffer() = delete;
+ ~CommandBuffer();
+ void Cleanup();
+
+ const RawId mId;
+ const nsTArray<WeakPtr<CanvasContext>> mTargetContexts;
+ // Command buffers and encoders share the same identity (this is a
+ // simplification currently made by wgpu). To avoid dropping the same ID twice,
+ // the wgpu resource lifetime is tied to the encoder which is held alive by
+ // the command buffer.
+ RefPtr<CommandEncoder> mEncoder;
+};
+
+} // namespace mozilla::webgpu
+
+#endif // GPU_CommandBuffer_H_
diff --git a/dom/webgpu/CommandEncoder.cpp b/dom/webgpu/CommandEncoder.cpp
new file mode 100644
index 0000000000..15d95401d4
--- /dev/null
+++ b/dom/webgpu/CommandEncoder.cpp
@@ -0,0 +1,270 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "CommandEncoder.h"
+
+#include "CommandBuffer.h"
+#include "Buffer.h"
+#include "ComputePassEncoder.h"
+#include "Device.h"
+#include "RenderPassEncoder.h"
+#include "Utility.h"
+#include "mozilla/webgpu/CanvasContext.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+#include "ipc/WebGPUChild.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(CommandEncoder, mParent, mBridge)
+GPU_IMPL_JS_WRAP(CommandEncoder)
+
+void CommandEncoder::ConvertTextureDataLayoutToFFI(
+ const dom::GPUImageDataLayout& aLayout,
+ ffi::WGPUImageDataLayout* aLayoutFFI) {
+ *aLayoutFFI = {};
+ aLayoutFFI->offset = aLayout.mOffset;
+
+ if (aLayout.mBytesPerRow.WasPassed()) {
+ aLayoutFFI->bytes_per_row = &aLayout.mBytesPerRow.Value();
+ } else {
+ aLayoutFFI->bytes_per_row = nullptr;
+ }
+
+ if (aLayout.mRowsPerImage.WasPassed()) {
+ aLayoutFFI->rows_per_image = &aLayout.mRowsPerImage.Value();
+ } else {
+ aLayoutFFI->rows_per_image = nullptr;
+ }
+}
+
+void CommandEncoder::ConvertTextureCopyViewToFFI(
+ const dom::GPUImageCopyTexture& aCopy,
+ ffi::WGPUImageCopyTexture* aViewFFI) {
+ *aViewFFI = {};
+ aViewFFI->texture = aCopy.mTexture->mId;
+ aViewFFI->mip_level = aCopy.mMipLevel;
+ if (aCopy.mOrigin.WasPassed()) {
+ const auto& origin = aCopy.mOrigin.Value();
+ if (origin.IsRangeEnforcedUnsignedLongSequence()) {
+ const auto& seq = origin.GetAsRangeEnforcedUnsignedLongSequence();
+ aViewFFI->origin.x = seq.Length() > 0 ? seq[0] : 0;
+ aViewFFI->origin.y = seq.Length() > 1 ? seq[1] : 0;
+ aViewFFI->origin.z = seq.Length() > 2 ? seq[2] : 0;
+ } else if (origin.IsGPUOrigin3DDict()) {
+ const auto& dict = origin.GetAsGPUOrigin3DDict();
+ aViewFFI->origin.x = dict.mX;
+ aViewFFI->origin.y = dict.mY;
+ aViewFFI->origin.z = dict.mZ;
+ } else {
+ MOZ_CRASH("Unexpected origin type");
+ }
+ }
+}
+
+static ffi::WGPUImageCopyTexture ConvertTextureCopyView(
+ const dom::GPUImageCopyTexture& aCopy) {
+ ffi::WGPUImageCopyTexture view = {};
+ CommandEncoder::ConvertTextureCopyViewToFFI(aCopy, &view);
+ return view;
+}
+
+CommandEncoder::CommandEncoder(Device* const aParent,
+ WebGPUChild* const aBridge, RawId aId)
+ : ChildOf(aParent), mId(aId), mBridge(aBridge) {
+ MOZ_RELEASE_ASSERT(aId);
+}
+
+CommandEncoder::~CommandEncoder() { Cleanup(); }
+
+void CommandEncoder::Cleanup() {
+ if (!mValid) {
+ return;
+ }
+ mValid = false;
+ if (mBridge->IsOpen()) {
+ mBridge->SendCommandEncoderDrop(mId);
+ }
+}
+
+void CommandEncoder::CopyBufferToBuffer(const Buffer& aSource,
+ BufferAddress aSourceOffset,
+ const Buffer& aDestination,
+ BufferAddress aDestinationOffset,
+ BufferAddress aSize) {
+ if (!mBridge->IsOpen()) {
+ return;
+ }
+
+ ipc::ByteBuf bb;
+ ffi::wgpu_command_encoder_copy_buffer_to_buffer(
+ aSource.mId, aSourceOffset, aDestination.mId, aDestinationOffset, aSize,
+ ToFFI(&bb));
+ mBridge->SendCommandEncoderAction(mId, mParent->mId, std::move(bb));
+}
+
+void CommandEncoder::CopyBufferToTexture(
+ const dom::GPUImageCopyBuffer& aSource,
+ const dom::GPUImageCopyTexture& aDestination,
+ const dom::GPUExtent3D& aCopySize) {
+ if (!mBridge->IsOpen()) {
+ return;
+ }
+
+ ipc::ByteBuf bb;
+ ffi::WGPUImageDataLayout src_layout = {};
+ CommandEncoder::ConvertTextureDataLayoutToFFI(aSource, &src_layout);
+ ffi::wgpu_command_encoder_copy_buffer_to_texture(
+ aSource.mBuffer->mId, &src_layout, ConvertTextureCopyView(aDestination),
+ ConvertExtent(aCopySize), ToFFI(&bb));
+ mBridge->SendCommandEncoderAction(mId, mParent->mId, std::move(bb));
+
+ const auto& targetContext = aDestination.mTexture->mTargetContext;
+ if (targetContext) {
+ mTargetContexts.AppendElement(targetContext);
+ }
+}
+void CommandEncoder::CopyTextureToBuffer(
+ const dom::GPUImageCopyTexture& aSource,
+ const dom::GPUImageCopyBuffer& aDestination,
+ const dom::GPUExtent3D& aCopySize) {
+ if (!mBridge->IsOpen()) {
+ return;
+ }
+
+ ipc::ByteBuf bb;
+ ffi::WGPUImageDataLayout dstLayout = {};
+ CommandEncoder::ConvertTextureDataLayoutToFFI(aDestination, &dstLayout);
+ ffi::wgpu_command_encoder_copy_texture_to_buffer(
+ ConvertTextureCopyView(aSource), aDestination.mBuffer->mId, &dstLayout,
+ ConvertExtent(aCopySize), ToFFI(&bb));
+ mBridge->SendCommandEncoderAction(mId, mParent->mId, std::move(bb));
+}
+void CommandEncoder::CopyTextureToTexture(
+ const dom::GPUImageCopyTexture& aSource,
+ const dom::GPUImageCopyTexture& aDestination,
+ const dom::GPUExtent3D& aCopySize) {
+ if (!mBridge->IsOpen()) {
+ return;
+ }
+
+ ipc::ByteBuf bb;
+ ffi::wgpu_command_encoder_copy_texture_to_texture(
+ ConvertTextureCopyView(aSource), ConvertTextureCopyView(aDestination),
+ ConvertExtent(aCopySize), ToFFI(&bb));
+ mBridge->SendCommandEncoderAction(mId, mParent->mId, std::move(bb));
+
+ const auto& targetContext = aDestination.mTexture->mTargetContext;
+ if (targetContext) {
+ mTargetContexts.AppendElement(targetContext);
+ }
+}
+
+void CommandEncoder::ClearBuffer(const Buffer& aBuffer, const uint64_t aOffset,
+ const dom::Optional<uint64_t>& aSize) {
+ uint64_t sizeVal = 0xdeaddead;
+ uint64_t* size = nullptr;
+ if (aSize.WasPassed()) {
+ sizeVal = aSize.Value();
+ size = &sizeVal;
+ }
+
+ ipc::ByteBuf bb;
+ ffi::wgpu_command_encoder_clear_buffer(aBuffer.mId, aOffset, size,
+ ToFFI(&bb));
+ mBridge->SendCommandEncoderAction(mId, mParent->mId, std::move(bb));
+}
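The optional size is forwarded to wgpu as a nullable pointer, the usual C-FFI encoding of an optional integer. A small standalone illustration of that pattern (the `ClearRange` consumer is hypothetical):

#include <cstdint>
#include <cstdio>
#include <optional>

// Hypothetical FFI-style consumer: a null size pointer means "to the end".
void ClearRange(uint64_t offset, const uint64_t* size) {
  if (size) {
    std::printf("clear %llu bytes at offset %llu\n",
                static_cast<unsigned long long>(*size),
                static_cast<unsigned long long>(offset));
  } else {
    std::printf("clear from offset %llu to the end\n",
                static_cast<unsigned long long>(offset));
  }
}

void Example(std::optional<uint64_t> requested) {
  uint64_t storage = 0;
  const uint64_t* sizePtr = nullptr;
  if (requested) {
    storage = *requested;  // keep the value alive for the duration of the call
    sizePtr = &storage;
  }
  ClearRange(/* offset */ 64, sizePtr);
}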
+
+void CommandEncoder::PushDebugGroup(const nsAString& aString) {
+ if (!mBridge->IsOpen()) {
+ return;
+ }
+
+ ipc::ByteBuf bb;
+ NS_ConvertUTF16toUTF8 marker(aString);
+ ffi::wgpu_command_encoder_push_debug_group(&marker, ToFFI(&bb));
+ mBridge->SendCommandEncoderAction(mId, mParent->mId, std::move(bb));
+}
+void CommandEncoder::PopDebugGroup() {
+ if (!mBridge->IsOpen()) {
+ return;
+ }
+
+ ipc::ByteBuf bb;
+ ffi::wgpu_command_encoder_pop_debug_group(ToFFI(&bb));
+ mBridge->SendCommandEncoderAction(mId, mParent->mId, std::move(bb));
+}
+void CommandEncoder::InsertDebugMarker(const nsAString& aString) {
+ if (!mBridge->IsOpen()) {
+ return;
+ }
+
+ ipc::ByteBuf bb;
+ NS_ConvertUTF16toUTF8 marker(aString);
+ ffi::wgpu_command_encoder_insert_debug_marker(&marker, ToFFI(&bb));
+ mBridge->SendCommandEncoderAction(mId, mParent->mId, std::move(bb));
+}
+
+already_AddRefed<ComputePassEncoder> CommandEncoder::BeginComputePass(
+ const dom::GPUComputePassDescriptor& aDesc) {
+ RefPtr<ComputePassEncoder> pass = new ComputePassEncoder(this, aDesc);
+ return pass.forget();
+}
+
+already_AddRefed<RenderPassEncoder> CommandEncoder::BeginRenderPass(
+ const dom::GPURenderPassDescriptor& aDesc) {
+ for (const auto& at : aDesc.mColorAttachments) {
+ auto* targetContext = at.mView->GetTargetContext();
+ if (targetContext) {
+ mTargetContexts.AppendElement(targetContext);
+ }
+ if (at.mResolveTarget.WasPassed()) {
+ targetContext = at.mResolveTarget.Value().GetTargetContext();
+ mTargetContexts.AppendElement(targetContext);
+ }
+ }
+
+ RefPtr<RenderPassEncoder> pass = new RenderPassEncoder(this, aDesc);
+ return pass.forget();
+}
+
+void CommandEncoder::EndComputePass(ffi::WGPUComputePass& aPass) {
+ if (!mBridge->IsOpen()) {
+ return;
+ }
+
+ ipc::ByteBuf byteBuf;
+ ffi::wgpu_compute_pass_finish(&aPass, ToFFI(&byteBuf));
+ mBridge->SendCommandEncoderAction(mId, mParent->mId, std::move(byteBuf));
+}
+
+void CommandEncoder::EndRenderPass(ffi::WGPURenderPass& aPass) {
+ if (!mBridge->IsOpen()) {
+ return;
+ }
+
+ ipc::ByteBuf byteBuf;
+ ffi::wgpu_render_pass_finish(&aPass, ToFFI(&byteBuf));
+ mBridge->SendCommandEncoderAction(mId, mParent->mId, std::move(byteBuf));
+}
+
+already_AddRefed<CommandBuffer> CommandEncoder::Finish(
+ const dom::GPUCommandBufferDescriptor& aDesc) {
+ // We rely on knowledge that `CommandEncoderId` == `CommandBufferId`
+ // TODO: refactor this to truly behave as if the encoder is being finished,
+ // and a new command buffer ID is being created from it. Resolve the ID
+ // type aliasing at the place that introduces it: `wgpu-core`.
+ RawId deviceId = mParent->mId;
+ if (mBridge->CanSend()) {
+ mBridge->SendCommandEncoderFinish(mId, deviceId, aDesc);
+ }
+
+ RefPtr<CommandEncoder> me(this);
+ RefPtr<CommandBuffer> comb = new CommandBuffer(
+ mParent, mId, std::move(mTargetContexts), std::move(me));
+ return comb.forget();
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/CommandEncoder.h b/dom/webgpu/CommandEncoder.h
new file mode 100644
index 0000000000..52b10a5b2e
--- /dev/null
+++ b/dom/webgpu/CommandEncoder.h
@@ -0,0 +1,109 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_CommandEncoder_H_
+#define GPU_CommandEncoder_H_
+
+#include "mozilla/dom/TypedArray.h"
+#include "mozilla/WeakPtr.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+
+namespace mozilla {
+class ErrorResult;
+
+namespace dom {
+struct GPUComputePassDescriptor;
+template <typename T>
+class Sequence;
+struct GPUCommandBufferDescriptor;
+class GPUComputePipelineOrGPURenderPipeline;
+class RangeEnforcedUnsignedLongSequenceOrGPUExtent3DDict;
+struct GPUImageCopyBuffer;
+struct GPUImageCopyTexture;
+struct GPUImageBitmapCopyView;
+struct GPUImageDataLayout;
+struct GPURenderPassDescriptor;
+using GPUExtent3D = RangeEnforcedUnsignedLongSequenceOrGPUExtent3DDict;
+} // namespace dom
+namespace webgpu {
+namespace ffi {
+struct WGPUComputePass;
+struct WGPURenderPass;
+struct WGPUImageDataLayout;
+struct WGPUImageCopyTexture_TextureId;
+struct WGPUExtent3d;
+} // namespace ffi
+
+class BindGroup;
+class Buffer;
+class CanvasContext;
+class CommandBuffer;
+class ComputePassEncoder;
+class Device;
+class RenderPassEncoder;
+
+class CommandEncoder final : public ObjectBase, public ChildOf<Device> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(CommandEncoder)
+ GPU_DECL_JS_WRAP(CommandEncoder)
+
+ CommandEncoder(Device* const aParent, WebGPUChild* const aBridge, RawId aId);
+
+ const RawId mId;
+
+ static void ConvertTextureDataLayoutToFFI(
+ const dom::GPUImageDataLayout& aLayout,
+ ffi::WGPUImageDataLayout* aLayoutFFI);
+ static void ConvertTextureCopyViewToFFI(
+ const dom::GPUImageCopyTexture& aCopy,
+ ffi::WGPUImageCopyTexture_TextureId* aViewFFI);
+
+ private:
+ ~CommandEncoder();
+ void Cleanup();
+
+ RefPtr<WebGPUChild> mBridge;
+ nsTArray<WeakPtr<CanvasContext>> mTargetContexts;
+
+ public:
+ const auto& GetDevice() const { return mParent; };
+
+ void EndComputePass(ffi::WGPUComputePass& aPass);
+ void EndRenderPass(ffi::WGPURenderPass& aPass);
+
+ void CopyBufferToBuffer(const Buffer& aSource, BufferAddress aSourceOffset,
+ const Buffer& aDestination,
+ BufferAddress aDestinationOffset,
+ BufferAddress aSize);
+ void CopyBufferToTexture(const dom::GPUImageCopyBuffer& aSource,
+ const dom::GPUImageCopyTexture& aDestination,
+ const dom::GPUExtent3D& aCopySize);
+ void CopyTextureToBuffer(const dom::GPUImageCopyTexture& aSource,
+ const dom::GPUImageCopyBuffer& aDestination,
+ const dom::GPUExtent3D& aCopySize);
+ void CopyTextureToTexture(const dom::GPUImageCopyTexture& aSource,
+ const dom::GPUImageCopyTexture& aDestination,
+ const dom::GPUExtent3D& aCopySize);
+ void ClearBuffer(const Buffer& aBuffer, const uint64_t aOffset,
+ const dom::Optional<uint64_t>& aSize);
+
+ void PushDebugGroup(const nsAString& aString);
+ void PopDebugGroup();
+ void InsertDebugMarker(const nsAString& aString);
+
+ already_AddRefed<ComputePassEncoder> BeginComputePass(
+ const dom::GPUComputePassDescriptor& aDesc);
+ already_AddRefed<RenderPassEncoder> BeginRenderPass(
+ const dom::GPURenderPassDescriptor& aDesc);
+ already_AddRefed<CommandBuffer> Finish(
+ const dom::GPUCommandBufferDescriptor& aDesc);
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_CommandEncoder_H_
diff --git a/dom/webgpu/CompilationInfo.cpp b/dom/webgpu/CompilationInfo.cpp
new file mode 100644
index 0000000000..0d46db2e59
--- /dev/null
+++ b/dom/webgpu/CompilationInfo.cpp
@@ -0,0 +1,33 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CompilationInfo.h"
+#include "CompilationMessage.h"
+#include "ShaderModule.h"
+#include "mozilla/dom/WebGPUBinding.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(CompilationInfo, mParent, mMessages)
+GPU_IMPL_JS_WRAP(CompilationInfo)
+
+CompilationInfo::CompilationInfo(Device* const aParent) : ChildOf(aParent) {}
+
+void CompilationInfo::SetMessages(
+ nsTArray<mozilla::webgpu::WebGPUCompilationMessage>& aMessages) {
+ for (auto& msg : aMessages) {
+ mMessages.AppendElement(MakeAndAddRef<mozilla::webgpu::CompilationMessage>(
+ mParent, msg.lineNum, msg.linePos, msg.offset, std::move(msg.message)));
+ }
+}
+
+void CompilationInfo::GetMessages(
+ nsTArray<RefPtr<mozilla::webgpu::CompilationMessage>>& aMessages) {
+ for (auto& msg : mMessages) {
+ aMessages.AppendElement(msg);
+ }
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/CompilationInfo.h b/dom/webgpu/CompilationInfo.h
new file mode 100644
index 0000000000..aafe311d40
--- /dev/null
+++ b/dom/webgpu/CompilationInfo.h
@@ -0,0 +1,38 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_CompilationInfo_H_
+#define GPU_CompilationInfo_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+#include "CompilationMessage.h"
+
+namespace mozilla::webgpu {
+class ShaderModule;
+
+class CompilationInfo final : public nsWrapperCache, public ChildOf<Device> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(CompilationInfo)
+ GPU_DECL_JS_WRAP(CompilationInfo)
+
+ explicit CompilationInfo(Device* const aParent);
+
+ void SetMessages(
+ nsTArray<mozilla::webgpu::WebGPUCompilationMessage>& aMessages);
+
+ void GetMessages(
+ nsTArray<RefPtr<mozilla::webgpu::CompilationMessage>>& aMessages);
+
+ private:
+ ~CompilationInfo() = default;
+ void Cleanup() {}
+
+ nsTArray<RefPtr<mozilla::webgpu::CompilationMessage>> mMessages;
+};
+
+} // namespace mozilla::webgpu
+
+#endif // GPU_CompilationInfo_H_
diff --git a/dom/webgpu/CompilationMessage.cpp b/dom/webgpu/CompilationMessage.cpp
new file mode 100644
index 0000000000..1463ec408e
--- /dev/null
+++ b/dom/webgpu/CompilationMessage.cpp
@@ -0,0 +1,24 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "CompilationMessage.h"
+#include "CompilationInfo.h"
+#include "mozilla/dom/WebGPUBinding.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(CompilationMessage, mParent)
+GPU_IMPL_JS_WRAP(CompilationMessage)
+
+CompilationMessage::CompilationMessage(Device* const aParent, uint64_t aLineNum,
+ uint64_t aLinePos, uint64_t aOffset,
+ nsString&& aMessage)
+ : ChildOf(aParent),
+ mLineNum(aLineNum),
+ mLinePos(aLinePos),
+ mOffset(aOffset),
+ mMessage(std::move(aMessage)) {}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/CompilationMessage.h b/dom/webgpu/CompilationMessage.h
new file mode 100644
index 0000000000..bac786dec6
--- /dev/null
+++ b/dom/webgpu/CompilationMessage.h
@@ -0,0 +1,53 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_CompilationMessage_H_
+#define GPU_CompilationMessage_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+#include "mozilla/dom/WebGPUBinding.h"
+
+namespace mozilla {
+namespace dom {
+class DOMString;
+} // namespace dom
+namespace webgpu {
+class CompilationInfo;
+
+class CompilationMessage final : public nsWrapperCache, public ChildOf<Device> {
+ dom::GPUCompilationMessageType mType = dom::GPUCompilationMessageType::Error;
+ uint64_t mLineNum = 0;
+ uint64_t mLinePos = 0;
+ uint64_t mOffset = 0;
+ uint64_t mLength = 0;
+ nsString mMessage;
+
+ public:
+ GPU_DECL_CYCLE_COLLECTION(CompilationMessage)
+ GPU_DECL_JS_WRAP(CompilationMessage)
+
+ explicit CompilationMessage(Device* const aParent, uint64_t aLineNum,
+ uint64_t aLinePos, uint64_t aOffset,
+ nsString&& aMessage);
+
+ void GetMessage(dom::DOMString& aMessage) {
+ aMessage.AsAString().Assign(mMessage);
+ }
+ dom::GPUCompilationMessageType Type() const { return mType; }
+ uint64_t LineNum() const { return mLineNum; }
+ uint64_t LinePos() const { return mLinePos; }
+ uint64_t Offset() const { return mOffset; }
+ uint64_t Length() const { return mLength; }
+
+ private:
+ ~CompilationMessage() = default;
+ void Cleanup() {}
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_CompilationMessage_H_
diff --git a/dom/webgpu/ComputePassEncoder.cpp b/dom/webgpu/ComputePassEncoder.cpp
new file mode 100644
index 0000000000..2820a575e8
--- /dev/null
+++ b/dom/webgpu/ComputePassEncoder.cpp
@@ -0,0 +1,109 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "ComputePassEncoder.h"
+#include "BindGroup.h"
+#include "ComputePipeline.h"
+#include "CommandEncoder.h"
+
+#include "mozilla/webgpu/ffi/wgpu.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(ComputePassEncoder, mParent, mUsedBindGroups,
+ mUsedPipelines)
+GPU_IMPL_JS_WRAP(ComputePassEncoder)
+
+void ffiWGPUComputePassDeleter::operator()(ffi::WGPUComputePass* raw) {
+ if (raw) {
+ ffi::wgpu_compute_pass_destroy(raw);
+ }
+}
+
+ffi::WGPUComputePass* BeginComputePass(
+ RawId aEncoderId, const dom::GPUComputePassDescriptor& aDesc) {
+ MOZ_RELEASE_ASSERT(aEncoderId);
+ ffi::WGPUComputePassDescriptor desc = {};
+
+ webgpu::StringHelper label(aDesc.mLabel);
+ desc.label = label.Get();
+
+ return ffi::wgpu_command_encoder_begin_compute_pass(aEncoderId, &desc);
+}
+
+ComputePassEncoder::ComputePassEncoder(
+ CommandEncoder* const aParent, const dom::GPUComputePassDescriptor& aDesc)
+ : ChildOf(aParent), mPass(BeginComputePass(aParent->mId, aDesc)) {}
+
+ComputePassEncoder::~ComputePassEncoder() {
+ if (mValid) {
+ mValid = false;
+ }
+}
+
+void ComputePassEncoder::SetBindGroup(
+ uint32_t aSlot, const BindGroup& aBindGroup,
+ const dom::Sequence<uint32_t>& aDynamicOffsets) {
+ if (mValid) {
+ mUsedBindGroups.AppendElement(&aBindGroup);
+ ffi::wgpu_compute_pass_set_bind_group(mPass.get(), aSlot, aBindGroup.mId,
+ aDynamicOffsets.Elements(),
+ aDynamicOffsets.Length());
+ }
+}
+
+void ComputePassEncoder::SetPipeline(const ComputePipeline& aPipeline) {
+ if (mValid) {
+ mUsedPipelines.AppendElement(&aPipeline);
+ ffi::wgpu_compute_pass_set_pipeline(mPass.get(), aPipeline.mId);
+ }
+}
+
+void ComputePassEncoder::DispatchWorkgroups(uint32_t workgroupCountX,
+ uint32_t workgroupCountY,
+ uint32_t workgroupCountZ) {
+ if (mValid) {
+ ffi::wgpu_compute_pass_dispatch_workgroups(
+ mPass.get(), workgroupCountX, workgroupCountY, workgroupCountZ);
+ }
+}
+
+void ComputePassEncoder::DispatchWorkgroupsIndirect(
+ const Buffer& aIndirectBuffer, uint64_t aIndirectOffset) {
+ if (mValid) {
+ ffi::wgpu_compute_pass_dispatch_workgroups_indirect(
+ mPass.get(), aIndirectBuffer.mId, aIndirectOffset);
+ }
+}
+
+void ComputePassEncoder::PushDebugGroup(const nsAString& aString) {
+ if (mValid) {
+ const NS_ConvertUTF16toUTF8 utf8(aString);
+ ffi::wgpu_compute_pass_push_debug_group(mPass.get(), utf8.get(), 0);
+ }
+}
+void ComputePassEncoder::PopDebugGroup() {
+ if (mValid) {
+ ffi::wgpu_compute_pass_pop_debug_group(mPass.get());
+ }
+}
+void ComputePassEncoder::InsertDebugMarker(const nsAString& aString) {
+ if (mValid) {
+ const NS_ConvertUTF16toUTF8 utf8(aString);
+ ffi::wgpu_compute_pass_insert_debug_marker(mPass.get(), utf8.get(), 0);
+ }
+}
+
+void ComputePassEncoder::End() {
+ if (mValid) {
+ mValid = false;
+ auto* pass = mPass.release();
+ MOZ_ASSERT(pass);
+ mParent->EndComputePass(*pass);
+ }
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/ComputePassEncoder.h b/dom/webgpu/ComputePassEncoder.h
new file mode 100644
index 0000000000..8160a09e2e
--- /dev/null
+++ b/dom/webgpu/ComputePassEncoder.h
@@ -0,0 +1,73 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_ComputePassEncoder_H_
+#define GPU_ComputePassEncoder_H_
+
+#include "mozilla/dom/TypedArray.h"
+#include "ObjectModel.h"
+
+namespace mozilla {
+class ErrorResult;
+
+namespace dom {
+struct GPUComputePassDescriptor;
+}
+
+namespace webgpu {
+namespace ffi {
+struct WGPUComputePass;
+} // namespace ffi
+
+class BindGroup;
+class Buffer;
+class CommandEncoder;
+class ComputePipeline;
+
+struct ffiWGPUComputePassDeleter {
+ void operator()(ffi::WGPUComputePass*);
+};
+
+class ComputePassEncoder final : public ObjectBase,
+ public ChildOf<CommandEncoder> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(ComputePassEncoder)
+ GPU_DECL_JS_WRAP(ComputePassEncoder)
+
+ ComputePassEncoder(CommandEncoder* const aParent,
+ const dom::GPUComputePassDescriptor& aDesc);
+
+ private:
+ virtual ~ComputePassEncoder();
+ void Cleanup() {}
+
+ std::unique_ptr<ffi::WGPUComputePass, ffiWGPUComputePassDeleter> mPass;
+ // keep all the used objects alive while the pass is recorded
+ nsTArray<RefPtr<const BindGroup>> mUsedBindGroups;
+ nsTArray<RefPtr<const ComputePipeline>> mUsedPipelines;
+
+ public:
+ // programmable pass encoder
+ void SetBindGroup(uint32_t aSlot, const BindGroup& aBindGroup,
+ const dom::Sequence<uint32_t>& aDynamicOffsets);
+ // self
+ void SetPipeline(const ComputePipeline& aPipeline);
+
+ void DispatchWorkgroups(uint32_t workgroupCountX, uint32_t workgroupCountY,
+ uint32_t workgroupCountZ);
+ void DispatchWorkgroupsIndirect(const Buffer& aIndirectBuffer,
+ uint64_t aIndirectOffset);
+
+ void PushDebugGroup(const nsAString& aString);
+ void PopDebugGroup();
+ void InsertDebugMarker(const nsAString& aString);
+
+ void End();
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_ComputePassEncoder_H_
diff --git a/dom/webgpu/ComputePipeline.cpp b/dom/webgpu/ComputePipeline.cpp
new file mode 100644
index 0000000000..ca50c5583a
--- /dev/null
+++ b/dom/webgpu/ComputePipeline.cpp
@@ -0,0 +1,58 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ComputePipeline.h"
+
+#include "Device.h"
+#include "ipc/WebGPUChild.h"
+#include "mozilla/dom/WebGPUBinding.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(ComputePipeline, mParent)
+GPU_IMPL_JS_WRAP(ComputePipeline)
+
+ComputePipeline::ComputePipeline(Device* const aParent, RawId aId,
+ RawId aImplicitPipelineLayoutId,
+ nsTArray<RawId>&& aImplicitBindGroupLayoutIds)
+ : ChildOf(aParent),
+ mImplicitPipelineLayoutId(aImplicitPipelineLayoutId),
+ mImplicitBindGroupLayoutIds(std::move(aImplicitBindGroupLayoutIds)),
+ mId(aId) {
+ MOZ_RELEASE_ASSERT(aId);
+}
+
+ComputePipeline::~ComputePipeline() { Cleanup(); }
+
+void ComputePipeline::Cleanup() {
+ if (mValid && mParent) {
+ mValid = false;
+ auto bridge = mParent->GetBridge();
+ if (bridge && bridge->IsOpen()) {
+ bridge->SendComputePipelineDrop(mId);
+ if (mImplicitPipelineLayoutId) {
+ bridge->SendImplicitLayoutDrop(mImplicitPipelineLayoutId,
+ mImplicitBindGroupLayoutIds);
+ }
+ }
+ }
+}
+
+already_AddRefed<BindGroupLayout> ComputePipeline::GetBindGroupLayout(
+ uint32_t aIndex) const {
+ auto bridge = mParent->GetBridge();
+ auto* client = bridge->GetClient();
+
+ ipc::ByteBuf bb;
+ const RawId bglId = ffi::wgpu_client_compute_pipeline_get_bind_group_layout(
+ client, mId, aIndex, ToFFI(&bb));
+
+ bridge->SendDeviceAction(mParent->GetId(), std::move(bb));
+
+ RefPtr<BindGroupLayout> object = new BindGroupLayout(mParent, bglId, false);
+ return object.forget();
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/ComputePipeline.h b/dom/webgpu/ComputePipeline.h
new file mode 100644
index 0000000000..5dbd972912
--- /dev/null
+++ b/dom/webgpu/ComputePipeline.h
@@ -0,0 +1,41 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_ComputePipeline_H_
+#define GPU_ComputePipeline_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+#include "nsTArray.h"
+
+namespace mozilla::webgpu {
+
+class BindGroupLayout;
+class Device;
+
+class ComputePipeline final : public ObjectBase, public ChildOf<Device> {
+ const RawId mImplicitPipelineLayoutId;
+ const nsTArray<RawId> mImplicitBindGroupLayoutIds;
+
+ public:
+ GPU_DECL_CYCLE_COLLECTION(ComputePipeline)
+ GPU_DECL_JS_WRAP(ComputePipeline)
+
+ const RawId mId;
+
+ ComputePipeline(Device* const aParent, RawId aId,
+ RawId aImplicitPipelineLayoutId,
+ nsTArray<RawId>&& aImplicitBindGroupLayoutIds);
+ already_AddRefed<BindGroupLayout> GetBindGroupLayout(uint32_t index) const;
+
+ private:
+ ~ComputePipeline();
+ void Cleanup();
+};
+
+} // namespace mozilla::webgpu
+
+#endif // GPU_ComputePipeline_H_
diff --git a/dom/webgpu/Device.cpp b/dom/webgpu/Device.cpp
new file mode 100644
index 0000000000..a659047af1
--- /dev/null
+++ b/dom/webgpu/Device.cpp
@@ -0,0 +1,1058 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "js/ArrayBuffer.h"
+#include "js/Value.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/ErrorResult.h"
+#include "mozilla/Logging.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/dom/Console.h"
+#include "mozilla/dom/Promise.h"
+#include "mozilla/dom/WebGPUBinding.h"
+#include "Device.h"
+#include "CommandEncoder.h"
+#include "BindGroup.h"
+
+#include "Adapter.h"
+#include "Buffer.h"
+#include "CompilationInfo.h"
+#include "ComputePipeline.h"
+#include "DeviceLostInfo.h"
+#include "InternalError.h"
+#include "OutOfMemoryError.h"
+#include "PipelineLayout.h"
+#include "Queue.h"
+#include "RenderBundleEncoder.h"
+#include "RenderPipeline.h"
+#include "Sampler.h"
+#include "SupportedFeatures.h"
+#include "SupportedLimits.h"
+#include "Texture.h"
+#include "TextureView.h"
+#include "ValidationError.h"
+#include "ipc/WebGPUChild.h"
+#include "Utility.h"
+#include "nsGlobalWindowInner.h"
+
+namespace mozilla::webgpu {
+
+mozilla::LazyLogModule gWebGPULog("WebGPU");
+
+GPU_IMPL_CYCLE_COLLECTION_WRAPPERCACHE_INHERITED(Device, DOMEventTargetHelper,
+ mBridge, mQueue, mFeatures,
+ mLimits, mLostPromise);
+NS_IMPL_ISUPPORTS_CYCLE_COLLECTION_INHERITED_0(Device, DOMEventTargetHelper)
+GPU_IMPL_JS_WRAP(Device)
+
+/* static */ CheckedInt<uint32_t> Device::BufferStrideWithMask(
+ const gfx::IntSize& aSize, const gfx::SurfaceFormat& aFormat) {
+ constexpr uint32_t kBufferAlignmentMask = 0xff;
+ return CheckedInt<uint32_t>(aSize.width) * gfx::BytesPerPixel(aFormat) +
+ kBufferAlignmentMask;
+}
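The mask here is 0xff, i.e. 256-byte row alignment, which is what WebGPU requires for buffer copies. Presumably the caller rounds the returned value down with `& ~kBufferAlignmentMask`; a standalone sketch of the full round-up, with overflow checking omitted for brevity (the real code keeps everything in `CheckedInt`):

#include <cstdint>

// Rounds a row's byte length up to the next 256-byte boundary. The "+ mask"
// half is what BufferStrideWithMask computes; masking the low bits off is
// assumed to happen at the call site.
uint32_t AlignedBytesPerRow(uint32_t width, uint32_t bytesPerPixel) {
  constexpr uint32_t kMask = 0xff;  // 256-byte alignment
  return (width * bytesPerPixel + kMask) & ~kMask;
}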
+
+RefPtr<WebGPUChild> Device::GetBridge() { return mBridge; }
+
+Device::Device(Adapter* const aParent, RawId aId,
+ const ffi::WGPULimits& aRawLimits)
+ : DOMEventTargetHelper(aParent->GetParentObject()),
+ mId(aId),
+ // features are filled in Adapter::RequestDevice
+ mFeatures(new SupportedFeatures(aParent)),
+ mLimits(new SupportedLimits(aParent, aRawLimits)),
+ mBridge(aParent->mBridge),
+ mQueue(new class Queue(this, aParent->mBridge, aId)) {
+ mBridge->RegisterDevice(this);
+}
+
+Device::~Device() { Cleanup(); }
+
+void Device::Cleanup() {
+ if (!mValid) {
+ return;
+ }
+
+ mValid = false;
+
+ if (mBridge) {
+ mBridge->UnregisterDevice(mId);
+ }
+}
+
+void Device::CleanupUnregisteredInParent() {
+ if (mBridge) {
+ mBridge->FreeUnregisteredInParentDevice(mId);
+ }
+ mValid = false;
+}
+
+bool Device::IsLost() const {
+ return !mBridge || !mBridge->CanSend() ||
+ (mLostPromise &&
+ (mLostPromise->State() != dom::Promise::PromiseState::Pending));
+}
+
+bool Device::IsBridgeAlive() const { return mBridge && mBridge->CanSend(); }
+
+// Generate an error on the Device timeline for this device.
+//
+// aMessage is interpreted as UTF-8.
+void Device::GenerateValidationError(const nsCString& aMessage) {
+ if (!IsBridgeAlive()) {
+ return; // Just drop it?
+ }
+ mBridge->SendGenerateError(Some(mId), dom::GPUErrorFilter::Validation,
+ aMessage);
+}
+
+void Device::TrackBuffer(Buffer* aBuffer) { mTrackedBuffers.Insert(aBuffer); }
+
+void Device::UntrackBuffer(Buffer* aBuffer) { mTrackedBuffers.Remove(aBuffer); }
+
+void Device::GetLabel(nsAString& aValue) const { aValue = mLabel; }
+void Device::SetLabel(const nsAString& aLabel) { mLabel = aLabel; }
+
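+// Lazily create the `lost` promise. If the IPC bridge is already gone, the
+// promise is resolved immediately with a DeviceLostInfo.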
+dom::Promise* Device::GetLost(ErrorResult& aRv) {
+ aRv = NS_OK;
+ if (!mLostPromise) {
+ mLostPromise = dom::Promise::Create(GetParentObject(), aRv);
+ if (mLostPromise && !mBridge->CanSend()) {
+ auto info = MakeRefPtr<DeviceLostInfo>(GetParentObject(),
+ u"WebGPUChild destroyed"_ns);
+ mLostPromise->MaybeResolve(info);
+ }
+ }
+ return mLostPromise;
+}
+
+void Device::ResolveLost(Maybe<dom::GPUDeviceLostReason> aReason,
+ const nsAString& aMessage) {
+ IgnoredErrorResult rv;
+ dom::Promise* lostPromise = GetLost(rv);
+ if (!lostPromise) {
+ // Promise doesn't exist? Maybe out of memory.
+ return;
+ }
+ if (!lostPromise->PromiseObj()) {
+ // The underlying JS object is gone.
+ return;
+ }
+ if (lostPromise->State() != dom::Promise::PromiseState::Pending) {
+ // lostPromise was already resolved or rejected.
+ return;
+ }
+ RefPtr<DeviceLostInfo> info;
+ if (aReason.isSome()) {
+ info = MakeRefPtr<DeviceLostInfo>(GetParentObject(), *aReason, aMessage);
+ } else {
+ info = MakeRefPtr<DeviceLostInfo>(GetParentObject(), aMessage);
+ }
+ lostPromise->MaybeResolve(info);
+}
+
+already_AddRefed<Buffer> Device::CreateBuffer(
+ const dom::GPUBufferDescriptor& aDesc, ErrorResult& aRv) {
+ return Buffer::Create(this, mId, aDesc, aRv);
+}
+
+already_AddRefed<Texture> Device::CreateTextureForSwapChain(
+ const dom::GPUCanvasConfiguration* const aConfig,
+ const gfx::IntSize& aCanvasSize, layers::RemoteTextureOwnerId aOwnerId) {
+ MOZ_ASSERT(aConfig);
+
+ dom::GPUTextureDescriptor desc;
+ desc.mDimension = dom::GPUTextureDimension::_2d;
+ auto& sizeDict = desc.mSize.SetAsGPUExtent3DDict();
+ sizeDict.mWidth = aCanvasSize.width;
+ sizeDict.mHeight = aCanvasSize.height;
+ sizeDict.mDepthOrArrayLayers = 1;
+ desc.mFormat = aConfig->mFormat;
+ desc.mMipLevelCount = 1;
+ desc.mSampleCount = 1;
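+  // The swap chain texture is copied out for presentation and readback, so
+  // COPY_SRC is added on top of whatever usage the page configured.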
+ desc.mUsage = aConfig->mUsage | dom::GPUTextureUsage_Binding::COPY_SRC;
+ desc.mViewFormats = aConfig->mViewFormats;
+
+ return CreateTexture(desc, Some(aOwnerId));
+}
+
+already_AddRefed<Texture> Device::CreateTexture(
+ const dom::GPUTextureDescriptor& aDesc) {
+ return CreateTexture(aDesc, /* aOwnerId */ Nothing());
+}
+
+already_AddRefed<Texture> Device::CreateTexture(
+ const dom::GPUTextureDescriptor& aDesc,
+ Maybe<layers::RemoteTextureOwnerId> aOwnerId) {
+ ffi::WGPUTextureDescriptor desc = {};
+
+ webgpu::StringHelper label(aDesc.mLabel);
+ desc.label = label.Get();
+
+ if (aDesc.mSize.IsRangeEnforcedUnsignedLongSequence()) {
+ const auto& seq = aDesc.mSize.GetAsRangeEnforcedUnsignedLongSequence();
+ desc.size.width = seq.Length() > 0 ? seq[0] : 1;
+ desc.size.height = seq.Length() > 1 ? seq[1] : 1;
+ desc.size.depth_or_array_layers = seq.Length() > 2 ? seq[2] : 1;
+ } else if (aDesc.mSize.IsGPUExtent3DDict()) {
+ const auto& dict = aDesc.mSize.GetAsGPUExtent3DDict();
+ desc.size.width = dict.mWidth;
+ desc.size.height = dict.mHeight;
+ desc.size.depth_or_array_layers = dict.mDepthOrArrayLayers;
+ } else {
+ MOZ_CRASH("Unexpected union");
+ }
+ desc.mip_level_count = aDesc.mMipLevelCount;
+ desc.sample_count = aDesc.mSampleCount;
+ desc.dimension = ffi::WGPUTextureDimension(aDesc.mDimension);
+ desc.format = ConvertTextureFormat(aDesc.mFormat);
+ desc.usage = aDesc.mUsage;
+
+ AutoTArray<ffi::WGPUTextureFormat, 8> viewFormats;
+ for (auto format : aDesc.mViewFormats) {
+ viewFormats.AppendElement(ConvertTextureFormat(format));
+ }
+ desc.view_formats = {viewFormats.Elements(), viewFormats.Length()};
+
+ Maybe<ffi::WGPUSwapChainId> ownerId;
+ if (aOwnerId.isSome()) {
+ ownerId = Some(ffi::WGPUSwapChainId{aOwnerId->mId});
+ }
+
+ ipc::ByteBuf bb;
+ RawId id = ffi::wgpu_client_create_texture(
+ mBridge->GetClient(), mId, &desc, ownerId.ptrOr(nullptr), ToFFI(&bb));
+
+ if (mBridge->CanSend()) {
+ mBridge->SendDeviceAction(mId, std::move(bb));
+ }
+
+ RefPtr<Texture> texture = new Texture(this, id, aDesc);
+ return texture.forget();
+}
+
+already_AddRefed<Sampler> Device::CreateSampler(
+ const dom::GPUSamplerDescriptor& aDesc) {
+ ffi::WGPUSamplerDescriptor desc = {};
+ webgpu::StringHelper label(aDesc.mLabel);
+
+ desc.label = label.Get();
+ desc.address_modes[0] = ffi::WGPUAddressMode(aDesc.mAddressModeU);
+ desc.address_modes[1] = ffi::WGPUAddressMode(aDesc.mAddressModeV);
+ desc.address_modes[2] = ffi::WGPUAddressMode(aDesc.mAddressModeW);
+ desc.mag_filter = ffi::WGPUFilterMode(aDesc.mMagFilter);
+ desc.min_filter = ffi::WGPUFilterMode(aDesc.mMinFilter);
+ desc.mipmap_filter = ffi::WGPUFilterMode(aDesc.mMipmapFilter);
+ desc.lod_min_clamp = aDesc.mLodMinClamp;
+ desc.lod_max_clamp = aDesc.mLodMaxClamp;
+
+ ffi::WGPUCompareFunction comparison = ffi::WGPUCompareFunction_Sentinel;
+ if (aDesc.mCompare.WasPassed()) {
+ comparison = ConvertCompareFunction(aDesc.mCompare.Value());
+ desc.compare = &comparison;
+ }
+
+ ipc::ByteBuf bb;
+ RawId id = ffi::wgpu_client_create_sampler(mBridge->GetClient(), mId, &desc,
+ ToFFI(&bb));
+
+ if (mBridge->CanSend()) {
+ mBridge->SendDeviceAction(mId, std::move(bb));
+ }
+
+ RefPtr<Sampler> sampler = new Sampler(this, id);
+ return sampler.forget();
+}
+
+already_AddRefed<CommandEncoder> Device::CreateCommandEncoder(
+ const dom::GPUCommandEncoderDescriptor& aDesc) {
+ ffi::WGPUCommandEncoderDescriptor desc = {};
+
+ webgpu::StringHelper label(aDesc.mLabel);
+ desc.label = label.Get();
+
+ ipc::ByteBuf bb;
+ RawId id = ffi::wgpu_client_create_command_encoder(mBridge->GetClient(), mId,
+ &desc, ToFFI(&bb));
+ if (mBridge->CanSend()) {
+ mBridge->SendDeviceAction(mId, std::move(bb));
+ }
+
+ RefPtr<CommandEncoder> encoder = new CommandEncoder(this, mBridge, id);
+ return encoder.forget();
+}
+
+already_AddRefed<RenderBundleEncoder> Device::CreateRenderBundleEncoder(
+ const dom::GPURenderBundleEncoderDescriptor& aDesc) {
+ RefPtr<RenderBundleEncoder> encoder =
+ new RenderBundleEncoder(this, mBridge, aDesc);
+ return encoder.forget();
+}
+
+already_AddRefed<BindGroupLayout> Device::CreateBindGroupLayout(
+ const dom::GPUBindGroupLayoutDescriptor& aDesc) {
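+  // The FFI layout entries below store raw pointers to per-entry view
+  // dimension / sample type / storage format values, so gather those into a
+  // pre-sized side array first; its elements must stay put while
+  // wgpu_client_create_bind_group_layout reads them.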
+ struct OptionalData {
+ ffi::WGPUTextureViewDimension dim;
+ ffi::WGPURawTextureSampleType type;
+ ffi::WGPUTextureFormat format;
+ };
+ nsTArray<OptionalData> optional(aDesc.mEntries.Length());
+ for (const auto& entry : aDesc.mEntries) {
+ OptionalData data = {};
+ if (entry.mTexture.WasPassed()) {
+ const auto& texture = entry.mTexture.Value();
+ data.dim = ffi::WGPUTextureViewDimension(texture.mViewDimension);
+ switch (texture.mSampleType) {
+ case dom::GPUTextureSampleType::Float:
+ data.type = ffi::WGPURawTextureSampleType_Float;
+ break;
+ case dom::GPUTextureSampleType::Unfilterable_float:
+ data.type = ffi::WGPURawTextureSampleType_UnfilterableFloat;
+ break;
+ case dom::GPUTextureSampleType::Uint:
+ data.type = ffi::WGPURawTextureSampleType_Uint;
+ break;
+ case dom::GPUTextureSampleType::Sint:
+ data.type = ffi::WGPURawTextureSampleType_Sint;
+ break;
+ case dom::GPUTextureSampleType::Depth:
+ data.type = ffi::WGPURawTextureSampleType_Depth;
+ break;
+ case dom::GPUTextureSampleType::EndGuard_:
+ MOZ_ASSERT_UNREACHABLE();
+ }
+ }
+ if (entry.mStorageTexture.WasPassed()) {
+ const auto& texture = entry.mStorageTexture.Value();
+ data.dim = ffi::WGPUTextureViewDimension(texture.mViewDimension);
+ data.format = ConvertTextureFormat(texture.mFormat);
+ }
+ optional.AppendElement(data);
+ }
+
+ nsTArray<ffi::WGPUBindGroupLayoutEntry> entries(aDesc.mEntries.Length());
+ for (size_t i = 0; i < aDesc.mEntries.Length(); ++i) {
+ const auto& entry = aDesc.mEntries[i];
+ ffi::WGPUBindGroupLayoutEntry e = {};
+ e.binding = entry.mBinding;
+ e.visibility = entry.mVisibility;
+ if (entry.mBuffer.WasPassed()) {
+ switch (entry.mBuffer.Value().mType) {
+ case dom::GPUBufferBindingType::Uniform:
+ e.ty = ffi::WGPURawBindingType_UniformBuffer;
+ break;
+ case dom::GPUBufferBindingType::Storage:
+ e.ty = ffi::WGPURawBindingType_StorageBuffer;
+ break;
+ case dom::GPUBufferBindingType::Read_only_storage:
+ e.ty = ffi::WGPURawBindingType_ReadonlyStorageBuffer;
+ break;
+ case dom::GPUBufferBindingType::EndGuard_:
+ MOZ_ASSERT_UNREACHABLE();
+ }
+ e.has_dynamic_offset = entry.mBuffer.Value().mHasDynamicOffset;
+ }
+ if (entry.mTexture.WasPassed()) {
+ e.ty = ffi::WGPURawBindingType_SampledTexture;
+ e.view_dimension = &optional[i].dim;
+ e.texture_sample_type = &optional[i].type;
+ e.multisampled = entry.mTexture.Value().mMultisampled;
+ }
+ if (entry.mStorageTexture.WasPassed()) {
+ e.ty = entry.mStorageTexture.Value().mAccess ==
+ dom::GPUStorageTextureAccess::Write_only
+ ? ffi::WGPURawBindingType_WriteonlyStorageTexture
+ : ffi::WGPURawBindingType_ReadonlyStorageTexture;
+ e.view_dimension = &optional[i].dim;
+ e.storage_texture_format = &optional[i].format;
+ }
+ if (entry.mSampler.WasPassed()) {
+ e.ty = ffi::WGPURawBindingType_Sampler;
+ switch (entry.mSampler.Value().mType) {
+ case dom::GPUSamplerBindingType::Filtering:
+ e.sampler_filter = true;
+ break;
+ case dom::GPUSamplerBindingType::Non_filtering:
+ break;
+ case dom::GPUSamplerBindingType::Comparison:
+ e.sampler_compare = true;
+ break;
+ case dom::GPUSamplerBindingType::EndGuard_:
+ MOZ_ASSERT_UNREACHABLE();
+ }
+ }
+ entries.AppendElement(e);
+ }
+
+ ffi::WGPUBindGroupLayoutDescriptor desc = {};
+
+ webgpu::StringHelper label(aDesc.mLabel);
+ desc.label = label.Get();
+ desc.entries = entries.Elements();
+ desc.entries_length = entries.Length();
+
+ ipc::ByteBuf bb;
+ RawId id = ffi::wgpu_client_create_bind_group_layout(mBridge->GetClient(),
+ mId, &desc, ToFFI(&bb));
+ if (mBridge->CanSend()) {
+ mBridge->SendDeviceAction(mId, std::move(bb));
+ }
+
+ RefPtr<BindGroupLayout> object = new BindGroupLayout(this, id, true);
+ return object.forget();
+}
+
+already_AddRefed<PipelineLayout> Device::CreatePipelineLayout(
+ const dom::GPUPipelineLayoutDescriptor& aDesc) {
+ nsTArray<ffi::WGPUBindGroupLayoutId> bindGroupLayouts(
+ aDesc.mBindGroupLayouts.Length());
+
+ for (const auto& layout : aDesc.mBindGroupLayouts) {
+ bindGroupLayouts.AppendElement(layout->mId);
+ }
+
+ ffi::WGPUPipelineLayoutDescriptor desc = {};
+
+ webgpu::StringHelper label(aDesc.mLabel);
+ desc.label = label.Get();
+ desc.bind_group_layouts = bindGroupLayouts.Elements();
+ desc.bind_group_layouts_length = bindGroupLayouts.Length();
+
+ ipc::ByteBuf bb;
+ RawId id = ffi::wgpu_client_create_pipeline_layout(mBridge->GetClient(), mId,
+ &desc, ToFFI(&bb));
+ if (mBridge->CanSend()) {
+ mBridge->SendDeviceAction(mId, std::move(bb));
+ }
+
+ RefPtr<PipelineLayout> object = new PipelineLayout(this, id);
+ return object.forget();
+}
+
+already_AddRefed<BindGroup> Device::CreateBindGroup(
+ const dom::GPUBindGroupDescriptor& aDesc) {
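+  // Translate each WebIDL binding resource into its FFI equivalent. Entries we
+  // cannot represent are skipped (with a warning) rather than handed to
+  // wgpu_client, which would panic on them.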
+ nsTArray<ffi::WGPUBindGroupEntry> entries(aDesc.mEntries.Length());
+ for (const auto& entry : aDesc.mEntries) {
+ ffi::WGPUBindGroupEntry e = {};
+ e.binding = entry.mBinding;
+ if (entry.mResource.IsGPUBufferBinding()) {
+ const auto& bufBinding = entry.mResource.GetAsGPUBufferBinding();
+ if (!bufBinding.mBuffer->mId) {
+ NS_WARNING("Buffer binding has no id -- ignoring.");
+ continue;
+ }
+ e.buffer = bufBinding.mBuffer->mId;
+ e.offset = bufBinding.mOffset;
+ e.size = bufBinding.mSize.WasPassed() ? bufBinding.mSize.Value() : 0;
+ } else if (entry.mResource.IsGPUTextureView()) {
+ e.texture_view = entry.mResource.GetAsGPUTextureView()->mId;
+ } else if (entry.mResource.IsGPUSampler()) {
+ e.sampler = entry.mResource.GetAsGPUSampler()->mId;
+ } else {
+ // Not a buffer, nor a texture view, nor a sampler. If we pass
+ // this to wgpu_client, it'll panic. Log a warning instead and
+ // ignore this entry.
+ NS_WARNING("Bind group entry has unknown type.");
+ continue;
+ }
+ entries.AppendElement(e);
+ }
+
+ ffi::WGPUBindGroupDescriptor desc = {};
+
+ webgpu::StringHelper label(aDesc.mLabel);
+ desc.label = label.Get();
+ desc.layout = aDesc.mLayout->mId;
+ desc.entries = entries.Elements();
+ desc.entries_length = entries.Length();
+
+ ipc::ByteBuf bb;
+ RawId id = ffi::wgpu_client_create_bind_group(mBridge->GetClient(), mId,
+ &desc, ToFFI(&bb));
+ if (mBridge->CanSend()) {
+ mBridge->SendDeviceAction(mId, std::move(bb));
+ }
+
+ RefPtr<BindGroup> object = new BindGroup(this, id);
+ return object.forget();
+}
+
+MOZ_CAN_RUN_SCRIPT void reportCompilationMessagesToConsole(
+ const RefPtr<ShaderModule>& aShaderModule,
+ const nsTArray<WebGPUCompilationMessage>& aMessages) {
+ auto* global = aShaderModule->GetParentObject();
+
+ dom::AutoJSAPI api;
+ if (!api.Init(global)) {
+ return;
+ }
+
+ const auto& cx = api.cx();
+
+ ErrorResult rv;
+ RefPtr<dom::Console> console =
+ nsGlobalWindowInner::Cast(global->GetAsInnerWindow())->GetConsole(cx, rv);
+ if (rv.Failed()) {
+ return;
+ }
+
+ dom::GlobalObject globalObj(cx, global->GetGlobalJSObject());
+
+ dom::Sequence<JS::Value> args;
+ dom::SequenceRooter<JS::Value> msgArgsRooter(cx, &args);
+ auto SetSingleStrAsArgs =
+ [&](const nsString& message, dom::Sequence<JS::Value>* args)
+ MOZ_CAN_RUN_SCRIPT {
+ args->Clear();
+ JS::Rooted<JSString*> jsStr(
+ cx, JS_NewUCStringCopyN(cx, message.Data(), message.Length()));
+ if (!jsStr) {
+ return;
+ }
+ JS::Rooted<JS::Value> val(cx, JS::StringValue(jsStr));
+ if (!args->AppendElement(val, fallible)) {
+ return;
+ }
+ };
+
+ nsString label;
+ aShaderModule->GetLabel(label);
+ auto appendNiceLabelIfPresent = [&label](nsString* buf) MOZ_CAN_RUN_SCRIPT {
+ if (!label.IsEmpty()) {
+ buf->AppendLiteral(u" \"");
+ buf->Append(label);
+ buf->AppendLiteral(u"\"");
+ }
+ };
+
+  // Start at the lowest severity (Info). This initial value is never reported;
+  // it only seeds the comparison below, which relies on lower underlying enum
+  // values meaning higher severity.
+ auto highestSeveritySeen = WebGPUCompilationMessageType::Info;
+ uint64_t errorCount = 0;
+ uint64_t warningCount = 0;
+ uint64_t infoCount = 0;
+ for (const auto& message : aMessages) {
+ bool higherThanSeen =
+ static_cast<std::underlying_type_t<WebGPUCompilationMessageType>>(
+ message.messageType) <
+ static_cast<std::underlying_type_t<WebGPUCompilationMessageType>>(
+ highestSeveritySeen);
+ if (higherThanSeen) {
+ highestSeveritySeen = message.messageType;
+ }
+ switch (message.messageType) {
+ case WebGPUCompilationMessageType::Error:
+ errorCount += 1;
+ break;
+ case WebGPUCompilationMessageType::Warning:
+ warningCount += 1;
+ break;
+ case WebGPUCompilationMessageType::Info:
+ infoCount += 1;
+ break;
+ }
+ }
+ switch (highestSeveritySeen) {
+ case WebGPUCompilationMessageType::Info:
+ // shouldn't happen, but :shrug:
+ break;
+ case WebGPUCompilationMessageType::Warning: {
+ nsString msg(
+ u"Encountered one or more warnings while creating shader module");
+ appendNiceLabelIfPresent(&msg);
+ SetSingleStrAsArgs(msg, &args);
+ console->Warn(globalObj, args);
+ break;
+ }
+ case WebGPUCompilationMessageType::Error: {
+ nsString msg(
+ u"Encountered one or more errors while creating shader module");
+ appendNiceLabelIfPresent(&msg);
+ SetSingleStrAsArgs(msg, &args);
+ console->Error(globalObj, args);
+ break;
+ }
+ }
+
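+  // Emit a collapsed console group whose header summarizes the message counts,
+  // followed by one console line per compilation message.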
+ nsString header;
+ header.AppendLiteral(u"WebGPU compilation info for shader module");
+ appendNiceLabelIfPresent(&header);
+ header.AppendLiteral(u" (");
+ header.AppendInt(errorCount);
+ header.AppendLiteral(u" error(s), ");
+ header.AppendInt(warningCount);
+ header.AppendLiteral(u" warning(s), ");
+ header.AppendInt(infoCount);
+ header.AppendLiteral(u" info)");
+ SetSingleStrAsArgs(header, &args);
+ console->GroupCollapsed(globalObj, args);
+
+ for (const auto& message : aMessages) {
+ SetSingleStrAsArgs(message.message, &args);
+ switch (message.messageType) {
+ case WebGPUCompilationMessageType::Error:
+ console->Error(globalObj, args);
+ break;
+ case WebGPUCompilationMessageType::Warning:
+ console->Warn(globalObj, args);
+ break;
+ case WebGPUCompilationMessageType::Info:
+ console->Info(globalObj, args);
+ break;
+ }
+ }
+ console->GroupEnd(globalObj);
+}
+
+already_AddRefed<ShaderModule> Device::CreateShaderModule(
+ JSContext* aCx, const dom::GPUShaderModuleDescriptor& aDesc,
+ ErrorResult& aRv) {
+ Unused << aCx;
+
+ RefPtr<dom::Promise> promise = dom::Promise::Create(GetParentObject(), aRv);
+ if (NS_WARN_IF(aRv.Failed())) {
+ return nullptr;
+ }
+
+ RawId moduleId =
+ ffi::wgpu_client_make_shader_module_id(mBridge->GetClient(), mId);
+
+ RefPtr<ShaderModule> shaderModule = new ShaderModule(this, moduleId, promise);
+
+ shaderModule->SetLabel(aDesc.mLabel);
+
+ RefPtr<Device> device = this;
+
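+  // The ShaderModule wrapper is returned synchronously; compilation messages
+  // arrive asynchronously, get mirrored to the web console, and resolve the
+  // CompilationInfo promise handed to the ShaderModule above.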
+ if (mBridge->CanSend()) {
+ mBridge
+ ->SendDeviceCreateShaderModule(mId, moduleId, aDesc.mLabel, aDesc.mCode)
+ ->Then(
+ GetCurrentSerialEventTarget(), __func__,
+ [promise, device,
+ shaderModule](nsTArray<WebGPUCompilationMessage>&& messages)
+ MOZ_CAN_RUN_SCRIPT {
+ if (!messages.IsEmpty()) {
+ reportCompilationMessagesToConsole(shaderModule,
+ std::cref(messages));
+ }
+ RefPtr<CompilationInfo> infoObject(
+ new CompilationInfo(device));
+ infoObject->SetMessages(messages);
+ promise->MaybeResolve(infoObject);
+ },
+ [promise](const ipc::ResponseRejectReason& aReason) {
+ promise->MaybeRejectWithNotSupportedError("IPC error");
+ });
+ } else {
+ promise->MaybeRejectWithNotSupportedError("IPC error");
+ }
+
+ return shaderModule.forget();
+}
+
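+// Shared between CreateComputePipeline and CreateComputePipelineAsync:
+// serializes the descriptor into aByteBuf and returns the client-allocated
+// pipeline id. With an "auto" layout, wgpu_client also reserves implicit
+// pipeline/bind-group layout ids, which are reported back through aContext.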
+RawId CreateComputePipelineImpl(PipelineCreationContext* const aContext,
+ WebGPUChild* aBridge,
+ const dom::GPUComputePipelineDescriptor& aDesc,
+ ipc::ByteBuf* const aByteBuf) {
+ ffi::WGPUComputePipelineDescriptor desc = {};
+ nsCString entryPoint;
+
+ webgpu::StringHelper label(aDesc.mLabel);
+ desc.label = label.Get();
+
+ if (aDesc.mLayout.IsGPUAutoLayoutMode()) {
+ desc.layout = 0;
+ } else if (aDesc.mLayout.IsGPUPipelineLayout()) {
+ desc.layout = aDesc.mLayout.GetAsGPUPipelineLayout()->mId;
+ } else {
+ MOZ_ASSERT_UNREACHABLE();
+ }
+ desc.stage.module = aDesc.mCompute.mModule->mId;
+ CopyUTF16toUTF8(aDesc.mCompute.mEntryPoint, entryPoint);
+ desc.stage.entry_point = entryPoint.get();
+
+ RawId implicit_bgl_ids[WGPUMAX_BIND_GROUPS] = {};
+ RawId id = ffi::wgpu_client_create_compute_pipeline(
+ aBridge->GetClient(), aContext->mParentId, &desc, ToFFI(aByteBuf),
+ &aContext->mImplicitPipelineLayoutId, implicit_bgl_ids);
+
+ for (const auto& cur : implicit_bgl_ids) {
+ if (!cur) break;
+ aContext->mImplicitBindGroupLayoutIds.AppendElement(cur);
+ }
+
+ return id;
+}
+
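+// Render-pipeline counterpart of CreateComputePipelineImpl. The FFI descriptor
+// only borrows pointers, so every nested state below lives in a stack local
+// that outlives the wgpu_client_create_render_pipeline call.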
+RawId CreateRenderPipelineImpl(PipelineCreationContext* const aContext,
+ WebGPUChild* aBridge,
+ const dom::GPURenderPipelineDescriptor& aDesc,
+ ipc::ByteBuf* const aByteBuf) {
+ // A bunch of stack locals that we can have pointers into
+ nsTArray<ffi::WGPUVertexBufferLayout> vertexBuffers;
+ nsTArray<ffi::WGPUVertexAttribute> vertexAttributes;
+ ffi::WGPURenderPipelineDescriptor desc = {};
+ nsCString vsEntry, fsEntry;
+ ffi::WGPUIndexFormat stripIndexFormat = ffi::WGPUIndexFormat_Uint16;
+ ffi::WGPUFace cullFace = ffi::WGPUFace_Front;
+ ffi::WGPUVertexState vertexState = {};
+ ffi::WGPUFragmentState fragmentState = {};
+ nsTArray<ffi::WGPUColorTargetState> colorStates;
+ nsTArray<ffi::WGPUBlendState> blendStates;
+
+ webgpu::StringHelper label(aDesc.mLabel);
+ desc.label = label.Get();
+
+ if (aDesc.mLayout.IsGPUAutoLayoutMode()) {
+ desc.layout = 0;
+ } else if (aDesc.mLayout.IsGPUPipelineLayout()) {
+ desc.layout = aDesc.mLayout.GetAsGPUPipelineLayout()->mId;
+ } else {
+ MOZ_ASSERT_UNREACHABLE();
+ }
+
+ {
+ const auto& stage = aDesc.mVertex;
+ vertexState.stage.module = stage.mModule->mId;
+ CopyUTF16toUTF8(stage.mEntryPoint, vsEntry);
+ vertexState.stage.entry_point = vsEntry.get();
+
+ for (const auto& vertex_desc : stage.mBuffers) {
+ ffi::WGPUVertexBufferLayout vb_desc = {};
+ if (!vertex_desc.IsNull()) {
+ const auto& vd = vertex_desc.Value();
+ vb_desc.array_stride = vd.mArrayStride;
+ vb_desc.step_mode = ffi::WGPUVertexStepMode(vd.mStepMode);
+ // Note: we are setting the length but not the pointer
+ vb_desc.attributes_length = vd.mAttributes.Length();
+ for (const auto& vat : vd.mAttributes) {
+ ffi::WGPUVertexAttribute ad = {};
+ ad.offset = vat.mOffset;
+ ad.format = ffi::WGPUVertexFormat(vat.mFormat);
+ ad.shader_location = vat.mShaderLocation;
+ vertexAttributes.AppendElement(ad);
+ }
+ }
+ vertexBuffers.AppendElement(vb_desc);
+ }
+ // Now patch up all the pointers to attribute lists.
+ size_t numAttributes = 0;
+ for (auto& vb_desc : vertexBuffers) {
+ vb_desc.attributes = vertexAttributes.Elements() + numAttributes;
+ numAttributes += vb_desc.attributes_length;
+ }
+
+ vertexState.buffers = vertexBuffers.Elements();
+ vertexState.buffers_length = vertexBuffers.Length();
+ desc.vertex = &vertexState;
+ }
+
+ if (aDesc.mFragment.WasPassed()) {
+ const auto& stage = aDesc.mFragment.Value();
+ fragmentState.stage.module = stage.mModule->mId;
+ CopyUTF16toUTF8(stage.mEntryPoint, fsEntry);
+ fragmentState.stage.entry_point = fsEntry.get();
+
+ // Note: we pre-collect the blend states into a different array
+ // so that we can have non-stale pointers into it.
+ for (const auto& colorState : stage.mTargets) {
+ ffi::WGPUColorTargetState desc = {};
+ desc.format = ConvertTextureFormat(colorState.mFormat);
+ desc.write_mask = colorState.mWriteMask;
+ colorStates.AppendElement(desc);
+ ffi::WGPUBlendState bs = {};
+ if (colorState.mBlend.WasPassed()) {
+ const auto& blend = colorState.mBlend.Value();
+ bs.alpha = ConvertBlendComponent(blend.mAlpha);
+ bs.color = ConvertBlendComponent(blend.mColor);
+ }
+ blendStates.AppendElement(bs);
+ }
+ for (size_t i = 0; i < colorStates.Length(); ++i) {
+ if (stage.mTargets[i].mBlend.WasPassed()) {
+ colorStates[i].blend = &blendStates[i];
+ }
+ }
+
+ fragmentState.targets = colorStates.Elements();
+ fragmentState.targets_length = colorStates.Length();
+ desc.fragment = &fragmentState;
+ }
+
+ {
+ const auto& prim = aDesc.mPrimitive;
+ desc.primitive.topology = ffi::WGPUPrimitiveTopology(prim.mTopology);
+ if (prim.mStripIndexFormat.WasPassed()) {
+ stripIndexFormat = ffi::WGPUIndexFormat(prim.mStripIndexFormat.Value());
+ desc.primitive.strip_index_format = &stripIndexFormat;
+ }
+ desc.primitive.front_face = ffi::WGPUFrontFace(prim.mFrontFace);
+ if (prim.mCullMode != dom::GPUCullMode::None) {
+ cullFace = prim.mCullMode == dom::GPUCullMode::Front ? ffi::WGPUFace_Front
+ : ffi::WGPUFace_Back;
+ desc.primitive.cull_mode = &cullFace;
+ }
+ desc.primitive.unclipped_depth = prim.mUnclippedDepth;
+ }
+ desc.multisample = ConvertMultisampleState(aDesc.mMultisample);
+
+ ffi::WGPUDepthStencilState depthStencilState = {};
+ if (aDesc.mDepthStencil.WasPassed()) {
+ depthStencilState = ConvertDepthStencilState(aDesc.mDepthStencil.Value());
+ desc.depth_stencil = &depthStencilState;
+ }
+
+ RawId implicit_bgl_ids[WGPUMAX_BIND_GROUPS] = {};
+ RawId id = ffi::wgpu_client_create_render_pipeline(
+ aBridge->GetClient(), aContext->mParentId, &desc, ToFFI(aByteBuf),
+ &aContext->mImplicitPipelineLayoutId, implicit_bgl_ids);
+
+ for (const auto& cur : implicit_bgl_ids) {
+ if (!cur) break;
+ aContext->mImplicitBindGroupLayoutIds.AppendElement(cur);
+ }
+
+ return id;
+}
+
+already_AddRefed<ComputePipeline> Device::CreateComputePipeline(
+ const dom::GPUComputePipelineDescriptor& aDesc) {
+ PipelineCreationContext context = {mId};
+ ipc::ByteBuf bb;
+ RawId id = CreateComputePipelineImpl(&context, mBridge, aDesc, &bb);
+
+ if (mBridge->CanSend()) {
+ mBridge->SendDeviceAction(mId, std::move(bb));
+ }
+
+ RefPtr<ComputePipeline> object =
+ new ComputePipeline(this, id, context.mImplicitPipelineLayoutId,
+ std::move(context.mImplicitBindGroupLayoutIds));
+ return object.forget();
+}
+
+already_AddRefed<RenderPipeline> Device::CreateRenderPipeline(
+ const dom::GPURenderPipelineDescriptor& aDesc) {
+ PipelineCreationContext context = {mId};
+ ipc::ByteBuf bb;
+ RawId id = CreateRenderPipelineImpl(&context, mBridge, aDesc, &bb);
+
+ if (mBridge->CanSend()) {
+ mBridge->SendDeviceAction(mId, std::move(bb));
+ }
+
+ RefPtr<RenderPipeline> object =
+ new RenderPipeline(this, id, context.mImplicitPipelineLayoutId,
+ std::move(context.mImplicitBindGroupLayoutIds));
+ return object.forget();
+}
+
+already_AddRefed<dom::Promise> Device::CreateComputePipelineAsync(
+ const dom::GPUComputePipelineDescriptor& aDesc, ErrorResult& aRv) {
+ RefPtr<dom::Promise> promise = dom::Promise::Create(GetParentObject(), aRv);
+ if (NS_WARN_IF(aRv.Failed())) {
+ return nullptr;
+ }
+
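+  // Heap-allocate the creation context and share it with the reply handler so
+  // the implicit layout ids it collects stay alive until the promise settles.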
+ std::shared_ptr<PipelineCreationContext> context(
+ new PipelineCreationContext());
+ context->mParentId = mId;
+
+ ipc::ByteBuf bb;
+ RawId pipelineId =
+ CreateComputePipelineImpl(context.get(), mBridge, aDesc, &bb);
+
+ if (mBridge->CanSend()) {
+ mBridge->SendDeviceActionWithAck(mId, std::move(bb))
+ ->Then(
+ GetCurrentSerialEventTarget(), __func__,
+ [self = RefPtr{this}, context, pipelineId, promise](bool aDummy) {
+ Unused << aDummy;
+ RefPtr<ComputePipeline> object = new ComputePipeline(
+ self, pipelineId, context->mImplicitPipelineLayoutId,
+ std::move(context->mImplicitBindGroupLayoutIds));
+ promise->MaybeResolve(object);
+ },
+ [promise](const ipc::ResponseRejectReason&) {
+ promise->MaybeRejectWithOperationError(
+ "Internal communication error");
+ });
+ } else {
+ promise->MaybeRejectWithOperationError("Internal communication error");
+ }
+
+ return promise.forget();
+}
+
+already_AddRefed<dom::Promise> Device::CreateRenderPipelineAsync(
+ const dom::GPURenderPipelineDescriptor& aDesc, ErrorResult& aRv) {
+ RefPtr<dom::Promise> promise = dom::Promise::Create(GetParentObject(), aRv);
+ if (NS_WARN_IF(aRv.Failed())) {
+ return nullptr;
+ }
+
+ std::shared_ptr<PipelineCreationContext> context(
+ new PipelineCreationContext());
+ context->mParentId = mId;
+
+ ipc::ByteBuf bb;
+ RawId pipelineId =
+ CreateRenderPipelineImpl(context.get(), mBridge, aDesc, &bb);
+
+ if (mBridge->CanSend()) {
+ mBridge->SendDeviceActionWithAck(mId, std::move(bb))
+ ->Then(
+ GetCurrentSerialEventTarget(), __func__,
+ [self = RefPtr{this}, context, promise, pipelineId](bool aDummy) {
+ Unused << aDummy;
+ RefPtr<RenderPipeline> object = new RenderPipeline(
+ self, pipelineId, context->mImplicitPipelineLayoutId,
+ std::move(context->mImplicitBindGroupLayoutIds));
+ promise->MaybeResolve(object);
+ },
+ [promise](const ipc::ResponseRejectReason&) {
+ promise->MaybeRejectWithOperationError(
+ "Internal communication error");
+ });
+ } else {
+ promise->MaybeRejectWithOperationError("Internal communication error");
+ }
+
+ return promise.forget();
+}
+
+already_AddRefed<Texture> Device::InitSwapChain(
+ const dom::GPUCanvasConfiguration* const aConfig,
+ const layers::RemoteTextureOwnerId aOwnerId,
+ bool aUseExternalTextureInSwapChain, gfx::SurfaceFormat aFormat,
+ gfx::IntSize aCanvasSize) {
+ MOZ_ASSERT(aConfig);
+
+ if (!mBridge->CanSend()) {
+ return nullptr;
+ }
+
+ // Check that aCanvasSize and aFormat will generate a texture stride
+ // within limits.
+ const auto bufferStrideWithMask = BufferStrideWithMask(aCanvasSize, aFormat);
+ if (!bufferStrideWithMask.isValid()) {
+ return nullptr;
+ }
+
+ const layers::RGBDescriptor rgbDesc(aCanvasSize, aFormat);
+  // The exact buffer count doesn't matter much; buffers are created on demand.
+ const size_t maxBufferCount = 10;
+ mBridge->DeviceCreateSwapChain(mId, rgbDesc, maxBufferCount, aOwnerId,
+ aUseExternalTextureInSwapChain);
+
+ // TODO: `mColorSpace`: <https://bugzilla.mozilla.org/show_bug.cgi?id=1846608>
+ // TODO: `mAlphaMode`: <https://bugzilla.mozilla.org/show_bug.cgi?id=1846605>
+ return CreateTextureForSwapChain(aConfig, aCanvasSize, aOwnerId);
+}
+
+bool Device::CheckNewWarning(const nsACString& aMessage) {
+ return mKnownWarnings.EnsureInserted(aMessage);
+}
+
+void Device::Destroy() {
+ if (IsLost()) {
+ return;
+ }
+
+ // Unmap all buffers from this device, as specified by
+ // https://gpuweb.github.io/gpuweb/#dom-gpudevice-destroy.
+ dom::AutoJSAPI jsapi;
+ if (jsapi.Init(GetOwnerGlobal())) {
+ IgnoredErrorResult rv;
+ for (const auto& buffer : mTrackedBuffers) {
+ buffer->Unmap(jsapi.cx(), rv);
+ }
+
+ mTrackedBuffers.Clear();
+ }
+
+ mBridge->SendDeviceDestroy(mId);
+}
+
+void Device::PushErrorScope(const dom::GPUErrorFilter& aFilter) {
+ if (!IsBridgeAlive()) {
+ return;
+ }
+ mBridge->SendDevicePushErrorScope(mId, aFilter);
+}
+
+already_AddRefed<dom::Promise> Device::PopErrorScope(ErrorResult& aRv) {
+ /*
+ https://www.w3.org/TR/webgpu/#errors-and-debugging:
+ > After a device is lost (described below), errors are no longer surfaced.
+ > At this point, implementations do not need to run validation or error
+  > tracking: popErrorScope() and uncapturederror stop reporting errors, and
+  > the validity of objects on the device becomes unobservable.
+ */
+ RefPtr<dom::Promise> promise = dom::Promise::Create(GetParentObject(), aRv);
+ if (NS_WARN_IF(aRv.Failed())) {
+ return nullptr;
+ }
+
+ if (!IsBridgeAlive()) {
+ WebGPUChild::JsWarning(
+ GetOwnerGlobal(),
+ "popErrorScope resolving to null because device is already lost."_ns);
+ promise->MaybeResolve(JS::NullHandleValue);
+ return promise.forget();
+ }
+
+ auto errorPromise = mBridge->SendDevicePopErrorScope(mId);
+
+ errorPromise->Then(
+ GetCurrentSerialEventTarget(), __func__,
+ [self = RefPtr{this}, promise](const PopErrorScopeResult& aResult) {
+ RefPtr<Error> error;
+
+ switch (aResult.resultType) {
+ case PopErrorScopeResultType::NoError:
+ promise->MaybeResolve(JS::NullHandleValue);
+ return;
+
+ case PopErrorScopeResultType::DeviceLost:
+ WebGPUChild::JsWarning(
+ self->GetOwnerGlobal(),
+ "popErrorScope resolving to null because device was lost."_ns);
+ promise->MaybeResolve(JS::NullHandleValue);
+ return;
+
+ case PopErrorScopeResultType::ThrowOperationError:
+ promise->MaybeRejectWithOperationError(aResult.message);
+ return;
+
+ case PopErrorScopeResultType::OutOfMemory:
+ error =
+ new OutOfMemoryError(self->GetParentObject(), aResult.message);
+ break;
+
+ case PopErrorScopeResultType::ValidationError:
+ error =
+ new ValidationError(self->GetParentObject(), aResult.message);
+ break;
+
+ case PopErrorScopeResultType::InternalError:
+ error = new InternalError(self->GetParentObject(), aResult.message);
+ break;
+ }
+ promise->MaybeResolve(std::move(error));
+ },
+ [self = RefPtr{this}, promise](const ipc::ResponseRejectReason&) {
+ // Device was lost.
+ WebGPUChild::JsWarning(
+ self->GetOwnerGlobal(),
+ "popErrorScope resolving to null because device was just lost."_ns);
+ promise->MaybeResolve(JS::NullHandleValue);
+ });
+
+ return promise.forget();
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/Device.h b/dom/webgpu/Device.h
new file mode 100644
index 0000000000..486fbd03ae
--- /dev/null
+++ b/dom/webgpu/Device.h
@@ -0,0 +1,190 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_DEVICE_H_
+#define GPU_DEVICE_H_
+
+#include "ObjectModel.h"
+#include "nsTHashSet.h"
+#include "mozilla/MozPromise.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/WeakPtr.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+#include "mozilla/webgpu/PWebGPUTypes.h"
+#include "mozilla/webrender/WebRenderAPI.h"
+#include "mozilla/DOMEventTargetHelper.h"
+
+namespace mozilla {
+namespace dom {
+struct GPUExtensions;
+struct GPUFeatures;
+struct GPULimits;
+struct GPUExtent3DDict;
+
+struct GPUBufferDescriptor;
+struct GPUTextureDescriptor;
+struct GPUSamplerDescriptor;
+struct GPUBindGroupLayoutDescriptor;
+struct GPUPipelineLayoutDescriptor;
+struct GPUBindGroupDescriptor;
+struct GPUBlendStateDescriptor;
+struct GPUDepthStencilStateDescriptor;
+struct GPUInputStateDescriptor;
+struct GPUShaderModuleDescriptor;
+struct GPUAttachmentStateDescriptor;
+struct GPUComputePipelineDescriptor;
+struct GPURenderBundleEncoderDescriptor;
+struct GPURenderPipelineDescriptor;
+struct GPUCommandEncoderDescriptor;
+struct GPUCanvasConfiguration;
+
+class EventHandlerNonNull;
+class Promise;
+template <typename T>
+class Sequence;
+class GPUBufferOrGPUTexture;
+enum class GPUDeviceLostReason : uint8_t;
+enum class GPUErrorFilter : uint8_t;
+enum class GPUFeatureName : uint8_t;
+class GPULogCallback;
+} // namespace dom
+namespace ipc {
+enum class ResponseRejectReason;
+} // namespace ipc
+
+namespace webgpu {
+namespace ffi {
+struct WGPULimits;
+}
+class Adapter;
+class BindGroup;
+class BindGroupLayout;
+class Buffer;
+class CommandEncoder;
+class ComputePipeline;
+class Fence;
+class InputState;
+class PipelineLayout;
+class Queue;
+class RenderBundleEncoder;
+class RenderPipeline;
+class Sampler;
+class ShaderModule;
+class SupportedFeatures;
+class SupportedLimits;
+class Texture;
+class WebGPUChild;
+
+using MappingPromise =
+ MozPromise<BufferMapResult, ipc::ResponseRejectReason, true>;
+
+class Device final : public DOMEventTargetHelper, public SupportsWeakPtr {
+ public:
+ NS_DECL_ISUPPORTS_INHERITED
+ NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(Device, DOMEventTargetHelper)
+ GPU_DECL_JS_WRAP(Device)
+
+ const RawId mId;
+ RefPtr<SupportedFeatures> mFeatures;
+ RefPtr<SupportedLimits> mLimits;
+
+ static CheckedInt<uint32_t> BufferStrideWithMask(
+ const gfx::IntSize& aSize, const gfx::SurfaceFormat& aFormat);
+
+ explicit Device(Adapter* const aParent, RawId aId, const ffi::WGPULimits&);
+
+ RefPtr<WebGPUChild> GetBridge();
+ already_AddRefed<Texture> InitSwapChain(
+ const dom::GPUCanvasConfiguration* const aConfig,
+ const layers::RemoteTextureOwnerId aOwnerId,
+ bool aUseExternalTextureInSwapChain, gfx::SurfaceFormat aFormat,
+ gfx::IntSize aCanvasSize);
+ bool CheckNewWarning(const nsACString& aMessage);
+
+ void CleanupUnregisteredInParent();
+
+ void GenerateValidationError(const nsCString& aMessage);
+ void TrackBuffer(Buffer* aBuffer);
+ void UntrackBuffer(Buffer* aBuffer);
+
+ bool IsLost() const;
+ bool IsBridgeAlive() const;
+
+ RawId GetId() const { return mId; }
+
+ private:
+ ~Device();
+ void Cleanup();
+
+ RefPtr<WebGPUChild> mBridge;
+ bool mValid = true;
+ nsString mLabel;
+ RefPtr<dom::Promise> mLostPromise;
+ RefPtr<Queue> mQueue;
+ nsTHashSet<nsCString> mKnownWarnings;
+ nsTHashSet<Buffer*> mTrackedBuffers;
+
+ public:
+ void GetLabel(nsAString& aValue) const;
+ void SetLabel(const nsAString& aLabel);
+ dom::Promise* GetLost(ErrorResult& aRv);
+ void ResolveLost(Maybe<dom::GPUDeviceLostReason> aReason,
+ const nsAString& aMessage);
+
+ const RefPtr<SupportedFeatures>& Features() const { return mFeatures; }
+ const RefPtr<SupportedLimits>& Limits() const { return mLimits; }
+ const RefPtr<Queue>& GetQueue() const { return mQueue; }
+
+ already_AddRefed<Buffer> CreateBuffer(const dom::GPUBufferDescriptor& aDesc,
+ ErrorResult& aRv);
+
+ already_AddRefed<Texture> CreateTextureForSwapChain(
+ const dom::GPUCanvasConfiguration* const aConfig,
+ const gfx::IntSize& aCanvasSize,
+ const layers::RemoteTextureOwnerId aOwnerId);
+ already_AddRefed<Texture> CreateTexture(
+ const dom::GPUTextureDescriptor& aDesc);
+ already_AddRefed<Texture> CreateTexture(
+ const dom::GPUTextureDescriptor& aDesc,
+ Maybe<layers::RemoteTextureOwnerId> aOwnerId);
+ already_AddRefed<Sampler> CreateSampler(
+ const dom::GPUSamplerDescriptor& aDesc);
+
+ already_AddRefed<CommandEncoder> CreateCommandEncoder(
+ const dom::GPUCommandEncoderDescriptor& aDesc);
+ already_AddRefed<RenderBundleEncoder> CreateRenderBundleEncoder(
+ const dom::GPURenderBundleEncoderDescriptor& aDesc);
+
+ already_AddRefed<BindGroupLayout> CreateBindGroupLayout(
+ const dom::GPUBindGroupLayoutDescriptor& aDesc);
+ already_AddRefed<PipelineLayout> CreatePipelineLayout(
+ const dom::GPUPipelineLayoutDescriptor& aDesc);
+ already_AddRefed<BindGroup> CreateBindGroup(
+ const dom::GPUBindGroupDescriptor& aDesc);
+
+ MOZ_CAN_RUN_SCRIPT already_AddRefed<ShaderModule> CreateShaderModule(
+ JSContext* aCx, const dom::GPUShaderModuleDescriptor& aDesc,
+ ErrorResult& aRv);
+ already_AddRefed<ComputePipeline> CreateComputePipeline(
+ const dom::GPUComputePipelineDescriptor& aDesc);
+ already_AddRefed<RenderPipeline> CreateRenderPipeline(
+ const dom::GPURenderPipelineDescriptor& aDesc);
+ already_AddRefed<dom::Promise> CreateComputePipelineAsync(
+ const dom::GPUComputePipelineDescriptor& aDesc, ErrorResult& aRv);
+ already_AddRefed<dom::Promise> CreateRenderPipelineAsync(
+ const dom::GPURenderPipelineDescriptor& aDesc, ErrorResult& aRv);
+
+ void PushErrorScope(const dom::GPUErrorFilter& aFilter);
+ already_AddRefed<dom::Promise> PopErrorScope(ErrorResult& aRv);
+
+ void Destroy();
+
+ IMPL_EVENT_HANDLER(uncapturederror)
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_DEVICE_H_
diff --git a/dom/webgpu/DeviceLostInfo.cpp b/dom/webgpu/DeviceLostInfo.cpp
new file mode 100644
index 0000000000..4f1153ea60
--- /dev/null
+++ b/dom/webgpu/DeviceLostInfo.cpp
@@ -0,0 +1,13 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "DeviceLostInfo.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(DeviceLostInfo, mGlobal)
+GPU_IMPL_JS_WRAP(DeviceLostInfo)
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/DeviceLostInfo.h b/dom/webgpu/DeviceLostInfo.h
new file mode 100644
index 0000000000..1ab77610c7
--- /dev/null
+++ b/dom/webgpu/DeviceLostInfo.h
@@ -0,0 +1,51 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_DeviceLostInfo_H_
+#define GPU_DeviceLostInfo_H_
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "mozilla/Maybe.h"
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+
+namespace mozilla::webgpu {
+class Device;
+
+class DeviceLostInfo final : public nsWrapperCache {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(DeviceLostInfo)
+ GPU_DECL_JS_WRAP(DeviceLostInfo)
+
+ explicit DeviceLostInfo(nsIGlobalObject* const aGlobal,
+ const nsAString& aMessage)
+ : mGlobal(aGlobal), mMessage(aMessage) {}
+ DeviceLostInfo(nsIGlobalObject* const aGlobal,
+ dom::GPUDeviceLostReason aReason, const nsAString& aMessage)
+ : mGlobal(aGlobal), mReason(Some(aReason)), mMessage(aMessage) {}
+
+ private:
+ ~DeviceLostInfo() = default;
+ void Cleanup() {}
+
+ nsCOMPtr<nsIGlobalObject> mGlobal;
+ const Maybe<dom::GPUDeviceLostReason> mReason;
+ const nsAutoString mMessage;
+
+ public:
+ void GetReason(JSContext* aCx, JS::MutableHandle<JS::Value> aRetval) {
+ if (!mReason || !dom::ToJSValue(aCx, mReason.value(), aRetval)) {
+ aRetval.setUndefined();
+ }
+ }
+
+ void GetMessage(nsAString& aValue) const { aValue = mMessage; }
+
+ nsIGlobalObject* GetParentObject() const { return mGlobal; }
+};
+
+} // namespace mozilla::webgpu
+
+#endif // GPU_DeviceLostInfo_H_
diff --git a/dom/webgpu/Error.cpp b/dom/webgpu/Error.cpp
new file mode 100644
index 0000000000..fc331553df
--- /dev/null
+++ b/dom/webgpu/Error.cpp
@@ -0,0 +1,20 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "Error.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(Error, mGlobal)
+
+Error::Error(nsIGlobalObject* const aGlobal, const nsACString& aMessage)
+ : mGlobal(aGlobal) {
+ CopyUTF8toUTF16(aMessage, mMessage);
+}
+
+Error::Error(nsIGlobalObject* const aGlobal, const nsAString& aMessage)
+ : mGlobal(aGlobal), mMessage(aMessage) {}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/Error.h b/dom/webgpu/Error.h
new file mode 100644
index 0000000000..59e46c2e23
--- /dev/null
+++ b/dom/webgpu/Error.h
@@ -0,0 +1,46 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_Error_H_
+#define GPU_Error_H_
+
+#include "js/Value.h"
+#include "mozilla/WeakPtr.h"
+#include "nsIGlobalObject.h"
+#include "nsString.h"
+#include "ObjectModel.h"
+
+namespace mozilla {
+class ErrorResult;
+namespace dom {
+class GlobalObject;
+} // namespace dom
+namespace webgpu {
+
+class Error : public nsWrapperCache, public SupportsWeakPtr {
+ protected:
+ nsCOMPtr<nsIGlobalObject> mGlobal;
+ nsString mMessage;
+
+ public:
+ GPU_DECL_CYCLE_COLLECTION(Error)
+
+ Error(nsIGlobalObject* const aGlobal, const nsAString& aMessage);
+ Error(nsIGlobalObject* const aGlobal, const nsACString& aMessage);
+
+ protected:
+ virtual ~Error() = default;
+ virtual void Cleanup() {}
+
+ public:
+ void GetMessage(nsAString& aMessage) const { aMessage = mMessage; }
+ nsIGlobalObject* GetParentObject() const { return mGlobal; }
+ virtual JSObject* WrapObject(JSContext*, JS::Handle<JSObject*>) = 0;
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_Error_H_
diff --git a/dom/webgpu/ExternalTexture.cpp b/dom/webgpu/ExternalTexture.cpp
new file mode 100644
index 0000000000..0cab2ae3f2
--- /dev/null
+++ b/dom/webgpu/ExternalTexture.cpp
@@ -0,0 +1,39 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ExternalTexture.h"
+
+#ifdef XP_WIN
+# include "mozilla/webgpu/ExternalTextureD3D11.h"
+#endif
+
+namespace mozilla::webgpu {
+
+// static
+UniquePtr<ExternalTexture> ExternalTexture::Create(
+ const uint32_t aWidth, const uint32_t aHeight,
+ const struct ffi::WGPUTextureFormat aFormat,
+ const ffi::WGPUTextureUsages aUsage) {
+ UniquePtr<ExternalTexture> texture;
+#ifdef XP_WIN
+ texture = ExternalTextureD3D11::Create(aWidth, aHeight, aFormat, aUsage);
+#endif
+ return texture;
+}
+
+ExternalTexture::ExternalTexture(const uint32_t aWidth, const uint32_t aHeight,
+ const struct ffi::WGPUTextureFormat aFormat,
+ const ffi::WGPUTextureUsages aUsage)
+ : mWidth(aWidth), mHeight(aHeight), mFormat(aFormat), mUsage(aUsage) {}
+
+ExternalTexture::~ExternalTexture() {}
+
+void ExternalTexture::SetSubmissionIndex(uint64_t aSubmissionIndex) {
+ MOZ_ASSERT(aSubmissionIndex != 0);
+
+ mSubmissionIndex = aSubmissionIndex;
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/ExternalTexture.h b/dom/webgpu/ExternalTexture.h
new file mode 100644
index 0000000000..2c42d478ae
--- /dev/null
+++ b/dom/webgpu/ExternalTexture.h
@@ -0,0 +1,60 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ExternalTexture_H_
+#define ExternalTexture_H_
+
+#include "mozilla/gfx/Point.h"
+#include "mozilla/layers/LayersSurfaces.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+
+namespace mozilla {
+
+namespace ipc {
+class Shmem;
+}
+
+namespace webgpu {
+
+// A texture that can be used by the WebGPU implementation but is created and
+// owned by Gecko
+class ExternalTexture {
+ public:
+ static UniquePtr<ExternalTexture> Create(
+ const uint32_t aWidth, const uint32_t aHeight,
+ const struct ffi::WGPUTextureFormat aFormat,
+ const ffi::WGPUTextureUsages aUsage);
+
+ ExternalTexture(const uint32_t aWidth, const uint32_t aHeight,
+ const struct ffi::WGPUTextureFormat aFormat,
+ const ffi::WGPUTextureUsages aUsage);
+ virtual ~ExternalTexture();
+
+ virtual void* GetExternalTextureHandle() { return nullptr; }
+
+ virtual Maybe<layers::SurfaceDescriptor> ToSurfaceDescriptor(
+ Maybe<gfx::FenceInfo>& aFenceInfo) = 0;
+
+ virtual void GetSnapshot(const ipc::Shmem& aDestShmem,
+ const gfx::IntSize& aSize) {}
+
+ gfx::IntSize GetSize() { return gfx::IntSize(mWidth, mHeight); }
+
+ void SetSubmissionIndex(uint64_t aSubmissionIndex);
+ uint64_t GetSubmissionIndex() const { return mSubmissionIndex; }
+
+ const uint32_t mWidth;
+ const uint32_t mHeight;
+ const struct ffi::WGPUTextureFormat mFormat;
+ const ffi::WGPUTextureUsages mUsage;
+
+ protected:
+ uint64_t mSubmissionIndex = 0;
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif  // ExternalTexture_H_
diff --git a/dom/webgpu/ExternalTextureD3D11.cpp b/dom/webgpu/ExternalTextureD3D11.cpp
new file mode 100644
index 0000000000..34be281b5b
--- /dev/null
+++ b/dom/webgpu/ExternalTextureD3D11.cpp
@@ -0,0 +1,168 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ExternalTextureD3D11.h"
+
+#include <d3d11.h>
+
+#include "mozilla/gfx/DeviceManagerDx.h"
+#include "mozilla/gfx/Logging.h"
+#include "mozilla/layers/ImageDataSerializer.h"
+
+namespace mozilla::webgpu {
+
+// static
+UniquePtr<ExternalTextureD3D11> ExternalTextureD3D11::Create(
+ const uint32_t aWidth, const uint32_t aHeight,
+ const struct ffi::WGPUTextureFormat aFormat,
+ const ffi::WGPUTextureUsages aUsage) {
+ const RefPtr<ID3D11Device> d3d11Device =
+ gfx::DeviceManagerDx::Get()->GetCompositorDevice();
+ if (!d3d11Device) {
+ gfxCriticalNoteOnce << "CompositorDevice does not exist";
+ return nullptr;
+ }
+
+ if (aFormat.tag != ffi::WGPUTextureFormat_Bgra8Unorm) {
+    gfxCriticalNoteOnce << "Unsupported format: " << aFormat.tag;
+ return nullptr;
+ }
+
+ CD3D11_TEXTURE2D_DESC desc(
+ DXGI_FORMAT_B8G8R8A8_UNORM, aWidth, aHeight, 1, 1,
+ D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET);
+
+ if (aUsage & WGPUTextureUsages_STORAGE_BINDING) {
+ desc.BindFlags |= D3D11_BIND_UNORDERED_ACCESS;
+ }
+
+ desc.MiscFlags =
+ D3D11_RESOURCE_MISC_SHARED_NTHANDLE | D3D11_RESOURCE_MISC_SHARED;
+
+ RefPtr<ID3D11Texture2D> texture;
+ HRESULT hr =
+ d3d11Device->CreateTexture2D(&desc, nullptr, getter_AddRefs(texture));
+ if (FAILED(hr)) {
+ gfxCriticalNoteOnce << "CreateTexture2D failed: " << gfx::hexa(hr);
+ return nullptr;
+ }
+
+ RefPtr<IDXGIResource1> resource;
+ texture->QueryInterface((IDXGIResource1**)getter_AddRefs(resource));
+ if (!resource) {
+ gfxCriticalNoteOnce << "Failed to get IDXGIResource";
+    return nullptr;
+ }
+
+ HANDLE sharedHandle;
+ hr = resource->CreateSharedHandle(
+ nullptr, DXGI_SHARED_RESOURCE_READ | DXGI_SHARED_RESOURCE_WRITE, nullptr,
+ &sharedHandle);
+ if (FAILED(hr)) {
+    gfxCriticalNoteOnce << "CreateSharedHandle failed: " << gfx::hexa(hr);
+    return nullptr;
+ }
+
+ RefPtr<gfx::FileHandleWrapper> handle =
+ new gfx::FileHandleWrapper(UniqueFileHandle(sharedHandle));
+
+ return MakeUnique<ExternalTextureD3D11>(aWidth, aHeight, aFormat, aUsage,
+ texture, std::move(handle));
+}
+
+ExternalTextureD3D11::ExternalTextureD3D11(
+ const uint32_t aWidth, const uint32_t aHeight,
+ const struct ffi::WGPUTextureFormat aFormat,
+ const ffi::WGPUTextureUsages aUsage, const RefPtr<ID3D11Texture2D> aTexture,
+ RefPtr<gfx::FileHandleWrapper>&& aSharedHandle)
+ : ExternalTexture(aWidth, aHeight, aFormat, aUsage),
+ mTexture(aTexture),
+ mSharedHandle(std::move(aSharedHandle)) {
+ MOZ_ASSERT(mTexture);
+}
+
+ExternalTextureD3D11::~ExternalTextureD3D11() {}
+
+void* ExternalTextureD3D11::GetExternalTextureHandle() {
+ if (!mSharedHandle) {
+ return nullptr;
+ }
+
+ return mSharedHandle->GetHandle();
+}
+
+Maybe<layers::SurfaceDescriptor> ExternalTextureD3D11::ToSurfaceDescriptor(
+ Maybe<gfx::FenceInfo>& aFenceInfo) {
+ const auto format = gfx::SurfaceFormat::B8G8R8A8;
+ return Some(layers::SurfaceDescriptorD3D10(
+ mSharedHandle,
+ /* gpuProcessTextureId */ Nothing(),
+ /* arrayIndex */ 0, format, gfx::IntSize(mWidth, mHeight),
+ gfx::ColorSpace2::SRGB, gfx::ColorRange::FULL,
+ /* hasKeyedMutex */ false, aFenceInfo,
+ /* gpuProcessQueryId */ Nothing()));
+}
+
+void ExternalTextureD3D11::GetSnapshot(const ipc::Shmem& aDestShmem,
+ const gfx::IntSize& aSize) {
+ RefPtr<ID3D11Device> device;
+ mTexture->GetDevice(getter_AddRefs(device));
+ if (!device) {
+ MOZ_ASSERT_UNREACHABLE("unexpected to be called");
+ gfxCriticalNoteOnce << "Failed to get ID3D11Device";
+ return;
+ }
+
+ RefPtr<ID3D11DeviceContext> deviceContext;
+ device->GetImmediateContext(getter_AddRefs(deviceContext));
+ if (!deviceContext) {
+ MOZ_ASSERT_UNREACHABLE("unexpected to be called");
+ gfxCriticalNoteOnce << "Failed to get ID3D11DeviceContext";
+ return;
+ }
+
+ D3D11_TEXTURE2D_DESC textureDesc = {0};
+ mTexture->GetDesc(&textureDesc);
+
+ textureDesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
+ textureDesc.Usage = D3D11_USAGE_STAGING;
+ textureDesc.BindFlags = 0;
+ textureDesc.MiscFlags = 0;
+ textureDesc.MipLevels = 1;
+
+ RefPtr<ID3D11Texture2D> cpuTexture;
+ HRESULT hr = device->CreateTexture2D(&textureDesc, nullptr,
+ getter_AddRefs(cpuTexture));
+ if (FAILED(hr)) {
+ MOZ_ASSERT_UNREACHABLE("unexpected to be called");
+ gfxCriticalNote << "Failed to create ID3D11Texture2D: " << gfx::hexa(hr);
+ return;
+ }
+
+ deviceContext->CopyResource(cpuTexture, mTexture);
+
+ D3D11_MAPPED_SUBRESOURCE map;
+ hr = deviceContext->Map(cpuTexture, 0, D3D11_MAP_READ, 0, &map);
+ if (FAILED(hr)) {
+ MOZ_ASSERT_UNREACHABLE("unexpected to be called");
+ gfxCriticalNote << "Failed to map ID3D11Texture2D: " << gfx::hexa(hr);
+ return;
+ }
+
+ const uint32_t stride = layers::ImageDataSerializer::ComputeRGBStride(
+ gfx::SurfaceFormat::B8G8R8A8, aSize.width);
+ uint8_t* src = static_cast<uint8_t*>(map.pData);
+ uint8_t* dst = aDestShmem.get<uint8_t>();
+
+ MOZ_ASSERT(stride * aSize.height <= aDestShmem.Size<uint8_t>());
+
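+  // Copy row by row: the staging texture's RowPitch may be larger than the
+  // tightly packed stride expected in the destination shmem.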
+ for (int y = 0; y < aSize.height; y++) {
+ memcpy(dst, src, stride);
+ src += map.RowPitch;
+ dst += stride;
+ }
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/ExternalTextureD3D11.h b/dom/webgpu/ExternalTextureD3D11.h
new file mode 100644
index 0000000000..74f4cbdc86
--- /dev/null
+++ b/dom/webgpu/ExternalTextureD3D11.h
@@ -0,0 +1,48 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_ExternalTextureD3D11_H_
+#define GPU_ExternalTextureD3D11_H_
+
+#include "mozilla/gfx/FileHandleWrapper.h"
+#include "mozilla/webgpu/ExternalTexture.h"
+
+struct ID3D11Texture2D;
+
+namespace mozilla {
+
+namespace webgpu {
+
+class ExternalTextureD3D11 final : public ExternalTexture {
+ public:
+ static UniquePtr<ExternalTextureD3D11> Create(
+ const uint32_t aWidth, const uint32_t aHeight,
+ const struct ffi::WGPUTextureFormat aFormat,
+ const ffi::WGPUTextureUsages aUsage);
+
+ ExternalTextureD3D11(const uint32_t aWidth, const uint32_t aHeight,
+ const struct ffi::WGPUTextureFormat aFormat,
+ const ffi::WGPUTextureUsages aUsage,
+ const RefPtr<ID3D11Texture2D> aTexture,
+ RefPtr<gfx::FileHandleWrapper>&& aSharedHandle);
+ virtual ~ExternalTextureD3D11();
+
+ void* GetExternalTextureHandle() override;
+
+ Maybe<layers::SurfaceDescriptor> ToSurfaceDescriptor(
+ Maybe<gfx::FenceInfo>& aFenceInfo) override;
+
+ void GetSnapshot(const ipc::Shmem& aDestShmem,
+ const gfx::IntSize& aSize) override;
+
+ protected:
+ const RefPtr<ID3D11Texture2D> mTexture;
+ const RefPtr<gfx::FileHandleWrapper> mSharedHandle;
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif  // GPU_ExternalTextureD3D11_H_
diff --git a/dom/webgpu/Instance.cpp b/dom/webgpu/Instance.cpp
new file mode 100644
index 0000000000..4bf58b7fa8
--- /dev/null
+++ b/dom/webgpu/Instance.cpp
@@ -0,0 +1,125 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "Instance.h"
+
+#include "Adapter.h"
+#include "nsIGlobalObject.h"
+#include "ipc/WebGPUChild.h"
+#include "ipc/WebGPUTypes.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+#include "mozilla/dom/Promise.h"
+#include "mozilla/gfx/CanvasManagerChild.h"
+#include "mozilla/gfx/gfxVars.h"
+#include "mozilla/StaticPrefs_dom.h"
+
+#include <optional>
+#include <string_view>
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(Instance, mOwner)
+
+static inline nsDependentCString ToCString(const std::string_view s) {
+ return {s.data(), s.length()};
+}
+
+/* static */ bool Instance::PrefEnabled(JSContext* aCx, JSObject* aObj) {
+ if (!StaticPrefs::dom_webgpu_enabled()) {
+ return false;
+ }
+
+ if (NS_IsMainThread()) {
+ return true;
+ }
+
+ return StaticPrefs::dom_webgpu_workers_enabled();
+}
+
+/*static*/
+already_AddRefed<Instance> Instance::Create(nsIGlobalObject* aOwner) {
+ RefPtr<Instance> result = new Instance(aOwner);
+ return result.forget();
+}
+
+Instance::Instance(nsIGlobalObject* aOwner) : mOwner(aOwner) {}
+
+Instance::~Instance() { Cleanup(); }
+
+void Instance::Cleanup() {}
+
+JSObject* Instance::WrapObject(JSContext* cx,
+ JS::Handle<JSObject*> givenProto) {
+ return dom::GPU_Binding::Wrap(cx, this, givenProto);
+}
+
+already_AddRefed<dom::Promise> Instance::RequestAdapter(
+ const dom::GPURequestAdapterOptions& aOptions, ErrorResult& aRv) {
+ RefPtr<dom::Promise> promise = dom::Promise::Create(mOwner, aRv);
+ if (NS_WARN_IF(aRv.Failed())) {
+ return nullptr;
+ }
+
+ // -
+ // Check if we should allow the request.
+
+ const auto errStr = [&]() -> std::optional<std::string_view> {
+#ifdef RELEASE_OR_BETA
+ if (true) {
+ return "WebGPU is not yet available in Release or Beta builds.";
+ }
+#endif
+ if (!gfx::gfxVars::AllowWebGPU()) {
+ return "WebGPU is disabled by blocklist.";
+ }
+ if (!StaticPrefs::dom_webgpu_enabled()) {
+ return "WebGPU is disabled by dom.webgpu.enabled:false.";
+ }
+ return {};
+ }();
+ if (errStr) {
+ promise->MaybeRejectWithNotSupportedError(ToCString(*errStr));
+ return promise.forget();
+ }
+
+ // -
+ // Make the request.
+
+ auto* const canvasManager = gfx::CanvasManagerChild::Get();
+ if (!canvasManager) {
+ promise->MaybeRejectWithInvalidStateError(
+ "Failed to create CanavasManagerChild");
+ return promise.forget();
+ }
+
+ RefPtr<WebGPUChild> bridge = canvasManager->GetWebGPUChild();
+ if (!bridge) {
+ promise->MaybeRejectWithInvalidStateError("Failed to create WebGPUChild");
+ return promise.forget();
+ }
+
+ RefPtr<Instance> instance = this;
+
+ bridge->InstanceRequestAdapter(aOptions)->Then(
+ GetCurrentSerialEventTarget(), __func__,
+ [promise, instance, bridge](ipc::ByteBuf aInfoBuf) {
+ auto info = std::make_shared<ffi::WGPUAdapterInformation>();
+ ffi::wgpu_client_adapter_extract_info(ToFFI(&aInfoBuf), info.get());
+ MOZ_ASSERT(info->id != 0);
+ RefPtr<Adapter> adapter = new Adapter(instance, bridge, info);
+ promise->MaybeResolve(adapter);
+ },
+ [promise](const Maybe<ipc::ResponseRejectReason>& aResponseReason) {
+ if (aResponseReason.isSome()) {
+ promise->MaybeRejectWithAbortError("Internal communication error!");
+ } else {
+ promise->MaybeResolve(JS::NullHandleValue);
+ }
+ });
+
+ return promise.forget();
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/Instance.h b/dom/webgpu/Instance.h
new file mode 100644
index 0000000000..c50a6f9bbd
--- /dev/null
+++ b/dom/webgpu/Instance.h
@@ -0,0 +1,62 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_INSTANCE_H_
+#define GPU_INSTANCE_H_
+
+#include "mozilla/AlreadyAddRefed.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/dom/WebGPUBinding.h"
+#include "mozilla/layers/BuildConstants.h"
+#include "nsCOMPtr.h"
+#include "ObjectModel.h"
+
+namespace mozilla {
+class ErrorResult;
+namespace dom {
+class Promise;
+struct GPURequestAdapterOptions;
+} // namespace dom
+
+namespace webgpu {
+class Adapter;
+class GPUAdapter;
+class WebGPUChild;
+
+class Instance final : public nsWrapperCache {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(Instance)
+ GPU_DECL_JS_WRAP(Instance)
+
+ nsIGlobalObject* GetParentObject() const { return mOwner; }
+
+ static bool PrefEnabled(JSContext* aCx, JSObject* aObj);
+
+ static already_AddRefed<Instance> Create(nsIGlobalObject* aOwner);
+
+ already_AddRefed<dom::Promise> RequestAdapter(
+ const dom::GPURequestAdapterOptions& aOptions, ErrorResult& aRv);
+
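+  // RGBA8 on Android, BGRA8 everywhere else (presumably matching the
+  // platform's preferred surface format).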
+ dom::GPUTextureFormat GetPreferredCanvasFormat() const {
+ if (kIsAndroid) {
+ return dom::GPUTextureFormat::Rgba8unorm;
+ }
+ return dom::GPUTextureFormat::Bgra8unorm;
+  }
+
+ private:
+ explicit Instance(nsIGlobalObject* aOwner);
+ virtual ~Instance();
+ void Cleanup();
+
+ nsCOMPtr<nsIGlobalObject> mOwner;
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_INSTANCE_H_
diff --git a/dom/webgpu/InternalError.cpp b/dom/webgpu/InternalError.cpp
new file mode 100644
index 0000000000..6cda9673f6
--- /dev/null
+++ b/dom/webgpu/InternalError.cpp
@@ -0,0 +1,21 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "InternalError.h"
+#include "mozilla/dom/WebGPUBinding.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_JS_WRAP(InternalError)
+
+already_AddRefed<InternalError> InternalError::Constructor(
+ const dom::GlobalObject& aGlobal, const nsAString& aString,
+ ErrorResult& aRv) {
+ nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aGlobal.GetAsSupports());
+ MOZ_RELEASE_ASSERT(global);
+ return MakeAndAddRef<InternalError>(global, aString);
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/InternalError.h b/dom/webgpu/InternalError.h
new file mode 100644
index 0000000000..cb7b8537af
--- /dev/null
+++ b/dom/webgpu/InternalError.h
@@ -0,0 +1,40 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_InternalError_H_
+#define GPU_InternalError_H_
+
+#include "Error.h"
+
+namespace mozilla {
+class ErrorResult;
+namespace dom {
+class GlobalObject;
+} // namespace dom
+namespace webgpu {
+
+class InternalError final : public Error {
+ public:
+ GPU_DECL_JS_WRAP(InternalError)
+
+ InternalError(nsIGlobalObject* const aGlobal, const nsAString& aMessage)
+ : Error(aGlobal, aMessage) {}
+
+ InternalError(nsIGlobalObject* const aGlobal, const nsACString& aMessage)
+ : Error(aGlobal, aMessage) {}
+
+ private:
+ ~InternalError() override = default;
+
+ public:
+ static already_AddRefed<InternalError> Constructor(
+ const dom::GlobalObject& aGlobal, const nsAString& aString,
+ ErrorResult& aRv);
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_InternalError_H_
diff --git a/dom/webgpu/ObjectModel.cpp b/dom/webgpu/ObjectModel.cpp
new file mode 100644
index 0000000000..9e8bddc5ef
--- /dev/null
+++ b/dom/webgpu/ObjectModel.cpp
@@ -0,0 +1,41 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ObjectModel.h"
+
+#include "Adapter.h"
+#include "ShaderModule.h"
+#include "CompilationInfo.h"
+#include "Device.h"
+#include "CommandEncoder.h"
+#include "Instance.h"
+#include "Texture.h"
+#include "nsIGlobalObject.h"
+
+namespace mozilla::webgpu {
+
+template <typename T>
+ChildOf<T>::ChildOf(T* const parent) : mParent(parent) {}
+
+template <typename T>
+ChildOf<T>::~ChildOf() = default;
+
+template <typename T>
+nsIGlobalObject* ChildOf<T>::GetParentObject() const {
+ return mParent->GetParentObject();
+}
+
+void ObjectBase::GetLabel(nsAString& aValue) const { aValue = mLabel; }
+void ObjectBase::SetLabel(const nsAString& aLabel) { mLabel = aLabel; }
+
+template class ChildOf<Adapter>;
+template class ChildOf<ShaderModule>;
+template class ChildOf<CompilationInfo>;
+template class ChildOf<CommandEncoder>;
+template class ChildOf<Device>;
+template class ChildOf<Instance>;
+template class ChildOf<Texture>;
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/ObjectModel.h b/dom/webgpu/ObjectModel.h
new file mode 100644
index 0000000000..59e154bd44
--- /dev/null
+++ b/dom/webgpu/ObjectModel.h
@@ -0,0 +1,134 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_OBJECT_MODEL_H_
+#define GPU_OBJECT_MODEL_H_
+
+#include "nsWrapperCache.h"
+#include "nsString.h"
+
+class nsIGlobalObject;
+
+namespace mozilla::webgpu {
+class WebGPUChild;
+
+template <typename T>
+class ChildOf {
+ protected:
+ explicit ChildOf(T* const parent);
+ virtual ~ChildOf();
+
+ RefPtr<T> mParent;
+
+ public:
+ nsIGlobalObject* GetParentObject() const;
+};
+
+class ObjectBase : public nsWrapperCache {
+ protected:
+ virtual ~ObjectBase() = default;
+
+ // False if this object is definitely invalid.
+ //
+ // See WebGPU §3.2, "Invalid Internal Objects & Contagious Invalidity".
+ //
+ // There could also be state in the GPU process indicating that our
+ // counterpart object there is invalid; certain GPU process operations will
+  // report an error back to us if we try to use it. But if it's useful to know
+ // whether the object is "definitely invalid", this should suffice.
+ bool mValid = true;
+
+ public:
+ // Return true if this WebGPU object may be valid.
+ //
+ // This is used by methods that want to know whether somebody other than
+ // `this` is valid. Generally, WebGPU object methods check `this->mValid`
+ // directly.
+ bool IsValid() const { return mValid; }
+
+ void GetLabel(nsAString& aValue) const;
+ void SetLabel(const nsAString& aLabel);
+
+ auto CLabel() const { return NS_ConvertUTF16toUTF8(mLabel); }
+
+ protected:
+ // Object label, initialized from GPUObjectDescriptorBase.label.
+ nsString mLabel;
+};
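+
+// Concrete WebGPU objects combine ObjectBase with ChildOf<Parent>: methods
+// check `mValid` before issuing FFI/IPC calls, and Cleanup() clears it before
+// sending the corresponding *Drop message (see e.g. PipelineLayout and
+// RenderBundle in this patch).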
+
+} // namespace mozilla::webgpu
+
+#define GPU_DECL_JS_WRAP(T) \
+ JSObject* WrapObject(JSContext* cx, JS::Handle<JSObject*> givenProto) \
+ override;
+
+#define GPU_DECL_CYCLE_COLLECTION(T) \
+ NS_DECL_CYCLE_COLLECTION_NATIVE_WRAPPERCACHE_CLASS(T) \
+ NS_INLINE_DECL_CYCLE_COLLECTING_NATIVE_REFCOUNTING(T)
+
+#define GPU_IMPL_JS_WRAP(T) \
+ JSObject* T::WrapObject(JSContext* cx, JS::Handle<JSObject*> givenProto) { \
+ return dom::GPU##T##_Binding::Wrap(cx, this, givenProto); \
+ }
+
+// Note: we don't use `NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE` directly
+// because we always need to run a custom action, `tmp->Cleanup()`, during
+// Unlink.
+#define GPU_IMPL_CYCLE_COLLECTION_WRAPPERCACHE(T, ...) \
+ NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE_CLASS(T) \
+ NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(T) \
+ tmp->Cleanup(); \
+ NS_IMPL_CYCLE_COLLECTION_UNLINK(__VA_ARGS__) \
+ NS_IMPL_CYCLE_COLLECTION_UNLINK_PRESERVED_WRAPPER \
+ NS_IMPL_CYCLE_COLLECTION_UNLINK_END \
+ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(T) \
+ NS_IMPL_CYCLE_COLLECTION_TRAVERSE(__VA_ARGS__) \
+ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
+
+#define GPU_IMPL_CYCLE_COLLECTION_WRAPPERCACHE_WEAK_PTR(T, ...) \
+ NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE_CLASS(T) \
+ NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(T) \
+ tmp->Cleanup(); \
+ NS_IMPL_CYCLE_COLLECTION_UNLINK(__VA_ARGS__) \
+ NS_IMPL_CYCLE_COLLECTION_UNLINK_PRESERVED_WRAPPER \
+ NS_IMPL_CYCLE_COLLECTION_UNLINK_WEAK_PTR \
+ NS_IMPL_CYCLE_COLLECTION_UNLINK_END \
+ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(T) \
+ NS_IMPL_CYCLE_COLLECTION_TRAVERSE(__VA_ARGS__) \
+ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
+
+#define GPU_IMPL_CYCLE_COLLECTION_WRAPPERCACHE_INHERITED(T, P, ...) \
+ NS_IMPL_CYCLE_COLLECTION_CLASS(T) \
+ NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(T, P) \
+ tmp->Cleanup(); \
+ NS_IMPL_CYCLE_COLLECTION_UNLINK(__VA_ARGS__) \
+ NS_IMPL_CYCLE_COLLECTION_UNLINK_PRESERVED_WRAPPER \
+ NS_IMPL_CYCLE_COLLECTION_UNLINK_WEAK_PTR \
+ NS_IMPL_CYCLE_COLLECTION_UNLINK_END \
+ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(T, P) \
+ NS_IMPL_CYCLE_COLLECTION_TRAVERSE(__VA_ARGS__) \
+ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
+
+#define GPU_IMPL_CYCLE_COLLECTION(T, ...) \
+ GPU_IMPL_CYCLE_COLLECTION_WRAPPERCACHE(T, __VA_ARGS__)
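+
+// Typical use in a concrete class implementation (see PipelineLayout.cpp in
+// this patch):
+//
+//   GPU_IMPL_CYCLE_COLLECTION(PipelineLayout, mParent)
+//   GPU_IMPL_JS_WRAP(PipelineLayout)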
+
+template <typename T>
+void ImplCycleCollectionTraverse(nsCycleCollectionTraversalCallback& callback,
+ nsTArray<RefPtr<const T>>& field,
+ const char* name, uint32_t flags) {
+ for (auto& element : field) {
+ CycleCollectionNoteChild(callback, const_cast<T*>(element.get()), name,
+ flags);
+ }
+}
+
+template <typename T>
+void ImplCycleCollectionUnlink(nsTArray<RefPtr<const T>>& field) {
+ for (auto& element : field) {
+ ImplCycleCollectionUnlink(element);
+ }
+ field.Clear();
+}
+
+#endif // GPU_OBJECT_MODEL_H_
diff --git a/dom/webgpu/OutOfMemoryError.cpp b/dom/webgpu/OutOfMemoryError.cpp
new file mode 100644
index 0000000000..420096c44c
--- /dev/null
+++ b/dom/webgpu/OutOfMemoryError.cpp
@@ -0,0 +1,21 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "OutOfMemoryError.h"
+#include "mozilla/dom/WebGPUBinding.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_JS_WRAP(OutOfMemoryError)
+
+already_AddRefed<OutOfMemoryError> OutOfMemoryError::Constructor(
+ const dom::GlobalObject& aGlobal, const nsAString& aString,
+ ErrorResult& aRv) {
+ nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aGlobal.GetAsSupports());
+ MOZ_RELEASE_ASSERT(global);
+ return MakeAndAddRef<OutOfMemoryError>(global, aString);
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/OutOfMemoryError.h b/dom/webgpu/OutOfMemoryError.h
new file mode 100644
index 0000000000..3af96f9d2b
--- /dev/null
+++ b/dom/webgpu/OutOfMemoryError.h
@@ -0,0 +1,40 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_OutOfMemoryError_H_
+#define GPU_OutOfMemoryError_H_
+
+#include "Error.h"
+
+namespace mozilla {
+class ErrorResult;
+namespace dom {
+class GlobalObject;
+} // namespace dom
+namespace webgpu {
+
+class OutOfMemoryError final : public Error {
+ public:
+ GPU_DECL_JS_WRAP(OutOfMemoryError)
+
+ OutOfMemoryError(nsIGlobalObject* const aGlobal, const nsAString& aMessage)
+ : Error(aGlobal, aMessage) {}
+
+ OutOfMemoryError(nsIGlobalObject* const aGlobal, const nsACString& aMessage)
+ : Error(aGlobal, aMessage) {}
+
+ private:
+ ~OutOfMemoryError() override = default;
+
+ public:
+ static already_AddRefed<OutOfMemoryError> Constructor(
+ const dom::GlobalObject& aGlobal, const nsAString& aString,
+ ErrorResult& aRv);
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_OutOfMemoryError_H_
diff --git a/dom/webgpu/PipelineLayout.cpp b/dom/webgpu/PipelineLayout.cpp
new file mode 100644
index 0000000000..716be9c74a
--- /dev/null
+++ b/dom/webgpu/PipelineLayout.cpp
@@ -0,0 +1,34 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "PipelineLayout.h"
+#include "ipc/WebGPUChild.h"
+
+#include "Device.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(PipelineLayout, mParent)
+GPU_IMPL_JS_WRAP(PipelineLayout)
+
+PipelineLayout::PipelineLayout(Device* const aParent, RawId aId)
+ : ChildOf(aParent), mId(aId) {
+ MOZ_RELEASE_ASSERT(aId);
+}
+
+PipelineLayout::~PipelineLayout() { Cleanup(); }
+
+void PipelineLayout::Cleanup() {
+ if (mValid && mParent) {
+ mValid = false;
+ auto bridge = mParent->GetBridge();
+ if (bridge && bridge->IsOpen()) {
+ bridge->SendPipelineLayoutDrop(mId);
+ }
+ }
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/PipelineLayout.h b/dom/webgpu/PipelineLayout.h
new file mode 100644
index 0000000000..65293d778d
--- /dev/null
+++ b/dom/webgpu/PipelineLayout.h
@@ -0,0 +1,33 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_PipelineLayout_H_
+#define GPU_PipelineLayout_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+
+namespace mozilla::webgpu {
+
+class Device;
+
+class PipelineLayout final : public ObjectBase, public ChildOf<Device> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(PipelineLayout)
+ GPU_DECL_JS_WRAP(PipelineLayout)
+
+ PipelineLayout(Device* const aParent, RawId aId);
+
+ const RawId mId;
+
+ private:
+ virtual ~PipelineLayout();
+ void Cleanup();
+};
+
+} // namespace mozilla::webgpu
+
+#endif // GPU_PipelineLayout_H_
diff --git a/dom/webgpu/QuerySet.cpp b/dom/webgpu/QuerySet.cpp
new file mode 100644
index 0000000000..05f30f6cc8
--- /dev/null
+++ b/dom/webgpu/QuerySet.cpp
@@ -0,0 +1,22 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "QuerySet.h"
+
+#include "Device.h"
+
+namespace mozilla::webgpu {
+
+QuerySet::~QuerySet() = default;
+
+GPU_IMPL_CYCLE_COLLECTION(QuerySet, mParent)
+GPU_IMPL_JS_WRAP(QuerySet)
+
+void QuerySet::Destroy() {
+ // TODO
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/QuerySet.h b/dom/webgpu/QuerySet.h
new file mode 100644
index 0000000000..e7e6f4968b
--- /dev/null
+++ b/dom/webgpu/QuerySet.h
@@ -0,0 +1,31 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_QuerySet_H_
+#define GPU_QuerySet_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+
+namespace mozilla::webgpu {
+
+class Device;
+
+class QuerySet final : public ObjectBase, public ChildOf<Device> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(QuerySet)
+ GPU_DECL_JS_WRAP(QuerySet)
+
+ QuerySet() = delete;
+ void Destroy();
+
+ private:
+ virtual ~QuerySet();
+ void Cleanup() {}
+};
+
+} // namespace mozilla::webgpu
+
+#endif // GPU_QuerySet_H_
diff --git a/dom/webgpu/Queue.cpp b/dom/webgpu/Queue.cpp
new file mode 100644
index 0000000000..26952ee173
--- /dev/null
+++ b/dom/webgpu/Queue.cpp
@@ -0,0 +1,413 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "mozilla/dom/UnionTypes.h"
+#include "Queue.h"
+
+#include <algorithm>
+
+#include "CommandBuffer.h"
+#include "CommandEncoder.h"
+#include "ipc/WebGPUChild.h"
+#include "mozilla/Casting.h"
+#include "mozilla/ErrorResult.h"
+#include "mozilla/dom/HTMLCanvasElement.h"
+#include "mozilla/dom/ImageBitmap.h"
+#include "mozilla/dom/OffscreenCanvas.h"
+#include "mozilla/dom/Promise.h"
+#include "mozilla/dom/WebGLTexelConversions.h"
+#include "mozilla/dom/WebGLTypes.h"
+#include "nsLayoutUtils.h"
+#include "Utility.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(Queue, mParent, mBridge)
+GPU_IMPL_JS_WRAP(Queue)
+
+Queue::Queue(Device* const aParent, WebGPUChild* aBridge, RawId aId)
+ : ChildOf(aParent), mBridge(aBridge), mId(aId) {
+ MOZ_RELEASE_ASSERT(aId);
+}
+
+Queue::~Queue() { Cleanup(); }
+
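+// Commits each command buffer and submits the resulting ids in a single
+// QueueSubmit message; buffers whose Commit() yields nothing are skipped.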
+void Queue::Submit(
+ const dom::Sequence<OwningNonNull<CommandBuffer>>& aCommandBuffers) {
+ nsTArray<RawId> list(aCommandBuffers.Length());
+ for (uint32_t i = 0; i < aCommandBuffers.Length(); ++i) {
+ auto idMaybe = aCommandBuffers[i]->Commit();
+ if (idMaybe) {
+ list.AppendElement(*idMaybe);
+ }
+ }
+
+ mBridge->QueueSubmit(mId, mParent->mId, list);
+}
+
+already_AddRefed<dom::Promise> Queue::OnSubmittedWorkDone(ErrorResult& aRv) {
+ RefPtr<dom::Promise> promise = dom::Promise::Create(GetParentObject(), aRv);
+ if (NS_WARN_IF(aRv.Failed())) {
+ return nullptr;
+ }
+ mBridge->QueueOnSubmittedWorkDone(mId, promise);
+
+ return promise.forget();
+}
+
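+// Validates the requested range, copies it into a fresh shared-memory mapping,
+// and forwards a wgpu_queue_write_buffer action (plus the shmem handle) to the
+// GPU process. The byte size must be a multiple of 4.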
+void Queue::WriteBuffer(const Buffer& aBuffer, uint64_t aBufferOffset,
+ const dom::ArrayBufferViewOrArrayBuffer& aData,
+ uint64_t aDataOffset,
+ const dom::Optional<uint64_t>& aSize,
+ ErrorResult& aRv) {
+ if (!aBuffer.mId) {
+ // Invalid buffers are unknown to the parent -- don't try to write
+ // to them.
+ return;
+ }
+
+ dom::ProcessTypedArraysFixed(aData, [&](const Span<const uint8_t>& aData) {
+ uint64_t length = aData.Length();
+ const auto checkedSize = aSize.WasPassed()
+ ? CheckedInt<size_t>(aSize.Value())
+ : CheckedInt<size_t>(length) - aDataOffset;
+ if (!checkedSize.isValid()) {
+ aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
+ return;
+ }
+
+ const auto& size = checkedSize.value();
+ if (aDataOffset + size > length) {
+ aRv.ThrowAbortError(nsPrintfCString("Wrong data size %" PRIuPTR, size));
+ return;
+ }
+
+ if (size % 4 != 0) {
+ aRv.ThrowAbortError("Byte size must be a multiple of 4");
+ return;
+ }
+
+ auto alloc = mozilla::ipc::UnsafeSharedMemoryHandle::CreateAndMap(size);
+ if (alloc.isNothing()) {
+ aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
+ return;
+ }
+
+ auto handle = std::move(alloc.ref().first);
+ auto mapping = std::move(alloc.ref().second);
+
+ memcpy(mapping.Bytes().data(), aData.Elements() + aDataOffset, size);
+ ipc::ByteBuf bb;
+ ffi::wgpu_queue_write_buffer(aBuffer.mId, aBufferOffset, ToFFI(&bb));
+ mBridge->SendQueueWriteAction(mId, mParent->mId, std::move(bb),
+ std::move(handle));
+ });
+}
+
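+// Same shared-memory strategy as WriteBuffer: the texel data is copied into a
+// shmem mapping (with the layout offset rebased to 0) and a
+// wgpu_queue_write_texture action is sent to the GPU process.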
+void Queue::WriteTexture(const dom::GPUImageCopyTexture& aDestination,
+ const dom::ArrayBufferViewOrArrayBuffer& aData,
+ const dom::GPUImageDataLayout& aDataLayout,
+ const dom::GPUExtent3D& aSize, ErrorResult& aRv) {
+ ffi::WGPUImageCopyTexture copyView = {};
+ CommandEncoder::ConvertTextureCopyViewToFFI(aDestination, &copyView);
+ ffi::WGPUImageDataLayout dataLayout = {};
+ CommandEncoder::ConvertTextureDataLayoutToFFI(aDataLayout, &dataLayout);
+ dataLayout.offset = 0; // our Shmem has the contents starting from 0.
+ ffi::WGPUExtent3d extent = {};
+ ConvertExtent3DToFFI(aSize, &extent);
+
+ dom::ProcessTypedArraysFixed(aData, [&](const Span<const uint8_t>& aData) {
+ if (aData.IsEmpty()) {
+ aRv.ThrowAbortError("Input size cannot be zero.");
+ return;
+ }
+
+ const auto checkedSize =
+ CheckedInt<size_t>(aData.Length()) - aDataLayout.mOffset;
+ if (!checkedSize.isValid()) {
+ aRv.ThrowAbortError("Offset is higher than the size");
+ return;
+ }
+ const auto size = checkedSize.value();
+
+ auto alloc = mozilla::ipc::UnsafeSharedMemoryHandle::CreateAndMap(size);
+ if (alloc.isNothing()) {
+ aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
+ return;
+ }
+
+ auto handle = std::move(alloc.ref().first);
+ auto mapping = std::move(alloc.ref().second);
+
+ memcpy(mapping.Bytes().data(), aData.Elements() + aDataLayout.mOffset,
+ size);
+
+ ipc::ByteBuf bb;
+ ffi::wgpu_queue_write_texture(copyView, dataLayout, extent, ToFFI(&bb));
+ mBridge->SendQueueWriteAction(mId, mParent->mId, std::move(bb),
+ std::move(handle));
+ });
+}
+
+static WebGLTexelFormat ToWebGLTexelFormat(gfx::SurfaceFormat aFormat) {
+ switch (aFormat) {
+ case gfx::SurfaceFormat::B8G8R8A8:
+ case gfx::SurfaceFormat::B8G8R8X8:
+ return WebGLTexelFormat::BGRA8;
+ case gfx::SurfaceFormat::R8G8B8A8:
+ case gfx::SurfaceFormat::R8G8B8X8:
+ return WebGLTexelFormat::RGBA8;
+ default:
+ return WebGLTexelFormat::FormatNotSupportingAnyConversion;
+ }
+}
+
+static WebGLTexelFormat ToWebGLTexelFormat(dom::GPUTextureFormat aFormat) {
+  // TODO: We need support for Rgb10a2unorm as well.
+ switch (aFormat) {
+ case dom::GPUTextureFormat::R8unorm:
+ return WebGLTexelFormat::R8;
+ case dom::GPUTextureFormat::R16float:
+ return WebGLTexelFormat::R16F;
+ case dom::GPUTextureFormat::R32float:
+ return WebGLTexelFormat::R32F;
+ case dom::GPUTextureFormat::Rg8unorm:
+ return WebGLTexelFormat::RG8;
+ case dom::GPUTextureFormat::Rg16float:
+ return WebGLTexelFormat::RG16F;
+ case dom::GPUTextureFormat::Rg32float:
+ return WebGLTexelFormat::RG32F;
+ case dom::GPUTextureFormat::Rgba8unorm:
+ case dom::GPUTextureFormat::Rgba8unorm_srgb:
+ return WebGLTexelFormat::RGBA8;
+ case dom::GPUTextureFormat::Bgra8unorm:
+ case dom::GPUTextureFormat::Bgra8unorm_srgb:
+ return WebGLTexelFormat::BGRA8;
+ case dom::GPUTextureFormat::Rgba16float:
+ return WebGLTexelFormat::RGBA16F;
+ case dom::GPUTextureFormat::Rgba32float:
+ return WebGLTexelFormat::RGBA32F;
+ default:
+ return WebGLTexelFormat::FormatNotSupportingAnyConversion;
+ }
+}
+
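+// Reads the source (ImageBitmap, HTMLCanvasElement, or OffscreenCanvas) back
+// as a DataSourceSurface, enforces the CORS and write-only rules, converts the
+// texels to the destination format (handling flipY and premultiplication) into
+// shared memory, and finally issues a queue write_texture action.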
+void Queue::CopyExternalImageToTexture(
+ const dom::GPUImageCopyExternalImage& aSource,
+ const dom::GPUImageCopyTextureTagged& aDestination,
+ const dom::GPUExtent3D& aCopySize, ErrorResult& aRv) {
+ const auto dstFormat = ToWebGLTexelFormat(aDestination.mTexture->Format());
+ if (dstFormat == WebGLTexelFormat::FormatNotSupportingAnyConversion) {
+ aRv.ThrowInvalidStateError("Unsupported destination format");
+ return;
+ }
+
+ const uint32_t surfaceFlags = nsLayoutUtils::SFE_ALLOW_NON_PREMULT;
+ SurfaceFromElementResult sfeResult;
+ switch (aSource.mSource.GetType()) {
+ case decltype(aSource.mSource)::Type::eImageBitmap: {
+ const auto& bitmap = aSource.mSource.GetAsImageBitmap();
+ if (bitmap->IsClosed()) {
+ aRv.ThrowInvalidStateError("Detached ImageBitmap");
+ return;
+ }
+
+ sfeResult = nsLayoutUtils::SurfaceFromImageBitmap(bitmap, surfaceFlags);
+ break;
+ }
+ case decltype(aSource.mSource)::Type::eHTMLCanvasElement: {
+ MOZ_ASSERT(NS_IsMainThread());
+
+ const auto& canvas = aSource.mSource.GetAsHTMLCanvasElement();
+ if (canvas->Width() == 0 || canvas->Height() == 0) {
+ aRv.ThrowInvalidStateError("Zero-sized HTMLCanvasElement");
+ return;
+ }
+
+ sfeResult = nsLayoutUtils::SurfaceFromElement(canvas, surfaceFlags);
+ break;
+ }
+ case decltype(aSource.mSource)::Type::eOffscreenCanvas: {
+ const auto& canvas = aSource.mSource.GetAsOffscreenCanvas();
+ if (canvas->Width() == 0 || canvas->Height() == 0) {
+ aRv.ThrowInvalidStateError("Zero-sized OffscreenCanvas");
+ return;
+ }
+
+ sfeResult =
+ nsLayoutUtils::SurfaceFromOffscreenCanvas(canvas, surfaceFlags);
+ break;
+ }
+ }
+
+ if (!sfeResult.mCORSUsed) {
+ nsIGlobalObject* global = mParent->GetOwnerGlobal();
+ nsIPrincipal* dstPrincipal = global ? global->PrincipalOrNull() : nullptr;
+ if (!sfeResult.mPrincipal || !dstPrincipal ||
+ !dstPrincipal->Subsumes(sfeResult.mPrincipal)) {
+ aRv.ThrowSecurityError("Cross-origin elements require CORS!");
+ return;
+ }
+ }
+
+ if (sfeResult.mIsWriteOnly) {
+ aRv.ThrowSecurityError("Write only source data not supported!");
+ return;
+ }
+
+ RefPtr<gfx::SourceSurface> surface = sfeResult.GetSourceSurface();
+ if (!surface) {
+ aRv.ThrowInvalidStateError("No surface available from source");
+ return;
+ }
+
+ RefPtr<gfx::DataSourceSurface> dataSurface = surface->GetDataSurface();
+ if (!dataSurface) {
+ aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
+ return;
+ }
+
+ bool srcPremultiplied;
+ switch (sfeResult.mAlphaType) {
+ case gfxAlphaType::Premult:
+ srcPremultiplied = true;
+ break;
+ case gfxAlphaType::NonPremult:
+ srcPremultiplied = false;
+ break;
+ case gfxAlphaType::Opaque:
+ // No (un)premultiplication necessary so match the output.
+ srcPremultiplied = aDestination.mPremultipliedAlpha;
+ break;
+ }
+
+ const auto surfaceFormat = dataSurface->GetFormat();
+ const auto srcFormat = ToWebGLTexelFormat(surfaceFormat);
+ if (srcFormat == WebGLTexelFormat::FormatNotSupportingAnyConversion) {
+ gfxCriticalError() << "Unsupported surface format from source "
+ << surfaceFormat;
+ MOZ_CRASH();
+ }
+
+ gfx::DataSourceSurface::ScopedMap map(dataSurface,
+ gfx::DataSourceSurface::READ);
+ if (!map.IsMapped()) {
+ aRv.ThrowInvalidStateError("Cannot map surface from source");
+ return;
+ }
+
+ if (!aSource.mOrigin.IsGPUOrigin2DDict()) {
+ aRv.ThrowInvalidStateError("Cannot get origin from source");
+ return;
+ }
+
+ ffi::WGPUExtent3d extent = {};
+ ConvertExtent3DToFFI(aCopySize, &extent);
+ if (extent.depth_or_array_layers > 1) {
+ aRv.ThrowOperationError("Depth is greater than 1");
+ return;
+ }
+
+ uint32_t srcOriginX;
+ uint32_t srcOriginY;
+ if (aSource.mOrigin.IsRangeEnforcedUnsignedLongSequence()) {
+ const auto& seq = aSource.mOrigin.GetAsRangeEnforcedUnsignedLongSequence();
+ srcOriginX = seq.Length() > 0 ? seq[0] : 0;
+ srcOriginY = seq.Length() > 1 ? seq[1] : 0;
+ } else if (aSource.mOrigin.IsGPUOrigin2DDict()) {
+ const auto& dict = aSource.mOrigin.GetAsGPUOrigin2DDict();
+ srcOriginX = dict.mX;
+ srcOriginY = dict.mY;
+ } else {
+ MOZ_CRASH("Unexpected origin type!");
+ }
+
+ const auto checkedMaxWidth = CheckedInt<uint32_t>(srcOriginX) + extent.width;
+ const auto checkedMaxHeight =
+ CheckedInt<uint32_t>(srcOriginY) + extent.height;
+ if (!checkedMaxWidth.isValid() || !checkedMaxHeight.isValid()) {
+ aRv.ThrowOperationError("Offset and copy size exceed integer bounds");
+ return;
+ }
+
+ const gfx::IntSize surfaceSize = dataSurface->GetSize();
+ const auto surfaceWidth = AssertedCast<uint32_t>(surfaceSize.width);
+ const auto surfaceHeight = AssertedCast<uint32_t>(surfaceSize.height);
+ if (surfaceWidth < checkedMaxWidth.value() ||
+ surfaceHeight < checkedMaxHeight.value()) {
+ aRv.ThrowOperationError("Offset and copy size exceed surface bounds");
+ return;
+ }
+
+ const auto dstWidth = extent.width;
+ const auto dstHeight = extent.height;
+ if (dstWidth == 0 || dstHeight == 0) {
+ aRv.ThrowOperationError("Destination size is empty");
+ return;
+ }
+
+ if (!aDestination.mTexture->mBytesPerBlock) {
+    // TODO(bug 1781071) This should emit a GPUValidationError on the device
+ // timeline.
+ aRv.ThrowInvalidStateError("Invalid destination format");
+ return;
+ }
+
+ // Note: This assumes bytes per block == bytes per pixel which is the case
+ // here because the spec only allows non-compressed texture formats for the
+ // destination.
+ const auto dstStride = CheckedInt<uint32_t>(extent.width) *
+ aDestination.mTexture->mBytesPerBlock.value();
+ const auto dstByteLength = dstStride * extent.height;
+ if (!dstStride.isValid() || !dstByteLength.isValid()) {
+ aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
+ return;
+ }
+
+ auto alloc = mozilla::ipc::UnsafeSharedMemoryHandle::CreateAndMap(
+ dstByteLength.value());
+ if (alloc.isNothing()) {
+ aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
+ return;
+ }
+
+ auto handle = std::move(alloc.ref().first);
+ auto mapping = std::move(alloc.ref().second);
+
+ const int32_t pixelSize = gfx::BytesPerPixel(surfaceFormat);
+ auto* dstBegin = mapping.Bytes().data();
+ const auto* srcBegin =
+ map.GetData() + srcOriginX * pixelSize + srcOriginY * map.GetStride();
+ const auto srcOriginPos = gl::OriginPos::TopLeft;
+ const auto srcStride = AssertedCast<uint32_t>(map.GetStride());
+ const auto dstOriginPos =
+ aSource.mFlipY ? gl::OriginPos::BottomLeft : gl::OriginPos::TopLeft;
+ bool wasTrivial;
+
+ auto dstStrideVal = dstStride.value();
+
+ if (!ConvertImage(dstWidth, dstHeight, srcBegin, srcStride, srcOriginPos,
+ srcFormat, srcPremultiplied, dstBegin, dstStrideVal,
+ dstOriginPos, dstFormat, aDestination.mPremultipliedAlpha,
+ &wasTrivial)) {
+ MOZ_ASSERT_UNREACHABLE("ConvertImage failed!");
+ aRv.ThrowInvalidStateError(
+ nsPrintfCString("Failed to convert source to destination format "
+ "(%i/%i), please file a bug!",
+ (int)srcFormat, (int)dstFormat));
+ return;
+ }
+
+ ffi::WGPUImageDataLayout dataLayout = {0, &dstStrideVal, &dstHeight};
+ ffi::WGPUImageCopyTexture copyView = {};
+ CommandEncoder::ConvertTextureCopyViewToFFI(aDestination, &copyView);
+ ipc::ByteBuf bb;
+ ffi::wgpu_queue_write_texture(copyView, dataLayout, extent, ToFFI(&bb));
+ mBridge->SendQueueWriteAction(mId, mParent->mId, std::move(bb),
+ std::move(handle));
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/Queue.h b/dom/webgpu/Queue.h
new file mode 100644
index 0000000000..d8cc890ff0
--- /dev/null
+++ b/dom/webgpu/Queue.h
@@ -0,0 +1,76 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_Queue_H_
+#define GPU_Queue_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+#include "mozilla/dom/TypedArray.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+
+namespace mozilla {
+class ErrorResult;
+namespace dom {
+class RangeEnforcedUnsignedLongSequenceOrGPUExtent3DDict;
+class ArrayBufferViewOrArrayBuffer;
+template <typename T>
+class Optional;
+template <typename T>
+class Sequence;
+struct GPUImageCopyExternalImage;
+struct GPUImageCopyTexture;
+struct GPUImageCopyTextureTagged;
+struct GPUImageDataLayout;
+struct TextureCopyView;
+struct TextureDataLayout;
+using GPUExtent3D = RangeEnforcedUnsignedLongSequenceOrGPUExtent3DDict;
+} // namespace dom
+namespace webgpu {
+
+class Buffer;
+class CommandBuffer;
+class Device;
+class Fence;
+
+class Queue final : public ObjectBase, public ChildOf<Device> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(Queue)
+ GPU_DECL_JS_WRAP(Queue)
+
+ Queue(Device* const aParent, WebGPUChild* aBridge, RawId aId);
+
+ void Submit(
+ const dom::Sequence<OwningNonNull<CommandBuffer>>& aCommandBuffers);
+
+ already_AddRefed<dom::Promise> OnSubmittedWorkDone(ErrorResult& aRv);
+
+ void WriteBuffer(const Buffer& aBuffer, uint64_t aBufferOffset,
+ const dom::ArrayBufferViewOrArrayBuffer& aData,
+ uint64_t aDataOffset, const dom::Optional<uint64_t>& aSize,
+ ErrorResult& aRv);
+
+ void WriteTexture(const dom::GPUImageCopyTexture& aDestination,
+ const dom::ArrayBufferViewOrArrayBuffer& aData,
+ const dom::GPUImageDataLayout& aDataLayout,
+ const dom::GPUExtent3D& aSize, ErrorResult& aRv);
+
+ void CopyExternalImageToTexture(
+ const dom::GPUImageCopyExternalImage& aSource,
+ const dom::GPUImageCopyTextureTagged& aDestination,
+ const dom::GPUExtent3D& aCopySize, ErrorResult& aRv);
+
+ private:
+ virtual ~Queue();
+ void Cleanup() {}
+
+ RefPtr<WebGPUChild> mBridge;
+ const RawId mId;
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_Queue_H_
diff --git a/dom/webgpu/RenderBundle.cpp b/dom/webgpu/RenderBundle.cpp
new file mode 100644
index 0000000000..a60d8d5f8e
--- /dev/null
+++ b/dom/webgpu/RenderBundle.cpp
@@ -0,0 +1,35 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "RenderBundle.h"
+
+#include "Device.h"
+#include "ipc/WebGPUChild.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(RenderBundle, mParent)
+GPU_IMPL_JS_WRAP(RenderBundle)
+
+RenderBundle::RenderBundle(Device* const aParent, RawId aId)
+ : ChildOf(aParent), mId(aId) {
+ // TODO: we may be running into this if we finish an encoder twice.
+ MOZ_RELEASE_ASSERT(aId);
+}
+
+RenderBundle::~RenderBundle() { Cleanup(); }
+
+void RenderBundle::Cleanup() {
+ if (mValid && mParent) {
+ mValid = false;
+ auto bridge = mParent->GetBridge();
+ if (bridge && bridge->IsOpen()) {
+ bridge->SendRenderBundleDrop(mId);
+ }
+ }
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/RenderBundle.h b/dom/webgpu/RenderBundle.h
new file mode 100644
index 0000000000..0fef6af781
--- /dev/null
+++ b/dom/webgpu/RenderBundle.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_RenderBundle_H_
+#define GPU_RenderBundle_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+
+namespace mozilla::webgpu {
+
+class Device;
+
+class RenderBundle final : public ObjectBase, public ChildOf<Device> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(RenderBundle)
+ GPU_DECL_JS_WRAP(RenderBundle)
+
+ RenderBundle(Device* const aParent, RawId aId);
+
+ const RawId mId;
+
+ private:
+ virtual ~RenderBundle();
+ void Cleanup();
+};
+
+} // namespace mozilla::webgpu
+
+#endif // GPU_RenderBundle_H_
diff --git a/dom/webgpu/RenderBundleEncoder.cpp b/dom/webgpu/RenderBundleEncoder.cpp
new file mode 100644
index 0000000000..54ebf12d64
--- /dev/null
+++ b/dom/webgpu/RenderBundleEncoder.cpp
@@ -0,0 +1,211 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "RenderBundleEncoder.h"
+
+#include "BindGroup.h"
+#include "Buffer.h"
+#include "RenderBundle.h"
+#include "RenderPipeline.h"
+#include "Utility.h"
+#include "ipc/WebGPUChild.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(RenderBundleEncoder, mParent, mUsedBindGroups,
+ mUsedBuffers, mUsedPipelines, mUsedTextureViews)
+GPU_IMPL_JS_WRAP(RenderBundleEncoder)
+
+void ffiWGPURenderBundleEncoderDeleter::operator()(
+ ffi::WGPURenderBundleEncoder* raw) {
+ if (raw) {
+ ffi::wgpu_render_bundle_encoder_destroy(raw);
+ }
+}
+
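+// Builds the FFI descriptor and creates the client-side bundle encoder.
+// Returns null if the IPC bridge cannot send, or if wgpu rejects the
+// descriptor (in which case the prepared error action is sent to the device).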
+ffi::WGPURenderBundleEncoder* CreateRenderBundleEncoder(
+ RawId aDeviceId, const dom::GPURenderBundleEncoderDescriptor& aDesc,
+ WebGPUChild* const aBridge) {
+ if (!aBridge->CanSend()) {
+ return nullptr;
+ }
+
+ ffi::WGPURenderBundleEncoderDescriptor desc = {};
+ desc.sample_count = aDesc.mSampleCount;
+
+ webgpu::StringHelper label(aDesc.mLabel);
+ desc.label = label.Get();
+
+ ffi::WGPUTextureFormat depthStencilFormat = {ffi::WGPUTextureFormat_Sentinel};
+ if (aDesc.mDepthStencilFormat.WasPassed()) {
+ depthStencilFormat =
+ ConvertTextureFormat(aDesc.mDepthStencilFormat.Value());
+ desc.depth_stencil_format = &depthStencilFormat;
+ }
+
+ std::vector<ffi::WGPUTextureFormat> colorFormats = {};
+ for (const auto i : IntegerRange(aDesc.mColorFormats.Length())) {
+ ffi::WGPUTextureFormat format = {ffi::WGPUTextureFormat_Sentinel};
+ format = ConvertTextureFormat(aDesc.mColorFormats[i]);
+ colorFormats.push_back(format);
+ }
+
+ desc.color_formats = colorFormats.data();
+ desc.color_formats_length = colorFormats.size();
+
+ ipc::ByteBuf failureAction;
+ auto* bundle = ffi::wgpu_device_create_render_bundle_encoder(
+ aDeviceId, &desc, ToFFI(&failureAction));
+ // Report an error only if the operation failed.
+ if (!bundle) {
+ aBridge->SendDeviceAction(aDeviceId, std::move(failureAction));
+ }
+ return bundle;
+}
+
+RenderBundleEncoder::RenderBundleEncoder(
+ Device* const aParent, WebGPUChild* const aBridge,
+ const dom::GPURenderBundleEncoderDescriptor& aDesc)
+ : ChildOf(aParent),
+ mEncoder(CreateRenderBundleEncoder(aParent->mId, aDesc, aBridge)) {
+ mValid = mEncoder.get() != nullptr;
+}
+
+RenderBundleEncoder::~RenderBundleEncoder() { Cleanup(); }
+
+void RenderBundleEncoder::Cleanup() {
+ if (mValid) {
+ mValid = false;
+ }
+}
+
+void RenderBundleEncoder::SetBindGroup(
+ uint32_t aSlot, const BindGroup& aBindGroup,
+ const dom::Sequence<uint32_t>& aDynamicOffsets) {
+ if (mValid) {
+ mUsedBindGroups.AppendElement(&aBindGroup);
+ ffi::wgpu_render_bundle_set_bind_group(
+ mEncoder.get(), aSlot, aBindGroup.mId, aDynamicOffsets.Elements(),
+ aDynamicOffsets.Length());
+ }
+}
+
+void RenderBundleEncoder::SetPipeline(const RenderPipeline& aPipeline) {
+ if (mValid) {
+ mUsedPipelines.AppendElement(&aPipeline);
+ ffi::wgpu_render_bundle_set_pipeline(mEncoder.get(), aPipeline.mId);
+ }
+}
+
+void RenderBundleEncoder::SetIndexBuffer(
+ const Buffer& aBuffer, const dom::GPUIndexFormat& aIndexFormat,
+ uint64_t aOffset, uint64_t aSize) {
+ if (mValid) {
+ mUsedBuffers.AppendElement(&aBuffer);
+ const auto iformat = aIndexFormat == dom::GPUIndexFormat::Uint32
+ ? ffi::WGPUIndexFormat_Uint32
+ : ffi::WGPUIndexFormat_Uint16;
+ ffi::wgpu_render_bundle_set_index_buffer(mEncoder.get(), aBuffer.mId,
+ iformat, aOffset, aSize);
+ }
+}
+
+void RenderBundleEncoder::SetVertexBuffer(uint32_t aSlot, const Buffer& aBuffer,
+ uint64_t aOffset, uint64_t aSize) {
+ if (mValid) {
+ mUsedBuffers.AppendElement(&aBuffer);
+ ffi::wgpu_render_bundle_set_vertex_buffer(mEncoder.get(), aSlot,
+ aBuffer.mId, aOffset, aSize);
+ }
+}
+
+void RenderBundleEncoder::Draw(uint32_t aVertexCount, uint32_t aInstanceCount,
+ uint32_t aFirstVertex, uint32_t aFirstInstance) {
+ if (mValid) {
+ ffi::wgpu_render_bundle_draw(mEncoder.get(), aVertexCount, aInstanceCount,
+ aFirstVertex, aFirstInstance);
+ }
+}
+
+void RenderBundleEncoder::DrawIndexed(uint32_t aIndexCount,
+ uint32_t aInstanceCount,
+ uint32_t aFirstIndex, int32_t aBaseVertex,
+ uint32_t aFirstInstance) {
+ if (mValid) {
+ ffi::wgpu_render_bundle_draw_indexed(mEncoder.get(), aIndexCount,
+ aInstanceCount, aFirstIndex,
+ aBaseVertex, aFirstInstance);
+ }
+}
+
+void RenderBundleEncoder::DrawIndirect(const Buffer& aIndirectBuffer,
+ uint64_t aIndirectOffset) {
+ if (mValid) {
+ ffi::wgpu_render_bundle_draw_indirect(mEncoder.get(), aIndirectBuffer.mId,
+ aIndirectOffset);
+ }
+}
+
+void RenderBundleEncoder::DrawIndexedIndirect(const Buffer& aIndirectBuffer,
+ uint64_t aIndirectOffset) {
+ if (mValid) {
+ ffi::wgpu_render_bundle_draw_indexed_indirect(
+ mEncoder.get(), aIndirectBuffer.mId, aIndirectOffset);
+ }
+}
+
+void RenderBundleEncoder::PushDebugGroup(const nsAString& aString) {
+ if (mValid) {
+ const NS_ConvertUTF16toUTF8 utf8(aString);
+ ffi::wgpu_render_bundle_push_debug_group(mEncoder.get(), utf8.get());
+ }
+}
+void RenderBundleEncoder::PopDebugGroup() {
+ if (mValid) {
+ ffi::wgpu_render_bundle_pop_debug_group(mEncoder.get());
+ }
+}
+void RenderBundleEncoder::InsertDebugMarker(const nsAString& aString) {
+ if (mValid) {
+ const NS_ConvertUTF16toUTF8 utf8(aString);
+ ffi::wgpu_render_bundle_insert_debug_marker(mEncoder.get(), utf8.get());
+ }
+}
+
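+// Finishing consumes the underlying FFI encoder. If encoding has already
+// failed, an error render bundle id is allocated via
+// wgpu_client_create_render_bundle_error instead.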
+already_AddRefed<RenderBundle> RenderBundleEncoder::Finish(
+ const dom::GPURenderBundleDescriptor& aDesc) {
+ RawId deviceId = mParent->mId;
+ auto bridge = mParent->GetBridge();
+ MOZ_RELEASE_ASSERT(bridge);
+
+ ffi::WGPURenderBundleDescriptor desc = {};
+ webgpu::StringHelper label(aDesc.mLabel);
+ desc.label = label.Get();
+
+ ipc::ByteBuf bb;
+ RawId id;
+ if (mValid) {
+ mValid = false;
+
+ auto* encoder = mEncoder.release();
+ id = ffi::wgpu_client_create_render_bundle(bridge->GetClient(), encoder,
+ deviceId, &desc, ToFFI(&bb));
+
+ } else {
+ id = ffi::wgpu_client_create_render_bundle_error(
+ bridge->GetClient(), deviceId, label.Get(), ToFFI(&bb));
+ }
+
+ if (bridge->CanSend()) {
+ bridge->SendDeviceAction(deviceId, std::move(bb));
+ }
+
+ RefPtr<RenderBundle> bundle = new RenderBundle(mParent, id);
+ return bundle.forget();
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/RenderBundleEncoder.h b/dom/webgpu/RenderBundleEncoder.h
new file mode 100644
index 0000000000..d21a26b833
--- /dev/null
+++ b/dom/webgpu/RenderBundleEncoder.h
@@ -0,0 +1,74 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_RenderBundleEncoder_H_
+#define GPU_RenderBundleEncoder_H_
+
+#include "mozilla/dom/TypedArray.h"
+#include "ObjectModel.h"
+
+namespace mozilla::webgpu {
+namespace ffi {
+struct WGPURenderBundleEncoder;
+} // namespace ffi
+
+class BindGroup;
+class Buffer;
+class Device;
+class RenderBundle;
+class RenderPipeline;
+class TextureView;
+
+struct ffiWGPURenderBundleEncoderDeleter {
+ void operator()(ffi::WGPURenderBundleEncoder*);
+};
+
+class RenderBundleEncoder final : public ObjectBase, public ChildOf<Device> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(RenderBundleEncoder)
+ GPU_DECL_JS_WRAP(RenderBundleEncoder)
+
+ RenderBundleEncoder(Device* const aParent, WebGPUChild* const aBridge,
+ const dom::GPURenderBundleEncoderDescriptor& aDesc);
+
+ private:
+ ~RenderBundleEncoder();
+ void Cleanup();
+
+  std::unique_ptr<ffi::WGPURenderBundleEncoder,
+                  ffiWGPURenderBundleEncoderDeleter>
+      mEncoder;
+  // keep all the used objects alive until the encoder is finished
+ nsTArray<RefPtr<const BindGroup>> mUsedBindGroups;
+ nsTArray<RefPtr<const Buffer>> mUsedBuffers;
+ nsTArray<RefPtr<const RenderPipeline>> mUsedPipelines;
+ nsTArray<RefPtr<const TextureView>> mUsedTextureViews;
+
+ public:
+ // programmable pass encoder
+ void SetBindGroup(uint32_t aSlot, const BindGroup& aBindGroup,
+ const dom::Sequence<uint32_t>& aDynamicOffsets);
+ // render encoder base
+ void SetPipeline(const RenderPipeline& aPipeline);
+ void SetIndexBuffer(const Buffer& aBuffer,
+ const dom::GPUIndexFormat& aIndexFormat, uint64_t aOffset,
+ uint64_t aSize);
+ void SetVertexBuffer(uint32_t aSlot, const Buffer& aBuffer, uint64_t aOffset,
+ uint64_t aSize);
+ void Draw(uint32_t aVertexCount, uint32_t aInstanceCount,
+ uint32_t aFirstVertex, uint32_t aFirstInstance);
+ void DrawIndexed(uint32_t aIndexCount, uint32_t aInstanceCount,
+ uint32_t aFirstIndex, int32_t aBaseVertex,
+ uint32_t aFirstInstance);
+ void DrawIndirect(const Buffer& aIndirectBuffer, uint64_t aIndirectOffset);
+ void DrawIndexedIndirect(const Buffer& aIndirectBuffer,
+ uint64_t aIndirectOffset);
+
+ void PushDebugGroup(const nsAString& aString);
+ void PopDebugGroup();
+ void InsertDebugMarker(const nsAString& aString);
+
+ // self
+ already_AddRefed<RenderBundle> Finish(
+ const dom::GPURenderBundleDescriptor& aDesc);
+};
+
+} // namespace mozilla::webgpu
+
+#endif // GPU_RenderBundleEncoder_H_
diff --git a/dom/webgpu/RenderPassEncoder.cpp b/dom/webgpu/RenderPassEncoder.cpp
new file mode 100644
index 0000000000..c5cb19ce15
--- /dev/null
+++ b/dom/webgpu/RenderPassEncoder.cpp
@@ -0,0 +1,328 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "RenderPassEncoder.h"
+#include "BindGroup.h"
+#include "CommandEncoder.h"
+#include "RenderBundle.h"
+#include "RenderPipeline.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(RenderPassEncoder, mParent, mUsedBindGroups,
+ mUsedBuffers, mUsedPipelines, mUsedTextureViews,
+ mUsedRenderBundles)
+GPU_IMPL_JS_WRAP(RenderPassEncoder)
+
+void ffiWGPURenderPassDeleter::operator()(ffi::WGPURenderPass* raw) {
+ if (raw) {
+ ffi::wgpu_render_pass_destroy(raw);
+ }
+}
+
+static ffi::WGPULoadOp ConvertLoadOp(const dom::GPULoadOp& aOp) {
+ switch (aOp) {
+ case dom::GPULoadOp::Load:
+ return ffi::WGPULoadOp_Load;
+ case dom::GPULoadOp::Clear:
+ return ffi::WGPULoadOp_Clear;
+ case dom::GPULoadOp::EndGuard_:
+ break;
+ }
+ MOZ_CRASH("bad GPULoadOp");
+}
+
+static ffi::WGPUStoreOp ConvertStoreOp(const dom::GPUStoreOp& aOp) {
+ switch (aOp) {
+ case dom::GPUStoreOp::Store:
+ return ffi::WGPUStoreOp_Store;
+ case dom::GPUStoreOp::Discard:
+ return ffi::WGPUStoreOp_Discard;
+ case dom::GPUStoreOp::EndGuard_:
+ break;
+ }
+ MOZ_CRASH("bad GPUStoreOp");
+}
+
+static ffi::WGPUColor ConvertColor(const dom::Sequence<double>& aSeq) {
+ ffi::WGPUColor color;
+ color.r = aSeq.SafeElementAt(0, 0.0);
+ color.g = aSeq.SafeElementAt(1, 0.0);
+ color.b = aSeq.SafeElementAt(2, 0.0);
+ color.a = aSeq.SafeElementAt(3, 1.0);
+ return color;
+}
+
+static ffi::WGPUColor ConvertColor(const dom::GPUColorDict& aColor) {
+ ffi::WGPUColor color = {aColor.mR, aColor.mG, aColor.mB, aColor.mA};
+ return color;
+}
+
+static ffi::WGPUColor ConvertColor(
+ const dom::DoubleSequenceOrGPUColorDict& aColor) {
+ if (aColor.IsDoubleSequence()) {
+ return ConvertColor(aColor.GetAsDoubleSequence());
+ }
+ if (aColor.IsGPUColorDict()) {
+ return ConvertColor(aColor.GetAsGPUColorDict());
+ }
+ MOZ_ASSERT_UNREACHABLE(
+ "Unexpected dom::DoubleSequenceOrGPUColorDict variant");
+ return ffi::WGPUColor();
+}
+static ffi::WGPUColor ConvertColor(
+ const dom::OwningDoubleSequenceOrGPUColorDict& aColor) {
+ if (aColor.IsDoubleSequence()) {
+ return ConvertColor(aColor.GetAsDoubleSequence());
+ }
+ if (aColor.IsGPUColorDict()) {
+ return ConvertColor(aColor.GetAsGPUColorDict());
+ }
+ MOZ_ASSERT_UNREACHABLE(
+ "Unexpected dom::OwningDoubleSequenceOrGPUColorDict variant");
+ return ffi::WGPUColor();
+}
+
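+// Builds the FFI render pass descriptor from the DOM descriptor and begins the
+// pass on the parent command encoder. Returns null (after raising a validation
+// error) when more than WGPUMAX_COLOR_ATTACHMENTS color attachments are given.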
+ffi::WGPURenderPass* BeginRenderPass(
+ CommandEncoder* const aParent, const dom::GPURenderPassDescriptor& aDesc) {
+ ffi::WGPURenderPassDescriptor desc = {};
+
+ webgpu::StringHelper label(aDesc.mLabel);
+ desc.label = label.Get();
+
+ ffi::WGPURenderPassDepthStencilAttachment dsDesc = {};
+ if (aDesc.mDepthStencilAttachment.WasPassed()) {
+ const auto& dsa = aDesc.mDepthStencilAttachment.Value();
+ dsDesc.view = dsa.mView->mId;
+
+ // -
+
+ if (dsa.mDepthClearValue.WasPassed()) {
+ dsDesc.depth.clear_value = dsa.mDepthClearValue.Value();
+ }
+ if (dsa.mDepthLoadOp.WasPassed()) {
+ dsDesc.depth.load_op = ConvertLoadOp(dsa.mDepthLoadOp.Value());
+ }
+ if (dsa.mDepthStoreOp.WasPassed()) {
+ dsDesc.depth.store_op = ConvertStoreOp(dsa.mDepthStoreOp.Value());
+ }
+ dsDesc.depth.read_only = dsa.mDepthReadOnly;
+
+ // -
+
+ dsDesc.stencil.clear_value = dsa.mStencilClearValue;
+ if (dsa.mStencilLoadOp.WasPassed()) {
+ dsDesc.stencil.load_op = ConvertLoadOp(dsa.mStencilLoadOp.Value());
+ }
+ if (dsa.mStencilStoreOp.WasPassed()) {
+ dsDesc.stencil.store_op = ConvertStoreOp(dsa.mStencilStoreOp.Value());
+ }
+ dsDesc.stencil.read_only = dsa.mStencilReadOnly;
+
+ // -
+
+ desc.depth_stencil_attachment = &dsDesc;
+ }
+
+ if (aDesc.mColorAttachments.Length() > WGPUMAX_COLOR_ATTACHMENTS) {
+ aParent->GetDevice()->GenerateValidationError(nsLiteralCString(
+ "Too many color attachments in GPURenderPassDescriptor"));
+ return nullptr;
+ }
+
+ std::array<ffi::WGPURenderPassColorAttachment, WGPUMAX_COLOR_ATTACHMENTS>
+ colorDescs = {};
+ desc.color_attachments = colorDescs.data();
+ desc.color_attachments_length = aDesc.mColorAttachments.Length();
+
+ for (size_t i = 0; i < aDesc.mColorAttachments.Length(); ++i) {
+ const auto& ca = aDesc.mColorAttachments[i];
+ ffi::WGPURenderPassColorAttachment& cd = colorDescs[i];
+ cd.view = ca.mView->mId;
+ cd.channel.store_op = ConvertStoreOp(ca.mStoreOp);
+
+ if (ca.mResolveTarget.WasPassed()) {
+ cd.resolve_target = ca.mResolveTarget.Value().mId;
+ }
+
+ cd.channel.load_op = ConvertLoadOp(ca.mLoadOp);
+ if (ca.mClearValue.WasPassed()) {
+ cd.channel.clear_value = ConvertColor(ca.mClearValue.Value());
+ }
+ }
+
+ return ffi::wgpu_command_encoder_begin_render_pass(aParent->mId, &desc);
+}
+
+RenderPassEncoder::RenderPassEncoder(CommandEncoder* const aParent,
+ const dom::GPURenderPassDescriptor& aDesc)
+ : ChildOf(aParent), mPass(BeginRenderPass(aParent, aDesc)) {
+ if (!mPass) {
+ mValid = false;
+ return;
+ }
+
+ for (const auto& at : aDesc.mColorAttachments) {
+ mUsedTextureViews.AppendElement(at.mView);
+ }
+ if (aDesc.mDepthStencilAttachment.WasPassed()) {
+ mUsedTextureViews.AppendElement(
+ aDesc.mDepthStencilAttachment.Value().mView);
+ }
+}
+
+RenderPassEncoder::~RenderPassEncoder() {
+ if (mValid) {
+ mValid = false;
+ }
+}
+
+void RenderPassEncoder::SetBindGroup(
+ uint32_t aSlot, const BindGroup& aBindGroup,
+ const dom::Sequence<uint32_t>& aDynamicOffsets) {
+ if (mValid) {
+ mUsedBindGroups.AppendElement(&aBindGroup);
+ ffi::wgpu_render_pass_set_bind_group(mPass.get(), aSlot, aBindGroup.mId,
+ aDynamicOffsets.Elements(),
+ aDynamicOffsets.Length());
+ }
+}
+
+void RenderPassEncoder::SetPipeline(const RenderPipeline& aPipeline) {
+ if (mValid) {
+ mUsedPipelines.AppendElement(&aPipeline);
+ ffi::wgpu_render_pass_set_pipeline(mPass.get(), aPipeline.mId);
+ }
+}
+
+void RenderPassEncoder::SetIndexBuffer(const Buffer& aBuffer,
+ const dom::GPUIndexFormat& aIndexFormat,
+ uint64_t aOffset, uint64_t aSize) {
+ if (mValid) {
+ mUsedBuffers.AppendElement(&aBuffer);
+ const auto iformat = aIndexFormat == dom::GPUIndexFormat::Uint32
+ ? ffi::WGPUIndexFormat_Uint32
+ : ffi::WGPUIndexFormat_Uint16;
+ ffi::wgpu_render_pass_set_index_buffer(mPass.get(), aBuffer.mId, iformat,
+ aOffset, aSize);
+ }
+}
+
+void RenderPassEncoder::SetVertexBuffer(uint32_t aSlot, const Buffer& aBuffer,
+ uint64_t aOffset, uint64_t aSize) {
+ if (mValid) {
+ mUsedBuffers.AppendElement(&aBuffer);
+ ffi::wgpu_render_pass_set_vertex_buffer(mPass.get(), aSlot, aBuffer.mId,
+ aOffset, aSize);
+ }
+}
+
+void RenderPassEncoder::Draw(uint32_t aVertexCount, uint32_t aInstanceCount,
+ uint32_t aFirstVertex, uint32_t aFirstInstance) {
+ if (mValid) {
+ ffi::wgpu_render_pass_draw(mPass.get(), aVertexCount, aInstanceCount,
+ aFirstVertex, aFirstInstance);
+ }
+}
+
+void RenderPassEncoder::DrawIndexed(uint32_t aIndexCount,
+ uint32_t aInstanceCount,
+ uint32_t aFirstIndex, int32_t aBaseVertex,
+ uint32_t aFirstInstance) {
+ if (mValid) {
+ ffi::wgpu_render_pass_draw_indexed(mPass.get(), aIndexCount, aInstanceCount,
+ aFirstIndex, aBaseVertex,
+ aFirstInstance);
+ }
+}
+
+void RenderPassEncoder::DrawIndirect(const Buffer& aIndirectBuffer,
+ uint64_t aIndirectOffset) {
+ if (mValid) {
+ ffi::wgpu_render_pass_draw_indirect(mPass.get(), aIndirectBuffer.mId,
+ aIndirectOffset);
+ }
+}
+
+void RenderPassEncoder::DrawIndexedIndirect(const Buffer& aIndirectBuffer,
+ uint64_t aIndirectOffset) {
+ if (mValid) {
+ ffi::wgpu_render_pass_draw_indexed_indirect(
+ mPass.get(), aIndirectBuffer.mId, aIndirectOffset);
+ }
+}
+
+void RenderPassEncoder::SetViewport(float x, float y, float width, float height,
+ float minDepth, float maxDepth) {
+ if (mValid) {
+ ffi::wgpu_render_pass_set_viewport(mPass.get(), x, y, width, height,
+ minDepth, maxDepth);
+ }
+}
+
+void RenderPassEncoder::SetScissorRect(uint32_t x, uint32_t y, uint32_t width,
+ uint32_t height) {
+ if (mValid) {
+ ffi::wgpu_render_pass_set_scissor_rect(mPass.get(), x, y, width, height);
+ }
+}
+
+void RenderPassEncoder::SetBlendConstant(
+ const dom::DoubleSequenceOrGPUColorDict& color) {
+ if (mValid) {
+ ffi::WGPUColor aColor = ConvertColor(color);
+ ffi::wgpu_render_pass_set_blend_constant(mPass.get(), &aColor);
+ }
+}
+
+void RenderPassEncoder::SetStencilReference(uint32_t reference) {
+ if (mValid) {
+ ffi::wgpu_render_pass_set_stencil_reference(mPass.get(), reference);
+ }
+}
+
+void RenderPassEncoder::ExecuteBundles(
+ const dom::Sequence<OwningNonNull<RenderBundle>>& aBundles) {
+ if (mValid) {
+ nsTArray<ffi::WGPURenderBundleId> renderBundles(aBundles.Length());
+ for (const auto& bundle : aBundles) {
+ mUsedRenderBundles.AppendElement(bundle);
+ renderBundles.AppendElement(bundle->mId);
+ }
+ ffi::wgpu_render_pass_execute_bundles(mPass.get(), renderBundles.Elements(),
+ renderBundles.Length());
+ }
+}
+
+void RenderPassEncoder::PushDebugGroup(const nsAString& aString) {
+ if (mValid) {
+ const NS_ConvertUTF16toUTF8 utf8(aString);
+ ffi::wgpu_render_pass_push_debug_group(mPass.get(), utf8.get(), 0);
+ }
+}
+void RenderPassEncoder::PopDebugGroup() {
+ if (mValid) {
+ ffi::wgpu_render_pass_pop_debug_group(mPass.get());
+ }
+}
+void RenderPassEncoder::InsertDebugMarker(const nsAString& aString) {
+ if (mValid) {
+ const NS_ConvertUTF16toUTF8 utf8(aString);
+ ffi::wgpu_render_pass_insert_debug_marker(mPass.get(), utf8.get(), 0);
+ }
+}
+
+void RenderPassEncoder::End() {
+ if (mValid) {
+ mValid = false;
+ auto* pass = mPass.release();
+ MOZ_ASSERT(pass);
+ mParent->EndRenderPass(*pass);
+ }
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/RenderPassEncoder.h b/dom/webgpu/RenderPassEncoder.h
new file mode 100644
index 0000000000..5ca414b4ea
--- /dev/null
+++ b/dom/webgpu/RenderPassEncoder.h
@@ -0,0 +1,101 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_RenderPassEncoder_H_
+#define GPU_RenderPassEncoder_H_
+
+#include "mozilla/dom/TypedArray.h"
+#include "ObjectModel.h"
+
+namespace mozilla {
+class ErrorResult;
+
+namespace dom {
+class DoubleSequenceOrGPUColorDict;
+struct GPURenderPassDescriptor;
+template <typename T>
+class Sequence;
+namespace binding_detail {
+template <typename T>
+class AutoSequence;
+} // namespace binding_detail
+} // namespace dom
+namespace webgpu {
+namespace ffi {
+struct WGPURenderPass;
+} // namespace ffi
+
+class BindGroup;
+class Buffer;
+class CommandEncoder;
+class RenderBundle;
+class RenderPipeline;
+class TextureView;
+
+struct ffiWGPURenderPassDeleter {
+ void operator()(ffi::WGPURenderPass*);
+};
+
+class RenderPassEncoder final : public ObjectBase,
+ public ChildOf<CommandEncoder> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(RenderPassEncoder)
+ GPU_DECL_JS_WRAP(RenderPassEncoder)
+
+ RenderPassEncoder(CommandEncoder* const aParent,
+ const dom::GPURenderPassDescriptor& aDesc);
+
+ protected:
+ virtual ~RenderPassEncoder();
+ void Cleanup() {}
+
+ std::unique_ptr<ffi::WGPURenderPass, ffiWGPURenderPassDeleter> mPass;
+ // keep all the used objects alive while the pass is recorded
+ nsTArray<RefPtr<const BindGroup>> mUsedBindGroups;
+ nsTArray<RefPtr<const Buffer>> mUsedBuffers;
+ nsTArray<RefPtr<const RenderPipeline>> mUsedPipelines;
+ nsTArray<RefPtr<const TextureView>> mUsedTextureViews;
+ nsTArray<RefPtr<const RenderBundle>> mUsedRenderBundles;
+
+ public:
+ // programmable pass encoder
+ void SetBindGroup(uint32_t aSlot, const BindGroup& aBindGroup,
+ const dom::Sequence<uint32_t>& aDynamicOffsets);
+ // render encoder base
+ void SetPipeline(const RenderPipeline& aPipeline);
+ void SetIndexBuffer(const Buffer& aBuffer,
+ const dom::GPUIndexFormat& aIndexFormat, uint64_t aOffset,
+ uint64_t aSize);
+ void SetVertexBuffer(uint32_t aSlot, const Buffer& aBuffer, uint64_t aOffset,
+ uint64_t aSize);
+ void Draw(uint32_t aVertexCount, uint32_t aInstanceCount,
+ uint32_t aFirstVertex, uint32_t aFirstInstance);
+ void DrawIndexed(uint32_t aIndexCount, uint32_t aInstanceCount,
+ uint32_t aFirstIndex, int32_t aBaseVertex,
+ uint32_t aFirstInstance);
+ void DrawIndirect(const Buffer& aIndirectBuffer, uint64_t aIndirectOffset);
+ void DrawIndexedIndirect(const Buffer& aIndirectBuffer,
+ uint64_t aIndirectOffset);
+ // self
+ void SetViewport(float x, float y, float width, float height, float minDepth,
+ float maxDepth);
+ void SetScissorRect(uint32_t x, uint32_t y, uint32_t width, uint32_t height);
+ void SetBlendConstant(const dom::DoubleSequenceOrGPUColorDict& color);
+ void SetStencilReference(uint32_t reference);
+
+ void PushDebugGroup(const nsAString& aString);
+ void PopDebugGroup();
+ void InsertDebugMarker(const nsAString& aString);
+
+ void ExecuteBundles(
+ const dom::Sequence<OwningNonNull<RenderBundle>>& aBundles);
+
+ void End();
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_RenderPassEncoder_H_
diff --git a/dom/webgpu/RenderPipeline.cpp b/dom/webgpu/RenderPipeline.cpp
new file mode 100644
index 0000000000..78e13d31ef
--- /dev/null
+++ b/dom/webgpu/RenderPipeline.cpp
@@ -0,0 +1,58 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "RenderPipeline.h"
+
+#include "Device.h"
+#include "ipc/WebGPUChild.h"
+#include "mozilla/dom/WebGPUBinding.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(RenderPipeline, mParent)
+GPU_IMPL_JS_WRAP(RenderPipeline)
+
+RenderPipeline::RenderPipeline(Device* const aParent, RawId aId,
+ RawId aImplicitPipelineLayoutId,
+ nsTArray<RawId>&& aImplicitBindGroupLayoutIds)
+ : ChildOf(aParent),
+ mImplicitPipelineLayoutId(aImplicitPipelineLayoutId),
+ mImplicitBindGroupLayoutIds(std::move(aImplicitBindGroupLayoutIds)),
+ mId(aId) {
+ MOZ_RELEASE_ASSERT(aId);
+}
+
+RenderPipeline::~RenderPipeline() { Cleanup(); }
+
+void RenderPipeline::Cleanup() {
+ if (mValid && mParent) {
+ mValid = false;
+ auto bridge = mParent->GetBridge();
+ if (bridge && bridge->IsOpen()) {
+ bridge->SendRenderPipelineDrop(mId);
+ if (mImplicitPipelineLayoutId) {
+ bridge->SendImplicitLayoutDrop(mImplicitPipelineLayoutId,
+ mImplicitBindGroupLayoutIds);
+ }
+ }
+ }
+}
+
+already_AddRefed<BindGroupLayout> RenderPipeline::GetBindGroupLayout(
+ uint32_t aIndex) const {
+ auto bridge = mParent->GetBridge();
+ auto* client = bridge->GetClient();
+
+ ipc::ByteBuf bb;
+ const RawId bglId = ffi::wgpu_client_render_pipeline_get_bind_group_layout(
+ client, mId, aIndex, ToFFI(&bb));
+
+ bridge->SendDeviceAction(mParent->GetId(), std::move(bb));
+
+ RefPtr<BindGroupLayout> object = new BindGroupLayout(mParent, bglId, false);
+ return object.forget();
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/RenderPipeline.h b/dom/webgpu/RenderPipeline.h
new file mode 100644
index 0000000000..859259da27
--- /dev/null
+++ b/dom/webgpu/RenderPipeline.h
@@ -0,0 +1,41 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_RenderPipeline_H_
+#define GPU_RenderPipeline_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+#include "nsTArray.h"
+
+namespace mozilla::webgpu {
+
+class BindGroupLayout;
+class Device;
+
+class RenderPipeline final : public ObjectBase, public ChildOf<Device> {
+ const RawId mImplicitPipelineLayoutId;
+ const nsTArray<RawId> mImplicitBindGroupLayoutIds;
+
+ public:
+ GPU_DECL_CYCLE_COLLECTION(RenderPipeline)
+ GPU_DECL_JS_WRAP(RenderPipeline)
+
+ const RawId mId;
+
+ RenderPipeline(Device* const aParent, RawId aId,
+ RawId aImplicitPipelineLayoutId,
+ nsTArray<RawId>&& aImplicitBindGroupLayoutIds);
+ already_AddRefed<BindGroupLayout> GetBindGroupLayout(uint32_t index) const;
+
+ private:
+ virtual ~RenderPipeline();
+ void Cleanup();
+};
+
+} // namespace mozilla::webgpu
+
+#endif // GPU_RenderPipeline_H_
diff --git a/dom/webgpu/Sampler.cpp b/dom/webgpu/Sampler.cpp
new file mode 100644
index 0000000000..6ee5f3c41f
--- /dev/null
+++ b/dom/webgpu/Sampler.cpp
@@ -0,0 +1,34 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "Sampler.h"
+#include "ipc/WebGPUChild.h"
+
+#include "Device.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(Sampler, mParent)
+GPU_IMPL_JS_WRAP(Sampler)
+
+Sampler::Sampler(Device* const aParent, RawId aId)
+ : ChildOf(aParent), mId(aId) {
+ MOZ_RELEASE_ASSERT(aId);
+}
+
+Sampler::~Sampler() { Cleanup(); }
+
+void Sampler::Cleanup() {
+ if (mValid && mParent) {
+ mValid = false;
+ auto bridge = mParent->GetBridge();
+ if (bridge && bridge->IsOpen()) {
+ bridge->SendSamplerDrop(mId);
+ }
+ }
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/Sampler.h b/dom/webgpu/Sampler.h
new file mode 100644
index 0000000000..02e01982cd
--- /dev/null
+++ b/dom/webgpu/Sampler.h
@@ -0,0 +1,33 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_SAMPLER_H_
+#define GPU_SAMPLER_H_
+
+#include "mozilla/webgpu/WebGPUTypes.h"
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+
+namespace mozilla::webgpu {
+
+class Device;
+
+class Sampler final : public ObjectBase, public ChildOf<Device> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(Sampler)
+ GPU_DECL_JS_WRAP(Sampler)
+
+ Sampler(Device* const aParent, RawId aId);
+
+ const RawId mId;
+
+ private:
+ virtual ~Sampler();
+ void Cleanup();
+};
+
+} // namespace mozilla::webgpu
+
+#endif // GPU_SAMPLER_H_
diff --git a/dom/webgpu/ShaderModule.cpp b/dom/webgpu/ShaderModule.cpp
new file mode 100644
index 0000000000..bd123c8423
--- /dev/null
+++ b/dom/webgpu/ShaderModule.cpp
@@ -0,0 +1,47 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/dom/WebGPUBinding.h"
+#include "mozilla/dom/Promise.h"
+#include "ShaderModule.h"
+#include "CompilationInfo.h"
+#include "ipc/WebGPUChild.h"
+
+#include "Device.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(ShaderModule, mParent, mCompilationInfo)
+GPU_IMPL_JS_WRAP(ShaderModule)
+
+ShaderModule::ShaderModule(Device* const aParent, RawId aId,
+ const RefPtr<dom::Promise>& aCompilationInfo)
+ : ChildOf(aParent), mId(aId), mCompilationInfo(aCompilationInfo) {
+ MOZ_RELEASE_ASSERT(aId);
+}
+
+ShaderModule::~ShaderModule() { Cleanup(); }
+
+void ShaderModule::Cleanup() {
+ if (mValid && mParent) {
+ mValid = false;
+ auto bridge = mParent->GetBridge();
+ if (bridge && bridge->IsOpen()) {
+ bridge->SendShaderModuleDrop(mId);
+ }
+ }
+}
+
+already_AddRefed<dom::Promise> ShaderModule::CompilationInfo(ErrorResult& aRv) {
+ return GetCompilationInfo(aRv);
+}
+
+already_AddRefed<dom::Promise> ShaderModule::GetCompilationInfo(
+ ErrorResult& aRv) {
+ RefPtr<dom::Promise> tmp = mCompilationInfo;
+ return tmp.forget();
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/ShaderModule.h b/dom/webgpu/ShaderModule.h
new file mode 100644
index 0000000000..9c20c71c7d
--- /dev/null
+++ b/dom/webgpu/ShaderModule.h
@@ -0,0 +1,39 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_ShaderModule_H_
+#define GPU_ShaderModule_H_
+
+#include "mozilla/webgpu/WebGPUTypes.h"
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+
+namespace mozilla::webgpu {
+
+class CompilationInfo;
+class Device;
+
+class ShaderModule final : public ObjectBase, public ChildOf<Device> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(ShaderModule)
+ GPU_DECL_JS_WRAP(ShaderModule)
+
+ ShaderModule(Device* const aParent, RawId aId,
+ const RefPtr<dom::Promise>& aCompilationInfo);
+ already_AddRefed<dom::Promise> CompilationInfo(ErrorResult& aRv);
+ already_AddRefed<dom::Promise> GetCompilationInfo(ErrorResult& aRv);
+
+ const RawId mId;
+
+ private:
+ virtual ~ShaderModule();
+ void Cleanup();
+
+ RefPtr<dom::Promise> mCompilationInfo;
+};
+
+} // namespace mozilla::webgpu
+
+#endif // GPU_ShaderModule_H_
diff --git a/dom/webgpu/SupportedFeatures.cpp b/dom/webgpu/SupportedFeatures.cpp
new file mode 100644
index 0000000000..294524bc81
--- /dev/null
+++ b/dom/webgpu/SupportedFeatures.cpp
@@ -0,0 +1,27 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "SupportedFeatures.h"
+#include "Adapter.h"
+#include "mozilla/dom/WebGPUBinding.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(SupportedFeatures, mParent)
+GPU_IMPL_JS_WRAP(SupportedFeatures)
+
+SupportedFeatures::SupportedFeatures(Adapter* const aParent)
+ : ChildOf(aParent) {}
+
+void SupportedFeatures::Add(const dom::GPUFeatureName aFeature,
+ ErrorResult& aRv) {
+ const auto u8 = dom::GPUFeatureNameValues::GetString(aFeature);
+ const auto u16 = NS_ConvertUTF8toUTF16(u8);
+ dom::GPUSupportedFeatures_Binding::SetlikeHelpers::Add(this, u16, aRv);
+
+ mFeatures.insert(aFeature);
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/SupportedFeatures.h b/dom/webgpu/SupportedFeatures.h
new file mode 100644
index 0000000000..b60c554f6c
--- /dev/null
+++ b/dom/webgpu/SupportedFeatures.h
@@ -0,0 +1,43 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_SupportedFeatures_H_
+#define GPU_SupportedFeatures_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+
+#include <unordered_set>
+
+namespace mozilla {
+class ErrorResult;
+namespace dom {
+enum class GPUFeatureName : uint8_t;
+} // namespace dom
+} // namespace mozilla
+
+namespace mozilla::webgpu {
+class Adapter;
+
+class SupportedFeatures final : public nsWrapperCache, public ChildOf<Adapter> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(SupportedFeatures)
+ GPU_DECL_JS_WRAP(SupportedFeatures)
+
+ explicit SupportedFeatures(Adapter* const aParent);
+
+ void Add(dom::GPUFeatureName, ErrorResult&);
+ const auto& Features() const { return mFeatures; }
+
+ private:
+ ~SupportedFeatures() = default;
+ void Cleanup() {}
+
+ std::unordered_set<dom::GPUFeatureName> mFeatures;
+};
+
+} // namespace mozilla::webgpu
+
+#endif // GPU_SupportedFeatures_H_
diff --git a/dom/webgpu/SupportedLimits.cpp b/dom/webgpu/SupportedLimits.cpp
new file mode 100644
index 0000000000..4a99affb05
--- /dev/null
+++ b/dom/webgpu/SupportedLimits.cpp
@@ -0,0 +1,201 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "SupportedLimits.h"
+#include "Adapter.h"
+#include "mozilla/dom/WebGPUBinding.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(SupportedLimits, mParent)
+GPU_IMPL_JS_WRAP(SupportedLimits)
+
+SupportedLimits::SupportedLimits(Adapter* const aParent,
+ const ffi::WGPULimits& aLimits)
+ : ChildOf(aParent), mFfi(std::make_unique<ffi::WGPULimits>(aLimits)) {}
+
+SupportedLimits::~SupportedLimits() = default;
+
+uint64_t GetLimit(const ffi::WGPULimits& limits, const Limit limit) {
+ switch (limit) {
+ case Limit::MaxTextureDimension1D:
+ return limits.max_texture_dimension_1d;
+ case Limit::MaxTextureDimension2D:
+ return limits.max_texture_dimension_2d;
+ case Limit::MaxTextureDimension3D:
+ return limits.max_texture_dimension_3d;
+ case Limit::MaxTextureArrayLayers:
+ return limits.max_texture_array_layers;
+ case Limit::MaxBindGroups:
+ return limits.max_bind_groups;
+ case Limit::MaxBindGroupsPlusVertexBuffers:
+ // Not in ffi::WGPULimits, so synthesize:
+ return GetLimit(limits, Limit::MaxBindGroups) +
+ GetLimit(limits, Limit::MaxVertexBuffers);
+ case Limit::MaxBindingsPerBindGroup:
+ return limits.max_bindings_per_bind_group;
+ case Limit::MaxDynamicUniformBuffersPerPipelineLayout:
+ return limits.max_dynamic_uniform_buffers_per_pipeline_layout;
+ case Limit::MaxDynamicStorageBuffersPerPipelineLayout:
+ return limits.max_dynamic_storage_buffers_per_pipeline_layout;
+ case Limit::MaxSampledTexturesPerShaderStage:
+ return limits.max_sampled_textures_per_shader_stage;
+ case Limit::MaxSamplersPerShaderStage:
+ return limits.max_samplers_per_shader_stage;
+ case Limit::MaxStorageBuffersPerShaderStage:
+ return limits.max_storage_buffers_per_shader_stage;
+ case Limit::MaxStorageTexturesPerShaderStage:
+ return limits.max_storage_textures_per_shader_stage;
+ case Limit::MaxUniformBuffersPerShaderStage:
+ return limits.max_uniform_buffers_per_shader_stage;
+ case Limit::MaxUniformBufferBindingSize:
+ return limits.max_uniform_buffer_binding_size;
+ case Limit::MaxStorageBufferBindingSize:
+ return limits.max_storage_buffer_binding_size;
+ case Limit::MinUniformBufferOffsetAlignment:
+ return limits.min_uniform_buffer_offset_alignment;
+ case Limit::MinStorageBufferOffsetAlignment:
+ return limits.min_storage_buffer_offset_alignment;
+ case Limit::MaxVertexBuffers:
+ return limits.max_vertex_buffers;
+ case Limit::MaxBufferSize:
+ return limits.max_buffer_size;
+ case Limit::MaxVertexAttributes:
+ return limits.max_vertex_attributes;
+ case Limit::MaxVertexBufferArrayStride:
+ return limits.max_vertex_buffer_array_stride;
+ case Limit::MaxInterStageShaderComponents:
+ return limits.max_inter_stage_shader_components;
+ case Limit::MaxInterStageShaderVariables:
+ return 16; // From the spec. (not in ffi::WGPULimits)
+ case Limit::MaxColorAttachments:
+ return 8; // From the spec. (not in ffi::WGPULimits)
+ case Limit::MaxColorAttachmentBytesPerSample:
+ return 32; // From the spec. (not in ffi::WGPULimits)
+ case Limit::MaxComputeWorkgroupStorageSize:
+ return limits.max_compute_workgroup_storage_size;
+ case Limit::MaxComputeInvocationsPerWorkgroup:
+ return limits.max_compute_invocations_per_workgroup;
+ case Limit::MaxComputeWorkgroupSizeX:
+ return limits.max_compute_workgroup_size_x;
+ case Limit::MaxComputeWorkgroupSizeY:
+ return limits.max_compute_workgroup_size_y;
+ case Limit::MaxComputeWorkgroupSizeZ:
+ return limits.max_compute_workgroup_size_z;
+ case Limit::MaxComputeWorkgroupsPerDimension:
+ return limits.max_compute_workgroups_per_dimension;
+ }
+ MOZ_CRASH("Bad Limit");
+}
+
+void SetLimit(ffi::WGPULimits* const limits, const Limit limit,
+ const double val) {
+ const auto autoVal = LazyAssertedCast(static_cast<uint64_t>(val));
+ switch (limit) {
+ case Limit::MaxTextureDimension1D:
+ limits->max_texture_dimension_1d = autoVal;
+ return;
+ case Limit::MaxTextureDimension2D:
+ limits->max_texture_dimension_2d = autoVal;
+ return;
+ case Limit::MaxTextureDimension3D:
+ limits->max_texture_dimension_3d = autoVal;
+ return;
+ case Limit::MaxTextureArrayLayers:
+ limits->max_texture_array_layers = autoVal;
+ return;
+ case Limit::MaxBindGroups:
+ limits->max_bind_groups = autoVal;
+ return;
+ case Limit::MaxBindGroupsPlusVertexBuffers:
+ // Not in ffi::WGPULimits, and we're allowed to give back better
+ // limits than requested.
+ return;
+ case Limit::MaxBindingsPerBindGroup:
+ limits->max_bindings_per_bind_group = autoVal;
+ return;
+ case Limit::MaxDynamicUniformBuffersPerPipelineLayout:
+ limits->max_dynamic_uniform_buffers_per_pipeline_layout = autoVal;
+ return;
+ case Limit::MaxDynamicStorageBuffersPerPipelineLayout:
+ limits->max_dynamic_storage_buffers_per_pipeline_layout = autoVal;
+ return;
+ case Limit::MaxSampledTexturesPerShaderStage:
+ limits->max_sampled_textures_per_shader_stage = autoVal;
+ return;
+ case Limit::MaxSamplersPerShaderStage:
+ limits->max_samplers_per_shader_stage = autoVal;
+ return;
+ case Limit::MaxStorageBuffersPerShaderStage:
+ limits->max_storage_buffers_per_shader_stage = autoVal;
+ return;
+ case Limit::MaxStorageTexturesPerShaderStage:
+ limits->max_storage_textures_per_shader_stage = autoVal;
+ return;
+ case Limit::MaxUniformBuffersPerShaderStage:
+ limits->max_uniform_buffers_per_shader_stage = autoVal;
+ return;
+ case Limit::MaxUniformBufferBindingSize:
+ limits->max_uniform_buffer_binding_size = autoVal;
+ return;
+ case Limit::MaxStorageBufferBindingSize:
+ limits->max_storage_buffer_binding_size = autoVal;
+ return;
+ case Limit::MinUniformBufferOffsetAlignment:
+ limits->min_uniform_buffer_offset_alignment = autoVal;
+ return;
+ case Limit::MinStorageBufferOffsetAlignment:
+ limits->min_storage_buffer_offset_alignment = autoVal;
+ return;
+ case Limit::MaxVertexBuffers:
+ limits->max_vertex_buffers = autoVal;
+ return;
+ case Limit::MaxBufferSize:
+ limits->max_buffer_size = autoVal;
+ return;
+ case Limit::MaxVertexAttributes:
+ limits->max_vertex_attributes = autoVal;
+ return;
+ case Limit::MaxVertexBufferArrayStride:
+ limits->max_vertex_buffer_array_stride = autoVal;
+ return;
+ case Limit::MaxInterStageShaderComponents:
+ limits->max_inter_stage_shader_components = autoVal;
+ return;
+ case Limit::MaxInterStageShaderVariables:
+ // Not in ffi::WGPULimits, and we're allowed to give back better
+ // limits than requested.
+ return;
+ case Limit::MaxColorAttachments:
+ // Not in ffi::WGPULimits, and we're allowed to give back better
+ // limits than requested.
+ return;
+ case Limit::MaxColorAttachmentBytesPerSample:
+ // Not in ffi::WGPULimits, and we're allowed to give back better
+ // limits than requested.
+ return;
+ case Limit::MaxComputeWorkgroupStorageSize:
+ limits->max_compute_workgroup_storage_size = autoVal;
+ return;
+ case Limit::MaxComputeInvocationsPerWorkgroup:
+ limits->max_compute_invocations_per_workgroup = autoVal;
+ return;
+ case Limit::MaxComputeWorkgroupSizeX:
+ limits->max_compute_workgroup_size_x = autoVal;
+ return;
+ case Limit::MaxComputeWorkgroupSizeY:
+ limits->max_compute_workgroup_size_y = autoVal;
+ return;
+ case Limit::MaxComputeWorkgroupSizeZ:
+ limits->max_compute_workgroup_size_z = autoVal;
+ return;
+ case Limit::MaxComputeWorkgroupsPerDimension:
+ limits->max_compute_workgroups_per_dimension = autoVal;
+ return;
+ }
+ MOZ_CRASH("Bad Limit");
+}
+
+} // namespace mozilla::webgpu
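
GetLimit and SetLimit above are the only place where the WebIDL limit names are mapped onto ffi::WGPULimits fields, so the Limit enum stays the single source of truth in both directions. As a rough illustrative sketch (not patch content; `base` and the chosen value are hypothetical), overriding one requested limit and reading it back looks like:

    // Illustrative only: `base` is assumed to hold the adapter's limits.
    ffi::WGPULimits required = base;
    SetLimit(&required, Limit::MaxBindGroups, 8.0);
    uint64_t maxBindGroups = GetLimit(required, Limit::MaxBindGroups);  // 8
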
diff --git a/dom/webgpu/SupportedLimits.h b/dom/webgpu/SupportedLimits.h
new file mode 100644
index 0000000000..934feb9e2e
--- /dev/null
+++ b/dom/webgpu/SupportedLimits.h
@@ -0,0 +1,113 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_SupportedLimits_H_
+#define GPU_SupportedLimits_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+
+#include <memory>
+
+namespace mozilla::webgpu {
+namespace ffi {
+struct WGPULimits;
+}
+class Adapter;
+
+enum class Limit : uint8_t {
+ MaxTextureDimension1D,
+ MaxTextureDimension2D,
+ MaxTextureDimension3D,
+ MaxTextureArrayLayers,
+ MaxBindGroups,
+ MaxBindGroupsPlusVertexBuffers,
+ MaxBindingsPerBindGroup,
+ MaxDynamicUniformBuffersPerPipelineLayout,
+ MaxDynamicStorageBuffersPerPipelineLayout,
+ MaxSampledTexturesPerShaderStage,
+ MaxSamplersPerShaderStage,
+ MaxStorageBuffersPerShaderStage,
+ MaxStorageTexturesPerShaderStage,
+ MaxUniformBuffersPerShaderStage,
+ MaxUniformBufferBindingSize,
+ MaxStorageBufferBindingSize,
+ MinUniformBufferOffsetAlignment,
+ MinStorageBufferOffsetAlignment,
+ MaxVertexBuffers,
+ MaxBufferSize,
+ MaxVertexAttributes,
+ MaxVertexBufferArrayStride,
+ MaxInterStageShaderComponents,
+ MaxInterStageShaderVariables,
+ MaxColorAttachments,
+ MaxColorAttachmentBytesPerSample,
+ MaxComputeWorkgroupStorageSize,
+ MaxComputeInvocationsPerWorkgroup,
+ MaxComputeWorkgroupSizeX,
+ MaxComputeWorkgroupSizeY,
+ MaxComputeWorkgroupSizeZ,
+ MaxComputeWorkgroupsPerDimension,
+ _LAST = MaxComputeWorkgroupsPerDimension,
+};
+
+uint64_t GetLimit(const ffi::WGPULimits&, Limit);
+void SetLimit(ffi::WGPULimits*, Limit, double);
+
+class SupportedLimits final : public nsWrapperCache, public ChildOf<Adapter> {
+ public:
+ const std::unique_ptr<ffi::WGPULimits> mFfi;
+
+ GPU_DECL_CYCLE_COLLECTION(SupportedLimits)
+ GPU_DECL_JS_WRAP(SupportedLimits)
+
+#define _(X) \
+ auto X() const { return GetLimit(*mFfi, Limit::X); }
+
+ _(MaxTextureDimension1D)
+ _(MaxTextureDimension2D)
+ _(MaxTextureDimension3D)
+ _(MaxTextureArrayLayers)
+ _(MaxBindGroups)
+ _(MaxBindGroupsPlusVertexBuffers)
+ _(MaxBindingsPerBindGroup)
+ _(MaxDynamicUniformBuffersPerPipelineLayout)
+ _(MaxDynamicStorageBuffersPerPipelineLayout)
+ _(MaxSampledTexturesPerShaderStage)
+ _(MaxSamplersPerShaderStage)
+ _(MaxStorageBuffersPerShaderStage)
+ _(MaxStorageTexturesPerShaderStage)
+ _(MaxUniformBuffersPerShaderStage)
+ _(MaxUniformBufferBindingSize)
+ _(MaxStorageBufferBindingSize)
+ _(MinUniformBufferOffsetAlignment)
+ _(MinStorageBufferOffsetAlignment)
+ _(MaxVertexBuffers)
+ _(MaxBufferSize)
+ _(MaxVertexAttributes)
+ _(MaxVertexBufferArrayStride)
+ _(MaxInterStageShaderComponents)
+ _(MaxInterStageShaderVariables)
+ _(MaxColorAttachments)
+ _(MaxColorAttachmentBytesPerSample)
+ _(MaxComputeWorkgroupStorageSize)
+ _(MaxComputeInvocationsPerWorkgroup)
+ _(MaxComputeWorkgroupSizeX)
+ _(MaxComputeWorkgroupSizeY)
+ _(MaxComputeWorkgroupSizeZ)
+ _(MaxComputeWorkgroupsPerDimension)
+
+#undef _
+
+ SupportedLimits(Adapter* const aParent, const ffi::WGPULimits&);
+
+ private:
+ ~SupportedLimits();
+ void Cleanup() {}
+};
+
+} // namespace mozilla::webgpu
+
+#endif // GPU_SupportedLimits_H_
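
The `_` macro above stamps out one WebIDL getter per limit, each forwarding to GetLimit. For example, `_(MaxBindGroups)` expands to the equivalent of (illustrative expansion, not literal patch content):

    auto MaxBindGroups() const { return GetLimit(*mFfi, Limit::MaxBindGroups); }
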
diff --git a/dom/webgpu/Texture.cpp b/dom/webgpu/Texture.cpp
new file mode 100644
index 0000000000..c7bc406118
--- /dev/null
+++ b/dom/webgpu/Texture.cpp
@@ -0,0 +1,121 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "Texture.h"
+
+#include "ipc/WebGPUChild.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+#include "mozilla/webgpu/CanvasContext.h"
+#include "mozilla/dom/WebGPUBinding.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+#include "TextureView.h"
+#include "Utility.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(Texture, mParent)
+GPU_IMPL_JS_WRAP(Texture)
+
+static Maybe<uint8_t> GetBytesPerBlockSingleAspect(
+ dom::GPUTextureFormat aFormat) {
+ auto format = ConvertTextureFormat(aFormat);
+ uint32_t bytes = ffi::wgpu_texture_format_block_size_single_aspect(format);
+ if (bytes == 0) {
+ // The above function returns zero if the texture has multiple aspects like
+ // depth and stencil.
+ return Nothing();
+ }
+
+ return Some(static_cast<uint8_t>(bytes));
+}
+
+Texture::Texture(Device* const aParent, RawId aId,
+ const dom::GPUTextureDescriptor& aDesc)
+ : ChildOf(aParent),
+ mId(aId),
+ mFormat(aDesc.mFormat),
+ mBytesPerBlock(GetBytesPerBlockSingleAspect(aDesc.mFormat)),
+ mSize(ConvertExtent(aDesc.mSize)),
+ mMipLevelCount(aDesc.mMipLevelCount),
+ mSampleCount(aDesc.mSampleCount),
+ mDimension(aDesc.mDimension),
+ mUsage(aDesc.mUsage) {
+ MOZ_RELEASE_ASSERT(aId);
+}
+
+void Texture::Cleanup() {
+ if (!mParent) {
+ return;
+ }
+
+ auto bridge = mParent->GetBridge();
+ if (bridge && bridge->IsOpen()) {
+ bridge->SendTextureDrop(mId);
+ }
+
+ // After cleanup is called, no other method should ever be called on the
+ // object so we don't have to null-check mParent in other places.
+ // This serves the purpose of preventing SendTextureDrop from happening
+ // twice. TODO: Does it matter for breaking cycles too? Cleanup is called
+ // by the macros that deal with cycle colleciton.
+ mParent = nullptr;
+}
+
+Texture::~Texture() { Cleanup(); }
+
+already_AddRefed<TextureView> Texture::CreateView(
+ const dom::GPUTextureViewDescriptor& aDesc) {
+ auto bridge = mParent->GetBridge();
+
+ ffi::WGPUTextureViewDescriptor desc = {};
+
+ webgpu::StringHelper label(aDesc.mLabel);
+ desc.label = label.Get();
+
+ ffi::WGPUTextureFormat format = {ffi::WGPUTextureFormat_Sentinel};
+ if (aDesc.mFormat.WasPassed()) {
+ format = ConvertTextureFormat(aDesc.mFormat.Value());
+ desc.format = &format;
+ }
+ ffi::WGPUTextureViewDimension dimension =
+ ffi::WGPUTextureViewDimension_Sentinel;
+ if (aDesc.mDimension.WasPassed()) {
+ dimension = ffi::WGPUTextureViewDimension(aDesc.mDimension.Value());
+ desc.dimension = &dimension;
+ }
+
+ // Ideally we'd just do something like "aDesc.mMipLevelCount.ptrOr(nullptr)"
+ // but dom::Optional does not provide such a helper.
+ uint32_t mipCount =
+ aDesc.mMipLevelCount.WasPassed() ? aDesc.mMipLevelCount.Value() : 0;
+ uint32_t layerCount =
+ aDesc.mArrayLayerCount.WasPassed() ? aDesc.mArrayLayerCount.Value() : 0;
+
+ desc.aspect = ffi::WGPUTextureAspect(aDesc.mAspect);
+ desc.base_mip_level = aDesc.mBaseMipLevel;
+ desc.mip_level_count = aDesc.mMipLevelCount.WasPassed() ? &mipCount : nullptr;
+ desc.base_array_layer = aDesc.mBaseArrayLayer;
+ desc.array_layer_count =
+ aDesc.mArrayLayerCount.WasPassed() ? &layerCount : nullptr;
+
+ ipc::ByteBuf bb;
+ RawId id = ffi::wgpu_client_create_texture_view(bridge->GetClient(), mId,
+ &desc, ToFFI(&bb));
+ if (bridge->CanSend()) {
+ bridge->SendTextureAction(mId, mParent->mId, std::move(bb));
+ }
+
+ RefPtr<TextureView> view = new TextureView(this, id);
+ return view.forget();
+}
+
+void Texture::Destroy() {
+ auto bridge = mParent->GetBridge();
+ if (bridge && bridge->IsOpen()) {
+ bridge->SendTextureDestroy(mId, mParent->GetId());
+ }
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/Texture.h b/dom/webgpu/Texture.h
new file mode 100644
index 0000000000..e31878f825
--- /dev/null
+++ b/dom/webgpu/Texture.h
@@ -0,0 +1,73 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_Texture_H_
+#define GPU_Texture_H_
+
+#include <cstdint>
+#include "mozilla/WeakPtr.h"
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+
+namespace mozilla {
+namespace dom {
+struct GPUTextureDescriptor;
+struct GPUTextureViewDescriptor;
+enum class GPUTextureDimension : uint8_t;
+enum class GPUTextureFormat : uint8_t;
+enum class GPUTextureUsageFlags : uint32_t;
+} // namespace dom
+
+namespace webgpu {
+
+class CanvasContext;
+class Device;
+class TextureView;
+
+class Texture final : public ObjectBase, public ChildOf<Device> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(Texture)
+ GPU_DECL_JS_WRAP(Texture)
+
+ Texture(Device* const aParent, RawId aId,
+ const dom::GPUTextureDescriptor& aDesc);
+ Device* GetParentDevice() { return mParent; }
+ const RawId mId;
+ const dom::GPUTextureFormat mFormat;
+ const Maybe<uint8_t> mBytesPerBlock;
+
+ WeakPtr<CanvasContext> mTargetContext;
+
+ private:
+ virtual ~Texture();
+ void Cleanup();
+
+ const ffi::WGPUExtent3d mSize;
+ const uint32_t mMipLevelCount;
+ const uint32_t mSampleCount;
+ const dom::GPUTextureDimension mDimension;
+ const uint32_t mUsage;
+
+ public:
+ already_AddRefed<TextureView> CreateView(
+ const dom::GPUTextureViewDescriptor& aDesc);
+ void Destroy();
+
+ uint32_t Width() const { return mSize.width; }
+ uint32_t Height() const { return mSize.height; }
+ uint32_t DepthOrArrayLayers() const { return mSize.depth_or_array_layers; }
+ uint32_t MipLevelCount() const { return mMipLevelCount; }
+ uint32_t SampleCount() const { return mSampleCount; }
+ dom::GPUTextureDimension Dimension() const { return mDimension; }
+ dom::GPUTextureFormat Format() const { return mFormat; }
+ uint32_t Usage() const { return mUsage; }
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_Texture_H_
diff --git a/dom/webgpu/TextureView.cpp b/dom/webgpu/TextureView.cpp
new file mode 100644
index 0000000000..c36818e9ea
--- /dev/null
+++ b/dom/webgpu/TextureView.cpp
@@ -0,0 +1,39 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "TextureView.h"
+
+#include "Device.h"
+#include "mozilla/dom/WebGPUBinding.h"
+#include "mozilla/webgpu/CanvasContext.h"
+#include "ipc/WebGPUChild.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_CYCLE_COLLECTION(TextureView, mParent)
+GPU_IMPL_JS_WRAP(TextureView)
+
+TextureView::TextureView(Texture* const aParent, RawId aId)
+ : ChildOf(aParent), mId(aId) {
+ MOZ_RELEASE_ASSERT(aId);
+}
+
+TextureView::~TextureView() { Cleanup(); }
+
+CanvasContext* TextureView::GetTargetContext() const {
+ return mParent->mTargetContext;
+}
+
+void TextureView::Cleanup() {
+ if (mValid && mParent && mParent->GetParentDevice()) {
+ mValid = false;
+ auto bridge = mParent->GetParentDevice()->GetBridge();
+ if (bridge && bridge->IsOpen()) {
+ bridge->SendTextureViewDrop(mId);
+ }
+ }
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/TextureView.h b/dom/webgpu/TextureView.h
new file mode 100644
index 0000000000..a0c69c106b
--- /dev/null
+++ b/dom/webgpu/TextureView.h
@@ -0,0 +1,35 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_TextureView_H_
+#define GPU_TextureView_H_
+
+#include "nsWrapperCache.h"
+#include "ObjectModel.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+
+namespace mozilla::webgpu {
+
+class CanvasContext;
+class Texture;
+
+class TextureView final : public ObjectBase, public ChildOf<Texture> {
+ public:
+ GPU_DECL_CYCLE_COLLECTION(TextureView)
+ GPU_DECL_JS_WRAP(TextureView)
+
+ TextureView(Texture* const aParent, RawId aId);
+ CanvasContext* GetTargetContext() const;
+
+ const RawId mId;
+
+ private:
+ virtual ~TextureView();
+ void Cleanup();
+};
+
+} // namespace mozilla::webgpu
+
+#endif // GPU_TextureView_H_
diff --git a/dom/webgpu/Utility.cpp b/dom/webgpu/Utility.cpp
new file mode 100644
index 0000000000..fdb5732e8a
--- /dev/null
+++ b/dom/webgpu/Utility.cpp
@@ -0,0 +1,287 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "Utility.h"
+#include "mozilla/dom/WebGPUBinding.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+
+namespace mozilla::webgpu {
+
+template <typename E>
+void ConvertToExtent3D(const E& aExtent, ffi::WGPUExtent3d* aExtentFFI) {
+ *aExtentFFI = {};
+ if (aExtent.IsRangeEnforcedUnsignedLongSequence()) {
+ const auto& seq = aExtent.GetAsRangeEnforcedUnsignedLongSequence();
+ aExtentFFI->width = seq.Length() > 0 ? seq[0] : 0;
+ aExtentFFI->height = seq.Length() > 1 ? seq[1] : 1;
+ aExtentFFI->depth_or_array_layers = seq.Length() > 2 ? seq[2] : 1;
+ } else if (aExtent.IsGPUExtent3DDict()) {
+ const auto& dict = aExtent.GetAsGPUExtent3DDict();
+ aExtentFFI->width = dict.mWidth;
+ aExtentFFI->height = dict.mHeight;
+ aExtentFFI->depth_or_array_layers = dict.mDepthOrArrayLayers;
+ } else {
+ MOZ_CRASH("Unexpected extent type");
+ }
+}
+
+void ConvertExtent3DToFFI(const dom::GPUExtent3D& aExtent,
+ ffi::WGPUExtent3d* aExtentFFI) {
+ ConvertToExtent3D(aExtent, aExtentFFI);
+}
+
+void ConvertExtent3DToFFI(const dom::OwningGPUExtent3D& aExtent,
+ ffi::WGPUExtent3d* aExtentFFI) {
+ ConvertToExtent3D(aExtent, aExtentFFI);
+}
+
+ffi::WGPUExtent3d ConvertExtent(const dom::GPUExtent3D& aExtent) {
+ ffi::WGPUExtent3d extent = {};
+ ConvertToExtent3D(aExtent, &extent);
+ return extent;
+}
+
+ffi::WGPUExtent3d ConvertExtent(const dom::OwningGPUExtent3D& aExtent) {
+ ffi::WGPUExtent3d extent = {};
+ ConvertToExtent3D(aExtent, &extent);
+ return extent;
+}
+
+ffi::WGPUCompareFunction ConvertCompareFunction(
+ const dom::GPUCompareFunction& aCompare) {
+ // Value of 0 = Undefined is reserved on the C side for "null" semantics.
+ return ffi::WGPUCompareFunction(UnderlyingValue(aCompare) + 1);
+}
+
+ffi::WGPUTextureFormat ConvertTextureFormat(
+ const dom::GPUTextureFormat& aFormat) {
+ ffi::WGPUTextureFormat result = {ffi::WGPUTextureFormat_Sentinel};
+ switch (aFormat) {
+ case dom::GPUTextureFormat::R8unorm:
+ result.tag = ffi::WGPUTextureFormat_R8Unorm;
+ break;
+ case dom::GPUTextureFormat::R8snorm:
+ result.tag = ffi::WGPUTextureFormat_R8Snorm;
+ break;
+ case dom::GPUTextureFormat::R8uint:
+ result.tag = ffi::WGPUTextureFormat_R8Uint;
+ break;
+ case dom::GPUTextureFormat::R8sint:
+ result.tag = ffi::WGPUTextureFormat_R8Sint;
+ break;
+ case dom::GPUTextureFormat::R16uint:
+ result.tag = ffi::WGPUTextureFormat_R16Uint;
+ break;
+ case dom::GPUTextureFormat::R16sint:
+ result.tag = ffi::WGPUTextureFormat_R16Sint;
+ break;
+ case dom::GPUTextureFormat::R16float:
+ result.tag = ffi::WGPUTextureFormat_R16Float;
+ break;
+ case dom::GPUTextureFormat::Rg8unorm:
+ result.tag = ffi::WGPUTextureFormat_Rg8Unorm;
+ break;
+ case dom::GPUTextureFormat::Rg8snorm:
+ result.tag = ffi::WGPUTextureFormat_Rg8Snorm;
+ break;
+ case dom::GPUTextureFormat::Rg8uint:
+ result.tag = ffi::WGPUTextureFormat_Rg8Uint;
+ break;
+ case dom::GPUTextureFormat::Rg8sint:
+ result.tag = ffi::WGPUTextureFormat_Rg8Sint;
+ break;
+ case dom::GPUTextureFormat::R32uint:
+ result.tag = ffi::WGPUTextureFormat_R32Uint;
+ break;
+ case dom::GPUTextureFormat::R32sint:
+ result.tag = ffi::WGPUTextureFormat_R32Sint;
+ break;
+ case dom::GPUTextureFormat::R32float:
+ result.tag = ffi::WGPUTextureFormat_R32Float;
+ break;
+ case dom::GPUTextureFormat::Rg16uint:
+ result.tag = ffi::WGPUTextureFormat_Rg16Uint;
+ break;
+ case dom::GPUTextureFormat::Rg16sint:
+ result.tag = ffi::WGPUTextureFormat_Rg16Sint;
+ break;
+ case dom::GPUTextureFormat::Rg16float:
+ result.tag = ffi::WGPUTextureFormat_Rg16Float;
+ break;
+ case dom::GPUTextureFormat::Rgba8unorm:
+ result.tag = ffi::WGPUTextureFormat_Rgba8Unorm;
+ break;
+ case dom::GPUTextureFormat::Rgba8unorm_srgb:
+ result.tag = ffi::WGPUTextureFormat_Rgba8UnormSrgb;
+ break;
+ case dom::GPUTextureFormat::Rgba8snorm:
+ result.tag = ffi::WGPUTextureFormat_Rgba8Snorm;
+ break;
+ case dom::GPUTextureFormat::Rgba8uint:
+ result.tag = ffi::WGPUTextureFormat_Rgba8Uint;
+ break;
+ case dom::GPUTextureFormat::Rgba8sint:
+ result.tag = ffi::WGPUTextureFormat_Rgba8Sint;
+ break;
+ case dom::GPUTextureFormat::Bgra8unorm:
+ result.tag = ffi::WGPUTextureFormat_Bgra8Unorm;
+ break;
+ case dom::GPUTextureFormat::Bgra8unorm_srgb:
+ result.tag = ffi::WGPUTextureFormat_Bgra8UnormSrgb;
+ break;
+ case dom::GPUTextureFormat::Rgb9e5ufloat:
+ result.tag = ffi::WGPUTextureFormat_Rgb9e5Ufloat;
+ break;
+ case dom::GPUTextureFormat::Rgb10a2unorm:
+ result.tag = ffi::WGPUTextureFormat_Rgb10a2Unorm;
+ break;
+ case dom::GPUTextureFormat::Rg11b10ufloat:
+ result.tag = ffi::WGPUTextureFormat_Rg11b10Float;
+ break;
+ case dom::GPUTextureFormat::Rg32uint:
+ result.tag = ffi::WGPUTextureFormat_Rg32Uint;
+ break;
+ case dom::GPUTextureFormat::Rg32sint:
+ result.tag = ffi::WGPUTextureFormat_Rg32Sint;
+ break;
+ case dom::GPUTextureFormat::Rg32float:
+ result.tag = ffi::WGPUTextureFormat_Rg32Float;
+ break;
+ case dom::GPUTextureFormat::Rgba16uint:
+ result.tag = ffi::WGPUTextureFormat_Rgba16Uint;
+ break;
+ case dom::GPUTextureFormat::Rgba16sint:
+ result.tag = ffi::WGPUTextureFormat_Rgba16Sint;
+ break;
+ case dom::GPUTextureFormat::Rgba16float:
+ result.tag = ffi::WGPUTextureFormat_Rgba16Float;
+ break;
+ case dom::GPUTextureFormat::Rgba32uint:
+ result.tag = ffi::WGPUTextureFormat_Rgba32Uint;
+ break;
+ case dom::GPUTextureFormat::Rgba32sint:
+ result.tag = ffi::WGPUTextureFormat_Rgba32Sint;
+ break;
+ case dom::GPUTextureFormat::Rgba32float:
+ result.tag = ffi::WGPUTextureFormat_Rgba32Float;
+ break;
+ case dom::GPUTextureFormat::Depth32float:
+ result.tag = ffi::WGPUTextureFormat_Depth32Float;
+ break;
+ case dom::GPUTextureFormat::Bc1_rgba_unorm:
+ result.tag = ffi::WGPUTextureFormat_Bc1RgbaUnorm;
+ break;
+ case dom::GPUTextureFormat::Bc1_rgba_unorm_srgb:
+ result.tag = ffi::WGPUTextureFormat_Bc1RgbaUnormSrgb;
+ break;
+ case dom::GPUTextureFormat::Bc4_r_unorm:
+ result.tag = ffi::WGPUTextureFormat_Bc4RUnorm;
+ break;
+ case dom::GPUTextureFormat::Bc4_r_snorm:
+ result.tag = ffi::WGPUTextureFormat_Bc4RSnorm;
+ break;
+ case dom::GPUTextureFormat::Bc2_rgba_unorm:
+ result.tag = ffi::WGPUTextureFormat_Bc2RgbaUnorm;
+ break;
+ case dom::GPUTextureFormat::Bc2_rgba_unorm_srgb:
+ result.tag = ffi::WGPUTextureFormat_Bc2RgbaUnormSrgb;
+ break;
+ case dom::GPUTextureFormat::Bc3_rgba_unorm:
+ result.tag = ffi::WGPUTextureFormat_Bc3RgbaUnorm;
+ break;
+ case dom::GPUTextureFormat::Bc3_rgba_unorm_srgb:
+ result.tag = ffi::WGPUTextureFormat_Bc3RgbaUnormSrgb;
+ break;
+ case dom::GPUTextureFormat::Bc5_rg_unorm:
+ result.tag = ffi::WGPUTextureFormat_Bc5RgUnorm;
+ break;
+ case dom::GPUTextureFormat::Bc5_rg_snorm:
+ result.tag = ffi::WGPUTextureFormat_Bc5RgSnorm;
+ break;
+ case dom::GPUTextureFormat::Bc6h_rgb_ufloat:
+ result.tag = ffi::WGPUTextureFormat_Bc6hRgbUfloat;
+ break;
+ case dom::GPUTextureFormat::Bc6h_rgb_float:
+ result.tag = ffi::WGPUTextureFormat_Bc6hRgbFloat;
+ break;
+ case dom::GPUTextureFormat::Bc7_rgba_unorm:
+ result.tag = ffi::WGPUTextureFormat_Bc7RgbaUnorm;
+ break;
+ case dom::GPUTextureFormat::Bc7_rgba_unorm_srgb:
+ result.tag = ffi::WGPUTextureFormat_Bc7RgbaUnormSrgb;
+ break;
+ case dom::GPUTextureFormat::Stencil8:
+ result.tag = ffi::WGPUTextureFormat_Stencil8;
+ break;
+ case dom::GPUTextureFormat::Depth16unorm:
+ result.tag = ffi::WGPUTextureFormat_Depth16Unorm;
+ break;
+ case dom::GPUTextureFormat::Depth24plus:
+ result.tag = ffi::WGPUTextureFormat_Depth24Plus;
+ break;
+ case dom::GPUTextureFormat::Depth24plus_stencil8:
+ result.tag = ffi::WGPUTextureFormat_Depth24PlusStencil8;
+ break;
+ case dom::GPUTextureFormat::Depth32float_stencil8:
+ result.tag = ffi::WGPUTextureFormat_Depth32FloatStencil8;
+ break;
+ case dom::GPUTextureFormat::EndGuard_:
+ MOZ_ASSERT_UNREACHABLE("unexpected dom::GPUTextureFormat");
+ }
+
+ // Clang will check for us that the switch above is exhaustive,
+ // but not if we add a 'default' case. So, check this here.
+ MOZ_ASSERT(result.tag != ffi::WGPUTextureFormat_Sentinel,
+ "unexpected texture format enum");
+
+ return result;
+}
+
+ffi::WGPUMultisampleState ConvertMultisampleState(
+ const dom::GPUMultisampleState& aDesc) {
+ ffi::WGPUMultisampleState desc = {};
+ desc.count = aDesc.mCount;
+ desc.mask = aDesc.mMask;
+ desc.alpha_to_coverage_enabled = aDesc.mAlphaToCoverageEnabled;
+ return desc;
+}
+
+ffi::WGPUBlendComponent ConvertBlendComponent(
+ const dom::GPUBlendComponent& aDesc) {
+ ffi::WGPUBlendComponent desc = {};
+ desc.src_factor = ffi::WGPUBlendFactor(aDesc.mSrcFactor);
+ desc.dst_factor = ffi::WGPUBlendFactor(aDesc.mDstFactor);
+ desc.operation = ffi::WGPUBlendOperation(aDesc.mOperation);
+ return desc;
+}
+
+ffi::WGPUStencilFaceState ConvertStencilFaceState(
+ const dom::GPUStencilFaceState& aDesc) {
+ ffi::WGPUStencilFaceState desc = {};
+ desc.compare = ConvertCompareFunction(aDesc.mCompare);
+ desc.fail_op = ffi::WGPUStencilOperation(aDesc.mFailOp);
+ desc.depth_fail_op = ffi::WGPUStencilOperation(aDesc.mDepthFailOp);
+ desc.pass_op = ffi::WGPUStencilOperation(aDesc.mPassOp);
+ return desc;
+}
+
+ffi::WGPUDepthStencilState ConvertDepthStencilState(
+ const dom::GPUDepthStencilState& aDesc) {
+ ffi::WGPUDepthStencilState desc = {};
+ desc.format = ConvertTextureFormat(aDesc.mFormat);
+ desc.depth_write_enabled = aDesc.mDepthWriteEnabled;
+ desc.depth_compare = ConvertCompareFunction(aDesc.mDepthCompare);
+ desc.stencil.front = ConvertStencilFaceState(aDesc.mStencilFront);
+ desc.stencil.back = ConvertStencilFaceState(aDesc.mStencilBack);
+ desc.stencil.read_mask = aDesc.mStencilReadMask;
+ desc.stencil.write_mask = aDesc.mStencilWriteMask;
+ desc.bias.constant = aDesc.mDepthBias;
+ desc.bias.slope_scale = aDesc.mDepthBiasSlopeScale;
+ desc.bias.clamp = aDesc.mDepthBiasClamp;
+ return desc;
+}
+
+} // namespace mozilla::webgpu
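
The converters above are pure value mappings, which makes them easy to sanity-check in isolation. A minimal illustrative sketch (not patch content) using only names that appear in this file:

    // The switch in ConvertTextureFormat is exhaustive, so the sentinel tag
    // can never escape to callers.
    ffi::WGPUTextureFormat f =
        ConvertTextureFormat(dom::GPUTextureFormat::Rgba8unorm);
    MOZ_ASSERT(f.tag == ffi::WGPUTextureFormat_Rgba8Unorm);

ConvertCompareFunction likewise relies on the two enums listing the same values in the same order, with the ffi enum shifted by one so that 0 can keep its "undefined" (null) meaning on the C side.
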
diff --git a/dom/webgpu/Utility.h b/dom/webgpu/Utility.h
new file mode 100644
index 0000000000..a58faedc14
--- /dev/null
+++ b/dom/webgpu/Utility.h
@@ -0,0 +1,58 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_UTIL_H_
+#define GPU_UTIL_H_
+
+#include "mozilla/dom/WebGPUBinding.h"
+
+namespace mozilla {
+class ErrorResult;
+
+namespace dom {
+struct GPUComputePassDescriptor;
+template <typename T>
+class Sequence;
+using GPUExtent3D = RangeEnforcedUnsignedLongSequenceOrGPUExtent3DDict;
+using OwningGPUExtent3D =
+ OwningRangeEnforcedUnsignedLongSequenceOrGPUExtent3DDict;
+} // namespace dom
+namespace webgpu {
+namespace ffi {
+struct WGPUExtent3d;
+} // namespace ffi
+
+void ConvertExtent3DToFFI(const dom::GPUExtent3D& aExtent,
+ ffi::WGPUExtent3d* aExtentFFI);
+
+void ConvertExtent3DToFFI(const dom::OwningGPUExtent3D& aExtent,
+ ffi::WGPUExtent3d* aExtentFFI);
+
+ffi::WGPUExtent3d ConvertExtent(const dom::GPUExtent3D& aExtent);
+
+ffi::WGPUExtent3d ConvertExtent(const dom::OwningGPUExtent3D& aExtent);
+
+ffi::WGPUCompareFunction ConvertCompareFunction(
+ const dom::GPUCompareFunction& aCompare);
+
+ffi::WGPUTextureFormat ConvertTextureFormat(
+ const dom::GPUTextureFormat& aFormat);
+
+ffi::WGPUMultisampleState ConvertMultisampleState(
+ const dom::GPUMultisampleState& aDesc);
+
+ffi::WGPUBlendComponent ConvertBlendComponent(
+ const dom::GPUBlendComponent& aDesc);
+
+ffi::WGPUStencilFaceState ConvertStencilFaceState(
+ const dom::GPUStencilFaceState& aDesc);
+
+ffi::WGPUDepthStencilState ConvertDepthStencilState(
+ const dom::GPUDepthStencilState& aDesc);
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_UTIL_H_
diff --git a/dom/webgpu/ValidationError.cpp b/dom/webgpu/ValidationError.cpp
new file mode 100644
index 0000000000..3b3083c64f
--- /dev/null
+++ b/dom/webgpu/ValidationError.cpp
@@ -0,0 +1,21 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ValidationError.h"
+#include "mozilla/dom/WebGPUBinding.h"
+
+namespace mozilla::webgpu {
+
+GPU_IMPL_JS_WRAP(ValidationError)
+
+already_AddRefed<ValidationError> ValidationError::Constructor(
+ const dom::GlobalObject& aGlobal, const nsAString& aString,
+ ErrorResult& aRv) {
+ nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(aGlobal.GetAsSupports());
+ MOZ_RELEASE_ASSERT(global);
+ return MakeAndAddRef<ValidationError>(global, aString);
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/ValidationError.h b/dom/webgpu/ValidationError.h
new file mode 100644
index 0000000000..d8e9db3a78
--- /dev/null
+++ b/dom/webgpu/ValidationError.h
@@ -0,0 +1,40 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GPU_ValidationError_H_
+#define GPU_ValidationError_H_
+
+#include "Error.h"
+
+namespace mozilla {
+class ErrorResult;
+namespace dom {
+class GlobalObject;
+} // namespace dom
+namespace webgpu {
+
+class ValidationError final : public Error {
+ public:
+ GPU_DECL_JS_WRAP(ValidationError)
+
+ ValidationError(nsIGlobalObject* const aGlobal, const nsAString& aMessage)
+ : Error(aGlobal, aMessage) {}
+
+ ValidationError(nsIGlobalObject* const aGlobal, const nsACString& aMessage)
+ : Error(aGlobal, aMessage) {}
+
+ private:
+ ~ValidationError() override = default;
+
+ public:
+ static already_AddRefed<ValidationError> Constructor(
+ const dom::GlobalObject& aGlobal, const nsAString& aString,
+ ErrorResult& aRv);
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // GPU_ValidationError_H_
diff --git a/dom/webgpu/crashtests/1809567.html b/dom/webgpu/crashtests/1809567.html
new file mode 100644
index 0000000000..7b922182eb
--- /dev/null
+++ b/dom/webgpu/crashtests/1809567.html
@@ -0,0 +1,72 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <body>
+ <script>
+ // The bulk of the test is wrapped in an async function because
+ // the WebGPU API returns promises of adapters and devices,
+ // which we would like to conveniently await.
+ async function orphan_webgpu_device() {
+ // Create an iframe in the same origin as this code.
+ let iframe = document.createElement('iframe');
+ document.body.appendChild(iframe);
+
+ // Define a function in that iframe that creates a WebGPU
+ // `GPUDevice`.
+ let script = iframe.contentDocument.createElement('script');
+ script.type = 'text/javascript';
+ script.text = `
+ async function create_device() {
+ // WebGPU is not yet available in beta or release.
+ if (!navigator.gpu) {
+ return null;
+ }
+
+ let adapter = await navigator.gpu.requestAdapter({ });
+ // Not all GPUs are capable of supporting WebGPU.
+ if (!adapter) {
+ return null;
+ }
+
+ return await adapter.requestDevice({ });
+ }
+ `;
+ iframe.contentDocument.body.appendChild(script);
+
+ // Call that function to create a `GPUDevice` in the iframe.
+ let device = await iframe.contentWindow.create_device();
+
+ // If we can't run WebGPU in this browser, then we can't reach the crash.
+ if (device) {
+ // Remove the iframe from our document. This closes its window.
+ iframe.remove();
+
+ try {
+ // When a Web API JavaScript object has had its parent window
+ // closed, C++ implementations of its WebIDL methods become unable
+ // to create JavaScript objects as usual: calling
+ // `EventTarget::GetParentObject` returns `nullptr`.
+ //
+ // Since we removed `iframe` from this document, the following
+ // call will fail trying to create a `Promise` of the module's
+ // `GPUCompilationInfo`.
+ device.createShaderModule({ code: '' });
+ } catch (error) {
+ // Eating errors indiscriminately wastes later developers' time.
+ if (error.name != "NS_ERROR_UNEXPECTED") {
+ throw error;
+ }
+ }
+ }
+ }
+
+ orphan_webgpu_device()
+ .catch((error) => {
+ console.log(error);
+ })
+ .finally(() => {
+ // End the crashtest.
+ document.documentElement.removeAttribute("class");
+ });
+ </script>
+ </body>
+</html>
diff --git a/dom/webgpu/crashtests/crashtests.list b/dom/webgpu/crashtests/crashtests.list
new file mode 100644
index 0000000000..4f0094f2a3
--- /dev/null
+++ b/dom/webgpu/crashtests/crashtests.list
@@ -0,0 +1 @@
+load 1809567.html
diff --git a/dom/webgpu/ipc/PWebGPU.ipdl b/dom/webgpu/ipc/PWebGPU.ipdl
new file mode 100644
index 0000000000..5146dd6826
--- /dev/null
+++ b/dom/webgpu/ipc/PWebGPU.ipdl
@@ -0,0 +1,100 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: sw=2 ts=8 et :
+ */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+using mozilla::layers::RGBDescriptor from "mozilla/layers/LayersSurfaces.h";
+using mozilla::layers::RemoteTextureId from "mozilla/layers/LayersTypes.h";
+using mozilla::layers::RemoteTextureOwnerId from "mozilla/layers/LayersTypes.h";
+using mozilla::layers::RemoteTextureTxnType from "mozilla/layers/LayersTypes.h";
+using mozilla::layers::RemoteTextureTxnId from "mozilla/layers/LayersTypes.h";
+using mozilla::webgpu::RawId from "mozilla/webgpu/WebGPUTypes.h";
+using mozilla::dom::GPUErrorFilter from "mozilla/dom/WebGPUBinding.h";
+using mozilla::dom::GPURequestAdapterOptions from "mozilla/dom/WebGPUBinding.h";
+using mozilla::dom::GPUCommandBufferDescriptor from "mozilla/dom/WebGPUBinding.h";
+using mozilla::dom::GPUBufferDescriptor from "mozilla/dom/WebGPUBinding.h";
+using mozilla::webgpu::PopErrorScopeResult from "mozilla/webgpu/WebGPUTypes.h";
+using mozilla::webgpu::WebGPUCompilationMessage from "mozilla/webgpu/WebGPUTypes.h";
+[MoveOnly] using class mozilla::ipc::UnsafeSharedMemoryHandle from "mozilla/ipc/RawShmem.h";
+using struct mozilla::void_t from "mozilla/ipc/IPCCore.h";
+
+include "mozilla/ipc/ByteBufUtils.h";
+include "mozilla/layers/LayersMessageUtils.h";
+include "mozilla/webgpu/WebGPUSerialize.h";
+include "mozilla/layers/WebRenderMessageUtils.h";
+include protocol PCanvasManager;
+include PWebGPUTypes;
+
+namespace mozilla {
+namespace webgpu {
+
+/**
+ * Represents the connection between a WebGPUChild actor that issues WebGPU
+ * commands from the content process, and a WebGPUParent in the compositor
+ * process that runs the commands.
+ */
+async protocol PWebGPU
+{
+ manager PCanvasManager;
+
+parent:
+ async DeviceAction(RawId selfId, ByteBuf buf);
+ async DeviceActionWithAck(RawId selfId, ByteBuf buf) returns (bool dummy);
+ async TextureAction(RawId selfId, RawId aDeviceId, ByteBuf buf);
+ async CommandEncoderAction(RawId selfId, RawId aDeviceId, ByteBuf buf);
+ async BumpImplicitBindGroupLayout(RawId pipelineId, bool isCompute, uint32_t index, RawId assignId);
+
+ async DeviceCreateBuffer(RawId deviceId, RawId bufferId, GPUBufferDescriptor desc, UnsafeSharedMemoryHandle shm);
+
+ async InstanceRequestAdapter(GPURequestAdapterOptions options, RawId[] ids) returns (ByteBuf byteBuf);
+ async AdapterRequestDevice(RawId selfId, ByteBuf buf, RawId newId) returns (bool success);
+ async AdapterDrop(RawId selfId);
+ // TODO: We want to return an array of compilation messages.
+ async DeviceCreateShaderModule(RawId selfId, RawId bufferId, nsString label, nsCString code) returns (WebGPUCompilationMessage[] messages);
+ async BufferMap(RawId deviceId, RawId bufferId, uint32_t aMode, uint64_t offset, uint64_t size) returns (BufferMapResult result);
+ async BufferUnmap(RawId deviceId, RawId bufferId, bool flush);
+ async BufferDestroy(RawId selfId);
+ async BufferDrop(RawId selfId);
+ async TextureDestroy(RawId selfId, RawId deviceId);
+ async TextureDrop(RawId selfId);
+ async TextureViewDrop(RawId selfId);
+ async SamplerDrop(RawId selfId);
+ async DeviceDestroy(RawId selfId);
+ async DeviceDrop(RawId selfId);
+
+ async CommandEncoderFinish(RawId selfId, RawId deviceId, GPUCommandBufferDescriptor desc);
+ async CommandEncoderDrop(RawId selfId);
+ async RenderBundleDrop(RawId selfId);
+ async QueueSubmit(RawId selfId, RawId aDeviceId, RawId[] commandBuffers, RawId[] textureIds);
+ async QueueOnSubmittedWorkDone(RawId selfId) returns (void_t ok);
+ async QueueWriteAction(RawId selfId, RawId aDeviceId, ByteBuf buf, UnsafeSharedMemoryHandle shmem);
+
+ async BindGroupLayoutDrop(RawId selfId);
+ async PipelineLayoutDrop(RawId selfId);
+ async BindGroupDrop(RawId selfId);
+ async ShaderModuleDrop(RawId selfId);
+ async ComputePipelineDrop(RawId selfId);
+ async RenderPipelineDrop(RawId selfId);
+ async ImplicitLayoutDrop(RawId implicitPlId, RawId[] implicitBglIds);
+ async DeviceCreateSwapChain(RawId selfId, RawId queueId, RGBDescriptor desc, RawId[] bufferIds, RemoteTextureOwnerId ownerId, bool useExternalTextureInSwapChain);
+ async SwapChainPresent(RawId textureId, RawId commandEncoderId, RemoteTextureId remoteTextureId, RemoteTextureOwnerId remoteTextureOwnerId);
+ async SwapChainDrop(RemoteTextureOwnerId ownerId, RemoteTextureTxnType txnType, RemoteTextureTxnId txnId);
+
+ async DevicePushErrorScope(RawId selfId, GPUErrorFilter aFilter);
+ async DevicePopErrorScope(RawId selfId) returns (PopErrorScopeResult result);
+
+ // Generate an error on the Device timeline for `deviceId`.
+ // The `message` parameter is interpreted as UTF-8.
+ async GenerateError(RawId? deviceId, GPUErrorFilter type, nsCString message);
+
+child:
+ async UncapturedError(RawId? aDeviceId, nsCString message);
+ async DropAction(ByteBuf buf);
+ async DeviceLost(RawId aDeviceId, uint8_t? reason, nsCString message);
+ async __delete__();
+};
+
+} // webgpu
+} // mozilla
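
Each `async Foo(...)` message above corresponds to a generated SendFoo method on one actor and a RecvFoo handler on the other, which is what the wrappers earlier in this patch call. For instance, the `async SamplerDrop(RawId selfId)` line pairs with the call already shown in Sampler::Cleanup (repeated here for illustration only):

    bridge->SendSamplerDrop(mId);  // handled by the parent actor's RecvSamplerDrop
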
diff --git a/dom/webgpu/ipc/PWebGPUTypes.ipdlh b/dom/webgpu/ipc/PWebGPUTypes.ipdlh
new file mode 100644
index 0000000000..98f062856c
--- /dev/null
+++ b/dom/webgpu/ipc/PWebGPUTypes.ipdlh
@@ -0,0 +1,26 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+using struct mozilla::null_t from "mozilla/ipc/IPCCore.h";
+
+namespace mozilla {
+namespace webgpu {
+
+struct BufferMapSuccess {
+ uint64_t offset;
+ uint64_t size;
+ bool writable;
+};
+
+struct BufferMapError {
+ nsCString message;
+};
+
+union BufferMapResult {
+ BufferMapSuccess;
+ BufferMapError;
+};
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/ipc/WebGPUChild.cpp b/dom/webgpu/ipc/WebGPUChild.cpp
new file mode 100644
index 0000000000..663dd5cb89
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUChild.cpp
@@ -0,0 +1,270 @@
+/* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WebGPUChild.h"
+
+#include "js/RootingAPI.h"
+#include "js/String.h"
+#include "js/TypeDecls.h"
+#include "js/Value.h"
+#include "js/Warnings.h" // JS::WarnUTF8
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/EnumTypeTraits.h"
+#include "mozilla/dom/Promise.h"
+#include "mozilla/dom/ScriptSettings.h"
+#include "mozilla/dom/WebGPUBinding.h"
+#include "mozilla/dom/GPUUncapturedErrorEvent.h"
+#include "mozilla/webgpu/ValidationError.h"
+#include "mozilla/webgpu/WebGPUTypes.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+#include "Adapter.h"
+#include "DeviceLostInfo.h"
+#include "PipelineLayout.h"
+#include "Sampler.h"
+#include "CompilationInfo.h"
+#include "mozilla/ipc/RawShmem.h"
+#include "Utility.h"
+
+#include <utility>
+
+namespace mozilla::webgpu {
+
+NS_IMPL_CYCLE_COLLECTION(WebGPUChild)
+
+void WebGPUChild::JsWarning(nsIGlobalObject* aGlobal,
+ const nsACString& aMessage) {
+ const auto& flatString = PromiseFlatCString(aMessage);
+ if (aGlobal) {
+ dom::AutoJSAPI api;
+ if (api.Init(aGlobal)) {
+ JS::WarnUTF8(api.cx(), "%s", flatString.get());
+ }
+ } else {
+ printf_stderr("Validation error without device target: %s\n",
+ flatString.get());
+ }
+}
+
+static UniquePtr<ffi::WGPUClient> initialize() {
+ ffi::WGPUInfrastructure infra = ffi::wgpu_client_new();
+ return UniquePtr<ffi::WGPUClient>{infra.client};
+}
+
+WebGPUChild::WebGPUChild() : mClient(initialize()) {}
+
+WebGPUChild::~WebGPUChild() = default;
+
+RefPtr<AdapterPromise> WebGPUChild::InstanceRequestAdapter(
+ const dom::GPURequestAdapterOptions& aOptions) {
+ const int max_ids = 10;
+ RawId ids[max_ids] = {0};
+ unsigned long count =
+ ffi::wgpu_client_make_adapter_ids(mClient.get(), ids, max_ids);
+
+ nsTArray<RawId> sharedIds(count);
+ for (unsigned long i = 0; i != count; ++i) {
+ sharedIds.AppendElement(ids[i]);
+ }
+
+ return SendInstanceRequestAdapter(aOptions, sharedIds)
+ ->Then(
+ GetCurrentSerialEventTarget(), __func__,
+ [](ipc::ByteBuf&& aInfoBuf) {
+ // Ideally, we'd just send an empty ByteBuf, but the IPC code
+ // complains if the capacity is zero...
+ // So for the case where an adapter wasn't found, we just
+ // transfer a single 0u64 in this buffer.
+ return aInfoBuf.mLen > sizeof(uint64_t)
+ ? AdapterPromise::CreateAndResolve(std::move(aInfoBuf),
+ __func__)
+ : AdapterPromise::CreateAndReject(Nothing(), __func__);
+ },
+ [](const ipc::ResponseRejectReason& aReason) {
+ return AdapterPromise::CreateAndReject(Some(aReason), __func__);
+ });
+}
+
+Maybe<DeviceRequest> WebGPUChild::AdapterRequestDevice(
+ RawId aSelfId, const ffi::WGPUDeviceDescriptor& aDesc) {
+ RawId id = ffi::wgpu_client_make_device_id(mClient.get(), aSelfId);
+
+ ByteBuf bb;
+ ffi::wgpu_client_serialize_device_descriptor(&aDesc, ToFFI(&bb));
+
+ DeviceRequest request;
+ request.mId = id;
+ request.mPromise = SendAdapterRequestDevice(aSelfId, std::move(bb), id);
+
+ return Some(std::move(request));
+}
+
+RawId WebGPUChild::RenderBundleEncoderFinish(
+ ffi::WGPURenderBundleEncoder& aEncoder, RawId aDeviceId,
+ const dom::GPURenderBundleDescriptor& aDesc) {
+ ffi::WGPURenderBundleDescriptor desc = {};
+
+ webgpu::StringHelper label(aDesc.mLabel);
+ desc.label = label.Get();
+
+ ipc::ByteBuf bb;
+ RawId id = ffi::wgpu_client_create_render_bundle(
+ mClient.get(), &aEncoder, aDeviceId, &desc, ToFFI(&bb));
+
+ SendDeviceAction(aDeviceId, std::move(bb));
+
+ return id;
+}
+
+RawId WebGPUChild::RenderBundleEncoderFinishError(RawId aDeviceId,
+ const nsString& aLabel) {
+ webgpu::StringHelper label(aLabel);
+
+ ipc::ByteBuf bb;
+ RawId id = ffi::wgpu_client_create_render_bundle_error(
+ mClient.get(), aDeviceId, label.Get(), ToFFI(&bb));
+
+ SendDeviceAction(aDeviceId, std::move(bb));
+
+ return id;
+}
+
+ipc::IPCResult WebGPUChild::RecvUncapturedError(const Maybe<RawId> aDeviceId,
+ const nsACString& aMessage) {
+ RefPtr<Device> device;
+ if (aDeviceId) {
+ const auto itr = mDeviceMap.find(*aDeviceId);
+ if (itr != mDeviceMap.end()) {
+ device = itr->second.get();
+ MOZ_ASSERT(device);
+ }
+ }
+ if (!device) {
+ JsWarning(nullptr, aMessage);
+ } else {
+    // We don't want to spam the console with errors indefinitely
+ if (device->CheckNewWarning(aMessage)) {
+ JsWarning(device->GetOwnerGlobal(), aMessage);
+
+ dom::GPUUncapturedErrorEventInit init;
+ init.mError = new ValidationError(device->GetParentObject(), aMessage);
+ RefPtr<mozilla::dom::GPUUncapturedErrorEvent> event =
+ dom::GPUUncapturedErrorEvent::Constructor(
+ device, u"uncapturederror"_ns, init);
+ device->DispatchEvent(*event);
+ }
+ }
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUChild::RecvDropAction(const ipc::ByteBuf& aByteBuf) {
+ const auto* byteBuf = ToFFI(&aByteBuf);
+ ffi::wgpu_client_drop_action(mClient.get(), byteBuf);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUChild::RecvDeviceLost(RawId aDeviceId,
+ Maybe<uint8_t> aReason,
+ const nsACString& aMessage) {
+ RefPtr<Device> device;
+ const auto itr = mDeviceMap.find(aDeviceId);
+ if (itr != mDeviceMap.end()) {
+ device = itr->second.get();
+ MOZ_ASSERT(device);
+ }
+
+ if (device) {
+ auto message = NS_ConvertUTF8toUTF16(aMessage);
+ if (aReason.isSome()) {
+ dom::GPUDeviceLostReason reason =
+ static_cast<dom::GPUDeviceLostReason>(*aReason);
+ device->ResolveLost(Some(reason), message);
+ } else {
+ device->ResolveLost(Nothing(), message);
+ }
+ }
+ return IPC_OK();
+}
+
+void WebGPUChild::DeviceCreateSwapChain(
+ RawId aSelfId, const RGBDescriptor& aRgbDesc, size_t maxBufferCount,
+ const layers::RemoteTextureOwnerId& aOwnerId,
+ bool aUseExternalTextureInSwapChain) {
+ RawId queueId = aSelfId; // TODO: multiple queues
+ nsTArray<RawId> bufferIds(maxBufferCount);
+ for (size_t i = 0; i < maxBufferCount; ++i) {
+ bufferIds.AppendElement(
+ ffi::wgpu_client_make_buffer_id(mClient.get(), aSelfId));
+ }
+ SendDeviceCreateSwapChain(aSelfId, queueId, aRgbDesc, bufferIds, aOwnerId,
+ aUseExternalTextureInSwapChain);
+}
+
+void WebGPUChild::QueueOnSubmittedWorkDone(
+ const RawId aSelfId, const RefPtr<dom::Promise>& aPromise) {
+ SendQueueOnSubmittedWorkDone(aSelfId)->Then(
+ GetCurrentSerialEventTarget(), __func__,
+ [aPromise]() { aPromise->MaybeResolveWithUndefined(); },
+ [aPromise](const ipc::ResponseRejectReason& aReason) {
+ aPromise->MaybeRejectWithNotSupportedError("IPC error");
+ });
+}
+
+void WebGPUChild::SwapChainPresent(RawId aTextureId,
+ const RemoteTextureId& aRemoteTextureId,
+ const RemoteTextureOwnerId& aOwnerId) {
+ // Hack: the function expects `DeviceId`, but it only uses it for `backend()`
+ // selection.
+ RawId encoderId = ffi::wgpu_client_make_encoder_id(mClient.get(), aTextureId);
+ SendSwapChainPresent(aTextureId, encoderId, aRemoteTextureId, aOwnerId);
+}
+
+void WebGPUChild::RegisterDevice(Device* const aDevice) {
+ mDeviceMap.insert({aDevice->mId, aDevice});
+}
+
+void WebGPUChild::UnregisterDevice(RawId aDeviceId) {
+ if (IsOpen()) {
+ SendDeviceDrop(aDeviceId);
+ }
+ mDeviceMap.erase(aDeviceId);
+}
+
+void WebGPUChild::FreeUnregisteredInParentDevice(RawId aId) {
+ ffi::wgpu_client_kill_device_id(mClient.get(), aId);
+ mDeviceMap.erase(aId);
+}
+
+void WebGPUChild::ActorDestroy(ActorDestroyReason) {
+ // Resolving the promise could cause us to update the original map if the
+ // callee frees the Device objects immediately. Since any remaining entries
+ // in the map are no longer valid, we can just move the map onto the stack.
+ const auto deviceMap = std::move(mDeviceMap);
+ mDeviceMap.clear();
+
+ for (const auto& targetIter : deviceMap) {
+ RefPtr<Device> device = targetIter.second.get();
+ if (!device) {
+ // The Device may have gotten freed when we resolved the Promise for
+ // another Device in the map.
+ continue;
+ }
+
+ device->ResolveLost(Nothing(), u"WebGPUChild destroyed"_ns);
+ }
+}
+
+void WebGPUChild::QueueSubmit(RawId aSelfId, RawId aDeviceId,
+ nsTArray<RawId>& aCommandBuffers) {
+ SendQueueSubmit(aSelfId, aDeviceId, aCommandBuffers,
+ mSwapChainTexturesWaitingForSubmit);
+ mSwapChainTexturesWaitingForSubmit.Clear();
+}
+
+void WebGPUChild::NotifyWaitForSubmit(RawId aTextureId) {
+ mSwapChainTexturesWaitingForSubmit.AppendElement(aTextureId);
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/ipc/WebGPUChild.h b/dom/webgpu/ipc/WebGPUChild.h
new file mode 100644
index 0000000000..37525420bd
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUChild.h
@@ -0,0 +1,115 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WEBGPU_CHILD_H_
+#define WEBGPU_CHILD_H_
+
+#include "mozilla/webgpu/PWebGPUChild.h"
+#include "mozilla/MozPromise.h"
+#include "mozilla/WeakPtr.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+
+namespace mozilla {
+namespace ipc {
+class UnsafeSharedMemoryHandle;
+} // namespace ipc
+namespace dom {
+struct GPURequestAdapterOptions;
+} // namespace dom
+namespace layers {
+class CompositorBridgeChild;
+} // namespace layers
+namespace webgpu {
+namespace ffi {
+struct WGPUClient;
+struct WGPULimits;
+struct WGPUTextureViewDescriptor;
+} // namespace ffi
+
+using AdapterPromise =
+ MozPromise<ipc::ByteBuf, Maybe<ipc::ResponseRejectReason>, true>;
+using PipelinePromise = MozPromise<RawId, ipc::ResponseRejectReason, true>;
+using DevicePromise = MozPromise<bool, ipc::ResponseRejectReason, true>;
+
+struct PipelineCreationContext {
+ RawId mParentId = 0;
+ RawId mImplicitPipelineLayoutId = 0;
+ nsTArray<RawId> mImplicitBindGroupLayoutIds;
+};
+
+struct DeviceRequest {
+ RawId mId = 0;
+ RefPtr<DevicePromise> mPromise;
+ // Note: we could put `ffi::WGPULimits` in here as well,
+ // but we don't want to #include ffi stuff in this header
+};
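+
+// Illustrative use of DeviceRequest (hypothetical caller shape; the real
+// requestDevice() glue is not shown in this header):
+//
+//   Maybe<DeviceRequest> request = child->AdapterRequestDevice(adapterId, desc);
+//   if (request) {
+//     RawId deviceId = request->mId;
+//     request->mPromise->Then(/* ... resolve or reject the JS promise ... */);
+//   }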
+
+ffi::WGPUByteBuf* ToFFI(ipc::ByteBuf* x);
+
+class WebGPUChild final : public PWebGPUChild, public SupportsWeakPtr {
+ public:
+ friend class layers::CompositorBridgeChild;
+
+ NS_DECL_CYCLE_COLLECTION_NATIVE_CLASS(WebGPUChild)
+ NS_INLINE_DECL_CYCLE_COLLECTING_NATIVE_REFCOUNTING_INHERITED(WebGPUChild)
+
+ public:
+ explicit WebGPUChild();
+
+ bool IsOpen() const { return CanSend(); }
+
+ RefPtr<AdapterPromise> InstanceRequestAdapter(
+ const dom::GPURequestAdapterOptions& aOptions);
+ Maybe<DeviceRequest> AdapterRequestDevice(RawId aSelfId,
+ const ffi::WGPUDeviceDescriptor&);
+ RawId RenderBundleEncoderFinish(ffi::WGPURenderBundleEncoder& aEncoder,
+ RawId aDeviceId,
+ const dom::GPURenderBundleDescriptor& aDesc);
+ RawId RenderBundleEncoderFinishError(RawId aDeviceId, const nsString& aLabel);
+
+ ffi::WGPUClient* GetClient() const { return mClient.get(); }
+
+ void DeviceCreateSwapChain(RawId aSelfId, const RGBDescriptor& aRgbDesc,
+ size_t maxBufferCount,
+ const layers::RemoteTextureOwnerId& aOwnerId,
+ bool aUseExternalTextureInSwapChain);
+
+ void QueueOnSubmittedWorkDone(const RawId aSelfId,
+ const RefPtr<dom::Promise>& aPromise);
+
+ void SwapChainPresent(RawId aTextureId,
+ const RemoteTextureId& aRemoteTextureId,
+ const RemoteTextureOwnerId& aOwnerId);
+
+ void RegisterDevice(Device* const aDevice);
+ void UnregisterDevice(RawId aId);
+ void FreeUnregisteredInParentDevice(RawId aId);
+
+ void QueueSubmit(RawId aSelfId, RawId aDeviceId,
+ nsTArray<RawId>& aCommandBuffers);
+ void NotifyWaitForSubmit(RawId aTextureId);
+
+ static void JsWarning(nsIGlobalObject* aGlobal, const nsACString& aMessage);
+
+ private:
+ virtual ~WebGPUChild();
+
+ UniquePtr<ffi::WGPUClient> const mClient;
+ std::unordered_map<RawId, WeakPtr<Device>> mDeviceMap;
+ nsTArray<RawId> mSwapChainTexturesWaitingForSubmit;
+
+ public:
+ ipc::IPCResult RecvUncapturedError(Maybe<RawId> aDeviceId,
+ const nsACString& aMessage);
+ ipc::IPCResult RecvDropAction(const ipc::ByteBuf& aByteBuf);
+ ipc::IPCResult RecvDeviceLost(RawId aDeviceId, Maybe<uint8_t> aReason,
+ const nsACString& aMessage);
+ void ActorDestroy(ActorDestroyReason) override;
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // WEBGPU_CHILD_H_
diff --git a/dom/webgpu/ipc/WebGPUParent.cpp b/dom/webgpu/ipc/WebGPUParent.cpp
new file mode 100644
index 0000000000..9b79988245
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUParent.cpp
@@ -0,0 +1,1557 @@
+/* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WebGPUParent.h"
+
+#include <unordered_set>
+
+#include "mozilla/PodOperations.h"
+#include "mozilla/ScopeExit.h"
+#include "mozilla/dom/WebGPUBinding.h"
+#include "mozilla/gfx/FileHandleWrapper.h"
+#include "mozilla/layers/CompositorThread.h"
+#include "mozilla/layers/ImageDataSerializer.h"
+#include "mozilla/layers/RemoteTextureMap.h"
+#include "mozilla/layers/TextureHost.h"
+#include "mozilla/layers/WebRenderImageHost.h"
+#include "mozilla/layers/WebRenderTextureHost.h"
+#include "mozilla/webgpu/ExternalTexture.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+
+#if defined(XP_WIN)
+# include "mozilla/gfx/DeviceManagerDx.h"
+#endif
+
+namespace mozilla::webgpu {
+
+const uint64_t POLL_TIME_MS = 100;
+
+static mozilla::LazyLogModule sLogger("WebGPU");
+
+namespace ffi {
+
+extern bool wgpu_server_use_external_texture_for_swap_chain(
+ void* aParam, WGPUSwapChainId aSwapChainId) {
+ auto* parent = static_cast<WebGPUParent*>(aParam);
+
+ return parent->UseExternalTextureForSwapChain(aSwapChainId);
+}
+
+extern bool wgpu_server_ensure_external_texture_for_swap_chain(
+ void* aParam, WGPUSwapChainId aSwapChainId, WGPUDeviceId aDeviceId,
+ WGPUTextureId aTextureId, uint32_t aWidth, uint32_t aHeight,
+ struct WGPUTextureFormat aFormat, WGPUTextureUsages aUsage) {
+ auto* parent = static_cast<WebGPUParent*>(aParam);
+
+ return parent->EnsureExternalTextureForSwapChain(
+ aSwapChainId, aDeviceId, aTextureId, aWidth, aHeight, aFormat, aUsage);
+}
+
+extern void* wgpu_server_get_external_texture_handle(void* aParam,
+ WGPUTextureId aId) {
+ auto* parent = static_cast<WebGPUParent*>(aParam);
+
+ auto texture = parent->GetExternalTexture(aId);
+ if (!texture) {
+ MOZ_ASSERT_UNREACHABLE("unexpected to be called");
+ return nullptr;
+ }
+
+ void* sharedHandle = nullptr;
+#ifdef XP_WIN
+ sharedHandle = texture->GetExternalTextureHandle();
+ if (!sharedHandle) {
+ MOZ_ASSERT_UNREACHABLE("unexpected to be called");
+ gfxCriticalNoteOnce << "Failed to get shared handle";
+ return nullptr;
+ }
+#else
+ MOZ_ASSERT_UNREACHABLE("unexpected to be called");
+#endif
+ return sharedHandle;
+}
+
+} // namespace ffi
+
+// A fixed-capacity buffer for receiving textual error messages from
+// `wgpu_bindings`.
+//
+// The `ToFFI` method returns an `ffi::WGPUErrorBuffer` pointing to our
+// buffer, for you to pass to fallible FFI-visible `wgpu_bindings`
+// functions. These indicate failure by storing an error message in the
+// buffer, which you can retrieve by calling `GetError`.
+//
+// If you call `ToFFI` on this type, you must also call `GetError` to check for
+// an error. Otherwise, the destructor asserts.
+//
+// TODO: refactor this to avoid stack-allocating the buffer all the time.
+class ErrorBuffer {
+ // if the message doesn't fit, it will be truncated
+ static constexpr unsigned BUFFER_SIZE = 512;
+ ffi::WGPUErrorBufferType mType = ffi::WGPUErrorBufferType_None;
+ char mMessageUtf8[BUFFER_SIZE] = {};
+ bool mAwaitingGetError = false;
+
+ public:
+ ErrorBuffer() { mMessageUtf8[0] = 0; }
+ ErrorBuffer(const ErrorBuffer&) = delete;
+ ~ErrorBuffer() { MOZ_ASSERT(!mAwaitingGetError); }
+
+ ffi::WGPUErrorBuffer ToFFI() {
+ mAwaitingGetError = true;
+ ffi::WGPUErrorBuffer errorBuf = {&mType, mMessageUtf8, BUFFER_SIZE};
+ return errorBuf;
+ }
+
+ ffi::WGPUErrorBufferType GetType() { return mType; }
+
+ static Maybe<dom::GPUErrorFilter> ErrorTypeToFilterType(
+ ffi::WGPUErrorBufferType aType) {
+ switch (aType) {
+ case ffi::WGPUErrorBufferType_None:
+ case ffi::WGPUErrorBufferType_DeviceLost:
+ return {};
+ case ffi::WGPUErrorBufferType_Internal:
+ return Some(dom::GPUErrorFilter::Internal);
+ case ffi::WGPUErrorBufferType_Validation:
+ return Some(dom::GPUErrorFilter::Validation);
+ case ffi::WGPUErrorBufferType_OutOfMemory:
+ return Some(dom::GPUErrorFilter::Out_of_memory);
+ case ffi::WGPUErrorBufferType_Sentinel:
+ break;
+ }
+
+ MOZ_CRASH("invalid `ErrorBufferType`");
+ }
+
+ struct Error {
+ dom::GPUErrorFilter type;
+ bool isDeviceLost;
+ nsCString message;
+ };
+
+  // Retrieve the error message that was stored in this buffer, if any.
+  // Returns Nothing() if no error was recorded (viz., if `GetType() ==
+  // ffi::WGPUErrorBufferType_None`).
+ //
+ // Mark this `ErrorBuffer` as having been handled, so its destructor
+ // won't assert.
+ Maybe<Error> GetError() {
+ mAwaitingGetError = false;
+ if (mType == ffi::WGPUErrorBufferType_DeviceLost) {
+ // This error is for a lost device, so we return an Error struct
+ // with the isDeviceLost bool set to true. It doesn't matter what
+ // GPUErrorFilter type we use, so we just use Validation. The error
+ // will not be reported.
+ return Some(Error{dom::GPUErrorFilter::Validation, true,
+ nsCString{mMessageUtf8}});
+ }
+ auto filterType = ErrorTypeToFilterType(mType);
+ if (!filterType) {
+ return {};
+ }
+ return Some(Error{*filterType, false, nsCString{mMessageUtf8}});
+ }
+};
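+
+// Typical usage sketch for the ToFFI()/GetError() contract described above
+// (illustrative only; `ForwardError` below is the helper that actually does
+// this for most handlers):
+//
+//   ErrorBuffer error;
+//   ffi::wgpu_server_buffer_unmap(mContext.get(), bufferId, error.ToFFI());
+//   if (auto e = error.GetError()) {
+//     ReportError(Some(deviceId), e->type, e->message);
+//   }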
+
+struct PendingSwapChainDrop {
+ layers::RemoteTextureTxnType mTxnType;
+ layers::RemoteTextureTxnId mTxnId;
+};
+
+class PresentationData {
+ NS_INLINE_DECL_REFCOUNTING(PresentationData);
+
+ public:
+ WeakPtr<WebGPUParent> mParent;
+ const bool mUseExternalTextureInSwapChain;
+ const RawId mDeviceId;
+ const RawId mQueueId;
+ const layers::RGBDescriptor mDesc;
+
+ uint64_t mSubmissionIndex = 0;
+
+ std::deque<std::shared_ptr<ExternalTexture>> mRecycledExternalTextures;
+
+ std::unordered_set<layers::RemoteTextureId, layers::RemoteTextureId::HashFn>
+ mWaitingReadbackTexturesForPresent;
+ Maybe<PendingSwapChainDrop> mPendingSwapChainDrop;
+
+ const uint32_t mSourcePitch;
+ std::vector<RawId> mUnassignedBufferIds MOZ_GUARDED_BY(mBuffersLock);
+ std::vector<RawId> mAvailableBufferIds MOZ_GUARDED_BY(mBuffersLock);
+ std::vector<RawId> mQueuedBufferIds MOZ_GUARDED_BY(mBuffersLock);
+ Mutex mBuffersLock;
+
+ PresentationData(WebGPUParent* aParent, bool aUseExternalTextureInSwapChain,
+ RawId aDeviceId, RawId aQueueId,
+ const layers::RGBDescriptor& aDesc, uint32_t aSourcePitch,
+ const nsTArray<RawId>& aBufferIds)
+ : mParent(aParent),
+ mUseExternalTextureInSwapChain(aUseExternalTextureInSwapChain),
+ mDeviceId(aDeviceId),
+ mQueueId(aQueueId),
+ mDesc(aDesc),
+ mSourcePitch(aSourcePitch),
+ mBuffersLock("WebGPU presentation buffers") {
+ MOZ_COUNT_CTOR(PresentationData);
+
+ for (const RawId id : aBufferIds) {
+ mUnassignedBufferIds.push_back(id);
+ }
+ }
+
+ private:
+ ~PresentationData() { MOZ_COUNT_DTOR(PresentationData); }
+};
+
+WebGPUParent::WebGPUParent() : mContext(ffi::wgpu_server_new(this)) {
+ mTimer.Start(base::TimeDelta::FromMilliseconds(POLL_TIME_MS), this,
+ &WebGPUParent::MaintainDevices);
+}
+
+WebGPUParent::~WebGPUParent() {
+ // All devices should have been dropped, but maybe they weren't. To
+ // ensure we don't leak memory, clear the mDeviceLostRequests.
+ mDeviceLostRequests.clear();
+}
+
+void WebGPUParent::MaintainDevices() {
+ ffi::wgpu_server_poll_all_devices(mContext.get(), false);
+}
+
+void WebGPUParent::LoseDevice(const RawId aDeviceId, Maybe<uint8_t> aReason,
+ const nsACString& aMessage) {
+ // Check to see if we've already sent a DeviceLost message to aDeviceId.
+ if (mLostDeviceIds.Contains(aDeviceId)) {
+ return;
+ }
+
+ // If the connection has been dropped, there is nobody to receive
+ // the DeviceLost message anyway.
+ if (CanSend()) {
+ if (!SendDeviceLost(aDeviceId, aReason, aMessage)) {
+ NS_ERROR("SendDeviceLost failed");
+ return;
+ }
+ }
+
+ mLostDeviceIds.Insert(aDeviceId);
+}
+
+bool WebGPUParent::ForwardError(const Maybe<RawId> aDeviceId,
+ ErrorBuffer& aError) {
+ if (auto error = aError.GetError()) {
+    // If this error has isDeviceLost set to true, then instead of reporting
+    // the error, we swallow it and call LoseDevice if we have an
+    // aDeviceId. This is to comply with the spec declaration in
+ // https://gpuweb.github.io/gpuweb/#lose-the-device
+ // "No errors are generated after device loss."
+ if (error->isDeviceLost) {
+ if (aDeviceId.isSome()) {
+ LoseDevice(*aDeviceId, Nothing(), error->message);
+ }
+ return false;
+ }
+ ReportError(aDeviceId, error->type, error->message);
+ return true;
+ }
+ return false;
+}
+
+// Generate an error on the Device timeline of aDeviceId.
+// aMessage is interpreted as UTF-8.
+void WebGPUParent::ReportError(const Maybe<RawId> aDeviceId,
+ const GPUErrorFilter aType,
+ const nsCString& aMessage) {
+ // find the appropriate error scope
+ if (aDeviceId) {
+ const auto& itr = mErrorScopeStackByDevice.find(*aDeviceId);
+ if (itr != mErrorScopeStackByDevice.end()) {
+ auto& stack = itr->second;
+ for (auto& scope : Reversed(stack)) {
+ if (scope.filter != aType) {
+ continue;
+ }
+ if (!scope.firstMessage) {
+ scope.firstMessage = Some(aMessage);
+ }
+ return;
+ }
+ }
+ }
+ // No error scope found, so fall back to the uncaptured error handler
+ if (!SendUncapturedError(aDeviceId, aMessage)) {
+ NS_ERROR("SendDeviceUncapturedError failed");
+ }
+}
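+
+// Example of the scope-matching rule above (sketch): given a per-device stack
+// of [{filter: Out_of_memory}, {filter: Validation}], a Validation error is
+// recorded as the firstMessage of the innermost Validation scope and never
+// reaches SendUncapturedError, while an Internal error matches neither scope
+// and falls through to the uncaptured-error path.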
+
+ipc::IPCResult WebGPUParent::RecvInstanceRequestAdapter(
+ const dom::GPURequestAdapterOptions& aOptions,
+ const nsTArray<RawId>& aTargetIds,
+ InstanceRequestAdapterResolver&& resolver) {
+ ffi::WGPURequestAdapterOptions options = {};
+ if (aOptions.mPowerPreference.WasPassed()) {
+ options.power_preference = static_cast<ffi::WGPUPowerPreference>(
+ aOptions.mPowerPreference.Value());
+ } else {
+ options.power_preference = ffi::WGPUPowerPreference_LowPower;
+ }
+ options.force_fallback_adapter = aOptions.mForceFallbackAdapter;
+
+ auto luid = GetCompositorDeviceLuid();
+
+ ErrorBuffer error;
+ int8_t index = ffi::wgpu_server_instance_request_adapter(
+ mContext.get(), &options, aTargetIds.Elements(), aTargetIds.Length(),
+ luid.ptrOr(nullptr), error.ToFFI());
+
+ ByteBuf infoByteBuf;
+ // Rust side expects an `Option`, so 0 maps to `None`.
+ uint64_t adapterId = 0;
+ if (index >= 0) {
+ adapterId = aTargetIds[index];
+ }
+ ffi::wgpu_server_adapter_pack_info(mContext.get(), adapterId,
+ ToFFI(&infoByteBuf));
+ resolver(std::move(infoByteBuf));
+ ForwardError(0, error);
+
+ // free the unused IDs
+ ipc::ByteBuf dropByteBuf;
+ for (size_t i = 0; i < aTargetIds.Length(); ++i) {
+ if (static_cast<int8_t>(i) != index) {
+ wgpu_server_adapter_free(aTargetIds[i], ToFFI(&dropByteBuf));
+ }
+ }
+ if (dropByteBuf.mData && !SendDropAction(std::move(dropByteBuf))) {
+ NS_ERROR("Unable to free free unused adapter IDs");
+ }
+ return IPC_OK();
+}
+
+/* static */ void WebGPUParent::DeviceLostCallback(uint8_t* aUserData,
+ uint8_t aReason,
+ const char* aMessage) {
+ DeviceLostRequest* req = reinterpret_cast<DeviceLostRequest*>(aUserData);
+ if (!req->mParent) {
+ // Parent is dead, never mind.
+ return;
+ }
+
+ RawId deviceId = req->mDeviceId;
+
+ // If aReason is 0, that corresponds to the "unknown" reason, which
+ // we treat as a Nothing() value. Any other value (which is positive)
+ // is mapped to the GPUDeviceLostReason values by subtracting 1.
+ Maybe<uint8_t> reason;
+ if (aReason > 0) {
+ uint8_t mappedReasonValue = (aReason - 1u);
+ reason = Some(mappedReasonValue);
+ }
+ nsAutoCString message(aMessage);
+ req->mParent->LoseDevice(deviceId, reason, message);
+
+ // We're no longer tracking the memory for this callback, so erase
+ // it to ensure we don't leak memory.
+ req->mParent->mDeviceLostRequests.erase(deviceId);
+}
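+
+// Worked example of the reason mapping above (assuming, as in
+// RecvAdapterRequestDevice below, that dom::GPUDeviceLostReason value 0 means
+// "destroyed"):
+//   aReason == 0  ->  Nothing()   (unknown reason)
+//   aReason == 1  ->  Some(0)     ("destroyed")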
+
+ipc::IPCResult WebGPUParent::RecvAdapterRequestDevice(
+ RawId aAdapterId, const ipc::ByteBuf& aByteBuf, RawId aDeviceId,
+ AdapterRequestDeviceResolver&& resolver) {
+ ErrorBuffer error;
+ ffi::wgpu_server_adapter_request_device(
+ mContext.get(), aAdapterId, ToFFI(&aByteBuf), aDeviceId, error.ToFFI());
+ if (ForwardError(0, error)) {
+ uint8_t reasonDestroyed = 0; // GPUDeviceLostReason::Destroyed
+ auto maybeError = error.GetError();
+ MOZ_ASSERT(maybeError.isSome());
+ LoseDevice(aDeviceId, Some(reasonDestroyed), maybeError->message);
+ resolver(false);
+ return IPC_OK();
+ }
+
+ mErrorScopeStackByDevice.insert({aDeviceId, {}});
+
+ // Setup the device lost callback.
+ std::unique_ptr<DeviceLostRequest> req(
+ new DeviceLostRequest{this, aDeviceId});
+ auto iter = mDeviceLostRequests.insert({aDeviceId, std::move(req)});
+ MOZ_ASSERT(iter.second, "Should be able to insert DeviceLostRequest.");
+ auto record = iter.first;
+ DeviceLostRequest* req_shadow = (record->second).get();
+ ffi::WGPUDeviceLostClosureC callback = {
+ &DeviceLostCallback, reinterpret_cast<uint8_t*>(req_shadow)};
+ ffi::wgpu_server_set_device_lost_callback(mContext.get(), aDeviceId,
+ callback);
+
+ resolver(true);
+
+#if defined(XP_WIN)
+ HANDLE handle =
+ wgpu_server_get_device_fence_handle(mContext.get(), aDeviceId);
+ if (handle) {
+ mFenceHandle = new gfx::FileHandleWrapper(UniqueFileHandle(handle));
+ }
+#endif
+
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvAdapterDrop(RawId aAdapterId) {
+ ffi::wgpu_server_adapter_drop(mContext.get(), aAdapterId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvDeviceDestroy(RawId aDeviceId) {
+ ffi::wgpu_server_device_destroy(mContext.get(), aDeviceId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvDeviceDrop(RawId aDeviceId) {
+ ffi::wgpu_server_device_drop(mContext.get(), aDeviceId);
+ MOZ_ASSERT(mDeviceLostRequests.find(aDeviceId) == mDeviceLostRequests.end(),
+ "DeviceLostRequest should have been invoked, then erased.");
+
+ mErrorScopeStackByDevice.erase(aDeviceId);
+ mLostDeviceIds.Remove(aDeviceId);
+ return IPC_OK();
+}
+
+WebGPUParent::BufferMapData* WebGPUParent::GetBufferMapData(RawId aBufferId) {
+ const auto iter = mSharedMemoryMap.find(aBufferId);
+ if (iter == mSharedMemoryMap.end()) {
+ return nullptr;
+ }
+
+ return &iter->second;
+}
+
+ipc::IPCResult WebGPUParent::RecvDeviceCreateBuffer(
+ RawId aDeviceId, RawId aBufferId, dom::GPUBufferDescriptor&& aDesc,
+ ipc::UnsafeSharedMemoryHandle&& aShmem) {
+ webgpu::StringHelper label(aDesc.mLabel);
+
+ auto shmem =
+ ipc::WritableSharedMemoryMapping::Open(std::move(aShmem)).value();
+
+ bool hasMapFlags = aDesc.mUsage & (dom::GPUBufferUsage_Binding::MAP_WRITE |
+ dom::GPUBufferUsage_Binding::MAP_READ);
+ bool shmAllocationFailed = false;
+ if (hasMapFlags || aDesc.mMappedAtCreation) {
+ if (shmem.Size() < aDesc.mSize) {
+ MOZ_RELEASE_ASSERT(shmem.Size() == 0);
+ // If we requested a non-zero mappable buffer and get a size of zero, it
+ // indicates that the shmem allocation failed on the client side.
+ shmAllocationFailed = true;
+ } else {
+ uint64_t offset = 0;
+ uint64_t size = 0;
+
+ if (aDesc.mMappedAtCreation) {
+ size = aDesc.mSize;
+ }
+
+ BufferMapData data = {std::move(shmem), hasMapFlags, offset, size,
+ aDeviceId};
+ mSharedMemoryMap.insert({aBufferId, std::move(data)});
+ }
+ }
+
+ ErrorBuffer error;
+ ffi::wgpu_server_device_create_buffer(mContext.get(), aDeviceId, aBufferId,
+ label.Get(), aDesc.mSize, aDesc.mUsage,
+ aDesc.mMappedAtCreation,
+ shmAllocationFailed, error.ToFFI());
+ ForwardError(aDeviceId, error);
+ return IPC_OK();
+}
+
+struct MapRequest {
+ RefPtr<WebGPUParent> mParent;
+ ffi::WGPUGlobal* mContext;
+ ffi::WGPUBufferId mBufferId;
+ ffi::WGPUHostMap mHostMap;
+ uint64_t mOffset;
+ uint64_t mSize;
+ WebGPUParent::BufferMapResolver mResolver;
+};
+
+static const char* MapStatusString(ffi::WGPUBufferMapAsyncStatus status) {
+ switch (status) {
+ case ffi::WGPUBufferMapAsyncStatus_Success:
+ return "Success";
+ case ffi::WGPUBufferMapAsyncStatus_AlreadyMapped:
+ return "Already mapped";
+ case ffi::WGPUBufferMapAsyncStatus_MapAlreadyPending:
+ return "Map is already pending";
+ case ffi::WGPUBufferMapAsyncStatus_Aborted:
+ return "Map aborted";
+ case ffi::WGPUBufferMapAsyncStatus_ContextLost:
+ return "Context lost";
+ case ffi::WGPUBufferMapAsyncStatus_Invalid:
+ return "Invalid buffer";
+ case ffi::WGPUBufferMapAsyncStatus_InvalidRange:
+ return "Invalid range";
+ case ffi::WGPUBufferMapAsyncStatus_InvalidAlignment:
+ return "Invalid alignment";
+ case ffi::WGPUBufferMapAsyncStatus_InvalidUsageFlags:
+ return "Invalid usage flags";
+ case ffi::WGPUBufferMapAsyncStatus_Error:
+ return "Map failed";
+ case ffi::WGPUBufferMapAsyncStatus_Sentinel: // For -Wswitch
+ break;
+ }
+
+ MOZ_CRASH("Bad ffi::WGPUBufferMapAsyncStatus");
+}
+
+void WebGPUParent::MapCallback(ffi::WGPUBufferMapAsyncStatus aStatus,
+ uint8_t* aUserData) {
+ auto* req = reinterpret_cast<MapRequest*>(aUserData);
+
+ if (!req->mParent->CanSend()) {
+ delete req;
+ return;
+ }
+
+ BufferMapResult result;
+
+ auto bufferId = req->mBufferId;
+ auto* mapData = req->mParent->GetBufferMapData(bufferId);
+ MOZ_RELEASE_ASSERT(mapData);
+
+ if (aStatus != ffi::WGPUBufferMapAsyncStatus_Success) {
+ // A buffer map operation that fails with a DeviceError gets
+ // mapped to the ContextLost status. If we have this status, we
+ // need to lose the device.
+ if (aStatus == ffi::WGPUBufferMapAsyncStatus_ContextLost) {
+ req->mParent->LoseDevice(
+ mapData->mDeviceId, Nothing(),
+ nsPrintfCString("Buffer %" PRIu64 " invalid", bufferId));
+ }
+
+ result = BufferMapError(nsPrintfCString("Mapping WebGPU buffer failed: %s",
+ MapStatusString(aStatus)));
+ } else {
+ auto size = req->mSize;
+ auto offset = req->mOffset;
+
+ if (req->mHostMap == ffi::WGPUHostMap_Read && size > 0) {
+ ErrorBuffer error;
+ const auto src = ffi::wgpu_server_buffer_get_mapped_range(
+ req->mContext, req->mBufferId, offset, size, error.ToFFI());
+
+ MOZ_RELEASE_ASSERT(!error.GetError());
+
+ MOZ_RELEASE_ASSERT(mapData->mShmem.Size() >= offset + size);
+ if (src.ptr != nullptr && src.length >= size) {
+ auto dst = mapData->mShmem.Bytes().Subspan(offset, size);
+ memcpy(dst.data(), src.ptr, size);
+ }
+ }
+
+ result =
+ BufferMapSuccess(offset, size, req->mHostMap == ffi::WGPUHostMap_Write);
+
+ mapData->mMappedOffset = offset;
+ mapData->mMappedSize = size;
+ }
+
+ req->mResolver(std::move(result));
+ delete req;
+}
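+
+// Data-flow summary for buffer mapping (sketch): for a Read mapping, the
+// callback above copies the wgpu-side mapped range into the shared memory so
+// the child can read it; for a Write mapping, the reverse copy from shared
+// memory into the mapped range happens later, in RecvBufferUnmap.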
+
+ipc::IPCResult WebGPUParent::RecvBufferMap(RawId aDeviceId, RawId aBufferId,
+ uint32_t aMode, uint64_t aOffset,
+ uint64_t aSize,
+ BufferMapResolver&& aResolver) {
+ MOZ_LOG(sLogger, LogLevel::Info,
+ ("RecvBufferMap %" PRIu64 " offset=%" PRIu64 " size=%" PRIu64 "\n",
+ aBufferId, aOffset, aSize));
+
+ ffi::WGPUHostMap mode;
+ switch (aMode) {
+ case dom::GPUMapMode_Binding::READ:
+ mode = ffi::WGPUHostMap_Read;
+ break;
+ case dom::GPUMapMode_Binding::WRITE:
+ mode = ffi::WGPUHostMap_Write;
+ break;
+ default: {
+ nsCString errorString(
+ "GPUBuffer.mapAsync 'mode' argument must be either GPUMapMode.READ "
+ "or GPUMapMode.WRITE");
+ aResolver(BufferMapError(errorString));
+ return IPC_OK();
+ }
+ }
+
+ auto* mapData = GetBufferMapData(aBufferId);
+
+ if (!mapData) {
+ nsCString errorString("Buffer is not mappable");
+ aResolver(BufferMapError(errorString));
+ return IPC_OK();
+ }
+
+ auto* request =
+ new MapRequest{this, mContext.get(), aBufferId, mode,
+ aOffset, aSize, std::move(aResolver)};
+
+ ffi::WGPUBufferMapCallbackC callback = {&MapCallback,
+ reinterpret_cast<uint8_t*>(request)};
+ ErrorBuffer mapError;
+ ffi::wgpu_server_buffer_map(mContext.get(), aBufferId, aOffset, aSize, mode,
+ callback, mapError.ToFFI());
+ ForwardError(aDeviceId, mapError);
+
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvBufferUnmap(RawId aDeviceId, RawId aBufferId,
+ bool aFlush) {
+ MOZ_LOG(sLogger, LogLevel::Info,
+ ("RecvBufferUnmap %" PRIu64 " flush=%d\n", aBufferId, aFlush));
+
+ auto* mapData = GetBufferMapData(aBufferId);
+
+ if (mapData && aFlush) {
+ uint64_t offset = mapData->mMappedOffset;
+ uint64_t size = mapData->mMappedSize;
+
+ ErrorBuffer getRangeError;
+ const auto mapped = ffi::wgpu_server_buffer_get_mapped_range(
+ mContext.get(), aBufferId, offset, size, getRangeError.ToFFI());
+ ForwardError(aDeviceId, getRangeError);
+
+ if (mapped.ptr != nullptr && mapped.length >= size) {
+ auto shmSize = mapData->mShmem.Size();
+ MOZ_RELEASE_ASSERT(offset <= shmSize);
+ MOZ_RELEASE_ASSERT(size <= shmSize - offset);
+
+ auto src = mapData->mShmem.Bytes().Subspan(offset, size);
+ memcpy(mapped.ptr, src.data(), size);
+ }
+
+ mapData->mMappedOffset = 0;
+ mapData->mMappedSize = 0;
+ }
+
+ ErrorBuffer unmapError;
+ ffi::wgpu_server_buffer_unmap(mContext.get(), aBufferId, unmapError.ToFFI());
+ ForwardError(aDeviceId, unmapError);
+
+ if (mapData && !mapData->mHasMapFlags) {
+ // We get here if the buffer was mapped at creation without map flags.
+ // We don't need the shared memory anymore.
+ DeallocBufferShmem(aBufferId);
+ }
+
+ return IPC_OK();
+}
+
+void WebGPUParent::DeallocBufferShmem(RawId aBufferId) {
+ const auto iter = mSharedMemoryMap.find(aBufferId);
+ if (iter != mSharedMemoryMap.end()) {
+ mSharedMemoryMap.erase(iter);
+ }
+}
+
+ipc::IPCResult WebGPUParent::RecvBufferDrop(RawId aBufferId) {
+ ffi::wgpu_server_buffer_drop(mContext.get(), aBufferId);
+ MOZ_LOG(sLogger, LogLevel::Info, ("RecvBufferDrop %" PRIu64 "\n", aBufferId));
+
+ DeallocBufferShmem(aBufferId);
+
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvBufferDestroy(RawId aBufferId) {
+ ffi::wgpu_server_buffer_destroy(mContext.get(), aBufferId);
+ MOZ_LOG(sLogger, LogLevel::Info,
+ ("RecvBufferDestroy %" PRIu64 "\n", aBufferId));
+
+ DeallocBufferShmem(aBufferId);
+
+ return IPC_OK();
+}
+
+void WebGPUParent::RemoveExternalTexture(RawId aTextureId) {
+ auto it = mExternalTextures.find(aTextureId);
+ if (it != mExternalTextures.end()) {
+ mExternalTextures.erase(it);
+ }
+}
+
+ipc::IPCResult WebGPUParent::RecvTextureDestroy(RawId aTextureId,
+ RawId aDeviceId) {
+ ffi::wgpu_server_texture_destroy(mContext.get(), aTextureId);
+ RemoveExternalTexture(aTextureId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvTextureDrop(RawId aTextureId) {
+ ffi::wgpu_server_texture_drop(mContext.get(), aTextureId);
+ RemoveExternalTexture(aTextureId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvTextureViewDrop(RawId aTextureViewId) {
+ ffi::wgpu_server_texture_view_drop(mContext.get(), aTextureViewId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvSamplerDrop(RawId aSamplerId) {
+ ffi::wgpu_server_sampler_drop(mContext.get(), aSamplerId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvCommandEncoderFinish(
+ RawId aEncoderId, RawId aDeviceId,
+ const dom::GPUCommandBufferDescriptor& aDesc) {
+ Unused << aDesc;
+ ffi::WGPUCommandBufferDescriptor desc = {};
+
+ webgpu::StringHelper label(aDesc.mLabel);
+ desc.label = label.Get();
+
+ ErrorBuffer error;
+ ffi::wgpu_server_encoder_finish(mContext.get(), aEncoderId, &desc,
+ error.ToFFI());
+
+ ForwardError(aDeviceId, error);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvCommandEncoderDrop(RawId aEncoderId) {
+ ffi::wgpu_server_encoder_drop(mContext.get(), aEncoderId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvRenderBundleDrop(RawId aBundleId) {
+ ffi::wgpu_server_render_bundle_drop(mContext.get(), aBundleId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvQueueSubmit(
+ RawId aQueueId, RawId aDeviceId, const nsTArray<RawId>& aCommandBuffers,
+ const nsTArray<RawId>& aTextureIds) {
+ ErrorBuffer error;
+ auto index = ffi::wgpu_server_queue_submit(
+ mContext.get(), aQueueId, aCommandBuffers.Elements(),
+ aCommandBuffers.Length(), error.ToFFI());
+ // Check if index is valid. 0 means error.
+ if (index != 0) {
+ for (const auto& textureId : aTextureIds) {
+ auto it = mExternalTextures.find(textureId);
+ if (it != mExternalTextures.end()) {
+ auto& externalTexture = it->second;
+
+ externalTexture->SetSubmissionIndex(index);
+ }
+ }
+ }
+ ForwardError(aDeviceId, error);
+ return IPC_OK();
+}
+
+struct OnSubmittedWorkDoneRequest {
+ RefPtr<WebGPUParent> mParent;
+ WebGPUParent::QueueOnSubmittedWorkDoneResolver mResolver;
+};
+
+void OnSubmittedWorkDoneCallback(uint8_t* userdata) {
+ auto req = std::unique_ptr<OnSubmittedWorkDoneRequest>(
+ reinterpret_cast<OnSubmittedWorkDoneRequest*>(userdata));
+ if (req->mParent->CanSend()) {
+ req->mResolver(void_t());
+ }
+}
+
+ipc::IPCResult WebGPUParent::RecvQueueOnSubmittedWorkDone(
+ RawId aQueueId, std::function<void(mozilla::void_t)>&& aResolver) {
+ std::unique_ptr<OnSubmittedWorkDoneRequest> request(
+ new OnSubmittedWorkDoneRequest{this, std::move(aResolver)});
+
+ ffi::WGPUSubmittedWorkDoneClosureC callback = {
+ &OnSubmittedWorkDoneCallback,
+ reinterpret_cast<uint8_t*>(request.release())};
+ ffi::wgpu_server_on_submitted_work_done(mContext.get(), aQueueId, callback);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvQueueWriteAction(
+ RawId aQueueId, RawId aDeviceId, const ipc::ByteBuf& aByteBuf,
+ ipc::UnsafeSharedMemoryHandle&& aShmem) {
+ auto mapping =
+ ipc::WritableSharedMemoryMapping::Open(std::move(aShmem)).value();
+
+ ErrorBuffer error;
+ ffi::wgpu_server_queue_write_action(mContext.get(), aQueueId,
+ ToFFI(&aByteBuf), mapping.Bytes().data(),
+ mapping.Size(), error.ToFFI());
+ ForwardError(aDeviceId, error);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvBindGroupLayoutDrop(RawId aBindGroupId) {
+ ffi::wgpu_server_bind_group_layout_drop(mContext.get(), aBindGroupId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvPipelineLayoutDrop(RawId aLayoutId) {
+ ffi::wgpu_server_pipeline_layout_drop(mContext.get(), aLayoutId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvBindGroupDrop(RawId aBindGroupId) {
+ ffi::wgpu_server_bind_group_drop(mContext.get(), aBindGroupId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvShaderModuleDrop(RawId aModuleId) {
+ ffi::wgpu_server_shader_module_drop(mContext.get(), aModuleId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvComputePipelineDrop(RawId aPipelineId) {
+ ffi::wgpu_server_compute_pipeline_drop(mContext.get(), aPipelineId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvRenderPipelineDrop(RawId aPipelineId) {
+ ffi::wgpu_server_render_pipeline_drop(mContext.get(), aPipelineId);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvImplicitLayoutDrop(
+ RawId aImplicitPlId, const nsTArray<RawId>& aImplicitBglIds) {
+ ffi::wgpu_server_pipeline_layout_drop(mContext.get(), aImplicitPlId);
+ for (const auto& id : aImplicitBglIds) {
+ ffi::wgpu_server_bind_group_layout_drop(mContext.get(), id);
+ }
+ return IPC_OK();
+}
+
+// TODO: proper destruction
+
+ipc::IPCResult WebGPUParent::RecvDeviceCreateSwapChain(
+ RawId aDeviceId, RawId aQueueId, const RGBDescriptor& aDesc,
+ const nsTArray<RawId>& aBufferIds,
+ const layers::RemoteTextureOwnerId& aOwnerId,
+ bool aUseExternalTextureInSwapChain) {
+ switch (aDesc.format()) {
+ case gfx::SurfaceFormat::R8G8B8A8:
+ case gfx::SurfaceFormat::B8G8R8A8:
+ break;
+ default:
+ MOZ_ASSERT_UNREACHABLE("Invalid surface format!");
+ return IPC_OK();
+ }
+
+ const auto bufferStrideWithMask =
+ Device::BufferStrideWithMask(aDesc.size(), aDesc.format());
+ if (!bufferStrideWithMask.isValid()) {
+ MOZ_ASSERT_UNREACHABLE("Invalid width / buffer stride!");
+ return IPC_OK();
+ }
+
+ constexpr uint32_t kBufferAlignmentMask = 0xff;
+ const uint32_t bufferStride =
+ bufferStrideWithMask.value() & ~kBufferAlignmentMask;
+
+ const auto rows = CheckedInt<uint32_t>(aDesc.size().height);
+ if (!rows.isValid()) {
+ MOZ_ASSERT_UNREACHABLE("Invalid height!");
+ return IPC_OK();
+ }
+
+ if (!mRemoteTextureOwner) {
+ mRemoteTextureOwner =
+ MakeRefPtr<layers::RemoteTextureOwnerClient>(OtherPid());
+ }
+ mRemoteTextureOwner->RegisterTextureOwner(aOwnerId);
+
+ auto data = MakeRefPtr<PresentationData>(this, aUseExternalTextureInSwapChain,
+ aDeviceId, aQueueId, aDesc,
+ bufferStride, aBufferIds);
+ if (!mPresentationDataMap.emplace(aOwnerId, data).second) {
+ NS_ERROR("External image is already registered as WebGPU canvas!");
+ }
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvDeviceCreateShaderModule(
+ RawId aDeviceId, RawId aModuleId, const nsString& aLabel,
+ const nsCString& aCode, DeviceCreateShaderModuleResolver&& aOutMessage) {
+ // TODO: this should probably be an optional label in the IPC message.
+ const nsACString* label = nullptr;
+ NS_ConvertUTF16toUTF8 utf8Label(aLabel);
+ if (!utf8Label.IsEmpty()) {
+ label = &utf8Label;
+ }
+
+ ffi::WGPUShaderModuleCompilationMessage message;
+ ErrorBuffer error;
+
+ bool ok = ffi::wgpu_server_device_create_shader_module(
+ mContext.get(), aDeviceId, aModuleId, label, &aCode, &message,
+ error.ToFFI());
+
+ ForwardError(aDeviceId, error);
+
+ nsTArray<WebGPUCompilationMessage> messages;
+
+ if (!ok) {
+ WebGPUCompilationMessage msg;
+ msg.lineNum = message.line_number;
+ msg.linePos = message.line_pos;
+ msg.offset = message.utf16_offset;
+ msg.length = message.utf16_length;
+ msg.message = message.message;
+ // wgpu currently only returns errors.
+ msg.messageType = WebGPUCompilationMessageType::Error;
+
+ messages.AppendElement(msg);
+ }
+
+ aOutMessage(messages);
+
+ return IPC_OK();
+}
+
+struct ReadbackPresentRequest {
+ ReadbackPresentRequest(
+ const ffi::WGPUGlobal* aContext, RefPtr<PresentationData>& aData,
+ RefPtr<layers::RemoteTextureOwnerClient>& aRemoteTextureOwner,
+ const layers::RemoteTextureId aTextureId,
+ const layers::RemoteTextureOwnerId aOwnerId)
+ : mContext(aContext),
+ mData(aData),
+ mRemoteTextureOwner(aRemoteTextureOwner),
+ mTextureId(aTextureId),
+ mOwnerId(aOwnerId) {}
+
+ const ffi::WGPUGlobal* mContext;
+ RefPtr<PresentationData> mData;
+ RefPtr<layers::RemoteTextureOwnerClient> mRemoteTextureOwner;
+ const layers::RemoteTextureId mTextureId;
+ const layers::RemoteTextureOwnerId mOwnerId;
+};
+
+static void ReadbackPresentCallback(ffi::WGPUBufferMapAsyncStatus status,
+ uint8_t* userdata) {
+ UniquePtr<ReadbackPresentRequest> req(
+ reinterpret_cast<ReadbackPresentRequest*>(userdata));
+
+ const auto onExit = mozilla::MakeScopeExit([&]() {
+ auto& waitingTextures = req->mData->mWaitingReadbackTexturesForPresent;
+ auto it = waitingTextures.find(req->mTextureId);
+ MOZ_ASSERT(it != waitingTextures.end());
+ if (it != waitingTextures.end()) {
+ waitingTextures.erase(it);
+ }
+ if (req->mData->mPendingSwapChainDrop.isSome() && waitingTextures.empty()) {
+ if (req->mData->mParent) {
+ auto& pendingDrop = req->mData->mPendingSwapChainDrop.ref();
+ req->mData->mParent->RecvSwapChainDrop(
+ req->mOwnerId, pendingDrop.mTxnType, pendingDrop.mTxnId);
+ req->mData->mPendingSwapChainDrop = Nothing();
+ }
+ }
+ });
+
+ if (!req->mRemoteTextureOwner->IsRegistered(req->mOwnerId)) {
+ // SwapChain is already Destroyed
+ return;
+ }
+
+ PresentationData* data = req->mData.get();
+ // get the buffer ID
+ RawId bufferId;
+ {
+ MutexAutoLock lock(data->mBuffersLock);
+ bufferId = data->mQueuedBufferIds.back();
+ data->mQueuedBufferIds.pop_back();
+ }
+
+ // Ensure we'll make the bufferId available for reuse
+ auto releaseBuffer = MakeScopeExit([data = RefPtr{data}, bufferId] {
+ MutexAutoLock lock(data->mBuffersLock);
+ data->mAvailableBufferIds.push_back(bufferId);
+ });
+
+ MOZ_LOG(sLogger, LogLevel::Info,
+ ("ReadbackPresentCallback for buffer %" PRIu64 " status=%d\n",
+ bufferId, status));
+ // copy the data
+ if (status == ffi::WGPUBufferMapAsyncStatus_Success) {
+ const auto bufferSize = data->mDesc.size().height * data->mSourcePitch;
+ ErrorBuffer getRangeError;
+ const auto mapped = ffi::wgpu_server_buffer_get_mapped_range(
+ req->mContext, bufferId, 0, bufferSize, getRangeError.ToFFI());
+ if (req->mData->mParent) {
+ req->mData->mParent->ForwardError(data->mDeviceId, getRangeError);
+ } else if (auto innerError = getRangeError.GetError()) {
+      // If an error occurred in get_mapped_range, treat it as an internal error
+      // and crash. The error handling story for something unexpected happening
+      // during the present glue needs to be figured out in a more global way.
+ MOZ_LOG(sLogger, LogLevel::Info,
+ ("WebGPU present: buffer get_mapped_range failed: %s\n",
+ innerError->message.get()));
+ }
+
+ MOZ_RELEASE_ASSERT(mapped.length >= bufferSize);
+ auto textureData =
+ req->mRemoteTextureOwner->CreateOrRecycleBufferTextureData(
+ data->mDesc.size(), data->mDesc.format(), req->mOwnerId);
+ if (!textureData) {
+ gfxCriticalNoteOnce << "Failed to allocate BufferTextureData";
+ return;
+ }
+ layers::MappedTextureData mappedData;
+ if (textureData && textureData->BorrowMappedData(mappedData)) {
+ uint8_t* src = mapped.ptr;
+ uint8_t* dst = mappedData.data;
+ for (auto row = 0; row < data->mDesc.size().height; ++row) {
+ memcpy(dst, src, mappedData.stride);
+ dst += mappedData.stride;
+ src += data->mSourcePitch;
+ }
+ req->mRemoteTextureOwner->PushTexture(req->mTextureId, req->mOwnerId,
+ std::move(textureData));
+ } else {
+ NS_WARNING("WebGPU present skipped: the swapchain is resized!");
+ }
+ ErrorBuffer unmapError;
+ wgpu_server_buffer_unmap(req->mContext, bufferId, unmapError.ToFFI());
+ if (req->mData->mParent) {
+ req->mData->mParent->ForwardError(data->mDeviceId, unmapError);
+ } else if (auto innerError = unmapError.GetError()) {
+ MOZ_LOG(sLogger, LogLevel::Info,
+ ("WebGPU present: buffer unmap failed: %s\n",
+ innerError->message.get()));
+ }
+ } else {
+ // TODO: better handle errors
+ NS_WARNING("WebGPU frame mapping failed!");
+ }
+}
+
+ipc::IPCResult WebGPUParent::GetFrontBufferSnapshot(
+ IProtocol* aProtocol, const layers::RemoteTextureOwnerId& aOwnerId,
+ Maybe<Shmem>& aShmem, gfx::IntSize& aSize) {
+ const auto& lookup = mPresentationDataMap.find(aOwnerId);
+ if (lookup == mPresentationDataMap.end() || !mRemoteTextureOwner ||
+ !mRemoteTextureOwner->IsRegistered(aOwnerId)) {
+ return IPC_OK();
+ }
+
+ RefPtr<PresentationData> data = lookup->second.get();
+ aSize = data->mDesc.size();
+ uint32_t stride = layers::ImageDataSerializer::ComputeRGBStride(
+ data->mDesc.format(), aSize.width);
+ uint32_t len = data->mDesc.size().height * stride;
+ Shmem shmem;
+ if (!AllocShmem(len, &shmem)) {
+ return IPC_OK();
+ }
+
+ mRemoteTextureOwner->GetLatestBufferSnapshot(aOwnerId, shmem, aSize);
+ aShmem.emplace(std::move(shmem));
+
+ return IPC_OK();
+}
+
+void WebGPUParent::PostExternalTexture(
+ const std::shared_ptr<ExternalTexture>&& aExternalTexture,
+ const layers::RemoteTextureId aRemoteTextureId,
+ const layers::RemoteTextureOwnerId aOwnerId) {
+ const auto& lookup = mPresentationDataMap.find(aOwnerId);
+ if (lookup == mPresentationDataMap.end() || !mRemoteTextureOwner ||
+ !mRemoteTextureOwner->IsRegistered(aOwnerId)) {
+ NS_WARNING("WebGPU presenting on a destroyed swap chain!");
+ return;
+ }
+
+ const auto surfaceFormat = gfx::SurfaceFormat::B8G8R8A8;
+ const auto size = aExternalTexture->GetSize();
+ const auto index = aExternalTexture->GetSubmissionIndex();
+ MOZ_ASSERT(index != 0);
+
+ Maybe<gfx::FenceInfo> fenceInfo;
+ if (mFenceHandle) {
+ fenceInfo = Some(gfx::FenceInfo(mFenceHandle, index));
+ }
+
+ Maybe<layers::SurfaceDescriptor> desc =
+ aExternalTexture->ToSurfaceDescriptor(fenceInfo);
+ if (!desc) {
+ MOZ_ASSERT_UNREACHABLE("unexpected to be called");
+ return;
+ }
+
+ mRemoteTextureOwner->PushTexture(aRemoteTextureId, aOwnerId, aExternalTexture,
+ size, surfaceFormat, *desc);
+
+ RefPtr<PresentationData> data = lookup->second.get();
+
+ auto recycledTexture = mRemoteTextureOwner->GetRecycledExternalTexture(
+ size, surfaceFormat, desc->type(), aOwnerId);
+ if (recycledTexture) {
+ data->mRecycledExternalTextures.push_back(recycledTexture);
+ }
+}
+
+ipc::IPCResult WebGPUParent::RecvSwapChainPresent(
+ RawId aTextureId, RawId aCommandEncoderId,
+ const layers::RemoteTextureId& aRemoteTextureId,
+ const layers::RemoteTextureOwnerId& aOwnerId) {
+ // step 0: get the data associated with the swapchain
+ const auto& lookup = mPresentationDataMap.find(aOwnerId);
+ if (lookup == mPresentationDataMap.end() || !mRemoteTextureOwner ||
+ !mRemoteTextureOwner->IsRegistered(aOwnerId)) {
+ NS_WARNING("WebGPU presenting on a destroyed swap chain!");
+ return IPC_OK();
+ }
+
+ RefPtr<PresentationData> data = lookup->second.get();
+
+ if (data->mUseExternalTextureInSwapChain) {
+ auto it = mExternalTextures.find(aTextureId);
+ if (it == mExternalTextures.end()) {
+ MOZ_ASSERT_UNREACHABLE("unexpected to be called");
+ return IPC_OK();
+ }
+ std::shared_ptr<ExternalTexture> externalTexture = it->second;
+ mExternalTextures.erase(it);
+
+ PostExternalTexture(std::move(externalTexture), aRemoteTextureId, aOwnerId);
+ return IPC_OK();
+ }
+
+ RawId bufferId = 0;
+ const auto& size = data->mDesc.size();
+ const auto bufferSize = data->mDesc.size().height * data->mSourcePitch;
+
+ // step 1: find an available staging buffer, or create one
+ {
+ MutexAutoLock lock(data->mBuffersLock);
+ if (!data->mAvailableBufferIds.empty()) {
+ bufferId = data->mAvailableBufferIds.back();
+ data->mAvailableBufferIds.pop_back();
+ } else if (!data->mUnassignedBufferIds.empty()) {
+ bufferId = data->mUnassignedBufferIds.back();
+ data->mUnassignedBufferIds.pop_back();
+
+ ffi::WGPUBufferUsages usage =
+ WGPUBufferUsages_COPY_DST | WGPUBufferUsages_MAP_READ;
+
+ ErrorBuffer error;
+ ffi::wgpu_server_device_create_buffer(mContext.get(), data->mDeviceId,
+ bufferId, nullptr, bufferSize,
+ usage, false, false, error.ToFFI());
+ if (ForwardError(data->mDeviceId, error)) {
+ return IPC_OK();
+ }
+ } else {
+ bufferId = 0;
+ }
+
+ if (bufferId) {
+ data->mQueuedBufferIds.insert(data->mQueuedBufferIds.begin(), bufferId);
+ }
+ }
+
+ MOZ_LOG(sLogger, LogLevel::Info,
+ ("RecvSwapChainPresent with buffer %" PRIu64 "\n", bufferId));
+ if (!bufferId) {
+    // TODO: add a warning - no buffers are available!
+ return IPC_OK();
+ }
+
+ // step 3: submit a copy command for the frame
+ ffi::WGPUCommandEncoderDescriptor encoderDesc = {};
+ {
+ ErrorBuffer error;
+ ffi::wgpu_server_device_create_encoder(mContext.get(), data->mDeviceId,
+ &encoderDesc, aCommandEncoderId,
+ error.ToFFI());
+ if (ForwardError(data->mDeviceId, error)) {
+ return IPC_OK();
+ }
+ }
+
+ const ffi::WGPUImageCopyTexture texView = {
+ aTextureId,
+ };
+ const ffi::WGPUImageDataLayout bufLayout = {
+ 0,
+ &data->mSourcePitch,
+ nullptr,
+ };
+ const ffi::WGPUExtent3d extent = {
+ static_cast<uint32_t>(size.width),
+ static_cast<uint32_t>(size.height),
+ 1,
+ };
+
+ {
+ ErrorBuffer error;
+ ffi::wgpu_server_encoder_copy_texture_to_buffer(
+ mContext.get(), aCommandEncoderId, &texView, bufferId, &bufLayout,
+ &extent, error.ToFFI());
+ if (ForwardError(data->mDeviceId, error)) {
+ return IPC_OK();
+ }
+ }
+ ffi::WGPUCommandBufferDescriptor commandDesc = {};
+ {
+ ErrorBuffer error;
+ ffi::wgpu_server_encoder_finish(mContext.get(), aCommandEncoderId,
+ &commandDesc, error.ToFFI());
+ if (ForwardError(data->mDeviceId, error)) {
+ return IPC_OK();
+ }
+ }
+
+ {
+ ErrorBuffer error;
+ ffi::wgpu_server_queue_submit(mContext.get(), data->mQueueId,
+ &aCommandEncoderId, 1, error.ToFFI());
+ if (ForwardError(data->mDeviceId, error)) {
+ return IPC_OK();
+ }
+ }
+
+ auto& waitingTextures = data->mWaitingReadbackTexturesForPresent;
+ auto it = waitingTextures.find(aRemoteTextureId);
+ MOZ_ASSERT(it == waitingTextures.end());
+ if (it == waitingTextures.end()) {
+ waitingTextures.emplace(aRemoteTextureId);
+ }
+
+ // step 4: request the pixels to be copied into the external texture
+  // TODO: this isn't strictly necessary. When WR wants to Lock() the external
+  // texture, we can just give it the contents of the last mapped buffer
+  // instead of the copy.
+ auto presentRequest = MakeUnique<ReadbackPresentRequest>(
+ mContext.get(), data, mRemoteTextureOwner, aRemoteTextureId, aOwnerId);
+
+ ffi::WGPUBufferMapCallbackC callback = {
+ &ReadbackPresentCallback,
+ reinterpret_cast<uint8_t*>(presentRequest.release())};
+
+ ErrorBuffer error;
+ ffi::wgpu_server_buffer_map(mContext.get(), bufferId, 0, bufferSize,
+ ffi::WGPUHostMap_Read, callback, error.ToFFI());
+ if (ForwardError(data->mDeviceId, error)) {
+ return IPC_OK();
+ }
+
+ return IPC_OK();
+}
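+
+// Staging-buffer lifecycle for the readback present path above (summary
+// sketch):
+//   mUnassignedBufferIds --(buffer created, step 1)--------> mQueuedBufferIds
+//   mQueuedBufferIds --(map completes, ReadbackPresentCallback)--> mAvailableBufferIds
+//   mAvailableBufferIds --(reused by the next present)------> mQueuedBufferIds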
+
+ipc::IPCResult WebGPUParent::RecvSwapChainDrop(
+ const layers::RemoteTextureOwnerId& aOwnerId,
+ layers::RemoteTextureTxnType aTxnType, layers::RemoteTextureTxnId aTxnId) {
+ const auto& lookup = mPresentationDataMap.find(aOwnerId);
+ MOZ_ASSERT(lookup != mPresentationDataMap.end());
+ if (lookup == mPresentationDataMap.end()) {
+ NS_WARNING("WebGPU presenting on a destroyed swap chain!");
+ return IPC_OK();
+ }
+
+ RefPtr<PresentationData> data = lookup->second.get();
+
+ auto waitingCount = data->mWaitingReadbackTexturesForPresent.size();
+ if (waitingCount > 0) {
+ // Defer SwapChainDrop until readback complete
+ data->mPendingSwapChainDrop = Some(PendingSwapChainDrop{aTxnType, aTxnId});
+ return IPC_OK();
+ }
+
+ if (mRemoteTextureOwner) {
+ if (aTxnType && aTxnId) {
+ mRemoteTextureOwner->WaitForTxn(aOwnerId, aTxnType, aTxnId);
+ }
+ mRemoteTextureOwner->UnregisterTextureOwner(aOwnerId);
+ }
+
+ mPresentationDataMap.erase(lookup);
+
+ MutexAutoLock lock(data->mBuffersLock);
+ ipc::ByteBuf dropByteBuf;
+ for (const auto bid : data->mUnassignedBufferIds) {
+ wgpu_server_buffer_free(bid, ToFFI(&dropByteBuf));
+ }
+ if (dropByteBuf.mData && !SendDropAction(std::move(dropByteBuf))) {
+ NS_WARNING("Unable to free an ID for non-assigned buffer");
+ }
+ for (const auto bid : data->mAvailableBufferIds) {
+ ffi::wgpu_server_buffer_drop(mContext.get(), bid);
+ }
+ for (const auto bid : data->mQueuedBufferIds) {
+ ffi::wgpu_server_buffer_drop(mContext.get(), bid);
+ }
+ return IPC_OK();
+}
+
+void WebGPUParent::ActorDestroy(ActorDestroyReason aWhy) {
+ mTimer.Stop();
+ mPresentationDataMap.clear();
+ if (mRemoteTextureOwner) {
+ mRemoteTextureOwner->UnregisterAllTextureOwners();
+ mRemoteTextureOwner = nullptr;
+ }
+ ffi::wgpu_server_poll_all_devices(mContext.get(), true);
+ mContext = nullptr;
+}
+
+ipc::IPCResult WebGPUParent::RecvDeviceAction(RawId aDeviceId,
+ const ipc::ByteBuf& aByteBuf) {
+ ErrorBuffer error;
+ ffi::wgpu_server_device_action(mContext.get(), aDeviceId, ToFFI(&aByteBuf),
+ error.ToFFI());
+
+ ForwardError(aDeviceId, error);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvDeviceActionWithAck(
+ RawId aDeviceId, const ipc::ByteBuf& aByteBuf,
+ DeviceActionWithAckResolver&& aResolver) {
+ auto result = RecvDeviceAction(aDeviceId, aByteBuf);
+ aResolver(true);
+ return result;
+}
+
+ipc::IPCResult WebGPUParent::RecvTextureAction(RawId aTextureId,
+ RawId aDeviceId,
+ const ipc::ByteBuf& aByteBuf) {
+ ErrorBuffer error;
+ ffi::wgpu_server_texture_action(mContext.get(), aTextureId, ToFFI(&aByteBuf),
+ error.ToFFI());
+
+ ForwardError(aDeviceId, error);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvCommandEncoderAction(
+ RawId aEncoderId, RawId aDeviceId, const ipc::ByteBuf& aByteBuf) {
+ ErrorBuffer error;
+ ffi::wgpu_server_command_encoder_action(mContext.get(), aEncoderId,
+ ToFFI(&aByteBuf), error.ToFFI());
+ ForwardError(aDeviceId, error);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvBumpImplicitBindGroupLayout(RawId aPipelineId,
+ bool aIsCompute,
+ uint32_t aIndex,
+ RawId aAssignId) {
+ ErrorBuffer error;
+ if (aIsCompute) {
+ ffi::wgpu_server_compute_pipeline_get_bind_group_layout(
+ mContext.get(), aPipelineId, aIndex, aAssignId, error.ToFFI());
+ } else {
+ ffi::wgpu_server_render_pipeline_get_bind_group_layout(
+ mContext.get(), aPipelineId, aIndex, aAssignId, error.ToFFI());
+ }
+
+ ForwardError(0, error);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvDevicePushErrorScope(
+ RawId aDeviceId, const dom::GPUErrorFilter aFilter) {
+ const auto& itr = mErrorScopeStackByDevice.find(aDeviceId);
+ if (itr == mErrorScopeStackByDevice.end()) {
+ // Content can cause this simply by destroying a device and then
+ // calling `pushErrorScope`.
+ return IPC_OK();
+ }
+ auto& stack = itr->second;
+
+ // Let's prevent `while (true) { pushErrorScope(); }`.
+ constexpr size_t MAX_ERROR_SCOPE_STACK_SIZE = 1'000'000;
+ if (stack.size() >= MAX_ERROR_SCOPE_STACK_SIZE) {
+ nsPrintfCString m("pushErrorScope: Hit MAX_ERROR_SCOPE_STACK_SIZE of %zu",
+ MAX_ERROR_SCOPE_STACK_SIZE);
+ ReportError(Some(aDeviceId), dom::GPUErrorFilter::Out_of_memory, m);
+ return IPC_OK();
+ }
+
+ const auto newScope = ErrorScope{aFilter};
+ stack.push_back(newScope);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvDevicePopErrorScope(
+ RawId aDeviceId, DevicePopErrorScopeResolver&& aResolver) {
+ const auto popResult = [&]() {
+ const auto& itr = mErrorScopeStackByDevice.find(aDeviceId);
+ if (itr == mErrorScopeStackByDevice.end()) {
+ // Content can cause this simply by destroying a device and then
+ // calling `popErrorScope`.
+ return PopErrorScopeResult{PopErrorScopeResultType::DeviceLost};
+ }
+
+ auto& stack = itr->second;
+ if (!stack.size()) {
+ // Content can cause this simply by calling `popErrorScope` when
+ // there is no error scope pushed.
+ return PopErrorScopeResult{PopErrorScopeResultType::ThrowOperationError,
+ "popErrorScope on empty stack"_ns};
+ }
+
+ const auto& scope = stack.back();
+ const auto popLater = MakeScopeExit([&]() { stack.pop_back(); });
+
+ auto ret = PopErrorScopeResult{PopErrorScopeResultType::NoError};
+ if (scope.firstMessage) {
+ ret.message = *scope.firstMessage;
+ switch (scope.filter) {
+ case dom::GPUErrorFilter::Validation:
+ ret.resultType = PopErrorScopeResultType::ValidationError;
+ break;
+ case dom::GPUErrorFilter::Out_of_memory:
+ ret.resultType = PopErrorScopeResultType::OutOfMemory;
+ break;
+ case dom::GPUErrorFilter::Internal:
+ ret.resultType = PopErrorScopeResultType::InternalError;
+ break;
+ case dom::GPUErrorFilter::EndGuard_:
+ MOZ_CRASH("Bad GPUErrorFilter");
+ }
+ }
+ return ret;
+ }();
+ aResolver(popResult);
+ return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvGenerateError(const Maybe<RawId> aDeviceId,
+ const dom::GPUErrorFilter aType,
+ const nsCString& aMessage) {
+ ReportError(aDeviceId, aType, aMessage);
+ return IPC_OK();
+}
+
+bool WebGPUParent::UseExternalTextureForSwapChain(
+ ffi::WGPUSwapChainId aSwapChainId) {
+ auto ownerId = layers::RemoteTextureOwnerId{aSwapChainId._0};
+ const auto& lookup = mPresentationDataMap.find(ownerId);
+ if (lookup == mPresentationDataMap.end()) {
+ MOZ_ASSERT_UNREACHABLE("unexpected to be called");
+    return false;
+ }
+
+ RefPtr<PresentationData> data = lookup->second.get();
+
+ return data->mUseExternalTextureInSwapChain;
+}
+
+bool WebGPUParent::EnsureExternalTextureForSwapChain(
+ ffi::WGPUSwapChainId aSwapChainId, ffi::WGPUDeviceId aDeviceId,
+ ffi::WGPUTextureId aTextureId, uint32_t aWidth, uint32_t aHeight,
+ struct ffi::WGPUTextureFormat aFormat, ffi::WGPUTextureUsages aUsage) {
+ auto ownerId = layers::RemoteTextureOwnerId{aSwapChainId._0};
+ const auto& lookup = mPresentationDataMap.find(ownerId);
+ if (lookup == mPresentationDataMap.end()) {
+ MOZ_ASSERT_UNREACHABLE("unexpected to be called");
+ return false;
+ }
+
+ RefPtr<PresentationData> data = lookup->second.get();
+ if (!data->mUseExternalTextureInSwapChain) {
+ MOZ_ASSERT_UNREACHABLE("unexpected to be called");
+ return false;
+ }
+
+  // Recycle an existing ExternalTexture if one is available.
+ if (!data->mRecycledExternalTextures.empty()) {
+ std::shared_ptr<ExternalTexture> texture =
+ data->mRecycledExternalTextures.front();
+ // Check if the texture is recyclable.
+ if (texture->mWidth == aWidth && texture->mHeight == aHeight &&
+ texture->mFormat.tag == aFormat.tag && texture->mUsage == aUsage) {
+ data->mRecycledExternalTextures.pop_front();
+ mExternalTextures.emplace(aTextureId, texture);
+ return true;
+ }
+ data->mRecycledExternalTextures.clear();
+ }
+
+ auto externalTexture = CreateExternalTexture(aDeviceId, aTextureId, aWidth,
+ aHeight, aFormat, aUsage);
+ if (!externalTexture) {
+ return false;
+ }
+ return true;
+}
+
+std::shared_ptr<ExternalTexture> WebGPUParent::CreateExternalTexture(
+ ffi::WGPUDeviceId aDeviceId, ffi::WGPUTextureId aTextureId, uint32_t aWidth,
+ uint32_t aHeight, const struct ffi::WGPUTextureFormat aFormat,
+ ffi::WGPUTextureUsages aUsage) {
+ MOZ_RELEASE_ASSERT(mExternalTextures.find(aTextureId) ==
+ mExternalTextures.end());
+
+ UniquePtr<ExternalTexture> texture =
+ ExternalTexture::Create(aWidth, aHeight, aFormat, aUsage);
+ if (!texture) {
+ MOZ_ASSERT_UNREACHABLE("unexpected to be called");
+ return nullptr;
+ }
+
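+  // Hand ownership from the UniquePtr to a shared_ptr so the texture can be
+  // stored in mExternalTextures and shared with later consumers (it may also
+  // be recycled for the swap chain).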
+ std::shared_ptr<ExternalTexture> shared(texture.release());
+ mExternalTextures.emplace(aTextureId, shared);
+
+ return shared;
+}
+
+std::shared_ptr<ExternalTexture> WebGPUParent::GetExternalTexture(
+ ffi::WGPUTextureId aId) {
+ auto it = mExternalTextures.find(aId);
+ if (it == mExternalTextures.end()) {
+ return nullptr;
+ }
+ return it->second;
+}
+
+/* static */
+Maybe<ffi::WGPUFfiLUID> WebGPUParent::GetCompositorDeviceLuid() {
+#if defined(XP_WIN)
+ const RefPtr<ID3D11Device> d3d11Device =
+ gfx::DeviceManagerDx::Get()->GetCompositorDevice();
+ if (!d3d11Device) {
+ gfxCriticalNoteOnce << "CompositorDevice does not exist";
+ return Nothing();
+ }
+
+ RefPtr<IDXGIDevice> dxgiDevice;
+ d3d11Device->QueryInterface((IDXGIDevice**)getter_AddRefs(dxgiDevice));
+
+ RefPtr<IDXGIAdapter> dxgiAdapter;
+ dxgiDevice->GetAdapter(getter_AddRefs(dxgiAdapter));
+
+ DXGI_ADAPTER_DESC desc;
+ if (FAILED(dxgiAdapter->GetDesc(&desc))) {
+ gfxCriticalNoteOnce << "Failed to get DXGI_ADAPTER_DESC";
+ return Nothing();
+ }
+
+ return Some(
+ ffi::WGPUFfiLUID{desc.AdapterLuid.LowPart, desc.AdapterLuid.HighPart});
+#else
+ return Nothing();
+#endif
+}
+
+} // namespace mozilla::webgpu
diff --git a/dom/webgpu/ipc/WebGPUParent.h b/dom/webgpu/ipc/WebGPUParent.h
new file mode 100644
index 0000000000..6ad539c21e
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUParent.h
@@ -0,0 +1,238 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WEBGPU_PARENT_H_
+#define WEBGPU_PARENT_H_
+
+#include <unordered_map>
+
+#include "mozilla/WeakPtr.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+#include "mozilla/webgpu/PWebGPUParent.h"
+#include "mozilla/webrender/WebRenderAPI.h"
+#include "mozilla/ipc/RawShmem.h"
+#include "WebGPUTypes.h"
+#include "base/timer.h"
+
+namespace mozilla {
+
+namespace layers {
+class RemoteTextureOwnerClient;
+} // namespace layers
+
+namespace webgpu {
+
+class ErrorBuffer;
+class ExternalTexture;
+class PresentationData;
+
+// Destroy/Drop messages:
+// - Messages with "Destroy" in their name request deallocation of resources
+//   owned by the object and put the object in a destroyed state without
+//   deleting the object itself. It is still safe to refer to these objects.
+// - Messages with "Drop" in their name can be thought of as C++ destructors.
+//   They completely delete the object, so future attempts to access it will
+//   crash. The child process should *never* send a Drop message if it still
+//   holds references to the object. An object that has been destroyed still
+//   needs to be dropped when the last reference to it dies on the child
+//   process.
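+//
+// As an illustrative example (not an exhaustive protocol description): content
+// may send TextureDestroy to release the underlying GPU resources early, keep
+// referring to the destroyed GPUTexture object, and only send TextureDrop once
+// the last reference to it on the child side goes away.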
+
+class WebGPUParent final : public PWebGPUParent, public SupportsWeakPtr {
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WebGPUParent, override)
+
+ public:
+ explicit WebGPUParent();
+
+ ipc::IPCResult RecvInstanceRequestAdapter(
+ const dom::GPURequestAdapterOptions& aOptions,
+ const nsTArray<RawId>& aTargetIds,
+ InstanceRequestAdapterResolver&& resolver);
+ ipc::IPCResult RecvAdapterRequestDevice(
+ RawId aAdapterId, const ipc::ByteBuf& aByteBuf, RawId aDeviceId,
+ AdapterRequestDeviceResolver&& resolver);
+ ipc::IPCResult RecvAdapterDrop(RawId aAdapterId);
+ ipc::IPCResult RecvDeviceDestroy(RawId aDeviceId);
+ ipc::IPCResult RecvDeviceDrop(RawId aDeviceId);
+ ipc::IPCResult RecvDeviceCreateBuffer(RawId aDeviceId, RawId aBufferId,
+ dom::GPUBufferDescriptor&& aDesc,
+ ipc::UnsafeSharedMemoryHandle&& aShmem);
+ ipc::IPCResult RecvBufferMap(RawId aDeviceId, RawId aBufferId, uint32_t aMode,
+ uint64_t aOffset, uint64_t size,
+ BufferMapResolver&& aResolver);
+ ipc::IPCResult RecvBufferUnmap(RawId aDeviceId, RawId aBufferId, bool aFlush);
+ ipc::IPCResult RecvBufferDestroy(RawId aBufferId);
+ ipc::IPCResult RecvBufferDrop(RawId aBufferId);
+ ipc::IPCResult RecvTextureDestroy(RawId aTextureId, RawId aDeviceId);
+ ipc::IPCResult RecvTextureDrop(RawId aTextureId);
+ ipc::IPCResult RecvTextureViewDrop(RawId aTextureViewId);
+ ipc::IPCResult RecvSamplerDrop(RawId aSamplerId);
+ ipc::IPCResult RecvCommandEncoderFinish(
+ RawId aEncoderId, RawId aDeviceId,
+ const dom::GPUCommandBufferDescriptor& aDesc);
+ ipc::IPCResult RecvCommandEncoderDrop(RawId aEncoderId);
+ ipc::IPCResult RecvCommandBufferDrop(RawId aCommandBufferId);
+ ipc::IPCResult RecvRenderBundleDrop(RawId aBundleId);
+ ipc::IPCResult RecvQueueSubmit(RawId aQueueId, RawId aDeviceId,
+ const nsTArray<RawId>& aCommandBuffers,
+ const nsTArray<RawId>& aTextureIds);
+ ipc::IPCResult RecvQueueOnSubmittedWorkDone(
+ RawId aQueueId, std::function<void(mozilla::void_t)>&& aResolver);
+ ipc::IPCResult RecvQueueWriteAction(RawId aQueueId, RawId aDeviceId,
+ const ipc::ByteBuf& aByteBuf,
+ ipc::UnsafeSharedMemoryHandle&& aShmem);
+ ipc::IPCResult RecvBindGroupLayoutDrop(RawId aBindGroupLayoutId);
+ ipc::IPCResult RecvPipelineLayoutDrop(RawId aPipelineLayoutId);
+ ipc::IPCResult RecvBindGroupDrop(RawId aBindGroupId);
+ ipc::IPCResult RecvShaderModuleDrop(RawId aModuleId);
+ ipc::IPCResult RecvComputePipelineDrop(RawId aPipelineId);
+ ipc::IPCResult RecvRenderPipelineDrop(RawId aPipelineId);
+ ipc::IPCResult RecvImplicitLayoutDrop(RawId aImplicitPlId,
+ const nsTArray<RawId>& aImplicitBglIds);
+ ipc::IPCResult RecvDeviceCreateSwapChain(
+ RawId aDeviceId, RawId aQueueId, const layers::RGBDescriptor& aDesc,
+ const nsTArray<RawId>& aBufferIds,
+ const layers::RemoteTextureOwnerId& aOwnerId,
+ bool aUseExternalTextureInSwapChain);
+ ipc::IPCResult RecvDeviceCreateShaderModule(
+ RawId aDeviceId, RawId aModuleId, const nsString& aLabel,
+ const nsCString& aCode, DeviceCreateShaderModuleResolver&& aOutMessage);
+
+ ipc::IPCResult RecvSwapChainPresent(
+ RawId aTextureId, RawId aCommandEncoderId,
+ const layers::RemoteTextureId& aRemoteTextureId,
+ const layers::RemoteTextureOwnerId& aOwnerId);
+ ipc::IPCResult RecvSwapChainDrop(const layers::RemoteTextureOwnerId& aOwnerId,
+ layers::RemoteTextureTxnType aTxnType,
+ layers::RemoteTextureTxnId aTxnId);
+
+ ipc::IPCResult RecvDeviceAction(RawId aDeviceId,
+ const ipc::ByteBuf& aByteBuf);
+ ipc::IPCResult RecvDeviceActionWithAck(
+ RawId aDeviceId, const ipc::ByteBuf& aByteBuf,
+ DeviceActionWithAckResolver&& aResolver);
+ ipc::IPCResult RecvTextureAction(RawId aTextureId, RawId aDevice,
+ const ipc::ByteBuf& aByteBuf);
+ ipc::IPCResult RecvCommandEncoderAction(RawId aEncoderId, RawId aDeviceId,
+ const ipc::ByteBuf& aByteBuf);
+ ipc::IPCResult RecvBumpImplicitBindGroupLayout(RawId aPipelineId,
+ bool aIsCompute,
+ uint32_t aIndex,
+ RawId aAssignId);
+
+ ipc::IPCResult RecvDevicePushErrorScope(RawId aDeviceId, dom::GPUErrorFilter);
+ ipc::IPCResult RecvDevicePopErrorScope(
+ RawId aDeviceId, DevicePopErrorScopeResolver&& aResolver);
+ ipc::IPCResult RecvGenerateError(Maybe<RawId> aDeviceId, dom::GPUErrorFilter,
+ const nsCString& message);
+
+ ipc::IPCResult GetFrontBufferSnapshot(
+ IProtocol* aProtocol, const layers::RemoteTextureOwnerId& aOwnerId,
+ Maybe<Shmem>& aShmem, gfx::IntSize& aSize);
+
+ void ActorDestroy(ActorDestroyReason aWhy) override;
+
+ struct BufferMapData {
+ ipc::WritableSharedMemoryMapping mShmem;
+ // True if buffer's usage has MAP_READ or MAP_WRITE set.
+ bool mHasMapFlags;
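+    // Offset and size, in bytes, of the currently mapped range (if the buffer
+    // is mapped).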
+ uint64_t mMappedOffset;
+ uint64_t mMappedSize;
+ RawId mDeviceId;
+ };
+
+ BufferMapData* GetBufferMapData(RawId aBufferId);
+
+ bool UseExternalTextureForSwapChain(ffi::WGPUSwapChainId aSwapChainId);
+
+ bool EnsureExternalTextureForSwapChain(ffi::WGPUSwapChainId aSwapChainId,
+ ffi::WGPUDeviceId aDeviceId,
+ ffi::WGPUTextureId aTextureId,
+ uint32_t aWidth, uint32_t aHeight,
+ struct ffi::WGPUTextureFormat aFormat,
+ ffi::WGPUTextureUsages aUsage);
+
+ std::shared_ptr<ExternalTexture> CreateExternalTexture(
+ ffi::WGPUDeviceId aDeviceId, ffi::WGPUTextureId aTextureId,
+ uint32_t aWidth, uint32_t aHeight,
+ const struct ffi::WGPUTextureFormat aFormat,
+ ffi::WGPUTextureUsages aUsage);
+
+ std::shared_ptr<ExternalTexture> GetExternalTexture(ffi::WGPUTextureId aId);
+
+ void PostExternalTexture(
+ const std::shared_ptr<ExternalTexture>&& aExternalTexture,
+ const layers::RemoteTextureId aRemoteTextureId,
+ const layers::RemoteTextureOwnerId aOwnerId);
+
+ bool ForwardError(const RawId aDeviceId, ErrorBuffer& aError) {
+ return ForwardError(Some(aDeviceId), aError);
+ }
+
+ private:
+ static void MapCallback(ffi::WGPUBufferMapAsyncStatus aStatus,
+ uint8_t* aUserData);
+ static void DeviceLostCallback(uint8_t* aUserData, uint8_t aReason,
+ const char* aMessage);
+ void DeallocBufferShmem(RawId aBufferId);
+
+ void RemoveExternalTexture(RawId aTextureId);
+
+ virtual ~WebGPUParent();
+ void MaintainDevices();
+ void LoseDevice(const RawId aDeviceId, Maybe<uint8_t> aReason,
+ const nsACString& aMessage);
+
+ bool ForwardError(Maybe<RawId> aDeviceId, ErrorBuffer& aError);
+
+ void ReportError(Maybe<RawId> aDeviceId, GPUErrorFilter,
+ const nsCString& message);
+
+ static Maybe<ffi::WGPUFfiLUID> GetCompositorDeviceLuid();
+
+ UniquePtr<ffi::WGPUGlobal> mContext;
+ base::RepeatingTimer<WebGPUParent> mTimer;
+
+ /// A map from wgpu buffer ids to data about their shared memory segments.
+  /// Includes entries for mappedAtCreation, MAP_READ and MAP_WRITE buffers,
+  /// regardless of their state.
+ std::unordered_map<uint64_t, BufferMapData> mSharedMemoryMap;
+ /// Associated presentation data for each swapchain.
+ std::unordered_map<layers::RemoteTextureOwnerId, RefPtr<PresentationData>,
+ layers::RemoteTextureOwnerId::HashFn>
+ mPresentationDataMap;
+
+ RefPtr<layers::RemoteTextureOwnerClient> mRemoteTextureOwner;
+
+ /// Associated stack of error scopes for each device.
+ std::unordered_map<uint64_t, std::vector<ErrorScope>>
+ mErrorScopeStackByDevice;
+
+ std::unordered_map<ffi::WGPUTextureId, std::shared_ptr<ExternalTexture>>
+ mExternalTextures;
+
+  // The set of DeviceIds for which we have already sent a DeviceLost message.
+  // We use this to limit each Device to at most one DeviceLost message.
+ nsTHashSet<RawId> mLostDeviceIds;
+
+ // Shared handle of wgpu device's fence.
+ RefPtr<gfx::FileHandleWrapper> mFenceHandle;
+
+  // DeviceLostRequest structs, held as unique_ptrs and keyed by device id.
+  // Each entry is kept alive for as long as its device is alive.
+ struct DeviceLostRequest {
+ WeakPtr<WebGPUParent> mParent;
+ RawId mDeviceId;
+ };
+ std::unordered_map<RawId, std::unique_ptr<DeviceLostRequest>>
+ mDeviceLostRequests;
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // WEBGPU_PARENT_H_
diff --git a/dom/webgpu/ipc/WebGPUSerialize.h b/dom/webgpu/ipc/WebGPUSerialize.h
new file mode 100644
index 0000000000..8d78d784cb
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUSerialize.h
@@ -0,0 +1,63 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WEBGPU_SERIALIZE_H_
+#define WEBGPU_SERIALIZE_H_
+
+#include "WebGPUTypes.h"
+#include "ipc/EnumSerializer.h"
+#include "ipc/IPCMessageUtils.h"
+#include "mozilla/dom/WebGPUBinding.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+
+namespace IPC {
+
+#define DEFINE_IPC_SERIALIZER_ENUM_GUARD(something, guard) \
+ template <> \
+ struct ParamTraits<something> \
+ : public ContiguousEnumSerializer<something, something(0), guard> {}
+
+#define DEFINE_IPC_SERIALIZER_DOM_ENUM(something) \
+ DEFINE_IPC_SERIALIZER_ENUM_GUARD(something, something::EndGuard_)
+#define DEFINE_IPC_SERIALIZER_FFI_ENUM(something) \
+ DEFINE_IPC_SERIALIZER_ENUM_GUARD(something, something##_Sentinel)
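+
+// For example, DEFINE_IPC_SERIALIZER_DOM_ENUM(mozilla::dom::GPUErrorFilter)
+// below expands to a ParamTraits specialization that validates values against
+// the enum's EndGuard_ sentinel.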
+
+// -
+
+DEFINE_IPC_SERIALIZER_DOM_ENUM(mozilla::dom::GPUErrorFilter);
+DEFINE_IPC_SERIALIZER_DOM_ENUM(mozilla::dom::GPUPowerPreference);
+
+DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUHostMap);
+
+DEFINE_IPC_SERIALIZER_WITHOUT_FIELDS(mozilla::dom::GPUCommandBufferDescriptor);
+
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPURequestAdapterOptions,
+ mPowerPreference, mForceFallbackAdapter);
+
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPUBufferDescriptor, mSize,
+ mUsage, mMappedAtCreation);
+
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::PopErrorScopeResult,
+ resultType, message);
+
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::WebGPUCompilationMessage,
+ message, lineNum, linePos);
+
+#undef DEFINE_IPC_SERIALIZER_FFI_ENUM
+#undef DEFINE_IPC_SERIALIZER_DOM_ENUM
+#undef DEFINE_IPC_SERIALIZER_ENUM_GUARD
+
+// -
+
+template <>
+struct ParamTraits<mozilla::webgpu::PopErrorScopeResultType>
+ : public ContiguousEnumSerializerInclusive<
+ mozilla::webgpu::PopErrorScopeResultType,
+ mozilla::webgpu::PopErrorScopeResultType{0},
+ mozilla::webgpu::PopErrorScopeResultType::_LAST> {};
+
+} // namespace IPC
+
+#endif // WEBGPU_SERIALIZE_H_
diff --git a/dom/webgpu/ipc/WebGPUTypes.h b/dom/webgpu/ipc/WebGPUTypes.h
new file mode 100644
index 0000000000..ce6685ded5
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUTypes.h
@@ -0,0 +1,82 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WEBGPU_TYPES_H_
+#define WEBGPU_TYPES_H_
+
+#include <cstdint>
+#include "mozilla/Maybe.h"
+#include "nsString.h"
+#include "mozilla/dom/BindingDeclarations.h"
+
+namespace mozilla::dom {
+enum class GPUErrorFilter : uint8_t;
+} // namespace mozilla::dom
+
+namespace mozilla::webgpu {
+
+using RawId = uint64_t;
+using BufferAddress = uint64_t;
+
+struct ErrorScope {
+ dom::GPUErrorFilter filter;
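+  // The first error message captured while this scope was open, if any.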
+ Maybe<nsCString> firstMessage;
+};
+
+enum class PopErrorScopeResultType : uint8_t {
+ NoError,
+ ThrowOperationError,
+ ValidationError,
+ OutOfMemory,
+ InternalError,
+ DeviceLost,
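+  // Keep _LAST in sync with the final variant; the IPC serializer in
+  // WebGPUSerialize.h uses it as the inclusive upper bound.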
+ _LAST = DeviceLost,
+};
+
+struct PopErrorScopeResult {
+ PopErrorScopeResultType resultType;
+ nsCString message;
+};
+
+enum class WebGPUCompilationMessageType { Error, Warning, Info };
+
+// TODO: Better name? CompilationMessage is already taken by the dom object.
+/// The serializable counterpart of the dom object CompilationMessage.
+struct WebGPUCompilationMessage {
+ nsString message;
+ uint64_t lineNum = 0;
+ uint64_t linePos = 0;
+ // In utf16 code units.
+ uint64_t offset = 0;
+ // In utf16 code units.
+ uint64_t length = 0;
+ WebGPUCompilationMessageType messageType =
+ WebGPUCompilationMessageType::Error;
+};
+
+/// A helper to reduce the boilerplate of turning the many Optional<nsAString>
+/// strings we get from the dom into the nullable nsACString* we pass to the
+/// wgpu ffi.
+class StringHelper {
+ public:
+ explicit StringHelper(const nsString& aWide) {
+ if (!aWide.IsEmpty()) {
+ mNarrow = Some(NS_ConvertUTF16toUTF8(aWide));
+ }
+ }
+
+ const nsACString* Get() const {
+ if (mNarrow.isSome()) {
+ return mNarrow.ptr();
+ }
+ return nullptr;
+ }
+
+ private:
+ Maybe<NS_ConvertUTF16toUTF8> mNarrow;
+};
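+
+// A hypothetical call site, to illustrate the intended use (the descriptor
+// and field names here are made up):
+//
+//   StringHelper label(aDesc.mLabel);
+//   ffiDesc.label = label.Get();  // null when the label is empty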
+
+} // namespace mozilla::webgpu
+
+#endif // WEBGPU_TYPES_H_
diff --git a/dom/webgpu/mochitest/mochitest-no-pref.toml b/dom/webgpu/mochitest/mochitest-no-pref.toml
new file mode 100644
index 0000000000..511b840c0b
--- /dev/null
+++ b/dom/webgpu/mochitest/mochitest-no-pref.toml
@@ -0,0 +1,10 @@
+[DEFAULT]
+subsuite = "webgpu"
+run-if = ["release_or_beta"]
+
+# Even if the pref were enabled, WebGPU is only available in secure contexts.
+#
+# See the spec's WebIDL: https://www.w3.org/TR/webgpu/#navigatorgpu
+scheme = "https"
+
+["test_disabled.html"]
diff --git a/dom/webgpu/mochitest/mochitest.toml b/dom/webgpu/mochitest/mochitest.toml
new file mode 100644
index 0000000000..f32d7a9a86
--- /dev/null
+++ b/dom/webgpu/mochitest/mochitest.toml
@@ -0,0 +1,116 @@
+[DEFAULT]
+subsuite = "webgpu"
+run-if = ["!release_or_beta"]
+prefs = [
+ "dom.webgpu.enabled=true",
+ "dom.webgpu.workers.enabled=true",
+ "gfx.offscreencanvas.enabled=true",
+]
+support-files = [
+ "worker_wrapper.js",
+ "test_basic_canvas.worker.js",
+ "test_submit_render_empty.worker.js",
+]
+
+# WebGPU is only available in secure contexts.
+#
+# See the spec's WebIDL: https://www.w3.org/TR/webgpu/#navigatorgpu
+scheme = "https"
+
+["test_basic_canvas.worker.html"]
+fail-if = [
+ "os == 'linux' && os_version == '18.04'",
+ "os == 'mac'",
+]
+
+["test_buffer_mapping.html"]
+fail-if = [
+ "os == 'linux' && os_version == '18.04'",
+ "os == 'mac'",
+]
+
+["test_buffer_mapping_invalid_device.html"]
+fail-if = [
+ "os == 'linux' && os_version == '18.04'",
+ "os == 'mac'",
+]
+
+["test_command_buffer_creation.html"]
+fail-if = [
+ "os == 'linux' && os_version == '18.04'",
+ "os == 'mac'",
+]
+
+["test_context_configure.html"]
+fail-if = [
+ "os == 'linux' && os_version == '18.04'",
+ "os == 'mac'",
+]
+
+["test_device_creation.html"]
+fail-if = [
+ "os == 'linux' && os_version == '18.04'",
+ "os == 'mac'",
+]
+
+["test_device_lost.html"]
+fail-if = [
+ "os == 'linux' && os_version == '18.04'",
+ "os == 'mac'",
+]
+
+["test_double_encoder_finish.html"]
+fail-if = [
+ "os == 'linux' && os_version == '18.04'",
+ "os == 'mac'",
+]
+
+["test_enabled.html"]
+
+["test_error_scope.html"]
+fail-if = [
+ "os == 'linux' && os_version == '18.04'",
+ "os == 'mac'",
+]
+
+["test_insecure_context.html"]
+# This test checks that WebGPU is not available in insecure contexts.
+scheme = "http"
+
+["test_navigator_gpu_not_replaceable.html"]
+
+["test_queue_copyExternalImageToTexture.html"]
+fail-if = [
+ "os == 'linux' && os_version == '18.04'",
+ "os == 'mac'",
+]
+
+["test_queue_write.html"]
+fail-if = [
+ "os == 'linux' && os_version == '18.04'",
+ "os == 'mac'",
+]
+
+["test_queue_write_invalid_device.html"]
+fail-if = [
+ "os == 'linux' && os_version == '18.04'",
+ "os == 'mac'",
+]
+
+["test_submit_compute_empty.html"]
+fail-if = [
+ "os == 'linux' && os_version == '18.04'",
+ "os == 'mac'",
+]
+
+["test_submit_render_empty.html"]
+fail-if = [
+ "os == 'linux' && os_version == '18.04'",
+ "os == 'mac'",
+]
+
+["test_submit_render_empty.worker.html"]
+fail-if = [
+ "os == 'linux' && os_version == '18.04'",
+ "os == 'mac'",
+]
diff --git a/dom/webgpu/mochitest/test_basic_canvas.worker.html b/dom/webgpu/mochitest/test_basic_canvas.worker.html
new file mode 100644
index 0000000000..a23ee9fc70
--- /dev/null
+++ b/dom/webgpu/mochitest/test_basic_canvas.worker.html
@@ -0,0 +1,18 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script src="worker_wrapper.js"></script>
+ <link rel="stylesheet" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <canvas id="canvas"></canvas>
+ <script>
+ const canvas = document.getElementById("canvas");
+ const offscreen = canvas.transferControlToOffscreen();
+
+ runWorkerTest("test_basic_canvas.worker.js", { offscreen }, [offscreen]);
+ </script>
+ </body>
+</html>
diff --git a/dom/webgpu/mochitest/test_basic_canvas.worker.js b/dom/webgpu/mochitest/test_basic_canvas.worker.js
new file mode 100644
index 0000000000..cec6d6f210
--- /dev/null
+++ b/dom/webgpu/mochitest/test_basic_canvas.worker.js
@@ -0,0 +1,32 @@
+self.addEventListener("message", async function (event) {
+ try {
+ const offscreen = event.data.offscreen;
+ const context = offscreen.getContext("webgpu");
+
+ const swapChainFormat = navigator.gpu.getPreferredCanvasFormat();
+ const adapter = await navigator.gpu.requestAdapter();
+ const device = await adapter.requestDevice();
+
+ context.configure({
+ device,
+ format: swapChainFormat,
+ size: { width: 100, height: 100, depth: 1 },
+ });
+
+ const texture = context.getCurrentTexture();
+
+ self.postMessage([
+ {
+ value: texture !== undefined,
+ message: "texture !== undefined",
+ },
+ ]);
+ } catch (e) {
+ self.postMessage([
+ {
+ value: false,
+ message: "Unhandled exception " + e,
+ },
+ ]);
+ }
+});
diff --git a/dom/webgpu/mochitest/test_buffer_mapping.html b/dom/webgpu/mochitest/test_buffer_mapping.html
new file mode 100644
index 0000000000..01dfbf893e
--- /dev/null
+++ b/dom/webgpu/mochitest/test_buffer_mapping.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <script>
+ ok(
+ SpecialPowers.getBoolPref("dom.webgpu.enabled"),
+ "Pref should be enabled."
+ );
+
+ async function testBody() {
+ const adapter = await navigator.gpu.requestAdapter();
+ const device = await adapter.requestDevice();
+
+ const bufferRead = device.createBuffer({
+ size: 4,
+ usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
+ });
+ const bufferWrite = device.createBuffer({
+ size: 4,
+ usage: GPUBufferUsage.COPY_SRC,
+ mappedAtCreation: true,
+ });
+ new Float32Array(bufferWrite.getMappedRange()).set([1.0]);
+ bufferWrite.unmap();
+
+ const encoder = device.createCommandEncoder();
+ encoder.copyBufferToBuffer(bufferWrite, 0, bufferRead, 0, 4);
+ device.queue.submit([encoder.finish()]);
+
+ await bufferRead.mapAsync(GPUMapMode.READ);
+
+ try {
+ bufferRead.getMappedRange(0, 5);
+ ok(false, "mapped with size outside buffer should throw");
+ } catch (e) {
+ ok(
+ true,
+ "mapped with size outside buffer should throw OperationError"
+ );
+ }
+
+ try {
+ bufferRead.getMappedRange(4, 1);
+ ok(false, "mapped with offset outside buffer should throw");
+ } catch (e) {
+ ok(
+ true,
+ "mapped with offset outside buffer should throw OperationError"
+ );
+ }
+
+ const data = bufferRead.getMappedRange();
+ is(data.byteLength, 4, "array should be 4 bytes long");
+
+ const value = new Float32Array(data)[0];
+ ok(value == 1.0, "value == 1.0");
+
+ bufferRead.unmap();
+ is(data.byteLength, 0, "array should be detached after explicit unmap");
+ }
+
+ SimpleTest.waitForExplicitFinish();
+ testBody()
+ .catch(e => ok(false, "Unhandled exception " + e))
+ .finally(() => SimpleTest.finish());
+ </script>
+ </body>
+</html>
diff --git a/dom/webgpu/mochitest/test_buffer_mapping_invalid_device.html b/dom/webgpu/mochitest/test_buffer_mapping_invalid_device.html
new file mode 100644
index 0000000000..6b7e7fafad
--- /dev/null
+++ b/dom/webgpu/mochitest/test_buffer_mapping_invalid_device.html
@@ -0,0 +1,60 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <script>
+ ok(
+ SpecialPowers.getBoolPref("dom.webgpu.enabled"),
+ "Pref should be enabled."
+ );
+
+ async function testBody() {
+ const adapter = await navigator.gpu.requestAdapter({});
+ const device = await adapter.requestDevice({});
+ const bindGroupLayout = device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 256,
+ storageTexture: { format: "bc1-rgba-unorm-srgb" },
+ visibility: GPUShaderStage.FRAGMENT,
+ },
+ ],
+ });
+ const buffer1 = device.createBuffer({
+ size: 32,
+ usage: GPUBufferUsage.MAP_READ,
+ });
+
+ // Call device.destroy, which makes the device invalid. Further object creation
+ // on device will create objects that are also invalid.
+ device.destroy();
+
+ // Create an invalid buffer2.
+ const buffer2 = device.createBuffer({
+ size: 32,
+ usage: GPUBufferUsage.MAP_WRITE,
+ });
+
+ // Create an invalid bind group, referencing invalid buffer2.
+ const bindGroup = device.createBindGroup({
+ layout: bindGroupLayout,
+ entries: [
+ { binding: 1, resource: { buffer: buffer1 } },
+ { binding: 2, resource: { buffer: buffer2 } },
+ ],
+ });
+
+ ok(bindGroup, "Created a bind group referencing an invalid buffer.");
+ }
+
+ SimpleTest.waitForExplicitFinish();
+ testBody()
+ .catch(e => ok(false, "Unhandled exception " + e))
+ .finally(() => SimpleTest.finish());
+ </script>
+ </body>
+</html>
diff --git a/dom/webgpu/mochitest/test_command_buffer_creation.html b/dom/webgpu/mochitest/test_command_buffer_creation.html
new file mode 100644
index 0000000000..a92c038afd
--- /dev/null
+++ b/dom/webgpu/mochitest/test_command_buffer_creation.html
@@ -0,0 +1,29 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <script>
+ ok(
+ SpecialPowers.getBoolPref("dom.webgpu.enabled"),
+ "Pref should be enabled."
+ );
+
+ const func = async function () {
+ const adapter = await navigator.gpu.requestAdapter();
+ const device = await adapter.requestDevice();
+ const encoder = device.createCommandEncoder();
+ const command_buffer = encoder.finish();
+ ok(command_buffer !== undefined, "command_buffer !== undefined");
+ };
+
+ SimpleTest.waitForExplicitFinish();
+ func()
+ .catch(e => ok(false, "Unhandled exception " + e))
+ .finally(() => SimpleTest.finish());
+ </script>
+ </body>
+</html>
diff --git a/dom/webgpu/mochitest/test_context_configure.html b/dom/webgpu/mochitest/test_context_configure.html
new file mode 100644
index 0000000000..7ca96d0afe
--- /dev/null
+++ b/dom/webgpu/mochitest/test_context_configure.html
@@ -0,0 +1,48 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <script>
+ ok(
+ SpecialPowers.getBoolPref("dom.webgpu.enabled"),
+ "Pref should be enabled."
+ );
+
+ async function testBody() {
+ const adapter = await navigator.gpu.requestAdapter({});
+ const device = await adapter.requestDevice({});
+ const canvas = document.createElement("canvas");
+ const context = canvas.getContext("webgpu");
+ const format = navigator.gpu.getPreferredCanvasFormat(adapter);
+
+ // Attempt to configure with a too-large canvas, which should
+ // fail due to device texture limits.
+ canvas.width = 1970696937;
+ let expectedError;
+ try {
+ context.configure({
+ device,
+ format,
+ });
+ } catch (error) {
+ expectedError = error;
+ }
+ // Bug 1864904: This should become an "is".
+ todo_is(
+ typeof expectedError,
+ "TypeError",
+ "Failed configure should generate a TypeError."
+ );
+ }
+
+ SimpleTest.waitForExplicitFinish();
+ testBody()
+ .catch(e => ok(false, "Unhandled exception " + e))
+ .finally(() => SimpleTest.finish());
+ </script>
+ </body>
+</html>
diff --git a/dom/webgpu/mochitest/test_device_creation.html b/dom/webgpu/mochitest/test_device_creation.html
new file mode 100644
index 0000000000..678359c323
--- /dev/null
+++ b/dom/webgpu/mochitest/test_device_creation.html
@@ -0,0 +1,29 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <script>
+ ok(
+ SpecialPowers.getBoolPref("dom.webgpu.enabled"),
+ "Pref should be enabled."
+ );
+
+ const func = async function () {
+ const adapter = await navigator.gpu.requestAdapter();
+ const limits = adapter.limits;
+ const features = adapter.features;
+ const device = await adapter.requestDevice();
+ ok(device !== undefined, "device !== undefined");
+ };
+
+ SimpleTest.waitForExplicitFinish();
+ func()
+ .catch(e => ok(false, "Unhandled exception " + e))
+ .finally(() => SimpleTest.finish());
+ </script>
+ </body>
+</html>
diff --git a/dom/webgpu/mochitest/test_device_lost.html b/dom/webgpu/mochitest/test_device_lost.html
new file mode 100644
index 0000000000..bc6614f0ef
--- /dev/null
+++ b/dom/webgpu/mochitest/test_device_lost.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <script>
+ ok(
+ SpecialPowers.getBoolPref("dom.webgpu.enabled"),
+ "Pref should be enabled."
+ );
+
+ const destroy_causes_lost = async function () {
+ const adapter = await navigator.gpu.requestAdapter();
+ ok(adapter !== undefined, "adapter !== undefined");
+ const device = await adapter.requestDevice();
+ ok(device !== undefined, "device !== undefined");
+
+ const lostPromise = device.lost;
+ device.destroy();
+ const deviceLostReason = await lostPromise;
+
+ is(
+ deviceLostReason.reason,
+ "destroyed",
+ "Destroy reason should correspond to GPUDeviceLostReason.destroyed"
+ );
+ is(deviceLostReason.message, "", "Destroy message should be blank");
+ };
+
+ const drop_causes_lost_is_unobservable = async function () {
+ const adapter = await navigator.gpu.requestAdapter();
+ ok(adapter !== undefined, "adapter !== undefined");
+
+ let lostPromise;
+ // Create a scope with a device that will go out of scope
+ // and then be dropped.
+ {
+ const device = await adapter.requestDevice();
+ ok(device !== undefined, "device !== undefined");
+
+ lostPromise = device.lost;
+ }
+
+ SimpleTest.requestFlakyTimeout(
+ "Racing against promise that should never resolve."
+ );
+ const TIMEOUT_MS = 5000;
+ let timeoutPromise = new Promise(resolve => {
+ let timeoutValue = { reason: "timeout" };
+ // eslint-disable-next-line mozilla/no-arbitrary-setTimeout
+ setTimeout(() => resolve(timeoutValue), TIMEOUT_MS);
+ });
+
+ const firstPromise = await Promise.race([lostPromise, timeoutPromise]);
+ is(
+ firstPromise.reason,
+ "timeout",
+ "timeoutPromise should return before lostPromise."
+ );
+ };
+
+ SimpleTest.waitForExplicitFinish();
+
+ destroy_causes_lost()
+ .then(() => drop_causes_lost_is_unobservable())
+ .catch(e => ok(false, `Unhandled exception ${e}`))
+ .finally(() => SimpleTest.finish());
+ </script>
+ </body>
+</html>
diff --git a/dom/webgpu/mochitest/test_disabled.html b/dom/webgpu/mochitest/test_disabled.html
new file mode 100644
index 0000000000..12eb01e465
--- /dev/null
+++ b/dom/webgpu/mochitest/test_disabled.html
@@ -0,0 +1,17 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <script>
+ ok(
+ !SpecialPowers.getBoolPref("dom.webgpu.enabled"),
+ "Pref should be disabled."
+ );
+ ok(navigator.gpu === undefined, "navigator.gpu === undefined");
+ </script>
+ </body>
+</html>
diff --git a/dom/webgpu/mochitest/test_double_encoder_finish.html b/dom/webgpu/mochitest/test_double_encoder_finish.html
new file mode 100644
index 0000000000..24c91c3165
--- /dev/null
+++ b/dom/webgpu/mochitest/test_double_encoder_finish.html
@@ -0,0 +1,36 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <script>
+ ok(
+ SpecialPowers.getBoolPref("dom.webgpu.enabled"),
+ "Pref should be enabled."
+ );
+
+ const func = async function () {
+ const adapter = await navigator.gpu.requestAdapter();
+ const device = await adapter.requestDevice();
+ const encoder = device.createCommandEncoder();
+
+ const command_buffer = encoder.finish();
+ ok(command_buffer !== undefined, "command_buffer !== undefined");
+
+ const invalid_command_buffer = encoder.finish();
+ ok(
+ invalid_command_buffer !== undefined,
+ "invalid_command_buffer !== undefined"
+ );
+ };
+
+ SimpleTest.waitForExplicitFinish();
+ func()
+ .catch(e => ok(false, "Unhandled exception " + e))
+ .finally(() => SimpleTest.finish());
+ </script>
+ </body>
+</html>
diff --git a/dom/webgpu/mochitest/test_enabled.html b/dom/webgpu/mochitest/test_enabled.html
new file mode 100644
index 0000000000..318788bf1e
--- /dev/null
+++ b/dom/webgpu/mochitest/test_enabled.html
@@ -0,0 +1,17 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <script>
+ ok(
+ SpecialPowers.getBoolPref("dom.webgpu.enabled"),
+ "Pref should be enabled."
+ );
+ ok(navigator.gpu !== undefined, "navigator.gpu !== undefined");
+ </script>
+ </body>
+</html>
diff --git a/dom/webgpu/mochitest/test_error_scope.html b/dom/webgpu/mochitest/test_error_scope.html
new file mode 100644
index 0000000000..f89b4b6e78
--- /dev/null
+++ b/dom/webgpu/mochitest/test_error_scope.html
@@ -0,0 +1,43 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <script>
+ ok(
+ SpecialPowers.getBoolPref("dom.webgpu.enabled"),
+ "Pref should be enabled."
+ );
+
+ const func = async function () {
+ const adapter = await navigator.gpu.requestAdapter();
+ const device = await adapter.requestDevice();
+
+ device.pushErrorScope("validation");
+ const buffer = device.createBuffer({ size: 0, usage: 0 });
+ const error = await device.popErrorScope();
+
+ isnot(
+ error,
+ null,
+ "Attempt to createBuffer with size 0 and usage 0 should generate an error."
+ );
+
+ try {
+ await device.popErrorScope();
+ ok(false, "Should have thrown");
+ } catch (ex) {
+ ok(ex.name == "OperationError", "Should throw an OperationError");
+ }
+ };
+
+ SimpleTest.waitForExplicitFinish();
+ func()
+ .catch(e => ok(false, "Unhandled exception " + e))
+ .finally(() => SimpleTest.finish());
+ </script>
+ </body>
+</html>
diff --git a/dom/webgpu/mochitest/test_insecure_context.html b/dom/webgpu/mochitest/test_insecure_context.html
new file mode 100644
index 0000000000..dcc4a313b9
--- /dev/null
+++ b/dom/webgpu/mochitest/test_insecure_context.html
@@ -0,0 +1,22 @@
+<!-- This is somewhat redundant with
+ dom/tests/mochitest/general/test_interfaces.js, but I think it's good to
+ have something here as well. -->
+
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <script>
+ ok(
+ SpecialPowers.getBoolPref("dom.webgpu.enabled"),
+ "Pref should be enabled."
+ );
+ ok(!isSecureContext, "test should not run in a secure context");
+ ok(navigator.gpu === undefined, "navigator.gpu === undefined");
+ </script>
+ </body>
+</html>
diff --git a/dom/webgpu/mochitest/test_navigator_gpu_not_replaceable.html b/dom/webgpu/mochitest/test_navigator_gpu_not_replaceable.html
new file mode 100644
index 0000000000..6b6b7b9715
--- /dev/null
+++ b/dom/webgpu/mochitest/test_navigator_gpu_not_replaceable.html
@@ -0,0 +1,32 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <script>
+ ok(
+ SpecialPowers.getBoolPref("dom.webgpu.enabled"),
+ "Pref should be enabled."
+ );
+ const originalNavigatorGpu = navigator.gpu;
+
+ const BAD_GPU_VAL = "ohcrapthisisbad";
+ ok(
+ navigator.gpu !== BAD_GPU_VAL,
+ "`navigator.gpu` should never equal `BAD_GPU_VAL` (before replacement attempt)"
+ );
+ navigator.gpu = BAD_GPU_VAL;
+ ok(
+ navigator.gpu !== BAD_GPU_VAL,
+ "`navigator.gpu` should never equal `BAD_GPU_VAL` (after replacement attempt)"
+ );
+ ok(
+ navigator.gpu === originalNavigatorGpu,
+ "`navigator.gpu` should equal originally observed value after attempted replacement"
+ );
+ </script>
+ </body>
+</html>
diff --git a/dom/webgpu/mochitest/test_queue_copyExternalImageToTexture.html b/dom/webgpu/mochitest/test_queue_copyExternalImageToTexture.html
new file mode 100644
index 0000000000..279b4a52b4
--- /dev/null
+++ b/dom/webgpu/mochitest/test_queue_copyExternalImageToTexture.html
@@ -0,0 +1,261 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" href="/tests/SimpleTest/test.css" />
+ </head>
+
+ <body>
+ <script type="text/javascript">
+ "use strict";
+
+ ok(
+ SpecialPowers.getBoolPref("dom.webgpu.enabled"),
+ "WebGPU pref should be enabled."
+ );
+ ok(
+ SpecialPowers.getBoolPref("gfx.offscreencanvas.enabled"),
+ "OffscreenCanvas pref should be enabled."
+ );
+
+ SimpleTest.waitForExplicitFinish();
+
+ function requestAnimationFramePromise() {
+ return new Promise(requestAnimationFrame);
+ }
+
+ function createSourceCanvasWebgl() {
+ const offscreenCanvas = new OffscreenCanvas(200, 200);
+ const gl = offscreenCanvas.getContext("webgl");
+
+ const COLOR_VALUE = 127.0 / 255.0;
+ const ALPHA_VALUE = 127.0 / 255.0;
+
+ gl.enable(gl.SCISSOR_TEST);
+
+ gl.scissor(0, 0, 100, 100);
+ gl.clearColor(COLOR_VALUE, 0.0, 0.0, ALPHA_VALUE);
+ gl.clear(gl.COLOR_BUFFER_BIT);
+
+ gl.scissor(100, 0, 100, 100);
+ gl.clearColor(0.0, COLOR_VALUE, 0.0, ALPHA_VALUE);
+ gl.clear(gl.COLOR_BUFFER_BIT);
+
+ gl.scissor(0, 100, 100, 100);
+ gl.clearColor(0.0, 0.0, COLOR_VALUE, ALPHA_VALUE);
+ gl.clear(gl.COLOR_BUFFER_BIT);
+
+ gl.scissor(100, 100, 100, 100);
+ gl.clearColor(0.0, 0.0, 0.0, ALPHA_VALUE);
+ gl.clear(gl.COLOR_BUFFER_BIT);
+
+ return {
+ source: offscreenCanvas,
+ origin: { x: 0, y: 0 },
+ flipY: true,
+ };
+ }
+
+ function createSourceCanvas2d() {
+ const offscreenCanvas = new OffscreenCanvas(200, 200);
+ const context = offscreenCanvas.getContext("2d");
+
+ context.fillStyle = "rgba(255,0,0,0.498)";
+ context.fillRect(0, 0, 100, 100);
+
+ context.fillStyle = "rgba(0,255,0,0.498)";
+ context.fillRect(100, 0, 100, 100);
+
+ context.fillStyle = "rgba(0,0,255,0.498)";
+ context.fillRect(0, 100, 100, 100);
+
+ context.fillStyle = "rgba(0,0,0,0.498)";
+ context.fillRect(100, 100, 100, 100);
+
+ return {
+ source: offscreenCanvas,
+ origin: { x: 0, y: 0 },
+ flipY: false,
+ };
+ }
+
+ function createSourceImageBitmap() {
+ const sourceCanvas = createSourceCanvas2d();
+ return {
+ source: sourceCanvas.source.transferToImageBitmap(),
+ origin: { x: 0, y: 0 },
+ flipY: false,
+ };
+ }
+
+ async function mapDestTexture(
+ device,
+ source,
+ destFormat,
+ premultiply,
+ copySize
+ ) {
+      const bytesPerRow = 256 * 4; // 200 px * 4 bytes = 800, rounded up to 1024 for 256-byte row alignment
+ const texture = device.createTexture({
+ format: destFormat,
+ size: copySize,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ });
+
+ device.queue.copyExternalImageToTexture(
+ source,
+ { texture, premultipliedAlpha: premultiply },
+ copySize
+ );
+
+ const buffer = device.createBuffer({
+ size: 1024 * 200,
+ usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
+ });
+
+ const encoder = device.createCommandEncoder();
+ encoder.copyTextureToBuffer(
+ { texture },
+ { buffer, bytesPerRow },
+ copySize
+ );
+ device.queue.submit([encoder.finish()]);
+
+ await buffer.mapAsync(GPUMapMode.READ);
+ return buffer;
+ }
+
+ async function verifyBuffer(
+ test,
+ device,
+ source,
+ format,
+ premultiply,
+ copyDim,
+ topLeftPixelData
+ ) {
+ try {
+ const buffer = await mapDestTexture(
+ device,
+ source,
+ format,
+ premultiply,
+ copyDim
+ );
+ const arrayBuffer = buffer.getMappedRange();
+ const view = new Uint8Array(arrayBuffer);
+ for (let i = 0; i < topLeftPixelData.length; ++i) {
+ is(
+ view[i],
+ topLeftPixelData[i],
+ test +
+ " " +
+ format +
+ " (" +
+ source.origin.x +
+ "," +
+ source.origin.y +
+ ") channel " +
+ i
+ );
+ }
+ } catch (e) {
+ ok(false, "WebGPU exception: " + e);
+ }
+ }
+
+ async function verifySourceCanvas(test, device, source) {
+ await verifyBuffer(
+ test,
+ device,
+ source,
+ "rgba8unorm",
+ /* premultiply */ true,
+ { width: 200, height: 200 },
+ [127, 0, 0, 127]
+ );
+ await verifyBuffer(
+ test,
+ device,
+ source,
+ "bgra8unorm",
+ /* premultiply */ true,
+ { width: 200, height: 200 },
+ [0, 0, 127, 127]
+ );
+ await verifyBuffer(
+ test,
+ device,
+ source,
+ "rgba8unorm",
+ /* premultiply */ false,
+ { width: 200, height: 200 },
+ [255, 0, 0, 127]
+ );
+ await verifyBuffer(
+ test,
+ device,
+ source,
+ "bgra8unorm",
+ /* premultiply */ false,
+ { width: 200, height: 200 },
+ [0, 0, 255, 127]
+ );
+
+ // The copy is flipped but the origin is relative to the original source data,
+ // so we need to invert for WebGL.
+ const topRightPixelData =
+ test === "webgl" ? [0, 0, 0, 127] : [0, 127, 0, 127];
+ const topRightOrigin = { origin: { x: 100, y: 0 } };
+ await verifyBuffer(
+ test,
+ device,
+ { ...source, ...topRightOrigin },
+ "bgra8unorm",
+ /* premultiply */ true,
+ { width: 100, height: 100 },
+ topRightPixelData
+ );
+
+ const bottomLeftPixelData =
+ test === "webgl" ? [0, 0, 127, 127] : [127, 0, 0, 127];
+ const bottomLeftOrigin = { origin: { x: 0, y: 100 } };
+ await verifyBuffer(
+ test,
+ device,
+ { ...source, ...bottomLeftOrigin },
+ "bgra8unorm",
+ /* premultiply */ true,
+ { width: 100, height: 100 },
+ bottomLeftPixelData
+ );
+ }
+
+ async function writeDestCanvas(source2d, sourceWebgl, sourceImageBitmap) {
+ const adapter = await navigator.gpu.requestAdapter();
+ const device = await adapter.requestDevice();
+ await verifySourceCanvas("2d", device, source2d);
+ await verifySourceCanvas("imageBitmap", device, sourceImageBitmap);
+ await verifySourceCanvas("webgl", device, sourceWebgl);
+ }
+
+ async function runTest() {
+ try {
+ const source2d = createSourceCanvas2d();
+ const sourceWebgl = createSourceCanvasWebgl();
+ const sourceImageBitmap = createSourceImageBitmap();
+ await requestAnimationFramePromise();
+ await requestAnimationFramePromise();
+ await writeDestCanvas(source2d, sourceWebgl, sourceImageBitmap);
+ } catch (e) {
+ ok(false, "Uncaught exception: " + e);
+ } finally {
+ SimpleTest.finish();
+ }
+ }
+
+ runTest();
+ </script>
+ </body>
+</html>
diff --git a/dom/webgpu/mochitest/test_queue_write.html b/dom/webgpu/mochitest/test_queue_write.html
new file mode 100644
index 0000000000..585c1617cd
--- /dev/null
+++ b/dom/webgpu/mochitest/test_queue_write.html
@@ -0,0 +1,50 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <script>
+ ok(
+ SpecialPowers.getBoolPref("dom.webgpu.enabled"),
+ "Pref should be enabled."
+ );
+
+ const func = async function () {
+ const adapter = await navigator.gpu.requestAdapter();
+ const device = await adapter.requestDevice();
+ const buffer = device.createBuffer({
+ size: 16,
+ usage:
+ GPUBufferUsage.COPY_DST |
+ GPUBufferUsage.COPY_SRC |
+ GPUBufferUsage.VERTEX,
+ });
+ const arrayBuf = new ArrayBuffer(16);
+ new Int32Array(arrayBuf).fill(5);
+ device.queue.writeBuffer(buffer, 0, arrayBuf, 0);
+ const texture = device.createTexture({
+ size: [2, 2, 1],
+ dimension: "2d",
+ format: "rgba8unorm",
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC,
+ });
+ device.queue.writeTexture(
+ { texture },
+ arrayBuf,
+ { bytesPerRow: 8 },
+ [2, 2, 1]
+ );
+        // This isn't a complete check; we need to read back the contents and verify the writes happened.
+ ok(device !== undefined, "");
+ };
+
+ SimpleTest.waitForExplicitFinish();
+ func()
+ .catch(e => ok(false, "Unhandled exception " + e))
+ .finally(() => SimpleTest.finish());
+ </script>
+ </body>
+</html>
diff --git a/dom/webgpu/mochitest/test_queue_write_invalid_device.html b/dom/webgpu/mochitest/test_queue_write_invalid_device.html
new file mode 100644
index 0000000000..09180dee7f
--- /dev/null
+++ b/dom/webgpu/mochitest/test_queue_write_invalid_device.html
@@ -0,0 +1,44 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <script>
+ ok(
+ SpecialPowers.getBoolPref("dom.webgpu.enabled"),
+ "Pref should be enabled."
+ );
+
+ const func = async function () {
+ const adapter = await navigator.gpu.requestAdapter();
+ const device = await adapter.requestDevice();
+
+ // Destroy the device, making it invalid.
+ device.destroy();
+
+ // Creating a buffer on an invalid device will create an invalid
+ // buffer.
+ const buffer = device.createBuffer({
+ size: 16,
+ usage:
+ GPUBufferUsage.COPY_DST |
+ GPUBufferUsage.COPY_SRC |
+ GPUBufferUsage.VERTEX,
+ });
+ const arrayBuf = new ArrayBuffer(16);
+ new Int32Array(arrayBuf).fill(5);
+
+ // Writing to an invalid buffer should not throw an error.
+ device.queue.writeBuffer(buffer, 0, arrayBuf, 0);
+ };
+
+ SimpleTest.waitForExplicitFinish();
+ func()
+ .catch(e => ok(false, "Unhandled exception " + e))
+ .finally(() => SimpleTest.finish());
+ </script>
+ </body>
+</html>
diff --git a/dom/webgpu/mochitest/test_submit_compute_empty.html b/dom/webgpu/mochitest/test_submit_compute_empty.html
new file mode 100644
index 0000000000..82cb9473c5
--- /dev/null
+++ b/dom/webgpu/mochitest/test_submit_compute_empty.html
@@ -0,0 +1,32 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <script>
+ ok(
+ SpecialPowers.getBoolPref("dom.webgpu.enabled"),
+ "Pref should be enabled."
+ );
+
+ const func = async function () {
+ const adapter = await navigator.gpu.requestAdapter();
+ const device = await adapter.requestDevice();
+ const encoder = device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.end();
+ const command_buffer = encoder.finish();
+ device.queue.submit([command_buffer]);
+ ok(command_buffer !== undefined, "command_buffer !== undefined");
+ };
+
+ SimpleTest.waitForExplicitFinish();
+ func()
+ .catch(e => ok(false, "Unhandled exception " + e))
+ .finally(() => SimpleTest.finish());
+ </script>
+ </body>
+</html>
diff --git a/dom/webgpu/mochitest/test_submit_render_empty.html b/dom/webgpu/mochitest/test_submit_render_empty.html
new file mode 100644
index 0000000000..bac0d1ede7
--- /dev/null
+++ b/dom/webgpu/mochitest/test_submit_render_empty.html
@@ -0,0 +1,57 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <script>
+ ok(
+ SpecialPowers.getBoolPref("dom.webgpu.enabled"),
+ "Pref should be enabled."
+ );
+
+ const func = async function () {
+ const adapter = await navigator.gpu.requestAdapter();
+ const device = await adapter.requestDevice();
+
+ const swapChainFormat = "rgba8unorm";
+ const bundleEncoder = device.createRenderBundleEncoder({
+ colorFormats: [swapChainFormat],
+ });
+ const bundle = bundleEncoder.finish({});
+
+ const texture = device.createTexture({
+ size: { width: 100, height: 100, depth: 1 },
+ format: swapChainFormat,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ const view = texture.createView();
+
+ const encoder = device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view,
+ clearValue: { r: 0, g: 0, b: 0, a: 0 },
+ loadOp: "clear",
+ storeOp: "store",
+ },
+ ],
+ });
+ pass.executeBundles([bundle]);
+ pass.end();
+ const command_buffer = encoder.finish();
+
+ device.queue.submit([command_buffer]);
+ ok(command_buffer !== undefined, "command_buffer !== undefined");
+ };
+
+ SimpleTest.waitForExplicitFinish();
+ func()
+ .catch(e => ok(false, "Unhandled exception " + e))
+ .finally(() => SimpleTest.finish());
+ </script>
+ </body>
+</html>
diff --git a/dom/webgpu/mochitest/test_submit_render_empty.worker.html b/dom/webgpu/mochitest/test_submit_render_empty.worker.html
new file mode 100644
index 0000000000..8db3168be0
--- /dev/null
+++ b/dom/webgpu/mochitest/test_submit_render_empty.worker.html
@@ -0,0 +1,14 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script src="worker_wrapper.js"></script>
+ <link rel="stylesheet" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <script>
+ runWorkerTest("test_submit_render_empty.worker.js", {}, []);
+ </script>
+ </body>
+</html>
diff --git a/dom/webgpu/mochitest/test_submit_render_empty.worker.js b/dom/webgpu/mochitest/test_submit_render_empty.worker.js
new file mode 100644
index 0000000000..334230c3c1
--- /dev/null
+++ b/dom/webgpu/mochitest/test_submit_render_empty.worker.js
@@ -0,0 +1,49 @@
+self.addEventListener("message", async function (event) {
+ try {
+ const adapter = await navigator.gpu.requestAdapter();
+ const device = await adapter.requestDevice();
+
+ const swapChainFormat = "rgba8unorm";
+ const bundleEncoder = device.createRenderBundleEncoder({
+ colorFormats: [swapChainFormat],
+ });
+ const bundle = bundleEncoder.finish({});
+
+ const texture = device.createTexture({
+ size: { width: 100, height: 100, depth: 1 },
+ format: swapChainFormat,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ const view = texture.createView();
+
+ const encoder = device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view,
+ clearValue: { r: 0, g: 0, b: 0, a: 0 },
+ loadOp: "clear",
+ storeOp: "store",
+ },
+ ],
+ });
+ pass.executeBundles([bundle]);
+ pass.end();
+ const command_buffer = encoder.finish();
+
+ device.queue.submit([command_buffer]);
+ self.postMessage([
+ {
+ value: command_buffer !== undefined,
+ message: "command_buffer !== undefined",
+ },
+ ]);
+ } catch (e) {
+ self.postMessage([
+ {
+ value: false,
+ message: "Unhandled exception " + e,
+ },
+ ]);
+ }
+});
diff --git a/dom/webgpu/mochitest/worker_wrapper.js b/dom/webgpu/mochitest/worker_wrapper.js
new file mode 100644
index 0000000000..6f6de9002d
--- /dev/null
+++ b/dom/webgpu/mochitest/worker_wrapper.js
@@ -0,0 +1,33 @@
+ok(
+ SpecialPowers.getBoolPref("dom.webgpu.enabled"),
+ "WebGPU pref should be enabled."
+);
+ok(
+ SpecialPowers.getBoolPref("gfx.offscreencanvas.enabled"),
+ "OffscreenCanvas pref should be enabled."
+);
+SimpleTest.waitForExplicitFinish();
+
+const workerWrapperFunc = async function (worker_path, data, transfer) {
+ const worker = new Worker(worker_path);
+
+ const results = new Promise((resolve, reject) => {
+ worker.addEventListener("message", event => {
+ resolve(event.data);
+ });
+ });
+
+ worker.postMessage(data, transfer);
+ for (const result of await results) {
+ ok(result.value, result.message);
+ }
+};
+
+async function runWorkerTest(worker_path, data, transfer) {
+ try {
+ await workerWrapperFunc(worker_path, data, transfer);
+ } catch (e) {
+ ok(false, "Unhandled exception " + e);
+ }
+ SimpleTest.finish();
+}
diff --git a/dom/webgpu/moz.build b/dom/webgpu/moz.build
new file mode 100644
index 0000000000..9d7b81ba3f
--- /dev/null
+++ b/dom/webgpu/moz.build
@@ -0,0 +1,88 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+with Files("**"):
+ BUG_COMPONENT = ("Core", "Graphics: WebGPU")
+
+MOCHITEST_MANIFESTS += [
+ "mochitest/mochitest-no-pref.toml",
+ "mochitest/mochitest.toml",
+]
+
+DIRS += []
+
+h_and_cpp = [
+ "Adapter",
+ "BindGroup",
+ "BindGroupLayout",
+ "Buffer",
+ "CanvasContext",
+ "CommandBuffer",
+ "CommandEncoder",
+ "CompilationInfo",
+ "CompilationMessage",
+ "ComputePassEncoder",
+ "ComputePipeline",
+ "Device",
+ "DeviceLostInfo",
+ "Error",
+ "ExternalTexture",
+ "Instance",
+ "InternalError",
+ "ObjectModel",
+ "OutOfMemoryError",
+ "PipelineLayout",
+ "QuerySet",
+ "Queue",
+ "RenderBundle",
+ "RenderBundleEncoder",
+ "RenderPassEncoder",
+ "RenderPipeline",
+ "Sampler",
+ "ShaderModule",
+ "SupportedFeatures",
+ "SupportedLimits",
+ "Texture",
+ "TextureView",
+ "Utility",
+ "ValidationError",
+]
+EXPORTS.mozilla.webgpu += [x + ".h" for x in h_and_cpp]
+UNIFIED_SOURCES += [x + ".cpp" for x in h_and_cpp]
+
+IPDL_SOURCES += [
+ "ipc/PWebGPU.ipdl",
+ "ipc/PWebGPUTypes.ipdlh",
+]
+
+EXPORTS.mozilla.webgpu += [
+ "ipc/WebGPUChild.h",
+ "ipc/WebGPUParent.h",
+ "ipc/WebGPUSerialize.h",
+ "ipc/WebGPUTypes.h",
+]
+
+UNIFIED_SOURCES += [
+ "ipc/WebGPUChild.cpp",
+ "ipc/WebGPUParent.cpp",
+]
+
+if CONFIG["MOZ_ENABLE_D3D10_LAYER"]:
+ DEFINES["MOZ_ENABLE_D3D10_LAYER"] = True
+ EXPORTS.mozilla.webgpu += [
+ "ExternalTextureD3D11.h",
+ ]
+ UNIFIED_SOURCES += [
+ "ExternalTextureD3D11.cpp",
+ ]
+
+if CONFIG["CC_TYPE"] in ("clang", "clang-cl"):
+ CXXFLAGS += ["-Werror=implicit-int-conversion", "-Wno-shorten-64-to-32"]
+ CXXFLAGS += ["-Werror=switch"]
+
+include("/ipc/chromium/chromium-config.mozbuild")
+
+FINAL_LIBRARY = "xul"
diff --git a/dom/webgpu/tests/cts/README.md b/dom/webgpu/tests/cts/README.md
new file mode 100644
index 0000000000..283beeb91f
--- /dev/null
+++ b/dom/webgpu/tests/cts/README.md
@@ -0,0 +1,17 @@
+# WebGPU CTS vendor checkout
+
+This directory contains the following:
+
+```sh
+.
+├── README.md # You are here!
+├── arguments.txt # Used by `vendor/`
+├── checkout/ # Our vendored copy of WebGPU CTS
+├── myexpectations.txt # Used by `vendor/`
+└── vendor/ # Rust binary crate for updating `checkout/` and generating WPT tests
+```
+
+## Re-vendoring
+
+You can re-vendor by running the Rust binary crate from its Cargo project root. Change your working
+directory to `vendor/` and invoke `cargo run -- --help` for more details.
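+
+A minimal invocation might look like the following (the available flags are
+whatever `--help` reports, not guaranteed here):
+
+```sh
+cd vendor/
+cargo run -- --help
+```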
diff --git a/dom/webgpu/tests/cts/checkout/.eslint-resolver.js b/dom/webgpu/tests/cts/checkout/.eslint-resolver.js
new file mode 100644
index 0000000000..e2b0f32d35
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/.eslint-resolver.js
@@ -0,0 +1,23 @@
+const path = require('path');
+const resolve = require('resolve')
+
+// Implements the following resolver spec:
+// https://github.com/benmosher/eslint-plugin-import/blob/master/resolvers/README.md
+exports.interfaceVersion = 2
+
+exports.resolve = function (source, file, config) {
+ if (resolve.isCore(source)) return { found: true, path: null }
+
+ source = source.replace(/\.js$/, '.ts');
+ try {
+ return {
+ found: true, path: resolve.sync(source, {
+ extensions: [],
+ basedir: path.dirname(path.resolve(file)),
+ ...config,
+ })
+ }
+ } catch (err) {
+ return { found: false }
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/.eslintignore b/dom/webgpu/tests/cts/checkout/.eslintignore
new file mode 100644
index 0000000000..a4a42b1266
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/.eslintignore
@@ -0,0 +1 @@
+/src/external/*
diff --git a/dom/webgpu/tests/cts/checkout/.eslintrc.json b/dom/webgpu/tests/cts/checkout/.eslintrc.json
new file mode 100644
index 0000000000..a525004c4d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/.eslintrc.json
@@ -0,0 +1,138 @@
+{
+ "root": true,
+ "parser": "@typescript-eslint/parser",
+ "parserOptions": { "project": "./tsconfig.json" },
+ "extends": [
+ "./node_modules/gts",
+ "plugin:import/errors",
+ "plugin:import/warnings",
+ "plugin:import/typescript"
+ ],
+ "env": {
+ "browser": true,
+ "node": true
+ },
+ "plugins": ["node", "ban", "import", "deprecation", "gpuweb-cts"],
+ "rules": {
+ // Core rules
+ "linebreak-style": ["warn", "unix"],
+ "no-console": "warn",
+ "no-throw-literal": "warn",
+ "no-undef": "off",
+ "no-useless-rename": "warn",
+ "object-shorthand": "warn",
+ "prefer-promise-reject-errors": "warn",
+ "quotes": ["warn", "single", { "avoidEscape": true, "allowTemplateLiterals": true }],
+
+ // All test TODOs must be tracked inside file/test descriptions or READMEs.
+ // Comments relating to TODOs in descriptions can be marked with references like "[1]".
+ // TODOs not relating to test coverage can be marked MAINTENANCE_TODO or similar.
+ "no-warning-comments": ["warn", { "terms": ["todo", "fixme", "xxx"], "location": "anywhere" }],
+
+ // Plugin: gpuweb-cts
+ "gpuweb-cts/string-trailing-space": "warn",
+ "gpuweb-cts/string-tabs": "warn",
+
+ // Plugin: @typescript-eslint
+ "@typescript-eslint/no-inferrable-types": "off",
+ "@typescript-eslint/consistent-type-assertions": "warn",
+ // Recommended lints
+ // https://github.com/typescript-eslint/typescript-eslint/blob/main/packages/eslint-plugin/docs/rules/README.md
+ "@typescript-eslint/adjacent-overload-signatures": "warn",
+ "@typescript-eslint/await-thenable": "warn",
+ "@typescript-eslint/ban-ts-comment": "warn",
+ "@typescript-eslint/no-empty-interface": "warn",
+ "@typescript-eslint/no-explicit-any": "warn",
+ "@typescript-eslint/no-extra-non-null-assertion": "warn",
+ "@typescript-eslint/no-floating-promises": "warn",
+ "@typescript-eslint/no-for-in-array": "warn",
+ "@typescript-eslint/no-misused-new": "warn",
+ "@typescript-eslint/no-namespace": "warn",
+ "@typescript-eslint/no-non-null-asserted-optional-chain": "warn",
+ "@typescript-eslint/no-this-alias": "warn",
+ "@typescript-eslint/no-unnecessary-type-assertion": "warn",
+ "@typescript-eslint/no-unnecessary-type-constraint": "warn",
+ "@typescript-eslint/no-unused-vars": [
+ "warn",
+ // MAINTENANCE_TODO: Enable warnings for args
+ { "vars": "all", "args": "none", "varsIgnorePattern": "^_", "argsIgnorePattern": "^_" }
+ ],
+ "@typescript-eslint/prefer-as-const": "warn",
+ "@typescript-eslint/prefer-for-of": "warn",
+ "@typescript-eslint/prefer-namespace-keyword": "warn",
+ "@typescript-eslint/require-await": "warn",
+ "@typescript-eslint/restrict-plus-operands": "warn",
+ "@typescript-eslint/triple-slash-reference": "warn",
+ "@typescript-eslint/unbound-method": "warn",
+ // MAINTENANCE_TODO: Try to clean up and enable these recommended lints?
+ //"@typescript-eslint/no-unsafe-argument": "warn",
+ //"@typescript-eslint/no-unsafe-assignment": "warn",
+ //"@typescript-eslint/no-unsafe-call": "warn",
+ //"@typescript-eslint/no-unsafe-member-access": "warn",
+ //"@typescript-eslint/no-unsafe-return": "warn",
+ // Note: These recommended lints are probably not practical to enable.
+ //"@typescript-eslint/no-misused-promises": "warn",
+ //"@typescript-eslint/no-non-null-assertion": "warn",
+ //"@typescript-eslint/no-var-requires": "warn",
+ //"@typescript-eslint/restrict-template-expressions": "warn",
+
+ // Plugin: ban
+ "ban/ban": [
+ "warn",
+ {
+ "name": "setTimeout",
+ "message": "WPT disallows setTimeout; use `common/util/timeout.js`."
+ }
+ ],
+
+ // Plugin: deprecation
+ //"deprecation/deprecation": "warn",
+
+ // Plugin: import
+ "import/order": [
+ "warn",
+ {
+ "groups": ["builtin", "external", "internal", "parent", "sibling", "index"],
+ "newlines-between": "always",
+ "alphabetize": { "order": "asc", "caseInsensitive": false }
+ }
+ ],
+ "import/newline-after-import": ["warn", { "count": 1 }],
+ "import/no-duplicates": "warn",
+ "import/no-restricted-paths": [
+ "error",
+ {
+ "zones": [
+ {
+ "target": "./src/webgpu",
+ "from": "./src/common",
+ "except": ["./framework", "./util"],
+ "message": "Non-framework common/ code imported from webgpu/ suite"
+ },
+ {
+ "target": "./src/unittests",
+ "from": "./src/common",
+ "except": ["./framework", "./util", "./internal"],
+ "message": "Non-framework common/ code imported from unittests/ suite"
+ },
+ {
+ "target": "./src/webgpu",
+ "from": "./src/unittests",
+ "message": "unittests/ suite imported from webgpu/ suite"
+ },
+ {
+ "target": "./src/common",
+ "from": "./src",
+ "except": ["./common", "./external"],
+ "message": "Non common/ code imported from common/"
+ }
+ ]
+ }
+ ]
+ },
+ "settings": {
+ "import/resolver": {
+ "./.eslint-resolver": {}
+ }
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/.github/pull_request_template.md b/dom/webgpu/tests/cts/checkout/.github/pull_request_template.md
new file mode 100644
index 0000000000..7fadba0fc3
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/.github/pull_request_template.md
@@ -0,0 +1,21 @@
+
+
+
+Issue: #<!-- Fill in the issue number here. See docs/intro/life_of.md -->
+
+<hr>
+
+**Requirements for PR author:**
+
+- [ ] All missing test coverage is tracked with "TODO" or `.unimplemented()`.
+- [ ] New helpers are `/** documented */` and new helper files are found in `helper_index.txt`.
+- [ ] Test behaves as expected in a WebGPU implementation. (If not passing, explain above.)
+
+**Requirements for [reviewer sign-off](https://github.com/gpuweb/cts/blob/main/docs/reviews.md):**
+
+- [ ] Tests are properly located in the test tree.
+- [ ] [Test descriptions](https://github.com/gpuweb/cts/blob/main/docs/intro/plans.md) allow a reader to "read only the test plans and evaluate coverage completeness", and accurately reflect the test code.
+- [ ] Tests provide complete coverage (including validation control cases). **Missing coverage MUST be covered by TODOs.**
+- [ ] Helpers and types promote readability and maintainability.
+
+When landing this PR, be sure to make any necessary issue status updates.
diff --git a/dom/webgpu/tests/cts/checkout/.github/workflows/pr.yml b/dom/webgpu/tests/cts/checkout/.github/workflows/pr.yml
new file mode 100644
index 0000000000..a398bf13ac
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/.github/workflows/pr.yml
@@ -0,0 +1,25 @@
+name: Pull Request CI
+
+on:
+ pull_request:
+ branches: [main]
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ persist-credentials: false
+ - uses: actions/setup-node@v3
+ with:
+ node-version: "16.x"
+ - run: npm ci
+ - run: npm test
+ - name: copy out-wpt to wpt tree
+ run: |
+ git clone --depth 2 https://github.com/web-platform-tests/wpt.git
+ rsync -av out-wpt/ wpt/webgpu
+ - name: test wpt lint
+ run: ./wpt lint
+ working-directory: ./wpt
diff --git a/dom/webgpu/tests/cts/checkout/.github/workflows/push.yml b/dom/webgpu/tests/cts/checkout/.github/workflows/push.yml
new file mode 100644
index 0000000000..6aa7a34e04
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/.github/workflows/push.yml
@@ -0,0 +1,26 @@
+name: Push CI
+
+on:
+ push:
+ branches: [main]
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2.3.1
+ with:
+ persist-credentials: false
+ - uses: actions/setup-node@v2-beta
+ with:
+ node-version: "16.x"
+ - run: npm ci
+ - run: |
+ npm test
+ mkdir deploy-build/
+ cp -r README.md src standalone out out-wpt docs tools deploy-build/
+ - uses: JamesIves/github-pages-deploy-action@4.1.4
+ with:
+ BRANCH: gh-pages
+ FOLDER: deploy-build
+ CLEAN: true
diff --git a/dom/webgpu/tests/cts/checkout/.gitignore b/dom/webgpu/tests/cts/checkout/.gitignore
new file mode 100644
index 0000000000..f115ad4f69
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/.gitignore
@@ -0,0 +1,196 @@
+# VSCode - see .vscode/README.md
+.vscode/
+
+# Build files
+/out/
+/out-wpt/
+/out-node/
+/out-wpt-reftest-screenshots/
+.tscache/
+*.tmp.txt
+/docs/tsdoc/
+
+# Cache files
+/standalone/data
+
+# Created by https://www.gitignore.io/api/linux,macos,windows,node
+# Edit at https://www.gitignore.io/?templates=linux,macos,windows,node
+
+### Linux ###
+*~
+
+# temporary files which can be created if a process still has a handle open of a deleted file
+.fuse_hidden*
+
+# KDE directory preferences
+.directory
+
+# Linux trash folder which might appear on any partition or disk
+.Trash-*
+
+# .nfs files are created when an open file is removed but is still being accessed
+.nfs*
+
+### macOS ###
+# General
+.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+### Node ###
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+lerna-debug.log*
+
+# Diagnostic reports (https://nodejs.org/api/report.html)
+report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
+
+# Runtime data
+pids
+*.pid
+*.seed
+*.pid.lock
+
+# Directory for instrumented libs generated by jscoverage/JSCover
+lib-cov
+
+# Coverage directory used by tools like istanbul
+coverage
+*.lcov
+
+# nyc test coverage
+.nyc_output
+
+# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
+.grunt
+
+# Bower dependency directory (https://bower.io/)
+bower_components
+
+# node-waf configuration
+.lock-wscript
+
+# Compiled binary addons (https://nodejs.org/api/addons.html)
+build/Release
+
+# Dependency directories
+node_modules/
+jspm_packages/
+
+# TypeScript v1 declaration files
+typings/
+
+# TypeScript cache
+*.tsbuildinfo
+
+# Optional npm cache directory
+.npm
+
+# Optional eslint cache
+.eslintcache
+
+# Optional REPL history
+.node_repl_history
+
+# Output of 'npm pack'
+*.tgz
+
+# Yarn Integrity file
+.yarn-integrity
+
+# dotenv environment variables file
+.env
+.env.test
+
+# parcel-bundler cache (https://parceljs.org/)
+.cache
+
+# next.js build output
+.next
+
+# nuxt.js build output
+.nuxt
+
+# rollup.js default build output
+dist/
+
+# Uncomment the public line if your project uses Gatsby
+# https://nextjs.org/blog/next-9-1#public-directory-support
+# https://create-react-app.dev/docs/using-the-public-folder/#docsNav
+# public
+
+# Storybook build outputs
+.out
+.storybook-out
+
+# vuepress build output
+.vuepress/dist
+
+# Serverless directories
+.serverless/
+
+# FuseBox cache
+.fusebox/
+
+# DynamoDB Local files
+.dynamodb/
+
+# Temporary folders
+tmp/
+temp/
+
+### Windows ###
+# Windows thumbnail cache files
+Thumbs.db
+Thumbs.db:encryptable
+ehthumbs.db
+ehthumbs_vista.db
+
+# Dump file
+*.stackdump
+
+# Folder config file
+[Dd]esktop.ini
+
+# Recycle Bin used on file shares
+$RECYCLE.BIN/
+
+# Windows Installer files
+*.cab
+*.msi
+*.msix
+*.msm
+*.msp
+
+# Windows shortcuts
+*.lnk
+
+trace/
+
+# End of https://www.gitignore.io/api/linux,macos,windows,node
diff --git a/dom/webgpu/tests/cts/checkout/CONTRIBUTING.md b/dom/webgpu/tests/cts/checkout/CONTRIBUTING.md
new file mode 100644
index 0000000000..50eb83267b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# GPU for the Web
+
+This repository is being used for work in the [W3C GPU for the Web Community
+Group](https://www.w3.org/community/gpu/), governed by the [W3C Community
+License Agreement (CLA)](http://www.w3.org/community/about/agreements/cla/). To
+make substantive contributions, you must join the CG.
+
+Contributions to the source code repository are subject to the terms of the
+[3-Clause BSD License](./LICENSE.txt).
+**Contributions will also be exported to
+[web-platform-tests](https://github.com/web-platform-tests/wpt)
+under the same license, and under the terms of its
+[CONTRIBUTING.md](https://github.com/web-platform-tests/wpt/blob/master/CONTRIBUTING.md).**
+
+If you are not the sole contributor to a contribution (pull request), please identify all
+contributors in the pull request comment.
+
+To add a contributor (other than yourself, that's automatic), mark them one per line as follows:
+
+```
++@github_username
+```
+
+If you added a contributor by mistake, you can remove them in a comment with:
+
+```
+-@github_username
+```
+
+If you are making a pull request on behalf of someone else but you had no part in designing the
+feature, you can remove yourself with the above syntax.
diff --git a/dom/webgpu/tests/cts/checkout/Gruntfile.js b/dom/webgpu/tests/cts/checkout/Gruntfile.js
new file mode 100644
index 0000000000..cf2207fcff
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/Gruntfile.js
@@ -0,0 +1,247 @@
+/* eslint-disable node/no-unpublished-require */
+/* eslint-disable prettier/prettier */
+/* eslint-disable no-console */
+
+const timer = require('grunt-timer');
+
+module.exports = function (grunt) {
+ timer.init(grunt);
+
+ // Project configuration.
+ grunt.initConfig({
+ pkg: grunt.file.readJSON('package.json'),
+
+ clean: {
+ out: ['out/', 'out-wpt/', 'out-node/'],
+ },
+
+ run: {
+ 'generate-version': {
+ cmd: 'node',
+ args: ['tools/gen_version'],
+ },
+ 'generate-listings': {
+ // Overwrites the listings.js files in out/. Must run before copy:out-wpt-generated;
+ // must not run before run:build-out (if it is run).
+ cmd: 'node',
+ args: ['tools/gen_listings', 'out/', 'src/webgpu', 'src/stress', 'src/manual', 'src/unittests', 'src/demo'],
+ },
+ validate: {
+ cmd: 'node',
+ args: ['tools/validate', 'src/webgpu', 'src/stress', 'src/manual', 'src/unittests', 'src/demo'],
+ },
+ 'validate-cache': {
+ cmd: 'node',
+ args: ['tools/gen_cache', 'out', 'src/webgpu', '--validate'],
+ },
+ 'generate-wpt-cts-html': {
+ cmd: 'node',
+ args: ['tools/gen_wpt_cts_html', 'tools/gen_wpt_cfg_unchunked.json'],
+ },
+ 'generate-wpt-cts-html-chunked2sec': {
+ cmd: 'node',
+ args: ['tools/gen_wpt_cts_html', 'tools/gen_wpt_cfg_chunked2sec.json'],
+ },
+ 'generate-cache': {
+ cmd: 'node',
+ args: ['tools/gen_cache', 'out', 'src/webgpu'],
+ },
+ unittest: {
+ cmd: 'node',
+ args: ['tools/run_node', 'unittests:*'],
+ },
+ 'build-out': {
+ // Must run before run:generate-listings, which will overwrite some files.
+ cmd: 'node',
+ args: [
+ 'node_modules/@babel/cli/bin/babel',
+ '--extensions=.ts,.js',
+ '--source-maps=true',
+ '--out-dir=out/',
+ 'src/',
+ ],
+ },
+ 'build-out-wpt': {
+ cmd: 'node',
+ args: [
+ 'node_modules/@babel/cli/bin/babel',
+ '--extensions=.ts,.js',
+ '--source-maps=false',
+ '--delete-dir-on-start',
+ '--out-dir=out-wpt/',
+ 'src/',
+ '--only=src/common/',
+ '--only=src/external/',
+ '--only=src/webgpu/',
+ // These files will be generated, instead of compiled from TypeScript.
+ '--ignore=src/common/internal/version.ts',
+ '--ignore=src/webgpu/listing.ts',
+ // These files are only used by non-WPT builds.
+ '--ignore=src/common/runtime/cmdline.ts',
+ '--ignore=src/common/runtime/server.ts',
+ '--ignore=src/common/runtime/standalone.ts',
+ '--ignore=src/common/runtime/helper/sys.ts',
+ '--ignore=src/common/tools',
+ ],
+ },
+ 'build-out-node': {
+ cmd: 'node',
+ args: [
+ 'node_modules/typescript/lib/tsc.js',
+ '--project', 'node.tsconfig.json',
+ '--outDir', 'out-node/',
+ ],
+ },
+ 'copy-assets': {
+ cmd: 'node',
+ args: [
+ 'node_modules/@babel/cli/bin/babel',
+ 'src/resources/',
+ '--out-dir=out/resources/',
+ '--copy-files'
+ ],
+ },
+ 'copy-assets-wpt': {
+ cmd: 'node',
+ args: [
+ 'node_modules/@babel/cli/bin/babel',
+ 'src/resources/',
+ '--out-dir=out-wpt/resources/',
+ '--copy-files'
+ ],
+ },
+ lint: {
+ cmd: 'node',
+ args: ['node_modules/eslint/bin/eslint', 'src/**/*.ts', '--max-warnings=0'],
+ },
+ fix: {
+ cmd: 'node',
+ args: ['node_modules/eslint/bin/eslint', 'src/**/*.ts', '--fix'],
+ },
+ 'autoformat-out-wpt': {
+ cmd: 'node',
+ // MAINTENANCE_TODO(gpuweb/cts#3128): This autoformat step is broken after a dependencies upgrade.
+ args: ['node_modules/prettier/bin/prettier.cjs', '--log-level=warn', '--write', 'out-wpt/**/*.js'],
+ },
+ tsdoc: {
+ cmd: 'node',
+ args: ['node_modules/typedoc/bin/typedoc'],
+ },
+ 'tsdoc-treatWarningsAsErrors': {
+ cmd: 'node',
+ args: ['node_modules/typedoc/bin/typedoc', '--treatWarningsAsErrors'],
+ },
+
+ serve: {
+ cmd: 'node',
+ args: ['node_modules/http-server/bin/http-server', '-p8080', '-a127.0.0.1', '-c-1']
+ }
+ },
+
+ copy: {
+ 'out-wpt-generated': {
+ files: [
+ // Must run after run:generate-version and run:generate-listings.
+ { expand: true, cwd: 'out', src: 'common/internal/version.js', dest: 'out-wpt/' },
+ { expand: true, cwd: 'out', src: 'webgpu/listing.js', dest: 'out-wpt/' },
+ ],
+ },
+ 'out-wpt-htmlfiles': {
+ files: [
+ { expand: true, cwd: 'src', src: 'webgpu/**/*.html', dest: 'out-wpt/' },
+ ],
+ },
+ },
+
+ ts: {
+ check: {
+ tsconfig: {
+ tsconfig: 'tsconfig.json',
+ passThrough: true,
+ },
+ },
+ },
+ });
+
+ grunt.loadNpmTasks('grunt-contrib-clean');
+ grunt.loadNpmTasks('grunt-contrib-copy');
+ grunt.loadNpmTasks('grunt-run');
+ grunt.loadNpmTasks('grunt-ts');
+
+ const helpMessageTasks = [];
+ function registerTaskAndAddToHelp(name, desc, deps) {
+ grunt.registerTask(name, deps);
+ addExistingTaskToHelp(name, desc);
+ }
+ function addExistingTaskToHelp(name, desc) {
+ helpMessageTasks.push({ name, desc });
+ }
+
+ grunt.registerTask('build-standalone', 'Build out/ (no listings, no checks, no WPT)', [
+ 'run:build-out',
+ 'run:copy-assets',
+ 'run:generate-version',
+ ]);
+ grunt.registerTask('build-wpt', 'Build out-wpt/ (no checks; run after generate-listings)', [
+ 'run:build-out-wpt',
+ 'run:copy-assets-wpt',
+ 'run:autoformat-out-wpt',
+ 'run:generate-version',
+ 'copy:out-wpt-generated',
+ 'copy:out-wpt-htmlfiles',
+ 'run:generate-wpt-cts-html',
+ 'run:generate-wpt-cts-html-chunked2sec',
+ ]);
+ grunt.registerTask('build-done-message', () => {
+ process.stderr.write('\nBuild completed! Running checks/tests');
+ });
+
+ registerTaskAndAddToHelp('pre', 'Run all presubmit checks: standalone+wpt+typecheck+unittest+lint', [
+ 'clean',
+ 'run:validate',
+ 'run:validate-cache',
+ 'build-standalone',
+ 'run:generate-listings',
+ 'build-wpt',
+ 'run:build-out-node',
+ 'build-done-message',
+ 'ts:check',
+ 'run:unittest',
+ 'run:lint',
+ 'run:tsdoc-treatWarningsAsErrors',
+ ]);
+ registerTaskAndAddToHelp('standalone', 'Build standalone and typecheck', [
+ 'build-standalone',
+ 'run:generate-listings',
+ 'build-done-message',
+ 'run:validate',
+ 'ts:check',
+ ]);
+ registerTaskAndAddToHelp('wpt', 'Build for WPT and typecheck', [
+ 'run:generate-listings',
+ 'build-wpt',
+ 'build-done-message',
+ 'run:validate',
+ 'ts:check',
+ ]);
+ registerTaskAndAddToHelp('unittest', 'Build standalone, typecheck, and unittest', [
+ 'standalone',
+ 'run:unittest',
+ ]);
+ registerTaskAndAddToHelp('check', 'Just typecheck', [
+ 'ts:check',
+ ]);
+
+ registerTaskAndAddToHelp('serve', 'Serve out/ on 127.0.0.1:8080 (does NOT compile source)', ['run:serve']);
+ registerTaskAndAddToHelp('fix', 'Fix lint and formatting', ['run:fix']);
+
+ addExistingTaskToHelp('clean', 'Clean out/ and out-wpt/');
+
+ grunt.registerTask('default', '', () => {
+ console.error('\nAvailable tasks (see grunt --help for info):');
+ for (const { name, desc } of helpMessageTasks) {
+ console.error(`$ grunt ${name}`);
+ console.error(` ${desc}`);
+ }
+ });
+};
diff --git a/dom/webgpu/tests/cts/checkout/LICENSE.txt b/dom/webgpu/tests/cts/checkout/LICENSE.txt
new file mode 100644
index 0000000000..c7a75d7d22
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/LICENSE.txt
@@ -0,0 +1,26 @@
+Copyright 2019 WebGPU CTS Contributors
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/dom/webgpu/tests/cts/checkout/README.md b/dom/webgpu/tests/cts/checkout/README.md
new file mode 100644
index 0000000000..1614f9a979
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/README.md
@@ -0,0 +1,22 @@
+# WebGPU Conformance Test Suite
+
+This is the conformance test suite for WebGPU.
+It tests the behaviors defined by the [WebGPU specification](https://gpuweb.github.io/gpuweb/).
+
+The contents of this test suite are considered **normative**; implementations must pass
+them to be WebGPU-conformant. Mismatches between the specification and tests are bugs.
+
+This test suite can be embedded inside [WPT](https://github.com/web-platform-tests/wpt) or run standalone.
+
+## [Launch the standalone CTS runner / test plan viewer](https://gpuweb.github.io/cts/standalone/)
+
+## Contributing
+
+Please read the [introductory guidelines](docs/intro/README.md) before contributing.
+Other documentation may be found in [`docs/`](docs/) and in the [helper index](https://gpuweb.github.io/cts/docs/tsdoc/) ([source](docs/helper_index.txt)).
+
+Read [CONTRIBUTING.md](CONTRIBUTING.md) on licensing.
+
+For realtime communication about the WebGPU spec and tests, join the
+[#WebGPU:matrix.org room](https://app.element.io/#/room/#WebGPU:matrix.org)
+on Matrix.
diff --git a/dom/webgpu/tests/cts/checkout/babel.config.js b/dom/webgpu/tests/cts/checkout/babel.config.js
new file mode 100644
index 0000000000..ad977bc510
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/babel.config.js
@@ -0,0 +1,21 @@
+module.exports = function (api) {
+ api.cache(true);
+ return {
+ presets: ['@babel/preset-typescript'],
+ plugins: [
+ 'const-enum',
+ [
+ 'add-header-comment',
+ {
+ header: ['AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts'],
+ },
+ ],
+ ],
+ compact: false,
+ // Keeps comments from getting hoisted to the end of the previous line of code.
+ // (Also keeps lines close to their original line numbers - but for WPT we
+ // reformat with prettier anyway.)
+ retainLines: true,
+ shouldPrintComment: val => !/eslint|prettier-ignore/.test(val),
+ };
+};
diff --git a/dom/webgpu/tests/cts/checkout/cts.code-workspace b/dom/webgpu/tests/cts/checkout/cts.code-workspace
new file mode 100644
index 0000000000..9c7320ce4b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/cts.code-workspace
@@ -0,0 +1,111 @@
+// Note: VS Code's setting precedence is `.vscode/` > `cts.code-workspace` > global user settings.
+{
+ "folders": [
+ {
+ "name": "cts",
+ "path": "."
+ },
+ {
+ "name": "webgpu",
+ "path": "src/webgpu"
+ }
+ ],
+ "settings": {
+ "editor.defaultFormatter": "esbenp.prettier-vscode",
+ "editor.detectIndentation": false,
+ "editor.rulers": [100],
+ "editor.tabSize": 2,
+ "files.insertFinalNewline": true,
+ "files.trimFinalNewlines": true,
+ "files.trimTrailingWhitespace": true,
+ "files.exclude": {
+ "*.tmp.txt": true,
+ ".gitignore": true,
+ ".travis.yml": true,
+ ".tscache": true,
+ "deploy_key.enc": true,
+ "node_modules": true,
+ "out": true,
+ "out-node": true,
+ "out-wpt": true,
+ "docs/tsdoc": true,
+ "package-lock.json": true
+ },
+ // Configure VSCode to use the right style when automatically adding imports on autocomplete.
+ "typescript.preferences.importModuleSpecifier": "relative",
+ "typescript.preferences.importModuleSpecifierEnding": "js",
+ "typescript.preferences.quoteStyle": "single",
+ "typescript.tsdk": "cts/node_modules/typescript/lib"
+ },
+ "tasks": {
+ "version": "2.0.0",
+ "tasks": [
+ // Only supports "shell" and "process" tasks.
+ // https://code.visualstudio.com/docs/editor/multi-root-workspaces#_workspace-task-configuration
+ {
+ // Use "group": "build" instead of "test" so it's easy to access from cmd-shift-B.
+ "group": "build",
+ "label": "npm: test",
+ "detail": "Run all presubmit checks",
+
+ "type": "shell",
+ "command": "npm run test",
+ "problemMatcher": []
+ },
+ {
+ "group": "build",
+ "label": "npm: check",
+ "detail": "Just typecheck",
+
+ "type": "shell",
+ "command": "npm run check",
+ "problemMatcher": ["$tsc"]
+ },
+ {
+ "group": "build",
+ "label": "npm: standalone",
+ "detail": "Build standalone and typecheck",
+
+ "type": "shell",
+ "command": "npm run standalone",
+ "problemMatcher": []
+ },
+ {
+ "group": "build",
+ "label": "npm: wpt",
+ "detail": "Build for WPT and typecheck",
+
+ "type": "shell",
+ "command": "npm run wpt",
+ "problemMatcher": []
+ },
+ {
+ "group": "build",
+ "label": "npm: unittest",
+ "detail": "Build standalone, typecheck, and unittest",
+
+ "type": "shell",
+ "command": "npm run unittest",
+ "problemMatcher": []
+ },
+ {
+ "group": "build",
+ "label": "npm: tsdoc",
+ "detail": "Build docs/tsdoc/",
+
+ "type": "shell",
+ "command": "npm run tsdoc",
+ "problemMatcher": []
+ },
+ {
+ "group": "build",
+ "label": "grunt: run:lint",
+ "detail": "Run eslint",
+
+ "type": "shell",
+ "command": "npx grunt run:lint",
+ "problemMatcher": ["$eslint-stylish"]
+ },
+ ]
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/docs/adding_timing_metadata.md b/dom/webgpu/tests/cts/checkout/docs/adding_timing_metadata.md
new file mode 100644
index 0000000000..fe32cead20
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/docs/adding_timing_metadata.md
@@ -0,0 +1,163 @@
+# Adding Timing Metadata
+
+## listing_meta.json files
+
+`listing_meta.json` files are SEMI AUTO-GENERATED.
+
+The raw data may be edited manually, to add entries or change timing values.
+
+The **list** of tests must stay up to date, so it can be used by external
+tools. This is verified by presubmit checks.
+
+The `subcaseMS` values are estimates. They can be set to 0 if for some reason
+you can't estimate the time (or there's an existing test with a long name and
+slow subcases that would result in query strings that are too long), but this
+will produce a non-fatal warning. Avoid creating new warnings whenever
+possible. Any existing failures should be fixed (eventually).
+
+### Performance
+
+Note this data is typically captured by developers using higher-end
+computers, so typical test machines might execute more slowly. For this
+reason, the WPT chunking should be configured to generate chunks much shorter
+than 5 seconds (a typical default time limit in WPT test executors) so they
+should still execute in under 5 seconds on lower-end computers.
+
+## Problem
+
+When adding new tests to the CTS you may occasionally see an error like this
+when running `npm test` or `npm run standalone`:
+
+```
+ERROR: Tests missing from listing_meta.json. Please add the new tests (set subcaseMS to 0 if you cannot estimate it):
+ webgpu:shader,execution,expression,binary,af_matrix_addition:matrix:*
+
+/home/runner/work/cts/cts/src/common/util/util.ts:38
+ throw new Error(msg && (typeof msg === 'string' ? msg : msg()));
+ ^
+Error:
+ at assert (/home/runner/work/cts/cts/src/common/util/util.ts:38:11)
+ at crawl (/home/runner/work/cts/cts/src/common/tools/crawl.ts:155:11)
+Warning: non-zero exit code 1
+ Use --force to continue.
+
+Aborted due to warnings.
+```
+
+What this error message is trying to tell us is that there is no entry for
+`webgpu:shader,execution,expression,binary,af_matrix_addition:matrix:*` in
+`src/webgpu/listing_meta.json`.
+
+These entries are estimates for the amount of time that subcases take to run,
+and are used as inputs into the WPT tooling to attempt to portion out tests into
+approximately same-sized chunks.
+
+If a value has been defaulted to 0 by someone, you will see warnings like this:
+
+```
+...
+WARNING: subcaseMS≤0 found in listing_meta.json (allowed, but try to avoid):
+ webgpu:shader,execution,expression,binary,af_matrix_addition:matrix:*
+...
+```
+
+These messages should be resolved by adding appropriate entries to the JSON
+file.
+
+## Solution 1 (manual, best for simple tests)
+
+If you're developing new tests and need to update this file, it is sometimes
+easiest to do so manually. Run your tests under your usual development workflow
+and see how long they take. In the standalone web runner `npm start`, the total
+time for a test case is reported on the right-hand side when the case logs are
+expanded.
+
+Record the average time per *subcase* across all cases of the test (you may need
+to compute this) into the `listing_meta.json` file.
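+
+For instance (hypothetical numbers, purely to illustrate the arithmetic):
+
+```
+// All cases of the test took ~250 ms in total and covered 100 subcases, so
+// record roughly 2.5 as the subcaseMS value for that test's entry.
+const totalCaseTimeMS = 250;
+const subcaseCount = 100;
+const subcaseMS = totalCaseTimeMS / subcaseCount; // 2.5
+```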
+
+## Solution 2 (semi-automated)
+
+There exists tooling in the CTS repo for generating appropriate estimates for
+these values, though it does require some manual intervention. The rest of this
+doc will be a walkthrough of running these tools.
+
+Timing data can be captured in bulk and "merged" into this file using
+the `merge_listing_times` tool. This is useful when a large number of tests
+change or otherwise a lot of tests need to be updated, but it also automates the
+manual steps above.
+
+The tool can also be used without any inputs to reformat `listing_meta.json`.
+Please read the help message of `merge_listing_times` for more information.
+
+### Placeholder Value
+
+If your development workflow requires a clean build, the first step is to add a
+placeholder value for the entry to `src/webgpu/listing_meta.json`, since there is a
+chicken-and-egg problem for updating these values.
+
+```
+ "webgpu:shader,execution,expression,binary,af_matrix_addition:matrix:*": { "subcaseMS": 0 },
+```
+
+(It should have a value of 0, since later tooling updates the value if the newer
+value is higher.)
+
+### Websocket Logger
+
+The first tool that needs to be run is `websocket-logger`, which receives data
+on a WebSocket channel to capture timing data when CTS is run. This
+should be run in a separate process/terminal, since it needs to stay running
+throughout the following steps.
+
+In the `tools/websocket-logger/` directory:
+
+```
+npm ci
+npm start
+```
+
+The output from this command will indicate where the results are being logged,
+which will be needed later. For example:
+
+```
+...
+Writing to wslog-2023-09-12T18-57-34.txt
+...
+```
+
+### Running CTS
+
+Now we need to run the specific cases in CTS that we need to time.
+This should be possible under any development workflow (as long as its runtime environment, like Node, supports WebSockets), but the most well-tested way is using the standalone web runner.
+
+This requires serving the CTS locally. In the project root:
+
+```
+npm run standalone
+npm start
+```
+
+Once this is started you can then direct a WebGPU enabled browser to the
+specific CTS entry and run the tests, for example:
+
+```
+http://localhost:8080/standalone/?q=webgpu:shader,execution,expression,binary,af_matrix_addition:matrix:*
+```
+
+If the tests have a high variance in runtime, you can run them multiple times.
+The longest recorded time will be used.
+
+### Merging metadata
+
+The final step is to merge the new data that has been captured into the JSON
+file.
+
+This can be done using the following command:
+
+```
+tools/merge_listing_times webgpu -- tools/websocket-logger/wslog-2023-09-12T18-57-34.txt
+```
+
+where the text file is the result file from websocket-logger.
+
+Now you just need to commit the pending diff in your repo.
diff --git a/dom/webgpu/tests/cts/checkout/docs/build.md b/dom/webgpu/tests/cts/checkout/docs/build.md
new file mode 100644
index 0000000000..2d7b2f968c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/docs/build.md
@@ -0,0 +1,43 @@
+# Building
+
+Building the project is not usually needed for local development.
+However, for exports to WPT, or deployment (https://gpuweb.github.io/cts/),
+files can be pre-generated.
+
+The project builds into two directories:
+
+- `out/`: Built framework and test files, needed to run standalone or command line.
+- `out-wpt/`: Build directory for export into WPT. Contains:
+ - An adapter for running WebGPU CTS tests under WPT
+ - A copy of the needed files from `out/`
+ - A copy of any `.html` test cases from `src/`
+
+To build and run all pre-submit checks (including type and lint checks and
+unittests), use:
+
+```sh
+npm test
+```
+
+For checks only:
+
+```sh
+npm run check
+```
+
+For a quicker iterative build:
+
+```sh
+npm run standalone
+```
+
+## Run
+
+To serve the built files (rather than using the dev server), run `npx grunt serve`.
+
+## Export to WPT
+
+Run `npm run wpt`.
+
+Copy (or symlink) the `out-wpt/` directory as the `webgpu/` directory in your
+WPT checkout or your browser's "internal" WPT test directory.
diff --git a/dom/webgpu/tests/cts/checkout/docs/deno.md b/dom/webgpu/tests/cts/checkout/docs/deno.md
new file mode 100644
index 0000000000..22a54c79bd
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/docs/deno.md
@@ -0,0 +1,24 @@
+# Running the CTS on Deno
+
+Since version 1.8, Deno experimentally implements the WebGPU API out of the box.
+You can use the `./tools/deno` script to run the CTS in Deno. To do this you
+will first need to install Deno: [stable](https://deno.land#installation), or
+build the main branch from source
+(`cargo install --git https://github.com/denoland/deno --bin deno`).
+
+On macOS and recent Linux, you can just run `./tools/run_deno` as is. On Windows and
+older Linux releases you will need to run
+`deno run --unstable --allow-read --allow-write --allow-env ./tools/deno`.
+
+## Usage
+
+```
+Usage:
+ tools/run_deno [OPTIONS...] QUERIES...
+ tools/run_deno 'unittests:*' 'webgpu:buffers,*'
+Options:
+ --verbose Print result/log of every test as it runs.
+ --debug Include debug messages in logging.
+ --print-json Print the complete result JSON in the output.
+ --expectations Path to expectations file.
+```
diff --git a/dom/webgpu/tests/cts/checkout/docs/fp_primer.md b/dom/webgpu/tests/cts/checkout/docs/fp_primer.md
new file mode 100644
index 0000000000..a8302fb461
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/docs/fp_primer.md
@@ -0,0 +1,871 @@
+# Floating Point Primer
+
+This document is meant to be a primer on the concepts related to floating point
+numbers that need to be understood when working on tests in WebGPU's CTS.
+
+WebGPU's CTS is responsible for testing if an implementation of WebGPU
+satisfies the spec, and thus meets the expectations of programmers based on the
+contract defined by the spec.
+
+Floating point math makes up a significant portion of the WGSL spec, and has
+many subtle corner cases to get correct.
+
+Additionally, floating point math, unlike integer math, is broadly not exact, so
+how inaccurate a calculation is allowed to be is required to be stated in the
+spec and tested in the CTS, as opposed to testing for a singular correct
+response.
+
+Thus, the WebGPU CTS has a significant amount of machinery around how to
+correctly test floating point expectations in a fluent manner.
+
+## Floating Point Numbers
+
+For some of the following discussion of floating point numbers, 32-bit floating
+point numbers are assumed, also known as single precision IEEE floating
+point numbers or `f32`s. Most of the discussions that apply to this format apply
+to other concrete formats that are handled, i.e. 16-bit/f16/half-precision.
+There are some significant differences with respect to AbstractFloats, which
+will be discussed in its own section.
+
+Details of how these formats work are discussed as needed below, but for a more
+involved discussion, please see the references in the Resources sections.
+
+Additionally, in the Appendix there is a table of interesting/common values that
+are often referenced in tests or this document.
+
+A floating point number system defines
+- A finite set of values to stand as representatives for the infinite set of
+ real numbers, and
+- Arithmetic operations on those representatives, trying to approximate the
+ ideal operations on real numbers.
+
+The cardinality mismatch alone implies that any floating point number system necessarily loses information.
+
+This means that not all numbers in the bounds can be exactly represented as a
+floating point value.
+
+For example, the integer `1` is exactly represented as a f32 as `0x3f800000`,
+but the next nearest number `0x3f800001` is `1.00000011920928955`.
+
+So any number between `1` and `1.00000011920928955` is not exactly representable
+as a f32 and instead is approximated as either `1` or `1.00000011920928955`.
+
+When a number X is not exactly representable by a floating point value, there
+are normally two neighbouring numbers that could reasonably represent X: the
+nearest floating point value above X, and the nearest floating point value below
+X. Which of these values gets used is dictated by the rounding mode being used,
+which may be something like always round towards 0 or go to the nearest
+neighbour, or something else entirely.
+
+The process of converting numbers between different precisions is called
+quantization. WGSL does not prescribe a specific rounding mode when
+quantizing, so either of the neighbouring values is considered valid when
+converting a non-exactly representable value to a floating point value. This has
+significant implications on the CTS that are discussed later.
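+
+As a concrete illustration, TypeScript (which the CTS is written in) exposes
+round-to-nearest f32 quantization via `Math.fround`. Note that WGSL itself
+permits rounding to either neighbour, so this shows only one of the allowed
+results:
+
+```
+Math.fround(1);           // 1
+Math.fround(1.0000001);   // 1.00000011920928955… (bit pattern 0x3f800001)
+Math.fround(1.00000003);  // 1 (closer to 1 than to the next f32 above it)
+```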
+
+From here on, we assume you are familiar with the internal structure of a
+floating point number (a sign bit, a biased exponent, and a mantissa). For
+reference, see
+[binary64 on Wikipedia](https://en.wikipedia.org/wiki/Double-precision_floating-point_format),
+[binary32 on Wikipedia](https://en.wikipedia.org/wiki/Single-precision_floating-point_format),
+and
+[binary16 on Wikipedia](https://en.wikipedia.org/wiki/Half-precision_floating-point_format).
+
+In the floating point formats described above, there are two possible zero
+values, one with all bits being 0, called positive zero, and one all the same
+except with the sign bit being 1, called negative zero.
+
+For WGSL, and thus the CTS's purposes, these values are considered equivalent.
+Typescript, which the CTS is written in, treats all zeros as positive zeros,
+unless you explicitly escape hatch to differentiate between them, so most of the
+time there being two zeros doesn't materially affect code.
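+
+For example, in TypeScript:
+
+```
+-0 === 0;          // true: most code cannot tell the two zeros apart
+Object.is(-0, 0);  // false: the explicit escape hatch that can
+1 / -0;            // -Infinity, while 1 / 0 is +Infinity
+```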
+
+### Normal Numbers
+
+Normal numbers are floating point numbers whose biased exponent is not all 0s or
+all 1s. When working with normal numbers the mantissa starts with an implied
+leading 1. For WGSL these numbers behave as you expect for floating point values
+with no interesting caveats.
+
+### Subnormal Numbers
+
+Subnormal numbers are finite non-zero numbers whose biased exponent is all 0s,
+sometimes called denorms.
+
+These are the closest numbers to zero, both positive and negative, and fill in
+the gap between the normal numbers with the smallest magnitude, and 0.
+
+Some devices, for performance reasons, do not handle operations on the
+subnormal numbers, and instead treat them as being zero; this is called *flush
+to zero* or FTZ behaviour.
+
+This means in the CTS that when a subnormal number is consumed or produced by an
+operation, an implementation may choose to replace it with zero.
+
+Like the rounding mode for quantization, this adds significant complexity to the
+CTS, which will be discussed later.
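+
+For f32 the values involved are tiny. A sketch of the relevant boundaries,
+written as TypeScript constants (the values are facts about the f32 format; the
+names are just for illustration):
+
+```
+const kSmallestPositiveSubnormalF32 = 2 ** -149; // ~1.4e-45
+const kSmallestPositiveNormalF32 = 2 ** -126;    // ~1.18e-38
+
+// Under FTZ, any value whose magnitude is in
+// [kSmallestPositiveSubnormalF32, kSmallestPositiveNormalF32) may be treated
+// as zero when it is consumed or produced by an operation.
+```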
+
+### Inf & NaNs
+
+Floating point numbers include positive and negative infinity to represent
+values that are out of the bounds supported by the current precision.
+
+Implementations may assume that infinities are not present. When an evaluation
+at runtime would produce an infinity, an indeterminate value is produced
+instead.
+
+When a value goes out of bounds for a specific precision there are special
+rounding rules that apply. If it is 'near' the edge of finite values for that
+precision, it is considered to be near-overflowing, and the implementation may
+choose to round it to the edge value or the appropriate infinity. If it is not
+near the finite values, in which case it is considered to be far-overflowing,
+then it must be rounded to the appropriate infinity.
+
+This of course is vague, but the spec does have a precise definition of where
+the transition from near to far overflow is.
+
+Let `x` be our value.
+
+Let `exp_max` be the (unbiased) exponent of the largest finite value for the
+floating point type.
+
+If `|x|` < `2 ** (exp_max + 1)`, but not in
+the finite range, then it is considered to be near-overflowing for the
+floating point type.
+
+If the magnitude is equal to or greater than this limit, then it is
+far-overflowing for the floating point type.
+
+This concept of near-overflow vs far-overflow divides the real number line into
+5 distinct regions.
+
+| Region | Rule |
+|-----------------------------------------------|---------------------------------|
+| -∞ < `x` <= `-(2 ** (exp_max + 1))` | must round to -∞ |
+| `-(2 ** (exp_max + 1))` < `x` <= min fp value | must round to -∞ or min value |
+| min fp value < `x` < max fp value | round as discussed below |
+| max fp value <= `x` < `2 ** (exp_max + 1)` | must round to max value or ∞ |
+| `2 ** (exp_max + 1)` <= `x`                  | must round to ∞                 |
+
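+For f32, where `exp_max` is 127, these boundaries work out as follows (a worked
+example written as TypeScript constants):
+
+```
+const expMax = 127;                    // unbiased exponent of the largest finite f32
+const kF32Max = 3.4028234663852886e38; // largest finite f32
+const farOverflow = 2 ** (expMax + 1); // 2 ** 128, ~3.4028236692e38
+
+// x in (kF32Max, farOverflow): near-overflowing, may round to kF32Max or +∞.
+// x >= farOverflow:            far-overflowing, must round to +∞.
+// (And symmetrically for negative values.)
+```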
+
+The CTS encodes the least restrictive interpretation of the rules in the spec,
+i.e. assuming someone has made a slightly adversarial implementation that always
+chooses the thing with the least accuracy.
+
+This means that the above rules about infinities and overflow combine to say
+that any time a non-finite value for the specific floating point type is seen,
+any finite value is acceptable afterward. This is because the non-finite value
+may be converted to an infinity and then an indeterminate value can be used
+instead of the infinity.
+
+(This comes with the caveat that this is only for runtime execution on a GPU;
+the rules for compile time execution will be discussed below.)
+
+Signaling NaNs are treated as quiet NaNs in the WGSL spec. And quiet NaNs have
+the same "may-convert-to-indeterminate-value" behaviour that infinities have, so
+for the purpose of the CTS they are handled by the infinite/out of bounds logic
+normally.
+
+## Notation/Terminology
+
+When discussing floating point values in the CTS, there are a few terms used
+with precise meanings, which will be elaborated here.
+
+Additionally, any specific notation used will be specified here to avoid
+confusion.
+
+### Operations
+
+The CTS tests for the proper execution of builtins, e.g. `sin`, `sqrt`, `abs`,
+etc, and expressions, e.g. `*`, `/`, `<`, etc, when provided with floating
+point inputs. These collectively can be referred to as floating point
+operations.
+
+Operations, which can be thought of as mathematical functions, are mappings from
+a set of inputs to a set of outputs.
+
+Denoted `f(x, y) = X`, where `f` is a placeholder or the name of the operation,
+lower case variables are the inputs to the function, and uppercase variables are
+the outputs of the function.
+
+Operations have one or more inputs and an output value.
+
+Values are generally defined as floats, integers, booleans, vectors, and
+matrices. Consult the [WGSL Spec](https://www.w3.org/TR/WGSL/) for the exact
+list of types and their definitions.
+
+Most operations' inputs and outputs are the same type of value. There are some
+exceptions that accept or emit heterogeneous data types, normally a floating
+point type and an integer type or a boolean.
+
+There are a couple of builtins (`frexp` and `modf`) that return composite
+outputs, where multiple values are returned as a single result value made of
+structured data. Composite inputs, on the other hand, are handled by having
+multiple input parameters.
+
+Some examples of different types of operations:
+
+`multiplication(x, y) = X`, which represents the WGSL expression `x * y`, takes
+in floating point values, `x` and `y`, and produces a floating point value `X`.
+
+`lessThan(x, y) = X`, which represents the WGSL expression `x < y`, again takes
+in floating point values, but in this case returns a boolean value.
+
+`ldexp(x, y) = X`, which builds a floating point value, takes in a floating
+point value `x` and a restricted integer `y`.
+
+### Domain, Range, and Intervals
+
+For an operation `f(x) = X`, the interval of valid values for the input, `x`, is
+called the *domain*, and the interval for valid results, `X`, is called the
+*range*.
+
+An interval, `[a, b]`, is a set of real numbers that contains `a`, `b`, and all
+the real numbers between them.
+
+Open-ended intervals, i.e. ones that don't include `a` and/or `b`, are avoided,
+and are called out explicitly when they occur.
+
+The convention in this doc and the CTS code is that `a <= b`, so `a` can be
+referred to as the beginning of the interval and `b` as the end of the interval.
+
+When talking about intervals, this doc and the code endeavour to avoid using
+the term **range** to refer to the span of values that an interval covers,
+instead using the term *bounds*, to avoid confusion with the terminology around
+the output of operations.
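+
+In code an interval can be represented very simply. A minimal TypeScript sketch
+(illustrative only, not the CTS's actual interval implementation):
+
+```
+// [begin, end] with begin <= end, following the convention above.
+type Interval = readonly [number, number];
+
+function contains([a, b]: Interval, x: number): boolean {
+  return a <= x && x <= b;
+}
+
+contains([0, Math.PI], 1.5); // true
+```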
+
+## Accuracy
+
+As mentioned above floating point numbers are not able to represent all the
+possible values over their bounds, but instead represent discrete values in that
+interval, and approximate the remainder.
+
+Additionally, floating point numbers are not evenly distributed over the real
+number line, but instead are more densely clustered around zero, with the space
+between values increasing in steps as the magnitude increases.
+
+When discussing operations on floating point numbers, there is often reference
+to a true value. This is the value that, given no performance constraints and
+infinite precision, you would get, e.g. `acos(-1) = π`, where π has infinite
+digits of precision.
+
+For the CTS it is often sufficient to calculate the true value using TypeScript,
+since its native number format is higher precision (double-precision/f64), so
+all f64, f32, and f16 values can be represented in it. Where this breaks down
+will be discussed in the section on compile time vs runtime execution.
+
+The true value is sometimes representable exactly as a floating point value, but
+often is not.
+
+Additionally, many operations are implemented using approximations from
+numerical analysis, where there is a tradeoff between the precision of the
+result and the cost.
+
+Thus, the spec specifies what the accuracy constraints for specific operations
+are, i.e. how close to the true value an implementation is required to be in
+order to be considered conforming.
+
+There are 5 different ways that accuracy requirements are defined in the spec:
+
+1. *Exact*
+
+   This is the situation where the true value for an operation is always
+   expected to be exactly representable. This doesn't happen for any
+ of the operations that return floating point values, but does occur for
+ logical operations that return boolean values.
+
+
+2. *Correctly Rounded*
+
+ For the case that the true value is exactly representable as a floating
+   point, this is equivalent to *Exact* above. In the event that the
+ true value is not exact, then the acceptable answer for most numbers is
+ either the nearest representable value above or below the true value.
+
+ For values near the subnormal range, e.g. close to zero, this becomes more
+ complex, since an implementation may FTZ at any point. So if the exact
+ solution is subnormal or either of the neighbours of the true value are
+ subnormal, zero becomes a possible result, thus the acceptance interval is
+ wider than naively expected.
+
+ On the edge of and beyond the bounds of a floating point type the definition
+ of correctly rounded becomes complex, which is discussed in detail in the
+ section on overflow.
+
+
+3. *Absolute Error*
+
+ This type of accuracy specifies an error value, ε, and the calculated result
+ is expected to be within that distance from the true value, i.e.
+ `[ X - ε, X + ε ]`.
+
+ The main drawback with this manner of specifying accuracy is that it doesn't
+ scale with the level of precision in floating point numbers themselves at a
+ specific value. Thus, it tends to be only used for specifying accuracy over
+ specific limited intervals, i.e. [-π, π].
+
+
+4. *Units of Least Precision (ULP)*
+
+ The solution to the issue of not scaling with precision of floating point is
+ to use units of least precision.
+
+ ULP(X) is min (b-a) over all pairs (a,b) of representable floating point
+ numbers such that (a <= X <= b and a =/= b). For a more formal discussion of
+ ULP see
+ [On the definition of ulp(x)](https://hal.inria.fr/inria-00070503/document).
+
+ n * ULP or nULP means `[X - n * ULP @ X, X + n * ULP @ X]`.
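+
+   (A short TypeScript sketch of this definition appears just after this list.)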
+
+
+5. *Inherited*
+
+ When an operation's accuracy is defined in terms of other operations, then
+ its accuracy is said to be inherited. Handling of inherited accuracies is
+ one of the main driving factors in the design of testing framework, so will
+ need to be discussed in detail.
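+
+To make the ULP definition above (item 4) concrete, here is a minimal
+TypeScript sketch for finite, positive values that are exactly representable in
+f32. It is illustrative only (it is not the CTS's actual helper) and ignores
+edge cases such as zero, infinities, NaN, and negative inputs:
+
+```
+function ulpF32(x: number): number {
+  const f = new Float32Array(1);
+  const u = new Uint32Array(f.buffer);
+  f[0] = x;        // the f32 value
+  const bits = u[0];
+  u[0] = bits + 1; // next representable f32 above
+  const above = f[0];
+  u[0] = bits - 1; // next representable f32 below
+  const below = f[0];
+  // min(b - a) over the representable pairs that bracket x.
+  return Math.min(above - Math.fround(x), Math.fround(x) - below);
+}
+
+ulpF32(1); // 2 ** -24 (~5.96e-8)
+```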
+
+## Acceptance Intervals
+
+The first four accuracy types (Exact, Correctly Rounded, Absolute Error, and
+ULP), sometimes called simple accuracies, can be defined in isolation from each
+other, and by association can be implemented using relatively independent
+implementations.
+
+The original implementation of the floating point framework did this as it was
+being built out, but ran into difficulties when defining the inherited
+accuracies.
+
+For example, `tan(x)` inherits from `sin(x)/cos(x)`; one can take the defined
+rules and manually build up a bespoke solution for checking the results, but
+this is tedious, error-prone, and doesn't allow for code re-use.
+
+Instead, it would be better if there was a single conceptual framework that one
+can express all the 'simple' accuracy requirements in, and then have a mechanism
+for composing them to define inherited accuracies.
+
+In the WebGPU CTS this is done via the concept of acceptance intervals, which is
+derived from a similar concept in the Vulkan CTS, though implemented
+significantly differently.
+
+The core of this idea is that each of different accuracy types can be integrated
+into the definition of the operation, so that instead of transforming an input
+from the domain to a point in the range, the operation is producing an interval
+in the range, that is the acceptable values an implementation may emit.
+
+
+The simple accuracies can be defined as follows:
+
+1. *Exact*
+
+ `f(x) => [X, X]`
+
+
+2. *Correctly Rounded*
+
+ If `X` is precisely defined as a floating point value
+
+ `f(x) => [X, X]`
+
+ otherwise,
+
+ `[a, b]` where `a` is the largest representable number with `a <= X`, and `b`
+ is the smallest representable number with `X <= b`
+
+
+3. *Absolute Error*
+
+ `f(x) => [ X - ε, X + ε ]`, where ε is the absolute error value
+
+
+4. *ULP Error*
+
+ `f(x) = X => [X - n*ULP(X), X + n*ULP(X)]`
+
+As defined, these definitions handle mapping from a point in the domain into an
+interval in the range.
+
+This is insufficient for implementing inherited accuracies, since inheritance
+sometimes involves mapping domain intervals to range intervals.
+
+Here we use the convention for naturally extending a function on real numbers
+into a function on intervals of real numbers, i.e. `f([a, b]) = [A, B]`.
+
+Given that floating point numbers have a finite number of precise values for any
+given interval, one could implement just running the accuracy computation for
+every point in the interval and then spanning together the resultant intervals.
+That would be very inefficient though and make your reviewer sad to read.
+
+For mapping intervals to intervals the key insight is that we only need to be
+concerned with the extrema of the operation in the interval, since the
+acceptance interval is the bounds of the possible outputs.
+
+In more precise terms:
+```
+ f(x) => X, x = [a, b] and X = [A, B]
+
+ X = [min(f(x)), max(f(x))]
+ X = [min(f([a, b])), max(f([a, b]))]
+ X = [f(m), f(n)]
+```
+where `m` and `n` are in `[a, b]`, `m <= n`, and produce the min and max results
+for `f` on the interval, respectively.
+
+So how do we find the minima and maxima for our operation in the domain?
+
+The common general solution for this requires using calculus to calculate the
+derivative of `f`, `f'`, and then finding the zeroes of `f'` to locate the
+local extrema of `f`.
+
+This solution wouldn't be sufficient for all builtins, e.g. `step`, which is
+not differentiable at edge values.
+
+Thankfully we do not need a general solution for the CTS, since all the builtin
+operations are defined in the spec, so `f` is from a known set of options.
+
+These operations can be divided into two broad categories: monotonic, and
+non-monotonic, with respect to an interval.
+
+The monotonic operations are ones that preserve the order of inputs in their
+outputs (or reverse it). Their graph only ever decreases or increases,
+never changing from one or the other, though it can have flat sections.
+
+The non-monotonic operations are ones whose graph would have both regions of
+increase and decrease.
+
+The monotonic operations, when mapping an interval to an interval, are simple to
+handle, since the extrema are guaranteed to be the ends of the domain, `a` and
+`b`.
+
+So `f([a, b])` = `[f(a), f(b)]` or `[f(b), f(a)]`. We could figure out if `f` is
+increasing or decreasing beforehand to determine if it should be `[f(a), f(b)]`
+or `[f(b), f(a)]`.
+
+It is simpler to just use min & max to have an implementation that is agnostic
+to the details of `f`.
+```
+ A = f(a), B = f(b)
+ X = [min(A, B), max(A, B)]
+```
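+
+Expressed as a small TypeScript sketch (illustrative, not the CTS's real API):
+
+```
+type Interval = readonly [number, number];
+
+// Map a domain interval through a monotonic operation by evaluating only its
+// endpoints; min/max keeps this agnostic to whether f is increasing or
+// decreasing over the interval.
+function mapMonotonic(f: (x: number) => number, [a, b]: Interval): Interval {
+  const A = f(a);
+  const B = f(b);
+  return [Math.min(A, B), Math.max(A, B)];
+}
+
+mapMonotonic(Math.exp, [0, 1]); // [1, 2.718281828459045]
+```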
+
+The non-monotonic functions that we need to handle for interval-to-interval
+mappings are more complex. Thankfully, only a small number of the overall
+operations need to be handled, since they are only the operations that are
+used in an inherited accuracy and take in the output of another operation as
+part of that inherited accuracy.
+
+So in the CTS we just have bespoke implementations for each of them.
+
+Part of the operation definition in the CTS is a function that takes in the
+domain interval, and returns a sub-interval such that the subject function is
+monotonic over that sub-interval, and hence the function's minima and maxima are
+at the ends.
+
+This adjusted domain interval can then be fed through the same machinery as the
+monotonic functions.
+
+### Inherited Accuracy
+
+So with all of that background out of the way, we can now define an inherited
+accuracy in terms of acceptance intervals.
+
+The crux of this is the insight that the range of one operation can become the
+domain of another operation to compose them together.
+
+And since we have defined how to do this interval to interval mapping above,
+transforming things becomes mechanical and thus implementable in reusable code.
+
+When talking about inherited accuracies `f(x) => g(x)` is used to denote that
+`f`'s accuracy is defined as `g`.
+
+An example to illustrate inherited accuracies, in f32:
+
+```
+ tan(x) => sin(x)/cos(x)
+
+  sin(x) => [sin(x) - 2 ** -11, sin(x) + 2 ** -11]
+  cos(x) => [cos(x) - 2 ** -11, cos(x) + 2 ** -11]
+
+ x/y => [x/y - 2.5 * ULP(x/y), x/y + 2.5 * ULP(x/y)]
+```
+
+`sin(x)` and `cos(x)` are non-monotonic, so calculating out a closed generic
+form over an interval is a pain, since the min and max vary depending on the
+value of x. Let's isolate this to a single point, so you don't have to read
+literally pages of expanded intervals.
+
+```
+ x = π/2
+
+ sin(π/2) => [sin(π/2) - 2 ** -11, sin(π/2) + 2 ** -11]
+ => [0 - 2 ** -11, 0 + 2 ** -11]
+ => [-0.000488…, 0.000488…]
+ cos(π/2) => [cos(π/2) - 2 ** -11, cos(π/2) + 2 ** -11]
+ => [-0.500488…, -0.499511…]
+
+ tan(π/2) => sin(π/2)/cos(π/2)
+ => [-0.000488…, 0.000488…]/[-0.500488…, -0.499511…]
+ => [min(-0.000488…/-0.500488…, -0.000488…/-0.499511…, 0.000488…/-0.500488…, 0.000488…/-0.499511…),
+ max(-0.000488…/-0.500488…, -0.000488…/-0.499511…, 0.000488…/-0.500488…, 0.000488…/-0.499511…)]
+ => [0.000488…/-0.499511…, 0.000488…/0.499511…]
+ => [-0.0009775171, 0.0009775171]
+```
+
+For clarity this has omitted a bunch of complexity around FTZ behaviours and
+the fact that these operations are only defined for specific domains, but the
+high-level concepts hold.
+
+For each of the inherited operations we could implement a manually written out
+closed form solution, but that would be quite error-prone and would not reuse
+code between builtins.
+
+Instead, the CTS takes advantage of the fact that, in addition to testing
+implementations of `tan(x)` we are going to be testing implementations of
+`sin(x)`, `cos(x)` and `x/y`, so there should be functions to generate
+acceptance intervals for those operations.
+
+The `tan(x)` acceptance interval can be constructed by generating the acceptance
+intervals for `sin(x)`, `cos(x)` and `x/y` via function calls and composing the
+results.
+
+This algorithmically looks something like this:
+
+```
+ tan(x):
+ Calculate sin(x) interval
+ Calculate cos(x) interval
+ Calculate sin(x) result divided by cos(x) result
+ Return division result
+```
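+
+A TypeScript sketch of that composition. The names and the simplified rules
+here are illustrative only; the real CTS helpers also account for the 2.5 ULP
+division accuracy, FTZ behaviour, and domain restrictions:
+
+```
+type Interval = readonly [number, number];
+
+// Absolute-error intervals of 2 ** -11, per the sin/cos accuracies above.
+const sinInterval = (x: number): Interval =>
+  [Math.sin(x) - 2 ** -11, Math.sin(x) + 2 ** -11];
+const cosInterval = (x: number): Interval =>
+  [Math.cos(x) - 2 ** -11, Math.cos(x) + 2 ** -11];
+
+// Interval division by spanning all endpoint quotients.
+// (Only valid when the divisor interval does not straddle zero.)
+function divideIntervals([a, b]: Interval, [c, d]: Interval): Interval {
+  const q = [a / c, a / d, b / c, b / d];
+  return [Math.min(...q), Math.max(...q)];
+}
+
+// tan(x) inherits its acceptance interval from sin(x)/cos(x).
+function tanInterval(x: number): Interval {
+  return divideIntervals(sinInterval(x), cosInterval(x));
+}
+```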
+
+## Compile vs Run Time Evaluation
+
+The above discussions have been primarily agnostic to when and where a
+calculation is occurring, with an implicit bias to runtime execution on a GPU.
+
+In reality where/when a computation is occurring has a significant impact on the
+expected outcome when dealing with edge cases.
+
+### Terminology
+
+There are two related axes that will be referred to when it comes to evaluation.
+These are compile vs run time, and CPU vs GPU. Broadly speaking compile time
+execution happens on the host CPU, and run time evaluation occurs on a dedicated
+GPU.
+
+(Software graphics implementations like WARP and SwiftShader technically break this by
+being a software emulation of a GPU that runs on the CPU, but conceptually one can
+think of these implementations as a type of GPU in this context, since they
+have similar constraints when it comes to precision, etc.)
+
+Compile time evaluation is execution that occurs when setting up a shader
+module, i.e. when compiling WGSL to a platform specific shading language. It is
+part of resolving values for things like constants, and occurs once before the
+shader is run by the caller. It includes constant evaluation and override
+evaluation. All AbstractFloat operations are compile time evaluated.
+
+Runtime evaluation is execution that occurs every time the shader is run, and
+may include dynamic data that is provided between invocations. It is work that
+is sent to the GPU for execution in the shader.
+
+WGSL const-expressions and override-expressions are evaluated before runtime and
+both are considered "compile time" in this discussion. WGSL runtime-expressions
+are evaluated at runtime.
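+
+For example, in WGSL (a minimal illustration; the declarations are arbitrary):
+
+```
+// const-expression: evaluated when the shader module is created.
+const k: f32 = 1.0 / 3.0;
+
+// override-expression: evaluated at pipeline creation, still before the shader runs.
+override scale: f32 = 2.0;
+
+// runtime expression: evaluated on the GPU for every invocation.
+fn apply(x: f32) -> f32 {
+  return x * scale + k;
+}
+```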
+
+### Behavioural Differences
+
+For a well-defined operation with a finite result, runtime and compile time
+evaluation should be indistinguishable.
+
+For example:
+```
+// runtime
+@group(0) @binding(0) var<uniform> a : f32;
+@group(0) @binding(1) var<uniform> b : f32;
+
+// in some function:
+let c: f32 = a + b;
+```
+and
+```
+// compile time
+const c: f32 = 1.0f + 2.0f;
+```
+should produce the same result of `3.0` in the variable `c`, assuming `1.0` and `2.0`
+were passed in as `a` and `b`.
+
+The only difference is when/where the execution occurs.
+
+The difference in behaviour between the two appears when the result of the
+operation is not finite for the underlying floating point type.
+
+If instead of `1.0` and `2.0` we had `10.0` and `f32.max`, so the true result is
+`f32.max + 10.0`, the behaviours differ. Specifically, the runtime
+evaluated version will still run, but the result in `c` will be an indeterminate
+value, which may be any finite f32 value. For the compile time example, on the
+other hand, compiling the shader will fail validation.
+
+This applies to any operation, and isn't restricted to just addition. Any time a
+value goes outside the finite range of the type, the shader will hit one of these
+outcomes: an indeterminate value for runtime execution, or a validation failure
+for compile time execution.
+
+Unfortunately, we are dealing with intervals of results and not precise results,
+which leads to even more conceptual complexity. For runtime evaluation this
+isn't too bad, because the rule becomes: if any part of the interval is
+non-finite, then an indeterminate value may be the result, and the interval for
+an indeterminate result, `[fp min, fp max]`, already includes any finite portion
+of the original interval.
+
+Compile time evaluation becomes significantly more complex, because the difference
+isn't which interval is returned, but whether the shader compiles or not, and those
+outcomes are mutually exclusive. This is compounded even further by having to
+consider near-overflow vs far-overflow behaviour. Thankfully this can be broken
+down case by case, based on where an interval falls.
+
+Assuming `X` is the well-defined result of an operation, i.e. it is not
+indeterminate because the operation is undefined for the inputs:
+
+| Region                       | Description                                          | Result                         |
+|------------------------------|------------------------------------------------------|--------------------------------|
+| `abs(X) <= fp max` | interval falls completely in the finite bounds | validation succeeds |
+| `abs(X) >= 2 ** (exp_max+1)` | interval falls completely in the far-overflow bounds | validation fails |
+| Otherwise | interval intersects the near-overflow region | validation may succeed or fail |
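+
+As a sketch, this classification could be expressed as follows (a hypothetical
+helper assuming f32, whose largest finite value is ~3.40282346e38 and whose
+far-overflow boundary `2 ** (exp_max + 1)` is `2 ** 128`):
+
+```ts
+const kF32Max = 3.4028234663852886e38; // largest finite f32 value
+const kF32FarOverflow = 2 ** 128;      // 2 ** (exp_max + 1) for f32
+
+type ValidationExpectation = 'succeeds' | 'fails' | 'may succeed or fail';
+
+function constEvalExpectation(X: number): ValidationExpectation {
+  if (Math.abs(X) <= kF32Max) return 'succeeds';      // fully finite
+  if (Math.abs(X) >= kF32FarOverflow) return 'fails'; // far overflow
+  return 'may succeed or fail';                       // near overflow
+}
+```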
+
+The final case is somewhat difficult from a CTS perspective, because it is no
+longer sufficient to know that a non-finite result has occurred; the specific
+result needs to be tracked. Additionally, the expected outcome is somewhat
+ambiguous, since the shader may or may not compile. This could in theory still
+be tested by the CTS, via switching logic for this region: if the shader
+compiles, expect these results, otherwise pass the test.
+This adds a significant amount of complexity to the testing code for thoroughly
+testing a relatively small segment of values. Other environments do not have the
+behaviour in this region as rigorously defined or tested, so fully testing
+here would likely find lots of issues that would just need to be mitigated in
+the CTS.
+
+Currently, we choose to avoid testing validation of near-overflow scenarios.
+
+### Additional Technical Limitations
+
+The above description of compile and runtime evaluation assumes a somewhat
+theoretical world in which the intervals used for testing are infinitely
+precise. In actuality they are implemented with the ECMAScript `number` type,
+which is an f64 value.
+
+For the vast majority of cases, even out of bounds and overflow, this is
+sufficient. There is one small slice where it breaks down: when the result is
+just outside the finite range, by less than 1 f64 ULP of the boundary value.
+An example of this is `2 ** -11 + f32.max`, whose true value lies between `f32.max`
+and `f32.max + ULPF64(f32.max)`. This becomes a problem because the value
+technically falls into the out-of-bounds region, but depending on how
+f64 quantization is handled in the test runner it will be treated as either `f32.max` or
+`f32.max + ULPF64(f32.max)`. So for compile time evaluation, either we expect an
+implementation to always handle this, or it might fail; either way we cannot
+easily detect it, since this is pushing hard on the limits of precision of the
+testing environment.
+
+(A parallel version of this probably exists on the other side of the
+out-of-bounds region, but I don't have a proven example of it.)
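+
+The collapse can be demonstrated directly in ECMAScript, since the intermediate
+arithmetic happens in f64:
+
+```ts
+const kF32Max = 3.4028234663852886e38; // largest finite f32 value
+
+// 2 ** -11 is far smaller than 1 f64 ULP at this magnitude (~2 ** 75), so the
+// sum rounds back to exactly kF32Max and the "just out of bounds" distinction
+// is lost in the testing environment.
+console.log(2 ** -11 + kF32Max === kF32Max); // true
+```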
+
+The high road fix to this problem is to use an arbitrary precision floating
+point implementation. Unfortunately such a library is not on the standards
+track for ECMAScript at this time, so we would have to evaluate and pick a
+third party dependency to use. Beyond the selection process, this would also
+require a significant refactoring of the existing framework code, all to fix a
+very marginal case.
+
+(This differs from Float16 support, where a prototype of the proposed
+API has been pulled in, and the long term plan is to use the ECMAScript
+implementation's version once all the major runtimes support it. So it can
+be viewed as a polyfill.)
+
+This region is currently not tested, as part of the decision to defer testing of
+the entire out-of-bounds-but-not-overflowing region.
+
+In the future, if we decide to add testing of the out-of-bounds region, it is
+likely the CTS would still avoid the sub-regions where f64 precision breaks
+down, to keep perfect from being the enemy of good. If someone is interested
+in taking on the effort needed to migrate to an arbitrary precision float
+library, or if this turns out to be a significant issue in the future, this
+decision can be revisited.
+
+## Abstract Float
+
+### Accuracy
+
+For the concrete floating point types (f32 & f16) the accuracy of operations is
+defined in terms of their own type. Specifically for f32, correctly rounded
+refers to the nearest f32 values, and ULP is in terms of the distance between
+f32 values.
+
+AbstractFloat is internally defined as an f64, and this applies for exact and
+correctly rounded accuracies. Thus, correctly rounded refers to the nearest f64
+values. However, AbstractFloat differs for ULP and absolute errors. Reading
+the spec strictly, these all have unbounded accuracies, but it is recommended
+that their accuracies be at least as good as the f32 equivalents.
+
+The difference between f32 and f64 ULP at a specific value X is significant, so
+the "at least as good as f32" requirement is always less strict than if it were
+calculated in terms of f64. Similarly, for absolute accuracies the interval
+`[x - epsilon, x + epsilon]` is always equal or wider when calculated in terms
+of f32 values vs f64 values.
+
+If an inherited accuracy is only defined in terms of correctly rounded
+accuracies, then the interval is calculated in terms of f64s. If any of the
+defining accuracies are ULP or absolute errors, then the result falls into the
+"unbounded, but recommended to be at least as good as f32" bucket.
+
+What this means for the CTS implementation is that, for these "at least as good
+as f32" error intervals, if the infinitely accurate result is finite for f32,
+then the error interval for f64 is just the f32 interval. If the result is not
+finite for f32, then the accuracy interval is just the unbounded interval.
+
+This is implemented in the CTS by having the FPTraits for AbstractFloat
+forward to the f32 implementation for the operations that are tested as being
+at least as good as f32.
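+
+A sketch of that forwarding (the names and the shape of the traits objects here
+are illustrative, not the real CTS API):
+
+```ts
+type FPInterval = readonly [number, number];
+
+// Hypothetical f32 traits: sin has an absolute error bound of 2 ** -11.
+const F32Traits = {
+  sinInterval(x: number): FPInterval {
+    return [Math.sin(x) - 2 ** -11, Math.sin(x) + 2 ** -11];
+  },
+};
+
+// Hypothetical AbstractFloat traits: operations whose accuracy is only
+// "unbounded, but recommended to be at least as good as f32" reuse the f32
+// interval generator directly.
+const AbstractFloatTraits = {
+  sinInterval: F32Traits.sinInterval,
+};
+```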
+
+### Implementation
+
+AbstractFloat is a compile time construct that exists in WGSL. AbstractFloat
+values are expressible as literals or as the results of operations that return
+them, but a variable cannot be typed as AbstractFloat. Instead, the variable
+needs to be a concrete type, i.e. f32 or f16, and the AbstractFloat value will
+be quantized on assignment.
+
+Because they cannot be stored in or passed via buffers, AbstractFloats are
+tricky to test. Two approaches have been proposed for testing the results of
+operations that return AbstractFloats.
+
+As of the writing of this doc, the second of these options, extracting bits,
+is the one being pursued in the CTS.
+
+#### const_assert
+
+The first proposal is to lean on the `const_assert` statement that exists in
+WGSL. For each test case a snippet of code would be written out that has a form
+something like this:
+
+```
+// foo(x) is the operation under test
+const_assert lower < foo(x) // fails if the result is below the acceptance interval
+const_assert upper > foo(x) // fails if the result is above the acceptance interval
+```
+
+where `lower` and `upper` would be string-replaced with literal values for the
+bounds of the acceptance interval when generating the shader text.
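+
+For instance, a generated stanza might look like the following, where the
+operation and the interval bounds are purely illustrative:
+
+```
+// Hypothetical generated WGSL for foo(x) = tan(1.0),
+// with an illustrative acceptance interval of [1.5570, 1.5579].
+const_assert 1.5570 < tan(1.0); // fails if the result is below the interval
+const_assert 1.5579 > tan(1.0); // fails if the result is above the interval
+```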
+
+This approach has a number of limitations that made it unacceptable for the CTS.
+First, the way errors are reported makes it a pain to debug. Someone working with
+the CTS would get a report of a failed shader compile, possibly with a line
+number, but they would not get the actual result of `foo(x)`, just that it is
+out of range. Additionally, if you place many of these stanzas in the same
+shader to optimize dispatch, you will not get a report that, say, 3 of 32 cases
+failed with specific results; you will just get a report that the batch failed.
+All of this makes for a very poor experience when trying to understand what is failing.
+
+Beyond the lack of ergonomics, this approach also makes things like AbstractFloat
+comparison and `const_assert` very load bearing for the CTS. It is possible that
+a bug in an implementation of `const_assert`, for example, could cause it to not
+fail shader compilation, which would lead to silent passing of tests.
+Conceptually, instead of depending on a signal to indicate that something is
+working, we would be depending on a signal that it isn't working, and assuming
+that if we don't receive that signal everything is good, rather than considering
+that our signal mechanism might be broken.
+
+#### Extracting Bits
+
+The other proposal that was developed depends on the fact that AbstractFloat is
+spec'd to be an f64 internally. So the CTS could store the result of an operation
+as two 32-bit unsigned integers (or broken up into sign, exponent, and
+mantissa). These stored integers could be exported to the testing framework via
+a buffer, which could in turn rebuild the f64 values.
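+
+On the framework side, rebuilding an f64 from two 32-bit words read back from a
+buffer is straightforward (a sketch, assuming the shader writes the low word
+first):
+
+```ts
+function f64FromU32s(low: number, high: number): number {
+  const view = new DataView(new ArrayBuffer(8));
+  view.setUint32(0, low, true);  // little-endian low word
+  view.setUint32(4, high, true); // little-endian high word
+  return view.getFloat64(0, true);
+}
+```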
+
+This approach allows the CTS to test values directly in the testing framework,
+thus providing the same diagnostics as other tests, as well as reusing the same
+test-running harness.
+
+The major downsides come from actually implementing the bit extraction. Due to
+the restrictions on AbstractFloats, the code to extract the bits is tricky.
+Specifically, there is no simple bitcast to something like an AbstractInt that
+can be used. Instead, `frexp` needs to be used along with additional
+operations. This leads to problems, since `frexp` is not defined for
+subnormal values, so it is impossible to extract a subnormal AbstractFloat,
+though 0 could be returned when one is encountered.
+
+Tests that do try to extract bits to determine the result should either avoid
+cases with subnormal results, or check for the nearest normal number or zero.
+
+The inability to pass AbstractFloats around in a non-lossy fashion causes
+additional issues, since it means that user-defined functions that take them as
+parameters or return them do not exist in WGSL. Thus, the snippet of code for
+extracting AbstractFloats cannot simply be inserted as a function at the top of
+a testing shader and then invoked for each test case. Instead, it needs to be
+inlined into the shader at each call site. Actually implementing this in the CTS
+isn't difficult, but it does make the shaders significantly longer and more
+difficult to read. It may also have an impact on how many test cases can be in
+a batch, since runtime for some backends is sensitive to the length of the
+shader being run.
+
+# Appendix
+
+### Significant f64 Values
+
+| Name | Decimal (~) | Hex | Sign Bit | Exponent Bits | Significand Bits |
+|------------------------|----------------:|----------------------:|---------:|--------------:|-----------------------------------------------------------------:|
+| Negative Infinity | -∞ | 0xfff0 0000 0000 0000 | 1 | 111 1111 1111 | 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 |
+| Min Negative Normal | -1.79769313E308 | 0xffef ffff ffff ffff | 1 | 111 1111 1110 | 1111 1111 1111 1111 1111 1111 1111 1111 1111 1111 1111 1111 1111 |
+| Max Negative Normal | -2.2250738E−308 | 0x8010 0000 0000 0000 | 1 | 000 0000 0001 | 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 |
+| Min Negative Subnormal | -2.2250738E−308 | 0x800f ffff ffff ffff | 1 | 000 0000 0000 | 1111 1111 1111 1111 1111 1111 1111 1111 1111 1111 1111 1111 1111 |
+| Max Negative Subnormal | -4.9406564E−324 | 0x8000 0000 0000 0001 | 1 | 000 0000 0000 | 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0001 |
+| Negative Zero | -0 | 0x8000 0000 0000 0000 | 1 | 000 0000 0000 | 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 |
+| Positive Zero | 0 | 0x0000 0000 0000 0000 | 0 | 000 0000 0000 | 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 |
+| Min Positive Subnormal | 4.9406564E−324 | 0x0000 0000 0000 0001 | 0 | 000 0000 0000 | 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0001 |
+| Max Positive Subnormal | 2.2250738E−308 | 0x000f ffff ffff ffff | 0 | 000 0000 0000 | 1111 1111 1111 1111 1111 1111 1111 1111 1111 1111 1111 1111 1111 |
+| Min Positive Normal | 2.2250738E−308 | 0x0010 0000 0000 0000 | 0 | 000 0000 0001 | 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 |
+| Max Positive Normal | 1.79769313E308 | 0x7fef ffff ffff ffff | 0 | 111 1111 1110 | 1111 1111 1111 1111 1111 1111 1111 1111 1111 1111 1111 1111 1111 |
+| Positive Infinity      | ∞               | 0x7ff0 0000 0000 0000 | 0        | 111 1111 1111 | 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 |
+
+### Significant f32 Values
+
+| Name | Decimal (~) | Hex | Sign Bit | Exponent Bits | Significand Bits |
+|------------------------|---------------:|------------:|---------:|--------------:|-----------------------------:|
+| Negative Infinity | -∞ | 0xff80 0000 | 1 | 1111 1111 | 0000 0000 0000 0000 0000 000 |
+| Min Negative Normal | -3.40282346E38 | 0xff7f ffff | 1 | 1111 1110 | 1111 1111 1111 1111 1111 111 |
+| Max Negative Normal | -1.1754943E−38 | 0x8080 0000 | 1 | 0000 0001 | 0000 0000 0000 0000 0000 000 |
+| Min Negative Subnormal | -1.1754942E-38 | 0x807f ffff | 1 | 0000 0000 | 1111 1111 1111 1111 1111 111 |
+| Max Negative Subnormal | -1.4012984E−45 | 0x8000 0001 | 1 | 0000 0000 | 0000 0000 0000 0000 0000 001 |
+| Negative Zero | -0 | 0x8000 0000 | 1 | 0000 0000 | 0000 0000 0000 0000 0000 000 |
+| Positive Zero | 0 | 0x0000 0000 | 0 | 0000 0000 | 0000 0000 0000 0000 0000 000 |
+| Min Positive Subnormal | 1.4012984E−45 | 0x0000 0001 | 0 | 0000 0000 | 0000 0000 0000 0000 0000 001 |
+| Max Positive Subnormal | 1.1754942E-38 | 0x007f ffff | 0 | 0000 0000 | 1111 1111 1111 1111 1111 111 |
+| Min Positive Normal | 1.1754943E−38 | 0x0080 0000 | 0 | 0000 0001 | 0000 0000 0000 0000 0000 000 |
+| Max Positive Normal | 3.40282346E38 | 0x7f7f ffff | 0 | 1111 1110 | 1111 1111 1111 1111 1111 111 |
+| Positive Infinity      | ∞              | 0x7f80 0000 | 0        | 1111 1111     | 0000 0000 0000 0000 0000 000 |
+
+### Significant f16 Values
+
+| Name | Decimal (~) | Hex | Sign Bit | Exponent Bits | Significand Bits |
+|------------------------|--------------:|-------:|---------:|--------------:|-----------------:|
+| Negative Infinity | -∞ | 0xfc00 | 1 | 111 11 | 00 0000 0000 |
+| Min Negative Normal | -65504 | 0xfbff | 1 | 111 10 | 11 1111 1111 |
+| Max Negative Normal | -6.1035156E−5 | 0x8400 | 1 | 000 01 | 00 0000 0000 |
+| Min Negative Subnormal | -6.0975552E−5 | 0x83ff | 1 | 000 00 | 11 1111 1111 |
+| Max Negative Subnormal | -5.9604645E−8 | 0x8001 | 1 | 000 00 | 00 0000 0001 |
+| Negative Zero | -0 | 0x8000 | 1 | 000 00 | 00 0000 0000 |
+| Positive Zero | 0 | 0x0000 | 0 | 000 00 | 00 0000 0000 |
+| Min Positive Subnormal | 5.9604645E−8 | 0x0001 | 0 | 000 00 | 00 0000 0001 |
+| Max Positive Subnormal | 6.0975552E−5 | 0x03ff | 0 | 000 00 | 11 1111 1111 |
+| Min Positive Normal | 6.1035156E−5 | 0x0400 | 0 | 000 01 | 00 0000 0000 |
+| Max Positive Normal | 65504 | 0x7bff | 0 | 111 10 | 11 1111 1111 |
+| Positive Infinity      | ∞             | 0x7c00 | 0        | 111 11        | 00 0000 0000     |
+
+# Resources
+- [WebGPU Spec](https://www.w3.org/TR/webgpu/)
+- [WGSL Spec](https://www.w3.org/TR/WGSL/)
+- [binary64 on Wikipedia](https://en.wikipedia.org/wiki/Double-precision_floating-point_format)
+- [binary32 on Wikipedia](https://en.wikipedia.org/wiki/Single-precision_floating-point_format)
+- [binary16 on Wikipedia](https://en.wikipedia.org/wiki/Half-precision_floating-point_format)
+- [IEEE-754 Floating Point Converter](https://www.h-schmidt.net/FloatConverter/IEEE754.html)
+- [IEEE 754 Calculator](http://weitz.de/ieee/)
+- [On the definition of ulp(x)](https://hal.inria.fr/inria-00070503/document)
+- [Float Exposed](https://float.exposed/)
diff --git a/dom/webgpu/tests/cts/checkout/docs/helper_index.txt b/dom/webgpu/tests/cts/checkout/docs/helper_index.txt
new file mode 100644
index 0000000000..3cdf868bb4
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/docs/helper_index.txt
@@ -0,0 +1,93 @@
+<!--
+ View this file in Typedoc!
+
+ - At https://gpuweb.github.io/cts/docs/tsdoc/
+ - Or locally:
+ - npm run tsdoc
+ - npm start
+ - http://localhost:8080/docs/tsdoc/
+
+ This file is parsed as a tsdoc.
+-->
+
+## Index of Test Helpers
+
+This index is a quick-reference of helper functions in the test suite.
+Use it to determine whether you can reuse a helper, instead of writing new code,
+to improve readability and reviewability.
+
+Whenever a new generally-useful helper is added, it should be indexed here.
+
+**See linked documentation for full helper listings.**
+
+- {@link common/framework/params_builder!CaseParamsBuilder} and {@link common/framework/params_builder!SubcaseParamsBuilder}:
+ Combinatorial generation of test parameters. They are iterated by the test framework at runtime.
+ See `examples.spec.ts` for basic examples of how this behaves.
+ - {@link common/framework/params_builder!CaseParamsBuilder}:
+ `ParamsBuilder` for adding "cases" to a test.
+ - {@link common/framework/params_builder!CaseParamsBuilder#beginSubcases}:
+ "Finalizes" the `CaseParamsBuilder`, returning a `SubcaseParamsBuilder`.
+ - {@link common/framework/params_builder!SubcaseParamsBuilder}:
+ `ParamsBuilder` for adding "subcases" to a test.
+
+### Fixtures
+
+(Uncheck the "Inherited" box to hide inherited methods from documentation pages.)
+
+- {@link common/framework/fixture!Fixture}: Base fixture for all tests.
+- {@link webgpu/gpu_test!GPUTest}: Base fixture for WebGPU tests.
+- {@link webgpu/api/validation/validation_test!ValidationTest}: Base fixture for WebGPU validation tests.
+- {@link webgpu/shader/validation/shader_validation_test!ShaderValidationTest}: Base fixture for WGSL shader validation tests.
+- {@link webgpu/idl/idl_test!IDLTest}:
+ Base fixture for testing the exposed interface is correct (without actually using WebGPU).
+
+### WebGPU Helpers
+
+- {@link webgpu/capability_info}: Structured information about texture formats, binding types, etc.
+- {@link webgpu/constants}:
+ Constant values (needed anytime a WebGPU constant is needed outside of a test function).
+- {@link webgpu/util/buffer}: Helpers for GPUBuffers.
+- {@link webgpu/util/texture}: Helpers for GPUTextures.
+- {@link webgpu/util/unions}: Helpers for various union typedefs in the WebGPU spec.
+- {@link webgpu/util/math}: Helpers for common math operations.
+- {@link webgpu/util/check_contents}: Check the contents of TypedArrays, with nice messages.
+ Also can be composed with {@link webgpu/gpu_test!GPUTest#expectGPUBufferValuesPassCheck}, used to implement
+ GPUBuffer checking helpers in GPUTest.
+- {@link webgpu/util/conversion}: Numeric encoding/decoding for float/unorm/snorm values, etc.
+- {@link webgpu/util/copy_to_texture}:
+  Helper class for copyToTexture test suites, for executing copies and checking results.
+- {@link webgpu/util/color_space_conversion}:
+ Helper functions to do color space conversion. The algorithm is the same as defined in
+ CSS Color Module Level 4.
+- {@link webgpu/util/create_elements}:
+ Helpers for creating web elements like HTMLCanvasElement, OffscreenCanvas, etc.
+- {@link webgpu/util/shader}: Helpers for creating fragment shaders based on intended output values, plainType, and componentCount.
+- {@link webgpu/util/prng}: Seedable, deterministic pseudo-random number generator; a replacement for Math.random().
+- {@link webgpu/util/texture/base}: General texture-related helpers.
+- {@link webgpu/util/texture/data_generation}: Helper for generating dummy texture data.
+- {@link webgpu/util/texture/layout}: Helpers for working with linear image data
+ (like in copyBufferToTexture, copyTextureToBuffer, writeTexture).
+- {@link webgpu/util/texture/subresource}: Helpers for working with texture subresource ranges.
+- {@link webgpu/util/texture/texel_data}: Helpers encoding/decoding texel formats.
+- {@link webgpu/util/texture/texel_view}: Helper class to create and view texture data through various representations.
+- {@link webgpu/util/texture/texture_ok}: Helpers for checking texture contents.
+- {@link webgpu/shader/types}: Helpers for WGSL data types.
+- {@link webgpu/shader/execution/expression/expression}: Helpers for WGSL expression execution tests.
+- {@link webgpu/web_platform/util}: Helpers for web platform features (e.g. video elements).
+
+### General Helpers
+
+- {@link common/framework/resources}: Provides the path to the `resources/` directory.
+- {@link common/util/navigator_gpu}: Finds and returns the `navigator.gpu` object or equivalent.
+- {@link common/util/util}: Miscellaneous utilities.
+ - {@link common/util/util!assert}: Assert a condition, otherwise throw an exception.
+ - {@link common/util/util!unreachable}: Assert unreachable code.
+ - {@link common/util/util!assertReject}, {@link common/util/util!resolveOnTimeout},
+ {@link common/util/util!rejectOnTimeout},
+ {@link common/util/util!raceWithRejectOnTimeout}, and more.
+- {@link common/util/collect_garbage}:
+ Attempt to trigger garbage collection, for testing that garbage collection is not observable.
+- {@link common/util/preprocessor}: A simple template-based, non-line-based preprocessor,
+ implementing if/elif/else/endif. Possibly useful for WGSL shader generation.
+- {@link common/util/timeout}: Use this instead of `setTimeout`.
+- {@link common/util/types}: Type metaprogramming helpers.
diff --git a/dom/webgpu/tests/cts/checkout/docs/implementing.md b/dom/webgpu/tests/cts/checkout/docs/implementing.md
new file mode 100644
index 0000000000..ae6848839a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/docs/implementing.md
@@ -0,0 +1,97 @@
+# Test Implementation
+
+Concepts important to understand when writing tests. See existing tests for examples to copy from.
+
+## Test fixtures
+
+Most tests can use one of the several common test fixtures:
+
+- `Fixture`: Base fixture, provides core functions like `expect()`, `skip()`.
+- `GPUTest`: Wraps every test in error scopes. Provides helpers like `expectContents()`.
+- `ValidationTest`: Extends `GPUTest`, provides helpers like `expectValidationError()`, `getErrorTextureView()`.
+- Or create your own. (Often not necessary - helper functions can be used instead.)
+
+Test fixtures or helper functions may be defined in `.spec.ts` files, but if used by multiple
+test files, should be defined in separate `.ts` files (without `.spec`) alongside the files that
+use them.
+
+### GPUDevices in tests
+
+`GPUDevice`s are largely stateless (except for `lost`-ness, error scope stack, and `label`).
+This allows the CTS to reuse one device across multiple test cases using the `DevicePool`,
+which provides `GPUDevice` objects to tests.
+
+Currently, there is one `GPUDevice` with the default descriptor, and
+a cache of several more, for devices with additional capabilities.
+Devices in the `DevicePool` are automatically removed when certain things go wrong.
+
+Later, there may be multiple `GPUDevice`s to allow multiple test cases to run concurrently.
+
+## Test parameterization
+
+The CTS provides helpers (`.params()` and friends) for creating large cartesian products of test parameters.
+These generate "test cases" further subdivided into "test subcases".
+See `basic,*` in `examples.spec.ts` for examples, and the [helper index](./helper_index.txt)
+for a list of capabilities.
+
+Test parameterization should be applied liberally to ensure the maximum coverage
+possible within reasonable time. You can skip some with `.filter()`. And remember: computers are
+pretty fast - thousands of test cases can be reasonable.
+
+Use existing lists of parameter values (such as
+[`kTextureFormats`](https://github.com/gpuweb/cts/blob/0f38b85/src/suites/cts/capability_info.ts#L61))
+to parameterize tests, instead of making your own list. Use the info tables (such as
+`kTextureFormatInfo`) to define and retrieve information about the parameters.
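+
+A minimal shape of a parameterized test, where the test name and parameter
+values are illustrative only:
+
+```typescript
+g.test('example,parameterized')
+  .params(u =>
+    u
+      .combine('format', ['r8unorm', 'r8snorm'] as const) // "cases"
+      .beginSubcases()
+      .combine('size', [1, 4, 4096]) // "subcases" within each case
+  )
+  .fn(t => {
+    const { format, size } = t.params;
+    // ... exercise the API with this format/size combination ...
+  });
+```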
+
+## Asynchrony in tests
+
+Since there are no synchronous operations in WebGPU, almost every test is asynchronous in some
+way. For example:
+
+- Checking the result of a readback.
+- Capturing the result of a `popErrorScope()`.
+
+That said, test functions don't always need to be `async`; see below.
+
+### Checking asynchronous errors/results
+
+Validation is inherently asynchronous (`popErrorScope()` returns a promise). However, the error
+scope stack itself is synchronous - operations immediately after a `popErrorScope()` are outside
+that error scope.
+
+As a result, tests can assert things like validation errors/successes without having an `async`
+test body.
+
+**Example:**
+
+```typescript
+t.expectValidationError(() => {
+ device.createThing();
+});
+```
+
+does:
+
+- `pushErrorScope('validation')`
+- `popErrorScope()` and "eventually" check whether it returned an error.
+
+**Example:**
+
+```typescript
+t.expectGPUBufferValuesEqual(srcBuffer, expectedData);
+```
+
+does:
+
+- copy `srcBuffer` into a new mappable buffer `dst`
+- map `dst` for reading (`dst.mapAsync(GPUMapMode.READ)`), and "eventually" check the data it contains.
+
+Internally, this is accomplished via an "eventual expectation": `eventualAsyncExpectation()`
+takes an async function, calls it immediately, and stores off the resulting `Promise` to
+automatically await at the end before determining the pass/fail state.
+
+### Asynchronous parallelism
+
+A side effect of test asynchrony is that it's possible for multiple tests to be in flight at
+once. We do not currently do this, but it will eventually be an option to run `N` tests in
+"parallel", for faster local test runs.
diff --git a/dom/webgpu/tests/cts/checkout/docs/intro/README.md b/dom/webgpu/tests/cts/checkout/docs/intro/README.md
new file mode 100644
index 0000000000..e5f8bcedc6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/docs/intro/README.md
@@ -0,0 +1,99 @@
+# Introduction
+
+These documents contain guidelines for contributors to the WebGPU CTS (Conformance Test Suite)
+on how to write effective tests, and on the testing philosophy to adopt.
+
+The WebGPU CTS is arguably more important than the WebGPU specification itself, because
+it is what forces implementations to be interoperable, by checking that they conform to the specification.
+However, writing a CTS is hard and requires a lot of effort to reach good coverage.
+
+More than a collection of tests like regular end2end and unit tests for software artifacts, a CTS
+needs to be exhaustive. Contrast for example the WebGL2 CTS with the ANGLE end2end tests: they
+cover the same functionality (WebGL 2 / OpenGL ES 3) but are structured very differently:
+
+- ANGLE's test suite has one or two tests per functionality to check it works correctly, plus
+ regression tests and special tests to cover implementation details.
+- WebGL2's CTS can have thousands of tests per API aspect to cover every combination of
+ parameters (and global state) used by an operation.
+
+Below are guidelines based on our collective experience with graphics API CTSes like WebGL's.
+They are expected to evolve over time and have exceptions, but should give a general idea of what
+to do.
+
+## Contributing
+
+Testing tasks are tracked in the [CTS project tracker](https://github.com/orgs/gpuweb/projects/3).
+Go here if you're looking for tasks, or if you have a test idea that isn't already covered.
+
+If contributing conformance tests, the directory you'll work in is [`src/webgpu/`](../src/webgpu/).
+This directory is organized according to the goal of the test (API validation behavior vs
+actual results) and its target (API entry points and spec areas, e.g. texture sampling).
+
+The contents of a test file (`src/webgpu/**/*.spec.ts`) are twofold:
+
+- Documentation ("test plans") on what tests do, how they do it, and what cases they cover.
+ Some test plans are fully or partially unimplemented:
+ they either contain "TODO" in a description or are `.unimplemented()`.
+- Actual tests.
+
+**Please read the following short documents before contributing.**
+
+### 0. [Developing](developing.md)
+
+- Reviewers should also read [Review Requirements](../reviews.md).
+
+### 1. [Life of a Test Change](life_of.md)
+
+### 2. [Adding or Editing Test Plans](plans.md)
+
+### 3. [Implementing Tests](tests.md)
+
+## [Additional Documentation](../)
+
+## Examples
+
+### Operation testing of vertex input id generation
+
+This section provides an example of the planning process for a test.
+It has not been refined into a set of final test plan descriptions.
+(Note: this predates the actual implementation of these tests, so doesn't match the actual tests.)
+
+Somewhere under the `api/operation` node are tests checking that running `GPURenderPipelines` on
+the device using the `GPURenderEncoderBase.draw` family of functions works correctly. Render
+pipelines are composed of several stages that are mostly independent, so they can be split into
+several parts such as `vertex_input`, `rasterization`, `blending`.
+
+Vertex input itself has several parts that are mostly separate in hardware:
+
+- generation of the vertex and instance indices to run for this draw
+- fetching of vertex data from vertex buffers based on these indices
+- conversion from the vertex attribute `GPUVertexFormat` to the datatype for the input variable
+ in the shader
+
+Each of these is tested separately, with cases for each combination of the variables that may
+affect them. This means that `api/operation/render/vertex_input/id_generation` checks that the
+correct operation is performed for the cartesian product of all the following dimensions:
+
+- for encoding in a `GPURenderPassEncoder` or a `GPURenderBundleEncoder`
+- whether the draw is direct or indirect
+- whether the draw is indexed or not
+- for various values of the `firstInstance` argument
+- for various values of the `instanceCount` argument
+- if the draw is not indexed:
+ - for various values of the `firstVertex` argument
+ - for various values of the `vertexCount` argument
+- if the draw is indexed:
+ - for each `GPUIndexFormat`
+ - for various values of the indices in the index buffer including the primitive restart values
+ - for various values for the `offset` argument to `setIndexBuffer`
+ - for various values of the `firstIndex` argument
+ - for various values of the `indexCount` argument
+ - for various values of the `baseVertex` argument
+
+"Various values" above mean several small values, including `0` and the second smallest valid
+value to check for corner cases, as well as some large value.
+
+An instance of the test sets up a `draw*` call based on the parameters, using point rendering and
+a fragment shader that outputs to a storage buffer. After the draw the test checks the content of
+the storage buffer to make sure that all expected vertex shader invocations, and only those, have
+been generated.
diff --git a/dom/webgpu/tests/cts/checkout/docs/intro/convert_to_issue.png b/dom/webgpu/tests/cts/checkout/docs/intro/convert_to_issue.png
new file mode 100644
index 0000000000..672324a9d9
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/docs/intro/convert_to_issue.png
Binary files differ
diff --git a/dom/webgpu/tests/cts/checkout/docs/intro/developing.md b/dom/webgpu/tests/cts/checkout/docs/intro/developing.md
new file mode 100644
index 0000000000..5b1aeed36d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/docs/intro/developing.md
@@ -0,0 +1,134 @@
+# Developing
+
+The WebGPU CTS is written in TypeScript.
+
+## Setup
+
+After checking out the repository and installing node/npm, run:
+
+```sh
+npm ci
+```
+
+Before uploading, you can run pre-submit checks (`npm test`) to make sure it will pass CI.
+Use `npm run fix` to fix linting issues.
+
+`npm run` will show available npm scripts.
+Some more scripts can be listed using `npx grunt`.
+
+## Dev Server
+
+To start the development server, use:
+
+```sh
+npm start
+```
+
+Then, browse to the standalone test runner at the printed URL.
+
+The server will generate and compile code on the fly, so no build step is necessary.
+Only a reload is needed to see saved changes.
+(TODO: except, currently, `README.txt` and file `description` changes won't be reflected in
+the standalone runner.)
+
+Note: The first load of a test suite may take some time as generating the test suite listing can
+take a few seconds.
+
+## Standalone Test Runner / Test Plan Viewer
+
+**The standalone test runner also serves as a test plan viewer.**
+(This can be done in a browser without WebGPU support.)
+You can use this to preview how your test plan will appear.
+
+You can view different suites (webgpu, unittests, stress, etc.) or different subtrees of
+the test suite.
+
+- `http://localhost:8080/standalone/` (defaults to `?runnow=0&worker=0&debug=0&q=webgpu:*`)
+- `http://localhost:8080/standalone/?q=unittests:*`
+- `http://localhost:8080/standalone/?q=unittests:basic:*`
+
+The following url parameters change how the harness runs:
+
+- `runnow=1` runs all matching tests on page load.
+- `debug=1` enables verbose debug logging from tests.
+- `worker=1` runs the tests on a Web Worker instead of the main thread.
+- `power_preference=low-power` runs most tests passing `powerPreference: low-power` to `requestAdapter`
+- `power_preference=high-performance` runs most tests passing `powerPreference: high-performance` to `requestAdapter`
+
+### Web Platform Tests (wpt) - Ref Tests
+
+You can inspect the actual and reference pages for web platform reftests in the standalone
+runner by navigating to them. For example, by loading:
+
+ - `http://localhost:8080/out/webgpu/web_platform/reftests/canvas_clear.https.html`
+ - `http://localhost:8080/out/webgpu/web_platform/reftests/ref/canvas_clear-ref.html`
+
+You can also run a minimal ref test runner.
+
+ - open 2 terminals / command lines.
+ - in one, `npm start`
+ - in the other, `node tools/run_wpt_ref_tests <path-to-browser-executable> [name-of-test]`
+
+Without `[name-of-test]`, all ref tests will be run. `[name-of-test]` is just a simple substring
+check, so passing in `rgba` will run every test with `rgba` in its filename.
+
+Examples:
+
+MacOS
+
+```
+# Chrome
+node tools/run_wpt_ref_tests /Applications/Google\ Chrome\ Canary.app/Contents/MacOS/Google\ Chrome\ Canary
+```
+
+Windows
+
+```
+# Chrome
+node .\tools\run_wpt_ref_tests "C:\Users\your-user-name\AppData\Local\Google\Chrome SxS\Application\chrome.exe"
+```
+
+## Editor
+
+Since this project is written in TypeScript, it integrates best with
+[Visual Studio Code](https://code.visualstudio.com/).
+This is optional, but highly recommended: it automatically adds `import` lines and
+provides robust completions, cross-references, renames, error highlighting,
+deprecation highlighting, and type/JSDoc popups.
+
+Open the `cts.code-workspace` workspace file to load settings convenient for this project.
+You can make local configuration changes in `.vscode/`, which is untracked by Git.
+
+## Pull Requests
+
+When opening a pull request, fill out the PR checklist and attach the issue number.
+If an issue hasn't been opened, find the draft issue on the
+[project tracker](https://github.com/orgs/gpuweb/projects/3) and choose "Convert to issue":
+
+![convert to issue button screenshot](convert_to_issue.png)
+
+Opening a pull request will automatically notify reviewers.
+
+To make the review process smoother, once a reviewer has started looking at your change:
+
+- Avoid major additions or changes that would be best done in a follow-up PR.
+- Avoid rebases (`git rebase`) and force pushes (`git push -f`). These can make
+ it difficult for reviewers to review incremental changes as GitHub often cannot
+ view a useful diff across a rebase. If it's necessary to resolve conflicts
+ with upstream changes, use a merge commit (`git merge`) and don't include any
+ consequential changes in the merge, so a reviewer can skip over merge commits
+ when working through the individual commits in the PR.
+- When you address a review comment, mark the thread as "Resolved".
+
+Pull requests will (usually) be landed with the "Squash and merge" option.
+
+### TODOs
+
+The word "TODO" refers to missing test coverage. It may only appear inside file/test descriptions
+and README files (enforced by linting).
+
+To use comments to refer to TODOs inside the description, use a backreference, e.g., in the
+description, `TODO: Also test the FROBNICATE usage flag [1]`, and somewhere in the code, `[1]:
+Need to add FROBNICATE to this list.`.
+
+Use `MAINTENANCE_TODO` for TODOs which don't impact test coverage.
diff --git a/dom/webgpu/tests/cts/checkout/docs/intro/life_of.md b/dom/webgpu/tests/cts/checkout/docs/intro/life_of.md
new file mode 100644
index 0000000000..8dced4ad84
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/docs/intro/life_of.md
@@ -0,0 +1,46 @@
+# Life of a Test Change
+
+A "test change" could be a new test, an expansion of an existing test, a test bug fix, or a
+modification to existing tests to make them match new spec changes.
+
+**CTS contributors should contribute to the tracker and strive to keep it up to date, especially
+relating to their own changes.**
+
+Filing new draft issues in the CTS project tracker is very lightweight.
+Anyone with access should do this eagerly, to ensure no testing ideas are forgotten.
+(And if you don't have access, just file a regular issue.)
+
+1. Enter a [draft issue](https://github.com/orgs/gpuweb/projects/3), with the Status
+ set to "New (not in repo)", and any available info included in the issue description
+ (notes/plans to ensure full test coverage of the change). The source of this may be:
+
+ - Anything in the spec/API that is found not to be covered by the CTS yet.
+   - Any test that is found to be outdated or otherwise buggy.
+ - A spec change from the "Needs CTS Issue" column in the
+ [spec project tracker](https://github.com/orgs/gpuweb/projects/1).
+ Once information on the required test changes is entered into the CTS project tracker,
+ the spec issue moves to "Specification Done".
+
+ Note: at some point, someone may make a PR to flush "New (not in repo)" issues into `TODO`s in
+ CTS file/test description text, changing their "Status" to "Open".
+ These may be done in bulk without linking back to the issue.
+
+1. As necessary:
+
+ - Convert the draft issue to a full, numbered issue for linking from later PRs.
+
+ ![convert to issue button screenshot](convert_to_issue.png)
+
+ - Update the "Assignees" of the issue when an issue is assigned or unassigned
+ (you can assign yourself).
+ - Change the "Status" of the issue to "Started" once you start the task.
+
+1. Open one or more PRs, **each linking to the associated issue**.
+   Each PR is reviewed and landed, and may leave further TODOs for parts it doesn't complete.
+
+   1. Tests are "planned" in test descriptions. (For complex tests, open a separate PR with the
+ tests `.unimplemented()` so a reviewer can evaluate the plan before you implement tests.)
+ 1. Tests are implemented.
+
+1. When **no TODOs remain** for an issue, close it and change its status to "Complete".
+   (Enter a new, more specific draft issue into the tracker if you need to track related TODOs.)
diff --git a/dom/webgpu/tests/cts/checkout/docs/intro/plans.md b/dom/webgpu/tests/cts/checkout/docs/intro/plans.md
new file mode 100644
index 0000000000..f8d7af3a78
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/docs/intro/plans.md
@@ -0,0 +1,82 @@
+# Adding or Editing Test Plans
+
+## 1. Write a test plan
+
+For new tests, if some notes exist already, incorporate them into your plan.
+
+A detailed test plan should be written and reviewed before substantial test code is written.
+This allows reviewers a chance to identify additional tests and cases, opportunities for
+generalizations that would improve the strength of tests, similar existing tests or test plans,
+and potentially useful [helpers](../helper_index.txt).
+
+**A test plan must serve two functions:**
+
+- Describes the test, succinctly, but in enough detail that a reader can read *only* the test
+ plans and evaluate coverage completeness of a file/directory.
+- Describes the test precisely enough that, when code is added, the reviewer can ensure that the
+ test really covers what the test plan says.
+
+There should be one test plan for each test. It should describe what it tests, how, and describe
+important cases that need to be covered. Here's an example:
+
+```ts
+g.test('x,some_detail')
+ .desc(
+ `
+Tests [some detail] about x. Tests calling x in various 'mode's { mode1, mode2 },
+with various values of 'arg', and checks correctness of the result.
+Tries to trigger [some conditional path].
+
+- Valid values (control case) // <- (to make sure the test function works well)
+- Unaligned values (should fail) // <- (only validation tests need to intentionally hit invalid cases)
+- Extreme values`
+ )
+ .params(u =>
+ u //
+ .combine('mode', ['mode1', 'mode2'])
+ .beginSubcases()
+ .combine('arg', [
+ // Valid // <- Comment params as you see fit.
+ 4,
+ 8,
+ 100,
+ // Invalid
+ 2,
+ 6,
+ 1e30,
+ ])
+ )
+ .unimplemented();
+```
+
+"Cases" each appear as individual items in the `/standalone/` runner.
+"Subcases" run inside each case, like a for-loop wrapping the `.fn(`test function`)`.
+Documentation on the parameter builder can be found in the [helper index](../helper_index.txt).
+
+It's often impossible to predict the exact case/subcase structure before implementing tests, so they
+can be added during implementation, instead of planning.
+
+For any notes which are not specific to a single test, or for preliminary notes for tests that
+haven't been planned in full detail, put them in the test file's `description` variable at
+the top. Or, if they aren't associated with a test file, put them in a `README.txt` file.
+
+**Any notes about missing test coverage must be marked with the word `TODO` inside a
+description or README.** This makes them appear on the `/standalone/` page.
+
+## 2. Open a pull request
+
+Open a PR, and work with the reviewer(s) to revise the test plan.
+
+Usually (probably), plans will be landed in separate PRs before test implementations.
+
+## Conventions used in test plans
+
+- `Iff`: If and only if
+- `x=`: "cartesian-cross equals", like `+=` for cartesian product.
+ Used for combinatorial test coverage.
+ - Sometimes this will result in too many test cases; simplify/reduce as needed
+ during planning *or* implementation.
+- `{x,y,z}`: list of cases to test
+ - e.g. `x= texture format {r8unorm, r8snorm}`
+- *Control case*: a case included to make sure that the rest of the cases aren't
+ missing their target by testing some other error case.
diff --git a/dom/webgpu/tests/cts/checkout/docs/intro/tests.md b/dom/webgpu/tests/cts/checkout/docs/intro/tests.md
new file mode 100644
index 0000000000..a67b6a20cc
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/docs/intro/tests.md
@@ -0,0 +1,25 @@
+# Implementing Tests
+
+Once a test plan is done, you can start writing tests.
+To add new tests, imitate the pattern in neighboring tests or neighboring files.
+New test files must be named ending in `.spec.ts`.
+
+For an example test file, see [`src/webgpu/examples.spec.ts`](../../src/webgpu/examples.spec.ts).
+For a more complex, well-structured reference test file, see
+[`src/webgpu/api/validation/vertex_state.spec.ts`](../../src/webgpu/api/validation/vertex_state.spec.ts).
+
+Implement some tests and open a pull request. You can open a PR any time you're ready for a review.
+(If two tests are non-trivial but independent, consider separate pull requests.)
+
+Before uploading, you can run pre-submit checks (`npm test`) to make sure it will pass CI.
+Use `npm run fix` to fix linting issues.
+
+## Test Helpers
+
+It's best to be familiar with helpers available in the test suite for simplifying
+test implementations.
+
+New test helpers can be added at any time, either to existing helper files or to new `.ts` files
+anywhere near the `.spec.ts` file where they're used.
+
+Documentation on existing helpers can be found in the [helper index](../helper_index.txt).
diff --git a/dom/webgpu/tests/cts/checkout/docs/organization.md b/dom/webgpu/tests/cts/checkout/docs/organization.md
new file mode 100644
index 0000000000..fd7020afd6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/docs/organization.md
@@ -0,0 +1,166 @@
+# Test Organization
+
+## `src/webgpu/`
+
+Because of the glorious amount of tests needed, the WebGPU CTS is organized as a tree of arbitrary
+depth (a filesystem with multiple tests per file).
+
+Each directory may have a `README.txt` describing its contents.
+Tests are grouped in large families (each of which has a `README.txt`);
+the root and first few levels look like the following (some nodes omitted for simplicity):
+
+- **`api`** with tests for full coverage of the Javascript API surface of WebGPU.
+ - **`validation`** with positive and negative tests for all the validation rules of the API.
+  - **`operation`** with tests that check the result of performing valid WebGPU operations,
+ taking advantage of parametrization to exercise interactions between parts of the API.
+ - **`regression`** for one-off tests that reproduce bugs found in implementations to prevent
+ the bugs from appearing again.
+- **`shader`** with tests for full coverage of the shaders that can be passed to WebGPU.
+ - **`validation`**.
+ - **`execution`** similar to `api/operation`.
+ - **`regression`**.
+- **`idl`** with tests to check that the WebGPU IDL is correctly implemented, for example that
+  objects expose exactly the correct members, and that methods throw when passed incomplete
+ dictionaries.
+- **`web-platform`** with tests for Web platform-specific interactions like `GPUSwapChain` and
+ `<canvas>`, WebXR and `GPUQueue.copyExternalImageToTexture`.
+
+At the same time test hierarchies can be used to split the testing of a single sub-object into
+several files for maintainability. For example `GPURenderPipeline` has a large descriptor and some
+parts could be tested independently like `vertex_input` vs. `primitive_topology` vs. `blending`
+but all live under the `render_pipeline` directory.
+
+In addition to the test tree, each test can be parameterized. For coverage it is important to
+test all enum values, for example for `GPUTextureFormat`. Instead of having a loop to iterate
+over all the `GPUTextureFormat` values, it is better to parameterize the test over them. Each format
+will have a different entry in the test list which will help WebGPU implementers debug the test,
+or suppress the failure without losing test coverage while they fix the bug.
+
+Extra capabilities (limits and features) are often tested in the same files as the rest of the API.
+For example, a compressed texture format capability would simply add a `GPUTextureFormat` to the
+parametrization lists of many tests, while a capability adding significant new functionality
+like ray-tracing could have a separate subtree.
+
+Operation tests for optional features should be skipped using `t.selectDeviceOrSkipTestCase()` or
+`t.skip()`. Validation tests should be written that test the behavior with and without the
+capability enabled via `t.selectDeviceOrSkipTestCase()`, to ensure the functionality is valid
+only with the capability enabled.
+
+### Validation tests
+
+Validation tests check the validation rules that are (or will be) set by the
+WebGPU spec. Validation tests try to carefully trigger the individual validation
+rules in the spec, without simultaneously triggering other rules.
+
+Validation errors *generally* generate WebGPU errors, not exceptions.
+But check the spec on a case-by-case basis.
+
+Like all `GPUTest`s, `ValidationTest`s are wrapped in both types of error scope. These
+"catch-all" error scopes look for any errors during the test, and report them as test failures.
+Since error scopes can be nested, validation tests can nest an error scope to expect that there
+*are* errors from specific operations.
+
+#### Parameterization
+
+Test parameterization can help write many validation tests more succinctly,
+while making it easier for both authors and reviewers to be confident that
+an aspect of the API is tested fully. Examples:
+
+- [`webgpu:api,validation,render_pass,resolve:resolve_attachment:*`](https://github.com/gpuweb/cts/blob/ded3b7c8a4680a1a01621a8ac859facefadf32d0/src/webgpu/api/validation/render_pass/resolve.spec.ts#L35)
+- [`webgpu:api,validation,createBindGroupLayout:bindingTypeSpecific_optional_members:*`](https://github.com/gpuweb/cts/blob/ded3b7c8a4680a1a01621a8ac859facefadf32d0/src/webgpu/api/validation/createBindGroupLayout.spec.ts#L68)
+
+Use your own discretion when deciding the balance between heavily parameterizing
+a test and writing multiple separate tests.
+
+#### Guidelines
+
+There are many aspects that should be tested in all validation tests:
+
+- each individual argument to a method call (including `this`) or member of a descriptor
+ dictionary should be tested including:
+ - what happens when an error object is passed.
+ - what happens when an optional feature enum or method is used.
+ - what happens for numeric values when they are at 0, too large, too small, etc.
+- each validation rule in the specification should be checked both with a control success case,
+ and error cases.
+- each set of arguments or state that interact for validation.
+
+When testing numeric values, it is important to check on both sides of the boundary: if the error
+happens for value N and not N - 1, both should be tested. Alignment of integer values should also
+be tested but boundary testing of alignment should be between a value aligned to 2^N and a value
+aligned to 2^(N-1).
+
+Finally, this is probably also where we would test that extensions follow the rule that: if the
+browser supports a feature but it is not enabled on the device, then calling methods from that
+feature throws `TypeError`.
+
+- Test that providing unknown properties *that are definitely not part of any feature* is
+  valid/ignored. (Unfortunately, due to the rules of IDL, adding a member to a dictionary is
+ always a breaking change. So this is how we have to test this unless we can get a "strict"
+ dictionary type in IDL. We can't test adding members from non-enabled extensions.)
+
+### Operation tests
+
+Operation tests test the actual results of using the API. They execute
+(sometimes significant) code and check that the result is within the expected
+set of behaviors (which can be quite complex to compute).
+
+Note that operation tests need to test a lot of interactions between different
+parts of the API, and so can become quite complex. Try to reduce the complexity by
+utilizing combinatorics and [helpers](./helper_index.txt), and splitting/merging test files as needed.
+
+#### Errors
+
+Operation tests are usually `GPUTest`s. As a result, they automatically fail on any validation
+errors that occur during the test.
+
+When it's easier to write an operation test with invalid cases, use
+`ParamsBuilder.filter`/`.unless` to avoid invalid cases, or detect and
+`expect` validation errors in some cases.
+
+#### Implementation
+
+Use helpers like `expectContents` (and more to come) to check the values of data on the GPU.
+(These are "eventual expectations" - the harness will wait for them to finish at the end).
+
+When testing something inside a shader, it's not always necessary to output the result to a
+render output. In fragment shaders, you can output to a storage buffer. In vertex shaders, you
+can't - but you can render with points (simplest), send the result to the fragment shader, and
+output it from there. (Someday, we may end up wanting a helper for this.)
+
+#### Testing Default Values
+
+Default value tests (for arguments and dictionary members) should usually be operation tests -
+all you have to do is include `undefined` in parameterizations of other tests to make sure the
+behavior with `undefined` has the same expected result that you have when the default value is
+specified explicitly.
+
+### IDL tests
+
+TODO: figure out how to implement these. https://github.com/gpuweb/cts/issues/332
+
+These tests test only rules that come directly from WebIDL. For example:
+
+- Values out of range for `[EnforceRange]` cause exceptions.
+- Required function arguments and dictionary members cause exceptions if omitted.
+- Arguments and dictionary members cause exceptions if passed the wrong type.
+
+They may also test positive cases like the following, but the behavior of these should be tested in
+operation tests.
+
+- OK to omit optional arguments/members.
+- OK to pass the correct argument/member type (or of any type in a union type).
+
+Every overload of every method should be tested.
+
+## `src/stress/`, `src/manual/`
+
+Stress tests and manual tests for WebGPU that are not intended to be run in an automated way.
+
+## `src/unittests/`
+
+Unit tests for the test framework (`src/common/framework/`).
+
+## `src/demo/`
+
+A demo of test hierarchies for the purpose of testing the `standalone` test runner page.
diff --git a/dom/webgpu/tests/cts/checkout/docs/reviews.md b/dom/webgpu/tests/cts/checkout/docs/reviews.md
new file mode 100644
index 0000000000..1a8c3f9624
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/docs/reviews.md
@@ -0,0 +1,70 @@
+# Review Requirements
+
+A review should have several items checked off before it is landed.
+Checkboxes are pre-filled into the pull request summary when it's created.
+
+The uploader may pre-check-off boxes if they are not applicable
+(e.g. TypeScript readability on a plan PR).
+
+## Readability
+
+A reviewer has "readability" for a topic if they have enough expertise in that topic to ensure
+good practices are followed in pull requests, or know when to loop in other reviewers.
+Perfection is not required!
+
+**It is up to reviewers' own discretion** whether they are qualified to check off a
+"readability" checkbox on any given pull request.
+
+- WebGPU Readability: Familiarity with the API to ensure:
+
+ - WebGPU is being used correctly; expected results seem reasonable.
+ - WebGPU is being tested completely; tests have control cases.
+ - Test code has a clear correspondence with the test description.
+ - [Test helpers](./helper_index.txt) are used or created appropriately
+ (where the reviewer is familiar with the helpers).
+
+- TypeScript Readability: Make sure TypeScript is utilized in a way that:
+
+ - Ensures test code is reasonably type-safe.
+ Reviewers may recommend changes to make type-safety either weaker (`as`, etc.) or stronger.
+ - Is understandable and has appropriate verbosity and dynamicity
+ (e.g. type inference and `as const` are used to reduce unnecessary boilerplate).
+
+## Plan Reviews
+
+**Changes *must* have an author or reviewer with the following readability:** WebGPU
+
+Reviewers must carefully ensure the following:
+
+- The test plan name accurately describes the area being tested.
+- The test plan covers the area described by the file/test name and file/test description
+ as fully as possible (or adds TODOs for incomplete areas).
+- Validation tests have control cases (where no validation error should occur).
+- Each validation rule is tested in isolation, in at least one case which does not violate any
+  other validation rules.
+
+See also: [Adding or Editing Test Plans](intro/plans.md).
+
+## Implementation Reviews
+
+**Changes *must* have an author or reviewer with the following readability:** WebGPU, TypeScript
+
+Reviewers must carefully ensure the following:
+
+- The coverage of the test implementation precisely matches the test description.
+- Everything required for test plan reviews above.
+
+Reviewers should ensure the following:
+
+- New test helpers are documented in [helper index](./helper_index.txt).
+- Framework and test helpers are used where they would make test code clearer.
+
+See also: [Implementing Tests](intro/tests.md).
+
+## Framework
+
+**Changes *must* have an author or reviewer with the following readability:** TypeScript
+
+Reviewers should ensure the following:
+
+- Changes are reasonably type-safe, and covered by unit tests where appropriate.
diff --git a/dom/webgpu/tests/cts/checkout/docs/terms.md b/dom/webgpu/tests/cts/checkout/docs/terms.md
new file mode 100644
index 0000000000..032639be57
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/docs/terms.md
@@ -0,0 +1,270 @@
+# Terminology
+
+Each test suite is organized as a tree, both in the filesystem and further within each file.
+
+- _Suites_, e.g. `src/webgpu/`.
+ - _READMEs_, e.g. `src/webgpu/README.txt`.
+ - _Test Spec Files_, e.g. `src/webgpu/examples.spec.ts`.
+ Identified by their file path.
+ Each test spec file provides a description and a _Test Group_.
+ A _Test Group_ defines a test fixture, and contains multiple:
+ - _Tests_.
+ Identified by a comma-separated list of parts (e.g. `basic,async`)
+ which define a path through a filesystem-like tree (analogy: `basic/async.txt`).
+ Defines a _test function_ and contains multiple:
+ - _Test Cases_.
+ Identified by a list of _Public Parameters_ (e.g. `x` = `1`, `y` = `2`).
+ Each Test Case has the same test function but different Public Parameters.
+
+## Test Tree
+
+A _Test Tree_ is a tree whose leaves are individual Test Cases.
+
+A Test Tree can be thought of as follows:
+
+- Suite, which is the root of a tree with "leaves" which are:
+ - Test Spec Files, each of which is a tree with "leaves" which are:
+ - Tests, each of which is a tree with leaves which are:
+ - Test Cases.
+
+(In the implementation, this conceptual tree of trees is decomposed into one big tree
+whose leaves are Test Cases.)
+
+**Type:** `TestTree`
+
+## Suite
+
+A suite of tests.
+A single suite has a directory structure, and many _test spec files_
+(`.spec.ts` files containing tests) and _READMEs_.
+Each member of a suite is identified by its path within the suite.
+
+**Example:** `src/webgpu/`
+
+### README
+
+**Example:** `src/webgpu/README.txt`
+
+Describes (in prose) the contents of a subdirectory in a suite.
+
+READMEs are only processed at build time, when generating the _Listing_ for a suite.
+
+**Type:** `TestSuiteListingEntryReadme`
+
+## Queries
+
+A _Query_ is a structured object which specifies a subset of cases in exactly one Suite.
+A Query can be represented uniquely as a string.
+Queries are used to:
+
+- Identify a subtree of a suite (by identifying the root node of that subtree).
+- Identify individual cases.
+- Represent the list of tests that a test runner (standalone, wpt, or cmdline) should run.
+- Identify subtrees which should not be "collapsed" during WPT `cts.https.html` generation,
+  so that the `cts.https.html` "variants" can have individual test expectations
+  (i.e. be marked as "expected to fail", "skip", etc.).
+
+There are four types of `TestQuery`:
+
+- `TestQueryMultiFile` represents any subtree of the file hierarchy:
+ - `suite:*`
+ - `suite:path,to,*`
+ - `suite:path,to,file,*`
+- `TestQueryMultiTest` represents any subtree of the test hierarchy:
+ - `suite:path,to,file:*`
+ - `suite:path,to,file:path,to,*`
+ - `suite:path,to,file:path,to,test,*`
+- `TestQueryMultiCase` represents any subtree of the case hierarchy:
+ - `suite:path,to,file:path,to,test:*`
+ - `suite:path,to,file:path,to,test:my=0;*`
+ - `suite:path,to,file:path,to,test:my=0;params="here";*`
+- `TestQuerySingleCase` represents a single case:
+ - `suite:path,to,file:path,to,test:my=0;params="here"`
+
+Test Queries are a **weakly ordered set**: any query is
+_Unordered_, _Equal_, _StrictSuperset_, or _StrictSubset_ relative to any other.
+This property is used to construct the complete tree of test cases.
+In the examples above, every example query is a StrictSubset of the previous one
+(note: even `:*` is a subset of `,*`).
+
+In the WPT and standalone harnesses, the query is stored in the URL, e.g.
+`index.html?q=q:u,e:r,y:*`.
+
+Queries are selectively URL-encoded for readability and compatibility with browsers
+(see `encodeURIComponentSelectively`).
+
+**Type:** `TestQuery`
+
+## Listing
+
+A listing of the **test spec files** in a suite.
+
+This can be generated only in Node, which has filesystem access (see `src/tools/crawl.ts`).
+As part of the build step, a _listing file_ is generated (see `src/tools/gen.ts`) so that the
+Test Spec Files can be discovered by the web runner (since it does not have filesystem access).
+
+**Type:** `TestSuiteListing`
+
+### Listing File
+
+Each Suite has one Listing File (`suite/listing.[tj]s`), containing a list of the files
+in the suite.
+
+In `src/suite/listing.ts`, this is computed dynamically.
+In `out/suite/listing.js`, the listing has been pre-baked (by `tools/gen_listings`).
+
+**Type:** Once `import`ed, `ListingFile`
+
+**Example:** `out/webgpu/listing.js`
+
+## Test Spec File
+
+A Test Spec File has a `description` and a Test Group (under which tests and cases are defined).
+
+**Type:** Once `import`ed, `SpecFile`
+
+**Example:** `src/webgpu/**/*.spec.ts`
+
+## Test Group
+
+A subtree of tests. There is one Test Group per Test Spec File.
+
+The Test Fixture used for tests is defined at TestGroup creation.
+
+**Type:** `TestGroup`
+
+## Test
+
+One test. It has a single _test function_.
+
+It may represent multiple _test cases_, each of which runs the same Test Function with different
+Parameters.
+
+A test is named using `TestGroup.test()`, which returns a `TestBuilder`.
+`TestBuilder.params()`/`.paramsSimple()`/`.paramsSubcasesOnly()`
+can optionally be used to parametrically generate instances (cases and subcases) of the test.
+Finally, `TestBuilder.fn()` provides the Test Function
+(or, a test can be marked unimplemented with `TestBuilder.unimplemented()`).
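+
+A minimal sketch of that chain (the import paths, test name, and parameters are illustrative):
+
+```ts
+import { makeTestGroup } from '../common/framework/test_group.js'; // path is illustrative
+import { GPUTest } from './gpu_test.js'; // path is illustrative
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('buffer,mapping,basic')
+  .desc('Maps a buffer and checks that its initial contents are zero.')
+  .paramsSimple([{ size: 4 }, { size: 256 }])
+  .fn(async t => {
+    // test function body goes here
+  });
+```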
+
+### Test Function
+
+When a test subcase is run, the Test Function receives an instance of the
+Test Fixture provided to the Test Group, and produces test results through it.
+
+**Type:** `TestFn`
+
+## Test Case / Case
+
+A single case of a test. It is identified by a `TestCaseID`: a test name, and its parameters.
+
+Each case appears as an individual item (tree leaf) in `/standalone/`,
+and as an individual "step" in WPT.
+
+If `TestBuilder.params()`/`.paramsSimple()`/`.paramsSubcasesOnly()` are not used,
+there is exactly one case with one subcase, with parameters `{}`.
+
+**Type:** During test run time, a case is encapsulated as a `RunCase`.
+
+## Test Subcase / Subcase
+
+A single "subcase" of a test. It can also be identified by a `TestCaseID`, though
+not all contexts allow subdividing cases into subcases.
+
+All of the subcases of a case will run _inside_ the case, essentially as a for-loop wrapping the
+test function. They do _not_ appear individually in `/standalone/` or WPT.
+
+If `CaseParamsBuilder.beginSubcases()` is not used, there is exactly one subcase per case.
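+
+For example (a sketch; parameter names are made up), cases vary by `format`, and each case runs
+every `offset` value as a subcase:
+
+```ts
+g.test('copy,offsets')
+  .params(u =>
+    u
+      .combine('format', ['rgba8unorm', 'bgra8unorm'] as const)
+      .beginSubcases()
+      .combine('offset', [0, 4, 8])
+  )
+  .fn(t => {
+    const { format, offset } = t.params;
+    // ... two cases (one per format), each running three subcases ...
+  });
+```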
+
+## Test Parameters / Params
+
+Each Test Subcase has a (possibly empty) set of Test Parameters.
+The parameters are passed to the Test Function `f(t)` via `t.params`.
+
+A set of Public Parameters identifies a Test Case or Test Subcase within a Test.
+
+There are also Private Parameters: any parameter name beginning with an underscore (`_`).
+These parameters are not part of the Test Case identification, but are still passed into
+the Test Function. They can be used, e.g., to manually specify expected results.
+
+**Type:** `TestParams`
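+
+For example (a sketch), a private `_expected` value rides along with each case but is not part
+of its identity:
+
+```ts
+g.test('square')
+  .paramsSimple([
+    { x: 2, _expected: 4 },
+    { x: 3, _expected: 9 },
+  ])
+  .fn(t => {
+    t.expect(t.params.x * t.params.x === t.params._expected);
+  });
+```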
+
+## Test Fixture / Fixture
+
+_Test Fixtures_ provide helpers for tests to use.
+A new instance of the fixture is created for every run of every test case.
+
+There is always one fixture class for a whole test group (though this may change).
+
+The fixture is also how a test gets access to the _case recorder_, which is how tests
+produce results: `.skip()`, `.fail()`, etc.
+
+**Type:** `Fixture`
+
+### `UnitTest` Fixture
+
+Provides basic fixture utilities most useful in the `unittests` suite.
+
+### `GPUTest` Fixture
+
+Provides utilities useful in WebGPU CTS tests.
+
+# Test Results
+
+## Logger
+
+A logger logs the results of a whole test run.
+
+It saves an empty `LiveTestSpecResult` into its results map, then creates a
+_test spec recorder_, which records the results for a group into the `LiveTestSpecResult`.
+
+**Type:** `Logger`
+
+### Test Case Recorder
+
+Refers to a `LiveTestCaseResult` created by the logger.
+Records the results of running a test case (its pass-status, run time, and logs) into it.
+
+**Types:** `TestCaseRecorder`, `LiveTestCaseResult`
+
+#### Test Case Status
+
+The `status` of a `LiveTestCaseResult` can be one of:
+
+- `'running'` (only while still running)
+- `'pass'`
+- `'skip'`
+- `'warn'`
+- `'fail'`
+
+The "worst" result from running a case is always reported (fail > warn > skip > pass).
+Note that this means a case can still fail even if it was "skipped", if it failed before
+`.skip()` was called.
+
+**Type:** `Status`
+
+## Results Format
+
+The results are returned in JSON format.
+
+They are designed to be easily merged in JavaScript:
+each file's `"results"` array can be passed into the `Map` constructor and merged from there
+(see the sketch after the example below).
+
+(TODO: Write a merge tool, if needed.)
+
+```js
+{
+ "version": "bf472c5698138cdf801006cd400f587e9b1910a5-dirty",
+ "results": [
+ [
+ "unittests:async_mutex:basic:",
+ { "status": "pass", "timems": 0.286, "logs": [] }
+ ],
+ [
+ "unittests:async_mutex:serial:",
+ { "status": "pass", "timems": 0.415, "logs": [] }
+ ]
+ ]
+}
+```
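+
+A sketch of that merging (variable names are illustrative; `declare` stands in for loading and
+parsing two results files of the shape shown above):
+
+```ts
+declare const a: { results: [string, unknown][] };
+declare const b: { results: [string, unknown][] };
+
+// Later entries overwrite earlier ones when both files contain a result for the same query.
+const merged = new Map([...a.results, ...b.results]);
+```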
diff --git a/dom/webgpu/tests/cts/checkout/node.tsconfig.json b/dom/webgpu/tests/cts/checkout/node.tsconfig.json
new file mode 100644
index 0000000000..74707d408d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/node.tsconfig.json
@@ -0,0 +1,20 @@
+// TypeScript configuration for compiling sources and
+// dependent files for use directly with Node.js. This
+// is useful for running scripts in tools/ directly with Node
+// without including extra dependencies.
+{
+ "extends": "./tsconfig.json",
+ "compilerOptions": {
+ "module": "commonjs",
+ "incremental": false,
+ "noEmit": false,
+ "declaration": false,
+ },
+
+ "exclude": [
+ "src/common/runtime/wpt.ts",
+ "src/common/runtime/standalone.ts",
+ "src/common/runtime/helper/test_worker.ts",
+ "src/webgpu/web_platform/worker/worker_launcher.ts"
+ ]
+}
diff --git a/dom/webgpu/tests/cts/checkout/package-lock.json b/dom/webgpu/tests/cts/checkout/package-lock.json
new file mode 100644
index 0000000000..361ee369cd
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/package-lock.json
@@ -0,0 +1,18083 @@
+{
+ "name": "@webgpu/cts",
+ "version": "0.1.0",
+ "lockfileVersion": 2,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "@webgpu/cts",
+ "version": "0.1.0",
+ "license": "BSD-3-Clause",
+ "devDependencies": {
+ "@babel/cli": "^7.23.0",
+ "@babel/core": "^7.23.2",
+ "@babel/preset-typescript": "^7.23.2",
+ "@types/babel__core": "^7.20.3",
+ "@types/dom-mediacapture-transform": "^0.1.8",
+ "@types/dom-webcodecs": "^0.1.9",
+ "@types/express": "^4.17.20",
+ "@types/jquery": "^3.5.25",
+ "@types/morgan": "^1.9.7",
+ "@types/node": "^20.8.10",
+ "@types/offscreencanvas": "^2019.7.2",
+ "@types/pngjs": "^6.0.3",
+ "@types/serve-index": "^1.9.3",
+ "@typescript-eslint/eslint-plugin": "^6.9.1",
+ "@typescript-eslint/parser": "^6.9.1",
+ "@webgpu/types": "^0.1.38",
+ "ansi-colors": "4.1.3",
+ "babel-plugin-add-header-comment": "^1.0.3",
+ "babel-plugin-const-enum": "^1.2.0",
+ "chokidar": "^3.5.3",
+ "eslint": "^8.52.0",
+ "eslint-plugin-ban": "^1.6.0",
+ "eslint-plugin-deprecation": "^2.0.0",
+ "eslint-plugin-gpuweb-cts": "file:./tools/eslint-plugin-gpuweb-cts",
+ "eslint-plugin-import": "^2.29.0",
+ "express": "^4.18.2",
+ "grunt": "^1.6.1",
+ "grunt-cli": "^1.4.3",
+ "grunt-contrib-clean": "^2.0.1",
+ "grunt-contrib-copy": "^1.0.0",
+ "grunt-run": "^0.8.1",
+ "grunt-timer": "^0.6.0",
+ "grunt-ts": "^6.0.0-beta.22",
+ "gts": "^5.2.0",
+ "http-server": "^14.1.1",
+ "morgan": "^1.10.0",
+ "playwright-core": "^1.39.0",
+ "pngjs": "^7.0.0",
+ "portfinder": "^1.0.32",
+ "prettier": "~3.0.3",
+ "screenshot-ftw": "^1.0.5",
+ "serve-index": "^1.9.1",
+ "ts-node": "^10.9.1",
+ "typedoc": "^0.25.3",
+ "typescript": "~5.2.2"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0",
+ "npm": ">=8.5.2"
+ }
+ },
+ "node_modules/@aashutoshrathi/word-wrap": {
+ "version": "1.2.6",
+ "resolved": "https://registry.npmjs.org/@aashutoshrathi/word-wrap/-/word-wrap-1.2.6.tgz",
+ "integrity": "sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/@ampproject/remapping": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz",
+ "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==",
+ "dev": true,
+ "dependencies": {
+ "@jridgewell/gen-mapping": "^0.3.0",
+ "@jridgewell/trace-mapping": "^0.3.9"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@babel/cli": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/cli/-/cli-7.23.0.tgz",
+ "integrity": "sha512-17E1oSkGk2IwNILM4jtfAvgjt+ohmpfBky8aLerUfYZhiPNg7ca+CRCxZn8QDxwNhV/upsc2VHBCqGFIR+iBfA==",
+ "dev": true,
+ "dependencies": {
+ "@jridgewell/trace-mapping": "^0.3.17",
+ "commander": "^4.0.1",
+ "convert-source-map": "^2.0.0",
+ "fs-readdir-recursive": "^1.1.0",
+ "glob": "^7.2.0",
+ "make-dir": "^2.1.0",
+ "slash": "^2.0.0"
+ },
+ "bin": {
+ "babel": "bin/babel.js",
+ "babel-external-helpers": "bin/babel-external-helpers.js"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "optionalDependencies": {
+ "@nicolo-ribaudo/chokidar-2": "2.1.8-no-fsevents.3",
+ "chokidar": "^3.4.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/cli/node_modules/commander": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz",
+ "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==",
+ "dev": true,
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/@babel/code-frame": {
+ "version": "7.22.13",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz",
+ "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==",
+ "dev": true,
+ "dependencies": {
+ "@babel/highlight": "^7.22.13",
+ "chalk": "^2.4.2"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/compat-data": {
+ "version": "7.23.2",
+ "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.23.2.tgz",
+ "integrity": "sha512-0S9TQMmDHlqAZ2ITT95irXKfxN9bncq8ZCoJhun3nHL/lLUxd2NKBJYoNGWH7S0hz6fRQwWlAWn/ILM0C70KZQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/core": {
+ "version": "7.23.2",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.23.2.tgz",
+ "integrity": "sha512-n7s51eWdaWZ3vGT2tD4T7J6eJs3QoBXydv7vkUM06Bf1cbVD2Kc2UrkzhiQwobfV7NwOnQXYL7UBJ5VPU+RGoQ==",
+ "dev": true,
+ "dependencies": {
+ "@ampproject/remapping": "^2.2.0",
+ "@babel/code-frame": "^7.22.13",
+ "@babel/generator": "^7.23.0",
+ "@babel/helper-compilation-targets": "^7.22.15",
+ "@babel/helper-module-transforms": "^7.23.0",
+ "@babel/helpers": "^7.23.2",
+ "@babel/parser": "^7.23.0",
+ "@babel/template": "^7.22.15",
+ "@babel/traverse": "^7.23.2",
+ "@babel/types": "^7.23.0",
+ "convert-source-map": "^2.0.0",
+ "debug": "^4.1.0",
+ "gensync": "^1.0.0-beta.2",
+ "json5": "^2.2.3",
+ "semver": "^6.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/babel"
+ }
+ },
+ "node_modules/@babel/generator": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.0.tgz",
+ "integrity": "sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.23.0",
+ "@jridgewell/gen-mapping": "^0.3.2",
+ "@jridgewell/trace-mapping": "^0.3.17",
+ "jsesc": "^2.5.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-annotate-as-pure": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.22.5.tgz",
+ "integrity": "sha512-LvBTxu8bQSQkcyKOU+a1btnNFQ1dMAd0R6PyW3arXes06F6QLWLIrd681bxRPIXlrMGR3XYnW9JyML7dP3qgxg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-compilation-targets": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.15.tgz",
+ "integrity": "sha512-y6EEzULok0Qvz8yyLkCvVX+02ic+By2UdOhylwUOvOn9dvYc9mKICJuuU1n1XBI02YWsNsnrY1kc6DVbjcXbtw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/compat-data": "^7.22.9",
+ "@babel/helper-validator-option": "^7.22.15",
+ "browserslist": "^4.21.9",
+ "lru-cache": "^5.1.1",
+ "semver": "^6.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-compilation-targets/node_modules/lru-cache": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
+ "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
+ "dev": true,
+ "dependencies": {
+ "yallist": "^3.0.2"
+ }
+ },
+ "node_modules/@babel/helper-compilation-targets/node_modules/yallist": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
+ "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==",
+ "dev": true
+ },
+ "node_modules/@babel/helper-create-class-features-plugin": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.22.15.tgz",
+ "integrity": "sha512-jKkwA59IXcvSaiK2UN45kKwSC9o+KuoXsBDvHvU/7BecYIp8GQ2UwrVvFgJASUT+hBnwJx6MhvMCuMzwZZ7jlg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "@babel/helper-environment-visitor": "^7.22.5",
+ "@babel/helper-function-name": "^7.22.5",
+ "@babel/helper-member-expression-to-functions": "^7.22.15",
+ "@babel/helper-optimise-call-expression": "^7.22.5",
+ "@babel/helper-replace-supers": "^7.22.9",
+ "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5",
+ "@babel/helper-split-export-declaration": "^7.22.6",
+ "semver": "^6.3.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/helper-environment-visitor": {
+ "version": "7.22.20",
+ "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz",
+ "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-function-name": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz",
+ "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/template": "^7.22.15",
+ "@babel/types": "^7.23.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-hoist-variables": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz",
+ "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-member-expression-to-functions": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.23.0.tgz",
+ "integrity": "sha512-6gfrPwh7OuT6gZyJZvd6WbTfrqAo7vm4xCzAXOusKqq/vWdKXphTpj5klHKNmRUU6/QRGlBsyU9mAIPaWHlqJA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.23.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-module-imports": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.15.tgz",
+ "integrity": "sha512-0pYVBnDKZO2fnSPCrgM/6WMc7eS20Fbok+0r88fp+YtWVLZrp4CkafFGIp+W0VKw4a22sgebPT99y+FDNMdP4w==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.22.15"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-module-transforms": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.23.0.tgz",
+ "integrity": "sha512-WhDWw1tdrlT0gMgUJSlX0IQvoO1eN279zrAUbVB+KpV2c3Tylz8+GnKOLllCS6Z/iZQEyVYxhZVUdPTqs2YYPw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-environment-visitor": "^7.22.20",
+ "@babel/helper-module-imports": "^7.22.15",
+ "@babel/helper-simple-access": "^7.22.5",
+ "@babel/helper-split-export-declaration": "^7.22.6",
+ "@babel/helper-validator-identifier": "^7.22.20"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/helper-optimise-call-expression": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.22.5.tgz",
+ "integrity": "sha512-HBwaojN0xFRx4yIvpwGqxiV2tUfl7401jlok564NgB9EHS1y6QT17FmKWm4ztqjeVdXLuC4fSvHc5ePpQjoTbw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-plugin-utils": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz",
+ "integrity": "sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-replace-supers": {
+ "version": "7.22.20",
+ "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.22.20.tgz",
+ "integrity": "sha512-qsW0In3dbwQUbK8kejJ4R7IHVGwHJlV6lpG6UA7a9hSa2YEiAib+N1T2kr6PEeUT+Fl7najmSOS6SmAwCHK6Tw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-environment-visitor": "^7.22.20",
+ "@babel/helper-member-expression-to-functions": "^7.22.15",
+ "@babel/helper-optimise-call-expression": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/helper-simple-access": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz",
+ "integrity": "sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-skip-transparent-expression-wrappers": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.22.5.tgz",
+ "integrity": "sha512-tK14r66JZKiC43p8Ki33yLBVJKlQDFoA8GYN67lWCDCqoL6EMMSuM9b+Iff2jHaM/RRFYl7K+iiru7hbRqNx8Q==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-split-export-declaration": {
+ "version": "7.22.6",
+ "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz",
+ "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-string-parser": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz",
+ "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-validator-identifier": {
+ "version": "7.22.20",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz",
+ "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-validator-option": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.22.15.tgz",
+ "integrity": "sha512-bMn7RmyFjY/mdECUbgn9eoSY4vqvacUnS9i9vGAGttgFWesO6B4CYWA7XlpbWgBt71iv/hfbPlynohStqnu5hA==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helpers": {
+ "version": "7.23.2",
+ "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.23.2.tgz",
+ "integrity": "sha512-lzchcp8SjTSVe/fPmLwtWVBFC7+Tbn8LGHDVfDp9JGxpAY5opSaEFgt8UQvrnECWOTdji2mOWMz1rOhkHscmGQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/template": "^7.22.15",
+ "@babel/traverse": "^7.23.2",
+ "@babel/types": "^7.23.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/highlight": {
+ "version": "7.22.20",
+ "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz",
+ "integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-validator-identifier": "^7.22.20",
+ "chalk": "^2.4.2",
+ "js-tokens": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/parser": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz",
+ "integrity": "sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==",
+ "dev": true,
+ "bin": {
+ "parser": "bin/babel-parser.js"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-jsx": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.22.5.tgz",
+ "integrity": "sha512-gvyP4hZrgrs/wWMaocvxZ44Hw0b3W8Pe+cMxc8V1ULQ07oh8VNbIRaoD1LRZVTvD+0nieDKjfgKg89sD7rrKrg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-typescript": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.22.5.tgz",
+ "integrity": "sha512-1mS2o03i7t1c6VzH6fdQ3OA8tcEIxwG18zIPRp+UY1Ihv6W+XZzBCVxExF9upussPXJ0xE9XRHwMoNs1ep/nRQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-modules-commonjs": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.23.0.tgz",
+ "integrity": "sha512-32Xzss14/UVc7k9g775yMIvkVK8xwKE0DPdP5JTapr3+Z9w4tzeOuLNY6BXDQR6BdnzIlXnCGAzsk/ICHBLVWQ==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-module-transforms": "^7.23.0",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-simple-access": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-typescript": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.22.15.tgz",
+ "integrity": "sha512-1uirS0TnijxvQLnlv5wQBwOX3E1wCFX7ITv+9pBV2wKEk4K+M5tqDaoNXnTH8tjEIYHLO98MwiTWO04Ggz4XuA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "@babel/helper-create-class-features-plugin": "^7.22.15",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-typescript": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/preset-typescript": {
+ "version": "7.23.2",
+ "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.23.2.tgz",
+ "integrity": "sha512-u4UJc1XsS1GhIGteM8rnGiIvf9rJpiVgMEeCnwlLA7WJPC+jcXWJAGxYmeqs5hOZD8BbAfnV5ezBOxQbb4OUxA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-validator-option": "^7.22.15",
+ "@babel/plugin-syntax-jsx": "^7.22.5",
+ "@babel/plugin-transform-modules-commonjs": "^7.23.0",
+ "@babel/plugin-transform-typescript": "^7.22.15"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/template": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz",
+ "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==",
+ "dev": true,
+ "dependencies": {
+ "@babel/code-frame": "^7.22.13",
+ "@babel/parser": "^7.22.15",
+ "@babel/types": "^7.22.15"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/traverse": {
+ "version": "7.23.2",
+ "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz",
+ "integrity": "sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==",
+ "dev": true,
+ "dependencies": {
+ "@babel/code-frame": "^7.22.13",
+ "@babel/generator": "^7.23.0",
+ "@babel/helper-environment-visitor": "^7.22.20",
+ "@babel/helper-function-name": "^7.23.0",
+ "@babel/helper-hoist-variables": "^7.22.5",
+ "@babel/helper-split-export-declaration": "^7.22.6",
+ "@babel/parser": "^7.23.0",
+ "@babel/types": "^7.23.0",
+ "debug": "^4.1.0",
+ "globals": "^11.1.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/types": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz",
+ "integrity": "sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-string-parser": "^7.22.5",
+ "@babel/helper-validator-identifier": "^7.22.20",
+ "to-fast-properties": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@cspotcode/source-map-support": {
+ "version": "0.8.1",
+ "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz",
+ "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==",
+ "dev": true,
+ "dependencies": {
+ "@jridgewell/trace-mapping": "0.3.9"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@cspotcode/source-map-support/node_modules/@jridgewell/trace-mapping": {
+ "version": "0.3.9",
+ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz",
+ "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==",
+ "dev": true,
+ "dependencies": {
+ "@jridgewell/resolve-uri": "^3.0.3",
+ "@jridgewell/sourcemap-codec": "^1.4.10"
+ }
+ },
+ "node_modules/@eslint-community/eslint-utils": {
+ "version": "4.4.0",
+ "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz",
+ "integrity": "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==",
+ "dev": true,
+ "dependencies": {
+ "eslint-visitor-keys": "^3.3.0"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "peerDependencies": {
+ "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0"
+ }
+ },
+ "node_modules/@eslint-community/regexpp": {
+ "version": "4.10.0",
+ "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.10.0.tgz",
+ "integrity": "sha512-Cu96Sd2By9mCNTx2iyKOmq10v22jUVQv0lQnlGNy16oE9589yE+QADPbrMGCkA51cKZSg3Pu/aTJVTGfL/qjUA==",
+ "dev": true,
+ "engines": {
+ "node": "^12.0.0 || ^14.0.0 || >=16.0.0"
+ }
+ },
+ "node_modules/@eslint/eslintrc": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.2.tgz",
+ "integrity": "sha512-+wvgpDsrB1YqAMdEUCcnTlpfVBH7Vqn6A/NT3D8WVXFIaKMlErPIZT3oCIAVCOtarRpMtelZLqJeU3t7WY6X6g==",
+ "dev": true,
+ "dependencies": {
+ "ajv": "^6.12.4",
+ "debug": "^4.3.2",
+ "espree": "^9.6.0",
+ "globals": "^13.19.0",
+ "ignore": "^5.2.0",
+ "import-fresh": "^3.2.1",
+ "js-yaml": "^4.1.0",
+ "minimatch": "^3.1.2",
+ "strip-json-comments": "^3.1.1"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
+ }
+ },
+ "node_modules/@eslint/eslintrc/node_modules/argparse": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
+ "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
+ "dev": true
+ },
+ "node_modules/@eslint/eslintrc/node_modules/globals": {
+ "version": "13.23.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-13.23.0.tgz",
+ "integrity": "sha512-XAmF0RjlrjY23MA51q3HltdlGxUpXPvg0GioKiD9X6HD28iMjo2dKC8Vqwm7lne4GNr78+RHTfliktR6ZH09wA==",
+ "dev": true,
+ "dependencies": {
+ "type-fest": "^0.20.2"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/@eslint/eslintrc/node_modules/js-yaml": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
+ "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
+ "dev": true,
+ "dependencies": {
+ "argparse": "^2.0.1"
+ },
+ "bin": {
+ "js-yaml": "bin/js-yaml.js"
+ }
+ },
+ "node_modules/@eslint/eslintrc/node_modules/type-fest": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz",
+ "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/@eslint/js": {
+ "version": "8.52.0",
+ "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.52.0.tgz",
+ "integrity": "sha512-mjZVbpaeMZludF2fsWLD0Z9gCref1Tk4i9+wddjRvpUNqqcndPkBD09N/Mapey0b3jaXbLm2kICwFv2E64QinA==",
+ "dev": true,
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ }
+ },
+ "node_modules/@humanwhocodes/config-array": {
+ "version": "0.11.13",
+ "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.13.tgz",
+ "integrity": "sha512-JSBDMiDKSzQVngfRjOdFXgFfklaXI4K9nLF49Auh21lmBWRLIK3+xTErTWD4KU54pb6coM6ESE7Awz/FNU3zgQ==",
+ "dev": true,
+ "dependencies": {
+ "@humanwhocodes/object-schema": "^2.0.1",
+ "debug": "^4.1.1",
+ "minimatch": "^3.0.5"
+ },
+ "engines": {
+ "node": ">=10.10.0"
+ }
+ },
+ "node_modules/@humanwhocodes/module-importer": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz",
+ "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==",
+ "dev": true,
+ "engines": {
+ "node": ">=12.22"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/nzakas"
+ }
+ },
+ "node_modules/@humanwhocodes/object-schema": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.1.tgz",
+ "integrity": "sha512-dvuCeX5fC9dXgJn9t+X5atfmgQAzUOWqS1254Gh0m6i8wKd10ebXkfNKiRK+1GWi/yTvvLDHpoxLr0xxxeslWw==",
+ "dev": true
+ },
+ "node_modules/@jridgewell/gen-mapping": {
+ "version": "0.3.2",
+ "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz",
+ "integrity": "sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A==",
+ "dev": true,
+ "dependencies": {
+ "@jridgewell/set-array": "^1.0.1",
+ "@jridgewell/sourcemap-codec": "^1.4.10",
+ "@jridgewell/trace-mapping": "^0.3.9"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/resolve-uri": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz",
+ "integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/set-array": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz",
+ "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/sourcemap-codec": {
+ "version": "1.4.14",
+ "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz",
+ "integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==",
+ "dev": true
+ },
+ "node_modules/@jridgewell/trace-mapping": {
+ "version": "0.3.17",
+ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.17.tgz",
+ "integrity": "sha512-MCNzAp77qzKca9+W/+I0+sEpaUnZoeasnghNeVc41VZCEKaCH73Vq3BZZ/SzWIgrqE4H4ceI+p+b6C0mHf9T4g==",
+ "dev": true,
+ "dependencies": {
+ "@jridgewell/resolve-uri": "3.1.0",
+ "@jridgewell/sourcemap-codec": "1.4.14"
+ }
+ },
+ "node_modules/@nicolo-ribaudo/chokidar-2": {
+ "version": "2.1.8-no-fsevents.3",
+ "resolved": "https://registry.npmjs.org/@nicolo-ribaudo/chokidar-2/-/chokidar-2-2.1.8-no-fsevents.3.tgz",
+ "integrity": "sha512-s88O1aVtXftvp5bCPB7WnmXc5IwOZZ7YPuwNPt+GtOOXpPvad1LfbmjYv+qII7zP6RU2QGnqve27dnLycEnyEQ==",
+ "dev": true,
+ "optional": true
+ },
+ "node_modules/@nodelib/fs.scandir": {
+ "version": "2.1.5",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
+ "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==",
+ "dev": true,
+ "dependencies": {
+ "@nodelib/fs.stat": "2.0.5",
+ "run-parallel": "^1.1.9"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/@nodelib/fs.stat": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz",
+ "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==",
+ "dev": true,
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/@nodelib/fs.walk": {
+ "version": "1.2.8",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz",
+ "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==",
+ "dev": true,
+ "dependencies": {
+ "@nodelib/fs.scandir": "2.1.5",
+ "fastq": "^1.6.0"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/@pkgr/utils": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/@pkgr/utils/-/utils-2.4.2.tgz",
+ "integrity": "sha512-POgTXhjrTfbTV63DiFXav4lBHiICLKKwDeaKn9Nphwj7WH6m0hMMCaJkMyRWjgtPFyRKRVoMXXjczsTQRDEhYw==",
+ "dev": true,
+ "dependencies": {
+ "cross-spawn": "^7.0.3",
+ "fast-glob": "^3.3.0",
+ "is-glob": "^4.0.3",
+ "open": "^9.1.0",
+ "picocolors": "^1.0.0",
+ "tslib": "^2.6.0"
+ },
+ "engines": {
+ "node": "^12.20.0 || ^14.18.0 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/unts"
+ }
+ },
+ "node_modules/@tsconfig/node10": {
+ "version": "1.0.9",
+ "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.9.tgz",
+ "integrity": "sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA==",
+ "dev": true
+ },
+ "node_modules/@tsconfig/node12": {
+ "version": "1.0.11",
+ "resolved": "https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz",
+ "integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==",
+ "dev": true
+ },
+ "node_modules/@tsconfig/node14": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz",
+ "integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==",
+ "dev": true
+ },
+ "node_modules/@tsconfig/node16": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz",
+ "integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==",
+ "dev": true
+ },
+ "node_modules/@types/babel__core": {
+ "version": "7.20.3",
+ "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.3.tgz",
+ "integrity": "sha512-54fjTSeSHwfan8AyHWrKbfBWiEUrNTZsUwPTDSNaaP1QDQIZbeNUg3a59E9D+375MzUw/x1vx2/0F5LBz+AeYA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/parser": "^7.20.7",
+ "@babel/types": "^7.20.7",
+ "@types/babel__generator": "*",
+ "@types/babel__template": "*",
+ "@types/babel__traverse": "*"
+ }
+ },
+ "node_modules/@types/babel__generator": {
+ "version": "7.6.4",
+ "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.6.4.tgz",
+ "integrity": "sha512-tFkciB9j2K755yrTALxD44McOrk+gfpIpvC3sxHjRawj6PfnQxrse4Clq5y/Rq+G3mrBurMax/lG8Qn2t9mSsg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.0.0"
+ }
+ },
+ "node_modules/@types/babel__template": {
+ "version": "7.4.1",
+ "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.1.tgz",
+ "integrity": "sha512-azBFKemX6kMg5Io+/rdGT0dkGreboUVR0Cdm3fz9QJWpaQGJRQXl7C+6hOTCZcMll7KFyEQpgbYI2lHdsS4U7g==",
+ "dev": true,
+ "dependencies": {
+ "@babel/parser": "^7.1.0",
+ "@babel/types": "^7.0.0"
+ }
+ },
+ "node_modules/@types/babel__traverse": {
+ "version": "7.14.2",
+ "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.14.2.tgz",
+ "integrity": "sha512-K2waXdXBi2302XUdcHcR1jCeU0LL4TD9HRs/gk0N2Xvrht+G/BfJa4QObBQZfhMdxiCpV3COl5Nfq4uKTeTnJA==",
+ "dev": true,
+ "dependencies": {
+ "@babel/types": "^7.3.0"
+ }
+ },
+ "node_modules/@types/body-parser": {
+ "version": "1.19.2",
+ "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.2.tgz",
+ "integrity": "sha512-ALYone6pm6QmwZoAgeyNksccT9Q4AWZQ6PvfwR37GT6r6FWUPguq6sUmNGSMV2Wr761oQoBxwGGa6DR5o1DC9g==",
+ "dev": true,
+ "dependencies": {
+ "@types/connect": "*",
+ "@types/node": "*"
+ }
+ },
+ "node_modules/@types/connect": {
+ "version": "3.4.35",
+ "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.35.tgz",
+ "integrity": "sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ==",
+ "dev": true,
+ "dependencies": {
+ "@types/node": "*"
+ }
+ },
+ "node_modules/@types/dom-mediacapture-transform": {
+ "version": "0.1.8",
+ "resolved": "https://registry.npmjs.org/@types/dom-mediacapture-transform/-/dom-mediacapture-transform-0.1.8.tgz",
+ "integrity": "sha512-L27i831nPYT82MVGweu3Uyw9ekWbEXq9NfHMQ4DWqbD6DYzswkgYxwRHOhU0KBIWEc76NP/PsX3R8sQMrL680Q==",
+ "dev": true,
+ "dependencies": {
+ "@types/dom-webcodecs": "*"
+ }
+ },
+ "node_modules/@types/dom-webcodecs": {
+ "version": "0.1.9",
+ "resolved": "https://registry.npmjs.org/@types/dom-webcodecs/-/dom-webcodecs-0.1.9.tgz",
+ "integrity": "sha512-lOqlovxh4zB7p59rJwej8XG3uo0kv+hR+59Ky2MftcNS70ULWnWc6I2ZIM0xKcPFyvwU/DpRsTeFm8llayr5bA==",
+ "dev": true
+ },
+ "node_modules/@types/express": {
+ "version": "4.17.20",
+ "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.20.tgz",
+ "integrity": "sha512-rOaqlkgEvOW495xErXMsmyX3WKBInbhG5eqojXYi3cGUaLoRDlXa5d52fkfWZT963AZ3v2eZ4MbKE6WpDAGVsw==",
+ "dev": true,
+ "dependencies": {
+ "@types/body-parser": "*",
+ "@types/express-serve-static-core": "^4.17.33",
+ "@types/qs": "*",
+ "@types/serve-static": "*"
+ }
+ },
+ "node_modules/@types/express-serve-static-core": {
+ "version": "4.17.39",
+ "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.39.tgz",
+ "integrity": "sha512-BiEUfAiGCOllomsRAZOiMFP7LAnrifHpt56pc4Z7l9K6ACyN06Ns1JLMBxwkfLOjJRlSf06NwWsT7yzfpaVpyQ==",
+ "dev": true,
+ "dependencies": {
+ "@types/node": "*",
+ "@types/qs": "*",
+ "@types/range-parser": "*",
+ "@types/send": "*"
+ }
+ },
+ "node_modules/@types/jquery": {
+ "version": "3.5.25",
+ "resolved": "https://registry.npmjs.org/@types/jquery/-/jquery-3.5.25.tgz",
+ "integrity": "sha512-gykx2c+OZf5nx2tv/5fDQqmvGgTiXshELy5jf9IgXPtVfSBl57IUYByN4osbwMXwJijWGOEYQABzGaFZE79A0Q==",
+ "dev": true,
+ "dependencies": {
+ "@types/sizzle": "*"
+ }
+ },
+ "node_modules/@types/json-schema": {
+ "version": "7.0.14",
+ "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.14.tgz",
+ "integrity": "sha512-U3PUjAudAdJBeC2pgN8uTIKgxrb4nlDF3SF0++EldXQvQBGkpFZMSnwQiIoDU77tv45VgNkl/L4ouD+rEomujw==",
+ "dev": true
+ },
+ "node_modules/@types/json5": {
+ "version": "0.0.29",
+ "resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz",
+ "integrity": "sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==",
+ "dev": true
+ },
+ "node_modules/@types/mime": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.2.tgz",
+ "integrity": "sha512-YATxVxgRqNH6nHEIsvg6k2Boc1JHI9ZbH5iWFFv/MTkchz3b1ieGDa5T0a9RznNdI0KhVbdbWSN+KWWrQZRxTw==",
+ "dev": true
+ },
+ "node_modules/@types/minimist": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/@types/minimist/-/minimist-1.2.2.tgz",
+ "integrity": "sha512-jhuKLIRrhvCPLqwPcx6INqmKeiA5EWrsCOPhrlFSrbrmU4ZMPjj5Ul/oLCMDO98XRUIwVm78xICz4EPCektzeQ==",
+ "dev": true
+ },
+ "node_modules/@types/morgan": {
+ "version": "1.9.7",
+ "resolved": "https://registry.npmjs.org/@types/morgan/-/morgan-1.9.7.tgz",
+ "integrity": "sha512-4sJFBUBrIZkP5EvMm1L6VCXp3SQe8dnXqlVpe1jsmTjS1JQVmSjnpMNs8DosQd6omBi/K7BSKJ6z/Mc3ki0K9g==",
+ "dev": true,
+ "dependencies": {
+ "@types/node": "*"
+ }
+ },
+ "node_modules/@types/node": {
+ "version": "20.8.10",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-20.8.10.tgz",
+ "integrity": "sha512-TlgT8JntpcbmKUFzjhsyhGfP2fsiz1Mv56im6enJ905xG1DAYesxJaeSbGqQmAw8OWPdhyJGhGSQGKRNJ45u9w==",
+ "dev": true,
+ "dependencies": {
+ "undici-types": "~5.26.4"
+ }
+ },
+ "node_modules/@types/normalize-package-data": {
+ "version": "2.4.1",
+ "resolved": "https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.1.tgz",
+ "integrity": "sha512-Gj7cI7z+98M282Tqmp2K5EIsoouUEzbBJhQQzDE3jSIRk6r9gsz0oUokqIUR4u1R3dMHo0pDHM7sNOHyhulypw==",
+ "dev": true
+ },
+ "node_modules/@types/offscreencanvas": {
+ "version": "2019.7.2",
+ "resolved": "https://registry.npmjs.org/@types/offscreencanvas/-/offscreencanvas-2019.7.2.tgz",
+ "integrity": "sha512-ujCjOxeA07IbEBQYAkoOI+XFw5sT3nhWJ/xZfPR6reJppDG7iPQPZacQiLTtWH1b3a2NYXWlxvYqa40y/LAixQ==",
+ "dev": true
+ },
+ "node_modules/@types/pngjs": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/@types/pngjs/-/pngjs-6.0.3.tgz",
+ "integrity": "sha512-F/WaGVKEZ1XYFlEtsWtqWm92vRfQdOqSSTBPj07BRDKnDtRhCw50DpwEQtrrDwEZUoAZAzv2FaalZiNV/54BoQ==",
+ "dev": true,
+ "dependencies": {
+ "@types/node": "*"
+ }
+ },
+ "node_modules/@types/qs": {
+ "version": "6.9.9",
+ "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.9.tgz",
+ "integrity": "sha512-wYLxw35euwqGvTDx6zfY1vokBFnsK0HNrzc6xNHchxfO2hpuRg74GbkEW7e3sSmPvj0TjCDT1VCa6OtHXnubsg==",
+ "dev": true
+ },
+ "node_modules/@types/range-parser": {
+ "version": "1.2.6",
+ "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.6.tgz",
+ "integrity": "sha512-+0autS93xyXizIYiyL02FCY8N+KkKPhILhcUSA276HxzreZ16kl+cmwvV2qAM/PuCCwPXzOXOWhiPcw20uSFcA==",
+ "dev": true
+ },
+ "node_modules/@types/semver": {
+ "version": "7.5.4",
+ "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.4.tgz",
+ "integrity": "sha512-MMzuxN3GdFwskAnb6fz0orFvhfqi752yjaXylr0Rp4oDg5H0Zn1IuyRhDVvYOwAXoJirx2xuS16I3WjxnAIHiQ==",
+ "dev": true
+ },
+ "node_modules/@types/send": {
+ "version": "0.17.3",
+ "resolved": "https://registry.npmjs.org/@types/send/-/send-0.17.3.tgz",
+ "integrity": "sha512-/7fKxvKUoETxjFUsuFlPB9YndePpxxRAOfGC/yJdc9kTjTeP5kRCTzfnE8kPUKCeyiyIZu0YQ76s50hCedI1ug==",
+ "dev": true,
+ "dependencies": {
+ "@types/mime": "^1",
+ "@types/node": "*"
+ }
+ },
+ "node_modules/@types/serve-index": {
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/@types/serve-index/-/serve-index-1.9.3.tgz",
+ "integrity": "sha512-4KG+yMEuvDPRrYq5fyVm/I2uqAJSAwZK9VSa+Zf+zUq9/oxSSvy3kkIqyL+jjStv6UCVi8/Aho0NHtB1Fwosrg==",
+ "dev": true,
+ "dependencies": {
+ "@types/express": "*"
+ }
+ },
+ "node_modules/@types/serve-static": {
+ "version": "1.13.10",
+ "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.13.10.tgz",
+ "integrity": "sha512-nCkHGI4w7ZgAdNkrEu0bv+4xNV/XDqW+DydknebMOQwkpDGx8G+HTlj7R7ABI8i8nKxVw0wtKPi1D+lPOkh4YQ==",
+ "dev": true,
+ "dependencies": {
+ "@types/mime": "^1",
+ "@types/node": "*"
+ }
+ },
+ "node_modules/@types/sizzle": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/@types/sizzle/-/sizzle-2.3.3.tgz",
+ "integrity": "sha512-JYM8x9EGF163bEyhdJBpR2QX1R5naCJHC8ucJylJ3w9/CVBaskdQ8WqBf8MmQrd1kRvp/a4TS8HJ+bxzR7ZJYQ==",
+ "dev": true
+ },
+ "node_modules/@typescript-eslint/eslint-plugin": {
+ "version": "6.9.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.9.1.tgz",
+ "integrity": "sha512-w0tiiRc9I4S5XSXXrMHOWgHgxbrBn1Ro+PmiYhSg2ZVdxrAJtQgzU5o2m1BfP6UOn7Vxcc6152vFjQfmZR4xEg==",
+ "dev": true,
+ "dependencies": {
+ "@eslint-community/regexpp": "^4.5.1",
+ "@typescript-eslint/scope-manager": "6.9.1",
+ "@typescript-eslint/type-utils": "6.9.1",
+ "@typescript-eslint/utils": "6.9.1",
+ "@typescript-eslint/visitor-keys": "6.9.1",
+ "debug": "^4.3.4",
+ "graphemer": "^1.4.0",
+ "ignore": "^5.2.4",
+ "natural-compare": "^1.4.0",
+ "semver": "^7.5.4",
+ "ts-api-utils": "^1.0.1"
+ },
+ "engines": {
+ "node": "^16.0.0 || >=18.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ },
+ "peerDependencies": {
+ "@typescript-eslint/parser": "^6.0.0 || ^6.0.0-alpha",
+ "eslint": "^7.0.0 || ^8.0.0"
+ },
+ "peerDependenciesMeta": {
+ "typescript": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/type-utils": {
+ "version": "6.9.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.9.1.tgz",
+ "integrity": "sha512-eh2oHaUKCK58qIeYp19F5V5TbpM52680sB4zNSz29VBQPTWIlE/hCj5P5B1AChxECe/fmZlspAWFuRniep1Skg==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/typescript-estree": "6.9.1",
+ "@typescript-eslint/utils": "6.9.1",
+ "debug": "^4.3.4",
+ "ts-api-utils": "^1.0.1"
+ },
+ "engines": {
+ "node": "^16.0.0 || >=18.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ },
+ "peerDependencies": {
+ "eslint": "^7.0.0 || ^8.0.0"
+ },
+ "peerDependenciesMeta": {
+ "typescript": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@typescript-eslint/eslint-plugin/node_modules/semver": {
+ "version": "7.5.4",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
+ "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
+ "dev": true,
+ "dependencies": {
+ "lru-cache": "^6.0.0"
+ },
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@typescript-eslint/parser": {
+ "version": "6.9.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.9.1.tgz",
+ "integrity": "sha512-C7AK2wn43GSaCUZ9do6Ksgi2g3mwFkMO3Cis96kzmgudoVaKyt62yNzJOktP0HDLb/iO2O0n2lBOzJgr6Q/cyg==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/scope-manager": "6.9.1",
+ "@typescript-eslint/types": "6.9.1",
+ "@typescript-eslint/typescript-estree": "6.9.1",
+ "@typescript-eslint/visitor-keys": "6.9.1",
+ "debug": "^4.3.4"
+ },
+ "engines": {
+ "node": "^16.0.0 || >=18.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ },
+ "peerDependencies": {
+ "eslint": "^7.0.0 || ^8.0.0"
+ },
+ "peerDependenciesMeta": {
+ "typescript": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@typescript-eslint/scope-manager": {
+ "version": "6.9.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.9.1.tgz",
+ "integrity": "sha512-38IxvKB6NAne3g/+MyXMs2Cda/Sz+CEpmm+KLGEM8hx/CvnSRuw51i8ukfwB/B/sESdeTGet1NH1Wj7I0YXswg==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/types": "6.9.1",
+ "@typescript-eslint/visitor-keys": "6.9.1"
+ },
+ "engines": {
+ "node": "^16.0.0 || >=18.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ }
+ },
+ "node_modules/@typescript-eslint/type-utils": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-5.62.0.tgz",
+ "integrity": "sha512-xsSQreu+VnfbqQpW5vnCJdq1Z3Q0U31qiWmRhr98ONQmcp/yhiPJFPq8MXiJVLiksmOKSjIldZzkebzHuCGzew==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/typescript-estree": "5.62.0",
+ "@typescript-eslint/utils": "5.62.0",
+ "debug": "^4.3.4",
+ "tsutils": "^3.21.0"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ },
+ "peerDependencies": {
+ "eslint": "*"
+ },
+ "peerDependenciesMeta": {
+ "typescript": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/scope-manager": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.62.0.tgz",
+ "integrity": "sha512-VXuvVvZeQCQb5Zgf4HAxc04q5j+WrNAtNh9OwCsCgpKqESMTu3tF/jhZ3xG6T4NZwWl65Bg8KuS2uEvhSfLl0w==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/types": "5.62.0",
+ "@typescript-eslint/visitor-keys": "5.62.0"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ }
+ },
+ "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/types": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.62.0.tgz",
+ "integrity": "sha512-87NVngcbVXUahrRTqIK27gD2t5Cu1yuCXxbLcFtCzZGlfyVWWh8mLHkoxzjsB6DDNnvdL+fW8MiwPEJyGJQDgQ==",
+ "dev": true,
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ }
+ },
+ "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/typescript-estree": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.62.0.tgz",
+ "integrity": "sha512-CmcQ6uY7b9y694lKdRB8FEel7JbU/40iSAPomu++SjLMntB+2Leay2LO6i8VnJk58MtE9/nQSFIH6jpyRWyYzA==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/types": "5.62.0",
+ "@typescript-eslint/visitor-keys": "5.62.0",
+ "debug": "^4.3.4",
+ "globby": "^11.1.0",
+ "is-glob": "^4.0.3",
+ "semver": "^7.3.7",
+ "tsutils": "^3.21.0"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ },
+ "peerDependenciesMeta": {
+ "typescript": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/utils": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.62.0.tgz",
+ "integrity": "sha512-n8oxjeb5aIbPFEtmQxQYOLI0i9n5ySBEY/ZEHHZqKQSFnxio1rv6dthascc9dLuwrL0RC5mPCxB7vnAVGAYWAQ==",
+ "dev": true,
+ "dependencies": {
+ "@eslint-community/eslint-utils": "^4.2.0",
+ "@types/json-schema": "^7.0.9",
+ "@types/semver": "^7.3.12",
+ "@typescript-eslint/scope-manager": "5.62.0",
+ "@typescript-eslint/types": "5.62.0",
+ "@typescript-eslint/typescript-estree": "5.62.0",
+ "eslint-scope": "^5.1.1",
+ "semver": "^7.3.7"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ },
+ "peerDependencies": {
+ "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0"
+ }
+ },
+ "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/visitor-keys": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.62.0.tgz",
+ "integrity": "sha512-07ny+LHRzQXepkGg6w0mFY41fVUNBrL2Roj/++7V1txKugfjm/Ci/qSND03r2RhlJhJYMcTn9AhhSSqQp0Ysyw==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/types": "5.62.0",
+ "eslint-visitor-keys": "^3.3.0"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ }
+ },
+ "node_modules/@typescript-eslint/type-utils/node_modules/eslint-scope": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz",
+ "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==",
+ "dev": true,
+ "dependencies": {
+ "esrecurse": "^4.3.0",
+ "estraverse": "^4.1.1"
+ },
+ "engines": {
+ "node": ">=8.0.0"
+ }
+ },
+ "node_modules/@typescript-eslint/type-utils/node_modules/estraverse": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz",
+ "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==",
+ "dev": true,
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/@typescript-eslint/type-utils/node_modules/semver": {
+ "version": "7.5.4",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
+ "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
+ "dev": true,
+ "dependencies": {
+ "lru-cache": "^6.0.0"
+ },
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@typescript-eslint/types": {
+ "version": "6.9.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.9.1.tgz",
+ "integrity": "sha512-BUGslGOb14zUHOUmDB2FfT6SI1CcZEJYfF3qFwBeUrU6srJfzANonwRYHDpLBuzbq3HaoF2XL2hcr01c8f8OaQ==",
+ "dev": true,
+ "engines": {
+ "node": "^16.0.0 || >=18.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ }
+ },
+ "node_modules/@typescript-eslint/typescript-estree": {
+ "version": "6.9.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.9.1.tgz",
+ "integrity": "sha512-U+mUylTHfcqeO7mLWVQ5W/tMLXqVpRv61wm9ZtfE5egz7gtnmqVIw9ryh0mgIlkKk9rZLY3UHygsBSdB9/ftyw==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/types": "6.9.1",
+ "@typescript-eslint/visitor-keys": "6.9.1",
+ "debug": "^4.3.4",
+ "globby": "^11.1.0",
+ "is-glob": "^4.0.3",
+ "semver": "^7.5.4",
+ "ts-api-utils": "^1.0.1"
+ },
+ "engines": {
+ "node": "^16.0.0 || >=18.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ },
+ "peerDependenciesMeta": {
+ "typescript": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@typescript-eslint/typescript-estree/node_modules/semver": {
+ "version": "7.5.4",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
+ "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
+ "dev": true,
+ "dependencies": {
+ "lru-cache": "^6.0.0"
+ },
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@typescript-eslint/utils": {
+ "version": "6.9.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.9.1.tgz",
+ "integrity": "sha512-L1T0A5nFdQrMVunpZgzqPL6y2wVreSyHhKGZryS6jrEN7bD9NplVAyMryUhXsQ4TWLnZmxc2ekar/lSGIlprCA==",
+ "dev": true,
+ "dependencies": {
+ "@eslint-community/eslint-utils": "^4.4.0",
+ "@types/json-schema": "^7.0.12",
+ "@types/semver": "^7.5.0",
+ "@typescript-eslint/scope-manager": "6.9.1",
+ "@typescript-eslint/types": "6.9.1",
+ "@typescript-eslint/typescript-estree": "6.9.1",
+ "semver": "^7.5.4"
+ },
+ "engines": {
+ "node": "^16.0.0 || >=18.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ },
+ "peerDependencies": {
+ "eslint": "^7.0.0 || ^8.0.0"
+ }
+ },
+ "node_modules/@typescript-eslint/utils/node_modules/semver": {
+ "version": "7.5.4",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
+ "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
+ "dev": true,
+ "dependencies": {
+ "lru-cache": "^6.0.0"
+ },
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/@typescript-eslint/visitor-keys": {
+ "version": "6.9.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.9.1.tgz",
+ "integrity": "sha512-MUaPUe/QRLEffARsmNfmpghuQkW436DvESW+h+M52w0coICHRfD6Np9/K6PdACwnrq1HmuLl+cSPZaJmeVPkSw==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/types": "6.9.1",
+ "eslint-visitor-keys": "^3.4.1"
+ },
+ "engines": {
+ "node": "^16.0.0 || >=18.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ }
+ },
+ "node_modules/@ungap/structured-clone": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz",
+ "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==",
+ "dev": true
+ },
+ "node_modules/@webgpu/types": {
+ "version": "0.1.38",
+ "resolved": "https://registry.npmjs.org/@webgpu/types/-/types-0.1.38.tgz",
+ "integrity": "sha512-7LrhVKz2PRh+DD7+S+PVaFd5HxaWQvoMqBbsV9fNJO1pjUs1P8bM2vQVNfk+3URTqbuTI7gkXi0rfsN0IadoBA==",
+ "dev": true
+ },
+ "node_modules/abbrev": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz",
+ "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==",
+ "dev": true
+ },
+ "node_modules/accepts": {
+ "version": "1.3.8",
+ "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz",
+ "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==",
+ "dev": true,
+ "dependencies": {
+ "mime-types": "~2.1.34",
+ "negotiator": "0.6.3"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/acorn": {
+ "version": "8.11.2",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.2.tgz",
+ "integrity": "sha512-nc0Axzp/0FILLEVsm4fNwLCwMttvhEI263QtVPQcbpfZZ3ts0hLsZGOpE6czNlid7CJ9MlyH8reXkpsf3YUY4w==",
+ "dev": true,
+ "bin": {
+ "acorn": "bin/acorn"
+ },
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/acorn-jsx": {
+ "version": "5.3.2",
+ "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz",
+ "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==",
+ "dev": true,
+ "peerDependencies": {
+ "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0"
+ }
+ },
+ "node_modules/acorn-walk": {
+ "version": "8.3.0",
+ "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.0.tgz",
+ "integrity": "sha512-FS7hV565M5l1R08MXqo8odwMTB02C2UqzB17RVgu9EyuYFBqJZ3/ZY97sQD5FewVu1UyDFc1yztUDrAwT0EypA==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/ajv": {
+ "version": "6.12.6",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
+ "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
+ "dev": true,
+ "dependencies": {
+ "fast-deep-equal": "^3.1.1",
+ "fast-json-stable-stringify": "^2.0.0",
+ "json-schema-traverse": "^0.4.1",
+ "uri-js": "^4.2.2"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/epoberezkin"
+ }
+ },
+ "node_modules/ansi-colors": {
+ "version": "4.1.3",
+ "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.3.tgz",
+ "integrity": "sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/ansi-escapes": {
+ "version": "4.3.2",
+ "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz",
+ "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==",
+ "dev": true,
+ "dependencies": {
+ "type-fest": "^0.21.3"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/ansi-regex": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/ansi-sequence-parser": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/ansi-sequence-parser/-/ansi-sequence-parser-1.1.1.tgz",
+ "integrity": "sha512-vJXt3yiaUL4UU546s3rPXlsry/RnM730G1+HkpKE012AN0sx1eOrxSu95oKDIonskeLTijMgqWZ3uDEe3NFvyg==",
+ "dev": true
+ },
+ "node_modules/ansi-styles": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
+ "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
+ "dev": true,
+ "dependencies": {
+ "color-convert": "^1.9.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/anymatch": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz",
+ "integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==",
+ "dev": true,
+ "dependencies": {
+ "normalize-path": "^3.0.0",
+ "picomatch": "^2.0.4"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/arg": {
+ "version": "4.1.3",
+ "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz",
+ "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==",
+ "dev": true
+ },
+ "node_modules/argparse": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
+ "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
+ "dev": true,
+ "dependencies": {
+ "sprintf-js": "~1.0.2"
+ }
+ },
+ "node_modules/arr-diff": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz",
+ "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/arr-flatten": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz",
+ "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/arr-union": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/arr-union/-/arr-union-3.1.0.tgz",
+ "integrity": "sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/array-buffer-byte-length": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.0.tgz",
+ "integrity": "sha512-LPuwb2P+NrQw3XhxGc36+XSvuBPopovXYTR9Ew++Du9Yb/bx5AzBfrIsBoj0EZUifjQU+sHL21sseZ3jerWO/A==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "is-array-buffer": "^3.0.1"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/array-each": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/array-each/-/array-each-1.0.1.tgz",
+ "integrity": "sha1-p5SvDAWrF1KEbudTofIRoFugxE8=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/array-flatten": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
+ "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=",
+ "dev": true
+ },
+ "node_modules/array-includes": {
+ "version": "3.1.7",
+ "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.7.tgz",
+ "integrity": "sha512-dlcsNBIiWhPkHdOEEKnehA+RNUWDc4UqFtnIXU4uuYDPtA4LDkr7qip2p0VvFAEXNDr0yWZ9PJyIRiGjRLQzwQ==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1",
+ "get-intrinsic": "^1.2.1",
+ "is-string": "^1.0.7"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/array-slice": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/array-slice/-/array-slice-1.1.0.tgz",
+ "integrity": "sha512-B1qMD3RBP7O8o0H2KbrXDyB0IccejMF15+87Lvlor12ONPRHP6gTjXMNkt/d3ZuOGbAe66hFmaCfECI24Ufp6w==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/array-union": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz",
+ "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/array-unique": {
+ "version": "0.3.2",
+ "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz",
+ "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/array.prototype.findlastindex": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/array.prototype.findlastindex/-/array.prototype.findlastindex-1.2.3.tgz",
+ "integrity": "sha512-LzLoiOMAxvy+Gd3BAq3B7VeIgPdo+Q8hthvKtXybMvRV0jrXfJM/t8mw7nNlpEcVlVUnCnM2KSX4XU5HmpodOA==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1",
+ "es-shim-unscopables": "^1.0.0",
+ "get-intrinsic": "^1.2.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/array.prototype.flat": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.2.tgz",
+ "integrity": "sha512-djYB+Zx2vLewY8RWlNCUdHjDXs2XOgm602S9E7P/UpHgfeHL00cRiIF+IN/G/aUJ7kGPb6yO/ErDI5V2s8iycA==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1",
+ "es-shim-unscopables": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/array.prototype.flatmap": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.2.tgz",
+ "integrity": "sha512-Ewyx0c9PmpcsByhSW4r+9zDU7sGjFc86qf/kKtuSCRdhfbk0SNLLkaT5qvcHnRGgc5NP/ly/y+qkXkqONX54CQ==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1",
+ "es-shim-unscopables": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/arraybuffer.prototype.slice": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.2.tgz",
+ "integrity": "sha512-yMBKppFur/fbHu9/6USUe03bZ4knMYiwFBcyiaXB8Go0qNehwX6inYPzK9U0NeQvGxKthcmHcaR8P5MStSRBAw==",
+ "dev": true,
+ "dependencies": {
+ "array-buffer-byte-length": "^1.0.0",
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1",
+ "get-intrinsic": "^1.2.1",
+ "is-array-buffer": "^3.0.2",
+ "is-shared-array-buffer": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/arrify": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz",
+ "integrity": "sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/assign-symbols": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/assign-symbols/-/assign-symbols-1.0.0.tgz",
+ "integrity": "sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/async": {
+ "version": "3.2.4",
+ "resolved": "https://registry.npmjs.org/async/-/async-3.2.4.tgz",
+ "integrity": "sha512-iAB+JbDEGXhyIUavoDl9WP/Jj106Kz9DEn1DPgYw5ruDn0e3Wgi3sKFm55sASdGBNOQB8F59d9qQ7deqrHA8wQ==",
+ "dev": true
+ },
+ "node_modules/async-each": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/async-each/-/async-each-1.0.3.tgz",
+ "integrity": "sha512-z/WhQ5FPySLdvREByI2vZiTWwCnF0moMJ1hK9YQwDTHKh6I7/uSckMetoRGb5UBZPC1z0jlw+n/XCgjeH7y1AQ==",
+ "dev": true
+ },
+ "node_modules/atob": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.2.tgz",
+ "integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==",
+ "dev": true,
+ "bin": {
+ "atob": "bin/atob.js"
+ },
+ "engines": {
+ "node": ">= 4.5.0"
+ }
+ },
+ "node_modules/available-typed-arrays": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.5.tgz",
+ "integrity": "sha512-DMD0KiN46eipeziST1LPP/STfDU0sufISXmjSgvVsoU2tqxctQeASejWcfNtxYKqETM1UxQ8sp2OrSBWpHY6sw==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/babel-plugin-add-header-comment": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/babel-plugin-add-header-comment/-/babel-plugin-add-header-comment-1.0.3.tgz",
+ "integrity": "sha1-URxJAQYmQNWkgLSsPt1pRBlYUOw=",
+ "dev": true
+ },
+ "node_modules/babel-plugin-const-enum": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/babel-plugin-const-enum/-/babel-plugin-const-enum-1.2.0.tgz",
+ "integrity": "sha512-o1m/6iyyFnp9MRsK1dHF3bneqyf3AlM2q3A/YbgQr2pCat6B6XJVDv2TXqzfY2RYUi4mak6WAksSBPlyYGx9dg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.0.0",
+ "@babel/plugin-syntax-typescript": "^7.3.3",
+ "@babel/traverse": "^7.16.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/balanced-match": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
+ "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==",
+ "dev": true
+ },
+ "node_modules/base": {
+ "version": "0.11.2",
+ "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz",
+ "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==",
+ "dev": true,
+ "dependencies": {
+ "cache-base": "^1.0.1",
+ "class-utils": "^0.3.5",
+ "component-emitter": "^1.2.1",
+ "define-property": "^1.0.0",
+ "isobject": "^3.0.1",
+ "mixin-deep": "^1.2.0",
+ "pascalcase": "^0.1.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/base/node_modules/define-property": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+ "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+ "dev": true,
+ "dependencies": {
+ "is-descriptor": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/base/node_modules/is-accessor-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+ "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+ "dev": true,
+ "dependencies": {
+ "kind-of": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/base/node_modules/is-data-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+ "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+ "dev": true,
+ "dependencies": {
+ "kind-of": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/base/node_modules/is-descriptor": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+ "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+ "dev": true,
+ "dependencies": {
+ "is-accessor-descriptor": "^1.0.0",
+ "is-data-descriptor": "^1.0.0",
+ "kind-of": "^6.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/bash-color": {
+ "version": "0.0.3",
+ "resolved": "https://registry.npmjs.org/bash-color/-/bash-color-0.0.3.tgz",
+ "integrity": "sha512-y0MC9pb/jBCn/1FzRm8/BZqOYrk0vBnW4pNkmAmLwLSA/rA9Wd3u0CYQnB8y5lW6VDf+Sf2kScntAUUS0KDGxw==",
+ "dev": true
+ },
+ "node_modules/basic-auth": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/basic-auth/-/basic-auth-2.0.1.tgz",
+ "integrity": "sha512-NF+epuEdnUYVlGuhaxbbq+dvJttwLnGY+YixlXlME5KpQ5W3CnXA5cVTneY3SPbPDRkcjMbifrwmFYcClgOZeg==",
+ "dev": true,
+ "dependencies": {
+ "safe-buffer": "5.1.2"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/batch": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz",
+ "integrity": "sha1-3DQxT05nkxgJP8dgJyUl+UvyXBY=",
+ "dev": true
+ },
+ "node_modules/big-integer": {
+ "version": "1.6.51",
+ "resolved": "https://registry.npmjs.org/big-integer/-/big-integer-1.6.51.tgz",
+ "integrity": "sha512-GPEid2Y9QU1Exl1rpO9B2IPJGHPSupF5GnVIP0blYvNOMer2bTvSWs1jGOUg04hTmu67nmLsQ9TBo1puaotBHg==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.6"
+ }
+ },
+ "node_modules/binary-extensions": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz",
+ "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/bindings": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz",
+ "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==",
+ "dev": true,
+ "optional": true,
+ "dependencies": {
+ "file-uri-to-path": "1.0.0"
+ }
+ },
+ "node_modules/body-parser": {
+ "version": "1.20.1",
+ "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.1.tgz",
+ "integrity": "sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==",
+ "dev": true,
+ "dependencies": {
+ "bytes": "3.1.2",
+ "content-type": "~1.0.4",
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "destroy": "1.2.0",
+ "http-errors": "2.0.0",
+ "iconv-lite": "0.4.24",
+ "on-finished": "2.4.1",
+ "qs": "6.11.0",
+ "raw-body": "2.5.1",
+ "type-is": "~1.6.18",
+ "unpipe": "1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8",
+ "npm": "1.2.8000 || >= 1.4.16"
+ }
+ },
+ "node_modules/body-parser/node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/body-parser/node_modules/depd": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
+ "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/body-parser/node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
+ "dev": true
+ },
+ "node_modules/body-parser/node_modules/on-finished": {
+ "version": "2.4.1",
+ "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
+ "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
+ "dev": true,
+ "dependencies": {
+ "ee-first": "1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/bplist-parser": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/bplist-parser/-/bplist-parser-0.2.0.tgz",
+ "integrity": "sha512-z0M+byMThzQmD9NILRniCUXYsYpjwnlO8N5uCFaCqIOpqRsJCrQL9NK3JsD67CN5a08nF5oIL2bD6loTdHOuKw==",
+ "dev": true,
+ "dependencies": {
+ "big-integer": "^1.6.44"
+ },
+ "engines": {
+ "node": ">= 5.10.0"
+ }
+ },
+ "node_modules/brace-expansion": {
+ "version": "1.1.11",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
+ "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+ "dev": true,
+ "dependencies": {
+ "balanced-match": "^1.0.0",
+ "concat-map": "0.0.1"
+ }
+ },
+ "node_modules/braces": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz",
+ "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==",
+ "dev": true,
+ "dependencies": {
+ "fill-range": "^7.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/browserslist": {
+ "version": "4.22.1",
+ "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.22.1.tgz",
+ "integrity": "sha512-FEVc202+2iuClEhZhrWy6ZiAcRLvNMyYcxZ8raemul1DYVOVdFsbqckWLdsixQZCpJlwe77Z3UTalE7jsjnKfQ==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "caniuse-lite": "^1.0.30001541",
+ "electron-to-chromium": "^1.4.535",
+ "node-releases": "^2.0.13",
+ "update-browserslist-db": "^1.0.13"
+ },
+ "bin": {
+ "browserslist": "cli.js"
+ },
+ "engines": {
+ "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
+ }
+ },
+ "node_modules/bundle-name": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/bundle-name/-/bundle-name-3.0.0.tgz",
+ "integrity": "sha512-PKA4BeSvBpQKQ8iPOGCSiell+N8P+Tf1DlwqmYhpe2gAhKPHn8EYOxVT+ShuGmhg8lN8XiSlS80yiExKXrURlw==",
+ "dev": true,
+ "dependencies": {
+ "run-applescript": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/bytes": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
+ "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/cache-base": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz",
+ "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==",
+ "dev": true,
+ "dependencies": {
+ "collection-visit": "^1.0.0",
+ "component-emitter": "^1.2.1",
+ "get-value": "^2.0.6",
+ "has-value": "^1.0.0",
+ "isobject": "^3.0.1",
+ "set-value": "^2.0.0",
+ "to-object-path": "^0.3.0",
+ "union-value": "^1.0.0",
+ "unset-value": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/call-bind": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.5.tgz",
+ "integrity": "sha512-C3nQxfFZxFRVoJoGKKI8y3MOEo129NQ+FgQ08iye+Mk4zNZZGdjfs06bVTr+DBSlA66Q2VEcMki/cUCP4SercQ==",
+ "dev": true,
+ "dependencies": {
+ "function-bind": "^1.1.2",
+ "get-intrinsic": "^1.2.1",
+ "set-function-length": "^1.1.1"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/callsites": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
+ "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/camelcase": {
+ "version": "5.3.1",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
+ "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/camelcase-keys": {
+ "version": "6.2.2",
+ "resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-6.2.2.tgz",
+ "integrity": "sha512-YrwaA0vEKazPBkn0ipTiMpSajYDSe+KjQfrjhcBMxJt/znbvlHd8Pw/Vamaz5EB4Wfhs3SUR3Z9mwRu/P3s3Yg==",
+ "dev": true,
+ "dependencies": {
+ "camelcase": "^5.3.1",
+ "map-obj": "^4.0.0",
+ "quick-lru": "^4.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/caniuse-lite": {
+ "version": "1.0.30001559",
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001559.tgz",
+ "integrity": "sha512-cPiMKZgqgkg5LY3/ntGeLFUpi6tzddBNS58A4tnTgQw1zON7u2sZMU7SzOeVH4tj20++9ggL+V6FDOFMTaFFYA==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/caniuse-lite"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ]
+ },
+ "node_modules/chalk": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
+ "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
+ "dev": true,
+ "dependencies": {
+ "ansi-styles": "^3.2.1",
+ "escape-string-regexp": "^1.0.5",
+ "supports-color": "^5.3.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/chardet": {
+ "version": "0.7.0",
+ "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz",
+ "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==",
+ "dev": true
+ },
+ "node_modules/chokidar": {
+ "version": "3.5.3",
+ "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz",
+ "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "individual",
+ "url": "https://paulmillr.com/funding/"
+ }
+ ],
+ "dependencies": {
+ "anymatch": "~3.1.2",
+ "braces": "~3.0.2",
+ "glob-parent": "~5.1.2",
+ "is-binary-path": "~2.1.0",
+ "is-glob": "~4.0.1",
+ "normalize-path": "~3.0.0",
+ "readdirp": "~3.6.0"
+ },
+ "engines": {
+ "node": ">= 8.10.0"
+ },
+ "optionalDependencies": {
+ "fsevents": "~2.3.2"
+ }
+ },
+ "node_modules/class-utils": {
+ "version": "0.3.6",
+ "resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz",
+ "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==",
+ "dev": true,
+ "dependencies": {
+ "arr-union": "^3.1.0",
+ "define-property": "^0.2.5",
+ "isobject": "^3.0.0",
+ "static-extend": "^0.1.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/cli-cursor": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz",
+ "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==",
+ "dev": true,
+ "dependencies": {
+ "restore-cursor": "^3.1.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/cli-width": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-3.0.0.tgz",
+ "integrity": "sha512-FxqpkPPwu1HjuN93Omfm4h8uIanXofW0RxVEW3k5RKx+mJJYSthzNhp32Kzxxy3YAEZ/Dc/EWN1vZRY0+kOhbw==",
+ "dev": true,
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/collection-visit": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz",
+ "integrity": "sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA=",
+ "dev": true,
+ "dependencies": {
+ "map-visit": "^1.0.0",
+ "object-visit": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/color-convert": {
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
+ "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
+ "dev": true,
+ "dependencies": {
+ "color-name": "1.1.3"
+ }
+ },
+ "node_modules/color-name": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
+ "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==",
+ "dev": true
+ },
+ "node_modules/colors": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/colors/-/colors-1.1.2.tgz",
+ "integrity": "sha1-FopHAXVran9RoSzgyXv6KMCE7WM=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.1.90"
+ }
+ },
+ "node_modules/component-emitter": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.0.tgz",
+ "integrity": "sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg==",
+ "dev": true
+ },
+ "node_modules/concat-map": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
+ "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=",
+ "dev": true
+ },
+ "node_modules/content-disposition": {
+ "version": "0.5.4",
+ "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz",
+ "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==",
+ "dev": true,
+ "dependencies": {
+ "safe-buffer": "5.2.1"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/content-disposition/node_modules/safe-buffer": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
+ "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ]
+ },
+ "node_modules/content-type": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz",
+ "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/convert-source-map": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz",
+ "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==",
+ "dev": true
+ },
+ "node_modules/cookie": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz",
+ "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/cookie-signature": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz",
+ "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw=",
+ "dev": true
+ },
+ "node_modules/copy-descriptor": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/copy-descriptor/-/copy-descriptor-0.1.1.tgz",
+ "integrity": "sha1-Z29us8OZl8LuGsOpJP1hJHSPV40=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/core-util-is": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz",
+ "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==",
+ "dev": true
+ },
+ "node_modules/corser": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/corser/-/corser-2.0.1.tgz",
+ "integrity": "sha1-jtolLsqrWEDc2XXOuQ2TcMgZ/4c=",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4.0"
+ }
+ },
+ "node_modules/create-require": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz",
+ "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==",
+ "dev": true
+ },
+ "node_modules/cross-spawn": {
+ "version": "7.0.3",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
+ "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
+ "dev": true,
+ "dependencies": {
+ "path-key": "^3.1.0",
+ "shebang-command": "^2.0.0",
+ "which": "^2.0.1"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/csproj2ts": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/csproj2ts/-/csproj2ts-1.1.0.tgz",
+ "integrity": "sha512-sk0RTT51t4lUNQ7UfZrqjQx7q4g0m3iwNA6mvyh7gLsgQYvwKzfdyoAgicC9GqJvkoIkU0UmndV9c7VZ8pJ45Q==",
+ "dev": true,
+ "dependencies": {
+ "es6-promise": "^4.1.1",
+ "lodash": "^4.17.4",
+ "semver": "^5.4.1",
+ "xml2js": "^0.4.19"
+ }
+ },
+ "node_modules/csproj2ts/node_modules/es6-promise": {
+ "version": "4.2.8",
+ "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.8.tgz",
+ "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==",
+ "dev": true
+ },
+ "node_modules/csproj2ts/node_modules/semver": {
+ "version": "5.7.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
+ "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==",
+ "dev": true,
+ "bin": {
+ "semver": "bin/semver"
+ }
+ },
+ "node_modules/d": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/d/-/d-1.0.1.tgz",
+ "integrity": "sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==",
+ "dev": true,
+ "dependencies": {
+ "es5-ext": "^0.10.50",
+ "type": "^1.0.1"
+ }
+ },
+ "node_modules/dateformat": {
+ "version": "4.6.3",
+ "resolved": "https://registry.npmjs.org/dateformat/-/dateformat-4.6.3.tgz",
+ "integrity": "sha512-2P0p0pFGzHS5EMnhdxQi7aJN+iMheud0UhG4dlE1DLAlvL8JHjJJTX/CSm4JXwV0Ka5nGk3zC5mcb5bUQUxxMA==",
+ "dev": true,
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/debug": {
+ "version": "4.3.4",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz",
+ "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==",
+ "dev": true,
+ "dependencies": {
+ "ms": "2.1.2"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/decamelize": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz",
+ "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/decamelize-keys": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/decamelize-keys/-/decamelize-keys-1.1.0.tgz",
+ "integrity": "sha1-0XGoeTMlKAfrPLYdwcFEXQeN8tk=",
+ "dev": true,
+ "dependencies": {
+ "decamelize": "^1.1.0",
+ "map-obj": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/decamelize-keys/node_modules/map-obj": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-1.0.1.tgz",
+ "integrity": "sha1-2TPOuSBdgr3PSIb2dCvcK03qFG0=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/decode-uri-component": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.0.tgz",
+ "integrity": "sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10"
+ }
+ },
+ "node_modules/deep-is": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz",
+ "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==",
+ "dev": true
+ },
+ "node_modules/default-browser": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/default-browser/-/default-browser-4.0.0.tgz",
+ "integrity": "sha512-wX5pXO1+BrhMkSbROFsyxUm0i/cJEScyNhA4PPxc41ICuv05ZZB/MX28s8aZx6xjmatvebIapF6hLEKEcpneUA==",
+ "dev": true,
+ "dependencies": {
+ "bundle-name": "^3.0.0",
+ "default-browser-id": "^3.0.0",
+ "execa": "^7.1.1",
+ "titleize": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=14.16"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/default-browser-id": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/default-browser-id/-/default-browser-id-3.0.0.tgz",
+ "integrity": "sha512-OZ1y3y0SqSICtE8DE4S8YOE9UZOJ8wO16fKWVP5J1Qz42kV9jcnMVFrEE/noXb/ss3Q4pZIH79kxofzyNNtUNA==",
+ "dev": true,
+ "dependencies": {
+ "bplist-parser": "^0.2.0",
+ "untildify": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/default-browser/node_modules/execa": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/execa/-/execa-7.2.0.tgz",
+ "integrity": "sha512-UduyVP7TLB5IcAQl+OzLyLcS/l32W/GLg+AhHJ+ow40FOk2U3SAllPwR44v4vmdFwIWqpdwxxpQbF1n5ta9seA==",
+ "dev": true,
+ "dependencies": {
+ "cross-spawn": "^7.0.3",
+ "get-stream": "^6.0.1",
+ "human-signals": "^4.3.0",
+ "is-stream": "^3.0.0",
+ "merge-stream": "^2.0.0",
+ "npm-run-path": "^5.1.0",
+ "onetime": "^6.0.0",
+ "signal-exit": "^3.0.7",
+ "strip-final-newline": "^3.0.0"
+ },
+ "engines": {
+ "node": "^14.18.0 || ^16.14.0 || >=18.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sindresorhus/execa?sponsor=1"
+ }
+ },
+ "node_modules/default-browser/node_modules/human-signals": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-4.3.1.tgz",
+ "integrity": "sha512-nZXjEF2nbo7lIw3mgYjItAfgQXog3OjJogSbKa2CQIIvSGWcKgeJnQlNXip6NglNzYH45nSRiEVimMvYL8DDqQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=14.18.0"
+ }
+ },
+ "node_modules/default-browser/node_modules/is-stream": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz",
+ "integrity": "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==",
+ "dev": true,
+ "engines": {
+ "node": "^12.20.0 || ^14.13.1 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/default-browser/node_modules/mimic-fn": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-4.0.0.tgz",
+ "integrity": "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==",
+ "dev": true,
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/default-browser/node_modules/npm-run-path": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.1.0.tgz",
+ "integrity": "sha512-sJOdmRGrY2sjNTRMbSvluQqg+8X7ZK61yvzBEIDhz4f8z1TZFYABsqjjCBd/0PUNE9M6QDgHJXQkGUEm7Q+l9Q==",
+ "dev": true,
+ "dependencies": {
+ "path-key": "^4.0.0"
+ },
+ "engines": {
+ "node": "^12.20.0 || ^14.13.1 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/default-browser/node_modules/onetime": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/onetime/-/onetime-6.0.0.tgz",
+ "integrity": "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==",
+ "dev": true,
+ "dependencies": {
+ "mimic-fn": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/default-browser/node_modules/path-key": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz",
+ "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/default-browser/node_modules/strip-final-newline": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-3.0.0.tgz",
+ "integrity": "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==",
+ "dev": true,
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/define-data-property": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.1.tgz",
+ "integrity": "sha512-E7uGkTzkk1d0ByLeSc6ZsFS79Axg+m1P/VsgYsxHgiuc3tFSj+MjMIwe90FC4lOAZzNBdY7kkO2P2wKdsQ1vgQ==",
+ "dev": true,
+ "dependencies": {
+ "get-intrinsic": "^1.2.1",
+ "gopd": "^1.0.1",
+ "has-property-descriptors": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/define-lazy-prop": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-3.0.0.tgz",
+ "integrity": "sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg==",
+ "dev": true,
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/define-properties": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz",
+ "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==",
+ "dev": true,
+ "dependencies": {
+ "define-data-property": "^1.0.1",
+ "has-property-descriptors": "^1.0.0",
+ "object-keys": "^1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/define-property": {
+ "version": "0.2.5",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+ "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+ "dev": true,
+ "dependencies": {
+ "is-descriptor": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/depd": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz",
+ "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/destroy": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz",
+ "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8",
+ "npm": "1.2.8000 || >= 1.4.16"
+ }
+ },
+ "node_modules/detect-file": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/detect-file/-/detect-file-1.0.0.tgz",
+ "integrity": "sha1-8NZtA2cqglyxtzvbP+YjEMjlUrc=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/detect-indent": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/detect-indent/-/detect-indent-4.0.0.tgz",
+ "integrity": "sha1-920GQ1LN9Docts5hnE7jqUdd4gg=",
+ "dev": true,
+ "dependencies": {
+ "repeating": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/detect-newline": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-2.1.0.tgz",
+ "integrity": "sha1-9B8cEL5LAOh7XxPaaAdZ8sW/0+I=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/diff": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz",
+ "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.3.1"
+ }
+ },
+ "node_modules/dir-glob": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz",
+ "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==",
+ "dev": true,
+ "dependencies": {
+ "path-type": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/doctrine": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz",
+ "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==",
+ "dev": true,
+ "dependencies": {
+ "esutils": "^2.0.2"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/duration": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/duration/-/duration-0.2.2.tgz",
+ "integrity": "sha512-06kgtea+bGreF5eKYgI/36A6pLXggY7oR4p1pq4SmdFBn1ReOL5D8RhG64VrqfTTKNucqqtBAwEj8aB88mcqrg==",
+ "dev": true,
+ "dependencies": {
+ "d": "1",
+ "es5-ext": "~0.10.46"
+ }
+ },
+ "node_modules/ee-first": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
+ "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=",
+ "dev": true
+ },
+ "node_modules/electron-to-chromium": {
+ "version": "1.4.573",
+ "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.573.tgz",
+ "integrity": "sha512-tzxxvKDTO3V5vzN2F+3v9jrK9gEbCdf1YYJUx/zVq1cyzyh+x1ddeYNNWh0ZS2ETNCVK3+Pns1LHIBq4w20X2Q==",
+ "dev": true
+ },
+ "node_modules/emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
+ "dev": true
+ },
+ "node_modules/encodeurl": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz",
+ "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/error-ex": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz",
+ "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==",
+ "dev": true,
+ "dependencies": {
+ "is-arrayish": "^0.2.1"
+ }
+ },
+ "node_modules/es-abstract": {
+ "version": "1.22.3",
+ "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.22.3.tgz",
+ "integrity": "sha512-eiiY8HQeYfYH2Con2berK+To6GrK2RxbPawDkGq4UiCQQfZHb6wX9qQqkbpPqaxQFcl8d9QzZqo0tGE0VcrdwA==",
+ "dev": true,
+ "dependencies": {
+ "array-buffer-byte-length": "^1.0.0",
+ "arraybuffer.prototype.slice": "^1.0.2",
+ "available-typed-arrays": "^1.0.5",
+ "call-bind": "^1.0.5",
+ "es-set-tostringtag": "^2.0.1",
+ "es-to-primitive": "^1.2.1",
+ "function.prototype.name": "^1.1.6",
+ "get-intrinsic": "^1.2.2",
+ "get-symbol-description": "^1.0.0",
+ "globalthis": "^1.0.3",
+ "gopd": "^1.0.1",
+ "has-property-descriptors": "^1.0.0",
+ "has-proto": "^1.0.1",
+ "has-symbols": "^1.0.3",
+ "hasown": "^2.0.0",
+ "internal-slot": "^1.0.5",
+ "is-array-buffer": "^3.0.2",
+ "is-callable": "^1.2.7",
+ "is-negative-zero": "^2.0.2",
+ "is-regex": "^1.1.4",
+ "is-shared-array-buffer": "^1.0.2",
+ "is-string": "^1.0.7",
+ "is-typed-array": "^1.1.12",
+ "is-weakref": "^1.0.2",
+ "object-inspect": "^1.13.1",
+ "object-keys": "^1.1.1",
+ "object.assign": "^4.1.4",
+ "regexp.prototype.flags": "^1.5.1",
+ "safe-array-concat": "^1.0.1",
+ "safe-regex-test": "^1.0.0",
+ "string.prototype.trim": "^1.2.8",
+ "string.prototype.trimend": "^1.0.7",
+ "string.prototype.trimstart": "^1.0.7",
+ "typed-array-buffer": "^1.0.0",
+ "typed-array-byte-length": "^1.0.0",
+ "typed-array-byte-offset": "^1.0.0",
+ "typed-array-length": "^1.0.4",
+ "unbox-primitive": "^1.0.2",
+ "which-typed-array": "^1.1.13"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/es-set-tostringtag": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.0.2.tgz",
+ "integrity": "sha512-BuDyupZt65P9D2D2vA/zqcI3G5xRsklm5N3xCwuiy+/vKy8i0ifdsQP1sLgO4tZDSCaQUSnmC48khknGMV3D2Q==",
+ "dev": true,
+ "dependencies": {
+ "get-intrinsic": "^1.2.2",
+ "has-tostringtag": "^1.0.0",
+ "hasown": "^2.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-shim-unscopables": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.0.2.tgz",
+ "integrity": "sha512-J3yBRXCzDu4ULnQwxyToo/OjdMx6akgVC7K6few0a7F/0wLtmKKN7I73AH5T2836UuXRqN7Qg+IIUw/+YJksRw==",
+ "dev": true,
+ "dependencies": {
+ "hasown": "^2.0.0"
+ }
+ },
+ "node_modules/es-to-primitive": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz",
+ "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==",
+ "dev": true,
+ "dependencies": {
+ "is-callable": "^1.1.4",
+ "is-date-object": "^1.0.1",
+ "is-symbol": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/es5-ext": {
+ "version": "0.10.62",
+ "resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.62.tgz",
+ "integrity": "sha512-BHLqn0klhEpnOKSrzn/Xsz2UIW8j+cGmo9JLzr8BiUapV8hPL9+FliFqjwr9ngW7jWdnxv6eO+/LqyhJVqgrjA==",
+ "dev": true,
+ "hasInstallScript": true,
+ "dependencies": {
+ "es6-iterator": "^2.0.3",
+ "es6-symbol": "^3.1.3",
+ "next-tick": "^1.1.0"
+ },
+ "engines": {
+ "node": ">=0.10"
+ }
+ },
+ "node_modules/es6-iterator": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/es6-iterator/-/es6-iterator-2.0.3.tgz",
+ "integrity": "sha512-zw4SRzoUkd+cl+ZoE15A9o1oQd920Bb0iOJMQkQhl3jNc03YqVjAhG7scf9C5KWRU/R13Orf588uCC6525o02g==",
+ "dev": true,
+ "dependencies": {
+ "d": "1",
+ "es5-ext": "^0.10.35",
+ "es6-symbol": "^3.1.1"
+ }
+ },
+ "node_modules/es6-promise": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-0.1.2.tgz",
+ "integrity": "sha1-8RLCn+paCZhTn8tqL9IUQ9KPBfc=",
+ "dev": true
+ },
+ "node_modules/es6-symbol": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/es6-symbol/-/es6-symbol-3.1.3.tgz",
+ "integrity": "sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==",
+ "dev": true,
+ "dependencies": {
+ "d": "^1.0.1",
+ "ext": "^1.1.2"
+ }
+ },
+ "node_modules/escalade": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
+ "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/escape-html": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
+ "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=",
+ "dev": true
+ },
+ "node_modules/escape-string-regexp": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
+ "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.8.0"
+ }
+ },
+ "node_modules/eslint": {
+ "version": "8.52.0",
+ "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.52.0.tgz",
+ "integrity": "sha512-zh/JHnaixqHZsolRB/w9/02akBk9EPrOs9JwcTP2ek7yL5bVvXuRariiaAjjoJ5DvuwQ1WAE/HsMz+w17YgBCg==",
+ "dev": true,
+ "dependencies": {
+ "@eslint-community/eslint-utils": "^4.2.0",
+ "@eslint-community/regexpp": "^4.6.1",
+ "@eslint/eslintrc": "^2.1.2",
+ "@eslint/js": "8.52.0",
+ "@humanwhocodes/config-array": "^0.11.13",
+ "@humanwhocodes/module-importer": "^1.0.1",
+ "@nodelib/fs.walk": "^1.2.8",
+ "@ungap/structured-clone": "^1.2.0",
+ "ajv": "^6.12.4",
+ "chalk": "^4.0.0",
+ "cross-spawn": "^7.0.2",
+ "debug": "^4.3.2",
+ "doctrine": "^3.0.0",
+ "escape-string-regexp": "^4.0.0",
+ "eslint-scope": "^7.2.2",
+ "eslint-visitor-keys": "^3.4.3",
+ "espree": "^9.6.1",
+ "esquery": "^1.4.2",
+ "esutils": "^2.0.2",
+ "fast-deep-equal": "^3.1.3",
+ "file-entry-cache": "^6.0.1",
+ "find-up": "^5.0.0",
+ "glob-parent": "^6.0.2",
+ "globals": "^13.19.0",
+ "graphemer": "^1.4.0",
+ "ignore": "^5.2.0",
+ "imurmurhash": "^0.1.4",
+ "is-glob": "^4.0.0",
+ "is-path-inside": "^3.0.3",
+ "js-yaml": "^4.1.0",
+ "json-stable-stringify-without-jsonify": "^1.0.1",
+ "levn": "^0.4.1",
+ "lodash.merge": "^4.6.2",
+ "minimatch": "^3.1.2",
+ "natural-compare": "^1.4.0",
+ "optionator": "^0.9.3",
+ "strip-ansi": "^6.0.1",
+ "text-table": "^0.2.0"
+ },
+ "bin": {
+ "eslint": "bin/eslint.js"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
+ }
+ },
+ "node_modules/eslint-config-prettier": {
+ "version": "9.0.0",
+ "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-9.0.0.tgz",
+ "integrity": "sha512-IcJsTkJae2S35pRsRAwoCE+925rJJStOdkKnLVgtE+tEpqU0EVVM7OqrwxqgptKdX29NUwC82I5pXsGFIgSevw==",
+ "dev": true,
+ "bin": {
+ "eslint-config-prettier": "bin/cli.js"
+ },
+ "peerDependencies": {
+ "eslint": ">=7.0.0"
+ }
+ },
+ "node_modules/eslint-import-resolver-node": {
+ "version": "0.3.9",
+ "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.9.tgz",
+ "integrity": "sha512-WFj2isz22JahUv+B788TlO3N6zL3nNJGU8CcZbPZvVEkBPaJdCV4vy5wyghty5ROFbCRnm132v8BScu5/1BQ8g==",
+ "dev": true,
+ "dependencies": {
+ "debug": "^3.2.7",
+ "is-core-module": "^2.13.0",
+ "resolve": "^1.22.4"
+ }
+ },
+ "node_modules/eslint-import-resolver-node/node_modules/debug": {
+ "version": "3.2.7",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
+ "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
+ "dev": true,
+ "dependencies": {
+ "ms": "^2.1.1"
+ }
+ },
+ "node_modules/eslint-module-utils": {
+ "version": "2.8.0",
+ "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.8.0.tgz",
+ "integrity": "sha512-aWajIYfsqCKRDgUfjEXNN/JlrzauMuSEy5sbd7WXbtW3EH6A6MpwEh42c7qD+MqQo9QMJ6fWLAeIJynx0g6OAw==",
+ "dev": true,
+ "dependencies": {
+ "debug": "^3.2.7"
+ },
+ "engines": {
+ "node": ">=4"
+ },
+ "peerDependenciesMeta": {
+ "eslint": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/eslint-module-utils/node_modules/debug": {
+ "version": "3.2.7",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
+ "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
+ "dev": true,
+ "dependencies": {
+ "ms": "^2.1.1"
+ }
+ },
+ "node_modules/eslint-plugin-ban": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-ban/-/eslint-plugin-ban-1.6.0.tgz",
+ "integrity": "sha512-gZptoV+SFHOHO57/5lmPvizMvSXrjFatP9qlVQf3meL/WHo9TxSoERygrMlESl19CPh95U86asTxohT8OprwDw==",
+ "dev": true,
+ "dependencies": {
+ "requireindex": "~1.2.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/eslint-plugin-deprecation": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-deprecation/-/eslint-plugin-deprecation-2.0.0.tgz",
+ "integrity": "sha512-OAm9Ohzbj11/ZFyICyR5N6LbOIvQMp7ZU2zI7Ej0jIc8kiGUERXPNMfw2QqqHD1ZHtjMub3yPZILovYEYucgoQ==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/utils": "^6.0.0",
+ "tslib": "^2.3.1",
+ "tsutils": "^3.21.0"
+ },
+ "peerDependencies": {
+ "eslint": "^7.0.0 || ^8.0.0",
+ "typescript": "^4.2.4 || ^5.0.0"
+ }
+ },
+ "node_modules/eslint-plugin-es": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-es/-/eslint-plugin-es-3.0.1.tgz",
+ "integrity": "sha512-GUmAsJaN4Fc7Gbtl8uOBlayo2DqhwWvEzykMHSCZHU3XdJ+NSzzZcVhXh3VxX5icqQ+oQdIEawXX8xkR3mIFmQ==",
+ "dev": true,
+ "dependencies": {
+ "eslint-utils": "^2.0.0",
+ "regexpp": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8.10.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/mysticatea"
+ },
+ "peerDependencies": {
+ "eslint": ">=4.19.1"
+ }
+ },
+ "node_modules/eslint-plugin-gpuweb-cts": {
+ "resolved": "tools/eslint-plugin-gpuweb-cts",
+ "link": true
+ },
+ "node_modules/eslint-plugin-import": {
+ "version": "2.29.0",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.29.0.tgz",
+ "integrity": "sha512-QPOO5NO6Odv5lpoTkddtutccQjysJuFxoPS7fAHO+9m9udNHvTCPSAMW9zGAYj8lAIdr40I8yPCdUYrncXtrwg==",
+ "dev": true,
+ "dependencies": {
+ "array-includes": "^3.1.7",
+ "array.prototype.findlastindex": "^1.2.3",
+ "array.prototype.flat": "^1.3.2",
+ "array.prototype.flatmap": "^1.3.2",
+ "debug": "^3.2.7",
+ "doctrine": "^2.1.0",
+ "eslint-import-resolver-node": "^0.3.9",
+ "eslint-module-utils": "^2.8.0",
+ "hasown": "^2.0.0",
+ "is-core-module": "^2.13.1",
+ "is-glob": "^4.0.3",
+ "minimatch": "^3.1.2",
+ "object.fromentries": "^2.0.7",
+ "object.groupby": "^1.0.1",
+ "object.values": "^1.1.7",
+ "semver": "^6.3.1",
+ "tsconfig-paths": "^3.14.2"
+ },
+ "engines": {
+ "node": ">=4"
+ },
+ "peerDependencies": {
+ "eslint": "^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0 || ^8"
+ }
+ },
+ "node_modules/eslint-plugin-import/node_modules/debug": {
+ "version": "3.2.7",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
+ "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
+ "dev": true,
+ "dependencies": {
+ "ms": "^2.1.1"
+ }
+ },
+ "node_modules/eslint-plugin-import/node_modules/doctrine": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz",
+ "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==",
+ "dev": true,
+ "dependencies": {
+ "esutils": "^2.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/eslint-plugin-node": {
+ "version": "11.1.0",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-node/-/eslint-plugin-node-11.1.0.tgz",
+ "integrity": "sha512-oUwtPJ1W0SKD0Tr+wqu92c5xuCeQqB3hSCHasn/ZgjFdA9iDGNkNf2Zi9ztY7X+hNuMib23LNGRm6+uN+KLE3g==",
+ "dev": true,
+ "dependencies": {
+ "eslint-plugin-es": "^3.0.0",
+ "eslint-utils": "^2.0.0",
+ "ignore": "^5.1.1",
+ "minimatch": "^3.0.4",
+ "resolve": "^1.10.1",
+ "semver": "^6.1.0"
+ },
+ "engines": {
+ "node": ">=8.10.0"
+ },
+ "peerDependencies": {
+ "eslint": ">=5.16.0"
+ }
+ },
+ "node_modules/eslint-plugin-prettier": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-prettier/-/eslint-plugin-prettier-5.0.0.tgz",
+ "integrity": "sha512-AgaZCVuYDXHUGxj/ZGu1u8H8CYgDY3iG6w5kUFw4AzMVXzB7VvbKgYR4nATIN+OvUrghMbiDLeimVjVY5ilq3w==",
+ "dev": true,
+ "dependencies": {
+ "prettier-linter-helpers": "^1.0.0",
+ "synckit": "^0.8.5"
+ },
+ "engines": {
+ "node": "^14.18.0 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/prettier"
+ },
+ "peerDependencies": {
+ "@types/eslint": ">=8.0.0",
+ "eslint": ">=8.0.0",
+ "prettier": ">=3.0.0"
+ },
+ "peerDependenciesMeta": {
+ "@types/eslint": {
+ "optional": true
+ },
+ "eslint-config-prettier": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/eslint-scope": {
+ "version": "7.2.2",
+ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz",
+ "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==",
+ "dev": true,
+ "dependencies": {
+ "esrecurse": "^4.3.0",
+ "estraverse": "^5.2.0"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
+ }
+ },
+ "node_modules/eslint-utils": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-2.1.0.tgz",
+ "integrity": "sha512-w94dQYoauyvlDc43XnGB8lU3Zt713vNChgt4EWwhXAP2XkBvndfxF0AgIqKOOasjPIPzj9JqgwkwbCYD0/V3Zg==",
+ "dev": true,
+ "dependencies": {
+ "eslint-visitor-keys": "^1.1.0"
+ },
+ "engines": {
+ "node": ">=6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/mysticatea"
+ }
+ },
+ "node_modules/eslint-utils/node_modules/eslint-visitor-keys": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz",
+ "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/eslint-visitor-keys": {
+ "version": "3.4.3",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz",
+ "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==",
+ "dev": true,
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
+ }
+ },
+ "node_modules/eslint/node_modules/ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "dev": true,
+ "dependencies": {
+ "color-convert": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/eslint/node_modules/argparse": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
+ "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
+ "dev": true
+ },
+ "node_modules/eslint/node_modules/chalk": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
+ "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+ "dev": true,
+ "dependencies": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/chalk?sponsor=1"
+ }
+ },
+ "node_modules/eslint/node_modules/color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "dev": true,
+ "dependencies": {
+ "color-name": "~1.1.4"
+ },
+ "engines": {
+ "node": ">=7.0.0"
+ }
+ },
+ "node_modules/eslint/node_modules/color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
+ "dev": true
+ },
+ "node_modules/eslint/node_modules/escape-string-regexp": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz",
+ "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/eslint/node_modules/glob-parent": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz",
+ "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==",
+ "dev": true,
+ "dependencies": {
+ "is-glob": "^4.0.3"
+ },
+ "engines": {
+ "node": ">=10.13.0"
+ }
+ },
+ "node_modules/eslint/node_modules/globals": {
+ "version": "13.23.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-13.23.0.tgz",
+ "integrity": "sha512-XAmF0RjlrjY23MA51q3HltdlGxUpXPvg0GioKiD9X6HD28iMjo2dKC8Vqwm7lne4GNr78+RHTfliktR6ZH09wA==",
+ "dev": true,
+ "dependencies": {
+ "type-fest": "^0.20.2"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/eslint/node_modules/has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/eslint/node_modules/js-yaml": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
+ "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
+ "dev": true,
+ "dependencies": {
+ "argparse": "^2.0.1"
+ },
+ "bin": {
+ "js-yaml": "bin/js-yaml.js"
+ }
+ },
+ "node_modules/eslint/node_modules/supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "dev": true,
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/eslint/node_modules/type-fest": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz",
+ "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/espree": {
+ "version": "9.6.1",
+ "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz",
+ "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==",
+ "dev": true,
+ "dependencies": {
+ "acorn": "^8.9.0",
+ "acorn-jsx": "^5.3.2",
+ "eslint-visitor-keys": "^3.4.1"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
+ }
+ },
+ "node_modules/esprima": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
+ "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
+ "dev": true,
+ "bin": {
+ "esparse": "bin/esparse.js",
+ "esvalidate": "bin/esvalidate.js"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/esquery": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz",
+ "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==",
+ "dev": true,
+ "dependencies": {
+ "estraverse": "^5.1.0"
+ },
+ "engines": {
+ "node": ">=0.10"
+ }
+ },
+ "node_modules/esrecurse": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz",
+ "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==",
+ "dev": true,
+ "dependencies": {
+ "estraverse": "^5.2.0"
+ },
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/estraverse": {
+ "version": "5.3.0",
+ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz",
+ "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==",
+ "dev": true,
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/esutils": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz",
+ "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/etag": {
+ "version": "1.8.1",
+ "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
+ "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/eventemitter2": {
+ "version": "0.4.14",
+ "resolved": "https://registry.npmjs.org/eventemitter2/-/eventemitter2-0.4.14.tgz",
+ "integrity": "sha1-j2G3XN4BKy6esoTUVFWDtWQ7Yas=",
+ "dev": true
+ },
+ "node_modules/eventemitter3": {
+ "version": "4.0.7",
+ "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz",
+ "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==",
+ "dev": true
+ },
+ "node_modules/execa": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz",
+ "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==",
+ "dev": true,
+ "dependencies": {
+ "cross-spawn": "^7.0.3",
+ "get-stream": "^6.0.0",
+ "human-signals": "^2.1.0",
+ "is-stream": "^2.0.0",
+ "merge-stream": "^2.0.0",
+ "npm-run-path": "^4.0.1",
+ "onetime": "^5.1.2",
+ "signal-exit": "^3.0.3",
+ "strip-final-newline": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sindresorhus/execa?sponsor=1"
+ }
+ },
+ "node_modules/exit": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz",
+ "integrity": "sha1-BjJjj42HfMghB9MKD/8aF8uhzQw=",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/expand-brackets": {
+ "version": "2.1.4",
+ "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz",
+ "integrity": "sha1-t3c14xXOMPa27/D4OwQVGiJEliI=",
+ "dev": true,
+ "dependencies": {
+ "debug": "^2.3.3",
+ "define-property": "^0.2.5",
+ "extend-shallow": "^2.0.1",
+ "posix-character-classes": "^0.1.0",
+ "regex-not": "^1.0.0",
+ "snapdragon": "^0.8.1",
+ "to-regex": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/expand-brackets/node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/expand-brackets/node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=",
+ "dev": true
+ },
+ "node_modules/expand-tilde": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/expand-tilde/-/expand-tilde-2.0.2.tgz",
+ "integrity": "sha1-l+gBqgUt8CRU3kawK/YhZCzchQI=",
+ "dev": true,
+ "dependencies": {
+ "homedir-polyfill": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/express": {
+ "version": "4.18.2",
+ "resolved": "https://registry.npmjs.org/express/-/express-4.18.2.tgz",
+ "integrity": "sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==",
+ "dev": true,
+ "dependencies": {
+ "accepts": "~1.3.8",
+ "array-flatten": "1.1.1",
+ "body-parser": "1.20.1",
+ "content-disposition": "0.5.4",
+ "content-type": "~1.0.4",
+ "cookie": "0.5.0",
+ "cookie-signature": "1.0.6",
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "finalhandler": "1.2.0",
+ "fresh": "0.5.2",
+ "http-errors": "2.0.0",
+ "merge-descriptors": "1.0.1",
+ "methods": "~1.1.2",
+ "on-finished": "2.4.1",
+ "parseurl": "~1.3.3",
+ "path-to-regexp": "0.1.7",
+ "proxy-addr": "~2.0.7",
+ "qs": "6.11.0",
+ "range-parser": "~1.2.1",
+ "safe-buffer": "5.2.1",
+ "send": "0.18.0",
+ "serve-static": "1.15.0",
+ "setprototypeof": "1.2.0",
+ "statuses": "2.0.1",
+ "type-is": "~1.6.18",
+ "utils-merge": "1.0.1",
+ "vary": "~1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.10.0"
+ }
+ },
+ "node_modules/express/node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/express/node_modules/depd": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
+ "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/express/node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=",
+ "dev": true
+ },
+ "node_modules/express/node_modules/on-finished": {
+ "version": "2.4.1",
+ "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
+ "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
+ "dev": true,
+ "dependencies": {
+ "ee-first": "1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/express/node_modules/safe-buffer": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
+ "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ]
+ },
+ "node_modules/express/node_modules/statuses": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
+ "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/ext": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/ext/-/ext-1.7.0.tgz",
+ "integrity": "sha512-6hxeJYaL110a9b5TEJSj0gojyHQAmA2ch5Os+ySCiA1QGdS697XWY1pzsrSjqA9LDEEgdB/KypIlR59RcLuHYw==",
+ "dev": true,
+ "dependencies": {
+ "type": "^2.7.2"
+ }
+ },
+ "node_modules/ext/node_modules/type": {
+ "version": "2.7.2",
+ "resolved": "https://registry.npmjs.org/type/-/type-2.7.2.tgz",
+ "integrity": "sha512-dzlvlNlt6AXU7EBSfpAscydQ7gXB+pPGsPnfJnZpiNJBDj7IaJzQlBZYGdEi4R9HmPdBv2XmWJ6YUtoTa7lmCw==",
+ "dev": true
+ },
+ "node_modules/extend": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
+ "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==",
+ "dev": true
+ },
+ "node_modules/extend-shallow": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+ "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+ "dev": true,
+ "dependencies": {
+ "is-extendable": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/external-editor": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.1.0.tgz",
+ "integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==",
+ "dev": true,
+ "dependencies": {
+ "chardet": "^0.7.0",
+ "iconv-lite": "^0.4.24",
+ "tmp": "^0.0.33"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/extglob": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz",
+ "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==",
+ "dev": true,
+ "dependencies": {
+ "array-unique": "^0.3.2",
+ "define-property": "^1.0.0",
+ "expand-brackets": "^2.1.4",
+ "extend-shallow": "^2.0.1",
+ "fragment-cache": "^0.2.1",
+ "regex-not": "^1.0.0",
+ "snapdragon": "^0.8.1",
+ "to-regex": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/extglob/node_modules/define-property": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+ "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+ "dev": true,
+ "dependencies": {
+ "is-descriptor": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/extglob/node_modules/is-accessor-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+ "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+ "dev": true,
+ "dependencies": {
+ "kind-of": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/extglob/node_modules/is-data-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+ "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+ "dev": true,
+ "dependencies": {
+ "kind-of": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/extglob/node_modules/is-descriptor": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+ "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+ "dev": true,
+ "dependencies": {
+ "is-accessor-descriptor": "^1.0.0",
+ "is-data-descriptor": "^1.0.0",
+ "kind-of": "^6.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/fast-deep-equal": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
+ "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
+ "dev": true
+ },
+ "node_modules/fast-diff": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/fast-diff/-/fast-diff-1.3.0.tgz",
+ "integrity": "sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw==",
+ "dev": true
+ },
+ "node_modules/fast-glob": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.1.tgz",
+ "integrity": "sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg==",
+ "dev": true,
+ "dependencies": {
+ "@nodelib/fs.stat": "^2.0.2",
+ "@nodelib/fs.walk": "^1.2.3",
+ "glob-parent": "^5.1.2",
+ "merge2": "^1.3.0",
+ "micromatch": "^4.0.4"
+ },
+ "engines": {
+ "node": ">=8.6.0"
+ }
+ },
+ "node_modules/fast-json-stable-stringify": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
+ "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==",
+ "dev": true
+ },
+ "node_modules/fast-levenshtein": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz",
+ "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==",
+ "dev": true
+ },
+ "node_modules/fastq": {
+ "version": "1.13.0",
+ "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.13.0.tgz",
+ "integrity": "sha512-YpkpUnK8od0o1hmeSc7UUs/eB/vIPWJYjKck2QKIzAf71Vm1AAQ3EbuZB3g2JIy+pg+ERD0vqI79KyZiB2e2Nw==",
+ "dev": true,
+ "dependencies": {
+ "reusify": "^1.0.4"
+ }
+ },
+ "node_modules/figures": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz",
+ "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==",
+ "dev": true,
+ "dependencies": {
+ "escape-string-regexp": "^1.0.5"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/file-entry-cache": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz",
+ "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==",
+ "dev": true,
+ "dependencies": {
+ "flat-cache": "^3.0.4"
+ },
+ "engines": {
+ "node": "^10.12.0 || >=12.0.0"
+ }
+ },
+ "node_modules/file-sync-cmp": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/file-sync-cmp/-/file-sync-cmp-0.1.1.tgz",
+ "integrity": "sha1-peeo/7+kk7Q7kju9TKiaU7Y7YSs=",
+ "dev": true
+ },
+ "node_modules/file-uri-to-path": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz",
+ "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==",
+ "dev": true,
+ "optional": true
+ },
+ "node_modules/fill-range": {
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz",
+ "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==",
+ "dev": true,
+ "dependencies": {
+ "to-regex-range": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/finalhandler": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz",
+ "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==",
+ "dev": true,
+ "dependencies": {
+ "debug": "2.6.9",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "on-finished": "2.4.1",
+ "parseurl": "~1.3.3",
+ "statuses": "2.0.1",
+ "unpipe": "~1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/finalhandler/node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/finalhandler/node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
+ "dev": true
+ },
+ "node_modules/finalhandler/node_modules/on-finished": {
+ "version": "2.4.1",
+ "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
+ "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
+ "dev": true,
+ "dependencies": {
+ "ee-first": "1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/finalhandler/node_modules/statuses": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
+ "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/find-up": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz",
+ "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==",
+ "dev": true,
+ "dependencies": {
+ "locate-path": "^6.0.0",
+ "path-exists": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/findup-sync": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/findup-sync/-/findup-sync-5.0.0.tgz",
+ "integrity": "sha512-MzwXju70AuyflbgeOhzvQWAvvQdo1XL0A9bVvlXsYcFEBM87WR4OakL4OfZq+QRmr+duJubio+UtNQCPsVESzQ==",
+ "dev": true,
+ "dependencies": {
+ "detect-file": "^1.0.0",
+ "is-glob": "^4.0.3",
+ "micromatch": "^4.0.4",
+ "resolve-dir": "^1.0.1"
+ },
+ "engines": {
+ "node": ">= 10.13.0"
+ }
+ },
+ "node_modules/fined": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/fined/-/fined-1.2.0.tgz",
+ "integrity": "sha512-ZYDqPLGxDkDhDZBjZBb+oD1+j0rA4E0pXY50eplAAOPg2N/gUBSSk5IM1/QhPfyVo19lJ+CvXpqfvk+b2p/8Ng==",
+ "dev": true,
+ "dependencies": {
+ "expand-tilde": "^2.0.2",
+ "is-plain-object": "^2.0.3",
+ "object.defaults": "^1.1.0",
+ "object.pick": "^1.2.0",
+ "parse-filepath": "^1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/flagged-respawn": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/flagged-respawn/-/flagged-respawn-1.0.1.tgz",
+ "integrity": "sha512-lNaHNVymajmk0OJMBn8fVUAU1BtDeKIqKoVhk4xAALB57aALg6b4W0MfJ/cUE0g9YBXy5XhSlPIpYIJ7HaY/3Q==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/flat-cache": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz",
+ "integrity": "sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==",
+ "dev": true,
+ "dependencies": {
+ "flatted": "^3.1.0",
+ "rimraf": "^3.0.2"
+ },
+ "engines": {
+ "node": "^10.12.0 || >=12.0.0"
+ }
+ },
+ "node_modules/flatted": {
+ "version": "3.2.5",
+ "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.5.tgz",
+ "integrity": "sha512-WIWGi2L3DyTUvUrwRKgGi9TwxQMUEqPOPQBVi71R96jZXJdFskXEmf54BoZaS1kknGODoIGASGEzBUYdyMCBJg==",
+ "dev": true
+ },
+ "node_modules/follow-redirects": {
+ "version": "1.14.9",
+ "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.9.tgz",
+ "integrity": "sha512-MQDfihBQYMcyy5dhRDJUHcw7lb2Pv/TuE6xP1vyraLukNDHKbDxDNaOE3NbCAdKQApno+GPRyo1YAp89yCjK4w==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "individual",
+ "url": "https://github.com/sponsors/RubenVerborgh"
+ }
+ ],
+ "engines": {
+ "node": ">=4.0"
+ },
+ "peerDependenciesMeta": {
+ "debug": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/for-each": {
+ "version": "0.3.3",
+ "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.3.tgz",
+ "integrity": "sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==",
+ "dev": true,
+ "dependencies": {
+ "is-callable": "^1.1.3"
+ }
+ },
+ "node_modules/for-in": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz",
+ "integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/for-own": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/for-own/-/for-own-1.0.0.tgz",
+ "integrity": "sha1-xjMy9BXO3EsE2/5wz4NklMU8tEs=",
+ "dev": true,
+ "dependencies": {
+ "for-in": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/forwarded": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz",
+ "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/fragment-cache": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz",
+ "integrity": "sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk=",
+ "dev": true,
+ "dependencies": {
+ "map-cache": "^0.2.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/fresh": {
+ "version": "0.5.2",
+ "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
+ "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/fs-readdir-recursive": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/fs-readdir-recursive/-/fs-readdir-recursive-1.1.0.tgz",
+ "integrity": "sha512-GNanXlVr2pf02+sPN40XN8HG+ePaNcvM0q5mZBd668Obwb0yD5GiUbZOFgwn8kGMY6I3mdyDJzieUy3PTYyTRA==",
+ "dev": true
+ },
+ "node_modules/fs.realpath": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
+ "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=",
+ "dev": true
+ },
+ "node_modules/fsevents": {
+ "version": "2.3.2",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz",
+ "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==",
+ "dev": true,
+ "hasInstallScript": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
+ }
+ },
+ "node_modules/function-bind": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
+ "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
+ "dev": true,
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/function.prototype.name": {
+ "version": "1.1.6",
+ "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.6.tgz",
+ "integrity": "sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1",
+ "functions-have-names": "^1.2.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/functional.js": {
+ "version": "0.6.16",
+ "resolved": "https://registry.npmjs.org/functional.js/-/functional.js-0.6.16.tgz",
+ "integrity": "sha512-WDtBOEhQLa+s/1XyOsElhwXiQCMSipqSevaTmpEZzV8bDSNAExbr08NeG8Qkr/PSQbxhyZzFx/CmFJutAG1S0A==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8.x"
+ }
+ },
+ "node_modules/functions-have-names": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/functions-have-names/-/functions-have-names-1.2.3.tgz",
+ "integrity": "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==",
+ "dev": true,
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/gensync": {
+ "version": "1.0.0-beta.2",
+ "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
+ "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
+ "dev": true,
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/get-intrinsic": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.2.tgz",
+ "integrity": "sha512-0gSo4ml/0j98Y3lngkFEot/zhiCeWsbYIlZ+uZOVgzLyLaUw7wxUL+nCTP0XJvJg1AXulJRI3UJi8GsbDuxdGA==",
+ "dev": true,
+ "dependencies": {
+ "function-bind": "^1.1.2",
+ "has-proto": "^1.0.1",
+ "has-symbols": "^1.0.3",
+ "hasown": "^2.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-stream": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz",
+ "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/get-symbol-description": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.0.tgz",
+ "integrity": "sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "get-intrinsic": "^1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-value": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz",
+ "integrity": "sha1-3BXKHGcjh8p2vTesCjlbogQqLCg=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/getobject": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/getobject/-/getobject-1.0.2.tgz",
+ "integrity": "sha512-2zblDBaFcb3rB4rF77XVnuINOE2h2k/OnqXAiy0IrTxUfV1iFp3la33oAQVY9pCpWU268WFYVt2t71hlMuLsOg==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/glob": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.0.tgz",
+ "integrity": "sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==",
+ "dev": true,
+ "dependencies": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.0.4",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ },
+ "engines": {
+ "node": "*"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/glob-parent": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
+ "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
+ "dev": true,
+ "dependencies": {
+ "is-glob": "^4.0.1"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/global-modules": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-1.0.0.tgz",
+ "integrity": "sha512-sKzpEkf11GpOFuw0Zzjzmt4B4UZwjOcG757PPvrfhxcLFbq0wpsgpOqxpxtxFiCG4DtG93M6XRVbF2oGdev7bg==",
+ "dev": true,
+ "dependencies": {
+ "global-prefix": "^1.0.1",
+ "is-windows": "^1.0.1",
+ "resolve-dir": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/global-prefix": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-1.0.2.tgz",
+ "integrity": "sha1-2/dDxsFJklk8ZVVoy2btMsASLr4=",
+ "dev": true,
+ "dependencies": {
+ "expand-tilde": "^2.0.2",
+ "homedir-polyfill": "^1.0.1",
+ "ini": "^1.3.4",
+ "is-windows": "^1.0.1",
+ "which": "^1.2.14"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/global-prefix/node_modules/ini": {
+ "version": "1.3.8",
+ "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz",
+ "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==",
+ "dev": true
+ },
+ "node_modules/global-prefix/node_modules/which": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz",
+ "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==",
+ "dev": true,
+ "dependencies": {
+ "isexe": "^2.0.0"
+ },
+ "bin": {
+ "which": "bin/which"
+ }
+ },
+ "node_modules/globals": {
+ "version": "11.12.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz",
+ "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==",
+ "dev": true,
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/globalthis": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.3.tgz",
+ "integrity": "sha512-sFdI5LyBiNTHjRd7cGPWapiHWMOXKyuBNX/cWJ3NfzrZQVa8GI/8cofCl74AOVqq9W5kNmguTIzJ/1s2gyI9wA==",
+ "dev": true,
+ "dependencies": {
+ "define-properties": "^1.1.3"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/globby": {
+ "version": "11.1.0",
+ "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz",
+ "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==",
+ "dev": true,
+ "dependencies": {
+ "array-union": "^2.1.0",
+ "dir-glob": "^3.0.1",
+ "fast-glob": "^3.2.9",
+ "ignore": "^5.2.0",
+ "merge2": "^1.4.1",
+ "slash": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/globby/node_modules/slash": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz",
+ "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/gopd": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz",
+ "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==",
+ "dev": true,
+ "dependencies": {
+ "get-intrinsic": "^1.1.3"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/graceful-fs": {
+ "version": "4.2.9",
+ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.9.tgz",
+ "integrity": "sha512-NtNxqUcXgpW2iMrfqSfR73Glt39K+BLwWsPs94yR63v45T0Wbej7eRmL5cWfwEgqXnmjQp3zaJTshdRW/qC2ZQ==",
+ "dev": true
+ },
+ "node_modules/graphemer": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz",
+ "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==",
+ "dev": true
+ },
+ "node_modules/grunt": {
+ "version": "1.6.1",
+ "resolved": "https://registry.npmjs.org/grunt/-/grunt-1.6.1.tgz",
+ "integrity": "sha512-/ABUy3gYWu5iBmrUSRBP97JLpQUm0GgVveDCp6t3yRNIoltIYw7rEj3g5y1o2PGPR2vfTRGa7WC/LZHLTXnEzA==",
+ "dev": true,
+ "dependencies": {
+ "dateformat": "~4.6.2",
+ "eventemitter2": "~0.4.13",
+ "exit": "~0.1.2",
+ "findup-sync": "~5.0.0",
+ "glob": "~7.1.6",
+ "grunt-cli": "~1.4.3",
+ "grunt-known-options": "~2.0.0",
+ "grunt-legacy-log": "~3.0.0",
+ "grunt-legacy-util": "~2.0.1",
+ "iconv-lite": "~0.6.3",
+ "js-yaml": "~3.14.0",
+ "minimatch": "~3.0.4",
+ "nopt": "~3.0.6"
+ },
+ "bin": {
+ "grunt": "bin/grunt"
+ },
+ "engines": {
+ "node": ">=16"
+ }
+ },
+ "node_modules/grunt-cli": {
+ "version": "1.4.3",
+ "resolved": "https://registry.npmjs.org/grunt-cli/-/grunt-cli-1.4.3.tgz",
+ "integrity": "sha512-9Dtx/AhVeB4LYzsViCjUQkd0Kw0McN2gYpdmGYKtE2a5Yt7v1Q+HYZVWhqXc/kGnxlMtqKDxSwotiGeFmkrCoQ==",
+ "dev": true,
+ "dependencies": {
+ "grunt-known-options": "~2.0.0",
+ "interpret": "~1.1.0",
+ "liftup": "~3.0.1",
+ "nopt": "~4.0.1",
+ "v8flags": "~3.2.0"
+ },
+ "bin": {
+ "grunt": "bin/grunt"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/grunt-cli/node_modules/nopt": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/nopt/-/nopt-4.0.3.tgz",
+ "integrity": "sha512-CvaGwVMztSMJLOeXPrez7fyfObdZqNUK1cPAEzLHrTybIua9pMdmmPR5YwtfNftIOMv3DPUhFaxsZMNTQO20Kg==",
+ "dev": true,
+ "dependencies": {
+ "abbrev": "1",
+ "osenv": "^0.1.4"
+ },
+ "bin": {
+ "nopt": "bin/nopt.js"
+ }
+ },
+ "node_modules/grunt-contrib-clean": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/grunt-contrib-clean/-/grunt-contrib-clean-2.0.1.tgz",
+ "integrity": "sha512-uRvnXfhiZt8akb/ZRDHJpQQtkkVkqc/opWO4Po/9ehC2hPxgptB9S6JHDC/Nxswo4CJSM0iFPT/Iym3cEMWzKA==",
+ "dev": true,
+ "dependencies": {
+ "async": "^3.2.3",
+ "rimraf": "^2.6.2"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "peerDependencies": {
+ "grunt": ">=0.4.5"
+ }
+ },
+ "node_modules/grunt-contrib-clean/node_modules/rimraf": {
+ "version": "2.7.1",
+ "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz",
+ "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==",
+ "dev": true,
+ "dependencies": {
+ "glob": "^7.1.3"
+ },
+ "bin": {
+ "rimraf": "bin.js"
+ }
+ },
+ "node_modules/grunt-contrib-copy": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/grunt-contrib-copy/-/grunt-contrib-copy-1.0.0.tgz",
+ "integrity": "sha1-cGDGWB6QS4qw0A8HbgqPbj58NXM=",
+ "dev": true,
+ "dependencies": {
+ "chalk": "^1.1.1",
+ "file-sync-cmp": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt-contrib-copy/node_modules/ansi-regex": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz",
+ "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt-contrib-copy/node_modules/ansi-styles": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz",
+ "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt-contrib-copy/node_modules/chalk": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz",
+ "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=",
+ "dev": true,
+ "dependencies": {
+ "ansi-styles": "^2.2.1",
+ "escape-string-regexp": "^1.0.2",
+ "has-ansi": "^2.0.0",
+ "strip-ansi": "^3.0.0",
+ "supports-color": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt-contrib-copy/node_modules/strip-ansi": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz",
+ "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=",
+ "dev": true,
+ "dependencies": {
+ "ansi-regex": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt-contrib-copy/node_modules/supports-color": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz",
+ "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.8.0"
+ }
+ },
+ "node_modules/grunt-known-options": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/grunt-known-options/-/grunt-known-options-2.0.0.tgz",
+ "integrity": "sha512-GD7cTz0I4SAede1/+pAbmJRG44zFLPipVtdL9o3vqx9IEyb7b4/Y3s7r6ofI3CchR5GvYJ+8buCSioDv5dQLiA==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt-legacy-log": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/grunt-legacy-log/-/grunt-legacy-log-3.0.0.tgz",
+ "integrity": "sha512-GHZQzZmhyq0u3hr7aHW4qUH0xDzwp2YXldLPZTCjlOeGscAOWWPftZG3XioW8MasGp+OBRIu39LFx14SLjXRcA==",
+ "dev": true,
+ "dependencies": {
+ "colors": "~1.1.2",
+ "grunt-legacy-log-utils": "~2.1.0",
+ "hooker": "~0.2.3",
+ "lodash": "~4.17.19"
+ },
+ "engines": {
+ "node": ">= 0.10.0"
+ }
+ },
+ "node_modules/grunt-legacy-log-utils": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/grunt-legacy-log-utils/-/grunt-legacy-log-utils-2.1.0.tgz",
+ "integrity": "sha512-lwquaPXJtKQk0rUM1IQAop5noEpwFqOXasVoedLeNzaibf/OPWjKYvvdqnEHNmU+0T0CaReAXIbGo747ZD+Aaw==",
+ "dev": true,
+ "dependencies": {
+ "chalk": "~4.1.0",
+ "lodash": "~4.17.19"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/grunt-legacy-log-utils/node_modules/ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "dev": true,
+ "dependencies": {
+ "color-convert": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/grunt-legacy-log-utils/node_modules/chalk": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
+ "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+ "dev": true,
+ "dependencies": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/chalk?sponsor=1"
+ }
+ },
+ "node_modules/grunt-legacy-log-utils/node_modules/color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "dev": true,
+ "dependencies": {
+ "color-name": "~1.1.4"
+ },
+ "engines": {
+ "node": ">=7.0.0"
+ }
+ },
+ "node_modules/grunt-legacy-log-utils/node_modules/color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
+ "dev": true
+ },
+ "node_modules/grunt-legacy-log-utils/node_modules/has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/grunt-legacy-log-utils/node_modules/supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "dev": true,
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/grunt-legacy-util": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/grunt-legacy-util/-/grunt-legacy-util-2.0.1.tgz",
+ "integrity": "sha512-2bQiD4fzXqX8rhNdXkAywCadeqiPiay0oQny77wA2F3WF4grPJXCvAcyoWUJV+po/b15glGkxuSiQCK299UC2w==",
+ "dev": true,
+ "dependencies": {
+ "async": "~3.2.0",
+ "exit": "~0.1.2",
+ "getobject": "~1.0.0",
+ "hooker": "~0.2.3",
+ "lodash": "~4.17.21",
+ "underscore.string": "~3.3.5",
+ "which": "~2.0.2"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/grunt-run": {
+ "version": "0.8.1",
+ "resolved": "https://registry.npmjs.org/grunt-run/-/grunt-run-0.8.1.tgz",
+ "integrity": "sha512-+wvoOJevugcjMLldbVCyspRHHntwVIJiTGjx0HFq+UwXhVPe7AaAiUdY4135CS68pAoRLhd7pAILpL2ITe1tmA==",
+ "dev": true,
+ "dependencies": {
+ "strip-ansi": "^3.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ },
+ "peerDependencies": {
+ "grunt": ">=0.4.0"
+ }
+ },
+ "node_modules/grunt-run/node_modules/ansi-regex": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz",
+ "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt-run/node_modules/strip-ansi": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz",
+ "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=",
+ "dev": true,
+ "dependencies": {
+ "ansi-regex": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt-timer": {
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/grunt-timer/-/grunt-timer-0.6.0.tgz",
+ "integrity": "sha512-CZc6NsOGr/HMo70RLXTBCPJm8seJok/lQL2VFygXEvrhj6fYJEvyDIEdSUTSNiXSyC4eNoN8zUNrzMXGwinjdQ==",
+ "dev": true,
+ "dependencies": {
+ "bash-color": "^0.0.3",
+ "duration": "^0.2.0",
+ "functional.js": "^0.6.10",
+ "hooker": "^0.2.3"
+ },
+ "engines": {
+ "node": ">= 0.8.x"
+ }
+ },
+ "node_modules/grunt-ts": {
+ "version": "6.0.0-beta.22",
+ "resolved": "https://registry.npmjs.org/grunt-ts/-/grunt-ts-6.0.0-beta.22.tgz",
+ "integrity": "sha512-g9e+ZImQ7W38dfpwhp0+GUltXWidy3YGPfIA/IyGL5HMv6wmVmMMoSgscI5swhs2HSPf8yAvXAAJbwrouijoRg==",
+ "dev": true,
+ "dependencies": {
+ "chokidar": "^2.0.4",
+ "csproj2ts": "^1.1.0",
+ "detect-indent": "^4.0.0",
+ "detect-newline": "^2.1.0",
+ "es6-promise": "~0.1.1",
+ "jsmin2": "^1.2.1",
+ "lodash": "~4.17.10",
+ "ncp": "0.5.1",
+ "rimraf": "2.2.6",
+ "semver": "^5.3.0",
+ "strip-bom": "^2.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ },
+ "peerDependencies": {
+ "grunt": "^1.0.0 || ^0.4.0",
+ "typescript": ">=1"
+ }
+ },
+ "node_modules/grunt-ts/node_modules/anymatch": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-2.0.0.tgz",
+ "integrity": "sha512-5teOsQWABXHHBFP9y3skS5P3d/WfWXpv3FUpy+LorMrNYaT9pI4oLMQX7jzQ2KklNpGpWHzdCXTDT2Y3XGlZBw==",
+ "dev": true,
+ "dependencies": {
+ "micromatch": "^3.1.4",
+ "normalize-path": "^2.1.1"
+ }
+ },
+ "node_modules/grunt-ts/node_modules/anymatch/node_modules/normalize-path": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-2.1.1.tgz",
+ "integrity": "sha1-GrKLVW4Zg2Oowab35vogE3/mrtk=",
+ "dev": true,
+ "dependencies": {
+ "remove-trailing-separator": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt-ts/node_modules/binary-extensions": {
+ "version": "1.13.1",
+ "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-1.13.1.tgz",
+ "integrity": "sha512-Un7MIEDdUC5gNpcGDV97op1Ywk748MpHcFTHoYs6qnj1Z3j7I53VG3nwZhKzoBZmbdRNnb6WRdFlwl7tSDuZGw==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt-ts/node_modules/braces": {
+ "version": "2.3.2",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz",
+ "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==",
+ "dev": true,
+ "dependencies": {
+ "arr-flatten": "^1.1.0",
+ "array-unique": "^0.3.2",
+ "extend-shallow": "^2.0.1",
+ "fill-range": "^4.0.0",
+ "isobject": "^3.0.1",
+ "repeat-element": "^1.1.2",
+ "snapdragon": "^0.8.1",
+ "snapdragon-node": "^2.0.1",
+ "split-string": "^3.0.2",
+ "to-regex": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt-ts/node_modules/chokidar": {
+ "version": "2.1.8",
+ "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-2.1.8.tgz",
+ "integrity": "sha512-ZmZUazfOzf0Nve7duiCKD23PFSCs4JPoYyccjUFF3aQkQadqBhfzhjkwBH2mNOG9cTBwhamM37EIsIkZw3nRgg==",
+ "deprecated": "Chokidar 2 does not receive security updates since 2019. Upgrade to chokidar 3 with 15x fewer dependencies",
+ "dev": true,
+ "dependencies": {
+ "anymatch": "^2.0.0",
+ "async-each": "^1.0.1",
+ "braces": "^2.3.2",
+ "glob-parent": "^3.1.0",
+ "inherits": "^2.0.3",
+ "is-binary-path": "^1.0.0",
+ "is-glob": "^4.0.0",
+ "normalize-path": "^3.0.0",
+ "path-is-absolute": "^1.0.0",
+ "readdirp": "^2.2.1",
+ "upath": "^1.1.1"
+ },
+ "optionalDependencies": {
+ "fsevents": "^1.2.7"
+ }
+ },
+ "node_modules/grunt-ts/node_modules/define-property": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz",
+ "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==",
+ "dev": true,
+ "dependencies": {
+ "is-descriptor": "^1.0.2",
+ "isobject": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt-ts/node_modules/fill-range": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz",
+ "integrity": "sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc=",
+ "dev": true,
+ "dependencies": {
+ "extend-shallow": "^2.0.1",
+ "is-number": "^3.0.0",
+ "repeat-string": "^1.6.1",
+ "to-regex-range": "^2.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt-ts/node_modules/fsevents": {
+ "version": "1.2.13",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.13.tgz",
+ "integrity": "sha512-oWb1Z6mkHIskLzEJ/XWX0srkpkTQ7vaopMQkyaEIoq0fmtFVxOthb8cCxeT+p3ynTdkk/RZwbgG4brR5BeWECw==",
+ "deprecated": "fsevents 1 will break on node v14+ and could be using insecure binaries. Upgrade to fsevents 2.",
+ "dev": true,
+ "hasInstallScript": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "dependencies": {
+ "bindings": "^1.5.0",
+ "nan": "^2.12.1"
+ },
+ "engines": {
+ "node": ">= 4.0"
+ }
+ },
+ "node_modules/grunt-ts/node_modules/glob-parent": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-3.1.0.tgz",
+ "integrity": "sha1-nmr2KZ2NO9K9QEMIMr0RPfkGxa4=",
+ "dev": true,
+ "dependencies": {
+ "is-glob": "^3.1.0",
+ "path-dirname": "^1.0.0"
+ }
+ },
+ "node_modules/grunt-ts/node_modules/glob-parent/node_modules/is-glob": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-3.1.0.tgz",
+ "integrity": "sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo=",
+ "dev": true,
+ "dependencies": {
+ "is-extglob": "^2.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt-ts/node_modules/is-accessor-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+ "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+ "dev": true,
+ "dependencies": {
+ "kind-of": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt-ts/node_modules/is-binary-path": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-1.0.1.tgz",
+ "integrity": "sha1-dfFmQrSA8YenEcgUFh/TpKdlWJg=",
+ "dev": true,
+ "dependencies": {
+ "binary-extensions": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt-ts/node_modules/is-data-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+ "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+ "dev": true,
+ "dependencies": {
+ "kind-of": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt-ts/node_modules/is-descriptor": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+ "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+ "dev": true,
+ "dependencies": {
+ "is-accessor-descriptor": "^1.0.0",
+ "is-data-descriptor": "^1.0.0",
+ "kind-of": "^6.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt-ts/node_modules/is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "dev": true,
+ "dependencies": {
+ "is-plain-object": "^2.0.4"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt-ts/node_modules/is-number": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz",
+ "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=",
+ "dev": true,
+ "dependencies": {
+ "kind-of": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt-ts/node_modules/is-number/node_modules/kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dev": true,
+ "dependencies": {
+ "is-buffer": "^1.1.5"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt-ts/node_modules/micromatch": {
+ "version": "3.1.10",
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz",
+ "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==",
+ "dev": true,
+ "dependencies": {
+ "arr-diff": "^4.0.0",
+ "array-unique": "^0.3.2",
+ "braces": "^2.3.1",
+ "define-property": "^2.0.2",
+ "extend-shallow": "^3.0.2",
+ "extglob": "^2.0.4",
+ "fragment-cache": "^0.2.1",
+ "kind-of": "^6.0.2",
+ "nanomatch": "^1.2.9",
+ "object.pick": "^1.3.0",
+ "regex-not": "^1.0.0",
+ "snapdragon": "^0.8.1",
+ "to-regex": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt-ts/node_modules/micromatch/node_modules/extend-shallow": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+ "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+ "dev": true,
+ "dependencies": {
+ "assign-symbols": "^1.0.0",
+ "is-extendable": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt-ts/node_modules/readdirp": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-2.2.1.tgz",
+ "integrity": "sha512-1JU/8q+VgFZyxwrJ+SVIOsh+KywWGpds3NTqikiKpDMZWScmAYyKIgqkO+ARvNWJfXeXR1zxz7aHF4u4CyH6vQ==",
+ "dev": true,
+ "dependencies": {
+ "graceful-fs": "^4.1.11",
+ "micromatch": "^3.1.10",
+ "readable-stream": "^2.0.2"
+ },
+ "engines": {
+ "node": ">=0.10"
+ }
+ },
+ "node_modules/grunt-ts/node_modules/rimraf": {
+ "version": "2.2.6",
+ "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.2.6.tgz",
+ "integrity": "sha1-xZWXVpsU2VatKcrMQr3d9fDqT0w=",
+ "dev": true,
+ "bin": {
+ "rimraf": "bin.js"
+ }
+ },
+ "node_modules/grunt-ts/node_modules/semver": {
+ "version": "5.7.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
+ "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==",
+ "dev": true,
+ "bin": {
+ "semver": "bin/semver"
+ }
+ },
+ "node_modules/grunt-ts/node_modules/to-regex-range": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-2.1.1.tgz",
+ "integrity": "sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg=",
+ "dev": true,
+ "dependencies": {
+ "is-number": "^3.0.0",
+ "repeat-string": "^1.6.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt/node_modules/glob": {
+ "version": "7.1.7",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz",
+ "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==",
+ "dev": true,
+ "dependencies": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.0.4",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ },
+ "engines": {
+ "node": "*"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/grunt/node_modules/iconv-lite": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz",
+ "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==",
+ "dev": true,
+ "dependencies": {
+ "safer-buffer": ">= 2.1.2 < 3.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/grunt/node_modules/minimatch": {
+ "version": "3.0.8",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.8.tgz",
+ "integrity": "sha512-6FsRAQsxQ61mw+qP1ZzbL9Bc78x2p5OqNgNpnoAFLTrX8n5Kxph0CsnhmKKNXTWjXqU5L0pGPR7hYk+XWZr60Q==",
+ "dev": true,
+ "dependencies": {
+ "brace-expansion": "^1.1.7"
+ },
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/gts": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/gts/-/gts-5.2.0.tgz",
+ "integrity": "sha512-25qOnePUUX7upFc4ycqWersDBq+o1X6hXUTW56JOWCxPYKJXQ1RWzqT9q+2SU3LfPKJf+4sz4Dw3VT0p96Kv6g==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/eslint-plugin": "5.62.0",
+ "@typescript-eslint/parser": "5.62.0",
+ "chalk": "^4.1.2",
+ "eslint": "8.50.0",
+ "eslint-config-prettier": "9.0.0",
+ "eslint-plugin-node": "11.1.0",
+ "eslint-plugin-prettier": "5.0.0",
+ "execa": "^5.0.0",
+ "inquirer": "^7.3.3",
+ "json5": "^2.1.3",
+ "meow": "^9.0.0",
+ "ncp": "^2.0.0",
+ "prettier": "3.0.3",
+ "rimraf": "3.0.2",
+ "write-file-atomic": "^4.0.0"
+ },
+ "bin": {
+ "gts": "build/src/cli.js"
+ },
+ "engines": {
+ "node": ">=14"
+ },
+ "peerDependencies": {
+ "typescript": ">=3"
+ }
+ },
+ "node_modules/gts/node_modules/@eslint/js": {
+ "version": "8.50.0",
+ "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.50.0.tgz",
+ "integrity": "sha512-NCC3zz2+nvYd+Ckfh87rA47zfu2QsQpvc6k1yzTk+b9KzRj0wkGa8LSoGOXN6Zv4lRf/EIoZ80biDh9HOI+RNQ==",
+ "dev": true,
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ }
+ },
+ "node_modules/gts/node_modules/@typescript-eslint/eslint-plugin": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.62.0.tgz",
+ "integrity": "sha512-TiZzBSJja/LbhNPvk6yc0JrX9XqhQ0hdh6M2svYfsHGejaKFIAGd9MQ+ERIMzLGlN/kZoYIgdxFV0PuljTKXag==",
+ "dev": true,
+ "dependencies": {
+ "@eslint-community/regexpp": "^4.4.0",
+ "@typescript-eslint/scope-manager": "5.62.0",
+ "@typescript-eslint/type-utils": "5.62.0",
+ "@typescript-eslint/utils": "5.62.0",
+ "debug": "^4.3.4",
+ "graphemer": "^1.4.0",
+ "ignore": "^5.2.0",
+ "natural-compare-lite": "^1.4.0",
+ "semver": "^7.3.7",
+ "tsutils": "^3.21.0"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ },
+ "peerDependencies": {
+ "@typescript-eslint/parser": "^5.0.0",
+ "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0"
+ },
+ "peerDependenciesMeta": {
+ "typescript": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/gts/node_modules/@typescript-eslint/parser": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.62.0.tgz",
+ "integrity": "sha512-VlJEV0fOQ7BExOsHYAGrgbEiZoi8D+Bl2+f6V2RrXerRSylnp+ZBHmPvaIa8cz0Ajx7WO7Z5RqfgYg7ED1nRhA==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/scope-manager": "5.62.0",
+ "@typescript-eslint/types": "5.62.0",
+ "@typescript-eslint/typescript-estree": "5.62.0",
+ "debug": "^4.3.4"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ },
+ "peerDependencies": {
+ "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0"
+ },
+ "peerDependenciesMeta": {
+ "typescript": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/gts/node_modules/@typescript-eslint/scope-manager": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.62.0.tgz",
+ "integrity": "sha512-VXuvVvZeQCQb5Zgf4HAxc04q5j+WrNAtNh9OwCsCgpKqESMTu3tF/jhZ3xG6T4NZwWl65Bg8KuS2uEvhSfLl0w==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/types": "5.62.0",
+ "@typescript-eslint/visitor-keys": "5.62.0"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ }
+ },
+ "node_modules/gts/node_modules/@typescript-eslint/types": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.62.0.tgz",
+ "integrity": "sha512-87NVngcbVXUahrRTqIK27gD2t5Cu1yuCXxbLcFtCzZGlfyVWWh8mLHkoxzjsB6DDNnvdL+fW8MiwPEJyGJQDgQ==",
+ "dev": true,
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ }
+ },
+ "node_modules/gts/node_modules/@typescript-eslint/typescript-estree": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.62.0.tgz",
+ "integrity": "sha512-CmcQ6uY7b9y694lKdRB8FEel7JbU/40iSAPomu++SjLMntB+2Leay2LO6i8VnJk58MtE9/nQSFIH6jpyRWyYzA==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/types": "5.62.0",
+ "@typescript-eslint/visitor-keys": "5.62.0",
+ "debug": "^4.3.4",
+ "globby": "^11.1.0",
+ "is-glob": "^4.0.3",
+ "semver": "^7.3.7",
+ "tsutils": "^3.21.0"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ },
+ "peerDependenciesMeta": {
+ "typescript": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/gts/node_modules/@typescript-eslint/utils": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.62.0.tgz",
+ "integrity": "sha512-n8oxjeb5aIbPFEtmQxQYOLI0i9n5ySBEY/ZEHHZqKQSFnxio1rv6dthascc9dLuwrL0RC5mPCxB7vnAVGAYWAQ==",
+ "dev": true,
+ "dependencies": {
+ "@eslint-community/eslint-utils": "^4.2.0",
+ "@types/json-schema": "^7.0.9",
+ "@types/semver": "^7.3.12",
+ "@typescript-eslint/scope-manager": "5.62.0",
+ "@typescript-eslint/types": "5.62.0",
+ "@typescript-eslint/typescript-estree": "5.62.0",
+ "eslint-scope": "^5.1.1",
+ "semver": "^7.3.7"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ },
+ "peerDependencies": {
+ "eslint": "^6.0.0 || ^7.0.0 || ^8.0.0"
+ }
+ },
+ "node_modules/gts/node_modules/@typescript-eslint/utils/node_modules/eslint-scope": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz",
+ "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==",
+ "dev": true,
+ "dependencies": {
+ "esrecurse": "^4.3.0",
+ "estraverse": "^4.1.1"
+ },
+ "engines": {
+ "node": ">=8.0.0"
+ }
+ },
+ "node_modules/gts/node_modules/@typescript-eslint/visitor-keys": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.62.0.tgz",
+ "integrity": "sha512-07ny+LHRzQXepkGg6w0mFY41fVUNBrL2Roj/++7V1txKugfjm/Ci/qSND03r2RhlJhJYMcTn9AhhSSqQp0Ysyw==",
+ "dev": true,
+ "dependencies": {
+ "@typescript-eslint/types": "5.62.0",
+ "eslint-visitor-keys": "^3.3.0"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/typescript-eslint"
+ }
+ },
+ "node_modules/gts/node_modules/ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "dev": true,
+ "dependencies": {
+ "color-convert": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/gts/node_modules/argparse": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
+ "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
+ "dev": true
+ },
+ "node_modules/gts/node_modules/chalk": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
+ "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+ "dev": true,
+ "dependencies": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/chalk?sponsor=1"
+ }
+ },
+ "node_modules/gts/node_modules/color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "dev": true,
+ "dependencies": {
+ "color-name": "~1.1.4"
+ },
+ "engines": {
+ "node": ">=7.0.0"
+ }
+ },
+ "node_modules/gts/node_modules/color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
+ "dev": true
+ },
+ "node_modules/gts/node_modules/escape-string-regexp": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz",
+ "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/gts/node_modules/eslint": {
+ "version": "8.50.0",
+ "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.50.0.tgz",
+ "integrity": "sha512-FOnOGSuFuFLv/Sa+FDVRZl4GGVAAFFi8LecRsI5a1tMO5HIE8nCm4ivAlzt4dT3ol/PaaGC0rJEEXQmHJBGoOg==",
+ "dev": true,
+ "dependencies": {
+ "@eslint-community/eslint-utils": "^4.2.0",
+ "@eslint-community/regexpp": "^4.6.1",
+ "@eslint/eslintrc": "^2.1.2",
+ "@eslint/js": "8.50.0",
+ "@humanwhocodes/config-array": "^0.11.11",
+ "@humanwhocodes/module-importer": "^1.0.1",
+ "@nodelib/fs.walk": "^1.2.8",
+ "ajv": "^6.12.4",
+ "chalk": "^4.0.0",
+ "cross-spawn": "^7.0.2",
+ "debug": "^4.3.2",
+ "doctrine": "^3.0.0",
+ "escape-string-regexp": "^4.0.0",
+ "eslint-scope": "^7.2.2",
+ "eslint-visitor-keys": "^3.4.3",
+ "espree": "^9.6.1",
+ "esquery": "^1.4.2",
+ "esutils": "^2.0.2",
+ "fast-deep-equal": "^3.1.3",
+ "file-entry-cache": "^6.0.1",
+ "find-up": "^5.0.0",
+ "glob-parent": "^6.0.2",
+ "globals": "^13.19.0",
+ "graphemer": "^1.4.0",
+ "ignore": "^5.2.0",
+ "imurmurhash": "^0.1.4",
+ "is-glob": "^4.0.0",
+ "is-path-inside": "^3.0.3",
+ "js-yaml": "^4.1.0",
+ "json-stable-stringify-without-jsonify": "^1.0.1",
+ "levn": "^0.4.1",
+ "lodash.merge": "^4.6.2",
+ "minimatch": "^3.1.2",
+ "natural-compare": "^1.4.0",
+ "optionator": "^0.9.3",
+ "strip-ansi": "^6.0.1",
+ "text-table": "^0.2.0"
+ },
+ "bin": {
+ "eslint": "bin/eslint.js"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/eslint"
+ }
+ },
+ "node_modules/gts/node_modules/estraverse": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz",
+ "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==",
+ "dev": true,
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/gts/node_modules/glob-parent": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz",
+ "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==",
+ "dev": true,
+ "dependencies": {
+ "is-glob": "^4.0.3"
+ },
+ "engines": {
+ "node": ">=10.13.0"
+ }
+ },
+ "node_modules/gts/node_modules/globals": {
+ "version": "13.23.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-13.23.0.tgz",
+ "integrity": "sha512-XAmF0RjlrjY23MA51q3HltdlGxUpXPvg0GioKiD9X6HD28iMjo2dKC8Vqwm7lne4GNr78+RHTfliktR6ZH09wA==",
+ "dev": true,
+ "dependencies": {
+ "type-fest": "^0.20.2"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/gts/node_modules/has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/gts/node_modules/js-yaml": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
+ "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
+ "dev": true,
+ "dependencies": {
+ "argparse": "^2.0.1"
+ },
+ "bin": {
+ "js-yaml": "bin/js-yaml.js"
+ }
+ },
+ "node_modules/gts/node_modules/ncp": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ncp/-/ncp-2.0.0.tgz",
+ "integrity": "sha1-GVoh1sRuNh0vsSgbo4uR6d9727M=",
+ "dev": true,
+ "bin": {
+ "ncp": "bin/ncp"
+ }
+ },
+ "node_modules/gts/node_modules/semver": {
+ "version": "7.5.4",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
+ "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
+ "dev": true,
+ "dependencies": {
+ "lru-cache": "^6.0.0"
+ },
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/gts/node_modules/supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "dev": true,
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/gts/node_modules/type-fest": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz",
+ "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/hard-rejection": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/hard-rejection/-/hard-rejection-2.1.0.tgz",
+ "integrity": "sha512-VIZB+ibDhx7ObhAe7OVtoEbuP4h/MuOTHJ+J8h/eBXotJYl0fBgR72xDFCKgIh22OJZIOVNxBMWuhAr10r8HdA==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/has-ansi": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz",
+ "integrity": "sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE=",
+ "dev": true,
+ "dependencies": {
+ "ansi-regex": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/has-ansi/node_modules/ansi-regex": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz",
+ "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/has-bigints": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.2.tgz",
+ "integrity": "sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==",
+ "dev": true,
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-flag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
+ "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==",
+ "dev": true,
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/has-property-descriptors": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.1.tgz",
+ "integrity": "sha512-VsX8eaIewvas0xnvinAe9bw4WfIeODpGYikiWYLH+dma0Jw6KHYqWiWfhQlgOVK8D6PvjubK5Uc4P0iIhIcNVg==",
+ "dev": true,
+ "dependencies": {
+ "get-intrinsic": "^1.2.2"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz",
+ "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-symbols": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz",
+ "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-tostringtag": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz",
+ "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==",
+ "dev": true,
+ "dependencies": {
+ "has-symbols": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-value": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz",
+ "integrity": "sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc=",
+ "dev": true,
+ "dependencies": {
+ "get-value": "^2.0.6",
+ "has-values": "^1.0.0",
+ "isobject": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/has-values": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/has-values/-/has-values-1.0.0.tgz",
+ "integrity": "sha1-lbC2P+whRmGab+V/51Yo1aOe/k8=",
+ "dev": true,
+ "dependencies": {
+ "is-number": "^3.0.0",
+ "kind-of": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/has-values/node_modules/is-number": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz",
+ "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=",
+ "dev": true,
+ "dependencies": {
+ "kind-of": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/has-values/node_modules/is-number/node_modules/kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dev": true,
+ "dependencies": {
+ "is-buffer": "^1.1.5"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/has-values/node_modules/kind-of": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-4.0.0.tgz",
+ "integrity": "sha1-IIE989cSkosgc3hpGkUGb65y3Vc=",
+ "dev": true,
+ "dependencies": {
+ "is-buffer": "^1.1.5"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/hasown": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.0.tgz",
+ "integrity": "sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA==",
+ "dev": true,
+ "dependencies": {
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/he": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz",
+ "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==",
+ "dev": true,
+ "bin": {
+ "he": "bin/he"
+ }
+ },
+ "node_modules/homedir-polyfill": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/homedir-polyfill/-/homedir-polyfill-1.0.3.tgz",
+ "integrity": "sha512-eSmmWE5bZTK2Nou4g0AI3zZ9rswp7GRKoKXS1BLUkvPviOqs4YTN1djQIqrXy9k5gEtdLPy86JjRwsNM9tnDcA==",
+ "dev": true,
+ "dependencies": {
+ "parse-passwd": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/hooker": {
+ "version": "0.2.3",
+ "resolved": "https://registry.npmjs.org/hooker/-/hooker-0.2.3.tgz",
+ "integrity": "sha1-uDT3I8xKJCqmWWNFnfbZhMXT2Vk=",
+ "dev": true,
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/hosted-git-info": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-4.1.0.tgz",
+ "integrity": "sha512-kyCuEOWjJqZuDbRHzL8V93NzQhwIB71oFWSyzVo+KPZI+pnQPPxucdkrOZvkLRnrf5URsQM+IJ09Dw29cRALIA==",
+ "dev": true,
+ "dependencies": {
+ "lru-cache": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/html-encoding-sniffer": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-3.0.0.tgz",
+ "integrity": "sha512-oWv4T4yJ52iKrufjnyZPkrN0CH3QnrUqdB6In1g5Fe1mia8GmF36gnfNySxoZtxD5+NmYw1EElVXiBk93UeskA==",
+ "dev": true,
+ "dependencies": {
+ "whatwg-encoding": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/http-errors": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz",
+ "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==",
+ "dev": true,
+ "dependencies": {
+ "depd": "2.0.0",
+ "inherits": "2.0.4",
+ "setprototypeof": "1.2.0",
+ "statuses": "2.0.1",
+ "toidentifier": "1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/http-errors/node_modules/depd": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
+ "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/http-errors/node_modules/statuses": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
+ "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/http-proxy": {
+ "version": "1.18.1",
+ "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz",
+ "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==",
+ "dev": true,
+ "dependencies": {
+ "eventemitter3": "^4.0.0",
+ "follow-redirects": "^1.0.0",
+ "requires-port": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=8.0.0"
+ }
+ },
+ "node_modules/http-server": {
+ "version": "14.1.1",
+ "resolved": "https://registry.npmjs.org/http-server/-/http-server-14.1.1.tgz",
+ "integrity": "sha512-+cbxadF40UXd9T01zUHgA+rlo2Bg1Srer4+B4NwIHdaGxAGGv59nYRnGGDJ9LBk7alpS0US+J+bLLdQOOkJq4A==",
+ "dev": true,
+ "dependencies": {
+ "basic-auth": "^2.0.1",
+ "chalk": "^4.1.2",
+ "corser": "^2.0.1",
+ "he": "^1.2.0",
+ "html-encoding-sniffer": "^3.0.0",
+ "http-proxy": "^1.18.1",
+ "mime": "^1.6.0",
+ "minimist": "^1.2.6",
+ "opener": "^1.5.1",
+ "portfinder": "^1.0.28",
+ "secure-compare": "3.0.1",
+ "union": "~0.5.0",
+ "url-join": "^4.0.1"
+ },
+ "bin": {
+ "http-server": "bin/http-server"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/http-server/node_modules/ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "dev": true,
+ "dependencies": {
+ "color-convert": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/http-server/node_modules/chalk": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
+ "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+ "dev": true,
+ "dependencies": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/chalk?sponsor=1"
+ }
+ },
+ "node_modules/http-server/node_modules/color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "dev": true,
+ "dependencies": {
+ "color-name": "~1.1.4"
+ },
+ "engines": {
+ "node": ">=7.0.0"
+ }
+ },
+ "node_modules/http-server/node_modules/color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
+ "dev": true
+ },
+ "node_modules/http-server/node_modules/has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/http-server/node_modules/supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "dev": true,
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/human-signals": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz",
+ "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==",
+ "dev": true,
+ "engines": {
+ "node": ">=10.17.0"
+ }
+ },
+ "node_modules/iconv-lite": {
+ "version": "0.4.24",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
+ "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
+ "dev": true,
+ "dependencies": {
+ "safer-buffer": ">= 2.1.2 < 3"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/ignore": {
+ "version": "5.2.4",
+ "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz",
+ "integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==",
+ "dev": true,
+ "engines": {
+ "node": ">= 4"
+ }
+ },
+ "node_modules/import-fresh": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz",
+ "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==",
+ "dev": true,
+ "dependencies": {
+ "parent-module": "^1.0.0",
+ "resolve-from": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/imurmurhash": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz",
+ "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.8.19"
+ }
+ },
+ "node_modules/indent-string": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz",
+ "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/inflight": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
+ "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=",
+ "dev": true,
+ "dependencies": {
+ "once": "^1.3.0",
+ "wrappy": "1"
+ }
+ },
+ "node_modules/inherits": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
+ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
+ "dev": true
+ },
+ "node_modules/inquirer": {
+ "version": "7.3.3",
+ "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-7.3.3.tgz",
+ "integrity": "sha512-JG3eIAj5V9CwcGvuOmoo6LB9kbAYT8HXffUl6memuszlwDC/qvFAJw49XJ5NROSFNPxp3iQg1GqkFhaY/CR0IA==",
+ "dev": true,
+ "dependencies": {
+ "ansi-escapes": "^4.2.1",
+ "chalk": "^4.1.0",
+ "cli-cursor": "^3.1.0",
+ "cli-width": "^3.0.0",
+ "external-editor": "^3.0.3",
+ "figures": "^3.0.0",
+ "lodash": "^4.17.19",
+ "mute-stream": "0.0.8",
+ "run-async": "^2.4.0",
+ "rxjs": "^6.6.0",
+ "string-width": "^4.1.0",
+ "strip-ansi": "^6.0.0",
+ "through": "^2.3.6"
+ },
+ "engines": {
+ "node": ">=8.0.0"
+ }
+ },
+ "node_modules/inquirer/node_modules/ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "dev": true,
+ "dependencies": {
+ "color-convert": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/inquirer/node_modules/chalk": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
+ "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+ "dev": true,
+ "dependencies": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/chalk?sponsor=1"
+ }
+ },
+ "node_modules/inquirer/node_modules/color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "dev": true,
+ "dependencies": {
+ "color-name": "~1.1.4"
+ },
+ "engines": {
+ "node": ">=7.0.0"
+ }
+ },
+ "node_modules/inquirer/node_modules/color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
+ "dev": true
+ },
+ "node_modules/inquirer/node_modules/has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/inquirer/node_modules/supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "dev": true,
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/internal-slot": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.6.tgz",
+ "integrity": "sha512-Xj6dv+PsbtwyPpEflsejS+oIZxmMlV44zAhG479uYu89MsjcYOhCFnNyKrkJrihbsiasQyY0afoCl/9BLR65bg==",
+ "dev": true,
+ "dependencies": {
+ "get-intrinsic": "^1.2.2",
+ "hasown": "^2.0.0",
+ "side-channel": "^1.0.4"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/interpret": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.1.0.tgz",
+ "integrity": "sha1-ftGxQQxqDg94z5XTuEQMY/eLhhQ=",
+ "dev": true
+ },
+ "node_modules/ipaddr.js": {
+ "version": "1.9.1",
+ "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
+ "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/is-absolute": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-absolute/-/is-absolute-1.0.0.tgz",
+ "integrity": "sha512-dOWoqflvcydARa360Gvv18DZ/gRuHKi2NU/wU5X1ZFzdYfH29nkiNZsF3mp4OJ3H4yo9Mx8A/uAGNzpzPN3yBA==",
+ "dev": true,
+ "dependencies": {
+ "is-relative": "^1.0.0",
+ "is-windows": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-accessor-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz",
+ "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=",
+ "dev": true,
+ "dependencies": {
+ "kind-of": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-accessor-descriptor/node_modules/kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dev": true,
+ "dependencies": {
+ "is-buffer": "^1.1.5"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-array-buffer": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.2.tgz",
+ "integrity": "sha512-y+FyyR/w8vfIRq4eQcM1EYgSTnmHXPqaF+IgzgraytCFq5Xh8lllDVmAZolPJiZttZLeFSINPYMaEJ7/vWUa1w==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "get-intrinsic": "^1.2.0",
+ "is-typed-array": "^1.1.10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-arrayish": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz",
+ "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=",
+ "dev": true
+ },
+ "node_modules/is-bigint": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz",
+ "integrity": "sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==",
+ "dev": true,
+ "dependencies": {
+ "has-bigints": "^1.0.1"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-binary-path": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz",
+ "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==",
+ "dev": true,
+ "dependencies": {
+ "binary-extensions": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-boolean-object": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.2.tgz",
+ "integrity": "sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "has-tostringtag": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-buffer": {
+ "version": "1.1.6",
+ "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz",
+ "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==",
+ "dev": true
+ },
+ "node_modules/is-callable": {
+ "version": "1.2.7",
+ "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz",
+ "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-core-module": {
+ "version": "2.13.1",
+ "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.1.tgz",
+ "integrity": "sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==",
+ "dev": true,
+ "dependencies": {
+ "hasown": "^2.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-data-descriptor": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz",
+ "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=",
+ "dev": true,
+ "dependencies": {
+ "kind-of": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-data-descriptor/node_modules/kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dev": true,
+ "dependencies": {
+ "is-buffer": "^1.1.5"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-date-object": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.5.tgz",
+ "integrity": "sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==",
+ "dev": true,
+ "dependencies": {
+ "has-tostringtag": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz",
+ "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==",
+ "dev": true,
+ "dependencies": {
+ "is-accessor-descriptor": "^0.1.6",
+ "is-data-descriptor": "^0.1.4",
+ "kind-of": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-descriptor/node_modules/kind-of": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz",
+ "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-docker": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-3.0.0.tgz",
+ "integrity": "sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==",
+ "dev": true,
+ "bin": {
+ "is-docker": "cli.js"
+ },
+ "engines": {
+ "node": "^12.20.0 || ^14.13.1 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/is-extendable": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz",
+ "integrity": "sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-extglob": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
+ "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-finite": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/is-finite/-/is-finite-1.1.0.tgz",
+ "integrity": "sha512-cdyMtqX/BOqqNBBiKlIVkytNHm49MtMlYyn1zxzvJKWmFMlGzm+ry5BBfYyeY9YmNKbRSo/o7OX9w9ale0wg3w==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/is-fullwidth-code-point": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
+ "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-glob": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
+ "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==",
+ "dev": true,
+ "dependencies": {
+ "is-extglob": "^2.1.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-inside-container": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-inside-container/-/is-inside-container-1.0.0.tgz",
+ "integrity": "sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==",
+ "dev": true,
+ "dependencies": {
+ "is-docker": "^3.0.0"
+ },
+ "bin": {
+ "is-inside-container": "cli.js"
+ },
+ "engines": {
+ "node": ">=14.16"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/is-negative-zero": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.2.tgz",
+ "integrity": "sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-number": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
+ "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.12.0"
+ }
+ },
+ "node_modules/is-number-object": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.7.tgz",
+ "integrity": "sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==",
+ "dev": true,
+ "dependencies": {
+ "has-tostringtag": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-path-inside": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz",
+ "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-plain-obj": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz",
+ "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-plain-object": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz",
+ "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==",
+ "dev": true,
+ "dependencies": {
+ "isobject": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-regex": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz",
+ "integrity": "sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "has-tostringtag": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-relative": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-relative/-/is-relative-1.0.0.tgz",
+ "integrity": "sha512-Kw/ReK0iqwKeu0MITLFuj0jbPAmEiOsIwyIXvvbfa6QfmN9pkD1M+8pdk7Rl/dTKbH34/XBFMbgD4iMJhLQbGA==",
+ "dev": true,
+ "dependencies": {
+ "is-unc-path": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-shared-array-buffer": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.2.tgz",
+ "integrity": "sha512-sqN2UDu1/0y6uvXyStCOzyhAjCSlHceFoMKJW8W9EU9cvic/QdsZ0kEU93HEy3IUEFZIiH/3w+AH/UQbPHNdhA==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-stream": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz",
+ "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/is-string": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.7.tgz",
+ "integrity": "sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==",
+ "dev": true,
+ "dependencies": {
+ "has-tostringtag": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-symbol": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.4.tgz",
+ "integrity": "sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==",
+ "dev": true,
+ "dependencies": {
+ "has-symbols": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-typed-array": {
+ "version": "1.1.12",
+ "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.12.tgz",
+ "integrity": "sha512-Z14TF2JNG8Lss5/HMqt0//T9JeHXttXy5pH/DBU4vi98ozO2btxzq9MwYDZYnKwU8nRsz/+GVFVRDq3DkVuSPg==",
+ "dev": true,
+ "dependencies": {
+ "which-typed-array": "^1.1.11"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-unc-path": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-unc-path/-/is-unc-path-1.0.0.tgz",
+ "integrity": "sha512-mrGpVd0fs7WWLfVsStvgF6iEJnbjDFZh9/emhRDcGWTduTfNHd9CHeUwH3gYIjdbwo4On6hunkztwOaAw0yllQ==",
+ "dev": true,
+ "dependencies": {
+ "unc-path-regex": "^0.1.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-utf8": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/is-utf8/-/is-utf8-0.2.1.tgz",
+ "integrity": "sha1-Sw2hRCEE0bM2NA6AeX6GXPOffXI=",
+ "dev": true
+ },
+ "node_modules/is-weakref": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.0.2.tgz",
+ "integrity": "sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-windows": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz",
+ "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-wsl": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz",
+ "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==",
+ "dev": true,
+ "dependencies": {
+ "is-docker": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-wsl/node_modules/is-docker": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz",
+ "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==",
+ "dev": true,
+ "bin": {
+ "is-docker": "cli.js"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/isarray": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
+ "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=",
+ "dev": true
+ },
+ "node_modules/isexe": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
+ "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=",
+ "dev": true
+ },
+ "node_modules/isobject": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz",
+ "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/js-tokens": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
+ "dev": true
+ },
+ "node_modules/js-yaml": {
+ "version": "3.14.1",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz",
+ "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==",
+ "dev": true,
+ "dependencies": {
+ "argparse": "^1.0.7",
+ "esprima": "^4.0.0"
+ },
+ "bin": {
+ "js-yaml": "bin/js-yaml.js"
+ }
+ },
+ "node_modules/jsesc": {
+ "version": "2.5.2",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz",
+ "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==",
+ "dev": true,
+ "bin": {
+ "jsesc": "bin/jsesc"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/jsmin2": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/jsmin2/-/jsmin2-1.2.1.tgz",
+ "integrity": "sha1-iPvi+/dfCpH2YCD9mBzWk/S/5X4=",
+ "dev": true
+ },
+ "node_modules/json-parse-even-better-errors": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz",
+ "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==",
+ "dev": true
+ },
+ "node_modules/json-schema-traverse": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
+ "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==",
+ "dev": true
+ },
+ "node_modules/json-stable-stringify-without-jsonify": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz",
+ "integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=",
+ "dev": true
+ },
+ "node_modules/json5": {
+ "version": "2.2.3",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
+ "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
+ "dev": true,
+ "bin": {
+ "json5": "lib/cli.js"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/jsonc-parser": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/jsonc-parser/-/jsonc-parser-3.2.0.tgz",
+ "integrity": "sha512-gfFQZrcTc8CnKXp6Y4/CBT3fTc0OVuDofpre4aEeEpSBPV5X5v4+Vmx+8snU7RLPrNHPKSgLxGo9YuQzz20o+w==",
+ "dev": true
+ },
+ "node_modules/kind-of": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz",
+ "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/levn": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz",
+ "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==",
+ "dev": true,
+ "dependencies": {
+ "prelude-ls": "^1.2.1",
+ "type-check": "~0.4.0"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/liftup": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/liftup/-/liftup-3.0.1.tgz",
+ "integrity": "sha512-yRHaiQDizWSzoXk3APcA71eOI/UuhEkNN9DiW2Tt44mhYzX4joFoCZlxsSOF7RyeLlfqzFLQI1ngFq3ggMPhOw==",
+ "dev": true,
+ "dependencies": {
+ "extend": "^3.0.2",
+ "findup-sync": "^4.0.0",
+ "fined": "^1.2.0",
+ "flagged-respawn": "^1.0.1",
+ "is-plain-object": "^2.0.4",
+ "object.map": "^1.0.1",
+ "rechoir": "^0.7.0",
+ "resolve": "^1.19.0"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/liftup/node_modules/findup-sync": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/findup-sync/-/findup-sync-4.0.0.tgz",
+ "integrity": "sha512-6jvvn/12IC4quLBL1KNokxC7wWTvYncaVUYSoxWw7YykPLuRrnv4qdHcSOywOI5RpkOVGeQRtWM8/q+G6W6qfQ==",
+ "dev": true,
+ "dependencies": {
+ "detect-file": "^1.0.0",
+ "is-glob": "^4.0.0",
+ "micromatch": "^4.0.2",
+ "resolve-dir": "^1.0.1"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/lines-and-columns": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz",
+ "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==",
+ "dev": true
+ },
+ "node_modules/locate-path": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz",
+ "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==",
+ "dev": true,
+ "dependencies": {
+ "p-locate": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/lodash": {
+ "version": "4.17.21",
+ "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
+ "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==",
+ "dev": true
+ },
+ "node_modules/lodash.merge": {
+ "version": "4.6.2",
+ "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz",
+ "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==",
+ "dev": true
+ },
+ "node_modules/lru-cache": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
+ "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
+ "dev": true,
+ "dependencies": {
+ "yallist": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/lunr": {
+ "version": "2.3.9",
+ "resolved": "https://registry.npmjs.org/lunr/-/lunr-2.3.9.tgz",
+ "integrity": "sha512-zTU3DaZaF3Rt9rhN3uBMGQD3dD2/vFQqnvZCDv4dl5iOzq2IZQqTxu90r4E5J+nP70J3ilqVCrbho2eWaeW8Ow==",
+ "dev": true
+ },
+ "node_modules/make-dir": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz",
+ "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==",
+ "dev": true,
+ "dependencies": {
+ "pify": "^4.0.1",
+ "semver": "^5.6.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/make-dir/node_modules/semver": {
+ "version": "5.7.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
+ "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==",
+ "dev": true,
+ "bin": {
+ "semver": "bin/semver"
+ }
+ },
+ "node_modules/make-error": {
+ "version": "1.3.6",
+ "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz",
+ "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==",
+ "dev": true
+ },
+ "node_modules/make-iterator": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/make-iterator/-/make-iterator-1.0.1.tgz",
+ "integrity": "sha512-pxiuXh0iVEq7VM7KMIhs5gxsfxCux2URptUQaXo4iZZJxBAzTPOLE2BumO5dbfVYq/hBJFBR/a1mFDmOx5AGmw==",
+ "dev": true,
+ "dependencies": {
+ "kind-of": "^6.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/map-cache": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz",
+ "integrity": "sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/map-obj": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-4.3.0.tgz",
+ "integrity": "sha512-hdN1wVrZbb29eBGiGjJbeP8JbKjq1urkHJ/LIP/NY48MZ1QVXUsQBV1G1zvYFHn1XE06cwjBsOI2K3Ulnj1YXQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/map-visit": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/map-visit/-/map-visit-1.0.0.tgz",
+ "integrity": "sha1-7Nyo8TFE5mDxtb1B8S80edmN+48=",
+ "dev": true,
+ "dependencies": {
+ "object-visit": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/marked": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/marked/-/marked-4.3.0.tgz",
+ "integrity": "sha512-PRsaiG84bK+AMvxziE/lCFss8juXjNaWzVbN5tXAm4XjeaS9NAHhop+PjQxz2A9h8Q4M/xGmzP8vqNwy6JeK0A==",
+ "dev": true,
+ "bin": {
+ "marked": "bin/marked.js"
+ },
+ "engines": {
+ "node": ">= 12"
+ }
+ },
+ "node_modules/media-typer": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
+ "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/meow": {
+ "version": "9.0.0",
+ "resolved": "https://registry.npmjs.org/meow/-/meow-9.0.0.tgz",
+ "integrity": "sha512-+obSblOQmRhcyBt62furQqRAQpNyWXo8BuQ5bN7dG8wmwQ+vwHKp/rCFD4CrTP8CsDQD1sjoZ94K417XEUk8IQ==",
+ "dev": true,
+ "dependencies": {
+ "@types/minimist": "^1.2.0",
+ "camelcase-keys": "^6.2.2",
+ "decamelize": "^1.2.0",
+ "decamelize-keys": "^1.1.0",
+ "hard-rejection": "^2.1.0",
+ "minimist-options": "4.1.0",
+ "normalize-package-data": "^3.0.0",
+ "read-pkg-up": "^7.0.1",
+ "redent": "^3.0.0",
+ "trim-newlines": "^3.0.0",
+ "type-fest": "^0.18.0",
+ "yargs-parser": "^20.2.3"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/meow/node_modules/type-fest": {
+ "version": "0.18.1",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.18.1.tgz",
+ "integrity": "sha512-OIAYXk8+ISY+qTOwkHtKqzAuxchoMiD9Udx+FSGQDuiRR+PJKJHc2NJAXlbhkGwTt/4/nKZxELY1w3ReWOL8mw==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/merge-descriptors": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz",
+ "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E=",
+ "dev": true
+ },
+ "node_modules/merge-stream": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz",
+ "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==",
+ "dev": true
+ },
+ "node_modules/merge2": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
+ "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==",
+ "dev": true,
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/methods": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
+ "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4=",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/micromatch": {
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.4.tgz",
+ "integrity": "sha512-pRmzw/XUcwXGpD9aI9q/0XOwLNygjETJ8y0ao0wdqprrzDa4YnxLcz7fQRZr8voh8V10kGhABbNcHVk5wHgWwg==",
+ "dev": true,
+ "dependencies": {
+ "braces": "^3.0.1",
+ "picomatch": "^2.2.3"
+ },
+ "engines": {
+ "node": ">=8.6"
+ }
+ },
+ "node_modules/mime": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz",
+ "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==",
+ "dev": true,
+ "bin": {
+ "mime": "cli.js"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "dev": true,
+ "dependencies": {
+ "mime-db": "1.52.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mimic-fn": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz",
+ "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/min-indent": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz",
+ "integrity": "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==",
+ "dev": true,
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/minimatch": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
+ "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
+ "dev": true,
+ "dependencies": {
+ "brace-expansion": "^1.1.7"
+ },
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/minimist": {
+ "version": "1.2.7",
+ "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.7.tgz",
+ "integrity": "sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==",
+ "dev": true,
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/minimist-options": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/minimist-options/-/minimist-options-4.1.0.tgz",
+ "integrity": "sha512-Q4r8ghd80yhO/0j1O3B2BjweX3fiHg9cdOwjJd2J76Q135c+NDxGCqdYKQ1SKBuFfgWbAUzBfvYjPUEeNgqN1A==",
+ "dev": true,
+ "dependencies": {
+ "arrify": "^1.0.1",
+ "is-plain-obj": "^1.1.0",
+ "kind-of": "^6.0.3"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/mixin-deep": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz",
+ "integrity": "sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==",
+ "dev": true,
+ "dependencies": {
+ "for-in": "^1.0.2",
+ "is-extendable": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/mixin-deep/node_modules/is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "dev": true,
+ "dependencies": {
+ "is-plain-object": "^2.0.4"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/morgan": {
+ "version": "1.10.0",
+ "resolved": "https://registry.npmjs.org/morgan/-/morgan-1.10.0.tgz",
+ "integrity": "sha512-AbegBVI4sh6El+1gNwvD5YIck7nSA36weD7xvIxG4in80j/UoK8AEGaWnnz8v1GxonMCltmlNs5ZKbGvl9b1XQ==",
+ "dev": true,
+ "dependencies": {
+ "basic-auth": "~2.0.1",
+ "debug": "2.6.9",
+ "depd": "~2.0.0",
+ "on-finished": "~2.3.0",
+ "on-headers": "~1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/morgan/node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/morgan/node_modules/depd": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
+ "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/morgan/node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=",
+ "dev": true
+ },
+ "node_modules/ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
+ "dev": true
+ },
+ "node_modules/mute-stream": {
+ "version": "0.0.8",
+ "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.8.tgz",
+ "integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==",
+ "dev": true
+ },
+ "node_modules/nan": {
+ "version": "2.15.0",
+ "resolved": "https://registry.npmjs.org/nan/-/nan-2.15.0.tgz",
+ "integrity": "sha512-8ZtvEnA2c5aYCZYd1cvgdnU6cqwixRoYg70xPLWUws5ORTa/lnw+u4amixRS/Ac5U5mQVgp9pnlSUnbNWFaWZQ==",
+ "dev": true,
+ "optional": true
+ },
+ "node_modules/nanomatch": {
+ "version": "1.2.13",
+ "resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz",
+ "integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==",
+ "dev": true,
+ "dependencies": {
+ "arr-diff": "^4.0.0",
+ "array-unique": "^0.3.2",
+ "define-property": "^2.0.2",
+ "extend-shallow": "^3.0.2",
+ "fragment-cache": "^0.2.1",
+ "is-windows": "^1.0.2",
+ "kind-of": "^6.0.2",
+ "object.pick": "^1.3.0",
+ "regex-not": "^1.0.0",
+ "snapdragon": "^0.8.1",
+ "to-regex": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/nanomatch/node_modules/define-property": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz",
+ "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==",
+ "dev": true,
+ "dependencies": {
+ "is-descriptor": "^1.0.2",
+ "isobject": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/nanomatch/node_modules/extend-shallow": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+ "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+ "dev": true,
+ "dependencies": {
+ "assign-symbols": "^1.0.0",
+ "is-extendable": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/nanomatch/node_modules/is-accessor-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+ "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+ "dev": true,
+ "dependencies": {
+ "kind-of": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/nanomatch/node_modules/is-data-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+ "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+ "dev": true,
+ "dependencies": {
+ "kind-of": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/nanomatch/node_modules/is-descriptor": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+ "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+ "dev": true,
+ "dependencies": {
+ "is-accessor-descriptor": "^1.0.0",
+ "is-data-descriptor": "^1.0.0",
+ "kind-of": "^6.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/nanomatch/node_modules/is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "dev": true,
+ "dependencies": {
+ "is-plain-object": "^2.0.4"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/natural-compare": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz",
+ "integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=",
+ "dev": true
+ },
+ "node_modules/natural-compare-lite": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/natural-compare-lite/-/natural-compare-lite-1.4.0.tgz",
+ "integrity": "sha512-Tj+HTDSJJKaZnfiuw+iaF9skdPpTo2GtEly5JHnWV/hfv2Qj/9RKsGISQtLh2ox3l5EAGw487hnBee0sIJ6v2g==",
+ "dev": true
+ },
+ "node_modules/ncp": {
+ "version": "0.5.1",
+ "resolved": "https://registry.npmjs.org/ncp/-/ncp-0.5.1.tgz",
+ "integrity": "sha1-dDmFMW49tFkoG1hxaehFc1oFQ58=",
+ "dev": true,
+ "bin": {
+ "ncp": "bin/ncp"
+ }
+ },
+ "node_modules/negotiator": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz",
+ "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/next-tick": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.1.0.tgz",
+ "integrity": "sha512-CXdUiJembsNjuToQvxayPZF9Vqht7hewsvy2sOWafLvi2awflj9mOC6bHIg50orX8IJvWKY9wYQ/zB2kogPslQ==",
+ "dev": true
+ },
+ "node_modules/node-releases": {
+ "version": "2.0.13",
+ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.13.tgz",
+ "integrity": "sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ==",
+ "dev": true
+ },
+ "node_modules/nopt": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/nopt/-/nopt-3.0.6.tgz",
+ "integrity": "sha1-xkZdvwirzU2zWTF/eaxopkayj/k=",
+ "dev": true,
+ "dependencies": {
+ "abbrev": "1"
+ },
+ "bin": {
+ "nopt": "bin/nopt.js"
+ }
+ },
+ "node_modules/normalize-package-data": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-3.0.3.tgz",
+ "integrity": "sha512-p2W1sgqij3zMMyRC067Dg16bfzVH+w7hyegmpIvZ4JNjqtGOVAIvLmjBx3yP7YTe9vKJgkoNOPjwQGogDoMXFA==",
+ "dev": true,
+ "dependencies": {
+ "hosted-git-info": "^4.0.1",
+ "is-core-module": "^2.5.0",
+ "semver": "^7.3.4",
+ "validate-npm-package-license": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/normalize-package-data/node_modules/semver": {
+ "version": "7.3.5",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz",
+ "integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==",
+ "dev": true,
+ "dependencies": {
+ "lru-cache": "^6.0.0"
+ },
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/normalize-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
+ "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/npm-run-path": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz",
+ "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==",
+ "dev": true,
+ "dependencies": {
+ "path-key": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/object-copy": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/object-copy/-/object-copy-0.1.0.tgz",
+ "integrity": "sha1-fn2Fi3gb18mRpBupde04EnVOmYw=",
+ "dev": true,
+ "dependencies": {
+ "copy-descriptor": "^0.1.0",
+ "define-property": "^0.2.5",
+ "kind-of": "^3.0.3"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object-copy/node_modules/kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dev": true,
+ "dependencies": {
+ "is-buffer": "^1.1.5"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object-inspect": {
+ "version": "1.13.1",
+ "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz",
+ "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==",
+ "dev": true,
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/object-keys": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz",
+ "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/object-visit": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/object-visit/-/object-visit-1.0.1.tgz",
+ "integrity": "sha1-95xEk68MU3e1n+OdOV5BBC3QRbs=",
+ "dev": true,
+ "dependencies": {
+ "isobject": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object.assign": {
+ "version": "4.1.4",
+ "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.4.tgz",
+ "integrity": "sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.4",
+ "has-symbols": "^1.0.3",
+ "object-keys": "^1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/object.defaults": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/object.defaults/-/object.defaults-1.1.0.tgz",
+ "integrity": "sha1-On+GgzS0B96gbaFtiNXNKeQ1/s8=",
+ "dev": true,
+ "dependencies": {
+ "array-each": "^1.0.1",
+ "array-slice": "^1.0.0",
+ "for-own": "^1.0.0",
+ "isobject": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object.fromentries": {
+ "version": "2.0.7",
+ "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.7.tgz",
+ "integrity": "sha512-UPbPHML6sL8PI/mOqPwsH4G6iyXcCGzLin8KvEPenOZN5lpCNBZZQ+V62vdjB1mQHrmqGQt5/OJzemUA+KJmEA==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/object.groupby": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/object.groupby/-/object.groupby-1.0.1.tgz",
+ "integrity": "sha512-HqaQtqLnp/8Bn4GL16cj+CUYbnpe1bh0TtEaWvybszDG4tgxCJuRpV8VGuvNaI1fAnI4lUJzDG55MXcOH4JZcQ==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1",
+ "get-intrinsic": "^1.2.1"
+ }
+ },
+ "node_modules/object.map": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/object.map/-/object.map-1.0.1.tgz",
+ "integrity": "sha1-z4Plncj8wK1fQlDh94s7gb2AHTc=",
+ "dev": true,
+ "dependencies": {
+ "for-own": "^1.0.0",
+ "make-iterator": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object.pick": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/object.pick/-/object.pick-1.3.0.tgz",
+ "integrity": "sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c=",
+ "dev": true,
+ "dependencies": {
+ "isobject": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object.values": {
+ "version": "1.1.7",
+ "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.7.tgz",
+ "integrity": "sha512-aU6xnDFYT3x17e/f0IiiwlGPTy2jzMySGfUB4fq6z7CV8l85CWHDk5ErhyhpfDHhrOMwGFhSQkhMGHaIotA6Ng==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/on-finished": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
+ "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=",
+ "dev": true,
+ "dependencies": {
+ "ee-first": "1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/on-headers": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz",
+ "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/once": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
+ "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=",
+ "dev": true,
+ "dependencies": {
+ "wrappy": "1"
+ }
+ },
+ "node_modules/onetime": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz",
+ "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==",
+ "dev": true,
+ "dependencies": {
+ "mimic-fn": "^2.1.0"
+ },
+ "engines": {
+ "node": ">=6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/open": {
+ "version": "9.1.0",
+ "resolved": "https://registry.npmjs.org/open/-/open-9.1.0.tgz",
+ "integrity": "sha512-OS+QTnw1/4vrf+9hh1jc1jnYjzSG4ttTBB8UxOwAnInG3Uo4ssetzC1ihqaIHjLJnA5GGlRl6QlZXOTQhRBUvg==",
+ "dev": true,
+ "dependencies": {
+ "default-browser": "^4.0.0",
+ "define-lazy-prop": "^3.0.0",
+ "is-inside-container": "^1.0.0",
+ "is-wsl": "^2.2.0"
+ },
+ "engines": {
+ "node": ">=14.16"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/opener": {
+ "version": "1.5.2",
+ "resolved": "https://registry.npmjs.org/opener/-/opener-1.5.2.tgz",
+ "integrity": "sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==",
+ "dev": true,
+ "bin": {
+ "opener": "bin/opener-bin.js"
+ }
+ },
+ "node_modules/optionator": {
+ "version": "0.9.3",
+ "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.3.tgz",
+ "integrity": "sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg==",
+ "dev": true,
+ "dependencies": {
+ "@aashutoshrathi/word-wrap": "^1.2.3",
+ "deep-is": "^0.1.3",
+ "fast-levenshtein": "^2.0.6",
+ "levn": "^0.4.1",
+ "prelude-ls": "^1.2.1",
+ "type-check": "^0.4.0"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/os-homedir": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/os-homedir/-/os-homedir-1.0.2.tgz",
+ "integrity": "sha1-/7xJiDNuDoM94MFox+8VISGqf7M=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/os-tmpdir": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz",
+ "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/osenv": {
+ "version": "0.1.5",
+ "resolved": "https://registry.npmjs.org/osenv/-/osenv-0.1.5.tgz",
+ "integrity": "sha512-0CWcCECdMVc2Rw3U5w9ZjqX6ga6ubk1xDVKxtBQPK7wis/0F2r9T6k4ydGYhecl7YUBxBVxhL5oisPsNxAPe2g==",
+ "dev": true,
+ "dependencies": {
+ "os-homedir": "^1.0.0",
+ "os-tmpdir": "^1.0.0"
+ }
+ },
+ "node_modules/p-limit": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz",
+ "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==",
+ "dev": true,
+ "dependencies": {
+ "yocto-queue": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/p-locate": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz",
+ "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==",
+ "dev": true,
+ "dependencies": {
+ "p-limit": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/parent-module": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz",
+ "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==",
+ "dev": true,
+ "dependencies": {
+ "callsites": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/parse-filepath": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/parse-filepath/-/parse-filepath-1.0.2.tgz",
+ "integrity": "sha1-pjISf1Oq89FYdvWHLz/6x2PWyJE=",
+ "dev": true,
+ "dependencies": {
+ "is-absolute": "^1.0.0",
+ "map-cache": "^0.2.0",
+ "path-root": "^0.1.1"
+ },
+ "engines": {
+ "node": ">=0.8"
+ }
+ },
+ "node_modules/parse-json": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz",
+ "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==",
+ "dev": true,
+ "dependencies": {
+ "@babel/code-frame": "^7.0.0",
+ "error-ex": "^1.3.1",
+ "json-parse-even-better-errors": "^2.3.0",
+ "lines-and-columns": "^1.1.6"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/parse-passwd": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/parse-passwd/-/parse-passwd-1.0.0.tgz",
+ "integrity": "sha1-bVuTSkVpk7I9N/QKOC1vFmao5cY=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/parseurl": {
+ "version": "1.3.3",
+ "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
+ "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/pascalcase": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/pascalcase/-/pascalcase-0.1.1.tgz",
+ "integrity": "sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/path-dirname": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/path-dirname/-/path-dirname-1.0.2.tgz",
+ "integrity": "sha1-zDPSTVJeCZpTiMAzbG4yuRYGCeA=",
+ "dev": true
+ },
+ "node_modules/path-exists": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
+ "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/path-is-absolute": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
+ "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/path-key": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
+ "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/path-parse": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
+ "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==",
+ "dev": true
+ },
+ "node_modules/path-root": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/path-root/-/path-root-0.1.1.tgz",
+ "integrity": "sha1-mkpoFMrBwM1zNgqV8yCDyOpHRbc=",
+ "dev": true,
+ "dependencies": {
+ "path-root-regex": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/path-root-regex": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/path-root-regex/-/path-root-regex-0.1.2.tgz",
+ "integrity": "sha1-v8zcjfWxLcUsi0PsONGNcsBLqW0=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/path-to-regexp": {
+ "version": "0.1.7",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz",
+ "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w=",
+ "dev": true
+ },
+ "node_modules/path-type": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz",
+ "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/picocolors": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz",
+ "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==",
+ "dev": true
+ },
+ "node_modules/picomatch": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
+ "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
+ "dev": true,
+ "engines": {
+ "node": ">=8.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
+ }
+ },
+ "node_modules/pify": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz",
+ "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/playwright-core": {
+ "version": "1.39.0",
+ "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.39.0.tgz",
+ "integrity": "sha512-+k4pdZgs1qiM+OUkSjx96YiKsXsmb59evFoqv8SKO067qBA+Z2s/dCzJij/ZhdQcs2zlTAgRKfeiiLm8PQ2qvw==",
+ "dev": true,
+ "bin": {
+ "playwright-core": "cli.js"
+ },
+ "engines": {
+ "node": ">=16"
+ }
+ },
+ "node_modules/pngjs": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/pngjs/-/pngjs-7.0.0.tgz",
+ "integrity": "sha512-LKWqWJRhstyYo9pGvgor/ivk2w94eSjE3RGVuzLGlr3NmD8bf7RcYGze1mNdEHRP6TRP6rMuDHk5t44hnTRyow==",
+ "dev": true,
+ "engines": {
+ "node": ">=14.19.0"
+ }
+ },
+ "node_modules/portfinder": {
+ "version": "1.0.32",
+ "resolved": "https://registry.npmjs.org/portfinder/-/portfinder-1.0.32.tgz",
+ "integrity": "sha512-on2ZJVVDXRADWE6jnQaX0ioEylzgBpQk8r55NE4wjXW1ZxO+BgDlY6DXwj20i0V8eB4SenDQ00WEaxfiIQPcxg==",
+ "dev": true,
+ "dependencies": {
+ "async": "^2.6.4",
+ "debug": "^3.2.7",
+ "mkdirp": "^0.5.6"
+ },
+ "engines": {
+ "node": ">= 0.12.0"
+ }
+ },
+ "node_modules/portfinder/node_modules/async": {
+ "version": "2.6.4",
+ "resolved": "https://registry.npmjs.org/async/-/async-2.6.4.tgz",
+ "integrity": "sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA==",
+ "dev": true,
+ "dependencies": {
+ "lodash": "^4.17.14"
+ }
+ },
+ "node_modules/portfinder/node_modules/debug": {
+ "version": "3.2.7",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
+ "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
+ "dev": true,
+ "dependencies": {
+ "ms": "^2.1.1"
+ }
+ },
+ "node_modules/portfinder/node_modules/mkdirp": {
+ "version": "0.5.6",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz",
+ "integrity": "sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==",
+ "dev": true,
+ "dependencies": {
+ "minimist": "^1.2.6"
+ },
+ "bin": {
+ "mkdirp": "bin/cmd.js"
+ }
+ },
+ "node_modules/posix-character-classes": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/posix-character-classes/-/posix-character-classes-0.1.1.tgz",
+ "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/prelude-ls": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz",
+ "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/prettier": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.0.3.tgz",
+ "integrity": "sha512-L/4pUDMxcNa8R/EthV08Zt42WBO4h1rarVtK0K+QJG0X187OLo7l699jWw0GKuwzkPQ//jMFA/8Xm6Fh3J/DAg==",
+ "dev": true,
+ "bin": {
+ "prettier": "bin/prettier.cjs"
+ },
+ "engines": {
+ "node": ">=14"
+ },
+ "funding": {
+ "url": "https://github.com/prettier/prettier?sponsor=1"
+ }
+ },
+ "node_modules/prettier-linter-helpers": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/prettier-linter-helpers/-/prettier-linter-helpers-1.0.0.tgz",
+ "integrity": "sha512-GbK2cP9nraSSUF9N2XwUwqfzlAFlMNYYl+ShE/V+H8a9uNl/oUqB1w2EL54Jh0OlyRSd8RfWYJ3coVS4TROP2w==",
+ "dev": true,
+ "dependencies": {
+ "fast-diff": "^1.1.2"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/process-nextick-args": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz",
+ "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==",
+ "dev": true
+ },
+ "node_modules/proxy-addr": {
+ "version": "2.0.7",
+ "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz",
+ "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==",
+ "dev": true,
+ "dependencies": {
+ "forwarded": "0.2.0",
+ "ipaddr.js": "1.9.1"
+ },
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/punycode": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz",
+ "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/qs": {
+ "version": "6.11.0",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz",
+ "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==",
+ "dev": true,
+ "dependencies": {
+ "side-channel": "^1.0.4"
+ },
+ "engines": {
+ "node": ">=0.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/queue-microtask": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
+ "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ]
+ },
+ "node_modules/quick-lru": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-4.0.1.tgz",
+ "integrity": "sha512-ARhCpm70fzdcvNQfPoy49IaanKkTlRWF2JMzqhcJbhSFRZv7nPTvZJdcY7301IPmvW+/p0RgIWnQDLJxifsQ7g==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/range-parser": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
+ "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/raw-body": {
+ "version": "2.5.1",
+ "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.1.tgz",
+ "integrity": "sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==",
+ "dev": true,
+ "dependencies": {
+ "bytes": "3.1.2",
+ "http-errors": "2.0.0",
+ "iconv-lite": "0.4.24",
+ "unpipe": "1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/read-pkg": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-5.2.0.tgz",
+ "integrity": "sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==",
+ "dev": true,
+ "dependencies": {
+ "@types/normalize-package-data": "^2.4.0",
+ "normalize-package-data": "^2.5.0",
+ "parse-json": "^5.0.0",
+ "type-fest": "^0.6.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/read-pkg-up": {
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-7.0.1.tgz",
+ "integrity": "sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==",
+ "dev": true,
+ "dependencies": {
+ "find-up": "^4.1.0",
+ "read-pkg": "^5.2.0",
+ "type-fest": "^0.8.1"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/read-pkg-up/node_modules/find-up": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz",
+ "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==",
+ "dev": true,
+ "dependencies": {
+ "locate-path": "^5.0.0",
+ "path-exists": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/read-pkg-up/node_modules/locate-path": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz",
+ "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==",
+ "dev": true,
+ "dependencies": {
+ "p-locate": "^4.1.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/read-pkg-up/node_modules/p-limit": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz",
+ "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==",
+ "dev": true,
+ "dependencies": {
+ "p-try": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/read-pkg-up/node_modules/p-locate": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz",
+ "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==",
+ "dev": true,
+ "dependencies": {
+ "p-limit": "^2.2.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/read-pkg-up/node_modules/p-try": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz",
+ "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/read-pkg-up/node_modules/type-fest": {
+ "version": "0.8.1",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz",
+ "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/read-pkg/node_modules/hosted-git-info": {
+ "version": "2.8.9",
+ "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz",
+ "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==",
+ "dev": true
+ },
+ "node_modules/read-pkg/node_modules/normalize-package-data": {
+ "version": "2.5.0",
+ "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz",
+ "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==",
+ "dev": true,
+ "dependencies": {
+ "hosted-git-info": "^2.1.4",
+ "resolve": "^1.10.0",
+ "semver": "2 || 3 || 4 || 5",
+ "validate-npm-package-license": "^3.0.1"
+ }
+ },
+ "node_modules/read-pkg/node_modules/semver": {
+ "version": "5.7.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
+ "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==",
+ "dev": true,
+ "bin": {
+ "semver": "bin/semver"
+ }
+ },
+ "node_modules/read-pkg/node_modules/type-fest": {
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz",
+ "integrity": "sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/readable-stream": {
+ "version": "2.3.7",
+ "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz",
+ "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==",
+ "dev": true,
+ "dependencies": {
+ "core-util-is": "~1.0.0",
+ "inherits": "~2.0.3",
+ "isarray": "~1.0.0",
+ "process-nextick-args": "~2.0.0",
+ "safe-buffer": "~5.1.1",
+ "string_decoder": "~1.1.1",
+ "util-deprecate": "~1.0.1"
+ }
+ },
+ "node_modules/readdirp": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz",
+ "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==",
+ "dev": true,
+ "dependencies": {
+ "picomatch": "^2.2.1"
+ },
+ "engines": {
+ "node": ">=8.10.0"
+ }
+ },
+ "node_modules/rechoir": {
+ "version": "0.7.1",
+ "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.7.1.tgz",
+ "integrity": "sha512-/njmZ8s1wVeR6pjTZ+0nCnv8SpZNRMT2D1RLOJQESlYFDBvwpTA4KWJpZ+sBJ4+vhjILRcK7JIFdGCdxEAAitg==",
+ "dev": true,
+ "dependencies": {
+ "resolve": "^1.9.0"
+ },
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/redent": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz",
+ "integrity": "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==",
+ "dev": true,
+ "dependencies": {
+ "indent-string": "^4.0.0",
+ "strip-indent": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/regex-not": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz",
+ "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==",
+ "dev": true,
+ "dependencies": {
+ "extend-shallow": "^3.0.2",
+ "safe-regex": "^1.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/regex-not/node_modules/extend-shallow": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+ "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+ "dev": true,
+ "dependencies": {
+ "assign-symbols": "^1.0.0",
+ "is-extendable": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/regex-not/node_modules/is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "dev": true,
+ "dependencies": {
+ "is-plain-object": "^2.0.4"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/regexp.prototype.flags": {
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.1.tgz",
+ "integrity": "sha512-sy6TXMN+hnP/wMy+ISxg3krXx7BAtWVO4UouuCN/ziM9UEne0euamVNafDfvC83bRNr95y0V5iijeDQFUNpvrg==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "set-function-name": "^2.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/regexpp": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.2.0.tgz",
+ "integrity": "sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/mysticatea"
+ }
+ },
+ "node_modules/remove-trailing-separator": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz",
+ "integrity": "sha1-wkvOKig62tW8P1jg1IJJuSN52O8=",
+ "dev": true
+ },
+ "node_modules/repeat-element": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.4.tgz",
+ "integrity": "sha512-LFiNfRcSu7KK3evMyYOuCzv3L10TW7yC1G2/+StMjK8Y6Vqd2MG7r/Qjw4ghtuCOjFvlnms/iMmLqpvW/ES/WQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/repeat-string": {
+ "version": "1.6.1",
+ "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz",
+ "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10"
+ }
+ },
+ "node_modules/repeating": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/repeating/-/repeating-2.0.1.tgz",
+ "integrity": "sha1-UhTFOpJtNVJwdSf7q0FdvAjQbdo=",
+ "dev": true,
+ "dependencies": {
+ "is-finite": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/requireindex": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/requireindex/-/requireindex-1.2.0.tgz",
+ "integrity": "sha512-L9jEkOi3ASd9PYit2cwRfyppc9NoABujTP8/5gFcbERmo5jUoAKovIC3fsF17pkTnGsrByysqX+Kxd2OTNI1ww==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.5"
+ }
+ },
+ "node_modules/requires-port": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz",
+ "integrity": "sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8=",
+ "dev": true
+ },
+ "node_modules/resolve": {
+ "version": "1.22.8",
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz",
+ "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==",
+ "dev": true,
+ "dependencies": {
+ "is-core-module": "^2.13.0",
+ "path-parse": "^1.0.7",
+ "supports-preserve-symlinks-flag": "^1.0.0"
+ },
+ "bin": {
+ "resolve": "bin/resolve"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/resolve-dir": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/resolve-dir/-/resolve-dir-1.0.1.tgz",
+ "integrity": "sha1-eaQGRMNivoLybv/nOcm7U4IEb0M=",
+ "dev": true,
+ "dependencies": {
+ "expand-tilde": "^2.0.0",
+ "global-modules": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/resolve-from": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz",
+ "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==",
+ "dev": true,
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/resolve-url": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz",
+ "integrity": "sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo=",
+ "deprecated": "https://github.com/lydell/resolve-url#deprecated",
+ "dev": true
+ },
+ "node_modules/restore-cursor": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz",
+ "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==",
+ "dev": true,
+ "dependencies": {
+ "onetime": "^5.1.0",
+ "signal-exit": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/ret": {
+ "version": "0.1.15",
+ "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz",
+ "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.12"
+ }
+ },
+ "node_modules/reusify": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz",
+ "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==",
+ "dev": true,
+ "engines": {
+ "iojs": ">=1.0.0",
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/rimraf": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
+ "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==",
+ "dev": true,
+ "dependencies": {
+ "glob": "^7.1.3"
+ },
+ "bin": {
+ "rimraf": "bin.js"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/run-applescript": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/run-applescript/-/run-applescript-5.0.0.tgz",
+ "integrity": "sha512-XcT5rBksx1QdIhlFOCtgZkB99ZEouFZ1E2Kc2LHqNW13U3/74YGdkQRmThTwxy4QIyookibDKYZOPqX//6BlAg==",
+ "dev": true,
+ "dependencies": {
+ "execa": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/run-async": {
+ "version": "2.4.1",
+ "resolved": "https://registry.npmjs.org/run-async/-/run-async-2.4.1.tgz",
+ "integrity": "sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.12.0"
+ }
+ },
+ "node_modules/run-parallel": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
+ "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ],
+ "dependencies": {
+ "queue-microtask": "^1.2.2"
+ }
+ },
+ "node_modules/rxjs": {
+ "version": "6.6.7",
+ "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.6.7.tgz",
+ "integrity": "sha512-hTdwr+7yYNIT5n4AMYp85KA6yw2Va0FLa3Rguvbpa4W3I5xynaBZo41cM3XM+4Q6fRMj3sBYIR1VAmZMXYJvRQ==",
+ "dev": true,
+ "dependencies": {
+ "tslib": "^1.9.0"
+ },
+ "engines": {
+ "npm": ">=2.0.0"
+ }
+ },
+ "node_modules/rxjs/node_modules/tslib": {
+ "version": "1.14.1",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz",
+ "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==",
+ "dev": true
+ },
+ "node_modules/safe-array-concat": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.0.1.tgz",
+ "integrity": "sha512-6XbUAseYE2KtOuGueyeobCySj9L4+66Tn6KQMOPQJrAJEowYKW/YR/MGJZl7FdydUdaFu4LYyDZjxf4/Nmo23Q==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "get-intrinsic": "^1.2.1",
+ "has-symbols": "^1.0.3",
+ "isarray": "^2.0.5"
+ },
+ "engines": {
+ "node": ">=0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/safe-array-concat/node_modules/isarray": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz",
+ "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==",
+ "dev": true
+ },
+ "node_modules/safe-buffer": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
+ "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==",
+ "dev": true
+ },
+ "node_modules/safe-regex": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz",
+ "integrity": "sha1-QKNmnzsHfR6UPURinhV91IAjvy4=",
+ "dev": true,
+ "dependencies": {
+ "ret": "~0.1.10"
+ }
+ },
+ "node_modules/safe-regex-test": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.0.0.tgz",
+ "integrity": "sha512-JBUUzyOgEwXQY1NuPtvcj/qcBDbDmEvWufhlnXZIm75DEHp+afM1r1ujJpJsV/gSM4t59tpDyPi1sd6ZaPFfsA==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "get-intrinsic": "^1.1.3",
+ "is-regex": "^1.1.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/safer-buffer": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
+ "dev": true
+ },
+ "node_modules/sax": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz",
+ "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==",
+ "dev": true
+ },
+ "node_modules/screenshot-ftw": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/screenshot-ftw/-/screenshot-ftw-1.0.5.tgz",
+ "integrity": "sha512-LPKvVt9TBvUD9CEb1xolbtS3CJODwkcF0NxnxdyXwBiT+nLokLaxuuISNUMzWxekjVgYqx077mG1gNhkvIE1Mg==",
+ "dev": true
+ },
+ "node_modules/secure-compare": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/secure-compare/-/secure-compare-3.0.1.tgz",
+ "integrity": "sha1-8aAymzCLIh+uN7mXTz1XjQypmeM=",
+ "dev": true
+ },
+ "node_modules/semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
+ "dev": true,
+ "bin": {
+ "semver": "bin/semver.js"
+ }
+ },
+ "node_modules/send": {
+ "version": "0.18.0",
+ "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz",
+ "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==",
+ "dev": true,
+ "dependencies": {
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "destroy": "1.2.0",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "fresh": "0.5.2",
+ "http-errors": "2.0.0",
+ "mime": "1.6.0",
+ "ms": "2.1.3",
+ "on-finished": "2.4.1",
+ "range-parser": "~1.2.1",
+ "statuses": "2.0.1"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/send/node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/send/node_modules/debug/node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
+ "dev": true
+ },
+ "node_modules/send/node_modules/depd": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
+ "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/send/node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+ "dev": true
+ },
+ "node_modules/send/node_modules/on-finished": {
+ "version": "2.4.1",
+ "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
+ "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
+ "dev": true,
+ "dependencies": {
+ "ee-first": "1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/send/node_modules/statuses": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
+ "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/serve-index": {
+ "version": "1.9.1",
+ "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz",
+ "integrity": "sha1-03aNabHn2C5c4FD/9bRTvqEqkjk=",
+ "dev": true,
+ "dependencies": {
+ "accepts": "~1.3.4",
+ "batch": "0.6.1",
+ "debug": "2.6.9",
+ "escape-html": "~1.0.3",
+ "http-errors": "~1.6.2",
+ "mime-types": "~2.1.17",
+ "parseurl": "~1.3.2"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/serve-index/node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/serve-index/node_modules/http-errors": {
+ "version": "1.6.3",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz",
+ "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=",
+ "dev": true,
+ "dependencies": {
+ "depd": "~1.1.2",
+ "inherits": "2.0.3",
+ "setprototypeof": "1.1.0",
+ "statuses": ">= 1.4.0 < 2"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/serve-index/node_modules/inherits": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
+ "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=",
+ "dev": true
+ },
+ "node_modules/serve-index/node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=",
+ "dev": true
+ },
+ "node_modules/serve-index/node_modules/setprototypeof": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz",
+ "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==",
+ "dev": true
+ },
+ "node_modules/serve-static": {
+ "version": "1.15.0",
+ "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz",
+ "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==",
+ "dev": true,
+ "dependencies": {
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "parseurl": "~1.3.3",
+ "send": "0.18.0"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/set-function-length": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.1.1.tgz",
+ "integrity": "sha512-VoaqjbBJKiWtg4yRcKBQ7g7wnGnLV3M8oLvVWwOk2PdYY6PEFegR1vezXR0tw6fZGF9csVakIRjrJiy2veSBFQ==",
+ "dev": true,
+ "dependencies": {
+ "define-data-property": "^1.1.1",
+ "get-intrinsic": "^1.2.1",
+ "gopd": "^1.0.1",
+ "has-property-descriptors": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/set-function-name": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/set-function-name/-/set-function-name-2.0.1.tgz",
+ "integrity": "sha512-tMNCiqYVkXIZgc2Hnoy2IvC/f8ezc5koaRFkCjrpWzGpCd3qbZXPzVy9MAZzK1ch/X0jvSkojys3oqJN0qCmdA==",
+ "dev": true,
+ "dependencies": {
+ "define-data-property": "^1.0.1",
+ "functions-have-names": "^1.2.3",
+ "has-property-descriptors": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/set-value": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.1.tgz",
+ "integrity": "sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==",
+ "dev": true,
+ "dependencies": {
+ "extend-shallow": "^2.0.1",
+ "is-extendable": "^0.1.1",
+ "is-plain-object": "^2.0.3",
+ "split-string": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/setprototypeof": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz",
+ "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==",
+ "dev": true
+ },
+ "node_modules/shebang-command": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
+ "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
+ "dev": true,
+ "dependencies": {
+ "shebang-regex": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/shebang-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
+ "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/shiki": {
+ "version": "0.14.5",
+ "resolved": "https://registry.npmjs.org/shiki/-/shiki-0.14.5.tgz",
+ "integrity": "sha512-1gCAYOcmCFONmErGTrS1fjzJLA7MGZmKzrBNX7apqSwhyITJg2O102uFzXUeBxNnEkDA9vHIKLyeKq0V083vIw==",
+ "dev": true,
+ "dependencies": {
+ "ansi-sequence-parser": "^1.1.0",
+ "jsonc-parser": "^3.2.0",
+ "vscode-oniguruma": "^1.7.0",
+ "vscode-textmate": "^8.0.0"
+ }
+ },
+ "node_modules/side-channel": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz",
+ "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.0",
+ "get-intrinsic": "^1.0.2",
+ "object-inspect": "^1.9.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/signal-exit": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz",
+ "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==",
+ "dev": true
+ },
+ "node_modules/slash": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-2.0.0.tgz",
+ "integrity": "sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/snapdragon": {
+ "version": "0.8.2",
+ "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz",
+ "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==",
+ "dev": true,
+ "dependencies": {
+ "base": "^0.11.1",
+ "debug": "^2.2.0",
+ "define-property": "^0.2.5",
+ "extend-shallow": "^2.0.1",
+ "map-cache": "^0.2.2",
+ "source-map": "^0.5.6",
+ "source-map-resolve": "^0.5.0",
+ "use": "^3.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/snapdragon-node": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz",
+ "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==",
+ "dev": true,
+ "dependencies": {
+ "define-property": "^1.0.0",
+ "isobject": "^3.0.0",
+ "snapdragon-util": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/snapdragon-node/node_modules/define-property": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+ "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+ "dev": true,
+ "dependencies": {
+ "is-descriptor": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/snapdragon-node/node_modules/is-accessor-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+ "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+ "dev": true,
+ "dependencies": {
+ "kind-of": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/snapdragon-node/node_modules/is-data-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+ "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+ "dev": true,
+ "dependencies": {
+ "kind-of": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/snapdragon-node/node_modules/is-descriptor": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+ "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+ "dev": true,
+ "dependencies": {
+ "is-accessor-descriptor": "^1.0.0",
+ "is-data-descriptor": "^1.0.0",
+ "kind-of": "^6.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/snapdragon-util": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz",
+ "integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==",
+ "dev": true,
+ "dependencies": {
+ "kind-of": "^3.2.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/snapdragon-util/node_modules/kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dev": true,
+ "dependencies": {
+ "is-buffer": "^1.1.5"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/snapdragon/node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/snapdragon/node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=",
+ "dev": true
+ },
+ "node_modules/source-map": {
+ "version": "0.5.7",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
+ "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/source-map-resolve": {
+ "version": "0.5.3",
+ "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.3.tgz",
+ "integrity": "sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw==",
+ "deprecated": "See https://github.com/lydell/source-map-resolve#deprecated",
+ "dev": true,
+ "dependencies": {
+ "atob": "^2.1.2",
+ "decode-uri-component": "^0.2.0",
+ "resolve-url": "^0.2.1",
+ "source-map-url": "^0.4.0",
+ "urix": "^0.1.0"
+ }
+ },
+ "node_modules/source-map-url": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.1.tgz",
+ "integrity": "sha512-cPiFOTLUKvJFIg4SKVScy4ilPPW6rFgMgfuZJPNoDuMs3nC1HbMUycBoJw77xFIp6z1UJQJOfx6C9GMH80DiTw==",
+ "deprecated": "See https://github.com/lydell/source-map-url#deprecated",
+ "dev": true
+ },
+ "node_modules/spdx-correct": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.1.tgz",
+ "integrity": "sha512-cOYcUWwhCuHCXi49RhFRCyJEK3iPj1Ziz9DpViV3tbZOwXD49QzIN3MpOLJNxh2qwq2lJJZaKMVw9qNi4jTC0w==",
+ "dev": true,
+ "dependencies": {
+ "spdx-expression-parse": "^3.0.0",
+ "spdx-license-ids": "^3.0.0"
+ }
+ },
+ "node_modules/spdx-exceptions": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz",
+ "integrity": "sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A==",
+ "dev": true
+ },
+ "node_modules/spdx-expression-parse": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz",
+ "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==",
+ "dev": true,
+ "dependencies": {
+ "spdx-exceptions": "^2.1.0",
+ "spdx-license-ids": "^3.0.0"
+ }
+ },
+ "node_modules/spdx-license-ids": {
+ "version": "3.0.11",
+ "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.11.tgz",
+ "integrity": "sha512-Ctl2BrFiM0X3MANYgj3CkygxhRmr9mi6xhejbdO960nF6EDJApTYpn0BQnDKlnNBULKiCN1n3w9EBkHK8ZWg+g==",
+ "dev": true
+ },
+ "node_modules/split-string": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz",
+ "integrity": "sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==",
+ "dev": true,
+ "dependencies": {
+ "extend-shallow": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/split-string/node_modules/extend-shallow": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+ "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+ "dev": true,
+ "dependencies": {
+ "assign-symbols": "^1.0.0",
+ "is-extendable": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/split-string/node_modules/is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "dev": true,
+ "dependencies": {
+ "is-plain-object": "^2.0.4"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/sprintf-js": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
+ "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=",
+ "dev": true
+ },
+ "node_modules/static-extend": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/static-extend/-/static-extend-0.1.2.tgz",
+ "integrity": "sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY=",
+ "dev": true,
+ "dependencies": {
+ "define-property": "^0.2.5",
+ "object-copy": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/statuses": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz",
+ "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/string_decoder": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
+ "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
+ "dev": true,
+ "dependencies": {
+ "safe-buffer": "~5.1.0"
+ }
+ },
+ "node_modules/string-width": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "dev": true,
+ "dependencies": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/string.prototype.trim": {
+ "version": "1.2.8",
+ "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.8.tgz",
+ "integrity": "sha512-lfjY4HcixfQXOfaqCvcBuOIapyaroTXhbkfJN3gcB1OtyupngWK4sEET9Knd0cXd28kTUqu/kHoV4HKSJdnjiQ==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/string.prototype.trimend": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.7.tgz",
+ "integrity": "sha512-Ni79DqeB72ZFq1uH/L6zJ+DKZTkOtPIHovb3YZHQViE+HDouuU4mBrLOLDn5Dde3RF8qw5qVETEjhu9locMLvA==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/string.prototype.trimstart": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.7.tgz",
+ "integrity": "sha512-NGhtDFu3jCEm7B4Fy0DpLewdJQOZcQ0rGbwQ/+stjnrp2i+rlKeCvos9hOIeCmqwratM47OBxY7uFZzjxHXmrg==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/strip-ansi": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
+ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+ "dev": true,
+ "dependencies": {
+ "ansi-regex": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/strip-bom": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-2.0.0.tgz",
+ "integrity": "sha1-YhmoVhZSBJHzV4i9vxRHqZx+aw4=",
+ "dev": true,
+ "dependencies": {
+ "is-utf8": "^0.2.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/strip-final-newline": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz",
+ "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/strip-indent": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz",
+ "integrity": "sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==",
+ "dev": true,
+ "dependencies": {
+ "min-indent": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/strip-json-comments": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz",
+ "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/supports-color": {
+ "version": "5.5.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
+ "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
+ "dev": true,
+ "dependencies": {
+ "has-flag": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/supports-preserve-symlinks-flag": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz",
+ "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/synckit": {
+ "version": "0.8.5",
+ "resolved": "https://registry.npmjs.org/synckit/-/synckit-0.8.5.tgz",
+ "integrity": "sha512-L1dapNV6vu2s/4Sputv8xGsCdAVlb5nRDMFU/E27D44l5U6cw1g0dGd45uLc+OXjNMmF4ntiMdCimzcjFKQI8Q==",
+ "dev": true,
+ "dependencies": {
+ "@pkgr/utils": "^2.3.1",
+ "tslib": "^2.5.0"
+ },
+ "engines": {
+ "node": "^14.18.0 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://opencollective.com/unts"
+ }
+ },
+ "node_modules/text-table": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz",
+ "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=",
+ "dev": true
+ },
+ "node_modules/through": {
+ "version": "2.3.8",
+ "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz",
+ "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=",
+ "dev": true
+ },
+ "node_modules/titleize": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/titleize/-/titleize-3.0.0.tgz",
+ "integrity": "sha512-KxVu8EYHDPBdUYdKZdKtU2aj2XfEx9AfjXxE/Aj0vT06w2icA09Vus1rh6eSu1y01akYg6BjIK/hxyLJINoMLQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/tmp": {
+ "version": "0.0.33",
+ "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz",
+ "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==",
+ "dev": true,
+ "dependencies": {
+ "os-tmpdir": "~1.0.2"
+ },
+ "engines": {
+ "node": ">=0.6.0"
+ }
+ },
+ "node_modules/to-fast-properties": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz",
+ "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=",
+ "dev": true,
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/to-object-path": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/to-object-path/-/to-object-path-0.3.0.tgz",
+ "integrity": "sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68=",
+ "dev": true,
+ "dependencies": {
+ "kind-of": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/to-object-path/node_modules/kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dev": true,
+ "dependencies": {
+ "is-buffer": "^1.1.5"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/to-regex": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz",
+ "integrity": "sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==",
+ "dev": true,
+ "dependencies": {
+ "define-property": "^2.0.2",
+ "extend-shallow": "^3.0.2",
+ "regex-not": "^1.0.2",
+ "safe-regex": "^1.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/to-regex-range": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
+ "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
+ "dev": true,
+ "dependencies": {
+ "is-number": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=8.0"
+ }
+ },
+ "node_modules/to-regex/node_modules/define-property": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz",
+ "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==",
+ "dev": true,
+ "dependencies": {
+ "is-descriptor": "^1.0.2",
+ "isobject": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/to-regex/node_modules/extend-shallow": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+ "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+ "dev": true,
+ "dependencies": {
+ "assign-symbols": "^1.0.0",
+ "is-extendable": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/to-regex/node_modules/is-accessor-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+ "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+ "dev": true,
+ "dependencies": {
+ "kind-of": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/to-regex/node_modules/is-data-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+ "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+ "dev": true,
+ "dependencies": {
+ "kind-of": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/to-regex/node_modules/is-descriptor": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+ "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+ "dev": true,
+ "dependencies": {
+ "is-accessor-descriptor": "^1.0.0",
+ "is-data-descriptor": "^1.0.0",
+ "kind-of": "^6.0.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/to-regex/node_modules/is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "dev": true,
+ "dependencies": {
+ "is-plain-object": "^2.0.4"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/toidentifier": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz",
+ "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.6"
+ }
+ },
+ "node_modules/trim-newlines": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/trim-newlines/-/trim-newlines-3.0.1.tgz",
+ "integrity": "sha512-c1PTsA3tYrIsLGkJkzHF+w9F2EyxfXGo4UyJc4pFL++FMjnq0HJS69T3M7d//gKrFKwy429bouPescbjecU+Zw==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/ts-api-utils": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.0.3.tgz",
+ "integrity": "sha512-wNMeqtMz5NtwpT/UZGY5alT+VoKdSsOOP/kqHFcUW1P/VRhH2wJ48+DN2WwUliNbQ976ETwDL0Ifd2VVvgonvg==",
+ "dev": true,
+ "engines": {
+ "node": ">=16.13.0"
+ },
+ "peerDependencies": {
+ "typescript": ">=4.2.0"
+ }
+ },
+ "node_modules/ts-node": {
+ "version": "10.9.1",
+ "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.1.tgz",
+ "integrity": "sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==",
+ "dev": true,
+ "dependencies": {
+ "@cspotcode/source-map-support": "^0.8.0",
+ "@tsconfig/node10": "^1.0.7",
+ "@tsconfig/node12": "^1.0.7",
+ "@tsconfig/node14": "^1.0.0",
+ "@tsconfig/node16": "^1.0.2",
+ "acorn": "^8.4.1",
+ "acorn-walk": "^8.1.1",
+ "arg": "^4.1.0",
+ "create-require": "^1.1.0",
+ "diff": "^4.0.1",
+ "make-error": "^1.1.1",
+ "v8-compile-cache-lib": "^3.0.1",
+ "yn": "3.1.1"
+ },
+ "bin": {
+ "ts-node": "dist/bin.js",
+ "ts-node-cwd": "dist/bin-cwd.js",
+ "ts-node-esm": "dist/bin-esm.js",
+ "ts-node-script": "dist/bin-script.js",
+ "ts-node-transpile-only": "dist/bin-transpile.js",
+ "ts-script": "dist/bin-script-deprecated.js"
+ },
+ "peerDependencies": {
+ "@swc/core": ">=1.2.50",
+ "@swc/wasm": ">=1.2.50",
+ "@types/node": "*",
+ "typescript": ">=2.7"
+ },
+ "peerDependenciesMeta": {
+ "@swc/core": {
+ "optional": true
+ },
+ "@swc/wasm": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/tsconfig-paths": {
+ "version": "3.14.2",
+ "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.14.2.tgz",
+ "integrity": "sha512-o/9iXgCYc5L/JxCHPe3Hvh8Q/2xm5Z+p18PESBU6Ff33695QnCHBEjcytY2q19ua7Mbl/DavtBOLq+oG0RCL+g==",
+ "dev": true,
+ "dependencies": {
+ "@types/json5": "^0.0.29",
+ "json5": "^1.0.2",
+ "minimist": "^1.2.6",
+ "strip-bom": "^3.0.0"
+ }
+ },
+ "node_modules/tsconfig-paths/node_modules/json5": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz",
+ "integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==",
+ "dev": true,
+ "dependencies": {
+ "minimist": "^1.2.0"
+ },
+ "bin": {
+ "json5": "lib/cli.js"
+ }
+ },
+ "node_modules/tsconfig-paths/node_modules/strip-bom": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz",
+ "integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==",
+ "dev": true,
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/tslib": {
+ "version": "2.6.2",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz",
+ "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==",
+ "dev": true
+ },
+ "node_modules/tsutils": {
+ "version": "3.21.0",
+ "resolved": "https://registry.npmjs.org/tsutils/-/tsutils-3.21.0.tgz",
+ "integrity": "sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==",
+ "dev": true,
+ "dependencies": {
+ "tslib": "^1.8.1"
+ },
+ "engines": {
+ "node": ">= 6"
+ },
+ "peerDependencies": {
+ "typescript": ">=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta"
+ }
+ },
+ "node_modules/tsutils/node_modules/tslib": {
+ "version": "1.14.1",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz",
+ "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==",
+ "dev": true
+ },
+ "node_modules/type": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/type/-/type-1.2.0.tgz",
+ "integrity": "sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg==",
+ "dev": true
+ },
+ "node_modules/type-check": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz",
+ "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==",
+ "dev": true,
+ "dependencies": {
+ "prelude-ls": "^1.2.1"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/type-fest": {
+ "version": "0.21.3",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz",
+ "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/type-is": {
+ "version": "1.6.18",
+ "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz",
+ "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
+ "dev": true,
+ "dependencies": {
+ "media-typer": "0.3.0",
+ "mime-types": "~2.1.24"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/typed-array-buffer": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.0.tgz",
+ "integrity": "sha512-Y8KTSIglk9OZEr8zywiIHG/kmQ7KWyjseXs1CbSo8vC42w7hg2HgYTxSWwP0+is7bWDc1H+Fo026CpHFwm8tkw==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "get-intrinsic": "^1.2.1",
+ "is-typed-array": "^1.1.10"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/typed-array-byte-length": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/typed-array-byte-length/-/typed-array-byte-length-1.0.0.tgz",
+ "integrity": "sha512-Or/+kvLxNpeQ9DtSydonMxCx+9ZXOswtwJn17SNLvhptaXYDJvkFFP5zbfU/uLmvnBJlI4yrnXRxpdWH/M5tNA==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "for-each": "^0.3.3",
+ "has-proto": "^1.0.1",
+ "is-typed-array": "^1.1.10"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/typed-array-byte-offset": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/typed-array-byte-offset/-/typed-array-byte-offset-1.0.0.tgz",
+ "integrity": "sha512-RD97prjEt9EL8YgAgpOkf3O4IF9lhJFr9g0htQkm0rchFp/Vx7LW5Q8fSXXub7BXAODyUQohRMyOc3faCPd0hg==",
+ "dev": true,
+ "dependencies": {
+ "available-typed-arrays": "^1.0.5",
+ "call-bind": "^1.0.2",
+ "for-each": "^0.3.3",
+ "has-proto": "^1.0.1",
+ "is-typed-array": "^1.1.10"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/typed-array-length": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.4.tgz",
+ "integrity": "sha512-KjZypGq+I/H7HI5HlOoGHkWUUGq+Q0TPhQurLbyrVrvnKTBgzLhIJ7j6J/XTQOi0d1RjyZ0wdas8bKs2p0x3Ng==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "for-each": "^0.3.3",
+ "is-typed-array": "^1.1.9"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/typedoc": {
+ "version": "0.25.3",
+ "resolved": "https://registry.npmjs.org/typedoc/-/typedoc-0.25.3.tgz",
+ "integrity": "sha512-Ow8Bo7uY1Lwy7GTmphRIMEo6IOZ+yYUyrc8n5KXIZg1svpqhZSWgni2ZrDhe+wLosFS8yswowUzljTAV/3jmWw==",
+ "dev": true,
+ "dependencies": {
+ "lunr": "^2.3.9",
+ "marked": "^4.3.0",
+ "minimatch": "^9.0.3",
+ "shiki": "^0.14.1"
+ },
+ "bin": {
+ "typedoc": "bin/typedoc"
+ },
+ "engines": {
+ "node": ">= 16"
+ },
+ "peerDependencies": {
+ "typescript": "4.6.x || 4.7.x || 4.8.x || 4.9.x || 5.0.x || 5.1.x || 5.2.x"
+ }
+ },
+ "node_modules/typedoc/node_modules/brace-expansion": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
+ "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+ "dev": true,
+ "dependencies": {
+ "balanced-match": "^1.0.0"
+ }
+ },
+ "node_modules/typedoc/node_modules/minimatch": {
+ "version": "9.0.3",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz",
+ "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==",
+ "dev": true,
+ "dependencies": {
+ "brace-expansion": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=16 || 14 >=14.17"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/typescript": {
+ "version": "5.2.2",
+ "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz",
+ "integrity": "sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==",
+ "dev": true,
+ "bin": {
+ "tsc": "bin/tsc",
+ "tsserver": "bin/tsserver"
+ },
+ "engines": {
+ "node": ">=14.17"
+ }
+ },
+ "node_modules/unbox-primitive": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.2.tgz",
+ "integrity": "sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==",
+ "dev": true,
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "has-bigints": "^1.0.2",
+ "has-symbols": "^1.0.3",
+ "which-boxed-primitive": "^1.0.2"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/unc-path-regex": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/unc-path-regex/-/unc-path-regex-0.1.2.tgz",
+ "integrity": "sha1-5z3T17DXxe2G+6xrCufYxqadUPo=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/underscore.string": {
+ "version": "3.3.6",
+ "resolved": "https://registry.npmjs.org/underscore.string/-/underscore.string-3.3.6.tgz",
+ "integrity": "sha512-VoC83HWXmCrF6rgkyxS9GHv8W9Q5nhMKho+OadDJGzL2oDYbYEppBaCMH6pFlwLeqj2QS+hhkw2kpXkSdD1JxQ==",
+ "dev": true,
+ "dependencies": {
+ "sprintf-js": "^1.1.1",
+ "util-deprecate": "^1.0.2"
+ },
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/underscore.string/node_modules/sprintf-js": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.2.tgz",
+ "integrity": "sha512-VE0SOVEHCk7Qc8ulkWw3ntAzXuqf7S2lvwQaDLRnUeIEaKNQJzV6BwmLKhOqT61aGhfUMrXeaBk+oDGCzvhcug==",
+ "dev": true
+ },
+ "node_modules/undici-types": {
+ "version": "5.26.5",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
+ "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
+ "dev": true
+ },
+ "node_modules/union": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/union/-/union-0.5.0.tgz",
+ "integrity": "sha512-N6uOhuW6zO95P3Mel2I2zMsbsanvvtgn6jVqJv4vbVcz/JN0OkL9suomjQGmWtxJQXOCqUJvquc1sMeNz/IwlA==",
+ "dev": true,
+ "dependencies": {
+ "qs": "^6.4.0"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/union-value": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.1.tgz",
+ "integrity": "sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==",
+ "dev": true,
+ "dependencies": {
+ "arr-union": "^3.1.0",
+ "get-value": "^2.0.6",
+ "is-extendable": "^0.1.1",
+ "set-value": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/unpipe": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
+ "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/unset-value": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/unset-value/-/unset-value-1.0.0.tgz",
+ "integrity": "sha1-g3aHP30jNRef+x5vw6jtDfyKtVk=",
+ "dev": true,
+ "dependencies": {
+ "has-value": "^0.3.1",
+ "isobject": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/unset-value/node_modules/has-value": {
+ "version": "0.3.1",
+ "resolved": "https://registry.npmjs.org/has-value/-/has-value-0.3.1.tgz",
+ "integrity": "sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8=",
+ "dev": true,
+ "dependencies": {
+ "get-value": "^2.0.3",
+ "has-values": "^0.1.4",
+ "isobject": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/unset-value/node_modules/has-value/node_modules/isobject": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz",
+ "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=",
+ "dev": true,
+ "dependencies": {
+ "isarray": "1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/unset-value/node_modules/has-values": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/has-values/-/has-values-0.1.4.tgz",
+ "integrity": "sha1-bWHeldkd/Km5oCCJrThL/49it3E=",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/untildify": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/untildify/-/untildify-4.0.0.tgz",
+ "integrity": "sha512-KK8xQ1mkzZeg9inewmFVDNkg3l5LUhoq9kN6iWYB/CC9YMG8HA+c1Q8HwDe6dEX7kErrEVNVBO3fWsVq5iDgtw==",
+ "dev": true,
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/upath": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/upath/-/upath-1.2.0.tgz",
+ "integrity": "sha512-aZwGpamFO61g3OlfT7OQCHqhGnW43ieH9WZeP7QxN/G/jS4jfqUkZxoryvJgVPEcrl5NL/ggHsSmLMHuH64Lhg==",
+ "dev": true,
+ "engines": {
+ "node": ">=4",
+ "yarn": "*"
+ }
+ },
+ "node_modules/update-browserslist-db": {
+ "version": "1.0.13",
+ "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz",
+ "integrity": "sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==",
+ "dev": true,
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "dependencies": {
+ "escalade": "^3.1.1",
+ "picocolors": "^1.0.0"
+ },
+ "bin": {
+ "update-browserslist-db": "cli.js"
+ },
+ "peerDependencies": {
+ "browserslist": ">= 4.21.0"
+ }
+ },
+ "node_modules/uri-js": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz",
+ "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
+ "dev": true,
+ "dependencies": {
+ "punycode": "^2.1.0"
+ }
+ },
+ "node_modules/urix": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz",
+ "integrity": "sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI=",
+ "deprecated": "Please see https://github.com/lydell/urix#deprecated",
+ "dev": true
+ },
+ "node_modules/url-join": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/url-join/-/url-join-4.0.1.tgz",
+ "integrity": "sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA==",
+ "dev": true
+ },
+ "node_modules/use": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/use/-/use-3.1.1.tgz",
+ "integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==",
+ "dev": true,
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/util-deprecate": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
+ "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=",
+ "dev": true
+ },
+ "node_modules/utils-merge": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
+ "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.4.0"
+ }
+ },
+ "node_modules/v8-compile-cache-lib": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz",
+ "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==",
+ "dev": true
+ },
+ "node_modules/v8flags": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/v8flags/-/v8flags-3.2.0.tgz",
+ "integrity": "sha512-mH8etigqMfiGWdeXpaaqGfs6BndypxusHHcv2qSHyZkGEznCd/qAXCWWRzeowtL54147cktFOC4P5y+kl8d8Jg==",
+ "dev": true,
+ "dependencies": {
+ "homedir-polyfill": "^1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/validate-npm-package-license": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz",
+ "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==",
+ "dev": true,
+ "dependencies": {
+ "spdx-correct": "^3.0.0",
+ "spdx-expression-parse": "^3.0.0"
+ }
+ },
+ "node_modules/vary": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
+ "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=",
+ "dev": true,
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/vscode-oniguruma": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/vscode-oniguruma/-/vscode-oniguruma-1.7.0.tgz",
+ "integrity": "sha512-L9WMGRfrjOhgHSdOYgCt/yRMsXzLDJSL7BPrOZt73gU0iWO4mpqzqQzOz5srxqTvMBaR0XZTSrVWo4j55Rc6cA==",
+ "dev": true
+ },
+ "node_modules/vscode-textmate": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/vscode-textmate/-/vscode-textmate-8.0.0.tgz",
+ "integrity": "sha512-AFbieoL7a5LMqcnOF04ji+rpXadgOXnZsxQr//r83kLPr7biP7am3g9zbaZIaBGwBRWeSvoMD4mgPdX3e4NWBg==",
+ "dev": true
+ },
+ "node_modules/whatwg-encoding": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-2.0.0.tgz",
+ "integrity": "sha512-p41ogyeMUrw3jWclHWTQg1k05DSVXPLcVxRTYsXUk+ZooOCZLcoYgPZ/HL/D/N+uQPOtcp1me1WhBEaX02mhWg==",
+ "dev": true,
+ "dependencies": {
+ "iconv-lite": "0.6.3"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/whatwg-encoding/node_modules/iconv-lite": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz",
+ "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==",
+ "dev": true,
+ "dependencies": {
+ "safer-buffer": ">= 2.1.2 < 3.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/which": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
+ "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
+ "dev": true,
+ "dependencies": {
+ "isexe": "^2.0.0"
+ },
+ "bin": {
+ "node-which": "bin/node-which"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/which-boxed-primitive": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz",
+ "integrity": "sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==",
+ "dev": true,
+ "dependencies": {
+ "is-bigint": "^1.0.1",
+ "is-boolean-object": "^1.1.0",
+ "is-number-object": "^1.0.4",
+ "is-string": "^1.0.5",
+ "is-symbol": "^1.0.3"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/which-typed-array": {
+ "version": "1.1.13",
+ "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.13.tgz",
+ "integrity": "sha512-P5Nra0qjSncduVPEAr7xhoF5guty49ArDTwzJ/yNuPIbZppyRxFQsRCWrocxIY+CnMVG+qfbU2FmDKyvSGClow==",
+ "dev": true,
+ "dependencies": {
+ "available-typed-arrays": "^1.0.5",
+ "call-bind": "^1.0.4",
+ "for-each": "^0.3.3",
+ "gopd": "^1.0.1",
+ "has-tostringtag": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/wrappy": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
+ "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=",
+ "dev": true
+ },
+ "node_modules/write-file-atomic": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz",
+ "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==",
+ "dev": true,
+ "dependencies": {
+ "imurmurhash": "^0.1.4",
+ "signal-exit": "^3.0.7"
+ },
+ "engines": {
+ "node": "^12.13.0 || ^14.15.0 || >=16.0.0"
+ }
+ },
+ "node_modules/xml2js": {
+ "version": "0.4.23",
+ "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.23.tgz",
+ "integrity": "sha512-ySPiMjM0+pLDftHgXY4By0uswI3SPKLDw/i3UXbnO8M/p28zqexCUoPmQFrYD+/1BzhGJSs2i1ERWKJAtiLrug==",
+ "dev": true,
+ "dependencies": {
+ "sax": ">=0.6.0",
+ "xmlbuilder": "~11.0.0"
+ },
+ "engines": {
+ "node": ">=4.0.0"
+ }
+ },
+ "node_modules/xmlbuilder": {
+ "version": "11.0.1",
+ "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-11.0.1.tgz",
+ "integrity": "sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA==",
+ "dev": true,
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/yallist": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
+ "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
+ "dev": true
+ },
+ "node_modules/yargs-parser": {
+ "version": "20.2.9",
+ "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz",
+ "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/yn": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz",
+ "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==",
+ "dev": true,
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/yocto-queue": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz",
+ "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==",
+ "dev": true,
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "tools/eslint-custom-rules": {
+ "version": "0.0.0",
+ "extraneous": true,
+ "license": "BSD-3-Clause"
+ },
+ "tools/eslint-plugin-gpuweb-cts": {
+ "version": "0.0.0",
+ "dev": true,
+ "license": "BSD-3-Clause"
+ }
+ },
+ "dependencies": {
+ "@aashutoshrathi/word-wrap": {
+ "version": "1.2.6",
+ "resolved": "https://registry.npmjs.org/@aashutoshrathi/word-wrap/-/word-wrap-1.2.6.tgz",
+ "integrity": "sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA==",
+ "dev": true
+ },
+ "@ampproject/remapping": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz",
+ "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==",
+ "dev": true,
+ "requires": {
+ "@jridgewell/gen-mapping": "^0.3.0",
+ "@jridgewell/trace-mapping": "^0.3.9"
+ }
+ },
+ "@babel/cli": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/cli/-/cli-7.23.0.tgz",
+ "integrity": "sha512-17E1oSkGk2IwNILM4jtfAvgjt+ohmpfBky8aLerUfYZhiPNg7ca+CRCxZn8QDxwNhV/upsc2VHBCqGFIR+iBfA==",
+ "dev": true,
+ "requires": {
+ "@jridgewell/trace-mapping": "^0.3.17",
+ "@nicolo-ribaudo/chokidar-2": "2.1.8-no-fsevents.3",
+ "chokidar": "^3.4.0",
+ "commander": "^4.0.1",
+ "convert-source-map": "^2.0.0",
+ "fs-readdir-recursive": "^1.1.0",
+ "glob": "^7.2.0",
+ "make-dir": "^2.1.0",
+ "slash": "^2.0.0"
+ },
+ "dependencies": {
+ "commander": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz",
+ "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==",
+ "dev": true
+ }
+ }
+ },
+ "@babel/code-frame": {
+ "version": "7.22.13",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz",
+ "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==",
+ "dev": true,
+ "requires": {
+ "@babel/highlight": "^7.22.13",
+ "chalk": "^2.4.2"
+ }
+ },
+ "@babel/compat-data": {
+ "version": "7.23.2",
+ "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.23.2.tgz",
+ "integrity": "sha512-0S9TQMmDHlqAZ2ITT95irXKfxN9bncq8ZCoJhun3nHL/lLUxd2NKBJYoNGWH7S0hz6fRQwWlAWn/ILM0C70KZQ==",
+ "dev": true
+ },
+ "@babel/core": {
+ "version": "7.23.2",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.23.2.tgz",
+ "integrity": "sha512-n7s51eWdaWZ3vGT2tD4T7J6eJs3QoBXydv7vkUM06Bf1cbVD2Kc2UrkzhiQwobfV7NwOnQXYL7UBJ5VPU+RGoQ==",
+ "dev": true,
+ "requires": {
+ "@ampproject/remapping": "^2.2.0",
+ "@babel/code-frame": "^7.22.13",
+ "@babel/generator": "^7.23.0",
+ "@babel/helper-compilation-targets": "^7.22.15",
+ "@babel/helper-module-transforms": "^7.23.0",
+ "@babel/helpers": "^7.23.2",
+ "@babel/parser": "^7.23.0",
+ "@babel/template": "^7.22.15",
+ "@babel/traverse": "^7.23.2",
+ "@babel/types": "^7.23.0",
+ "convert-source-map": "^2.0.0",
+ "debug": "^4.1.0",
+ "gensync": "^1.0.0-beta.2",
+ "json5": "^2.2.3",
+ "semver": "^6.3.1"
+ }
+ },
+ "@babel/generator": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.0.tgz",
+ "integrity": "sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.23.0",
+ "@jridgewell/gen-mapping": "^0.3.2",
+ "@jridgewell/trace-mapping": "^0.3.17",
+ "jsesc": "^2.5.1"
+ }
+ },
+ "@babel/helper-annotate-as-pure": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.22.5.tgz",
+ "integrity": "sha512-LvBTxu8bQSQkcyKOU+a1btnNFQ1dMAd0R6PyW3arXes06F6QLWLIrd681bxRPIXlrMGR3XYnW9JyML7dP3qgxg==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.22.5"
+ }
+ },
+ "@babel/helper-compilation-targets": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.15.tgz",
+ "integrity": "sha512-y6EEzULok0Qvz8yyLkCvVX+02ic+By2UdOhylwUOvOn9dvYc9mKICJuuU1n1XBI02YWsNsnrY1kc6DVbjcXbtw==",
+ "dev": true,
+ "requires": {
+ "@babel/compat-data": "^7.22.9",
+ "@babel/helper-validator-option": "^7.22.15",
+ "browserslist": "^4.21.9",
+ "lru-cache": "^5.1.1",
+ "semver": "^6.3.1"
+ },
+ "dependencies": {
+ "lru-cache": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
+ "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
+ "dev": true,
+ "requires": {
+ "yallist": "^3.0.2"
+ }
+ },
+ "yallist": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
+ "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==",
+ "dev": true
+ }
+ }
+ },
+ "@babel/helper-create-class-features-plugin": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.22.15.tgz",
+ "integrity": "sha512-jKkwA59IXcvSaiK2UN45kKwSC9o+KuoXsBDvHvU/7BecYIp8GQ2UwrVvFgJASUT+hBnwJx6MhvMCuMzwZZ7jlg==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "@babel/helper-environment-visitor": "^7.22.5",
+ "@babel/helper-function-name": "^7.22.5",
+ "@babel/helper-member-expression-to-functions": "^7.22.15",
+ "@babel/helper-optimise-call-expression": "^7.22.5",
+ "@babel/helper-replace-supers": "^7.22.9",
+ "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5",
+ "@babel/helper-split-export-declaration": "^7.22.6",
+ "semver": "^6.3.1"
+ }
+ },
+ "@babel/helper-environment-visitor": {
+ "version": "7.22.20",
+ "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz",
+ "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==",
+ "dev": true
+ },
+ "@babel/helper-function-name": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz",
+ "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==",
+ "dev": true,
+ "requires": {
+ "@babel/template": "^7.22.15",
+ "@babel/types": "^7.23.0"
+ }
+ },
+ "@babel/helper-hoist-variables": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz",
+ "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.22.5"
+ }
+ },
+ "@babel/helper-member-expression-to-functions": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.23.0.tgz",
+ "integrity": "sha512-6gfrPwh7OuT6gZyJZvd6WbTfrqAo7vm4xCzAXOusKqq/vWdKXphTpj5klHKNmRUU6/QRGlBsyU9mAIPaWHlqJA==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.23.0"
+ }
+ },
+ "@babel/helper-module-imports": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.15.tgz",
+ "integrity": "sha512-0pYVBnDKZO2fnSPCrgM/6WMc7eS20Fbok+0r88fp+YtWVLZrp4CkafFGIp+W0VKw4a22sgebPT99y+FDNMdP4w==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.22.15"
+ }
+ },
+ "@babel/helper-module-transforms": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.23.0.tgz",
+ "integrity": "sha512-WhDWw1tdrlT0gMgUJSlX0IQvoO1eN279zrAUbVB+KpV2c3Tylz8+GnKOLllCS6Z/iZQEyVYxhZVUdPTqs2YYPw==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-environment-visitor": "^7.22.20",
+ "@babel/helper-module-imports": "^7.22.15",
+ "@babel/helper-simple-access": "^7.22.5",
+ "@babel/helper-split-export-declaration": "^7.22.6",
+ "@babel/helper-validator-identifier": "^7.22.20"
+ }
+ },
+ "@babel/helper-optimise-call-expression": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.22.5.tgz",
+ "integrity": "sha512-HBwaojN0xFRx4yIvpwGqxiV2tUfl7401jlok564NgB9EHS1y6QT17FmKWm4ztqjeVdXLuC4fSvHc5ePpQjoTbw==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.22.5"
+ }
+ },
+ "@babel/helper-plugin-utils": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz",
+ "integrity": "sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==",
+ "dev": true
+ },
+ "@babel/helper-replace-supers": {
+ "version": "7.22.20",
+ "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.22.20.tgz",
+ "integrity": "sha512-qsW0In3dbwQUbK8kejJ4R7IHVGwHJlV6lpG6UA7a9hSa2YEiAib+N1T2kr6PEeUT+Fl7najmSOS6SmAwCHK6Tw==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-environment-visitor": "^7.22.20",
+ "@babel/helper-member-expression-to-functions": "^7.22.15",
+ "@babel/helper-optimise-call-expression": "^7.22.5"
+ }
+ },
+ "@babel/helper-simple-access": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz",
+ "integrity": "sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.22.5"
+ }
+ },
+ "@babel/helper-skip-transparent-expression-wrappers": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.22.5.tgz",
+ "integrity": "sha512-tK14r66JZKiC43p8Ki33yLBVJKlQDFoA8GYN67lWCDCqoL6EMMSuM9b+Iff2jHaM/RRFYl7K+iiru7hbRqNx8Q==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.22.5"
+ }
+ },
+ "@babel/helper-split-export-declaration": {
+ "version": "7.22.6",
+ "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz",
+ "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.22.5"
+ }
+ },
+ "@babel/helper-string-parser": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz",
+ "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==",
+ "dev": true
+ },
+ "@babel/helper-validator-identifier": {
+ "version": "7.22.20",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz",
+ "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==",
+ "dev": true
+ },
+ "@babel/helper-validator-option": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.22.15.tgz",
+ "integrity": "sha512-bMn7RmyFjY/mdECUbgn9eoSY4vqvacUnS9i9vGAGttgFWesO6B4CYWA7XlpbWgBt71iv/hfbPlynohStqnu5hA==",
+ "dev": true
+ },
+ "@babel/helpers": {
+ "version": "7.23.2",
+ "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.23.2.tgz",
+ "integrity": "sha512-lzchcp8SjTSVe/fPmLwtWVBFC7+Tbn8LGHDVfDp9JGxpAY5opSaEFgt8UQvrnECWOTdji2mOWMz1rOhkHscmGQ==",
+ "dev": true,
+ "requires": {
+ "@babel/template": "^7.22.15",
+ "@babel/traverse": "^7.23.2",
+ "@babel/types": "^7.23.0"
+ }
+ },
+ "@babel/highlight": {
+ "version": "7.22.20",
+ "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz",
+ "integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-validator-identifier": "^7.22.20",
+ "chalk": "^2.4.2",
+ "js-tokens": "^4.0.0"
+ }
+ },
+ "@babel/parser": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz",
+ "integrity": "sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==",
+ "dev": true
+ },
+ "@babel/plugin-syntax-jsx": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.22.5.tgz",
+ "integrity": "sha512-gvyP4hZrgrs/wWMaocvxZ44Hw0b3W8Pe+cMxc8V1ULQ07oh8VNbIRaoD1LRZVTvD+0nieDKjfgKg89sD7rrKrg==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-syntax-typescript": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.22.5.tgz",
+ "integrity": "sha512-1mS2o03i7t1c6VzH6fdQ3OA8tcEIxwG18zIPRp+UY1Ihv6W+XZzBCVxExF9upussPXJ0xE9XRHwMoNs1ep/nRQ==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-modules-commonjs": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.23.0.tgz",
+ "integrity": "sha512-32Xzss14/UVc7k9g775yMIvkVK8xwKE0DPdP5JTapr3+Z9w4tzeOuLNY6BXDQR6BdnzIlXnCGAzsk/ICHBLVWQ==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-module-transforms": "^7.23.0",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-simple-access": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-typescript": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.22.15.tgz",
+ "integrity": "sha512-1uirS0TnijxvQLnlv5wQBwOX3E1wCFX7ITv+9pBV2wKEk4K+M5tqDaoNXnTH8tjEIYHLO98MwiTWO04Ggz4XuA==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "@babel/helper-create-class-features-plugin": "^7.22.15",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-typescript": "^7.22.5"
+ }
+ },
+ "@babel/preset-typescript": {
+ "version": "7.23.2",
+ "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.23.2.tgz",
+ "integrity": "sha512-u4UJc1XsS1GhIGteM8rnGiIvf9rJpiVgMEeCnwlLA7WJPC+jcXWJAGxYmeqs5hOZD8BbAfnV5ezBOxQbb4OUxA==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-validator-option": "^7.22.15",
+ "@babel/plugin-syntax-jsx": "^7.22.5",
+ "@babel/plugin-transform-modules-commonjs": "^7.23.0",
+ "@babel/plugin-transform-typescript": "^7.22.15"
+ }
+ },
+ "@babel/template": {
+ "version": "7.22.15",
+ "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz",
+ "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==",
+ "dev": true,
+ "requires": {
+ "@babel/code-frame": "^7.22.13",
+ "@babel/parser": "^7.22.15",
+ "@babel/types": "^7.22.15"
+ }
+ },
+ "@babel/traverse": {
+ "version": "7.23.2",
+ "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz",
+ "integrity": "sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==",
+ "dev": true,
+ "requires": {
+ "@babel/code-frame": "^7.22.13",
+ "@babel/generator": "^7.23.0",
+ "@babel/helper-environment-visitor": "^7.22.20",
+ "@babel/helper-function-name": "^7.23.0",
+ "@babel/helper-hoist-variables": "^7.22.5",
+ "@babel/helper-split-export-declaration": "^7.22.6",
+ "@babel/parser": "^7.23.0",
+ "@babel/types": "^7.23.0",
+ "debug": "^4.1.0",
+ "globals": "^11.1.0"
+ }
+ },
+ "@babel/types": {
+ "version": "7.23.0",
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz",
+ "integrity": "sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-string-parser": "^7.22.5",
+ "@babel/helper-validator-identifier": "^7.22.20",
+ "to-fast-properties": "^2.0.0"
+ }
+ },
+ "@cspotcode/source-map-support": {
+ "version": "0.8.1",
+ "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz",
+ "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==",
+ "dev": true,
+ "requires": {
+ "@jridgewell/trace-mapping": "0.3.9"
+ },
+ "dependencies": {
+ "@jridgewell/trace-mapping": {
+ "version": "0.3.9",
+ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz",
+ "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==",
+ "dev": true,
+ "requires": {
+ "@jridgewell/resolve-uri": "^3.0.3",
+ "@jridgewell/sourcemap-codec": "^1.4.10"
+ }
+ }
+ }
+ },
+ "@eslint-community/eslint-utils": {
+ "version": "4.4.0",
+ "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz",
+ "integrity": "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==",
+ "dev": true,
+ "requires": {
+ "eslint-visitor-keys": "^3.3.0"
+ }
+ },
+ "@eslint-community/regexpp": {
+ "version": "4.10.0",
+ "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.10.0.tgz",
+ "integrity": "sha512-Cu96Sd2By9mCNTx2iyKOmq10v22jUVQv0lQnlGNy16oE9589yE+QADPbrMGCkA51cKZSg3Pu/aTJVTGfL/qjUA==",
+ "dev": true
+ },
+ "@eslint/eslintrc": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.2.tgz",
+ "integrity": "sha512-+wvgpDsrB1YqAMdEUCcnTlpfVBH7Vqn6A/NT3D8WVXFIaKMlErPIZT3oCIAVCOtarRpMtelZLqJeU3t7WY6X6g==",
+ "dev": true,
+ "requires": {
+ "ajv": "^6.12.4",
+ "debug": "^4.3.2",
+ "espree": "^9.6.0",
+ "globals": "^13.19.0",
+ "ignore": "^5.2.0",
+ "import-fresh": "^3.2.1",
+ "js-yaml": "^4.1.0",
+ "minimatch": "^3.1.2",
+ "strip-json-comments": "^3.1.1"
+ },
+ "dependencies": {
+ "argparse": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
+ "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
+ "dev": true
+ },
+ "globals": {
+ "version": "13.23.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-13.23.0.tgz",
+ "integrity": "sha512-XAmF0RjlrjY23MA51q3HltdlGxUpXPvg0GioKiD9X6HD28iMjo2dKC8Vqwm7lne4GNr78+RHTfliktR6ZH09wA==",
+ "dev": true,
+ "requires": {
+ "type-fest": "^0.20.2"
+ }
+ },
+ "js-yaml": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
+ "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
+ "dev": true,
+ "requires": {
+ "argparse": "^2.0.1"
+ }
+ },
+ "type-fest": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz",
+ "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==",
+ "dev": true
+ }
+ }
+ },
+ "@eslint/js": {
+ "version": "8.52.0",
+ "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.52.0.tgz",
+ "integrity": "sha512-mjZVbpaeMZludF2fsWLD0Z9gCref1Tk4i9+wddjRvpUNqqcndPkBD09N/Mapey0b3jaXbLm2kICwFv2E64QinA==",
+ "dev": true
+ },
+ "@humanwhocodes/config-array": {
+ "version": "0.11.13",
+ "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.13.tgz",
+ "integrity": "sha512-JSBDMiDKSzQVngfRjOdFXgFfklaXI4K9nLF49Auh21lmBWRLIK3+xTErTWD4KU54pb6coM6ESE7Awz/FNU3zgQ==",
+ "dev": true,
+ "requires": {
+ "@humanwhocodes/object-schema": "^2.0.1",
+ "debug": "^4.1.1",
+ "minimatch": "^3.0.5"
+ }
+ },
+ "@humanwhocodes/module-importer": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz",
+ "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==",
+ "dev": true
+ },
+ "@humanwhocodes/object-schema": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.1.tgz",
+ "integrity": "sha512-dvuCeX5fC9dXgJn9t+X5atfmgQAzUOWqS1254Gh0m6i8wKd10ebXkfNKiRK+1GWi/yTvvLDHpoxLr0xxxeslWw==",
+ "dev": true
+ },
+ "@jridgewell/gen-mapping": {
+ "version": "0.3.2",
+ "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz",
+ "integrity": "sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A==",
+ "dev": true,
+ "requires": {
+ "@jridgewell/set-array": "^1.0.1",
+ "@jridgewell/sourcemap-codec": "^1.4.10",
+ "@jridgewell/trace-mapping": "^0.3.9"
+ }
+ },
+ "@jridgewell/resolve-uri": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz",
+ "integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==",
+ "dev": true
+ },
+ "@jridgewell/set-array": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz",
+ "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==",
+ "dev": true
+ },
+ "@jridgewell/sourcemap-codec": {
+ "version": "1.4.14",
+ "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz",
+ "integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==",
+ "dev": true
+ },
+ "@jridgewell/trace-mapping": {
+ "version": "0.3.17",
+ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.17.tgz",
+ "integrity": "sha512-MCNzAp77qzKca9+W/+I0+sEpaUnZoeasnghNeVc41VZCEKaCH73Vq3BZZ/SzWIgrqE4H4ceI+p+b6C0mHf9T4g==",
+ "dev": true,
+ "requires": {
+ "@jridgewell/resolve-uri": "3.1.0",
+ "@jridgewell/sourcemap-codec": "1.4.14"
+ }
+ },
+ "@nicolo-ribaudo/chokidar-2": {
+ "version": "2.1.8-no-fsevents.3",
+ "resolved": "https://registry.npmjs.org/@nicolo-ribaudo/chokidar-2/-/chokidar-2-2.1.8-no-fsevents.3.tgz",
+ "integrity": "sha512-s88O1aVtXftvp5bCPB7WnmXc5IwOZZ7YPuwNPt+GtOOXpPvad1LfbmjYv+qII7zP6RU2QGnqve27dnLycEnyEQ==",
+ "dev": true,
+ "optional": true
+ },
+ "@nodelib/fs.scandir": {
+ "version": "2.1.5",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
+ "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==",
+ "dev": true,
+ "requires": {
+ "@nodelib/fs.stat": "2.0.5",
+ "run-parallel": "^1.1.9"
+ }
+ },
+ "@nodelib/fs.stat": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz",
+ "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==",
+ "dev": true
+ },
+ "@nodelib/fs.walk": {
+ "version": "1.2.8",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz",
+ "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==",
+ "dev": true,
+ "requires": {
+ "@nodelib/fs.scandir": "2.1.5",
+ "fastq": "^1.6.0"
+ }
+ },
+ "@pkgr/utils": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/@pkgr/utils/-/utils-2.4.2.tgz",
+ "integrity": "sha512-POgTXhjrTfbTV63DiFXav4lBHiICLKKwDeaKn9Nphwj7WH6m0hMMCaJkMyRWjgtPFyRKRVoMXXjczsTQRDEhYw==",
+ "dev": true,
+ "requires": {
+ "cross-spawn": "^7.0.3",
+ "fast-glob": "^3.3.0",
+ "is-glob": "^4.0.3",
+ "open": "^9.1.0",
+ "picocolors": "^1.0.0",
+ "tslib": "^2.6.0"
+ }
+ },
+ "@tsconfig/node10": {
+ "version": "1.0.9",
+ "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.9.tgz",
+ "integrity": "sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA==",
+ "dev": true
+ },
+ "@tsconfig/node12": {
+ "version": "1.0.11",
+ "resolved": "https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz",
+ "integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==",
+ "dev": true
+ },
+ "@tsconfig/node14": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz",
+ "integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==",
+ "dev": true
+ },
+ "@tsconfig/node16": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz",
+ "integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==",
+ "dev": true
+ },
+ "@types/babel__core": {
+ "version": "7.20.3",
+ "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.3.tgz",
+ "integrity": "sha512-54fjTSeSHwfan8AyHWrKbfBWiEUrNTZsUwPTDSNaaP1QDQIZbeNUg3a59E9D+375MzUw/x1vx2/0F5LBz+AeYA==",
+ "dev": true,
+ "requires": {
+ "@babel/parser": "^7.20.7",
+ "@babel/types": "^7.20.7",
+ "@types/babel__generator": "*",
+ "@types/babel__template": "*",
+ "@types/babel__traverse": "*"
+ }
+ },
+ "@types/babel__generator": {
+ "version": "7.6.4",
+ "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.6.4.tgz",
+ "integrity": "sha512-tFkciB9j2K755yrTALxD44McOrk+gfpIpvC3sxHjRawj6PfnQxrse4Clq5y/Rq+G3mrBurMax/lG8Qn2t9mSsg==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.0.0"
+ }
+ },
+ "@types/babel__template": {
+ "version": "7.4.1",
+ "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.1.tgz",
+ "integrity": "sha512-azBFKemX6kMg5Io+/rdGT0dkGreboUVR0Cdm3fz9QJWpaQGJRQXl7C+6hOTCZcMll7KFyEQpgbYI2lHdsS4U7g==",
+ "dev": true,
+ "requires": {
+ "@babel/parser": "^7.1.0",
+ "@babel/types": "^7.0.0"
+ }
+ },
+ "@types/babel__traverse": {
+ "version": "7.14.2",
+ "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.14.2.tgz",
+ "integrity": "sha512-K2waXdXBi2302XUdcHcR1jCeU0LL4TD9HRs/gk0N2Xvrht+G/BfJa4QObBQZfhMdxiCpV3COl5Nfq4uKTeTnJA==",
+ "dev": true,
+ "requires": {
+ "@babel/types": "^7.3.0"
+ }
+ },
+ "@types/body-parser": {
+ "version": "1.19.2",
+ "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.2.tgz",
+ "integrity": "sha512-ALYone6pm6QmwZoAgeyNksccT9Q4AWZQ6PvfwR37GT6r6FWUPguq6sUmNGSMV2Wr761oQoBxwGGa6DR5o1DC9g==",
+ "dev": true,
+ "requires": {
+ "@types/connect": "*",
+ "@types/node": "*"
+ }
+ },
+ "@types/connect": {
+ "version": "3.4.35",
+ "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.35.tgz",
+ "integrity": "sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ==",
+ "dev": true,
+ "requires": {
+ "@types/node": "*"
+ }
+ },
+ "@types/dom-mediacapture-transform": {
+ "version": "0.1.8",
+ "resolved": "https://registry.npmjs.org/@types/dom-mediacapture-transform/-/dom-mediacapture-transform-0.1.8.tgz",
+ "integrity": "sha512-L27i831nPYT82MVGweu3Uyw9ekWbEXq9NfHMQ4DWqbD6DYzswkgYxwRHOhU0KBIWEc76NP/PsX3R8sQMrL680Q==",
+ "dev": true,
+ "requires": {
+ "@types/dom-webcodecs": "*"
+ }
+ },
+ "@types/dom-webcodecs": {
+ "version": "0.1.9",
+ "resolved": "https://registry.npmjs.org/@types/dom-webcodecs/-/dom-webcodecs-0.1.9.tgz",
+ "integrity": "sha512-lOqlovxh4zB7p59rJwej8XG3uo0kv+hR+59Ky2MftcNS70ULWnWc6I2ZIM0xKcPFyvwU/DpRsTeFm8llayr5bA==",
+ "dev": true
+ },
+ "@types/express": {
+ "version": "4.17.20",
+ "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.20.tgz",
+ "integrity": "sha512-rOaqlkgEvOW495xErXMsmyX3WKBInbhG5eqojXYi3cGUaLoRDlXa5d52fkfWZT963AZ3v2eZ4MbKE6WpDAGVsw==",
+ "dev": true,
+ "requires": {
+ "@types/body-parser": "*",
+ "@types/express-serve-static-core": "^4.17.33",
+ "@types/qs": "*",
+ "@types/serve-static": "*"
+ }
+ },
+ "@types/express-serve-static-core": {
+ "version": "4.17.39",
+ "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.39.tgz",
+ "integrity": "sha512-BiEUfAiGCOllomsRAZOiMFP7LAnrifHpt56pc4Z7l9K6ACyN06Ns1JLMBxwkfLOjJRlSf06NwWsT7yzfpaVpyQ==",
+ "dev": true,
+ "requires": {
+ "@types/node": "*",
+ "@types/qs": "*",
+ "@types/range-parser": "*",
+ "@types/send": "*"
+ }
+ },
+ "@types/jquery": {
+ "version": "3.5.25",
+ "resolved": "https://registry.npmjs.org/@types/jquery/-/jquery-3.5.25.tgz",
+ "integrity": "sha512-gykx2c+OZf5nx2tv/5fDQqmvGgTiXshELy5jf9IgXPtVfSBl57IUYByN4osbwMXwJijWGOEYQABzGaFZE79A0Q==",
+ "dev": true,
+ "requires": {
+ "@types/sizzle": "*"
+ }
+ },
+ "@types/json-schema": {
+ "version": "7.0.14",
+ "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.14.tgz",
+ "integrity": "sha512-U3PUjAudAdJBeC2pgN8uTIKgxrb4nlDF3SF0++EldXQvQBGkpFZMSnwQiIoDU77tv45VgNkl/L4ouD+rEomujw==",
+ "dev": true
+ },
+ "@types/json5": {
+ "version": "0.0.29",
+ "resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz",
+ "integrity": "sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==",
+ "dev": true
+ },
+ "@types/mime": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.2.tgz",
+ "integrity": "sha512-YATxVxgRqNH6nHEIsvg6k2Boc1JHI9ZbH5iWFFv/MTkchz3b1ieGDa5T0a9RznNdI0KhVbdbWSN+KWWrQZRxTw==",
+ "dev": true
+ },
+ "@types/minimist": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/@types/minimist/-/minimist-1.2.2.tgz",
+ "integrity": "sha512-jhuKLIRrhvCPLqwPcx6INqmKeiA5EWrsCOPhrlFSrbrmU4ZMPjj5Ul/oLCMDO98XRUIwVm78xICz4EPCektzeQ==",
+ "dev": true
+ },
+ "@types/morgan": {
+ "version": "1.9.7",
+ "resolved": "https://registry.npmjs.org/@types/morgan/-/morgan-1.9.7.tgz",
+ "integrity": "sha512-4sJFBUBrIZkP5EvMm1L6VCXp3SQe8dnXqlVpe1jsmTjS1JQVmSjnpMNs8DosQd6omBi/K7BSKJ6z/Mc3ki0K9g==",
+ "dev": true,
+ "requires": {
+ "@types/node": "*"
+ }
+ },
+ "@types/node": {
+ "version": "20.8.10",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-20.8.10.tgz",
+ "integrity": "sha512-TlgT8JntpcbmKUFzjhsyhGfP2fsiz1Mv56im6enJ905xG1DAYesxJaeSbGqQmAw8OWPdhyJGhGSQGKRNJ45u9w==",
+ "dev": true,
+ "requires": {
+ "undici-types": "~5.26.4"
+ }
+ },
+ "@types/normalize-package-data": {
+ "version": "2.4.1",
+ "resolved": "https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.1.tgz",
+ "integrity": "sha512-Gj7cI7z+98M282Tqmp2K5EIsoouUEzbBJhQQzDE3jSIRk6r9gsz0oUokqIUR4u1R3dMHo0pDHM7sNOHyhulypw==",
+ "dev": true
+ },
+ "@types/offscreencanvas": {
+ "version": "2019.7.2",
+ "resolved": "https://registry.npmjs.org/@types/offscreencanvas/-/offscreencanvas-2019.7.2.tgz",
+ "integrity": "sha512-ujCjOxeA07IbEBQYAkoOI+XFw5sT3nhWJ/xZfPR6reJppDG7iPQPZacQiLTtWH1b3a2NYXWlxvYqa40y/LAixQ==",
+ "dev": true
+ },
+ "@types/pngjs": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/@types/pngjs/-/pngjs-6.0.3.tgz",
+ "integrity": "sha512-F/WaGVKEZ1XYFlEtsWtqWm92vRfQdOqSSTBPj07BRDKnDtRhCw50DpwEQtrrDwEZUoAZAzv2FaalZiNV/54BoQ==",
+ "dev": true,
+ "requires": {
+ "@types/node": "*"
+ }
+ },
+ "@types/qs": {
+ "version": "6.9.9",
+ "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.9.tgz",
+ "integrity": "sha512-wYLxw35euwqGvTDx6zfY1vokBFnsK0HNrzc6xNHchxfO2hpuRg74GbkEW7e3sSmPvj0TjCDT1VCa6OtHXnubsg==",
+ "dev": true
+ },
+ "@types/range-parser": {
+ "version": "1.2.6",
+ "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.6.tgz",
+ "integrity": "sha512-+0autS93xyXizIYiyL02FCY8N+KkKPhILhcUSA276HxzreZ16kl+cmwvV2qAM/PuCCwPXzOXOWhiPcw20uSFcA==",
+ "dev": true
+ },
+ "@types/semver": {
+ "version": "7.5.4",
+ "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.4.tgz",
+ "integrity": "sha512-MMzuxN3GdFwskAnb6fz0orFvhfqi752yjaXylr0Rp4oDg5H0Zn1IuyRhDVvYOwAXoJirx2xuS16I3WjxnAIHiQ==",
+ "dev": true
+ },
+ "@types/send": {
+ "version": "0.17.3",
+ "resolved": "https://registry.npmjs.org/@types/send/-/send-0.17.3.tgz",
+ "integrity": "sha512-/7fKxvKUoETxjFUsuFlPB9YndePpxxRAOfGC/yJdc9kTjTeP5kRCTzfnE8kPUKCeyiyIZu0YQ76s50hCedI1ug==",
+ "dev": true,
+ "requires": {
+ "@types/mime": "^1",
+ "@types/node": "*"
+ }
+ },
+ "@types/serve-index": {
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/@types/serve-index/-/serve-index-1.9.3.tgz",
+ "integrity": "sha512-4KG+yMEuvDPRrYq5fyVm/I2uqAJSAwZK9VSa+Zf+zUq9/oxSSvy3kkIqyL+jjStv6UCVi8/Aho0NHtB1Fwosrg==",
+ "dev": true,
+ "requires": {
+ "@types/express": "*"
+ }
+ },
+ "@types/serve-static": {
+ "version": "1.13.10",
+ "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.13.10.tgz",
+ "integrity": "sha512-nCkHGI4w7ZgAdNkrEu0bv+4xNV/XDqW+DydknebMOQwkpDGx8G+HTlj7R7ABI8i8nKxVw0wtKPi1D+lPOkh4YQ==",
+ "dev": true,
+ "requires": {
+ "@types/mime": "^1",
+ "@types/node": "*"
+ }
+ },
+ "@types/sizzle": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/@types/sizzle/-/sizzle-2.3.3.tgz",
+ "integrity": "sha512-JYM8x9EGF163bEyhdJBpR2QX1R5naCJHC8ucJylJ3w9/CVBaskdQ8WqBf8MmQrd1kRvp/a4TS8HJ+bxzR7ZJYQ==",
+ "dev": true
+ },
+ "@typescript-eslint/eslint-plugin": {
+ "version": "6.9.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.9.1.tgz",
+ "integrity": "sha512-w0tiiRc9I4S5XSXXrMHOWgHgxbrBn1Ro+PmiYhSg2ZVdxrAJtQgzU5o2m1BfP6UOn7Vxcc6152vFjQfmZR4xEg==",
+ "dev": true,
+ "requires": {
+ "@eslint-community/regexpp": "^4.5.1",
+ "@typescript-eslint/scope-manager": "6.9.1",
+ "@typescript-eslint/type-utils": "6.9.1",
+ "@typescript-eslint/utils": "6.9.1",
+ "@typescript-eslint/visitor-keys": "6.9.1",
+ "debug": "^4.3.4",
+ "graphemer": "^1.4.0",
+ "ignore": "^5.2.4",
+ "natural-compare": "^1.4.0",
+ "semver": "^7.5.4",
+ "ts-api-utils": "^1.0.1"
+ },
+ "dependencies": {
+ "@typescript-eslint/type-utils": {
+ "version": "6.9.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.9.1.tgz",
+ "integrity": "sha512-eh2oHaUKCK58qIeYp19F5V5TbpM52680sB4zNSz29VBQPTWIlE/hCj5P5B1AChxECe/fmZlspAWFuRniep1Skg==",
+ "dev": true,
+ "requires": {
+ "@typescript-eslint/typescript-estree": "6.9.1",
+ "@typescript-eslint/utils": "6.9.1",
+ "debug": "^4.3.4",
+ "ts-api-utils": "^1.0.1"
+ }
+ },
+ "semver": {
+ "version": "7.5.4",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
+ "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
+ "dev": true,
+ "requires": {
+ "lru-cache": "^6.0.0"
+ }
+ }
+ }
+ },
+ "@typescript-eslint/parser": {
+ "version": "6.9.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.9.1.tgz",
+ "integrity": "sha512-C7AK2wn43GSaCUZ9do6Ksgi2g3mwFkMO3Cis96kzmgudoVaKyt62yNzJOktP0HDLb/iO2O0n2lBOzJgr6Q/cyg==",
+ "dev": true,
+ "requires": {
+ "@typescript-eslint/scope-manager": "6.9.1",
+ "@typescript-eslint/types": "6.9.1",
+ "@typescript-eslint/typescript-estree": "6.9.1",
+ "@typescript-eslint/visitor-keys": "6.9.1",
+ "debug": "^4.3.4"
+ }
+ },
+ "@typescript-eslint/scope-manager": {
+ "version": "6.9.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.9.1.tgz",
+ "integrity": "sha512-38IxvKB6NAne3g/+MyXMs2Cda/Sz+CEpmm+KLGEM8hx/CvnSRuw51i8ukfwB/B/sESdeTGet1NH1Wj7I0YXswg==",
+ "dev": true,
+ "requires": {
+ "@typescript-eslint/types": "6.9.1",
+ "@typescript-eslint/visitor-keys": "6.9.1"
+ }
+ },
+ "@typescript-eslint/type-utils": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-5.62.0.tgz",
+ "integrity": "sha512-xsSQreu+VnfbqQpW5vnCJdq1Z3Q0U31qiWmRhr98ONQmcp/yhiPJFPq8MXiJVLiksmOKSjIldZzkebzHuCGzew==",
+ "dev": true,
+ "requires": {
+ "@typescript-eslint/typescript-estree": "5.62.0",
+ "@typescript-eslint/utils": "5.62.0",
+ "debug": "^4.3.4",
+ "tsutils": "^3.21.0"
+ },
+ "dependencies": {
+ "@typescript-eslint/scope-manager": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.62.0.tgz",
+ "integrity": "sha512-VXuvVvZeQCQb5Zgf4HAxc04q5j+WrNAtNh9OwCsCgpKqESMTu3tF/jhZ3xG6T4NZwWl65Bg8KuS2uEvhSfLl0w==",
+ "dev": true,
+ "requires": {
+ "@typescript-eslint/types": "5.62.0",
+ "@typescript-eslint/visitor-keys": "5.62.0"
+ }
+ },
+ "@typescript-eslint/types": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.62.0.tgz",
+ "integrity": "sha512-87NVngcbVXUahrRTqIK27gD2t5Cu1yuCXxbLcFtCzZGlfyVWWh8mLHkoxzjsB6DDNnvdL+fW8MiwPEJyGJQDgQ==",
+ "dev": true
+ },
+ "@typescript-eslint/typescript-estree": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.62.0.tgz",
+ "integrity": "sha512-CmcQ6uY7b9y694lKdRB8FEel7JbU/40iSAPomu++SjLMntB+2Leay2LO6i8VnJk58MtE9/nQSFIH6jpyRWyYzA==",
+ "dev": true,
+ "requires": {
+ "@typescript-eslint/types": "5.62.0",
+ "@typescript-eslint/visitor-keys": "5.62.0",
+ "debug": "^4.3.4",
+ "globby": "^11.1.0",
+ "is-glob": "^4.0.3",
+ "semver": "^7.3.7",
+ "tsutils": "^3.21.0"
+ }
+ },
+ "@typescript-eslint/utils": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.62.0.tgz",
+ "integrity": "sha512-n8oxjeb5aIbPFEtmQxQYOLI0i9n5ySBEY/ZEHHZqKQSFnxio1rv6dthascc9dLuwrL0RC5mPCxB7vnAVGAYWAQ==",
+ "dev": true,
+ "requires": {
+ "@eslint-community/eslint-utils": "^4.2.0",
+ "@types/json-schema": "^7.0.9",
+ "@types/semver": "^7.3.12",
+ "@typescript-eslint/scope-manager": "5.62.0",
+ "@typescript-eslint/types": "5.62.0",
+ "@typescript-eslint/typescript-estree": "5.62.0",
+ "eslint-scope": "^5.1.1",
+ "semver": "^7.3.7"
+ }
+ },
+ "@typescript-eslint/visitor-keys": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.62.0.tgz",
+ "integrity": "sha512-07ny+LHRzQXepkGg6w0mFY41fVUNBrL2Roj/++7V1txKugfjm/Ci/qSND03r2RhlJhJYMcTn9AhhSSqQp0Ysyw==",
+ "dev": true,
+ "requires": {
+ "@typescript-eslint/types": "5.62.0",
+ "eslint-visitor-keys": "^3.3.0"
+ }
+ },
+ "eslint-scope": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz",
+ "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==",
+ "dev": true,
+ "requires": {
+ "esrecurse": "^4.3.0",
+ "estraverse": "^4.1.1"
+ }
+ },
+ "estraverse": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz",
+ "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==",
+ "dev": true
+ },
+ "semver": {
+ "version": "7.5.4",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
+ "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
+ "dev": true,
+ "requires": {
+ "lru-cache": "^6.0.0"
+ }
+ }
+ }
+ },
+ "@typescript-eslint/types": {
+ "version": "6.9.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.9.1.tgz",
+ "integrity": "sha512-BUGslGOb14zUHOUmDB2FfT6SI1CcZEJYfF3qFwBeUrU6srJfzANonwRYHDpLBuzbq3HaoF2XL2hcr01c8f8OaQ==",
+ "dev": true
+ },
+ "@typescript-eslint/typescript-estree": {
+ "version": "6.9.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.9.1.tgz",
+ "integrity": "sha512-U+mUylTHfcqeO7mLWVQ5W/tMLXqVpRv61wm9ZtfE5egz7gtnmqVIw9ryh0mgIlkKk9rZLY3UHygsBSdB9/ftyw==",
+ "dev": true,
+ "requires": {
+ "@typescript-eslint/types": "6.9.1",
+ "@typescript-eslint/visitor-keys": "6.9.1",
+ "debug": "^4.3.4",
+ "globby": "^11.1.0",
+ "is-glob": "^4.0.3",
+ "semver": "^7.5.4",
+ "ts-api-utils": "^1.0.1"
+ },
+ "dependencies": {
+ "semver": {
+ "version": "7.5.4",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
+ "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
+ "dev": true,
+ "requires": {
+ "lru-cache": "^6.0.0"
+ }
+ }
+ }
+ },
+ "@typescript-eslint/utils": {
+ "version": "6.9.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.9.1.tgz",
+ "integrity": "sha512-L1T0A5nFdQrMVunpZgzqPL6y2wVreSyHhKGZryS6jrEN7bD9NplVAyMryUhXsQ4TWLnZmxc2ekar/lSGIlprCA==",
+ "dev": true,
+ "requires": {
+ "@eslint-community/eslint-utils": "^4.4.0",
+ "@types/json-schema": "^7.0.12",
+ "@types/semver": "^7.5.0",
+ "@typescript-eslint/scope-manager": "6.9.1",
+ "@typescript-eslint/types": "6.9.1",
+ "@typescript-eslint/typescript-estree": "6.9.1",
+ "semver": "^7.5.4"
+ },
+ "dependencies": {
+ "semver": {
+ "version": "7.5.4",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
+ "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
+ "dev": true,
+ "requires": {
+ "lru-cache": "^6.0.0"
+ }
+ }
+ }
+ },
+ "@typescript-eslint/visitor-keys": {
+ "version": "6.9.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.9.1.tgz",
+ "integrity": "sha512-MUaPUe/QRLEffARsmNfmpghuQkW436DvESW+h+M52w0coICHRfD6Np9/K6PdACwnrq1HmuLl+cSPZaJmeVPkSw==",
+ "dev": true,
+ "requires": {
+ "@typescript-eslint/types": "6.9.1",
+ "eslint-visitor-keys": "^3.4.1"
+ }
+ },
+ "@ungap/structured-clone": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz",
+ "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==",
+ "dev": true
+ },
+ "@webgpu/types": {
+ "version": "0.1.38",
+ "resolved": "https://registry.npmjs.org/@webgpu/types/-/types-0.1.38.tgz",
+ "integrity": "sha512-7LrhVKz2PRh+DD7+S+PVaFd5HxaWQvoMqBbsV9fNJO1pjUs1P8bM2vQVNfk+3URTqbuTI7gkXi0rfsN0IadoBA==",
+ "dev": true
+ },
+ "abbrev": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.1.1.tgz",
+ "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==",
+ "dev": true
+ },
+ "accepts": {
+ "version": "1.3.8",
+ "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz",
+ "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==",
+ "dev": true,
+ "requires": {
+ "mime-types": "~2.1.34",
+ "negotiator": "0.6.3"
+ }
+ },
+ "acorn": {
+ "version": "8.11.2",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.2.tgz",
+ "integrity": "sha512-nc0Axzp/0FILLEVsm4fNwLCwMttvhEI263QtVPQcbpfZZ3ts0hLsZGOpE6czNlid7CJ9MlyH8reXkpsf3YUY4w==",
+ "dev": true
+ },
+ "acorn-jsx": {
+ "version": "5.3.2",
+ "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz",
+ "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==",
+ "dev": true,
+ "requires": {}
+ },
+ "acorn-walk": {
+ "version": "8.3.0",
+ "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.0.tgz",
+ "integrity": "sha512-FS7hV565M5l1R08MXqo8odwMTB02C2UqzB17RVgu9EyuYFBqJZ3/ZY97sQD5FewVu1UyDFc1yztUDrAwT0EypA==",
+ "dev": true
+ },
+ "ajv": {
+ "version": "6.12.6",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
+ "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
+ "dev": true,
+ "requires": {
+ "fast-deep-equal": "^3.1.1",
+ "fast-json-stable-stringify": "^2.0.0",
+ "json-schema-traverse": "^0.4.1",
+ "uri-js": "^4.2.2"
+ }
+ },
+ "ansi-colors": {
+ "version": "4.1.3",
+ "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.3.tgz",
+ "integrity": "sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==",
+ "dev": true
+ },
+ "ansi-escapes": {
+ "version": "4.3.2",
+ "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz",
+ "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==",
+ "dev": true,
+ "requires": {
+ "type-fest": "^0.21.3"
+ }
+ },
+ "ansi-regex": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
+ "dev": true
+ },
+ "ansi-sequence-parser": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/ansi-sequence-parser/-/ansi-sequence-parser-1.1.1.tgz",
+ "integrity": "sha512-vJXt3yiaUL4UU546s3rPXlsry/RnM730G1+HkpKE012AN0sx1eOrxSu95oKDIonskeLTijMgqWZ3uDEe3NFvyg==",
+ "dev": true
+ },
+ "ansi-styles": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
+ "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
+ "dev": true,
+ "requires": {
+ "color-convert": "^1.9.0"
+ }
+ },
+ "anymatch": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.2.tgz",
+ "integrity": "sha512-P43ePfOAIupkguHUycrc4qJ9kz8ZiuOUijaETwX7THt0Y/GNK7v0aa8rY816xWjZ7rJdA5XdMcpVFTKMq+RvWg==",
+ "dev": true,
+ "requires": {
+ "normalize-path": "^3.0.0",
+ "picomatch": "^2.0.4"
+ }
+ },
+ "arg": {
+ "version": "4.1.3",
+ "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz",
+ "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==",
+ "dev": true
+ },
+ "argparse": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
+ "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
+ "dev": true,
+ "requires": {
+ "sprintf-js": "~1.0.2"
+ }
+ },
+ "arr-diff": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz",
+ "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=",
+ "dev": true
+ },
+ "arr-flatten": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz",
+ "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==",
+ "dev": true
+ },
+ "arr-union": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/arr-union/-/arr-union-3.1.0.tgz",
+ "integrity": "sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ=",
+ "dev": true
+ },
+ "array-buffer-byte-length": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.0.tgz",
+ "integrity": "sha512-LPuwb2P+NrQw3XhxGc36+XSvuBPopovXYTR9Ew++Du9Yb/bx5AzBfrIsBoj0EZUifjQU+sHL21sseZ3jerWO/A==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2",
+ "is-array-buffer": "^3.0.1"
+ }
+ },
+ "array-each": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/array-each/-/array-each-1.0.1.tgz",
+ "integrity": "sha1-p5SvDAWrF1KEbudTofIRoFugxE8=",
+ "dev": true
+ },
+ "array-flatten": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
+ "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=",
+ "dev": true
+ },
+ "array-includes": {
+ "version": "3.1.7",
+ "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.7.tgz",
+ "integrity": "sha512-dlcsNBIiWhPkHdOEEKnehA+RNUWDc4UqFtnIXU4uuYDPtA4LDkr7qip2p0VvFAEXNDr0yWZ9PJyIRiGjRLQzwQ==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1",
+ "get-intrinsic": "^1.2.1",
+ "is-string": "^1.0.7"
+ }
+ },
+ "array-slice": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/array-slice/-/array-slice-1.1.0.tgz",
+ "integrity": "sha512-B1qMD3RBP7O8o0H2KbrXDyB0IccejMF15+87Lvlor12ONPRHP6gTjXMNkt/d3ZuOGbAe66hFmaCfECI24Ufp6w==",
+ "dev": true
+ },
+ "array-union": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz",
+ "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==",
+ "dev": true
+ },
+ "array-unique": {
+ "version": "0.3.2",
+ "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz",
+ "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=",
+ "dev": true
+ },
+ "array.prototype.findlastindex": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/array.prototype.findlastindex/-/array.prototype.findlastindex-1.2.3.tgz",
+ "integrity": "sha512-LzLoiOMAxvy+Gd3BAq3B7VeIgPdo+Q8hthvKtXybMvRV0jrXfJM/t8mw7nNlpEcVlVUnCnM2KSX4XU5HmpodOA==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1",
+ "es-shim-unscopables": "^1.0.0",
+ "get-intrinsic": "^1.2.1"
+ }
+ },
+ "array.prototype.flat": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.2.tgz",
+ "integrity": "sha512-djYB+Zx2vLewY8RWlNCUdHjDXs2XOgm602S9E7P/UpHgfeHL00cRiIF+IN/G/aUJ7kGPb6yO/ErDI5V2s8iycA==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1",
+ "es-shim-unscopables": "^1.0.0"
+ }
+ },
+ "array.prototype.flatmap": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.2.tgz",
+ "integrity": "sha512-Ewyx0c9PmpcsByhSW4r+9zDU7sGjFc86qf/kKtuSCRdhfbk0SNLLkaT5qvcHnRGgc5NP/ly/y+qkXkqONX54CQ==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1",
+ "es-shim-unscopables": "^1.0.0"
+ }
+ },
+ "arraybuffer.prototype.slice": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.2.tgz",
+ "integrity": "sha512-yMBKppFur/fbHu9/6USUe03bZ4knMYiwFBcyiaXB8Go0qNehwX6inYPzK9U0NeQvGxKthcmHcaR8P5MStSRBAw==",
+ "dev": true,
+ "requires": {
+ "array-buffer-byte-length": "^1.0.0",
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1",
+ "get-intrinsic": "^1.2.1",
+ "is-array-buffer": "^3.0.2",
+ "is-shared-array-buffer": "^1.0.2"
+ }
+ },
+ "arrify": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz",
+ "integrity": "sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0=",
+ "dev": true
+ },
+ "assign-symbols": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/assign-symbols/-/assign-symbols-1.0.0.tgz",
+ "integrity": "sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c=",
+ "dev": true
+ },
+ "async": {
+ "version": "3.2.4",
+ "resolved": "https://registry.npmjs.org/async/-/async-3.2.4.tgz",
+ "integrity": "sha512-iAB+JbDEGXhyIUavoDl9WP/Jj106Kz9DEn1DPgYw5ruDn0e3Wgi3sKFm55sASdGBNOQB8F59d9qQ7deqrHA8wQ==",
+ "dev": true
+ },
+ "async-each": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/async-each/-/async-each-1.0.3.tgz",
+ "integrity": "sha512-z/WhQ5FPySLdvREByI2vZiTWwCnF0moMJ1hK9YQwDTHKh6I7/uSckMetoRGb5UBZPC1z0jlw+n/XCgjeH7y1AQ==",
+ "dev": true
+ },
+ "atob": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.2.tgz",
+ "integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==",
+ "dev": true
+ },
+ "available-typed-arrays": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.5.tgz",
+ "integrity": "sha512-DMD0KiN46eipeziST1LPP/STfDU0sufISXmjSgvVsoU2tqxctQeASejWcfNtxYKqETM1UxQ8sp2OrSBWpHY6sw==",
+ "dev": true
+ },
+ "babel-plugin-add-header-comment": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/babel-plugin-add-header-comment/-/babel-plugin-add-header-comment-1.0.3.tgz",
+ "integrity": "sha1-URxJAQYmQNWkgLSsPt1pRBlYUOw=",
+ "dev": true
+ },
+ "babel-plugin-const-enum": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/babel-plugin-const-enum/-/babel-plugin-const-enum-1.2.0.tgz",
+ "integrity": "sha512-o1m/6iyyFnp9MRsK1dHF3bneqyf3AlM2q3A/YbgQr2pCat6B6XJVDv2TXqzfY2RYUi4mak6WAksSBPlyYGx9dg==",
+ "dev": true,
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.0.0",
+ "@babel/plugin-syntax-typescript": "^7.3.3",
+ "@babel/traverse": "^7.16.0"
+ }
+ },
+ "balanced-match": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
+ "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==",
+ "dev": true
+ },
+ "base": {
+ "version": "0.11.2",
+ "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz",
+ "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==",
+ "dev": true,
+ "requires": {
+ "cache-base": "^1.0.1",
+ "class-utils": "^0.3.5",
+ "component-emitter": "^1.2.1",
+ "define-property": "^1.0.0",
+ "isobject": "^3.0.1",
+ "mixin-deep": "^1.2.0",
+ "pascalcase": "^0.1.1"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+ "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^1.0.0"
+ }
+ },
+ "is-accessor-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+ "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-data-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+ "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-descriptor": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+ "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+ "dev": true,
+ "requires": {
+ "is-accessor-descriptor": "^1.0.0",
+ "is-data-descriptor": "^1.0.0",
+ "kind-of": "^6.0.2"
+ }
+ }
+ }
+ },
+ "bash-color": {
+ "version": "0.0.3",
+ "resolved": "https://registry.npmjs.org/bash-color/-/bash-color-0.0.3.tgz",
+ "integrity": "sha512-y0MC9pb/jBCn/1FzRm8/BZqOYrk0vBnW4pNkmAmLwLSA/rA9Wd3u0CYQnB8y5lW6VDf+Sf2kScntAUUS0KDGxw==",
+ "dev": true
+ },
+ "basic-auth": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/basic-auth/-/basic-auth-2.0.1.tgz",
+ "integrity": "sha512-NF+epuEdnUYVlGuhaxbbq+dvJttwLnGY+YixlXlME5KpQ5W3CnXA5cVTneY3SPbPDRkcjMbifrwmFYcClgOZeg==",
+ "dev": true,
+ "requires": {
+ "safe-buffer": "5.1.2"
+ }
+ },
+ "batch": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz",
+ "integrity": "sha1-3DQxT05nkxgJP8dgJyUl+UvyXBY=",
+ "dev": true
+ },
+ "big-integer": {
+ "version": "1.6.51",
+ "resolved": "https://registry.npmjs.org/big-integer/-/big-integer-1.6.51.tgz",
+ "integrity": "sha512-GPEid2Y9QU1Exl1rpO9B2IPJGHPSupF5GnVIP0blYvNOMer2bTvSWs1jGOUg04hTmu67nmLsQ9TBo1puaotBHg==",
+ "dev": true
+ },
+ "binary-extensions": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz",
+ "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==",
+ "dev": true
+ },
+ "bindings": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz",
+ "integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==",
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "file-uri-to-path": "1.0.0"
+ }
+ },
+ "body-parser": {
+ "version": "1.20.1",
+ "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.1.tgz",
+ "integrity": "sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==",
+ "dev": true,
+ "requires": {
+ "bytes": "3.1.2",
+ "content-type": "~1.0.4",
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "destroy": "1.2.0",
+ "http-errors": "2.0.0",
+ "iconv-lite": "0.4.24",
+ "on-finished": "2.4.1",
+ "qs": "6.11.0",
+ "raw-body": "2.5.1",
+ "type-is": "~1.6.18",
+ "unpipe": "1.0.0"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "depd": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
+ "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
+ "dev": true
+ },
+ "ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
+ "dev": true
+ },
+ "on-finished": {
+ "version": "2.4.1",
+ "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
+ "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
+ "dev": true,
+ "requires": {
+ "ee-first": "1.1.1"
+ }
+ }
+ }
+ },
+ "bplist-parser": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/bplist-parser/-/bplist-parser-0.2.0.tgz",
+ "integrity": "sha512-z0M+byMThzQmD9NILRniCUXYsYpjwnlO8N5uCFaCqIOpqRsJCrQL9NK3JsD67CN5a08nF5oIL2bD6loTdHOuKw==",
+ "dev": true,
+ "requires": {
+ "big-integer": "^1.6.44"
+ }
+ },
+ "brace-expansion": {
+ "version": "1.1.11",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
+ "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+ "dev": true,
+ "requires": {
+ "balanced-match": "^1.0.0",
+ "concat-map": "0.0.1"
+ }
+ },
+ "braces": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz",
+ "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==",
+ "dev": true,
+ "requires": {
+ "fill-range": "^7.0.1"
+ }
+ },
+ "browserslist": {
+ "version": "4.22.1",
+ "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.22.1.tgz",
+ "integrity": "sha512-FEVc202+2iuClEhZhrWy6ZiAcRLvNMyYcxZ8raemul1DYVOVdFsbqckWLdsixQZCpJlwe77Z3UTalE7jsjnKfQ==",
+ "dev": true,
+ "requires": {
+ "caniuse-lite": "^1.0.30001541",
+ "electron-to-chromium": "^1.4.535",
+ "node-releases": "^2.0.13",
+ "update-browserslist-db": "^1.0.13"
+ }
+ },
+ "bundle-name": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/bundle-name/-/bundle-name-3.0.0.tgz",
+ "integrity": "sha512-PKA4BeSvBpQKQ8iPOGCSiell+N8P+Tf1DlwqmYhpe2gAhKPHn8EYOxVT+ShuGmhg8lN8XiSlS80yiExKXrURlw==",
+ "dev": true,
+ "requires": {
+ "run-applescript": "^5.0.0"
+ }
+ },
+ "bytes": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
+ "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==",
+ "dev": true
+ },
+ "cache-base": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz",
+ "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==",
+ "dev": true,
+ "requires": {
+ "collection-visit": "^1.0.0",
+ "component-emitter": "^1.2.1",
+ "get-value": "^2.0.6",
+ "has-value": "^1.0.0",
+ "isobject": "^3.0.1",
+ "set-value": "^2.0.0",
+ "to-object-path": "^0.3.0",
+ "union-value": "^1.0.0",
+ "unset-value": "^1.0.0"
+ }
+ },
+ "call-bind": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.5.tgz",
+ "integrity": "sha512-C3nQxfFZxFRVoJoGKKI8y3MOEo129NQ+FgQ08iye+Mk4zNZZGdjfs06bVTr+DBSlA66Q2VEcMki/cUCP4SercQ==",
+ "dev": true,
+ "requires": {
+ "function-bind": "^1.1.2",
+ "get-intrinsic": "^1.2.1",
+ "set-function-length": "^1.1.1"
+ }
+ },
+ "callsites": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
+ "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==",
+ "dev": true
+ },
+ "camelcase": {
+ "version": "5.3.1",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz",
+ "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==",
+ "dev": true
+ },
+ "camelcase-keys": {
+ "version": "6.2.2",
+ "resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-6.2.2.tgz",
+ "integrity": "sha512-YrwaA0vEKazPBkn0ipTiMpSajYDSe+KjQfrjhcBMxJt/znbvlHd8Pw/Vamaz5EB4Wfhs3SUR3Z9mwRu/P3s3Yg==",
+ "dev": true,
+ "requires": {
+ "camelcase": "^5.3.1",
+ "map-obj": "^4.0.0",
+ "quick-lru": "^4.0.1"
+ }
+ },
+ "caniuse-lite": {
+ "version": "1.0.30001559",
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001559.tgz",
+ "integrity": "sha512-cPiMKZgqgkg5LY3/ntGeLFUpi6tzddBNS58A4tnTgQw1zON7u2sZMU7SzOeVH4tj20++9ggL+V6FDOFMTaFFYA==",
+ "dev": true
+ },
+ "chalk": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
+ "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
+ "dev": true,
+ "requires": {
+ "ansi-styles": "^3.2.1",
+ "escape-string-regexp": "^1.0.5",
+ "supports-color": "^5.3.0"
+ }
+ },
+ "chardet": {
+ "version": "0.7.0",
+ "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz",
+ "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==",
+ "dev": true
+ },
+ "chokidar": {
+ "version": "3.5.3",
+ "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz",
+ "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==",
+ "dev": true,
+ "requires": {
+ "anymatch": "~3.1.2",
+ "braces": "~3.0.2",
+ "fsevents": "~2.3.2",
+ "glob-parent": "~5.1.2",
+ "is-binary-path": "~2.1.0",
+ "is-glob": "~4.0.1",
+ "normalize-path": "~3.0.0",
+ "readdirp": "~3.6.0"
+ }
+ },
+ "class-utils": {
+ "version": "0.3.6",
+ "resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz",
+ "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==",
+ "dev": true,
+ "requires": {
+ "arr-union": "^3.1.0",
+ "define-property": "^0.2.5",
+ "isobject": "^3.0.0",
+ "static-extend": "^0.1.1"
+ }
+ },
+ "cli-cursor": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz",
+ "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==",
+ "dev": true,
+ "requires": {
+ "restore-cursor": "^3.1.0"
+ }
+ },
+ "cli-width": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-3.0.0.tgz",
+ "integrity": "sha512-FxqpkPPwu1HjuN93Omfm4h8uIanXofW0RxVEW3k5RKx+mJJYSthzNhp32Kzxxy3YAEZ/Dc/EWN1vZRY0+kOhbw==",
+ "dev": true
+ },
+ "collection-visit": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz",
+ "integrity": "sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA=",
+ "dev": true,
+ "requires": {
+ "map-visit": "^1.0.0",
+ "object-visit": "^1.0.0"
+ }
+ },
+ "color-convert": {
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
+ "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
+ "dev": true,
+ "requires": {
+ "color-name": "1.1.3"
+ }
+ },
+ "color-name": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
+ "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==",
+ "dev": true
+ },
+ "colors": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/colors/-/colors-1.1.2.tgz",
+ "integrity": "sha1-FopHAXVran9RoSzgyXv6KMCE7WM=",
+ "dev": true
+ },
+ "component-emitter": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.3.0.tgz",
+ "integrity": "sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg==",
+ "dev": true
+ },
+ "concat-map": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
+ "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=",
+ "dev": true
+ },
+ "content-disposition": {
+ "version": "0.5.4",
+ "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz",
+ "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==",
+ "dev": true,
+ "requires": {
+ "safe-buffer": "5.2.1"
+ },
+ "dependencies": {
+ "safe-buffer": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
+ "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
+ "dev": true
+ }
+ }
+ },
+ "content-type": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz",
+ "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==",
+ "dev": true
+ },
+ "convert-source-map": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz",
+ "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==",
+ "dev": true
+ },
+ "cookie": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz",
+ "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==",
+ "dev": true
+ },
+ "cookie-signature": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz",
+ "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw=",
+ "dev": true
+ },
+ "copy-descriptor": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/copy-descriptor/-/copy-descriptor-0.1.1.tgz",
+ "integrity": "sha1-Z29us8OZl8LuGsOpJP1hJHSPV40=",
+ "dev": true
+ },
+ "core-util-is": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz",
+ "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==",
+ "dev": true
+ },
+ "corser": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/corser/-/corser-2.0.1.tgz",
+ "integrity": "sha1-jtolLsqrWEDc2XXOuQ2TcMgZ/4c=",
+ "dev": true
+ },
+ "create-require": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz",
+ "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==",
+ "dev": true
+ },
+ "cross-spawn": {
+ "version": "7.0.3",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
+ "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
+ "dev": true,
+ "requires": {
+ "path-key": "^3.1.0",
+ "shebang-command": "^2.0.0",
+ "which": "^2.0.1"
+ }
+ },
+ "csproj2ts": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/csproj2ts/-/csproj2ts-1.1.0.tgz",
+ "integrity": "sha512-sk0RTT51t4lUNQ7UfZrqjQx7q4g0m3iwNA6mvyh7gLsgQYvwKzfdyoAgicC9GqJvkoIkU0UmndV9c7VZ8pJ45Q==",
+ "dev": true,
+ "requires": {
+ "es6-promise": "^4.1.1",
+ "lodash": "^4.17.4",
+ "semver": "^5.4.1",
+ "xml2js": "^0.4.19"
+ },
+ "dependencies": {
+ "es6-promise": {
+ "version": "4.2.8",
+ "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.8.tgz",
+ "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==",
+ "dev": true
+ },
+ "semver": {
+ "version": "5.7.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
+ "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==",
+ "dev": true
+ }
+ }
+ },
+ "d": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/d/-/d-1.0.1.tgz",
+ "integrity": "sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==",
+ "dev": true,
+ "requires": {
+ "es5-ext": "^0.10.50",
+ "type": "^1.0.1"
+ }
+ },
+ "dateformat": {
+ "version": "4.6.3",
+ "resolved": "https://registry.npmjs.org/dateformat/-/dateformat-4.6.3.tgz",
+ "integrity": "sha512-2P0p0pFGzHS5EMnhdxQi7aJN+iMheud0UhG4dlE1DLAlvL8JHjJJTX/CSm4JXwV0Ka5nGk3zC5mcb5bUQUxxMA==",
+ "dev": true
+ },
+ "debug": {
+ "version": "4.3.4",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz",
+ "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==",
+ "dev": true,
+ "requires": {
+ "ms": "2.1.2"
+ }
+ },
+ "decamelize": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz",
+ "integrity": "sha1-9lNNFRSCabIDUue+4m9QH5oZEpA=",
+ "dev": true
+ },
+ "decamelize-keys": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/decamelize-keys/-/decamelize-keys-1.1.0.tgz",
+ "integrity": "sha1-0XGoeTMlKAfrPLYdwcFEXQeN8tk=",
+ "dev": true,
+ "requires": {
+ "decamelize": "^1.1.0",
+ "map-obj": "^1.0.0"
+ },
+ "dependencies": {
+ "map-obj": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-1.0.1.tgz",
+ "integrity": "sha1-2TPOuSBdgr3PSIb2dCvcK03qFG0=",
+ "dev": true
+ }
+ }
+ },
+ "decode-uri-component": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.0.tgz",
+ "integrity": "sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU=",
+ "dev": true
+ },
+ "deep-is": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz",
+ "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==",
+ "dev": true
+ },
+ "default-browser": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/default-browser/-/default-browser-4.0.0.tgz",
+ "integrity": "sha512-wX5pXO1+BrhMkSbROFsyxUm0i/cJEScyNhA4PPxc41ICuv05ZZB/MX28s8aZx6xjmatvebIapF6hLEKEcpneUA==",
+ "dev": true,
+ "requires": {
+ "bundle-name": "^3.0.0",
+ "default-browser-id": "^3.0.0",
+ "execa": "^7.1.1",
+ "titleize": "^3.0.0"
+ },
+ "dependencies": {
+ "execa": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/execa/-/execa-7.2.0.tgz",
+ "integrity": "sha512-UduyVP7TLB5IcAQl+OzLyLcS/l32W/GLg+AhHJ+ow40FOk2U3SAllPwR44v4vmdFwIWqpdwxxpQbF1n5ta9seA==",
+ "dev": true,
+ "requires": {
+ "cross-spawn": "^7.0.3",
+ "get-stream": "^6.0.1",
+ "human-signals": "^4.3.0",
+ "is-stream": "^3.0.0",
+ "merge-stream": "^2.0.0",
+ "npm-run-path": "^5.1.0",
+ "onetime": "^6.0.0",
+ "signal-exit": "^3.0.7",
+ "strip-final-newline": "^3.0.0"
+ }
+ },
+ "human-signals": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-4.3.1.tgz",
+ "integrity": "sha512-nZXjEF2nbo7lIw3mgYjItAfgQXog3OjJogSbKa2CQIIvSGWcKgeJnQlNXip6NglNzYH45nSRiEVimMvYL8DDqQ==",
+ "dev": true
+ },
+ "is-stream": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz",
+ "integrity": "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==",
+ "dev": true
+ },
+ "mimic-fn": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-4.0.0.tgz",
+ "integrity": "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==",
+ "dev": true
+ },
+ "npm-run-path": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.1.0.tgz",
+ "integrity": "sha512-sJOdmRGrY2sjNTRMbSvluQqg+8X7ZK61yvzBEIDhz4f8z1TZFYABsqjjCBd/0PUNE9M6QDgHJXQkGUEm7Q+l9Q==",
+ "dev": true,
+ "requires": {
+ "path-key": "^4.0.0"
+ }
+ },
+ "onetime": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/onetime/-/onetime-6.0.0.tgz",
+ "integrity": "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==",
+ "dev": true,
+ "requires": {
+ "mimic-fn": "^4.0.0"
+ }
+ },
+ "path-key": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz",
+ "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==",
+ "dev": true
+ },
+ "strip-final-newline": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-3.0.0.tgz",
+ "integrity": "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==",
+ "dev": true
+ }
+ }
+ },
+ "default-browser-id": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/default-browser-id/-/default-browser-id-3.0.0.tgz",
+ "integrity": "sha512-OZ1y3y0SqSICtE8DE4S8YOE9UZOJ8wO16fKWVP5J1Qz42kV9jcnMVFrEE/noXb/ss3Q4pZIH79kxofzyNNtUNA==",
+ "dev": true,
+ "requires": {
+ "bplist-parser": "^0.2.0",
+ "untildify": "^4.0.0"
+ }
+ },
+ "define-data-property": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.1.tgz",
+ "integrity": "sha512-E7uGkTzkk1d0ByLeSc6ZsFS79Axg+m1P/VsgYsxHgiuc3tFSj+MjMIwe90FC4lOAZzNBdY7kkO2P2wKdsQ1vgQ==",
+ "dev": true,
+ "requires": {
+ "get-intrinsic": "^1.2.1",
+ "gopd": "^1.0.1",
+ "has-property-descriptors": "^1.0.0"
+ }
+ },
+ "define-lazy-prop": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-3.0.0.tgz",
+ "integrity": "sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg==",
+ "dev": true
+ },
+ "define-properties": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz",
+ "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==",
+ "dev": true,
+ "requires": {
+ "define-data-property": "^1.0.1",
+ "has-property-descriptors": "^1.0.0",
+ "object-keys": "^1.1.1"
+ }
+ },
+ "define-property": {
+ "version": "0.2.5",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+ "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^0.1.0"
+ }
+ },
+ "depd": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz",
+ "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=",
+ "dev": true
+ },
+ "destroy": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz",
+ "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==",
+ "dev": true
+ },
+ "detect-file": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/detect-file/-/detect-file-1.0.0.tgz",
+ "integrity": "sha1-8NZtA2cqglyxtzvbP+YjEMjlUrc=",
+ "dev": true
+ },
+ "detect-indent": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/detect-indent/-/detect-indent-4.0.0.tgz",
+ "integrity": "sha1-920GQ1LN9Docts5hnE7jqUdd4gg=",
+ "dev": true,
+ "requires": {
+ "repeating": "^2.0.0"
+ }
+ },
+ "detect-newline": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/detect-newline/-/detect-newline-2.1.0.tgz",
+ "integrity": "sha1-9B8cEL5LAOh7XxPaaAdZ8sW/0+I=",
+ "dev": true
+ },
+ "diff": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz",
+ "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==",
+ "dev": true
+ },
+ "dir-glob": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz",
+ "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==",
+ "dev": true,
+ "requires": {
+ "path-type": "^4.0.0"
+ }
+ },
+ "doctrine": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz",
+ "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==",
+ "dev": true,
+ "requires": {
+ "esutils": "^2.0.2"
+ }
+ },
+ "duration": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/duration/-/duration-0.2.2.tgz",
+ "integrity": "sha512-06kgtea+bGreF5eKYgI/36A6pLXggY7oR4p1pq4SmdFBn1ReOL5D8RhG64VrqfTTKNucqqtBAwEj8aB88mcqrg==",
+ "dev": true,
+ "requires": {
+ "d": "1",
+ "es5-ext": "~0.10.46"
+ }
+ },
+ "ee-first": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
+ "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=",
+ "dev": true
+ },
+ "electron-to-chromium": {
+ "version": "1.4.573",
+ "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.573.tgz",
+ "integrity": "sha512-tzxxvKDTO3V5vzN2F+3v9jrK9gEbCdf1YYJUx/zVq1cyzyh+x1ddeYNNWh0ZS2ETNCVK3+Pns1LHIBq4w20X2Q==",
+ "dev": true
+ },
+ "emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
+ "dev": true
+ },
+ "encodeurl": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz",
+ "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==",
+ "dev": true
+ },
+ "error-ex": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz",
+ "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==",
+ "dev": true,
+ "requires": {
+ "is-arrayish": "^0.2.1"
+ }
+ },
+ "es-abstract": {
+ "version": "1.22.3",
+ "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.22.3.tgz",
+ "integrity": "sha512-eiiY8HQeYfYH2Con2berK+To6GrK2RxbPawDkGq4UiCQQfZHb6wX9qQqkbpPqaxQFcl8d9QzZqo0tGE0VcrdwA==",
+ "dev": true,
+ "requires": {
+ "array-buffer-byte-length": "^1.0.0",
+ "arraybuffer.prototype.slice": "^1.0.2",
+ "available-typed-arrays": "^1.0.5",
+ "call-bind": "^1.0.5",
+ "es-set-tostringtag": "^2.0.1",
+ "es-to-primitive": "^1.2.1",
+ "function.prototype.name": "^1.1.6",
+ "get-intrinsic": "^1.2.2",
+ "get-symbol-description": "^1.0.0",
+ "globalthis": "^1.0.3",
+ "gopd": "^1.0.1",
+ "has-property-descriptors": "^1.0.0",
+ "has-proto": "^1.0.1",
+ "has-symbols": "^1.0.3",
+ "hasown": "^2.0.0",
+ "internal-slot": "^1.0.5",
+ "is-array-buffer": "^3.0.2",
+ "is-callable": "^1.2.7",
+ "is-negative-zero": "^2.0.2",
+ "is-regex": "^1.1.4",
+ "is-shared-array-buffer": "^1.0.2",
+ "is-string": "^1.0.7",
+ "is-typed-array": "^1.1.12",
+ "is-weakref": "^1.0.2",
+ "object-inspect": "^1.13.1",
+ "object-keys": "^1.1.1",
+ "object.assign": "^4.1.4",
+ "regexp.prototype.flags": "^1.5.1",
+ "safe-array-concat": "^1.0.1",
+ "safe-regex-test": "^1.0.0",
+ "string.prototype.trim": "^1.2.8",
+ "string.prototype.trimend": "^1.0.7",
+ "string.prototype.trimstart": "^1.0.7",
+ "typed-array-buffer": "^1.0.0",
+ "typed-array-byte-length": "^1.0.0",
+ "typed-array-byte-offset": "^1.0.0",
+ "typed-array-length": "^1.0.4",
+ "unbox-primitive": "^1.0.2",
+ "which-typed-array": "^1.1.13"
+ }
+ },
+ "es-set-tostringtag": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.0.2.tgz",
+ "integrity": "sha512-BuDyupZt65P9D2D2vA/zqcI3G5xRsklm5N3xCwuiy+/vKy8i0ifdsQP1sLgO4tZDSCaQUSnmC48khknGMV3D2Q==",
+ "dev": true,
+ "requires": {
+ "get-intrinsic": "^1.2.2",
+ "has-tostringtag": "^1.0.0",
+ "hasown": "^2.0.0"
+ }
+ },
+ "es-shim-unscopables": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.0.2.tgz",
+ "integrity": "sha512-J3yBRXCzDu4ULnQwxyToo/OjdMx6akgVC7K6few0a7F/0wLtmKKN7I73AH5T2836UuXRqN7Qg+IIUw/+YJksRw==",
+ "dev": true,
+ "requires": {
+ "hasown": "^2.0.0"
+ }
+ },
+ "es-to-primitive": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz",
+ "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==",
+ "dev": true,
+ "requires": {
+ "is-callable": "^1.1.4",
+ "is-date-object": "^1.0.1",
+ "is-symbol": "^1.0.2"
+ }
+ },
+ "es5-ext": {
+ "version": "0.10.62",
+ "resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.62.tgz",
+ "integrity": "sha512-BHLqn0klhEpnOKSrzn/Xsz2UIW8j+cGmo9JLzr8BiUapV8hPL9+FliFqjwr9ngW7jWdnxv6eO+/LqyhJVqgrjA==",
+ "dev": true,
+ "requires": {
+ "es6-iterator": "^2.0.3",
+ "es6-symbol": "^3.1.3",
+ "next-tick": "^1.1.0"
+ }
+ },
+ "es6-iterator": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/es6-iterator/-/es6-iterator-2.0.3.tgz",
+ "integrity": "sha512-zw4SRzoUkd+cl+ZoE15A9o1oQd920Bb0iOJMQkQhl3jNc03YqVjAhG7scf9C5KWRU/R13Orf588uCC6525o02g==",
+ "dev": true,
+ "requires": {
+ "d": "1",
+ "es5-ext": "^0.10.35",
+ "es6-symbol": "^3.1.1"
+ }
+ },
+ "es6-promise": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-0.1.2.tgz",
+ "integrity": "sha1-8RLCn+paCZhTn8tqL9IUQ9KPBfc=",
+ "dev": true
+ },
+ "es6-symbol": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/es6-symbol/-/es6-symbol-3.1.3.tgz",
+ "integrity": "sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==",
+ "dev": true,
+ "requires": {
+ "d": "^1.0.1",
+ "ext": "^1.1.2"
+ }
+ },
+ "escalade": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
+ "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==",
+ "dev": true
+ },
+ "escape-html": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
+ "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=",
+ "dev": true
+ },
+ "escape-string-regexp": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
+ "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=",
+ "dev": true
+ },
+ "eslint": {
+ "version": "8.52.0",
+ "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.52.0.tgz",
+ "integrity": "sha512-zh/JHnaixqHZsolRB/w9/02akBk9EPrOs9JwcTP2ek7yL5bVvXuRariiaAjjoJ5DvuwQ1WAE/HsMz+w17YgBCg==",
+ "dev": true,
+ "requires": {
+ "@eslint-community/eslint-utils": "^4.2.0",
+ "@eslint-community/regexpp": "^4.6.1",
+ "@eslint/eslintrc": "^2.1.2",
+ "@eslint/js": "8.52.0",
+ "@humanwhocodes/config-array": "^0.11.13",
+ "@humanwhocodes/module-importer": "^1.0.1",
+ "@nodelib/fs.walk": "^1.2.8",
+ "@ungap/structured-clone": "^1.2.0",
+ "ajv": "^6.12.4",
+ "chalk": "^4.0.0",
+ "cross-spawn": "^7.0.2",
+ "debug": "^4.3.2",
+ "doctrine": "^3.0.0",
+ "escape-string-regexp": "^4.0.0",
+ "eslint-scope": "^7.2.2",
+ "eslint-visitor-keys": "^3.4.3",
+ "espree": "^9.6.1",
+ "esquery": "^1.4.2",
+ "esutils": "^2.0.2",
+ "fast-deep-equal": "^3.1.3",
+ "file-entry-cache": "^6.0.1",
+ "find-up": "^5.0.0",
+ "glob-parent": "^6.0.2",
+ "globals": "^13.19.0",
+ "graphemer": "^1.4.0",
+ "ignore": "^5.2.0",
+ "imurmurhash": "^0.1.4",
+ "is-glob": "^4.0.0",
+ "is-path-inside": "^3.0.3",
+ "js-yaml": "^4.1.0",
+ "json-stable-stringify-without-jsonify": "^1.0.1",
+ "levn": "^0.4.1",
+ "lodash.merge": "^4.6.2",
+ "minimatch": "^3.1.2",
+ "natural-compare": "^1.4.0",
+ "optionator": "^0.9.3",
+ "strip-ansi": "^6.0.1",
+ "text-table": "^0.2.0"
+ },
+ "dependencies": {
+ "ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "dev": true,
+ "requires": {
+ "color-convert": "^2.0.1"
+ }
+ },
+ "argparse": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
+ "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
+ "dev": true
+ },
+ "chalk": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
+ "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+ "dev": true,
+ "requires": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ }
+ },
+ "color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "dev": true,
+ "requires": {
+ "color-name": "~1.1.4"
+ }
+ },
+ "color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
+ "dev": true
+ },
+ "escape-string-regexp": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz",
+ "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==",
+ "dev": true
+ },
+ "glob-parent": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz",
+ "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==",
+ "dev": true,
+ "requires": {
+ "is-glob": "^4.0.3"
+ }
+ },
+ "globals": {
+ "version": "13.23.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-13.23.0.tgz",
+ "integrity": "sha512-XAmF0RjlrjY23MA51q3HltdlGxUpXPvg0GioKiD9X6HD28iMjo2dKC8Vqwm7lne4GNr78+RHTfliktR6ZH09wA==",
+ "dev": true,
+ "requires": {
+ "type-fest": "^0.20.2"
+ }
+ },
+ "has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+ "dev": true
+ },
+ "js-yaml": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
+ "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
+ "dev": true,
+ "requires": {
+ "argparse": "^2.0.1"
+ }
+ },
+ "supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "dev": true,
+ "requires": {
+ "has-flag": "^4.0.0"
+ }
+ },
+ "type-fest": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz",
+ "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==",
+ "dev": true
+ }
+ }
+ },
+ "eslint-config-prettier": {
+ "version": "9.0.0",
+ "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-9.0.0.tgz",
+ "integrity": "sha512-IcJsTkJae2S35pRsRAwoCE+925rJJStOdkKnLVgtE+tEpqU0EVVM7OqrwxqgptKdX29NUwC82I5pXsGFIgSevw==",
+ "dev": true,
+ "requires": {}
+ },
+ "eslint-import-resolver-node": {
+ "version": "0.3.9",
+ "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.9.tgz",
+ "integrity": "sha512-WFj2isz22JahUv+B788TlO3N6zL3nNJGU8CcZbPZvVEkBPaJdCV4vy5wyghty5ROFbCRnm132v8BScu5/1BQ8g==",
+ "dev": true,
+ "requires": {
+ "debug": "^3.2.7",
+ "is-core-module": "^2.13.0",
+ "resolve": "^1.22.4"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "3.2.7",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
+ "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
+ "dev": true,
+ "requires": {
+ "ms": "^2.1.1"
+ }
+ }
+ }
+ },
+ "eslint-module-utils": {
+ "version": "2.8.0",
+ "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.8.0.tgz",
+ "integrity": "sha512-aWajIYfsqCKRDgUfjEXNN/JlrzauMuSEy5sbd7WXbtW3EH6A6MpwEh42c7qD+MqQo9QMJ6fWLAeIJynx0g6OAw==",
+ "dev": true,
+ "requires": {
+ "debug": "^3.2.7"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "3.2.7",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
+ "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
+ "dev": true,
+ "requires": {
+ "ms": "^2.1.1"
+ }
+ }
+ }
+ },
+ "eslint-plugin-ban": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-ban/-/eslint-plugin-ban-1.6.0.tgz",
+ "integrity": "sha512-gZptoV+SFHOHO57/5lmPvizMvSXrjFatP9qlVQf3meL/WHo9TxSoERygrMlESl19CPh95U86asTxohT8OprwDw==",
+ "dev": true,
+ "requires": {
+ "requireindex": "~1.2.0"
+ }
+ },
+ "eslint-plugin-deprecation": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-deprecation/-/eslint-plugin-deprecation-2.0.0.tgz",
+ "integrity": "sha512-OAm9Ohzbj11/ZFyICyR5N6LbOIvQMp7ZU2zI7Ej0jIc8kiGUERXPNMfw2QqqHD1ZHtjMub3yPZILovYEYucgoQ==",
+ "dev": true,
+ "requires": {
+ "@typescript-eslint/utils": "^6.0.0",
+ "tslib": "^2.3.1",
+ "tsutils": "^3.21.0"
+ }
+ },
+ "eslint-plugin-es": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-es/-/eslint-plugin-es-3.0.1.tgz",
+ "integrity": "sha512-GUmAsJaN4Fc7Gbtl8uOBlayo2DqhwWvEzykMHSCZHU3XdJ+NSzzZcVhXh3VxX5icqQ+oQdIEawXX8xkR3mIFmQ==",
+ "dev": true,
+ "requires": {
+ "eslint-utils": "^2.0.0",
+ "regexpp": "^3.0.0"
+ }
+ },
+ "eslint-plugin-gpuweb-cts": {
+ "version": "file:tools/eslint-plugin-gpuweb-cts"
+ },
+ "eslint-plugin-import": {
+ "version": "2.29.0",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.29.0.tgz",
+ "integrity": "sha512-QPOO5NO6Odv5lpoTkddtutccQjysJuFxoPS7fAHO+9m9udNHvTCPSAMW9zGAYj8lAIdr40I8yPCdUYrncXtrwg==",
+ "dev": true,
+ "requires": {
+ "array-includes": "^3.1.7",
+ "array.prototype.findlastindex": "^1.2.3",
+ "array.prototype.flat": "^1.3.2",
+ "array.prototype.flatmap": "^1.3.2",
+ "debug": "^3.2.7",
+ "doctrine": "^2.1.0",
+ "eslint-import-resolver-node": "^0.3.9",
+ "eslint-module-utils": "^2.8.0",
+ "hasown": "^2.0.0",
+ "is-core-module": "^2.13.1",
+ "is-glob": "^4.0.3",
+ "minimatch": "^3.1.2",
+ "object.fromentries": "^2.0.7",
+ "object.groupby": "^1.0.1",
+ "object.values": "^1.1.7",
+ "semver": "^6.3.1",
+ "tsconfig-paths": "^3.14.2"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "3.2.7",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
+ "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
+ "dev": true,
+ "requires": {
+ "ms": "^2.1.1"
+ }
+ },
+ "doctrine": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz",
+ "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==",
+ "dev": true,
+ "requires": {
+ "esutils": "^2.0.2"
+ }
+ }
+ }
+ },
+ "eslint-plugin-node": {
+ "version": "11.1.0",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-node/-/eslint-plugin-node-11.1.0.tgz",
+ "integrity": "sha512-oUwtPJ1W0SKD0Tr+wqu92c5xuCeQqB3hSCHasn/ZgjFdA9iDGNkNf2Zi9ztY7X+hNuMib23LNGRm6+uN+KLE3g==",
+ "dev": true,
+ "requires": {
+ "eslint-plugin-es": "^3.0.0",
+ "eslint-utils": "^2.0.0",
+ "ignore": "^5.1.1",
+ "minimatch": "^3.0.4",
+ "resolve": "^1.10.1",
+ "semver": "^6.1.0"
+ }
+ },
+ "eslint-plugin-prettier": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-prettier/-/eslint-plugin-prettier-5.0.0.tgz",
+ "integrity": "sha512-AgaZCVuYDXHUGxj/ZGu1u8H8CYgDY3iG6w5kUFw4AzMVXzB7VvbKgYR4nATIN+OvUrghMbiDLeimVjVY5ilq3w==",
+ "dev": true,
+ "requires": {
+ "prettier-linter-helpers": "^1.0.0",
+ "synckit": "^0.8.5"
+ }
+ },
+ "eslint-scope": {
+ "version": "7.2.2",
+ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz",
+ "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==",
+ "dev": true,
+ "requires": {
+ "esrecurse": "^4.3.0",
+ "estraverse": "^5.2.0"
+ }
+ },
+ "eslint-utils": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-2.1.0.tgz",
+ "integrity": "sha512-w94dQYoauyvlDc43XnGB8lU3Zt713vNChgt4EWwhXAP2XkBvndfxF0AgIqKOOasjPIPzj9JqgwkwbCYD0/V3Zg==",
+ "dev": true,
+ "requires": {
+ "eslint-visitor-keys": "^1.1.0"
+ },
+ "dependencies": {
+ "eslint-visitor-keys": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz",
+ "integrity": "sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ==",
+ "dev": true
+ }
+ }
+ },
+ "eslint-visitor-keys": {
+ "version": "3.4.3",
+ "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz",
+ "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==",
+ "dev": true
+ },
+ "espree": {
+ "version": "9.6.1",
+ "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz",
+ "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==",
+ "dev": true,
+ "requires": {
+ "acorn": "^8.9.0",
+ "acorn-jsx": "^5.3.2",
+ "eslint-visitor-keys": "^3.4.1"
+ }
+ },
+ "esprima": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
+ "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
+ "dev": true
+ },
+ "esquery": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz",
+ "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==",
+ "dev": true,
+ "requires": {
+ "estraverse": "^5.1.0"
+ }
+ },
+ "esrecurse": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz",
+ "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==",
+ "dev": true,
+ "requires": {
+ "estraverse": "^5.2.0"
+ }
+ },
+ "estraverse": {
+ "version": "5.3.0",
+ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz",
+ "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==",
+ "dev": true
+ },
+ "esutils": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz",
+ "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==",
+ "dev": true
+ },
+ "etag": {
+ "version": "1.8.1",
+ "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
+ "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==",
+ "dev": true
+ },
+ "eventemitter2": {
+ "version": "0.4.14",
+ "resolved": "https://registry.npmjs.org/eventemitter2/-/eventemitter2-0.4.14.tgz",
+ "integrity": "sha1-j2G3XN4BKy6esoTUVFWDtWQ7Yas=",
+ "dev": true
+ },
+ "eventemitter3": {
+ "version": "4.0.7",
+ "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz",
+ "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==",
+ "dev": true
+ },
+ "execa": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz",
+ "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==",
+ "dev": true,
+ "requires": {
+ "cross-spawn": "^7.0.3",
+ "get-stream": "^6.0.0",
+ "human-signals": "^2.1.0",
+ "is-stream": "^2.0.0",
+ "merge-stream": "^2.0.0",
+ "npm-run-path": "^4.0.1",
+ "onetime": "^5.1.2",
+ "signal-exit": "^3.0.3",
+ "strip-final-newline": "^2.0.0"
+ }
+ },
+ "exit": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/exit/-/exit-0.1.2.tgz",
+ "integrity": "sha1-BjJjj42HfMghB9MKD/8aF8uhzQw=",
+ "dev": true
+ },
+ "expand-brackets": {
+ "version": "2.1.4",
+ "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz",
+ "integrity": "sha1-t3c14xXOMPa27/D4OwQVGiJEliI=",
+ "dev": true,
+ "requires": {
+ "debug": "^2.3.3",
+ "define-property": "^0.2.5",
+ "extend-shallow": "^2.0.1",
+ "posix-character-classes": "^0.1.0",
+ "regex-not": "^1.0.0",
+ "snapdragon": "^0.8.1",
+ "to-regex": "^3.0.1"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=",
+ "dev": true
+ }
+ }
+ },
+ "expand-tilde": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/expand-tilde/-/expand-tilde-2.0.2.tgz",
+ "integrity": "sha1-l+gBqgUt8CRU3kawK/YhZCzchQI=",
+ "dev": true,
+ "requires": {
+ "homedir-polyfill": "^1.0.1"
+ }
+ },
+ "express": {
+ "version": "4.18.2",
+ "resolved": "https://registry.npmjs.org/express/-/express-4.18.2.tgz",
+ "integrity": "sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==",
+ "dev": true,
+ "requires": {
+ "accepts": "~1.3.8",
+ "array-flatten": "1.1.1",
+ "body-parser": "1.20.1",
+ "content-disposition": "0.5.4",
+ "content-type": "~1.0.4",
+ "cookie": "0.5.0",
+ "cookie-signature": "1.0.6",
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "finalhandler": "1.2.0",
+ "fresh": "0.5.2",
+ "http-errors": "2.0.0",
+ "merge-descriptors": "1.0.1",
+ "methods": "~1.1.2",
+ "on-finished": "2.4.1",
+ "parseurl": "~1.3.3",
+ "path-to-regexp": "0.1.7",
+ "proxy-addr": "~2.0.7",
+ "qs": "6.11.0",
+ "range-parser": "~1.2.1",
+ "safe-buffer": "5.2.1",
+ "send": "0.18.0",
+ "serve-static": "1.15.0",
+ "setprototypeof": "1.2.0",
+ "statuses": "2.0.1",
+ "type-is": "~1.6.18",
+ "utils-merge": "1.0.1",
+ "vary": "~1.1.2"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "depd": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
+ "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
+ "dev": true
+ },
+ "ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=",
+ "dev": true
+ },
+ "on-finished": {
+ "version": "2.4.1",
+ "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
+ "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
+ "dev": true,
+ "requires": {
+ "ee-first": "1.1.1"
+ }
+ },
+ "safe-buffer": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
+ "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
+ "dev": true
+ },
+ "statuses": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
+ "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
+ "dev": true
+ }
+ }
+ },
+ "ext": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/ext/-/ext-1.7.0.tgz",
+ "integrity": "sha512-6hxeJYaL110a9b5TEJSj0gojyHQAmA2ch5Os+ySCiA1QGdS697XWY1pzsrSjqA9LDEEgdB/KypIlR59RcLuHYw==",
+ "dev": true,
+ "requires": {
+ "type": "^2.7.2"
+ },
+ "dependencies": {
+ "type": {
+ "version": "2.7.2",
+ "resolved": "https://registry.npmjs.org/type/-/type-2.7.2.tgz",
+ "integrity": "sha512-dzlvlNlt6AXU7EBSfpAscydQ7gXB+pPGsPnfJnZpiNJBDj7IaJzQlBZYGdEi4R9HmPdBv2XmWJ6YUtoTa7lmCw==",
+ "dev": true
+ }
+ }
+ },
+ "extend": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
+ "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==",
+ "dev": true
+ },
+ "extend-shallow": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+ "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+ "dev": true,
+ "requires": {
+ "is-extendable": "^0.1.0"
+ }
+ },
+ "external-editor": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.1.0.tgz",
+ "integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==",
+ "dev": true,
+ "requires": {
+ "chardet": "^0.7.0",
+ "iconv-lite": "^0.4.24",
+ "tmp": "^0.0.33"
+ }
+ },
+ "extglob": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz",
+ "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==",
+ "dev": true,
+ "requires": {
+ "array-unique": "^0.3.2",
+ "define-property": "^1.0.0",
+ "expand-brackets": "^2.1.4",
+ "extend-shallow": "^2.0.1",
+ "fragment-cache": "^0.2.1",
+ "regex-not": "^1.0.0",
+ "snapdragon": "^0.8.1",
+ "to-regex": "^3.0.1"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+ "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^1.0.0"
+ }
+ },
+ "is-accessor-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+ "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-data-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+ "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-descriptor": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+ "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+ "dev": true,
+ "requires": {
+ "is-accessor-descriptor": "^1.0.0",
+ "is-data-descriptor": "^1.0.0",
+ "kind-of": "^6.0.2"
+ }
+ }
+ }
+ },
+ "fast-deep-equal": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
+ "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
+ "dev": true
+ },
+ "fast-diff": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/fast-diff/-/fast-diff-1.3.0.tgz",
+ "integrity": "sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw==",
+ "dev": true
+ },
+ "fast-glob": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.1.tgz",
+ "integrity": "sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg==",
+ "dev": true,
+ "requires": {
+ "@nodelib/fs.stat": "^2.0.2",
+ "@nodelib/fs.walk": "^1.2.3",
+ "glob-parent": "^5.1.2",
+ "merge2": "^1.3.0",
+ "micromatch": "^4.0.4"
+ }
+ },
+ "fast-json-stable-stringify": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
+ "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==",
+ "dev": true
+ },
+ "fast-levenshtein": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz",
+ "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==",
+ "dev": true
+ },
+ "fastq": {
+ "version": "1.13.0",
+ "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.13.0.tgz",
+ "integrity": "sha512-YpkpUnK8od0o1hmeSc7UUs/eB/vIPWJYjKck2QKIzAf71Vm1AAQ3EbuZB3g2JIy+pg+ERD0vqI79KyZiB2e2Nw==",
+ "dev": true,
+ "requires": {
+ "reusify": "^1.0.4"
+ }
+ },
+ "figures": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz",
+ "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==",
+ "dev": true,
+ "requires": {
+ "escape-string-regexp": "^1.0.5"
+ }
+ },
+ "file-entry-cache": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz",
+ "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==",
+ "dev": true,
+ "requires": {
+ "flat-cache": "^3.0.4"
+ }
+ },
+ "file-sync-cmp": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/file-sync-cmp/-/file-sync-cmp-0.1.1.tgz",
+ "integrity": "sha1-peeo/7+kk7Q7kju9TKiaU7Y7YSs=",
+ "dev": true
+ },
+ "file-uri-to-path": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz",
+ "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==",
+ "dev": true,
+ "optional": true
+ },
+ "fill-range": {
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz",
+ "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==",
+ "dev": true,
+ "requires": {
+ "to-regex-range": "^5.0.1"
+ }
+ },
+ "finalhandler": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz",
+ "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==",
+ "dev": true,
+ "requires": {
+ "debug": "2.6.9",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "on-finished": "2.4.1",
+ "parseurl": "~1.3.3",
+ "statuses": "2.0.1",
+ "unpipe": "~1.0.0"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
+ "dev": true
+ },
+ "on-finished": {
+ "version": "2.4.1",
+ "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
+ "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
+ "dev": true,
+ "requires": {
+ "ee-first": "1.1.1"
+ }
+ },
+ "statuses": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
+ "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
+ "dev": true
+ }
+ }
+ },
+ "find-up": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz",
+ "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==",
+ "dev": true,
+ "requires": {
+ "locate-path": "^6.0.0",
+ "path-exists": "^4.0.0"
+ }
+ },
+ "findup-sync": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/findup-sync/-/findup-sync-5.0.0.tgz",
+ "integrity": "sha512-MzwXju70AuyflbgeOhzvQWAvvQdo1XL0A9bVvlXsYcFEBM87WR4OakL4OfZq+QRmr+duJubio+UtNQCPsVESzQ==",
+ "dev": true,
+ "requires": {
+ "detect-file": "^1.0.0",
+ "is-glob": "^4.0.3",
+ "micromatch": "^4.0.4",
+ "resolve-dir": "^1.0.1"
+ }
+ },
+ "fined": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/fined/-/fined-1.2.0.tgz",
+ "integrity": "sha512-ZYDqPLGxDkDhDZBjZBb+oD1+j0rA4E0pXY50eplAAOPg2N/gUBSSk5IM1/QhPfyVo19lJ+CvXpqfvk+b2p/8Ng==",
+ "dev": true,
+ "requires": {
+ "expand-tilde": "^2.0.2",
+ "is-plain-object": "^2.0.3",
+ "object.defaults": "^1.1.0",
+ "object.pick": "^1.2.0",
+ "parse-filepath": "^1.0.1"
+ }
+ },
+ "flagged-respawn": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/flagged-respawn/-/flagged-respawn-1.0.1.tgz",
+ "integrity": "sha512-lNaHNVymajmk0OJMBn8fVUAU1BtDeKIqKoVhk4xAALB57aALg6b4W0MfJ/cUE0g9YBXy5XhSlPIpYIJ7HaY/3Q==",
+ "dev": true
+ },
+ "flat-cache": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz",
+ "integrity": "sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==",
+ "dev": true,
+ "requires": {
+ "flatted": "^3.1.0",
+ "rimraf": "^3.0.2"
+ }
+ },
+ "flatted": {
+ "version": "3.2.5",
+ "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.5.tgz",
+ "integrity": "sha512-WIWGi2L3DyTUvUrwRKgGi9TwxQMUEqPOPQBVi71R96jZXJdFskXEmf54BoZaS1kknGODoIGASGEzBUYdyMCBJg==",
+ "dev": true
+ },
+ "follow-redirects": {
+ "version": "1.14.9",
+ "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.9.tgz",
+ "integrity": "sha512-MQDfihBQYMcyy5dhRDJUHcw7lb2Pv/TuE6xP1vyraLukNDHKbDxDNaOE3NbCAdKQApno+GPRyo1YAp89yCjK4w==",
+ "dev": true
+ },
+ "for-each": {
+ "version": "0.3.3",
+ "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.3.tgz",
+ "integrity": "sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==",
+ "dev": true,
+ "requires": {
+ "is-callable": "^1.1.3"
+ }
+ },
+ "for-in": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz",
+ "integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA=",
+ "dev": true
+ },
+ "for-own": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/for-own/-/for-own-1.0.0.tgz",
+ "integrity": "sha1-xjMy9BXO3EsE2/5wz4NklMU8tEs=",
+ "dev": true,
+ "requires": {
+ "for-in": "^1.0.1"
+ }
+ },
+ "forwarded": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz",
+ "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==",
+ "dev": true
+ },
+ "fragment-cache": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz",
+ "integrity": "sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk=",
+ "dev": true,
+ "requires": {
+ "map-cache": "^0.2.2"
+ }
+ },
+ "fresh": {
+ "version": "0.5.2",
+ "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
+ "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==",
+ "dev": true
+ },
+ "fs-readdir-recursive": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/fs-readdir-recursive/-/fs-readdir-recursive-1.1.0.tgz",
+ "integrity": "sha512-GNanXlVr2pf02+sPN40XN8HG+ePaNcvM0q5mZBd668Obwb0yD5GiUbZOFgwn8kGMY6I3mdyDJzieUy3PTYyTRA==",
+ "dev": true
+ },
+ "fs.realpath": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
+ "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=",
+ "dev": true
+ },
+ "fsevents": {
+ "version": "2.3.2",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz",
+ "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==",
+ "dev": true,
+ "optional": true
+ },
+ "function-bind": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
+ "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
+ "dev": true
+ },
+ "function.prototype.name": {
+ "version": "1.1.6",
+ "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.6.tgz",
+ "integrity": "sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1",
+ "functions-have-names": "^1.2.3"
+ }
+ },
+ "functional.js": {
+ "version": "0.6.16",
+ "resolved": "https://registry.npmjs.org/functional.js/-/functional.js-0.6.16.tgz",
+ "integrity": "sha512-WDtBOEhQLa+s/1XyOsElhwXiQCMSipqSevaTmpEZzV8bDSNAExbr08NeG8Qkr/PSQbxhyZzFx/CmFJutAG1S0A==",
+ "dev": true
+ },
+ "functions-have-names": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/functions-have-names/-/functions-have-names-1.2.3.tgz",
+ "integrity": "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==",
+ "dev": true
+ },
+ "gensync": {
+ "version": "1.0.0-beta.2",
+ "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
+ "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
+ "dev": true
+ },
+ "get-intrinsic": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.2.tgz",
+ "integrity": "sha512-0gSo4ml/0j98Y3lngkFEot/zhiCeWsbYIlZ+uZOVgzLyLaUw7wxUL+nCTP0XJvJg1AXulJRI3UJi8GsbDuxdGA==",
+ "dev": true,
+ "requires": {
+ "function-bind": "^1.1.2",
+ "has-proto": "^1.0.1",
+ "has-symbols": "^1.0.3",
+ "hasown": "^2.0.0"
+ }
+ },
+ "get-stream": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz",
+ "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==",
+ "dev": true
+ },
+ "get-symbol-description": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.0.tgz",
+ "integrity": "sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2",
+ "get-intrinsic": "^1.1.1"
+ }
+ },
+ "get-value": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz",
+ "integrity": "sha1-3BXKHGcjh8p2vTesCjlbogQqLCg=",
+ "dev": true
+ },
+ "getobject": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/getobject/-/getobject-1.0.2.tgz",
+ "integrity": "sha512-2zblDBaFcb3rB4rF77XVnuINOE2h2k/OnqXAiy0IrTxUfV1iFp3la33oAQVY9pCpWU268WFYVt2t71hlMuLsOg==",
+ "dev": true
+ },
+ "glob": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.0.tgz",
+ "integrity": "sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q==",
+ "dev": true,
+ "requires": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.0.4",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ }
+ },
+ "glob-parent": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
+ "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
+ "dev": true,
+ "requires": {
+ "is-glob": "^4.0.1"
+ }
+ },
+ "global-modules": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-1.0.0.tgz",
+ "integrity": "sha512-sKzpEkf11GpOFuw0Zzjzmt4B4UZwjOcG757PPvrfhxcLFbq0wpsgpOqxpxtxFiCG4DtG93M6XRVbF2oGdev7bg==",
+ "dev": true,
+ "requires": {
+ "global-prefix": "^1.0.1",
+ "is-windows": "^1.0.1",
+ "resolve-dir": "^1.0.0"
+ }
+ },
+ "global-prefix": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-1.0.2.tgz",
+ "integrity": "sha1-2/dDxsFJklk8ZVVoy2btMsASLr4=",
+ "dev": true,
+ "requires": {
+ "expand-tilde": "^2.0.2",
+ "homedir-polyfill": "^1.0.1",
+ "ini": "^1.3.4",
+ "is-windows": "^1.0.1",
+ "which": "^1.2.14"
+ },
+ "dependencies": {
+ "ini": {
+ "version": "1.3.8",
+ "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz",
+ "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==",
+ "dev": true
+ },
+ "which": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz",
+ "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==",
+ "dev": true,
+ "requires": {
+ "isexe": "^2.0.0"
+ }
+ }
+ }
+ },
+ "globals": {
+ "version": "11.12.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz",
+ "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==",
+ "dev": true
+ },
+ "globalthis": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.3.tgz",
+ "integrity": "sha512-sFdI5LyBiNTHjRd7cGPWapiHWMOXKyuBNX/cWJ3NfzrZQVa8GI/8cofCl74AOVqq9W5kNmguTIzJ/1s2gyI9wA==",
+ "dev": true,
+ "requires": {
+ "define-properties": "^1.1.3"
+ }
+ },
+ "globby": {
+ "version": "11.1.0",
+ "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz",
+ "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==",
+ "dev": true,
+ "requires": {
+ "array-union": "^2.1.0",
+ "dir-glob": "^3.0.1",
+ "fast-glob": "^3.2.9",
+ "ignore": "^5.2.0",
+ "merge2": "^1.4.1",
+ "slash": "^3.0.0"
+ },
+ "dependencies": {
+ "slash": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz",
+ "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==",
+ "dev": true
+ }
+ }
+ },
+ "gopd": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz",
+ "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==",
+ "dev": true,
+ "requires": {
+ "get-intrinsic": "^1.1.3"
+ }
+ },
+ "graceful-fs": {
+ "version": "4.2.9",
+ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.9.tgz",
+ "integrity": "sha512-NtNxqUcXgpW2iMrfqSfR73Glt39K+BLwWsPs94yR63v45T0Wbej7eRmL5cWfwEgqXnmjQp3zaJTshdRW/qC2ZQ==",
+ "dev": true
+ },
+ "graphemer": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz",
+ "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==",
+ "dev": true
+ },
+ "grunt": {
+ "version": "1.6.1",
+ "resolved": "https://registry.npmjs.org/grunt/-/grunt-1.6.1.tgz",
+ "integrity": "sha512-/ABUy3gYWu5iBmrUSRBP97JLpQUm0GgVveDCp6t3yRNIoltIYw7rEj3g5y1o2PGPR2vfTRGa7WC/LZHLTXnEzA==",
+ "dev": true,
+ "requires": {
+ "dateformat": "~4.6.2",
+ "eventemitter2": "~0.4.13",
+ "exit": "~0.1.2",
+ "findup-sync": "~5.0.0",
+ "glob": "~7.1.6",
+ "grunt-cli": "~1.4.3",
+ "grunt-known-options": "~2.0.0",
+ "grunt-legacy-log": "~3.0.0",
+ "grunt-legacy-util": "~2.0.1",
+ "iconv-lite": "~0.6.3",
+ "js-yaml": "~3.14.0",
+ "minimatch": "~3.0.4",
+ "nopt": "~3.0.6"
+ },
+ "dependencies": {
+ "glob": {
+ "version": "7.1.7",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz",
+ "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==",
+ "dev": true,
+ "requires": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.0.4",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ }
+ },
+ "iconv-lite": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz",
+ "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==",
+ "dev": true,
+ "requires": {
+ "safer-buffer": ">= 2.1.2 < 3.0.0"
+ }
+ },
+ "minimatch": {
+ "version": "3.0.8",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.8.tgz",
+ "integrity": "sha512-6FsRAQsxQ61mw+qP1ZzbL9Bc78x2p5OqNgNpnoAFLTrX8n5Kxph0CsnhmKKNXTWjXqU5L0pGPR7hYk+XWZr60Q==",
+ "dev": true,
+ "requires": {
+ "brace-expansion": "^1.1.7"
+ }
+ }
+ }
+ },
+ "grunt-cli": {
+ "version": "1.4.3",
+ "resolved": "https://registry.npmjs.org/grunt-cli/-/grunt-cli-1.4.3.tgz",
+ "integrity": "sha512-9Dtx/AhVeB4LYzsViCjUQkd0Kw0McN2gYpdmGYKtE2a5Yt7v1Q+HYZVWhqXc/kGnxlMtqKDxSwotiGeFmkrCoQ==",
+ "dev": true,
+ "requires": {
+ "grunt-known-options": "~2.0.0",
+ "interpret": "~1.1.0",
+ "liftup": "~3.0.1",
+ "nopt": "~4.0.1",
+ "v8flags": "~3.2.0"
+ },
+ "dependencies": {
+ "nopt": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/nopt/-/nopt-4.0.3.tgz",
+ "integrity": "sha512-CvaGwVMztSMJLOeXPrez7fyfObdZqNUK1cPAEzLHrTybIua9pMdmmPR5YwtfNftIOMv3DPUhFaxsZMNTQO20Kg==",
+ "dev": true,
+ "requires": {
+ "abbrev": "1",
+ "osenv": "^0.1.4"
+ }
+ }
+ }
+ },
+ "grunt-contrib-clean": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/grunt-contrib-clean/-/grunt-contrib-clean-2.0.1.tgz",
+ "integrity": "sha512-uRvnXfhiZt8akb/ZRDHJpQQtkkVkqc/opWO4Po/9ehC2hPxgptB9S6JHDC/Nxswo4CJSM0iFPT/Iym3cEMWzKA==",
+ "dev": true,
+ "requires": {
+ "async": "^3.2.3",
+ "rimraf": "^2.6.2"
+ },
+ "dependencies": {
+ "rimraf": {
+ "version": "2.7.1",
+ "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz",
+ "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==",
+ "dev": true,
+ "requires": {
+ "glob": "^7.1.3"
+ }
+ }
+ }
+ },
+ "grunt-contrib-copy": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/grunt-contrib-copy/-/grunt-contrib-copy-1.0.0.tgz",
+ "integrity": "sha1-cGDGWB6QS4qw0A8HbgqPbj58NXM=",
+ "dev": true,
+ "requires": {
+ "chalk": "^1.1.1",
+ "file-sync-cmp": "^0.1.0"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz",
+ "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=",
+ "dev": true
+ },
+ "ansi-styles": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz",
+ "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=",
+ "dev": true
+ },
+ "chalk": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz",
+ "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=",
+ "dev": true,
+ "requires": {
+ "ansi-styles": "^2.2.1",
+ "escape-string-regexp": "^1.0.2",
+ "has-ansi": "^2.0.0",
+ "strip-ansi": "^3.0.0",
+ "supports-color": "^2.0.0"
+ }
+ },
+ "strip-ansi": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz",
+ "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=",
+ "dev": true,
+ "requires": {
+ "ansi-regex": "^2.0.0"
+ }
+ },
+ "supports-color": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz",
+ "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=",
+ "dev": true
+ }
+ }
+ },
+ "grunt-known-options": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/grunt-known-options/-/grunt-known-options-2.0.0.tgz",
+ "integrity": "sha512-GD7cTz0I4SAede1/+pAbmJRG44zFLPipVtdL9o3vqx9IEyb7b4/Y3s7r6ofI3CchR5GvYJ+8buCSioDv5dQLiA==",
+ "dev": true
+ },
+ "grunt-legacy-log": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/grunt-legacy-log/-/grunt-legacy-log-3.0.0.tgz",
+ "integrity": "sha512-GHZQzZmhyq0u3hr7aHW4qUH0xDzwp2YXldLPZTCjlOeGscAOWWPftZG3XioW8MasGp+OBRIu39LFx14SLjXRcA==",
+ "dev": true,
+ "requires": {
+ "colors": "~1.1.2",
+ "grunt-legacy-log-utils": "~2.1.0",
+ "hooker": "~0.2.3",
+ "lodash": "~4.17.19"
+ }
+ },
+ "grunt-legacy-log-utils": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/grunt-legacy-log-utils/-/grunt-legacy-log-utils-2.1.0.tgz",
+ "integrity": "sha512-lwquaPXJtKQk0rUM1IQAop5noEpwFqOXasVoedLeNzaibf/OPWjKYvvdqnEHNmU+0T0CaReAXIbGo747ZD+Aaw==",
+ "dev": true,
+ "requires": {
+ "chalk": "~4.1.0",
+ "lodash": "~4.17.19"
+ },
+ "dependencies": {
+ "ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "dev": true,
+ "requires": {
+ "color-convert": "^2.0.1"
+ }
+ },
+ "chalk": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
+ "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+ "dev": true,
+ "requires": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ }
+ },
+ "color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "dev": true,
+ "requires": {
+ "color-name": "~1.1.4"
+ }
+ },
+ "color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
+ "dev": true
+ },
+ "has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+ "dev": true
+ },
+ "supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "dev": true,
+ "requires": {
+ "has-flag": "^4.0.0"
+ }
+ }
+ }
+ },
+ "grunt-legacy-util": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/grunt-legacy-util/-/grunt-legacy-util-2.0.1.tgz",
+ "integrity": "sha512-2bQiD4fzXqX8rhNdXkAywCadeqiPiay0oQny77wA2F3WF4grPJXCvAcyoWUJV+po/b15glGkxuSiQCK299UC2w==",
+ "dev": true,
+ "requires": {
+ "async": "~3.2.0",
+ "exit": "~0.1.2",
+ "getobject": "~1.0.0",
+ "hooker": "~0.2.3",
+ "lodash": "~4.17.21",
+ "underscore.string": "~3.3.5",
+ "which": "~2.0.2"
+ }
+ },
+ "grunt-run": {
+ "version": "0.8.1",
+ "resolved": "https://registry.npmjs.org/grunt-run/-/grunt-run-0.8.1.tgz",
+ "integrity": "sha512-+wvoOJevugcjMLldbVCyspRHHntwVIJiTGjx0HFq+UwXhVPe7AaAiUdY4135CS68pAoRLhd7pAILpL2ITe1tmA==",
+ "dev": true,
+ "requires": {
+ "strip-ansi": "^3.0.0"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz",
+ "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=",
+ "dev": true
+ },
+ "strip-ansi": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz",
+ "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=",
+ "dev": true,
+ "requires": {
+ "ansi-regex": "^2.0.0"
+ }
+ }
+ }
+ },
+ "grunt-timer": {
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/grunt-timer/-/grunt-timer-0.6.0.tgz",
+ "integrity": "sha512-CZc6NsOGr/HMo70RLXTBCPJm8seJok/lQL2VFygXEvrhj6fYJEvyDIEdSUTSNiXSyC4eNoN8zUNrzMXGwinjdQ==",
+ "dev": true,
+ "requires": {
+ "bash-color": "^0.0.3",
+ "duration": "^0.2.0",
+ "functional.js": "^0.6.10",
+ "hooker": "^0.2.3"
+ }
+ },
+ "grunt-ts": {
+ "version": "6.0.0-beta.22",
+ "resolved": "https://registry.npmjs.org/grunt-ts/-/grunt-ts-6.0.0-beta.22.tgz",
+ "integrity": "sha512-g9e+ZImQ7W38dfpwhp0+GUltXWidy3YGPfIA/IyGL5HMv6wmVmMMoSgscI5swhs2HSPf8yAvXAAJbwrouijoRg==",
+ "dev": true,
+ "requires": {
+ "chokidar": "^2.0.4",
+ "csproj2ts": "^1.1.0",
+ "detect-indent": "^4.0.0",
+ "detect-newline": "^2.1.0",
+ "es6-promise": "~0.1.1",
+ "jsmin2": "^1.2.1",
+ "lodash": "~4.17.10",
+ "ncp": "0.5.1",
+ "rimraf": "2.2.6",
+ "semver": "^5.3.0",
+ "strip-bom": "^2.0.0"
+ },
+ "dependencies": {
+ "anymatch": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-2.0.0.tgz",
+ "integrity": "sha512-5teOsQWABXHHBFP9y3skS5P3d/WfWXpv3FUpy+LorMrNYaT9pI4oLMQX7jzQ2KklNpGpWHzdCXTDT2Y3XGlZBw==",
+ "dev": true,
+ "requires": {
+ "micromatch": "^3.1.4",
+ "normalize-path": "^2.1.1"
+ },
+ "dependencies": {
+ "normalize-path": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-2.1.1.tgz",
+ "integrity": "sha1-GrKLVW4Zg2Oowab35vogE3/mrtk=",
+ "dev": true,
+ "requires": {
+ "remove-trailing-separator": "^1.0.1"
+ }
+ }
+ }
+ },
+ "binary-extensions": {
+ "version": "1.13.1",
+ "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-1.13.1.tgz",
+ "integrity": "sha512-Un7MIEDdUC5gNpcGDV97op1Ywk748MpHcFTHoYs6qnj1Z3j7I53VG3nwZhKzoBZmbdRNnb6WRdFlwl7tSDuZGw==",
+ "dev": true
+ },
+ "braces": {
+ "version": "2.3.2",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz",
+ "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==",
+ "dev": true,
+ "requires": {
+ "arr-flatten": "^1.1.0",
+ "array-unique": "^0.3.2",
+ "extend-shallow": "^2.0.1",
+ "fill-range": "^4.0.0",
+ "isobject": "^3.0.1",
+ "repeat-element": "^1.1.2",
+ "snapdragon": "^0.8.1",
+ "snapdragon-node": "^2.0.1",
+ "split-string": "^3.0.2",
+ "to-regex": "^3.0.1"
+ }
+ },
+ "chokidar": {
+ "version": "2.1.8",
+ "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-2.1.8.tgz",
+ "integrity": "sha512-ZmZUazfOzf0Nve7duiCKD23PFSCs4JPoYyccjUFF3aQkQadqBhfzhjkwBH2mNOG9cTBwhamM37EIsIkZw3nRgg==",
+ "dev": true,
+ "requires": {
+ "anymatch": "^2.0.0",
+ "async-each": "^1.0.1",
+ "braces": "^2.3.2",
+ "fsevents": "^1.2.7",
+ "glob-parent": "^3.1.0",
+ "inherits": "^2.0.3",
+ "is-binary-path": "^1.0.0",
+ "is-glob": "^4.0.0",
+ "normalize-path": "^3.0.0",
+ "path-is-absolute": "^1.0.0",
+ "readdirp": "^2.2.1",
+ "upath": "^1.1.1"
+ }
+ },
+ "define-property": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz",
+ "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^1.0.2",
+ "isobject": "^3.0.1"
+ }
+ },
+ "fill-range": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz",
+ "integrity": "sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc=",
+ "dev": true,
+ "requires": {
+ "extend-shallow": "^2.0.1",
+ "is-number": "^3.0.0",
+ "repeat-string": "^1.6.1",
+ "to-regex-range": "^2.1.0"
+ }
+ },
+ "fsevents": {
+ "version": "1.2.13",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.13.tgz",
+ "integrity": "sha512-oWb1Z6mkHIskLzEJ/XWX0srkpkTQ7vaopMQkyaEIoq0fmtFVxOthb8cCxeT+p3ynTdkk/RZwbgG4brR5BeWECw==",
+ "dev": true,
+ "optional": true,
+ "requires": {
+ "bindings": "^1.5.0",
+ "nan": "^2.12.1"
+ }
+ },
+ "glob-parent": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-3.1.0.tgz",
+ "integrity": "sha1-nmr2KZ2NO9K9QEMIMr0RPfkGxa4=",
+ "dev": true,
+ "requires": {
+ "is-glob": "^3.1.0",
+ "path-dirname": "^1.0.0"
+ },
+ "dependencies": {
+ "is-glob": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-3.1.0.tgz",
+ "integrity": "sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo=",
+ "dev": true,
+ "requires": {
+ "is-extglob": "^2.1.0"
+ }
+ }
+ }
+ },
+ "is-accessor-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+ "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-binary-path": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-1.0.1.tgz",
+ "integrity": "sha1-dfFmQrSA8YenEcgUFh/TpKdlWJg=",
+ "dev": true,
+ "requires": {
+ "binary-extensions": "^1.0.0"
+ }
+ },
+ "is-data-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+ "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-descriptor": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+ "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+ "dev": true,
+ "requires": {
+ "is-accessor-descriptor": "^1.0.0",
+ "is-data-descriptor": "^1.0.0",
+ "kind-of": "^6.0.2"
+ }
+ },
+ "is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "dev": true,
+ "requires": {
+ "is-plain-object": "^2.0.4"
+ }
+ },
+ "is-number": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz",
+ "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=",
+ "dev": true,
+ "requires": {
+ "kind-of": "^3.0.2"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dev": true,
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "micromatch": {
+ "version": "3.1.10",
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz",
+ "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==",
+ "dev": true,
+ "requires": {
+ "arr-diff": "^4.0.0",
+ "array-unique": "^0.3.2",
+ "braces": "^2.3.1",
+ "define-property": "^2.0.2",
+ "extend-shallow": "^3.0.2",
+ "extglob": "^2.0.4",
+ "fragment-cache": "^0.2.1",
+ "kind-of": "^6.0.2",
+ "nanomatch": "^1.2.9",
+ "object.pick": "^1.3.0",
+ "regex-not": "^1.0.0",
+ "snapdragon": "^0.8.1",
+ "to-regex": "^3.0.2"
+ },
+ "dependencies": {
+ "extend-shallow": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+ "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+ "dev": true,
+ "requires": {
+ "assign-symbols": "^1.0.0",
+ "is-extendable": "^1.0.1"
+ }
+ }
+ }
+ },
+ "readdirp": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-2.2.1.tgz",
+ "integrity": "sha512-1JU/8q+VgFZyxwrJ+SVIOsh+KywWGpds3NTqikiKpDMZWScmAYyKIgqkO+ARvNWJfXeXR1zxz7aHF4u4CyH6vQ==",
+ "dev": true,
+ "requires": {
+ "graceful-fs": "^4.1.11",
+ "micromatch": "^3.1.10",
+ "readable-stream": "^2.0.2"
+ }
+ },
+ "rimraf": {
+ "version": "2.2.6",
+ "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.2.6.tgz",
+ "integrity": "sha1-xZWXVpsU2VatKcrMQr3d9fDqT0w=",
+ "dev": true
+ },
+ "semver": {
+ "version": "5.7.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
+ "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==",
+ "dev": true
+ },
+ "to-regex-range": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-2.1.1.tgz",
+ "integrity": "sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg=",
+ "dev": true,
+ "requires": {
+ "is-number": "^3.0.0",
+ "repeat-string": "^1.6.1"
+ }
+ }
+ }
+ },
+ "gts": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/gts/-/gts-5.2.0.tgz",
+ "integrity": "sha512-25qOnePUUX7upFc4ycqWersDBq+o1X6hXUTW56JOWCxPYKJXQ1RWzqT9q+2SU3LfPKJf+4sz4Dw3VT0p96Kv6g==",
+ "dev": true,
+ "requires": {
+ "@typescript-eslint/eslint-plugin": "5.62.0",
+ "@typescript-eslint/parser": "5.62.0",
+ "chalk": "^4.1.2",
+ "eslint": "8.50.0",
+ "eslint-config-prettier": "9.0.0",
+ "eslint-plugin-node": "11.1.0",
+ "eslint-plugin-prettier": "5.0.0",
+ "execa": "^5.0.0",
+ "inquirer": "^7.3.3",
+ "json5": "^2.1.3",
+ "meow": "^9.0.0",
+ "ncp": "^2.0.0",
+ "prettier": "3.0.3",
+ "rimraf": "3.0.2",
+ "write-file-atomic": "^4.0.0"
+ },
+ "dependencies": {
+ "@eslint/js": {
+ "version": "8.50.0",
+ "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.50.0.tgz",
+ "integrity": "sha512-NCC3zz2+nvYd+Ckfh87rA47zfu2QsQpvc6k1yzTk+b9KzRj0wkGa8LSoGOXN6Zv4lRf/EIoZ80biDh9HOI+RNQ==",
+ "dev": true
+ },
+ "@typescript-eslint/eslint-plugin": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.62.0.tgz",
+ "integrity": "sha512-TiZzBSJja/LbhNPvk6yc0JrX9XqhQ0hdh6M2svYfsHGejaKFIAGd9MQ+ERIMzLGlN/kZoYIgdxFV0PuljTKXag==",
+ "dev": true,
+ "requires": {
+ "@eslint-community/regexpp": "^4.4.0",
+ "@typescript-eslint/scope-manager": "5.62.0",
+ "@typescript-eslint/type-utils": "5.62.0",
+ "@typescript-eslint/utils": "5.62.0",
+ "debug": "^4.3.4",
+ "graphemer": "^1.4.0",
+ "ignore": "^5.2.0",
+ "natural-compare-lite": "^1.4.0",
+ "semver": "^7.3.7",
+ "tsutils": "^3.21.0"
+ }
+ },
+ "@typescript-eslint/parser": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.62.0.tgz",
+ "integrity": "sha512-VlJEV0fOQ7BExOsHYAGrgbEiZoi8D+Bl2+f6V2RrXerRSylnp+ZBHmPvaIa8cz0Ajx7WO7Z5RqfgYg7ED1nRhA==",
+ "dev": true,
+ "requires": {
+ "@typescript-eslint/scope-manager": "5.62.0",
+ "@typescript-eslint/types": "5.62.0",
+ "@typescript-eslint/typescript-estree": "5.62.0",
+ "debug": "^4.3.4"
+ }
+ },
+ "@typescript-eslint/scope-manager": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.62.0.tgz",
+ "integrity": "sha512-VXuvVvZeQCQb5Zgf4HAxc04q5j+WrNAtNh9OwCsCgpKqESMTu3tF/jhZ3xG6T4NZwWl65Bg8KuS2uEvhSfLl0w==",
+ "dev": true,
+ "requires": {
+ "@typescript-eslint/types": "5.62.0",
+ "@typescript-eslint/visitor-keys": "5.62.0"
+ }
+ },
+ "@typescript-eslint/types": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.62.0.tgz",
+ "integrity": "sha512-87NVngcbVXUahrRTqIK27gD2t5Cu1yuCXxbLcFtCzZGlfyVWWh8mLHkoxzjsB6DDNnvdL+fW8MiwPEJyGJQDgQ==",
+ "dev": true
+ },
+ "@typescript-eslint/typescript-estree": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.62.0.tgz",
+ "integrity": "sha512-CmcQ6uY7b9y694lKdRB8FEel7JbU/40iSAPomu++SjLMntB+2Leay2LO6i8VnJk58MtE9/nQSFIH6jpyRWyYzA==",
+ "dev": true,
+ "requires": {
+ "@typescript-eslint/types": "5.62.0",
+ "@typescript-eslint/visitor-keys": "5.62.0",
+ "debug": "^4.3.4",
+ "globby": "^11.1.0",
+ "is-glob": "^4.0.3",
+ "semver": "^7.3.7",
+ "tsutils": "^3.21.0"
+ }
+ },
+ "@typescript-eslint/utils": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.62.0.tgz",
+ "integrity": "sha512-n8oxjeb5aIbPFEtmQxQYOLI0i9n5ySBEY/ZEHHZqKQSFnxio1rv6dthascc9dLuwrL0RC5mPCxB7vnAVGAYWAQ==",
+ "dev": true,
+ "requires": {
+ "@eslint-community/eslint-utils": "^4.2.0",
+ "@types/json-schema": "^7.0.9",
+ "@types/semver": "^7.3.12",
+ "@typescript-eslint/scope-manager": "5.62.0",
+ "@typescript-eslint/types": "5.62.0",
+ "@typescript-eslint/typescript-estree": "5.62.0",
+ "eslint-scope": "^5.1.1",
+ "semver": "^7.3.7"
+ },
+ "dependencies": {
+ "eslint-scope": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz",
+ "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==",
+ "dev": true,
+ "requires": {
+ "esrecurse": "^4.3.0",
+ "estraverse": "^4.1.1"
+ }
+ }
+ }
+ },
+ "@typescript-eslint/visitor-keys": {
+ "version": "5.62.0",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.62.0.tgz",
+ "integrity": "sha512-07ny+LHRzQXepkGg6w0mFY41fVUNBrL2Roj/++7V1txKugfjm/Ci/qSND03r2RhlJhJYMcTn9AhhSSqQp0Ysyw==",
+ "dev": true,
+ "requires": {
+ "@typescript-eslint/types": "5.62.0",
+ "eslint-visitor-keys": "^3.3.0"
+ }
+ },
+ "ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "dev": true,
+ "requires": {
+ "color-convert": "^2.0.1"
+ }
+ },
+ "argparse": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
+ "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
+ "dev": true
+ },
+ "chalk": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
+ "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+ "dev": true,
+ "requires": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ }
+ },
+ "color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "dev": true,
+ "requires": {
+ "color-name": "~1.1.4"
+ }
+ },
+ "color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
+ "dev": true
+ },
+ "escape-string-regexp": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz",
+ "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==",
+ "dev": true
+ },
+ "eslint": {
+ "version": "8.50.0",
+ "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.50.0.tgz",
+ "integrity": "sha512-FOnOGSuFuFLv/Sa+FDVRZl4GGVAAFFi8LecRsI5a1tMO5HIE8nCm4ivAlzt4dT3ol/PaaGC0rJEEXQmHJBGoOg==",
+ "dev": true,
+ "requires": {
+ "@eslint-community/eslint-utils": "^4.2.0",
+ "@eslint-community/regexpp": "^4.6.1",
+ "@eslint/eslintrc": "^2.1.2",
+ "@eslint/js": "8.50.0",
+ "@humanwhocodes/config-array": "^0.11.11",
+ "@humanwhocodes/module-importer": "^1.0.1",
+ "@nodelib/fs.walk": "^1.2.8",
+ "ajv": "^6.12.4",
+ "chalk": "^4.0.0",
+ "cross-spawn": "^7.0.2",
+ "debug": "^4.3.2",
+ "doctrine": "^3.0.0",
+ "escape-string-regexp": "^4.0.0",
+ "eslint-scope": "^7.2.2",
+ "eslint-visitor-keys": "^3.4.3",
+ "espree": "^9.6.1",
+ "esquery": "^1.4.2",
+ "esutils": "^2.0.2",
+ "fast-deep-equal": "^3.1.3",
+ "file-entry-cache": "^6.0.1",
+ "find-up": "^5.0.0",
+ "glob-parent": "^6.0.2",
+ "globals": "^13.19.0",
+ "graphemer": "^1.4.0",
+ "ignore": "^5.2.0",
+ "imurmurhash": "^0.1.4",
+ "is-glob": "^4.0.0",
+ "is-path-inside": "^3.0.3",
+ "js-yaml": "^4.1.0",
+ "json-stable-stringify-without-jsonify": "^1.0.1",
+ "levn": "^0.4.1",
+ "lodash.merge": "^4.6.2",
+ "minimatch": "^3.1.2",
+ "natural-compare": "^1.4.0",
+ "optionator": "^0.9.3",
+ "strip-ansi": "^6.0.1",
+ "text-table": "^0.2.0"
+ }
+ },
+ "estraverse": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz",
+ "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==",
+ "dev": true
+ },
+ "glob-parent": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz",
+ "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==",
+ "dev": true,
+ "requires": {
+ "is-glob": "^4.0.3"
+ }
+ },
+ "globals": {
+ "version": "13.23.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-13.23.0.tgz",
+ "integrity": "sha512-XAmF0RjlrjY23MA51q3HltdlGxUpXPvg0GioKiD9X6HD28iMjo2dKC8Vqwm7lne4GNr78+RHTfliktR6ZH09wA==",
+ "dev": true,
+ "requires": {
+ "type-fest": "^0.20.2"
+ }
+ },
+ "has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+ "dev": true
+ },
+ "js-yaml": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
+ "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
+ "dev": true,
+ "requires": {
+ "argparse": "^2.0.1"
+ }
+ },
+ "ncp": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ncp/-/ncp-2.0.0.tgz",
+ "integrity": "sha1-GVoh1sRuNh0vsSgbo4uR6d9727M=",
+ "dev": true
+ },
+ "semver": {
+ "version": "7.5.4",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
+ "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
+ "dev": true,
+ "requires": {
+ "lru-cache": "^6.0.0"
+ }
+ },
+ "supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "dev": true,
+ "requires": {
+ "has-flag": "^4.0.0"
+ }
+ },
+ "type-fest": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz",
+ "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==",
+ "dev": true
+ }
+ }
+ },
+ "hard-rejection": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/hard-rejection/-/hard-rejection-2.1.0.tgz",
+ "integrity": "sha512-VIZB+ibDhx7ObhAe7OVtoEbuP4h/MuOTHJ+J8h/eBXotJYl0fBgR72xDFCKgIh22OJZIOVNxBMWuhAr10r8HdA==",
+ "dev": true
+ },
+ "has-ansi": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz",
+ "integrity": "sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE=",
+ "dev": true,
+ "requires": {
+ "ansi-regex": "^2.0.0"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz",
+ "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=",
+ "dev": true
+ }
+ }
+ },
+ "has-bigints": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.2.tgz",
+ "integrity": "sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==",
+ "dev": true
+ },
+ "has-flag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
+ "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==",
+ "dev": true
+ },
+ "has-property-descriptors": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.1.tgz",
+ "integrity": "sha512-VsX8eaIewvas0xnvinAe9bw4WfIeODpGYikiWYLH+dma0Jw6KHYqWiWfhQlgOVK8D6PvjubK5Uc4P0iIhIcNVg==",
+ "dev": true,
+ "requires": {
+ "get-intrinsic": "^1.2.2"
+ }
+ },
+ "has-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz",
+ "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==",
+ "dev": true
+ },
+ "has-symbols": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz",
+ "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==",
+ "dev": true
+ },
+ "has-tostringtag": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz",
+ "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==",
+ "dev": true,
+ "requires": {
+ "has-symbols": "^1.0.2"
+ }
+ },
+ "has-value": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz",
+ "integrity": "sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc=",
+ "dev": true,
+ "requires": {
+ "get-value": "^2.0.6",
+ "has-values": "^1.0.0",
+ "isobject": "^3.0.0"
+ }
+ },
+ "has-values": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/has-values/-/has-values-1.0.0.tgz",
+ "integrity": "sha1-lbC2P+whRmGab+V/51Yo1aOe/k8=",
+ "dev": true,
+ "requires": {
+ "is-number": "^3.0.0",
+ "kind-of": "^4.0.0"
+ },
+ "dependencies": {
+ "is-number": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz",
+ "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=",
+ "dev": true,
+ "requires": {
+ "kind-of": "^3.0.2"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dev": true,
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "kind-of": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-4.0.0.tgz",
+ "integrity": "sha1-IIE989cSkosgc3hpGkUGb65y3Vc=",
+ "dev": true,
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "hasown": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.0.tgz",
+ "integrity": "sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA==",
+ "dev": true,
+ "requires": {
+ "function-bind": "^1.1.2"
+ }
+ },
+ "he": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz",
+ "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==",
+ "dev": true
+ },
+ "homedir-polyfill": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/homedir-polyfill/-/homedir-polyfill-1.0.3.tgz",
+ "integrity": "sha512-eSmmWE5bZTK2Nou4g0AI3zZ9rswp7GRKoKXS1BLUkvPviOqs4YTN1djQIqrXy9k5gEtdLPy86JjRwsNM9tnDcA==",
+ "dev": true,
+ "requires": {
+ "parse-passwd": "^1.0.0"
+ }
+ },
+ "hooker": {
+ "version": "0.2.3",
+ "resolved": "https://registry.npmjs.org/hooker/-/hooker-0.2.3.tgz",
+ "integrity": "sha1-uDT3I8xKJCqmWWNFnfbZhMXT2Vk=",
+ "dev": true
+ },
+ "hosted-git-info": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-4.1.0.tgz",
+ "integrity": "sha512-kyCuEOWjJqZuDbRHzL8V93NzQhwIB71oFWSyzVo+KPZI+pnQPPxucdkrOZvkLRnrf5URsQM+IJ09Dw29cRALIA==",
+ "dev": true,
+ "requires": {
+ "lru-cache": "^6.0.0"
+ }
+ },
+ "html-encoding-sniffer": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-3.0.0.tgz",
+ "integrity": "sha512-oWv4T4yJ52iKrufjnyZPkrN0CH3QnrUqdB6In1g5Fe1mia8GmF36gnfNySxoZtxD5+NmYw1EElVXiBk93UeskA==",
+ "dev": true,
+ "requires": {
+ "whatwg-encoding": "^2.0.0"
+ }
+ },
+ "http-errors": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz",
+ "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==",
+ "dev": true,
+ "requires": {
+ "depd": "2.0.0",
+ "inherits": "2.0.4",
+ "setprototypeof": "1.2.0",
+ "statuses": "2.0.1",
+ "toidentifier": "1.0.1"
+ },
+ "dependencies": {
+ "depd": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
+ "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
+ "dev": true
+ },
+ "statuses": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
+ "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
+ "dev": true
+ }
+ }
+ },
+ "http-proxy": {
+ "version": "1.18.1",
+ "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz",
+ "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==",
+ "dev": true,
+ "requires": {
+ "eventemitter3": "^4.0.0",
+ "follow-redirects": "^1.0.0",
+ "requires-port": "^1.0.0"
+ }
+ },
+ "http-server": {
+ "version": "14.1.1",
+ "resolved": "https://registry.npmjs.org/http-server/-/http-server-14.1.1.tgz",
+ "integrity": "sha512-+cbxadF40UXd9T01zUHgA+rlo2Bg1Srer4+B4NwIHdaGxAGGv59nYRnGGDJ9LBk7alpS0US+J+bLLdQOOkJq4A==",
+ "dev": true,
+ "requires": {
+ "basic-auth": "^2.0.1",
+ "chalk": "^4.1.2",
+ "corser": "^2.0.1",
+ "he": "^1.2.0",
+ "html-encoding-sniffer": "^3.0.0",
+ "http-proxy": "^1.18.1",
+ "mime": "^1.6.0",
+ "minimist": "^1.2.6",
+ "opener": "^1.5.1",
+ "portfinder": "^1.0.28",
+ "secure-compare": "3.0.1",
+ "union": "~0.5.0",
+ "url-join": "^4.0.1"
+ },
+ "dependencies": {
+ "ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "dev": true,
+ "requires": {
+ "color-convert": "^2.0.1"
+ }
+ },
+ "chalk": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
+ "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+ "dev": true,
+ "requires": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ }
+ },
+ "color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "dev": true,
+ "requires": {
+ "color-name": "~1.1.4"
+ }
+ },
+ "color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
+ "dev": true
+ },
+ "has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+ "dev": true
+ },
+ "supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "dev": true,
+ "requires": {
+ "has-flag": "^4.0.0"
+ }
+ }
+ }
+ },
+ "human-signals": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz",
+ "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==",
+ "dev": true
+ },
+ "iconv-lite": {
+ "version": "0.4.24",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
+ "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
+ "dev": true,
+ "requires": {
+ "safer-buffer": ">= 2.1.2 < 3"
+ }
+ },
+ "ignore": {
+ "version": "5.2.4",
+ "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz",
+ "integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==",
+ "dev": true
+ },
+ "import-fresh": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz",
+ "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==",
+ "dev": true,
+ "requires": {
+ "parent-module": "^1.0.0",
+ "resolve-from": "^4.0.0"
+ }
+ },
+ "imurmurhash": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz",
+ "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=",
+ "dev": true
+ },
+ "indent-string": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz",
+ "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==",
+ "dev": true
+ },
+ "inflight": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
+ "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=",
+ "dev": true,
+ "requires": {
+ "once": "^1.3.0",
+ "wrappy": "1"
+ }
+ },
+ "inherits": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
+ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==",
+ "dev": true
+ },
+ "inquirer": {
+ "version": "7.3.3",
+ "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-7.3.3.tgz",
+ "integrity": "sha512-JG3eIAj5V9CwcGvuOmoo6LB9kbAYT8HXffUl6memuszlwDC/qvFAJw49XJ5NROSFNPxp3iQg1GqkFhaY/CR0IA==",
+ "dev": true,
+ "requires": {
+ "ansi-escapes": "^4.2.1",
+ "chalk": "^4.1.0",
+ "cli-cursor": "^3.1.0",
+ "cli-width": "^3.0.0",
+ "external-editor": "^3.0.3",
+ "figures": "^3.0.0",
+ "lodash": "^4.17.19",
+ "mute-stream": "0.0.8",
+ "run-async": "^2.4.0",
+ "rxjs": "^6.6.0",
+ "string-width": "^4.1.0",
+ "strip-ansi": "^6.0.0",
+ "through": "^2.3.6"
+ },
+ "dependencies": {
+ "ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "dev": true,
+ "requires": {
+ "color-convert": "^2.0.1"
+ }
+ },
+ "chalk": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
+ "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+ "dev": true,
+ "requires": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ }
+ },
+ "color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "dev": true,
+ "requires": {
+ "color-name": "~1.1.4"
+ }
+ },
+ "color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
+ "dev": true
+ },
+ "has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+ "dev": true
+ },
+ "supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "dev": true,
+ "requires": {
+ "has-flag": "^4.0.0"
+ }
+ }
+ }
+ },
+ "internal-slot": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.6.tgz",
+ "integrity": "sha512-Xj6dv+PsbtwyPpEflsejS+oIZxmMlV44zAhG479uYu89MsjcYOhCFnNyKrkJrihbsiasQyY0afoCl/9BLR65bg==",
+ "dev": true,
+ "requires": {
+ "get-intrinsic": "^1.2.2",
+ "hasown": "^2.0.0",
+ "side-channel": "^1.0.4"
+ }
+ },
+ "interpret": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.1.0.tgz",
+ "integrity": "sha1-ftGxQQxqDg94z5XTuEQMY/eLhhQ=",
+ "dev": true
+ },
+ "ipaddr.js": {
+ "version": "1.9.1",
+ "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
+ "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==",
+ "dev": true
+ },
+ "is-absolute": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-absolute/-/is-absolute-1.0.0.tgz",
+ "integrity": "sha512-dOWoqflvcydARa360Gvv18DZ/gRuHKi2NU/wU5X1ZFzdYfH29nkiNZsF3mp4OJ3H4yo9Mx8A/uAGNzpzPN3yBA==",
+ "dev": true,
+ "requires": {
+ "is-relative": "^1.0.0",
+ "is-windows": "^1.0.1"
+ }
+ },
+ "is-accessor-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz",
+ "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=",
+ "dev": true,
+ "requires": {
+ "kind-of": "^3.0.2"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dev": true,
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "is-array-buffer": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.2.tgz",
+ "integrity": "sha512-y+FyyR/w8vfIRq4eQcM1EYgSTnmHXPqaF+IgzgraytCFq5Xh8lllDVmAZolPJiZttZLeFSINPYMaEJ7/vWUa1w==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2",
+ "get-intrinsic": "^1.2.0",
+ "is-typed-array": "^1.1.10"
+ }
+ },
+ "is-arrayish": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz",
+ "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=",
+ "dev": true
+ },
+ "is-bigint": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz",
+ "integrity": "sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==",
+ "dev": true,
+ "requires": {
+ "has-bigints": "^1.0.1"
+ }
+ },
+ "is-binary-path": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz",
+ "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==",
+ "dev": true,
+ "requires": {
+ "binary-extensions": "^2.0.0"
+ }
+ },
+ "is-boolean-object": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.2.tgz",
+ "integrity": "sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2",
+ "has-tostringtag": "^1.0.0"
+ }
+ },
+ "is-buffer": {
+ "version": "1.1.6",
+ "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz",
+ "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==",
+ "dev": true
+ },
+ "is-callable": {
+ "version": "1.2.7",
+ "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz",
+ "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==",
+ "dev": true
+ },
+ "is-core-module": {
+ "version": "2.13.1",
+ "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.1.tgz",
+ "integrity": "sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==",
+ "dev": true,
+ "requires": {
+ "hasown": "^2.0.0"
+ }
+ },
+ "is-data-descriptor": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz",
+ "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=",
+ "dev": true,
+ "requires": {
+ "kind-of": "^3.0.2"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dev": true,
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "is-date-object": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.5.tgz",
+ "integrity": "sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==",
+ "dev": true,
+ "requires": {
+ "has-tostringtag": "^1.0.0"
+ }
+ },
+ "is-descriptor": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz",
+ "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==",
+ "dev": true,
+ "requires": {
+ "is-accessor-descriptor": "^0.1.6",
+ "is-data-descriptor": "^0.1.4",
+ "kind-of": "^5.0.0"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz",
+ "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==",
+ "dev": true
+ }
+ }
+ },
+ "is-docker": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-3.0.0.tgz",
+ "integrity": "sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==",
+ "dev": true
+ },
+ "is-extendable": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz",
+ "integrity": "sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik=",
+ "dev": true
+ },
+ "is-extglob": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
+ "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=",
+ "dev": true
+ },
+ "is-finite": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/is-finite/-/is-finite-1.1.0.tgz",
+ "integrity": "sha512-cdyMtqX/BOqqNBBiKlIVkytNHm49MtMlYyn1zxzvJKWmFMlGzm+ry5BBfYyeY9YmNKbRSo/o7OX9w9ale0wg3w==",
+ "dev": true
+ },
+ "is-fullwidth-code-point": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
+ "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
+ "dev": true
+ },
+ "is-glob": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
+ "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==",
+ "dev": true,
+ "requires": {
+ "is-extglob": "^2.1.1"
+ }
+ },
+ "is-inside-container": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-inside-container/-/is-inside-container-1.0.0.tgz",
+ "integrity": "sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==",
+ "dev": true,
+ "requires": {
+ "is-docker": "^3.0.0"
+ }
+ },
+ "is-negative-zero": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.2.tgz",
+ "integrity": "sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA==",
+ "dev": true
+ },
+ "is-number": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
+ "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
+ "dev": true
+ },
+ "is-number-object": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.7.tgz",
+ "integrity": "sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==",
+ "dev": true,
+ "requires": {
+ "has-tostringtag": "^1.0.0"
+ }
+ },
+ "is-path-inside": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz",
+ "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==",
+ "dev": true
+ },
+ "is-plain-obj": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz",
+ "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4=",
+ "dev": true
+ },
+ "is-plain-object": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz",
+ "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==",
+ "dev": true,
+ "requires": {
+ "isobject": "^3.0.1"
+ }
+ },
+ "is-regex": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz",
+ "integrity": "sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2",
+ "has-tostringtag": "^1.0.0"
+ }
+ },
+ "is-relative": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-relative/-/is-relative-1.0.0.tgz",
+ "integrity": "sha512-Kw/ReK0iqwKeu0MITLFuj0jbPAmEiOsIwyIXvvbfa6QfmN9pkD1M+8pdk7Rl/dTKbH34/XBFMbgD4iMJhLQbGA==",
+ "dev": true,
+ "requires": {
+ "is-unc-path": "^1.0.0"
+ }
+ },
+ "is-shared-array-buffer": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.2.tgz",
+ "integrity": "sha512-sqN2UDu1/0y6uvXyStCOzyhAjCSlHceFoMKJW8W9EU9cvic/QdsZ0kEU93HEy3IUEFZIiH/3w+AH/UQbPHNdhA==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2"
+ }
+ },
+ "is-stream": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz",
+ "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==",
+ "dev": true
+ },
+ "is-string": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.7.tgz",
+ "integrity": "sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==",
+ "dev": true,
+ "requires": {
+ "has-tostringtag": "^1.0.0"
+ }
+ },
+ "is-symbol": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.4.tgz",
+ "integrity": "sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==",
+ "dev": true,
+ "requires": {
+ "has-symbols": "^1.0.2"
+ }
+ },
+ "is-typed-array": {
+ "version": "1.1.12",
+ "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.12.tgz",
+ "integrity": "sha512-Z14TF2JNG8Lss5/HMqt0//T9JeHXttXy5pH/DBU4vi98ozO2btxzq9MwYDZYnKwU8nRsz/+GVFVRDq3DkVuSPg==",
+ "dev": true,
+ "requires": {
+ "which-typed-array": "^1.1.11"
+ }
+ },
+ "is-unc-path": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-unc-path/-/is-unc-path-1.0.0.tgz",
+ "integrity": "sha512-mrGpVd0fs7WWLfVsStvgF6iEJnbjDFZh9/emhRDcGWTduTfNHd9CHeUwH3gYIjdbwo4On6hunkztwOaAw0yllQ==",
+ "dev": true,
+ "requires": {
+ "unc-path-regex": "^0.1.2"
+ }
+ },
+ "is-utf8": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/is-utf8/-/is-utf8-0.2.1.tgz",
+ "integrity": "sha1-Sw2hRCEE0bM2NA6AeX6GXPOffXI=",
+ "dev": true
+ },
+ "is-weakref": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.0.2.tgz",
+ "integrity": "sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2"
+ }
+ },
+ "is-windows": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz",
+ "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==",
+ "dev": true
+ },
+ "is-wsl": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz",
+ "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==",
+ "dev": true,
+ "requires": {
+ "is-docker": "^2.0.0"
+ },
+ "dependencies": {
+ "is-docker": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz",
+ "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==",
+ "dev": true
+ }
+ }
+ },
+ "isarray": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
+ "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=",
+ "dev": true
+ },
+ "isexe": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
+ "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=",
+ "dev": true
+ },
+ "isobject": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz",
+ "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=",
+ "dev": true
+ },
+ "js-tokens": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==",
+ "dev": true
+ },
+ "js-yaml": {
+ "version": "3.14.1",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz",
+ "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==",
+ "dev": true,
+ "requires": {
+ "argparse": "^1.0.7",
+ "esprima": "^4.0.0"
+ }
+ },
+ "jsesc": {
+ "version": "2.5.2",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz",
+ "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==",
+ "dev": true
+ },
+ "jsmin2": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/jsmin2/-/jsmin2-1.2.1.tgz",
+ "integrity": "sha1-iPvi+/dfCpH2YCD9mBzWk/S/5X4=",
+ "dev": true
+ },
+ "json-parse-even-better-errors": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz",
+ "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==",
+ "dev": true
+ },
+ "json-schema-traverse": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
+ "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==",
+ "dev": true
+ },
+ "json-stable-stringify-without-jsonify": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz",
+ "integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=",
+ "dev": true
+ },
+ "json5": {
+ "version": "2.2.3",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
+ "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
+ "dev": true
+ },
+ "jsonc-parser": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/jsonc-parser/-/jsonc-parser-3.2.0.tgz",
+ "integrity": "sha512-gfFQZrcTc8CnKXp6Y4/CBT3fTc0OVuDofpre4aEeEpSBPV5X5v4+Vmx+8snU7RLPrNHPKSgLxGo9YuQzz20o+w==",
+ "dev": true
+ },
+ "kind-of": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz",
+ "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==",
+ "dev": true
+ },
+ "levn": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz",
+ "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==",
+ "dev": true,
+ "requires": {
+ "prelude-ls": "^1.2.1",
+ "type-check": "~0.4.0"
+ }
+ },
+ "liftup": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/liftup/-/liftup-3.0.1.tgz",
+ "integrity": "sha512-yRHaiQDizWSzoXk3APcA71eOI/UuhEkNN9DiW2Tt44mhYzX4joFoCZlxsSOF7RyeLlfqzFLQI1ngFq3ggMPhOw==",
+ "dev": true,
+ "requires": {
+ "extend": "^3.0.2",
+ "findup-sync": "^4.0.0",
+ "fined": "^1.2.0",
+ "flagged-respawn": "^1.0.1",
+ "is-plain-object": "^2.0.4",
+ "object.map": "^1.0.1",
+ "rechoir": "^0.7.0",
+ "resolve": "^1.19.0"
+ },
+ "dependencies": {
+ "findup-sync": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/findup-sync/-/findup-sync-4.0.0.tgz",
+ "integrity": "sha512-6jvvn/12IC4quLBL1KNokxC7wWTvYncaVUYSoxWw7YykPLuRrnv4qdHcSOywOI5RpkOVGeQRtWM8/q+G6W6qfQ==",
+ "dev": true,
+ "requires": {
+ "detect-file": "^1.0.0",
+ "is-glob": "^4.0.0",
+ "micromatch": "^4.0.2",
+ "resolve-dir": "^1.0.1"
+ }
+ }
+ }
+ },
+ "lines-and-columns": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz",
+ "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==",
+ "dev": true
+ },
+ "locate-path": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz",
+ "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==",
+ "dev": true,
+ "requires": {
+ "p-locate": "^5.0.0"
+ }
+ },
+ "lodash": {
+ "version": "4.17.21",
+ "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
+ "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==",
+ "dev": true
+ },
+ "lodash.merge": {
+ "version": "4.6.2",
+ "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz",
+ "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==",
+ "dev": true
+ },
+ "lru-cache": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz",
+ "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==",
+ "dev": true,
+ "requires": {
+ "yallist": "^4.0.0"
+ }
+ },
+ "lunr": {
+ "version": "2.3.9",
+ "resolved": "https://registry.npmjs.org/lunr/-/lunr-2.3.9.tgz",
+ "integrity": "sha512-zTU3DaZaF3Rt9rhN3uBMGQD3dD2/vFQqnvZCDv4dl5iOzq2IZQqTxu90r4E5J+nP70J3ilqVCrbho2eWaeW8Ow==",
+ "dev": true
+ },
+ "make-dir": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-2.1.0.tgz",
+ "integrity": "sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==",
+ "dev": true,
+ "requires": {
+ "pify": "^4.0.1",
+ "semver": "^5.6.0"
+ },
+ "dependencies": {
+ "semver": {
+ "version": "5.7.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
+ "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==",
+ "dev": true
+ }
+ }
+ },
+ "make-error": {
+ "version": "1.3.6",
+ "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz",
+ "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==",
+ "dev": true
+ },
+ "make-iterator": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/make-iterator/-/make-iterator-1.0.1.tgz",
+ "integrity": "sha512-pxiuXh0iVEq7VM7KMIhs5gxsfxCux2URptUQaXo4iZZJxBAzTPOLE2BumO5dbfVYq/hBJFBR/a1mFDmOx5AGmw==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.2"
+ }
+ },
+ "map-cache": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz",
+ "integrity": "sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8=",
+ "dev": true
+ },
+ "map-obj": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-4.3.0.tgz",
+ "integrity": "sha512-hdN1wVrZbb29eBGiGjJbeP8JbKjq1urkHJ/LIP/NY48MZ1QVXUsQBV1G1zvYFHn1XE06cwjBsOI2K3Ulnj1YXQ==",
+ "dev": true
+ },
+ "map-visit": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/map-visit/-/map-visit-1.0.0.tgz",
+ "integrity": "sha1-7Nyo8TFE5mDxtb1B8S80edmN+48=",
+ "dev": true,
+ "requires": {
+ "object-visit": "^1.0.0"
+ }
+ },
+ "marked": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/marked/-/marked-4.3.0.tgz",
+ "integrity": "sha512-PRsaiG84bK+AMvxziE/lCFss8juXjNaWzVbN5tXAm4XjeaS9NAHhop+PjQxz2A9h8Q4M/xGmzP8vqNwy6JeK0A==",
+ "dev": true
+ },
+ "media-typer": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
+ "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==",
+ "dev": true
+ },
+ "meow": {
+ "version": "9.0.0",
+ "resolved": "https://registry.npmjs.org/meow/-/meow-9.0.0.tgz",
+ "integrity": "sha512-+obSblOQmRhcyBt62furQqRAQpNyWXo8BuQ5bN7dG8wmwQ+vwHKp/rCFD4CrTP8CsDQD1sjoZ94K417XEUk8IQ==",
+ "dev": true,
+ "requires": {
+ "@types/minimist": "^1.2.0",
+ "camelcase-keys": "^6.2.2",
+ "decamelize": "^1.2.0",
+ "decamelize-keys": "^1.1.0",
+ "hard-rejection": "^2.1.0",
+ "minimist-options": "4.1.0",
+ "normalize-package-data": "^3.0.0",
+ "read-pkg-up": "^7.0.1",
+ "redent": "^3.0.0",
+ "trim-newlines": "^3.0.0",
+ "type-fest": "^0.18.0",
+ "yargs-parser": "^20.2.3"
+ },
+ "dependencies": {
+ "type-fest": {
+ "version": "0.18.1",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.18.1.tgz",
+ "integrity": "sha512-OIAYXk8+ISY+qTOwkHtKqzAuxchoMiD9Udx+FSGQDuiRR+PJKJHc2NJAXlbhkGwTt/4/nKZxELY1w3ReWOL8mw==",
+ "dev": true
+ }
+ }
+ },
+ "merge-descriptors": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz",
+ "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E=",
+ "dev": true
+ },
+ "merge-stream": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz",
+ "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==",
+ "dev": true
+ },
+ "merge2": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
+ "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==",
+ "dev": true
+ },
+ "methods": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
+ "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4=",
+ "dev": true
+ },
+ "micromatch": {
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.4.tgz",
+ "integrity": "sha512-pRmzw/XUcwXGpD9aI9q/0XOwLNygjETJ8y0ao0wdqprrzDa4YnxLcz7fQRZr8voh8V10kGhABbNcHVk5wHgWwg==",
+ "dev": true,
+ "requires": {
+ "braces": "^3.0.1",
+ "picomatch": "^2.2.3"
+ }
+ },
+ "mime": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz",
+ "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==",
+ "dev": true
+ },
+ "mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "dev": true
+ },
+ "mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "dev": true,
+ "requires": {
+ "mime-db": "1.52.0"
+ }
+ },
+ "mimic-fn": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz",
+ "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==",
+ "dev": true
+ },
+ "min-indent": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz",
+ "integrity": "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==",
+ "dev": true
+ },
+ "minimatch": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
+ "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
+ "dev": true,
+ "requires": {
+ "brace-expansion": "^1.1.7"
+ }
+ },
+ "minimist": {
+ "version": "1.2.7",
+ "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.7.tgz",
+ "integrity": "sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==",
+ "dev": true
+ },
+ "minimist-options": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/minimist-options/-/minimist-options-4.1.0.tgz",
+ "integrity": "sha512-Q4r8ghd80yhO/0j1O3B2BjweX3fiHg9cdOwjJd2J76Q135c+NDxGCqdYKQ1SKBuFfgWbAUzBfvYjPUEeNgqN1A==",
+ "dev": true,
+ "requires": {
+ "arrify": "^1.0.1",
+ "is-plain-obj": "^1.1.0",
+ "kind-of": "^6.0.3"
+ }
+ },
+ "mixin-deep": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz",
+ "integrity": "sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==",
+ "dev": true,
+ "requires": {
+ "for-in": "^1.0.2",
+ "is-extendable": "^1.0.1"
+ },
+ "dependencies": {
+ "is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "dev": true,
+ "requires": {
+ "is-plain-object": "^2.0.4"
+ }
+ }
+ }
+ },
+ "morgan": {
+ "version": "1.10.0",
+ "resolved": "https://registry.npmjs.org/morgan/-/morgan-1.10.0.tgz",
+ "integrity": "sha512-AbegBVI4sh6El+1gNwvD5YIck7nSA36weD7xvIxG4in80j/UoK8AEGaWnnz8v1GxonMCltmlNs5ZKbGvl9b1XQ==",
+ "dev": true,
+ "requires": {
+ "basic-auth": "~2.0.1",
+ "debug": "2.6.9",
+ "depd": "~2.0.0",
+ "on-finished": "~2.3.0",
+ "on-headers": "~1.0.2"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "depd": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
+ "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
+ "dev": true
+ },
+ "ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=",
+ "dev": true
+ }
+ }
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
+ "dev": true
+ },
+ "mute-stream": {
+ "version": "0.0.8",
+ "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.8.tgz",
+ "integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==",
+ "dev": true
+ },
+ "nan": {
+ "version": "2.15.0",
+ "resolved": "https://registry.npmjs.org/nan/-/nan-2.15.0.tgz",
+ "integrity": "sha512-8ZtvEnA2c5aYCZYd1cvgdnU6cqwixRoYg70xPLWUws5ORTa/lnw+u4amixRS/Ac5U5mQVgp9pnlSUnbNWFaWZQ==",
+ "dev": true,
+ "optional": true
+ },
+ "nanomatch": {
+ "version": "1.2.13",
+ "resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz",
+ "integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==",
+ "dev": true,
+ "requires": {
+ "arr-diff": "^4.0.0",
+ "array-unique": "^0.3.2",
+ "define-property": "^2.0.2",
+ "extend-shallow": "^3.0.2",
+ "fragment-cache": "^0.2.1",
+ "is-windows": "^1.0.2",
+ "kind-of": "^6.0.2",
+ "object.pick": "^1.3.0",
+ "regex-not": "^1.0.0",
+ "snapdragon": "^0.8.1",
+ "to-regex": "^3.0.1"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz",
+ "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^1.0.2",
+ "isobject": "^3.0.1"
+ }
+ },
+ "extend-shallow": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+ "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+ "dev": true,
+ "requires": {
+ "assign-symbols": "^1.0.0",
+ "is-extendable": "^1.0.1"
+ }
+ },
+ "is-accessor-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+ "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-data-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+ "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-descriptor": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+ "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+ "dev": true,
+ "requires": {
+ "is-accessor-descriptor": "^1.0.0",
+ "is-data-descriptor": "^1.0.0",
+ "kind-of": "^6.0.2"
+ }
+ },
+ "is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "dev": true,
+ "requires": {
+ "is-plain-object": "^2.0.4"
+ }
+ }
+ }
+ },
+ "natural-compare": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz",
+ "integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=",
+ "dev": true
+ },
+ "natural-compare-lite": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/natural-compare-lite/-/natural-compare-lite-1.4.0.tgz",
+ "integrity": "sha512-Tj+HTDSJJKaZnfiuw+iaF9skdPpTo2GtEly5JHnWV/hfv2Qj/9RKsGISQtLh2ox3l5EAGw487hnBee0sIJ6v2g==",
+ "dev": true
+ },
+ "ncp": {
+ "version": "0.5.1",
+ "resolved": "https://registry.npmjs.org/ncp/-/ncp-0.5.1.tgz",
+ "integrity": "sha1-dDmFMW49tFkoG1hxaehFc1oFQ58=",
+ "dev": true
+ },
+ "negotiator": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz",
+ "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==",
+ "dev": true
+ },
+ "next-tick": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.1.0.tgz",
+ "integrity": "sha512-CXdUiJembsNjuToQvxayPZF9Vqht7hewsvy2sOWafLvi2awflj9mOC6bHIg50orX8IJvWKY9wYQ/zB2kogPslQ==",
+ "dev": true
+ },
+ "node-releases": {
+ "version": "2.0.13",
+ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.13.tgz",
+ "integrity": "sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ==",
+ "dev": true
+ },
+ "nopt": {
+ "version": "3.0.6",
+ "resolved": "https://registry.npmjs.org/nopt/-/nopt-3.0.6.tgz",
+ "integrity": "sha1-xkZdvwirzU2zWTF/eaxopkayj/k=",
+ "dev": true,
+ "requires": {
+ "abbrev": "1"
+ }
+ },
+ "normalize-package-data": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-3.0.3.tgz",
+ "integrity": "sha512-p2W1sgqij3zMMyRC067Dg16bfzVH+w7hyegmpIvZ4JNjqtGOVAIvLmjBx3yP7YTe9vKJgkoNOPjwQGogDoMXFA==",
+ "dev": true,
+ "requires": {
+ "hosted-git-info": "^4.0.1",
+ "is-core-module": "^2.5.0",
+ "semver": "^7.3.4",
+ "validate-npm-package-license": "^3.0.1"
+ },
+ "dependencies": {
+ "semver": {
+ "version": "7.3.5",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.3.5.tgz",
+ "integrity": "sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ==",
+ "dev": true,
+ "requires": {
+ "lru-cache": "^6.0.0"
+ }
+ }
+ }
+ },
+ "normalize-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
+ "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==",
+ "dev": true
+ },
+ "npm-run-path": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz",
+ "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==",
+ "dev": true,
+ "requires": {
+ "path-key": "^3.0.0"
+ }
+ },
+ "object-copy": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/object-copy/-/object-copy-0.1.0.tgz",
+ "integrity": "sha1-fn2Fi3gb18mRpBupde04EnVOmYw=",
+ "dev": true,
+ "requires": {
+ "copy-descriptor": "^0.1.0",
+ "define-property": "^0.2.5",
+ "kind-of": "^3.0.3"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dev": true,
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "object-inspect": {
+ "version": "1.13.1",
+ "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz",
+ "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==",
+ "dev": true
+ },
+ "object-keys": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz",
+ "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==",
+ "dev": true
+ },
+ "object-visit": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/object-visit/-/object-visit-1.0.1.tgz",
+ "integrity": "sha1-95xEk68MU3e1n+OdOV5BBC3QRbs=",
+ "dev": true,
+ "requires": {
+ "isobject": "^3.0.0"
+ }
+ },
+ "object.assign": {
+ "version": "4.1.4",
+ "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.4.tgz",
+ "integrity": "sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.4",
+ "has-symbols": "^1.0.3",
+ "object-keys": "^1.1.1"
+ }
+ },
+ "object.defaults": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/object.defaults/-/object.defaults-1.1.0.tgz",
+ "integrity": "sha1-On+GgzS0B96gbaFtiNXNKeQ1/s8=",
+ "dev": true,
+ "requires": {
+ "array-each": "^1.0.1",
+ "array-slice": "^1.0.0",
+ "for-own": "^1.0.0",
+ "isobject": "^3.0.0"
+ }
+ },
+ "object.fromentries": {
+ "version": "2.0.7",
+ "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.7.tgz",
+ "integrity": "sha512-UPbPHML6sL8PI/mOqPwsH4G6iyXcCGzLin8KvEPenOZN5lpCNBZZQ+V62vdjB1mQHrmqGQt5/OJzemUA+KJmEA==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1"
+ }
+ },
+ "object.groupby": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/object.groupby/-/object.groupby-1.0.1.tgz",
+ "integrity": "sha512-HqaQtqLnp/8Bn4GL16cj+CUYbnpe1bh0TtEaWvybszDG4tgxCJuRpV8VGuvNaI1fAnI4lUJzDG55MXcOH4JZcQ==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1",
+ "get-intrinsic": "^1.2.1"
+ }
+ },
+ "object.map": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/object.map/-/object.map-1.0.1.tgz",
+ "integrity": "sha1-z4Plncj8wK1fQlDh94s7gb2AHTc=",
+ "dev": true,
+ "requires": {
+ "for-own": "^1.0.0",
+ "make-iterator": "^1.0.0"
+ }
+ },
+ "object.pick": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/object.pick/-/object.pick-1.3.0.tgz",
+ "integrity": "sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c=",
+ "dev": true,
+ "requires": {
+ "isobject": "^3.0.1"
+ }
+ },
+ "object.values": {
+ "version": "1.1.7",
+ "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.7.tgz",
+ "integrity": "sha512-aU6xnDFYT3x17e/f0IiiwlGPTy2jzMySGfUB4fq6z7CV8l85CWHDk5ErhyhpfDHhrOMwGFhSQkhMGHaIotA6Ng==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1"
+ }
+ },
+ "on-finished": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
+ "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=",
+ "dev": true,
+ "requires": {
+ "ee-first": "1.1.1"
+ }
+ },
+ "on-headers": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz",
+ "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==",
+ "dev": true
+ },
+ "once": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
+ "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=",
+ "dev": true,
+ "requires": {
+ "wrappy": "1"
+ }
+ },
+ "onetime": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz",
+ "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==",
+ "dev": true,
+ "requires": {
+ "mimic-fn": "^2.1.0"
+ }
+ },
+ "open": {
+ "version": "9.1.0",
+ "resolved": "https://registry.npmjs.org/open/-/open-9.1.0.tgz",
+ "integrity": "sha512-OS+QTnw1/4vrf+9hh1jc1jnYjzSG4ttTBB8UxOwAnInG3Uo4ssetzC1ihqaIHjLJnA5GGlRl6QlZXOTQhRBUvg==",
+ "dev": true,
+ "requires": {
+ "default-browser": "^4.0.0",
+ "define-lazy-prop": "^3.0.0",
+ "is-inside-container": "^1.0.0",
+ "is-wsl": "^2.2.0"
+ }
+ },
+ "opener": {
+ "version": "1.5.2",
+ "resolved": "https://registry.npmjs.org/opener/-/opener-1.5.2.tgz",
+ "integrity": "sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==",
+ "dev": true
+ },
+ "optionator": {
+ "version": "0.9.3",
+ "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.3.tgz",
+ "integrity": "sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg==",
+ "dev": true,
+ "requires": {
+ "@aashutoshrathi/word-wrap": "^1.2.3",
+ "deep-is": "^0.1.3",
+ "fast-levenshtein": "^2.0.6",
+ "levn": "^0.4.1",
+ "prelude-ls": "^1.2.1",
+ "type-check": "^0.4.0"
+ }
+ },
+ "os-homedir": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/os-homedir/-/os-homedir-1.0.2.tgz",
+ "integrity": "sha1-/7xJiDNuDoM94MFox+8VISGqf7M=",
+ "dev": true
+ },
+ "os-tmpdir": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz",
+ "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=",
+ "dev": true
+ },
+ "osenv": {
+ "version": "0.1.5",
+ "resolved": "https://registry.npmjs.org/osenv/-/osenv-0.1.5.tgz",
+ "integrity": "sha512-0CWcCECdMVc2Rw3U5w9ZjqX6ga6ubk1xDVKxtBQPK7wis/0F2r9T6k4ydGYhecl7YUBxBVxhL5oisPsNxAPe2g==",
+ "dev": true,
+ "requires": {
+ "os-homedir": "^1.0.0",
+ "os-tmpdir": "^1.0.0"
+ }
+ },
+ "p-limit": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz",
+ "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==",
+ "dev": true,
+ "requires": {
+ "yocto-queue": "^0.1.0"
+ }
+ },
+ "p-locate": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz",
+ "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==",
+ "dev": true,
+ "requires": {
+ "p-limit": "^3.0.2"
+ }
+ },
+ "parent-module": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz",
+ "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==",
+ "dev": true,
+ "requires": {
+ "callsites": "^3.0.0"
+ }
+ },
+ "parse-filepath": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/parse-filepath/-/parse-filepath-1.0.2.tgz",
+ "integrity": "sha1-pjISf1Oq89FYdvWHLz/6x2PWyJE=",
+ "dev": true,
+ "requires": {
+ "is-absolute": "^1.0.0",
+ "map-cache": "^0.2.0",
+ "path-root": "^0.1.1"
+ }
+ },
+ "parse-json": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz",
+ "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==",
+ "dev": true,
+ "requires": {
+ "@babel/code-frame": "^7.0.0",
+ "error-ex": "^1.3.1",
+ "json-parse-even-better-errors": "^2.3.0",
+ "lines-and-columns": "^1.1.6"
+ }
+ },
+ "parse-passwd": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/parse-passwd/-/parse-passwd-1.0.0.tgz",
+ "integrity": "sha1-bVuTSkVpk7I9N/QKOC1vFmao5cY=",
+ "dev": true
+ },
+ "parseurl": {
+ "version": "1.3.3",
+ "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
+ "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==",
+ "dev": true
+ },
+ "pascalcase": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/pascalcase/-/pascalcase-0.1.1.tgz",
+ "integrity": "sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ=",
+ "dev": true
+ },
+ "path-dirname": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/path-dirname/-/path-dirname-1.0.2.tgz",
+ "integrity": "sha1-zDPSTVJeCZpTiMAzbG4yuRYGCeA=",
+ "dev": true
+ },
+ "path-exists": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
+ "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==",
+ "dev": true
+ },
+ "path-is-absolute": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
+ "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=",
+ "dev": true
+ },
+ "path-key": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
+ "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
+ "dev": true
+ },
+ "path-parse": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
+ "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==",
+ "dev": true
+ },
+ "path-root": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/path-root/-/path-root-0.1.1.tgz",
+ "integrity": "sha1-mkpoFMrBwM1zNgqV8yCDyOpHRbc=",
+ "dev": true,
+ "requires": {
+ "path-root-regex": "^0.1.0"
+ }
+ },
+ "path-root-regex": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/path-root-regex/-/path-root-regex-0.1.2.tgz",
+ "integrity": "sha1-v8zcjfWxLcUsi0PsONGNcsBLqW0=",
+ "dev": true
+ },
+ "path-to-regexp": {
+ "version": "0.1.7",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz",
+ "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w=",
+ "dev": true
+ },
+ "path-type": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz",
+ "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==",
+ "dev": true
+ },
+ "picocolors": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz",
+ "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==",
+ "dev": true
+ },
+ "picomatch": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
+ "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
+ "dev": true
+ },
+ "pify": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/pify/-/pify-4.0.1.tgz",
+ "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==",
+ "dev": true
+ },
+ "playwright-core": {
+ "version": "1.39.0",
+ "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.39.0.tgz",
+ "integrity": "sha512-+k4pdZgs1qiM+OUkSjx96YiKsXsmb59evFoqv8SKO067qBA+Z2s/dCzJij/ZhdQcs2zlTAgRKfeiiLm8PQ2qvw==",
+ "dev": true
+ },
+ "pngjs": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/pngjs/-/pngjs-7.0.0.tgz",
+ "integrity": "sha512-LKWqWJRhstyYo9pGvgor/ivk2w94eSjE3RGVuzLGlr3NmD8bf7RcYGze1mNdEHRP6TRP6rMuDHk5t44hnTRyow==",
+ "dev": true
+ },
+ "portfinder": {
+ "version": "1.0.32",
+ "resolved": "https://registry.npmjs.org/portfinder/-/portfinder-1.0.32.tgz",
+ "integrity": "sha512-on2ZJVVDXRADWE6jnQaX0ioEylzgBpQk8r55NE4wjXW1ZxO+BgDlY6DXwj20i0V8eB4SenDQ00WEaxfiIQPcxg==",
+ "dev": true,
+ "requires": {
+ "async": "^2.6.4",
+ "debug": "^3.2.7",
+ "mkdirp": "^0.5.6"
+ },
+ "dependencies": {
+ "async": {
+ "version": "2.6.4",
+ "resolved": "https://registry.npmjs.org/async/-/async-2.6.4.tgz",
+ "integrity": "sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA==",
+ "dev": true,
+ "requires": {
+ "lodash": "^4.17.14"
+ }
+ },
+ "debug": {
+ "version": "3.2.7",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz",
+ "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==",
+ "dev": true,
+ "requires": {
+ "ms": "^2.1.1"
+ }
+ },
+ "mkdirp": {
+ "version": "0.5.6",
+ "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.6.tgz",
+ "integrity": "sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==",
+ "dev": true,
+ "requires": {
+ "minimist": "^1.2.6"
+ }
+ }
+ }
+ },
+ "posix-character-classes": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/posix-character-classes/-/posix-character-classes-0.1.1.tgz",
+ "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=",
+ "dev": true
+ },
+ "prelude-ls": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz",
+ "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==",
+ "dev": true
+ },
+ "prettier": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.0.3.tgz",
+ "integrity": "sha512-L/4pUDMxcNa8R/EthV08Zt42WBO4h1rarVtK0K+QJG0X187OLo7l699jWw0GKuwzkPQ//jMFA/8Xm6Fh3J/DAg==",
+ "dev": true
+ },
+ "prettier-linter-helpers": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/prettier-linter-helpers/-/prettier-linter-helpers-1.0.0.tgz",
+ "integrity": "sha512-GbK2cP9nraSSUF9N2XwUwqfzlAFlMNYYl+ShE/V+H8a9uNl/oUqB1w2EL54Jh0OlyRSd8RfWYJ3coVS4TROP2w==",
+ "dev": true,
+ "requires": {
+ "fast-diff": "^1.1.2"
+ }
+ },
+ "process-nextick-args": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz",
+ "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==",
+ "dev": true
+ },
+ "proxy-addr": {
+ "version": "2.0.7",
+ "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz",
+ "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==",
+ "dev": true,
+ "requires": {
+ "forwarded": "0.2.0",
+ "ipaddr.js": "1.9.1"
+ }
+ },
+ "punycode": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz",
+ "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==",
+ "dev": true
+ },
+ "qs": {
+ "version": "6.11.0",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz",
+ "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==",
+ "dev": true,
+ "requires": {
+ "side-channel": "^1.0.4"
+ }
+ },
+ "queue-microtask": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
+ "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==",
+ "dev": true
+ },
+ "quick-lru": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-4.0.1.tgz",
+ "integrity": "sha512-ARhCpm70fzdcvNQfPoy49IaanKkTlRWF2JMzqhcJbhSFRZv7nPTvZJdcY7301IPmvW+/p0RgIWnQDLJxifsQ7g==",
+ "dev": true
+ },
+ "range-parser": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
+ "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
+ "dev": true
+ },
+ "raw-body": {
+ "version": "2.5.1",
+ "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.1.tgz",
+ "integrity": "sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==",
+ "dev": true,
+ "requires": {
+ "bytes": "3.1.2",
+ "http-errors": "2.0.0",
+ "iconv-lite": "0.4.24",
+ "unpipe": "1.0.0"
+ }
+ },
+ "read-pkg": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-5.2.0.tgz",
+ "integrity": "sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==",
+ "dev": true,
+ "requires": {
+ "@types/normalize-package-data": "^2.4.0",
+ "normalize-package-data": "^2.5.0",
+ "parse-json": "^5.0.0",
+ "type-fest": "^0.6.0"
+ },
+ "dependencies": {
+ "hosted-git-info": {
+ "version": "2.8.9",
+ "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz",
+ "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==",
+ "dev": true
+ },
+ "normalize-package-data": {
+ "version": "2.5.0",
+ "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz",
+ "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==",
+ "dev": true,
+ "requires": {
+ "hosted-git-info": "^2.1.4",
+ "resolve": "^1.10.0",
+ "semver": "2 || 3 || 4 || 5",
+ "validate-npm-package-license": "^3.0.1"
+ }
+ },
+ "semver": {
+ "version": "5.7.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
+ "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==",
+ "dev": true
+ },
+ "type-fest": {
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz",
+ "integrity": "sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==",
+ "dev": true
+ }
+ }
+ },
+ "read-pkg-up": {
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-7.0.1.tgz",
+ "integrity": "sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==",
+ "dev": true,
+ "requires": {
+ "find-up": "^4.1.0",
+ "read-pkg": "^5.2.0",
+ "type-fest": "^0.8.1"
+ },
+ "dependencies": {
+ "find-up": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz",
+ "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==",
+ "dev": true,
+ "requires": {
+ "locate-path": "^5.0.0",
+ "path-exists": "^4.0.0"
+ }
+ },
+ "locate-path": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz",
+ "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==",
+ "dev": true,
+ "requires": {
+ "p-locate": "^4.1.0"
+ }
+ },
+ "p-limit": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz",
+ "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==",
+ "dev": true,
+ "requires": {
+ "p-try": "^2.0.0"
+ }
+ },
+ "p-locate": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz",
+ "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==",
+ "dev": true,
+ "requires": {
+ "p-limit": "^2.2.0"
+ }
+ },
+ "p-try": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz",
+ "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==",
+ "dev": true
+ },
+ "type-fest": {
+ "version": "0.8.1",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz",
+ "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==",
+ "dev": true
+ }
+ }
+ },
+ "readable-stream": {
+ "version": "2.3.7",
+ "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz",
+ "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==",
+ "dev": true,
+ "requires": {
+ "core-util-is": "~1.0.0",
+ "inherits": "~2.0.3",
+ "isarray": "~1.0.0",
+ "process-nextick-args": "~2.0.0",
+ "safe-buffer": "~5.1.1",
+ "string_decoder": "~1.1.1",
+ "util-deprecate": "~1.0.1"
+ }
+ },
+ "readdirp": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz",
+ "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==",
+ "dev": true,
+ "requires": {
+ "picomatch": "^2.2.1"
+ }
+ },
+ "rechoir": {
+ "version": "0.7.1",
+ "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.7.1.tgz",
+ "integrity": "sha512-/njmZ8s1wVeR6pjTZ+0nCnv8SpZNRMT2D1RLOJQESlYFDBvwpTA4KWJpZ+sBJ4+vhjILRcK7JIFdGCdxEAAitg==",
+ "dev": true,
+ "requires": {
+ "resolve": "^1.9.0"
+ }
+ },
+ "redent": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz",
+ "integrity": "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==",
+ "dev": true,
+ "requires": {
+ "indent-string": "^4.0.0",
+ "strip-indent": "^3.0.0"
+ }
+ },
+ "regex-not": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz",
+ "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==",
+ "dev": true,
+ "requires": {
+ "extend-shallow": "^3.0.2",
+ "safe-regex": "^1.1.0"
+ },
+ "dependencies": {
+ "extend-shallow": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+ "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+ "dev": true,
+ "requires": {
+ "assign-symbols": "^1.0.0",
+ "is-extendable": "^1.0.1"
+ }
+ },
+ "is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "dev": true,
+ "requires": {
+ "is-plain-object": "^2.0.4"
+ }
+ }
+ }
+ },
+ "regexp.prototype.flags": {
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.1.tgz",
+ "integrity": "sha512-sy6TXMN+hnP/wMy+ISxg3krXx7BAtWVO4UouuCN/ziM9UEne0euamVNafDfvC83bRNr95y0V5iijeDQFUNpvrg==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "set-function-name": "^2.0.0"
+ }
+ },
+ "regexpp": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-3.2.0.tgz",
+ "integrity": "sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==",
+ "dev": true
+ },
+ "remove-trailing-separator": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz",
+ "integrity": "sha1-wkvOKig62tW8P1jg1IJJuSN52O8=",
+ "dev": true
+ },
+ "repeat-element": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.4.tgz",
+ "integrity": "sha512-LFiNfRcSu7KK3evMyYOuCzv3L10TW7yC1G2/+StMjK8Y6Vqd2MG7r/Qjw4ghtuCOjFvlnms/iMmLqpvW/ES/WQ==",
+ "dev": true
+ },
+ "repeat-string": {
+ "version": "1.6.1",
+ "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz",
+ "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=",
+ "dev": true
+ },
+ "repeating": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/repeating/-/repeating-2.0.1.tgz",
+ "integrity": "sha1-UhTFOpJtNVJwdSf7q0FdvAjQbdo=",
+ "dev": true,
+ "requires": {
+ "is-finite": "^1.0.0"
+ }
+ },
+ "requireindex": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/requireindex/-/requireindex-1.2.0.tgz",
+ "integrity": "sha512-L9jEkOi3ASd9PYit2cwRfyppc9NoABujTP8/5gFcbERmo5jUoAKovIC3fsF17pkTnGsrByysqX+Kxd2OTNI1ww==",
+ "dev": true
+ },
+ "requires-port": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz",
+ "integrity": "sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8=",
+ "dev": true
+ },
+ "resolve": {
+ "version": "1.22.8",
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz",
+ "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==",
+ "dev": true,
+ "requires": {
+ "is-core-module": "^2.13.0",
+ "path-parse": "^1.0.7",
+ "supports-preserve-symlinks-flag": "^1.0.0"
+ }
+ },
+ "resolve-dir": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/resolve-dir/-/resolve-dir-1.0.1.tgz",
+ "integrity": "sha1-eaQGRMNivoLybv/nOcm7U4IEb0M=",
+ "dev": true,
+ "requires": {
+ "expand-tilde": "^2.0.0",
+ "global-modules": "^1.0.0"
+ }
+ },
+ "resolve-from": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz",
+ "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==",
+ "dev": true
+ },
+ "resolve-url": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz",
+ "integrity": "sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo=",
+ "dev": true
+ },
+ "restore-cursor": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz",
+ "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==",
+ "dev": true,
+ "requires": {
+ "onetime": "^5.1.0",
+ "signal-exit": "^3.0.2"
+ }
+ },
+ "ret": {
+ "version": "0.1.15",
+ "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz",
+ "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==",
+ "dev": true
+ },
+ "reusify": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz",
+ "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==",
+ "dev": true
+ },
+ "rimraf": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
+ "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==",
+ "dev": true,
+ "requires": {
+ "glob": "^7.1.3"
+ }
+ },
+ "run-applescript": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/run-applescript/-/run-applescript-5.0.0.tgz",
+ "integrity": "sha512-XcT5rBksx1QdIhlFOCtgZkB99ZEouFZ1E2Kc2LHqNW13U3/74YGdkQRmThTwxy4QIyookibDKYZOPqX//6BlAg==",
+ "dev": true,
+ "requires": {
+ "execa": "^5.0.0"
+ }
+ },
+ "run-async": {
+ "version": "2.4.1",
+ "resolved": "https://registry.npmjs.org/run-async/-/run-async-2.4.1.tgz",
+ "integrity": "sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ==",
+ "dev": true
+ },
+ "run-parallel": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
+ "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==",
+ "dev": true,
+ "requires": {
+ "queue-microtask": "^1.2.2"
+ }
+ },
+ "rxjs": {
+ "version": "6.6.7",
+ "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.6.7.tgz",
+ "integrity": "sha512-hTdwr+7yYNIT5n4AMYp85KA6yw2Va0FLa3Rguvbpa4W3I5xynaBZo41cM3XM+4Q6fRMj3sBYIR1VAmZMXYJvRQ==",
+ "dev": true,
+ "requires": {
+ "tslib": "^1.9.0"
+ },
+ "dependencies": {
+ "tslib": {
+ "version": "1.14.1",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz",
+ "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==",
+ "dev": true
+ }
+ }
+ },
+ "safe-array-concat": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.0.1.tgz",
+ "integrity": "sha512-6XbUAseYE2KtOuGueyeobCySj9L4+66Tn6KQMOPQJrAJEowYKW/YR/MGJZl7FdydUdaFu4LYyDZjxf4/Nmo23Q==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2",
+ "get-intrinsic": "^1.2.1",
+ "has-symbols": "^1.0.3",
+ "isarray": "^2.0.5"
+ },
+ "dependencies": {
+ "isarray": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz",
+ "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==",
+ "dev": true
+ }
+ }
+ },
+ "safe-buffer": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
+ "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==",
+ "dev": true
+ },
+ "safe-regex": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz",
+ "integrity": "sha1-QKNmnzsHfR6UPURinhV91IAjvy4=",
+ "dev": true,
+ "requires": {
+ "ret": "~0.1.10"
+ }
+ },
+ "safe-regex-test": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.0.0.tgz",
+ "integrity": "sha512-JBUUzyOgEwXQY1NuPtvcj/qcBDbDmEvWufhlnXZIm75DEHp+afM1r1ujJpJsV/gSM4t59tpDyPi1sd6ZaPFfsA==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2",
+ "get-intrinsic": "^1.1.3",
+ "is-regex": "^1.1.4"
+ }
+ },
+ "safer-buffer": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
+ "dev": true
+ },
+ "sax": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz",
+ "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==",
+ "dev": true
+ },
+ "screenshot-ftw": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/screenshot-ftw/-/screenshot-ftw-1.0.5.tgz",
+ "integrity": "sha512-LPKvVt9TBvUD9CEb1xolbtS3CJODwkcF0NxnxdyXwBiT+nLokLaxuuISNUMzWxekjVgYqx077mG1gNhkvIE1Mg==",
+ "dev": true
+ },
+ "secure-compare": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/secure-compare/-/secure-compare-3.0.1.tgz",
+ "integrity": "sha1-8aAymzCLIh+uN7mXTz1XjQypmeM=",
+ "dev": true
+ },
+ "semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
+ "dev": true
+ },
+ "send": {
+ "version": "0.18.0",
+ "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz",
+ "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==",
+ "dev": true,
+ "requires": {
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "destroy": "1.2.0",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "fresh": "0.5.2",
+ "http-errors": "2.0.0",
+ "mime": "1.6.0",
+ "ms": "2.1.3",
+ "on-finished": "2.4.1",
+ "range-parser": "~1.2.1",
+ "statuses": "2.0.1"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "requires": {
+ "ms": "2.0.0"
+ },
+ "dependencies": {
+ "ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
+ "dev": true
+ }
+ }
+ },
+ "depd": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
+ "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
+ "dev": true
+ },
+ "ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+ "dev": true
+ },
+ "on-finished": {
+ "version": "2.4.1",
+ "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
+ "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
+ "dev": true,
+ "requires": {
+ "ee-first": "1.1.1"
+ }
+ },
+ "statuses": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
+ "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
+ "dev": true
+ }
+ }
+ },
+ "serve-index": {
+ "version": "1.9.1",
+ "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz",
+ "integrity": "sha1-03aNabHn2C5c4FD/9bRTvqEqkjk=",
+ "dev": true,
+ "requires": {
+ "accepts": "~1.3.4",
+ "batch": "0.6.1",
+ "debug": "2.6.9",
+ "escape-html": "~1.0.3",
+ "http-errors": "~1.6.2",
+ "mime-types": "~2.1.17",
+ "parseurl": "~1.3.2"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "http-errors": {
+ "version": "1.6.3",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz",
+ "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=",
+ "dev": true,
+ "requires": {
+ "depd": "~1.1.2",
+ "inherits": "2.0.3",
+ "setprototypeof": "1.1.0",
+ "statuses": ">= 1.4.0 < 2"
+ }
+ },
+ "inherits": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
+ "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=",
+ "dev": true
+ },
+ "ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=",
+ "dev": true
+ },
+ "setprototypeof": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz",
+ "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==",
+ "dev": true
+ }
+ }
+ },
+ "serve-static": {
+ "version": "1.15.0",
+ "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.15.0.tgz",
+ "integrity": "sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g==",
+ "dev": true,
+ "requires": {
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "parseurl": "~1.3.3",
+ "send": "0.18.0"
+ }
+ },
+ "set-function-length": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.1.1.tgz",
+ "integrity": "sha512-VoaqjbBJKiWtg4yRcKBQ7g7wnGnLV3M8oLvVWwOk2PdYY6PEFegR1vezXR0tw6fZGF9csVakIRjrJiy2veSBFQ==",
+ "dev": true,
+ "requires": {
+ "define-data-property": "^1.1.1",
+ "get-intrinsic": "^1.2.1",
+ "gopd": "^1.0.1",
+ "has-property-descriptors": "^1.0.0"
+ }
+ },
+ "set-function-name": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/set-function-name/-/set-function-name-2.0.1.tgz",
+ "integrity": "sha512-tMNCiqYVkXIZgc2Hnoy2IvC/f8ezc5koaRFkCjrpWzGpCd3qbZXPzVy9MAZzK1ch/X0jvSkojys3oqJN0qCmdA==",
+ "dev": true,
+ "requires": {
+ "define-data-property": "^1.0.1",
+ "functions-have-names": "^1.2.3",
+ "has-property-descriptors": "^1.0.0"
+ }
+ },
+ "set-value": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.1.tgz",
+ "integrity": "sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==",
+ "dev": true,
+ "requires": {
+ "extend-shallow": "^2.0.1",
+ "is-extendable": "^0.1.1",
+ "is-plain-object": "^2.0.3",
+ "split-string": "^3.0.1"
+ }
+ },
+ "setprototypeof": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz",
+ "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==",
+ "dev": true
+ },
+ "shebang-command": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
+ "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
+ "dev": true,
+ "requires": {
+ "shebang-regex": "^3.0.0"
+ }
+ },
+ "shebang-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
+ "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
+ "dev": true
+ },
+ "shiki": {
+ "version": "0.14.5",
+ "resolved": "https://registry.npmjs.org/shiki/-/shiki-0.14.5.tgz",
+ "integrity": "sha512-1gCAYOcmCFONmErGTrS1fjzJLA7MGZmKzrBNX7apqSwhyITJg2O102uFzXUeBxNnEkDA9vHIKLyeKq0V083vIw==",
+ "dev": true,
+ "requires": {
+ "ansi-sequence-parser": "^1.1.0",
+ "jsonc-parser": "^3.2.0",
+ "vscode-oniguruma": "^1.7.0",
+ "vscode-textmate": "^8.0.0"
+ }
+ },
+ "side-channel": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz",
+ "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.0",
+ "get-intrinsic": "^1.0.2",
+ "object-inspect": "^1.9.0"
+ }
+ },
+ "signal-exit": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz",
+ "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==",
+ "dev": true
+ },
+ "slash": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-2.0.0.tgz",
+ "integrity": "sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A==",
+ "dev": true
+ },
+ "snapdragon": {
+ "version": "0.8.2",
+ "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz",
+ "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==",
+ "dev": true,
+ "requires": {
+ "base": "^0.11.1",
+ "debug": "^2.2.0",
+ "define-property": "^0.2.5",
+ "extend-shallow": "^2.0.1",
+ "map-cache": "^0.2.2",
+ "source-map": "^0.5.6",
+ "source-map-resolve": "^0.5.0",
+ "use": "^3.1.0"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dev": true,
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=",
+ "dev": true
+ }
+ }
+ },
+ "snapdragon-node": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz",
+ "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==",
+ "dev": true,
+ "requires": {
+ "define-property": "^1.0.0",
+ "isobject": "^3.0.0",
+ "snapdragon-util": "^3.0.1"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+ "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^1.0.0"
+ }
+ },
+ "is-accessor-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+ "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-data-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+ "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-descriptor": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+ "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+ "dev": true,
+ "requires": {
+ "is-accessor-descriptor": "^1.0.0",
+ "is-data-descriptor": "^1.0.0",
+ "kind-of": "^6.0.2"
+ }
+ }
+ }
+ },
+ "snapdragon-util": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz",
+ "integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^3.2.0"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dev": true,
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "source-map": {
+ "version": "0.5.7",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
+ "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=",
+ "dev": true
+ },
+ "source-map-resolve": {
+ "version": "0.5.3",
+ "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.3.tgz",
+ "integrity": "sha512-Htz+RnsXWk5+P2slx5Jh3Q66vhQj1Cllm0zvnaY98+NFx+Dv2CF/f5O/t8x+KaNdrdIAsruNzoh/KpialbqAnw==",
+ "dev": true,
+ "requires": {
+ "atob": "^2.1.2",
+ "decode-uri-component": "^0.2.0",
+ "resolve-url": "^0.2.1",
+ "source-map-url": "^0.4.0",
+ "urix": "^0.1.0"
+ }
+ },
+ "source-map-url": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.1.tgz",
+ "integrity": "sha512-cPiFOTLUKvJFIg4SKVScy4ilPPW6rFgMgfuZJPNoDuMs3nC1HbMUycBoJw77xFIp6z1UJQJOfx6C9GMH80DiTw==",
+ "dev": true
+ },
+ "spdx-correct": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.1.1.tgz",
+ "integrity": "sha512-cOYcUWwhCuHCXi49RhFRCyJEK3iPj1Ziz9DpViV3tbZOwXD49QzIN3MpOLJNxh2qwq2lJJZaKMVw9qNi4jTC0w==",
+ "dev": true,
+ "requires": {
+ "spdx-expression-parse": "^3.0.0",
+ "spdx-license-ids": "^3.0.0"
+ }
+ },
+ "spdx-exceptions": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz",
+ "integrity": "sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A==",
+ "dev": true
+ },
+ "spdx-expression-parse": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz",
+ "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==",
+ "dev": true,
+ "requires": {
+ "spdx-exceptions": "^2.1.0",
+ "spdx-license-ids": "^3.0.0"
+ }
+ },
+ "spdx-license-ids": {
+ "version": "3.0.11",
+ "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.11.tgz",
+ "integrity": "sha512-Ctl2BrFiM0X3MANYgj3CkygxhRmr9mi6xhejbdO960nF6EDJApTYpn0BQnDKlnNBULKiCN1n3w9EBkHK8ZWg+g==",
+ "dev": true
+ },
+ "split-string": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz",
+ "integrity": "sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==",
+ "dev": true,
+ "requires": {
+ "extend-shallow": "^3.0.0"
+ },
+ "dependencies": {
+ "extend-shallow": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+ "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+ "dev": true,
+ "requires": {
+ "assign-symbols": "^1.0.0",
+ "is-extendable": "^1.0.1"
+ }
+ },
+ "is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "dev": true,
+ "requires": {
+ "is-plain-object": "^2.0.4"
+ }
+ }
+ }
+ },
+ "sprintf-js": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
+ "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=",
+ "dev": true
+ },
+ "static-extend": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/static-extend/-/static-extend-0.1.2.tgz",
+ "integrity": "sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY=",
+ "dev": true,
+ "requires": {
+ "define-property": "^0.2.5",
+ "object-copy": "^0.1.0"
+ }
+ },
+ "statuses": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz",
+ "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=",
+ "dev": true
+ },
+ "string_decoder": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
+ "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
+ "dev": true,
+ "requires": {
+ "safe-buffer": "~5.1.0"
+ }
+ },
+ "string-width": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "dev": true,
+ "requires": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.1"
+ }
+ },
+ "string.prototype.trim": {
+ "version": "1.2.8",
+ "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.8.tgz",
+ "integrity": "sha512-lfjY4HcixfQXOfaqCvcBuOIapyaroTXhbkfJN3gcB1OtyupngWK4sEET9Knd0cXd28kTUqu/kHoV4HKSJdnjiQ==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1"
+ }
+ },
+ "string.prototype.trimend": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.7.tgz",
+ "integrity": "sha512-Ni79DqeB72ZFq1uH/L6zJ+DKZTkOtPIHovb3YZHQViE+HDouuU4mBrLOLDn5Dde3RF8qw5qVETEjhu9locMLvA==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1"
+ }
+ },
+ "string.prototype.trimstart": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.7.tgz",
+ "integrity": "sha512-NGhtDFu3jCEm7B4Fy0DpLewdJQOZcQ0rGbwQ/+stjnrp2i+rlKeCvos9hOIeCmqwratM47OBxY7uFZzjxHXmrg==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.2.0",
+ "es-abstract": "^1.22.1"
+ }
+ },
+ "strip-ansi": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
+ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+ "dev": true,
+ "requires": {
+ "ansi-regex": "^5.0.1"
+ }
+ },
+ "strip-bom": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-2.0.0.tgz",
+ "integrity": "sha1-YhmoVhZSBJHzV4i9vxRHqZx+aw4=",
+ "dev": true,
+ "requires": {
+ "is-utf8": "^0.2.0"
+ }
+ },
+ "strip-final-newline": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz",
+ "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==",
+ "dev": true
+ },
+ "strip-indent": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz",
+ "integrity": "sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==",
+ "dev": true,
+ "requires": {
+ "min-indent": "^1.0.0"
+ }
+ },
+ "strip-json-comments": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz",
+ "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==",
+ "dev": true
+ },
+ "supports-color": {
+ "version": "5.5.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
+ "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
+ "dev": true,
+ "requires": {
+ "has-flag": "^3.0.0"
+ }
+ },
+ "supports-preserve-symlinks-flag": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz",
+ "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==",
+ "dev": true
+ },
+ "synckit": {
+ "version": "0.8.5",
+ "resolved": "https://registry.npmjs.org/synckit/-/synckit-0.8.5.tgz",
+ "integrity": "sha512-L1dapNV6vu2s/4Sputv8xGsCdAVlb5nRDMFU/E27D44l5U6cw1g0dGd45uLc+OXjNMmF4ntiMdCimzcjFKQI8Q==",
+ "dev": true,
+ "requires": {
+ "@pkgr/utils": "^2.3.1",
+ "tslib": "^2.5.0"
+ }
+ },
+ "text-table": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz",
+ "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=",
+ "dev": true
+ },
+ "through": {
+ "version": "2.3.8",
+ "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz",
+ "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=",
+ "dev": true
+ },
+ "titleize": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/titleize/-/titleize-3.0.0.tgz",
+ "integrity": "sha512-KxVu8EYHDPBdUYdKZdKtU2aj2XfEx9AfjXxE/Aj0vT06w2icA09Vus1rh6eSu1y01akYg6BjIK/hxyLJINoMLQ==",
+ "dev": true
+ },
+ "tmp": {
+ "version": "0.0.33",
+ "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz",
+ "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==",
+ "dev": true,
+ "requires": {
+ "os-tmpdir": "~1.0.2"
+ }
+ },
+ "to-fast-properties": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz",
+ "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4=",
+ "dev": true
+ },
+ "to-object-path": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/to-object-path/-/to-object-path-0.3.0.tgz",
+ "integrity": "sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68=",
+ "dev": true,
+ "requires": {
+ "kind-of": "^3.0.2"
+ },
+ "dependencies": {
+ "kind-of": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+ "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+ "dev": true,
+ "requires": {
+ "is-buffer": "^1.1.5"
+ }
+ }
+ }
+ },
+ "to-regex": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz",
+ "integrity": "sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==",
+ "dev": true,
+ "requires": {
+ "define-property": "^2.0.2",
+ "extend-shallow": "^3.0.2",
+ "regex-not": "^1.0.2",
+ "safe-regex": "^1.1.0"
+ },
+ "dependencies": {
+ "define-property": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz",
+ "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==",
+ "dev": true,
+ "requires": {
+ "is-descriptor": "^1.0.2",
+ "isobject": "^3.0.1"
+ }
+ },
+ "extend-shallow": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+ "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+ "dev": true,
+ "requires": {
+ "assign-symbols": "^1.0.0",
+ "is-extendable": "^1.0.1"
+ }
+ },
+ "is-accessor-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+ "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-data-descriptor": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+ "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+ "dev": true,
+ "requires": {
+ "kind-of": "^6.0.0"
+ }
+ },
+ "is-descriptor": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+ "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+ "dev": true,
+ "requires": {
+ "is-accessor-descriptor": "^1.0.0",
+ "is-data-descriptor": "^1.0.0",
+ "kind-of": "^6.0.2"
+ }
+ },
+ "is-extendable": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+ "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+ "dev": true,
+ "requires": {
+ "is-plain-object": "^2.0.4"
+ }
+ }
+ }
+ },
+ "to-regex-range": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
+ "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
+ "dev": true,
+ "requires": {
+ "is-number": "^7.0.0"
+ }
+ },
+ "toidentifier": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz",
+ "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==",
+ "dev": true
+ },
+ "trim-newlines": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/trim-newlines/-/trim-newlines-3.0.1.tgz",
+ "integrity": "sha512-c1PTsA3tYrIsLGkJkzHF+w9F2EyxfXGo4UyJc4pFL++FMjnq0HJS69T3M7d//gKrFKwy429bouPescbjecU+Zw==",
+ "dev": true
+ },
+ "ts-api-utils": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.0.3.tgz",
+ "integrity": "sha512-wNMeqtMz5NtwpT/UZGY5alT+VoKdSsOOP/kqHFcUW1P/VRhH2wJ48+DN2WwUliNbQ976ETwDL0Ifd2VVvgonvg==",
+ "dev": true,
+ "requires": {}
+ },
+ "ts-node": {
+ "version": "10.9.1",
+ "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.1.tgz",
+ "integrity": "sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==",
+ "dev": true,
+ "requires": {
+ "@cspotcode/source-map-support": "^0.8.0",
+ "@tsconfig/node10": "^1.0.7",
+ "@tsconfig/node12": "^1.0.7",
+ "@tsconfig/node14": "^1.0.0",
+ "@tsconfig/node16": "^1.0.2",
+ "acorn": "^8.4.1",
+ "acorn-walk": "^8.1.1",
+ "arg": "^4.1.0",
+ "create-require": "^1.1.0",
+ "diff": "^4.0.1",
+ "make-error": "^1.1.1",
+ "v8-compile-cache-lib": "^3.0.1",
+ "yn": "3.1.1"
+ }
+ },
+ "tsconfig-paths": {
+ "version": "3.14.2",
+ "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.14.2.tgz",
+ "integrity": "sha512-o/9iXgCYc5L/JxCHPe3Hvh8Q/2xm5Z+p18PESBU6Ff33695QnCHBEjcytY2q19ua7Mbl/DavtBOLq+oG0RCL+g==",
+ "dev": true,
+ "requires": {
+ "@types/json5": "^0.0.29",
+ "json5": "^1.0.2",
+ "minimist": "^1.2.6",
+ "strip-bom": "^3.0.0"
+ },
+ "dependencies": {
+ "json5": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz",
+ "integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==",
+ "dev": true,
+ "requires": {
+ "minimist": "^1.2.0"
+ }
+ },
+ "strip-bom": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz",
+ "integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==",
+ "dev": true
+ }
+ }
+ },
+ "tslib": {
+ "version": "2.6.2",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz",
+ "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==",
+ "dev": true
+ },
+ "tsutils": {
+ "version": "3.21.0",
+ "resolved": "https://registry.npmjs.org/tsutils/-/tsutils-3.21.0.tgz",
+ "integrity": "sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==",
+ "dev": true,
+ "requires": {
+ "tslib": "^1.8.1"
+ },
+ "dependencies": {
+ "tslib": {
+ "version": "1.14.1",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz",
+ "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==",
+ "dev": true
+ }
+ }
+ },
+ "type": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/type/-/type-1.2.0.tgz",
+ "integrity": "sha512-+5nt5AAniqsCnu2cEQQdpzCAh33kVx8n0VoFidKpB1dVVLAN/F+bgVOqOJqOnEnrhp222clB5p3vUlD+1QAnfg==",
+ "dev": true
+ },
+ "type-check": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz",
+ "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==",
+ "dev": true,
+ "requires": {
+ "prelude-ls": "^1.2.1"
+ }
+ },
+ "type-fest": {
+ "version": "0.21.3",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz",
+ "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==",
+ "dev": true
+ },
+ "type-is": {
+ "version": "1.6.18",
+ "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz",
+ "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
+ "dev": true,
+ "requires": {
+ "media-typer": "0.3.0",
+ "mime-types": "~2.1.24"
+ }
+ },
+ "typed-array-buffer": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.0.tgz",
+ "integrity": "sha512-Y8KTSIglk9OZEr8zywiIHG/kmQ7KWyjseXs1CbSo8vC42w7hg2HgYTxSWwP0+is7bWDc1H+Fo026CpHFwm8tkw==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2",
+ "get-intrinsic": "^1.2.1",
+ "is-typed-array": "^1.1.10"
+ }
+ },
+ "typed-array-byte-length": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/typed-array-byte-length/-/typed-array-byte-length-1.0.0.tgz",
+ "integrity": "sha512-Or/+kvLxNpeQ9DtSydonMxCx+9ZXOswtwJn17SNLvhptaXYDJvkFFP5zbfU/uLmvnBJlI4yrnXRxpdWH/M5tNA==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2",
+ "for-each": "^0.3.3",
+ "has-proto": "^1.0.1",
+ "is-typed-array": "^1.1.10"
+ }
+ },
+ "typed-array-byte-offset": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/typed-array-byte-offset/-/typed-array-byte-offset-1.0.0.tgz",
+ "integrity": "sha512-RD97prjEt9EL8YgAgpOkf3O4IF9lhJFr9g0htQkm0rchFp/Vx7LW5Q8fSXXub7BXAODyUQohRMyOc3faCPd0hg==",
+ "dev": true,
+ "requires": {
+ "available-typed-arrays": "^1.0.5",
+ "call-bind": "^1.0.2",
+ "for-each": "^0.3.3",
+ "has-proto": "^1.0.1",
+ "is-typed-array": "^1.1.10"
+ }
+ },
+ "typed-array-length": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.4.tgz",
+ "integrity": "sha512-KjZypGq+I/H7HI5HlOoGHkWUUGq+Q0TPhQurLbyrVrvnKTBgzLhIJ7j6J/XTQOi0d1RjyZ0wdas8bKs2p0x3Ng==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2",
+ "for-each": "^0.3.3",
+ "is-typed-array": "^1.1.9"
+ }
+ },
+ "typedoc": {
+ "version": "0.25.3",
+ "resolved": "https://registry.npmjs.org/typedoc/-/typedoc-0.25.3.tgz",
+ "integrity": "sha512-Ow8Bo7uY1Lwy7GTmphRIMEo6IOZ+yYUyrc8n5KXIZg1svpqhZSWgni2ZrDhe+wLosFS8yswowUzljTAV/3jmWw==",
+ "dev": true,
+ "requires": {
+ "lunr": "^2.3.9",
+ "marked": "^4.3.0",
+ "minimatch": "^9.0.3",
+ "shiki": "^0.14.1"
+ },
+ "dependencies": {
+ "brace-expansion": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
+ "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
+ "dev": true,
+ "requires": {
+ "balanced-match": "^1.0.0"
+ }
+ },
+ "minimatch": {
+ "version": "9.0.3",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz",
+ "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==",
+ "dev": true,
+ "requires": {
+ "brace-expansion": "^2.0.1"
+ }
+ }
+ }
+ },
+ "typescript": {
+ "version": "5.2.2",
+ "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz",
+ "integrity": "sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==",
+ "dev": true
+ },
+ "unbox-primitive": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.2.tgz",
+ "integrity": "sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==",
+ "dev": true,
+ "requires": {
+ "call-bind": "^1.0.2",
+ "has-bigints": "^1.0.2",
+ "has-symbols": "^1.0.3",
+ "which-boxed-primitive": "^1.0.2"
+ }
+ },
+ "unc-path-regex": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/unc-path-regex/-/unc-path-regex-0.1.2.tgz",
+ "integrity": "sha1-5z3T17DXxe2G+6xrCufYxqadUPo=",
+ "dev": true
+ },
+ "underscore.string": {
+ "version": "3.3.6",
+ "resolved": "https://registry.npmjs.org/underscore.string/-/underscore.string-3.3.6.tgz",
+ "integrity": "sha512-VoC83HWXmCrF6rgkyxS9GHv8W9Q5nhMKho+OadDJGzL2oDYbYEppBaCMH6pFlwLeqj2QS+hhkw2kpXkSdD1JxQ==",
+ "dev": true,
+ "requires": {
+ "sprintf-js": "^1.1.1",
+ "util-deprecate": "^1.0.2"
+ },
+ "dependencies": {
+ "sprintf-js": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.2.tgz",
+ "integrity": "sha512-VE0SOVEHCk7Qc8ulkWw3ntAzXuqf7S2lvwQaDLRnUeIEaKNQJzV6BwmLKhOqT61aGhfUMrXeaBk+oDGCzvhcug==",
+ "dev": true
+ }
+ }
+ },
+ "undici-types": {
+ "version": "5.26.5",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
+ "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
+ "dev": true
+ },
+ "union": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/union/-/union-0.5.0.tgz",
+ "integrity": "sha512-N6uOhuW6zO95P3Mel2I2zMsbsanvvtgn6jVqJv4vbVcz/JN0OkL9suomjQGmWtxJQXOCqUJvquc1sMeNz/IwlA==",
+ "dev": true,
+ "requires": {
+ "qs": "^6.4.0"
+ }
+ },
+ "union-value": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.1.tgz",
+ "integrity": "sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==",
+ "dev": true,
+ "requires": {
+ "arr-union": "^3.1.0",
+ "get-value": "^2.0.6",
+ "is-extendable": "^0.1.1",
+ "set-value": "^2.0.1"
+ }
+ },
+ "unpipe": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
+ "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==",
+ "dev": true
+ },
+ "unset-value": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/unset-value/-/unset-value-1.0.0.tgz",
+ "integrity": "sha1-g3aHP30jNRef+x5vw6jtDfyKtVk=",
+ "dev": true,
+ "requires": {
+ "has-value": "^0.3.1",
+ "isobject": "^3.0.0"
+ },
+ "dependencies": {
+ "has-value": {
+ "version": "0.3.1",
+ "resolved": "https://registry.npmjs.org/has-value/-/has-value-0.3.1.tgz",
+ "integrity": "sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8=",
+ "dev": true,
+ "requires": {
+ "get-value": "^2.0.3",
+ "has-values": "^0.1.4",
+ "isobject": "^2.0.0"
+ },
+ "dependencies": {
+ "isobject": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz",
+ "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=",
+ "dev": true,
+ "requires": {
+ "isarray": "1.0.0"
+ }
+ }
+ }
+ },
+ "has-values": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/has-values/-/has-values-0.1.4.tgz",
+ "integrity": "sha1-bWHeldkd/Km5oCCJrThL/49it3E=",
+ "dev": true
+ }
+ }
+ },
+ "untildify": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/untildify/-/untildify-4.0.0.tgz",
+ "integrity": "sha512-KK8xQ1mkzZeg9inewmFVDNkg3l5LUhoq9kN6iWYB/CC9YMG8HA+c1Q8HwDe6dEX7kErrEVNVBO3fWsVq5iDgtw==",
+ "dev": true
+ },
+ "upath": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/upath/-/upath-1.2.0.tgz",
+ "integrity": "sha512-aZwGpamFO61g3OlfT7OQCHqhGnW43ieH9WZeP7QxN/G/jS4jfqUkZxoryvJgVPEcrl5NL/ggHsSmLMHuH64Lhg==",
+ "dev": true
+ },
+ "update-browserslist-db": {
+ "version": "1.0.13",
+ "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz",
+ "integrity": "sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==",
+ "dev": true,
+ "requires": {
+ "escalade": "^3.1.1",
+ "picocolors": "^1.0.0"
+ }
+ },
+ "uri-js": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz",
+ "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
+ "dev": true,
+ "requires": {
+ "punycode": "^2.1.0"
+ }
+ },
+ "urix": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz",
+ "integrity": "sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI=",
+ "dev": true
+ },
+ "url-join": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/url-join/-/url-join-4.0.1.tgz",
+ "integrity": "sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA==",
+ "dev": true
+ },
+ "use": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/use/-/use-3.1.1.tgz",
+ "integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==",
+ "dev": true
+ },
+ "util-deprecate": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
+ "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=",
+ "dev": true
+ },
+ "utils-merge": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
+ "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=",
+ "dev": true
+ },
+ "v8-compile-cache-lib": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz",
+ "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==",
+ "dev": true
+ },
+ "v8flags": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/v8flags/-/v8flags-3.2.0.tgz",
+ "integrity": "sha512-mH8etigqMfiGWdeXpaaqGfs6BndypxusHHcv2qSHyZkGEznCd/qAXCWWRzeowtL54147cktFOC4P5y+kl8d8Jg==",
+ "dev": true,
+ "requires": {
+ "homedir-polyfill": "^1.0.1"
+ }
+ },
+ "validate-npm-package-license": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz",
+ "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==",
+ "dev": true,
+ "requires": {
+ "spdx-correct": "^3.0.0",
+ "spdx-expression-parse": "^3.0.0"
+ }
+ },
+ "vary": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
+ "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=",
+ "dev": true
+ },
+ "vscode-oniguruma": {
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/vscode-oniguruma/-/vscode-oniguruma-1.7.0.tgz",
+ "integrity": "sha512-L9WMGRfrjOhgHSdOYgCt/yRMsXzLDJSL7BPrOZt73gU0iWO4mpqzqQzOz5srxqTvMBaR0XZTSrVWo4j55Rc6cA==",
+ "dev": true
+ },
+ "vscode-textmate": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/vscode-textmate/-/vscode-textmate-8.0.0.tgz",
+ "integrity": "sha512-AFbieoL7a5LMqcnOF04ji+rpXadgOXnZsxQr//r83kLPr7biP7am3g9zbaZIaBGwBRWeSvoMD4mgPdX3e4NWBg==",
+ "dev": true
+ },
+ "whatwg-encoding": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-2.0.0.tgz",
+ "integrity": "sha512-p41ogyeMUrw3jWclHWTQg1k05DSVXPLcVxRTYsXUk+ZooOCZLcoYgPZ/HL/D/N+uQPOtcp1me1WhBEaX02mhWg==",
+ "dev": true,
+ "requires": {
+ "iconv-lite": "0.6.3"
+ },
+ "dependencies": {
+ "iconv-lite": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz",
+ "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==",
+ "dev": true,
+ "requires": {
+ "safer-buffer": ">= 2.1.2 < 3.0.0"
+ }
+ }
+ }
+ },
+ "which": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
+ "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
+ "dev": true,
+ "requires": {
+ "isexe": "^2.0.0"
+ }
+ },
+ "which-boxed-primitive": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz",
+ "integrity": "sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==",
+ "dev": true,
+ "requires": {
+ "is-bigint": "^1.0.1",
+ "is-boolean-object": "^1.1.0",
+ "is-number-object": "^1.0.4",
+ "is-string": "^1.0.5",
+ "is-symbol": "^1.0.3"
+ }
+ },
+ "which-typed-array": {
+ "version": "1.1.13",
+ "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.13.tgz",
+ "integrity": "sha512-P5Nra0qjSncduVPEAr7xhoF5guty49ArDTwzJ/yNuPIbZppyRxFQsRCWrocxIY+CnMVG+qfbU2FmDKyvSGClow==",
+ "dev": true,
+ "requires": {
+ "available-typed-arrays": "^1.0.5",
+ "call-bind": "^1.0.4",
+ "for-each": "^0.3.3",
+ "gopd": "^1.0.1",
+ "has-tostringtag": "^1.0.0"
+ }
+ },
+ "wrappy": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
+ "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=",
+ "dev": true
+ },
+ "write-file-atomic": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz",
+ "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==",
+ "dev": true,
+ "requires": {
+ "imurmurhash": "^0.1.4",
+ "signal-exit": "^3.0.7"
+ }
+ },
+ "xml2js": {
+ "version": "0.4.23",
+ "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.23.tgz",
+ "integrity": "sha512-ySPiMjM0+pLDftHgXY4By0uswI3SPKLDw/i3UXbnO8M/p28zqexCUoPmQFrYD+/1BzhGJSs2i1ERWKJAtiLrug==",
+ "dev": true,
+ "requires": {
+ "sax": ">=0.6.0",
+ "xmlbuilder": "~11.0.0"
+ }
+ },
+ "xmlbuilder": {
+ "version": "11.0.1",
+ "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-11.0.1.tgz",
+ "integrity": "sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA==",
+ "dev": true
+ },
+ "yallist": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
+ "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==",
+ "dev": true
+ },
+ "yargs-parser": {
+ "version": "20.2.9",
+ "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz",
+ "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==",
+ "dev": true
+ },
+ "yn": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz",
+ "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==",
+ "dev": true
+ },
+ "yocto-queue": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz",
+ "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==",
+ "dev": true
+ }
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/package.json b/dom/webgpu/tests/cts/checkout/package.json
new file mode 100644
index 0000000000..21f74065af
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/package.json
@@ -0,0 +1,80 @@
+{
+ "name": "@webgpu/cts",
+ "version": "0.1.0",
+ "description": "WebGPU Conformance Test Suite",
+ "scripts": {
+ "test": "grunt pre",
+ "check": "grunt check",
+ "standalone": "grunt standalone",
+ "wpt": "grunt wpt",
+ "fix": "grunt fix",
+ "unittest": "grunt unittest",
+ "gen_wpt_cts_html": "node tools/gen_wpt_cts_html",
+ "gen_cache": "node tools/gen_cache",
+ "tsdoc": "grunt run:tsdoc",
+ "start": "node tools/dev_server",
+ "dev": "node tools/dev_server"
+ },
+ "engines": {
+ "node": "^12.22.0 || ^14.17.0 || >=16.0.0",
+ "npm": ">=8.5.2"
+ },
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/gpuweb/cts.git"
+ },
+ "author": "WebGPU CTS Contributors",
+ "private": true,
+ "license": "BSD-3-Clause",
+ "bugs": {
+ "url": "https://github.com/gpuweb/cts/issues"
+ },
+ "homepage": "https://github.com/gpuweb/cts#readme",
+ "devDependencies": {
+ "@babel/cli": "^7.23.0",
+ "@babel/core": "^7.23.2",
+ "@babel/preset-typescript": "^7.23.2",
+ "@types/babel__core": "^7.20.3",
+ "@types/dom-mediacapture-transform": "^0.1.8",
+ "@types/dom-webcodecs": "^0.1.9",
+ "@types/express": "^4.17.20",
+ "@types/jquery": "^3.5.25",
+ "@types/morgan": "^1.9.7",
+ "@types/node": "^20.8.10",
+ "@types/offscreencanvas": "^2019.7.2",
+ "@types/pngjs": "^6.0.3",
+ "@types/serve-index": "^1.9.3",
+ "@typescript-eslint/eslint-plugin": "^6.9.1",
+ "@typescript-eslint/parser": "^6.9.1",
+ "@webgpu/types": "^0.1.38",
+ "ansi-colors": "4.1.3",
+ "babel-plugin-add-header-comment": "^1.0.3",
+ "babel-plugin-const-enum": "^1.2.0",
+ "chokidar": "^3.5.3",
+ "eslint": "^8.52.0",
+ "eslint-plugin-ban": "^1.6.0",
+ "eslint-plugin-deprecation": "^2.0.0",
+ "eslint-plugin-gpuweb-cts": "file:./tools/eslint-plugin-gpuweb-cts",
+ "eslint-plugin-import": "^2.29.0",
+ "express": "^4.18.2",
+ "grunt": "^1.6.1",
+ "grunt-cli": "^1.4.3",
+ "grunt-contrib-clean": "^2.0.1",
+ "grunt-contrib-copy": "^1.0.0",
+ "grunt-run": "^0.8.1",
+ "grunt-timer": "^0.6.0",
+ "grunt-ts": "^6.0.0-beta.22",
+ "gts": "^5.2.0",
+ "http-server": "^14.1.1",
+ "morgan": "^1.10.0",
+ "playwright-core": "^1.39.0",
+ "pngjs": "^7.0.0",
+ "portfinder": "^1.0.32",
+ "prettier": "~3.0.3",
+ "screenshot-ftw": "^1.0.5",
+ "serve-index": "^1.9.1",
+ "ts-node": "^10.9.1",
+ "typedoc": "^0.25.3",
+ "typescript": "~5.2.2"
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/prettier.config.js b/dom/webgpu/tests/cts/checkout/prettier.config.js
new file mode 100644
index 0000000000..9f4053f719
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/prettier.config.js
@@ -0,0 +1,8 @@
+module.exports = {
+ printWidth: 100,
+
+ arrowParens: 'avoid',
+ bracketSpacing: true,
+ singleQuote: true,
+ trailingComma: 'es5',
+};
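For orientation (illustrative only, not part of the patch): with arrowParens: 'avoid' and singleQuote: true, a snippet such as

    const greet = (name) => { return "hi " + name };

would be reformatted by Prettier to

    const greet = name => {
      return 'hi ' + name;
    };

with lines wrapped at the 100-column printWidth and ES5-style trailing commas added where the syntax allows them.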
diff --git a/dom/webgpu/tests/cts/checkout/src/common/framework/data_cache.ts b/dom/webgpu/tests/cts/checkout/src/common/framework/data_cache.ts
new file mode 100644
index 0000000000..c1e3a889be
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/framework/data_cache.ts
@@ -0,0 +1,197 @@
+/**
+ * Utilities to improve the performance of the CTS, by caching data that is
+ * expensive to build using a two-level cache (in-memory, pre-computed file).
+ */
+
+import { assert } from '../util/util.js';
+
+interface DataStore {
+ load(path: string): Promise<Uint8Array>;
+}
+
+/** Logger is a basic debug logger function */
+export type Logger = (s: string) => void;
+
+/**
+ * DataCacheNode represents a single cache entry in the LRU DataCache.
+ * DataCacheNode is a doubly linked list, so that least-recently-used entries can be removed, and
+ * cache hits can move the node to the front of the list.
+ */
+class DataCacheNode {
+ public constructor(path: string, data: unknown) {
+ this.path = path;
+ this.data = data;
+ }
+
+ /** insertAfter() re-inserts this node in the doubly-linked list after `prev` */
+ public insertAfter(prev: DataCacheNode) {
+ this.unlink();
+ this.next = prev.next;
+ this.prev = prev;
+ prev.next = this;
+ if (this.next) {
+ this.next.prev = this;
+ }
+ }
+
+ /** unlink() removes this node from the doubly-linked list */
+ public unlink() {
+ const prev = this.prev;
+ const next = this.next;
+ if (prev) {
+ prev.next = next;
+ }
+ if (next) {
+ next.prev = prev;
+ }
+ this.prev = null;
+ this.next = null;
+ }
+
+ public readonly path: string; // The file path this node represents
+ public readonly data: unknown; // The deserialized data for this node
+ public prev: DataCacheNode | null = null; // The previous node in the doubly-linked list
+ public next: DataCacheNode | null = null; // The next node in the doubly-linked list
+}
+
+/** DataCache is an interface to a LRU-cached data store used to hold data cached by path */
+export class DataCache {
+ public constructor() {
+ this.lruHeadNode.next = this.lruTailNode;
+ this.lruTailNode.prev = this.lruHeadNode;
+ }
+
+ /** setDataStore() sets the backing data store used by the data cache */
+ public setStore(dataStore: DataStore) {
+ this.dataStore = dataStore;
+ }
+
+ /** setDebugLogger() sets the verbose logger */
+ public setDebugLogger(logger: Logger) {
+ this.debugLogger = logger;
+ }
+
+ /**
+ * fetch() retrieves cacheable data from the data cache, first checking the
+ * in-memory cache, then the data store (if specified), then resorting to
+ * building the data and storing it in the cache.
+ */
+ public async fetch<Data>(cacheable: Cacheable<Data>): Promise<Data> {
+ {
+ // First check the in-memory cache
+ const node = this.cache.get(cacheable.path);
+ if (node !== undefined) {
+ this.log('in-memory cache hit');
+ node.insertAfter(this.lruHeadNode);
+ return Promise.resolve(node.data as Data);
+ }
+ }
+ this.log('in-memory cache miss');
+    // In-memory cache miss.
+ // Next, try the data store.
+ if (this.dataStore !== null && !this.unavailableFiles.has(cacheable.path)) {
+ let serialized: Uint8Array | undefined;
+ try {
+ serialized = await this.dataStore.load(cacheable.path);
+ this.log('loaded serialized');
+ } catch (err) {
+ // not found in data store
+ this.log(`failed to load (${cacheable.path}): ${err}`);
+ this.unavailableFiles.add(cacheable.path);
+ }
+ if (serialized !== undefined) {
+ this.log(`deserializing`);
+ const data = cacheable.deserialize(serialized);
+ this.addToCache(cacheable.path, data);
+ return data;
+ }
+ }
+ // Not found anywhere. Build the data, and cache for future lookup.
+ this.log(`cache: building (${cacheable.path})`);
+ const data = await cacheable.build();
+ this.addToCache(cacheable.path, data);
+ return data;
+ }
+
+ /**
+ * addToCache() creates a new node for `path` and `data`, inserting the new node at the front of
+ * the doubly-linked list. If the number of entries in the cache exceeds this.maxCount, then the
+ * least recently used entry is evicted
+ * @param path the file path for the data
+ * @param data the deserialized data
+ */
+ private addToCache(path: string, data: unknown) {
+ if (this.cache.size >= this.maxCount) {
+ const toEvict = this.lruTailNode.prev;
+ assert(toEvict !== null);
+ toEvict.unlink();
+ this.cache.delete(toEvict.path);
+ this.log(`evicting ${toEvict.path}`);
+ }
+ const node = new DataCacheNode(path, data);
+ node.insertAfter(this.lruHeadNode);
+ this.cache.set(path, node);
+ this.log(`added ${path}. new count: ${this.cache.size}`);
+ }
+
+ private log(msg: string) {
+ if (this.debugLogger !== null) {
+ this.debugLogger(`DataCache: ${msg}`);
+ }
+ }
+
+ // Max number of entries in the cache before LRU entries are evicted.
+ private readonly maxCount = 4;
+
+ private cache = new Map<string, DataCacheNode>();
+ private lruHeadNode = new DataCacheNode('', null); // placeholder node (no path or data)
+ private lruTailNode = new DataCacheNode('', null); // placeholder node (no path or data)
+ private unavailableFiles = new Set<string>();
+ private dataStore: DataStore | null = null;
+ private debugLogger: Logger | null = null;
+}
+
+/** The data cache */
+export const dataCache = new DataCache();
+
+/** true if the current process is building the cache */
+let isBuildingDataCache = false;
+
+/** @returns true if the data cache is currently being built */
+export function getIsBuildingDataCache() {
+ return isBuildingDataCache;
+}
+
+/** Sets whether the data cache is currently being built */
+export function setIsBuildingDataCache(value = true) {
+ isBuildingDataCache = value;
+}
+
+/**
+ * Cacheable is the interface to something that can be stored into the
+ * DataCache.
+ * The 'npm run gen_cache' tool will look for module-scope variables of this
+ * interface, with the name `d`.
+ */
+export interface Cacheable<Data> {
+ /** the globally unique path for the cacheable data */
+ readonly path: string;
+
+ /**
+ * build() builds the cacheable data.
+ * This is assumed to be an expensive operation and will only happen if the
+ * cache does not already contain the built data.
+ */
+ build(): Promise<Data>;
+
+ /**
+ * serialize() encodes `data` to a binary representation so that it can be stored in a cache file.
+ */
+ serialize(data: Data): Uint8Array;
+
+ /**
+ * deserialize() is the inverse of serialize(), decoding the binary representation back to a Data
+ * object.
+ */
+ deserialize(binary: Uint8Array): Data;
+}
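To make the Cacheable/DataCache contract above concrete, here is a minimal sketch of one cache entry and a fetch. It is illustrative only and not part of the patch: the payload type, cache path, and Float64 encoding are invented for the example; only dataCache.fetch(), the Cacheable shape, and the convention of naming the module-scope entry `d` come from data_cache.ts itself.

import { Cacheable, dataCache } from './data_cache.js';

/** Hypothetical payload type for this sketch. */
interface Table {
  values: number[];
}

// Named `d` because the gen_cache tool scans for module-scope Cacheables with that name.
export const d: Cacheable<Table> = {
  path: 'example/expensive_table.bin', // hypothetical cache-file path
  async build() {
    // Stand-in for an expensive computation worth caching.
    return { values: Array.from({ length: 1024 }, (_, i) => i * i) };
  },
  serialize(data) {
    return new Uint8Array(new Float64Array(data.values).buffer);
  },
  deserialize(binary) {
    // Copy first so the Float64Array view starts at a well-aligned offset 0.
    return { values: Array.from(new Float64Array(binary.slice().buffer)) };
  },
};

// The first fetch() builds the data (or loads it from the configured data store);
// repeat fetches within the LRU window are served from the in-memory cache.
export async function example(): Promise<number> {
  const table = await dataCache.fetch(d);
  return table.values.length;
}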
diff --git a/dom/webgpu/tests/cts/checkout/src/common/framework/fixture.ts b/dom/webgpu/tests/cts/checkout/src/common/framework/fixture.ts
new file mode 100644
index 0000000000..77875e047d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/framework/fixture.ts
@@ -0,0 +1,370 @@
+import { TestCaseRecorder } from '../internal/logging/test_case_recorder.js';
+import { JSONWithUndefined } from '../internal/params_utils.js';
+import { assert, ExceptionCheckOptions, unreachable } from '../util/util.js';
+
+export class SkipTestCase extends Error {}
+export class UnexpectedPassError extends Error {}
+
+export { TestCaseRecorder } from '../internal/logging/test_case_recorder.js';
+
+/** The fully-general type for params passed to a test function invocation. */
+export type TestParams = {
+ readonly [k: string]: JSONWithUndefined;
+};
+
+type DestroyableObject =
+ | { destroy(): void }
+ | { close(): void }
+ | { getExtension(extensionName: 'WEBGL_lose_context'): WEBGL_lose_context };
+
+export class SubcaseBatchState {
+ constructor(
+ protected readonly recorder: TestCaseRecorder,
+ /** The case parameters for this test fixture shared state. Subcase params are not included. */
+ public readonly params: TestParams
+ ) {}
+
+ /**
+ * Runs before the `.before()` function.
+ * @internal MAINTENANCE_TODO: Make this not visible to test code?
+ */
+ async init() {}
+ /**
+ * Runs between the `.before()` function and the subcases.
+ * @internal MAINTENANCE_TODO: Make this not visible to test code?
+ */
+ async postInit() {}
+ /**
+ * Runs after all subcases finish.
+ * @internal MAINTENANCE_TODO: Make this not visible to test code?
+ */
+ async finalize() {}
+}
+
+/**
+ * A Fixture is a class used to instantiate each test sub/case at run time.
+ * A new instance of the Fixture is created for every single test subcase
+ * (i.e. every time the test function is run).
+ */
+export class Fixture<S extends SubcaseBatchState = SubcaseBatchState> {
+ private _params: unknown;
+ private _sharedState: S;
+ /**
+ * Interface for recording logs and test status.
+ *
+ * @internal
+ */
+ readonly rec: TestCaseRecorder;
+ private eventualExpectations: Array<Promise<unknown>> = [];
+ private numOutstandingAsyncExpectations = 0;
+ private objectsToCleanUp: DestroyableObject[] = [];
+
+ public static MakeSharedState(recorder: TestCaseRecorder, params: TestParams): SubcaseBatchState {
+ return new SubcaseBatchState(recorder, params);
+ }
+
+ /** @internal */
+ constructor(sharedState: S, rec: TestCaseRecorder, params: TestParams) {
+ this._sharedState = sharedState;
+ this.rec = rec;
+ this._params = params;
+ }
+
+ /**
+ * Returns the (case+subcase) parameters for this test function invocation.
+ */
+ get params(): unknown {
+ return this._params;
+ }
+
+ /**
+ * Gets the test fixture's shared state. This object is shared between subcases
+ * within the same testcase.
+ */
+ get sharedState(): S {
+ return this._sharedState;
+ }
+
+ /**
+ * Override this to do additional pre-test-function work in a derived fixture.
+ * This has to be a member function instead of an async `createFixture` function, because
+ * we need to be able to ergonomically override it in subclasses.
+ *
+ * @internal MAINTENANCE_TODO: Make this not visible to test code?
+ */
+ async init(): Promise<void> {}
+
+ /**
+ * Override this to do additional post-test-function work in a derived fixture.
+ *
+ * Called even if init was unsuccessful.
+ *
+ * @internal MAINTENANCE_TODO: Make this not visible to test code?
+ */
+ async finalize(): Promise<void> {
+ assert(
+ this.numOutstandingAsyncExpectations === 0,
+ 'there were outstanding immediateAsyncExpectations (e.g. expectUncapturedError) at the end of the test'
+ );
+
+ // Loop to exhaust the eventualExpectations in case they chain off each other.
+ while (this.eventualExpectations.length) {
+ const p = this.eventualExpectations.shift()!;
+ try {
+ await p;
+ } catch (ex) {
+ this.rec.threw(ex);
+ }
+ }
+
+ // And clean up any objects now that they're done being used.
+ for (const o of this.objectsToCleanUp) {
+ if ('getExtension' in o) {
+ const WEBGL_lose_context = o.getExtension('WEBGL_lose_context');
+ if (WEBGL_lose_context) WEBGL_lose_context.loseContext();
+ } else if ('destroy' in o) {
+ o.destroy();
+ } else {
+ o.close();
+ }
+ }
+ }
+
+ /**
+ * Tracks an object to be cleaned up after the test finishes.
+ *
+ * MAINTENANCE_TODO: Use this in more places. (Will be easier once .destroy() is allowed on
+ * invalid objects.)
+ */
+ trackForCleanup<T extends DestroyableObject>(o: T): T {
+ this.objectsToCleanUp.push(o);
+ return o;
+ }
+
+ /** Tracks an object, if it's destroyable, to be cleaned up after the test finishes. */
+ tryTrackForCleanup<T>(o: T): T {
+ if (typeof o === 'object' && o !== null) {
+ if (
+ 'destroy' in o ||
+ 'close' in o ||
+ o instanceof WebGLRenderingContext ||
+ o instanceof WebGL2RenderingContext
+ ) {
+ this.objectsToCleanUp.push(o as unknown as DestroyableObject);
+ }
+ }
+ return o;
+ }
+
+ /** Log a debug message. */
+ debug(msg: string): void {
+ this.rec.debug(new Error(msg));
+ }
+
+ /** Throws an exception marking the subcase as skipped. */
+ skip(msg: string): never {
+ throw new SkipTestCase(msg);
+ }
+
+ /** Throws an exception marking the subcase as skipped if condition is true */
+ skipIf(cond: boolean, msg: string | (() => string) = '') {
+ if (cond) {
+ this.skip(typeof msg === 'function' ? msg() : msg);
+ }
+ }
+
+ /** Log a warning and increase the result status to "Warn". */
+ warn(msg?: string): void {
+ this.rec.warn(new Error(msg));
+ }
+
+ /** Log an error and increase the result status to "ExpectFailed". */
+ fail(msg?: string): void {
+ this.rec.expectationFailed(new Error(msg));
+ }
+
+ /**
+ * Wraps an async function. Tracks its status to fail if the test tries to report a test status
+ * before the async work has finished.
+ */
+ protected async immediateAsyncExpectation<T>(fn: () => Promise<T>): Promise<T> {
+ this.numOutstandingAsyncExpectations++;
+ const ret = await fn();
+ this.numOutstandingAsyncExpectations--;
+ return ret;
+ }
+
+ /**
+ * Wraps an async function, passing it an `Error` object recording the original stack trace.
+ * The async work will be implicitly waited upon before reporting a test status.
+ */
+ protected eventualAsyncExpectation<T>(fn: (niceStack: Error) => Promise<T>): void {
+ const promise = fn(new Error());
+ this.eventualExpectations.push(promise);
+ }
+
+ private expectErrorValue(expectedError: string | true, ex: unknown, niceStack: Error): void {
+ if (!(ex instanceof Error)) {
+ niceStack.message = `THREW non-error value, of type ${typeof ex}: ${ex}`;
+ this.rec.expectationFailed(niceStack);
+ return;
+ }
+ const actualName = ex.name;
+ if (expectedError !== true && actualName !== expectedError) {
+ niceStack.message = `THREW ${actualName}, instead of ${expectedError}: ${ex}`;
+ this.rec.expectationFailed(niceStack);
+ } else {
+ niceStack.message = `OK: threw ${actualName}: ${ex.message}`;
+ this.rec.debug(niceStack);
+ }
+ }
+
+ /** Expect that the provided promise resolves (fulfills). */
+ shouldResolve(p: Promise<unknown>, msg?: string): void {
+ this.eventualAsyncExpectation(async niceStack => {
+ const m = msg ? ': ' + msg : '';
+ try {
+ await p;
+ niceStack.message = 'resolved as expected' + m;
+ } catch (ex) {
+ niceStack.message = `REJECTED${m}`;
+ if (ex instanceof Error) {
+ niceStack.message += '\n' + ex.message;
+ }
+ this.rec.expectationFailed(niceStack);
+ }
+ });
+ }
+
+ /** Expect that the provided promise rejects, with the provided exception name. */
+ shouldReject(
+ expectedName: string,
+ p: Promise<unknown>,
+ { allowMissingStack = false, message }: ExceptionCheckOptions = {}
+ ): void {
+ this.eventualAsyncExpectation(async niceStack => {
+ const m = message ? ': ' + message : '';
+ try {
+ await p;
+ niceStack.message = 'DID NOT REJECT' + m;
+ this.rec.expectationFailed(niceStack);
+ } catch (ex) {
+ this.expectErrorValue(expectedName, ex, niceStack);
+ if (!allowMissingStack) {
+ if (!(ex instanceof Error && typeof ex.stack === 'string')) {
+ const exMessage = ex instanceof Error ? ex.message : '?';
+ niceStack.message = `rejected as expected, but missing stack (${exMessage})${m}`;
+ this.rec.expectationFailed(niceStack);
+ }
+ }
+ }
+ });
+ }
+
+ /**
+ * Expect that the provided function throws (if `true` or `string`) or not (if `false`).
+ * If a string is provided, expect that the thrown exception has that name.
+ *
+ * MAINTENANCE_TODO: Change to `string | false` so the exception name is always checked.
+ */
+ shouldThrow(
+ expectedError: string | boolean,
+ fn: () => void,
+ { allowMissingStack = false, message }: ExceptionCheckOptions = {}
+ ) {
+ const m = message ? ': ' + message : '';
+ try {
+ fn();
+ if (expectedError === false) {
+ this.rec.debug(new Error('did not throw, as expected' + m));
+ } else {
+ this.rec.expectationFailed(new Error('unexpectedly did not throw' + m));
+ }
+ } catch (ex) {
+ if (expectedError === false) {
+ this.rec.expectationFailed(new Error('threw unexpectedly' + m));
+ } else {
+ this.expectErrorValue(expectedError, ex, new Error(m));
+ if (!allowMissingStack) {
+ if (!(ex instanceof Error && typeof ex.stack === 'string')) {
+ this.rec.expectationFailed(new Error('threw as expected, but missing stack' + m));
+ }
+ }
+ }
+ }
+ }
+
+ /** Expect that a condition is true. */
+ expect(cond: boolean, msg?: string): boolean {
+ if (cond) {
+ const m = msg ? ': ' + msg : '';
+ this.rec.debug(new Error('expect OK' + m));
+ } else {
+ this.rec.expectationFailed(new Error(msg));
+ }
+ return cond;
+ }
+
+ /**
+ * If the argument is an `Error`, fail (or warn). If it's `undefined`, no-op.
+ * If the argument is an array, apply the above behavior to each of its elements.
+ */
+ expectOK(
+ error: Error | undefined | (Error | undefined)[],
+ { mode = 'fail', niceStack }: { mode?: 'fail' | 'warn'; niceStack?: Error } = {}
+ ): void {
+ const handleError = (error: Error | undefined) => {
+ if (error instanceof Error) {
+ if (niceStack) {
+ error.stack = niceStack.stack;
+ }
+ if (mode === 'fail') {
+ this.rec.expectationFailed(error);
+ } else if (mode === 'warn') {
+ this.rec.warn(error);
+ } else {
+ unreachable();
+ }
+ }
+ };
+
+ if (Array.isArray(error)) {
+ for (const e of error) {
+ handleError(e);
+ }
+ } else {
+ handleError(error);
+ }
+ }
+
+ eventualExpectOK(
+ error: Promise<Error | undefined | (Error | undefined)[]>,
+ { mode = 'fail' }: { mode?: 'fail' | 'warn' } = {}
+ ) {
+ this.eventualAsyncExpectation(async niceStack => {
+ this.expectOK(await error, { mode, niceStack });
+ });
+ }
+}
+
+export type SubcaseBatchStateFromFixture<F> = F extends Fixture<infer S> ? S : never;
+
+/**
+ * FixtureClass encapsulates a constructor for fixture and a corresponding
+ * shared state factory function. An interface version of the type is also
+ * defined for mixin declaration use ONLY. The interface version is necessary
+ * because mixin classes need a constructor with a single any[] rest
+ * parameter.
+ */
+export type FixtureClass<F extends Fixture = Fixture> = {
+ new (sharedState: SubcaseBatchStateFromFixture<F>, log: TestCaseRecorder, params: TestParams): F;
+ MakeSharedState(recorder: TestCaseRecorder, params: TestParams): SubcaseBatchStateFromFixture<F>;
+};
+export type FixtureClassInterface<F extends Fixture = Fixture> = {
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ new (...args: any[]): F;
+ MakeSharedState(recorder: TestCaseRecorder, params: TestParams): SubcaseBatchStateFromFixture<F>;
+};
+export type FixtureClassWithMixin<FC, M> = FC extends FixtureClass<infer F>
+ ? FixtureClass<F & M>
+ : never;
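As a reading aid (illustrative only, not part of the patch), this is roughly how a test body exercises the Fixture helpers declared above. The `g.test(...)` registration wrapper is assumed from the wider CTS framework; only the methods called on `t` are defined in fixture.ts.

// Illustrative only: `g` is assumed to be a test group from the surrounding framework.
g.test('fixture_helpers_example').fn(async t => {
  t.skipIf(typeof navigator === 'undefined', 'no navigator in this environment');

  // Synchronous throw expectation: the thrown exception's name is checked.
  t.shouldThrow('SyntaxError', () => {
    JSON.parse('not json');
  });

  // Asynchronous expectations are implicitly awaited during finalize().
  const err = new Error('boom');
  err.name = 'OperationError';
  t.shouldReject('OperationError', Promise.reject(err));
  t.shouldResolve(Promise.resolve(42), 'trivially resolves');

  // Plain boolean check.
  t.expect(2 + 2 === 4, 'arithmetic still works');
});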
diff --git a/dom/webgpu/tests/cts/checkout/src/common/framework/metadata.ts b/dom/webgpu/tests/cts/checkout/src/common/framework/metadata.ts
new file mode 100644
index 0000000000..2c2a1ef794
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/framework/metadata.ts
@@ -0,0 +1,28 @@
+import { assert } from '../util/util.js';
+
+/** Metadata about tests (that can't be derived at runtime). */
+export type TestMetadata = {
+ /**
+ * Estimated average time-per-subcase, in milliseconds.
+ * This is used to determine chunking granularity when exporting to WPT with
+ * chunking enabled (like out-wpt/cts-chunked2sec.https.html).
+ */
+ subcaseMS: number;
+};
+
+export type TestMetadataListing = {
+ [testQuery: string]: TestMetadata;
+};
+
+export function loadMetadataForSuite(suiteDir: string): TestMetadataListing | null {
+ assert(typeof require !== 'undefined', 'loadMetadataForSuite is only implemented on Node');
+ const fs = require('fs');
+
+ const metadataFile = `${suiteDir}/listing_meta.json`;
+ if (!fs.existsSync(metadataFile)) {
+ return null;
+ }
+
+ const metadata: TestMetadataListing = JSON.parse(fs.readFileSync(metadataFile, 'utf8'));
+ return metadata;
+}
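A small sketch of how a Node-side tool might consume loadMetadataForSuite() (illustrative only; the suite directory string is a plausible example, not taken from this patch):

import { loadMetadataForSuite } from './metadata.js';

// Hypothetical suite directory; the call returns null when no
// listing_meta.json exists next to the suite.
const listing = loadMetadataForSuite('src/webgpu');
if (listing !== null) {
  for (const [testQuery, meta] of Object.entries(listing)) {
    // subcaseMS drives chunking granularity for the chunked WPT variants.
    console.log(`${testQuery}: ~${meta.subcaseMS} ms per subcase`);
  }
}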
diff --git a/dom/webgpu/tests/cts/checkout/src/common/framework/params_builder.ts b/dom/webgpu/tests/cts/checkout/src/common/framework/params_builder.ts
new file mode 100644
index 0000000000..09a7d9c320
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/framework/params_builder.ts
@@ -0,0 +1,389 @@
+import { Merged, mergeParams, mergeParamsChecked } from '../internal/params_utils.js';
+import { comparePublicParamsPaths, Ordering } from '../internal/query/compare.js';
+import { stringifyPublicParams } from '../internal/query/stringify_params.js';
+import { DeepReadonly } from '../util/types.js';
+import { assert, mapLazy, objectEquals } from '../util/util.js';
+
+import { TestParams } from './fixture.js';
+
+// ================================================================
+// "Public" ParamsBuilder API / Documentation
+// ================================================================
+
+/**
+ * Provides doc comments for the methods of CaseParamsBuilder and SubcaseParamsBuilder.
+ * (Also enforces rough interface match between them.)
+ */
+export interface ParamsBuilder {
+ /**
+ * Expands each item in `this` into zero or more items.
+ * Each item has its parameters expanded with those returned by the `expander`.
+ *
+ * **Note:** When only a single key is being added, use the simpler `expand` for readability.
+ *
+ * ```text
+ * this = [ a , b , c ]
+ * this.map(expander) = [ f(a) f(b) f(c) ]
+ * = [[a1, a2, a3] , [ b1 ] , [] ]
+ * merge and flatten = [ merge(a, a1), merge(a, a2), merge(a, a3), merge(b, b1) ]
+ * ```
+ */
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ expandWithParams(expander: (_: any) => any): any;
+
+ /**
+ * Expands each item in `this` into zero or more items. Each item has its parameters expanded
+ * with one new key, `key`, and the values returned by `expander`.
+ */
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ expand(key: string, expander: (_: any) => any): any;
+
+ /**
+ * Expands each item in `this` to multiple items, one for each item in `newParams`.
+ *
+ * In other words, takes the cartesian product of [ the items in `this` ] and `newParams`.
+ *
+ * **Note:** When only a single key is being added, use the simpler `combine` for readability.
+ *
+ * ```text
+   *   this                              = [ {a:1}, {b:2} ]
+   *   newParams                         = [ {x:1}, {y:2} ]
+   *   this.combineWithParams(newParams) = [ {a:1,x:1}, {a:1,y:2}, {b:2,x:1}, {b:2,y:2} ]
+ * ```
+ */
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ combineWithParams(newParams: Iterable<any>): any;
+
+ /**
+   * Expands each item in `this` to multiple items with `{ [key]: value }` for each value.
+   *
+   * In other words, takes the cartesian product of [ the items in `this` ]
+   * and `[ {[key]: value} for each value in values ]`
+ */
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ combine(key: string, newParams: Iterable<any>): any;
+
+ /**
+ * Filters `this` to only items for which `pred` returns true.
+ */
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ filter(pred: (_: any) => boolean): any;
+
+ /**
+ * Filters `this` to only items for which `pred` returns false.
+ */
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ unless(pred: (_: any) => boolean): any;
+}
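+
+// Illustrative sketch (using kUnitCaseParamsBuilder, defined later in this file; the parameter
+// names are hypothetical): `combine` takes a cartesian product with a fixed list, while `expand`
+// derives values from each existing item.
+//
+//   kUnitCaseParamsBuilder
+//     .combine('x', [1, 2])                       // [ {x:1}, {x:2} ]
+//     .expand('y', p => (p.x === 1 ? [10] : []))  // [ {x:1, y:10} ]  ({x:2} expands to nothing)
+//     .unless(p => p.y > 100);                    // would drop any items where y > 100
+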
+
+/**
+ * Determines the resulting parameter object type which would be generated by an object of
+ * the given ParamsBuilder type.
+ */
+export type ParamTypeOf<
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ T extends ParamsBuilder,
+> = T extends SubcaseParamsBuilder<infer CaseP, infer SubcaseP>
+ ? Merged<CaseP, SubcaseP>
+ : T extends CaseParamsBuilder<infer CaseP>
+ ? CaseP
+ : never;
+
+// ================================================================
+// Implementation
+// ================================================================
+
+/**
+ * Iterable over pairs of either:
+ * - `[case params, Iterable<subcase params>]` if there are subcases.
+ * - `[case params, undefined]` if not.
+ */
+export type CaseSubcaseIterable<CaseP, SubcaseP> = Iterable<
+ readonly [DeepReadonly<CaseP>, Iterable<DeepReadonly<SubcaseP>> | undefined]
+>;
+
+/**
+ * Base class for `CaseParamsBuilder` and `SubcaseParamsBuilder`.
+ */
+export abstract class ParamsBuilderBase<CaseP extends {}, SubcaseP extends {}> {
+ protected readonly cases: (caseFilter: TestParams | null) => Generator<CaseP>;
+
+ constructor(cases: (caseFilter: TestParams | null) => Generator<CaseP>) {
+ this.cases = cases;
+ }
+
+ /**
+ * Hidden from test files. Use `builderIterateCasesWithSubcases` to access this.
+ */
+ protected abstract iterateCasesWithSubcases(
+ caseFilter: TestParams | null
+ ): CaseSubcaseIterable<CaseP, SubcaseP>;
+}
+
+/**
+ * Calls the (normally hidden) `iterateCasesWithSubcases()` method.
+ */
+export function builderIterateCasesWithSubcases(
+ builder: ParamsBuilderBase<{}, {}>,
+ caseFilter: TestParams | null
+) {
+ interface IterableParamsBuilder {
+ iterateCasesWithSubcases(caseFilter: TestParams | null): CaseSubcaseIterable<{}, {}>;
+ }
+
+ return (builder as unknown as IterableParamsBuilder).iterateCasesWithSubcases(caseFilter);
+}
+
+/**
+ * Builder for combinatorial test **case** parameters.
+ *
+ * CaseParamsBuilder is immutable. Each method call returns a new, immutable object,
+ * modifying the list of cases according to the method called.
+ *
+ * This means, for example, that the `unit` passed into `TestBuilder.params()` can be reused.
+ */
+export class CaseParamsBuilder<CaseP extends {}>
+ extends ParamsBuilderBase<CaseP, {}>
+ implements Iterable<DeepReadonly<CaseP>>, ParamsBuilder
+{
+ *iterateCasesWithSubcases(caseFilter: TestParams | null): CaseSubcaseIterable<CaseP, {}> {
+ for (const caseP of this.cases(caseFilter)) {
+ if (caseFilter) {
+ // this.cases() only filters out cases which conflict with caseFilter. Now that we have
+ // the final caseP, filter out cases which are missing keys that caseFilter requires.
+ const ordering = comparePublicParamsPaths(caseP, caseFilter);
+ if (ordering === Ordering.StrictSuperset || ordering === Ordering.Unordered) {
+ continue;
+ }
+ }
+
+ yield [caseP as DeepReadonly<typeof caseP>, undefined];
+ }
+ }
+
+ [Symbol.iterator](): Iterator<DeepReadonly<CaseP>> {
+ return this.cases(null) as Iterator<DeepReadonly<CaseP>>;
+ }
+
+ /** @inheritDoc */
+ expandWithParams<NewP extends {}>(
+ expander: (_: CaseP) => Iterable<NewP>
+ ): CaseParamsBuilder<Merged<CaseP, NewP>> {
+ const baseGenerator = this.cases;
+ return new CaseParamsBuilder(function* (caseFilter) {
+ for (const a of baseGenerator(caseFilter)) {
+ for (const b of expander(a)) {
+ if (caseFilter) {
+ // If the expander generated any key-value pair that conflicts with caseFilter, skip.
+ const kvPairs = Object.entries(b);
+ if (kvPairs.some(([k, v]) => k in caseFilter && !objectEquals(caseFilter[k], v))) {
+ continue;
+ }
+ }
+
+ yield mergeParamsChecked(a, b);
+ }
+ }
+ });
+ }
+
+ /** @inheritDoc */
+ expand<NewPKey extends string, NewPValue>(
+ key: NewPKey,
+ expander: (_: CaseP) => Iterable<NewPValue>
+ ): CaseParamsBuilder<Merged<CaseP, { [name in NewPKey]: NewPValue }>> {
+ const baseGenerator = this.cases;
+ return new CaseParamsBuilder(function* (caseFilter) {
+ for (const a of baseGenerator(caseFilter)) {
+ assert(!(key in a), `New key '${key}' already exists in ${JSON.stringify(a)}`);
+
+ for (const v of expander(a)) {
+ // If the expander generated a value for this key that conflicts with caseFilter, skip.
+ if (caseFilter && key in caseFilter) {
+ if (!objectEquals(caseFilter[key], v)) {
+ continue;
+ }
+ }
+ yield { ...a, [key]: v } as Merged<CaseP, { [name in NewPKey]: NewPValue }>;
+ }
+ }
+ });
+ }
+
+ /** @inheritDoc */
+ combineWithParams<NewP extends {}>(
+ newParams: Iterable<NewP>
+ ): CaseParamsBuilder<Merged<CaseP, NewP>> {
+ assertNotGenerator(newParams);
+ const seenValues = new Set<string>();
+ for (const params of newParams) {
+ const paramsStr = stringifyPublicParams(params);
+ assert(!seenValues.has(paramsStr), `Duplicate entry in combine[WithParams]: ${paramsStr}`);
+ seenValues.add(paramsStr);
+ }
+
+ return this.expandWithParams(() => newParams);
+ }
+
+ /** @inheritDoc */
+ combine<NewPKey extends string, NewPValue>(
+ key: NewPKey,
+ values: Iterable<NewPValue>
+ ): CaseParamsBuilder<Merged<CaseP, { [name in NewPKey]: NewPValue }>> {
+ assertNotGenerator(values);
+ const mapped = mapLazy(values, v => ({ [key]: v }) as { [name in NewPKey]: NewPValue });
+ return this.combineWithParams(mapped);
+ }
+
+ /** @inheritDoc */
+ filter(pred: (_: CaseP) => boolean): CaseParamsBuilder<CaseP> {
+ const baseGenerator = this.cases;
+ return new CaseParamsBuilder(function* (caseFilter) {
+ for (const a of baseGenerator(caseFilter)) {
+ if (pred(a)) yield a;
+ }
+ });
+ }
+
+ /** @inheritDoc */
+ unless(pred: (_: CaseP) => boolean): CaseParamsBuilder<CaseP> {
+ return this.filter(x => !pred(x));
+ }
+
+ /**
+ * "Finalize" the list of cases and begin defining subcases.
+ * Returns a new SubcaseParamsBuilder. Methods called on SubcaseParamsBuilder
+ * generate new subcases instead of new cases.
+ */
+ beginSubcases(): SubcaseParamsBuilder<CaseP, {}> {
+ return new SubcaseParamsBuilder(this.cases, function* () {
+ yield {};
+ });
+ }
+}
+
+/**
+ * The unit CaseParamsBuilder, representing a single case with no params: `[ {} ]`.
+ *
+ * This unit builder is passed into every `.params()`/`.paramsSubcasesOnly()` callback, so
+ * `kUnitCaseParamsBuilder` is only explicitly needed if constructing a ParamsBuilder outside of
+ * a test builder.
+ */
+export const kUnitCaseParamsBuilder = new CaseParamsBuilder(function* () {
+ yield {};
+});
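+
+// Illustrative sketch (parameter names and values are hypothetical): building cases, then subcases.
+//
+//   kUnitCaseParamsBuilder
+//     .combine('format', ['a', 'b'])   // two cases
+//     .beginSubcases()
+//     .combine('size', [1, 2, 3]);     // three subcases within each case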
+
+/**
+ * Builder for combinatorial test _subcase_ parameters.
+ *
+ * SubcaseParamsBuilder is immutable. Each method call returns a new, immutable object,
+ * modifying the list of subcases according to the method called.
+ */
+export class SubcaseParamsBuilder<CaseP extends {}, SubcaseP extends {}>
+ extends ParamsBuilderBase<CaseP, SubcaseP>
+ implements ParamsBuilder
+{
+ protected readonly subcases: (_: CaseP) => Generator<SubcaseP>;
+
+ constructor(
+ cases: (caseFilter: TestParams | null) => Generator<CaseP>,
+ generator: (_: CaseP) => Generator<SubcaseP>
+ ) {
+ super(cases);
+ this.subcases = generator;
+ }
+
+ *iterateCasesWithSubcases(caseFilter: TestParams | null): CaseSubcaseIterable<CaseP, SubcaseP> {
+ for (const caseP of this.cases(caseFilter)) {
+ if (caseFilter) {
+ // this.cases() only filters out cases which conflict with caseFilter. Now that we have
+ // the final caseP, filter out cases which are missing keys that caseFilter requires.
+ const ordering = comparePublicParamsPaths(caseP, caseFilter);
+ if (ordering === Ordering.StrictSuperset || ordering === Ordering.Unordered) {
+ continue;
+ }
+ }
+
+ const subcases = Array.from(this.subcases(caseP));
+ if (subcases.length) {
+ yield [
+ caseP as DeepReadonly<typeof caseP>,
+ subcases as DeepReadonly<(typeof subcases)[number]>[],
+ ];
+ }
+ }
+ }
+
+ /** @inheritDoc */
+ expandWithParams<NewP extends {}>(
+ expander: (_: Merged<CaseP, SubcaseP>) => Iterable<NewP>
+ ): SubcaseParamsBuilder<CaseP, Merged<SubcaseP, NewP>> {
+ const baseGenerator = this.subcases;
+ return new SubcaseParamsBuilder(this.cases, function* (base) {
+ for (const a of baseGenerator(base)) {
+ for (const b of expander(mergeParams(base, a))) {
+ yield mergeParamsChecked(a, b);
+ }
+ }
+ });
+ }
+
+ /** @inheritDoc */
+ expand<NewPKey extends string, NewPValue>(
+ key: NewPKey,
+ expander: (_: Merged<CaseP, SubcaseP>) => Iterable<NewPValue>
+ ): SubcaseParamsBuilder<CaseP, Merged<SubcaseP, { [name in NewPKey]: NewPValue }>> {
+ const baseGenerator = this.subcases;
+ return new SubcaseParamsBuilder(this.cases, function* (base) {
+ for (const a of baseGenerator(base)) {
+ const before = mergeParams(base, a);
+ assert(!(key in before), () => `Key '${key}' already exists in ${JSON.stringify(before)}`);
+
+ for (const v of expander(before)) {
+ yield { ...a, [key]: v } as Merged<SubcaseP, { [k in NewPKey]: NewPValue }>;
+ }
+ }
+ });
+ }
+
+ /** @inheritDoc */
+ combineWithParams<NewP extends {}>(
+ newParams: Iterable<NewP>
+ ): SubcaseParamsBuilder<CaseP, Merged<SubcaseP, NewP>> {
+ assertNotGenerator(newParams);
+ return this.expandWithParams(() => newParams);
+ }
+
+ /** @inheritDoc */
+ combine<NewPKey extends string, NewPValue>(
+ key: NewPKey,
+ values: Iterable<NewPValue>
+ ): SubcaseParamsBuilder<CaseP, Merged<SubcaseP, { [name in NewPKey]: NewPValue }>> {
+ assertNotGenerator(values);
+ return this.expand(key, () => values);
+ }
+
+ /** @inheritDoc */
+ filter(pred: (_: Merged<CaseP, SubcaseP>) => boolean): SubcaseParamsBuilder<CaseP, SubcaseP> {
+ const baseGenerator = this.subcases;
+ return new SubcaseParamsBuilder(this.cases, function* (base) {
+ for (const a of baseGenerator(base)) {
+ if (pred(mergeParams(base, a))) yield a;
+ }
+ });
+ }
+
+ /** @inheritDoc */
+ unless(pred: (_: Merged<CaseP, SubcaseP>) => boolean): SubcaseParamsBuilder<CaseP, SubcaseP> {
+ return this.filter(x => !pred(x));
+ }
+}
+
+/** Assert an object is not a Generator (a thing returned from a generator function). */
+function assertNotGenerator(x: object) {
+ if ('constructor' in x) {
+ assert(
+ x.constructor !== (function* () {})().constructor,
+ 'Argument must not be a generator, as generators are not reusable'
+ );
+ }
+}
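+
+// Illustrative note: `combine('x', [0, 1])` is fine (arrays can be iterated repeatedly), but
+// passing the result of a generator function, e.g. `combine('x', (function* () { yield 0; })())`,
+// is rejected, because a generator is exhausted after one iteration and these builders iterate
+// their inputs repeatedly.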
diff --git a/dom/webgpu/tests/cts/checkout/src/common/framework/resources.ts b/dom/webgpu/tests/cts/checkout/src/common/framework/resources.ts
new file mode 100644
index 0000000000..05451304b6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/framework/resources.ts
@@ -0,0 +1,110 @@
+/**
+ * Base path for resources. The default value is correct for non-worker WPT, but standalone and
+ * workers must access resources using a different base path, so this is overridden in
+ * `test_worker-worker.ts` and `standalone.ts`.
+ */
+let baseResourcePath = './resources';
+let crossOriginHost = '';
+
+function getAbsoluteBaseResourcePath(path: string) {
+ // Path is already an absolute one.
+ if (path[0] === '/') {
+ return path;
+ }
+
+ // Path is relative
+ const relparts = window.location.pathname.split('/');
+ relparts.pop();
+ const pathparts = path.split('/');
+
+ let i;
+ for (i = 0; i < pathparts.length; ++i) {
+ switch (pathparts[i]) {
+ case '':
+ break;
+ case '.':
+ break;
+ case '..':
+ relparts.pop();
+ break;
+ default:
+ relparts.push(pathparts[i]);
+ break;
+ }
+ }
+
+ return relparts.join('/');
+}
+
+function runningOnLocalHost(): boolean {
+ const hostname = window.location.hostname;
+ return hostname === 'localhost' || hostname === '127.0.0.1' || hostname === '::1';
+}
+
+/**
+ * Get a path to a resource in the `resources` directory, relative to the current execution context
+ * (html file or worker .js file), for `fetch()`, `<img>`, `<video>`, etc., but served from a
+ * cross-origin host. Provide `onlineUrl` if the case is running online.
+ * @internal MAINTENANCE_TODO: Cases may run in a LAN environment (not localhost, but with no
+ * internet access). We temporarily use `crossOriginHost` to configure the cross-origin host name in
+ * that situation, but we are open to an auto-detect mechanism or other solutions.
+ */
+export function getCrossOriginResourcePath(pathRelativeToResourcesDir: string, onlineUrl = '') {
+ // A cross origin host has been configured. Use this to load resource.
+ if (crossOriginHost !== '') {
+ return (
+ crossOriginHost +
+ getAbsoluteBaseResourcePath(baseResourcePath) +
+ '/' +
+ pathRelativeToResourcesDir
+ );
+ }
+
+  // Use the 'localhost'/'127.0.0.1' trick to load a cross-origin resource: if the case is running
+  // on 'localhost', use '127.0.0.1' as the cross-origin host name; otherwise, use 'localhost'.
+ if (runningOnLocalHost()) {
+ let crossOriginHostName = '';
+ if (location.hostname === 'localhost') {
+ crossOriginHostName = 'http://127.0.0.1';
+ } else {
+ crossOriginHostName = 'http://localhost';
+ }
+
+ return (
+ crossOriginHostName +
+ ':' +
+ location.port +
+ getAbsoluteBaseResourcePath(baseResourcePath) +
+ '/' +
+ pathRelativeToResourcesDir
+ );
+ }
+
+ return onlineUrl;
+}
+
+/**
+ * Get a path to a resource in the `resources` directory, relative to the current execution context
+ * (html file or worker .js file), for `fetch()`, `<img>`, `<video>`, etc. Use
+ * `getCrossOriginResourcePath` if you want to load a resource from a cross-origin host.
+ */
+export function getResourcePath(pathRelativeToResourcesDir: string) {
+ return baseResourcePath + '/' + pathRelativeToResourcesDir;
+}
+
+/**
+ * Set the base resource path (path to the `resources` directory relative to the current
+ * execution context).
+ */
+export function setBaseResourcePath(path: string) {
+ baseResourcePath = path;
+}
+
+/**
+ * Set the cross origin host and cases related to cross origin
+ * will load resource from the given host.
+ */
+export function setCrossOriginHost(host: string) {
+ crossOriginHost = host;
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/common/framework/test_config.ts b/dom/webgpu/tests/cts/checkout/src/common/framework/test_config.ts
new file mode 100644
index 0000000000..2575418299
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/framework/test_config.ts
@@ -0,0 +1,32 @@
+export type TestConfig = {
+ maxSubcasesInFlight: number;
+ testHeartbeatCallback: () => void;
+ noRaceWithRejectOnTimeout: boolean;
+
+ /**
+ * Logger for debug messages from the test framework
+ * (that can't be captured in the logs of a test).
+ */
+ frameworkDebugLog?: (msg: string) => void;
+
+ /**
+ * Controls the emission of loops in constant-evaluation shaders under
+ * 'webgpu:shader,execution,expression,*'
+   * FXC is extremely slow to compile shaders with loops unrolled, whereas the
+   * MSL compiler is extremely slow to compile shaders with loops rolled.
+ */
+ unrollConstEvalLoops: boolean;
+
+ /**
+ * Whether or not we're running in compatibility mode.
+ */
+ compatibility: boolean;
+};
+
+export const globalTestConfig: TestConfig = {
+ maxSubcasesInFlight: 500,
+ testHeartbeatCallback: () => {},
+ noRaceWithRejectOnTimeout: false,
+ unrollConstEvalLoops: false,
+ compatibility: false,
+};
diff --git a/dom/webgpu/tests/cts/checkout/src/common/framework/test_group.ts b/dom/webgpu/tests/cts/checkout/src/common/framework/test_group.ts
new file mode 100644
index 0000000000..5b761db9db
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/framework/test_group.ts
@@ -0,0 +1 @@
+export { makeTestGroup } from '../internal/test_group.js';
diff --git a/dom/webgpu/tests/cts/checkout/src/common/internal/file_loader.ts b/dom/webgpu/tests/cts/checkout/src/common/internal/file_loader.ts
new file mode 100644
index 0000000000..b5e1b1a446
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/internal/file_loader.ts
@@ -0,0 +1,105 @@
+import { IterableTestGroup } from '../internal/test_group.js';
+import { assert } from '../util/util.js';
+
+import { parseQuery } from './query/parseQuery.js';
+import { TestQuery } from './query/query.js';
+import { TestSuiteListing } from './test_suite_listing.js';
+import { loadTreeForQuery, TestTree, TestTreeLeaf } from './tree.js';
+
+// A listing file, e.g. either of:
+// - `src/webgpu/listing.ts` (which is dynamically computed, has a Promise<TestSuiteListing>)
+// - `out/webgpu/listing.js` (which is pre-baked, has a TestSuiteListing)
+interface ListingFile {
+ listing: Promise<TestSuiteListing> | TestSuiteListing;
+}
+
+// A .spec.ts file, as imported.
+export interface SpecFile {
+ readonly description: string;
+ readonly g: IterableTestGroup;
+}
+
+export interface ImportInfo {
+ url: string;
+}
+
+interface TestFileLoaderEventMap {
+ import: MessageEvent<ImportInfo>;
+ imported: MessageEvent<ImportInfo>;
+ finish: MessageEvent<void>;
+}
+
+// Override the types for addEventListener/removeEventListener so the callbacks can be
+// strongly typed.
+/* eslint-disable-next-line @typescript-eslint/no-unsafe-declaration-merging */
+export interface TestFileLoader extends EventTarget {
+ addEventListener<K extends keyof TestFileLoaderEventMap>(
+ type: K,
+ listener: (this: TestFileLoader, ev: TestFileLoaderEventMap[K]) => void,
+ options?: boolean | AddEventListenerOptions
+ ): void;
+ addEventListener(
+ type: string,
+ listener: EventListenerOrEventListenerObject,
+ options?: boolean | AddEventListenerOptions
+ ): void;
+ removeEventListener<K extends keyof TestFileLoaderEventMap>(
+ type: K,
+ listener: (this: TestFileLoader, ev: TestFileLoaderEventMap[K]) => void,
+ options?: boolean | EventListenerOptions
+ ): void;
+ removeEventListener(
+ type: string,
+ listener: EventListenerOrEventListenerObject,
+ options?: boolean | EventListenerOptions
+ ): void;
+}
+
+// Base class for DefaultTestFileLoader and FakeTestFileLoader.
+/* eslint-disable-next-line @typescript-eslint/no-unsafe-declaration-merging */
+export abstract class TestFileLoader extends EventTarget {
+ abstract listing(suite: string): Promise<TestSuiteListing>;
+ protected abstract import(path: string): Promise<SpecFile>;
+
+ async importSpecFile(suite: string, path: string[]): Promise<SpecFile> {
+ const url = `${suite}/${path.join('/')}.spec.js`;
+ this.dispatchEvent(new MessageEvent<ImportInfo>('import', { data: { url } }));
+ const ret = await this.import(url);
+ this.dispatchEvent(new MessageEvent<ImportInfo>('imported', { data: { url } }));
+ return ret;
+ }
+
+ async loadTree(
+ query: TestQuery,
+ {
+ subqueriesToExpand = [],
+ maxChunkTime = Infinity,
+ }: { subqueriesToExpand?: string[]; maxChunkTime?: number } = {}
+ ): Promise<TestTree> {
+ const tree = await loadTreeForQuery(this, query, {
+ subqueriesToExpand: subqueriesToExpand.map(s => {
+ const q = parseQuery(s);
+ assert(q.level >= 2, () => `subqueriesToExpand entries should not be multi-file:\n ${q}`);
+ return q;
+ }),
+ maxChunkTime,
+ });
+ this.dispatchEvent(new MessageEvent<void>('finish'));
+ return tree;
+ }
+
+ async loadCases(query: TestQuery): Promise<IterableIterator<TestTreeLeaf>> {
+ const tree = await this.loadTree(query);
+ return tree.iterateLeaves();
+ }
+}
+
+export class DefaultTestFileLoader extends TestFileLoader {
+ async listing(suite: string): Promise<TestSuiteListing> {
+ return ((await import(`../../${suite}/listing.js`)) as ListingFile).listing;
+ }
+
+ import(path: string): Promise<SpecFile> {
+ return import(`../../${path}`);
+ }
+}
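+
+// Illustrative sketch (the query string is hypothetical):
+//
+//   const loader = new DefaultTestFileLoader();
+//   const cases = await loader.loadCases(parseQuery('webgpu:*'));
+//   for (const leaf of cases) { /* each leaf is a TestTreeLeaf */ }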
diff --git a/dom/webgpu/tests/cts/checkout/src/common/internal/logging/log_message.ts b/dom/webgpu/tests/cts/checkout/src/common/internal/logging/log_message.ts
new file mode 100644
index 0000000000..ee006cdeb3
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/internal/logging/log_message.ts
@@ -0,0 +1,44 @@
+import { ErrorWithExtra } from '../../util/util.js';
+import { extractImportantStackTrace } from '../stack.js';
+
+export class LogMessageWithStack extends Error {
+ readonly extra: unknown;
+
+ private stackHiddenMessage: string | undefined = undefined;
+
+ constructor(name: string, ex: Error | ErrorWithExtra) {
+ super(ex.message);
+
+ this.name = name;
+ this.stack = ex.stack;
+ if ('extra' in ex) {
+ this.extra = ex.extra;
+ }
+ }
+
+ /** Set a flag so the stack is not printed in toJSON(). */
+ setStackHidden(stackHiddenMessage: string) {
+ this.stackHiddenMessage ??= stackHiddenMessage;
+ }
+
+ toJSON(): string {
+ let m = this.name;
+ if (this.message) m += ': ' + this.message;
+ if (this.stack) {
+ if (this.stackHiddenMessage === undefined) {
+ m += '\n' + extractImportantStackTrace(this);
+ } else if (this.stackHiddenMessage) {
+ m += `\n at (elided: ${this.stackHiddenMessage})`;
+ }
+ }
+ return m;
+ }
+}
+
+/**
+ * Returns a string, nicely indented, for debug logs.
+ * This is used in the cmdline and wpt runtimes. In WPT, it shows up in the `*-actual.txt` file.
+ */
+export function prettyPrintLog(log: LogMessageWithStack): string {
+ return ' - ' + log.toJSON().replace(/\n/g, '\n ');
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/common/internal/logging/logger.ts b/dom/webgpu/tests/cts/checkout/src/common/internal/logging/logger.ts
new file mode 100644
index 0000000000..e4526cff54
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/internal/logging/logger.ts
@@ -0,0 +1,30 @@
+import { version } from '../version.js';
+
+import { LiveTestCaseResult } from './result.js';
+import { TestCaseRecorder } from './test_case_recorder.js';
+
+export type LogResults = Map<string, LiveTestCaseResult>;
+
+export class Logger {
+ static globalDebugMode: boolean = false;
+
+ readonly overriddenDebugMode: boolean | undefined;
+ readonly results: LogResults = new Map();
+
+ constructor({ overrideDebugMode }: { overrideDebugMode?: boolean } = {}) {
+ this.overriddenDebugMode = overrideDebugMode;
+ }
+
+ record(name: string): [TestCaseRecorder, LiveTestCaseResult] {
+ const result: LiveTestCaseResult = { status: 'running', timems: -1 };
+ this.results.set(name, result);
+ return [
+ new TestCaseRecorder(result, this.overriddenDebugMode ?? Logger.globalDebugMode),
+ result,
+ ];
+ }
+
+ asJSON(space?: number): string {
+ return JSON.stringify({ version, results: Array.from(this.results) }, undefined, space);
+ }
+}
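+
+// Illustrative sketch (the case name is hypothetical):
+//
+//   const logger = new Logger();
+//   const [rec, result] = logger.record('suite:file,path:test:x=1');
+//   rec.start(); /* ... run the case, logging into rec ... */ rec.finish();
+//   logger.asJSON(2);  // '{ "version": ..., "results": [ [name, LiveTestCaseResult], ... ] }'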
diff --git a/dom/webgpu/tests/cts/checkout/src/common/internal/logging/result.ts b/dom/webgpu/tests/cts/checkout/src/common/internal/logging/result.ts
new file mode 100644
index 0000000000..3318e8c937
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/internal/logging/result.ts
@@ -0,0 +1,21 @@
+import { LogMessageWithStack } from './log_message.js';
+
+// MAINTENANCE_TODO: Add warn expectations
+export type Expectation = 'pass' | 'skip' | 'fail';
+
+export type Status = 'notrun' | 'running' | 'warn' | Expectation;
+
+export interface TestCaseResult {
+ status: Status;
+ timems: number;
+}
+
+export interface LiveTestCaseResult extends TestCaseResult {
+ logs?: LogMessageWithStack[];
+}
+
+export interface TransferredTestCaseResult extends TestCaseResult {
+ // When transferred from a worker, a LogMessageWithStack turns into a generic Error
+ // (its prototype gets lost and replaced with Error).
+ logs?: Error[];
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/common/internal/logging/test_case_recorder.ts b/dom/webgpu/tests/cts/checkout/src/common/internal/logging/test_case_recorder.ts
new file mode 100644
index 0000000000..f5c3252b5c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/internal/logging/test_case_recorder.ts
@@ -0,0 +1,184 @@
+import { SkipTestCase, UnexpectedPassError } from '../../framework/fixture.js';
+import { globalTestConfig } from '../../framework/test_config.js';
+import { now, assert } from '../../util/util.js';
+
+import { LogMessageWithStack } from './log_message.js';
+import { Expectation, LiveTestCaseResult, Status } from './result.js';
+
+enum LogSeverity {
+ NotRun = 0,
+ Skip = 1,
+ Pass = 2,
+ Warn = 3,
+ ExpectFailed = 4,
+ ValidationFailed = 5,
+ ThrewException = 6,
+}
+
+const kMaxLogStacks = 2;
+const kMinSeverityForStack = LogSeverity.Warn;
+
+function logSeverityToString(status: LogSeverity): Status {
+ switch (status) {
+ case LogSeverity.NotRun:
+ return 'notrun';
+ case LogSeverity.Pass:
+ return 'pass';
+ case LogSeverity.Skip:
+ return 'skip';
+ case LogSeverity.Warn:
+ return 'warn';
+ default:
+ return 'fail'; // Everything else is an error
+ }
+}
+
+/** Holds onto a LiveTestCaseResult owned by the Logger, and writes the results into it. */
+export class TestCaseRecorder {
+ readonly result: LiveTestCaseResult;
+ public nonskippedSubcaseCount: number = 0;
+ private inSubCase: boolean = false;
+ private subCaseStatus = LogSeverity.NotRun;
+ private finalCaseStatus = LogSeverity.NotRun;
+ private hideStacksBelowSeverity = kMinSeverityForStack;
+ private startTime = -1;
+ private logs: LogMessageWithStack[] = [];
+ private logLinesAtCurrentSeverity = 0;
+ private debugging = false;
+ /** Used to dedup log messages which have identical stacks. */
+ private messagesForPreviouslySeenStacks = new Map<string, LogMessageWithStack>();
+
+ constructor(result: LiveTestCaseResult, debugging: boolean) {
+ this.result = result;
+ this.debugging = debugging;
+ }
+
+ start(): void {
+ assert(this.startTime < 0, 'TestCaseRecorder cannot be reused');
+ this.startTime = now();
+ }
+
+ finish(): void {
+ // This is a framework error. If this assert is hit, it won't be localized
+ // to a test. The whole test run will fail out.
+ assert(this.startTime >= 0, 'internal error: finish() before start()');
+
+ const timeMilliseconds = now() - this.startTime;
+ // Round to next microsecond to avoid storing useless .xxxx00000000000002 in results.
+ this.result.timems = Math.ceil(timeMilliseconds * 1000) / 1000;
+
+ if (this.finalCaseStatus === LogSeverity.Skip && this.nonskippedSubcaseCount !== 0) {
+ this.threw(new Error('internal error: case is "skip" but has nonskipped subcases'));
+ }
+
+ // Convert numeric enum back to string (but expose 'exception' as 'fail')
+ this.result.status = logSeverityToString(this.finalCaseStatus);
+
+ this.result.logs = this.logs;
+ }
+
+ beginSubCase() {
+ this.subCaseStatus = LogSeverity.NotRun;
+ this.inSubCase = true;
+ }
+
+ endSubCase(expectedStatus: Expectation) {
+ if (this.subCaseStatus !== LogSeverity.Skip) {
+ this.nonskippedSubcaseCount++;
+ }
+ try {
+ if (expectedStatus === 'fail') {
+ if (this.subCaseStatus <= LogSeverity.Warn) {
+ throw new UnexpectedPassError();
+ } else {
+ this.subCaseStatus = LogSeverity.Pass;
+ }
+ }
+ } finally {
+ this.inSubCase = false;
+ this.finalCaseStatus = Math.max(this.finalCaseStatus, this.subCaseStatus);
+ }
+ }
+
+ injectResult(injectedResult: LiveTestCaseResult): void {
+ Object.assign(this.result, injectedResult);
+ }
+
+ debug(ex: Error): void {
+ if (!this.debugging) return;
+ this.logImpl(LogSeverity.Pass, 'DEBUG', ex);
+ }
+
+ info(ex: Error): void {
+ // We need this to use the lowest LogSeverity so it doesn't override the current severity for this test case.
+ this.logImpl(LogSeverity.NotRun, 'INFO', ex);
+ }
+
+ skipped(ex: SkipTestCase): void {
+ this.logImpl(LogSeverity.Skip, 'SKIP', ex);
+ }
+
+ warn(ex: Error): void {
+ this.logImpl(LogSeverity.Warn, 'WARN', ex);
+ }
+
+ expectationFailed(ex: Error): void {
+ this.logImpl(LogSeverity.ExpectFailed, 'EXPECTATION FAILED', ex);
+ }
+
+ validationFailed(ex: Error): void {
+ this.logImpl(LogSeverity.ValidationFailed, 'VALIDATION FAILED', ex);
+ }
+
+ passed(): void {
+ if (this.inSubCase) {
+ this.subCaseStatus = Math.max(this.subCaseStatus, LogSeverity.Pass);
+ } else {
+ this.finalCaseStatus = Math.max(this.finalCaseStatus, LogSeverity.Pass);
+ }
+ }
+
+ threw(ex: unknown): void {
+ if (ex instanceof SkipTestCase) {
+ this.skipped(ex);
+ return;
+ }
+ this.logImpl(LogSeverity.ThrewException, 'EXCEPTION', ex);
+ }
+
+ private logImpl(level: LogSeverity, name: string, baseException: unknown): void {
+ assert(baseException instanceof Error, 'test threw a non-Error object');
+ globalTestConfig.testHeartbeatCallback();
+ const logMessage = new LogMessageWithStack(name, baseException);
+
+ // Final case status should be the "worst" of all log entries.
+ if (this.inSubCase) {
+ this.subCaseStatus = Math.max(this.subCaseStatus, level);
+ } else {
+ this.finalCaseStatus = Math.max(this.finalCaseStatus, level);
+ }
+
+    // setStackHidden() for all logs except the `kMaxLogStacks` stacks at the highest severity
+ if (level > this.hideStacksBelowSeverity) {
+ this.logLinesAtCurrentSeverity = 0;
+ this.hideStacksBelowSeverity = level;
+
+      // Go back and hide the stacks (setStackHidden) of everything logged at a lower severity
+ for (const log of this.logs) {
+ log.setStackHidden('below max severity');
+ }
+ }
+ if (level === this.hideStacksBelowSeverity) {
+ this.logLinesAtCurrentSeverity++;
+ } else if (level < kMinSeverityForStack) {
+ logMessage.setStackHidden('');
+ } else if (level < this.hideStacksBelowSeverity) {
+ logMessage.setStackHidden('below max severity');
+ }
+ if (this.logLinesAtCurrentSeverity > kMaxLogStacks) {
+ logMessage.setStackHidden(`only ${kMaxLogStacks} shown`);
+ }
+
+ this.logs.push(logMessage);
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/common/internal/params_utils.ts b/dom/webgpu/tests/cts/checkout/src/common/internal/params_utils.ts
new file mode 100644
index 0000000000..6c336b935d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/internal/params_utils.ts
@@ -0,0 +1,138 @@
+import { TestParams } from '../framework/fixture.js';
+import { ResolveType, UnionToIntersection } from '../util/types.js';
+import { assert } from '../util/util.js';
+
+import { comparePublicParamsPaths, Ordering } from './query/compare.js';
+import { kWildcard, kParamSeparator, kParamKVSeparator } from './query/separators.js';
+
+export type JSONWithUndefined =
+ | undefined
+ | null
+ | number
+ | string
+ | boolean
+ | readonly JSONWithUndefined[]
+ // Ideally this would recurse into JSONWithUndefined, but it breaks code.
+ | { readonly [k: string]: unknown };
+export interface TestParamsRW {
+ [k: string]: JSONWithUndefined;
+}
+export type TestParamsIterable = Iterable<TestParams>;
+
+export function paramKeyIsPublic(key: string): boolean {
+ return !key.startsWith('_');
+}
+
+export function extractPublicParams(params: TestParams): TestParams {
+ const publicParams: TestParamsRW = {};
+ for (const k of Object.keys(params)) {
+ if (paramKeyIsPublic(k)) {
+ publicParams[k] = params[k];
+ }
+ }
+ return publicParams;
+}
+
+/** Used to escape reserved characters in URIs */
+const kPercent = '%';
+
+export const badParamValueChars = new RegExp(
+ '[' + kParamKVSeparator + kParamSeparator + kWildcard + kPercent + ']'
+);
+
+export function publicParamsEquals(x: TestParams, y: TestParams): boolean {
+ return comparePublicParamsPaths(x, y) === Ordering.Equal;
+}
+
+export type KeyOfNeverable<T> = T extends never ? never : keyof T;
+export type AllKeysFromUnion<T> = keyof T | KeyOfNeverable<UnionToIntersection<T>>;
+export type KeyOfOr<T, K, Default> = K extends keyof T ? T[K] : Default;
+
+/**
+ * Flatten a union of interfaces into a single interface encoding the same type.
+ *
+ * Flattens a union in such a way that:
+ * `{ a: number, b?: undefined } | { b: string, a?: undefined }`
+ * (which is the value type of `[{ a: 1 }, { b: 1 }]`)
+ * becomes `{ a: number | undefined, b: string | undefined }`.
+ *
+ * And also works for `{ a: number } | { b: string }` which maps to the same.
+ */
+export type FlattenUnionOfInterfaces<T> = {
+ [K in AllKeysFromUnion<T>]: KeyOfOr<
+ T,
+ // If T always has K, just take T[K] (union of C[K] for each component C of T):
+ K,
+ // Otherwise, take the union of C[K] for each component C of T, PLUS undefined:
+ undefined | KeyOfOr<UnionToIntersection<T>, K, void>
+ >;
+};
+
+/* eslint-disable-next-line @typescript-eslint/no-unused-vars */
+function typeAssert<_ extends 'pass'>() {}
+{
+ type Test<T, U> = [T] extends [U]
+ ? [U] extends [T]
+ ? 'pass'
+ : { actual: ResolveType<T>; expected: U }
+ : { actual: ResolveType<T>; expected: U };
+
+ type T01 = { a: number } | { b: string };
+ type T02 = { a: number } | { b?: string };
+ type T03 = { a: number } | { a?: number };
+ type T04 = { a: number } | { a: string };
+ type T05 = { a: number } | { a?: string };
+
+ type T11 = { a: number; b?: undefined } | { a?: undefined; b: string };
+
+ type T21 = { a: number; b?: undefined } | { b: string };
+ type T22 = { a: number; b?: undefined } | { b?: string };
+ type T23 = { a: number; b?: undefined } | { a?: number };
+ type T24 = { a: number; b?: undefined } | { a: string };
+ type T25 = { a: number; b?: undefined } | { a?: string };
+ type T26 = { a: number; b?: undefined } | { a: undefined };
+ type T27 = { a: number; b?: undefined } | { a: undefined; b: undefined };
+
+ /* prettier-ignore */ {
+ typeAssert<Test<FlattenUnionOfInterfaces<T01>, { a: number | undefined; b: string | undefined }>>();
+ typeAssert<Test<FlattenUnionOfInterfaces<T02>, { a: number | undefined; b: string | undefined }>>();
+ typeAssert<Test<FlattenUnionOfInterfaces<T03>, { a: number | undefined }>>();
+ typeAssert<Test<FlattenUnionOfInterfaces<T04>, { a: number | string }>>();
+ typeAssert<Test<FlattenUnionOfInterfaces<T05>, { a: number | string | undefined }>>();
+
+ typeAssert<Test<FlattenUnionOfInterfaces<T11>, { a: number | undefined; b: string | undefined }>>();
+
+ typeAssert<Test<FlattenUnionOfInterfaces<T22>, { a: number | undefined; b: string | undefined }>>();
+ typeAssert<Test<FlattenUnionOfInterfaces<T23>, { a: number | undefined; b: undefined }>>();
+ typeAssert<Test<FlattenUnionOfInterfaces<T24>, { a: number | string; b: undefined }>>();
+ typeAssert<Test<FlattenUnionOfInterfaces<T25>, { a: number | string | undefined; b: undefined }>>();
+ typeAssert<Test<FlattenUnionOfInterfaces<T27>, { a: number | undefined; b: undefined }>>();
+
+ // Unexpected test results - hopefully okay to ignore these
+ typeAssert<Test<FlattenUnionOfInterfaces<T21>, { b: string | undefined }>>();
+ typeAssert<Test<FlattenUnionOfInterfaces<T26>, { a: number | undefined }>>();
+ }
+}
+
+export type Merged<A, B> = MergedFromFlat<A, FlattenUnionOfInterfaces<B>>;
+export type MergedFromFlat<A, B> = {
+ [K in keyof A | keyof B]: K extends keyof B ? B[K] : K extends keyof A ? A[K] : never;
+};
+
+/** Merges two objects into one `{ ...a, ...b }` and return it with a flattened type. */
+export function mergeParams<A extends {}, B extends {}>(a: A, b: B): Merged<A, B> {
+ return { ...a, ...b } as Merged<A, B>;
+}
+
+/**
+ * Merges two objects into one `{ ...a, ...b }` and asserts they had no overlapping keys.
+ * This is slower than {@link mergeParams}.
+ */
+export function mergeParamsChecked<A extends {}, B extends {}>(a: A, b: B): Merged<A, B> {
+ const merged = mergeParams(a, b);
+ assert(
+ Object.keys(merged).length === Object.keys(a).length + Object.keys(b).length,
+ () => `Duplicate key between ${JSON.stringify(a)} and ${JSON.stringify(b)}`
+ );
+ return merged;
+}
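+
+// Illustrative sketch:
+//
+//   mergeParams({ a: 1 }, { b: 2 })         // -> { a: 1, b: 2 }
+//   mergeParamsChecked({ a: 1 }, { a: 2 })  // fails the duplicate-key assertion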
diff --git a/dom/webgpu/tests/cts/checkout/src/common/internal/query/compare.ts b/dom/webgpu/tests/cts/checkout/src/common/internal/query/compare.ts
new file mode 100644
index 0000000000..a9419b87c1
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/internal/query/compare.ts
@@ -0,0 +1,95 @@
+import { TestParams } from '../../framework/fixture.js';
+import { assert, objectEquals } from '../../util/util.js';
+import { paramKeyIsPublic } from '../params_utils.js';
+
+import { TestQuery } from './query.js';
+
+export const enum Ordering {
+ Unordered,
+ StrictSuperset,
+ Equal,
+ StrictSubset,
+}
+
+/**
+ * Compares two queries for their ordering (which is used to build the tree).
+ *
+ * See src/unittests/query_compare.spec.ts for examples.
+ */
+export function compareQueries(a: TestQuery, b: TestQuery): Ordering {
+ if (a.suite !== b.suite) {
+ return Ordering.Unordered;
+ }
+
+ const filePathOrdering = comparePaths(a.filePathParts, b.filePathParts);
+ if (filePathOrdering !== Ordering.Equal || a.isMultiFile || b.isMultiFile) {
+ return compareOneLevel(filePathOrdering, a.isMultiFile, b.isMultiFile);
+ }
+ assert('testPathParts' in a && 'testPathParts' in b);
+
+ const testPathOrdering = comparePaths(a.testPathParts, b.testPathParts);
+ if (testPathOrdering !== Ordering.Equal || a.isMultiTest || b.isMultiTest) {
+ return compareOneLevel(testPathOrdering, a.isMultiTest, b.isMultiTest);
+ }
+ assert('params' in a && 'params' in b);
+
+ const paramsPathOrdering = comparePublicParamsPaths(a.params, b.params);
+ if (paramsPathOrdering !== Ordering.Equal || a.isMultiCase || b.isMultiCase) {
+ return compareOneLevel(paramsPathOrdering, a.isMultiCase, b.isMultiCase);
+ }
+ return Ordering.Equal;
+}
+
+/**
+ * Compares a single level of a query.
+ *
+ * "IsBig" means the query is big relative to the level, e.g. for test-level:
+ * - Anything >= `suite:a,*` is big
+ * - Anything <= `suite:a:*` is small
+ */
+function compareOneLevel(ordering: Ordering, aIsBig: boolean, bIsBig: boolean): Ordering {
+ assert(ordering !== Ordering.Equal || aIsBig || bIsBig);
+ if (ordering === Ordering.Unordered) return Ordering.Unordered;
+ if (aIsBig && bIsBig) return ordering;
+ if (!aIsBig && !bIsBig) return Ordering.Unordered; // Equal case is already handled
+ // Exactly one of (a, b) is big.
+ if (aIsBig && ordering !== Ordering.StrictSubset) return Ordering.StrictSuperset;
+ if (bIsBig && ordering !== Ordering.StrictSuperset) return Ordering.StrictSubset;
+ return Ordering.Unordered;
+}
+
+function comparePaths(a: readonly string[], b: readonly string[]): Ordering {
+ const shorter = Math.min(a.length, b.length);
+
+ for (let i = 0; i < shorter; ++i) {
+ if (a[i] !== b[i]) {
+ return Ordering.Unordered;
+ }
+ }
+ if (a.length === b.length) {
+ return Ordering.Equal;
+ } else if (a.length < b.length) {
+ return Ordering.StrictSuperset;
+ } else {
+ return Ordering.StrictSubset;
+ }
+}
+
+export function comparePublicParamsPaths(a: TestParams, b: TestParams): Ordering {
+ const aKeys = Object.keys(a).filter(k => paramKeyIsPublic(k));
+ const commonKeys = new Set(aKeys.filter(k => k in b));
+
+ for (const k of commonKeys) {
+    // Treat +0.0 and -0.0 as different query values by distinguishing them in objectEquals
+ if (!objectEquals(a[k], b[k], true)) {
+ return Ordering.Unordered;
+ }
+ }
+ const bKeys = Object.keys(b).filter(k => paramKeyIsPublic(k));
+ const aRemainingKeys = aKeys.length - commonKeys.size;
+ const bRemainingKeys = bKeys.length - commonKeys.size;
+ if (aRemainingKeys === 0 && bRemainingKeys === 0) return Ordering.Equal;
+ if (aRemainingKeys === 0) return Ordering.StrictSuperset;
+ if (bRemainingKeys === 0) return Ordering.StrictSubset;
+ return Ordering.Unordered;
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/common/internal/query/encode_selectively.ts b/dom/webgpu/tests/cts/checkout/src/common/internal/query/encode_selectively.ts
new file mode 100644
index 0000000000..ab1997b6e4
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/internal/query/encode_selectively.ts
@@ -0,0 +1,23 @@
+/**
+ * Encodes a stringified TestQuery so that it can be placed in a `?q=` parameter in a URL.
+ *
+ * `encodeURIComponent` encodes in accordance with `application/x-www-form-urlencoded`,
+ * but URLs don't actually have to be as strict as HTML form encoding
+ * (we interpret this purely from JavaScript).
+ * So we encode the component, then selectively convert some %-encoded escape codes
+ * back to their original form for readability/copyability.
+ */
+export function encodeURIComponentSelectively(s: string): string {
+ let ret = encodeURIComponent(s);
+ ret = ret.replace(/%22/g, '"'); // for JSON strings
+ ret = ret.replace(/%2C/g, ','); // for path separator, and JSON arrays
+ ret = ret.replace(/%3A/g, ':'); // for big separator
+ ret = ret.replace(/%3B/g, ';'); // for param separator
+ ret = ret.replace(/%3D/g, '='); // for params (k=v)
+ ret = ret.replace(/%5B/g, '['); // for JSON arrays
+ ret = ret.replace(/%5D/g, ']'); // for JSON arrays
+ ret = ret.replace(/%7B/g, '{'); // for JSON objects
+ ret = ret.replace(/%7D/g, '}'); // for JSON objects
+ ret = ret.replace(/%E2%9C%97/g, '✗'); // for jsUndefinedMagicValue
+ return ret;
+}
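+
+// Illustrative sketch: query-syntax characters stay readable, while everything else is still
+// percent-encoded as usual.
+//
+//   encodeURIComponentSelectively('webgpu:a,b:c:x=1;*')  // -> 'webgpu:a,b:c:x=1;*'
+//   encodeURIComponentSelectively('a b')                 // -> 'a%20b'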
diff --git a/dom/webgpu/tests/cts/checkout/src/common/internal/query/json_param_value.ts b/dom/webgpu/tests/cts/checkout/src/common/internal/query/json_param_value.ts
new file mode 100644
index 0000000000..40cc8c7bf6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/internal/query/json_param_value.ts
@@ -0,0 +1,114 @@
+import { assert, sortObjectByKey, isPlainObject } from '../../util/util.js';
+import { JSONWithUndefined } from '../params_utils.js';
+
+// JSON can't represent various values and by default stores them as `null`.
+// Instead, we store them as magic string values in JSON.
+const jsUndefinedMagicValue = '_undef_';
+const jsNaNMagicValue = '_nan_';
+const jsPositiveInfinityMagicValue = '_posinfinity_';
+const jsNegativeInfinityMagicValue = '_neginfinity_';
+
+// -0 needs to be handled separately, because -0 === +0 returns true. Not
+// special casing +0/0, since it behaves intuitively. Assuming that if -0 is
+// being used, the differentiation from +0 is desired.
+const jsNegativeZeroMagicValue = '_negzero_';
+
+// bigint values are not defined in JSON, so we need to wrap them up as strings
+const jsBigIntMagicPattern = /^(\d+)n$/;
+
+const toStringMagicValue = new Map<unknown, string>([
+ [undefined, jsUndefinedMagicValue],
+ [NaN, jsNaNMagicValue],
+ [Number.POSITIVE_INFINITY, jsPositiveInfinityMagicValue],
+ [Number.NEGATIVE_INFINITY, jsNegativeInfinityMagicValue],
+ // No -0 handling because it is special cased.
+]);
+
+const fromStringMagicValue = new Map<string, unknown>([
+ [jsUndefinedMagicValue, undefined],
+ [jsNaNMagicValue, NaN],
+ [jsPositiveInfinityMagicValue, Number.POSITIVE_INFINITY],
+ [jsNegativeInfinityMagicValue, Number.NEGATIVE_INFINITY],
+ // -0 is handled in this direction because there is no comparison issue.
+ [jsNegativeZeroMagicValue, -0],
+]);
+
+function stringifyFilter(_k: string, v: unknown): unknown {
+ // Make sure no one actually uses a magic value as a parameter.
+ if (typeof v === 'string') {
+ assert(
+ !fromStringMagicValue.has(v),
+ `${v} is a magic value for stringification, so cannot be used`
+ );
+
+ assert(
+ v !== jsNegativeZeroMagicValue,
+ `${v} is a magic value for stringification, so cannot be used`
+ );
+
+ assert(
+ v.match(jsBigIntMagicPattern) === null,
+ `${v} matches bigint magic pattern for stringification, so cannot be used`
+ );
+ }
+
+ const isObject = v !== null && typeof v === 'object' && !Array.isArray(v);
+ if (isObject) {
+ assert(
+ isPlainObject(v),
+ `value must be a plain object but it appears to be a '${
+ Object.getPrototypeOf(v).constructor.name
+      }'`
+ );
+ }
+ assert(typeof v !== 'function', `${v} can not be a function`);
+
+ if (Object.is(v, -0)) {
+ return jsNegativeZeroMagicValue;
+ }
+
+ if (typeof v === 'bigint') {
+ return `${v}n`;
+ }
+
+ return toStringMagicValue.has(v) ? toStringMagicValue.get(v) : v;
+}
+
+export function stringifyParamValue(value: JSONWithUndefined): string {
+ return JSON.stringify(value, stringifyFilter);
+}
+
+/**
+ * Like stringifyParamValue but sorts dictionaries by key, for hashing.
+ */
+export function stringifyParamValueUniquely(value: JSONWithUndefined): string {
+ return JSON.stringify(value, (k, v) => {
+ if (typeof v === 'object' && v !== null) {
+ return sortObjectByKey(v);
+ }
+
+ return stringifyFilter(k, v);
+ });
+}
+
+// 'any' is part of the JSON.parse reviver interface, so cannot be avoided.
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+function parseParamValueReviver(_k: string, v: any): any {
+ if (fromStringMagicValue.has(v)) {
+ return fromStringMagicValue.get(v);
+ }
+
+ if (typeof v === 'string') {
+ const match: RegExpMatchArray | null = v.match(jsBigIntMagicPattern);
+ if (match !== null) {
+ // [0] is the entire match, and following entries are the capture groups
+ return BigInt(match[1]);
+ }
+ }
+
+ return v;
+}
+
+export function parseParamValue(s: string): JSONWithUndefined {
+ return JSON.parse(s, parseParamValueReviver);
+}
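+
+// Illustrative sketch of the magic-value round trip:
+//
+//   stringifyParamValue(-0)        // -> '"_negzero_"'
+//   stringifyParamValue(123n)      // -> '"123n"'
+//   parseParamValue('"_undef_"')   // -> undefined
+//   parseParamValue('"123n"')      // -> 123n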
diff --git a/dom/webgpu/tests/cts/checkout/src/common/internal/query/parseQuery.ts b/dom/webgpu/tests/cts/checkout/src/common/internal/query/parseQuery.ts
new file mode 100644
index 0000000000..996835b0ec
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/internal/query/parseQuery.ts
@@ -0,0 +1,155 @@
+import { assert } from '../../util/util.js';
+import {
+ TestParamsRW,
+ JSONWithUndefined,
+ badParamValueChars,
+ paramKeyIsPublic,
+} from '../params_utils.js';
+
+import { parseParamValue } from './json_param_value.js';
+import {
+ TestQuery,
+ TestQueryMultiFile,
+ TestQueryMultiTest,
+ TestQueryMultiCase,
+ TestQuerySingleCase,
+} from './query.js';
+import { kBigSeparator, kWildcard, kPathSeparator, kParamSeparator } from './separators.js';
+import { validQueryPart } from './validQueryPart.js';
+
+export function parseQuery(s: string): TestQuery {
+ try {
+ return parseQueryImpl(s);
+ } catch (ex) {
+ if (ex instanceof Error) {
+ ex.message += '\n on: ' + s;
+ }
+ throw ex;
+ }
+}
+
+function parseQueryImpl(s: string): TestQuery {
+ // Undo encodeURIComponentSelectively
+ s = decodeURIComponent(s);
+
+ // bigParts are: suite, file, test, params (note kBigSeparator could appear in params)
+ let suite: string;
+ let fileString: string | undefined;
+ let testString: string | undefined;
+ let paramsString: string | undefined;
+ {
+ const i1 = s.indexOf(kBigSeparator);
+ assert(i1 !== -1, `query string must have at least one ${kBigSeparator}`);
+ suite = s.substring(0, i1);
+ const i2 = s.indexOf(kBigSeparator, i1 + 1);
+ if (i2 === -1) {
+ fileString = s.substring(i1 + 1);
+ } else {
+ fileString = s.substring(i1 + 1, i2);
+ const i3 = s.indexOf(kBigSeparator, i2 + 1);
+ if (i3 === -1) {
+ testString = s.substring(i2 + 1);
+ } else {
+ testString = s.substring(i2 + 1, i3);
+ paramsString = s.substring(i3 + 1);
+ }
+ }
+ }
+
+ const { parts: file, wildcard: filePathHasWildcard } = parseBigPart(fileString, kPathSeparator);
+
+ if (testString === undefined) {
+ // Query is file-level
+ assert(
+ filePathHasWildcard,
+ `File-level query without wildcard ${kWildcard}. Did you want a file-level query \
+(append ${kPathSeparator}${kWildcard}) or test-level query (append ${kBigSeparator}${kWildcard})?`
+ );
+ return new TestQueryMultiFile(suite, file);
+ }
+ assert(!filePathHasWildcard, `Wildcard ${kWildcard} must be at the end of the query string`);
+
+ const { parts: test, wildcard: testPathHasWildcard } = parseBigPart(testString, kPathSeparator);
+
+ if (paramsString === undefined) {
+ // Query is test-level
+ assert(
+ testPathHasWildcard,
+ `Test-level query without wildcard ${kWildcard}; did you want a test-level query \
+(append ${kPathSeparator}${kWildcard}) or case-level query (append ${kBigSeparator}${kWildcard})?`
+ );
+ assert(file.length > 0, 'File part of test-level query was empty (::)');
+ return new TestQueryMultiTest(suite, file, test);
+ }
+
+ // Query is case-level
+ assert(!testPathHasWildcard, `Wildcard ${kWildcard} must be at the end of the query string`);
+
+ const { parts: paramsParts, wildcard: paramsHasWildcard } = parseBigPart(
+ paramsString,
+ kParamSeparator
+ );
+
+ assert(test.length > 0, 'Test part of case-level query was empty (::)');
+
+ const params: TestParamsRW = {};
+ for (const paramPart of paramsParts) {
+ const [k, v] = parseSingleParam(paramPart);
+ assert(validQueryPart.test(k), `param key names must match ${validQueryPart}`);
+ params[k] = v;
+ }
+ if (paramsHasWildcard) {
+ return new TestQueryMultiCase(suite, file, test, params);
+ } else {
+ return new TestQuerySingleCase(suite, file, test, params);
+ }
+}
+
+// webgpu:a,b,* or webgpu:a,b,c:*
+const kExampleQueries = `\
+webgpu${kBigSeparator}a${kPathSeparator}b${kPathSeparator}${kWildcard} or \
+webgpu${kBigSeparator}a${kPathSeparator}b${kPathSeparator}c${kBigSeparator}${kWildcard}`;
+
+function parseBigPart(
+ s: string,
+ separator: typeof kParamSeparator | typeof kPathSeparator
+): { parts: string[]; wildcard: boolean } {
+ if (s === '') {
+ return { parts: [], wildcard: false };
+ }
+ const parts = s.split(separator);
+
+ let endsWithWildcard = false;
+ for (const [i, part] of parts.entries()) {
+ if (i === parts.length - 1) {
+ endsWithWildcard = part === kWildcard;
+ }
+ assert(
+ part.indexOf(kWildcard) === -1 || endsWithWildcard,
+      `Wildcard ${kWildcard} must be the complete last part of a path (e.g. ${kExampleQueries})`
+ );
+ }
+ if (endsWithWildcard) {
+ // Remove the last element of the array (which is just the wildcard).
+ parts.length = parts.length - 1;
+ }
+ return { parts, wildcard: endsWithWildcard };
+}
+
+function parseSingleParam(paramSubstring: string): [string, JSONWithUndefined] {
+ assert(paramSubstring !== '', 'Param in a query must not be blank (is there a trailing comma?)');
+ const i = paramSubstring.indexOf('=');
+ assert(i !== -1, 'Param in a query must be of form key=value');
+ const k = paramSubstring.substring(0, i);
+ assert(paramKeyIsPublic(k), 'Param in a query must not be private (start with _)');
+ const v = paramSubstring.substring(i + 1);
+ return [k, parseSingleParamValue(v)];
+}
+
+function parseSingleParamValue(s: string): JSONWithUndefined {
+ assert(
+ !badParamValueChars.test(s),
+ `param value must not match ${badParamValueChars} - was ${s}`
+ );
+ return parseParamValue(s);
+}
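+
+// Illustrative sketch of the four query levels (file/test names are hypothetical):
+//
+//   parseQuery('webgpu:a,b,*')          // TestQueryMultiFile
+//   parseQuery('webgpu:a,b:c,d,*')      // TestQueryMultiTest
+//   parseQuery('webgpu:a,b:c,d:x=1;*')  // TestQueryMultiCase  (params { x: 1 })
+//   parseQuery('webgpu:a,b:c,d:x=1')    // TestQuerySingleCase (params { x: 1 })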
diff --git a/dom/webgpu/tests/cts/checkout/src/common/internal/query/query.ts b/dom/webgpu/tests/cts/checkout/src/common/internal/query/query.ts
new file mode 100644
index 0000000000..7c72a62f88
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/internal/query/query.ts
@@ -0,0 +1,262 @@
+import { TestParams } from '../../framework/fixture.js';
+import { optionEnabled } from '../../runtime/helper/options.js';
+import { assert, unreachable } from '../../util/util.js';
+import { Expectation } from '../logging/result.js';
+
+import { compareQueries, Ordering } from './compare.js';
+import { encodeURIComponentSelectively } from './encode_selectively.js';
+import { parseQuery } from './parseQuery.js';
+import { kBigSeparator, kPathSeparator, kWildcard } from './separators.js';
+import { stringifyPublicParams } from './stringify_params.js';
+
+/**
+ * Represents a test query of some level.
+ *
+ * TestQuery types are immutable.
+ */
+export type TestQuery =
+ | TestQuerySingleCase
+ | TestQueryMultiCase
+ | TestQueryMultiTest
+ | TestQueryMultiFile;
+
+/**
+ * - 1 = MultiFile.
+ * - 2 = MultiTest.
+ * - 3 = MultiCase.
+ * - 4 = SingleCase.
+ */
+export type TestQueryLevel = 1 | 2 | 3 | 4;
+
+export interface TestQueryWithExpectation {
+ query: TestQuery;
+ expectation: Expectation;
+}
+
+/**
+ * A multi-file test query, like `s:*` or `s:a,b,*`.
+ *
+ * Immutable (makes copies of constructor args).
+ */
+export class TestQueryMultiFile {
+ readonly level: TestQueryLevel = 1;
+ readonly isMultiFile: boolean = true;
+ readonly suite: string;
+ readonly filePathParts: readonly string[];
+
+ constructor(suite: string, file: readonly string[]) {
+ this.suite = suite;
+ this.filePathParts = [...file];
+ }
+
+ get depthInLevel() {
+ return this.filePathParts.length;
+ }
+
+ toString(): string {
+ return encodeURIComponentSelectively(this.toStringHelper().join(kBigSeparator));
+ }
+
+ protected toStringHelper(): string[] {
+ return [this.suite, [...this.filePathParts, kWildcard].join(kPathSeparator)];
+ }
+}
+
+/**
+ * A multi-test test query, like `s:f:*` or `s:f:a,b,*`.
+ *
+ * Immutable (makes copies of constructor args).
+ */
+export class TestQueryMultiTest extends TestQueryMultiFile {
+ override readonly level: TestQueryLevel = 2;
+ override readonly isMultiFile = false as const;
+ readonly isMultiTest: boolean = true;
+ readonly testPathParts: readonly string[];
+
+ constructor(suite: string, file: readonly string[], test: readonly string[]) {
+ super(suite, file);
+ assert(file.length > 0, 'multi-test (or finer) query must have file-path');
+ this.testPathParts = [...test];
+ }
+
+ override get depthInLevel() {
+ return this.testPathParts.length;
+ }
+
+ protected override toStringHelper(): string[] {
+ return [
+ this.suite,
+ this.filePathParts.join(kPathSeparator),
+ [...this.testPathParts, kWildcard].join(kPathSeparator),
+ ];
+ }
+}
+
+/**
+ * A multi-case test query, like `s:f:t:*` or `s:f:t:a=1;b=2;*`.
+ *
+ * Immutable (makes copies of constructor args), except for param values
+ * (which aren't normally supposed to change; they're marked readonly in TestParams).
+ */
+export class TestQueryMultiCase extends TestQueryMultiTest {
+ override readonly level: TestQueryLevel = 3;
+ override readonly isMultiTest = false as const;
+ readonly isMultiCase: boolean = true;
+ readonly params: TestParams;
+
+ constructor(suite: string, file: readonly string[], test: readonly string[], params: TestParams) {
+ super(suite, file, test);
+ assert(test.length > 0, 'multi-case (or finer) query must have test-path');
+ this.params = { ...params };
+ }
+
+ override get depthInLevel() {
+ return Object.keys(this.params).length;
+ }
+
+ protected override toStringHelper(): string[] {
+ return [
+ this.suite,
+ this.filePathParts.join(kPathSeparator),
+ this.testPathParts.join(kPathSeparator),
+ stringifyPublicParams(this.params, true),
+ ];
+ }
+}
+
+/**
+ * A single-case test query, like `s:f:t:` or `s:f:t:a=1;b=1`.
+ *
+ * Immutable (makes copies of constructor args).
+ */
+export class TestQuerySingleCase extends TestQueryMultiCase {
+ override readonly level: TestQueryLevel = 4;
+ override readonly isMultiCase = false as const;
+
+ override get depthInLevel() {
+ return 0;
+ }
+
+ protected override toStringHelper(): string[] {
+ return [
+ this.suite,
+ this.filePathParts.join(kPathSeparator),
+ this.testPathParts.join(kPathSeparator),
+ stringifyPublicParams(this.params),
+ ];
+ }
+}
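+
+// Illustrative sketch (assuming stringifyPublicParams renders `{ x: 1 }` as 'x=1'):
+//
+//   new TestQuerySingleCase('webgpu', ['a', 'b'], ['c'], { x: 1 }).toString()
+//   // -> 'webgpu:a,b:c:x=1'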
+
+/**
+ * Parse raw expectations input into TestQueryWithExpectation[], filtering so that only
+ * expectations that are relevant for the provided query and wptURL are kept.
+ *
+ * `rawExpectations` should be @type {{ query: string, expectation: Expectation }[]}
+ *
+ * The `rawExpectations` are parsed and validated to ensure they are in the correct format.
+ * If `wptURL` is passed, the query string should be of the full path format such
+ * as `path/to/cts.https.html?worker=0&q=suite:test_path:test_name:foo=1;bar=2;*`.
+ * If `wptURL` is `undefined`, the query string should be only the query
+ * `suite:test_path:test_name:foo=1;bar=2;*`.
+ */
+export function parseExpectationsForTestQuery(
+ rawExpectations:
+ | unknown
+ | {
+ query: string;
+ expectation: Expectation;
+ }[],
+ query: TestQuery,
+ wptURL?: URL
+) {
+ if (!Array.isArray(rawExpectations)) {
+ unreachable('Expectations should be an array');
+ }
+ const expectations: TestQueryWithExpectation[] = [];
+ for (const entry of rawExpectations) {
+ assert(typeof entry === 'object');
+ const rawExpectation = entry as { query?: string; expectation?: string };
+ assert(rawExpectation.query !== undefined, 'Expectation missing query string');
+ assert(rawExpectation.expectation !== undefined, 'Expectation missing expectation string');
+
+ let expectationQuery: TestQuery;
+ if (wptURL !== undefined) {
+ const expectationURL = new URL(`${wptURL.origin}/${entry.query}`);
+ if (expectationURL.pathname !== wptURL.pathname) {
+ continue;
+ }
+ assert(
+ expectationURL.pathname === wptURL.pathname,
+ `Invalid expectation path ${expectationURL.pathname}
+Expectation should be of the form path/to/cts.https.html?worker=0&q=suite:test_path:test_name:foo=1;bar=2;...
+ `
+ );
+
+ const params = expectationURL.searchParams;
+ if (optionEnabled('worker', params) !== optionEnabled('worker', wptURL.searchParams)) {
+ continue;
+ }
+
+ const qs = params.getAll('q');
+ assert(qs.length === 1, 'currently, there must be exactly one ?q= in the expectation string');
+ expectationQuery = parseQuery(qs[0]);
+ } else {
+ expectationQuery = parseQuery(entry.query);
+ }
+
+    // Strip params from multicase expectations so that an expectation of foo=2;*
+    // is still stored even if the test query is bar=3;*.
+ const queryForFilter =
+ expectationQuery instanceof TestQueryMultiCase
+ ? new TestQueryMultiCase(
+ expectationQuery.suite,
+ expectationQuery.filePathParts,
+ expectationQuery.testPathParts,
+ {}
+ )
+ : expectationQuery;
+
+ if (compareQueries(query, queryForFilter) === Ordering.Unordered) {
+ continue;
+ }
+
+ switch (entry.expectation) {
+ case 'pass':
+ case 'skip':
+ case 'fail':
+ break;
+ default:
+ unreachable(`Invalid expectation ${entry.expectation}`);
+ }
+
+ expectations.push({
+ query: expectationQuery,
+ expectation: entry.expectation,
+ });
+ }
+ return expectations;
+}
+
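+// Minimal usage sketch (hypothetical expectation data; `parseQuery` is already used in this
+// module). With `wptURL` omitted, each entry's `query` is a bare CTS query string:
+//
+//   const expectations = parseExpectationsForTestQuery(
+//     [{ query: 'suite:a,b:c:*', expectation: 'fail' }],
+//     parseQuery('suite:a,b:*')
+//   );
+//   // expectations[0] holds the parsed 'suite:a,b:c:*' query with expectation 'fail';
+//   // entries unrelated to the given query are filtered out, and malformed ones assert.
+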
+/**
+ * For display purposes only, produces a "relative" query string from parent to child.
+ * Used in the wpt runtime to reduce the verbosity of logs.
+ */
+export function relativeQueryString(parent: TestQuery, child: TestQuery): string {
+ const ordering = compareQueries(parent, child);
+ if (ordering === Ordering.Equal) {
+ return '';
+ } else if (ordering === Ordering.StrictSuperset) {
+ const parentString = parent.toString();
+ assert(parentString.endsWith(kWildcard));
+ const childString = child.toString();
+ assert(
+ childString.startsWith(parentString.substring(0, parentString.length - 2)),
+ 'impossible?: childString does not start with parentString[:-2]'
+ );
+ return childString.substring(parentString.length - 2);
+ } else {
+ unreachable(
+ `relativeQueryString arguments have invalid ordering ${ordering}:\n${parent}\n${child}`
+ );
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/common/internal/query/separators.ts b/dom/webgpu/tests/cts/checkout/src/common/internal/query/separators.ts
new file mode 100644
index 0000000000..0c8f6ea9a9
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/internal/query/separators.ts
@@ -0,0 +1,14 @@
+/** Separator between big parts: suite:file:test:case */
+export const kBigSeparator = ':';
+
+/** Separator between path,to,file or path,to,test */
+export const kPathSeparator = ',';
+
+/** Separator between k=v;k=v */
+export const kParamSeparator = ';';
+
+/** Separator between key and value in k=v */
+export const kParamKVSeparator = '=';
+
+/** Final wildcard, if query is not single-case */
+export const kWildcard = '*';
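+
+// A minimal sketch (example path/param names) of how these separators compose a full
+// single-case query string; non-single-case queries end with kWildcard instead:
+//
+//   const q = [
+//     'webgpu',
+//     ['api', 'operation'].join(kPathSeparator),
+//     ['buffer', 'mapping'].join(kPathSeparator),
+//     ['size' + kParamKVSeparator + '4', 'mapped' + kParamKVSeparator + 'true'].join(kParamSeparator),
+//   ].join(kBigSeparator);
+//   // q === 'webgpu:api,operation:buffer,mapping:size=4;mapped=true'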
diff --git a/dom/webgpu/tests/cts/checkout/src/common/internal/query/stringify_params.ts b/dom/webgpu/tests/cts/checkout/src/common/internal/query/stringify_params.ts
new file mode 100644
index 0000000000..907cc0791a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/internal/query/stringify_params.ts
@@ -0,0 +1,44 @@
+import { TestParams } from '../../framework/fixture.js';
+import { assert } from '../../util/util.js';
+import { JSONWithUndefined, badParamValueChars, paramKeyIsPublic } from '../params_utils.js';
+
+import { stringifyParamValue, stringifyParamValueUniquely } from './json_param_value.js';
+import { kParamKVSeparator, kParamSeparator, kWildcard } from './separators.js';
+
+export function stringifyPublicParams(p: TestParams, addWildcard = false): string {
+ const parts = Object.keys(p)
+ .filter(k => paramKeyIsPublic(k))
+ .map(k => stringifySingleParam(k, p[k]));
+
+ if (addWildcard) parts.push(kWildcard);
+
+ return parts.join(kParamSeparator);
+}
+
+/**
+ * An _approximately_ unique string representing a CaseParams value.
+ */
+export function stringifyPublicParamsUniquely(p: TestParams): string {
+ const keys = Object.keys(p).sort();
+ return keys
+ .filter(k => paramKeyIsPublic(k))
+ .map(k => stringifySingleParamUniquely(k, p[k]))
+ .join(kParamSeparator);
+}
+
+export function stringifySingleParam(k: string, v: JSONWithUndefined) {
+ return `${k}${kParamKVSeparator}${stringifySingleParamValue(v)}`;
+}
+
+function stringifySingleParamUniquely(k: string, v: JSONWithUndefined) {
+ return `${k}${kParamKVSeparator}${stringifyParamValueUniquely(v)}`;
+}
+
+function stringifySingleParamValue(v: JSONWithUndefined): string {
+ const s = stringifyParamValue(v);
+ assert(
+ !badParamValueChars.test(s),
+ `JSON.stringified param value must not match ${badParamValueChars} - was ${s}`
+ );
+ return s;
+}
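+
+// Minimal sketch (hypothetical params object) of the output formats. Private keys (leading '_')
+// are dropped, values are JSON-stringified, and parts are joined with kParamSeparator:
+//
+//   stringifyPublicParams({ format: 'rgba8unorm', mipLevel: 0, _private: 1 });
+//     // 'format="rgba8unorm";mipLevel=0'
+//   stringifyPublicParams({ mipLevel: 0 }, /* addWildcard */ true);
+//     // 'mipLevel=0;*'
+//   stringifyPublicParamsUniquely({ b: 1, a: 2 });
+//     // keys sorted first: 'a=2;b=1'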
diff --git a/dom/webgpu/tests/cts/checkout/src/common/internal/query/validQueryPart.ts b/dom/webgpu/tests/cts/checkout/src/common/internal/query/validQueryPart.ts
new file mode 100644
index 0000000000..62184adb62
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/internal/query/validQueryPart.ts
@@ -0,0 +1,2 @@
+/** Applies to group parts, test parts, params keys. */
+export const validQueryPart = /^[a-zA-Z0-9_]+$/;
diff --git a/dom/webgpu/tests/cts/checkout/src/common/internal/stack.ts b/dom/webgpu/tests/cts/checkout/src/common/internal/stack.ts
new file mode 100644
index 0000000000..5de54088c8
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/internal/stack.ts
@@ -0,0 +1,82 @@
+// Returns the stack trace of an Error, but without the extra boilerplate at the bottom
+// (e.g. RunCaseSpecific, processTicksAndRejections, etc.), for logging.
+export function extractImportantStackTrace(e: Error): string {
+ let stack = e.stack;
+ if (!stack) {
+ return '';
+ }
+ const redundantMessage = 'Error: ' + e.message + '\n';
+ if (stack.startsWith(redundantMessage)) {
+ stack = stack.substring(redundantMessage.length);
+ }
+
+ const lines = stack.split('\n');
+ for (let i = lines.length - 1; i >= 0; --i) {
+ const line = lines[i];
+ if (line.indexOf('.spec.') !== -1) {
+ return lines.slice(0, i + 1).join('\n');
+ }
+ }
+ return stack;
+}
+
+// *** Examples ***
+//
+// Node fail()
+// > Error:
+// > at CaseRecorder.fail (/Users/kainino/src/cts/src/common/framework/logger.ts:99:30)
+// > at RunCaseSpecific.exports.g.test.t [as fn] (/Users/kainino/src/cts/src/unittests/logger.spec.ts:80:7)
+// x at RunCaseSpecific.run (/Users/kainino/src/cts/src/common/framework/test_group.ts:121:18)
+// x at processTicksAndRejections (internal/process/task_queues.js:86:5)
+//
+// Node throw
+// > Error: hello
+// > at RunCaseSpecific.g.test.t [as fn] (/Users/kainino/src/cts/src/unittests/test_group.spec.ts:51:11)
+// x at RunCaseSpecific.run (/Users/kainino/src/cts/src/common/framework/test_group.ts:121:18)
+// x at processTicksAndRejections (internal/process/task_queues.js:86:5)
+//
+// Firefox fail()
+// > fail@http://localhost:8080/out/framework/logger.js:104:30
+// > expect@http://localhost:8080/out/framework/default_fixture.js:59:16
+// > @http://localhost:8080/out/unittests/util.spec.js:35:5
+// x run@http://localhost:8080/out/framework/test_group.js:119:18
+//
+// Firefox throw
+// > @http://localhost:8080/out/unittests/test_group.spec.js:48:11
+// x run@http://localhost:8080/out/framework/test_group.js:119:18
+//
+// Safari fail()
+// > fail@http://localhost:8080/out/framework/logger.js:104:39
+// > expect@http://localhost:8080/out/framework/default_fixture.js:59:20
+// > http://localhost:8080/out/unittests/util.spec.js:35:11
+// x http://localhost:8080/out/framework/test_group.js:119:20
+// x asyncFunctionResume@[native code]
+// x [native code]
+// x promiseReactionJob@[native code]
+//
+// Safari throw
+// > http://localhost:8080/out/unittests/test_group.spec.js:48:20
+// x http://localhost:8080/out/framework/test_group.js:119:20
+// x asyncFunctionResume@[native code]
+// x [native code]
+// x promiseReactionJob@[native code]
+//
+// Chrome fail()
+// x Error
+// x at CaseRecorder.fail (http://localhost:8080/out/framework/logger.js:104:30)
+// x at DefaultFixture.expect (http://localhost:8080/out/framework/default_fixture.js:59:16)
+// > at RunCaseSpecific.fn (http://localhost:8080/out/unittests/util.spec.js:35:5)
+// x at RunCaseSpecific.run (http://localhost:8080/out/framework/test_group.js:119:18)
+// x at async runCase (http://localhost:8080/out/runtime/standalone.js:37:17)
+// x at async http://localhost:8080/out/runtime/standalone.js:102:7
+//
+// Chrome throw
+// x Error: hello
+// > at RunCaseSpecific.fn (http://localhost:8080/out/unittests/test_group.spec.js:48:11)
+// x at RunCaseSpecific.run (http://localhost:8080/out/framework/test_group.js:119:18)"
+// x at async Promise.all (index 0)
+// x at async TestGroupTest.run (http://localhost:8080/out/unittests/test_group_test.js:6:5)
+// x at async RunCaseSpecific.fn (http://localhost:8080/out/unittests/test_group.spec.js:53:15)
+// x at async RunCaseSpecific.run (http://localhost:8080/out/framework/test_group.js:119:7)
+// x at async runCase (http://localhost:8080/out/runtime/standalone.js:37:17)
+// x at async http://localhost:8080/out/runtime/standalone.js:102:7
diff --git a/dom/webgpu/tests/cts/checkout/src/common/internal/test_group.ts b/dom/webgpu/tests/cts/checkout/src/common/internal/test_group.ts
new file mode 100644
index 0000000000..632a822ef1
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/internal/test_group.ts
@@ -0,0 +1,754 @@
+import {
+ Fixture,
+ SubcaseBatchState,
+ SkipTestCase,
+ TestParams,
+ UnexpectedPassError,
+ SubcaseBatchStateFromFixture,
+ FixtureClass,
+} from '../framework/fixture.js';
+import {
+ CaseParamsBuilder,
+ builderIterateCasesWithSubcases,
+ kUnitCaseParamsBuilder,
+ ParamsBuilderBase,
+ SubcaseParamsBuilder,
+} from '../framework/params_builder.js';
+import { globalTestConfig } from '../framework/test_config.js';
+import { Expectation } from '../internal/logging/result.js';
+import { TestCaseRecorder } from '../internal/logging/test_case_recorder.js';
+import { extractPublicParams, Merged, mergeParams } from '../internal/params_utils.js';
+import { compareQueries, Ordering } from '../internal/query/compare.js';
+import {
+ TestQueryMultiFile,
+ TestQueryMultiTest,
+ TestQuerySingleCase,
+ TestQueryWithExpectation,
+} from '../internal/query/query.js';
+import { kPathSeparator } from '../internal/query/separators.js';
+import {
+ stringifyPublicParams,
+ stringifyPublicParamsUniquely,
+} from '../internal/query/stringify_params.js';
+import { validQueryPart } from '../internal/query/validQueryPart.js';
+import { DeepReadonly } from '../util/types.js';
+import { assert, unreachable } from '../util/util.js';
+
+import { logToWebsocket } from './websocket_logger.js';
+
+export type RunFn = (
+ rec: TestCaseRecorder,
+ expectations?: TestQueryWithExpectation[]
+) => Promise<void>;
+
+export interface TestCaseID {
+ readonly test: readonly string[];
+ readonly params: TestParams;
+}
+
+export interface RunCase {
+ readonly id: TestCaseID;
+ readonly isUnimplemented: boolean;
+ computeSubcaseCount(): number;
+ run(
+ rec: TestCaseRecorder,
+ selfQuery: TestQuerySingleCase,
+ expectations: TestQueryWithExpectation[]
+ ): Promise<void>;
+}
+
+// Interface for defining tests
+export interface TestGroupBuilder<F extends Fixture> {
+ test(name: string): TestBuilderWithName<F>;
+}
+export function makeTestGroup<F extends Fixture>(fixture: FixtureClass<F>): TestGroupBuilder<F> {
+ return new TestGroup(fixture as unknown as FixtureClass);
+}
+
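+// Minimal sketch (hypothetical test name and params; `combine`/`beginSubcases` come from
+// params_builder.js) of how a .spec.ts file typically uses this builder API:
+//
+//   export const g = makeTestGroup(Fixture);
+//
+//   g.test('foo,basic')
+//     .desc('Hypothetical test covering foo in its basic configuration.')
+//     .params(u => u.combine('x', [0, 1]).beginSubcases().combine('y', [2, 3]))
+//     .fn(t => {
+//       const { x, y } = t.params;
+//       t.expect(x + y >= 2); // fixture-provided assertion
+//     });
+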
+// Interfaces for running tests
+export interface IterableTestGroup {
+ iterate(): Iterable<IterableTest>;
+ validate(fileQuery: TestQueryMultiFile): void;
+ /** Returns the file-relative test paths of tests which have >0 cases. */
+ collectNonEmptyTests(): { testPath: string[] }[];
+}
+export interface IterableTest {
+ testPath: string[];
+ description: string | undefined;
+ readonly testCreationStack: Error;
+ iterate(caseFilter: TestParams | null): Iterable<RunCase>;
+}
+
+export function makeTestGroupForUnitTesting<F extends Fixture>(
+ fixture: FixtureClass<F>
+): TestGroup<F> {
+ return new TestGroup(fixture);
+}
+
+/** The maximum allowed length of a test query string. Checked by tools/validate. */
+export const kQueryMaxLength = 375;
+
+/** Parameter name for batch number (see also TestBuilder.batch). */
+const kBatchParamName = 'batch__';
+
+type TestFn<F extends Fixture, P extends {}> = (
+ t: F & { params: DeepReadonly<P> }
+) => Promise<void> | void;
+type BeforeAllSubcasesFn<S extends SubcaseBatchState, P extends {}> = (
+ s: S & { params: DeepReadonly<P> }
+) => Promise<void> | void;
+
+export class TestGroup<F extends Fixture> implements TestGroupBuilder<F> {
+ private fixture: FixtureClass;
+ private seen: Set<string> = new Set();
+ private tests: Array<TestBuilder<SubcaseBatchStateFromFixture<F>, F>> = [];
+
+ constructor(fixture: FixtureClass) {
+ this.fixture = fixture;
+ }
+
+ iterate(): Iterable<IterableTest> {
+ return this.tests;
+ }
+
+ private checkName(name: string): void {
+ assert(
+ // Shouldn't happen due to the rule above. Just makes sure that treating
+ // unencoded strings as encoded strings is OK.
+ name === decodeURIComponent(name),
+ `Not decodeURIComponent-idempotent: ${name} !== ${decodeURIComponent(name)}`
+ );
+ assert(!this.seen.has(name), `Duplicate test name: ${name}`);
+
+ this.seen.add(name);
+ }
+
+ test(name: string): TestBuilderWithName<F> {
+ const testCreationStack = new Error(`Test created: ${name}`);
+
+ this.checkName(name);
+
+ const parts = name.split(kPathSeparator);
+ for (const p of parts) {
+ assert(validQueryPart.test(p), `Invalid test name part ${p}; must match ${validQueryPart}`);
+ }
+
+ const test = new TestBuilder(parts, this.fixture, testCreationStack);
+ this.tests.push(test);
+ return test as unknown as TestBuilderWithName<F>;
+ }
+
+ validate(fileQuery: TestQueryMultiFile): void {
+ for (const test of this.tests) {
+ const testQuery = new TestQueryMultiTest(
+ fileQuery.suite,
+ fileQuery.filePathParts,
+ test.testPath
+ );
+ test.validate(testQuery);
+ }
+ }
+
+ collectNonEmptyTests(): { testPath: string[] }[] {
+ const testPaths = [];
+ for (const test of this.tests) {
+ if (test.computeCaseCount() > 0) {
+ testPaths.push({ testPath: test.testPath });
+ }
+ }
+ return testPaths;
+ }
+}
+
+interface TestBuilderWithName<F extends Fixture> extends TestBuilderWithParams<F, {}, {}> {
+ desc(description: string): this;
+ /**
+ * A noop function to associate a test with the relevant part of the specification.
+ *
+ * @param url a link to the spec where test is extracted from.
+ */
+ specURL(url: string): this;
+ /**
+ * Parameterize the test, generating multiple cases, each possibly having subcases.
+ *
+ * The `unit` value passed to the `cases` callback is an immutable constant
+ * `CaseParamsBuilder<{}>` representing the "unit" builder `[ {} ]`,
+ * provided for convenience. The non-callback overload can be used if `unit` is not needed.
+ */
+ params<CaseP extends {}, SubcaseP extends {}>(
+ cases: (unit: CaseParamsBuilder<{}>) => ParamsBuilderBase<CaseP, SubcaseP>
+ ): TestBuilderWithParams<F, CaseP, SubcaseP>;
+ /**
+ * Parameterize the test, generating multiple cases, each possibly having subcases.
+ *
+ * Use the callback overload of this method if a "unit" builder is needed.
+ */
+ params<CaseP extends {}, SubcaseP extends {}>(
+ cases: ParamsBuilderBase<CaseP, SubcaseP>
+ ): TestBuilderWithParams<F, CaseP, SubcaseP>;
+
+ /**
+ * Parameterize the test, generating multiple cases, without subcases.
+ */
+ paramsSimple<P extends {}>(cases: Iterable<P>): TestBuilderWithParams<F, P, {}>;
+
+ /**
+ * Parameterize the test, generating one case with multiple subcases.
+ */
+ paramsSubcasesOnly<P extends {}>(subcases: Iterable<P>): TestBuilderWithParams<F, {}, P>;
+ /**
+ * Parameterize the test, generating one case with multiple subcases.
+ *
+ * The `unit` value passed to the `subcases` callback is an immutable constant
+ * `SubcaseParamsBuilder<{}>`, with one empty case `{}` and one empty subcase `{}`.
+ */
+ paramsSubcasesOnly<P extends {}>(
+ subcases: (unit: SubcaseParamsBuilder<{}, {}>) => SubcaseParamsBuilder<{}, P>
+ ): TestBuilderWithParams<F, {}, P>;
+}
+
+interface TestBuilderWithParams<F extends Fixture, CaseP extends {}, SubcaseP extends {}> {
+ /**
+   * Limit subcases to a maximum number per testcase.
+ * @param b the maximum number of subcases per testcase.
+ *
+ * If the number of subcases exceeds `b`, add an internal
+ * numeric, incrementing `batch__` param to split subcases
+ * into groups of at most `b` subcases.
+ */
+ batch(b: number): this;
+ /**
+ * Run a function on shared subcase batch state before each
+ * batch of subcases.
+ * @param fn the function to run. It is called with the test
+ * fixture's shared subcase batch state.
+ *
+ * Generally, this function should be careful to avoid mutating
+ * any state on the shared subcase batch state which could result
+ * in unexpected order-dependent test behavior.
+ */
+ beforeAllSubcases(fn: BeforeAllSubcasesFn<SubcaseBatchStateFromFixture<F>, CaseP>): this;
+ /**
+ * Set the test function.
+ * @param fn the test function.
+ */
+ fn(fn: TestFn<F, Merged<CaseP, SubcaseP>>): void;
+ /**
+ * Mark the test as unimplemented.
+ */
+ unimplemented(): void;
+}
+
+class TestBuilder<S extends SubcaseBatchState, F extends Fixture> {
+ readonly testPath: string[];
+ isUnimplemented: boolean;
+ description: string | undefined;
+ readonly testCreationStack: Error;
+
+ private readonly fixture: FixtureClass;
+ private testFn: TestFn<Fixture, {}> | undefined;
+ private beforeFn: BeforeAllSubcasesFn<SubcaseBatchState, {}> | undefined;
+ private testCases?: ParamsBuilderBase<{}, {}> = undefined;
+ private batchSize: number = 0;
+
+ constructor(testPath: string[], fixture: FixtureClass, testCreationStack: Error) {
+ this.testPath = testPath;
+ this.isUnimplemented = false;
+ this.fixture = fixture;
+ this.testCreationStack = testCreationStack;
+ }
+
+ desc(description: string): this {
+ this.description = description.trim();
+ return this;
+ }
+
+ specURL(_url: string): this {
+ return this;
+ }
+
+ beforeAllSubcases(fn: BeforeAllSubcasesFn<SubcaseBatchState, {}>): this {
+ assert(this.beforeFn === undefined);
+ this.beforeFn = fn;
+ return this;
+ }
+
+ fn(fn: TestFn<Fixture, {}>): void {
+ // eslint-disable-next-line no-warning-comments
+ // MAINTENANCE_TODO: add "TODO" if there's no description? (and make sure it only ends up on
+ // actual tests, not on test parents in the tree, which is what happens if you do it here, not
+ // sure why)
+ assert(this.testFn === undefined);
+ this.testFn = fn;
+ }
+
+ batch(b: number): this {
+ this.batchSize = b;
+ return this;
+ }
+
+ unimplemented(): void {
+ assert(this.testFn === undefined);
+
+ this.description =
+ (this.description ? this.description + '\n\n' : '') + 'TODO: .unimplemented()';
+ this.isUnimplemented = true;
+
+ this.testFn = () => {
+ throw new SkipTestCase('test unimplemented');
+ };
+ }
+
+  /** Perform various validation/"lint" checks. */
+ validate(testQuery: TestQueryMultiTest): void {
+ const testPathString = this.testPath.join(kPathSeparator);
+ assert(this.testFn !== undefined, () => {
+ let s = `Test is missing .fn(): ${testPathString}`;
+ if (this.testCreationStack.stack) {
+ s += `\n-> test created at:\n${this.testCreationStack.stack}`;
+ }
+ return s;
+ });
+
+ assert(
+ testQuery.toString().length <= kQueryMaxLength,
+ () =>
+ `Test query ${testQuery} is too long. Max length is ${kQueryMaxLength} characters. Please shorten names or reduce parameters.`
+ );
+
+ if (this.testCases === undefined) {
+ return;
+ }
+
+ const seen = new Set<string>();
+ for (const [caseParams, subcases] of builderIterateCasesWithSubcases(this.testCases, null)) {
+ const caseQuery = new TestQuerySingleCase(
+ testQuery.suite,
+ testQuery.filePathParts,
+ testQuery.testPathParts,
+ caseParams
+ ).toString();
+ assert(
+ caseQuery.length <= kQueryMaxLength,
+ () =>
+ `Case query ${caseQuery} is too long. Max length is ${kQueryMaxLength} characters. Please shorten names or reduce parameters.`
+ );
+
+ for (const subcaseParams of subcases ?? [{}]) {
+ const params = mergeParams(caseParams, subcaseParams);
+ assert(this.batchSize === 0 || !(kBatchParamName in params));
+
+ // stringifyPublicParams also checks for invalid params values
+ let testcaseString;
+ try {
+ testcaseString = stringifyPublicParams(params);
+ } catch (e) {
+ throw new Error(`${e}: ${testPathString}`);
+ }
+
+ // A (hopefully) unique representation of a params value.
+ const testcaseStringUnique = stringifyPublicParamsUniquely(params);
+ assert(
+ !seen.has(testcaseStringUnique),
+ `Duplicate public test case+subcase params for test ${testPathString}: ${testcaseString}`
+ );
+ seen.add(testcaseStringUnique);
+ }
+ }
+ }
+
+ computeCaseCount(): number {
+ if (this.testCases === undefined) {
+ return 1;
+ }
+
+ let caseCount = 0;
+ for (const [_caseParams, _subcases] of builderIterateCasesWithSubcases(this.testCases, null)) {
+ caseCount++;
+ }
+ return caseCount;
+ }
+
+ params(
+ cases: ((unit: CaseParamsBuilder<{}>) => ParamsBuilderBase<{}, {}>) | ParamsBuilderBase<{}, {}>
+ ): TestBuilder<S, F> {
+ assert(this.testCases === undefined, 'test case is already parameterized');
+ if (cases instanceof Function) {
+ this.testCases = cases(kUnitCaseParamsBuilder);
+ } else {
+ this.testCases = cases;
+ }
+ return this;
+ }
+
+ paramsSimple(cases: Iterable<{}>): TestBuilder<S, F> {
+ assert(this.testCases === undefined, 'test case is already parameterized');
+ this.testCases = kUnitCaseParamsBuilder.combineWithParams(cases);
+ return this;
+ }
+
+ paramsSubcasesOnly(
+ subcases: Iterable<{}> | ((unit: SubcaseParamsBuilder<{}, {}>) => SubcaseParamsBuilder<{}, {}>)
+ ): TestBuilder<S, F> {
+ if (subcases instanceof Function) {
+ return this.params(subcases(kUnitCaseParamsBuilder.beginSubcases()));
+ } else {
+ return this.params(kUnitCaseParamsBuilder.beginSubcases().combineWithParams(subcases));
+ }
+ }
+
+ private makeCaseSpecific(params: {}, subcases: Iterable<{}> | undefined) {
+ assert(this.testFn !== undefined, 'No test function (.fn()) for test');
+ return new RunCaseSpecific(
+ this.testPath,
+ params,
+ this.isUnimplemented,
+ subcases,
+ this.fixture,
+ this.testFn,
+ this.beforeFn,
+ this.testCreationStack
+ );
+ }
+
+ *iterate(caseFilter: TestParams | null): IterableIterator<RunCase> {
+ this.testCases ??= kUnitCaseParamsBuilder;
+
+ // Remove the batch__ from the caseFilter because the params builder doesn't
+ // know about it (we don't add it until later in this function).
+ let filterToBatch: number | undefined;
+ const caseFilterWithoutBatch = caseFilter ? { ...caseFilter } : null;
+ if (caseFilterWithoutBatch && kBatchParamName in caseFilterWithoutBatch) {
+ const batchParam = caseFilterWithoutBatch[kBatchParamName];
+ assert(typeof batchParam === 'number');
+ filterToBatch = batchParam;
+ delete caseFilterWithoutBatch[kBatchParamName];
+ }
+
+ for (const [caseParams, subcases] of builderIterateCasesWithSubcases(
+ this.testCases,
+ caseFilterWithoutBatch
+ )) {
+ // If batches are not used, yield just one case.
+ if (this.batchSize === 0 || subcases === undefined) {
+ yield this.makeCaseSpecific(caseParams, subcases);
+ continue;
+ }
+
+ // Same if there ends up being only one batch.
+ const subcaseArray = Array.from(subcases);
+ if (subcaseArray.length <= this.batchSize) {
+ yield this.makeCaseSpecific(caseParams, subcaseArray);
+ continue;
+ }
+
+ // There are multiple batches. Helper function for this case:
+ const makeCaseForBatch = (batch: number) => {
+ const sliceStart = batch * this.batchSize;
+ return this.makeCaseSpecific(
+ { ...caseParams, [kBatchParamName]: batch },
+ subcaseArray.slice(sliceStart, Math.min(subcaseArray.length, sliceStart + this.batchSize))
+ );
+ };
+
+ // If we filter to just one batch, yield it.
+ if (filterToBatch !== undefined) {
+ yield makeCaseForBatch(filterToBatch);
+ continue;
+ }
+
+ // Finally, if not, yield all of the batches.
+ for (let batch = 0; batch * this.batchSize < subcaseArray.length; ++batch) {
+ yield makeCaseForBatch(batch);
+ }
+ }
+ }
+}
+
+class RunCaseSpecific implements RunCase {
+ readonly id: TestCaseID;
+ readonly isUnimplemented: boolean;
+
+ private readonly params: {};
+ private readonly subcases: Iterable<{}> | undefined;
+ private readonly fixture: FixtureClass;
+ private readonly fn: TestFn<Fixture, {}>;
+ private readonly beforeFn?: BeforeAllSubcasesFn<SubcaseBatchState, {}>;
+ private readonly testCreationStack: Error;
+
+ constructor(
+ testPath: string[],
+ params: {},
+ isUnimplemented: boolean,
+ subcases: Iterable<{}> | undefined,
+ fixture: FixtureClass,
+ fn: TestFn<Fixture, {}>,
+ beforeFn: BeforeAllSubcasesFn<SubcaseBatchState, {}> | undefined,
+ testCreationStack: Error
+ ) {
+ this.id = { test: testPath, params: extractPublicParams(params) };
+ this.isUnimplemented = isUnimplemented;
+ this.params = params;
+ this.subcases = subcases;
+ this.fixture = fixture;
+ this.fn = fn;
+ this.beforeFn = beforeFn;
+ this.testCreationStack = testCreationStack;
+ }
+
+ computeSubcaseCount(): number {
+ if (this.subcases) {
+ let count = 0;
+ for (const _subcase of this.subcases) {
+ count++;
+ }
+ return count;
+ } else {
+ return 1;
+ }
+ }
+
+ async runTest(
+ rec: TestCaseRecorder,
+ sharedState: SubcaseBatchState,
+ params: TestParams,
+ throwSkip: boolean,
+ expectedStatus: Expectation
+ ): Promise<void> {
+ try {
+ rec.beginSubCase();
+ if (expectedStatus === 'skip') {
+ throw new SkipTestCase('Skipped by expectations');
+ }
+
+ const inst = new this.fixture(sharedState, rec, params);
+ try {
+ await inst.init();
+ await this.fn(inst as Fixture & { params: {} });
+ rec.passed();
+ } finally {
+ // Runs as long as constructor succeeded, even if initialization or the test failed.
+ await inst.finalize();
+ }
+ } catch (ex) {
+ // There was an exception from constructor, init, test, or finalize.
+ // An error from init or test may have been a SkipTestCase.
+ // An error from finalize may have been an eventualAsyncExpectation failure
+ // or unexpected validation/OOM error from the GPUDevice.
+ rec.threw(ex);
+ if (throwSkip && ex instanceof SkipTestCase) {
+ throw ex;
+ }
+ } finally {
+ try {
+ rec.endSubCase(expectedStatus);
+ } catch (ex) {
+ assert(ex instanceof UnexpectedPassError);
+ ex.message = `Testcase passed unexpectedly.`;
+ ex.stack = this.testCreationStack.stack;
+ rec.warn(ex);
+ }
+ }
+ }
+
+ async run(
+ rec: TestCaseRecorder,
+ selfQuery: TestQuerySingleCase,
+ expectations: TestQueryWithExpectation[]
+ ): Promise<void> {
+ const getExpectedStatus = (selfQueryWithSubParams: TestQuerySingleCase) => {
+ let didSeeFail = false;
+ for (const exp of expectations) {
+ const ordering = compareQueries(exp.query, selfQueryWithSubParams);
+ if (ordering === Ordering.Unordered || ordering === Ordering.StrictSubset) {
+ continue;
+ }
+
+ switch (exp.expectation) {
+ // Skip takes precedence. If there is any expectation indicating a skip,
+ // signal it immediately.
+ case 'skip':
+ return 'skip';
+ case 'fail':
+ // Otherwise, indicate that we might expect a failure.
+ didSeeFail = true;
+ break;
+ default:
+ unreachable();
+ }
+ }
+ return didSeeFail ? 'fail' : 'pass';
+ };
+
+ const { testHeartbeatCallback, maxSubcasesInFlight } = globalTestConfig;
+ try {
+ rec.start();
+ const sharedState = this.fixture.MakeSharedState(rec, this.params);
+ try {
+ await sharedState.init();
+ if (this.beforeFn) {
+ await this.beforeFn(sharedState);
+ }
+ await sharedState.postInit();
+ testHeartbeatCallback();
+
+ let allPreviousSubcasesFinalizedPromise: Promise<void> = Promise.resolve();
+ if (this.subcases) {
+ let totalCount = 0;
+ let skipCount = 0;
+
+ // If there are too many subcases in flight, starting the next subcase will register
+ // `resolvePromiseBlockingSubcase` and wait until `subcaseFinishedCallback` is called.
+ let subcasesInFlight = 0;
+ let resolvePromiseBlockingSubcase: (() => void) | undefined = undefined;
+ const subcaseFinishedCallback = () => {
+ subcasesInFlight -= 1;
+ // If there is any subcase waiting on a previous subcase to finish,
+ // unblock it now, and clear the resolve callback.
+ if (resolvePromiseBlockingSubcase) {
+ resolvePromiseBlockingSubcase();
+ resolvePromiseBlockingSubcase = undefined;
+ }
+ };
+
+ for (const subParams of this.subcases) {
+ // Make a recorder that will defer all calls until `allPreviousSubcasesFinalizedPromise`
+ // resolves. Waiting on `allPreviousSubcasesFinalizedPromise` ensures that
+ // logs from all the previous subcases have been flushed before flushing new logs.
+ const subcasePrefix = 'subcase: ' + stringifyPublicParams(subParams);
+ const subRec = new Proxy(rec, {
+ get: (target, k: keyof TestCaseRecorder) => {
+ const prop = TestCaseRecorder.prototype[k];
+ if (typeof prop === 'function') {
+ testHeartbeatCallback();
+ return function (...args: Parameters<typeof prop>) {
+ void allPreviousSubcasesFinalizedPromise.then(() => {
+ // Prepend the subcase name to all error messages.
+ for (const arg of args) {
+ if (arg instanceof Error) {
+ try {
+ arg.message = subcasePrefix + '\n' + arg.message;
+ } catch {
+ // If that fails (e.g. on DOMException), try to put it in the stack:
+ let stack = subcasePrefix;
+ if (arg.stack) stack += '\n' + arg.stack;
+ try {
+ arg.stack = stack;
+ } catch {
+ // If that fails too, just silence it.
+ }
+ }
+ }
+ }
+
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const rv = (prop as any).apply(target, args);
+ // Because this proxy executes functions in a deferred manner,
+ // it should never be used for functions that need to return a value.
+ assert(rv === undefined);
+ });
+ };
+ }
+ return prop;
+ },
+ });
+
+ const params = mergeParams(this.params, subParams);
+ const subcaseQuery = new TestQuerySingleCase(
+ selfQuery.suite,
+ selfQuery.filePathParts,
+ selfQuery.testPathParts,
+ params
+ );
+
+ // Limit the maximum number of subcases in flight.
+ if (subcasesInFlight >= maxSubcasesInFlight) {
+ await new Promise<void>(resolve => {
+ // There should only be one subcase waiting at a time.
+ assert(resolvePromiseBlockingSubcase === undefined);
+ resolvePromiseBlockingSubcase = resolve;
+ });
+ }
+
+ subcasesInFlight += 1;
+ // Runs async without waiting so that subsequent subcases can start.
+ // All finalization steps will be waited on at the end of the testcase.
+ const finalizePromise = this.runTest(
+ subRec,
+ sharedState,
+ params,
+ /* throwSkip */ true,
+ getExpectedStatus(subcaseQuery)
+ )
+ .then(() => {
+ subRec.info(new Error('OK'));
+ })
+ .catch(ex => {
+ if (ex instanceof SkipTestCase) {
+ // Convert SkipTestCase to info messages
+ ex.message = 'subcase skipped: ' + ex.message;
+ subRec.info(ex);
+ ++skipCount;
+ } else {
+                  // Since we are catching all errors inside runTest(), this should never happen
+ subRec.threw(ex);
+ }
+ })
+ .finally(subcaseFinishedCallback);
+
+ allPreviousSubcasesFinalizedPromise = allPreviousSubcasesFinalizedPromise.then(
+ () => finalizePromise
+ );
+ ++totalCount;
+ }
+
+ // Wait for all subcases to finalize and report their results.
+ await allPreviousSubcasesFinalizedPromise;
+
+ if (skipCount === totalCount) {
+ rec.skipped(new SkipTestCase('all subcases were skipped'));
+ }
+ } else {
+ await this.runTest(
+ rec,
+ sharedState,
+ this.params,
+ /* throwSkip */ false,
+ getExpectedStatus(selfQuery)
+ );
+ }
+ } finally {
+ testHeartbeatCallback();
+ // Runs as long as the shared state constructor succeeded, even if initialization or a test failed.
+ await sharedState.finalize();
+ testHeartbeatCallback();
+ }
+ } catch (ex) {
+ // There was an exception from sharedState/fixture constructor, init, beforeFn, or test.
+ // An error from beforeFn may have been SkipTestCase.
+ // An error from finalize may have been an eventualAsyncExpectation failure
+ // or unexpected validation/OOM error from the GPUDevice.
+ rec.threw(ex);
+ } finally {
+ rec.finish();
+
+ const msg: CaseTimingLogLine = {
+ q: selfQuery.toString(),
+ timems: rec.result.timems,
+ nonskippedSubcaseCount: rec.nonskippedSubcaseCount,
+ };
+ logToWebsocket(JSON.stringify(msg));
+ }
+ }
+}
+
+export type CaseTimingLogLine = {
+ q: string;
+ /** Total time it took to execute the case. */
+ timems: number;
+ /**
+   * Number of subcases that ran in the case (excluding skipped subcases, so
+   * they don't dilute the average per-subcase time).
+ */
+ nonskippedSubcaseCount: number;
+};
diff --git a/dom/webgpu/tests/cts/checkout/src/common/internal/test_suite_listing.ts b/dom/webgpu/tests/cts/checkout/src/common/internal/test_suite_listing.ts
new file mode 100644
index 0000000000..2d2b555366
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/internal/test_suite_listing.ts
@@ -0,0 +1,15 @@
+// A listing of all specs within a single suite. This is the (awaited) type of
+// `groups` in '{cts,unittests}/listing.ts' and `listing` in the auto-generated
+// 'out/{cts,unittests}/listing.js' files (see tools/gen_listings).
+export type TestSuiteListing = TestSuiteListingEntry[];
+
+export type TestSuiteListingEntry = TestSuiteListingEntrySpec | TestSuiteListingEntryReadme;
+
+interface TestSuiteListingEntrySpec {
+ readonly file: string[];
+}
+
+interface TestSuiteListingEntryReadme {
+ readonly file: string[];
+ readonly readme: string;
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/common/internal/tree.ts b/dom/webgpu/tests/cts/checkout/src/common/internal/tree.ts
new file mode 100644
index 0000000000..594837059c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/internal/tree.ts
@@ -0,0 +1,671 @@
+import { loadMetadataForSuite, TestMetadataListing } from '../framework/metadata.js';
+import { globalTestConfig } from '../framework/test_config.js';
+import { RunCase, RunFn } from '../internal/test_group.js';
+import { assert, now } from '../util/util.js';
+
+import { TestFileLoader } from './file_loader.js';
+import { TestParamsRW } from './params_utils.js';
+import { comparePublicParamsPaths, compareQueries, Ordering } from './query/compare.js';
+import {
+ TestQuery,
+ TestQueryMultiCase,
+ TestQuerySingleCase,
+ TestQueryMultiFile,
+ TestQueryMultiTest,
+} from './query/query.js';
+import { kBigSeparator, kWildcard, kPathSeparator, kParamSeparator } from './query/separators.js';
+import { stringifySingleParam } from './query/stringify_params.js';
+import { StacklessError } from './util.js';
+
+// `loadTreeForQuery()` loads a TestTree for a given queryToLoad.
+// The resulting tree is a linked-list all the way from `suite:*` to queryToLoad,
+// and under queryToLoad is a tree containing every case matched by queryToLoad.
+//
+// `subqueriesToExpand` influences the `collapsible` flag on nodes in the resulting tree.
+// A node is considered "collapsible" if none of the subqueriesToExpand is a StrictSubset
+// of that node.
+//
+// In WebKit/Blink-style web_tests, an expectation file marks individual cts.https.html "variants"
+// as "Failure", "Crash", etc. By passing in the list of expectations as the subqueriesToExpand,
+// we can programmatically subdivide the cts.https.html "variants" list to be able to implement
+// arbitrarily-fine suppressions (instead of having to suppress entire test files, which would
+// lose a lot of coverage).
+//
+// `iterateCollapsedNodes()` produces the list of queries for the variants list.
+//
+// Though somewhat complicated, this system has important benefits:
+// - Avoids having to suppress entire test files, which would cause large test coverage loss.
+// - Minimizes the number of page loads needed for fine-grained suppressions.
+// (In the naive case, we could do one page load per test case - but the test suite would
+// take impossibly long to run.)
+// - Enables developers to put any number of tests in one file as appropriate, without worrying
+// about expectation granularity.
+
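+// Minimal usage sketch (assumed query strings; the loader and `parseQuery` come from sibling
+// modules and are not imported here):
+//
+//   const loader: TestFileLoader = /* e.g. a DefaultTestFileLoader from file_loader.js */;
+//   const tree = await loadTreeForQuery(loader, parseQuery('suite:a,b:*'), {
+//     subqueriesToExpand: [parseQuery('suite:a,b:c:x=1;*')],
+//   });
+//   for (const node of tree.iterateCollapsedNodes({ alwaysExpandThroughLevel: 1 })) {
+//     // Yields one query per "variant": expanded where a subquery-to-expand applies,
+//     // collapsed elsewhere.
+//     console.log(node.query.toString());
+//   }
+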
+interface TestTreeNodeBase<T extends TestQuery> {
+ readonly query: T;
+ /**
+ * Readable "relative" name for display in standalone runner.
+ * Not always the exact relative name, because sometimes there isn't
+ * one (e.g. s:f:* relative to s:f,*), but something that is readable.
+ */
+ readonly readableRelativeName: string;
+ subtreeCounts?: { tests: number; nodesWithTODO: number; totalTimeMS: number };
+ subcaseCount?: number;
+}
+
+export interface TestSubtree<T extends TestQuery = TestQuery> extends TestTreeNodeBase<T> {
+ readonly children: Map<string, TestTreeNode>;
+ collapsible: boolean;
+ description?: string;
+ readonly testCreationStack?: Error;
+}
+
+export interface TestTreeLeaf extends TestTreeNodeBase<TestQuerySingleCase> {
+ readonly run: RunFn;
+ readonly isUnimplemented?: boolean;
+ subtreeCounts?: undefined;
+ subcaseCount: number;
+}
+
+export type TestTreeNode = TestSubtree | TestTreeLeaf;
+
+/**
+ * When iterating through "collapsed" tree nodes, indicates how many "query levels" to traverse
+ * through before starting to collapse nodes.
+ *
+ * Corresponds with TestQueryLevel, but excludes 4 (SingleCase):
+ * - 1 = MultiFile. Expands so every file is in the collapsed tree.
+ * - 2 = MultiTest. Expands so every test is in the collapsed tree.
+ * - 3 = MultiCase. Expands so every case is in the collapsed tree (i.e. collapsing disabled).
+ */
+export type ExpandThroughLevel = 1 | 2 | 3;
+
+export class TestTree {
+ /**
+ * The `queryToLoad` that this test tree was created for.
+ * Test trees are always rooted at `suite:*`, but they only contain nodes that fit
+ * within `forQuery`.
+ *
+ * This is used for `iterateCollapsedNodes` which only starts collapsing at the next
+ * `TestQueryLevel` after `forQuery`.
+ */
+ readonly forQuery: TestQuery;
+ readonly root: TestSubtree;
+
+ private constructor(forQuery: TestQuery, root: TestSubtree) {
+ this.forQuery = forQuery;
+ this.root = root;
+ assert(
+ root.query.level === 1 && root.query.depthInLevel === 0,
+ 'TestTree root must be the root (suite:*)'
+ );
+ }
+
+ static async create(
+ forQuery: TestQuery,
+ root: TestSubtree,
+ maxChunkTime: number
+ ): Promise<TestTree> {
+ const suite = forQuery.suite;
+
+ let chunking = undefined;
+ if (Number.isFinite(maxChunkTime)) {
+ const metadata = loadMetadataForSuite(`./src/${suite}`);
+ assert(metadata !== null, `metadata for ${suite} is missing, but maxChunkTime was requested`);
+ chunking = { metadata, maxChunkTime };
+ }
+ await TestTree.propagateCounts(root, chunking);
+
+ return new TestTree(forQuery, root);
+ }
+
+ /**
+ * Iterate through the leaves of a version of the tree which has been pruned to exclude
+ * subtrees which:
+ * - are at a deeper `TestQueryLevel` than `this.forQuery`, and
+ * - were not a `Ordering.StrictSubset` of any of the `subqueriesToExpand` during tree creation.
+ */
+ iterateCollapsedNodes({
+ includeIntermediateNodes = false,
+ includeEmptySubtrees = false,
+ alwaysExpandThroughLevel,
+ }: {
+ /** Whether to include intermediate tree nodes or only collapsed-leaves. */
+ includeIntermediateNodes?: boolean;
+ /** Whether to include collapsed-leaves with no children. */
+ includeEmptySubtrees?: boolean;
+ /** Never collapse nodes up through this level. */
+ alwaysExpandThroughLevel: ExpandThroughLevel;
+ }): IterableIterator<Readonly<TestTreeNode>> {
+ const expandThroughLevel = Math.max(this.forQuery.level, alwaysExpandThroughLevel);
+ return TestTree.iterateSubtreeNodes(this.root, {
+ includeIntermediateNodes,
+ includeEmptySubtrees,
+ expandThroughLevel,
+ });
+ }
+
+ iterateLeaves(): IterableIterator<Readonly<TestTreeLeaf>> {
+ return TestTree.iterateSubtreeLeaves(this.root);
+ }
+
+ /**
+ * Dissolve nodes which have only one child, e.g.:
+ * a,* { a,b,* { a,b:* { ... } } }
+ * collapses down into:
+ * a,* { a,b:* { ... } }
+ * which is less needlessly verbose when displaying the tree in the standalone runner.
+ */
+ dissolveSingleChildTrees(): void {
+ const newRoot = dissolveSingleChildTrees(this.root);
+ assert(newRoot === this.root);
+ }
+
+ toString(): string {
+ return TestTree.subtreeToString('(root)', this.root, '');
+ }
+
+ static *iterateSubtreeNodes(
+ subtree: TestSubtree,
+ opts: {
+ includeIntermediateNodes: boolean;
+ includeEmptySubtrees: boolean;
+ expandThroughLevel: number;
+ }
+ ): IterableIterator<TestTreeNode> {
+ if (opts.includeIntermediateNodes) {
+ yield subtree;
+ }
+
+ for (const [, child] of subtree.children) {
+ if ('children' in child) {
+ // Is a subtree
+ const collapsible = child.collapsible && child.query.level > opts.expandThroughLevel;
+ if (child.children.size > 0 && !collapsible) {
+ yield* TestTree.iterateSubtreeNodes(child, opts);
+ } else if (child.children.size > 0 || opts.includeEmptySubtrees) {
+ // Don't yield empty subtrees (e.g. files with no tests) unless includeEmptySubtrees
+ yield child;
+ }
+ } else {
+ // Is a leaf
+ yield child;
+ }
+ }
+ }
+
+ static *iterateSubtreeLeaves(subtree: TestSubtree): IterableIterator<TestTreeLeaf> {
+ for (const [, child] of subtree.children) {
+ if ('children' in child) {
+ yield* TestTree.iterateSubtreeLeaves(child);
+ } else {
+ yield child;
+ }
+ }
+ }
+
+ /** Propagate the subtreeTODOs/subtreeTests state upward from leaves to parent nodes. */
+ static async propagateCounts(
+ subtree: TestSubtree,
+ chunking: { metadata: TestMetadataListing; maxChunkTime: number } | undefined
+ ): Promise<{ tests: number; nodesWithTODO: number; totalTimeMS: number; subcaseCount: number }> {
+ subtree.subtreeCounts ??= { tests: 0, nodesWithTODO: 0, totalTimeMS: 0 };
+ subtree.subcaseCount = 0;
+ for (const [, child] of subtree.children) {
+ if ('children' in child) {
+ const counts = await TestTree.propagateCounts(child, chunking);
+ subtree.subtreeCounts.tests += counts.tests;
+ subtree.subtreeCounts.nodesWithTODO += counts.nodesWithTODO;
+ subtree.subtreeCounts.totalTimeMS += counts.totalTimeMS;
+ subtree.subcaseCount += counts.subcaseCount;
+ } else {
+ subtree.subcaseCount = child.subcaseCount;
+ }
+ }
+
+ // If we're chunking based on a maxChunkTime, then at each
+ // TestQueryMultiCase node of the tree we look at its total time. If the
+ // total time is larger than the maxChunkTime, we set collapsible=false to
+ // make sure it gets split up in the output. Note:
+ // - TestQueryMultiTest and higher nodes are never set to collapsible anyway, so we ignore them.
+ // - TestQuerySingleCase nodes can't be collapsed, so we ignore them.
+ if (chunking && subtree.query instanceof TestQueryMultiCase) {
+ const testLevelQuery = new TestQueryMultiCase(
+ subtree.query.suite,
+ subtree.query.filePathParts,
+ subtree.query.testPathParts,
+ {}
+ ).toString();
+
+ const metadata = chunking.metadata;
+
+ const subcaseTiming: number | undefined = metadata[testLevelQuery]?.subcaseMS;
+ if (subcaseTiming !== undefined) {
+ const totalTiming = subcaseTiming * subtree.subcaseCount;
+ subtree.subtreeCounts.totalTimeMS = totalTiming;
+ if (totalTiming > chunking.maxChunkTime) {
+ subtree.collapsible = false;
+ }
+ }
+ }
+
+ return { ...subtree.subtreeCounts, subcaseCount: subtree.subcaseCount ?? 0 };
+ }
+
+ /** Displays counts in the format `(Nodes with TODOs) / (Total test count)`. */
+ static countsToString(tree: TestTreeNode): string {
+ if (tree.subtreeCounts) {
+ return `${tree.subtreeCounts.nodesWithTODO} / ${tree.subtreeCounts.tests}`;
+ } else {
+ return '';
+ }
+ }
+
+ static subtreeToString(name: string, tree: TestTreeNode, indent: string): string {
+ const collapsible = 'run' in tree ? '>' : tree.collapsible ? '+' : '-';
+ let s =
+ indent +
+ `${collapsible} ${TestTree.countsToString(tree)} ${JSON.stringify(name)} => ${tree.query}`;
+ if ('children' in tree) {
+ if (tree.description !== undefined) {
+ s += `\n${indent} | ${JSON.stringify(tree.description)}`;
+ }
+
+ for (const [name, child] of tree.children) {
+ s += '\n' + TestTree.subtreeToString(name, child, indent + ' ');
+ }
+ }
+ return s;
+ }
+}
+
+// MAINTENANCE_TODO: Consider having subqueriesToExpand actually impact the depth-order of params
+// in the tree.
+export async function loadTreeForQuery(
+ loader: TestFileLoader,
+ queryToLoad: TestQuery,
+ {
+ subqueriesToExpand,
+ maxChunkTime = Infinity,
+ }: { subqueriesToExpand: TestQuery[]; maxChunkTime?: number }
+): Promise<TestTree> {
+ const suite = queryToLoad.suite;
+ const specs = await loader.listing(suite);
+
+ const subqueriesToExpandEntries = Array.from(subqueriesToExpand.entries());
+ const seenSubqueriesToExpand: boolean[] = new Array(subqueriesToExpand.length);
+ seenSubqueriesToExpand.fill(false);
+
+ const isCollapsible = (subquery: TestQuery) =>
+ subqueriesToExpandEntries.every(([i, toExpand]) => {
+ const ordering = compareQueries(toExpand, subquery);
+
+ // If toExpand == subquery, no expansion is needed (but it's still "seen").
+ if (ordering === Ordering.Equal) seenSubqueriesToExpand[i] = true;
+ return ordering !== Ordering.StrictSubset;
+ });
+
+ // L0 = suite-level, e.g. suite:*
+ // L1 = file-level, e.g. suite:a,b:*
+ // L2 = test-level, e.g. suite:a,b:c,d:*
+ // L3 = case-level, e.g. suite:a,b:c,d:
+ let foundCase = false;
+ // L0 is suite:*
+ const subtreeL0 = makeTreeForSuite(suite, isCollapsible);
+
+ const imports_start = now();
+ const pEntriesWithImports = []; // Promise<entry with importedSpec>[]
+ for (const entry of specs) {
+ if (entry.file.length === 0 && 'readme' in entry) {
+ // Suite-level readme.
+ setSubtreeDescriptionAndCountTODOs(subtreeL0, entry.readme);
+ continue;
+ }
+
+ {
+ const queryL1 = new TestQueryMultiFile(suite, entry.file);
+ const orderingL1 = compareQueries(queryL1, queryToLoad);
+ if (orderingL1 === Ordering.Unordered) {
+ // File path is not matched by this query.
+ continue;
+ }
+ }
+
+ // We're going to be fetching+importing a bunch of things, so do it in async.
+ const pEntryWithImport = (async () => {
+ if ('readme' in entry) {
+ return entry;
+ } else {
+ return {
+ ...entry,
+ importedSpec: await loader.importSpecFile(queryToLoad.suite, entry.file),
+ };
+ }
+ })();
+
+ const kForceSerialImporting = false;
+ if (kForceSerialImporting) {
+ await pEntryWithImport;
+ }
+ pEntriesWithImports.push(pEntryWithImport);
+ }
+
+ const entriesWithImports = await Promise.all(pEntriesWithImports);
+ if (globalTestConfig.frameworkDebugLog) {
+ const imported_time = performance.now() - imports_start;
+ globalTestConfig.frameworkDebugLog(
+ `Imported importedSpecFiles[${entriesWithImports.length}] in ${imported_time}ms.`
+ );
+ }
+
+ for (const entry of entriesWithImports) {
+ if ('readme' in entry) {
+ // Entry is a README that is an ancestor or descendant of the query.
+ // (It's included for display in the standalone runner.)
+
+ // readmeSubtree is suite:a,b,*
+ // (This is always going to dedup with a file path, if there are any test spec files under
+ // the directory that has the README).
+ const readmeSubtree: TestSubtree<TestQueryMultiFile> = addSubtreeForDirPath(
+ subtreeL0,
+ entry.file,
+ isCollapsible
+ );
+ setSubtreeDescriptionAndCountTODOs(readmeSubtree, entry.readme);
+ continue;
+ }
+
+ // Entry is a spec file.
+ const spec = entry.importedSpec;
+ // subtreeL1 is suite:a,b:*
+ const subtreeL1: TestSubtree<TestQueryMultiTest> = addSubtreeForFilePath(
+ subtreeL0,
+ entry.file,
+ isCollapsible
+ );
+ setSubtreeDescriptionAndCountTODOs(subtreeL1, spec.description);
+
+ let groupHasTests = false;
+ for (const t of spec.g.iterate()) {
+ groupHasTests = true;
+ {
+ const queryL2 = new TestQueryMultiCase(suite, entry.file, t.testPath, {});
+ const orderingL2 = compareQueries(queryL2, queryToLoad);
+ if (orderingL2 === Ordering.Unordered) {
+ // Test path is not matched by this query.
+ continue;
+ }
+ }
+
+ // subtreeL2 is suite:a,b:c,d:*
+ const subtreeL2: TestSubtree<TestQueryMultiCase> = addSubtreeForTestPath(
+ subtreeL1,
+ t.testPath,
+ t.testCreationStack,
+ isCollapsible
+ );
+ // This is 1 test. Set tests=1 then count TODOs.
+ subtreeL2.subtreeCounts ??= { tests: 1, nodesWithTODO: 0, totalTimeMS: 0 };
+ if (t.description) setSubtreeDescriptionAndCountTODOs(subtreeL2, t.description);
+
+ let caseFilter = null;
+ if ('params' in queryToLoad) {
+ caseFilter = queryToLoad.params;
+ }
+
+ // MAINTENANCE_TODO: If tree generation gets too slow, avoid actually iterating the cases in a
+ // file if there's no need to (based on the subqueriesToExpand).
+ for (const c of t.iterate(caseFilter)) {
+ // iterate() guarantees c's query is equal to or a subset of queryToLoad.
+
+ if (queryToLoad instanceof TestQuerySingleCase) {
+ // A subset is OK if it's TestQueryMultiCase, but for SingleCase it must match exactly.
+ const ordering = comparePublicParamsPaths(c.id.params, queryToLoad.params);
+ if (ordering !== Ordering.Equal) {
+ continue;
+ }
+ }
+
+ // Leaf for case is suite:a,b:c,d:x=1;y=2
+ addLeafForCase(subtreeL2, c, isCollapsible);
+ foundCase = true;
+ }
+ }
+ if (!groupHasTests && !subtreeL1.subtreeCounts) {
+ throw new StacklessError(
+ `${subtreeL1.query} has no tests - it must have "TODO" in its description`
+ );
+ }
+ }
+
+ for (const [i, sq] of subqueriesToExpandEntries) {
+ const subquerySeen = seenSubqueriesToExpand[i];
+ if (!subquerySeen) {
+ throw new StacklessError(
+ `subqueriesToExpand entry did not match anything \
+(could be wrong, or could be redundant with a previous subquery):\n ${sq.toString()}`
+ );
+ }
+ }
+ assert(foundCase, `Query \`${queryToLoad.toString()}\` does not match any cases`);
+
+ return TestTree.create(queryToLoad, subtreeL0, maxChunkTime);
+}
+
+function setSubtreeDescriptionAndCountTODOs(
+ subtree: TestSubtree<TestQueryMultiFile>,
+ description: string
+) {
+ assert(subtree.description === undefined);
+ subtree.description = description.trim();
+ subtree.subtreeCounts ??= { tests: 0, nodesWithTODO: 0, totalTimeMS: 0 };
+ if (subtree.description.indexOf('TODO') !== -1) {
+ subtree.subtreeCounts.nodesWithTODO++;
+ }
+}
+
+function makeTreeForSuite(
+ suite: string,
+ isCollapsible: (sq: TestQuery) => boolean
+): TestSubtree<TestQueryMultiFile> {
+ const query = new TestQueryMultiFile(suite, []);
+ return {
+ readableRelativeName: suite + kBigSeparator,
+ query,
+ children: new Map(),
+ collapsible: isCollapsible(query),
+ };
+}
+
+function addSubtreeForDirPath(
+ tree: TestSubtree<TestQueryMultiFile>,
+ file: string[],
+ isCollapsible: (sq: TestQuery) => boolean
+): TestSubtree<TestQueryMultiFile> {
+ const subqueryFile: string[] = [];
+ // To start, tree is suite:*
+ // This loop goes from that -> suite:a,* -> suite:a,b,*
+ for (const part of file) {
+ subqueryFile.push(part);
+ tree = getOrInsertSubtree(part, tree, () => {
+ const query = new TestQueryMultiFile(tree.query.suite, subqueryFile);
+ return {
+ readableRelativeName: part + kPathSeparator + kWildcard,
+ query,
+ collapsible: isCollapsible(query),
+ };
+ });
+ }
+ return tree;
+}
+
+function addSubtreeForFilePath(
+ tree: TestSubtree<TestQueryMultiFile>,
+ file: string[],
+ isCollapsible: (sq: TestQuery) => boolean
+): TestSubtree<TestQueryMultiTest> {
+ // To start, tree is suite:*
+ // This goes from that -> suite:a,* -> suite:a,b,*
+ tree = addSubtreeForDirPath(tree, file, isCollapsible);
+ // This goes from that -> suite:a,b:*
+ const subtree = getOrInsertSubtree('', tree, () => {
+ const query = new TestQueryMultiTest(tree.query.suite, tree.query.filePathParts, []);
+ assert(file.length > 0, 'file path is empty');
+ return {
+ readableRelativeName: file[file.length - 1] + kBigSeparator + kWildcard,
+ query,
+ collapsible: isCollapsible(query),
+ };
+ });
+ return subtree;
+}
+
+function addSubtreeForTestPath(
+ tree: TestSubtree<TestQueryMultiTest>,
+ test: readonly string[],
+ testCreationStack: Error,
+ isCollapsible: (sq: TestQuery) => boolean
+): TestSubtree<TestQueryMultiCase> {
+ const subqueryTest: string[] = [];
+ // To start, tree is suite:a,b:*
+ // This loop goes from that -> suite:a,b:c,* -> suite:a,b:c,d,*
+ for (const part of test) {
+ subqueryTest.push(part);
+ tree = getOrInsertSubtree(part, tree, () => {
+ const query = new TestQueryMultiTest(
+ tree.query.suite,
+ tree.query.filePathParts,
+ subqueryTest
+ );
+ return {
+ readableRelativeName: part + kPathSeparator + kWildcard,
+ query,
+ collapsible: isCollapsible(query),
+ };
+ });
+ }
+ // This goes from that -> suite:a,b:c,d:*
+ return getOrInsertSubtree('', tree, () => {
+ const query = new TestQueryMultiCase(
+ tree.query.suite,
+ tree.query.filePathParts,
+ subqueryTest,
+ {}
+ );
+ assert(subqueryTest.length > 0, 'subqueryTest is empty');
+ return {
+      readableRelativeName: subqueryTest[subqueryTest.length - 1] + kBigSeparator + kWildcard,
+ query,
+ testCreationStack,
+ collapsible: isCollapsible(query),
+ };
+ });
+}
+
+function addLeafForCase(
+ tree: TestSubtree<TestQueryMultiTest>,
+ t: RunCase,
+ checkCollapsible: (sq: TestQuery) => boolean
+): void {
+ const query = tree.query;
+ let name: string = '';
+ const subqueryParams: TestParamsRW = {};
+
+ // To start, tree is suite:a,b:c,d:*
+ // This loop goes from that -> suite:a,b:c,d:x=1;* -> suite:a,b:c,d:x=1;y=2;*
+ for (const [k, v] of Object.entries(t.id.params)) {
+ name = stringifySingleParam(k, v);
+ subqueryParams[k] = v;
+
+ tree = getOrInsertSubtree(name, tree, () => {
+ const subquery = new TestQueryMultiCase(
+ query.suite,
+ query.filePathParts,
+ query.testPathParts,
+ subqueryParams
+ );
+ return {
+ readableRelativeName: name + kParamSeparator + kWildcard,
+ query: subquery,
+ collapsible: checkCollapsible(subquery),
+ };
+ });
+ }
+
+ // This goes from that -> suite:a,b:c,d:x=1;y=2
+ const subquery = new TestQuerySingleCase(
+ query.suite,
+ query.filePathParts,
+ query.testPathParts,
+ subqueryParams
+ );
+ checkCollapsible(subquery); // mark seenSubqueriesToExpand
+ insertLeaf(tree, subquery, t);
+}
+
+function getOrInsertSubtree<T extends TestQuery>(
+ key: string,
+ parent: TestSubtree,
+ createSubtree: () => Omit<TestSubtree<T>, 'children'>
+): TestSubtree<T> {
+ let v: TestSubtree<T>;
+ const child = parent.children.get(key);
+ if (child !== undefined) {
+ assert('children' in child); // Make sure cached subtree is not actually a leaf
+ v = child as TestSubtree<T>;
+ } else {
+ v = { ...createSubtree(), children: new Map() };
+ parent.children.set(key, v);
+ }
+ return v;
+}
+
+function insertLeaf(parent: TestSubtree, query: TestQuerySingleCase, t: RunCase) {
+ const leaf: TestTreeLeaf = {
+ readableRelativeName: readableNameForCase(query),
+ query,
+ run: (rec, expectations) => t.run(rec, query, expectations || []),
+ isUnimplemented: t.isUnimplemented,
+ subcaseCount: t.computeSubcaseCount(),
+ };
+
+ // This is a leaf (e.g. s:f:t:x=1;* -> s:f:t:x=1). The key is always ''.
+ const key = '';
+ assert(!parent.children.has(key), `Duplicate testcase: ${query}`);
+ parent.children.set(key, leaf);
+}
+
+function dissolveSingleChildTrees(tree: TestTreeNode): TestTreeNode {
+ if ('children' in tree) {
+ const shouldDissolveThisTree =
+ tree.children.size === 1 && tree.query.depthInLevel !== 0 && tree.description === undefined;
+ if (shouldDissolveThisTree) {
+ // Loops exactly once
+ for (const [, child] of tree.children) {
+ // Recurse on child
+ return dissolveSingleChildTrees(child);
+ }
+ }
+
+ for (const [k, child] of tree.children) {
+ // Recurse on each child
+ const newChild = dissolveSingleChildTrees(child);
+ if (newChild !== child) {
+ tree.children.set(k, newChild);
+ }
+ }
+ }
+ return tree;
+}
+
+/** Generate a readable relative name for a case (used in standalone). */
+function readableNameForCase(query: TestQuerySingleCase): string {
+ const paramsKeys = Object.keys(query.params);
+ if (paramsKeys.length === 0) {
+ return query.testPathParts[query.testPathParts.length - 1] + kBigSeparator;
+ } else {
+ const lastKey = paramsKeys[paramsKeys.length - 1];
+ return stringifySingleParam(lastKey, query.params[lastKey]);
+ }
+}
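The tree-building helpers above hinge on getOrInsertSubtree's get-or-insert pattern over each node's children map. A minimal, self-contained sketch of that pattern (simplified stand-in types, not the real TestSubtree shapes) for readers following the query-expansion comments:

  // DemoNode is an assumed simplification; the real nodes also carry queries,
  // readable names, and collapsibility flags.
  interface DemoNode {
    name: string;
    children: Map<string, DemoNode>;
  }

  function getOrInsertChild(
    parent: DemoNode,
    key: string,
    create: () => { name: string }
  ): DemoNode {
    const existing = parent.children.get(key);
    if (existing !== undefined) return existing; // cached subtree, reuse it
    const child: DemoNode = { ...create(), children: new Map() };
    parent.children.set(key, child);
    return child;
  }

  // Walking path parts builds nested subtrees one level per part, mirroring
  // the loops in the add*For* functions above.
  const root: DemoNode = { name: 'suite:*', children: new Map() };
  let cursor = root;
  for (const part of ['a', 'b']) {
    cursor = getOrInsertChild(cursor, part, () => ({ name: part }));
  }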
diff --git a/dom/webgpu/tests/cts/checkout/src/common/internal/util.ts b/dom/webgpu/tests/cts/checkout/src/common/internal/util.ts
new file mode 100644
index 0000000000..37a5db3568
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/internal/util.ts
@@ -0,0 +1,10 @@
+/**
+ * Error without a stack, which can be used to fatally exit from `tools/` scripts with a
+ * user-friendly message (and no confusing stack).
+ */
+export class StacklessError extends Error {
+ constructor(message: string) {
+ super(message);
+ this.stack = undefined;
+ }
+}
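A sketch of the intended call pattern (the import path matches how checklist.ts, later in this patch, imports it from common/tools/): the top-level catch in a tool script prints `ex.stack ?? ex.toString()`, and since `stack` is cleared in the constructor above, the user sees only the message.

  import { StacklessError } from '../internal/util.js';

  // Hypothetical guard in a tools/ script: abort with a friendly message, no stack.
  function requireRepoRoot(foundConfig: boolean): void {
    if (!foundConfig) {
      throw new StacklessError('Must be run from the repository root.');
    }
  }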
diff --git a/dom/webgpu/tests/cts/checkout/src/common/internal/version.ts b/dom/webgpu/tests/cts/checkout/src/common/internal/version.ts
new file mode 100644
index 0000000000..53cc97482e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/internal/version.ts
@@ -0,0 +1 @@
+export const version = 'unknown';
diff --git a/dom/webgpu/tests/cts/checkout/src/common/internal/websocket_logger.ts b/dom/webgpu/tests/cts/checkout/src/common/internal/websocket_logger.ts
new file mode 100644
index 0000000000..30246df843
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/internal/websocket_logger.ts
@@ -0,0 +1,52 @@
+/**
+ * - 'uninitialized' means we haven't tried to connect yet
+ * - Promise means it's pending
+ * - 'failed' means it failed (this is the most common case, where the logger isn't running)
+ * - WebSocket means it succeeded
+ */
+let connection: Promise<WebSocket | 'failed'> | WebSocket | 'failed' | 'uninitialized' =
+ 'uninitialized';
+
+/**
+ * Log a string to a websocket at `localhost:59497`. See `tools/websocket-logger`.
+ *
+ * This does nothing if a connection couldn't be established on the first call.
+ */
+export function logToWebsocket(msg: string) {
+ if (connection === 'failed') {
+ return;
+ }
+
+ if (connection === 'uninitialized') {
+ connection = new Promise(resolve => {
+ if (typeof WebSocket === 'undefined') {
+ resolve('failed');
+ return;
+ }
+
+ const ws = new WebSocket('ws://localhost:59497/optional_cts_websocket_logger');
+ ws.onopen = () => {
+ resolve(ws);
+ };
+ ws.onerror = () => {
+ connection = 'failed';
+ resolve('failed');
+ };
+ ws.onclose = () => {
+ connection = 'failed';
+ resolve('failed');
+ };
+ });
+ void connection.then(resolved => {
+ connection = resolved;
+ });
+ }
+
+ void (async () => {
+ // connection may be a promise or a value here. Either is OK to await.
+ const ws = await connection;
+ if (ws !== 'failed') {
+ ws.send(msg);
+ }
+ })();
+}
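A hedged usage sketch (the message shape is an assumption, not something this module prescribes; it only ships strings): callers typically JSON-encode an event, and the fail-once-then-no-op behavior covers the common case where no logger is listening on localhost:59497.

  import { logToWebsocket } from './websocket_logger.js'; // path assumes a sibling module under common/internal/

  // Hypothetical helper: serialize a per-case event; if the first connection
  // attempt failed, logToWebsocket silently drops the message.
  function reportCaseFinished(name: string, status: string, timems: number): void {
    logToWebsocket(JSON.stringify({ event: 'case_finished', name, status, timems }));
  }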
diff --git a/dom/webgpu/tests/cts/checkout/src/common/runtime/cmdline.ts b/dom/webgpu/tests/cts/checkout/src/common/runtime/cmdline.ts
new file mode 100644
index 0000000000..44a73fb38b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/runtime/cmdline.ts
@@ -0,0 +1,286 @@
+/* eslint no-console: "off" */
+
+import * as fs from 'fs';
+
+import { dataCache } from '../framework/data_cache.js';
+import { globalTestConfig } from '../framework/test_config.js';
+import { DefaultTestFileLoader } from '../internal/file_loader.js';
+import { prettyPrintLog } from '../internal/logging/log_message.js';
+import { Logger } from '../internal/logging/logger.js';
+import { LiveTestCaseResult } from '../internal/logging/result.js';
+import { parseQuery } from '../internal/query/parseQuery.js';
+import { parseExpectationsForTestQuery } from '../internal/query/query.js';
+import { Colors } from '../util/colors.js';
+import { setDefaultRequestAdapterOptions, setGPUProvider } from '../util/navigator_gpu.js';
+import { assert, unreachable } from '../util/util.js';
+
+import sys from './helper/sys.js';
+
+function usage(rc: number): never {
+ console.log(`Usage:
+ tools/run_${sys.type} [OPTIONS...] QUERIES...
+ tools/run_${sys.type} 'unittests:*' 'webgpu:buffers,*'
+Options:
+ --colors Enable ANSI colors in output.
+ --compat Runs tests in compatibility mode.
+ --coverage Emit coverage data.
+ --verbose Print result/log of every test as it runs.
+ --list Print all testcase names that match the given query and exit.
+ --list-unimplemented Print all unimplemented testcase names that match the given query and exit.
+ --data Path to the data cache directory.
+ --debug Include debug messages in logging.
+ --print-json Print the complete result JSON in the output.
+ --expectations Path to expectations file.
+ --gpu-provider Path to node module that provides the GPU implementation.
+ --gpu-provider-flag Flag to set on the gpu-provider as <flag>=<value>
+ --unroll-const-eval-loops Unrolls loops in constant-evaluation shader execution tests
+ --quiet Suppress summary information in output
+`);
+ return sys.exit(rc);
+}
+
+// The interface that exposes creation of the GPU, and optional interface to code coverage.
+interface GPUProviderModule {
+ // @returns a GPU with the given flags
+ create(flags: string[]): GPU;
+ // An optional interface to a CodeCoverageProvider
+ coverage?: CodeCoverageProvider;
+}
+
+interface CodeCoverageProvider {
+ // Starts collecting code coverage
+ begin(): void;
+ // Ends collection of code coverage, returning the coverage data.
+ // This data is opaque (implementation defined).
+ end(): string;
+}
+
+type listModes = 'none' | 'cases' | 'unimplemented';
+
+Colors.enabled = false;
+
+let verbose = false;
+let emitCoverage = false;
+let listMode: listModes = 'none';
+let debug = false;
+let printJSON = false;
+let quiet = false;
+let loadWebGPUExpectations: Promise<unknown> | undefined = undefined;
+let gpuProviderModule: GPUProviderModule | undefined = undefined;
+let dataPath: string | undefined = undefined;
+
+const queries: string[] = [];
+const gpuProviderFlags: string[] = [];
+for (let i = 0; i < sys.args.length; ++i) {
+ const a = sys.args[i];
+ if (a.startsWith('-')) {
+ if (a === '--colors') {
+ Colors.enabled = true;
+ } else if (a === '--coverage') {
+ emitCoverage = true;
+ } else if (a === '--verbose') {
+ verbose = true;
+ } else if (a === '--list') {
+ listMode = 'cases';
+ } else if (a === '--list-unimplemented') {
+ listMode = 'unimplemented';
+ } else if (a === '--debug') {
+ debug = true;
+ } else if (a === '--data') {
+ dataPath = sys.args[++i];
+ } else if (a === '--print-json') {
+ printJSON = true;
+ } else if (a === '--expectations') {
+ const expectationsFile = new URL(sys.args[++i], `file://${sys.cwd()}`).pathname;
+ loadWebGPUExpectations = import(expectationsFile).then(m => m.expectations);
+ } else if (a === '--gpu-provider') {
+ const modulePath = sys.args[++i];
+ gpuProviderModule = require(modulePath);
+ } else if (a === '--gpu-provider-flag') {
+ gpuProviderFlags.push(sys.args[++i]);
+ } else if (a === '--quiet') {
+ quiet = true;
+ } else if (a === '--unroll-const-eval-loops') {
+ globalTestConfig.unrollConstEvalLoops = true;
+ } else if (a === '--compat') {
+ globalTestConfig.compatibility = true;
+ } else {
+ console.log('unrecognized flag: ', a);
+ usage(1);
+ }
+ } else {
+ queries.push(a);
+ }
+}
+
+let codeCoverage: CodeCoverageProvider | undefined = undefined;
+
+if (globalTestConfig.compatibility) {
+ // MAINTENANCE_TODO: remove the cast once compatibilityMode is officially added
+ setDefaultRequestAdapterOptions({ compatibilityMode: true } as GPURequestAdapterOptions);
+}
+
+if (gpuProviderModule) {
+ setGPUProvider(() => gpuProviderModule!.create(gpuProviderFlags));
+ if (emitCoverage) {
+ codeCoverage = gpuProviderModule.coverage;
+ if (codeCoverage === undefined) {
+ console.error(
+ `--coverage specified, but the GPUProviderModule does not support code coverage.
+Did you remember to build with code coverage instrumentation enabled?`
+ );
+ sys.exit(1);
+ }
+ }
+}
+
+if (dataPath !== undefined) {
+ dataCache.setStore({
+ load: (path: string) => {
+ return new Promise<Uint8Array>((resolve, reject) => {
+ fs.readFile(`${dataPath}/${path}`, (err, data) => {
+ if (err !== null) {
+ reject(err.message);
+ } else {
+ resolve(data);
+ }
+ });
+ });
+ },
+ });
+}
+if (verbose) {
+ dataCache.setDebugLogger(console.log);
+}
+
+if (queries.length === 0) {
+ console.log('no queries specified');
+ usage(0);
+}
+
+(async () => {
+ const loader = new DefaultTestFileLoader();
+ assert(queries.length === 1, 'currently, there must be exactly one query on the cmd line');
+ const filterQuery = parseQuery(queries[0]);
+ const testcases = await loader.loadCases(filterQuery);
+ const expectations = parseExpectationsForTestQuery(
+ await (loadWebGPUExpectations ?? []),
+ filterQuery
+ );
+
+ Logger.globalDebugMode = debug;
+ const log = new Logger();
+
+ const failed: Array<[string, LiveTestCaseResult]> = [];
+ const warned: Array<[string, LiveTestCaseResult]> = [];
+ const skipped: Array<[string, LiveTestCaseResult]> = [];
+
+ let total = 0;
+
+ if (codeCoverage !== undefined) {
+ codeCoverage.begin();
+ }
+
+ for (const testcase of testcases) {
+ const name = testcase.query.toString();
+ switch (listMode) {
+ case 'cases':
+ console.log(name);
+ continue;
+ case 'unimplemented':
+ if (testcase.isUnimplemented) {
+ console.log(name);
+ }
+ continue;
+ default:
+ break;
+ }
+
+ const [rec, res] = log.record(name);
+ await testcase.run(rec, expectations);
+
+ if (verbose) {
+ printResults([[name, res]]);
+ }
+
+ total++;
+ switch (res.status) {
+ case 'pass':
+ break;
+ case 'fail':
+ failed.push([name, res]);
+ break;
+ case 'warn':
+ warned.push([name, res]);
+ break;
+ case 'skip':
+ skipped.push([name, res]);
+ break;
+ default:
+ unreachable('unrecognized status');
+ }
+ }
+
+ if (codeCoverage !== undefined) {
+ const coverage = codeCoverage.end();
+ console.log(`Code-coverage: [[${coverage}]]`);
+ }
+
+ if (listMode !== 'none') {
+ return;
+ }
+
+ assert(total > 0, 'found no tests!');
+
+ // MAINTENANCE_TODO: write results out somewhere (a file?)
+ if (printJSON) {
+ console.log(log.asJSON(2));
+ }
+
+ if (!quiet) {
+ if (skipped.length) {
+ console.log('');
+ console.log('** Skipped **');
+ printResults(skipped);
+ }
+ if (warned.length) {
+ console.log('');
+ console.log('** Warnings **');
+ printResults(warned);
+ }
+ if (failed.length) {
+ console.log('');
+ console.log('** Failures **');
+ printResults(failed);
+ }
+
+ const passed = total - warned.length - failed.length - skipped.length;
+ const pct = (x: number) => ((100 * x) / total).toFixed(2);
+ const rpt = (x: number) => {
+ const xs = x.toString().padStart(1 + Math.log10(total), ' ');
+ return `${xs} / ${total} = ${pct(x).padStart(6, ' ')}%`;
+ };
+ console.log('');
+ console.log(`** Summary **
+Passed w/o warnings = ${rpt(passed)}
+Passed with warnings = ${rpt(warned.length)}
+Skipped = ${rpt(skipped.length)}
+Failed = ${rpt(failed.length)}`);
+ }
+
+ if (failed.length || warned.length) {
+ sys.exit(1);
+ }
+})().catch(ex => {
+ console.log(ex.stack ?? ex.toString());
+ sys.exit(1);
+});
+
+function printResults(results: Array<[string, LiveTestCaseResult]>): void {
+ for (const [name, r] of results) {
+ console.log(`[${r.status}] ${name} (${r.timems}ms). Log:`);
+ if (r.logs) {
+ for (const l of r.logs) {
+ console.log(prettyPrintLog(l));
+ }
+ }
+ }
+}
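For --gpu-provider, the module loaded via require() only needs to satisfy the GPUProviderModule interface declared above. A minimal sketch follows; the factory indirection is a hypothetical illustration, since how a real provider constructs a headless GPU is implementation-defined.

  // gpu_provider_stub.ts (hypothetical module passed as --gpu-provider)
  type GPUFactory = (flags: string[]) => GPU;

  let factory: GPUFactory | undefined;

  /** Lets the embedder register the real GPU factory before tests run. */
  export function setFactory(f: GPUFactory): void {
    factory = f;
  }

  /** Called by the runner with every --gpu-provider-flag value. */
  export function create(flags: string[]): GPU {
    if (factory === undefined) throw new Error('no GPU factory registered');
    return factory(flags);
  }

  /** Optional coverage hooks; only consulted when --coverage is passed. */
  export const coverage = {
    begin(): void {
      // start collecting code coverage in the underlying implementation
    },
    end(): string {
      return ''; // opaque, implementation-defined coverage blob
    },
  };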
diff --git a/dom/webgpu/tests/cts/checkout/src/common/runtime/helper/options.ts b/dom/webgpu/tests/cts/checkout/src/common/runtime/helper/options.ts
new file mode 100644
index 0000000000..38974b803f
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/runtime/helper/options.ts
@@ -0,0 +1,129 @@
+let windowURL: URL | undefined = undefined;
+function getWindowURL() {
+ if (windowURL === undefined) {
+ windowURL = new URL(window.location.toString());
+ }
+ return windowURL;
+}
+
+export function optionEnabled(
+ opt: string,
+ searchParams: URLSearchParams = getWindowURL().searchParams
+): boolean {
+ const val = searchParams.get(opt);
+ return val !== null && val !== '0';
+}
+
+export function optionString(
+ opt: string,
+ searchParams: URLSearchParams = getWindowURL().searchParams
+): string {
+ return searchParams.get(opt) || '';
+}
+
+/**
+ * The possible options for the tests.
+ */
+export interface CTSOptions {
+ worker: boolean;
+ debug: boolean;
+ compatibility: boolean;
+ unrollConstEvalLoops: boolean;
+ powerPreference?: GPUPowerPreference | '';
+}
+
+export const kDefaultCTSOptions: CTSOptions = {
+ worker: false,
+ debug: true,
+ compatibility: false,
+ unrollConstEvalLoops: false,
+ powerPreference: '',
+};
+
+/**
+ * Extra per option info.
+ */
+export interface OptionInfo {
+ description: string;
+ parser?: (key: string, searchParams?: URLSearchParams) => boolean | string;
+ selectValueDescriptions?: { value: string; description: string }[];
+}
+
+/**
+ * Type for info for every option. This definition means adding an option
+ * will generate a compile time error if no extra info is provided.
+ */
+export type OptionsInfos<Type> = Record<keyof Type, OptionInfo>;
+
+/**
+ * Options to the CTS.
+ */
+export const kCTSOptionsInfo: OptionsInfos<CTSOptions> = {
+ worker: { description: 'run in a worker' },
+ debug: { description: 'show more info' },
+ compatibility: { description: 'run in compatibility mode' },
+ unrollConstEvalLoops: { description: 'unroll const eval loops in WGSL' },
+ powerPreference: {
+ description: 'set default powerPreference for some tests',
+ parser: optionString,
+ selectValueDescriptions: [
+ { value: '', description: 'default' },
+ { value: 'low-power', description: 'low-power' },
+ { value: 'high-performance', description: 'high-performance' },
+ ],
+ },
+};
+
+/**
+ * Converts camel case to snake case.
+ * Examples:
+ * fooBar -> foo_bar
+ * parseHTMLFile -> parse_html_file
+ */
+export function camelCaseToSnakeCase(id: string) {
+ return id
+ .replace(/(.)([A-Z][a-z]+)/g, '$1_$2')
+ .replace(/([a-z0-9])([A-Z])/g, '$1_$2')
+ .toLowerCase();
+}
+
+/**
+ * Creates an options object of the given type from the search parameters.
+ */
+function getOptionsInfoFromSearchString<Type extends CTSOptions>(
+ optionsInfos: OptionsInfos<Type>,
+ searchString: string
+): Type {
+ const searchParams = new URLSearchParams(searchString);
+ const optionValues: Record<string, boolean | string> = {};
+ for (const [optionName, info] of Object.entries(optionsInfos)) {
+ const parser = info.parser || optionEnabled;
+ optionValues[optionName] = parser(camelCaseToSnakeCase(optionName), searchParams);
+ }
+ return optionValues as unknown as Type;
+}
+
+/**
+ * Given a test query string in the form of `suite:foo,bar,moo&opt1=val1&opt2=val2`,
+ * returns the queries and the options.
+ */
+export function parseSearchParamLikeWithOptions<Type extends CTSOptions>(
+ optionsInfos: OptionsInfos<Type>,
+ query: string
+): {
+ queries: string[];
+ options: Type;
+} {
+ const searchString = query.includes('q=') || query.startsWith('?') ? query : `q=${query}`;
+ const queries = new URLSearchParams(searchString).getAll('q');
+ const options = getOptionsInfoFromSearchString(optionsInfos, searchString);
+ return { queries, options };
+}
+
+/**
+ * Given a test query string in the form of `suite:foo,bar,moo&opt1=val1&opt2=val2`,
+ * returns the queries and the common CTS options.
+ */
+export function parseSearchParamLikeWithCTSOptions(query: string) {
+ return parseSearchParamLikeWithOptions(kCTSOptionsInfo, query);
+}
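An illustrative call (query and values assumed, not from a real run) showing how the helpers above split a search-param-like string into q= terms and parsed options. Note that absent boolean options parse to false here, independently of kDefaultCTSOptions.

  import { parseSearchParamLikeWithCTSOptions } from './helper/options.js'; // path assumes a caller under common/runtime/

  const { queries, options } = parseSearchParamLikeWithCTSOptions(
    'webgpu:api,operation,command_buffer,basic:*&worker=1&debug=0'
  );
  // queries -> ['webgpu:api,operation,command_buffer,basic:*']
  // options.worker -> true, options.debug -> false,
  // options.compatibility / options.unrollConstEvalLoops -> false, options.powerPreference -> ''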
diff --git a/dom/webgpu/tests/cts/checkout/src/common/runtime/helper/sys.ts b/dom/webgpu/tests/cts/checkout/src/common/runtime/helper/sys.ts
new file mode 100644
index 0000000000..d2e07ff26d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/runtime/helper/sys.ts
@@ -0,0 +1,46 @@
+/* eslint no-process-exit: "off" */
+/* eslint @typescript-eslint/no-namespace: "off" */
+
+function node() {
+ const { existsSync } = require('fs');
+
+ return {
+ type: 'node',
+ existsSync,
+ args: process.argv.slice(2),
+ cwd: () => process.cwd(),
+ exit: (code?: number | undefined) => process.exit(code),
+ };
+}
+
+declare global {
+ namespace Deno {
+ function readFileSync(path: string): Uint8Array;
+ const args: string[];
+ const cwd: () => string;
+ function exit(code?: number): never;
+ }
+}
+
+function deno() {
+ function existsSync(path: string) {
+ try {
+ Deno.readFileSync(path);
+ return true;
+ } catch (err) {
+ return false;
+ }
+ }
+
+ return {
+ type: 'deno',
+ existsSync,
+ args: Deno.args,
+ cwd: Deno.cwd,
+ exit: Deno.exit,
+ };
+}
+
+const sys = typeof globalThis.process !== 'undefined' ? node() : deno();
+
+export default sys;
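A hypothetical tool script using the shim, just to show the surface it exposes; the same code runs unchanged under Node or Deno because only sys.* is touched.

  import sys from './helper/sys.js'; // path assumes a caller under common/runtime/

  const [inputPath] = sys.args;
  if (inputPath === undefined || !sys.existsSync(inputPath)) {
    console.error(`usage (${sys.type} runtime): pass an existing file path`);
    sys.exit(1);
  }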
diff --git a/dom/webgpu/tests/cts/checkout/src/common/runtime/helper/test_worker-worker.ts b/dom/webgpu/tests/cts/checkout/src/common/runtime/helper/test_worker-worker.ts
new file mode 100644
index 0000000000..e8d187ea7e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/runtime/helper/test_worker-worker.ts
@@ -0,0 +1,48 @@
+import { setBaseResourcePath } from '../../framework/resources.js';
+import { globalTestConfig } from '../../framework/test_config.js';
+import { DefaultTestFileLoader } from '../../internal/file_loader.js';
+import { Logger } from '../../internal/logging/logger.js';
+import { parseQuery } from '../../internal/query/parseQuery.js';
+import { TestQueryWithExpectation } from '../../internal/query/query.js';
+import { setDefaultRequestAdapterOptions } from '../../util/navigator_gpu.js';
+import { assert } from '../../util/util.js';
+
+import { CTSOptions } from './options.js';
+
+// Should be DedicatedWorkerGlobalScope, but importing lib "webworker" conflicts with lib "dom".
+/* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+declare const self: any;
+
+const loader = new DefaultTestFileLoader();
+
+setBaseResourcePath('../../../resources');
+
+self.onmessage = async (ev: MessageEvent) => {
+ const query: string = ev.data.query;
+ const expectations: TestQueryWithExpectation[] = ev.data.expectations;
+ const ctsOptions: CTSOptions = ev.data.ctsOptions;
+
+ const { debug, unrollConstEvalLoops, powerPreference, compatibility } = ctsOptions;
+ globalTestConfig.unrollConstEvalLoops = unrollConstEvalLoops;
+ globalTestConfig.compatibility = compatibility;
+
+ Logger.globalDebugMode = debug;
+ const log = new Logger();
+
+ if (powerPreference || compatibility) {
+ setDefaultRequestAdapterOptions({
+ ...(powerPreference && { powerPreference }),
+ // MAINTENANCE_TODO: Change this to whatever the option ends up being
+ ...(compatibility && { compatibilityMode: true }),
+ });
+ }
+
+ const testcases = Array.from(await loader.loadCases(parseQuery(query)));
+ assert(testcases.length === 1, 'worker query resulted in != 1 cases');
+
+ const testcase = testcases[0];
+ const [rec, result] = log.record(testcase.query.toString());
+ await testcase.run(rec, expectations);
+
+ self.postMessage({ query, result });
+};
diff --git a/dom/webgpu/tests/cts/checkout/src/common/runtime/helper/test_worker.ts b/dom/webgpu/tests/cts/checkout/src/common/runtime/helper/test_worker.ts
new file mode 100644
index 0000000000..9bbcab0946
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/runtime/helper/test_worker.ts
@@ -0,0 +1,49 @@
+import { LogMessageWithStack } from '../../internal/logging/log_message.js';
+import { TransferredTestCaseResult, LiveTestCaseResult } from '../../internal/logging/result.js';
+import { TestCaseRecorder } from '../../internal/logging/test_case_recorder.js';
+import { TestQueryWithExpectation } from '../../internal/query/query.js';
+
+import { CTSOptions, kDefaultCTSOptions } from './options.js';
+
+export class TestWorker {
+ private readonly ctsOptions: CTSOptions;
+ private readonly worker: Worker;
+ private readonly resolvers = new Map<string, (result: LiveTestCaseResult) => void>();
+
+ constructor(ctsOptions?: CTSOptions) {
+ this.ctsOptions = { ...(ctsOptions || kDefaultCTSOptions), ...{ worker: true } };
+ const selfPath = import.meta.url;
+ const selfPathDir = selfPath.substring(0, selfPath.lastIndexOf('/'));
+ const workerPath = selfPathDir + '/test_worker-worker.js';
+ this.worker = new Worker(workerPath, { type: 'module' });
+ this.worker.onmessage = ev => {
+ const query: string = ev.data.query;
+ const result: TransferredTestCaseResult = ev.data.result;
+ if (result.logs) {
+ for (const l of result.logs) {
+ Object.setPrototypeOf(l, LogMessageWithStack.prototype);
+ }
+ }
+ this.resolvers.get(query)!(result as LiveTestCaseResult);
+
+ // MAINTENANCE_TODO(kainino0x): update the Logger with this result (or don't have a logger and
+ // update the entire results JSON somehow at some point).
+ };
+ }
+
+ async run(
+ rec: TestCaseRecorder,
+ query: string,
+ expectations: TestQueryWithExpectation[] = []
+ ): Promise<void> {
+ this.worker.postMessage({
+ query,
+ expectations,
+ ctsOptions: this.ctsOptions,
+ });
+ const workerResult = await new Promise<LiveTestCaseResult>(resolve => {
+ this.resolvers.set(query, resolve);
+ });
+ rec.injectResult(workerResult);
+ }
+}
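A sketch of how a runtime drives TestWorker, mirroring the wpt.ts and standalone.ts call sites later in this patch (placement of this snippet under common/runtime/ is assumed for the import paths): the worker posts back a TransferredTestCaseResult, and run() injects it into the recorder so the local Logger sees the final status.

  import { Logger } from '../internal/logging/logger.js';
  import { TestWorker } from './helper/test_worker.js';

  async function runOneCaseInWorker(query: string): Promise<void> {
    const log = new Logger();
    const worker = new TestWorker(); // defaults, with `worker: true` forced by the constructor
    const [rec, res] = log.record(query);
    await worker.run(rec, query);
    console.log(`[${res.status}] ${query} (${res.timems}ms)`);
  }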
diff --git a/dom/webgpu/tests/cts/checkout/src/common/runtime/server.ts b/dom/webgpu/tests/cts/checkout/src/common/runtime/server.ts
new file mode 100644
index 0000000000..8310784e3a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/runtime/server.ts
@@ -0,0 +1,236 @@
+/* eslint no-console: "off" */
+
+import * as fs from 'fs';
+import * as http from 'http';
+import { AddressInfo } from 'net';
+
+import { dataCache } from '../framework/data_cache.js';
+import { globalTestConfig } from '../framework/test_config.js';
+import { DefaultTestFileLoader } from '../internal/file_loader.js';
+import { prettyPrintLog } from '../internal/logging/log_message.js';
+import { Logger } from '../internal/logging/logger.js';
+import { LiveTestCaseResult, Status } from '../internal/logging/result.js';
+import { parseQuery } from '../internal/query/parseQuery.js';
+import { TestQueryWithExpectation } from '../internal/query/query.js';
+import { TestTreeLeaf } from '../internal/tree.js';
+import { Colors } from '../util/colors.js';
+import { setDefaultRequestAdapterOptions, setGPUProvider } from '../util/navigator_gpu.js';
+
+import sys from './helper/sys.js';
+
+function usage(rc: number): never {
+ console.log(`Usage:
+ tools/run_${sys.type} [OPTIONS...]
+Options:
+ --colors Enable ANSI colors in output.
+ --compat Run tests in compatibility mode.
+ --coverage Add coverage data to each result.
+ --data Path to the data cache directory.
+ --verbose Print result/log of every test as it runs.
+ --gpu-provider Path to node module that provides the GPU implementation.
+ --gpu-provider-flag Flag to set on the gpu-provider as <flag>=<value>
+ --unroll-const-eval-loops Unrolls loops in constant-evaluation shader execution tests
+ --u Flag to set on the gpu-provider as <flag>=<value>
+
+Provides an HTTP server used for running tests via an HTTP RPC interface.
+To run a test, perform an HTTP GET or POST at the URL:
+ http://localhost:port/run?<test-name>
+To shut down the server, perform an HTTP GET or POST at the URL:
+ http://localhost:port/terminate
+`);
+ return sys.exit(rc);
+}
+
+interface RunResult {
+ // The result of the test
+ status: Status;
+ // Any additional messages printed
+ message: string;
+ // Code coverage data, if the server was started with `--coverage`
+ // This data is opaque (implementation defined).
+ coverageData?: string;
+}
+
+// The interface that exposes creation of the GPU, and optional interface to code coverage.
+interface GPUProviderModule {
+ // @returns a GPU with the given flags
+ create(flags: string[]): GPU;
+ // An optional interface to a CodeCoverageProvider
+ coverage?: CodeCoverageProvider;
+}
+
+interface CodeCoverageProvider {
+ // Starts collecting code coverage
+ begin(): void;
+ // Ends collection of code coverage, returning the coverage data.
+ // This data is opaque (implementation defined).
+ end(): string;
+}
+
+if (!sys.existsSync('src/common/runtime/cmdline.ts')) {
+ console.log('Must be run from repository root');
+ usage(1);
+}
+
+Colors.enabled = false;
+
+let emitCoverage = false;
+let verbose = false;
+let gpuProviderModule: GPUProviderModule | undefined = undefined;
+let dataPath: string | undefined = undefined;
+
+const gpuProviderFlags: string[] = [];
+for (let i = 0; i < sys.args.length; ++i) {
+ const a = sys.args[i];
+ if (a.startsWith('-')) {
+ if (a === '--colors') {
+ Colors.enabled = true;
+ } else if (a === '--compat') {
+ globalTestConfig.compatibility = true;
+ } else if (a === '--coverage') {
+ emitCoverage = true;
+ } else if (a === '--data') {
+ dataPath = sys.args[++i];
+ } else if (a === '--gpu-provider') {
+ const modulePath = sys.args[++i];
+ gpuProviderModule = require(modulePath);
+ } else if (a === '--gpu-provider-flag') {
+ gpuProviderFlags.push(sys.args[++i]);
+ } else if (a === '--unroll-const-eval-loops') {
+ globalTestConfig.unrollConstEvalLoops = true;
+ } else if (a === '--help') {
+ usage(1);
+ } else if (a === '--verbose') {
+ verbose = true;
+ } else {
+ console.log(`unrecognized flag: ${a}`);
+ }
+ }
+}
+
+let codeCoverage: CodeCoverageProvider | undefined = undefined;
+
+if (globalTestConfig.compatibility) {
+ // MAINTENANCE_TODO: remove the cast once compatibilityMode is officially added
+ setDefaultRequestAdapterOptions({ compatibilityMode: true } as GPURequestAdapterOptions);
+}
+
+if (gpuProviderModule) {
+ setGPUProvider(() => gpuProviderModule!.create(gpuProviderFlags));
+
+ if (emitCoverage) {
+ codeCoverage = gpuProviderModule.coverage;
+ if (codeCoverage === undefined) {
+ console.error(
+ `--coverage specified, but the GPUProviderModule does not support code coverage.
+Did you remember to build with code coverage instrumentation enabled?`
+ );
+ sys.exit(1);
+ }
+ }
+}
+
+if (dataPath !== undefined) {
+ dataCache.setStore({
+ load: (path: string) => {
+ return new Promise<Uint8Array>((resolve, reject) => {
+ fs.readFile(`${dataPath}/${path}`, (err, data) => {
+ if (err !== null) {
+ reject(err.message);
+ } else {
+ resolve(data);
+ }
+ });
+ });
+ },
+ });
+}
+if (verbose) {
+ dataCache.setDebugLogger(console.log);
+}
+
+// eslint-disable-next-line @typescript-eslint/require-await
+(async () => {
+ Logger.globalDebugMode = verbose;
+ const log = new Logger();
+ const testcases = new Map<string, TestTreeLeaf>();
+
+ async function runTestcase(
+ testcase: TestTreeLeaf,
+ expectations: TestQueryWithExpectation[] = []
+ ): Promise<LiveTestCaseResult> {
+ const name = testcase.query.toString();
+ const [rec, res] = log.record(name);
+ await testcase.run(rec, expectations);
+ return res;
+ }
+
+ const server = http.createServer(
+ async (request: http.IncomingMessage, response: http.ServerResponse) => {
+ if (request.url === undefined) {
+ response.end('invalid url');
+ return;
+ }
+
+ const loadCasesPrefix = '/load?';
+ const runPrefix = '/run?';
+ const terminatePrefix = '/terminate';
+
+ if (request.url.startsWith(loadCasesPrefix)) {
+ const query = request.url.substr(loadCasesPrefix.length);
+ try {
+ const webgpuQuery = parseQuery(query);
+ const loader = new DefaultTestFileLoader();
+ for (const testcase of await loader.loadCases(webgpuQuery)) {
+ testcases.set(testcase.query.toString(), testcase);
+ }
+ response.statusCode = 200;
+ response.end();
+ } catch (err) {
+ response.statusCode = 500;
+ response.end(`load failed with error: ${err}\n${(err as Error).stack}`);
+ }
+ } else if (request.url.startsWith(runPrefix)) {
+ const name = request.url.substr(runPrefix.length);
+ try {
+ const testcase = testcases.get(name);
+ if (testcase) {
+ if (codeCoverage !== undefined) {
+ codeCoverage.begin();
+ }
+ const result = await runTestcase(testcase);
+ const coverageData = codeCoverage !== undefined ? codeCoverage.end() : undefined;
+ let message = '';
+ if (result.logs !== undefined) {
+ message = result.logs.map(log => prettyPrintLog(log)).join('\n');
+ }
+ const status = result.status;
+ const res: RunResult = { status, message, coverageData };
+ response.statusCode = 200;
+ response.end(JSON.stringify(res));
+ } else {
+ response.statusCode = 404;
+ response.end(`test case '${name}' not found`);
+ }
+ } catch (err) {
+ response.statusCode = 500;
+ response.end(`run failed with error: ${err}`);
+ }
+ } else if (request.url.startsWith(terminatePrefix)) {
+ server.close();
+ sys.exit(1);
+ } else {
+ response.statusCode = 404;
+ response.end('unhandled url request');
+ }
+ }
+ );
+
+ server.listen(0, () => {
+ const address = server.address() as AddressInfo;
+ console.log(`Server listening at [[${address.port}]]`);
+ });
+})().catch(ex => {
+ console.error(ex.stack ?? ex.toString());
+ sys.exit(1);
+});
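A hypothetical client for the HTTP RPC surface above (Node 18+ global fetch assumed; the port is read from the "Server listening at [[port]]" line). The name passed to /run? must be exactly the canonical single-case query string that /load? registered.

  // Mirrors the RunResult shape returned by the /run? endpoint above.
  interface RunResult {
    status: string;
    message: string;
    coverageData?: string;
  }

  async function runCaseViaServer(port: number, name: string): Promise<RunResult> {
    const base = `http://localhost:${port}`;
    const load = await fetch(`${base}/load?${name}`);
    if (!load.ok) throw new Error(await load.text());
    const run = await fetch(`${base}/run?${name}`);
    if (!run.ok) throw new Error(await run.text());
    return (await run.json()) as RunResult;
  }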
diff --git a/dom/webgpu/tests/cts/checkout/src/common/runtime/standalone.ts b/dom/webgpu/tests/cts/checkout/src/common/runtime/standalone.ts
new file mode 100644
index 0000000000..0376f92dda
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/runtime/standalone.ts
@@ -0,0 +1,679 @@
+// Implements the standalone test runner (see also: /standalone/index.html).
+/* eslint no-console: "off" */
+
+import { dataCache } from '../framework/data_cache.js';
+import { setBaseResourcePath } from '../framework/resources.js';
+import { globalTestConfig } from '../framework/test_config.js';
+import { DefaultTestFileLoader } from '../internal/file_loader.js';
+import { Logger } from '../internal/logging/logger.js';
+import { LiveTestCaseResult } from '../internal/logging/result.js';
+import { parseQuery } from '../internal/query/parseQuery.js';
+import { TestQueryLevel } from '../internal/query/query.js';
+import { TestTreeNode, TestSubtree, TestTreeLeaf, TestTree } from '../internal/tree.js';
+import { setDefaultRequestAdapterOptions } from '../util/navigator_gpu.js';
+import { ErrorWithExtra, unreachable } from '../util/util.js';
+
+import {
+ kCTSOptionsInfo,
+ parseSearchParamLikeWithOptions,
+ CTSOptions,
+ OptionInfo,
+ OptionsInfos,
+ camelCaseToSnakeCase,
+} from './helper/options.js';
+import { TestWorker } from './helper/test_worker.js';
+
+const rootQuerySpec = 'webgpu:*';
+let promptBeforeReload = false;
+let isFullCTS = false;
+
+globalTestConfig.frameworkDebugLog = console.log;
+
+window.onbeforeunload = () => {
+ // Prompt user before reloading if there are any results
+ return promptBeforeReload ? false : undefined;
+};
+
+const kOpenTestLinkAltText = 'Open';
+
+type StandaloneOptions = CTSOptions & { runnow: OptionInfo };
+
+const kStandaloneOptionsInfos: OptionsInfos<StandaloneOptions> = {
+ ...kCTSOptionsInfo,
+ runnow: { description: 'run immediately on load' },
+};
+
+const { queries: qs, options } = parseSearchParamLikeWithOptions(
+ kStandaloneOptionsInfos,
+ window.location.search || rootQuerySpec
+);
+const { runnow, debug, unrollConstEvalLoops, powerPreference, compatibility } = options;
+globalTestConfig.unrollConstEvalLoops = unrollConstEvalLoops;
+globalTestConfig.compatibility = compatibility;
+
+Logger.globalDebugMode = debug;
+const logger = new Logger();
+
+setBaseResourcePath('../out/resources');
+
+const worker = options.worker ? new TestWorker(options) : undefined;
+
+const autoCloseOnPass = document.getElementById('autoCloseOnPass') as HTMLInputElement;
+const resultsVis = document.getElementById('resultsVis')!;
+const progressElem = document.getElementById('progress')!;
+const progressTestNameElem = progressElem.querySelector('.progress-test-name')!;
+const stopButtonElem = progressElem.querySelector('button')!;
+let runDepth = 0;
+let stopRequested = false;
+
+stopButtonElem.addEventListener('click', () => {
+ stopRequested = true;
+});
+
+if (powerPreference || compatibility) {
+ setDefaultRequestAdapterOptions({
+ ...(powerPreference && { powerPreference }),
+ // MAINTENANCE_TODO: Change this to whatever the option ends up being
+ ...(compatibility && { compatibilityMode: true }),
+ });
+}
+
+dataCache.setStore({
+ load: async (path: string) => {
+ const response = await fetch(`data/${path}`);
+ if (!response.ok) {
+ return Promise.reject(response.statusText);
+ }
+ return new Uint8Array(await response.arrayBuffer());
+ },
+});
+
+interface SubtreeResult {
+ pass: number;
+ fail: number;
+ warn: number;
+ skip: number;
+ total: number;
+ timems: number;
+}
+
+function emptySubtreeResult() {
+ return { pass: 0, fail: 0, warn: 0, skip: 0, total: 0, timems: 0 };
+}
+
+function mergeSubtreeResults(...results: SubtreeResult[]) {
+ const target = emptySubtreeResult();
+ for (const result of results) {
+ target.pass += result.pass;
+ target.fail += result.fail;
+ target.warn += result.warn;
+ target.skip += result.skip;
+ target.total += result.total;
+ target.timems += result.timems;
+ }
+ return target;
+}
+
+type SetCheckedRecursively = () => void;
+type GenerateSubtreeHTML = (parent: HTMLElement) => SetCheckedRecursively;
+type RunSubtree = () => Promise<SubtreeResult>;
+
+interface VisualizedSubtree {
+ generateSubtreeHTML: GenerateSubtreeHTML;
+ runSubtree: RunSubtree;
+}
+
+// DOM generation
+
+function memoize<T>(fn: () => T): () => T {
+ let value: T | undefined;
+ return () => {
+ if (value === undefined) {
+ value = fn();
+ }
+ return value;
+ };
+}
+
+function makeTreeNodeHTML(tree: TestTreeNode, parentLevel: TestQueryLevel): VisualizedSubtree {
+ let subtree: VisualizedSubtree;
+
+ if ('children' in tree) {
+ subtree = makeSubtreeHTML(tree, parentLevel);
+ } else {
+ subtree = makeCaseHTML(tree);
+ }
+
+ const generateMyHTML = (parentElement: HTMLElement) => {
+ const div = $('<div>').appendTo(parentElement)[0];
+ return subtree.generateSubtreeHTML(div);
+ };
+ return { runSubtree: subtree.runSubtree, generateSubtreeHTML: generateMyHTML };
+}
+
+function makeCaseHTML(t: TestTreeLeaf): VisualizedSubtree {
+ // Becomes set once the case has been run once.
+ let caseResult: LiveTestCaseResult | undefined;
+
+ // Becomes set once the DOM for this case exists.
+ let clearRenderedResult: (() => void) | undefined;
+ let updateRenderedResult: (() => void) | undefined;
+
+ const name = t.query.toString();
+ const runSubtree = async () => {
+ if (clearRenderedResult) clearRenderedResult();
+
+ const result: SubtreeResult = emptySubtreeResult();
+ progressTestNameElem.textContent = name;
+
+ const [rec, res] = logger.record(name);
+ caseResult = res;
+ if (worker) {
+ await worker.run(rec, name);
+ } else {
+ await t.run(rec);
+ }
+
+ result.total++;
+ result.timems += caseResult.timems;
+ switch (caseResult.status) {
+ case 'pass':
+ result.pass++;
+ break;
+ case 'fail':
+ result.fail++;
+ break;
+ case 'skip':
+ result.skip++;
+ break;
+ case 'warn':
+ result.warn++;
+ break;
+ default:
+ unreachable();
+ }
+
+ if (updateRenderedResult) updateRenderedResult();
+
+ return result;
+ };
+
+ const generateSubtreeHTML = (div: HTMLElement) => {
+ div.classList.add('testcase');
+
+ const caselogs = $('<div>').addClass('testcaselogs').hide();
+ const [casehead, setChecked] = makeTreeNodeHeaderHTML(t, runSubtree, 2, checked => {
+ checked ? caselogs.show() : caselogs.hide();
+ });
+ const casetime = $('<div>').addClass('testcasetime').html('ms').appendTo(casehead);
+ div.appendChild(casehead);
+ div.appendChild(caselogs[0]);
+
+ clearRenderedResult = () => {
+ div.removeAttribute('data-status');
+ casetime.text('ms');
+ caselogs.empty();
+ };
+
+ updateRenderedResult = () => {
+ if (caseResult) {
+ div.setAttribute('data-status', caseResult.status);
+
+ casetime.text(caseResult.timems.toFixed(4) + ' ms');
+
+ if (caseResult.logs) {
+ caselogs.empty();
+ for (const l of caseResult.logs) {
+ const caselog = $('<div>').addClass('testcaselog').appendTo(caselogs);
+ $('<button>')
+ .addClass('testcaselogbtn')
+ .attr('alt', 'Log stack to console')
+ .attr('title', 'Log stack to console')
+ .appendTo(caselog)
+ .on('click', () => {
+ consoleLogError(l);
+ });
+ $('<pre>').addClass('testcaselogtext').appendTo(caselog).text(l.toJSON());
+ }
+ }
+ }
+ };
+
+ updateRenderedResult();
+
+ return setChecked;
+ };
+
+ return { runSubtree, generateSubtreeHTML };
+}
+
+function makeSubtreeHTML(n: TestSubtree, parentLevel: TestQueryLevel): VisualizedSubtree {
+ let subtreeResult: SubtreeResult = emptySubtreeResult();
+ // Becomes set once the DOM for this case exists.
+ let clearRenderedResult: (() => void) | undefined;
+ let updateRenderedResult: (() => void) | undefined;
+
+ const { runSubtree, generateSubtreeHTML } = makeSubtreeChildrenHTML(
+ n.children.values(),
+ n.query.level
+ );
+
+ const runMySubtree = async () => {
+ if (runDepth === 0) {
+ stopRequested = false;
+ progressElem.style.display = '';
+ // only prompt if this is the full CTS and we started from the root.
+ if (isFullCTS && n.query.filePathParts.length === 0) {
+ promptBeforeReload = true;
+ }
+ }
+ if (stopRequested) {
+ const result = emptySubtreeResult();
+ result.skip = 1;
+ result.total = 1;
+ return result;
+ }
+
+ ++runDepth;
+
+ if (clearRenderedResult) clearRenderedResult();
+ subtreeResult = await runSubtree();
+ if (updateRenderedResult) updateRenderedResult();
+
+ --runDepth;
+ if (runDepth === 0) {
+ progressElem.style.display = 'none';
+ }
+
+ return subtreeResult;
+ };
+
+ const generateMyHTML = (div: HTMLElement) => {
+ const subtreeHTML = $('<div>').addClass('subtreechildren');
+ const generateSubtree = memoize(() => generateSubtreeHTML(subtreeHTML[0]));
+
+ // Hide subtree - it's not generated yet.
+ subtreeHTML.hide();
+ const [header, setChecked] = makeTreeNodeHeaderHTML(n, runMySubtree, parentLevel, checked => {
+ if (checked) {
+ // Make sure the subtree is generated and then show it.
+ generateSubtree();
+ subtreeHTML.show();
+ } else {
+ subtreeHTML.hide();
+ }
+ });
+
+ div.classList.add('subtree');
+ div.classList.add(['', 'multifile', 'multitest', 'multicase'][n.query.level]);
+ div.appendChild(header);
+ div.appendChild(subtreeHTML[0]);
+
+ clearRenderedResult = () => {
+ div.removeAttribute('data-status');
+ };
+
+ updateRenderedResult = () => {
+ let status = '';
+ if (subtreeResult.pass > 0) {
+ status += 'pass';
+ }
+ if (subtreeResult.fail > 0) {
+ status += 'fail';
+ }
+ if (subtreeResult.skip === subtreeResult.total && subtreeResult.total > 0) {
+ status += 'skip';
+ }
+ div.setAttribute('data-status', status);
+ if (autoCloseOnPass.checked && status === 'pass') {
+ div.firstElementChild!.removeAttribute('open');
+ }
+ };
+
+ updateRenderedResult();
+
+ return () => {
+ setChecked();
+ const setChildrenChecked = generateSubtree();
+ setChildrenChecked();
+ };
+ };
+
+ return { runSubtree: runMySubtree, generateSubtreeHTML: generateMyHTML };
+}
+
+function makeSubtreeChildrenHTML(
+ children: Iterable<TestTreeNode>,
+ parentLevel: TestQueryLevel
+): VisualizedSubtree {
+ const childFns = Array.from(children, subtree => makeTreeNodeHTML(subtree, parentLevel));
+
+ const runMySubtree = async () => {
+ const results: SubtreeResult[] = [];
+ for (const { runSubtree } of childFns) {
+ results.push(await runSubtree());
+ }
+ return mergeSubtreeResults(...results);
+ };
+ const generateMyHTML = (div: HTMLElement) => {
+ const setChildrenChecked = Array.from(childFns, ({ generateSubtreeHTML }) =>
+ generateSubtreeHTML(div)
+ );
+
+ return () => {
+ for (const setChildChecked of setChildrenChecked) {
+ setChildChecked();
+ }
+ };
+ };
+
+ return { runSubtree: runMySubtree, generateSubtreeHTML: generateMyHTML };
+}
+
+function consoleLogError(e: Error | ErrorWithExtra | undefined) {
+ if (e === undefined) return;
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ (globalThis as any)._stack = e;
+ console.log('_stack =', e);
+ if ('extra' in e && e.extra !== undefined) {
+ console.log('_stack.extra =', e.extra);
+ }
+}
+
+function makeTreeNodeHeaderHTML(
+ n: TestTreeNode,
+ runSubtree: RunSubtree,
+ parentLevel: TestQueryLevel,
+ onChange: (checked: boolean) => void
+): [HTMLElement, SetCheckedRecursively] {
+ const isLeaf = 'run' in n;
+ const div = $('<details>').addClass('nodeheader');
+ const header = $('<summary>').appendTo(div);
+
+ // prevent toggling if user is selecting text from an input element
+ {
+ let lastNodeName = '';
+ div.on('pointerdown', event => {
+ lastNodeName = event.target.nodeName;
+ });
+ div.on('click', event => {
+ if (lastNodeName === 'INPUT') {
+ event.preventDefault();
+ }
+ });
+ }
+
+ const setChecked = () => {
+ div.prop('open', true); // (does not fire onChange)
+ onChange(true);
+ };
+
+ const href = createSearchQuery([n.query.toString()]);
+ if (onChange) {
+ div.on('toggle', function (this) {
+ onChange((this as HTMLDetailsElement).open);
+ });
+
+ // Expand the shallower parts of the tree at load.
+ // Also expand completely within subtrees that are at the same query level
+ // (e.g. s:f:t,* and s:f:t,t,*).
+ if (n.query.level <= lastQueryLevelToExpand || n.query.level === parentLevel) {
+ setChecked();
+ }
+ }
+ const runtext = isLeaf ? 'Run case' : 'Run subtree';
+ $('<button>')
+ .addClass(isLeaf ? 'leafrun' : 'subtreerun')
+ .attr('alt', runtext)
+ .attr('title', runtext)
+ .on('click', async () => {
+ if (runDepth > 0) {
+ showInfo('tests are already running');
+ return;
+ }
+ showInfo('');
+ console.log(`Starting run for ${n.query}`);
+ // turn off all run buttons
+ $('#resultsVis').addClass('disable-run');
+ const startTime = performance.now();
+ await runSubtree();
+ const dt = performance.now() - startTime;
+ const dtMinutes = dt / 1000 / 60;
+ // turn on all run buttons
+ $('#resultsVis').removeClass('disable-run');
+ console.log(`Finished run: ${dt.toFixed(1)} ms = ${dtMinutes.toFixed(1)} min`);
+ })
+ .appendTo(header);
+ $('<a>')
+ .addClass('nodelink')
+ .attr('href', href)
+ .attr('alt', kOpenTestLinkAltText)
+ .attr('title', kOpenTestLinkAltText)
+ .appendTo(header);
+ $('<button>')
+ .addClass('copybtn')
+ .attr('alt', 'copy query')
+ .attr('title', 'copy query')
+ .on('click', () => {
+ void navigator.clipboard.writeText(n.query.toString());
+ })
+ .appendTo(header);
+ if ('testCreationStack' in n && n.testCreationStack) {
+ $('<button>')
+ .addClass('testcaselogbtn')
+ .attr('alt', 'Log test creation stack to console')
+ .attr('title', 'Log test creation stack to console')
+ .appendTo(header)
+ .on('click', () => {
+ consoleLogError(n.testCreationStack);
+ });
+ }
+ const nodetitle = $('<div>').addClass('nodetitle').appendTo(header);
+ const nodecolumns = $('<span>').addClass('nodecolumns').appendTo(nodetitle);
+ {
+ $('<input>')
+ .attr('type', 'text')
+ .prop('readonly', true)
+ .addClass('nodequery')
+ .on('click', event => {
+ (event.target as HTMLInputElement).select();
+ })
+ .val(n.query.toString())
+ .appendTo(nodecolumns);
+ if (n.subtreeCounts) {
+ $('<span>')
+ .attr('title', '(Nodes with TODOs) / (Total test count)')
+ .text(TestTree.countsToString(n))
+ .appendTo(nodecolumns);
+ }
+ }
+ if ('description' in n && n.description) {
+ nodetitle.append('&nbsp;');
+ $('<pre>') //
+ .addClass('nodedescription')
+ .text(n.description)
+ .appendTo(header);
+ }
+ return [div[0], setChecked];
+}
+
+// Collapse s:f:t:* or s:f:t:c by default.
+let lastQueryLevelToExpand: TestQueryLevel = 2;
+
+type ParamValue = string | undefined | null | boolean | string[];
+
+/**
+ * Takes a [key, ParamValue] pair and returns an array of [key, value] pairs
+ * where each value is a string. Converts booleans to '0' or '1'.
+ */
+function keyValueToPairs([k, v]: [string, ParamValue]): [string, string][] {
+ const key = camelCaseToSnakeCase(k);
+ if (typeof v === 'boolean') {
+ return [[key, v ? '1' : '0']];
+ } else if (Array.isArray(v)) {
+ return v.map(v => [key, v]);
+ } else {
+ return [[key, v!.toString()]];
+ }
+}
+
+/**
+ * Converts key value pairs to a search string.
+ * Keys will appear in order in the search string.
+ * Values can be undefined, null, boolean, string, or string[].
+ * If a value is falsy, its key will not appear in the search string.
+ * If a value is an array, its key will appear multiple times.
+ *
+ * @param params Some object with key value pairs.
+ * @returns a search string.
+ */
+function prepareParams(params: Record<string, ParamValue>): string {
+ const pairsArrays = Object.entries(params)
+ .filter(([, v]) => !!v)
+ .map(keyValueToPairs);
+ const pairs = pairsArrays.flat();
+ return new URLSearchParams(pairs).toString();
+}
+
+// This is just a cast in one place.
+export function optionsToRecord(options: CTSOptions) {
+ return options as unknown as Record<string, boolean | string>;
+}
+
+/**
+ * Given an array of queries, generates a search parameter string.
+ * @param queries array of queries
+ * @param params an optional existing search parameter string
+ * @returns a search query string
+ */
+function createSearchQuery(queries: string[], params?: string) {
+ params = params === undefined ? prepareParams(optionsToRecord(options)) : params;
+ // Add in q separately to avoid escaping punctuation marks.
+ return `?${params}${params ? '&' : ''}${queries.map(q => 'q=' + q).join('&')}`;
+}
+
+/**
+ * Show an info message on the page.
+ * @param msg Message to show
+ */
+function showInfo(msg: string) {
+ $('#info')[0].textContent = msg;
+}
+
+void (async () => {
+ const loader = new DefaultTestFileLoader();
+
+ // MAINTENANCE_TODO: start populating page before waiting for everything to load?
+ isFullCTS = qs.length === 1 && qs[0] === rootQuerySpec;
+
+ // Update the URL bar to match the exact current options.
+ const updateURLsWithCurrentOptions = () => {
+ const params = prepareParams(optionsToRecord(options));
+ let url = `${window.location.origin}${window.location.pathname}`;
+ url += createSearchQuery(qs, params);
+ window.history.replaceState(null, '', url.toString());
+ document.querySelectorAll(`a[alt=${kOpenTestLinkAltText}]`).forEach(elem => {
+ const a = elem as HTMLAnchorElement;
+ const qs = new URLSearchParams(a.search).getAll('q');
+ a.search = createSearchQuery(qs, params);
+ });
+ };
+
+ const addOptionsToPage = (
+ options: StandaloneOptions,
+ optionsInfos: typeof kStandaloneOptionsInfos
+ ) => {
+ const optionsElem = $('table#options>tbody')[0];
+ const optionValues = optionsToRecord(options);
+
+ const createCheckbox = (optionName: string) => {
+ return $(`<input>`)
+ .attr('type', 'checkbox')
+ .prop('checked', optionValues[optionName] as boolean)
+ .on('change', function () {
+ optionValues[optionName] = (this as HTMLInputElement).checked;
+ updateURLsWithCurrentOptions();
+ });
+ };
+
+ const createSelect = (optionName: string, info: OptionInfo) => {
+ const select = $('<select>').on('change', function () {
+ optionValues[optionName] = (this as HTMLInputElement).value;
+ updateURLsWithCurrentOptions();
+ });
+ const currentValue = optionValues[optionName];
+ for (const { value, description } of info.selectValueDescriptions!) {
+ $('<option>')
+ .text(description)
+ .val(value)
+ .prop('selected', value === currentValue)
+ .appendTo(select);
+ }
+ return select;
+ };
+
+ for (const [optionName, info] of Object.entries(optionsInfos)) {
+ const input =
+ typeof optionValues[optionName] === 'boolean'
+ ? createCheckbox(optionName)
+ : createSelect(optionName, info);
+ $('<tr>')
+ .append($('<td>').append(input))
+ .append($('<td>').text(camelCaseToSnakeCase(optionName)))
+ .append($('<td>').text(info.description))
+ .appendTo(optionsElem);
+ }
+ };
+ addOptionsToPage(options, kStandaloneOptionsInfos);
+
+ if (qs.length !== 1) {
+ showInfo('currently, there must be exactly one ?q=');
+ return;
+ }
+
+ let rootQuery;
+ try {
+ rootQuery = parseQuery(qs[0]);
+ } catch (e) {
+ showInfo((e as Error).toString());
+ return;
+ }
+
+ if (rootQuery.level > lastQueryLevelToExpand) {
+ lastQueryLevelToExpand = rootQuery.level;
+ }
+ loader.addEventListener('import', ev => {
+ showInfo(`loading: ${ev.data.url}`);
+ });
+ loader.addEventListener('imported', ev => {
+ showInfo(`imported: ${ev.data.url}`);
+ });
+ loader.addEventListener('finish', () => {
+ showInfo('');
+ });
+
+ let tree;
+ try {
+ tree = await loader.loadTree(rootQuery);
+ } catch (err) {
+ showInfo((err as Error).toString());
+ return;
+ }
+
+ tree.dissolveSingleChildTrees();
+
+ const { runSubtree, generateSubtreeHTML } = makeSubtreeHTML(tree.root, 1);
+ const setTreeCheckedRecursively = generateSubtreeHTML(resultsVis);
+
+ document.getElementById('expandall')!.addEventListener('click', () => {
+ setTreeCheckedRecursively();
+ });
+
+ document.getElementById('copyResultsJSON')!.addEventListener('click', () => {
+ void navigator.clipboard.writeText(logger.asJSON(2));
+ });
+
+ if (runnow) {
+ void runSubtree();
+ }
+})();
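A small worked example of the URL format produced by prepareParams/createSearchQuery above (option values assumed): booleans serialize as '0'/'1', falsy options are dropped, and the q= terms are appended outside URLSearchParams so their punctuation stays unescaped.

  const params = new URLSearchParams([
    ['worker', '1'],
    ['power_preference', 'high-performance'],
  ]).toString();
  const url = `?${params}&q=webgpu:api,operation,*`;
  // url -> '?worker=1&power_preference=high-performance&q=webgpu:api,operation,*'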
diff --git a/dom/webgpu/tests/cts/checkout/src/common/runtime/wpt.ts b/dom/webgpu/tests/cts/checkout/src/common/runtime/wpt.ts
new file mode 100644
index 0000000000..d4a4008154
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/runtime/wpt.ts
@@ -0,0 +1,83 @@
+// Implements the wpt-embedded test runner (see also: wpt/cts.https.html).
+
+import { globalTestConfig } from '../framework/test_config.js';
+import { DefaultTestFileLoader } from '../internal/file_loader.js';
+import { prettyPrintLog } from '../internal/logging/log_message.js';
+import { Logger } from '../internal/logging/logger.js';
+import { parseQuery } from '../internal/query/parseQuery.js';
+import { parseExpectationsForTestQuery, relativeQueryString } from '../internal/query/query.js';
+import { assert } from '../util/util.js';
+
+import { optionEnabled } from './helper/options.js';
+import { TestWorker } from './helper/test_worker.js';
+
+// testharness.js API (https://web-platform-tests.org/writing-tests/testharness-api.html)
+declare interface WptTestObject {
+ step(f: () => void): void;
+ done(): void;
+}
+declare function setup(properties: { explicit_done?: boolean }): void;
+declare function promise_test(f: (t: WptTestObject) => Promise<void>, name: string): void;
+declare function done(): void;
+declare function assert_unreached(description: string): void;
+
+declare const loadWebGPUExpectations: Promise<unknown> | undefined;
+declare const shouldWebGPUCTSFailOnWarnings: Promise<boolean> | undefined;
+
+setup({
+ // It's convenient for us to asynchronously add tests to the page. Prevent done() from being
+ // called implicitly when the page is finished loading.
+ explicit_done: true,
+});
+
+void (async () => {
+ const workerEnabled = optionEnabled('worker');
+ const worker = workerEnabled ? new TestWorker() : undefined;
+
+ globalTestConfig.unrollConstEvalLoops = optionEnabled('unroll_const_eval_loops');
+
+ const failOnWarnings =
+ typeof shouldWebGPUCTSFailOnWarnings !== 'undefined' && (await shouldWebGPUCTSFailOnWarnings);
+
+ const loader = new DefaultTestFileLoader();
+ const qs = new URLSearchParams(window.location.search).getAll('q');
+ assert(qs.length === 1, 'currently, there must be exactly one ?q=');
+ const filterQuery = parseQuery(qs[0]);
+ const testcases = await loader.loadCases(filterQuery);
+
+ const expectations =
+ typeof loadWebGPUExpectations !== 'undefined'
+ ? parseExpectationsForTestQuery(
+ await loadWebGPUExpectations,
+ filterQuery,
+ new URL(window.location.href)
+ )
+ : [];
+
+ const log = new Logger();
+
+ for (const testcase of testcases) {
+ const name = testcase.query.toString();
+ // For brevity, display the case name "relative" to the ?q= path.
+ const shortName = relativeQueryString(filterQuery, testcase.query) || '(case)';
+
+ const wpt_fn = async () => {
+ const [rec, res] = log.record(name);
+ if (worker) {
+ await worker.run(rec, name, expectations);
+ } else {
+ await testcase.run(rec, expectations);
+ }
+
+ // Unfortunately, it doesn't seem possible to surface any logs for warn/skip.
+ if (res.status === 'fail' || (res.status === 'warn' && failOnWarnings)) {
+ const logs = (res.logs ?? []).map(prettyPrintLog);
+ assert_unreached('\n' + logs.join('\n') + '\n');
+ }
+ };
+
+ promise_test(wpt_fn, shortName);
+ }
+
+ done();
+})();
diff --git a/dom/webgpu/tests/cts/checkout/src/common/templates/cts.https.html b/dom/webgpu/tests/cts/checkout/src/common/templates/cts.https.html
new file mode 100644
index 0000000000..2961f0c3ee
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/templates/cts.https.html
@@ -0,0 +1,32 @@
+<!--
+ This test suite is built from the TypeScript sources at:
+ https://github.com/gpuweb/cts
+
+ If you are debugging WebGPU conformance tests, it's highly recommended that
+ you use the standalone interactive runner in that repository, which
+ provides tools for easier debugging and editing (source maps, debug
+ logging, warn/skip functionality, etc.)
+
+ NOTE:
+ The WPT version of this file is generated with *one variant per test spec
+ file*. If your harness needs more fine-grained suppressions, you'll need to
+ generate your own variants list from your suppression list.
+ See `tools/gen_wpt_cts_html` to do this.
+
+ When run under browser CI, the original cts.https.html should be skipped, and
+ this alternate version should be run instead, under a non-exported WPT test
+ directory (e.g. Chromium's wpt_internal).
+-->
+
+<!doctype html>
+<title>WebGPU CTS</title>
+<meta charset=utf-8>
+<link rel=help href='https://gpuweb.github.io/gpuweb/'>
+
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+ const loadWebGPUExpectations = undefined;
+ const shouldWebGPUCTSFailOnWarnings = undefined;
+</script>
+<script type=module src=/webgpu/common/runtime/wpt.js></script>
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/.eslintrc.json b/dom/webgpu/tests/cts/checkout/src/common/tools/.eslintrc.json
new file mode 100644
index 0000000000..e589f291bb
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/.eslintrc.json
@@ -0,0 +1,11 @@
+{
+ "parser": "@typescript-eslint/parser",
+ "parserOptions": { "project": "./tsconfig.json" },
+ "rules": {
+ "no-console": "off",
+ "no-process-exit": "off",
+ "node/no-unpublished-import": "off",
+ "node/no-unpublished-require": "off",
+ "@typescript-eslint/no-var-requires": "off"
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/checklist.ts b/dom/webgpu/tests/cts/checkout/src/common/tools/checklist.ts
new file mode 100644
index 0000000000..e301cfb2c8
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/checklist.ts
@@ -0,0 +1,136 @@
+import * as fs from 'fs';
+import * as process from 'process';
+
+import { DefaultTestFileLoader } from '../internal/file_loader.js';
+import { Ordering, compareQueries } from '../internal/query/compare.js';
+import { parseQuery } from '../internal/query/parseQuery.js';
+import { TestQuery, TestQueryMultiFile } from '../internal/query/query.js';
+import { loadTreeForQuery, TestTree } from '../internal/tree.js';
+import { StacklessError } from '../internal/util.js';
+import { assert } from '../util/util.js';
+
+function usage(rc: number): void {
+ console.error('Usage:');
+ console.error(' tools/checklist FILE');
+ console.error(' tools/checklist my/list.txt');
+ process.exit(rc);
+}
+
+if (process.argv.length === 2) usage(0);
+if (process.argv.length !== 3) usage(1);
+
+type QueryInSuite = { readonly query: TestQuery; readonly done: boolean };
+type QueriesInSuite = QueryInSuite[];
+type QueriesBySuite = Map<string, QueriesInSuite>;
+async function loadQueryListFromTextFile(filename: string): Promise<QueriesBySuite> {
+ const lines = (await fs.promises.readFile(filename, 'utf8')).split(/\r?\n/);
+ const allQueries = lines
+ .filter(l => l)
+ .map(l => {
+ const [doneStr, q] = l.split(/\s+/);
+ assert(doneStr === 'DONE' || doneStr === 'TODO', 'first column must be DONE or TODO');
+ return { query: parseQuery(q), done: doneStr === 'DONE' } as const;
+ });
+
+ const queriesBySuite: QueriesBySuite = new Map();
+ for (const q of allQueries) {
+ let suiteQueries = queriesBySuite.get(q.query.suite);
+ if (suiteQueries === undefined) {
+ suiteQueries = [];
+ queriesBySuite.set(q.query.suite, suiteQueries);
+ }
+
+ suiteQueries.push(q);
+ }
+
+ return queriesBySuite;
+}
+
+function checkForOverlappingQueries(queries: QueriesInSuite): void {
+ for (let i1 = 0; i1 < queries.length; ++i1) {
+ for (let i2 = i1 + 1; i2 < queries.length; ++i2) {
+ const q1 = queries[i1].query;
+ const q2 = queries[i2].query;
+ if (compareQueries(q1, q2) !== Ordering.Unordered) {
+ console.log(` FYI, the following checklist items overlap:\n ${q1}\n ${q2}`);
+ }
+ }
+ }
+}
+
+function checkForUnmatchedSubtreesAndDoneness(
+ tree: TestTree,
+ matchQueries: QueriesInSuite
+): number {
+ let subtreeCount = 0;
+ const unmatchedSubtrees: TestQuery[] = [];
+ const overbroadMatches: [TestQuery, TestQuery][] = [];
+ const donenessMismatches: QueryInSuite[] = [];
+ const alwaysExpandThroughLevel = 1; // expand to, at minimum, every file.
+ for (const subtree of tree.iterateCollapsedNodes({
+ includeIntermediateNodes: true,
+ includeEmptySubtrees: true,
+ alwaysExpandThroughLevel,
+ })) {
+ subtreeCount++;
+ const subtreeDone = !subtree.subtreeCounts?.nodesWithTODO;
+
+ let subtreeMatched = false;
+ for (const q of matchQueries) {
+ const comparison = compareQueries(q.query, subtree.query);
+ if (comparison !== Ordering.Unordered) subtreeMatched = true;
+ if (comparison === Ordering.StrictSubset) continue;
+ if (comparison === Ordering.StrictSuperset) overbroadMatches.push([q.query, subtree.query]);
+ if (comparison === Ordering.Equal && q.done !== subtreeDone) donenessMismatches.push(q);
+ }
+ if (!subtreeMatched) unmatchedSubtrees.push(subtree.query);
+ }
+
+ if (overbroadMatches.length) {
+ // (note, this doesn't show ALL multi-test queries - just ones that actually match any .spec.ts)
+ console.log(` FYI, the following checklist items were broader than one file:`);
+ for (const [q, collapsedSubtree] of overbroadMatches) {
+ console.log(` ${q} > ${collapsedSubtree}`);
+ }
+ }
+
+ if (unmatchedSubtrees.length) {
+ throw new StacklessError(`Found unmatched tests:\n ${unmatchedSubtrees.join('\n ')}`);
+ }
+
+ if (donenessMismatches.length) {
+ throw new StacklessError(
+ 'Found done/todo mismatches:\n ' +
+ donenessMismatches
+ .map(q => `marked ${q.done ? 'DONE, but is TODO' : 'TODO, but is DONE'}: ${q.query}`)
+ .join('\n ')
+ );
+ }
+
+ return subtreeCount;
+}
+
+(async () => {
+ console.log('Loading queries...');
+ const queriesBySuite = await loadQueryListFromTextFile(process.argv[2]);
+ console.log(' Found suites: ' + Array.from(queriesBySuite.keys()).join(' '));
+
+ const loader = new DefaultTestFileLoader();
+ for (const [suite, queriesInSuite] of queriesBySuite.entries()) {
+ console.log(`Suite "${suite}":`);
+ console.log(` Checking overlaps between ${queriesInSuite.length} checklist items...`);
+ checkForOverlappingQueries(queriesInSuite);
+ const suiteQuery = new TestQueryMultiFile(suite, []);
+ console.log(` Loading tree ${suiteQuery}...`);
+ const tree = await loadTreeForQuery(loader, suiteQuery, {
+ subqueriesToExpand: queriesInSuite.map(q => q.query),
+ });
+ console.log(' Found no invalid queries in the checklist. Checking for unmatched tests...');
+ const subtreeCount = checkForUnmatchedSubtreesAndDoneness(tree, queriesInSuite);
+ console.log(` No unmatched tests or done/todo mismatches among ${subtreeCount} subtrees!`);
+ }
+ console.log(`Checklist looks good!`);
+})().catch(ex => {
+ console.log(ex.stack ?? ex.toString());
+ process.exit(1);
+});
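The checklist file consumed by loadQueryListFromTextFile above is plain text with one entry per line: a DONE or TODO marker followed by a test query, which is exactly what the assert enforces. A minimal sketch, with hypothetical queries:

    // Hypothetical contents of my/list.txt:
    //   DONE webgpu:api,validation,createBindGroup:*
    //   TODO webgpu:shader,execution,builtin,abs:*
    // Each non-empty line splits into "<DONE|TODO> <test query>":
    const line = 'DONE webgpu:api,validation,createBindGroup:*';
    const [doneStr, q] = line.split(/\s+/);
    console.log({ done: doneStr === 'DONE', query: q });
    // { done: true, query: 'webgpu:api,validation,createBindGroup:*' }
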
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/crawl.ts b/dom/webgpu/tests/cts/checkout/src/common/tools/crawl.ts
new file mode 100644
index 0000000000..50340dd68b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/crawl.ts
@@ -0,0 +1,167 @@
+// Node can look at the filesystem, but JS in the browser can't.
+// This crawls the file tree under src/suites/${suite} to generate a (non-hierarchical) static
+// listing file that can then be used in the browser to load the modules containing the tests.
+
+import * as fs from 'fs';
+import * as path from 'path';
+
+import { loadMetadataForSuite } from '../framework/metadata.js';
+import { SpecFile } from '../internal/file_loader.js';
+import { TestQueryMultiCase, TestQueryMultiFile } from '../internal/query/query.js';
+import { validQueryPart } from '../internal/query/validQueryPart.js';
+import { TestSuiteListingEntry, TestSuiteListing } from '../internal/test_suite_listing.js';
+import { assert, unreachable } from '../util/util.js';
+
+const specFileSuffix = __filename.endsWith('.ts') ? '.spec.ts' : '.spec.js';
+
+async function crawlFilesRecursively(dir: string): Promise<string[]> {
+ const subpathInfo = await Promise.all(
+ (await fs.promises.readdir(dir)).map(async d => {
+ const p = path.join(dir, d);
+ const stats = await fs.promises.stat(p);
+ return {
+ path: p,
+ isDirectory: stats.isDirectory(),
+ isFile: stats.isFile(),
+ };
+ })
+ );
+
+ const files = subpathInfo
+ .filter(
+ i =>
+ i.isFile &&
+ (i.path.endsWith(specFileSuffix) ||
+ i.path.endsWith(`${path.sep}README.txt`) ||
+ i.path === 'README.txt')
+ )
+ .map(i => i.path);
+
+ return files.concat(
+ await subpathInfo
+ .filter(i => i.isDirectory)
+ .map(i => crawlFilesRecursively(i.path))
+ .reduce(async (a, b) => (await a).concat(await b), Promise.resolve([]))
+ );
+}
+
+export async function crawl(suiteDir: string, validate: boolean): Promise<TestSuiteListingEntry[]> {
+ if (!fs.existsSync(suiteDir)) {
+ throw new Error(`Could not find suite: ${suiteDir}`);
+ }
+
+ let validateTimingsEntries;
+ if (validate) {
+ const metadata = loadMetadataForSuite(suiteDir);
+ if (metadata) {
+ validateTimingsEntries = {
+ metadata,
+ testsFoundInFiles: new Set<string>(),
+ };
+ }
+ }
+
+ // Crawl files and convert paths to be POSIX-style, relative to suiteDir.
+ const filesToEnumerate = (await crawlFilesRecursively(suiteDir))
+ .map(f => path.relative(suiteDir, f).replace(/\\/g, '/'))
+ .sort();
+
+ const entries: TestSuiteListingEntry[] = [];
+ for (const file of filesToEnumerate) {
+ // |file| is the suite-relative file path.
+ if (file.endsWith(specFileSuffix)) {
+ const filepathWithoutExtension = file.substring(0, file.length - specFileSuffix.length);
+ const pathSegments = filepathWithoutExtension.split('/');
+
+ const suite = path.basename(suiteDir);
+
+ if (validate) {
+ const filename = `../../${suite}/${filepathWithoutExtension}.spec.js`;
+
+ assert(!process.env.STANDALONE_DEV_SERVER);
+ const mod = (await import(filename)) as SpecFile;
+ assert(mod.description !== undefined, 'Test spec file missing description: ' + filename);
+ assert(mod.g !== undefined, 'Test spec file missing TestGroup definition: ' + filename);
+
+ mod.g.validate(new TestQueryMultiFile(suite, pathSegments));
+
+ for (const { testPath } of mod.g.collectNonEmptyTests()) {
+ const testQuery = new TestQueryMultiCase(suite, pathSegments, testPath, {}).toString();
+ if (validateTimingsEntries) {
+ validateTimingsEntries.testsFoundInFiles.add(testQuery);
+ }
+ }
+ }
+
+ for (const p of pathSegments) {
+ assert(validQueryPart.test(p), `Invalid directory name ${p}; must match ${validQueryPart}`);
+ }
+ entries.push({ file: pathSegments });
+ } else if (path.basename(file) === 'README.txt') {
+ const dirname = path.dirname(file);
+ const readme = fs.readFileSync(path.join(suiteDir, file), 'utf8').trim();
+
+ const pathSegments = dirname !== '.' ? dirname.split('/') : [];
+ entries.push({ file: pathSegments, readme });
+ } else {
+ unreachable(`Matched an unrecognized filename ${file}`);
+ }
+ }
+
+ if (validateTimingsEntries) {
+ let failed = false;
+
+ const zeroEntries = [];
+ const staleEntries = [];
+ for (const [metadataKey, metadataValue] of Object.entries(validateTimingsEntries.metadata)) {
+ if (metadataKey.startsWith('_')) {
+ // Ignore json "_comments".
+ continue;
+ }
+ if (metadataValue.subcaseMS <= 0) {
+ zeroEntries.push(metadataKey);
+ }
+ if (!validateTimingsEntries.testsFoundInFiles.has(metadataKey)) {
+ staleEntries.push(metadataKey);
+ }
+ }
+ if (zeroEntries.length) {
+ console.warn('WARNING: subcaseMS≤0 found in listing_meta.json (allowed, but try to avoid):');
+ for (const metadataKey of zeroEntries) {
+ console.warn(` ${metadataKey}`);
+ }
+ }
+ if (staleEntries.length) {
+ console.error('ERROR: Non-existent tests found in listing_meta.json:');
+ for (const metadataKey of staleEntries) {
+ console.error(` ${metadataKey}`);
+ }
+ failed = true;
+ }
+
+ const missingEntries = [];
+ for (const metadataKey of validateTimingsEntries.testsFoundInFiles) {
+ if (!(metadataKey in validateTimingsEntries.metadata)) {
+ missingEntries.push(metadataKey);
+ }
+ }
+ if (missingEntries.length) {
+ console.error(
+ 'ERROR: Tests missing from listing_meta.json. Please add the new tests (See docs/adding_timing_metadata.md):'
+ );
+ for (const metadataKey of missingEntries) {
+ console.error(` ${metadataKey}`);
+ failed = true;
+ }
+ }
+ assert(!failed);
+ }
+
+ return entries;
+}
+
+export function makeListing(filename: string): Promise<TestSuiteListing> {
+ // Don't validate. This path is only used for the dev server and running tests with Node.
+ // Validation is done for listing generation and presubmit.
+ return crawl(path.dirname(filename), false);
+}
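As a rough illustration of what crawl() returns for a suite (paths and README text here are invented), each .spec.ts file becomes an entry of POSIX-style path segments and each README.txt becomes an entry carrying its text:

    import { TestSuiteListingEntry } from '../internal/test_suite_listing.js';

    // Hypothetical result shape of crawl('src/webgpu', false):
    const exampleEntries: TestSuiteListingEntry[] = [
      { file: [], readme: 'WebGPU conformance test suite.' },       // suite-root README.txt
      { file: ['api', 'validation', 'createBindGroup'] },           // from a .spec.ts file
      { file: ['api', 'validation'], readme: 'Validation tests.' }, // nested README.txt
    ];
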
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/dev_server.ts b/dom/webgpu/tests/cts/checkout/src/common/tools/dev_server.ts
new file mode 100644
index 0000000000..57cb6a7ea4
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/dev_server.ts
@@ -0,0 +1,214 @@
+import * as fs from 'fs';
+import * as os from 'os';
+import * as path from 'path';
+
+import * as babel from '@babel/core';
+import * as chokidar from 'chokidar';
+import * as express from 'express';
+import * as morgan from 'morgan';
+import * as portfinder from 'portfinder';
+import * as serveIndex from 'serve-index';
+
+import { makeListing } from './crawl.js';
+
+// Make sure that makeListing doesn't cache imported spec files. See crawl().
+process.env.STANDALONE_DEV_SERVER = '1';
+
+function usage(rc: number): void {
+ console.error(`\
+Usage:
+ tools/dev_server
+ tools/dev_server 0.0.0.0
+ npm start
+ npm start 0.0.0.0
+
+By default, serves on localhost only. If the argument 0.0.0.0 is passed, serves on all interfaces.
+`);
+ process.exit(rc);
+}
+
+const srcDir = path.resolve(__dirname, '../../');
+
+// Import the project's babel.config.js. We'll use the same config for the runtime compiler.
+const babelConfig = {
+ ...require(path.resolve(srcDir, '../babel.config.js'))({
+ cache: () => {
+ /* not used */
+ },
+ }),
+ sourceMaps: 'inline',
+};
+
+// Caches for the generated listing file and compiled TS sources to speed up reloads.
+// Keyed by suite name
+const listingCache = new Map<string, string>();
+// Keyed by the path to the .ts file, without src/
+const compileCache = new Map<string, string>();
+
+console.log('Watching changes in', srcDir);
+const watcher = chokidar.watch(srcDir, {
+ persistent: true,
+});
+
+/**
+ * Handler to dirty the compile cache for changed .ts files.
+ */
+function dirtyCompileCache(absPath: string, stats?: fs.Stats) {
+ const relPath = path.relative(srcDir, absPath);
+ if ((stats === undefined || stats.isFile()) && relPath.endsWith('.ts')) {
+ const tsUrl = relPath;
+ if (compileCache.has(tsUrl)) {
+ console.debug('Dirtying compile cache', tsUrl);
+ }
+ compileCache.delete(tsUrl);
+ }
+}
+
+/**
+ * Handler to dirty the listing cache for:
+ * - Directory changes
+ * - .spec.ts changes
+ * - README.txt changes
+ * Also dirties the compile cache for changed files.
+ */
+function dirtyListingAndCompileCache(absPath: string, stats?: fs.Stats) {
+ const relPath = path.relative(srcDir, absPath);
+
+ const segments = relPath.split(path.sep);
+ // The listing changes if the directories change, or if a .spec.ts file is added/removed.
+ const listingChange =
+ // A directory or a file with no extension that we can't stat.
+ // (stat doesn't work for deletions)
+ ((path.extname(relPath) === '' && (stats === undefined || !stats.isFile())) ||
+ // A spec file
+ relPath.endsWith('.spec.ts') ||
+ // A README.txt
+ path.basename(relPath, '.txt') === 'README') &&
+ segments.length > 0;
+ if (listingChange) {
+ const suite = segments[0];
+ if (listingCache.has(suite)) {
+ console.debug('Dirtying listing cache', suite);
+ }
+ listingCache.delete(suite);
+ }
+
+ dirtyCompileCache(absPath, stats);
+}
+
+watcher.on('add', dirtyListingAndCompileCache);
+watcher.on('unlink', dirtyListingAndCompileCache);
+watcher.on('addDir', dirtyListingAndCompileCache);
+watcher.on('unlinkDir', dirtyListingAndCompileCache);
+watcher.on('change', dirtyCompileCache);
+
+const app = express();
+
+// Send Chrome Origin Trial tokens
+app.use((_req, res, next) => {
+ res.header('Origin-Trial', [
+ // Token for http://localhost:8080
+ 'AvyDIV+RJoYs8fn3W6kIrBhWw0te0klraoz04mw/nPb8VTus3w5HCdy+vXqsSzomIH745CT6B5j1naHgWqt/tw8AAABJeyJvcmlnaW4iOiJodHRwOi8vbG9jYWxob3N0OjgwODAiLCJmZWF0dXJlIjoiV2ViR1BVIiwiZXhwaXJ5IjoxNjYzNzE4Mzk5fQ==',
+ ]);
+ next();
+});
+
+// Set up logging
+app.use(morgan('dev'));
+
+// Serve the standalone runner directory
+app.use('/standalone', express.static(path.resolve(srcDir, '../standalone')));
+// Add out-wpt/ build dir for convenience
+app.use('/out-wpt', express.static(path.resolve(srcDir, '../out-wpt')));
+app.use('/docs/tsdoc', express.static(path.resolve(srcDir, '../docs/tsdoc')));
+
+// Serve a suite's listing.js file by crawling the filesystem for all tests.
+app.get('/out/:suite([a-zA-Z0-9_-]+)/listing.js', async (req, res, next) => {
+ const suite = req.params['suite'];
+
+ if (listingCache.has(suite)) {
+ res.setHeader('Content-Type', 'application/javascript');
+ res.send(listingCache.get(suite));
+ return;
+ }
+
+ try {
+ const listing = await makeListing(path.resolve(srcDir, suite, 'listing.ts'));
+ const result = `export const listing = ${JSON.stringify(listing, undefined, 2)}`;
+
+ listingCache.set(suite, result);
+ res.setHeader('Content-Type', 'application/javascript');
+ res.send(result);
+ } catch (err) {
+ next(err);
+ }
+});
+
+// Serve all other .js files by fetching the source .ts file and compiling it.
+app.get('/out/**/*.js', async (req, res, next) => {
+ const jsUrl = path.relative('/out', req.url);
+ const tsUrl = jsUrl.replace(/\.js$/, '.ts');
+ if (compileCache.has(tsUrl)) {
+ res.setHeader('Content-Type', 'application/javascript');
+ res.send(compileCache.get(tsUrl));
+ return;
+ }
+
+ let absPath = path.join(srcDir, tsUrl);
+ if (!fs.existsSync(absPath)) {
+ // The .ts file doesn't exist. Try .js file in case this is a .js/.d.ts pair.
+ absPath = path.join(srcDir, jsUrl);
+ }
+
+ try {
+ const result = await babel.transformFileAsync(absPath, babelConfig);
+ if (result && result.code) {
+ compileCache.set(tsUrl, result.code);
+
+ res.setHeader('Content-Type', 'application/javascript');
+ res.send(result.code);
+ } else {
+ throw new Error(`Failed to compile ${tsUrl}.`);
+ }
+ } catch (err) {
+ next(err);
+ }
+});
+
+// Serve everything else (not .js) as static, and directories as directory listings.
+app.use('/out', serveIndex(path.resolve(srcDir, '../src')));
+app.use('/out', express.static(path.resolve(srcDir, '../src')));
+
+void (async () => {
+ let host = '127.0.0.1';
+ if (process.argv.length >= 3) {
+ if (process.argv.length !== 3) usage(1);
+ if (process.argv[2] === '0.0.0.0') {
+ host = '0.0.0.0';
+ } else {
+ usage(1);
+ }
+ }
+
+ console.log(`Finding an available port on ${host}...`);
+ const kPortFinderStart = 8080;
+ const port = await portfinder.getPortPromise({ host, port: kPortFinderStart });
+
+ watcher.on('ready', () => {
+ // Listen on the available port.
+ app.listen(port, host, () => {
+ console.log('Standalone test runner running at:');
+ if (host === '0.0.0.0') {
+ for (const iface of Object.values(os.networkInterfaces())) {
+ for (const details of iface || []) {
+ if (details.family === 'IPv4') {
+ console.log(` http://${details.address}:${port}/standalone/`);
+ }
+ }
+ }
+ } else {
+ console.log(` http://${host}:${port}/standalone/`);
+ }
+ });
+ });
+})();
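A hedged smoke test of the routes above, assuming the server came up on the default starting port 8080 (portfinder may have picked a higher one) and that the Node runtime provides fetch:

    const base = 'http://localhost:8080';
    for (const url of [`${base}/out/webgpu/listing.js`, `${base}/standalone/`]) {
      const res = await fetch(url);
      console.log(url, res.status, res.headers.get('content-type'));
    }
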
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/gen_cache.ts b/dom/webgpu/tests/cts/checkout/src/common/tools/gen_cache.ts
new file mode 100644
index 0000000000..ce0854aa20
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/gen_cache.ts
@@ -0,0 +1,198 @@
+import * as fs from 'fs';
+import * as path from 'path';
+import * as process from 'process';
+
+import { Cacheable, dataCache, setIsBuildingDataCache } from '../framework/data_cache.js';
+
+function usage(rc: number): void {
+ console.error(`Usage: tools/gen_cache [options] [OUT_DIR] [SUITE_DIRS...]
+
+For each suite in SUITE_DIRS, pre-compute data that is expensive to generate
+at runtime and store it under OUT_DIR. If a data file is found, the
+DataCache will load it instead of rebuilding the expensive data at CTS runtime.
+
+Options:
+ --help Print this message and exit.
+ --list Print the list of output files without writing them.
+ --nth i/n Only process every file where (file_index % n == i)
+ --validate Check that the cache can be built (tests for collisions).
+ --verbose Print each action taken.
+`);
+ process.exit(rc);
+}
+
+let mode: 'emit' | 'list' | 'validate' = 'emit';
+let nth = { i: 0, n: 1 };
+let verbose = false;
+
+const nonFlagsArgs: string[] = [];
+
+for (let i = 0; i < process.argv.length; i++) {
+ const arg = process.argv[i];
+ if (arg.startsWith('-')) {
+ switch (arg) {
+ case '--list': {
+ mode = 'list';
+ break;
+ }
+ case '--help': {
+ usage(0);
+ break;
+ }
+ case '--verbose': {
+ verbose = true;
+ break;
+ }
+ case '--validate': {
+ mode = 'validate';
+ break;
+ }
+ case '--nth': {
+ const err = () => {
+ console.error(
+ `--nth requires a value of the form 'i/n', where i and n are positive integers and i < n`
+ );
+ process.exit(1);
+ };
+ i++;
+ if (i >= process.argv.length) {
+ err();
+ }
+ const value = process.argv[i];
+ const parts = value.split('/');
+ if (parts.length !== 2) {
+ err();
+ }
+ nth = { i: parseInt(parts[0]), n: parseInt(parts[1]) };
+ if (nth.i < 0 || nth.n < 1 || nth.i > nth.n) {
+ err();
+ }
+ break;
+ }
+ default: {
+ console.log('unrecognized flag: ', arg);
+ usage(1);
+ }
+ }
+ } else {
+ nonFlagsArgs.push(arg);
+ }
+}
+
+if (nonFlagsArgs.length < 4) {
+ usage(0);
+}
+
+const outRootDir = nonFlagsArgs[2];
+
+dataCache.setStore({
+ load: (path: string) => {
+ return new Promise<Uint8Array>((resolve, reject) => {
+ fs.readFile(`data/${path}`, (err, data) => {
+ if (err !== null) {
+ reject(err.message);
+ } else {
+ resolve(data);
+ }
+ });
+ });
+ },
+});
+setIsBuildingDataCache();
+
+void (async () => {
+ for (const suiteDir of nonFlagsArgs.slice(3)) {
+ await build(suiteDir);
+ }
+})();
+
+const specFileSuffix = __filename.endsWith('.ts') ? '.spec.ts' : '.spec.js';
+
+async function crawlFilesRecursively(dir: string): Promise<string[]> {
+ const subpathInfo = await Promise.all(
+ (await fs.promises.readdir(dir)).map(async d => {
+ const p = path.join(dir, d);
+ const stats = await fs.promises.stat(p);
+ return {
+ path: p,
+ isDirectory: stats.isDirectory(),
+ isFile: stats.isFile(),
+ };
+ })
+ );
+
+ const files = subpathInfo
+ .filter(i => i.isFile && i.path.endsWith(specFileSuffix))
+ .map(i => i.path);
+
+ return files.concat(
+ await subpathInfo
+ .filter(i => i.isDirectory)
+ .map(i => crawlFilesRecursively(i.path))
+ .reduce(async (a, b) => (await a).concat(await b), Promise.resolve([]))
+ );
+}
+
+async function build(suiteDir: string) {
+ if (!fs.existsSync(suiteDir)) {
+ console.error(`Could not find ${suiteDir}`);
+ process.exit(1);
+ }
+
+ // Crawl files and convert paths to be POSIX-style, relative to suiteDir.
+ let filesToEnumerate = (await crawlFilesRecursively(suiteDir)).sort();
+
+ // Filter out non-spec files
+ filesToEnumerate = filesToEnumerate.filter(f => f.endsWith(specFileSuffix));
+
+ const cacheablePathToTS = new Map<string, string>();
+
+ let fileIndex = 0;
+ for (const file of filesToEnumerate) {
+ const pathWithoutExtension = file.substring(0, file.length - specFileSuffix.length);
+ const mod = await import(`../../../${pathWithoutExtension}.spec.js`);
+ if (mod.d?.serialize !== undefined) {
+ const cacheable = mod.d as Cacheable<unknown>;
+
+ {
+ // Check for collisions
+ const existing = cacheablePathToTS.get(cacheable.path);
+ if (existing !== undefined) {
+ console.error(
+ `error: Cacheable '${cacheable.path}' is emitted by both:
+ '${existing}'
+and
+ '${file}'`
+ );
+ process.exit(1);
+ }
+ cacheablePathToTS.set(cacheable.path, file);
+ }
+
+ const outPath = `${outRootDir}/data/${cacheable.path}`;
+
+ if (fileIndex++ % nth.n === nth.i) {
+ switch (mode) {
+ case 'emit': {
+ if (verbose) {
+ console.log(`building '${outPath}'`);
+ }
+ const data = await cacheable.build();
+ const serialized = cacheable.serialize(data);
+ fs.mkdirSync(path.dirname(outPath), { recursive: true });
+ fs.writeFileSync(outPath, serialized, 'binary');
+ break;
+ }
+ case 'list': {
+ console.log(outPath);
+ break;
+ }
+ case 'validate': {
+ // Only check currently performed is the collision detection above
+ break;
+ }
+ }
+ }
+ }
+ }
+}
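The --nth i/n flag processes only every cacheable whose running index satisfies index % n === i, which lets a cache build be split across parallel jobs. A minimal sketch of that partitioning, with invented file names:

    const cacheables = ['a.bin', 'b.bin', 'c.bin', 'd.bin', 'e.bin'];
    const shard = (i: number, n: number) => cacheables.filter((_, index) => index % n === i);
    console.log(shard(0, 2)); // [ 'a.bin', 'c.bin', 'e.bin' ]
    console.log(shard(1, 2)); // [ 'b.bin', 'd.bin' ]
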
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/gen_listings.ts b/dom/webgpu/tests/cts/checkout/src/common/tools/gen_listings.ts
new file mode 100644
index 0000000000..fc5e1f3cde
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/gen_listings.ts
@@ -0,0 +1,63 @@
+import * as fs from 'fs';
+import * as path from 'path';
+import * as process from 'process';
+
+import { crawl } from './crawl.js';
+
+function usage(rc: number): void {
+ console.error(`Usage: tools/gen_listings [options] [OUT_DIR] [SUITE_DIRS...]
+
+For each suite in SUITE_DIRS, generate listings and write each listing.js
+into OUT_DIR/{suite}/listing.js. Example:
+ tools/gen_listings out/ src/unittests/ src/webgpu/
+
+Options:
+ --help Print this message and exit.
+`);
+ process.exit(rc);
+}
+
+const argv = process.argv;
+if (argv.indexOf('--help') !== -1) {
+ usage(0);
+}
+
+{
+ // Ignore old argument that is now the default
+ const i = argv.indexOf('--no-validate');
+ if (i !== -1) {
+ argv.splice(i, 1);
+ }
+}
+
+if (argv.length < 4) {
+ usage(0);
+}
+
+const myself = 'src/common/tools/gen_listings.ts';
+
+const outDir = argv[2];
+
+for (const suiteDir of argv.slice(3)) {
+ // Run concurrently for each suite (might be a tiny bit more efficient)
+ void crawl(suiteDir, false).then(listing => {
+ const suite = path.basename(suiteDir);
+ const outFile = path.normalize(path.join(outDir, `${suite}/listing.js`));
+ fs.mkdirSync(path.join(outDir, suite), { recursive: true });
+ fs.writeFileSync(
+ outFile,
+ `\
+// AUTO-GENERATED - DO NOT EDIT. See ${myself}.
+
+export const listing = ${JSON.stringify(listing, undefined, 2)};
+`
+ );
+
+ // If there was a sourcemap for the file we just replaced, delete it.
+ try {
+ fs.unlinkSync(outFile + '.map');
+ } catch (ex) {
+ // ignore if file didn't exist
+ }
+ });
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/gen_wpt_cts_html.ts b/dom/webgpu/tests/cts/checkout/src/common/tools/gen_wpt_cts_html.ts
new file mode 100644
index 0000000000..e8161304e9
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/gen_wpt_cts_html.ts
@@ -0,0 +1,252 @@
+import { promises as fs } from 'fs';
+import * as path from 'path';
+
+import { DefaultTestFileLoader } from '../internal/file_loader.js';
+import {
+ TestQueryMultiCase,
+ TestQueryMultiFile,
+ TestQueryMultiTest,
+} from '../internal/query/query.js';
+import { assert } from '../util/util.js';
+
+function printUsageAndExit(rc: number): never {
+ console.error(`\
+Usage (simple, for webgpu:* suite only):
+ tools/gen_wpt_cts_html OUTPUT_FILE TEMPLATE_FILE
+ tools/gen_wpt_cts_html out-wpt/cts.https.html templates/cts.https.html
+
+Usage (config file):
+ tools/gen_wpt_cts_html CONFIG_JSON_FILE
+
+where CONFIG_JSON_FILE is a JSON file in the format documented in the code of
+gen_wpt_cts_html.ts. Example:
+ {
+ "suite": "webgpu",
+ "out": "path/to/output/cts.https.html",
+ "template": "path/to/template/cts.https.html",
+ "maxChunkTimeMS": 2000
+ }
+
+Usage (advanced) (deprecated, use config file):
+ tools/gen_wpt_cts_html OUTPUT_FILE TEMPLATE_FILE ARGUMENTS_PREFIXES_FILE EXPECTATIONS_FILE EXPECTATIONS_PREFIX [SUITE]
+ tools/gen_wpt_cts_html my/path/to/cts.https.html templates/cts.https.html arguments.txt myexpectations.txt 'path/to/cts.https.html' cts
+
+where arguments.txt is a file containing a list of arguments prefixes to both generate and expect
+in the expectations. The entire variant list generation runs *once per prefix*, so this
+multiplies the size of the variant list.
+
+ ?worker=0&q=
+ ?worker=1&q=
+
+and myexpectations.txt is a file containing a list of WPT paths to suppress, e.g.:
+
+ path/to/cts.https.html?worker=0&q=webgpu:a/foo:bar={"x":1}
+ path/to/cts.https.html?worker=1&q=webgpu:a/foo:bar={"x":1}
+
+ path/to/cts.https.html?worker=1&q=webgpu:a/foo:bar={"x":3}
+`);
+ process.exit(rc);
+}
+
+interface ConfigJSON {
+ /** Test suite to generate from. */
+ suite: string;
+ /** Output filename, relative to JSON file. */
+ out: string;
+ /** Input template filename, relative to JSON file. */
+ template: string;
+ /**
+ * Maximum time for a single WPT "variant" chunk, in milliseconds. Defaults to infinity.
+ *
+ * This data is typically captured by developers on higher-end computers, so typical test
+ * machines might execute more slowly. For this reason, use a time much less than 5 seconds
+ * (a typical default time limit in WPT test executors).
+ */
+ maxChunkTimeMS?: number;
+ /** List of argument prefixes (what comes before the test query). Defaults to `['?q=']`. */
+ argumentsPrefixes?: string[];
+ expectations?: {
+ /** File containing a list of WPT paths to suppress. */
+ file: string;
+ /** The prefix to trim from every line of the expectations_file. */
+ prefix: string;
+ };
+}
+
+interface Config {
+ suite: string;
+ out: string;
+ template: string;
+ maxChunkTimeMS: number;
+ argumentsPrefixes: string[];
+ expectations?: {
+ file: string;
+ prefix: string;
+ };
+}
+
+let config: Config;
+
+(async () => {
+ // Load the config
+ switch (process.argv.length) {
+ case 3: {
+ const configFile = process.argv[2];
+ const configJSON: ConfigJSON = JSON.parse(await fs.readFile(configFile, 'utf8'));
+ const jsonFileDir = path.dirname(configFile);
+
+ config = {
+ suite: configJSON.suite,
+ out: path.resolve(jsonFileDir, configJSON.out),
+ template: path.resolve(jsonFileDir, configJSON.template),
+ maxChunkTimeMS: configJSON.maxChunkTimeMS ?? Infinity,
+ argumentsPrefixes: configJSON.argumentsPrefixes ?? ['?q='],
+ };
+ if (configJSON.expectations) {
+ config.expectations = {
+ file: path.resolve(jsonFileDir, configJSON.expectations.file),
+ prefix: configJSON.expectations.prefix,
+ };
+ }
+ break;
+ }
+ case 4:
+ case 7:
+ case 8: {
+ const [
+ _nodeBinary,
+ _thisScript,
+ outFile,
+ templateFile,
+ argsPrefixesFile,
+ expectationsFile,
+ expectationsPrefix,
+ suite = 'webgpu',
+ ] = process.argv;
+
+ config = {
+ out: outFile,
+ template: templateFile,
+ suite,
+ maxChunkTimeMS: Infinity,
+ argumentsPrefixes: ['?q='],
+ };
+ if (process.argv.length >= 7) {
+ config.argumentsPrefixes = (await fs.readFile(argsPrefixesFile, 'utf8'))
+ .split(/\r?\n/)
+ .filter(a => a.length);
+ config.expectations = {
+ file: expectationsFile,
+ prefix: expectationsPrefix,
+ };
+ }
+ break;
+ }
+ default:
+ console.error('incorrect number of arguments!');
+ printUsageAndExit(1);
+ }
+
+ const useChunking = Number.isFinite(config.maxChunkTimeMS);
+
+ // Sort prefixes from longest to shortest
+ config.argumentsPrefixes.sort((a, b) => b.length - a.length);
+
+ // Load expectations (if any)
+ let expectationLines = new Set<string>();
+ if (config.expectations) {
+ expectationLines = new Set(
+ (await fs.readFile(config.expectations.file, 'utf8')).split(/\r?\n/).filter(l => l.length)
+ );
+ }
+
+ const expectations: Map<string, string[]> = new Map();
+ for (const prefix of config.argumentsPrefixes) {
+ expectations.set(prefix, []);
+ }
+
+ expLoop: for (const exp of expectationLines) {
+ // Take each expectation for the longest prefix it matches.
+ for (const argsPrefix of config.argumentsPrefixes) {
+ const prefix = config.expectations!.prefix + argsPrefix;
+ if (exp.startsWith(prefix)) {
+ expectations.get(argsPrefix)!.push(exp.substring(prefix.length));
+ continue expLoop;
+ }
+ }
+ console.log('note: ignored expectation: ' + exp);
+ }
+
+ const loader = new DefaultTestFileLoader();
+ const lines = [];
+ for (const prefix of config.argumentsPrefixes) {
+ const rootQuery = new TestQueryMultiFile(config.suite, []);
+ const tree = await loader.loadTree(rootQuery, {
+ subqueriesToExpand: expectations.get(prefix),
+ maxChunkTime: config.maxChunkTimeMS,
+ });
+
+ lines.push(undefined); // output blank line between prefixes
+ const prefixComment = { comment: `Prefix: "${prefix}"` }; // contents will be updated later
+ if (useChunking) lines.push(prefixComment);
+
+ const filesSeen = new Set<string>();
+ const testsSeen = new Set<string>();
+ let variantCount = 0;
+
+ const alwaysExpandThroughLevel = 2; // expand to, at minimum, every test.
+ for (const { query, subtreeCounts } of tree.iterateCollapsedNodes({
+ alwaysExpandThroughLevel,
+ })) {
+ assert(query instanceof TestQueryMultiCase);
+ const queryString = query.toString();
+ // Check for a safe-ish path length limit. Filename must be <= 255, and on Windows the whole
+ // path must be <= 259. Leave room for e.g.:
+ // 'c:\b\s\w\xxxxxxxx\layout-test-results\external\wpt\webgpu\cts_worker=0_q=...-actual.txt'
+ assert(
+ queryString.length < 185,
+ `Generated test variant would produce too-long -actual.txt filename. Possible solutions:
+- Reduce the length of the parts of the test query
+- Reduce the parameterization of the test
+- Make the test function faster and regenerate the listing_meta entry
+- Reduce the specificity of test expectations (if you're using them)
+${queryString}`
+ );
+
+ lines.push({
+ urlQueryString: prefix + query.toString(), // "?worker=0&q=..."
+ comment: useChunking ? `estimated: ${subtreeCounts?.totalTimeMS.toFixed(3)} ms` : undefined,
+ });
+
+ variantCount++;
+ filesSeen.add(new TestQueryMultiTest(query.suite, query.filePathParts, []).toString());
+ testsSeen.add(
+ new TestQueryMultiCase(query.suite, query.filePathParts, query.testPathParts, {}).toString()
+ );
+ }
+ prefixComment.comment += `; ${variantCount} variants generated from ${testsSeen.size} tests in ${filesSeen.size} files`;
+ }
+ await generateFile(lines);
+})().catch(ex => {
+ console.log(ex.stack ?? ex.toString());
+ process.exit(1);
+});
+
+async function generateFile(
+ lines: Array<{ urlQueryString?: string; comment?: string } | undefined>
+): Promise<void> {
+ let result = '';
+ result += '<!-- AUTO-GENERATED - DO NOT EDIT. See WebGPU CTS: tools/gen_wpt_cts_html. -->\n';
+
+ result += await fs.readFile(config.template, 'utf8');
+
+ for (const line of lines) {
+ if (line !== undefined) {
+ if (line.urlQueryString) result += `<meta name=variant content='${line.urlQueryString}'>`;
+ if (line.comment) result += `<!-- ${line.comment} -->`;
+ }
+ result += '\n';
+ }
+
+ await fs.writeFile(config.out, result);
+}
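For reference, this is roughly how generateFile() assembles one variant line (the query string and timing are hypothetical; the timing comment only appears when maxChunkTimeMS is finite):

    const line = { urlQueryString: '?q=webgpu:api,validation,createBindGroup:*', comment: 'estimated: 1.234 ms' };
    let out = '';
    if (line.urlQueryString) out += `<meta name=variant content='${line.urlQueryString}'>`;
    if (line.comment) out += `<!-- ${line.comment} -->`;
    console.log(out);
    // <meta name=variant content='?q=webgpu:api,validation,createBindGroup:*'><!-- estimated: 1.234 ms -->
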
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/image_utils.ts b/dom/webgpu/tests/cts/checkout/src/common/tools/image_utils.ts
new file mode 100644
index 0000000000..84cf9adfa8
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/image_utils.ts
@@ -0,0 +1,58 @@
+import * as fs from 'fs';
+
+import { Page } from 'playwright-core';
+import { PNG } from 'pngjs';
+import { screenshot, WindowInfo } from 'screenshot-ftw';
+
+// eslint-disable-next-line ban/ban
+const waitMS = (ms: number) => new Promise(resolve => setTimeout(resolve, ms));
+
+export function readPng(filename: string) {
+ const data = fs.readFileSync(filename);
+ return PNG.sync.read(data);
+}
+
+export function writePng(filename: string, width: number, height: number, data: Buffer) {
+ const png = new PNG({ colorType: 6, width, height });
+ for (let i = 0; i < data.byteLength; ++i) {
+ png.data[i] = data[i];
+ }
+ const buffer = PNG.sync.write(png);
+ fs.writeFileSync(filename, buffer);
+}
+
+export class ScreenshotManager {
+ window?: WindowInfo;
+
+ async init(page: Page) {
+ // set the title to some random number so we can find the window by title
+ const title: string = await page.evaluate(() => {
+ const title = `t-${Math.random()}`;
+ document.title = title;
+ return title;
+ });
+
+ // wait for the window to show up
+ let window;
+ for (let i = 0; !window && i < 100; ++i) {
+ await waitMS(50);
+ const windows = await screenshot.getWindows();
+ window = windows.find(window => window.title.includes(title));
+ }
+ if (!window) {
+ throw Error(`could not find window: ${title}`);
+ }
+ this.window = window;
+ }
+
+ async takeScreenshot(page: Page, screenshotName: string) {
+ // await page.screenshot({ path: screenshotName });
+
+ // we need to set the URL and title since the screenshot will include the browser chrome
+ await page.evaluate(() => {
+ document.title = 'screenshot';
+ window.history.replaceState({}, '', '/screenshot');
+ });
+ await screenshot.captureWindowById(screenshotName, this.window!.id);
+ }
+}
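A minimal round-trip with the PNG helpers above; the file names are hypothetical:

    import { readPng, writePng } from './image_utils.js';

    const png = readPng('out-wpt-reftest-screenshots/example-actual.png');
    writePng('example-copy.png', png.width, png.height, png.data);
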
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/merge_listing_times.ts b/dom/webgpu/tests/cts/checkout/src/common/tools/merge_listing_times.ts
new file mode 100644
index 0000000000..fb33ae20fb
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/merge_listing_times.ts
@@ -0,0 +1,177 @@
+import * as fs from 'fs';
+import * as process from 'process';
+import * as readline from 'readline';
+
+import { TestMetadataListing } from '../framework/metadata.js';
+import { parseQuery } from '../internal/query/parseQuery.js';
+import { TestQueryMultiCase, TestQuerySingleCase } from '../internal/query/query.js';
+import { CaseTimingLogLine } from '../internal/test_group.js';
+import { assert } from '../util/util.js';
+
+// For information on listing_meta.json file maintenance, please read
+// tools/merge_listing_times first.
+
+function usage(rc: number): never {
+ console.error(`Usage: tools/merge_listing_times [options] SUITES... -- [TIMING_LOG_FILES...]
+
+Options:
+ --help Print this message and exit.
+
+Reads raw case timing data for each suite in SUITES, from all TIMING_LOG_FILES
+(see below), and merges it into the src/*/listing_meta.json files checked into
+the repository. The timing data in the listing_meta.json files is updated with
+the newly-observed timing data *if the new timing is slower*. That is, it will
+only increase the values in the listing_meta.json file, and will only cause WPT
+chunks to become smaller.
+
+If there are no TIMING_LOG_FILES, this just regenerates (reformats) the file
+using the data already present.
+
+In more detail:
+
+- Reads per-case timing data in any of the SUITES, from all TIMING_LOG_FILES
+ (ignoring skipped cases), and averages it over the number of subcases.
+ For cases that ran multiple times, takes the slowest run.
+- Compiles the average time-per-subcase for each test seen.
+- For each suite seen, loads its listing_meta.json, takes the max of the old and
+ new data, and writes it back out.
+
+How to generate TIMING_LOG_FILES files:
+
+- Launch the 'websocket-logger' tool (see its README.md), which listens for
+ log messages on localhost:59497.
+- Run the tests you want to capture data for, on the same system. Since
+ logging is done through the websocket side-channel, you can run the tests
+ under any runtime (standalone, WPT, etc.) as long as WebSocket support is
+ available (always true in browsers).
+- Run \`tools/merge_listing_times webgpu -- tools/websocket-logger/wslog-*.txt\`
+`);
+ process.exit(rc);
+}
+
+const kHeader = `{
+ "_comment": "SEMI AUTO-GENERATED: Please read docs/adding_timing_metadata.md.",
+`;
+const kFooter = `\
+ "_end": ""
+}
+`;
+
+const argv = process.argv;
+if (argv.some(v => v.startsWith('-') && v !== '--') || argv.every(v => v !== '--')) {
+ usage(0);
+}
+const suites = [];
+const timingLogFilenames = [];
+let seenDashDash = false;
+for (const arg of argv.slice(2)) {
+ if (arg === '--') {
+ seenDashDash = true;
+ continue;
+ } else if (arg.startsWith('-')) {
+ usage(0);
+ }
+
+ if (seenDashDash) {
+ timingLogFilenames.push(arg);
+ } else {
+ suites.push(arg);
+ }
+}
+if (!seenDashDash) {
+ usage(0);
+}
+
+void (async () => {
+ // Read the log files to find the log line for each *case* query. If a case
+ // ran multiple times, take the one with the largest average subcase time.
+ const caseTimes = new Map<string, CaseTimingLogLine>();
+ for (const timingLogFilename of timingLogFilenames) {
+ const rl = readline.createInterface({
+ input: fs.createReadStream(timingLogFilename),
+ crlfDelay: Infinity,
+ });
+
+ for await (const line of rl) {
+ const parsed: CaseTimingLogLine = JSON.parse(line);
+
+ const prev = caseTimes.get(parsed.q);
+ if (prev !== undefined) {
+ const timePerSubcase = parsed.timems / Math.max(1, parsed.nonskippedSubcaseCount);
+ const prevTimePerSubcase = prev.timems / Math.max(1, prev.nonskippedSubcaseCount);
+
+ if (timePerSubcase > prevTimePerSubcase) {
+ caseTimes.set(parsed.q, parsed);
+ }
+ } else {
+ caseTimes.set(parsed.q, parsed);
+ }
+ }
+ }
+
+ // Accumulate total times per test. Map of suite -> query -> {totalTimeMS, subcaseCount}.
+ const testTimes = new Map<string, Map<string, { totalTimeMS: number; subcaseCount: number }>>();
+ for (const suite of suites) {
+ testTimes.set(suite, new Map());
+ }
+ for (const [caseQString, caseTime] of caseTimes) {
+ const caseQ = parseQuery(caseQString);
+ assert(caseQ instanceof TestQuerySingleCase);
+ const suite = caseQ.suite;
+ const suiteTestTimes = testTimes.get(suite);
+ if (suiteTestTimes === undefined) {
+ continue;
+ }
+
+ const testQ = new TestQueryMultiCase(suite, caseQ.filePathParts, caseQ.testPathParts, {});
+ const testQString = testQ.toString();
+
+ const prev = suiteTestTimes.get(testQString);
+ if (prev !== undefined) {
+ prev.totalTimeMS += caseTime.timems;
+ prev.subcaseCount += caseTime.nonskippedSubcaseCount;
+ } else {
+ suiteTestTimes.set(testQString, {
+ totalTimeMS: caseTime.timems,
+ subcaseCount: caseTime.nonskippedSubcaseCount,
+ });
+ }
+ }
+
+ for (const suite of suites) {
+ const currentMetadata: TestMetadataListing = JSON.parse(
+ fs.readFileSync(`./src/${suite}/listing_meta.json`, 'utf8')
+ );
+
+ const metadata = { ...currentMetadata };
+ for (const [testQString, { totalTimeMS, subcaseCount }] of testTimes.get(suite)!) {
+ const avgTime = totalTimeMS / Math.max(1, subcaseCount);
+ if (testQString in metadata) {
+ metadata[testQString].subcaseMS = Math.max(metadata[testQString].subcaseMS, avgTime);
+ } else {
+ metadata[testQString] = { subcaseMS: avgTime };
+ }
+ }
+
+ writeListings(suite, metadata);
+ }
+})();
+
+function writeListings(suite: string, metadata: TestMetadataListing) {
+ const output = fs.createWriteStream(`./src/${suite}/listing_meta.json`);
+ try {
+ output.write(kHeader);
+ const keys = Object.keys(metadata).sort();
+ for (const k of keys) {
+ if (k.startsWith('_')) {
+ // Ignore json "_comments".
+ continue;
+ }
+ assert(k.indexOf('"') === -1);
+ output.write(` "${k}": { "subcaseMS": ${metadata[k].subcaseMS.toFixed(3)} },\n`);
+ }
+ output.write(kFooter);
+ } finally {
+ output.close();
+ }
+}
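Each TIMING_LOG_FILES line is a JSON object matching CaseTimingLogLine. A hedged sketch of one line and of the per-subcase average the merge computes (query and numbers invented):

    const logLine = '{"q":"webgpu:api,operation,buffers,map:mapAsync:write=false","timems":12.5,"nonskippedSubcaseCount":4}';
    const parsed = JSON.parse(logLine) as { q: string; timems: number; nonskippedSubcaseCount: number };
    console.log(parsed.timems / Math.max(1, parsed.nonskippedSubcaseCount)); // 3.125
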
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/run_wpt_ref_tests.ts b/dom/webgpu/tests/cts/checkout/src/common/tools/run_wpt_ref_tests.ts
new file mode 100644
index 0000000000..9f8661b9c4
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/run_wpt_ref_tests.ts
@@ -0,0 +1,446 @@
+import * as fs from 'fs';
+import * as path from 'path';
+
+import { chromium, firefox, webkit, Page, Browser } from 'playwright-core';
+
+import { ScreenshotManager, readPng, writePng } from './image_utils.js';
+
+declare function wptRefTestPageReady(): boolean;
+declare function wptRefTestGetTimeout(): boolean;
+
+const verbose = !!process.env.VERBOSE;
+const kRefTestsBaseURL = 'http://localhost:8080/out/webgpu/web_platform/reftests';
+const kRefTestsPath = 'src/webgpu/web_platform/reftests';
+const kScreenshotPath = 'out-wpt-reftest-screenshots';
+
+// note: technically we should use an HTML parser to find this, to deal with whitespace,
+// attribute order, quotes, entities, etc., but since we control the test source we can just
+// make sure they match.
+const kRefLinkRE = /<link\s+rel="match"\s+href="(.*?)"/;
+const kRefWaitClassRE = /class="reftest-wait"/;
+const kFuzzy = /<meta\s+name="?fuzzy"?\s+content="(.*?)">/;
+
+function printUsage() {
+ console.log(`
+run_wpt_ref_tests path-to-browser-executable [ref-test-name]
+
+where ref-test-name is a simple substring filter: only tests whose filename includes the
+given string are run. If not passed, all ref tests are run.
+
+MacOS Chrome Example:
+ node tools/run_wpt_ref_tests /Applications/Google\\ Chrome\\ Canary.app/Contents/MacOS/Google\\ Chrome\\ Canary
+
+`);
+}
+
+// Get all of filenames that end with '.html'
+function getRefTestNames(refTestPath: string) {
+ return fs.readdirSync(refTestPath).filter(name => name.endsWith('.html'));
+}
+
+// Given a regex with one capture, return it or the empty string if no match.
+function getRegexMatchCapture(re: RegExp, content: string) {
+ const m = re.exec(content);
+ return m ? m[1] : '';
+}
+
+type FileInfo = {
+ content: string;
+ refLink: string;
+ refWait: boolean;
+ fuzzy: string;
+};
+
+function readHTMLFile(filename: string): FileInfo {
+ const content = fs.readFileSync(filename, { encoding: 'utf8' });
+ return {
+ content,
+ refLink: getRegexMatchCapture(kRefLinkRE, content),
+ refWait: kRefWaitClassRE.test(content),
+ fuzzy: getRegexMatchCapture(kFuzzy, content),
+ };
+}
+
+/**
+ * This is a workaround for a bug in Chrome: in emulation mode, Chrome lets you set a
+ * devicePixelRatio, but it still renders at the actual devicePixelRatio, at least on macOS.
+ * So we compute the actual ratio and then use that.
+ */
+async function getComputedDevicePixelRatio(browser: Browser): Promise<number> {
+ const context = await browser.newContext();
+ const page = await context.newPage();
+ await page.goto('data:text/html,<html></html>');
+ await page.waitForLoadState('networkidle');
+ const devicePixelRatio = await page.evaluate(() => {
+ let resolve: (v: number) => void;
+ const promise = new Promise(_resolve => (resolve = _resolve));
+ const observer = new ResizeObserver(entries => {
+ const devicePixelWidth = entries[0].devicePixelContentBoxSize[0].inlineSize;
+ const clientWidth = entries[0].target.clientWidth;
+ const devicePixelRatio = devicePixelWidth / clientWidth;
+ resolve(devicePixelRatio);
+ });
+ observer.observe(document.documentElement);
+ return promise;
+ });
+ await page.close();
+ await context.close();
+ return devicePixelRatio as number;
+}
+
+// Note: If possible, rather than adding command line options to this tool,
+// see if you can just make it work based on the path.
+async function getBrowserInterface(executablePath: string) {
+ const lc = executablePath.toLowerCase();
+ if (lc.includes('chrom')) {
+ const browser = await chromium.launch({
+ executablePath,
+ headless: false,
+ args: ['--enable-unsafe-webgpu'],
+ });
+ const devicePixelRatio = await getComputedDevicePixelRatio(browser);
+ const context = await browser.newContext({
+ deviceScaleFactor: devicePixelRatio,
+ });
+ return { browser, context };
+ } else if (lc.includes('firefox')) {
+ const browser = await firefox.launch({
+ executablePath,
+ headless: false,
+ });
+ const context = await browser.newContext();
+ return { browser, context };
+ } else if (lc.includes('safari') || lc.includes('webkit')) {
+ const browser = await webkit.launch({
+ executablePath,
+ headless: false,
+ });
+ const context = await browser.newContext();
+ return { browser, context };
+ } else {
+ throw new Error(`could not guess browser from executable path: ${executablePath}`);
+ }
+}
+
+// Parses a fuzzy spec as defined here
+// https://web-platform-tests.org/writing-tests/reftests.html#fuzzy-matching
+// Note: This is not robust, but the tests will eventually be run in the real WPT.
+function parseFuzzy(fuzzy: string) {
+ if (!fuzzy) {
+ return { maxDifference: [0, 0], totalPixels: [0, 0] };
+ } else {
+ const parts = fuzzy.split(';');
+ if (parts.length !== 2) {
+ throw Error(`unhandled fuzzy format: ${fuzzy}`);
+ }
+ const ranges = parts.map(part => {
+ const range = part
+ .replace(/[a-zA-Z=]/g, '')
+ .split('-')
+ .map(v => parseInt(v));
+ return range.length === 1 ? [0, range[0]] : range;
+ });
+ return {
+ maxDifference: ranges[0],
+ totalPixels: ranges[1],
+ };
+ }
+}
+
+// Compares two images using the algorithm described in the web platform tests
+// https://web-platform-tests.org/writing-tests/reftests.html#fuzzy-matching
+// If they differ, a diff mask is written out.
+function compareImages(
+ filename1: string,
+ filename2: string,
+ fuzzy: string,
+ diffName: string,
+ startingRow: number = 0
+) {
+ const img1 = readPng(filename1);
+ const img2 = readPng(filename2);
+ const { width, height } = img1;
+ if (img2.width !== width || img2.height !== height) {
+ console.error('images are not the same size:', filename1, filename2);
+ return;
+ }
+
+ const { maxDifference, totalPixels } = parseFuzzy(fuzzy);
+
+ const diffData = Buffer.alloc(width * height * 4);
+ const diffPixels = new Uint32Array(diffData.buffer);
+ const kRed = 0xff0000ff;
+ const kWhite = 0xffffffff;
+ const kYellow = 0xff00ffff;
+
+ let numPixelsDifferent = 0;
+ let anyPixelsOutOfRange = false;
+ for (let y = startingRow; y < height; ++y) {
+ for (let x = 0; x < width; ++x) {
+ const offset = y * width + x;
+ let isDifferent = false;
+ let outOfRange = false;
+ for (let c = 0; c < 4 && !outOfRange; ++c) {
+ const off = offset * 4 + c;
+ const v0 = img1.data[off];
+ const v1 = img2.data[off];
+ const channelDiff = Math.abs(v0 - v1);
+ outOfRange ||= channelDiff < maxDifference[0] || channelDiff > maxDifference[1];
+ isDifferent ||= channelDiff > 0;
+ }
+ numPixelsDifferent += isDifferent ? 1 : 0;
+ anyPixelsOutOfRange ||= outOfRange;
+ diffPixels[offset] = outOfRange ? kRed : isDifferent ? kYellow : kWhite;
+ }
+ }
+
+ const pass =
+ !anyPixelsOutOfRange &&
+ numPixelsDifferent >= totalPixels[0] &&
+ numPixelsDifferent <= totalPixels[1];
+ if (!pass) {
+ writePng(diffName, width, height, diffData);
+ console.error(
+ `FAIL: too many differences in: ${filename1} vs ${filename2}
+ ${numPixelsDifferent} differences, expected: ${totalPixels[0]}-${totalPixels[1]} with range: ${maxDifference[0]}-${maxDifference[1]}
+ wrote difference to: ${diffName};
+ `
+ );
+ } else {
+ console.log(`PASS`);
+ }
+ return pass;
+}
+
+function exists(filename: string) {
+ try {
+ fs.accessSync(filename);
+ return true;
+ } catch (e) {
+ return false;
+ }
+}
+
+async function waitForPageRender(page: Page) {
+ await page.evaluate(() => {
+ return new Promise(resolve => requestAnimationFrame(resolve));
+ });
+}
+
+// returns true if the page timed out.
+async function runPage(page: Page, url: string, refWait: boolean) {
+ console.log(' loading:', url);
+ // we need to load about:blank to force the browser to re-render
+ // else the previous page may still be visible if the page we are loading fails
+ await page.goto('about:blank');
+ await page.waitForLoadState('domcontentloaded');
+ await waitForPageRender(page);
+
+ await page.goto(url);
+ await page.waitForLoadState('domcontentloaded');
+ await waitForPageRender(page);
+
+ if (refWait) {
+ await page.waitForFunction(() => wptRefTestPageReady());
+ const timeout = await page.evaluate(() => wptRefTestGetTimeout());
+ if (timeout) {
+ return true;
+ }
+ }
+ return false;
+}
+
+async function main() {
+ const args = process.argv.slice(2);
+ if (args.length < 1 || args.length > 2) {
+ printUsage();
+ return;
+ }
+
+ const [executablePath, refTestName] = args;
+
+ if (!exists(executablePath)) {
+ console.error(executablePath, 'does not exist');
+ return;
+ }
+
+ const testNames = getRefTestNames(kRefTestsPath).filter(name =>
+ refTestName ? name.includes(refTestName) : true
+ );
+
+ if (!exists(kScreenshotPath)) {
+ fs.mkdirSync(kScreenshotPath, { recursive: true });
+ }
+
+ if (testNames.length === 0) {
+ console.error(`no tests include "${refTestName}"`);
+ return;
+ }
+
+ const { browser, context } = await getBrowserInterface(executablePath);
+ const page = await context.newPage();
+
+ const screenshotManager = new ScreenshotManager();
+ await screenshotManager.init(page);
+
+ if (verbose) {
+ page.on('console', async msg => {
+ const { url, lineNumber, columnNumber } = msg.location();
+ const values = await Promise.all(msg.args().map(a => a.jsonValue()));
+ console.log(`${url}:${lineNumber}:${columnNumber}:`, ...values);
+ });
+ }
+
+ await page.addInitScript({
+ content: `
+ (() => {
+ let timeout = false;
+ setTimeout(() => timeout = true, 5000);
+
+ window.wptRefTestPageReady = function() {
+ return timeout || !document.documentElement.classList.contains('reftest-wait');
+ };
+
+ window.wptRefTestGetTimeout = function() {
+ return timeout;
+ };
+ })();
+ `,
+ });
+
+ type Result = {
+ status: string;
+ testName: string;
+ refName: string;
+ testScreenshotName: string;
+ refScreenshotName: string;
+ diffName: string;
+ };
+ const results: Result[] = [];
+ const addResult = (
+ status: string,
+ testName: string,
+ refName: string,
+ testScreenshotName: string = '',
+ refScreenshotName: string = '',
+ diffName: string = ''
+ ) => {
+ results.push({ status, testName, refName, testScreenshotName, refScreenshotName, diffName });
+ };
+
+ for (const testName of testNames) {
+ console.log('processing:', testName);
+ const { refLink, refWait, fuzzy } = readHTMLFile(path.join(kRefTestsPath, testName));
+ if (!refLink) {
+ throw new Error(`could not find ref link in: ${testName}`);
+ }
+ const testURL = `${kRefTestsBaseURL}/${testName}`;
+ const refURL = `${kRefTestsBaseURL}/${refLink}`;
+
+ // Technically this is not correct, but it fits the existing tests.
+ // It assumes refLink is relative to kRefTestsPath, but it is actually
+ // supposed to be relative to the test. It might also be an absolute
+ // path. Neither of those cases exists at the time of writing this.
+ const refFileInfo = readHTMLFile(path.join(kRefTestsPath, refLink));
+ const testScreenshotName = path.join(kScreenshotPath, `${testName}-actual.png`);
+ const refScreenshotName = path.join(kScreenshotPath, `${testName}-expected.png`);
+ const diffName = path.join(kScreenshotPath, `${testName}-diff.png`);
+
+ const timeoutTest = await runPage(page, testURL, refWait);
+ if (timeoutTest) {
+ addResult('TIMEOUT', testName, refLink);
+ continue;
+ }
+ await screenshotManager.takeScreenshot(page, testScreenshotName);
+
+ const timeoutRef = await runPage(page, refURL, refFileInfo.refWait);
+ if (timeoutRef) {
+ addResult('TIMEOUT', testName, refLink);
+ continue;
+ }
+ await screenshotManager.takeScreenshot(page, refScreenshotName);
+
+ const pass = compareImages(testScreenshotName, refScreenshotName, fuzzy, diffName);
+ addResult(
+ pass ? 'PASS' : 'FAILURE',
+ testName,
+ refLink,
+ testScreenshotName,
+ refScreenshotName,
+ diffName
+ );
+ }
+
+ console.log(
+ `----results----\n${results
+ .map(({ status, testName }) => `[ ${status.padEnd(7)} ] ${testName}`)
+ .join('\n')}`
+ );
+
+ const imgLink = (filename: string, title: string) => {
+ const name = path.basename(filename);
+ return `
+ <div class="screenshot">
+ ${title}
+ <a href="${name}" title="${name}">
+ <img src="${name}" width="256"/>
+ </a>
+ </div>`;
+ };
+
+ const indexName = path.join(kScreenshotPath, 'index.html');
+ fs.writeFileSync(
+ indexName,
+ `<!DOCTYPE html>
+<html>
+ <head>
+ <style>
+ .screenshot {
+ display: inline-block;
+ background: #CCC;
+ margin-right: 5px;
+ padding: 5px;
+ }
+ .screenshot a {
+ display: block;
+ }
+ </style>
+ </head>
+ <body>
+ ${results
+ .map(({ status, testName, refName, testScreenshotName, refScreenshotName, diffName }) => {
+ return `
+ <div>
+ <div>[ ${status} ]: ${testName} ref: ${refName}</div>
+ ${
+ status === 'FAILURE'
+ ? `${imgLink(testScreenshotName, 'actual')}
+ ${imgLink(refScreenshotName, 'ref')}
+ ${imgLink(diffName, 'diff')}`
+ : ``
+ }
+ </div>
+ <hr>
+ `;
+ })
+ .join('\n')}
+ </body>
+</html>
+ `
+ );
+
+ // the file:// with an absolute path makes it clickable in some terminals
+ console.log(`\nsee: file://${path.resolve(indexName)}\n`);
+
+ await page.close();
+ await context.close();
+ // I have no idea why it's taking ~30 seconds for playwright to close.
+ console.log('-- [ done: waiting for browser to close ] --');
+ await browser.close();
+}
+
+main().catch(e => {
+ throw e;
+});
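A minimal sketch of the fuzzy metadata format handled by parseFuzzy above (the metadata value is hypothetical; the real function also accepts a single number per range, treated as 0-N):

    const fuzzy = 'maxDifference=0-2;totalPixels=0-150';
    const [maxDifference, totalPixels] = fuzzy
      .split(';')
      .map(part => part.replace(/[a-zA-Z=]/g, '').split('-').map(v => parseInt(v)));
    console.log(maxDifference, totalPixels); // [ 0, 2 ] [ 0, 150 ]
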
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/setup-ts-in-node.js b/dom/webgpu/tests/cts/checkout/src/common/tools/setup-ts-in-node.js
new file mode 100644
index 0000000000..89e91e8c9d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/setup-ts-in-node.js
@@ -0,0 +1,51 @@
+const path = require('path');
+
+// Automatically transpile .ts imports
+require('ts-node').register({
+ // Specify the project file so ts-node doesn't try to find it itself based on the CWD.
+ project: path.resolve(__dirname, '../../../tsconfig.json'),
+ compilerOptions: {
+ module: 'commonjs',
+ },
+ transpileOnly: true,
+});
+const Module = require('module');
+
+// Redirect imports of .js files to .ts files
+const resolveFilename = Module._resolveFilename;
+Module._resolveFilename = (request, parentModule, isMain) => {
+ do {
+ if (request.startsWith('.') && parentModule.filename.endsWith('.ts')) {
+ // Required for browser (because it needs the actual correct file path and
+ // can't do any kind of file resolution).
+ if (request.endsWith('/index.js')) {
+ throw new Error(
+ "Avoid the name `index.js`; we don't have Node-style path resolution: " + request
+ );
+ }
+
+ // Import of Node addon modules are valid and should pass through.
+ if (request.endsWith('.node')) {
+ break;
+ }
+
+ if (!request.endsWith('.js')) {
+ throw new Error('All relative imports must end in .js: ' + request);
+ }
+
+ try {
+ const tsRequest = request.substring(0, request.length - '.js'.length) + '.ts';
+ return resolveFilename.call(this, tsRequest, parentModule, isMain);
+ } catch (ex) {
+ // If the .ts file doesn't exist, try .js instead.
+ break;
+ }
+ }
+ } while (0);
+
+ return resolveFilename.call(this, request, parentModule, isMain);
+};
+
+process.on('unhandledRejection', ex => {
+ throw ex;
+});
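Illustrative only: once Node preloads this file (the exact wrapper invocation is assumed), relative imports written with a .js extension inside .ts sources resolve to the corresponding .ts files, so the same specifiers work in the browser and under Node:

    // Hypothetical consumer.ts executed under the hook above (the import path is invented):
    import { assert } from '../util/util.js'; // resolved to ../util/util.ts by the hook

    assert(1 + 1 === 2);
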
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/validate.ts b/dom/webgpu/tests/cts/checkout/src/common/tools/validate.ts
new file mode 100644
index 0000000000..164ee3259a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/validate.ts
@@ -0,0 +1,36 @@
+import * as process from 'process';
+
+import { crawl } from './crawl.js';
+
+function usage(rc: number): void {
+ console.error(`Usage: tools/validate [options] [SUITE_DIRS...]
+
+For each suite in SUITE_DIRS, validate some properties about the file:
+- It has a .description and .g
+- That each test:
+ - Has a test function (or is marked unimplemented)
+ - Has no duplicate cases
+ - Configures batching correctly, if used
+- That each case query is not too long
+
+Example:
+ tools/validate src/unittests/ src/webgpu/
+
+Options:
+ --help Print this message and exit.
+`);
+ process.exit(rc);
+}
+
+const args = process.argv.slice(2);
+if (args.indexOf('--help') !== -1) {
+ usage(0);
+}
+
+if (args.length < 1) {
+ usage(0);
+}
+
+for (const suiteDir of args) {
+ void crawl(suiteDir, true);
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/version.ts b/dom/webgpu/tests/cts/checkout/src/common/tools/version.ts
new file mode 100644
index 0000000000..2b51700b12
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/version.ts
@@ -0,0 +1,4 @@
+export const version = require('child_process')
+ .execSync('git describe --always --abbrev=0 --dirty')
+ .toString()
+ .trim();
diff --git a/dom/webgpu/tests/cts/checkout/src/common/util/collect_garbage.ts b/dom/webgpu/tests/cts/checkout/src/common/util/collect_garbage.ts
new file mode 100644
index 0000000000..670028d41c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/util/collect_garbage.ts
@@ -0,0 +1,58 @@
+import { resolveOnTimeout } from './util.js';
+
+/* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+declare const Components: any;
+
+/**
+ * Attempts to trigger JavaScript garbage collection, either using explicit methods if exposed
+ * (may be available in testing environments with special browser runtime flags set), or using
+ * some weird tricks to incur GC pressure. Adapted from the WebGL CTS.
+ */
+export async function attemptGarbageCollection(): Promise<void> {
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ const w: any = globalThis;
+ if (w.GCController) {
+ w.GCController.collect();
+ return;
+ }
+
+ if (w.opera && w.opera.collect) {
+ w.opera.collect();
+ return;
+ }
+
+ try {
+ w.QueryInterface(Components.interfaces.nsIInterfaceRequestor)
+ .getInterface(Components.interfaces.nsIDOMWindowUtils)
+ .garbageCollect();
+ return;
+ } catch (e) {
+ // ignore any failure
+ }
+
+ if (w.gc) {
+ w.gc();
+ return;
+ }
+
+ if (w.CollectGarbage) {
+ w.CollectGarbage();
+ return;
+ }
+
+ let i: number;
+ function gcRec(n: number): void {
+ if (n < 1) return;
+ /* eslint-disable @typescript-eslint/restrict-plus-operands */
+ let temp: object | string = { i: 'ab' + i + i / 100000 };
+ temp = temp + 'foo';
+ temp; // dummy use of unused variable
+ gcRec(n - 1);
+ }
+ for (i = 0; i < 1000; i++) {
+ gcRec(10);
+ }
+
+ return resolveOnTimeout(35); // Let the event loop run a few frames in case it helps.
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/common/util/colors.ts b/dom/webgpu/tests/cts/checkout/src/common/util/colors.ts
new file mode 100644
index 0000000000..709d159320
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/util/colors.ts
@@ -0,0 +1,127 @@
+/**
+ * The interface used for formatting strings to contain color metadata.
+ *
+ * Use the interface properties to construct a style, then use the
+ * `(s: string): string` function to format the provided string with the given
+ * style.
+ */
+export interface Colors {
+ // Are colors enabled?
+ enabled: boolean;
+
+ // Returns the string formatted to contain the specified color or style.
+ (s: string): string;
+
+ // modifiers
+ reset: Colors;
+ bold: Colors;
+ dim: Colors;
+ italic: Colors;
+ underline: Colors;
+ inverse: Colors;
+ hidden: Colors;
+ strikethrough: Colors;
+
+ // colors
+ black: Colors;
+ red: Colors;
+ green: Colors;
+ yellow: Colors;
+ blue: Colors;
+ magenta: Colors;
+ cyan: Colors;
+ white: Colors;
+ gray: Colors;
+ grey: Colors;
+
+ // bright colors
+ blackBright: Colors;
+ redBright: Colors;
+ greenBright: Colors;
+ yellowBright: Colors;
+ blueBright: Colors;
+ magentaBright: Colors;
+ cyanBright: Colors;
+ whiteBright: Colors;
+
+ // background colors
+ bgBlack: Colors;
+ bgRed: Colors;
+ bgGreen: Colors;
+ bgYellow: Colors;
+ bgBlue: Colors;
+ bgMagenta: Colors;
+ bgCyan: Colors;
+ bgWhite: Colors;
+
+ // bright background colors
+ bgBlackBright: Colors;
+ bgRedBright: Colors;
+ bgGreenBright: Colors;
+ bgYellowBright: Colors;
+ bgBlueBright: Colors;
+ bgMagentaBright: Colors;
+ bgCyanBright: Colors;
+ bgWhiteBright: Colors;
+}
+
+/**
+ * The interface used for formatting strings with color metadata.
+ *
+ * Currently Colors will use the 'ansi-colors' module if it can be loaded.
+ * If it cannot be loaded, then the Colors implementation is a straight pass-through.
+ *
+ * Colors may also be a no-op if the current environment does not support colors.
+ */
+export let Colors: Colors;
+
+try {
+ /* eslint-disable-next-line node/no-unpublished-require */
+ Colors = require('ansi-colors') as Colors;
+} catch {
+ const passthrough = ((s: string) => s) as Colors;
+ passthrough.enabled = false;
+ passthrough.reset = passthrough;
+ passthrough.bold = passthrough;
+ passthrough.dim = passthrough;
+ passthrough.italic = passthrough;
+ passthrough.underline = passthrough;
+ passthrough.inverse = passthrough;
+ passthrough.hidden = passthrough;
+ passthrough.strikethrough = passthrough;
+ passthrough.black = passthrough;
+ passthrough.red = passthrough;
+ passthrough.green = passthrough;
+ passthrough.yellow = passthrough;
+ passthrough.blue = passthrough;
+ passthrough.magenta = passthrough;
+ passthrough.cyan = passthrough;
+ passthrough.white = passthrough;
+ passthrough.gray = passthrough;
+ passthrough.grey = passthrough;
+ passthrough.blackBright = passthrough;
+ passthrough.redBright = passthrough;
+ passthrough.greenBright = passthrough;
+ passthrough.yellowBright = passthrough;
+ passthrough.blueBright = passthrough;
+ passthrough.magentaBright = passthrough;
+ passthrough.cyanBright = passthrough;
+ passthrough.whiteBright = passthrough;
+ passthrough.bgBlack = passthrough;
+ passthrough.bgRed = passthrough;
+ passthrough.bgGreen = passthrough;
+ passthrough.bgYellow = passthrough;
+ passthrough.bgBlue = passthrough;
+ passthrough.bgMagenta = passthrough;
+ passthrough.bgCyan = passthrough;
+ passthrough.bgWhite = passthrough;
+ passthrough.bgBlackBright = passthrough;
+ passthrough.bgRedBright = passthrough;
+ passthrough.bgGreenBright = passthrough;
+ passthrough.bgYellowBright = passthrough;
+ passthrough.bgBlueBright = passthrough;
+ passthrough.bgMagentaBright = passthrough;
+ passthrough.bgCyanBright = passthrough;
+ passthrough.bgWhiteBright = passthrough;
+ Colors = passthrough;
+}
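Because each style property of `Colors` is itself a callable `Colors`, styles compose by nesting. A brief usage sketch (the message strings are illustrative):

```
import { Colors } from './colors.js';

// Each property is itself a Colors function, so styles nest.
const label = Colors.bold(Colors.red('FAIL'));
const detail = Colors.dim('expected 4, got 5');

// If 'ansi-colors' could not be loaded, every style above is a pass-through
// and the plain strings are logged unchanged.
console.log(`${label} ${detail}`);
```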
diff --git a/dom/webgpu/tests/cts/checkout/src/common/util/data_tables.ts b/dom/webgpu/tests/cts/checkout/src/common/util/data_tables.ts
new file mode 100644
index 0000000000..dc57328ab2
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/util/data_tables.ts
@@ -0,0 +1,129 @@
+import { ResolveType, ZipKeysWithValues } from './types.js';
+
+export type valueof<K> = K[keyof K];
+
+export function keysOf<T extends string>(obj: { [k in T]: unknown }): readonly T[] {
+ return Object.keys(obj) as unknown[] as T[];
+}
+
+export function numericKeysOf<T>(obj: object): readonly T[] {
+ return Object.keys(obj).map(n => Number(n)) as unknown[] as T[];
+}
+
+/**
+ * @returns a new Record from `objects`, using the string returned by each object's toString() as the keys
+ * and the objects as the values.
+ */
+export function objectsToRecord<T extends Object>(objects: readonly T[]): Record<string, T> {
+ const record = {};
+ return objects.reduce((obj, type) => {
+ return {
+ ...obj,
+ [type.toString()]: type,
+ };
+ }, record);
+}
+
+/**
+ * Creates an info lookup object from a more nicely-formatted table. See below for examples.
+ *
+ * Note: Using `as const` on the arguments to this function is necessary to infer the correct type.
+ */
+export function makeTable<
+ Members extends readonly string[],
+ Defaults extends readonly unknown[],
+ Table extends { readonly [k: string]: readonly unknown[] },
+>(
+ members: Members,
+ defaults: Defaults,
+ table: Table
+): {
+ readonly [k in keyof Table]: ResolveType<ZipKeysWithValues<Members, Table[k], Defaults>>;
+} {
+ const result: { [k: string]: { [m: string]: unknown } } = {};
+ for (const [k, v] of Object.entries<readonly unknown[]>(table)) {
+ const item: { [m: string]: unknown } = {};
+ for (let i = 0; i < members.length; ++i) {
+ item[members[i]] = v[i] ?? defaults[i];
+ }
+ result[k] = item;
+ }
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ return result as any;
+}
+
+/**
+ * Creates an info lookup object from a more nicely-formatted table.
+ *
+ * Note: Using `as const` on the arguments to this function is necessary to infer the correct type.
+ *
+ * Example:
+ *
+ * ```
+ * const t = makeTableRenameAndFilter(
+ *   { default: 'c' }, // columnRenames
+ * ['a', 'default', 'd'], // columnsKept
+ * ['a', 'b', 'c', 'd'], // columns
+ * [123, 456, 789, 1011], // defaults
+ * { // table
+ * foo: [1, 2, 3, 4],
+ * bar: [5, , , 8],
+ * moo: [ , 9,10, ],
+ * }
+ * );
+ *
+ * // t = {
+ * // foo: { a: 1, default: 3, d: 4 },
+ * // bar: { a: 5, default: 789, d: 8 },
+ * // moo: { a: 123, default: 10, d: 1011 },
+ * // };
+ * ```
+ *
+ * MAINTENANCE_TODO: `ZipKeysWithValues<Members, Table[k], Defaults>` is incorrect
+ * because Members no longer maps to Table[k]. It's not clear if this is even possible to fix
+ * because it requires mapping, not zipping. Maybe passing in an index mapping
+ * would fix it (which is gross) but if you have columnsKept as [0, 2, 3] then maybe it would
+ * be possible to generate the correct type? I don't think we can generate the map at compile time
+ * so we'd have to hand-code it. Other ideas: don't generate kLimitsInfoCore and kLimitsInfoCompat
+ * where they are keys of infos. Instead, generate kLimitsInfoCoreDefaults, kLimitsInfoCoreMaximums,
+ * kLimitsInfoCoreClasses where each is just a `{[k: string]: type}`. Could zip those after or,
+ * maybe that suggests passing in the hard coded indices would work.
+ *
+ * @param columnRenames maps property names in the generated lookup table to the source columns they are read from (e.g. the 'default' property to an actual column of the table).
+ * @param columnsKept the names of properties you want in the generated lookup table. This must be a subset of the columns of the table, except for names that are remapped via the previous argument.
+ * @param columns the names of the columns of the table
+ * @param defaults the default value by column for any element in a row of the table that is undefined
+ * @param table named table rows.
+ */
+export function makeTableRenameAndFilter<
+ Members extends readonly string[],
+ DataMembers extends readonly string[],
+ Defaults extends readonly unknown[],
+ Table extends { readonly [k: string]: readonly unknown[] },
+>(
+ columnRenames: { [key: string]: string },
+ columnsKept: Members,
+ columns: DataMembers,
+ defaults: Defaults,
+ table: Table
+): {
+ readonly [k in keyof Table]: ResolveType<ZipKeysWithValues<Members, Table[k], Defaults>>;
+} {
+ const result: { [k: string]: { [m: string]: unknown } } = {};
+ const keyToIndex = new Map<string, number>(
+ columnsKept.map(name => {
+ const remappedName = columnRenames[name] === undefined ? name : columnRenames[name];
+ return [name, columns.indexOf(remappedName)];
+ })
+ );
+ for (const [k, v] of Object.entries<readonly unknown[]>(table)) {
+ const item: { [m: string]: unknown } = {};
+ for (const member of columnsKept) {
+ const ndx = keyToIndex.get(member)!;
+ item[member] = v[ndx] ?? defaults[ndx];
+ }
+ result[k] = item;
+ }
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ return result as any;
+}
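A minimal sketch of the simpler `makeTable` form, since only `makeTableRenameAndFilter` is illustrated above; the column names, defaults, and rows here are made up for illustration, not a real format table.

```
import { makeTable } from './data_tables.js';

// Rows list one value per column; holes fall back to the per-column defaults.
// `as const` on the arguments is required to infer the exact result type.
const kFormatInfo = makeTable(
  ['blockWidth', 'blockHeight', 'bytesPerBlock'] as const, // members
  [1, 1, undefined] as const,                              // defaults
  {
    rgba8unorm: [, , 4],
    'bc1-rgba-unorm': [4, 4, 8],
  } as const
);

// kFormatInfo.rgba8unorm        -> { blockWidth: 1, blockHeight: 1, bytesPerBlock: 4 }
// kFormatInfo['bc1-rgba-unorm'] -> { blockWidth: 4, blockHeight: 4, bytesPerBlock: 8 }
```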
diff --git a/dom/webgpu/tests/cts/checkout/src/common/util/navigator_gpu.ts b/dom/webgpu/tests/cts/checkout/src/common/util/navigator_gpu.ts
new file mode 100644
index 0000000000..4110a0edb5
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/util/navigator_gpu.ts
@@ -0,0 +1,86 @@
+/// <reference types="@webgpu/types" />
+
+import { TestCaseRecorder } from '../framework/fixture.js';
+
+import { ErrorWithExtra, assert, objectEquals } from './util.js';
+
+/**
+ * Finds and returns the `navigator.gpu` object (or equivalent, for non-browser implementations).
+ * Throws an exception if not found.
+ */
+function defaultGPUProvider(): GPU {
+ assert(
+ typeof navigator !== 'undefined' && navigator.gpu !== undefined,
+ 'No WebGPU implementation found'
+ );
+ return navigator.gpu;
+}
+
+/**
+ * GPUProvider is a function that creates and returns a new GPU instance.
+ * May throw an exception if a GPU cannot be created.
+ */
+export type GPUProvider = () => GPU;
+
+let gpuProvider: GPUProvider = defaultGPUProvider;
+
+/**
+ * Sets the function to create and return a new GPU instance.
+ */
+export function setGPUProvider(provider: GPUProvider) {
+  assert(impl === undefined, 'setGPUProvider() must not be called after getGPU()');
+ gpuProvider = provider;
+}
+
+let impl: GPU | undefined = undefined;
+
+let defaultRequestAdapterOptions: GPURequestAdapterOptions | undefined;
+
+export function setDefaultRequestAdapterOptions(options: GPURequestAdapterOptions) {
+ // It's okay to call this if you don't change the options
+ if (objectEquals(options, defaultRequestAdapterOptions)) {
+ return;
+ }
+ if (impl) {
+ throw new Error('must call setDefaultRequestAdapterOptions before getGPU');
+ }
+ defaultRequestAdapterOptions = { ...options };
+}
+
+export function getDefaultRequestAdapterOptions() {
+ return defaultRequestAdapterOptions;
+}
+
+/**
+ * Finds and returns the `navigator.gpu` object (or equivalent, for non-browser implementations).
+ * Throws an exception if not found.
+ */
+export function getGPU(recorder: TestCaseRecorder | null): GPU {
+ if (impl) {
+ return impl;
+ }
+
+ impl = gpuProvider();
+
+ if (defaultRequestAdapterOptions) {
+ // eslint-disable-next-line @typescript-eslint/unbound-method
+ const oldFn = impl.requestAdapter;
+ impl.requestAdapter = function (
+ options?: GPURequestAdapterOptions
+ ): Promise<GPUAdapter | null> {
+ const promise = oldFn.call(this, { ...defaultRequestAdapterOptions, ...options });
+ if (recorder) {
+ void promise.then(async adapter => {
+ if (adapter) {
+ const info = await adapter.requestAdapterInfo();
+ const infoString = `Adapter: ${info.vendor} / ${info.architecture} / ${info.device}`;
+ recorder.debug(new ErrorWithExtra(infoString, () => ({ adapterInfo: info })));
+ }
+ });
+ }
+ return promise;
+ };
+ }
+
+ return impl;
+}
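A short sketch of how a runner might use these hooks; the option value and the helper name are illustrative. Both setters must run before the first `getGPU()` call, because the returned `GPU` object is cached.

```
import { getGPU, setDefaultRequestAdapterOptions } from './navigator_gpu.js';

async function requestDefaultAdapter(): Promise<GPUAdapter | null> {
  // Must run before the first getGPU() call; afterwards the GPU object is
  // cached and the defaults are baked into its patched requestAdapter.
  setDefaultRequestAdapterOptions({ powerPreference: 'low-power' });

  // Pass a TestCaseRecorder instead of null to have adapter info logged.
  const gpu = getGPU(null);
  return gpu.requestAdapter(); // options are merged with the defaults above
}
```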
diff --git a/dom/webgpu/tests/cts/checkout/src/common/util/preprocessor.ts b/dom/webgpu/tests/cts/checkout/src/common/util/preprocessor.ts
new file mode 100644
index 0000000000..6a26b290bc
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/util/preprocessor.ts
@@ -0,0 +1,149 @@
+import { assert } from './util.js';
+
+// The state of the preprocessor is a stack of States.
+type StateStack = { allowsFollowingElse: boolean; state: State }[];
+const enum State {
+ Seeking, // Still looking for a passing condition
+ Passing, // Currently inside a passing condition (the root is always in this state)
+ Skipping, // Have already seen a passing condition; now skipping the rest
+}
+
+// The transitions in the state space are the following preprocessor directives:
+// - Sibling elif
+// - Sibling else
+// - Sibling endif
+// - Child if
+abstract class Directive {
+ private readonly depth: number;
+
+ constructor(depth: number) {
+ this.depth = depth;
+ }
+
+ protected checkDepth(stack: StateStack): void {
+ assert(
+ stack.length === this.depth,
+ `Number of "$"s must match nesting depth, currently ${stack.length} (e.g. $if $$if $$endif $endif)`
+ );
+ }
+
+ abstract applyTo(stack: StateStack): void;
+}
+
+class If extends Directive {
+ private readonly predicate: boolean;
+
+ constructor(depth: number, predicate: boolean) {
+ super(depth);
+ this.predicate = predicate;
+ }
+
+ applyTo(stack: StateStack) {
+ this.checkDepth(stack);
+ const parentState = stack[stack.length - 1].state;
+ stack.push({
+ allowsFollowingElse: true,
+ state:
+ parentState !== State.Passing
+ ? State.Skipping
+ : this.predicate
+ ? State.Passing
+ : State.Seeking,
+ });
+ }
+}
+
+class ElseIf extends If {
+ override applyTo(stack: StateStack) {
+ assert(stack.length >= 1);
+ const { allowsFollowingElse, state: siblingState } = stack.pop()!;
+ this.checkDepth(stack);
+ assert(allowsFollowingElse, 'pp.elif after pp.else');
+ if (siblingState !== State.Seeking) {
+ stack.push({ allowsFollowingElse: true, state: State.Skipping });
+ } else {
+ super.applyTo(stack);
+ }
+ }
+}
+
+class Else extends Directive {
+ applyTo(stack: StateStack) {
+ assert(stack.length >= 1);
+ const { allowsFollowingElse, state: siblingState } = stack.pop()!;
+ this.checkDepth(stack);
+ assert(allowsFollowingElse, 'pp.else after pp.else');
+ stack.push({
+ allowsFollowingElse: false,
+ state: siblingState === State.Seeking ? State.Passing : State.Skipping,
+ });
+ }
+}
+
+class EndIf extends Directive {
+ applyTo(stack: StateStack) {
+ stack.pop();
+ this.checkDepth(stack);
+ }
+}
+
+/**
+ * A simple template-based, non-line-based preprocessor implementing if/elif/else/endif.
+ *
+ * @example
+ * ```
+ * const shader = pp`
+ * ${pp._if(expr)}
+ * const x: ${type} = ${value};
+ * ${pp._elif(expr)}
+ * ${pp.__if(expr)}
+ * ...
+ * ${pp.__else}
+ * ...
+ * ${pp.__endif}
+ * ${pp._endif}`;
+ * ```
+ *
+ * @param strings - The array of constant string chunks of the template string.
+ * @param ...values - The array of interpolated `${}` values within the template string.
+ */
+export function pp(
+ strings: TemplateStringsArray,
+ ...values: ReadonlyArray<Directive | string | number>
+): string {
+ let result = '';
+ const stateStack: StateStack = [{ allowsFollowingElse: false, state: State.Passing }];
+
+ for (let i = 0; i < values.length; ++i) {
+ const passing = stateStack[stateStack.length - 1].state === State.Passing;
+ if (passing) {
+ result += strings[i];
+ }
+
+ const value = values[i];
+ if (value instanceof Directive) {
+ value.applyTo(stateStack);
+ } else {
+ if (passing) {
+ result += value;
+ }
+ }
+ }
+ assert(stateStack.length === 1, 'Unterminated preprocessor condition at end of file');
+ result += strings[values.length];
+
+ return result;
+}
+pp._if = (predicate: boolean) => new If(1, predicate);
+pp._elif = (predicate: boolean) => new ElseIf(1, predicate);
+pp._else = new Else(1);
+pp._endif = new EndIf(1);
+pp.__if = (predicate: boolean) => new If(2, predicate);
+pp.__elif = (predicate: boolean) => new ElseIf(2, predicate);
+pp.__else = new Else(2);
+pp.__endif = new EndIf(2);
+pp.___if = (predicate: boolean) => new If(3, predicate);
+pp.___elif = (predicate: boolean) => new ElseIf(3, predicate);
+pp.___else = new Else(3);
+pp.___endif = new EndIf(3);
+// Add more if needed.
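To make the directive semantics concrete, a small sketch of what `pp` evaluates to; the predicate and interpolated values are illustrative.

```
import { pp } from './preprocessor.js';

const isCompat = false;

// Chunks under a non-passing branch are dropped entirely, including their
// interpolated values; only the passing branch contributes to the output.
const source = pp`
  const limit = 8;
  ${pp._if(isCompat)}
  const extra = ${123};
  ${pp._else}
  const extra = ${456};
  ${pp._endif}`;

// source contains "const limit = 8;" and "const extra = 456;", and nothing
// from the skipped branch.
```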
diff --git a/dom/webgpu/tests/cts/checkout/src/common/util/timeout.ts b/dom/webgpu/tests/cts/checkout/src/common/util/timeout.ts
new file mode 100644
index 0000000000..13c3b7fb90
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/util/timeout.ts
@@ -0,0 +1,7 @@
+/** Defined by WPT. Like `setTimeout`, but applies a timeout multiplier for slow test systems. */
+declare const step_timeout: undefined | typeof setTimeout;
+
+/**
+ * Equivalent of `setTimeout`, but redirects to WPT's `step_timeout` when it is defined.
+ */
+export const timeout = typeof step_timeout !== 'undefined' ? step_timeout : setTimeout;
diff --git a/dom/webgpu/tests/cts/checkout/src/common/util/types.ts b/dom/webgpu/tests/cts/checkout/src/common/util/types.ts
new file mode 100644
index 0000000000..746095a23e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/util/types.ts
@@ -0,0 +1,97 @@
+/** Forces a type to resolve its type definitions, to make it readable/debuggable. */
+export type ResolveType<T> = T extends object
+ ? T extends infer O
+ ? { [K in keyof O]: ResolveType<O[K]> }
+ : never
+ : T;
+
+/** Returns the type `true` iff X and Y are exactly equal */
+export type TypeEqual<X, Y> = (<T>() => T extends X ? 1 : 2) extends <T>() => T extends Y ? 1 : 2
+ ? true
+ : false;
+
+/* eslint-disable-next-line @typescript-eslint/no-unused-vars */
+export function assertTypeTrue<_ extends true>() {}
+
+/** `ReadonlyArray` of `ReadonlyArray`s. */
+export type ROArrayArray<T> = ReadonlyArray<ReadonlyArray<T>>;
+/** `ReadonlyArray` of `ReadonlyArray`s of `ReadonlyArray`s. */
+export type ROArrayArrayArray<T> = ReadonlyArray<ReadonlyArray<ReadonlyArray<T>>>;
+
+/**
+ * Deep version of the Readonly<> type, with support for tuples (up to length 7).
+ * <https://gist.github.com/masterkidan/7322752f569b1bba53e0426266768623>
+ */
+export type DeepReadonly<T> = T extends [infer A]
+ ? DeepReadonlyObject<[A]>
+ : T extends [infer A, infer B]
+ ? DeepReadonlyObject<[A, B]>
+ : T extends [infer A, infer B, infer C]
+ ? DeepReadonlyObject<[A, B, C]>
+ : T extends [infer A, infer B, infer C, infer D]
+ ? DeepReadonlyObject<[A, B, C, D]>
+ : T extends [infer A, infer B, infer C, infer D, infer E]
+ ? DeepReadonlyObject<[A, B, C, D, E]>
+ : T extends [infer A, infer B, infer C, infer D, infer E, infer F]
+ ? DeepReadonlyObject<[A, B, C, D, E, F]>
+ : T extends [infer A, infer B, infer C, infer D, infer E, infer F, infer G]
+ ? DeepReadonlyObject<[A, B, C, D, E, F, G]>
+ : T extends Map<infer U, infer V>
+ ? ReadonlyMap<DeepReadonlyObject<U>, DeepReadonlyObject<V>>
+ : T extends Set<infer U>
+ ? ReadonlySet<DeepReadonlyObject<U>>
+ : T extends Promise<infer U>
+ ? Promise<DeepReadonlyObject<U>>
+ : T extends Primitive
+ ? T
+ : T extends (infer A)[]
+ ? DeepReadonlyArray<A>
+ : DeepReadonlyObject<T>;
+
+type Primitive = string | number | boolean | undefined | null | Function | symbol;
+type DeepReadonlyArray<T> = ReadonlyArray<DeepReadonly<T>>;
+type DeepReadonlyObject<T> = { readonly [P in keyof T]: DeepReadonly<T[P]> };
+
+/**
+ * Computes the intersection of a set of types, given the union of those types.
+ *
+ * From: https://stackoverflow.com/a/56375136
+ */
+export type UnionToIntersection<U> =
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ (U extends any ? (k: U) => void : never) extends (k: infer I) => void ? I : never;
+
+/** "Type asserts" that `X` is a subtype of `Y`. */
+type EnsureSubtype<X, Y> = X extends Y ? X : never;
+
+type TupleHeadOr<T, Default> = T extends readonly [infer H, ...(readonly unknown[])] ? H : Default;
+type TupleTailOr<T, Default> = T extends readonly [unknown, ...infer Tail] ? Tail : Default;
+type TypeOr<T, Default> = T extends undefined ? Default : T;
+
+/**
+ * Zips a key tuple type and a value tuple type together into an object.
+ *
+ * @template Keys Keys of the resulting object.
+ * @template Values Values of the resulting object. If a key corresponds to a `Values` member that
+ * is undefined or past the end, it defaults to the corresponding `Defaults` member.
+ * @template Defaults Default values. If a key corresponds to a `Defaults` member that is past the
+ * end, the default falls back to `undefined`.
+ */
+export type ZipKeysWithValues<
+ Keys extends readonly string[],
+ Values extends readonly unknown[],
+ Defaults extends readonly unknown[],
+> =
+ //
+ Keys extends readonly [infer KHead, ...infer KTail]
+ ? {
+ readonly [k in EnsureSubtype<KHead, string>]: TypeOr<
+ TupleHeadOr<Values, undefined>,
+ TupleHeadOr<Defaults, undefined>
+ >;
+ } & ZipKeysWithValues<
+ EnsureSubtype<KTail, readonly string[]>,
+ TupleTailOr<Values, []>,
+ TupleTailOr<Defaults, []>
+ >
+ : {}; // K exhausted
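A compile-time sketch of how these helpers fit together; the key names and literal values are illustrative.

```
import { ResolveType, TypeEqual, ZipKeysWithValues, assertTypeTrue } from './types.js';

// 'width' takes its value from Values; 'height' is past the end of Values,
// so it falls back to the corresponding Defaults entry.
type Zipped = ResolveType<ZipKeysWithValues<['width', 'height'], [256], [1, 1]>>;

// These calls only type-check if the computed property types match exactly.
assertTypeTrue<TypeEqual<Zipped['width'], 256>>();
assertTypeTrue<TypeEqual<Zipped['height'], 1>>();
```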
diff --git a/dom/webgpu/tests/cts/checkout/src/common/util/util.ts b/dom/webgpu/tests/cts/checkout/src/common/util/util.ts
new file mode 100644
index 0000000000..9433aaddb0
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/util/util.ts
@@ -0,0 +1,476 @@
+import { Float16Array } from '../../external/petamoriken/float16/float16.js';
+import { SkipTestCase } from '../framework/fixture.js';
+import { globalTestConfig } from '../framework/test_config.js';
+import { Logger } from '../internal/logging/logger.js';
+
+import { keysOf } from './data_tables.js';
+import { timeout } from './timeout.js';
+
+/**
+ * Error with arbitrary `extra` data attached, for debugging.
+ * The extra data is omitted if not running the test in debug mode (`?debug=1`).
+ */
+export class ErrorWithExtra extends Error {
+ readonly extra: { [k: string]: unknown };
+
+ /**
+ * `extra` function is only called if in debug mode.
+ * If an `ErrorWithExtra` is passed, its message is used and its extras are passed through.
+ */
+ constructor(message: string, extra: () => {});
+ constructor(base: ErrorWithExtra, newExtra: () => {});
+ constructor(baseOrMessage: string | ErrorWithExtra, newExtra: () => {}) {
+ const message = typeof baseOrMessage === 'string' ? baseOrMessage : baseOrMessage.message;
+ super(message);
+
+ const oldExtras = baseOrMessage instanceof ErrorWithExtra ? baseOrMessage.extra : {};
+ this.extra = Logger.globalDebugMode
+ ? { ...oldExtras, ...newExtra() }
+ : { omitted: 'pass ?debug=1' };
+ }
+}
+
+/**
+ * Asserts `condition` is true. Otherwise, throws an `Error` with the provided message.
+ */
+export function assert(condition: boolean, msg?: string | (() => string)): asserts condition {
+ if (!condition) {
+ throw new Error(msg && (typeof msg === 'string' ? msg : msg()));
+ }
+}
+
+/** If the argument is an Error, throw it. Otherwise, pass it back. */
+export function assertOK<T>(value: Error | T): T {
+ if (value instanceof Error) {
+ throw value;
+ }
+ return value;
+}
+
+/** Options for assertReject, shouldReject, and friends. */
+export type ExceptionCheckOptions = { allowMissingStack?: boolean; message?: string };
+
+/**
+ * Resolves if the provided promise rejects; rejects if it does not.
+ */
+export async function assertReject(
+ expectedName: string,
+ p: Promise<unknown>,
+ { allowMissingStack = false, message }: ExceptionCheckOptions = {}
+): Promise<void> {
+ try {
+ await p;
+ unreachable(message);
+ } catch (ex) {
+ // Asserted as expected
+ if (!allowMissingStack) {
+ const m = message ? ` (${message})` : '';
+ assert(
+ ex instanceof Error && typeof ex.stack === 'string',
+ 'threw as expected, but missing stack' + m
+ );
+ }
+ }
+}
+
+/**
+ * Assert this code is unreachable. Unconditionally throws an `Error`.
+ */
+export function unreachable(msg?: string): never {
+ throw new Error(msg);
+}
+
+/**
+ * Throw a `SkipTestCase` exception, which skips the test case.
+ */
+export function skipTestCase(msg: string): never {
+ throw new SkipTestCase(msg);
+}
+
+/**
+ * The `performance` interface.
+ * It is available in all browsers, but it is not in scope by default in Node.
+ */
+const perf = typeof performance !== 'undefined' ? performance : require('perf_hooks').performance;
+
+/**
+ * Calls the appropriate `performance.now()` depending on whether running in a browser or Node.
+ */
+export function now(): number {
+ return perf.now();
+}
+
+/**
+ * Returns a promise which resolves after the specified time.
+ */
+export function resolveOnTimeout(ms: number): Promise<void> {
+ return new Promise(resolve => {
+ timeout(() => {
+ resolve();
+ }, ms);
+ });
+}
+
+export class PromiseTimeoutError extends Error {}
+
+/**
+ * Returns a promise which rejects after the specified time.
+ */
+export function rejectOnTimeout(ms: number, msg: string): Promise<never> {
+ return new Promise((_resolve, reject) => {
+ timeout(() => {
+ reject(new PromiseTimeoutError(msg));
+ }, ms);
+ });
+}
+
+/**
+ * Takes a promise `p`, and returns a new one which rejects if `p` takes too long,
+ * and otherwise passes the result through.
+ */
+export function raceWithRejectOnTimeout<T>(p: Promise<T>, ms: number, msg: string): Promise<T> {
+ if (globalTestConfig.noRaceWithRejectOnTimeout) {
+ return p;
+ }
+ // Setup a promise that will reject after `ms` milliseconds. We cancel this timeout when
+ // `p` is finalized, so the JavaScript VM doesn't hang around waiting for the timer to
+ // complete, once the test runner has finished executing the tests.
+ const timeoutPromise = new Promise((_resolve, reject) => {
+ const handle = timeout(() => {
+ reject(new PromiseTimeoutError(msg));
+ }, ms);
+ p = p.finally(() => clearTimeout(handle));
+ });
+ return Promise.race([p, timeoutPromise]) as Promise<T>;
+}
+
+/**
+ * Takes a promise `p` and returns a new one which rejects if `p` resolves or rejects,
+ * and otherwise resolves after the specified time.
+ */
+export function assertNotSettledWithinTime(
+ p: Promise<unknown>,
+ ms: number,
+ msg: string
+): Promise<undefined> {
+ // Rejects regardless of whether p resolves or rejects.
+ const rejectWhenSettled = p.then(() => Promise.reject(new Error(msg)));
+ // Resolves after `ms` milliseconds.
+ const timeoutPromise = new Promise<undefined>(resolve => {
+ const handle = timeout(() => {
+ resolve(undefined);
+ }, ms);
+ void p.finally(() => clearTimeout(handle));
+ });
+ return Promise.race([rejectWhenSettled, timeoutPromise]);
+}
+
+/**
+ * Returns a `Promise.reject()`, but also registers a dummy `.catch()` handler so it doesn't count
+ * as an uncaught promise rejection in the runtime.
+ */
+export function rejectWithoutUncaught<T>(err: unknown): Promise<T> {
+ const p = Promise.reject(err);
+ // Suppress uncaught promise rejection.
+ p.catch(() => {});
+ return p;
+}
+
+/**
+ * Returns true if v is a plain JavaScript object.
+ */
+export function isPlainObject(v: unknown) {
+ return !!v && Object.getPrototypeOf(v).constructor === Object.prototype.constructor;
+}
+
+/**
+ * Makes a copy of a JS `object`, with the keys reordered into sorted order.
+ */
+export function sortObjectByKey(v: { [k: string]: unknown }): { [k: string]: unknown } {
+ const sortedObject: { [k: string]: unknown } = {};
+ for (const k of Object.keys(v).sort()) {
+ sortedObject[k] = v[k];
+ }
+ return sortedObject;
+}
+
+/**
+ * Determines whether two JS values are equal, recursing into objects and arrays.
+ * NaN is treated specially, such that `objectEquals(NaN, NaN)` is true. +/-0.0 are treated as equal
+ * by default, but can optionally be distinguished.
+ * @param x the first JS value to compare
+ * @param y the second JS value to compare
+ * @param distinguishSignedZero if set to true, treat 0.0 and -0.0 as unequal. Defaults to false.
+ */
+export function objectEquals(
+ x: unknown,
+ y: unknown,
+ distinguishSignedZero: boolean = false
+): boolean {
+ if (typeof x !== 'object' || typeof y !== 'object') {
+ if (typeof x === 'number' && typeof y === 'number' && Number.isNaN(x) && Number.isNaN(y)) {
+ return true;
+ }
+ // Object.is(0.0, -0.0) is false while (0.0 === -0.0) is true. Other than +/-0.0 and NaN cases,
+ // Object.is works in the same way as ===.
+ return distinguishSignedZero ? Object.is(x, y) : x === y;
+ }
+ if (x === null || y === null) return x === y;
+ if (x.constructor !== y.constructor) return false;
+ if (x instanceof Function) return x === y;
+ if (x instanceof RegExp) return x === y;
+ if (x === y || x.valueOf() === y.valueOf()) return true;
+ if (Array.isArray(x) && Array.isArray(y) && x.length !== y.length) return false;
+ if (x instanceof Date) return false;
+ if (!(x instanceof Object)) return false;
+ if (!(y instanceof Object)) return false;
+
+ const x1 = x as { [k: string]: unknown };
+ const y1 = y as { [k: string]: unknown };
+ const p = Object.keys(x);
+ return Object.keys(y).every(i => p.indexOf(i) !== -1) && p.every(i => objectEquals(x1[i], y1[i]));
+}
+
+/**
+ * Generates a range of values `fn(0)..fn(n-1)`.
+ */
+export function range<T>(n: number, fn: (i: number) => T): T[] {
+ return [...new Array(n)].map((_, i) => fn(i));
+}
+
+/**
+ * Generates a range of values `fn(0)..fn(n-1)`.
+ */
+export function* iterRange<T>(n: number, fn: (i: number) => T): Iterable<T> {
+ for (let i = 0; i < n; ++i) {
+ yield fn(i);
+ }
+}
+
+/** Creates a (reusable) iterable object that maps `f` over `xs`, lazily. */
+export function mapLazy<T, R>(xs: Iterable<T>, f: (x: T) => R): Iterable<R> {
+ return {
+ *[Symbol.iterator]() {
+ for (const x of xs) {
+ yield f(x);
+ }
+ },
+ };
+}
+
+const ReorderOrders = {
+ forward: true,
+ backward: true,
+ shiftByHalf: true,
+};
+export type ReorderOrder = keyof typeof ReorderOrders;
+export const kReorderOrderKeys = keysOf(ReorderOrders);
+
+/**
+ * Creates a new array from the given array with the first half
+ * swapped with the last half.
+ */
+export function shiftByHalf<R>(arr: R[]): R[] {
+ const len = arr.length;
+ const half = (len / 2) | 0;
+ const firstHalf = arr.splice(0, half);
+ return [...arr, ...firstHalf];
+}
+
+/**
+ * Creates a reordered array from the input array based on the Order
+ */
+export function reorder<R>(order: ReorderOrder, arr: R[]): R[] {
+ switch (order) {
+ case 'forward':
+ return arr.slice();
+ case 'backward':
+ return arr.slice().reverse();
+ case 'shiftByHalf': {
+ // should this be pseudo random?
+ return shiftByHalf(arr);
+ }
+ }
+}
+
+const TypedArrayBufferViewInstances = [
+ new Uint8Array(),
+ new Uint8ClampedArray(),
+ new Uint16Array(),
+ new Uint32Array(),
+ new Int8Array(),
+ new Int16Array(),
+ new Int32Array(),
+ new Float16Array(),
+ new Float32Array(),
+ new Float64Array(),
+] as const;
+
+export type TypedArrayBufferView = (typeof TypedArrayBufferViewInstances)[number];
+
+export type TypedArrayBufferViewConstructor<A extends TypedArrayBufferView = TypedArrayBufferView> =
+ {
+ // Interface copied from Uint8Array, and made generic.
+ readonly prototype: A;
+ readonly BYTES_PER_ELEMENT: number;
+
+ new (): A;
+ new (elements: Iterable<number>): A;
+ new (array: ArrayLike<number> | ArrayBufferLike): A;
+ new (buffer: ArrayBufferLike, byteOffset?: number, length?: number): A;
+ new (length: number): A;
+
+ from(arrayLike: ArrayLike<number>): A;
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ from(arrayLike: Iterable<number>, mapfn?: (v: number, k: number) => number, thisArg?: any): A;
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ from<T>(arrayLike: ArrayLike<T>, mapfn: (v: T, k: number) => number, thisArg?: any): A;
+ of(...items: number[]): A;
+ };
+
+export const kTypedArrayBufferViews: {
+ readonly [k: string]: TypedArrayBufferViewConstructor;
+} = {
+ ...(() => {
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ const result: { [k: string]: any } = {};
+ for (const v of TypedArrayBufferViewInstances) {
+ result[v.constructor.name] = v.constructor;
+ }
+ return result;
+ })(),
+};
+export const kTypedArrayBufferViewKeys = keysOf(kTypedArrayBufferViews);
+export const kTypedArrayBufferViewConstructors = Object.values(kTypedArrayBufferViews);
+
+interface TypedArrayMap {
+ Int8Array: Int8Array;
+ Uint8Array: Uint8Array;
+ Int16Array: Int16Array;
+ Uint16Array: Uint16Array;
+ Uint8ClampedArray: Uint8ClampedArray;
+ Int32Array: Int32Array;
+ Uint32Array: Uint32Array;
+ Float32Array: Float32Array;
+ Float64Array: Float64Array;
+ BigInt64Array: BigInt64Array;
+ BigUint64Array: BigUint64Array;
+}
+
+type TypedArrayParam<K extends keyof TypedArrayMap> = {
+ type: K;
+ data: readonly number[];
+};
+
+/**
+ * Creates a case parameter for a typedarray.
+ *
+ * You can't put typedarrays in case parameters directly, so instead of
+ *
+ * ```
+ * u.combine('data', [
+ * new Uint8Array([1, 2, 3]),
+ * new Float32Array([4, 5, 6]),
+ * ])
+ * ```
+ *
+ * You can use
+ *
+ * ```
+ * u.combine('data', [
+ *    typedArrayParam('Uint8Array', [1, 2, 3]),
+ *    typedArrayParam('Float32Array', [4, 5, 6]),
+ * ])
+ * ```
+ *
+ * and then convert the params to typedarrays, e.g.
+ *
+ * ```
+ * .fn(t => {
+ * const data = t.params.data.map(v => typedArrayFromParam(v));
+ * })
+ * ```
+ */
+export function typedArrayParam<K extends keyof TypedArrayMap>(
+ type: K,
+ data: number[]
+): TypedArrayParam<K> {
+ return { type, data };
+}
+
+export function createTypedArray<K extends keyof TypedArrayMap>(
+ type: K,
+ data: readonly number[]
+): TypedArrayMap[K] {
+ return new kTypedArrayBufferViews[type](data) as TypedArrayMap[K];
+}
+
+/**
+ * Converts a TypedArrayParam to a typedarray. See typedArrayParam
+ */
+export function typedArrayFromParam<K extends keyof TypedArrayMap>(
+ param: TypedArrayParam<K>
+): TypedArrayMap[K] {
+ const { type, data } = param;
+ return createTypedArray(type, data);
+}
+
+function subarrayAsU8(
+ buf: ArrayBuffer | TypedArrayBufferView,
+ { start = 0, length }: { start?: number; length?: number }
+): Uint8Array | Uint8ClampedArray {
+ if (buf instanceof ArrayBuffer) {
+ return new Uint8Array(buf, start, length);
+ } else if (buf instanceof Uint8Array || buf instanceof Uint8ClampedArray) {
+ // Don't wrap in new views if we don't need to.
+ if (start === 0 && (length === undefined || length === buf.byteLength)) {
+ return buf;
+ }
+ }
+ const byteOffset = buf.byteOffset + start * buf.BYTES_PER_ELEMENT;
+ const byteLength =
+ length !== undefined
+ ? length * buf.BYTES_PER_ELEMENT
+ : buf.byteLength - (byteOffset - buf.byteOffset);
+ return new Uint8Array(buf.buffer, byteOffset, byteLength);
+}
+
+/**
+ * Copy a range of bytes from one ArrayBuffer or TypedArray to another.
+ *
+ * `start`/`length` are in elements (or in bytes, if ArrayBuffer).
+ */
+export function memcpy(
+ src: { src: ArrayBuffer | TypedArrayBufferView; start?: number; length?: number },
+ dst: { dst: ArrayBuffer | TypedArrayBufferView; start?: number }
+): void {
+ subarrayAsU8(dst.dst, dst).set(subarrayAsU8(src.src, src));
+}
+
+/**
+ * Used to create a value that is specified by multiplying some runtime value
+ * by a constant and then adding a constant to it.
+ */
+export interface ValueTestVariant {
+ mult: number;
+ add: number;
+}
+
+/**
+ * Filters out ValueTestVariants that are the same.
+ */
+export function filterUniqueValueTestVariants(valueTestVariants: ValueTestVariant[]) {
+ return new Map<string, ValueTestVariant>(
+ valueTestVariants.map(v => [`m:${v.mult},a:${v.add}`, v])
+ ).values();
+}
+
+/**
+ * Used to create a value that is specified by multiplying some runtime value
+ * by a constant and then adding a constant to it. This happens often in tests
+ * with limits that can only be known at runtime, and yet we need a way to
+ * add parameters to a test and those parameters must be constants.
+ */
+export function makeValueTestVariant(base: number, variant: ValueTestVariant) {
+ return base * variant.mult + variant.add;
+}
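Two brief sketches for the helpers above: the element-vs-byte convention of `memcpy`, and how a `ValueTestVariant` is resolved against a runtime limit. The concrete numbers are illustrative.

```
import { makeValueTestVariant, memcpy } from './util.js';

// For typed-array views, start/length are counted in elements of that view
// (bytes only when an ArrayBuffer is passed).
const src = new Uint32Array([10, 20, 30, 40]);
const dst = new Uint32Array(4);
memcpy({ src, start: 1, length: 2 }, { dst, start: 0 });
// dst is now Uint32Array [20, 30, 0, 0]

// A variant encodes "limit * mult + add", resolved once the limit is known.
const variant = { mult: 1, add: -1 };
const value = makeValueTestVariant(8, variant); // 8 * 1 + (-1) === 7
```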
diff --git a/dom/webgpu/tests/cts/checkout/src/common/util/wpt_reftest_wait.ts b/dom/webgpu/tests/cts/checkout/src/common/util/wpt_reftest_wait.ts
new file mode 100644
index 0000000000..7d10520bcb
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/util/wpt_reftest_wait.ts
@@ -0,0 +1,24 @@
+import { timeout } from './timeout.js';
+
+// Copied from https://github.com/web-platform-tests/wpt/blob/master/common/reftest-wait.js
+
+/**
+ * Remove the `reftest-wait` class on the document element.
+ * The reftest runner will wait to take a screenshot while
+ * this class is present.
+ *
+ * See https://web-platform-tests.org/writing-tests/reftests.html#controlling-when-comparison-occurs
+ */
+export function takeScreenshot() {
+ document.documentElement.classList.remove('reftest-wait');
+}
+
+/**
+ * Call `takeScreenshot()` after a delay of at least `ms` milliseconds.
+ * @param {number} ms - milliseconds
+ */
+export function takeScreenshotDelayed(ms: number) {
+ timeout(() => {
+ takeScreenshot();
+ }, ms);
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/demo/README.txt b/dom/webgpu/tests/cts/checkout/src/demo/README.txt
new file mode 100644
index 0000000000..3b5654080e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/demo/README.txt
@@ -0,0 +1 @@
+Demo test suite for manually testing test runners.
diff --git a/dom/webgpu/tests/cts/checkout/src/demo/a.spec.ts b/dom/webgpu/tests/cts/checkout/src/demo/a.spec.ts
new file mode 100644
index 0000000000..283d0e8a90
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/demo/a.spec.ts
@@ -0,0 +1,8 @@
+export const description = 'Description for a.spec.ts';
+
+import { makeTestGroup } from '../common/framework/test_group.js';
+import { UnitTest } from '../unittests/unit_test.js';
+
+export const g = makeTestGroup(UnitTest);
+
+g.test('not_implemented_yet').unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/demo/a/README.txt b/dom/webgpu/tests/cts/checkout/src/demo/a/README.txt
new file mode 100644
index 0000000000..62c18e3cc3
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/demo/a/README.txt
@@ -0,0 +1 @@
+README for a/
diff --git a/dom/webgpu/tests/cts/checkout/src/demo/a/b.spec.ts b/dom/webgpu/tests/cts/checkout/src/demo/a/b.spec.ts
new file mode 100644
index 0000000000..7e066591dd
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/demo/a/b.spec.ts
@@ -0,0 +1,6 @@
+export const description = 'Description for b.spec.ts';
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { UnitTest } from '../../unittests/unit_test.js';
+
+export const g = makeTestGroup(UnitTest);
diff --git a/dom/webgpu/tests/cts/checkout/src/demo/a/b/README.txt b/dom/webgpu/tests/cts/checkout/src/demo/a/b/README.txt
new file mode 100644
index 0000000000..eed2f44bbd
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/demo/a/b/README.txt
@@ -0,0 +1 @@
+README for a/b/
diff --git a/dom/webgpu/tests/cts/checkout/src/demo/a/b/c.spec.ts b/dom/webgpu/tests/cts/checkout/src/demo/a/b/c.spec.ts
new file mode 100644
index 0000000000..0ee8f4c182
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/demo/a/b/c.spec.ts
@@ -0,0 +1,80 @@
+export const description = 'Description for c.spec.ts';
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { unreachable } from '../../../common/util/util.js';
+import { UnitTest } from '../../../unittests/unit_test.js';
+
+export const g = makeTestGroup(UnitTest);
+
+g.test('f')
+ .desc(
+ `Test plan for f
+ - Test stuff
+ - Test some more stuff`
+ )
+ .fn(() => {});
+
+g.test('f,g').fn(() => {});
+
+g.test('f,g,h')
+ .paramsSimple([{}, { x: 0 }, { x: 0, y: 0 }])
+ .fn(() => {});
+
+g.test('case_depth_2_in_single_child_test')
+ .paramsSimple([{ x: 0, y: 0 }])
+ .fn(() => {});
+
+g.test('deep_case_tree')
+ .params(u =>
+ u //
+ .combine('x', [1, 2])
+ .combine('y', [1, 2])
+ .combine('z', [1, 2])
+ )
+ .fn(() => {});
+
+g.test('statuses,debug').fn(t => {
+ t.debug('debug');
+});
+
+g.test('statuses,skip').fn(t => {
+ t.skip('skip');
+});
+
+g.test('statuses,warn').fn(t => {
+ t.warn('warn');
+});
+
+g.test('statuses,fail').fn(t => {
+ t.fail('fail');
+});
+
+g.test('statuses,throw').fn(() => {
+ unreachable('unreachable');
+});
+
+g.test('multiple_same_stack').fn(t => {
+ for (let i = 0; i < 3; ++i) {
+ t.fail(
+ i === 2
+ ? 'this should appear after deduplicated line'
+ : 'this should be "seen 2 times with identical stack"'
+ );
+ }
+});
+
+g.test('multiple_same_level').fn(t => {
+ t.fail('this should print a stack');
+ t.fail('this should print a stack');
+ t.fail('this should not print a stack');
+});
+
+g.test('lower_levels_hidden,before').fn(t => {
+ t.warn('warn - this should not print a stack');
+ t.fail('fail');
+});
+
+g.test('lower_levels_hidden,after').fn(t => {
+ t.fail('fail');
+ t.warn('warn - this should not print a stack');
+});
diff --git a/dom/webgpu/tests/cts/checkout/src/demo/a/b/d.spec.ts b/dom/webgpu/tests/cts/checkout/src/demo/a/b/d.spec.ts
new file mode 100644
index 0000000000..1412e53baf
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/demo/a/b/d.spec.ts
@@ -0,0 +1,8 @@
+export const description = 'Description for d.spec.ts';
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { UnitTest } from '../../../unittests/unit_test.js';
+
+export const g = makeTestGroup(UnitTest);
+
+g.test('test_depth_2,in_single_child_file').fn(() => {});
diff --git a/dom/webgpu/tests/cts/checkout/src/demo/file_depth_2/in_single_child_dir/r.spec.ts b/dom/webgpu/tests/cts/checkout/src/demo/file_depth_2/in_single_child_dir/r.spec.ts
new file mode 100644
index 0000000000..2a1adc6f50
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/demo/file_depth_2/in_single_child_dir/r.spec.ts
@@ -0,0 +1,6 @@
+export const description = 'Description for r.spec.ts';
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { UnitTest } from '../../../unittests/unit_test.js';
+
+export const g = makeTestGroup(UnitTest);
diff --git a/dom/webgpu/tests/cts/checkout/src/demo/json.spec.ts b/dom/webgpu/tests/cts/checkout/src/demo/json.spec.ts
new file mode 100644
index 0000000000..a2ccb72137
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/demo/json.spec.ts
@@ -0,0 +1,10 @@
+export const description = 'Description for json.spec.ts';
+
+import { makeTestGroup } from '../common/framework/test_group.js';
+import { UnitTest } from '../unittests/unit_test.js';
+
+export const g = makeTestGroup(UnitTest);
+
+g.test('json')
+ .paramsSimple([{ p: { x: 1, y: 'two' } }])
+ .fn(() => {});
diff --git a/dom/webgpu/tests/cts/checkout/src/demo/subcases.spec.ts b/dom/webgpu/tests/cts/checkout/src/demo/subcases.spec.ts
new file mode 100644
index 0000000000..6b22463f07
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/demo/subcases.spec.ts
@@ -0,0 +1,38 @@
+export const description = 'Tests with subcases';
+
+import { makeTestGroup } from '../common/framework/test_group.js';
+import { UnitTest } from '../unittests/unit_test.js';
+
+export const g = makeTestGroup(UnitTest);
+
+g.test('pass_warn_fail')
+ .params(u =>
+ u
+ .combine('x', [1, 2, 3]) //
+ .beginSubcases()
+ .combine('y', [1, 2, 3])
+ )
+ .fn(t => {
+ const { x, y } = t.params;
+ if (x + y > 5) {
+ t.fail();
+ } else if (x + y > 4) {
+ t.warn();
+ }
+ });
+
+g.test('DOMException,cases')
+ .params(u => u.combine('fail', [false, true]))
+ .fn(t => {
+ if (t.params.fail) {
+ throw new DOMException('Message!', 'Name!');
+ }
+ });
+
+g.test('DOMException,subcases')
+ .paramsSubcasesOnly(u => u.combine('fail', [false, true]))
+ .fn(t => {
+ if (t.params.fail) {
+ throw new DOMException('Message!', 'Name!');
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/external/README.md b/dom/webgpu/tests/cts/checkout/src/external/README.md
new file mode 100644
index 0000000000..84fbf9c732
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/external/README.md
@@ -0,0 +1,31 @@
+# External Modules
+
+This directory contains external modules that are used by the WebGPU
+CTS. These are included in the repo, as opposed to being fetched via a
+package manager or CDN, so that there is a single canonical source of
+truth for the CTS and its tests can be run as a standalone suite
+without needing to pull from a CDN or similar external service.
+
+## Adding modules
+
+Each module should be added deliberately, with clear reasoning about
+what it provides, since the bar for adding new modules should be
+relatively high.
+
+The module must be available under a license compatible with the
+BSD-3-Clause & W3C licenses that currently cover the CTS.
+
+It is preferred to use a single source build of the module if possible.
+
+In addition to the module's source, a LICENSE file should be included
+in the directory, clearly identifying the owner of the module and the
+license it is covered by.
+
+Details of each module, including version, origin, and purpose, should
+be listed below.
+
+## Current Modules
+
+| **Name** | **Origin** | **License** | **Version** | **Purpose** |
+|----------------------|--------------------------------------------------|-------------|-------------|------------------------------------------------|
+| petamoriken/float16 | [github](https://github.com/petamoriken/float16) | MIT | 3.6.6 | Fluent support for f16 numbers via TypedArrays |
diff --git a/dom/webgpu/tests/cts/checkout/src/external/petamoriken/float16/LICENSE.txt b/dom/webgpu/tests/cts/checkout/src/external/petamoriken/float16/LICENSE.txt
new file mode 100644
index 0000000000..e8eacf4e7f
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/external/petamoriken/float16/LICENSE.txt
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2017-2021 Kenta Moriuchi
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/dom/webgpu/tests/cts/checkout/src/external/petamoriken/float16/float16.d.ts b/dom/webgpu/tests/cts/checkout/src/external/petamoriken/float16/float16.d.ts
new file mode 100644
index 0000000000..c9d66ab7ca
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/external/petamoriken/float16/float16.d.ts
@@ -0,0 +1,471 @@
+/**
+ * A typed array of 16-bit float values. The contents are initialized to 0. If the requested number
+ * of bytes could not be allocated an exception is raised.
+ */
+export interface Float16Array {
+ /**
+ * The size in bytes of each element in the array.
+ */
+ readonly BYTES_PER_ELEMENT: number;
+
+ /**
+ * The ArrayBuffer instance referenced by the array.
+ */
+ readonly buffer: ArrayBufferLike;
+
+ /**
+ * The length in bytes of the array.
+ */
+ readonly byteLength: number;
+
+ /**
+ * The offset in bytes of the array.
+ */
+ readonly byteOffset: number;
+
+ [Symbol.iterator](): IterableIterator<number>;
+
+ /**
+ * Returns an array of key, value pairs for every entry in the array
+ */
+ entries(): IterableIterator<[number, number]>;
+
+ /**
+   * Returns a list of keys in the array
+ */
+ keys(): IterableIterator<number>;
+
+ /**
+   * Returns a list of values in the array
+ */
+ values(): IterableIterator<number>;
+
+ /**
+ * Returns the item located at the specified index.
+ * @param index The zero-based index of the desired code unit. A negative index will count back from the last item.
+ */
+ at(index: number): number | undefined;
+
+ /**
+ * Returns the this object after copying a section of the array identified by start and end
+ * to the same array starting at position target
+ * @param target If target is negative, it is treated as length+target where length is the
+ * length of the array.
+ * @param start If start is negative, it is treated as length+start. If end is negative, it
+ * is treated as length+end.
+ * @param end If not specified, length of the this object is used as its default value.
+ */
+ copyWithin(target: number, start: number, end?: number): this;
+
+ /**
+ * Determines whether all the members of an array satisfy the specified test.
+ * @param callbackfn A function that accepts up to three arguments. The every method calls
+ * the callbackfn function for each element in the array until the callbackfn returns a value
+ * which is coercible to the Boolean value false, or until the end of the array.
+ * @param thisArg An object to which the this keyword can refer in the callbackfn function.
+ * If thisArg is omitted, undefined is used as the this value.
+ */
+ every(
+ callbackfn: (value: number, index: number, array: Float16Array) => unknown,
+ thisArg?: any,
+ ): boolean;
+
+ /**
+ * Returns the this object after filling the section identified by start and end with value
+ * @param value value to fill array section with
+ * @param start index to start filling the array at. If start is negative, it is treated as
+ * length+start where length is the length of the array.
+ * @param end index to stop filling the array at. If end is negative, it is treated as
+ * length+end.
+ */
+ fill(value: number, start?: number, end?: number): this;
+
+ /**
+ * Returns the elements of an array that meet the condition specified in a callback function.
+ * @param predicate A function that accepts up to three arguments. The filter method calls
+ * the predicate function one time for each element in the array.
+ * @param thisArg An object to which the this keyword can refer in the predicate function.
+ * If thisArg is omitted, undefined is used as the this value.
+ */
+ filter(
+ predicate: (value: number, index: number, array: Float16Array) => any,
+ thisArg?: any,
+ ): Float16Array;
+
+ /**
+ * Returns the value of the first element in the array where predicate is true, and undefined
+ * otherwise.
+ * @param predicate find calls predicate once for each element of the array, in ascending
+ * order, until it finds one where predicate returns true. If such an element is found, find
+ * immediately returns that element value. Otherwise, find returns undefined.
+ * @param thisArg If provided, it will be used as the this value for each invocation of
+ * predicate. If it is not provided, undefined is used instead.
+ */
+ find(
+ predicate: (value: number, index: number, obj: Float16Array) => boolean,
+ thisArg?: any,
+ ): number | undefined;
+
+ /**
+ * Returns the index of the first element in the array where predicate is true, and -1
+ * otherwise.
+ * @param predicate find calls predicate once for each element of the array, in ascending
+ * order, until it finds one where predicate returns true. If such an element is found,
+ * findIndex immediately returns that element index. Otherwise, findIndex returns -1.
+ * @param thisArg If provided, it will be used as the this value for each invocation of
+ * predicate. If it is not provided, undefined is used instead.
+ */
+ findIndex(
+ predicate: (value: number, index: number, obj: Float16Array) => boolean,
+ thisArg?: any,
+ ): number;
+
+ /**
+ * Returns the value of the last element in the array where predicate is true, and undefined
+ * otherwise.
+ * @param predicate find calls predicate once for each element of the array, in descending
+ * order, until it finds one where predicate returns true. If such an element is found, findLast
+ * immediately returns that element value. Otherwise, findLast returns undefined.
+ * @param thisArg If provided, it will be used as the this value for each invocation of
+ * predicate. If it is not provided, undefined is used instead.
+ */
+ findLast(
+ predicate: (value: number, index: number, obj: Float16Array) => boolean,
+ thisArg?: any,
+ ): number | undefined;
+
+ /**
+ * Returns the index of the last element in the array where predicate is true, and -1
+ * otherwise.
+ * @param predicate find calls predicate once for each element of the array, in descending
+ * order, until it finds one where predicate returns true. If such an element is found,
+ * findLastIndex immediately returns that element index. Otherwise, findLastIndex returns -1.
+ * @param thisArg If provided, it will be used as the this value for each invocation of
+ * predicate. If it is not provided, undefined is used instead.
+ */
+ findLastIndex(
+ predicate: (value: number, index: number, obj: Float16Array) => boolean,
+ thisArg?: any,
+ ): number;
+
+ /**
+ * Performs the specified action for each element in an array.
+ * @param callbackfn A function that accepts up to three arguments. forEach calls the
+ * callbackfn function one time for each element in the array.
+ * @param thisArg An object to which the this keyword can refer in the callbackfn function.
+ * If thisArg is omitted, undefined is used as the this value.
+ */
+ forEach(
+ callbackfn: (value: number, index: number, array: Float16Array) => void,
+ thisArg?: any,
+ ): void;
+
+ /**
+ * Determines whether an array includes a certain element, returning true or false as appropriate.
+ * @param searchElement The element to search for.
+ * @param fromIndex The position in this array at which to begin searching for searchElement.
+ */
+ includes(searchElement: number, fromIndex?: number): boolean;
+
+ /**
+ * Returns the index of the first occurrence of a value in an array.
+ * @param searchElement The value to locate in the array.
+ * @param fromIndex The array index at which to begin the search. If fromIndex is omitted, the
+ * search starts at index 0.
+ */
+ indexOf(searchElement: number, fromIndex?: number): number;
+
+ /**
+ * Adds all the elements of an array separated by the specified separator string.
+ * @param separator A string used to separate one element of an array from the next in the
+ * resulting String. If omitted, the array elements are separated with a comma.
+ */
+ join(separator?: string): string;
+
+ /**
+ * Returns the index of the last occurrence of a value in an array.
+ * @param searchElement The value to locate in the array.
+ * @param fromIndex The array index at which to begin the search. If fromIndex is omitted, the
+ * search starts at index 0.
+ */
+ lastIndexOf(searchElement: number, fromIndex?: number): number;
+
+ /**
+ * The length of the array.
+ */
+ readonly length: number;
+
+ /**
+ * Calls a defined callback function on each element of an array, and returns an array that
+ * contains the results.
+ * @param callbackfn A function that accepts up to three arguments. The map method calls the
+ * callbackfn function one time for each element in the array.
+ * @param thisArg An object to which the this keyword can refer in the callbackfn function.
+ * If thisArg is omitted, undefined is used as the this value.
+ */
+ map(
+ callbackfn: (value: number, index: number, array: Float16Array) => number,
+ thisArg?: any,
+ ): Float16Array;
+
+ /**
+ * Calls the specified callback function for all the elements in an array. The return value of
+ * the callback function is the accumulated result, and is provided as an argument in the next
+ * call to the callback function.
+ * @param callbackfn A function that accepts up to four arguments. The reduce method calls the
+ * callbackfn function one time for each element in the array.
+ * @param initialValue If initialValue is specified, it is used as the initial value to start
+ * the accumulation. The first call to the callbackfn function provides this value as an argument
+ * instead of an array value.
+ */
+ reduce(
+ callbackfn: (
+ previousValue: number,
+ currentValue: number,
+ currentIndex: number,
+ array: Float16Array,
+ ) => number,
+ ): number;
+ reduce(
+ callbackfn: (
+ previousValue: number,
+ currentValue: number,
+ currentIndex: number,
+ array: Float16Array,
+ ) => number,
+ initialValue: number,
+ ): number;
+ reduce<U>(
+ callbackfn: (
+ previousValue: U,
+ currentValue: number,
+ currentIndex: number,
+ array: Float16Array,
+ ) => U,
+ initialValue: U,
+ ): U;
+
+ /**
+ * Calls the specified callback function for all the elements in an array, in descending order.
+ * The return value of the callback function is the accumulated result, and is provided as an
+ * argument in the next call to the callback function.
+ * @param callbackfn A function that accepts up to four arguments. The reduceRight method calls
+ * the callbackfn function one time for each element in the array.
+ * @param initialValue If initialValue is specified, it is used as the initial value to start
+ * the accumulation. The first call to the callbackfn function provides this value as an
+ * argument instead of an array value.
+ */
+ reduceRight(
+ callbackfn: (
+ previousValue: number,
+ currentValue: number,
+ currentIndex: number,
+ array: Float16Array,
+ ) => number,
+ ): number;
+ reduceRight(
+ callbackfn: (
+ previousValue: number,
+ currentValue: number,
+ currentIndex: number,
+ array: Float16Array,
+ ) => number,
+ initialValue: number,
+ ): number;
+ reduceRight<U>(
+ callbackfn: (
+ previousValue: U,
+ currentValue: number,
+ currentIndex: number,
+ array: Float16Array,
+ ) => U,
+ initialValue: U,
+ ): U;
+
+ /**
+ * Reverses the elements in an Array.
+ */
+ reverse(): this;
+
+ /**
+ * Sets a value or an array of values.
+ * @param array A typed or untyped array of values to set.
+ * @param offset The index in the current array at which the values are to be written.
+ */
+ set(array: ArrayLike<number>, offset?: number): void;
+
+ /**
+ * Returns a section of an array.
+ * @param start The beginning of the specified portion of the array.
+ * @param end The end of the specified portion of the array. This is exclusive of the element at the index 'end'.
+ */
+ slice(start?: number, end?: number): Float16Array;
+
+ /**
+ * Determines whether the specified callback function returns true for any element of an array.
+ * @param callbackfn A function that accepts up to three arguments. The some method calls
+ * the callbackfn function for each element in the array until the callbackfn returns a value
+ * which is coercible to the Boolean value true, or until the end of the array.
+ * @param thisArg An object to which the this keyword can refer in the callbackfn function.
+ * If thisArg is omitted, undefined is used as the this value.
+ */
+ some(
+ callbackfn: (value: number, index: number, array: Float16Array) => unknown,
+ thisArg?: any,
+ ): boolean;
+
+ /**
+ * Sorts an array.
+   * @param compareFn Function used to determine the order of the elements. It is expected to
+   * return a negative value if the first argument is less than the second, zero if they are
+   * equal, and a positive value otherwise. If omitted, the elements are sorted in ascending order.
+ */
+ sort(compareFn?: (a: number, b: number) => number): this;
+
+ /**
+ * Gets a new Float16Array view of the ArrayBuffer store for this array, referencing the elements
+ * at begin, inclusive, up to end, exclusive.
+ * @param begin The index of the beginning of the array.
+ * @param end The index of the end of the array.
+ */
+ subarray(begin?: number, end?: number): Float16Array;
+
+ /**
+   * Returns a string representation of the array elements, using the current locale.
+ */
+ toLocaleString(): string;
+
+ /**
+ * Returns a string representation of an array.
+ */
+ toString(): string;
+
+ /**
+ * Returns the primitive value of the specified object.
+ */
+ valueOf(): Float16Array;
+
+ readonly [Symbol.toStringTag]: "Float16Array";
+
+ [index: number]: number;
+}
+
+export interface Float16ArrayConstructor {
+ readonly prototype: Float16Array;
+ new (): Float16Array;
+ new (length: number): Float16Array;
+ new (elements: Iterable<number>): Float16Array;
+ new (array: ArrayLike<number> | ArrayBufferLike): Float16Array;
+ new (
+ buffer: ArrayBufferLike,
+ byteOffset: number,
+ length?: number,
+ ): Float16Array;
+
+ /**
+ * The size in bytes of each element in the array.
+ */
+ readonly BYTES_PER_ELEMENT: number;
+
+ /**
+ * Returns a new array from a set of elements.
+ * @param items A set of elements to include in the new array object.
+ */
+ of(...items: number[]): Float16Array;
+
+ /**
+ * Creates an array from an array-like or iterable object.
+ * @param elements An iterable object to convert to an array.
+ */
+ from(elements: Iterable<number>): Float16Array;
+
+ /**
+ * Creates an array from an array-like or iterable object.
+ * @param elements An iterable object to convert to an array.
+ * @param mapfn A mapping function to call on every element of the array.
+ * @param thisArg Value of 'this' used to invoke the mapfn.
+ */
+ from<T>(
+ elements: Iterable<T>,
+ mapfn: (v: T, k: number) => number,
+ thisArg?: any,
+ ): Float16Array;
+
+ /**
+ * Creates an array from an array-like or iterable object.
+ * @param arrayLike An array-like object to convert to an array.
+ */
+ from(arrayLike: ArrayLike<number>): Float16Array;
+
+ /**
+ * Creates an array from an array-like or iterable object.
+ * @param arrayLike An array-like object to convert to an array.
+ * @param mapfn A mapping function to call on every element of the array.
+ * @param thisArg Value of 'this' used to invoke the mapfn.
+ */
+ from<T>(
+ arrayLike: ArrayLike<T>,
+ mapfn: (v: T, k: number) => number,
+ thisArg?: any,
+ ): Float16Array;
+}
+export declare const Float16Array: Float16ArrayConstructor;
+
+/**
+ * Returns `true` if the value is a Float16Array instance.
+ * @since v3.4.0
+ */
+export declare function isFloat16Array(value: unknown): value is Float16Array;
+
+/**
+ * Returns `true` if the value is an instance of any TypedArray type, including Float16Array.
+ * @since v3.6.0
+ */
+export declare function isTypedArray(
+ value: unknown,
+): value is
+ | Uint8Array
+ | Uint8ClampedArray
+ | Uint16Array
+ | Uint32Array
+ | Int8Array
+ | Int16Array
+ | Int32Array
+ | Float16Array
+ | Float32Array
+ | Float64Array
+ | BigUint64Array
+ | BigInt64Array;
+
+/**
+ * Gets the Float16 value at the specified byte offset from the start of the view. There is
+ * no alignment constraint; multi-byte values may be fetched from any offset.
+ * @param byteOffset The place in the buffer at which the value should be retrieved.
+ * @param littleEndian If false or undefined, a big-endian value should be read,
+ * otherwise a little-endian value should be read.
+ */
+export declare function getFloat16(
+ dataView: DataView,
+ byteOffset: number,
+ littleEndian?: boolean,
+): number;
+
+/**
+ * Stores a Float16 value at the specified byte offset from the start of the view.
+ * @param byteOffset The place in the buffer at which the value should be set.
+ * @param value The value to set.
+ * @param littleEndian If false or undefined, a big-endian value should be written,
+ * otherwise a little-endian value should be written.
+ */
+export declare function setFloat16(
+ dataView: DataView,
+ byteOffset: number,
+ value: number,
+ littleEndian?: boolean,
+): void;
+
+/**
+ * Returns the nearest half-precision float representation of a number.
+ * @param x A numeric expression.
+ */
+export declare function hfround(x: number): number;
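A minimal usage sketch of the API declared above (editorial illustration, not part of the
vendored file; the import specifier mirrors the upstream npm package name, and the printed
values follow from float16 precision of the inputs):

  import { Float16Array, getFloat16, setFloat16, hfround, isFloat16Array } from "@petamoriken/float16";

  const arr = new Float16Array([1.337, 2.5, 3.75]);
  console.log(arr[0]);                    // 1.3369140625 (stored with float16 precision)
  console.log(hfround(1.337));            // 1.3369140625 (same coercion on a plain number)
  console.log(isFloat16Array(arr));       // true

  const view = new DataView(new ArrayBuffer(2));
  setFloat16(view, 0, Math.PI, true);     // write a little-endian float16
  console.log(getFloat16(view, 0, true)); // 3.140625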
diff --git a/dom/webgpu/tests/cts/checkout/src/external/petamoriken/float16/float16.js b/dom/webgpu/tests/cts/checkout/src/external/petamoriken/float16/float16.js
new file mode 100644
index 0000000000..54843a4842
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/external/petamoriken/float16/float16.js
@@ -0,0 +1,1228 @@
+/*! @petamoriken/float16 v3.6.6 | MIT License - https://github.com/petamoriken/float16 */
+
+const THIS_IS_NOT_AN_OBJECT = "This is not an object";
+const THIS_IS_NOT_A_FLOAT16ARRAY_OBJECT = "This is not a Float16Array object";
+const THIS_CONSTRUCTOR_IS_NOT_A_SUBCLASS_OF_FLOAT16ARRAY =
+ "This constructor is not a subclass of Float16Array";
+const THE_CONSTRUCTOR_PROPERTY_VALUE_IS_NOT_AN_OBJECT =
+ "The constructor property value is not an object";
+const SPECIES_CONSTRUCTOR_DIDNT_RETURN_TYPEDARRAY_OBJECT =
+ "Species constructor didn't return TypedArray object";
+const DERIVED_CONSTRUCTOR_CREATED_TYPEDARRAY_OBJECT_WHICH_WAS_TOO_SMALL_LENGTH =
+ "Derived constructor created TypedArray object which was too small length";
+const ATTEMPTING_TO_ACCESS_DETACHED_ARRAYBUFFER =
+ "Attempting to access detached ArrayBuffer";
+const CANNOT_CONVERT_UNDEFINED_OR_NULL_TO_OBJECT =
+ "Cannot convert undefined or null to object";
+const CANNOT_MIX_BIGINT_AND_OTHER_TYPES =
+ "Cannot mix BigInt and other types, use explicit conversions";
+const ITERATOR_PROPERTY_IS_NOT_CALLABLE = "@@iterator property is not callable";
+const REDUCE_OF_EMPTY_ARRAY_WITH_NO_INITIAL_VALUE =
+ "Reduce of empty array with no initial value";
+const OFFSET_IS_OUT_OF_BOUNDS = "Offset is out of bounds";
+
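+// uncurryThis() turns a method that relies on `this` into a plain function that takes the
+// receiver as its first argument; uncurryThisGetter() does the same for a property's getter.
+// Capturing the built-ins this way keeps the library working even if the global prototypes
+// are patched later.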
+function uncurryThis(target) {
+ return (thisArg, ...args) => {
+ return ReflectApply(target, thisArg, args);
+ };
+}
+function uncurryThisGetter(target, key) {
+ return uncurryThis(
+ ReflectGetOwnPropertyDescriptor(
+ target,
+ key
+ ).get
+ );
+}
+const {
+ apply: ReflectApply,
+ construct: ReflectConstruct,
+ defineProperty: ReflectDefineProperty,
+ get: ReflectGet,
+ getOwnPropertyDescriptor: ReflectGetOwnPropertyDescriptor,
+ getPrototypeOf: ReflectGetPrototypeOf,
+ has: ReflectHas,
+ ownKeys: ReflectOwnKeys,
+ set: ReflectSet,
+ setPrototypeOf: ReflectSetPrototypeOf,
+} = Reflect;
+const NativeProxy = Proxy;
+const {
+ MAX_SAFE_INTEGER: MAX_SAFE_INTEGER,
+ isFinite: NumberIsFinite,
+ isNaN: NumberIsNaN,
+} = Number;
+const {
+ iterator: SymbolIterator,
+ species: SymbolSpecies,
+ toStringTag: SymbolToStringTag,
+ for: SymbolFor,
+} = Symbol;
+const NativeObject = Object;
+const {
+ create: ObjectCreate,
+ defineProperty: ObjectDefineProperty,
+ freeze: ObjectFreeze,
+ is: ObjectIs,
+} = NativeObject;
+const ObjectPrototype = NativeObject.prototype;
+const ObjectPrototype__lookupGetter__ = (ObjectPrototype).__lookupGetter__
+ ? uncurryThis( (ObjectPrototype).__lookupGetter__)
+ : (object, key) => {
+ if (object == null) {
+ throw NativeTypeError(
+ CANNOT_CONVERT_UNDEFINED_OR_NULL_TO_OBJECT
+ );
+ }
+ let target = NativeObject(object);
+ do {
+ const descriptor = ReflectGetOwnPropertyDescriptor(target, key);
+ if (descriptor !== undefined) {
+ if (ObjectHasOwn(descriptor, "get")) {
+ return descriptor.get;
+ }
+ return;
+ }
+ } while ((target = ReflectGetPrototypeOf(target)) !== null);
+ };
+const ObjectHasOwn = (NativeObject).hasOwn ||
+ uncurryThis(ObjectPrototype.hasOwnProperty);
+const NativeArray = Array;
+const ArrayIsArray = NativeArray.isArray;
+const ArrayPrototype = NativeArray.prototype;
+const ArrayPrototypeJoin = uncurryThis(ArrayPrototype.join);
+const ArrayPrototypePush = uncurryThis(ArrayPrototype.push);
+const ArrayPrototypeToLocaleString = uncurryThis(
+ ArrayPrototype.toLocaleString
+);
+const NativeArrayPrototypeSymbolIterator = ArrayPrototype[SymbolIterator];
+const ArrayPrototypeSymbolIterator = uncurryThis(NativeArrayPrototypeSymbolIterator);
+const MathTrunc = Math.trunc;
+const NativeArrayBuffer = ArrayBuffer;
+const ArrayBufferIsView = NativeArrayBuffer.isView;
+const ArrayBufferPrototype = NativeArrayBuffer.prototype;
+const ArrayBufferPrototypeSlice = uncurryThis(ArrayBufferPrototype.slice);
+const ArrayBufferPrototypeGetByteLength = uncurryThisGetter(ArrayBufferPrototype, "byteLength");
+const NativeSharedArrayBuffer = typeof SharedArrayBuffer !== "undefined" ? SharedArrayBuffer : null;
+const SharedArrayBufferPrototypeGetByteLength = NativeSharedArrayBuffer
+ && uncurryThisGetter(NativeSharedArrayBuffer.prototype, "byteLength");
+const TypedArray = ReflectGetPrototypeOf(Uint8Array);
+const TypedArrayFrom = TypedArray.from;
+const TypedArrayPrototype = TypedArray.prototype;
+const NativeTypedArrayPrototypeSymbolIterator = TypedArrayPrototype[SymbolIterator];
+const TypedArrayPrototypeKeys = uncurryThis(TypedArrayPrototype.keys);
+const TypedArrayPrototypeValues = uncurryThis(
+ TypedArrayPrototype.values
+);
+const TypedArrayPrototypeEntries = uncurryThis(
+ TypedArrayPrototype.entries
+);
+const TypedArrayPrototypeSet = uncurryThis(TypedArrayPrototype.set);
+const TypedArrayPrototypeReverse = uncurryThis(
+ TypedArrayPrototype.reverse
+);
+const TypedArrayPrototypeFill = uncurryThis(TypedArrayPrototype.fill);
+const TypedArrayPrototypeCopyWithin = uncurryThis(
+ TypedArrayPrototype.copyWithin
+);
+const TypedArrayPrototypeSort = uncurryThis(TypedArrayPrototype.sort);
+const TypedArrayPrototypeSlice = uncurryThis(TypedArrayPrototype.slice);
+const TypedArrayPrototypeSubarray = uncurryThis(
+ TypedArrayPrototype.subarray
+);
+const TypedArrayPrototypeGetBuffer = uncurryThisGetter(
+ TypedArrayPrototype,
+ "buffer"
+);
+const TypedArrayPrototypeGetByteOffset = uncurryThisGetter(
+ TypedArrayPrototype,
+ "byteOffset"
+);
+const TypedArrayPrototypeGetLength = uncurryThisGetter(
+ TypedArrayPrototype,
+ "length"
+);
+const TypedArrayPrototypeGetSymbolToStringTag = uncurryThisGetter(
+ TypedArrayPrototype,
+ SymbolToStringTag
+);
+const NativeUint16Array = Uint16Array;
+const Uint16ArrayFrom = (...args) => {
+ return ReflectApply(TypedArrayFrom, NativeUint16Array, args);
+};
+const NativeUint32Array = Uint32Array;
+const NativeFloat32Array = Float32Array;
+const ArrayIteratorPrototype = ReflectGetPrototypeOf([][SymbolIterator]());
+const ArrayIteratorPrototypeNext = uncurryThis(ArrayIteratorPrototype.next);
+const GeneratorPrototypeNext = uncurryThis((function* () {})().next);
+const IteratorPrototype = ReflectGetPrototypeOf(ArrayIteratorPrototype);
+const DataViewPrototype = DataView.prototype;
+const DataViewPrototypeGetUint16 = uncurryThis(
+ DataViewPrototype.getUint16
+);
+const DataViewPrototypeSetUint16 = uncurryThis(
+ DataViewPrototype.setUint16
+);
+const NativeTypeError = TypeError;
+const NativeRangeError = RangeError;
+const NativeWeakSet = WeakSet;
+const WeakSetPrototype = NativeWeakSet.prototype;
+const WeakSetPrototypeAdd = uncurryThis(WeakSetPrototype.add);
+const WeakSetPrototypeHas = uncurryThis(WeakSetPrototype.has);
+const NativeWeakMap = WeakMap;
+const WeakMapPrototype = NativeWeakMap.prototype;
+const WeakMapPrototypeGet = uncurryThis(WeakMapPrototype.get);
+const WeakMapPrototypeHas = uncurryThis(WeakMapPrototype.has);
+const WeakMapPrototypeSet = uncurryThis(WeakMapPrototype.set);
+
+const arrayIterators = new NativeWeakMap();
+const SafeIteratorPrototype = ObjectCreate(null, {
+ next: {
+ value: function next() {
+ const arrayIterator = WeakMapPrototypeGet(arrayIterators, this);
+ return ArrayIteratorPrototypeNext(arrayIterator);
+ },
+ },
+ [SymbolIterator]: {
+ value: function values() {
+ return this;
+ },
+ },
+});
+function safeIfNeeded(array) {
+ if (array[SymbolIterator] === NativeArrayPrototypeSymbolIterator) {
+ return array;
+ }
+ const safe = ObjectCreate(SafeIteratorPrototype);
+ WeakMapPrototypeSet(arrayIterators, safe, ArrayPrototypeSymbolIterator(array));
+ return safe;
+}
+const generators = new NativeWeakMap();
+const DummyArrayIteratorPrototype = ObjectCreate(IteratorPrototype, {
+ next: {
+ value: function next() {
+ const generator = WeakMapPrototypeGet(generators, this);
+ return GeneratorPrototypeNext(generator);
+ },
+ writable: true,
+ configurable: true,
+ },
+});
+for (const key of ReflectOwnKeys(ArrayIteratorPrototype)) {
+ if (key === "next") {
+ continue;
+ }
+ ObjectDefineProperty(DummyArrayIteratorPrototype, key, ReflectGetOwnPropertyDescriptor(ArrayIteratorPrototype, key));
+}
+function wrap(generator) {
+ const dummy = ObjectCreate(DummyArrayIteratorPrototype);
+ WeakMapPrototypeSet(generators, dummy, generator);
+ return dummy;
+}
+
+function isObject(value) {
+ return (value !== null && typeof value === "object") ||
+ typeof value === "function";
+}
+function isObjectLike(value) {
+ return value !== null && typeof value === "object";
+}
+function isNativeTypedArray(value) {
+ return TypedArrayPrototypeGetSymbolToStringTag(value) !== undefined;
+}
+function isNativeBigIntTypedArray(value) {
+ const typedArrayName = TypedArrayPrototypeGetSymbolToStringTag(value);
+ return typedArrayName === "BigInt64Array" ||
+ typedArrayName === "BigUint64Array";
+}
+function isArrayBuffer(value) {
+ try {
+ ArrayBufferPrototypeGetByteLength( (value));
+ return true;
+ } catch (e) {
+ return false;
+ }
+}
+function isSharedArrayBuffer(value) {
+ if (NativeSharedArrayBuffer === null) {
+ return false;
+ }
+ try {
+ SharedArrayBufferPrototypeGetByteLength( (value));
+ return true;
+ } catch (e) {
+ return false;
+ }
+}
+function isOrdinaryArray(value) {
+ if (!ArrayIsArray(value)) {
+ return false;
+ }
+ if (value[SymbolIterator] === NativeArrayPrototypeSymbolIterator) {
+ return true;
+ }
+ const iterator = value[SymbolIterator]();
+ return iterator[SymbolToStringTag] === "Array Iterator";
+}
+function isOrdinaryNativeTypedArray(value) {
+ if (!isNativeTypedArray(value)) {
+ return false;
+ }
+ if (value[SymbolIterator] === NativeTypedArrayPrototypeSymbolIterator) {
+ return true;
+ }
+ const iterator = value[SymbolIterator]();
+ return iterator[SymbolToStringTag] === "Array Iterator";
+}
+function isCanonicalIntegerIndexString(value) {
+ if (typeof value !== "string") {
+ return false;
+ }
+ const number = +value;
+ if (value !== number + "") {
+ return false;
+ }
+ if (!NumberIsFinite(number)) {
+ return false;
+ }
+ return number === MathTrunc(number);
+}
+
+const brand = SymbolFor("__Float16Array__");
+function hasFloat16ArrayBrand(target) {
+ if (!isObjectLike(target)) {
+ return false;
+ }
+ const prototype = ReflectGetPrototypeOf(target);
+ if (!isObjectLike(prototype)) {
+ return false;
+ }
+ const constructor = prototype.constructor;
+ if (constructor === undefined) {
+ return false;
+ }
+ if (!isObject(constructor)) {
+ throw NativeTypeError(THE_CONSTRUCTOR_PROPERTY_VALUE_IS_NOT_AN_OBJECT);
+ }
+ return ReflectHas(constructor, brand);
+}
+
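+// The tables below drive float32 <-> float16 bit conversion: a float32's sign and exponent
+// bits (9 bits, 512 entries) index baseTable/shiftTable to build the float16 bit pattern in
+// roundToFloat16Bits(), while mantissaTable, exponentTable and offsetTable perform the
+// inverse mapping in convertToNumber().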
+const buffer = new NativeArrayBuffer(4);
+const floatView = new NativeFloat32Array(buffer);
+const uint32View = new NativeUint32Array(buffer);
+const baseTable = new NativeUint32Array(512);
+const shiftTable = new NativeUint32Array(512);
+for (let i = 0; i < 256; ++i) {
+ const e = i - 127;
+ if (e < -27) {
+ baseTable[i] = 0x0000;
+ baseTable[i | 0x100] = 0x8000;
+ shiftTable[i] = 24;
+ shiftTable[i | 0x100] = 24;
+ } else if (e < -14) {
+ baseTable[i] = 0x0400 >> (-e - 14);
+ baseTable[i | 0x100] = (0x0400 >> (-e - 14)) | 0x8000;
+ shiftTable[i] = -e - 1;
+ shiftTable[i | 0x100] = -e - 1;
+ } else if (e <= 15) {
+ baseTable[i] = (e + 15) << 10;
+ baseTable[i | 0x100] = ((e + 15) << 10) | 0x8000;
+ shiftTable[i] = 13;
+ shiftTable[i | 0x100] = 13;
+ } else if (e < 128) {
+ baseTable[i] = 0x7c00;
+ baseTable[i | 0x100] = 0xfc00;
+ shiftTable[i] = 24;
+ shiftTable[i | 0x100] = 24;
+ } else {
+ baseTable[i] = 0x7c00;
+ baseTable[i | 0x100] = 0xfc00;
+ shiftTable[i] = 13;
+ shiftTable[i | 0x100] = 13;
+ }
+}
+function roundToFloat16Bits(num) {
+ floatView[0] = (num);
+ const f = uint32View[0];
+ const e = (f >> 23) & 0x1ff;
+ return baseTable[e] + ((f & 0x007fffff) >> shiftTable[e]);
+}
+const mantissaTable = new NativeUint32Array(2048);
+const exponentTable = new NativeUint32Array(64);
+const offsetTable = new NativeUint32Array(64);
+for (let i = 1; i < 1024; ++i) {
+ let m = i << 13;
+ let e = 0;
+ while((m & 0x00800000) === 0) {
+ m <<= 1;
+ e -= 0x00800000;
+ }
+ m &= ~0x00800000;
+ e += 0x38800000;
+ mantissaTable[i] = m | e;
+}
+for (let i = 1024; i < 2048; ++i) {
+ mantissaTable[i] = 0x38000000 + ((i - 1024) << 13);
+}
+for (let i = 1; i < 31; ++i) {
+ exponentTable[i] = i << 23;
+}
+exponentTable[31] = 0x47800000;
+exponentTable[32] = 0x80000000;
+for (let i = 33; i < 63; ++i) {
+ exponentTable[i] = 0x80000000 + ((i - 32) << 23);
+}
+exponentTable[63] = 0xc7800000;
+for (let i = 1; i < 64; ++i) {
+ if (i !== 32) {
+ offsetTable[i] = 1024;
+ }
+}
+function convertToNumber(float16bits) {
+ const m = float16bits >> 10;
+ uint32View[0] = mantissaTable[offsetTable[m] + (float16bits & 0x3ff)] + exponentTable[m];
+ return floatView[0];
+}
+
+function ToIntegerOrInfinity(target) {
+ const number = +target;
+ if (NumberIsNaN(number) || number === 0) {
+ return 0;
+ }
+ return MathTrunc(number);
+}
+function ToLength(target) {
+ const length = ToIntegerOrInfinity(target);
+ if (length < 0) {
+ return 0;
+ }
+ return length < MAX_SAFE_INTEGER
+ ? length
+ : MAX_SAFE_INTEGER;
+}
+function SpeciesConstructor(target, defaultConstructor) {
+ if (!isObject(target)) {
+ throw NativeTypeError(THIS_IS_NOT_AN_OBJECT);
+ }
+ const constructor = target.constructor;
+ if (constructor === undefined) {
+ return defaultConstructor;
+ }
+ if (!isObject(constructor)) {
+ throw NativeTypeError(THE_CONSTRUCTOR_PROPERTY_VALUE_IS_NOT_AN_OBJECT);
+ }
+ const species = constructor[SymbolSpecies];
+ if (species == null) {
+ return defaultConstructor;
+ }
+ return species;
+}
+function IsDetachedBuffer(buffer) {
+ if (isSharedArrayBuffer(buffer)) {
+ return false;
+ }
+ try {
+ ArrayBufferPrototypeSlice(buffer, 0, 0);
+ return false;
+ } catch (e) {}
+ return true;
+}
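+// Default ordering used by sort() when no compareFn is supplied: ascending numeric order,
+// with -0 placed before +0 and NaN values sorted to the end, mirroring
+// %TypedArray%.prototype.sort.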
+function defaultCompare(x, y) {
+ const isXNaN = NumberIsNaN(x);
+ const isYNaN = NumberIsNaN(y);
+ if (isXNaN && isYNaN) {
+ return 0;
+ }
+ if (isXNaN) {
+ return 1;
+ }
+ if (isYNaN) {
+ return -1;
+ }
+ if (x < y) {
+ return -1;
+ }
+ if (x > y) {
+ return 1;
+ }
+ if (x === 0 && y === 0) {
+ const isXPlusZero = ObjectIs(x, 0);
+ const isYPlusZero = ObjectIs(y, 0);
+ if (!isXPlusZero && isYPlusZero) {
+ return -1;
+ }
+ if (isXPlusZero && !isYPlusZero) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+const BYTES_PER_ELEMENT = 2;
+const float16bitsArrays = new NativeWeakMap();
+function isFloat16Array(target) {
+ return WeakMapPrototypeHas(float16bitsArrays, target) ||
+ (!ArrayBufferIsView(target) && hasFloat16ArrayBrand(target));
+}
+function assertFloat16Array(target) {
+ if (!isFloat16Array(target)) {
+ throw NativeTypeError(THIS_IS_NOT_A_FLOAT16ARRAY_OBJECT);
+ }
+}
+function assertSpeciesTypedArray(target, count) {
+ const isTargetFloat16Array = isFloat16Array(target);
+ const isTargetTypedArray = isNativeTypedArray(target);
+ if (!isTargetFloat16Array && !isTargetTypedArray) {
+ throw NativeTypeError(SPECIES_CONSTRUCTOR_DIDNT_RETURN_TYPEDARRAY_OBJECT);
+ }
+ if (typeof count === "number") {
+ let length;
+ if (isTargetFloat16Array) {
+ const float16bitsArray = getFloat16BitsArray(target);
+ length = TypedArrayPrototypeGetLength(float16bitsArray);
+ } else {
+ length = TypedArrayPrototypeGetLength(target);
+ }
+ if (length < count) {
+ throw NativeTypeError(
+ DERIVED_CONSTRUCTOR_CREATED_TYPEDARRAY_OBJECT_WHICH_WAS_TOO_SMALL_LENGTH
+ );
+ }
+ }
+ if (isNativeBigIntTypedArray(target)) {
+ throw NativeTypeError(CANNOT_MIX_BIGINT_AND_OTHER_TYPES);
+ }
+}
+function getFloat16BitsArray(float16) {
+ const float16bitsArray = WeakMapPrototypeGet(float16bitsArrays, float16);
+ if (float16bitsArray !== undefined) {
+ const buffer = TypedArrayPrototypeGetBuffer(float16bitsArray);
+ if (IsDetachedBuffer(buffer)) {
+ throw NativeTypeError(ATTEMPTING_TO_ACCESS_DETACHED_ARRAYBUFFER);
+ }
+ return float16bitsArray;
+ }
+ const buffer = (float16).buffer;
+ if (IsDetachedBuffer(buffer)) {
+ throw NativeTypeError(ATTEMPTING_TO_ACCESS_DETACHED_ARRAYBUFFER);
+ }
+ const cloned = ReflectConstruct(Float16Array, [
+ buffer,
+ (float16).byteOffset,
+ (float16).length,
+ ], float16.constructor);
+ return WeakMapPrototypeGet(float16bitsArrays, cloned);
+}
+function copyToArray(float16bitsArray) {
+ const length = TypedArrayPrototypeGetLength(float16bitsArray);
+ const array = [];
+ for (let i = 0; i < length; ++i) {
+ array[i] = convertToNumber(float16bitsArray[i]);
+ }
+ return array;
+}
+const TypedArrayPrototypeGetters = new NativeWeakSet();
+for (const key of ReflectOwnKeys(TypedArrayPrototype)) {
+ if (key === SymbolToStringTag) {
+ continue;
+ }
+ const descriptor = ReflectGetOwnPropertyDescriptor(TypedArrayPrototype, key);
+ if (ObjectHasOwn(descriptor, "get") && typeof descriptor.get === "function") {
+ WeakSetPrototypeAdd(TypedArrayPrototypeGetters, descriptor.get);
+ }
+}
+const handler = ObjectFreeze( ({
+ get(target, key, receiver) {
+ if (isCanonicalIntegerIndexString(key) && ObjectHasOwn(target, key)) {
+ return convertToNumber(ReflectGet(target, key));
+ }
+ if (WeakSetPrototypeHas(TypedArrayPrototypeGetters, ObjectPrototype__lookupGetter__(target, key))) {
+ return ReflectGet(target, key);
+ }
+ return ReflectGet(target, key, receiver);
+ },
+ set(target, key, value, receiver) {
+ if (isCanonicalIntegerIndexString(key) && ObjectHasOwn(target, key)) {
+ return ReflectSet(target, key, roundToFloat16Bits(value));
+ }
+ return ReflectSet(target, key, value, receiver);
+ },
+ getOwnPropertyDescriptor(target, key) {
+ if (isCanonicalIntegerIndexString(key) && ObjectHasOwn(target, key)) {
+ const descriptor = ReflectGetOwnPropertyDescriptor(target, key);
+ descriptor.value = convertToNumber(descriptor.value);
+ return descriptor;
+ }
+ return ReflectGetOwnPropertyDescriptor(target, key);
+ },
+ defineProperty(target, key, descriptor) {
+ if (
+ isCanonicalIntegerIndexString(key) &&
+ ObjectHasOwn(target, key) &&
+ ObjectHasOwn(descriptor, "value")
+ ) {
+ descriptor.value = roundToFloat16Bits(descriptor.value);
+ return ReflectDefineProperty(target, key, descriptor);
+ }
+ return ReflectDefineProperty(target, key, descriptor);
+ },
+}));
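+// Each Float16Array instance is a Proxy over a Uint16Array that stores raw float16 bit
+// patterns; the handler above converts indexed reads and writes to and from numbers, and
+// the float16bitsArrays WeakMap maps each proxy back to its backing Uint16Array.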
+class Float16Array {
+ constructor(input, _byteOffset, _length) {
+ let float16bitsArray;
+ if (isFloat16Array(input)) {
+ float16bitsArray = ReflectConstruct(NativeUint16Array, [getFloat16BitsArray(input)], new.target);
+ } else if (isObject(input) && !isArrayBuffer(input)) {
+ let list;
+ let length;
+ if (isNativeTypedArray(input)) {
+ list = input;
+ length = TypedArrayPrototypeGetLength(input);
+ const buffer = TypedArrayPrototypeGetBuffer(input);
+ const BufferConstructor = !isSharedArrayBuffer(buffer)
+ ? (SpeciesConstructor(
+ buffer,
+ NativeArrayBuffer
+ ))
+ : NativeArrayBuffer;
+ if (IsDetachedBuffer(buffer)) {
+ throw NativeTypeError(ATTEMPTING_TO_ACCESS_DETACHED_ARRAYBUFFER);
+ }
+ if (isNativeBigIntTypedArray(input)) {
+ throw NativeTypeError(CANNOT_MIX_BIGINT_AND_OTHER_TYPES);
+ }
+ const data = new BufferConstructor(
+ length * BYTES_PER_ELEMENT
+ );
+ float16bitsArray = ReflectConstruct(NativeUint16Array, [data], new.target);
+ } else {
+ const iterator = input[SymbolIterator];
+ if (iterator != null && typeof iterator !== "function") {
+ throw NativeTypeError(ITERATOR_PROPERTY_IS_NOT_CALLABLE);
+ }
+ if (iterator != null) {
+ if (isOrdinaryArray(input)) {
+ list = input;
+ length = input.length;
+ } else {
+ list = [... (input)];
+ length = list.length;
+ }
+ } else {
+ list = (input);
+ length = ToLength(list.length);
+ }
+ float16bitsArray = ReflectConstruct(NativeUint16Array, [length], new.target);
+ }
+ for (let i = 0; i < length; ++i) {
+ float16bitsArray[i] = roundToFloat16Bits(list[i]);
+ }
+ } else {
+ float16bitsArray = ReflectConstruct(NativeUint16Array, arguments, new.target);
+ }
+ const proxy = (new NativeProxy(float16bitsArray, handler));
+ WeakMapPrototypeSet(float16bitsArrays, proxy, float16bitsArray);
+ return proxy;
+ }
+ static from(src, ...opts) {
+ const Constructor = this;
+ if (!ReflectHas(Constructor, brand)) {
+ throw NativeTypeError(
+ THIS_CONSTRUCTOR_IS_NOT_A_SUBCLASS_OF_FLOAT16ARRAY
+ );
+ }
+ if (Constructor === Float16Array) {
+ if (isFloat16Array(src) && opts.length === 0) {
+ const float16bitsArray = getFloat16BitsArray(src);
+ const uint16 = new NativeUint16Array(
+ TypedArrayPrototypeGetBuffer(float16bitsArray),
+ TypedArrayPrototypeGetByteOffset(float16bitsArray),
+ TypedArrayPrototypeGetLength(float16bitsArray)
+ );
+ return new Float16Array(
+ TypedArrayPrototypeGetBuffer(TypedArrayPrototypeSlice(uint16))
+ );
+ }
+ if (opts.length === 0) {
+ return new Float16Array(
+ TypedArrayPrototypeGetBuffer(
+ Uint16ArrayFrom(src, roundToFloat16Bits)
+ )
+ );
+ }
+ const mapFunc = opts[0];
+ const thisArg = opts[1];
+ return new Float16Array(
+ TypedArrayPrototypeGetBuffer(
+ Uint16ArrayFrom(src, function (val, ...args) {
+ return roundToFloat16Bits(
+ ReflectApply(mapFunc, this, [val, ...safeIfNeeded(args)])
+ );
+ }, thisArg)
+ )
+ );
+ }
+ let list;
+ let length;
+ const iterator = src[SymbolIterator];
+ if (iterator != null && typeof iterator !== "function") {
+ throw NativeTypeError(ITERATOR_PROPERTY_IS_NOT_CALLABLE);
+ }
+ if (iterator != null) {
+ if (isOrdinaryArray(src)) {
+ list = src;
+ length = src.length;
+ } else if (isOrdinaryNativeTypedArray(src)) {
+ list = src;
+ length = TypedArrayPrototypeGetLength(src);
+ } else {
+ list = [...src];
+ length = list.length;
+ }
+ } else {
+ if (src == null) {
+ throw NativeTypeError(
+ CANNOT_CONVERT_UNDEFINED_OR_NULL_TO_OBJECT
+ );
+ }
+ list = NativeObject(src);
+ length = ToLength(list.length);
+ }
+ const array = new Constructor(length);
+ if (opts.length === 0) {
+ for (let i = 0; i < length; ++i) {
+ array[i] = (list[i]);
+ }
+ } else {
+ const mapFunc = opts[0];
+ const thisArg = opts[1];
+ for (let i = 0; i < length; ++i) {
+ array[i] = ReflectApply(mapFunc, thisArg, [list[i], i]);
+ }
+ }
+ return array;
+ }
+ static of(...items) {
+ const Constructor = this;
+ if (!ReflectHas(Constructor, brand)) {
+ throw NativeTypeError(
+ THIS_CONSTRUCTOR_IS_NOT_A_SUBCLASS_OF_FLOAT16ARRAY
+ );
+ }
+ const length = items.length;
+ if (Constructor === Float16Array) {
+ const proxy = new Float16Array(length);
+ const float16bitsArray = getFloat16BitsArray(proxy);
+ for (let i = 0; i < length; ++i) {
+ float16bitsArray[i] = roundToFloat16Bits(items[i]);
+ }
+ return proxy;
+ }
+ const array = new Constructor(length);
+ for (let i = 0; i < length; ++i) {
+ array[i] = items[i];
+ }
+ return array;
+ }
+ keys() {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ return TypedArrayPrototypeKeys(float16bitsArray);
+ }
+ values() {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ return wrap((function* () {
+ for (const val of TypedArrayPrototypeValues(float16bitsArray)) {
+ yield convertToNumber(val);
+ }
+ })());
+ }
+ entries() {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ return wrap((function* () {
+ for (const [i, val] of TypedArrayPrototypeEntries(float16bitsArray)) {
+ yield ([i, convertToNumber(val)]);
+ }
+ })());
+ }
+ at(index) {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ const length = TypedArrayPrototypeGetLength(float16bitsArray);
+ const relativeIndex = ToIntegerOrInfinity(index);
+ const k = relativeIndex >= 0 ? relativeIndex : length + relativeIndex;
+ if (k < 0 || k >= length) {
+ return;
+ }
+ return convertToNumber(float16bitsArray[k]);
+ }
+ map(callback, ...opts) {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ const length = TypedArrayPrototypeGetLength(float16bitsArray);
+ const thisArg = opts[0];
+ const Constructor = SpeciesConstructor(float16bitsArray, Float16Array);
+ if (Constructor === Float16Array) {
+ const proxy = new Float16Array(length);
+ const array = getFloat16BitsArray(proxy);
+ for (let i = 0; i < length; ++i) {
+ const val = convertToNumber(float16bitsArray[i]);
+ array[i] = roundToFloat16Bits(
+ ReflectApply(callback, thisArg, [val, i, this])
+ );
+ }
+ return proxy;
+ }
+ const array = new Constructor(length);
+ assertSpeciesTypedArray(array, length);
+ for (let i = 0; i < length; ++i) {
+ const val = convertToNumber(float16bitsArray[i]);
+ array[i] = ReflectApply(callback, thisArg, [val, i, this]);
+ }
+ return (array);
+ }
+ filter(callback, ...opts) {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ const length = TypedArrayPrototypeGetLength(float16bitsArray);
+ const thisArg = opts[0];
+ const kept = [];
+ for (let i = 0; i < length; ++i) {
+ const val = convertToNumber(float16bitsArray[i]);
+ if (ReflectApply(callback, thisArg, [val, i, this])) {
+ ArrayPrototypePush(kept, val);
+ }
+ }
+ const Constructor = SpeciesConstructor(float16bitsArray, Float16Array);
+ const array = new Constructor(kept);
+ assertSpeciesTypedArray(array);
+ return (array);
+ }
+ reduce(callback, ...opts) {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ const length = TypedArrayPrototypeGetLength(float16bitsArray);
+ if (length === 0 && opts.length === 0) {
+ throw NativeTypeError(REDUCE_OF_EMPTY_ARRAY_WITH_NO_INITIAL_VALUE);
+ }
+ let accumulator, start;
+ if (opts.length === 0) {
+ accumulator = convertToNumber(float16bitsArray[0]);
+ start = 1;
+ } else {
+ accumulator = opts[0];
+ start = 0;
+ }
+ for (let i = start; i < length; ++i) {
+ accumulator = callback(
+ accumulator,
+ convertToNumber(float16bitsArray[i]),
+ i,
+ this
+ );
+ }
+ return accumulator;
+ }
+ reduceRight(callback, ...opts) {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ const length = TypedArrayPrototypeGetLength(float16bitsArray);
+ if (length === 0 && opts.length === 0) {
+ throw NativeTypeError(REDUCE_OF_EMPTY_ARRAY_WITH_NO_INITIAL_VALUE);
+ }
+ let accumulator, start;
+ if (opts.length === 0) {
+ accumulator = convertToNumber(float16bitsArray[length - 1]);
+ start = length - 2;
+ } else {
+ accumulator = opts[0];
+ start = length - 1;
+ }
+ for (let i = start; i >= 0; --i) {
+ accumulator = callback(
+ accumulator,
+ convertToNumber(float16bitsArray[i]),
+ i,
+ this
+ );
+ }
+ return accumulator;
+ }
+ forEach(callback, ...opts) {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ const length = TypedArrayPrototypeGetLength(float16bitsArray);
+ const thisArg = opts[0];
+ for (let i = 0; i < length; ++i) {
+ ReflectApply(callback, thisArg, [
+ convertToNumber(float16bitsArray[i]),
+ i,
+ this,
+ ]);
+ }
+ }
+ find(callback, ...opts) {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ const length = TypedArrayPrototypeGetLength(float16bitsArray);
+ const thisArg = opts[0];
+ for (let i = 0; i < length; ++i) {
+ const value = convertToNumber(float16bitsArray[i]);
+ if (ReflectApply(callback, thisArg, [value, i, this])) {
+ return value;
+ }
+ }
+ }
+ findIndex(callback, ...opts) {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ const length = TypedArrayPrototypeGetLength(float16bitsArray);
+ const thisArg = opts[0];
+ for (let i = 0; i < length; ++i) {
+ const value = convertToNumber(float16bitsArray[i]);
+ if (ReflectApply(callback, thisArg, [value, i, this])) {
+ return i;
+ }
+ }
+ return -1;
+ }
+ findLast(callback, ...opts) {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ const length = TypedArrayPrototypeGetLength(float16bitsArray);
+ const thisArg = opts[0];
+ for (let i = length - 1; i >= 0; --i) {
+ const value = convertToNumber(float16bitsArray[i]);
+ if (ReflectApply(callback, thisArg, [value, i, this])) {
+ return value;
+ }
+ }
+ }
+ findLastIndex(callback, ...opts) {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ const length = TypedArrayPrototypeGetLength(float16bitsArray);
+ const thisArg = opts[0];
+ for (let i = length - 1; i >= 0; --i) {
+ const value = convertToNumber(float16bitsArray[i]);
+ if (ReflectApply(callback, thisArg, [value, i, this])) {
+ return i;
+ }
+ }
+ return -1;
+ }
+ every(callback, ...opts) {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ const length = TypedArrayPrototypeGetLength(float16bitsArray);
+ const thisArg = opts[0];
+ for (let i = 0; i < length; ++i) {
+ if (
+ !ReflectApply(callback, thisArg, [
+ convertToNumber(float16bitsArray[i]),
+ i,
+ this,
+ ])
+ ) {
+ return false;
+ }
+ }
+ return true;
+ }
+ some(callback, ...opts) {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ const length = TypedArrayPrototypeGetLength(float16bitsArray);
+ const thisArg = opts[0];
+ for (let i = 0; i < length; ++i) {
+ if (
+ ReflectApply(callback, thisArg, [
+ convertToNumber(float16bitsArray[i]),
+ i,
+ this,
+ ])
+ ) {
+ return true;
+ }
+ }
+ return false;
+ }
+ set(input, ...opts) {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ const targetOffset = ToIntegerOrInfinity(opts[0]);
+ if (targetOffset < 0) {
+ throw NativeRangeError(OFFSET_IS_OUT_OF_BOUNDS);
+ }
+ if (input == null) {
+ throw NativeTypeError(
+ CANNOT_CONVERT_UNDEFINED_OR_NULL_TO_OBJECT
+ );
+ }
+ if (isNativeBigIntTypedArray(input)) {
+ throw NativeTypeError(
+ CANNOT_MIX_BIGINT_AND_OTHER_TYPES
+ );
+ }
+ if (isFloat16Array(input)) {
+ return TypedArrayPrototypeSet(
+ getFloat16BitsArray(this),
+ getFloat16BitsArray(input),
+ targetOffset
+ );
+ }
+ if (isNativeTypedArray(input)) {
+ const buffer = TypedArrayPrototypeGetBuffer(input);
+ if (IsDetachedBuffer(buffer)) {
+ throw NativeTypeError(ATTEMPTING_TO_ACCESS_DETACHED_ARRAYBUFFER);
+ }
+ }
+ const targetLength = TypedArrayPrototypeGetLength(float16bitsArray);
+ const src = NativeObject(input);
+ const srcLength = ToLength(src.length);
+ if (targetOffset === Infinity || srcLength + targetOffset > targetLength) {
+ throw NativeRangeError(OFFSET_IS_OUT_OF_BOUNDS);
+ }
+ for (let i = 0; i < srcLength; ++i) {
+ float16bitsArray[i + targetOffset] = roundToFloat16Bits(src[i]);
+ }
+ }
+ reverse() {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ TypedArrayPrototypeReverse(float16bitsArray);
+ return this;
+ }
+ fill(value, ...opts) {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ TypedArrayPrototypeFill(
+ float16bitsArray,
+ roundToFloat16Bits(value),
+ ...safeIfNeeded(opts)
+ );
+ return this;
+ }
+ copyWithin(target, start, ...opts) {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ TypedArrayPrototypeCopyWithin(float16bitsArray, target, start, ...safeIfNeeded(opts));
+ return this;
+ }
+ sort(compareFn) {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ const sortCompare = compareFn !== undefined ? compareFn : defaultCompare;
+ TypedArrayPrototypeSort(float16bitsArray, (x, y) => {
+ return sortCompare(convertToNumber(x), convertToNumber(y));
+ });
+ return this;
+ }
+ slice(start, end) {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ const Constructor = SpeciesConstructor(float16bitsArray, Float16Array);
+ if (Constructor === Float16Array) {
+ const uint16 = new NativeUint16Array(
+ TypedArrayPrototypeGetBuffer(float16bitsArray),
+ TypedArrayPrototypeGetByteOffset(float16bitsArray),
+ TypedArrayPrototypeGetLength(float16bitsArray)
+ );
+ return new Float16Array(
+ TypedArrayPrototypeGetBuffer(
+ TypedArrayPrototypeSlice(uint16, start, end)
+ )
+ );
+ }
+ const length = TypedArrayPrototypeGetLength(float16bitsArray);
+ const relativeStart = ToIntegerOrInfinity(start);
+ const relativeEnd = end === undefined ? length : ToIntegerOrInfinity(end);
+ let k;
+ if (relativeStart === -Infinity) {
+ k = 0;
+ } else if (relativeStart < 0) {
+ k = length + relativeStart > 0 ? length + relativeStart : 0;
+ } else {
+ k = length < relativeStart ? length : relativeStart;
+ }
+ let final;
+ if (relativeEnd === -Infinity) {
+ final = 0;
+ } else if (relativeEnd < 0) {
+ final = length + relativeEnd > 0 ? length + relativeEnd : 0;
+ } else {
+ final = length < relativeEnd ? length : relativeEnd;
+ }
+ const count = final - k > 0 ? final - k : 0;
+ const array = new Constructor(count);
+ assertSpeciesTypedArray(array, count);
+ if (count === 0) {
+ return array;
+ }
+ const buffer = TypedArrayPrototypeGetBuffer(float16bitsArray);
+ if (IsDetachedBuffer(buffer)) {
+ throw NativeTypeError(ATTEMPTING_TO_ACCESS_DETACHED_ARRAYBUFFER);
+ }
+ let n = 0;
+ while (k < final) {
+ array[n] = convertToNumber(float16bitsArray[k]);
+ ++k;
+ ++n;
+ }
+ return (array);
+ }
+ subarray(begin, end) {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ const Constructor = SpeciesConstructor(float16bitsArray, Float16Array);
+ const uint16 = new NativeUint16Array(
+ TypedArrayPrototypeGetBuffer(float16bitsArray),
+ TypedArrayPrototypeGetByteOffset(float16bitsArray),
+ TypedArrayPrototypeGetLength(float16bitsArray)
+ );
+ const uint16Subarray = TypedArrayPrototypeSubarray(uint16, begin, end);
+ const array = new Constructor(
+ TypedArrayPrototypeGetBuffer(uint16Subarray),
+ TypedArrayPrototypeGetByteOffset(uint16Subarray),
+ TypedArrayPrototypeGetLength(uint16Subarray)
+ );
+ assertSpeciesTypedArray(array);
+ return (array);
+ }
+ indexOf(element, ...opts) {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ const length = TypedArrayPrototypeGetLength(float16bitsArray);
+ let from = ToIntegerOrInfinity(opts[0]);
+ if (from === Infinity) {
+ return -1;
+ }
+ if (from < 0) {
+ from += length;
+ if (from < 0) {
+ from = 0;
+ }
+ }
+ for (let i = from; i < length; ++i) {
+ if (
+ ObjectHasOwn(float16bitsArray, i) &&
+ convertToNumber(float16bitsArray[i]) === element
+ ) {
+ return i;
+ }
+ }
+ return -1;
+ }
+ lastIndexOf(element, ...opts) {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ const length = TypedArrayPrototypeGetLength(float16bitsArray);
+ let from = opts.length >= 1 ? ToIntegerOrInfinity(opts[0]) : length - 1;
+ if (from === -Infinity) {
+ return -1;
+ }
+ if (from >= 0) {
+ from = from < length - 1 ? from : length - 1;
+ } else {
+ from += length;
+ }
+ for (let i = from; i >= 0; --i) {
+ if (
+ ObjectHasOwn(float16bitsArray, i) &&
+ convertToNumber(float16bitsArray[i]) === element
+ ) {
+ return i;
+ }
+ }
+ return -1;
+ }
+ includes(element, ...opts) {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ const length = TypedArrayPrototypeGetLength(float16bitsArray);
+ let from = ToIntegerOrInfinity(opts[0]);
+ if (from === Infinity) {
+ return false;
+ }
+ if (from < 0) {
+ from += length;
+ if (from < 0) {
+ from = 0;
+ }
+ }
+ const isNaN = NumberIsNaN(element);
+ for (let i = from; i < length; ++i) {
+ const value = convertToNumber(float16bitsArray[i]);
+ if (isNaN && NumberIsNaN(value)) {
+ return true;
+ }
+ if (value === element) {
+ return true;
+ }
+ }
+ return false;
+ }
+ join(separator) {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ const array = copyToArray(float16bitsArray);
+ return ArrayPrototypeJoin(array, separator);
+ }
+ toLocaleString(...opts) {
+ assertFloat16Array(this);
+ const float16bitsArray = getFloat16BitsArray(this);
+ const array = copyToArray(float16bitsArray);
+ return ArrayPrototypeToLocaleString(array, ...safeIfNeeded(opts));
+ }
+ get [SymbolToStringTag]() {
+ if (isFloat16Array(this)) {
+ return ("Float16Array");
+ }
+ }
+}
+ObjectDefineProperty(Float16Array, "BYTES_PER_ELEMENT", {
+ value: BYTES_PER_ELEMENT,
+});
+ObjectDefineProperty(Float16Array, brand, {});
+ReflectSetPrototypeOf(Float16Array, TypedArray);
+const Float16ArrayPrototype = Float16Array.prototype;
+ObjectDefineProperty(Float16ArrayPrototype, "BYTES_PER_ELEMENT", {
+ value: BYTES_PER_ELEMENT,
+});
+ObjectDefineProperty(Float16ArrayPrototype, SymbolIterator, {
+ value: Float16ArrayPrototype.values,
+ writable: true,
+ configurable: true,
+});
+ReflectSetPrototypeOf(Float16ArrayPrototype, TypedArrayPrototype);
+
+function isTypedArray(target) {
+ return isNativeTypedArray(target) || isFloat16Array(target);
+}
+
+function getFloat16(dataView, byteOffset, ...opts) {
+ return convertToNumber(
+ DataViewPrototypeGetUint16(dataView, byteOffset, ...safeIfNeeded(opts))
+ );
+}
+function setFloat16(dataView, byteOffset, value, ...opts) {
+ return DataViewPrototypeSetUint16(
+ dataView,
+ byteOffset,
+ roundToFloat16Bits(value),
+ ...safeIfNeeded(opts)
+ );
+}
+
+function hfround(x) {
+ const number = +x;
+ if (!NumberIsFinite(number) || number === 0) {
+ return number;
+ }
+ const x16 = roundToFloat16Bits(number);
+ return convertToNumber(x16);
+}
+
+export { Float16Array, getFloat16, hfround, isFloat16Array, isTypedArray, setFloat16 };
diff --git a/dom/webgpu/tests/cts/checkout/src/manual/README.txt b/dom/webgpu/tests/cts/checkout/src/manual/README.txt
new file mode 100644
index 0000000000..a50ded41db
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/manual/README.txt
@@ -0,0 +1,18 @@
+WebGPU tests that require manual intervention.
+
+Many of these tests may be HTML pages rather than tests that use the harness.
+
+Add informal notes here on possible stress tests.
+
+- Suspending or hibernating the machine.
+- Manually crashing or relaunching the browser's GPU process.
+- Triggering a GPU driver reset (TDR).
+- Forcibly or gracefully unplugging an external GPU.
+- Forcibly switching between GPUs using OS/driver settings.
+- Backgrounding the browser (on mobile OSes).
+- Moving windows between displays attached to different hardware adapters.
+- Moving windows between displays with different color properties (HDR/WCG).
+- Unplugging a laptop.
+- Switching between canvas and XR device output.
+
+TODO: look at dEQP (OpenGL ES and Vulkan) and WebGL for inspiration here.
diff --git a/dom/webgpu/tests/cts/checkout/src/resources/README.md b/dom/webgpu/tests/cts/checkout/src/resources/README.md
new file mode 100644
index 0000000000..824f82b998
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/resources/README.md
@@ -0,0 +1,15 @@
+Always use `getResourcePath()` to get the appropriate path to these resources depending
+on the context (WPT, standalone, worker, etc.)
+
+
+The test video files were generated with the ffmpeg commands below:
+ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libvpx -pix_fmt yuv420p -frames 500 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv four-colors-vp8-bt601.webm
+ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libtheora -pix_fmt yuv420p -frames 500 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv four-colors-theora-bt601.ogv
+ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libx264 -pix_fmt yuv420p -frames 500 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv four-colors-h264-bt601.mp4
+ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libvpx-vp9 -pix_fmt yuv420p -frames 500 -colorspace smpte170m -color_primaries smpte170m -color_trc smpte170m -color_range tv four-colors-vp9-bt601.webm
+ffmpeg.exe -loop 1 -i .\four-colors.png -c:v libvpx-vp9 -pix_fmt yuv420p -frames 500 -colorspace bt709 -color_primaries bt709 -color_trc bt709 -color_range tv -vf scale=out_color_matrix=bt709:out_range=tv four-colors-vp9-bt709.webm
+
+These rotation test files are copies of four-colors-h264-bt601.mp4 with metadata changes.
+ffmpeg.exe -i .\four-colors-h264-bt601.mp4 -c copy -metadata:s:v rotate=90 four-colors-h264-bt601-rotate-90.mp4
+ffmpeg.exe -i .\four-colors-h264-bt601.mp4 -c copy -metadata:s:v rotate=180 four-colors-h264-bt601-rotate-180.mp4
+ffmpeg.exe -i .\four-colors-h264-bt601.mp4 -c copy -metadata:s:v rotate=270 four-colors-h264-bt601-rotate-270.mp4
\ No newline at end of file
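A hedged sketch of how a test might resolve one of these files via getResourcePath(); the
import path and exact signature are assumptions based on the helper's use elsewhere in the
suite, so check the suite's own imports before copying this:

  // Assumed import path; the real location may differ.
  import { getResourcePath } from '../common/framework/resources.js';

  // Resolve a bundled video regardless of whether the test runs under WPT,
  // the standalone runner, or a worker.
  const videoUrl = getResourcePath('four-colors-vp9-bt601.webm');
  const video = document.createElement('video');
  video.src = videoUrl;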
diff --git a/dom/webgpu/tests/cts/checkout/src/resources/four-colors-h264-bt601-rotate-180.mp4 b/dom/webgpu/tests/cts/checkout/src/resources/four-colors-h264-bt601-rotate-180.mp4
new file mode 100644
index 0000000000..1f0e9094a5
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/resources/four-colors-h264-bt601-rotate-180.mp4
Binary files differ
diff --git a/dom/webgpu/tests/cts/checkout/src/resources/four-colors-h264-bt601-rotate-270.mp4 b/dom/webgpu/tests/cts/checkout/src/resources/four-colors-h264-bt601-rotate-270.mp4
new file mode 100644
index 0000000000..e0480ceff2
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/resources/four-colors-h264-bt601-rotate-270.mp4
Binary files differ
diff --git a/dom/webgpu/tests/cts/checkout/src/resources/four-colors-h264-bt601-rotate-90.mp4 b/dom/webgpu/tests/cts/checkout/src/resources/four-colors-h264-bt601-rotate-90.mp4
new file mode 100644
index 0000000000..9a6261056e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/resources/four-colors-h264-bt601-rotate-90.mp4
Binary files differ
diff --git a/dom/webgpu/tests/cts/checkout/src/resources/four-colors-h264-bt601.mp4 b/dom/webgpu/tests/cts/checkout/src/resources/four-colors-h264-bt601.mp4
new file mode 100644
index 0000000000..81a5ade435
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/resources/four-colors-h264-bt601.mp4
Binary files differ
diff --git a/dom/webgpu/tests/cts/checkout/src/resources/four-colors-theora-bt601.ogv b/dom/webgpu/tests/cts/checkout/src/resources/four-colors-theora-bt601.ogv
new file mode 100644
index 0000000000..79ed41163c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/resources/four-colors-theora-bt601.ogv
Binary files differ
diff --git a/dom/webgpu/tests/cts/checkout/src/resources/four-colors-vp8-bt601.webm b/dom/webgpu/tests/cts/checkout/src/resources/four-colors-vp8-bt601.webm
new file mode 100644
index 0000000000..20a2178596
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/resources/four-colors-vp8-bt601.webm
Binary files differ
diff --git a/dom/webgpu/tests/cts/checkout/src/resources/four-colors-vp9-bt601.webm b/dom/webgpu/tests/cts/checkout/src/resources/four-colors-vp9-bt601.webm
new file mode 100644
index 0000000000..a4044a9209
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/resources/four-colors-vp9-bt601.webm
Binary files differ
diff --git a/dom/webgpu/tests/cts/checkout/src/resources/four-colors-vp9-bt709.webm b/dom/webgpu/tests/cts/checkout/src/resources/four-colors-vp9-bt709.webm
new file mode 100644
index 0000000000..189e422035
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/resources/four-colors-vp9-bt709.webm
Binary files differ
diff --git a/dom/webgpu/tests/cts/checkout/src/resources/four-colors.png b/dom/webgpu/tests/cts/checkout/src/resources/four-colors.png
new file mode 100644
index 0000000000..c26c3d4865
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/resources/four-colors.png
Binary files differ
diff --git a/dom/webgpu/tests/cts/checkout/src/resources/webgpu.png b/dom/webgpu/tests/cts/checkout/src/resources/webgpu.png
new file mode 100644
index 0000000000..eec0d6eb90
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/resources/webgpu.png
Binary files differ
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/README.txt b/dom/webgpu/tests/cts/checkout/src/stress/README.txt
new file mode 100644
index 0000000000..5457e8400d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/README.txt
@@ -0,0 +1,6 @@
+WebGPU stress tests.
+
+These tests are separated from conformance tests because they are more likely to
+cause browser hangs and crashes.
+
+TODO: Look at dEQP (OpenGL ES and Vulkan) and WebGL for inspiration here.
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/adapter/README.txt b/dom/webgpu/tests/cts/checkout/src/stress/adapter/README.txt
new file mode 100644
index 0000000000..3a57f3d87f
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/adapter/README.txt
@@ -0,0 +1 @@
+Stress tests covering use of GPUAdapter.
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/adapter/device_allocation.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/adapter/device_allocation.spec.ts
new file mode 100644
index 0000000000..27bb5f6a32
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/adapter/device_allocation.spec.ts
@@ -0,0 +1,292 @@
+export const description = `
+Stress tests for GPUAdapter.requestDevice.
+`;
+
+import { Fixture } from '../../common/framework/fixture.js';
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { attemptGarbageCollection } from '../../common/util/collect_garbage.js';
+import { keysOf } from '../../common/util/data_tables.js';
+import { getGPU } from '../../common/util/navigator_gpu.js';
+import { assert, iterRange } from '../../common/util/util.js';
+import { getDefaultLimitsForAdapter } from '../../webgpu/capability_info.js';
+
+export const g = makeTestGroup(Fixture);
+
+/** Adapter preference identifier to option. */
+const kAdapterTypeOptions: {
+ readonly [k in GPUPowerPreference | 'fallback']: GPURequestAdapterOptions;
+} =
+ /* prettier-ignore */ {
+ 'low-power': { powerPreference: 'low-power', forceFallbackAdapter: false },
+ 'high-performance': { powerPreference: 'high-performance', forceFallbackAdapter: false },
+ 'fallback': { powerPreference: undefined, forceFallbackAdapter: true },
+};
+/** List of all adapter hint types. */
+const kAdapterTypes = keysOf(kAdapterTypeOptions);
+
+/**
+ * Creates a device, a valid compute pipeline, valid resources for the pipeline, and
+ * ties them together into a set of compute commands ready to be submitted to the GPU
+ * queue. Does not submit the commands in order to make sure that all resources are
+ * kept alive until the device is destroyed.
+ */
+async function createDeviceAndComputeCommands(adapter: GPUAdapter) {
+ // Constants are computed such that per run, this function should allocate roughly 2G
+ // worth of data. This should be sufficient as we run these creation functions many
+ // times. If the data backing the created objects is not recycled we should OOM.
+ const limitInfo = getDefaultLimitsForAdapter(adapter);
+ const kNumPipelines = 64;
+ const kNumBindgroups = 128;
+ const kNumBufferElements =
+ limitInfo.maxComputeWorkgroupSizeX.default * limitInfo.maxComputeWorkgroupSizeY.default;
+ const kBufferSize = kNumBufferElements * 4;
+ const kBufferData = new Uint32Array([...iterRange(kNumBufferElements, x => x)]);
+
+ const device: GPUDevice = await adapter.requestDevice();
+ const commands = [];
+
+ for (let pipelineIndex = 0; pipelineIndex < kNumPipelines; ++pipelineIndex) {
+ const pipeline = device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: device.createShaderModule({
+ code: `
+ struct Buffer { data: array<u32>, };
+
+ @group(0) @binding(0) var<storage, read_write> buffer: Buffer;
+ @compute @workgroup_size(1) fn main(
+ @builtin(global_invocation_id) id: vec3<u32>) {
+ buffer.data[id.x * ${limitInfo.maxComputeWorkgroupSizeX.default}u + id.y] =
+ buffer.data[id.x * ${limitInfo.maxComputeWorkgroupSizeX.default}u + id.y] +
+ ${pipelineIndex}u;
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ });
+ for (let bindgroupIndex = 0; bindgroupIndex < kNumBindgroups; ++bindgroupIndex) {
+ const buffer = device.createBuffer({
+ size: kBufferSize,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST | GPUBufferUsage.COPY_SRC,
+ });
+ device.queue.writeBuffer(buffer, 0, kBufferData, 0, kBufferData.length);
+ const bindgroup = device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer } }],
+ });
+
+ const encoder = device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindgroup);
+ pass.dispatchWorkgroups(
+ limitInfo.maxComputeWorkgroupSizeX.default,
+ limitInfo.maxComputeWorkgroupSizeY.default
+ );
+ pass.end();
+ commands.push(encoder.finish());
+ }
+ }
+ return { device, objects: commands };
+}
+
+/**
+ * Creates a device, a valid render pipeline, valid resources for the pipeline, and
+ * ties them together into a set of render commands ready to be submitted to the GPU
+ * queue. Does not submit the commands in order to make sure that all resources are
+ * kept alive until the device is destroyed.
+ */
+async function createDeviceAndRenderCommands(adapter: GPUAdapter) {
+ // Constants are computed such that per run, this function should allocate roughly 2G
+ // worth of data. This should be sufficient as we run these creation functions many
+ // times. If the data backing the created objects is not recycled we should OOM.
+ const kNumPipelines = 128;
+ const kNumBindgroups = 128;
+ const kSize = 128;
+ const kBufferData = new Uint32Array([...iterRange(kSize * kSize, x => x)]);
+
+ const device: GPUDevice = await adapter.requestDevice();
+ const commands = [];
+
+ for (let pipelineIndex = 0; pipelineIndex < kNumPipelines; ++pipelineIndex) {
+ const module = device.createShaderModule({
+ code: `
+ struct Buffer { data: array<vec4<u32>, ${(kSize * kSize) / 4}>, };
+
+ @group(0) @binding(0) var<uniform> buffer: Buffer;
+ @vertex fn vmain(
+ @builtin(vertex_index) vertexIndex: u32
+ ) -> @builtin(position) vec4<f32> {
+ let index = buffer.data[vertexIndex / 4u][vertexIndex % 4u];
+ let position = vec2<f32>(f32(index % ${kSize}u), f32(index / ${kSize}u));
+ let r = vec2<f32>(1.0 / f32(${kSize}));
+ let a = 2.0 * r;
+ let b = r - vec2<f32>(1.0);
+ return vec4<f32>(fma(position, a, b), 0.0, 1.0);
+ }
+
+ @fragment fn fmain() -> @location(0) vec4<f32> {
+ return vec4<f32>(${pipelineIndex}.0 / ${kNumPipelines}.0, 0.0, 0.0, 1.0);
+ }
+ `,
+ });
+ const pipeline = device.createRenderPipeline({
+ layout: device.createPipelineLayout({
+ bindGroupLayouts: [
+ device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.VERTEX,
+ buffer: { type: 'uniform' },
+ },
+ ],
+ }),
+ ],
+ }),
+ vertex: { module, entryPoint: 'vmain', buffers: [] },
+ primitive: { topology: 'point-list' },
+ fragment: {
+ targets: [{ format: 'rgba8unorm' }],
+ module,
+ entryPoint: 'fmain',
+ },
+ });
+ for (let bindgroupIndex = 0; bindgroupIndex < kNumBindgroups; ++bindgroupIndex) {
+ const buffer = device.createBuffer({
+ size: kSize * kSize * 4,
+ usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
+ });
+ device.queue.writeBuffer(buffer, 0, kBufferData, 0, kBufferData.length);
+ const bindgroup = device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer } }],
+ });
+ const texture = device.createTexture({
+ size: [kSize, kSize],
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ format: 'rgba8unorm',
+ });
+
+ const encoder = device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: texture.createView(),
+ loadOp: 'load',
+ storeOp: 'store',
+ },
+ ],
+ });
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindgroup);
+ pass.draw(kSize * kSize);
+ pass.end();
+ commands.push(encoder.finish());
+ }
+ }
+ return { device, objects: commands };
+}
+
+/**
+ * Creates a device and a large number of buffers which are immediately written to. The
+ * buffers are expected to be kept alive until they or the device are destroyed.
+ */
+async function createDeviceAndBuffers(adapter: GPUAdapter) {
+ // Currently we just allocate 2G of memory using 512MB blocks. We may be able to
+ // increase this to hit OOM instead, but on integrated GPUs on Metal, this can cause
+ // kernel panics at the moment, and it can greatly increase the time needed.
+ const kTotalMemorySize = 2 * 1024 * 1024 * 1024;
+ const kMemoryBlockSize = 512 * 1024 * 1024;
+ const kMemoryBlockData = new Uint8Array(kMemoryBlockSize);
+
+ const device: GPUDevice = await adapter.requestDevice();
+ const buffers = [];
+ for (let memory = 0; memory < kTotalMemorySize; memory += kMemoryBlockSize) {
+ const buffer = device.createBuffer({
+ size: kMemoryBlockSize,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
+ });
+
+ // Write out to the buffer to make sure that it has backing memory.
+ device.queue.writeBuffer(buffer, 0, kMemoryBlockData, 0, kMemoryBlockData.length);
+ buffers.push(buffer);
+ }
+ return { device, objects: buffers };
+}
+
+g.test('coexisting')
+ .desc(`Tests allocation of many coexisting GPUDevice objects.`)
+ .params(u => u.combine('adapterType', kAdapterTypes))
+ .fn(async t => {
+ const { adapterType } = t.params;
+ const adapter = await getGPU(t.rec).requestAdapter(kAdapterTypeOptions[adapterType]);
+ assert(adapter !== null, 'Failed to get adapter.');
+
+    // Based on the Vulkan conformance test requirement to be able to create multiple devices.
+ const kNumDevices = 5;
+
+ const devices = [];
+ for (let i = 0; i < kNumDevices; ++i) {
+ const device: GPUDevice = await adapter.requestDevice();
+ devices.push(device);
+ }
+ });
+
+g.test('continuous,with_destroy')
+ .desc(
+ `Tests allocation and destruction of many GPUDevice objects over time. Device objects
+are sequentially requested with a series of device-allocated objects created on each
+device. The devices are then destroyed to verify that the device and the device allocated
+objects are recycled over a very large number of iterations.`
+ )
+ .params(u => u.combine('adapterType', kAdapterTypes))
+ .fn(async t => {
+ const { adapterType } = t.params;
+ const adapter = await getGPU(t.rec).requestAdapter(kAdapterTypeOptions[adapterType]);
+ assert(adapter !== null, 'Failed to get adapter.');
+
+ // Since devices are being destroyed, we should be able to create many devices.
+ const kNumDevices = 100;
+ const kFunctions = [
+ createDeviceAndBuffers,
+ createDeviceAndComputeCommands,
+ createDeviceAndRenderCommands,
+ ];
+
+ const deviceList = [];
+ const objectLists = [];
+ for (let i = 0; i < kNumDevices; ++i) {
+ const { device, objects } = await kFunctions[i % kFunctions.length](adapter);
+ t.expect(objects.length > 0, 'unable to allocate any objects');
+ deviceList.push(device);
+ objectLists.push(objects);
+ device.destroy();
+ }
+ });
+
+g.test('continuous,no_destroy')
+ .desc(
+ `Tests allocation and implicit GC of many GPUDevice objects over time. Objects are
+sequentially requested and dropped for GC over a very large number of iterations. Note
+that without destroy, we do not create device-allocated objects, because doing so would
+implicitly keep the device in scope.`
+ )
+ .params(u => u.combine('adapterType', kAdapterTypes))
+ .fn(async t => {
+ const { adapterType } = t.params;
+ const adapter = await getGPU(t.rec).requestAdapter(kAdapterTypeOptions[adapterType]);
+ assert(adapter !== null, 'Failed to get adapter.');
+
+ const kNumDevices = 10_000;
+ for (let i = 1; i <= kNumDevices; ++i) {
+ await (async () => {
+ t.expect((await adapter.requestDevice()) !== null, 'unexpected null device');
+ })();
+ if (i % 10 === 0) {
+ // We need to occasionally wait for GC to clear out stale devices.
+ await attemptGarbageCollection();
+ }
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/compute/README.txt b/dom/webgpu/tests/cts/checkout/src/stress/compute/README.txt
new file mode 100644
index 0000000000..b41aabc66b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/compute/README.txt
@@ -0,0 +1 @@
+Stress tests covering operations specific to GPUComputePipeline and GPUComputePass.
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/compute/compute_pass.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/compute/compute_pass.spec.ts
new file mode 100644
index 0000000000..bd63ca1450
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/compute/compute_pass.spec.ts
@@ -0,0 +1,243 @@
+export const description = `
+Stress tests covering GPUComputePassEncoder usage.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { assert, iterRange } from '../../common/util/util.js';
+import { GPUTest } from '../../webgpu/gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('many')
+ .desc(
+ `Tests execution of a huge number of compute passes using the same
+GPUComputePipeline.`
+ )
+ .fn(t => {
+ const kNumElements = 64;
+ const data = new Uint32Array([...iterRange(kNumElements, x => x)]);
+ const buffer = t.makeBufferWithContents(data, GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC);
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({
+ code: `
+ struct Buffer { data: array<u32>, };
+ @group(0) @binding(0) var<storage, read_write> buffer: Buffer;
+ @compute @workgroup_size(1) fn main(
+ @builtin(global_invocation_id) id: vec3<u32>) {
+ buffer.data[id.x] = buffer.data[id.x] + 1u;
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ });
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer } }],
+ });
+ const kNumIterations = 250_000;
+ for (let i = 0; i < kNumIterations; ++i) {
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(kNumElements);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+ }
+ t.expectGPUBufferValuesEqual(
+ buffer,
+ new Uint32Array([...iterRange(kNumElements, x => x + kNumIterations)])
+ );
+ });
+
+g.test('pipeline_churn')
+ .desc(
+ `Tests execution of a huge number of compute passes which each use a different
+GPUComputePipeline.`
+ )
+ .fn(t => {
+ const buffer = t.makeBufferWithContents(
+ new Uint32Array([0]),
+ GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC
+ );
+ const kNumIterations = 10_000;
+ const stages = iterRange(kNumIterations, i => ({
+ module: t.device.createShaderModule({
+ code: `
+ struct Buffer { data: u32, };
+ @group(0) @binding(0) var<storage, read_write> buffer: Buffer;
+ @compute @workgroup_size(1) fn main${i}() {
+ buffer.data = buffer.data + 1u;
+ }
+ `,
+ }),
+ entryPoint: `main${i}`,
+ }));
+ for (const compute of stages) {
+ const encoder = t.device.createCommandEncoder();
+ const pipeline = t.device.createComputePipeline({ layout: 'auto', compute });
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer } }],
+ });
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(1);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+ }
+ t.expectGPUBufferValuesEqual(buffer, new Uint32Array([kNumIterations]));
+ });
+
+g.test('bind_group_churn')
+ .desc(
+ `Tests execution of compute passes which switch between a huge number of bind
+groups.`
+ )
+ .fn(t => {
+ const kNumElements = 64;
+ const data = new Uint32Array([...iterRange(kNumElements, x => x)]);
+ const buffer1 = t.makeBufferWithContents(
+ data,
+ GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC
+ );
+ const buffer2 = t.makeBufferWithContents(
+ data,
+ GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC
+ );
+ const module = t.device.createShaderModule({
+ code: `
+ struct Buffer { data: array<u32>, };
+ @group(0) @binding(0) var<storage, read_write> buffer1: Buffer;
+ @group(0) @binding(1) var<storage, read_write> buffer2: Buffer;
+ @compute @workgroup_size(1) fn main(
+ @builtin(global_invocation_id) id: vec3<u32>) {
+ buffer1.data[id.x] = buffer1.data[id.x] + 1u;
+ buffer2.data[id.x] = buffer2.data[id.x] + 2u;
+ }
+ `,
+ });
+ const kNumIterations = 250_000;
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: { module, entryPoint: 'main' },
+ });
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ for (let i = 0; i < kNumIterations; ++i) {
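+      // Alternate which buffer is bound at binding 0 vs. binding 1 on each iteration.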
+ const buffer1Binding = i % 2;
+ const buffer2Binding = buffer1Binding ^ 1;
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: buffer1Binding, resource: { buffer: buffer1 } },
+ { binding: buffer2Binding, resource: { buffer: buffer2 } },
+ ],
+ });
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(kNumElements);
+ }
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
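+    // Each buffer receives +1 on half of the iterations and +2 on the other half.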
+ const kTotalAddition = (kNumIterations / 2) * 3;
+ t.expectGPUBufferValuesEqual(
+ buffer1,
+ new Uint32Array([...iterRange(kNumElements, x => x + kTotalAddition)])
+ );
+ t.expectGPUBufferValuesEqual(
+ buffer2,
+ new Uint32Array([...iterRange(kNumElements, x => x + kTotalAddition)])
+ );
+ });
+
+g.test('many_dispatches')
+ .desc(`Tests execution of compute passes with a huge number of dispatch calls`)
+ .fn(t => {
+ const kNumElements = 64;
+ const data = new Uint32Array([...iterRange(kNumElements, x => x)]);
+ const buffer = t.makeBufferWithContents(data, GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC);
+ const module = t.device.createShaderModule({
+ code: `
+ struct Buffer { data: array<u32>, };
+ @group(0) @binding(0) var<storage, read_write> buffer: Buffer;
+ @compute @workgroup_size(1) fn main(
+ @builtin(global_invocation_id) id: vec3<u32>) {
+ buffer.data[id.x] = buffer.data[id.x] + 1u;
+ }
+ `,
+ });
+ const kNumIterations = 1_000_000;
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: { module, entryPoint: 'main' },
+ });
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer } }],
+ });
+ pass.setBindGroup(0, bindGroup);
+ for (let i = 0; i < kNumIterations; ++i) {
+ pass.dispatchWorkgroups(kNumElements);
+ }
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+ t.expectGPUBufferValuesEqual(
+ buffer,
+ new Uint32Array([...iterRange(kNumElements, x => x + kNumIterations)])
+ );
+ });
+
+g.test('huge_dispatches')
+ .desc(`Tests execution of compute passes with huge dispatch calls`)
+ .fn(async t => {
+ const kDimensions = [512, 512, 128];
+ kDimensions.forEach(x => {
+ assert(x <= t.device.limits.maxComputeWorkgroupsPerDimension);
+ });
+
+ const kNumElements = kDimensions[0] * kDimensions[1] * kDimensions[2];
+ const data = new Uint32Array([...iterRange(kNumElements, x => x)]);
+ const buffer = t.makeBufferWithContents(data, GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC);
+ const module = t.device.createShaderModule({
+ code: `
+ struct Buffer { data: array<u32>, };
+ @group(0) @binding(0) var<storage, read_write> buffer: Buffer;
+ @compute @workgroup_size(1) fn main(
+ @builtin(global_invocation_id) id: vec3<u32>) {
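+          // Flatten the 3D invocation id into a linear index (512 matches kDimensions x/y).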
+ let index = (id.z * 512u + id.y) * 512u + id.x;
+ buffer.data[index] = buffer.data[index] + 1u;
+ }
+ `,
+ });
+ const kNumIterations = 16;
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: { module, entryPoint: 'main' },
+ });
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer } }],
+ });
+ for (let i = 0; i < kNumIterations; ++i) {
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setBindGroup(0, bindGroup);
+ pass.setPipeline(pipeline);
+ pass.dispatchWorkgroups(kDimensions[0], kDimensions[1], kDimensions[2]);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+ await t.device.queue.onSubmittedWorkDone();
+ }
+ t.expectGPUBufferValuesEqual(
+ buffer,
+ new Uint32Array([...iterRange(kNumElements, x => x + kNumIterations)])
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/device/README.txt b/dom/webgpu/tests/cts/checkout/src/stress/device/README.txt
new file mode 100644
index 0000000000..6ee89fc5fd
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/device/README.txt
@@ -0,0 +1,2 @@
+Stress tests covering GPUDevice usage, primarily focused on stressing allocation
+of various resources.
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/device/bind_group_allocation.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/device/bind_group_allocation.spec.ts
new file mode 100644
index 0000000000..5d428f3edb
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/device/bind_group_allocation.spec.ts
@@ -0,0 +1,65 @@
+export const description = `
+Stress tests for allocation of GPUBindGroup objects through GPUDevice.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { GPUTest } from '../../webgpu/gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('coexisting')
+ .desc(`Tests allocation of many coexisting GPUBindGroup objects.`)
+ .fn(t => {
+ const kNumGroups = 1_000_000;
+ const buffer = t.device.createBuffer({
+ size: 64,
+ usage: GPUBufferUsage.STORAGE,
+ });
+ const layout = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE,
+ buffer: { type: 'storage' },
+ },
+ ],
+ });
+ const bindGroups = [];
+ for (let i = 0; i < kNumGroups; ++i) {
+ bindGroups.push(
+ t.device.createBindGroup({
+ layout,
+ entries: [{ binding: 0, resource: { buffer } }],
+ })
+ );
+ }
+ });
+
+g.test('continuous')
+ .desc(
+ `Tests allocation and implicit GC of many GPUBindGroup objects over time.
+Objects are sequentially created and dropped for GC over a very large number of
+iterations.`
+ )
+ .fn(t => {
+ const kNumGroups = 5_000_000;
+ const buffer = t.device.createBuffer({
+ size: 64,
+ usage: GPUBufferUsage.STORAGE,
+ });
+ const layout = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE,
+ buffer: { type: 'storage' },
+ },
+ ],
+ });
+ for (let i = 0; i < kNumGroups; ++i) {
+ t.device.createBindGroup({
+ layout,
+ entries: [{ binding: 0, resource: { buffer } }],
+ });
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/device/bind_group_layout_allocation.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/device/bind_group_layout_allocation.spec.ts
new file mode 100644
index 0000000000..0933cd1b59
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/device/bind_group_layout_allocation.spec.ts
@@ -0,0 +1,20 @@
+export const description = `
+Stress tests for allocation of GPUBindGroupLayout objects through GPUDevice.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { GPUTest } from '../../webgpu/gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('coexisting')
+ .desc(`Tests allocation of many coexisting GPUBindGroupLayout objects.`)
+ .unimplemented();
+
+g.test('continuous')
+ .desc(
+ `Tests allocation and implicit GC of many GPUBindGroupLayout objects over time.
+Objects are sequentially created and dropped for GC over a very large number of
+iterations.`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/device/buffer_allocation.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/device/buffer_allocation.spec.ts
new file mode 100644
index 0000000000..f55ec79c44
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/device/buffer_allocation.spec.ts
@@ -0,0 +1,25 @@
+export const description = `
+Stress tests for allocation of GPUBuffer objects through GPUDevice.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { GPUTest } from '../../webgpu/gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('coexisting').desc(`Tests allocation of many coexisting GPUBuffer objects.`).unimplemented();
+
+g.test('continuous,with_destroy')
+ .desc(
+ `Tests allocation and destruction of many GPUBuffer objects over time. Objects
+are sequentially created and destroyed over a very large number of iterations.`
+ )
+ .unimplemented();
+
+g.test('continuous,no_destroy')
+ .desc(
+ `Tests allocation and implicit GC of many GPUBuffer objects over time. Objects
+are sequentially created and dropped for GC over a very large number of
+iterations.`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/device/command_encoder_allocation.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/device/command_encoder_allocation.spec.ts
new file mode 100644
index 0000000000..e41769ee06
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/device/command_encoder_allocation.spec.ts
@@ -0,0 +1,20 @@
+export const description = `
+Stress tests for allocation of GPUCommandEncoder objects through GPUDevice.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { GPUTest } from '../../webgpu/gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('coexisting')
+ .desc(`Tests allocation of many coexisting GPUCommandEncoder objects.`)
+ .unimplemented();
+
+g.test('continuous')
+ .desc(
+ `Tests allocation and implicit GC of many GPUCommandEncoder objects over time.
+Objects are sequentially created and dropped for GC over a very large number of
+iterations.`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/device/compute_pipeline_allocation.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/device/compute_pipeline_allocation.spec.ts
new file mode 100644
index 0000000000..5c03bc9674
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/device/compute_pipeline_allocation.spec.ts
@@ -0,0 +1,20 @@
+export const description = `
+Stress tests for allocation of GPUComputePipeline objects through GPUDevice.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { GPUTest } from '../../webgpu/gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('coexisting')
+ .desc(`Tests allocation of many coexisting GPUComputePipeline objects.`)
+ .unimplemented();
+
+g.test('continuous')
+ .desc(
+ `Tests allocation and implicit GC of many GPUComputePipeline objects over time.
+Objects are sequentially created and dropped for GC over a very large number of
+iterations.`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/device/pipeline_layout_allocation.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/device/pipeline_layout_allocation.spec.ts
new file mode 100644
index 0000000000..15d417fd7e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/device/pipeline_layout_allocation.spec.ts
@@ -0,0 +1,20 @@
+export const description = `
+Stress tests for allocation of GPUPipelineLayout objects through GPUDevice.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { GPUTest } from '../../webgpu/gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('coexisting')
+ .desc(`Tests allocation of many coexisting GPUPipelineLayout objects.`)
+ .unimplemented();
+
+g.test('continuous')
+ .desc(
+ `Tests allocation and implicit GC of many GPUPipelineLayout objects over time.
+Objects are sequentially created and dropped for GC over a very large number of
+iterations.`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/device/query_set_allocation.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/device/query_set_allocation.spec.ts
new file mode 100644
index 0000000000..757645cbf6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/device/query_set_allocation.spec.ts
@@ -0,0 +1,27 @@
+export const description = `
+Stress tests for allocation of GPUQuerySet objects through GPUDevice.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { GPUTest } from '../../webgpu/gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('coexisting')
+ .desc(`Tests allocation of many coexisting GPUQuerySet objects.`)
+ .unimplemented();
+
+g.test('continuous,with_destroy')
+ .desc(
+ `Tests allocation and destruction of many GPUQuerySet objects over time. Objects
+are sequentially created and destroyed over a very large number of iterations.`
+ )
+ .unimplemented();
+
+g.test('continuous,no_destroy')
+ .desc(
+ `Tests allocation and implicit GC of many GPUQuerySet objects over time. Objects
+are sequentially created and dropped for GC over a very large number of
+iterations.`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/device/render_bundle_allocation.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/device/render_bundle_allocation.spec.ts
new file mode 100644
index 0000000000..d7448412a1
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/device/render_bundle_allocation.spec.ts
@@ -0,0 +1,20 @@
+export const description = `
+Stress tests for allocation of GPURenderBundle objects through GPUDevice.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { GPUTest } from '../../webgpu/gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('coexisting')
+ .desc(`Tests allocation of many coexisting GPURenderBundle objects.`)
+ .unimplemented();
+
+g.test('continuous')
+ .desc(
+ `Tests allocation and implicit GC of many GPURenderBundle objects over time.
+Objects are sequentially created and dropped for GC over a very large number of
+iterations.`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/device/render_pipeline_allocation.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/device/render_pipeline_allocation.spec.ts
new file mode 100644
index 0000000000..21eb92cf7c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/device/render_pipeline_allocation.spec.ts
@@ -0,0 +1,20 @@
+export const description = `
+Stress tests for allocation of GPURenderPipeline objects through GPUDevice.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { GPUTest } from '../../webgpu/gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('coexisting')
+ .desc(`Tests allocation of many coexisting GPURenderPipeline objects.`)
+ .unimplemented();
+
+g.test('continuous')
+ .desc(
+ `Tests allocation and implicit GC of many GPURenderPipeline objects over time.
+Objects are sequentially created and dropped for GC over a very large number of
+iterations.`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/device/sampler_allocation.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/device/sampler_allocation.spec.ts
new file mode 100644
index 0000000000..c34dae3f67
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/device/sampler_allocation.spec.ts
@@ -0,0 +1,20 @@
+export const description = `
+Stress tests for allocation of GPUSampler objects through GPUDevice.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { GPUTest } from '../../webgpu/gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('coexisting')
+ .desc(`Tests allocation of many coexisting GPUSampler objects.`)
+ .unimplemented();
+
+g.test('continuous')
+ .desc(
+ `Tests allocation and implicit GC of many GPUSampler objects over time. Objects
+are sequentially created and dropped for GC over a very large number of
+iterations.`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/device/shader_module_allocation.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/device/shader_module_allocation.spec.ts
new file mode 100644
index 0000000000..97ef73d2c9
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/device/shader_module_allocation.spec.ts
@@ -0,0 +1,20 @@
+export const description = `
+Stress tests for allocation of GPUShaderModule objects through GPUDevice.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { GPUTest } from '../../webgpu/gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('coexisting')
+ .desc(`Tests allocation of many coexisting GPUShaderModule objects.`)
+ .unimplemented();
+
+g.test('continuous')
+ .desc(
+ `Tests allocation and implicit GC of many GPUShaderModule objects over time.
+Objects are sequentially created and dropped for GC over a very large number of
+iterations.`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/device/texture_allocation.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/device/texture_allocation.spec.ts
new file mode 100644
index 0000000000..5cef598804
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/device/texture_allocation.spec.ts
@@ -0,0 +1,27 @@
+export const description = `
+Stress tests for allocation of GPUTexture objects through GPUDevice.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { GPUTest } from '../../webgpu/gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('coexisting')
+ .desc(`Tests allocation of many coexisting GPUTexture objects.`)
+ .unimplemented();
+
+g.test('continuous,with_destroy')
+ .desc(
+ `Tests allocation and destruction of many GPUTexture objects over time. Objects
+are sequentially created and destroyed over a very large number of iterations.`
+ )
+ .unimplemented();
+
+g.test('continuous,no_destroy')
+ .desc(
+ `Tests allocation and implicit GC of many GPUTexture objects over time. Objects
+are sequentially created and dropped for GC over a very large number of
+iterations.`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/listing.ts b/dom/webgpu/tests/cts/checkout/src/stress/listing.ts
new file mode 100644
index 0000000000..823639c692
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/listing.ts
@@ -0,0 +1,5 @@
+/* eslint-disable import/no-restricted-paths */
+import { TestSuiteListing } from '../common/internal/test_suite_listing.js';
+import { makeListing } from '../common/tools/crawl.js';
+
+export const listing: Promise<TestSuiteListing> = makeListing(__filename);
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/memory/README.txt b/dom/webgpu/tests/cts/checkout/src/stress/memory/README.txt
new file mode 100644
index 0000000000..ac0c90bfb7
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/memory/README.txt
@@ -0,0 +1 @@
+Stress tests covering allocation and usage of various types of GPUBuffer objects.
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/memory/churn.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/memory/churn.spec.ts
new file mode 100644
index 0000000000..fcb899eb29
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/memory/churn.spec.ts
@@ -0,0 +1,17 @@
+export const description = `
+Stress tests covering robustness in the presence of heavy buffer and texture
+memory churn.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { GPUTest } from '../../webgpu/gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('churn')
+ .desc(
+ `Allocates and populates a huge number of buffers and textures over time,
+retaining some while dropping or explicitly destroying others. When finished,
+verifies the expected contents of any remaining buffers and textures.`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/memory/oom.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/memory/oom.spec.ts
new file mode 100644
index 0000000000..d14486ecee
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/memory/oom.spec.ts
@@ -0,0 +1,178 @@
+export const description = `
+Stress tests covering robustness when available VRAM is exhausted.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { unreachable } from '../../common/util/util.js';
+import { GPUConst } from '../../webgpu/constants.js';
+import { GPUTest } from '../../webgpu/gpu_test.js';
+import { exhaustVramUntilUnder64MB } from '../../webgpu/util/memory.js';
+
+export const g = makeTestGroup(GPUTest);
+
+function createBufferWithMapState(
+ device: GPUDevice,
+ size: number,
+ mapState: GPUBufferMapState,
+ mode: GPUMapModeFlags,
+ mappedAtCreation: boolean
+) {
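+  // 'unmapped' buffers are created with MAP_READ or MAP_WRITE usage so they can be
+  // mapped later; 'mapped' buffers either stay mapped from creation or have no MAP_* usage.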
+ const mappable = mapState === 'unmapped';
+ if (!mappable && !mappedAtCreation) {
+ return device.createBuffer({
+ size,
+ usage: GPUBufferUsage.UNIFORM,
+ mappedAtCreation,
+ });
+ }
+ let buffer: GPUBuffer;
+ switch (mode) {
+ case GPUMapMode.READ:
+ buffer = device.createBuffer({
+ size,
+ usage: GPUBufferUsage.MAP_READ,
+ mappedAtCreation,
+ });
+ break;
+ case GPUMapMode.WRITE:
+ buffer = device.createBuffer({
+ size,
+ usage: GPUBufferUsage.MAP_WRITE,
+ mappedAtCreation,
+ });
+ break;
+ default:
+ unreachable();
+ }
+ // If we want the buffer to be mappable and also mappedAtCreation, we call unmap on it now.
+ if (mappable && mappedAtCreation) {
+ buffer.unmap();
+ }
+ return buffer;
+}
+
+g.test('vram_oom')
+ .desc(`Tests that we can allocate buffers until we run out of VRAM.`)
+ .fn(async t => {
+ await exhaustVramUntilUnder64MB(t.device);
+ });
+
+g.test('map_after_vram_oom')
+ .desc(
+    `Allocates a huge number of buffers with varying map states (unmappable,
+mappable, mappedAtCreation, mappedAtCreation-then-unmapped) until OOM; then attempts
+to mapAsync all the mappable buffers. The last buffer should be an error buffer, so
+mapAsync on it should reject and produce a validation error.`
+ )
+ .params(u =>
+ u
+ .combine('mapState', ['mapped', 'unmapped'] as GPUBufferMapState[])
+ .combine('mode', [GPUConst.MapMode.READ, GPUConst.MapMode.WRITE])
+ .combine('mappedAtCreation', [true, false])
+ .combine('unmapBeforeResolve', [true, false])
+ )
+ .fn(async t => {
+ // Use a relatively large size to quickly hit OOM.
+ const kSize = 512 * 1024 * 1024;
+
+ const { mapState, mode, mappedAtCreation, unmapBeforeResolve } = t.params;
+ const mappable = mapState === 'unmapped';
+ const buffers: GPUBuffer[] = [];
+ // Closure to call map and verify results on all of the buffers.
+ const finish = async () => {
+ if (mappable) {
+ await Promise.all(buffers.map(value => value.mapAsync(mode)));
+ } else {
+ buffers.forEach(value => {
+ t.expectValidationError(() => {
+ void value.mapAsync(mode);
+ });
+ });
+ }
+ // Finally, destroy all the buffers to free the resources.
+ buffers.forEach(buffer => buffer.destroy());
+ };
+
+ let errorBuffer: GPUBuffer;
+ for (;;) {
+ if (mappedAtCreation) {
+      // When mappedAtCreation is true, OOM can happen on the client side, which throws a
+      // RangeError. In this case, we don't do any validation on the OOM buffer.
+ try {
+ t.device.pushErrorScope('out-of-memory');
+ const buffer = t.trackForCleanup(
+ createBufferWithMapState(t.device, kSize, mapState, mode, mappedAtCreation)
+ );
+ if (await t.device.popErrorScope()) {
+ errorBuffer = buffer;
+ break;
+ }
+ buffers.push(buffer);
+ } catch (ex) {
+ t.expect(ex instanceof RangeError);
+ await finish();
+ return;
+ }
+ } else {
+ t.device.pushErrorScope('out-of-memory');
+ const buffer = t.trackForCleanup(
+ createBufferWithMapState(t.device, kSize, mapState, mode, mappedAtCreation)
+ );
+ if (await t.device.popErrorScope()) {
+ errorBuffer = buffer;
+ break;
+ }
+ buffers.push(buffer);
+ }
+ }
+
+ // Do some validation on the OOM buffer.
+ let promise: Promise<void>;
+ t.expectValidationError(() => {
+ promise = errorBuffer.mapAsync(mode);
+ });
+ if (unmapBeforeResolve) {
+      // Should reject with an AbortError because the buffer will be unmapped
+      // before the validation check finishes.
+ t.shouldReject('AbortError', promise!);
+ } else {
+ // Should also reject in addition to the validation error.
+ t.shouldReject('OperationError', promise!);
+
+ // Wait for validation error before unmap to ensure validation check
+ // ends before unmap.
+ try {
+ await promise!;
+ throw new Error('The promise should be rejected.');
+ } catch {
+ // Should cause an exception because the promise should be rejected.
+ }
+ }
+
+ // Should throw an OperationError because the buffer is not mapped.
+ // Note: not a RangeError because the state of the buffer is checked first.
+ t.shouldThrow('OperationError', () => {
+ errorBuffer.getMappedRange();
+ });
+
+    // Shouldn't be a validation error even if the buffer failed to be mapped.
+ errorBuffer.unmap();
+ errorBuffer.destroy();
+
+ // Finish the rest of the test w.r.t the mappable buffers.
+ await finish();
+ });
+
+g.test('validation_vs_oom')
+ .desc(
+ `Tests that calls affected by both OOM and validation errors expose the
+validation error with precedence.`
+ )
+ .unimplemented();
+
+g.test('recovery')
+ .desc(
+ `Tests that after going VRAM-OOM, destroying allocated resources eventually
+allows new resources to be allocated.`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/queries/README.txt b/dom/webgpu/tests/cts/checkout/src/stress/queries/README.txt
new file mode 100644
index 0000000000..fe466205c4
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/queries/README.txt
@@ -0,0 +1 @@
+Stress tests covering use of GPUQuerySet objects and related operations.
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/queries/occlusion.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/queries/occlusion.spec.ts
new file mode 100644
index 0000000000..056d6bdaea
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/queries/occlusion.spec.ts
@@ -0,0 +1,10 @@
+export const description = `
+Stress tests for occlusion queries.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { GPUTest } from '../../webgpu/gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('many').desc(`Tests a huge number of occlusion queries in a render pass.`).unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/queries/resolve.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/queries/resolve.spec.ts
new file mode 100644
index 0000000000..da67977395
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/queries/resolve.spec.ts
@@ -0,0 +1,15 @@
+export const description = `
+Stress tests for query resolution.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { GPUTest } from '../../webgpu/gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('many_large_sets')
+ .desc(
+ `Tests a huge number of resolveQuerySet operations on a huge number of
+query sets between render passes.`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/queries/timestamps.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/queries/timestamps.spec.ts
new file mode 100644
index 0000000000..da3e1eb472
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/queries/timestamps.spec.ts
@@ -0,0 +1,50 @@
+export const description = `
+Stress tests for timestamp queries.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { GPUTest } from '../../webgpu/gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('command_encoder_one_query_set')
+ .desc(
+ `Tests a huge number of timestamp queries over a single query set between render
+passes on a single command encoder.`
+ )
+ .unimplemented();
+
+g.test('command_encoder_many_query_sets')
+ .desc(
+ `Tests a huge number of timestamp queries over many query sets between render
+passes on a single command encoder.`
+ )
+ .unimplemented();
+
+g.test('render_pass_one_query_set')
+ .desc(
+ `Tests a huge number of timestamp queries over a single query set in a single
+render pass.`
+ )
+ .unimplemented();
+
+g.test('render_pass_many_query_sets')
+ .desc(
+ `Tests a huge number of timestamp queries over a huge number of query sets in a
+single render pass.`
+ )
+ .unimplemented();
+
+g.test('compute_pass_one_query_set')
+ .desc(
+ `Tests a huge number of timestamp queries over a single query set in a single
+compute pass.`
+ )
+ .unimplemented();
+
+g.test('compute_pass_many_query_sets')
+ .desc(
+ `Tests a huge number of timestamp queries over a huge number of query sets in a
+single compute pass.`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/queue/README.txt b/dom/webgpu/tests/cts/checkout/src/stress/queue/README.txt
new file mode 100644
index 0000000000..adb4ec40ce
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/queue/README.txt
@@ -0,0 +1 @@
+Stress tests covering GPUQueue usage.
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/queue/submit.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/queue/submit.spec.ts
new file mode 100644
index 0000000000..fcce353272
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/queue/submit.spec.ts
@@ -0,0 +1,102 @@
+export const description = `
+Stress tests for command submission to GPUQueue objects.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { iterRange } from '../../common/util/util.js';
+import { GPUTest } from '../../webgpu/gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('huge_command_buffer')
+ .desc(
+    `Tests submission of huge command buffers to a GPUQueue. Huge command buffers are
+encoded by chaining together long sequences of compute passes, with expected
+results verified at the end of the test.`
+ )
+ .fn(t => {
+ const kNumElements = 64;
+ const data = new Uint32Array([...iterRange(kNumElements, x => x)]);
+ const buffer = t.makeBufferWithContents(data, GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC);
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({
+ code: `
+ struct Buffer { data: array<u32>, };
+ @group(0) @binding(0) var<storage, read_write> buffer: Buffer;
+ @compute @workgroup_size(1) fn main(
+ @builtin(global_invocation_id) id: vec3<u32>) {
+ buffer.data[id.x] = buffer.data[id.x] + 1u;
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ });
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer } }],
+ });
+ const encoder = t.device.createCommandEncoder();
+ const kNumIterations = 500_000;
+ for (let i = 0; i < kNumIterations; ++i) {
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(kNumElements);
+ pass.end();
+ }
+ t.device.queue.submit([encoder.finish()]);
+ t.expectGPUBufferValuesEqual(
+ buffer,
+ new Uint32Array([...iterRange(kNumElements, x => x + kNumIterations)])
+ );
+ });
+
+g.test('many_command_buffers')
+ .desc(
+ `Tests submission of a huge number of command buffers to a GPUQueue by a single
+submit() call.`
+ )
+ .fn(t => {
+ const kNumElements = 64;
+ const data = new Uint32Array([...iterRange(kNumElements, x => x)]);
+ const buffer = t.makeBufferWithContents(data, GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC);
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({
+ code: `
+ struct Buffer { data: array<u32>, };
+ @group(0) @binding(0) var<storage, read_write> buffer: Buffer;
+ @compute @workgroup_size(1) fn main(
+ @builtin(global_invocation_id) id: vec3<u32>) {
+ buffer.data[id.x] = buffer.data[id.x] + 1u;
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ });
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer } }],
+ });
+ const kNumIterations = 500_000;
+ const buffers = [];
+ for (let i = 0; i < kNumIterations; ++i) {
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(kNumElements);
+ pass.end();
+ buffers.push(encoder.finish());
+ }
+ t.device.queue.submit(buffers);
+ t.expectGPUBufferValuesEqual(
+ buffer,
+ new Uint32Array([...iterRange(kNumElements, x => x + kNumIterations)])
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/render/README.txt b/dom/webgpu/tests/cts/checkout/src/stress/render/README.txt
new file mode 100644
index 0000000000..7dcc73fbc3
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/render/README.txt
@@ -0,0 +1,3 @@
+Stress tests covering operations specific to GPURenderPipeline, GPURenderPass, and GPURenderBundle.
+
+- Issuing draw calls with huge counts.
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/render/render_pass.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/render/render_pass.spec.ts
new file mode 100644
index 0000000000..6d2917a090
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/render/render_pass.spec.ts
@@ -0,0 +1,354 @@
+export const description = `
+Stress tests covering GPURenderPassEncoder usage.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { range } from '../../common/util/util.js';
+import { GPUTest } from '../../webgpu/gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('many')
+ .desc(
+ `Tests execution of a huge number of render passes using the same GPURenderPipeline. This uses
+a single render pass for every output fragment, with each pass executing a one-vertex draw call.`
+ )
+ .fn(t => {
+ const kSize = 1024;
+ const module = t.device.createShaderModule({
+ code: `
+ @vertex fn vmain(@builtin(vertex_index) index: u32)
+ -> @builtin(position) vec4<f32> {
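+          // One vertex per output pixel, mapped to NDC at the pixel center.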
+ let position = vec2<f32>(f32(index % ${kSize}u), f32(index / ${kSize}u));
+ let r = vec2<f32>(1.0 / f32(${kSize}));
+ let a = 2.0 * r;
+ let b = r - vec2<f32>(1.0);
+ return vec4<f32>(fma(position, a, b), 0.0, 1.0);
+ }
+ @fragment fn fmain() -> @location(0) vec4<f32> {
+ return vec4<f32>(1.0, 0.0, 1.0, 1.0);
+ }
+ `,
+ });
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: { module, entryPoint: 'vmain', buffers: [] },
+ primitive: { topology: 'point-list' },
+ fragment: {
+ targets: [{ format: 'rgba8unorm' }],
+ module,
+ entryPoint: 'fmain',
+ },
+ });
+ const renderTarget = t.device.createTexture({
+ size: [kSize, kSize],
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ format: 'rgba8unorm',
+ });
+ const renderPassDescriptor: GPURenderPassDescriptor = {
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ loadOp: 'load',
+ storeOp: 'store',
+ },
+ ],
+ };
+ const encoder = t.device.createCommandEncoder();
+ range(kSize * kSize, i => {
+ const pass = encoder.beginRenderPass(renderPassDescriptor);
+ pass.setPipeline(pipeline);
+ pass.draw(1, 1, i);
+ pass.end();
+ });
+ t.device.queue.submit([encoder.finish()]);
+ t.expectSingleColor(renderTarget, 'rgba8unorm', {
+ size: [kSize, kSize, 1],
+ exp: { R: 1, G: 0, B: 1, A: 1 },
+ });
+ });
+
+g.test('pipeline_churn')
+ .desc(
+ `Tests execution of a large number of render pipelines, each within its own render pass. Each
+pass does a single draw call, with one pass per output fragment.`
+ )
+ .fn(t => {
+ const kWidth = 64;
+ const kHeight = 8;
+ const module = t.device.createShaderModule({
+ code: `
+ @vertex fn vmain(@builtin(vertex_index) index: u32)
+ -> @builtin(position) vec4<f32> {
+ let position = vec2<f32>(f32(index % ${kWidth}u), f32(index / ${kWidth}u));
+ let size = vec2<f32>(f32(${kWidth}), f32(${kHeight}));
+ let r = vec2<f32>(1.0) / size;
+ let a = 2.0 * r;
+ let b = r - vec2<f32>(1.0);
+ return vec4<f32>(fma(position, a, b), 0.0, 1.0);
+ }
+ @fragment fn fmain() -> @location(0) vec4<f32> {
+ return vec4<f32>(1.0, 0.0, 1.0, 1.0);
+ }
+ `,
+ });
+ const renderTarget = t.device.createTexture({
+ size: [kWidth, kHeight],
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ format: 'rgba8unorm',
+ });
+ const depthTarget = t.device.createTexture({
+ size: [kWidth, kHeight],
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ format: 'depth24plus-stencil8',
+ });
+ const renderPassDescriptor: GPURenderPassDescriptor = {
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ loadOp: 'load',
+ storeOp: 'store',
+ },
+ ],
+ depthStencilAttachment: {
+ view: depthTarget.createView(),
+ depthLoadOp: 'load',
+ depthStoreOp: 'store',
+ stencilLoadOp: 'load',
+ stencilStoreOp: 'discard',
+ },
+ };
+ const encoder = t.device.createCommandEncoder();
+ range(kWidth * kHeight, i => {
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: { module, entryPoint: 'vmain', buffers: [] },
+ primitive: { topology: 'point-list' },
+ depthStencil: {
+ format: 'depth24plus-stencil8',
+ depthCompare: 'always',
+ depthWriteEnabled: false,
+ // Not really used, but it ensures that each pipeline is unique.
+ depthBias: i,
+ },
+ fragment: {
+ targets: [{ format: 'rgba8unorm' }],
+ module,
+ entryPoint: 'fmain',
+ },
+ });
+ const pass = encoder.beginRenderPass(renderPassDescriptor);
+ pass.setPipeline(pipeline);
+ pass.draw(1, 1, i);
+ pass.end();
+ });
+ t.device.queue.submit([encoder.finish()]);
+ t.expectSingleColor(renderTarget, 'rgba8unorm', {
+ size: [kWidth, kHeight, 1],
+ exp: { R: 1, G: 0, B: 1, A: 1 },
+ });
+ });
+
+g.test('bind_group_churn')
+ .desc(
+ `Tests execution of render passes which switch between a huge number of bind groups. This uses
+a single render pass with a single pipeline, and one draw call per fragment of the output texture.
+Each draw call is made with a unique bind group 0, with binding 0 referencing a unique uniform
+buffer.`
+ )
+ .fn(t => {
+ const kSize = 128;
+ const module = t.device.createShaderModule({
+ code: `
+ struct Uniforms { index: u32, };
+ @group(0) @binding(0) var<uniform> uniforms: Uniforms;
+ @vertex fn vmain() -> @builtin(position) vec4<f32> {
+ let index = uniforms.index;
+ let position = vec2<f32>(f32(index % ${kSize}u), f32(index / ${kSize}u));
+ let r = vec2<f32>(1.0 / f32(${kSize}));
+ let a = 2.0 * r;
+ let b = r - vec2<f32>(1.0);
+ return vec4<f32>(fma(position, a, b), 0.0, 1.0);
+ }
+ @fragment fn fmain() -> @location(0) vec4<f32> {
+ return vec4<f32>(1.0, 0.0, 1.0, 1.0);
+ }
+ `,
+ });
+ const layout = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.VERTEX,
+ buffer: { type: 'uniform' },
+ },
+ ],
+ });
+ const pipeline = t.device.createRenderPipeline({
+ layout: t.device.createPipelineLayout({ bindGroupLayouts: [layout] }),
+ vertex: { module, entryPoint: 'vmain', buffers: [] },
+ primitive: { topology: 'point-list' },
+ fragment: {
+ targets: [{ format: 'rgba8unorm' }],
+ module,
+ entryPoint: 'fmain',
+ },
+ });
+ const renderTarget = t.device.createTexture({
+ size: [kSize, kSize],
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ format: 'rgba8unorm',
+ });
+ const renderPassDescriptor: GPURenderPassDescriptor = {
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ loadOp: 'load',
+ storeOp: 'store',
+ },
+ ],
+ };
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass(renderPassDescriptor);
+ pass.setPipeline(pipeline);
+ range(kSize * kSize, i => {
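+      // One 4-byte uniform buffer and bind group per draw, holding the fragment index.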
+ const buffer = t.device.createBuffer({
+ size: 4,
+ usage: GPUBufferUsage.UNIFORM,
+ mappedAtCreation: true,
+ });
+ new Uint32Array(buffer.getMappedRange())[0] = i;
+ buffer.unmap();
+ pass.setBindGroup(
+ 0,
+ t.device.createBindGroup({ layout, entries: [{ binding: 0, resource: { buffer } }] })
+ );
+ pass.draw(1, 1);
+ });
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+ t.expectSingleColor(renderTarget, 'rgba8unorm', {
+ size: [kSize, kSize, 1],
+ exp: { R: 1, G: 0, B: 1, A: 1 },
+ });
+ });
+
+g.test('many_draws')
+ .desc(
+ `Tests execution of render passes with a huge number of draw calls. This uses a single
+render pass with a single pipeline, and one draw call per fragment of the output texture.`
+ )
+ .fn(t => {
+ const kSize = 4096;
+ const module = t.device.createShaderModule({
+ code: `
+ @vertex fn vmain(@builtin(vertex_index) index: u32)
+ -> @builtin(position) vec4<f32> {
+ let position = vec2<f32>(f32(index % ${kSize}u), f32(index / ${kSize}u));
+ let r = vec2<f32>(1.0 / f32(${kSize}));
+ let a = 2.0 * r;
+ let b = r - vec2<f32>(1.0);
+ return vec4<f32>(fma(position, a, b), 0.0, 1.0);
+ }
+ @fragment fn fmain() -> @location(0) vec4<f32> {
+ return vec4<f32>(1.0, 0.0, 1.0, 1.0);
+ }
+ `,
+ });
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: { module, entryPoint: 'vmain', buffers: [] },
+ primitive: { topology: 'point-list' },
+ fragment: {
+ targets: [{ format: 'rgba8unorm' }],
+ module,
+ entryPoint: 'fmain',
+ },
+ });
+ const renderTarget = t.device.createTexture({
+ size: [kSize, kSize],
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ format: 'rgba8unorm',
+ });
+ const renderPassDescriptor: GPURenderPassDescriptor = {
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ loadOp: 'load',
+ storeOp: 'store',
+ },
+ ],
+ };
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass(renderPassDescriptor);
+ pass.setPipeline(pipeline);
+ range(kSize * kSize, i => pass.draw(1, 1, i));
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+ t.expectSingleColor(renderTarget, 'rgba8unorm', {
+ size: [kSize, kSize, 1],
+ exp: { R: 1, G: 0, B: 1, A: 1 },
+ });
+ });
+
+g.test('huge_draws')
+ .desc(
+ `Tests execution of several render passes with huge draw calls. Each pass uses a single draw
+call which draws multiple vertices for each fragment of a large output texture.`
+ )
+ .fn(t => {
+ const kSize = 32768;
+ const kTextureSize = 4096;
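+    // 32768^2 vertices over a 4096^2 texture gives 64 vertices per output fragment.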
+ const kVertsPerFragment = (kSize * kSize) / (kTextureSize * kTextureSize);
+ const module = t.device.createShaderModule({
+ code: `
+ @vertex fn vmain(@builtin(vertex_index) vert_index: u32)
+ -> @builtin(position) vec4<f32> {
+ let index = vert_index / ${kVertsPerFragment}u;
+ let position = vec2<f32>(f32(index % ${kTextureSize}u), f32(index / ${kTextureSize}u));
+ let r = vec2<f32>(1.0 / f32(${kTextureSize}));
+ let a = 2.0 * r;
+ let b = r - vec2<f32>(1.0);
+ return vec4<f32>(fma(position, a, b), 0.0, 1.0);
+ }
+ @fragment fn fmain() -> @location(0) vec4<f32> {
+ return vec4<f32>(1.0, 0.0, 1.0, 1.0);
+ }
+ `,
+ });
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: { module, entryPoint: 'vmain', buffers: [] },
+ primitive: { topology: 'point-list' },
+ fragment: {
+ targets: [{ format: 'rgba8unorm' }],
+ module,
+ entryPoint: 'fmain',
+ },
+ });
+ const renderTarget = t.device.createTexture({
+ size: [kTextureSize, kTextureSize],
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ format: 'rgba8unorm',
+ });
+ const renderPassDescriptor: GPURenderPassDescriptor = {
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ loadOp: 'load',
+ storeOp: 'store',
+ },
+ ],
+ };
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass(renderPassDescriptor);
+ pass.setPipeline(pipeline);
+ pass.draw(kSize * kSize);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+ t.expectSingleColor(renderTarget, 'rgba8unorm', {
+ size: [kTextureSize, kTextureSize, 1],
+ exp: { R: 1, G: 0, B: 1, A: 1 },
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/render/vertex_buffers.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/render/vertex_buffers.spec.ts
new file mode 100644
index 0000000000..e055f96fc4
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/render/vertex_buffers.spec.ts
@@ -0,0 +1,130 @@
+export const description = `
+Stress tests covering vertex buffer usage.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { GPUTest } from '../../webgpu/gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+function createHugeVertexBuffer(t: GPUTest, size: number) {
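+  // One vec2<u32> (8 bytes) per vertex for a size x size grid of vertices.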
+ const kBufferSize = size * size * 8;
+ const buffer = t.device.createBuffer({
+ size: kBufferSize,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({
+ code: `
+ struct Buffer { data: array<vec2<u32>>, };
+ @group(0) @binding(0) var<storage, read_write> buffer: Buffer;
+ @compute @workgroup_size(1) fn main(
+ @builtin(global_invocation_id) id: vec3<u32>) {
+ let base = id.x * ${size}u;
+ for (var x: u32 = 0u; x < ${size}u; x = x + 1u) {
+ buffer.data[base + x] = vec2<u32>(x, id.x);
+ }
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ });
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: { buffer },
+ },
+ ],
+ });
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(size);
+ pass.end();
+
+ const vertexBuffer = t.device.createBuffer({
+ size: kBufferSize,
+ usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
+ });
+ encoder.copyBufferToBuffer(buffer, 0, vertexBuffer, 0, kBufferSize);
+ t.device.queue.submit([encoder.finish()]);
+ return vertexBuffer;
+}
+
+g.test('many')
+ .desc(`Tests execution of draw calls using a huge vertex buffer.`)
+ .fn(t => {
+ const kSize = 4096;
+ const buffer = createHugeVertexBuffer(t, kSize);
+ const module = t.device.createShaderModule({
+ code: `
+ @vertex fn vmain(@location(0) position: vec2<u32>)
+ -> @builtin(position) vec4<f32> {
+ let r = vec2<f32>(1.0 / f32(${kSize}));
+ let a = 2.0 * r;
+ let b = r - vec2<f32>(1.0);
+ return vec4<f32>(fma(vec2<f32>(position), a, b), 0.0, 1.0);
+ }
+ @fragment fn fmain() -> @location(0) vec4<f32> {
+ return vec4<f32>(1.0, 0.0, 1.0, 1.0);
+ }
+ `,
+ });
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'vmain',
+ buffers: [
+ {
+ arrayStride: 8,
+ attributes: [
+ {
+ format: 'uint32x2',
+ offset: 0,
+ shaderLocation: 0,
+ },
+ ],
+ },
+ ],
+ },
+ primitive: { topology: 'point-list' },
+ fragment: {
+ targets: [{ format: 'rgba8unorm' }],
+ module,
+ entryPoint: 'fmain',
+ },
+ });
+ const renderTarget = t.device.createTexture({
+ size: [kSize, kSize],
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ format: 'rgba8unorm',
+ });
+ const renderPassDescriptor: GPURenderPassDescriptor = {
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ loadOp: 'load',
+ storeOp: 'store',
+ },
+ ],
+ };
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass(renderPassDescriptor);
+ pass.setPipeline(pipeline);
+ pass.setVertexBuffer(0, buffer);
+ pass.draw(kSize * kSize);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+ t.expectSingleColor(renderTarget, 'rgba8unorm', {
+ size: [kSize, kSize, 1],
+ exp: { R: 1, G: 0, B: 1, A: 1 },
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/shaders/README.txt b/dom/webgpu/tests/cts/checkout/src/stress/shaders/README.txt
new file mode 100644
index 0000000000..628b4e86fa
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/shaders/README.txt
@@ -0,0 +1 @@
+Stress tests covering very long-running and/or resource-intensive shaders.
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/shaders/entry_points.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/shaders/entry_points.spec.ts
new file mode 100644
index 0000000000..313c79a8c3
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/shaders/entry_points.spec.ts
@@ -0,0 +1,78 @@
+export const description = `
+Stress tests covering behavior around shader entry points.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { range } from '../../common/util/util.js';
+import { GPUTest } from '../../webgpu/gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+const makeCode = (numEntryPoints: number) => {
+ const kBaseCode = `
+ struct Buffer { data: u32, };
+ @group(0) @binding(0) var<storage, read_write> buffer: Buffer;
+ fn main() { buffer.data = buffer.data + 1u; }
+ `;
+ const makeEntryPoint = (i: number) => `
+ @compute @workgroup_size(1) fn computeMain${i}() { main(); }
+ `;
+ return kBaseCode + range(numEntryPoints, makeEntryPoint).join('');
+};
+
+g.test('many')
+ .desc(
+ `Tests compilation and usage of shaders with a huge number of entry points.
+
+TODO: There may be a normative limit to the number of entry points allowed in
+a shader, in which case this would become a validation test instead.`
+ )
+ .fn(t => {
+ const data = new Uint32Array([0]);
+ const buffer = t.makeBufferWithContents(data, GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC);
+
+ // NOTE: Initial shader compilation time seems to scale exponentially with
+ // this value in Chrome.
+ const kNumEntryPoints = 200;
+
+ const shader = t.device.createShaderModule({
+ code: makeCode(kNumEntryPoints),
+ });
+
+ const layout = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE,
+ buffer: { type: 'storage' },
+ },
+ ],
+ });
+ const pipelineLayout = t.device.createPipelineLayout({
+ bindGroupLayouts: [layout],
+ });
+ const bindGroup = t.device.createBindGroup({
+ layout,
+ entries: [{ binding: 0, resource: { buffer } }],
+ });
+
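+  // Each pass below increments the counter exactly once, and passes recorded
+  // into a single command buffer execute in the order they were encoded, so
+  // the final buffer value is expected to equal kNumEntryPoints.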
+ const encoder = t.device.createCommandEncoder();
+ range(kNumEntryPoints, i => {
+ const pipeline = t.device.createComputePipeline({
+ layout: pipelineLayout,
+ compute: {
+ module: shader,
+ entryPoint: `computeMain${i}`,
+ },
+ });
+
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(1);
+ pass.end();
+ });
+
+ t.device.queue.submit([encoder.finish()]);
+ t.expectGPUBufferValuesEqual(buffer, new Uint32Array([kNumEntryPoints]));
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/shaders/non_halting.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/shaders/non_halting.spec.ts
new file mode 100644
index 0000000000..b88aa083b3
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/shaders/non_halting.spec.ts
@@ -0,0 +1,194 @@
+export const description = `
+Stress tests covering robustness in the presence of non-halting shaders.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { GPUTest } from '../../webgpu/gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('compute')
+ .desc(
+ `Tests execution of compute passes with non-halting dispatch operations.
+
+This is expected to hang for a bit, but it should ultimately result in graceful
+device loss.`
+ )
+ .fn(async t => {
+ const data = new Uint32Array([0]);
+ const buffer = t.makeBufferWithContents(data, GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC);
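+    // The counter starts at 0 and is only ever incremented by 2, so the
+    // `data == 1u` exit condition can never be satisfied; reading the
+    // condition from a buffer also keeps the loop from being trivially
+    // optimized away.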
+ const module = t.device.createShaderModule({
+ code: `
+ struct Buffer { data: u32, };
+ @group(0) @binding(0) var<storage, read_write> buffer: Buffer;
+ @compute @workgroup_size(1) fn main() {
+ loop {
+ if (buffer.data == 1u) {
+ break;
+ }
+ buffer.data = buffer.data + 2u;
+ }
+ }
+ `,
+ });
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: { module, entryPoint: 'main' },
+ });
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer } }],
+ });
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(1);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+ await t.device.lost;
+ });
+
+g.test('vertex')
+ .desc(
+ `Tests execution of render passes with a non-halting vertex stage.
+
+This is expected to hang for a bit, but it should ultimately result in graceful
+device loss.`
+ )
+ .fn(async t => {
+ const module = t.device.createShaderModule({
+ code: `
+ struct Data { counter: u32, increment: u32, };
+ @group(0) @binding(0) var<uniform> data: Data;
+ @vertex fn vmain() -> @builtin(position) vec4<f32> {
+ var counter: u32 = data.counter;
+ loop {
+ if (counter % 2u == 1u) {
+ break;
+ }
+ counter = counter + data.increment;
+ }
+ return vec4<f32>(1.0, 1.0, 0.0, f32(counter));
+ }
+ @fragment fn fmain() -> @location(0) vec4<f32> {
+ return vec4<f32>(1.0);
+ }
+ `,
+ });
+
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: { module, entryPoint: 'vmain', buffers: [] },
+ primitive: { topology: 'point-list' },
+ fragment: {
+ targets: [{ format: 'rgba8unorm' }],
+ module,
+ entryPoint: 'fmain',
+ },
+ });
+ const uniforms = t.makeBufferWithContents(new Uint32Array([0, 2]), GPUBufferUsage.UNIFORM);
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: { buffer: uniforms },
+ },
+ ],
+ });
+ const renderTarget = t.device.createTexture({
+ size: [1, 1],
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ format: 'rgba8unorm',
+ });
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ clearValue: [0, 0, 0, 0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.draw(1);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+ await t.device.lost;
+ });
+
+g.test('fragment')
+ .desc(
+ `Tests execution of render passes with a non-halting fragment stage.
+
+This is expected to hang for a bit, but it should ultimately result in graceful
+device loss.`
+ )
+ .fn(async t => {
+ const module = t.device.createShaderModule({
+ code: `
+ struct Data { counter: u32, increment: u32, };
+ @group(0) @binding(0) var<uniform> data: Data;
+ @vertex fn vmain() -> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ }
+ @fragment fn fmain() -> @location(0) vec4<f32> {
+ var counter: u32 = data.counter;
+ loop {
+ if (counter % 2u == 1u) {
+ break;
+ }
+ counter = counter + data.increment;
+ }
+ return vec4<f32>(1.0 / f32(counter), 0.0, 0.0, 1.0);
+ }
+ `,
+ });
+
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: { module, entryPoint: 'vmain', buffers: [] },
+ primitive: { topology: 'point-list' },
+ fragment: {
+ targets: [{ format: 'rgba8unorm' }],
+ module,
+ entryPoint: 'fmain',
+ },
+ });
+ const uniforms = t.makeBufferWithContents(new Uint32Array([0, 2]), GPUBufferUsage.UNIFORM);
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: { buffer: uniforms },
+ },
+ ],
+ });
+ const renderTarget = t.device.createTexture({
+ size: [1, 1],
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ format: 'rgba8unorm',
+ });
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ clearValue: [0, 0, 0, 0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.draw(1);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+ await t.device.lost;
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/shaders/slow.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/shaders/slow.spec.ts
new file mode 100644
index 0000000000..8e354105b6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/shaders/slow.spec.ts
@@ -0,0 +1,191 @@
+export const description = `
+Stress tests covering robustness in the presence of slow shaders.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { GPUTest, TextureTestMixin } from '../../webgpu/gpu_test.js';
+
+export const g = makeTestGroup(TextureTestMixin(GPUTest));
+
+g.test('compute')
+ .desc(`Tests execution of compute passes with very long-running dispatch operations.`)
+ .fn(t => {
+ const kDispatchSize = 1000;
+ const data = new Uint32Array(kDispatchSize);
+ const buffer = t.makeBufferWithContents(data, GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC);
+ const module = t.device.createShaderModule({
+ code: `
+ struct Buffer { data: array<u32>, };
+ @group(0) @binding(0) var<storage, read_write> buffer: Buffer;
+ @compute @workgroup_size(1) fn main(
+ @builtin(global_invocation_id) id: vec3<u32>) {
+ loop {
+ if (buffer.data[id.x] == 1000000u) {
+ break;
+ }
+ buffer.data[id.x] = buffer.data[id.x] + 1u;
+ }
+ }
+ `,
+ });
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: { module, entryPoint: 'main' },
+ });
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer } }],
+ });
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(kDispatchSize);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+ t.expectGPUBufferValuesEqual(buffer, new Uint32Array(new Array(kDispatchSize).fill(1000000)));
+ });
+
+g.test('vertex')
+ .desc(`Tests execution of render passes with a very long-running vertex stage.`)
+ .fn(t => {
+ const module = t.device.createShaderModule({
+ code: `
+ struct Data { counter: u32, increment: u32, };
+ @group(0) @binding(0) var<uniform> data: Data;
+ @vertex fn vmain() -> @builtin(position) vec4<f32> {
+ var counter: u32 = data.counter;
+ loop {
+ counter = counter + data.increment;
+ if (counter % 50000000u == 0u) {
+ break;
+ }
+ }
+ return vec4<f32>(1.0, 1.0, 0.0, f32(counter));
+ }
+ @fragment fn fmain() -> @location(0) vec4<f32> {
+ return vec4<f32>(1.0, 1.0, 0.0, 1.0);
+ }
+ `,
+ });
+
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: { module, entryPoint: 'vmain', buffers: [] },
+ primitive: { topology: 'point-list' },
+ fragment: {
+ targets: [{ format: 'rgba8unorm' }],
+ module,
+ entryPoint: 'fmain',
+ },
+ });
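+    // The uniform data below sets counter = 0 and increment = 1, so each
+    // vertex invocation spins for 50 million iterations before breaking.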
+ const uniforms = t.makeBufferWithContents(new Uint32Array([0, 1]), GPUBufferUsage.UNIFORM);
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: { buffer: uniforms },
+ },
+ ],
+ });
+ const renderTarget = t.device.createTexture({
+ size: [3, 3],
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ format: 'rgba8unorm',
+ });
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ clearValue: [0, 0, 0, 0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.draw(1);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+ t.expectSinglePixelComparisonsAreOkInTexture({ texture: renderTarget }, [
+ {
+ coord: { x: 1, y: 1 },
+ exp: new Uint8Array([255, 255, 0, 255]),
+ },
+ ]);
+ });
+
+g.test('fragment')
+ .desc(`Tests execution of render passes with a very long-running fragment stage.`)
+ .fn(t => {
+ const module = t.device.createShaderModule({
+ code: `
+ struct Data { counter: u32, increment: u32, };
+ @group(0) @binding(0) var<uniform> data: Data;
+ @vertex fn vmain() -> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ }
+ @fragment fn fmain() -> @location(0) vec4<f32> {
+ var counter: u32 = data.counter;
+ loop {
+ counter = counter + data.increment;
+ if (counter % 50000000u == 0u) {
+ break;
+ }
+ }
+ return vec4<f32>(1.0, 1.0, 1.0 / f32(counter), 1.0);
+ }
+ `,
+ });
+
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: { module, entryPoint: 'vmain', buffers: [] },
+ primitive: { topology: 'point-list' },
+ fragment: {
+ targets: [{ format: 'rgba8unorm' }],
+ module,
+ entryPoint: 'fmain',
+ },
+ });
+ const uniforms = t.makeBufferWithContents(new Uint32Array([0, 1]), GPUBufferUsage.UNIFORM);
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: { buffer: uniforms },
+ },
+ ],
+ });
+ const renderTarget = t.device.createTexture({
+ size: [3, 3],
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ format: 'rgba8unorm',
+ });
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ clearValue: [0, 0, 0, 0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.draw(1);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+ t.expectSinglePixelComparisonsAreOkInTexture({ texture: renderTarget }, [
+ {
+ coord: { x: 1, y: 1 },
+ exp: new Uint8Array([255, 255, 0, 255]),
+ },
+ ]);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/texture/README.txt b/dom/webgpu/tests/cts/checkout/src/stress/texture/README.txt
new file mode 100644
index 0000000000..db40963b2e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/texture/README.txt
@@ -0,0 +1 @@
+Stress tests covering texture usage.
diff --git a/dom/webgpu/tests/cts/checkout/src/stress/texture/large.spec.ts b/dom/webgpu/tests/cts/checkout/src/stress/texture/large.spec.ts
new file mode 100644
index 0000000000..cba2053d38
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/stress/texture/large.spec.ts
@@ -0,0 +1,56 @@
+export const description = `
+Stress tests covering usage of very large textures.
+`;
+
+import { makeTestGroup } from '../../common/framework/test_group.js';
+import { GPUTest } from '../../webgpu/gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('loading,2d')
+ .desc(
+ `Tests execution of shaders loading values from very large (up to at least
+8192x8192) 2D textures. The texture size is selected according to the limit
+supported by the GPUDevice.`
+ )
+ .unimplemented();
+
+g.test('loading,2d_array')
+ .desc(
+ `Tests execution of shaders loading values from very large (up to at least
+8192x8192x2048) arrays of 2D textures. The texture and array size is selected
+according to limits supported by the GPUDevice.`
+ )
+ .unimplemented();
+
+g.test('loading,3d')
+ .desc(
+ `Tests execution of shaders loading values from very large (up to at least
+2048x2048x2048) textures. The texture size is selected according to the limit
+supported by the GPUDevice.`
+ )
+ .unimplemented();
+
+g.test('sampling,2d')
+ .desc(
+ `Tests execution of shaders sampling values from very large (up to at least
+8192x8192) 2D textures. The texture size is selected according to the limit
+supported by the GPUDevice.`
+ )
+ .unimplemented();
+
+g.test('sampling,2d_array')
+ .desc(
+ `Tests execution of shaders sampling values from very large (up to at least
+8192x8192x2048) arrays of 2D textures. The texture and array size is selected
+according to limits supported by the GPUDevice.`
+ )
+ .unimplemented();
+
+g.test('sampling,3d')
+ .desc(
+ `Tests execution of shaders sampling values from very large (up to at least
+2048x2048x2048) textures. The texture size is selected according to the limit
+supported by the GPUDevice.`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/unittests/README.txt b/dom/webgpu/tests/cts/checkout/src/unittests/README.txt
new file mode 100644
index 0000000000..17272c3919
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/unittests/README.txt
@@ -0,0 +1 @@
+Unit tests for CTS framework.
diff --git a/dom/webgpu/tests/cts/checkout/src/unittests/async_expectations.spec.ts b/dom/webgpu/tests/cts/checkout/src/unittests/async_expectations.spec.ts
new file mode 100644
index 0000000000..2d62978b8f
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/unittests/async_expectations.spec.ts
@@ -0,0 +1,168 @@
+/* eslint-disable @typescript-eslint/require-await */
+export const description = `
+Tests for eventualAsyncExpectation and immediateAsyncExpectation.
+`;
+
+import { makeTestGroup } from '../common/framework/test_group.js';
+import { makeTestGroupForUnitTesting } from '../common/internal/test_group.js';
+import { assert, objectEquals, rejectOnTimeout, resolveOnTimeout } from '../common/util/util.js';
+
+import { TestGroupTest } from './test_group_test.js';
+import { UnitTest } from './unit_test.js';
+
+class FixtureToTest extends UnitTest {
+ public override immediateAsyncExpectation<T>(fn: () => Promise<T>): Promise<T> {
+ return super.immediateAsyncExpectation(fn);
+ }
+ public override eventualAsyncExpectation<T>(fn: (niceStack: Error) => Promise<T>): void {
+ super.eventualAsyncExpectation(fn);
+ }
+}
+
+export const g = makeTestGroup(TestGroupTest);
+
+g.test('eventual').fn(async t0 => {
+ const g = makeTestGroupForUnitTesting(FixtureToTest);
+
+ const runState = [0, 0, 0, 0];
+ let runStateIndex = 0;
+
+ // Should pass in state 3
+ g.test('noawait,resolve').fn(t => {
+ const idx = runStateIndex++;
+
+ runState[idx] = 1;
+ t.eventualAsyncExpectation(async () => {
+ runState[idx] = 2;
+ await resolveOnTimeout(50);
+ runState[idx] = 3;
+ });
+ runState[idx] = 4;
+ });
+
+ // Should fail in state 4
+ g.test('noawait,reject').fn(t => {
+ const idx = runStateIndex++;
+
+ runState[idx] = 1;
+ t.eventualAsyncExpectation(async () => {
+ runState[idx] = 2;
+ await rejectOnTimeout(50, 'rejected 1');
+ runState[idx] = 3;
+ });
+ runState[idx] = 4;
+ });
+
+ // Should fail in state 3
+ g.test('nested,2').fn(t => {
+ const idx = runStateIndex++;
+
+ runState[idx] = 1;
+ t.eventualAsyncExpectation(async () => {
+ runState[idx] = 2;
+ await resolveOnTimeout(50); // Wait a bit before adding a new eventualAsyncExpectation
+ t.eventualAsyncExpectation(() => rejectOnTimeout(100, 'inner rejected 1'));
+ runState[idx] = 3;
+ });
+ runState[idx] = 4;
+ });
+
+ // Should fail in state 3
+ g.test('nested,4').fn(t => {
+ const idx = runStateIndex++;
+
+ runState[idx] = 1;
+ t.eventualAsyncExpectation(async () => {
+ t.eventualAsyncExpectation(async () => {
+ t.eventualAsyncExpectation(async () => {
+ runState[idx] = 2;
+ await resolveOnTimeout(50); // Wait a bit before adding a new eventualAsyncExpectation
+ t.eventualAsyncExpectation(() => rejectOnTimeout(100, 'inner rejected 2'));
+ runState[idx] = 3;
+ });
+ });
+ });
+ runState[idx] = 4;
+ });
+
+ const resultsPromise = t0.run(g);
+ assert(objectEquals(runState, [0, 0, 0, 0]));
+
+ const statuses = Array.from(await resultsPromise).map(([, v]) => v.status);
+ assert(objectEquals(runState, [3, 4, 3, 3]), () => runState.toString());
+ assert(objectEquals(statuses, ['pass', 'fail', 'fail', 'fail']), () => statuses.toString());
+});
+
+g.test('immediate').fn(async t0 => {
+ const g = makeTestGroupForUnitTesting(FixtureToTest);
+
+ const runState = [0, 0, 0, 0, 0];
+
+ g.test('noawait,resolve').fn(t => {
+ runState[0] = 1;
+ void t.immediateAsyncExpectation(async () => {
+ runState[0] = 2;
+ await resolveOnTimeout(50);
+ runState[0] = 3;
+ });
+ runState[0] = 4;
+ });
+
+ // (Can't g.test('noawait,reject') because it causes a top-level Promise
+ // rejection which crashes Node.)
+
+ g.test('await,resolve').fn(async t => {
+ runState[1] = 1;
+ await t.immediateAsyncExpectation(async () => {
+ runState[1] = 2;
+ await resolveOnTimeout(50);
+ runState[1] = 3;
+ });
+ });
+
+ g.test('await,reject').fn(async t => {
+ runState[2] = 1;
+ await t.immediateAsyncExpectation(async () => {
+ runState[2] = 2;
+ await rejectOnTimeout(50, 'rejected 3');
+ runState[2] = 3;
+ });
+ });
+
+ // (Similarly can't test 'nested,noawait'.)
+
+ g.test('nested,await,2').fn(t => {
+ runState[3] = 1;
+ t.eventualAsyncExpectation(async () => {
+ runState[3] = 2;
+ await resolveOnTimeout(50); // Wait a bit before adding a new immediateAsyncExpectation
+ runState[3] = 3;
+ await t.immediateAsyncExpectation(() => rejectOnTimeout(100, 'inner rejected 3'));
+ runState[3] = 5;
+ });
+ runState[3] = 4;
+ });
+
+ g.test('nested,await,4').fn(t => {
+ runState[4] = 1;
+ t.eventualAsyncExpectation(async () => {
+ t.eventualAsyncExpectation(async () => {
+ t.eventualAsyncExpectation(async () => {
+ runState[4] = 2;
+ await resolveOnTimeout(50); // Wait a bit before adding a new immediateAsyncExpectation
+ runState[4] = 3;
+ await t.immediateAsyncExpectation(() => rejectOnTimeout(100, 'inner rejected 3'));
+ runState[4] = 5;
+ });
+ });
+ });
+ runState[4] = 4;
+ });
+
+ const resultsPromise = t0.run(g);
+ assert(objectEquals(runState, [0, 0, 0, 0, 0]));
+
+ const statuses = Array.from(await resultsPromise).map(([, v]) => v.status);
+ assert(objectEquals(runState, [3, 3, 2, 3, 3]));
+ assert(objectEquals(statuses, ['fail', 'pass', 'fail', 'fail', 'fail']));
+});
diff --git a/dom/webgpu/tests/cts/checkout/src/unittests/basic.spec.ts b/dom/webgpu/tests/cts/checkout/src/unittests/basic.spec.ts
new file mode 100644
index 0000000000..5c04067396
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/unittests/basic.spec.ts
@@ -0,0 +1,35 @@
+export const description = `
+Basic unit tests for test framework.
+`;
+
+import { makeTestGroup } from '../common/framework/test_group.js';
+
+import { UnitTest } from './unit_test.js';
+
+export const g = makeTestGroup(UnitTest);
+
+g.test('test,sync').fn(_t => {});
+
+g.test('test,async').fn(async _t => {});
+
+g.test('test_with_params,sync')
+ .paramsSimple([{}])
+ .fn(t => {
+ t.debug(JSON.stringify(t.params));
+ });
+
+g.test('test_with_params,async')
+ .paramsSimple([{}])
+  .fn(async t => {
+ t.debug(JSON.stringify(t.params));
+ });
+
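+// Parameters whose names begin with '_' are private: they carry expected
+// values for the test body and do not contribute to the case's parameterization.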
+g.test('test_with_params,private_params')
+ .paramsSimple([
+ { a: 1, b: 2, _result: 3 }, //
+ { a: 4, b: -3, _result: 1 },
+ ])
+ .fn(t => {
+ const { a, b, _result } = t.params;
+ t.expect(a + b === _result);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/unittests/check_contents.spec.ts b/dom/webgpu/tests/cts/checkout/src/unittests/check_contents.spec.ts
new file mode 100644
index 0000000000..1a722a1b86
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/unittests/check_contents.spec.ts
@@ -0,0 +1,71 @@
+export const description = `Unit tests for check_contents`;
+
+import { Fixture } from '../common/framework/fixture.js';
+import { makeTestGroup } from '../common/internal/test_group.js';
+import { ErrorWithExtra } from '../common/util/util.js';
+import { checkElementsEqual } from '../webgpu/util/check_contents.js';
+
+class F extends Fixture {
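+  // Expects `result` to be undefined when `substr` is undefined (the check
+  // passed), and otherwise to be an error whose message contains `substr`.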
+ test(substr: undefined | string, result: undefined | ErrorWithExtra) {
+ if (substr === undefined) {
+ this.expect(result === undefined, result?.message);
+ } else {
+ this.expect(result !== undefined && result.message.indexOf(substr) !== -1, result?.message);
+ }
+ }
+}
+
+export const g = makeTestGroup(F);
+
+g.test('checkElementsEqual').fn(t => {
+ t.shouldThrow('Error', () => checkElementsEqual(new Uint8Array(), new Uint16Array()));
+ t.shouldThrow('Error', () => checkElementsEqual(new Uint32Array(), new Float32Array()));
+ t.shouldThrow('Error', () => checkElementsEqual(new Uint8Array([]), new Uint8Array([0])));
+ t.shouldThrow('Error', () => checkElementsEqual(new Uint8Array([0]), new Uint8Array([])));
+ {
+ t.test(undefined, checkElementsEqual(new Uint8Array([]), new Uint8Array([])));
+ t.test(undefined, checkElementsEqual(new Uint8Array([0]), new Uint8Array([0])));
+ t.test(undefined, checkElementsEqual(new Uint8Array([1]), new Uint8Array([1])));
+ t.test(
+ `
+ Starting at index 0:
+ actual == 0x: 00
+ failed -> xx
+ expected == 01`,
+ checkElementsEqual(new Uint8Array([0]), new Uint8Array([1]))
+ );
+ t.test(
+ 'expected == 01 02 01',
+ checkElementsEqual(new Uint8Array([1, 1, 1]), new Uint8Array([1, 2, 1]))
+ );
+ }
+ {
+ const actual = new Uint8Array(280);
+ const exp = new Uint8Array(280);
+ for (let i = 2; i < 20; ++i) actual[i] = i - 4;
+ t.test(
+ '00 fe ff 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 00',
+ checkElementsEqual(actual, exp)
+ );
+ for (let i = 2; i < 280; ++i) actual[i] = i - 4;
+ t.test('Starting at index 1:', checkElementsEqual(actual, exp));
+ for (let i = 0; i < 2; ++i) actual[i] = i - 4;
+ t.test('Starting at index 0:', checkElementsEqual(actual, exp));
+ }
+ {
+ const actual = new Int32Array(30);
+ const exp = new Int32Array(30);
+ for (let i = 2; i < 7; ++i) actual[i] = i - 3;
+ t.test('00000002 00000003 00000000\n', checkElementsEqual(actual, exp));
+ for (let i = 2; i < 30; ++i) actual[i] = i - 3;
+ t.test('00000000 00000000 ...', checkElementsEqual(actual, exp));
+ }
+ {
+ const actual = new Float64Array(30);
+ const exp = new Float64Array(30);
+ for (let i = 2; i < 7; ++i) actual[i] = (i - 4) * 1e100;
+ t.test('2.000e+100 0.000\n', checkElementsEqual(actual, exp));
+    for (let i = 2; i < 30; ++i) actual[i] = (i - 4) * 1e100;
+ t.test('6.000e+100 7.000e+100 ...', checkElementsEqual(actual, exp));
+ }
+});
diff --git a/dom/webgpu/tests/cts/checkout/src/unittests/conversion.spec.ts b/dom/webgpu/tests/cts/checkout/src/unittests/conversion.spec.ts
new file mode 100644
index 0000000000..8606aa8717
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/unittests/conversion.spec.ts
@@ -0,0 +1,640 @@
+export const description = `Unit tests for conversion`;
+
+import { mergeParams } from '../common/internal/params_utils.js';
+import { makeTestGroup } from '../common/internal/test_group.js';
+import { keysOf } from '../common/util/data_tables.js';
+import { assert, objectEquals } from '../common/util/util.js';
+import { kValue } from '../webgpu/util/constants.js';
+import {
+ bool,
+ f16Bits,
+ f32,
+ f32Bits,
+ float16BitsToFloat32,
+ float32ToFloat16Bits,
+ float32ToFloatBits,
+ floatBitsToNormalULPFromZero,
+ floatBitsToNumber,
+ i32,
+ kFloat16Format,
+ kFloat32Format,
+ Matrix,
+ numbersApproximatelyEqual,
+ pack2x16float,
+ pack2x16snorm,
+ pack2x16unorm,
+ pack4x8snorm,
+ pack4x8unorm,
+ packRGB9E5UFloat,
+ Scalar,
+ toMatrix,
+ u32,
+ unpackRGB9E5UFloat,
+ vec2,
+ vec3,
+ vec4,
+ Vector,
+} from '../webgpu/util/conversion.js';
+
+import { UnitTest } from './unit_test.js';
+
+export const g = makeTestGroup(UnitTest);
+
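+// Each case is [f16 bit pattern, expected value]; the binary literals are
+// grouped as sign (1) _ exponent (5) _ mantissa (10).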
+const kFloat16BitsToNumberCases = [
+ [0b0_01111_0000000000, 1],
+ [0b0_00001_0000000000, 0.00006103515625],
+ [0b0_01101_0101010101, 0.33325195],
+ [0b0_11110_1111111111, 65504],
+ [0b0_00000_0000000000, 0],
+ [0b1_00000_0000000000, -0.0], // -0.0 compares as equal to 0.0
+ [0b0_01110_0000000000, 0.5],
+ [0b0_01100_1001100110, 0.1999512],
+ [0b0_01111_0000000001, 1.00097656],
+ [0b0_10101_1001000000, 100],
+ [0b1_01100_1001100110, -0.1999512],
+ [0b1_10101_1001000000, -100],
+ [0b0_11111_1111111111, Number.NaN],
+ [0b0_11111_0000000000, Number.POSITIVE_INFINITY],
+ [0b1_11111_0000000000, Number.NEGATIVE_INFINITY],
+];
+
+g.test('float16BitsToFloat32').fn(t => {
+ for (const [bits, number] of [
+ ...kFloat16BitsToNumberCases,
+ [0b0_00000_1111111111, 0.00006104], // subnormal f16 input
+ [0b1_00000_1111111111, -0.00006104],
+ ]) {
+ const actual = float16BitsToFloat32(bits);
+ t.expect(
+ // some loose check
+ numbersApproximatelyEqual(actual, number, 0.00001),
+ `for ${bits.toString(2)}, expected ${number}, got ${actual}`
+ );
+ }
+});
+
+g.test('float32ToFloat16Bits').fn(t => {
+ for (const [bits, number] of [
+ ...kFloat16BitsToNumberCases,
+ [0b0_00000_0000000000, 0.00001], // input that becomes subnormal in f16 is rounded to 0
+ [0b1_00000_0000000000, -0.00001], // and sign is preserved
+ ]) {
+ // some loose check
+ const actual = float32ToFloat16Bits(number);
+ t.expect(
+ Math.abs(actual - bits) <= 1,
+ `for ${number}, expected ${bits.toString(2)}, got ${actual.toString(2)}`
+ );
+ }
+});
+
+g.test('float32ToFloatBits_floatBitsToNumber')
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('signed', [0, 1] as const)
+ .combine('exponentBits', [5, 8])
+ .combine('mantissaBits', [10, 23])
+ )
+ .fn(t => {
+ const { signed, exponentBits, mantissaBits } = t.params;
+ const bias = (1 << (exponentBits - 1)) - 1;
+
+ for (const [, value] of kFloat16BitsToNumberCases) {
+ if (value < 0 && signed === 0) continue;
+ const bits = float32ToFloatBits(value, signed, exponentBits, mantissaBits, bias);
+ const reconstituted = floatBitsToNumber(bits, { signed, exponentBits, mantissaBits, bias });
+ t.expect(
+ numbersApproximatelyEqual(reconstituted, value, 0.0000001),
+ `${reconstituted} vs ${value}`
+ );
+ }
+ });
+
+g.test('floatBitsToULPFromZero,16').fn(t => {
+ const test = (bits: number, ulpFromZero: number) =>
+ t.expect(floatBitsToNormalULPFromZero(bits, kFloat16Format) === ulpFromZero, bits.toString(2));
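+  // In the normal-valued cases below, the expected ULP-from-zero count equals
+  // the biased bit pattern minus 0x3ff (zero and the 1023 subnormal encodings
+  // all map to 0), negated for negative inputs.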
+ // Zero
+ test(0b0_00000_0000000000, 0);
+ test(0b1_00000_0000000000, 0);
+ // Subnormal
+ test(0b0_00000_0000000001, 0);
+ test(0b1_00000_0000000001, 0);
+ test(0b0_00000_1111111111, 0);
+ test(0b1_00000_1111111111, 0);
+ // Normal
+ test(0b0_00001_0000000000, 1); // 0 + 1ULP
+ test(0b1_00001_0000000000, -1); // 0 - 1ULP
+ test(0b0_00001_0000000001, 2); // 0 + 2ULP
+ test(0b1_00001_0000000001, -2); // 0 - 2ULP
+ test(0b0_01110_0000000000, 0b01101_0000000001); // 0.5
+ test(0b1_01110_0000000000, -0b01101_0000000001); // -0.5
+ test(0b0_01110_1111111110, 0b01101_1111111111); // 1.0 - 2ULP
+ test(0b1_01110_1111111110, -0b01101_1111111111); // -(1.0 - 2ULP)
+ test(0b0_01110_1111111111, 0b01110_0000000000); // 1.0 - 1ULP
+ test(0b1_01110_1111111111, -0b01110_0000000000); // -(1.0 - 1ULP)
+ test(0b0_01111_0000000000, 0b01110_0000000001); // 1.0
+ test(0b1_01111_0000000000, -0b01110_0000000001); // -1.0
+ test(0b0_01111_0000000001, 0b01110_0000000010); // 1.0 + 1ULP
+ test(0b1_01111_0000000001, -0b01110_0000000010); // -(1.0 + 1ULP)
+ test(0b0_10000_0000000000, 0b01111_0000000001); // 2.0
+ test(0b1_10000_0000000000, -0b01111_0000000001); // -2.0
+
+ const testThrows = (b: number) =>
+ t.shouldThrow('Error', () => floatBitsToNormalULPFromZero(b, kFloat16Format));
+ // Infinity
+ testThrows(0b0_11111_0000000000);
+ testThrows(0b1_11111_0000000000);
+ // NaN
+ testThrows(0b0_11111_1111111111);
+ testThrows(0b1_11111_1111111111);
+});
+
+g.test('floatBitsToULPFromZero,32').fn(t => {
+ const test = (bits: number, ulpFromZero: number) =>
+ t.expect(floatBitsToNormalULPFromZero(bits, kFloat32Format) === ulpFromZero, bits.toString(2));
+ // Zero
+ test(0b0_00000000_00000000000000000000000, 0);
+ test(0b1_00000000_00000000000000000000000, 0);
+ // Subnormal
+ test(0b0_00000000_00000000000000000000001, 0);
+ test(0b1_00000000_00000000000000000000001, 0);
+ test(0b0_00000000_11111111111111111111111, 0);
+ test(0b1_00000000_11111111111111111111111, 0);
+ // Normal
+ test(0b0_00000001_00000000000000000000000, 1); // 0 + 1ULP
+ test(0b1_00000001_00000000000000000000000, -1); // 0 - 1ULP
+ test(0b0_00000001_00000000000000000000001, 2); // 0 + 2ULP
+ test(0b1_00000001_00000000000000000000001, -2); // 0 - 2ULP
+ test(0b0_01111110_00000000000000000000000, 0b01111101_00000000000000000000001); // 0.5
+ test(0b1_01111110_00000000000000000000000, -0b01111101_00000000000000000000001); // -0.5
+ test(0b0_01111110_11111111111111111111110, 0b01111101_11111111111111111111111); // 1.0 - 2ULP
+ test(0b1_01111110_11111111111111111111110, -0b01111101_11111111111111111111111); // -(1.0 - 2ULP)
+ test(0b0_01111110_11111111111111111111111, 0b01111110_00000000000000000000000); // 1.0 - 1ULP
+ test(0b1_01111110_11111111111111111111111, -0b01111110_00000000000000000000000); // -(1.0 - 1ULP)
+ test(0b0_01111111_00000000000000000000000, 0b01111110_00000000000000000000001); // 1.0
+ test(0b1_01111111_00000000000000000000000, -0b01111110_00000000000000000000001); // -1.0
+ test(0b0_01111111_00000000000000000000001, 0b01111110_00000000000000000000010); // 1.0 + 1ULP
+ test(0b1_01111111_00000000000000000000001, -0b01111110_00000000000000000000010); // -(1.0 + 1ULP)
+ test(0b0_11110000_00000000000000000000000, 0b11101111_00000000000000000000001); // 2.0
+ test(0b1_11110000_00000000000000000000000, -0b11101111_00000000000000000000001); // -2.0
+
+ const testThrows = (b: number) =>
+ t.shouldThrow('Error', () => floatBitsToNormalULPFromZero(b, kFloat32Format));
+ // Infinity
+ testThrows(0b0_11111111_00000000000000000000000);
+ testThrows(0b1_11111111_00000000000000000000000);
+ // NaN
+ testThrows(0b0_11111111_11111111111111111111111);
+ testThrows(0b0_11111111_00000000000000000000001);
+ testThrows(0b1_11111111_11111111111111111111111);
+ testThrows(0b1_11111111_00000000000000000000001);
+});
+
+g.test('scalarWGSL').fn(t => {
+ const cases: Array<[Scalar, string]> = [
+ [f32(0.0), '0.0f'],
+ // The number -0.0 can be remapped to 0.0 when stored in a Scalar
+ // object. It is not possible to guarantee that '-0.0f' will
+ // be emitted. So the WGSL scalar value printing does not try
+ // to handle this case.
+ [f32(-0.0), '0.0f'], // -0.0 can be remapped to 0.0
+ [f32(1.0), '1.0f'],
+ [f32(-1.0), '-1.0f'],
+ [f32Bits(0x70000000), '1.5845632502852868e+29f'],
+ [f32Bits(0xf0000000), '-1.5845632502852868e+29f'],
+ [f16Bits(0), '0.0h'],
+ [f16Bits(0x3c00), '1.0h'],
+ [f16Bits(0xbc00), '-1.0h'],
+ [u32(0), '0u'],
+ [u32(1), '1u'],
+ [u32(2000000000), '2000000000u'],
+ [u32(-1), '4294967295u'],
+ [i32(0), 'i32(0)'],
+ [i32(1), 'i32(1)'],
+ [i32(-1), 'i32(-1)'],
+ [bool(true), 'true'],
+ [bool(false), 'false'],
+ ];
+ for (const [value, expect] of cases) {
+ const got = value.wgsl();
+ t.expect(
+ got === expect,
+ `[value: ${value.value}, type: ${value.type}]
+got: ${got}
+expect: ${expect}`
+ );
+ }
+});
+
+g.test('vectorWGSL').fn(t => {
+ const cases: Array<[Vector, string]> = [
+ [vec2(f32(42.0), f32(24.0)), 'vec2(42.0f, 24.0f)'],
+ [vec2(f16Bits(0x5140), f16Bits(0x4e00)), 'vec2(42.0h, 24.0h)'],
+ [vec2(u32(42), u32(24)), 'vec2(42u, 24u)'],
+ [vec2(i32(42), i32(24)), 'vec2(i32(42), i32(24))'],
+ [vec2(bool(false), bool(true)), 'vec2(false, true)'],
+
+ [vec3(f32(0.0), f32(1.0), f32(-1.0)), 'vec3(0.0f, 1.0f, -1.0f)'],
+ [vec3(f16Bits(0), f16Bits(0x3c00), f16Bits(0xbc00)), 'vec3(0.0h, 1.0h, -1.0h)'],
+ [vec3(u32(0), u32(1), u32(-1)), 'vec3(0u, 1u, 4294967295u)'],
+ [vec3(i32(0), i32(1), i32(-1)), 'vec3(i32(0), i32(1), i32(-1))'],
+ [vec3(bool(true), bool(false), bool(true)), 'vec3(true, false, true)'],
+
+ [vec4(f32(1.0), f32(-2.0), f32(4.0), f32(-8.0)), 'vec4(1.0f, -2.0f, 4.0f, -8.0f)'],
+ [
+ vec4(f16Bits(0xbc00), f16Bits(0x4000), f16Bits(0xc400), f16Bits(0x4800)),
+ 'vec4(-1.0h, 2.0h, -4.0h, 8.0h)',
+ ],
+ [vec4(u32(1), u32(-2), u32(4), u32(-8)), 'vec4(1u, 4294967294u, 4u, 4294967288u)'],
+ [vec4(i32(1), i32(-2), i32(4), i32(-8)), 'vec4(i32(1), i32(-2), i32(4), i32(-8))'],
+ [vec4(bool(false), bool(true), bool(true), bool(false)), 'vec4(false, true, true, false)'],
+ ];
+ for (const [value, expect] of cases) {
+ const got = value.wgsl();
+ t.expect(
+ got === expect,
+ `[values: ${value.elements}, type: ${value.type}]
+got: ${got}
+expect: ${expect}`
+ );
+ }
+});
+
+g.test('matrixWGSL').fn(t => {
+ const cases: Array<[Matrix, string]> = [
+ [
+ toMatrix(
+ [
+ [0.0, 1.0],
+ [2.0, 3.0],
+ ],
+ f32
+ ),
+ 'mat2x2(0.0f, 1.0f, 2.0f, 3.0f)',
+ ],
+ [
+ toMatrix(
+ [
+ [0.0, 1.0, 2.0],
+ [3.0, 4.0, 5.0],
+ ],
+ f32
+ ),
+ 'mat2x3(0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f)',
+ ],
+ [
+ toMatrix(
+ [
+ [0.0, 1.0, 2.0, 3.0],
+ [4.0, 5.0, 6.0, 7.0],
+ ],
+ f32
+ ),
+ 'mat2x4(0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f)',
+ ],
+ [
+ toMatrix(
+ [
+ [0.0, 1.0],
+ [2.0, 3.0],
+ [4.0, 5.0],
+ ],
+ f32
+ ),
+ 'mat3x2(0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f)',
+ ],
+ [
+ toMatrix(
+ [
+ [0.0, 1.0, 2.0],
+ [3.0, 4.0, 5.0],
+ [6.0, 7.0, 8.0],
+ ],
+ f32
+ ),
+ 'mat3x3(0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f)',
+ ],
+ [
+ toMatrix(
+ [
+ [0.0, 1.0, 2.0, 3.0],
+ [4.0, 5.0, 6.0, 7.0],
+ [8.0, 9.0, 10.0, 11.0],
+ ],
+ f32
+ ),
+ 'mat3x4(0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f)',
+ ],
+ [
+ toMatrix(
+ [
+ [0.0, 1.0],
+ [2.0, 3.0],
+ [4.0, 5.0],
+ [6.0, 7.0],
+ ],
+ f32
+ ),
+ 'mat4x2(0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f)',
+ ],
+ [
+ toMatrix(
+ [
+ [0.0, 1.0, 2.0],
+ [3.0, 4.0, 5.0],
+ [6.0, 7.0, 8.0],
+ [9.0, 10.0, 11.0],
+ ],
+ f32
+ ),
+ 'mat4x3(0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f)',
+ ],
+ [
+ toMatrix(
+ [
+ [0.0, 1.0, 2.0, 3.0],
+ [4.0, 5.0, 6.0, 7.0],
+ [8.0, 9.0, 10.0, 11.0],
+ [12.0, 13.0, 14.0, 15.0],
+ ],
+ f32
+ ),
+ 'mat4x4(0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f)',
+ ],
+ ];
+ for (const [value, expect] of cases) {
+ const got = value.wgsl();
+ t.expect(
+ got === expect,
+ `[values: ${value.elements}, type: ${value.type}]
+got: ${got}
+expect: ${expect}`
+ );
+ }
+});
+
+g.test('constructorMatrix')
+ .params(u =>
+ u
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ .combine('type', ['f32'] as const)
+ )
+ .fn(t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const type = t.params.type;
+ const scalar_builder = type === 'f32' ? f32 : undefined;
+ assert(scalar_builder !== undefined, `Unexpected type param '${type}' provided`);
+
+ const elements = [...Array(cols).keys()].map(c => {
+ return [...Array(rows).keys()].map(r => scalar_builder(c * cols + r));
+ });
+
+ const got = new Matrix(elements);
+ const got_type = got.type;
+ t.expect(
+ got_type.cols === cols,
+ `expected Matrix to have ${cols} columns, received ${got_type.cols} instead`
+ );
+ t.expect(
+ got_type.rows === rows,
+      `expected Matrix to have ${rows} rows, received ${got_type.rows} instead`
+ );
+ t.expect(
+ got_type.elementType.kind === type,
+ `expected Matrix to have ${type} elements, received ${got_type.elementType.kind} instead`
+ );
+ t.expect(
+ objectEquals(got.elements, elements),
+ `Matrix did not have expected elements (${JSON.stringify(elements)}), instead had (${
+ got.elements
+ })`
+ );
+ });
+
+g.test('pack2x16float')
+ .paramsSimple([
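+    // `result` lists every acceptable packed value for the pair of inputs:
+    // zeroes may carry either sign, values between adjacent f16 values may
+    // round either way, and subnormal inputs may be flushed to zero.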
+ // f16 normals
+ { inputs: [0, 0], result: [0x00000000, 0x80000000, 0x00008000, 0x80008000] },
+ { inputs: [1, 0], result: [0x00003c00, 0x80003c00] },
+ { inputs: [1, 1], result: [0x3c003c00] },
+ { inputs: [-1, -1], result: [0xbc00bc00] },
+ { inputs: [10, 1], result: [0x3c004900] },
+ { inputs: [-10, 1], result: [0x3c00c900] },
+
+ // f32 normal, but not f16 precise
+ { inputs: [1.00000011920928955078125, 1], result: [0x3c003c00, 0x3c003c01] },
+
+ // f32 subnormals
+ // prettier-ignore
+ { inputs: [kValue.f32.positive.subnormal.max, 1], result: [0x3c000000, 0x3c008000, 0x3c000001] },
+ // prettier-ignore
+ { inputs: [kValue.f32.negative.subnormal.min, 1], result: [0x3c008001, 0x3c000000, 0x3c008000] },
+
+ // f16 subnormals
+ // prettier-ignore
+ { inputs: [kValue.f16.positive.subnormal.max, 1], result: [0x3c0003ff, 0x3c000000, 0x3c008000] },
+ // prettier-ignore
+    { inputs: [kValue.f16.negative.subnormal.min, 1], result: [0x3c0083ff, 0x3c000000, 0x3c008000] },
+
+ // f16 out of bounds
+ { inputs: [kValue.f16.positive.max + 1, 1], result: [undefined] },
+ { inputs: [kValue.f16.negative.min - 1, 1], result: [undefined] },
+ { inputs: [1, kValue.f16.positive.max + 1], result: [undefined] },
+ { inputs: [1, kValue.f16.negative.min - 1], result: [undefined] },
+ ] as const)
+ .fn(test => {
+    const toString = (data: readonly (undefined | number)[]): string[] => {
+ return data.map(d => (d !== undefined ? u32(d).toString() : 'undefined'));
+ };
+
+ const inputs = test.params.inputs;
+ const got = pack2x16float(inputs[0], inputs[1]);
+ const expect = test.params.result;
+
+ const got_str = toString(got);
+ const expect_str = toString(expect);
+
+ // Using strings of the outputs, so they can be easily sorted, since order of the results doesn't matter.
+ test.expect(
+ objectEquals(got_str.sort(), expect_str.sort()),
+ `pack2x16float(${inputs}) returned [${got_str}]. Expected [${expect_str}]`
+ );
+ });
+
+g.test('pack2x16snorm')
+ .paramsSimple([
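+    // Per the WGSL pack2x16snorm rules, each component packs as
+    // floor(0.5 + 32767 * clamp(x, -1, 1)) in 16 bits,
+    // e.g. 0.1 -> 0x0ccd and -0.1 -> 0xf333.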
+ // Normals
+ { inputs: [0, 0], result: 0x00000000 },
+ { inputs: [1, 0], result: 0x00007fff },
+ { inputs: [0, 1], result: 0x7fff0000 },
+ { inputs: [1, 1], result: 0x7fff7fff },
+ { inputs: [-1, -1], result: 0x80018001 },
+ { inputs: [10, 10], result: 0x7fff7fff },
+ { inputs: [-10, -10], result: 0x80018001 },
+ { inputs: [0.1, 0.1], result: 0x0ccd0ccd },
+ { inputs: [-0.1, -0.1], result: 0xf333f333 },
+ { inputs: [0.5, 0.5], result: 0x40004000 },
+ { inputs: [-0.5, -0.5], result: 0xc001c001 },
+ { inputs: [0.1, 0.5], result: 0x40000ccd },
+ { inputs: [-0.1, -0.5], result: 0xc001f333 },
+
+ // Subnormals
+ { inputs: [kValue.f32.positive.subnormal.max, 1], result: 0x7fff0000 },
+ { inputs: [kValue.f32.negative.subnormal.min, 1], result: 0x7fff0000 },
+ ] as const)
+ .fn(test => {
+ const inputs = test.params.inputs;
+ const got = pack2x16snorm(inputs[0], inputs[1]);
+ const expect = test.params.result;
+
+ test.expect(got === expect, `pack2x16snorm(${inputs}) returned ${got}. Expected ${expect}`);
+ });
+
+g.test('pack2x16unorm')
+ .paramsSimple([
+ // Normals
+ { inputs: [0, 0], result: 0x00000000 },
+ { inputs: [1, 0], result: 0x0000ffff },
+ { inputs: [0, 1], result: 0xffff0000 },
+ { inputs: [1, 1], result: 0xffffffff },
+ { inputs: [-1, -1], result: 0x00000000 },
+ { inputs: [0.1, 0.1], result: 0x199a199a },
+ { inputs: [0.5, 0.5], result: 0x80008000 },
+ { inputs: [0.1, 0.5], result: 0x8000199a },
+ { inputs: [10, 10], result: 0xffffffff },
+
+ // Subnormals
+ { inputs: [kValue.f32.positive.subnormal.max, 1], result: 0xffff0000 },
+ ] as const)
+ .fn(test => {
+ const inputs = test.params.inputs;
+ const got = pack2x16unorm(inputs[0], inputs[1]);
+ const expect = test.params.result;
+
+ test.expect(got === expect, `pack2x16unorm(${inputs}) returned ${got}. Expected ${expect}`);
+ });
+
+g.test('pack4x8snorm')
+ .paramsSimple([
+ // Normals
+ { inputs: [0, 0, 0, 0], result: 0x00000000 },
+ { inputs: [1, 0, 0, 0], result: 0x0000007f },
+ { inputs: [0, 1, 0, 0], result: 0x00007f00 },
+ { inputs: [0, 0, 1, 0], result: 0x007f0000 },
+ { inputs: [0, 0, 0, 1], result: 0x7f000000 },
+ { inputs: [1, 1, 1, 1], result: 0x7f7f7f7f },
+ { inputs: [10, 10, 10, 10], result: 0x7f7f7f7f },
+ { inputs: [-1, 0, 0, 0], result: 0x00000081 },
+ { inputs: [0, -1, 0, 0], result: 0x00008100 },
+ { inputs: [0, 0, -1, 0], result: 0x00810000 },
+ { inputs: [0, 0, 0, -1], result: 0x81000000 },
+ { inputs: [-1, -1, -1, -1], result: 0x81818181 },
+ { inputs: [-10, -10, -10, -10], result: 0x81818181 },
+ { inputs: [0.1, 0.1, 0.1, 0.1], result: 0x0d0d0d0d },
+ { inputs: [-0.1, -0.1, -0.1, -0.1], result: 0xf3f3f3f3 },
+ { inputs: [0.1, -0.1, 0.1, -0.1], result: 0xf30df30d },
+ { inputs: [0.5, 0.5, 0.5, 0.5], result: 0x40404040 },
+ { inputs: [-0.5, -0.5, -0.5, -0.5], result: 0xc1c1c1c1 },
+ { inputs: [-0.5, 0.5, -0.5, 0.5], result: 0x40c140c1 },
+ { inputs: [0.1, 0.5, 0.1, 0.5], result: 0x400d400d },
+ { inputs: [-0.1, -0.5, -0.1, -0.5], result: 0xc1f3c1f3 },
+
+ // Subnormals
+ { inputs: [kValue.f32.positive.subnormal.max, 1, 1, 1], result: 0x7f7f7f00 },
+ { inputs: [kValue.f32.negative.subnormal.min, 1, 1, 1], result: 0x7f7f7f00 },
+ ] as const)
+ .fn(test => {
+ const inputs = test.params.inputs;
+ const got = pack4x8snorm(inputs[0], inputs[1], inputs[2], inputs[3]);
+ const expect = test.params.result;
+
+ test.expect(got === expect, `pack4x8snorm(${inputs}) returned ${u32(got)}. Expected ${expect}`);
+ });
+
+g.test('pack4x8unorm')
+ .paramsSimple([
+ // Normals
+ { inputs: [0, 0, 0, 0], result: 0x00000000 },
+ { inputs: [1, 0, 0, 0], result: 0x000000ff },
+ { inputs: [0, 1, 0, 0], result: 0x0000ff00 },
+ { inputs: [0, 0, 1, 0], result: 0x00ff0000 },
+ { inputs: [0, 0, 0, 1], result: 0xff000000 },
+ { inputs: [1, 1, 1, 1], result: 0xffffffff },
+ { inputs: [10, 10, 10, 10], result: 0xffffffff },
+ { inputs: [-1, -1, -1, -1], result: 0x00000000 },
+ { inputs: [-10, -10, -10, -10], result: 0x00000000 },
+ { inputs: [0.1, 0.1, 0.1, 0.1], result: 0x1a1a1a1a },
+ { inputs: [0.5, 0.5, 0.5, 0.5], result: 0x80808080 },
+ { inputs: [0.1, 0.5, 0.1, 0.5], result: 0x801a801a },
+
+ // Subnormals
+ { inputs: [kValue.f32.positive.subnormal.max, 1, 1, 1], result: 0xffffff00 },
+ ] as const)
+ .fn(test => {
+ const inputs = test.params.inputs;
+ const got = pack4x8unorm(inputs[0], inputs[1], inputs[2], inputs[3]);
+ const expect = test.params.result;
+
+ test.expect(got === expect, `pack4x8unorm(${inputs}) returned ${got}. Expected ${expect}`);
+ });
+
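+// RGB9E5 encoding used below, MSB to LSB: 5-bit shared exponent | 9-bit blue |
+// 9-bit green | 9-bit red. A component decodes as mantissa * 2^(exponent - 24),
+// e.g. r1: exponent 16, red mantissa 256 -> 256 * 2^-8 = 1.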
+const kRGB9E5UFloatCommonData = {
+ zero: /* */ { encoded: 0b00000_000000000_000000000_000000000, rgb: [0, 0, 0] },
+ max: /* */ { encoded: 0b11111_111111111_111111111_111111111, rgb: [65408, 65408, 65408] },
+ r1: /* */ { encoded: 0b10000_000000000_000000000_100000000, rgb: [1, 0, 0] },
+ r2: /* */ { encoded: 0b10001_000000000_000000000_100000000, rgb: [2, 0, 0] },
+ g1: /* */ { encoded: 0b10000_000000000_100000000_000000000, rgb: [0, 1, 0] },
+ g2: /* */ { encoded: 0b10001_000000000_100000000_000000000, rgb: [0, 2, 0] },
+ b1: /* */ { encoded: 0b10000_100000000_000000000_000000000, rgb: [0, 0, 1] },
+ b2: /* */ { encoded: 0b10001_100000000_000000000_000000000, rgb: [0, 0, 2] },
+ r1_g1_b1: /* */ { encoded: 0b10000_100000000_100000000_100000000, rgb: [1, 1, 1] },
+ r1_g2_b1: /* */ { encoded: 0b10001_010000000_100000000_010000000, rgb: [1, 2, 1] },
+ r4_g8_b2: /* */ { encoded: 0b10011_001000000_100000000_010000000, rgb: [4, 8, 2] },
+ r1_g2_b3: /* */ { encoded: 0b10001_110000000_100000000_010000000, rgb: [1, 2, 3] },
+ r128_g3968_b65408: { encoded: 0b11111_111111111_000011111_000000001, rgb: [128, 3968, 65408] },
+ r128_g1984_b30016: { encoded: 0b11110_111010101_000011111_000000010, rgb: [128, 1984, 30016] },
+ r_5_g_25_b_8: /**/ { encoded: 0b10011_100000000_000001000_000010000, rgb: [0.5, 0.25, 8] },
+};
+
+const kPackRGB9E5UFloatData = mergeParams(kRGB9E5UFloatCommonData, {
+ clamp_max: /* */ { encoded: 0b11111_111111111_111111111_111111111, rgb: [1e7, 1e10, 1e50] },
+ subnormals: /* */ { encoded: 0b00000_000000000_000000000_000000000, rgb: [1e-10, 1e-20, 1e-30] },
+ r57423_g54_b3478: { encoded: 0b11111_000011011_000000000_111000001, rgb: [57423, 54, 3478] },
+ r6852_g3571_b2356: { encoded: 0b11100_010010011_011011111_110101100, rgb: [6852, 3571, 2356] },
+ r68312_g12_b8123: { encoded: 0b11111_000111111_000000000_111111111, rgb: [68312, 12, 8123] },
+ r7321_g846_b32: { encoded: 0b11100_000000010_000110101_111001010, rgb: [7321, 846, 32] },
+});
+
+function bits5_9_9_9(x: number) {
+ const s = (x >>> 0).toString(2).padStart(32, '0');
+ return `${s.slice(0, 5)}_${s.slice(5, 14)}_${s.slice(14, 23)}_${s.slice(23, 32)}`;
+}
+
+g.test('packRGB9E5UFloat')
+ .params(u => u.combine('case', keysOf(kPackRGB9E5UFloatData)))
+ .fn(test => {
+ const c = kPackRGB9E5UFloatData[test.params.case];
+ const got = packRGB9E5UFloat(c.rgb[0], c.rgb[1], c.rgb[2]);
+ const expect = c.encoded;
+
+ test.expect(
+ got === expect,
+ `packRGB9E5UFloat(${c.rgb}) returned ${bits5_9_9_9(got)}. Expected ${bits5_9_9_9(expect)}`
+ );
+ });
+
+g.test('unpackRGB9E5UFloat')
+ .params(u => u.combine('case', keysOf(kRGB9E5UFloatCommonData)))
+ .fn(test => {
+ const c = kRGB9E5UFloatCommonData[test.params.case];
+ const got = unpackRGB9E5UFloat(c.encoded);
+ const expect = c.rgb;
+
+ test.expect(
+ got.R === expect[0] && got.G === expect[1] && got.B === expect[2],
+ `unpackRGB9E5UFloat(${bits5_9_9_9(c.encoded)} ` +
+ `returned ${got.R},${got.G},${got.B}. Expected ${expect}`
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/unittests/floating_point.spec.ts b/dom/webgpu/tests/cts/checkout/src/unittests/floating_point.spec.ts
new file mode 100644
index 0000000000..e8f8525d7f
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/unittests/floating_point.spec.ts
@@ -0,0 +1,8238 @@
+export const description = `
+Floating Point unit tests.
+`;
+
+import { makeTestGroup } from '../common/framework/test_group.js';
+import { objectEquals, unreachable } from '../common/util/util.js';
+import { kValue } from '../webgpu/util/constants.js';
+import { FP, FPInterval, FPIntervalParam, IntervalBounds } from '../webgpu/util/floating_point.js';
+import { map2DArray, oneULPF32, oneULPF16, oneULPF64 } from '../webgpu/util/math.js';
+import {
+ reinterpretU16AsF16,
+ reinterpretU32AsF32,
+ reinterpretU64AsF64,
+} from '../webgpu/util/reinterpret.js';
+
+import { UnitTest } from './unit_test.js';
+
+export const g = makeTestGroup(UnitTest);
+
+/**
+ * For ULP purposes, abstract float behaves like f32, so it needs to be
+ * swizzled in when computing expectations.
+ */
+const kFPTraitForULP = {
+ abstract: 'f32',
+ f32: 'f32',
+ f16: 'f16',
+} as const;
+
+/** Bounds indicating an expectation of unbounded error */
+const kUnboundedBounds: IntervalBounds = [Number.NEGATIVE_INFINITY, Number.POSITIVE_INFINITY];
+
+/** Interval from kUnboundedBounds */
+const kUnboundedInterval = {
+ f32: FP.f32.toParam(kUnboundedBounds),
+ f16: FP.f16.toParam(kUnboundedBounds),
+ abstract: FP.abstract.toParam(kUnboundedBounds),
+};
+
+/** @returns a number N * ULP greater than the provided number */
+const kPlusNULPFunctions = {
+ f32: (x: number, n: number) => {
+ return x + n * oneULPF32(x);
+ },
+ f16: (x: number, n: number) => {
+ return x + n * oneULPF16(x);
+ },
+ abstract: (x: number, n: number) => {
+ return x + n * oneULPF64(x);
+ },
+};
+
+/** @returns a number one ULP greater than the provided number */
+const kPlusOneULPFunctions = {
+ f32: (x: number): number => {
+ return kPlusNULPFunctions['f32'](x, 1);
+ },
+ f16: (x: number): number => {
+ return kPlusNULPFunctions['f16'](x, 1);
+ },
+ abstract: (x: number): number => {
+ return kPlusNULPFunctions['abstract'](x, 1);
+ },
+};
+
+/** @returns a number N * ULP less than the provided number */
+const kMinusNULPFunctions = {
+ f32: (x: number, n: number) => {
+ return x - n * oneULPF32(x);
+ },
+ f16: (x: number, n: number) => {
+ return x - n * oneULPF16(x);
+ },
+ abstract: (x: number, n: number) => {
+ return x - n * oneULPF64(x);
+ },
+};
+
+/** @returns a number one ULP less than the provided number */
+const kMinusOneULPFunctions = {
+ f32: (x: number): number => {
+ return kMinusNULPFunctions['f32'](x, 1);
+ },
+ f16: (x: number): number => {
+ return kMinusNULPFunctions['f16'](x, 1);
+ },
+ abstract: (x: number): number => {
+ return kMinusNULPFunctions['abstract'](x, 1);
+ },
+};
+
+/** @returns the expected IntervalBounds adjusted by the given error function
+ *
+ * @param expected the bounds to be adjusted
+ * @param error error function to adjust the bounds via
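+ *
+ * For example, applyError([1, 2], n => 0.1 * n) widens the bounds to
+ * [0.9, 2.2], while applyError(5, _ => 0) collapses back to the point [5].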
+ */
+function applyError(
+ expected: number | IntervalBounds,
+ error: (n: number) => number
+): IntervalBounds {
+  // Avoid going through FPInterval so as not to tie this to a specific kind
+ const unpack = (n: number | IntervalBounds): [number, number] => {
+ if (expected instanceof Array) {
+ switch (expected.length) {
+ case 1:
+ return [expected[0], expected[0]];
+ case 2:
+ return [expected[0], expected[1]];
+ }
+ unreachable(`Tried to unpack an IntervalBounds with length other than 1 or 2`);
+ } else {
+ // TS doesn't narrow this to number automatically
+ return [n as number, n as number];
+ }
+ };
+
+ let [begin, end] = unpack(expected);
+
+ begin -= error(begin);
+ end += error(end);
+
+ if (begin === end) {
+ return [begin];
+ }
+ return [begin, end];
+}
+
+// FPInterval
+
+interface ConstructorCase {
+ input: IntervalBounds;
+ expected: IntervalBounds;
+}
+
+g.test('constructor')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<ConstructorCase>(p => {
+ const constants = FP[p.trait].constants();
+ // prettier-ignore
+ const cases: ConstructorCase[] = [
+ // Common cases
+ { input: [0, 10], expected: [0, 10] },
+ { input: [-5, 0], expected: [-5, 0] },
+ { input: [-5, 10], expected: [-5, 10] },
+ { input: [0], expected: [0] },
+ { input: [10], expected: [10] },
+ { input: [-5], expected: [-5] },
+ { input: [2.5], expected: [2.5] },
+ { input: [-1.375], expected: [-1.375] },
+ { input: [-1.375, 2.5], expected: [-1.375, 2.5] },
+
+ // Edges
+ { input: [0, constants.positive.max], expected: [0, constants.positive.max] },
+ { input: [constants.negative.min, 0], expected: [constants.negative.min, 0] },
+ { input: [constants.negative.min, constants.positive.max], expected: [constants.negative.min, constants.positive.max] },
+
+ // Infinities
+ { input: [0, constants.positive.infinity], expected: [0, Number.POSITIVE_INFINITY] },
+ { input: [constants.negative.infinity, 0], expected: [Number.NEGATIVE_INFINITY, 0] },
+ { input: [constants.negative.infinity, constants.positive.infinity], expected: kUnboundedBounds },
+ ];
+
+ // Note: Out of range values are limited to infinities for abstract float, due to abstract
+ // float and 'number' both being f64. So there are no separate OOR tests for abstract float,
+ // otherwise the testing framework will consider them duplicated.
+ if (p.trait !== 'abstract') {
+ // prettier-ignore
+ cases.push(...[
+ // Out of range
+ { input: [0, 2 * constants.positive.max], expected: [0, 2 * constants.positive.max] },
+ { input: [2 * constants.negative.min, 0], expected: [2 * constants.negative.min, 0] },
+ { input: [2 * constants.negative.min, 2 * constants.positive.max], expected: [2 * constants.negative.min, 2 * constants.positive.max] },
+ ] as ConstructorCase[]);
+ }
+
+ return cases;
+ })
+ )
+ .fn(t => {
+ const i = new FPInterval(t.params.trait, ...t.params.input);
+ t.expect(
+ objectEquals(i.bounds(), t.params.expected),
+ `new FPInterval('${t.params.trait}', [${t.params.input}]) returned ${i}. Expected [${t.params.expected}]`
+ );
+ });
+
+interface ContainsNumberCase {
+ bounds: number | IntervalBounds;
+ value: number;
+ expected: boolean;
+}
+
+g.test('contains_number')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<ContainsNumberCase>(p => {
+ const constants = FP[p.trait].constants();
+ // prettier-ignore
+ const cases: ContainsNumberCase[] = [
+ // Common usage
+ { bounds: [0, 10], value: 0, expected: true },
+ { bounds: [0, 10], value: 10, expected: true },
+ { bounds: [0, 10], value: 5, expected: true },
+ { bounds: [0, 10], value: -5, expected: false },
+ { bounds: [0, 10], value: 50, expected: false },
+ { bounds: [0, 10], value: Number.NaN, expected: false },
+ { bounds: [-5, 10], value: 0, expected: true },
+ { bounds: [-5, 10], value: 10, expected: true },
+ { bounds: [-5, 10], value: 5, expected: true },
+ { bounds: [-5, 10], value: -5, expected: true },
+ { bounds: [-5, 10], value: -6, expected: false },
+ { bounds: [-5, 10], value: 50, expected: false },
+ { bounds: [-5, 10], value: -10, expected: false },
+ { bounds: [-1.375, 2.5], value: -10, expected: false },
+ { bounds: [-1.375, 2.5], value: 0.5, expected: true },
+ { bounds: [-1.375, 2.5], value: 10, expected: false },
+
+ // Point
+ { bounds: 0, value: 0, expected: true },
+ { bounds: 0, value: 10, expected: false },
+ { bounds: 0, value: -1000, expected: false },
+ { bounds: 10, value: 10, expected: true },
+ { bounds: 10, value: 0, expected: false },
+ { bounds: 10, value: -10, expected: false },
+ { bounds: 10, value: 11, expected: false },
+
+ // Upper infinity
+ { bounds: [0, constants.positive.infinity], value: constants.positive.min, expected: true },
+ { bounds: [0, constants.positive.infinity], value: constants.positive.max, expected: true },
+ { bounds: [0, constants.positive.infinity], value: constants.positive.infinity, expected: true },
+ { bounds: [0, constants.positive.infinity], value: constants.negative.min, expected: false },
+ { bounds: [0, constants.positive.infinity], value: constants.negative.max, expected: false },
+ { bounds: [0, constants.positive.infinity], value: constants.negative.infinity, expected: false },
+
+ // Lower infinity
+ { bounds: [constants.negative.infinity, 0], value: constants.positive.min, expected: false },
+ { bounds: [constants.negative.infinity, 0], value: constants.positive.max, expected: false },
+ { bounds: [constants.negative.infinity, 0], value: constants.positive.infinity, expected: false },
+ { bounds: [constants.negative.infinity, 0], value: constants.negative.min, expected: true },
+ { bounds: [constants.negative.infinity, 0], value: constants.negative.max, expected: true },
+ { bounds: [constants.negative.infinity, 0], value: constants.negative.infinity, expected: true },
+
+ // Full infinity
+ { bounds: [constants.negative.infinity, constants.positive.infinity], value: constants.positive.min, expected: true },
+ { bounds: [constants.negative.infinity, constants.positive.infinity], value: constants.positive.max, expected: true },
+ { bounds: [constants.negative.infinity, constants.positive.infinity], value: constants.positive.infinity, expected: true },
+ { bounds: [constants.negative.infinity, constants.positive.infinity], value: constants.negative.min, expected: true },
+ { bounds: [constants.negative.infinity, constants.positive.infinity], value: constants.negative.max, expected: true },
+ { bounds: [constants.negative.infinity, constants.positive.infinity], value: constants.negative.infinity, expected: true },
+ { bounds: [constants.negative.infinity, constants.positive.infinity], value: Number.NaN, expected: true },
+
+ // Maximum f32 boundary
+ { bounds: [0, constants.positive.max], value: constants.positive.min, expected: true },
+ { bounds: [0, constants.positive.max], value: constants.positive.max, expected: true },
+ { bounds: [0, constants.positive.max], value: constants.positive.infinity, expected: false },
+ { bounds: [0, constants.positive.max], value: constants.negative.min, expected: false },
+ { bounds: [0, constants.positive.max], value: constants.negative.max, expected: false },
+ { bounds: [0, constants.positive.max], value: constants.negative.infinity, expected: false },
+
+ // Minimum f32 boundary
+ { bounds: [constants.negative.min, 0], value: constants.positive.min, expected: false },
+ { bounds: [constants.negative.min, 0], value: constants.positive.max, expected: false },
+ { bounds: [constants.negative.min, 0], value: constants.positive.infinity, expected: false },
+ { bounds: [constants.negative.min, 0], value: constants.negative.min, expected: true },
+ { bounds: [constants.negative.min, 0], value: constants.negative.max, expected: true },
+ { bounds: [constants.negative.min, 0], value: constants.negative.infinity, expected: false },
+
+ // Subnormals
+ { bounds: [0, constants.positive.min], value: constants.positive.subnormal.min, expected: true },
+ { bounds: [0, constants.positive.min], value: constants.positive.subnormal.max, expected: true },
+ { bounds: [0, constants.positive.min], value: constants.negative.subnormal.min, expected: false },
+ { bounds: [0, constants.positive.min], value: constants.negative.subnormal.max, expected: false },
+ { bounds: [constants.negative.max, 0], value: constants.positive.subnormal.min, expected: false },
+ { bounds: [constants.negative.max, 0], value: constants.positive.subnormal.max, expected: false },
+ { bounds: [constants.negative.max, 0], value: constants.negative.subnormal.min, expected: true },
+ { bounds: [constants.negative.max, 0], value: constants.negative.subnormal.max, expected: true },
+ { bounds: [0, constants.positive.subnormal.min], value: constants.positive.subnormal.min, expected: true },
+ { bounds: [0, constants.positive.subnormal.min], value: constants.positive.subnormal.max, expected: false },
+ { bounds: [0, constants.positive.subnormal.min], value: constants.negative.subnormal.min, expected: false },
+ { bounds: [0, constants.positive.subnormal.min], value: constants.negative.subnormal.max, expected: false },
+ { bounds: [constants.negative.subnormal.max, 0], value: constants.positive.subnormal.min, expected: false },
+ { bounds: [constants.negative.subnormal.max, 0], value: constants.positive.subnormal.max, expected: false },
+ { bounds: [constants.negative.subnormal.max, 0], value: constants.negative.subnormal.min, expected: false },
+ { bounds: [constants.negative.subnormal.max, 0], value: constants.negative.subnormal.max, expected: true },
+ ];
+
+      // Note: Out-of-range values are limited to the infinities for abstract float, because
+      // abstract float and JS 'number' are both f64, so e.g. 2 * positive.max already rounds to
+      // infinity. Separate OOR cases for abstract float would therefore duplicate the infinity
+      // cases above, and the testing framework would reject them as duplicated subcases.
+ if (p.trait !== 'abstract') {
+ // prettier-ignore
+ cases.push(...[
+ // Out of range high
+ { bounds: [0, 2 * constants.positive.max], value: constants.positive.min, expected: true },
+ { bounds: [0, 2 * constants.positive.max], value: constants.positive.max, expected: true },
+ { bounds: [0, 2 * constants.positive.max], value: constants.positive.infinity, expected: false },
+ { bounds: [0, 2 * constants.positive.max], value: constants.negative.min, expected: false },
+ { bounds: [0, 2 * constants.positive.max], value: constants.negative.max, expected: false },
+ { bounds: [0, 2 * constants.positive.max], value: constants.negative.infinity, expected: false },
+
+ // Out of range low
+ { bounds: [2 * constants.negative.min, 0], value: constants.positive.min, expected: false },
+ { bounds: [2 * constants.negative.min, 0], value: constants.positive.max, expected: false },
+ { bounds: [2 * constants.negative.min, 0], value: constants.positive.infinity, expected: false },
+ { bounds: [2 * constants.negative.min, 0], value: constants.negative.min, expected: true },
+ { bounds: [2 * constants.negative.min, 0], value: constants.negative.max, expected: true },
+ { bounds: [2 * constants.negative.min, 0], value: constants.negative.infinity, expected: false },
+ ] as ContainsNumberCase[]);
+ }
+
+ return cases;
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const i = trait.toInterval(t.params.bounds);
+ const value = t.params.value;
+ const expected = t.params.expected;
+
+ const got = i.contains(value);
+ t.expect(expected === got, `${i}.contains(${value}) returned ${got}. Expected ${expected}`);
+ });
+
+interface ContainsIntervalCase {
+ lhs: number | IntervalBounds;
+ rhs: number | IntervalBounds;
+ expected: boolean;
+}
+
+g.test('contains_interval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<ContainsIntervalCase>(p => {
+ const constants = FP[p.trait].constants();
+ // prettier-ignore
+ const cases: ContainsIntervalCase[] = [
+ // Common usage
+ { lhs: [-10, 10], rhs: 0, expected: true },
+ { lhs: [-10, 10], rhs: [-1, 0], expected: true },
+ { lhs: [-10, 10], rhs: [0, 2], expected: true },
+ { lhs: [-10, 10], rhs: [-1, 2], expected: true },
+ { lhs: [-10, 10], rhs: [0, 10], expected: true },
+ { lhs: [-10, 10], rhs: [-10, 2], expected: true },
+ { lhs: [-10, 10], rhs: [-10, 10], expected: true },
+ { lhs: [-10, 10], rhs: [-100, 10], expected: false },
+
+ // Upper infinity
+ { lhs: [0, constants.positive.infinity], rhs: 0, expected: true },
+ { lhs: [0, constants.positive.infinity], rhs: [-1, 0], expected: false },
+ { lhs: [0, constants.positive.infinity], rhs: [0, 1], expected: true },
+ { lhs: [0, constants.positive.infinity], rhs: [0, constants.positive.max], expected: true },
+ { lhs: [0, constants.positive.infinity], rhs: [0, constants.positive.infinity], expected: true },
+ { lhs: [0, constants.positive.infinity], rhs: [100, constants.positive.infinity], expected: true },
+ { lhs: [0, constants.positive.infinity], rhs: [Number.NEGATIVE_INFINITY, constants.positive.infinity], expected: false },
+
+ // Lower infinity
+ { lhs: [constants.negative.infinity, 0], rhs: 0, expected: true },
+ { lhs: [constants.negative.infinity, 0], rhs: [-1, 0], expected: true },
+ { lhs: [constants.negative.infinity, 0], rhs: [constants.negative.min, 0], expected: true },
+ { lhs: [constants.negative.infinity, 0], rhs: [0, 1], expected: false },
+ { lhs: [constants.negative.infinity, 0], rhs: [constants.negative.infinity, 0], expected: true },
+ { lhs: [constants.negative.infinity, 0], rhs: [constants.negative.infinity, -100 ], expected: true },
+ { lhs: [constants.negative.infinity, 0], rhs: [constants.negative.infinity, constants.positive.infinity], expected: false },
+
+ // Full infinity
+ { lhs: [constants.negative.infinity, constants.positive.infinity], rhs: 0, expected: true },
+ { lhs: [constants.negative.infinity, constants.positive.infinity], rhs: [-1, 0], expected: true },
+ { lhs: [constants.negative.infinity, constants.positive.infinity], rhs: [0, 1], expected: true },
+ { lhs: [constants.negative.infinity, constants.positive.infinity], rhs: [0, constants.positive.infinity], expected: true },
+ { lhs: [constants.negative.infinity, constants.positive.infinity], rhs: [100, constants.positive.infinity], expected: true },
+ { lhs: [constants.negative.infinity, constants.positive.infinity], rhs: [constants.negative.infinity, 0], expected: true },
+ { lhs: [constants.negative.infinity, constants.positive.infinity], rhs: [constants.negative.infinity, -100 ], expected: true },
+ { lhs: [constants.negative.infinity, constants.positive.infinity], rhs: [constants.negative.infinity, constants.positive.infinity], expected: true },
+
+ // Maximum boundary
+ { lhs: [0, constants.positive.max], rhs: 0, expected: true },
+ { lhs: [0, constants.positive.max], rhs: [-1, 0], expected: false },
+ { lhs: [0, constants.positive.max], rhs: [0, 1], expected: true },
+ { lhs: [0, constants.positive.max], rhs: [0, constants.positive.max], expected: true },
+ { lhs: [0, constants.positive.max], rhs: [0, constants.positive.infinity], expected: false },
+ { lhs: [0, constants.positive.max], rhs: [100, constants.positive.infinity], expected: false },
+ { lhs: [0, constants.positive.max], rhs: [constants.negative.infinity, constants.positive.infinity], expected: false },
+
+ // Minimum boundary
+ { lhs: [constants.negative.min, 0], rhs: [0, 0], expected: true },
+ { lhs: [constants.negative.min, 0], rhs: [-1, 0], expected: true },
+ { lhs: [constants.negative.min, 0], rhs: [constants.negative.min, 0], expected: true },
+ { lhs: [constants.negative.min, 0], rhs: [0, 1], expected: false },
+ { lhs: [constants.negative.min, 0], rhs: [constants.negative.infinity, 0], expected: false },
+ { lhs: [constants.negative.min, 0], rhs: [constants.negative.infinity, -100 ], expected: false },
+ { lhs: [constants.negative.min, 0], rhs: [constants.negative.infinity, constants.positive.infinity], expected: false },
+ ];
+
+      // Note: Out-of-range values are limited to the infinities for abstract float, because
+      // abstract float and JS 'number' are both f64, so e.g. 2 * positive.max already rounds to
+      // infinity. Separate OOR cases for abstract float would therefore duplicate the infinity
+      // cases above, and the testing framework would reject them as duplicated subcases.
+ if (p.trait !== 'abstract') {
+ // prettier-ignore
+ cases.push(...[
+ // Out of range high
+ { lhs: [0, 2 * constants.positive.max], rhs: 0, expected: true },
+ { lhs: [0, 2 * constants.positive.max], rhs: [-1, 0], expected: false },
+ { lhs: [0, 2 * constants.positive.max], rhs: [0, 1], expected: true },
+ { lhs: [0, 2 * constants.positive.max], rhs: [0, constants.positive.max], expected: true },
+ { lhs: [0, 2 * constants.positive.max], rhs: [0, constants.positive.infinity], expected: false },
+ { lhs: [0, 2 * constants.positive.max], rhs: [100, constants.positive.infinity], expected: false },
+ { lhs: [0, 2 * constants.positive.max], rhs: [constants.negative.infinity, constants.positive.infinity], expected: false },
+
+ // Out of range low
+ { lhs: [2 * constants.negative.min, 0], rhs: 0, expected: true },
+ { lhs: [2 * constants.negative.min, 0], rhs: [-1, 0], expected: true },
+ { lhs: [2 * constants.negative.min, 0], rhs: [constants.negative.min, 0], expected: true },
+ { lhs: [2 * constants.negative.min, 0], rhs: [0, 1], expected: false },
+ { lhs: [2 * constants.negative.min, 0], rhs: [constants.negative.infinity, 0], expected: false },
+ { lhs: [2 * constants.negative.min, 0], rhs: [constants.negative.infinity, -100 ], expected: false },
+ { lhs: [2 * constants.negative.min, 0], rhs: [constants.negative.infinity, constants.positive.infinity], expected: false },
+ ] as ContainsIntervalCase[]);
+ }
+
+ return cases;
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const lhs = trait.toInterval(t.params.lhs);
+ const rhs = trait.toInterval(t.params.rhs);
+ const expected = t.params.expected;
+
+ const got = lhs.contains(rhs);
+ t.expect(expected === got, `${lhs}.contains(${rhs}) returned ${got}. Expected ${expected}`);
+ });
+
+// Utilities
+
+interface SpanIntervalsCase {
+ intervals: (number | IntervalBounds)[];
+ expected: number | IntervalBounds;
+}
+
+g.test('spanIntervals')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<SpanIntervalsCase>(p => {
+ const constants = FP[p.trait].constants();
+ // prettier-ignore
+ return [
+ // Single Intervals
+ { intervals: [[0, 10]], expected: [0, 10] },
+ { intervals: [[0, constants.positive.max]], expected: [0, constants.positive.max] },
+ { intervals: [[0, constants.positive.nearest_max]], expected: [0, constants.positive.nearest_max] },
+ { intervals: [[0, constants.positive.infinity]], expected: [0, Number.POSITIVE_INFINITY] },
+ { intervals: [[constants.negative.min, 0]], expected: [constants.negative.min, 0] },
+ { intervals: [[constants.negative.nearest_min, 0]], expected: [constants.negative.nearest_min, 0] },
+ { intervals: [[constants.negative.infinity, 0]], expected: [Number.NEGATIVE_INFINITY, 0] },
+
+ // Double Intervals
+ { intervals: [[0, 1], [2, 5]], expected: [0, 5] },
+ { intervals: [[2, 5], [0, 1]], expected: [0, 5] },
+ { intervals: [[0, 2], [1, 5]], expected: [0, 5] },
+ { intervals: [[0, 5], [1, 2]], expected: [0, 5] },
+ { intervals: [[constants.negative.infinity, 0], [0, constants.positive.infinity]], expected: kUnboundedBounds },
+
+ // Multiple Intervals
+ { intervals: [[0, 1], [2, 3], [4, 5]], expected: [0, 5] },
+ { intervals: [[0, 1], [4, 5], [2, 3]], expected: [0, 5] },
+ { intervals: [[0, 1], [0, 1], [0, 1]], expected: [0, 1] },
+
+ // Point Intervals
+ { intervals: [1], expected: 1 },
+ { intervals: [1, 2], expected: [1, 2] },
+ { intervals: [-10, 2], expected: [-10, 2] },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const intervals = t.params.intervals.map(i => trait.toInterval(i));
+ const expected = trait.toInterval(t.params.expected);
+
+ const got = trait.spanIntervals(...intervals);
+ t.expect(
+ objectEquals(got, expected),
+ `${t.params.trait}.span({${intervals}}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+interface isVectorCase {
+ input: (number | IntervalBounds | FPIntervalParam)[];
+ expected: boolean;
+}
+
+g.test('isVector')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<isVectorCase>(p => {
+ const trait = FP[p.trait];
+ return [
+ // numbers
+ { input: [1, 2], expected: false },
+ { input: [1, 2, 3], expected: false },
+ { input: [1, 2, 3, 4], expected: false },
+
+ // IntervalBounds
+ { input: [[1], [2]], expected: false },
+ { input: [[1], [2], [3]], expected: false },
+ { input: [[1], [2], [3], [4]], expected: false },
+ {
+ input: [
+ [1, 2],
+ [2, 3],
+ ],
+ expected: false,
+ },
+ {
+ input: [
+ [1, 2],
+ [2, 3],
+ [3, 4],
+ ],
+ expected: false,
+ },
+ {
+ input: [
+ [1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ ],
+ expected: false,
+ },
+
+ // FPInterval, valid dimensions
+ { input: [trait.toParam([1]), trait.toParam([2])], expected: true },
+ { input: [trait.toParam([1, 2]), trait.toParam([2, 3])], expected: true },
+ {
+ input: [trait.toParam([1]), trait.toParam([2]), trait.toParam([3])],
+ expected: true,
+ },
+ {
+ input: [trait.toParam([1, 2]), trait.toParam([2, 3]), trait.toParam([3, 4])],
+ expected: true,
+ },
+ {
+ input: [trait.toParam([1]), trait.toParam([2]), trait.toParam([3]), trait.toParam([4])],
+ expected: true,
+ },
+ {
+ input: [
+ trait.toParam([1, 2]),
+ trait.toParam([2, 3]),
+ trait.toParam([3, 4]),
+ trait.toParam([4, 5]),
+ ],
+ expected: true,
+ },
+
+ // FPInterval, invalid dimensions
+ { input: [trait.toParam([1])], expected: false },
+ {
+ input: [
+ trait.toParam([1]),
+ trait.toParam([2]),
+ trait.toParam([3]),
+ trait.toParam([4]),
+ trait.toParam([5]),
+ ],
+ expected: false,
+ },
+
+ // Mixed
+ { input: [1, [2]], expected: false },
+ { input: [1, [2], trait.toParam([3])], expected: false },
+ { input: [1, trait.toParam([2]), [3], 4], expected: false },
+ { input: [trait.toParam(1), 2], expected: false },
+ { input: [trait.toParam(1), [2]], expected: false },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const input = t.params.input.map(e => trait.fromParam(e));
+ const expected = t.params.expected;
+
+ const got = trait.isVector(input);
+ t.expect(
+ got === expected,
+ `${t.params.trait}.isVector([${input}]) returned ${got}. Expected ${expected}`
+ );
+ });
+
+interface toVectorCase {
+ input: (number | IntervalBounds | FPIntervalParam)[];
+ expected: (number | IntervalBounds)[];
+}
+
+g.test('toVector')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<toVectorCase>(p => {
+ const trait = FP[p.trait];
+ return [
+ // numbers
+ { input: [1, 2], expected: [1, 2] },
+ { input: [1, 2, 3], expected: [1, 2, 3] },
+ { input: [1, 2, 3, 4], expected: [1, 2, 3, 4] },
+
+ // IntervalBounds
+ { input: [[1], [2]], expected: [1, 2] },
+ { input: [[1], [2], [3]], expected: [1, 2, 3] },
+ { input: [[1], [2], [3], [4]], expected: [1, 2, 3, 4] },
+ {
+ input: [
+ [1, 2],
+ [2, 3],
+ ],
+ expected: [
+ [1, 2],
+ [2, 3],
+ ],
+ },
+ {
+ input: [
+ [1, 2],
+ [2, 3],
+ [3, 4],
+ ],
+ expected: [
+ [1, 2],
+ [2, 3],
+ [3, 4],
+ ],
+ },
+ {
+ input: [
+ [1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ ],
+ expected: [
+ [1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ ],
+ },
+
+ // FPInterval
+ { input: [trait.toParam([1]), trait.toParam([2])], expected: [1, 2] },
+ {
+ input: [trait.toParam([1, 2]), trait.toParam([2, 3])],
+ expected: [
+ [1, 2],
+ [2, 3],
+ ],
+ },
+ {
+ input: [trait.toParam([1]), trait.toParam([2]), trait.toParam([3])],
+ expected: [1, 2, 3],
+ },
+ {
+ input: [trait.toParam([1, 2]), trait.toParam([2, 3]), trait.toParam([3, 4])],
+ expected: [
+ [1, 2],
+ [2, 3],
+ [3, 4],
+ ],
+ },
+ {
+ input: [trait.toParam([1]), trait.toParam([2]), trait.toParam([3]), trait.toParam([4])],
+ expected: [1, 2, 3, 4],
+ },
+ {
+ input: [
+ trait.toParam([1, 2]),
+ trait.toParam([2, 3]),
+ trait.toParam([3, 4]),
+ trait.toParam([4, 5]),
+ ],
+ expected: [
+ [1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ ],
+ },
+
+ // Mixed
+ { input: [1, [2]], expected: [1, 2] },
+ { input: [1, [2], trait.toParam([3])], expected: [1, 2, 3] },
+ { input: [1, trait.toParam([2]), [3], 4], expected: [1, 2, 3, 4] },
+ {
+ input: [1, [2], [2, 3], kUnboundedInterval[p.trait]],
+ expected: [1, 2, [2, 3], kUnboundedBounds],
+ },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const input = t.params.input.map(e => trait.fromParam(e));
+ const expected = t.params.expected.map(e => trait.toInterval(e));
+
+ const got = trait.toVector(input);
+ t.expect(
+ objectEquals(got, expected),
+ `${t.params.trait}.toVector([${input}]) returned [${got}]. Expected [${expected}]`
+ );
+ });
+
+interface isMatrixCase {
+ input: (number | IntervalBounds | FPIntervalParam)[][];
+ expected: boolean;
+}
+
+g.test('isMatrix')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<isMatrixCase>(p => {
+ const trait = FP[p.trait];
+ return [
+ // numbers
+ {
+ input: [
+ [1, 2],
+ [3, 4],
+ ],
+ expected: false,
+ },
+ {
+ input: [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ ],
+ expected: false,
+ },
+ {
+ input: [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ [7, 8],
+ ],
+ expected: false,
+ },
+ {
+ input: [
+ [1, 2, 3],
+ [4, 5, 6],
+ ],
+ expected: false,
+ },
+ {
+ input: [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ ],
+ expected: false,
+ },
+ {
+ input: [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ [10, 11, 12],
+ ],
+ expected: false,
+ },
+ {
+ input: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ ],
+ expected: false,
+ },
+ {
+ input: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ ],
+ expected: false,
+ },
+ {
+ input: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ],
+ expected: false,
+ },
+
+ // IntervalBounds
+ {
+ input: [
+ [[1], [2]],
+ [[3], [4]],
+ ],
+ expected: false,
+ },
+ {
+ input: [
+ [[1], [2]],
+ [[3], [4]],
+ [[5], [6]],
+ ],
+ expected: false,
+ },
+ {
+ input: [
+ [[1], [2]],
+ [[3], [4]],
+ [[5], [6]],
+ [[7], [8]],
+ ],
+ expected: false,
+ },
+ {
+ input: [
+ [[1], [2], [3]],
+ [[4], [5], [6]],
+ ],
+ expected: false,
+ },
+ {
+ input: [
+ [[1], [2], [3]],
+ [[4], [5], [6]],
+ [[7], [8], [9]],
+ ],
+ expected: false,
+ },
+ {
+ input: [
+ [[1], [2], [3]],
+ [[4], [5], [6]],
+ [[7], [8], [9]],
+ [[10], [11], [12]],
+ ],
+ expected: false,
+ },
+ {
+ input: [
+ [[1], [2], [3], [4]],
+ [[5], [6], [7], [8]],
+ ],
+ expected: false,
+ },
+ {
+ input: [
+ [[1], [2], [3], [4]],
+ [[5], [6], [7], [8]],
+ [[9], [10], [11], [12]],
+ ],
+ expected: false,
+ },
+ {
+ input: [
+ [[1], [2], [3], [4]],
+ [[5], [6], [7], [8]],
+ [[9], [10], [11], [12]],
+ [[13], [14], [15], [16]],
+ ],
+ expected: false,
+ },
+
+ // FPInterval, valid dimensions
+ {
+ input: [
+ [trait.toParam(1), trait.toParam(2)],
+ [trait.toParam(3), trait.toParam(4)],
+ ],
+ expected: true,
+ },
+ {
+ input: [
+ [trait.toParam(1), trait.toParam(2)],
+ [trait.toParam(3), trait.toParam(4)],
+ [trait.toParam(5), trait.toParam(6)],
+ ],
+ expected: true,
+ },
+ {
+ input: [
+ [trait.toParam(1), trait.toParam(2)],
+ [trait.toParam(3), trait.toParam(4)],
+ [trait.toParam(5), trait.toParam(6)],
+ [trait.toParam(7), trait.toParam(8)],
+ ],
+ expected: true,
+ },
+ {
+ input: [
+ [trait.toParam(1), trait.toParam(2), trait.toParam(3)],
+ [trait.toParam(4), trait.toParam(5), trait.toParam(6)],
+ ],
+ expected: true,
+ },
+ {
+ input: [
+ [trait.toParam(1), trait.toParam(2), trait.toParam(3)],
+ [trait.toParam(4), trait.toParam(5), trait.toParam(6)],
+ [trait.toParam(7), trait.toParam(8), trait.toParam(9)],
+ ],
+ expected: true,
+ },
+ {
+ input: [
+ [trait.toParam(1), trait.toParam(2), trait.toParam(3)],
+ [trait.toParam(4), trait.toParam(5), trait.toParam(6)],
+ [trait.toParam(7), trait.toParam(8), trait.toParam(9)],
+ [trait.toParam(10), trait.toParam(11), trait.toParam(12)],
+ ],
+ expected: true,
+ },
+ {
+ input: [
+ [trait.toParam(1), trait.toParam(2), trait.toParam(3), trait.toParam(4)],
+ [trait.toParam(5), trait.toParam(6), trait.toParam(7), trait.toParam(8)],
+ ],
+ expected: true,
+ },
+ {
+ input: [
+ [trait.toParam(1), trait.toParam(2), trait.toParam(3), trait.toParam(4)],
+ [trait.toParam(5), trait.toParam(6), trait.toParam(7), trait.toParam(8)],
+ [trait.toParam(9), trait.toParam(10), trait.toParam(11), trait.toParam(12)],
+ ],
+ expected: true,
+ },
+ {
+ input: [
+ [trait.toParam(1), trait.toParam(2), trait.toParam(3), trait.toParam(4)],
+ [trait.toParam(5), trait.toParam(6), trait.toParam(7), trait.toParam(8)],
+ [trait.toParam(9), trait.toParam(10), trait.toParam(11), trait.toParam(12)],
+ [trait.toParam(13), trait.toParam(14), trait.toParam(15), trait.toParam(16)],
+ ],
+ expected: true,
+ },
+ {
+ input: [
+ [trait.toParam([1, 2]), trait.toParam([2, 3])],
+ [trait.toParam([3, 4]), trait.toParam([4, 5])],
+ ],
+ expected: true,
+ },
+ {
+ input: [
+ [trait.toParam([1, 2]), trait.toParam([2, 3])],
+ [trait.toParam([3, 4]), trait.toParam([4, 5])],
+ [trait.toParam([5, 6]), trait.toParam([6, 7])],
+ ],
+ expected: true,
+ },
+ {
+ input: [
+ [trait.toParam([1, 2]), trait.toParam([2, 3])],
+ [trait.toParam([3, 4]), trait.toParam([4, 5])],
+ [trait.toParam([5, 6]), trait.toParam([6, 7])],
+ [trait.toParam([7, 8]), trait.toParam([8, 9])],
+ ],
+ expected: true,
+ },
+ {
+ input: [
+ [trait.toParam([1, 2]), trait.toParam([2, 3]), trait.toParam([3, 4])],
+ [trait.toParam([4, 5]), trait.toParam([5, 6]), trait.toParam([6, 7])],
+ ],
+ expected: true,
+ },
+ {
+ input: [
+ [trait.toParam([1, 2]), trait.toParam([2, 3]), trait.toParam([3, 4])],
+ [trait.toParam([4, 5]), trait.toParam([5, 6]), trait.toParam([6, 7])],
+ [trait.toParam([7, 8]), trait.toParam([8, 9]), trait.toParam([9, 10])],
+ ],
+ expected: true,
+ },
+ {
+ input: [
+ [trait.toParam([1, 2]), trait.toParam([2, 3]), trait.toParam([3, 4])],
+ [trait.toParam([4, 5]), trait.toParam([5, 6]), trait.toParam([6, 7])],
+ [trait.toParam([7, 8]), trait.toParam([8, 9]), trait.toParam([9, 10])],
+ [trait.toParam([10, 11]), trait.toParam([11, 12]), trait.toParam([12, 13])],
+ ],
+ expected: true,
+ },
+ {
+ input: [
+ [
+ trait.toParam([1, 2]),
+ trait.toParam([2, 3]),
+ trait.toParam([3, 4]),
+ trait.toParam([4, 5]),
+ ],
+ [
+ trait.toParam([5, 6]),
+ trait.toParam([6, 7]),
+ trait.toParam([7, 8]),
+ trait.toParam([8, 9]),
+ ],
+ ],
+ expected: true,
+ },
+ {
+ input: [
+ [
+ trait.toParam([1, 2]),
+ trait.toParam([2, 3]),
+ trait.toParam([3, 4]),
+ trait.toParam([4, 5]),
+ ],
+ [
+ trait.toParam([5, 6]),
+ trait.toParam([6, 7]),
+ trait.toParam([7, 8]),
+ trait.toParam([8, 9]),
+ ],
+ [
+ trait.toParam([9, 10]),
+ trait.toParam([10, 11]),
+ trait.toParam([11, 12]),
+ trait.toParam([12, 13]),
+ ],
+ ],
+ expected: true,
+ },
+ {
+ input: [
+ [
+ trait.toParam([1, 2]),
+ trait.toParam([2, 3]),
+ trait.toParam([3, 4]),
+ trait.toParam([4, 5]),
+ ],
+ [
+ trait.toParam([5, 6]),
+ trait.toParam([6, 7]),
+ trait.toParam([7, 8]),
+ trait.toParam([8, 9]),
+ ],
+ [
+ trait.toParam([9, 10]),
+ trait.toParam([10, 11]),
+ trait.toParam([11, 12]),
+ trait.toParam([12, 13]),
+ ],
+ [
+ trait.toParam([13, 14]),
+ trait.toParam([14, 15]),
+ trait.toParam([15, 16]),
+ trait.toParam([16, 17]),
+ ],
+ ],
+ expected: true,
+ },
+
+ // FPInterval, invalid dimensions
+ { input: [[trait.toParam(1)]], expected: false },
+ {
+ input: [[trait.toParam(1)], [trait.toParam(3), trait.toParam(4)]],
+ expected: false,
+ },
+ {
+ input: [
+ [trait.toParam(1), trait.toParam(2)],
+ [trait.toParam(3), trait.toParam(4), trait.toParam(5)],
+ ],
+ expected: false,
+ },
+ {
+ input: [
+ [trait.toParam(1), trait.toParam(2)],
+ [trait.toParam(3), trait.toParam(4)],
+ [trait.toParam(5)],
+ ],
+ expected: false,
+ },
+ {
+ input: [
+ [trait.toParam(1), trait.toParam(2)],
+ [trait.toParam(3), trait.toParam(4)],
+ [trait.toParam(5), trait.toParam(6)],
+ [trait.toParam(7), trait.toParam(8)],
+ [trait.toParam(9), trait.toParam(10)],
+ ],
+ expected: false,
+ },
+
+ // Mixed
+ {
+ input: [
+ [1, [2]],
+ [3, 4],
+ ],
+ expected: false,
+ },
+ {
+ input: [
+ [[1], [2]],
+ [[3], 4],
+ ],
+ expected: false,
+ },
+ {
+ input: [
+ [1, 2],
+ [trait.toParam([3]), 4],
+ ],
+ expected: false,
+ },
+ {
+ input: [
+ [[1], trait.toParam([2])],
+ [trait.toParam([3]), trait.toParam([4])],
+ ],
+ expected: false,
+ },
+ {
+ input: [
+ [trait.toParam(1), [2]],
+ [3, 4],
+ ],
+ expected: false,
+ },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const input = t.params.input.map(a => a.map(e => trait.fromParam(e)));
+ const expected = t.params.expected;
+
+ const got = trait.isMatrix(input);
+ t.expect(
+ got === expected,
+ `${t.params.trait}.isMatrix([${input}]) returned ${got}. Expected ${expected}`
+ );
+ });
+
+interface toMatrixCase {
+ input: (number | IntervalBounds | FPIntervalParam)[][];
+ expected: (number | IntervalBounds)[][];
+}
+
+g.test('toMatrix')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<toMatrixCase>(p => {
+ const trait = FP[p.trait];
+ return [
+ // numbers
+ {
+ input: [
+ [1, 2],
+ [3, 4],
+ ],
+ expected: [
+ [1, 2],
+ [3, 4],
+ ],
+ },
+ {
+ input: [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ ],
+ expected: [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ ],
+ },
+ {
+ input: [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ [7, 8],
+ ],
+ expected: [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ [7, 8],
+ ],
+ },
+ {
+ input: [
+ [1, 2, 3],
+ [4, 5, 6],
+ ],
+ expected: [
+ [1, 2, 3],
+ [4, 5, 6],
+ ],
+ },
+ {
+ input: [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ ],
+ expected: [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ ],
+ },
+ {
+ input: [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ [10, 11, 12],
+ ],
+ expected: [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ [10, 11, 12],
+ ],
+ },
+ {
+ input: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ ],
+ expected: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ ],
+ },
+ {
+ input: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ ],
+ expected: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ ],
+ },
+ {
+ input: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ],
+ expected: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ],
+ },
+
+ // IntervalBounds
+ {
+ input: [
+ [[1], [2]],
+ [[3], [4]],
+ ],
+ expected: [
+ [1, 2],
+ [3, 4],
+ ],
+ },
+ {
+ input: [
+ [[1], [2]],
+ [[3], [4]],
+ [[5], [6]],
+ ],
+ expected: [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ ],
+ },
+ {
+ input: [
+ [[1], [2]],
+ [[3], [4]],
+ [[5], [6]],
+ [[7], [8]],
+ ],
+ expected: [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ [7, 8],
+ ],
+ },
+ {
+ input: [
+ [[1], [2], [3]],
+ [[4], [5], [6]],
+ ],
+ expected: [
+ [1, 2, 3],
+ [4, 5, 6],
+ ],
+ },
+ {
+ input: [
+ [[1], [2], [3]],
+ [[4], [5], [6]],
+ [[7], [8], [9]],
+ ],
+ expected: [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ ],
+ },
+ {
+ input: [
+ [[1], [2], [3]],
+ [[4], [5], [6]],
+ [[7], [8], [9]],
+ [[10], [11], [12]],
+ ],
+ expected: [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ [10, 11, 12],
+ ],
+ },
+ {
+ input: [
+ [[1], [2], [3], [4]],
+ [[5], [6], [7], [8]],
+ ],
+ expected: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ ],
+ },
+ {
+ input: [
+ [[1], [2], [3], [4]],
+ [[5], [6], [7], [8]],
+ [[9], [10], [11], [12]],
+ ],
+ expected: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ ],
+ },
+ {
+ input: [
+ [[1], [2], [3], [4]],
+ [[5], [6], [7], [8]],
+ [[9], [10], [11], [12]],
+ [[13], [14], [15], [16]],
+ ],
+ expected: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ],
+ },
+
+ // FPInterval
+ {
+ input: [
+ [trait.toParam(1), trait.toParam(2)],
+ [trait.toParam(3), trait.toParam(4)],
+ ],
+ expected: [
+ [1, 2],
+ [3, 4],
+ ],
+ },
+ {
+ input: [
+ [trait.toParam(1), trait.toParam(2)],
+ [trait.toParam(3), trait.toParam(4)],
+ [trait.toParam(5), trait.toParam(6)],
+ ],
+ expected: [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ ],
+ },
+ {
+ input: [
+ [trait.toParam(1), trait.toParam(2)],
+ [trait.toParam(3), trait.toParam(4)],
+ [trait.toParam(5), trait.toParam(6)],
+ [trait.toParam(7), trait.toParam(8)],
+ ],
+ expected: [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ [7, 8],
+ ],
+ },
+ {
+ input: [
+ [trait.toParam(1), trait.toParam(2), trait.toParam(3)],
+ [trait.toParam(4), trait.toParam(5), trait.toParam(6)],
+ ],
+ expected: [
+ [1, 2, 3],
+ [4, 5, 6],
+ ],
+ },
+ {
+ input: [
+ [trait.toParam(1), trait.toParam(2), trait.toParam(3)],
+ [trait.toParam(4), trait.toParam(5), trait.toParam(6)],
+ [trait.toParam(7), trait.toParam(8), trait.toParam(9)],
+ ],
+ expected: [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ ],
+ },
+ {
+ input: [
+ [trait.toParam(1), trait.toParam(2), trait.toParam(3)],
+ [trait.toParam(4), trait.toParam(5), trait.toParam(6)],
+ [trait.toParam(7), trait.toParam(8), trait.toParam(9)],
+ [trait.toParam(10), trait.toParam(11), trait.toParam(12)],
+ ],
+ expected: [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ [10, 11, 12],
+ ],
+ },
+ {
+ input: [
+ [trait.toParam(1), trait.toParam(2), trait.toParam(3), trait.toParam(4)],
+ [trait.toParam(5), trait.toParam(6), trait.toParam(7), trait.toParam(8)],
+ ],
+ expected: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ ],
+ },
+ {
+ input: [
+ [trait.toParam(1), trait.toParam(2), trait.toParam(3), trait.toParam(4)],
+ [trait.toParam(5), trait.toParam(6), trait.toParam(7), trait.toParam(8)],
+ [trait.toParam(9), trait.toParam(10), trait.toParam(11), trait.toParam(12)],
+ ],
+ expected: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ ],
+ },
+ {
+ input: [
+ [trait.toParam(1), trait.toParam(2), trait.toParam(3), trait.toParam(4)],
+ [trait.toParam(5), trait.toParam(6), trait.toParam(7), trait.toParam(8)],
+ [trait.toParam(9), trait.toParam(10), trait.toParam(11), trait.toParam(12)],
+ [trait.toParam(13), trait.toParam(14), trait.toParam(15), trait.toParam(16)],
+ ],
+ expected: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ],
+ },
+
+ {
+ input: [
+ [trait.toParam([1, 2]), trait.toParam([2, 3])],
+ [trait.toParam([3, 4]), trait.toParam([4, 5])],
+ ],
+ expected: [
+ [
+ [1, 2],
+ [2, 3],
+ ],
+ [
+ [3, 4],
+ [4, 5],
+ ],
+ ],
+ },
+ {
+ input: [
+ [trait.toParam([1, 2]), trait.toParam([2, 3])],
+ [trait.toParam([3, 4]), trait.toParam([4, 5])],
+ [trait.toParam([5, 6]), trait.toParam([6, 7])],
+ ],
+ expected: [
+ [
+ [1, 2],
+ [2, 3],
+ ],
+ [
+ [3, 4],
+ [4, 5],
+ ],
+ [
+ [5, 6],
+ [6, 7],
+ ],
+ ],
+ },
+ {
+ input: [
+ [trait.toParam([1, 2]), trait.toParam([2, 3])],
+ [trait.toParam([3, 4]), trait.toParam([4, 5])],
+ [trait.toParam([5, 6]), trait.toParam([6, 7])],
+ [trait.toParam([7, 8]), trait.toParam([8, 9])],
+ ],
+ expected: [
+ [
+ [1, 2],
+ [2, 3],
+ ],
+ [
+ [3, 4],
+ [4, 5],
+ ],
+ [
+ [5, 6],
+ [6, 7],
+ ],
+ [
+ [7, 8],
+ [8, 9],
+ ],
+ ],
+ },
+ {
+ input: [
+ [trait.toParam([1, 2]), trait.toParam([2, 3]), trait.toParam([3, 4])],
+ [trait.toParam([4, 5]), trait.toParam([5, 6]), trait.toParam([6, 7])],
+ ],
+ expected: [
+ [
+ [1, 2],
+ [2, 3],
+ [3, 4],
+ ],
+ [
+ [4, 5],
+ [5, 6],
+ [6, 7],
+ ],
+ ],
+ },
+ {
+ input: [
+ [trait.toParam([1, 2]), trait.toParam([2, 3]), trait.toParam([3, 4])],
+ [trait.toParam([4, 5]), trait.toParam([5, 6]), trait.toParam([6, 7])],
+ [trait.toParam([7, 8]), trait.toParam([8, 9]), trait.toParam([9, 10])],
+ ],
+ expected: [
+ [
+ [1, 2],
+ [2, 3],
+ [3, 4],
+ ],
+ [
+ [4, 5],
+ [5, 6],
+ [6, 7],
+ ],
+ [
+ [7, 8],
+ [8, 9],
+ [9, 10],
+ ],
+ ],
+ },
+ {
+ input: [
+ [trait.toParam([1, 2]), trait.toParam([2, 3]), trait.toParam([3, 4])],
+ [trait.toParam([4, 5]), trait.toParam([5, 6]), trait.toParam([6, 7])],
+ [trait.toParam([7, 8]), trait.toParam([8, 9]), trait.toParam([9, 10])],
+ [trait.toParam([10, 11]), trait.toParam([11, 12]), trait.toParam([12, 13])],
+ ],
+ expected: [
+ [
+ [1, 2],
+ [2, 3],
+ [3, 4],
+ ],
+ [
+ [4, 5],
+ [5, 6],
+ [6, 7],
+ ],
+ [
+ [7, 8],
+ [8, 9],
+ [9, 10],
+ ],
+ [
+ [10, 11],
+ [11, 12],
+ [12, 13],
+ ],
+ ],
+ },
+ {
+ input: [
+ [
+ trait.toParam([1, 2]),
+ trait.toParam([2, 3]),
+ trait.toParam([3, 4]),
+ trait.toParam([4, 5]),
+ ],
+ [
+ trait.toParam([5, 6]),
+ trait.toParam([6, 7]),
+ trait.toParam([7, 8]),
+ trait.toParam([8, 9]),
+ ],
+ ],
+ expected: [
+ [
+ [1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ ],
+ [
+ [5, 6],
+ [6, 7],
+ [7, 8],
+ [8, 9],
+ ],
+ ],
+ },
+ {
+ input: [
+ [
+ trait.toParam([1, 2]),
+ trait.toParam([2, 3]),
+ trait.toParam([3, 4]),
+ trait.toParam([4, 5]),
+ ],
+ [
+ trait.toParam([5, 6]),
+ trait.toParam([6, 7]),
+ trait.toParam([7, 8]),
+ trait.toParam([8, 9]),
+ ],
+ [
+ trait.toParam([9, 10]),
+ trait.toParam([10, 11]),
+ trait.toParam([11, 12]),
+ trait.toParam([12, 13]),
+ ],
+ ],
+ expected: [
+ [
+ [1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ ],
+ [
+ [5, 6],
+ [6, 7],
+ [7, 8],
+ [8, 9],
+ ],
+ [
+ [9, 10],
+ [10, 11],
+ [11, 12],
+ [12, 13],
+ ],
+ ],
+ },
+ {
+ input: [
+ [
+ trait.toParam([1, 2]),
+ trait.toParam([2, 3]),
+ trait.toParam([3, 4]),
+ trait.toParam([4, 5]),
+ ],
+ [
+ trait.toParam([5, 6]),
+ trait.toParam([6, 7]),
+ trait.toParam([7, 8]),
+ trait.toParam([8, 9]),
+ ],
+ [
+ trait.toParam([9, 10]),
+ trait.toParam([10, 11]),
+ trait.toParam([11, 12]),
+ trait.toParam([12, 13]),
+ ],
+ [
+ trait.toParam([13, 14]),
+ trait.toParam([14, 15]),
+ trait.toParam([15, 16]),
+ trait.toParam([16, 17]),
+ ],
+ ],
+ expected: [
+ [
+ [1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ ],
+ [
+ [5, 6],
+ [6, 7],
+ [7, 8],
+ [8, 9],
+ ],
+ [
+ [9, 10],
+ [10, 11],
+ [11, 12],
+ [12, 13],
+ ],
+ [
+ [13, 14],
+ [14, 15],
+ [15, 16],
+ [16, 17],
+ ],
+ ],
+ },
+
+ // Mixed
+ {
+ input: [
+ [1, [2]],
+ [3, 4],
+ ],
+ expected: [
+ [1, 2],
+ [3, 4],
+ ],
+ },
+ {
+ input: [
+ [[1], [2]],
+ [[3], 4],
+ ],
+ expected: [
+ [1, 2],
+ [3, 4],
+ ],
+ },
+ {
+ input: [
+ [1, 2],
+ [trait.toParam([3]), 4],
+ ],
+ expected: [
+ [1, 2],
+ [3, 4],
+ ],
+ },
+ {
+ input: [
+ [[1], trait.toParam([2])],
+ [trait.toParam([3]), trait.toParam([4])],
+ ],
+ expected: [
+ [1, 2],
+ [3, 4],
+ ],
+ },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const input = map2DArray(t.params.input, e => trait.fromParam(e));
+ const expected = map2DArray(t.params.expected, e => trait.toInterval(e));
+
+ const got = trait.toMatrix(input);
+ t.expect(
+ objectEquals(got, expected),
+ `${t.params.trait}.toMatrix([${input}]) returned [${got}]. Expected [${expected}]`
+ );
+ });
+
+// API - Fundamental Error Intervals
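+// Informal summary of the cases below: trait.absoluteErrorInterval(x, e) is expected to behave
+// like [x - e, x + e], except that a subnormal center may additionally be flushed to 0.0, and a
+// non-finite or out-of-range endpoint degenerates the result to kUnboundedBounds.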
+
+interface AbsoluteErrorCase {
+ value: number;
+ error: number;
+ expected: number | IntervalBounds;
+}
+
+// Special values used for testing the absolute error interval.
+// A small absolute error value is a representable value x that is much smaller than 1.0,
+// such that 1.0 +/- x is still exactly representable.
+const kSmallAbsoluteErrorValue = {
+  f32: 2 ** -11, // The builtin cos and sin have an absolute error of 2**-11 for f32
+  f16: 2 ** -7, // The builtin cos and sin have an absolute error of 2**-7 for f16
+} as const;
+// A large absolute error value is a representable value x that is much smaller than the maximum
+// positive value, such that positive.max - x is still exactly representable.
+const kLargeAbsoluteErrorValue = {
+ f32: 2 ** 110, // f32.positive.max - 2**110 = 3.4028104e+38 = 0x7f7fffbf in f32
+ f16: 2 ** 10, // f16.positive.max - 2**10 = 64480 = 0x7bdf in f16
+} as const;
+// A subnormal absolute error value is a representable subnormal value x of the given type, chosen
+// so that positive.subnormal.min +/- x is still exactly representable.
+const kSubnormalAbsoluteErrorValue = {
+ f32: 2 ** -140, // f32 0x00000200
+ f16: 2 ** -20, // f16 0x0010
+} as const;
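+
+// Why these error values work (informal, assuming the standard IEEE-754 binary32/binary16
+// encodings):
+// - 1.0 has an ULP of 2**-23 in f32 and 2**-10 in f16, so 1.0 +/- 2**-11 and 1.0 +/- 2**-7 are
+//   exactly representable (e.g. Math.fround(1 + 2 ** -11) === 1 + 2 ** -11).
+// - positive.max has an ULP of 2**104 in f32 and 2**5 in f16, so subtracting 2**110 (64 ULP) or
+//   2**10 (32 ULP) lands exactly on 0x7f7fffbf / 0x7bdf.
+// - The subnormal error values are exact multiples of the smallest subnormal (2**-140 is
+//   512 * 2**-149 for f32, 2**-20 is 16 * 2**-24 for f16), so positive.subnormal.min +/- x stays
+//   representable.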
+
+g.test('absoluteErrorInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<AbsoluteErrorCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ const smallErr = kSmallAbsoluteErrorValue[p.trait];
+ const largeErr = kLargeAbsoluteErrorValue[p.trait];
+ const subnormalErr = kSubnormalAbsoluteErrorValue[p.trait];
+ // prettier-ignore
+ return [
+ // Edge Cases
+ // 1. Interval around infinity would be kUnboundedBounds
+ { value: constants.positive.infinity, error: 0, expected: kUnboundedBounds },
+ { value: constants.positive.infinity, error: largeErr, expected: kUnboundedBounds },
+ { value: constants.positive.infinity, error: 1, expected: kUnboundedBounds },
+ { value: constants.negative.infinity, error: 0, expected: kUnboundedBounds },
+ { value: constants.negative.infinity, error: largeErr, expected: kUnboundedBounds },
+ { value: constants.negative.infinity, error: 1, expected: kUnboundedBounds },
+ // 2. Interval around largest finite positive/negative
+ { value: constants.positive.max, error: 0, expected: constants.positive.max },
+ { value: constants.positive.max, error: largeErr, expected: kUnboundedBounds},
+ { value: constants.positive.max, error: constants.positive.max, expected: kUnboundedBounds},
+ { value: constants.negative.min, error: 0, expected: constants.negative.min },
+ { value: constants.negative.min, error: largeErr, expected: kUnboundedBounds},
+ { value: constants.negative.min, error: constants.positive.max, expected: kUnboundedBounds},
+        // 3. Interval around a small but normal center; the center should not get flushed.
+ { value: constants.positive.min, error: 0, expected: constants.positive.min },
+ { value: constants.positive.min, error: smallErr, expected: [constants.positive.min - smallErr, constants.positive.min + smallErr]},
+ { value: constants.positive.min, error: 1, expected: [constants.positive.min - 1, constants.positive.min + 1]},
+ { value: constants.negative.max, error: 0, expected: constants.negative.max },
+ { value: constants.negative.max, error: smallErr, expected: [constants.negative.max - smallErr, constants.negative.max + smallErr]},
+ { value: constants.negative.max, error: 1, expected: [constants.negative.max - 1, constants.negative.max + 1] },
+        // 4. Subnormals; the center can be flushed to 0.0
+ { value: constants.positive.subnormal.max, error: 0, expected: [0, constants.positive.subnormal.max] },
+ { value: constants.positive.subnormal.max, error: subnormalErr, expected: [-subnormalErr, constants.positive.subnormal.max + subnormalErr]},
+ { value: constants.positive.subnormal.max, error: smallErr, expected: [-smallErr, constants.positive.subnormal.max + smallErr]},
+ { value: constants.positive.subnormal.max, error: 1, expected: [-1, constants.positive.subnormal.max + 1]},
+ { value: constants.positive.subnormal.min, error: 0, expected: [0, constants.positive.subnormal.min] },
+ { value: constants.positive.subnormal.min, error: subnormalErr, expected: [-subnormalErr, constants.positive.subnormal.min + subnormalErr]},
+ { value: constants.positive.subnormal.min, error: smallErr, expected: [-smallErr, constants.positive.subnormal.min + smallErr]},
+ { value: constants.positive.subnormal.min, error: 1, expected: [-1, constants.positive.subnormal.min + 1] },
+ { value: constants.negative.subnormal.min, error: 0, expected: [constants.negative.subnormal.min, 0] },
+ { value: constants.negative.subnormal.min, error: subnormalErr, expected: [constants.negative.subnormal.min - subnormalErr, subnormalErr] },
+ { value: constants.negative.subnormal.min, error: smallErr, expected: [constants.negative.subnormal.min - smallErr, smallErr] },
+ { value: constants.negative.subnormal.min, error: 1, expected: [constants.negative.subnormal.min - 1, 1] },
+ { value: constants.negative.subnormal.max, error: 0, expected: [constants.negative.subnormal.max, 0] },
+ { value: constants.negative.subnormal.max, error: subnormalErr, expected: [constants.negative.subnormal.max - subnormalErr, subnormalErr] },
+ { value: constants.negative.subnormal.max, error: smallErr, expected: [constants.negative.subnormal.max - smallErr, smallErr] },
+ { value: constants.negative.subnormal.max, error: 1, expected: [constants.negative.subnormal.max - 1, 1] },
+
+        // 64-bit subnormals, expected to be treated as either 0.0 or the smallest subnormal of the target type.
+ { value: reinterpretU64AsF64(0x0000_0000_0000_0001n), error: 0, expected: [0, constants.positive.subnormal.min] },
+ { value: reinterpretU64AsF64(0x0000_0000_0000_0001n), error: subnormalErr, expected: [-subnormalErr, constants.positive.subnormal.min + subnormalErr] },
+        // Note that the f32 minimum subnormal is so much smaller than 1.0 that adding them together may yield exactly 1.0 in f64.
+ { value: reinterpretU64AsF64(0x0000_0000_0000_0001n), error: 1, expected: [-1, constants.positive.subnormal.min + 1] },
+ { value: reinterpretU64AsF64(0x0000_0000_0000_0002n), error: 0, expected: [0, constants.positive.subnormal.min] },
+ { value: reinterpretU64AsF64(0x0000_0000_0000_0002n), error: subnormalErr, expected: [-subnormalErr, constants.positive.subnormal.min + subnormalErr] },
+ { value: reinterpretU64AsF64(0x0000_0000_0000_0002n), error: 1, expected: [-1, constants.positive.subnormal.min + 1] },
+ { value: reinterpretU64AsF64(0x800f_ffff_ffff_ffffn), error: 0, expected: [constants.negative.subnormal.max, 0] },
+ { value: reinterpretU64AsF64(0x800f_ffff_ffff_ffffn), error: subnormalErr, expected: [constants.negative.subnormal.max - subnormalErr, subnormalErr] },
+ { value: reinterpretU64AsF64(0x800f_ffff_ffff_ffffn), error: 1, expected: [constants.negative.subnormal.max - 1, 1] },
+ { value: reinterpretU64AsF64(0x800f_ffff_ffff_fffen), error: 0, expected: [constants.negative.subnormal.max, 0] },
+ { value: reinterpretU64AsF64(0x800f_ffff_ffff_fffen), error: subnormalErr, expected: [constants.negative.subnormal.max - subnormalErr, subnormalErr] },
+ { value: reinterpretU64AsF64(0x800f_ffff_ffff_fffen), error: 1, expected: [constants.negative.subnormal.max - 1, 1] },
+
+ // Zero
+ { value: 0, error: 0, expected: 0 },
+ { value: 0, error: smallErr, expected: [-smallErr, smallErr] },
+ { value: 0, error: 1, expected: [-1, 1] },
+
+ // Two
+ { value: 2, error: 0, expected: 2 },
+ { value: 2, error: smallErr, expected: [2 - smallErr, 2 + smallErr] },
+ { value: 2, error: 1, expected: [1, 3] },
+ { value: -2, error: 0, expected: -2 },
+ { value: -2, error: smallErr, expected: [-2 - smallErr, -2 + smallErr] },
+ { value: -2, error: 1, expected: [-3, -1] },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.absoluteErrorInterval(t.params.value, t.params.error);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.absoluteErrorInterval(${t.params.value}, ${
+ t.params.error
+ }) returned ${got} (${got.begin.toExponential()}, ${got.end.toExponential()}). Expected ${expected}`
+ );
+ });
+
+interface CorrectlyRoundedCase {
+ value: number;
+ expected: number | IntervalBounds;
+}
+
+// Correctly rounded cases where the input values are exactly representable normal values of the target type
+// prettier-ignore
+const kCorrectlyRoundedNormalCases = {
+ f32: [
+ { value: 0, expected: [0, 0] },
+ { value: reinterpretU32AsF32(0x03800000), expected: reinterpretU32AsF32(0x03800000) },
+ { value: reinterpretU32AsF32(0x03800001), expected: reinterpretU32AsF32(0x03800001) },
+ { value: reinterpretU32AsF32(0x83800000), expected: reinterpretU32AsF32(0x83800000) },
+ { value: reinterpretU32AsF32(0x83800001), expected: reinterpretU32AsF32(0x83800001) },
+ ] as CorrectlyRoundedCase[],
+ f16: [
+ { value: 0, expected: [0, 0] },
+ { value: reinterpretU16AsF16(0x0c00), expected: reinterpretU16AsF16(0x0c00) },
+ { value: reinterpretU16AsF16(0x0c01), expected: reinterpretU16AsF16(0x0c01) },
+ { value: reinterpretU16AsF16(0x8c00), expected: reinterpretU16AsF16(0x8c00) },
+ { value: reinterpretU16AsF16(0x8c01), expected: reinterpretU16AsF16(0x8c01) },
+ ] as CorrectlyRoundedCase[],
+} as const;
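+
+// For reference (informal decoding, assuming the standard IEEE-754 bit layouts): 0x03800000 is
+// the f32 normal value 2**-120 and 0x03800001 is the next f32 above it; 0x0c00 is the f16 normal
+// value 2**-12 and 0x0c01 is the next f16 above it. The 0x8... variants are their negations. All
+// of these are exactly representable, so correctlyRoundedInterval is expected to return a point
+// interval for each of them.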
+
+// 64-bit normals that fall between two adjacent normal values of the target type
+const kCorrectlyRoundedF64NormalCases = [
+ {
+ value: reinterpretU64AsF64(0x3ff0_0000_0000_0001n),
+ expected: {
+ f32: [reinterpretU32AsF32(0x3f800000), reinterpretU32AsF32(0x3f800001)],
+ f16: [reinterpretU16AsF16(0x3c00), reinterpretU16AsF16(0x3c01)],
+ },
+ },
+ {
+ value: reinterpretU64AsF64(0x3ff0_0000_0000_0002n),
+ expected: {
+ f32: [reinterpretU32AsF32(0x3f800000), reinterpretU32AsF32(0x3f800001)],
+ f16: [reinterpretU16AsF16(0x3c00), reinterpretU16AsF16(0x3c01)],
+ },
+ },
+ {
+ value: reinterpretU64AsF64(0x3ff0_0800_0000_0010n),
+ expected: {
+ f32: [reinterpretU32AsF32(0x3f804000), reinterpretU32AsF32(0x3f804001)],
+ f16: [reinterpretU16AsF16(0x3c02), reinterpretU16AsF16(0x3c03)],
+ },
+ },
+ {
+ value: reinterpretU64AsF64(0x3ff0_1000_0000_0020n),
+ expected: {
+ f32: [reinterpretU32AsF32(0x3f808000), reinterpretU32AsF32(0x3f808001)],
+ f16: [reinterpretU16AsF16(0x3c04), reinterpretU16AsF16(0x3c05)],
+ },
+ },
+ {
+ value: reinterpretU64AsF64(0xbff0_0000_0000_0001n),
+ expected: {
+ f32: [reinterpretU32AsF32(0xbf800001), reinterpretU32AsF32(0xbf800000)],
+ f16: [reinterpretU16AsF16(0xbc01), reinterpretU16AsF16(0xbc00)],
+ },
+ },
+ {
+ value: reinterpretU64AsF64(0xbff0_0000_0000_0002n),
+ expected: {
+ f32: [reinterpretU32AsF32(0xbf800001), reinterpretU32AsF32(0xbf800000)],
+ f16: [reinterpretU16AsF16(0xbc01), reinterpretU16AsF16(0xbc00)],
+ },
+ },
+ {
+ value: reinterpretU64AsF64(0xbff0_0800_0000_0010n),
+ expected: {
+ f32: [reinterpretU32AsF32(0xbf804001), reinterpretU32AsF32(0xbf804000)],
+ f16: [reinterpretU16AsF16(0xbc03), reinterpretU16AsF16(0xbc02)],
+ },
+ },
+ {
+ value: reinterpretU64AsF64(0xbff0_1000_0000_0020n),
+ expected: {
+ f32: [reinterpretU32AsF32(0xbf808001), reinterpretU32AsF32(0xbf808000)],
+ f16: [reinterpretU16AsF16(0xbc05), reinterpretU16AsF16(0xbc04)],
+ },
+ },
+] as const;
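+
+// For reference (informal, assuming the standard IEEE-754 encodings): 0x3ff0_0000_0000_0001 is
+// the f64 value immediately above 1.0, i.e. 1 + 2**-52. A correctly rounded conversion may round
+// it either down to 1.0 (f32 0x3f800000, f16 0x3c00) or up to the next representable value
+// (f32 0x3f800001 = 1 + 2**-23, f16 0x3c01 = 1 + 2**-10), which is exactly the two-element bound
+// listed above. The negative cases mirror this around -1.0.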
+
+g.test('correctlyRoundedInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<CorrectlyRoundedCase>(p => {
+ const constants = FP[p.trait].constants();
+ // prettier-ignore
+ return [
+ // Edge Cases
+ { value: constants.positive.infinity, expected: kUnboundedBounds },
+ { value: constants.negative.infinity, expected: kUnboundedBounds },
+ { value: constants.positive.max, expected: constants.positive.max },
+ { value: constants.negative.min, expected: constants.negative.min },
+ { value: constants.positive.min, expected: constants.positive.min },
+ { value: constants.negative.max, expected: constants.negative.max },
+
+ // Subnormals
+ { value: constants.positive.subnormal.min, expected: [0, constants.positive.subnormal.min] },
+ { value: constants.positive.subnormal.max, expected: [0, constants.positive.subnormal.max] },
+ { value: constants.negative.subnormal.min, expected: [constants.negative.subnormal.min, 0] },
+ { value: constants.negative.subnormal.max, expected: [constants.negative.subnormal.max, 0] },
+
+        // 64-bit subnormals should be rounded down to 0 or up to the smallest subnormal
+ { value: reinterpretU64AsF64(0x0000_0000_0000_0001n), expected: [0, constants.positive.subnormal.min] },
+ { value: reinterpretU64AsF64(0x0000_0000_0000_0002n), expected: [0, constants.positive.subnormal.min] },
+ { value: reinterpretU64AsF64(0x800f_ffff_ffff_ffffn), expected: [constants.negative.subnormal.max, 0] },
+ { value: reinterpretU64AsF64(0x800f_ffff_ffff_fffen), expected: [constants.negative.subnormal.max, 0] },
+
+ // Normals
+ ...kCorrectlyRoundedNormalCases[p.trait],
+
+        // 64-bit normals that fall between two adjacent normal values of the target type
+ ...kCorrectlyRoundedF64NormalCases.map(t => { return {value: t.value, expected: t.expected[p.trait]} as CorrectlyRoundedCase;}),
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.correctlyRoundedInterval(t.params.value);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.correctlyRoundedInterval(${t.params.value}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+interface ULPCase {
+ value: number;
+ num_ulp: number;
+ expected: number | IntervalBounds;
+}
+
+// Special values used for testing ULP error interval
+const kULPErrorValue = {
+ f32: 4096, // 4096 ULP is required for atan accuracy on f32
+ f16: 5, // 5 ULP is required for atan accuracy on f16
+};
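+
+// Reminder (informal): 1 ULP at x is roughly the spacing between adjacent representable values of
+// the trait around x, e.g. 2**-23 at 1.0 for f32 and 2**-10 at 1.0 for f16; in the subnormal
+// range the spacing is the smallest subnormal itself (2**-149 for f32, 2**-24 for f16).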
+
+g.test('ulpInterval')
+ .params(u =>
+ u
+ .combine('trait', ['abstract', 'f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ULPCase>(p => {
+ const trait = kFPTraitForULP[p.trait];
+ const constants = FP[trait].constants();
+ const ULPValue = kULPErrorValue[trait];
+ const plusOneULP = kPlusOneULPFunctions[trait];
+ const plusNULP = kPlusNULPFunctions[trait];
+ const minusOneULP = kMinusOneULPFunctions[trait];
+ const minusNULP = kMinusNULPFunctions[trait];
+ // prettier-ignore
+ return [
+ // Edge Cases
+ { value: constants.positive.infinity, num_ulp: 0, expected: kUnboundedBounds },
+ { value: constants.positive.infinity, num_ulp: 1, expected: kUnboundedBounds },
+ { value: constants.positive.infinity, num_ulp: ULPValue, expected: kUnboundedBounds },
+ { value: constants.negative.infinity, num_ulp: 0, expected: kUnboundedBounds },
+ { value: constants.negative.infinity, num_ulp: 1, expected: kUnboundedBounds },
+ { value: constants.negative.infinity, num_ulp: ULPValue, expected: kUnboundedBounds },
+ { value: constants.positive.max, num_ulp: 0, expected: constants.positive.max },
+ { value: constants.positive.max, num_ulp: 1, expected: kUnboundedBounds },
+ { value: constants.positive.max, num_ulp: ULPValue, expected: kUnboundedBounds },
+ { value: constants.positive.min, num_ulp: 0, expected: constants.positive.min },
+ { value: constants.positive.min, num_ulp: 1, expected: [0, plusOneULP(constants.positive.min)] },
+ { value: constants.positive.min, num_ulp: ULPValue, expected: [0, plusNULP(constants.positive.min, ULPValue)] },
+ { value: constants.negative.min, num_ulp: 0, expected: constants.negative.min },
+ { value: constants.negative.min, num_ulp: 1, expected: kUnboundedBounds },
+ { value: constants.negative.min, num_ulp: ULPValue, expected: kUnboundedBounds },
+ { value: constants.negative.max, num_ulp: 0, expected: constants.negative.max },
+ { value: constants.negative.max, num_ulp: 1, expected: [minusOneULP(constants.negative.max), 0] },
+ { value: constants.negative.max, num_ulp: ULPValue, expected: [minusNULP(constants.negative.max, ULPValue), 0] },
+
+ // Subnormals
+ { value: constants.positive.subnormal.max, num_ulp: 0, expected: [0, constants.positive.subnormal.max] },
+ { value: constants.positive.subnormal.max, num_ulp: 1, expected: [minusOneULP(0), plusOneULP(constants.positive.subnormal.max)] },
+ { value: constants.positive.subnormal.max, num_ulp: ULPValue, expected: [minusNULP(0, ULPValue), plusNULP(constants.positive.subnormal.max, ULPValue)] },
+ { value: constants.positive.subnormal.min, num_ulp: 0, expected: [0, constants.positive.subnormal.min] },
+ { value: constants.positive.subnormal.min, num_ulp: 1, expected: [minusOneULP(0), plusOneULP(constants.positive.subnormal.min)] },
+ { value: constants.positive.subnormal.min, num_ulp: ULPValue, expected: [minusNULP(0, ULPValue), plusNULP(constants.positive.subnormal.min, ULPValue)] },
+ { value: constants.negative.subnormal.min, num_ulp: 0, expected: [constants.negative.subnormal.min, 0] },
+ { value: constants.negative.subnormal.min, num_ulp: 1, expected: [minusOneULP(constants.negative.subnormal.min), plusOneULP(0)] },
+ { value: constants.negative.subnormal.min, num_ulp: ULPValue, expected: [minusNULP(constants.negative.subnormal.min, ULPValue), plusNULP(0, ULPValue)] },
+ { value: constants.negative.subnormal.max, num_ulp: 0, expected: [constants.negative.subnormal.max, 0] },
+ { value: constants.negative.subnormal.max, num_ulp: 1, expected: [minusOneULP(constants.negative.subnormal.max), plusOneULP(0)] },
+ { value: constants.negative.subnormal.max, num_ulp: ULPValue, expected: [minusNULP(constants.negative.subnormal.max, ULPValue), plusNULP(0, ULPValue)] },
+
+ // 64-bit subnormals
+ { value: reinterpretU64AsF64(0x0000_0000_0000_0001n), num_ulp: 0, expected: [0, constants.positive.subnormal.min] },
+ { value: reinterpretU64AsF64(0x0000_0000_0000_0001n), num_ulp: 1, expected: [minusOneULP(0), plusOneULP(constants.positive.subnormal.min)] },
+ { value: reinterpretU64AsF64(0x0000_0000_0000_0001n), num_ulp: ULPValue, expected: [minusNULP(0, ULPValue), plusNULP(constants.positive.subnormal.min, ULPValue)] },
+ { value: reinterpretU64AsF64(0x0000_0000_0000_0002n), num_ulp: 0, expected: [0, constants.positive.subnormal.min] },
+ { value: reinterpretU64AsF64(0x0000_0000_0000_0002n), num_ulp: 1, expected: [minusOneULP(0), plusOneULP(constants.positive.subnormal.min)] },
+ { value: reinterpretU64AsF64(0x0000_0000_0000_0002n), num_ulp: ULPValue, expected: [minusNULP(0, ULPValue), plusNULP(constants.positive.subnormal.min, ULPValue)] },
+ { value: reinterpretU64AsF64(0x800f_ffff_ffff_ffffn), num_ulp: 0, expected: [constants.negative.subnormal.max, 0] },
+ { value: reinterpretU64AsF64(0x800f_ffff_ffff_ffffn), num_ulp: 1, expected: [minusOneULP(constants.negative.subnormal.max), plusOneULP(0)] },
+ { value: reinterpretU64AsF64(0x800f_ffff_ffff_ffffn), num_ulp: ULPValue, expected: [minusNULP(constants.negative.subnormal.max, ULPValue), plusNULP(0, ULPValue)] },
+ { value: reinterpretU64AsF64(0x800f_ffff_ffff_fffen), num_ulp: 0, expected: [constants.negative.subnormal.max, 0] },
+ { value: reinterpretU64AsF64(0x800f_ffff_ffff_fffen), num_ulp: 1, expected: [minusOneULP(constants.negative.subnormal.max), plusOneULP(0)] },
+ { value: reinterpretU64AsF64(0x800f_ffff_ffff_fffen), num_ulp: ULPValue, expected: [minusNULP(constants.negative.subnormal.max, ULPValue), plusNULP(0, ULPValue)] },
+
+ // Zero
+ { value: 0, num_ulp: 0, expected: 0 },
+ { value: 0, num_ulp: 1, expected: [minusOneULP(0), plusOneULP(0)] },
+ { value: 0, num_ulp: ULPValue, expected: [minusNULP(0, ULPValue), plusNULP(0, ULPValue)] },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.ulpInterval(t.params.value, t.params.num_ulp);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.ulpInterval(${t.params.value}, ${t.params.num_ulp}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// API - Acceptance Intervals
+
+// List of JS numbers that are frequently used in test cases but are not exactly representable in f32 or f16.
+type ConstantNumberFrequentlyUsedInCases = '0.1' | '-0.1' | '1.9' | '-1.9';
+
+// Correctly rounded expectations for the frequently used JS Number values in test cases
+const kConstantCorrectlyRoundedExpectation = {
+ f32: {
+ // 0.1 falls between f32 0x3DCCCCCC and 0x3DCCCCCD
+ '0.1': [reinterpretU32AsF32(0x3dcccccc), reinterpretU32AsF32(0x3dcccccd)],
+ // -0.1 falls between f32 0xBDCCCCCD and 0xBDCCCCCC
+ '-0.1': [reinterpretU32AsF32(0xbdcccccd), reinterpretU32AsF32(0xbdcccccc)],
+ // 1.9 falls between f32 0x3FF33333 and 0x3FF33334
+ '1.9': [reinterpretU32AsF32(0x3ff33333), reinterpretU32AsF32(0x3ff33334)],
+ // -1.9 falls between f32 0xBFF33334 and 0xBFF33333
+ '-1.9': [reinterpretU32AsF32(0xbff33334), reinterpretU32AsF32(0xbff33333)],
+ } as { [value in ConstantNumberFrequentlyUsedInCases]: IntervalBounds },
+ f16: {
+ // 0.1 falls between f16 0x2E66 and 0x2E67
+ '0.1': [reinterpretU16AsF16(0x2e66), reinterpretU16AsF16(0x2e67)],
+ // -0.1 falls between f16 0xAE67 and 0xAE66
+ '-0.1': [reinterpretU16AsF16(0xae67), reinterpretU16AsF16(0xae66)],
+ // 1.9 falls between f16 0x3F99 and 0x3F9A
+ '1.9': [reinterpretU16AsF16(0x3f99), reinterpretU16AsF16(0x3f9a)],
+    // -1.9 falls between f16 0xBF9A and 0xBF99
+ '-1.9': [reinterpretU16AsF16(0xbf9a), reinterpretU16AsF16(0xbf99)],
+ } as { [value in ConstantNumberFrequentlyUsedInCases]: IntervalBounds },
+  // Since abstract float is actually f64 and JS number is also f64, the JS number value maps to
+  // an identical abstract float value without rounding.
+ abstract: {
+ '0.1': 0.1,
+ '-0.1': -0.1,
+ '1.9': 1.9,
+ '-1.9': -1.9,
+ } as { [value in ConstantNumberFrequentlyUsedInCases]: number },
+} as const;
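+
+// A minimal sketch (not part of the CTS; `f32BoundsOf` is a hypothetical helper) of how the
+// correctly rounded f32 bounds above can be derived for a positive, normal JS number: round
+// to f32 (Math.fround semantics), then step the raw bit pattern one ULP back towards the input.
+function f32BoundsOf(x: number): [number, number] {
+  const view = new DataView(new ArrayBuffer(4));
+  view.setFloat32(0, x);
+  const nearest = view.getFloat32(0); // nearest f32 to x (round-to-nearest-even)
+  if (nearest === x) {
+    return [nearest, nearest]; // exactly representable
+  }
+  // For positive normal values, adjacent f32 values have adjacent bit patterns.
+  view.setUint32(0, view.getUint32(0) + (nearest < x ? 1 : -1));
+  const neighbour = view.getFloat32(0);
+  return nearest < x ? [nearest, neighbour] : [neighbour, nearest];
+}
+// e.g. f32BoundsOf(0.1) yields [reinterpretU32AsF32(0x3dcccccc), reinterpretU32AsF32(0x3dcccccd)],
+// matching the '0.1' entry of the f32 table above.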
+
+interface ScalarToIntervalCase {
+ input: number;
+ expected: number | IntervalBounds;
+}
+
+g.test('absInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const constants = FP[p.trait].constants();
+ // prettier-ignore
+ return [
+ // Common usages
+ { input: 1, expected: 1 },
+ { input: -1, expected: 1 },
+ // abs(+/-0.1) is correctly rounded interval of 0.1
+ { input: 0.1, expected: kConstantCorrectlyRoundedExpectation[p.trait]['0.1']},
+ { input: -0.1, expected: kConstantCorrectlyRoundedExpectation[p.trait]['0.1']},
+ // abs(+/-1.9) is correctly rounded interval of 1.9
+ { input: 1.9, expected: kConstantCorrectlyRoundedExpectation[p.trait]['1.9']},
+ { input: -1.9, expected: kConstantCorrectlyRoundedExpectation[p.trait]['1.9']},
+
+ // Edge cases
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ { input: constants.positive.max, expected: constants.positive.max },
+ { input: constants.positive.min, expected: constants.positive.min },
+ { input: constants.negative.min, expected: constants.positive.max },
+ { input: constants.negative.max, expected: constants.positive.min },
+
+ // Subnormals
+ { input: constants.positive.subnormal.max, expected: [0, constants.positive.subnormal.max] },
+ { input: constants.positive.subnormal.min, expected: [0, constants.positive.subnormal.min] },
+ { input: constants.negative.subnormal.min, expected: [0, constants.positive.subnormal.max] },
+ { input: constants.negative.subnormal.max, expected: [0, constants.positive.subnormal.min] },
+
+ // Zero
+ { input: 0, expected: 0 },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.absInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.absInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// Acos expectation intervals are bounded by both the inherited atan2(sqrt(1.0 - x*x), x) error and absolute error.
+// Atan2 introduces 4096 ULP of error for f32 and 5 ULP for f16, and sqrt is inherited from 1.0/inverseSqrt.
+// prettier-ignore
+const kAcosIntervalCases = {
+ f32: [
+ { input: kPlusOneULPFunctions['f32'](-1), expected: [reinterpretU32AsF32(0x4048fa32), reinterpretU32AsF32(0x40491bdb)] }, // ~π
+ { input: -1/2, expected: [reinterpretU32AsF32(0x4005fa90), reinterpretU32AsF32(0x40061a93)] }, // ~2π/3
+ { input: 1/2, expected: [reinterpretU32AsF32(0x3f85fa8f), reinterpretU32AsF32(0x3f861a94)] }, // ~π/3
+    // Input case producing the smallest well-defined expected result; the expectation interval is
+    // bounded by ULP (lower boundary) and absolute error (upper boundary).
+ // f32 1.0-1ULP=0x3F7FFFFF=0.9999999403953552,
+ // acos(0.9999999403953552)=3.4526698478747995220159699019994e-4 rounded to f32 0x39B504F3 or 0x39B504F4,
+ // absolute error interval upper boundary 0x39B504F4+6.77e-5=0.00041296700619608164 i.e. f64 0x3F3B_106F_C933_4FB9.
+ { input: kMinusOneULPFunctions['f32'](1), expected: [reinterpretU64AsF64(0x3f2f_fdff_6000_0000n), reinterpretU64AsF64(0x3f3b_106f_c933_4fb9n)] }, // ~0.0003
+ ] as ScalarToIntervalCase[],
+ f16: [
+ { input: kPlusOneULPFunctions['f16'](-1), expected: [reinterpretU16AsF16(0x4233), reinterpretU16AsF16(0x4243)] }, // ~π
+ { input: -1/2, expected: [reinterpretU16AsF16(0x402a), reinterpretU16AsF16(0x4037)] }, // ~2π/3
+ { input: 1/2, expected: [reinterpretU16AsF16(0x3c29), reinterpretU16AsF16(0x3c38)] }, // ~π/3
+    // Input case producing the smallest well-defined expected result; the expectation interval is
+    // bounded by ULP (lower boundary) and absolute error (upper boundary).
+ // f16 1.0-1ULP=0x3BFF=0.99951171875,
+ // acos(0.99951171875)=0.03125127170547389912035676677648 rounded to f16 0x2800 or 0x2801,
+ // absolute error interval upper boundary 0x2801+3.91e-3=0.035190517578125 i.e. f64 0x3FA2_047D_D441_3554.
+ { input: kMinusOneULPFunctions['f16'](1), expected: [reinterpretU16AsF16(0x259d), reinterpretU64AsF64(0x3fa2_047d_d441_3554n)] }, // ~0.03
+ ] as ScalarToIntervalCase[],
+} as const;
+
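+// These expectations already fold the inherited and absolute errors into the hard-coded bounds,
+// so the acosInterval test below compares against them directly, without applying extra error.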
+g.test('acosInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ // The acceptance interval @ x = -1 and 1 is kUnboundedBounds, because
+        // sqrt(1 - x*x) = sqrt(0), and sqrt is defined in terms of inverseSqrt.
+ // The acceptance interval @ x = 0 is kUnboundedBounds, because atan2 is not
+ // well-defined/implemented at 0.
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ { input: constants.negative.min, expected: kUnboundedBounds },
+ { input: -1, expected: kUnboundedBounds },
+ { input: 0, expected: kUnboundedBounds },
+ { input: 1, expected: kUnboundedBounds },
+ { input: constants.positive.max, expected: kUnboundedBounds },
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+
+        // Cases that are bounded by absolute error and inherited from atan2(sqrt(1-x*x), x). Note
+        // that even when x is very close to 1.0 and the expected result is close to 0.0, the
+        // expected interval is still bounded by ULP as well as absolute error; specifically, the
+        // lower boundary comes from ULP error and the upper boundary comes from absolute error in those cases.
+ ...kAcosIntervalCases[p.trait],
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.acosInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.acosInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// Some of these are hard coded, since the error intervals are difficult to express in a closed
+// human-readable form due to the inherited nature of the errors.
+// prettier-ignore
+const kAcoshAlternativeIntervalCases = {
+ f32: [
+ { input: 1.1, expected: [reinterpretU64AsF64(0x3fdc_6368_8000_0000n), reinterpretU64AsF64(0x3fdc_636f_2000_0000n)] }, // ~0.443..., differs from the primary in the later digits
+ { input: 10, expected: [reinterpretU64AsF64(0x4007_f21e_4000_0000n), reinterpretU64AsF64(0x4007_f21f_6000_0000n)] }, // ~2.993...
+ ] as ScalarToIntervalCase[],
+ f16: [
+ { input: 1.1, expected: [reinterpretU64AsF64(0x3fdb_bc00_0000_0000n), reinterpretU64AsF64(0x3fdd_1000_0000_0000n)] }, // ~0.443..., differs from the primary in the later digits
+ { input: 10, expected: [reinterpretU64AsF64(0x4007_e000_0000_0000n), reinterpretU64AsF64(0x4008_0400_0000_0000n)] }, // ~2.993...
+ ] as ScalarToIntervalCase[],
+} as const;
+
+g.test('acoshAlternativeInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ ...kAcoshAlternativeIntervalCases[p.trait],
+
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ { input: constants.negative.min, expected: kUnboundedBounds },
+ { input: -1, expected: kUnboundedBounds },
+ { input: 0, expected: kUnboundedBounds },
+ { input: 1, expected: kUnboundedBounds }, // 1/0 occurs in inverseSqrt in this formulation
+ { input: constants.positive.max, expected: kUnboundedBounds },
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.acoshAlternativeInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.acoshAlternativeInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// Some of these are hard coded, since the error intervals are difficult to express in a closed
+// human-readable form due to the inherited nature of the errors.
+// prettier-ignore
+const kAcoshPrimaryIntervalCases = {
+ f32: [
+ { input: 1.1, expected: [reinterpretU64AsF64(0x3fdc_6368_2000_0000n), reinterpretU64AsF64(0x3fdc_636f_8000_0000n)] }, // ~0.443..., differs from the alternative in the later digits
+ { input: 10, expected: [reinterpretU64AsF64(0x4007_f21e_4000_0000n), reinterpretU64AsF64(0x4007_f21f_6000_0000n)] }, // ~2.993...
+ ] as ScalarToIntervalCase[],
+ f16: [
+    { input: 1.1, expected: [reinterpretU64AsF64(0x3fdb_bc00_0000_0000n), reinterpretU64AsF64(0x3fdd_1c00_0000_0000n)] }, // ~0.443..., differs from the alternative in the later digits
+ { input: 10, expected: [reinterpretU64AsF64(0x4007_e000_0000_0000n), reinterpretU64AsF64(0x4008_0400_0000_0000n)] }, // ~2.993...
+ ] as ScalarToIntervalCase[],
+} as const;
+
+g.test('acoshPrimaryInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ ...kAcoshPrimaryIntervalCases[p.trait],
+
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ { input: constants.negative.min, expected: kUnboundedBounds },
+ { input: -1, expected: kUnboundedBounds },
+ { input: 0, expected: kUnboundedBounds },
+ { input: 1, expected: kUnboundedBounds }, // 1/0 occurs in inverseSqrt in this formulation
+ { input: constants.positive.max, expected: kUnboundedBounds },
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.acoshPrimaryInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.acoshPrimaryInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// Asin cases that are bounded by the inherited atan2(x, sqrt(1.0 - x*x)) error rather than absolute error.
+// Atan2 introduces 4096 ULP of error for f32 and 5 ULP for f16, and sqrt is inherited from 1.0/inverseSqrt.
+// prettier-ignore
+const kAsinIntervalInheritedCases = {
+ f32: [
+ { input: -1/2, expected: [reinterpretU32AsF32(0xbf061a96), reinterpretU32AsF32(0xbf05fa8e)] }, // ~-π/6
+ { input: 1/2, expected: [reinterpretU32AsF32(0x3f05fa8e), reinterpretU32AsF32(0x3f061a96)] }, // ~π/6
+ ] as ScalarToIntervalCase[],
+ f16: [
+ { input: -1/2, expected: [reinterpretU16AsF16(0xb83a), reinterpretU16AsF16(0xb827)] }, // ~-π/6
+ { input: 1/2, expected: [reinterpretU16AsF16(0x3827), reinterpretU16AsF16(0x383a)] }, // ~π/6
+ ] as ScalarToIntervalCase[],
+} as const;
+
+g.test('asinInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ const abs_error = p.trait === 'f32' ? 6.77e-5 : 3.91e-3;
+ // prettier-ignore
+ return [
+ // The acceptance interval @ x = -1 and 1 is kUnboundedBounds, because
+        // sqrt(1 - x*x) = sqrt(0), and sqrt is defined in terms of inverseSqrt.
+ // The acceptance interval @ x = 0 is kUnboundedBounds, because atan2 is not
+ // well-defined/implemented at 0.
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ { input: constants.negative.min, expected: kUnboundedBounds },
+ { input: -1, expected: kUnboundedBounds },
+ // Subnormal input may get flushed to 0, and result in kUnboundedBounds.
+ { input: constants.negative.subnormal.min, expected: kUnboundedBounds },
+ { input: 0, expected: kUnboundedBounds },
+ { input: constants.positive.subnormal.max, expected: kUnboundedBounds },
+ { input: 1, expected: kUnboundedBounds },
+ { input: constants.positive.max, expected: kUnboundedBounds },
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+
+        // When the input is near 0, the expected result is bounded by absolute error rather than
+        // ULP error. Away from 0, the inherited atan2 error should be larger.
+ { input: constants.negative.max, expected: trait.absoluteErrorInterval(Math.asin(constants.negative.max), abs_error).bounds() }, // ~0
+ { input: constants.positive.min, expected: trait.absoluteErrorInterval(Math.asin(constants.positive.min), abs_error).bounds() }, // ~0
+
+ // Cases that inherited from atan2(x, sqrt(1-x*x))
+ ...kAsinIntervalInheritedCases[p.trait],
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.asinInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.asinInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// Some of these are hard coded, since the error intervals are difficult to express in a closed
+// human-readable form due to the inherited nature of the errors.
+// prettier-ignore
+const kAsinhIntervalCases = {
+ f32: [
+ { input: -1, expected: [reinterpretU64AsF64(0xbfec_343a_8000_0000n), reinterpretU64AsF64(0xbfec_3432_8000_0000n)] }, // ~-0.88137...
+ { input: 0, expected: [reinterpretU64AsF64(0xbeaa_0000_2000_0000n), reinterpretU64AsF64(0x3eb1_ffff_d000_0000n)] }, // ~0
+ { input: 1, expected: [reinterpretU64AsF64(0x3fec_3435_4000_0000n), reinterpretU64AsF64(0x3fec_3437_8000_0000n)] }, // ~0.88137...
+ ] as ScalarToIntervalCase[],
+ f16: [
+ { input: -1, expected: [reinterpretU64AsF64(0xbfec_b800_0000_0000n), reinterpretU64AsF64(0xbfeb_b800_0000_0000n)] }, // ~-0.88137...
+ { input: 0, expected: [reinterpretU64AsF64(0xbf85_0200_0000_0000n), reinterpretU64AsF64(0x3f89_fa00_0000_0000n)] }, // ~0
+ { input: 1, expected: [reinterpretU64AsF64(0x3fec_1000_0000_0000n), reinterpretU64AsF64(0x3fec_5400_0000_0000n)] }, // ~0.88137...
+ ] as ScalarToIntervalCase[],
+} as const;
+
+g.test('asinhInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ ...kAsinhIntervalCases[p.trait],
+
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ { input: constants.negative.min, expected: kUnboundedBounds },
+ { input: constants.positive.max, expected: kUnboundedBounds },
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.asinhInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.asinhInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// prettier-ignore
+const kAtanIntervalCases = {
+ f32: [
+ // x=-√3=-1.7320508... quantized to f32 0xBFDDB3D7,
+ // atan(0xBFDDB3D7)=-1.0471975434247854181546378047331 ~ -pi/3 rounded to f32 0xBF860A92 or 0xBF860A91,
+ // kValue.f32.negative.pi.third is 0xBF860A92.
+ { input: reinterpretU32AsF32(0xbfddb3d7), expected: [kValue.f32.negative.pi.third, kPlusOneULPFunctions['f32'](kValue.f32.negative.pi.third)] },
+ // atan(-1)=-0.78539816339744830961566084581988 ~ -pi/4 rounded to f32 0xBF490FDB or 0xBF490FDA,
+ // kValue.f32.negative.pi.quarter is 0xBF490FDB.
+ { input: -1, expected: [kValue.f32.negative.pi.quarter, kPlusOneULPFunctions['f32'](kValue.f32.negative.pi.quarter)] },
+ // x=-1/√3=-0.577350269... quantized to f32 0xBF13CD3A,
+ // atan(0xBF13CD3A)=-0.52359876782648663982267459646249 ~ -pi/6 rounded to f32 0xBF060A92 or 0xBF060A91,
+ // kValue.f32.negative.pi.sixth is 0xBF060A92.
+ { input: reinterpretU32AsF32(0xbf13cd3a), expected: [kValue.f32.negative.pi.sixth, kPlusOneULPFunctions['f32'](kValue.f32.negative.pi.sixth)] },
+ // x=1/√3=0.577350269... quantized to f32 0x3F13CD3A.
+ { input: reinterpretU32AsF32(0x3f13cd3a), expected: [kMinusOneULPFunctions['f32'](kValue.f32.positive.pi.sixth), kValue.f32.positive.pi.sixth] },
+ { input: 1, expected: [kMinusOneULPFunctions['f32'](kValue.f32.positive.pi.quarter), kValue.f32.positive.pi.quarter] },
+ // x=√3=1.7320508... quantized to f32 0x3FDDB3D7.
+ { input: reinterpretU32AsF32(0x3fddb3d7), expected: [kMinusOneULPFunctions['f32'](kValue.f32.positive.pi.third), kValue.f32.positive.pi.third] },
+ ] as ScalarToIntervalCase[],
+ f16: [
+ // x=-√3=-1.7320508... quantized to f16 0xBEED,
+ // atan(0xBEED)=-1.0470461377318847079113932677171 ~ -pi/3 rounded to f16 0xBC31 or 0xBC30,
+ // kValue.f16.negative.pi.third is 0xBC30.
+ { input: reinterpretU16AsF16(0xbeed), expected: [kMinusOneULPFunctions['f16'](kValue.f16.negative.pi.third), kValue.f16.negative.pi.third] },
+ // atan(-1)=-0.78539816339744830961566084581988 ~ -pi/4 rounded to f16 0xBA49 or 0xBA48.
+ // kValue.f16.negative.pi.quarter is 0xBA48.
+ { input: -1, expected: [kMinusOneULPFunctions['f16'](kValue.f16.negative.pi.quarter), kValue.f16.negative.pi.quarter] },
+ // x=-1/√3=-0.577350269... quantized to f16 0xB89E,
+ // atan(0xB89E)=-0.52344738860166563645762619364966 ~ -pi/6 rounded to f16 0xB831 or 0xB830,
+ // kValue.f16.negative.pi.sixth is 0xB830.
+ { input: reinterpretU16AsF16(0xb89e), expected: [kMinusOneULPFunctions['f16'](kValue.f16.negative.pi.sixth), kValue.f16.negative.pi.sixth] },
+ // x=1/√3=0.577350269... quantized to f16 0x389E
+ { input: reinterpretU16AsF16(0x389e), expected: [kValue.f16.positive.pi.sixth, kPlusOneULPFunctions['f16'](kValue.f16.positive.pi.sixth)] },
+ { input: 1, expected: [kValue.f16.positive.pi.quarter, kPlusOneULPFunctions['f16'](kValue.f16.positive.pi.quarter)] },
+ // x=√3=1.7320508... quantized to f16 0x3EED
+ { input: reinterpretU16AsF16(0x3eed), expected: [kValue.f16.positive.pi.third, kPlusOneULPFunctions['f16'](kValue.f16.positive.pi.third)] },
+ ] as ScalarToIntervalCase[],
+} as const;
+
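+// The table above pins the correctly rounded core values; the test body below then widens each
+// expectation by the atan tolerance used here (4096 ULP for f32, 5 ULP for f16) via applyError
+// before comparing against atanInterval.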
+g.test('atanInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const constants = FP[p.trait].constants();
+ // prettier-ignore
+ return [
+ { input: 0, expected: 0 },
+ ...kAtanIntervalCases[p.trait],
+
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+
+ const ulp_error = t.params.trait === 'f32' ? 4096 : 5;
+ const error = (n: number): number => {
+ return ulp_error * trait.oneULP(n);
+ };
+
+ const expected = trait.toInterval(applyError(t.params.expected, error));
+
+ const got = trait.atanInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.atanInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// Some of these are hard coded, since the error intervals are difficult to express in a closed
+// human-readable form due to the inherited nature of the errors.
+// prettier-ignore
+const kAtanhIntervalCases = {
+ f32: [
+ { input: -0.1, expected: [reinterpretU64AsF64(0xbfb9_af9a_6000_0000n), reinterpretU64AsF64(0xbfb9_af8c_c000_0000n)] }, // ~-0.1003...
+ { input: 0, expected: [reinterpretU64AsF64(0xbe96_0000_2000_0000n), reinterpretU64AsF64(0x3e98_0000_0000_0000n)] }, // ~0
+ { input: 0.1, expected: [reinterpretU64AsF64(0x3fb9_af8b_8000_0000n), reinterpretU64AsF64(0x3fb9_af9b_0000_0000n)] }, // ~0.1003...
+ ] as ScalarToIntervalCase[],
+ f16: [
+ { input: -0.1, expected: [reinterpretU64AsF64(0xbfbb_0c00_0000_0000n), reinterpretU64AsF64(0xbfb8_5800_0000_0000n)] }, // ~-0.1003...
+ { input: 0, expected: [reinterpretU64AsF64(0xbf73_0400_0000_0000n), reinterpretU64AsF64(0x3f74_0000_0000_0000n)] }, // ~0
+ { input: 0.1, expected: [reinterpretU64AsF64(0x3fb8_3800_0000_0000n), reinterpretU64AsF64(0x3fbb_2400_0000_0000n)] }, // ~0.1003...
+ ] as ScalarToIntervalCase[],
+} as const;
+
+g.test('atanhInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ ...kAtanhIntervalCases[p.trait],
+
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ { input: constants.negative.min, expected: kUnboundedBounds },
+ { input: -1, expected: kUnboundedBounds },
+ { input: 1, expected: kUnboundedBounds },
+ { input: constants.positive.max, expected: kUnboundedBounds },
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.atanhInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.atanhInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// Large but still representable integer
+const kCeilIntervalCases = {
+ f32: [
+ { input: 2 ** 30, expected: 2 ** 30 },
+ { input: -(2 ** 30), expected: -(2 ** 30) },
+ { input: 0x80000000, expected: 0x80000000 }, // https://github.com/gpuweb/cts/issues/2766
+ ],
+ f16: [
+ { input: 2 ** 14, expected: 2 ** 14 },
+ { input: -(2 ** 14), expected: -(2 ** 14) },
+ { input: 0x8000, expected: 0x8000 }, // https://github.com/gpuweb/cts/issues/2766
+ ],
+} as const;
+
+g.test('ceilInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const constants = FP[p.trait].constants();
+ // prettier-ignore
+ return [
+ { input: 0, expected: 0 },
+ { input: 0.1, expected: 1 },
+ { input: 0.9, expected: 1 },
+ { input: 1.0, expected: 1 },
+ { input: 1.1, expected: 2 },
+ { input: 1.9, expected: 2 },
+ { input: -0.1, expected: 0 },
+ { input: -0.9, expected: 0 },
+ { input: -1.0, expected: -1 },
+ { input: -1.1, expected: -1 },
+ { input: -1.9, expected: -1 },
+
+ // Edge cases
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ { input: constants.positive.max, expected: constants.positive.max },
+ { input: constants.positive.min, expected: 1 },
+ { input: constants.negative.min, expected: constants.negative.min },
+ { input: constants.negative.max, expected: 0 },
+ ...kCeilIntervalCases[p.trait],
+
+ // 32-bit subnormals
+ { input: constants.positive.subnormal.max, expected: [0, 1] },
+ { input: constants.positive.subnormal.min, expected: [0, 1] },
+ { input: constants.negative.subnormal.min, expected: 0 },
+ { input: constants.negative.subnormal.max, expected: 0 },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.ceilInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.ceilInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// Cos interval cases at x = ±π/3; the f32 and f16 results differ because π/3 quantizes in
+// different directions for the two types.
+const kCosIntervalThirdPiCases = {
+ // prettier-ignore
+ f32: [
+ // cos(-1.0471975803375244) = 0.499999974763
+ { input: kValue.f32.negative.pi.third, expected: [kMinusOneULPFunctions['f32'](1/2), 1/2] },
+ // cos(1.0471975803375244) = 0.499999974763
+ { input: kValue.f32.positive.pi.third, expected: [kMinusOneULPFunctions['f32'](1/2), 1/2] },
+ ],
+ f16: [
+ // cos(-1.046875) = 0.50027931
+ {
+ input: kValue.f16.negative.pi.third,
+ expected: FP['f16'].correctlyRoundedInterval(0.50027931).bounds(),
+ },
+ // cos(1.046875) = 0.50027931
+ {
+ input: kValue.f16.positive.pi.third,
+ expected: FP['f16'].correctlyRoundedInterval(0.50027931).bounds(),
+ },
+ ],
+};
+
+g.test('cosInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+        // This test does not include some common cases, e.g. f(x = π/2) = 0, because the
+        // difference between the true x and x as an f32 is large enough that the high slope
+        // of f at x makes the results substantially different; instead of getting 0 you get a
+        // value on the order of 10^-8 away from 0, which is difficult to express in a
+        // human-readable manner.
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ { input: constants.negative.min, expected: kUnboundedBounds },
+ { input: constants.negative.pi.whole, expected: [-1, kPlusOneULPFunctions[p.trait](-1)] },
+ { input: 0, expected: [1, 1] },
+ { input: constants.positive.pi.whole, expected: [-1, kPlusOneULPFunctions[p.trait](-1)] },
+ { input: constants.positive.max, expected: kUnboundedBounds },
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+
+ ...(kCosIntervalThirdPiCases[p.trait] as ScalarToIntervalCase[]),
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+
+ const error = (_: number): number => {
+ return t.params.trait === 'f32' ? 2 ** -11 : 2 ** -7;
+ };
+
+ const expected = trait.toInterval(applyError(t.params.expected, error));
+
+ const got = trait.cosInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+      `${t.params.trait}.cosInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// Some of these are hard coded, since the error intervals are difficult to express in a closed
+// human-readable form due to the inherited nature of the errors.
+// prettier-ignore
+const kCoshIntervalCases = {
+ f32: [
+ { input: -1, expected: [reinterpretU32AsF32(0x3fc583a4), reinterpretU32AsF32(0x3fc583b1)] }, // ~1.1543...
+ { input: 0, expected: [reinterpretU32AsF32(0x3f7ffffd), reinterpretU32AsF32(0x3f800002)] }, // ~1
+ { input: 1, expected: [reinterpretU32AsF32(0x3fc583a4), reinterpretU32AsF32(0x3fc583b1)] }, // ~1.1543...
+ ] as ScalarToIntervalCase[],
+ f16: [
+ { input: -1, expected: [reinterpretU16AsF16(0x3e27), reinterpretU16AsF16(0x3e30)] }, // ~1.1543...
+ { input: 0, expected: [reinterpretU16AsF16(0x3bff), reinterpretU16AsF16(0x3c01)] }, // ~1
+ { input: 1, expected: [reinterpretU16AsF16(0x3e27), reinterpretU16AsF16(0x3e30)] }, // ~1.1543...
+ ] as ScalarToIntervalCase[],
+} as const;
+
+g.test('coshInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ ...kCoshIntervalCases[p.trait],
+
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ { input: constants.negative.min, expected: kUnboundedBounds },
+ { input: constants.positive.max, expected: kUnboundedBounds },
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.coshInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.coshInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// prettier-ignore
+const kDegreesIntervalCases = {
+ f32: [
+ { input: kValue.f32.negative.pi.whole, expected: [kMinusOneULPFunctions['f32'](-180), kPlusOneULPFunctions['f32'](-180)] },
+ { input: kValue.f32.negative.pi.three_quarters, expected: [kMinusOneULPFunctions['f32'](-135), kPlusOneULPFunctions['f32'](-135)] },
+ { input: kValue.f32.negative.pi.half, expected: [kMinusOneULPFunctions['f32'](-90), kPlusOneULPFunctions['f32'](-90)] },
+ { input: kValue.f32.negative.pi.third, expected: [kMinusOneULPFunctions['f32'](-60), kPlusOneULPFunctions['f32'](-60)] },
+ { input: kValue.f32.negative.pi.quarter, expected: [kMinusOneULPFunctions['f32'](-45), kPlusOneULPFunctions['f32'](-45)] },
+ { input: kValue.f32.negative.pi.sixth, expected: [kMinusOneULPFunctions['f32'](-30), kPlusOneULPFunctions['f32'](-30)] },
+ { input: kValue.f32.positive.pi.sixth, expected: [kMinusOneULPFunctions['f32'](30), kPlusOneULPFunctions['f32'](30)] },
+ { input: kValue.f32.positive.pi.quarter, expected: [kMinusOneULPFunctions['f32'](45), kPlusOneULPFunctions['f32'](45)] },
+ { input: kValue.f32.positive.pi.third, expected: [kMinusOneULPFunctions['f32'](60), kPlusOneULPFunctions['f32'](60)] },
+ { input: kValue.f32.positive.pi.half, expected: [kMinusOneULPFunctions['f32'](90), kPlusOneULPFunctions['f32'](90)] },
+ { input: kValue.f32.positive.pi.three_quarters, expected: [kMinusOneULPFunctions['f32'](135), kPlusOneULPFunctions['f32'](135)] },
+ { input: kValue.f32.positive.pi.whole, expected: [kMinusOneULPFunctions['f32'](180), kPlusOneULPFunctions['f32'](180)] },
+ ] as ScalarToIntervalCase[],
+ f16: [
+ { input: kValue.f16.negative.pi.whole, expected: [-180, kPlusOneULPFunctions['f16'](-180)] },
+ { input: kValue.f16.negative.pi.three_quarters, expected: [-135, kPlusOneULPFunctions['f16'](-135)] },
+ { input: kValue.f16.negative.pi.half, expected: [-90, kPlusOneULPFunctions['f16'](-90)] },
+ { input: kValue.f16.negative.pi.third, expected: [-60, kPlusNULPFunctions['f16'](-60, 2)] },
+ { input: kValue.f16.negative.pi.quarter, expected: [-45, kPlusOneULPFunctions['f16'](-45)] },
+ { input: kValue.f16.negative.pi.sixth, expected: [-30, kPlusNULPFunctions['f16'](-30, 2)] },
+ { input: kValue.f16.positive.pi.sixth, expected: [kMinusNULPFunctions['f16'](30, 2), 30] },
+ { input: kValue.f16.positive.pi.quarter, expected: [kMinusOneULPFunctions['f16'](45), 45] },
+ { input: kValue.f16.positive.pi.third, expected: [kMinusNULPFunctions['f16'](60, 2), 60] },
+ { input: kValue.f16.positive.pi.half, expected: [kMinusOneULPFunctions['f16'](90), 90] },
+ { input: kValue.f16.positive.pi.three_quarters, expected: [kMinusOneULPFunctions['f16'](135), 135] },
+ { input: kValue.f16.positive.pi.whole, expected: [kMinusOneULPFunctions['f16'](180), 180] },
+ ] as ScalarToIntervalCase[],
+ abstract: [
+ { input: kValue.f64.negative.pi.whole, expected: -180 },
+ { input: kValue.f64.negative.pi.three_quarters, expected: -135 },
+ { input: kValue.f64.negative.pi.half, expected: -90 },
+ { input: kValue.f64.negative.pi.third, expected: kPlusOneULPFunctions['abstract'](-60) },
+ { input: kValue.f64.negative.pi.quarter, expected: -45 },
+ { input: kValue.f64.negative.pi.sixth, expected: kPlusOneULPFunctions['abstract'](-30) },
+ { input: kValue.f64.positive.pi.sixth, expected: kMinusOneULPFunctions['abstract'](30) },
+ { input: kValue.f64.positive.pi.quarter, expected: 45 },
+ { input: kValue.f64.positive.pi.third, expected: kMinusOneULPFunctions['abstract'](60) },
+ { input: kValue.f64.positive.pi.half, expected: 90 },
+ { input: kValue.f64.positive.pi.three_quarters, expected: 135 },
+ { input: kValue.f64.positive.pi.whole, expected: 180 },
+ ] as ScalarToIntervalCase[],
+} as const;
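+
+// The inputs above are the per-trait quantizations of multiples of π, so the exact degree
+// conversion of a quantized input generally lands a ULP or two away from the round-number value;
+// this appears to be why the f32/f16 expectations are widened by 1-2 ULP, while most of the
+// abstract (f64) cases stay exact.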
+
+g.test('degreesInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const trait = p.trait;
+ const constants = FP[trait].constants();
+ // prettier-ignore
+ return [
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+ { input: constants.negative.min, expected: kUnboundedBounds },
+ { input: 0, expected: 0 },
+ { input: constants.positive.max, expected: kUnboundedBounds },
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ ...kDegreesIntervalCases[trait]
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.degreesInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.degreesInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// prettier-ignore
+const kExpIntervalCases = {
+ f32: [
+ { input: 1, expected: [kValue.f32.positive.e, kPlusOneULPFunctions['f32'](kValue.f32.positive.e)] },
+ // exp(88) = 1.6516362549940018555283297962649e+38 = 0x7ef882b6/7.
+ { input: 88, expected: [reinterpretU32AsF32(0x7ef882b6), reinterpretU32AsF32(0x7ef882b7)] },
+ // exp(89) overflow f32.
+ { input: 89, expected: kUnboundedBounds },
+ ] as ScalarToIntervalCase[],
+ f16: [
+ { input: 1, expected: [kValue.f16.positive.e, kPlusOneULPFunctions['f16'](kValue.f16.positive.e)] },
+ // exp(11) = 59874.141715197818455326485792258 = 0x7b4f/0x7b50.
+ { input: 11, expected: [reinterpretU16AsF16(0x7b4f), reinterpretU16AsF16(0x7b50)] },
+ // exp(12) = 162754.79141900392080800520489849 overflow f16.
+ { input: 12, expected: kUnboundedBounds },
+ ] as ScalarToIntervalCase[],
+} as const;
+
+g.test('expInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const trait = p.trait;
+ const constants = FP[trait].constants();
+ // prettier-ignore
+ return [
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ { input: 0, expected: 1 },
+ ...kExpIntervalCases[trait],
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const error = (x: number): number => {
+ let ulp_error;
+ switch (t.params.trait) {
+ case 'f32': {
+ ulp_error = 3 + 2 * Math.abs(t.params.input);
+ break;
+ }
+ case 'f16': {
+ ulp_error = 1 + 2 * Math.abs(t.params.input);
+ break;
+ }
+ }
+ return ulp_error * trait.oneULP(x);
+ };
+
+ const expected = trait.toInterval(applyError(t.params.expected, error));
+ const got = trait.expInterval(t.params.input);
+
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.expInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
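+
+// Back-of-the-envelope illustration of the tolerance above (for exposition only): at x = 88 the
+// f32 budget is (3 + 2 * 88) = 179 ULPs, and one f32 ULP near exp(88) ≈ 1.65e38 is 2^103 ≈ 1.0e31,
+// so the acceptance interval may widen by roughly 1.8e33 on each side.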
+
+// prettier-ignore
+const kExp2IntervalCases = {
+ f32: [
+    // exp2(127) = 1.7014118346046923173168730371588e+38 = 0x7f000000, 3 + 2 * 127 = 257 ulps.
+ { input: 127, expected: reinterpretU32AsF32(0x7f000000) },
+ // exp2(128) overflow f32.
+ { input: 128, expected: kUnboundedBounds },
+ ] as ScalarToIntervalCase[],
+ f16: [
+ // exp2(15) = 32768 = 0x7800, 1 + 2 * 15 = 31 ulps
+ { input: 15, expected: reinterpretU16AsF16(0x7800) },
+ // exp2(16) = 65536 overflow f16.
+ { input: 16, expected: kUnboundedBounds },
+ ] as ScalarToIntervalCase[],
+} as const;
+
+g.test('exp2Interval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const trait = p.trait;
+ const constants = FP[trait].constants();
+ // prettier-ignore
+ return [
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ { input: 0, expected: 1 },
+ { input: 1, expected: 2 },
+ ...kExp2IntervalCases[trait],
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const error = (x: number): number => {
+ let ulp_error;
+ switch (t.params.trait) {
+ case 'f32': {
+ ulp_error = 3 + 2 * Math.abs(t.params.input);
+ break;
+ }
+ case 'f16': {
+ ulp_error = 1 + 2 * Math.abs(t.params.input);
+ break;
+ }
+ }
+ return ulp_error * trait.oneULP(x);
+ };
+
+ const expected = trait.toInterval(applyError(t.params.expected, error));
+
+ const got = trait.exp2Interval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.exp2Interval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// Large but still representable integer
+const kFloorIntervalCases = {
+ f32: [
+ { input: 2 ** 30, expected: 2 ** 30 },
+ { input: -(2 ** 30), expected: -(2 ** 30) },
+ { input: 0x80000000, expected: 0x80000000 }, // https://github.com/gpuweb/cts/issues/2766
+ ],
+ f16: [
+ { input: 2 ** 14, expected: 2 ** 14 },
+ { input: -(2 ** 14), expected: -(2 ** 14) },
+ { input: 0x8000, expected: 0x8000 }, // https://github.com/gpuweb/cts/issues/2766
+ ],
+ abstract: [
+ { input: 2 ** 62, expected: 2 ** 62 },
+ { input: -(2 ** 62), expected: -(2 ** 62) },
+ {
+ input: 0x8000_0000_0000_0000,
+ expected: 0x8000_0000_0000_0000,
+ }, // https://github.com/gpuweb/cts/issues/2766
+ ],
+} as const;
+
+g.test('floorInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const constants = FP[p.trait].constants();
+ // prettier-ignore
+ return [
+ { input: 0, expected: 0 },
+ { input: 0.1, expected: 0 },
+ { input: 0.9, expected: 0 },
+ { input: 1.0, expected: 1 },
+ { input: 1.1, expected: 1 },
+ { input: 1.9, expected: 1 },
+ { input: -0.1, expected: -1 },
+ { input: -0.9, expected: -1 },
+ { input: -1.0, expected: -1 },
+ { input: -1.1, expected: -2 },
+ { input: -1.9, expected: -2 },
+
+ // Edge cases
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ { input: constants.positive.max, expected: constants.positive.max },
+ { input: constants.positive.min, expected: 0 },
+ { input: constants.negative.min, expected: constants.negative.min },
+ { input: constants.negative.max, expected: -1 },
+ ...kFloorIntervalCases[p.trait],
+
+ // Subnormals
+ { input: constants.positive.subnormal.max, expected: 0 },
+ { input: constants.positive.subnormal.min, expected: 0 },
+ { input: constants.negative.subnormal.min, expected: [-1, 0] },
+ { input: constants.negative.subnormal.max, expected: [-1, 0] },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.floorInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.floorInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// prettier-ignore
+const kFractIntervalCases = {
+ f32: [
+ { input: 0.1, expected: [kMinusOneULPFunctions['f32'](reinterpretU32AsF32(0x3dcccccd)), reinterpretU32AsF32(0x3dcccccd)] }, // ~0.1
+ { input: 0.9, expected: [reinterpretU32AsF32(0x3f666666), kPlusOneULPFunctions['f32'](reinterpretU32AsF32(0x3f666666))] }, // ~0.9
+ { input: 1.1, expected: [reinterpretU32AsF32(0x3dccccc0), reinterpretU32AsF32(0x3dccccd0)] }, // ~0.1
+ { input: -0.1, expected: [reinterpretU32AsF32(0x3f666666), kPlusOneULPFunctions['f32'](reinterpretU32AsF32(0x3f666666))] }, // ~0.9
+ { input: -0.9, expected: [reinterpretU32AsF32(0x3dccccc8), reinterpretU32AsF32(0x3dccccd0)] }, // ~0.1
+ { input: -1.1, expected: [reinterpretU32AsF32(0x3f666666), reinterpretU32AsF32(0x3f666668)] }, // ~0.9
+
+ // https://github.com/gpuweb/cts/issues/2766
+ { input: 0x80000000, expected: 0 },
+ ] as ScalarToIntervalCase[],
+ f16: [
+ { input: 0.1, expected: [reinterpretU16AsF16(0x2e66), reinterpretU16AsF16(0x2e67)] }, // ~0.1
+ { input: 0.9, expected: [reinterpretU16AsF16(0x3b33), reinterpretU16AsF16(0x3b34)] }, // ~0.9
+ { input: 1.1, expected: [reinterpretU16AsF16(0x2e60), reinterpretU16AsF16(0x2e70)] }, // ~0.1
+ { input: -0.1, expected: [reinterpretU16AsF16(0x3b33), reinterpretU16AsF16(0x3b34)] }, // ~0.9
+ { input: -0.9, expected: [reinterpretU16AsF16(0x2e60), reinterpretU16AsF16(0x2e68)] }, // ~0.1
+ { input: -1.1, expected: [reinterpretU16AsF16(0x3b32), reinterpretU16AsF16(0x3b34)] }, // ~0.9
+ { input: 658.5, expected: 0.5 },
+ ] as ScalarToIntervalCase[],
+} as const;
+
+g.test('fractInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const constants = FP[p.trait].constants();
+ // prettier-ignore
+ return [
+ { input: 0, expected: 0 },
+ { input: 1.0, expected: 0 },
+ { input: -1.0, expected: 0 },
+
+ ...kFractIntervalCases[p.trait],
+
+ // Edge cases
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ { input: constants.positive.max, expected: 0 },
+ { input: constants.positive.min, expected: constants.positive.min },
+ { input: constants.negative.min, expected: 0 },
+ { input: constants.negative.max, expected: [constants.positive.less_than_one, 1.0] },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.fractInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.fractInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// prettier-ignore
+const kInverseSqrtIntervalCases = {
+ f32: [
+ // 0.04 rounded to f32 0x3D23D70A or 0x3D23D70B,
+ // 1/sqrt(0x3D23D70B)=4.9999998230487200185270893769213 rounded to f32 0x409FFFFF or 0x40A00000,
+ // 1/sqrt(0x3D23D70A)=5.0000000558793553117506910583908 rounded to f32 0x40A00000 or 0x40A00001.
+ { input: 0.04, expected: [reinterpretU32AsF32(0x409FFFFF), reinterpretU32AsF32(0x40A00001)] }, // ~5.0
+    // Maximum f32 0x7F7FFFFF = 3.4028234663852886e+38,
+ // 1/sqrt(0x7F7FFFFF)=5.4210110239862427800382690921791e-20 rounded to f32 0x1F800000 or 0x1F800001
+ { input: kValue.f32.positive.max, expected: [reinterpretU32AsF32(0x1f800000), reinterpretU32AsF32(0x1f800001)] }, // ~5.421...e-20
+ ] as ScalarToIntervalCase[],
+ f16: [
+ // 0.04 rounded to f16 0x291E or 0x291F,
+ // 1/sqrt(0x291F)=4.9994660279328446295684795818427 rounded to f16 0x44FF or 0x4500,
+ // 1/sqrt(0x291E)=5.001373857053206453045376503367 rounded to f16 0x4500 or 0x4501.
+ { input: 0.04, expected: [reinterpretU16AsF16(0x44FF), reinterpretU16AsF16(0x4501)] }, // ~5.0
+    // Maximum f16 0x7BFF = 65504,
+ // 1/sqrt(0x7BFF)=0.00390720402370454101997160826062 rounded to f16 0x1C00 or 0x1C01
+ { input: kValue.f16.positive.max, expected: [reinterpretU16AsF16(0x1c00), reinterpretU16AsF16(0x1c01)] }, // ~3.9072...e-3
+ ] as ScalarToIntervalCase[],
+} as const;
+
+g.test('inverseSqrtInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+      // Note that the 2 ULP error applied by the test body is not included in these expectations.
+ // prettier-ignore
+ return [
+ // Exactly representable cases
+ { input: 1, expected: 1 },
+ { input: 0.25, expected: 2 },
+ { input: 64, expected: 0.125 },
+
+ // Cases that input and/or result not exactly representable
+ ...kInverseSqrtIntervalCases[p.trait],
+ // 1/sqrt(100.0)=0.1, rounded to corresponding trait
+ { input: 100, expected: kConstantCorrectlyRoundedExpectation[p.trait]['0.1'] }, // ~0.1
+
+ // Out of definition domain
+ { input: -1, expected: kUnboundedBounds },
+ { input: 0, expected: kUnboundedBounds },
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+
+ const error = (n: number): number => {
+ return 2 * trait.oneULP(n);
+ };
+
+ const expected = trait.toInterval(applyError(t.params.expected, error));
+
+ const got = trait.inverseSqrtInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.inverseSqrtInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// Expectation intervals of 1/inverseSqrt(sum(x[i]^2)) for some special value arrays x and float
+// traits, used as the expectations for `length` and `distance` (see the sanity sketch after this
+// table). These cases are hard coded, since the error intervals are difficult to express in a
+// closed human-readable form due to the inherited nature of the errors.
+// prettier-ignore
+const kRootSumSquareExpectionInterval = {
+ f32: {
+ '[0.1]': [reinterpretU64AsF64(0x3fb9_9998_9000_0000n), reinterpretU64AsF64(0x3fb9_999a_7000_0000n)], // ~0.1
+ '[1.0]' : [reinterpretU64AsF64(0x3fef_ffff_7000_0000n), reinterpretU64AsF64(0x3ff0_0000_9000_0000n)], // ~1.0
+ '[10]' : [reinterpretU64AsF64(0x4023_ffff_7000_0000n), reinterpretU64AsF64(0x4024_0000_b000_0000n)], // ~10
+ '[1.0, 1.0]' : [reinterpretU64AsF64(0x3ff6_a09d_b000_0000n), reinterpretU64AsF64(0x3ff6_a09f_1000_0000n)], // ~√2
+ '[1.0, 1.0, 1.0]' : [reinterpretU64AsF64(0x3ffb_b67a_1000_0000n), reinterpretU64AsF64(0x3ffb_b67b_b000_0000n)], // ~√3
+ '[1.0, 1.0, 1.0, 1.0]' : [reinterpretU64AsF64(0x3fff_ffff_7000_0000n), reinterpretU64AsF64(0x4000_0000_9000_0000n)], // ~2
+ } as {[s: string]: IntervalBounds},
+ f16: {
+ '[0.1]': [reinterpretU64AsF64(0x3fb9_7e00_0000_0000n), reinterpretU64AsF64(0x3fb9_b600_0000_0000n)], // ~0.1
+ '[1.0]' : [reinterpretU64AsF64(0x3fef_ee00_0000_0000n), reinterpretU64AsF64(0x3ff0_1200_0000_0000n)], // ~1.0
+ '[10]' : [reinterpretU64AsF64(0x4023_ea00_0000_0000n), reinterpretU64AsF64(0x4024_1200_0000_0000n)], // ~10
+ '[1.0, 1.0]' : [reinterpretU64AsF64(0x3ff6_8a00_0000_0000n), reinterpretU64AsF64(0x3ff6_b600_0000_0000n)], // ~√2
+ '[1.0, 1.0, 1.0]' : [reinterpretU64AsF64(0x3ffb_9a00_0000_0000n), reinterpretU64AsF64(0x3ffb_d200_0000_0000n)], // ~√3
+ '[1.0, 1.0, 1.0, 1.0]' : [reinterpretU64AsF64(0x3fff_ee00_0000_0000n), reinterpretU64AsF64(0x4000_1200_0000_0000n)], // ~2
+ } as {[s: string]: IntervalBounds},
+} as const;
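+
+// A quick sanity sketch (illustrative only, not part of the test): the mathematically exact
+// root-sum-square for each key should fall inside its hard-coded interval, e.g.
+//   const exact = Math.hypot(1.0, 1.0);                                 // === Math.SQRT2
+//   const [lo, hi] = kRootSumSquareExpectionInterval.f32['[1.0, 1.0]']; // ~√2 bounds above
+//   lo <= exact && exact <= hi;                                         // expected to hold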
+
+g.test('lengthIntervalScalar')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ {input: 1.0, expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ {input: -1.0, expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ {input: 0.1, expected: kRootSumSquareExpectionInterval[p.trait]['[0.1]'] }, // ~0.1
+ {input: -0.1, expected: kRootSumSquareExpectionInterval[p.trait]['[0.1]'] }, // ~0.1
+ {input: 10.0, expected: kRootSumSquareExpectionInterval[p.trait]['[10]'] }, // ~10
+ {input: -10.0, expected: kRootSumSquareExpectionInterval[p.trait]['[10]'] }, // ~10
+
+        // length(0) = kUnboundedBounds, because length uses sqrt, which is defined as 1/inverseSqrt
+ {input: 0, expected: kUnboundedBounds },
+
+ // Subnormal Cases
+ { input: constants.negative.subnormal.min, expected: kUnboundedBounds },
+ { input: constants.negative.subnormal.max, expected: kUnboundedBounds },
+ { input: constants.positive.subnormal.min, expected: kUnboundedBounds },
+ { input: constants.positive.subnormal.max, expected: kUnboundedBounds },
+
+ // Edge cases
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ { input: constants.negative.min, expected: kUnboundedBounds },
+ { input: constants.negative.max, expected: kUnboundedBounds },
+ { input: constants.positive.min, expected: kUnboundedBounds },
+ { input: constants.positive.max, expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.lengthInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.lengthInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// prettier-ignore
+const kLogIntervalCases = {
+ f32: [
+ // kValue.f32.positive.e is 0x402DF854 = 2.7182817459106445,
+ // log(0x402DF854) = 0.99999996963214000677592342891704 rounded to f32 0x3F7FFFFF or 0x3F800000 = 1.0
+ { input: kValue.f32.positive.e, expected: [kMinusOneULPFunctions['f32'](1.0), 1.0] },
+ // kValue.f32.positive.max is 0x7F7FFFFF = 3.4028234663852886e+38,
+ // log(0x7F7FFFFF) = 88.72283905206835305421152826479 rounded to f32 0x42B17217 or 0x42B17218.
+ { input: kValue.f32.positive.max, expected: [kMinusOneULPFunctions['f32'](reinterpretU32AsF32(0x42b17218)), reinterpretU32AsF32(0x42b17218)] },
+ ] as ScalarToIntervalCase[],
+ f16: [
+ // kValue.f16.positive.e is 0x416F = 2.716796875,
+ // log(0x416F) = 0.99945356688393512460279716546501 rounded to f16 0x3BFE or 0x3BFF.
+ { input: kValue.f16.positive.e, expected: [reinterpretU16AsF16(0x3bfe), reinterpretU16AsF16(0x3bff)] },
+ // kValue.f16.positive.max is 0x7BFF = 65504,
+ // log(0x7BFF) = 11.089866488461016076210728979771 rounded to f16 0x498B or 0x498C.
+ { input: kValue.f16.positive.max, expected: [reinterpretU16AsF16(0x498b), reinterpretU16AsF16(0x498c)] },
+ ] as ScalarToIntervalCase[],
+} as const;
+
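+// The log tests use two error regimes: an absolute error (2^-21 for f32, 2^-7 for f16) when the
+// input is in [0.5, 2.0], and 3 ULP of the expected value elsewhere, applied via applyError below.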
+g.test('logInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ // prettier-ignore
+ return [
+ { input: -1, expected: kUnboundedBounds },
+ { input: 0, expected: kUnboundedBounds },
+ { input: 1, expected: 0 },
+ ...kLogIntervalCases[p.trait],
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const abs_error = t.params.trait === 'f32' ? 2 ** -21 : 2 ** -7;
+ const error = (n: number): number => {
+ if (t.params.input >= 0.5 && t.params.input <= 2.0) {
+ return abs_error;
+ }
+ return 3 * trait.oneULP(n);
+ };
+
+ const expected = trait.toInterval(applyError(t.params.expected, error));
+
+ const got = trait.logInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.logInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// prettier-ignore
+const kLog2IntervalCases = {
+ f32: [
+ // kValue.f32.positive.max is 0x7F7FFFFF = 3.4028234663852886e+38,
+ // log2(0x7F7FFFFF) = 127.99999991400867200665269600978 rounded to f32 0x42FFFFFF or 0x43000000 = 128.0
+ { input: kValue.f32.positive.max, expected: [kMinusOneULPFunctions['f32'](128.0), 128.0] },
+ ] as ScalarToIntervalCase[],
+ f16: [
+ // kValue.f16.positive.max is 0x7BFF = 65504,
+ // log2(0x7BFF) = 15.999295387023410627258428389903 rounded to f16 0x4BFF or 0x4C00 = 16.0
+ { input: kValue.f16.positive.max, expected: [kMinusOneULPFunctions['f16'](16.0), 16.0] },
+ ] as ScalarToIntervalCase[],
+} as const;
+
+g.test('log2Interval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ // prettier-ignore
+ return [
+ { input: -1, expected: kUnboundedBounds },
+ { input: 0, expected: kUnboundedBounds },
+ { input: 1, expected: 0 },
+ { input: 2, expected: 1 },
+ { input: 16, expected: 4 },
+ ...kLog2IntervalCases[p.trait],
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const abs_error = t.params.trait === 'f32' ? 2 ** -21 : 2 ** -7;
+ const error = (n: number): number => {
+ if (t.params.input >= 0.5 && t.params.input <= 2.0) {
+ return abs_error;
+ }
+ return 3 * trait.oneULP(n);
+ };
+
+ const expected = trait.toInterval(applyError(t.params.expected, error));
+
+ const got = trait.log2Interval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.log2Interval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+g.test('negationInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ // Edge cases
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ { input: constants.positive.max, expected: constants.negative.min },
+ { input: constants.positive.min, expected: constants.negative.max },
+ { input: constants.negative.min, expected: constants.positive.max },
+ { input: constants.negative.max, expected: constants.positive.min },
+
+ // Normals
+ { input: 0, expected: 0 },
+ { input: 1.0, expected: -1.0 },
+ { input: -1.0, expected: 1 },
+ { input: 0.1, expected: kConstantCorrectlyRoundedExpectation[p.trait]['-0.1'] }, // ~-0.1
+ { input: 1.9, expected: kConstantCorrectlyRoundedExpectation[p.trait]['-1.9'] }, // ~-1.9
+ { input: -0.1, expected: kConstantCorrectlyRoundedExpectation[p.trait]['0.1'] }, // ~0.1
+ { input: -1.9, expected: kConstantCorrectlyRoundedExpectation[p.trait]['1.9'] }, // ~1.9
+
+ // Subnormals
+ { input: constants.positive.subnormal.max, expected: [constants.negative.subnormal.min, 0] },
+ { input: constants.positive.subnormal.min, expected: [constants.negative.subnormal.max, 0] },
+ { input: constants.negative.subnormal.min, expected: [0, constants.positive.subnormal.max] },
+ { input: constants.negative.subnormal.max, expected: [0, constants.positive.subnormal.min] },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.negationInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.negationInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+g.test('quantizeToF16Interval')
+ .paramsSubcasesOnly<ScalarToIntervalCase>(
+ // prettier-ignore
+ [
+ { input: kValue.f32.negative.infinity, expected: kUnboundedBounds },
+ { input: kValue.f32.negative.min, expected: kUnboundedBounds },
+ { input: kValue.f16.negative.min, expected: kValue.f16.negative.min },
+ { input: -1.9, expected: kConstantCorrectlyRoundedExpectation['f16']['-1.9'] }, // ~-1.9
+ { input: -1, expected: -1 },
+ { input: -0.1, expected: kConstantCorrectlyRoundedExpectation['f16']['-0.1'] }, // ~-0.1
+ { input: kValue.f16.negative.max, expected: kValue.f16.negative.max },
+ { input: kValue.f16.negative.subnormal.min, expected: [kValue.f16.negative.subnormal.min, 0] },
+ { input: kValue.f16.negative.subnormal.max, expected: [kValue.f16.negative.subnormal.max, 0] },
+ { input: kValue.f32.negative.subnormal.max, expected: [kValue.f16.negative.subnormal.max, 0] },
+ { input: 0, expected: 0 },
+ { input: kValue.f32.positive.subnormal.min, expected: [0, kValue.f16.positive.subnormal.min] },
+ { input: kValue.f16.positive.subnormal.min, expected: [0, kValue.f16.positive.subnormal.min] },
+ { input: kValue.f16.positive.subnormal.max, expected: [0, kValue.f16.positive.subnormal.max] },
+ { input: kValue.f16.positive.min, expected: kValue.f16.positive.min },
+ { input: 0.1, expected: kConstantCorrectlyRoundedExpectation['f16']['0.1'] }, // ~0.1
+ { input: 1, expected: 1 },
+ { input: 1.9, expected: kConstantCorrectlyRoundedExpectation['f16']['1.9'] }, // ~1.9
+ { input: kValue.f16.positive.max, expected: kValue.f16.positive.max },
+ { input: kValue.f32.positive.max, expected: kUnboundedBounds },
+ { input: kValue.f32.positive.infinity, expected: kUnboundedBounds },
+ ]
+ )
+ .fn(t => {
+ const expected = FP.f32.toInterval(t.params.expected);
+
+ const got = FP.f32.quantizeToF16Interval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `f32.quantizeToF16Interval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// prettier-ignore
+const kRadiansIntervalCases = {
+ f32: [
+ { input: -180, expected: [kMinusOneULPFunctions['f32'](kValue.f32.negative.pi.whole), kPlusOneULPFunctions['f32'](kValue.f32.negative.pi.whole)] },
+ { input: -135, expected: [kMinusOneULPFunctions['f32'](kValue.f32.negative.pi.three_quarters), kPlusOneULPFunctions['f32'](kValue.f32.negative.pi.three_quarters)] },
+ { input: -90, expected: [kMinusOneULPFunctions['f32'](kValue.f32.negative.pi.half), kPlusOneULPFunctions['f32'](kValue.f32.negative.pi.half)] },
+ { input: -60, expected: [kMinusOneULPFunctions['f32'](kValue.f32.negative.pi.third), kPlusOneULPFunctions['f32'](kValue.f32.negative.pi.third)] },
+ { input: -45, expected: [kMinusOneULPFunctions['f32'](kValue.f32.negative.pi.quarter), kPlusOneULPFunctions['f32'](kValue.f32.negative.pi.quarter)] },
+ { input: -30, expected: [kMinusOneULPFunctions['f32'](kValue.f32.negative.pi.sixth), kPlusOneULPFunctions['f32'](kValue.f32.negative.pi.sixth)] },
+ { input: 30, expected: [kMinusOneULPFunctions['f32'](kValue.f32.positive.pi.sixth), kPlusOneULPFunctions['f32'](kValue.f32.positive.pi.sixth)] },
+ { input: 45, expected: [kMinusOneULPFunctions['f32'](kValue.f32.positive.pi.quarter), kPlusOneULPFunctions['f32'](kValue.f32.positive.pi.quarter)] },
+ { input: 60, expected: [kMinusOneULPFunctions['f32'](kValue.f32.positive.pi.third), kPlusOneULPFunctions['f32'](kValue.f32.positive.pi.third)] },
+ { input: 90, expected: [kMinusOneULPFunctions['f32'](kValue.f32.positive.pi.half), kPlusOneULPFunctions['f32'](kValue.f32.positive.pi.half)] },
+ { input: 135, expected: [kMinusOneULPFunctions['f32'](kValue.f32.positive.pi.three_quarters), kPlusOneULPFunctions['f32'](kValue.f32.positive.pi.three_quarters)] },
+ { input: 180, expected: [kMinusOneULPFunctions['f32'](kValue.f32.positive.pi.whole), kPlusOneULPFunctions['f32'](kValue.f32.positive.pi.whole)] },
+ ] as ScalarToIntervalCase[],
+ f16: [
+ { input: -180, expected: [kMinusOneULPFunctions['f16'](kValue.f16.negative.pi.whole), kPlusOneULPFunctions['f16'](kValue.f16.negative.pi.whole)] },
+ { input: -135, expected: [kMinusOneULPFunctions['f16'](kValue.f16.negative.pi.three_quarters), kPlusOneULPFunctions['f16'](kValue.f16.negative.pi.three_quarters)] },
+ { input: -90, expected: [kMinusOneULPFunctions['f16'](kValue.f16.negative.pi.half), kPlusOneULPFunctions['f16'](kValue.f16.negative.pi.half)] },
+ { input: -60, expected: [kMinusOneULPFunctions['f16'](kValue.f16.negative.pi.third), kPlusOneULPFunctions['f16'](kValue.f16.negative.pi.third)] },
+ { input: -45, expected: [kMinusOneULPFunctions['f16'](kValue.f16.negative.pi.quarter), kPlusOneULPFunctions['f16'](kValue.f16.negative.pi.quarter)] },
+ { input: -30, expected: [kMinusOneULPFunctions['f16'](kValue.f16.negative.pi.sixth), kPlusOneULPFunctions['f16'](kValue.f16.negative.pi.sixth)] },
+ { input: 30, expected: [kMinusOneULPFunctions['f16'](kValue.f16.positive.pi.sixth), kPlusOneULPFunctions['f16'](kValue.f16.positive.pi.sixth)] },
+ { input: 45, expected: [kMinusOneULPFunctions['f16'](kValue.f16.positive.pi.quarter), kPlusOneULPFunctions['f16'](kValue.f16.positive.pi.quarter)] },
+ { input: 60, expected: [kMinusOneULPFunctions['f16'](kValue.f16.positive.pi.third), kPlusOneULPFunctions['f16'](kValue.f16.positive.pi.third)] },
+ { input: 90, expected: [kMinusOneULPFunctions['f16'](kValue.f16.positive.pi.half), kPlusOneULPFunctions['f16'](kValue.f16.positive.pi.half)] },
+ { input: 135, expected: [kMinusOneULPFunctions['f16'](kValue.f16.positive.pi.three_quarters), kPlusOneULPFunctions['f16'](kValue.f16.positive.pi.three_quarters)] },
+ { input: 180, expected: [kMinusOneULPFunctions['f16'](kValue.f16.positive.pi.whole), kPlusOneULPFunctions['f16'](kValue.f16.positive.pi.whole)] },
+ ] as ScalarToIntervalCase[],
+ abstract: [
+ { input: -180, expected: kValue.f64.negative.pi.whole },
+ { input: -135, expected: kValue.f64.negative.pi.three_quarters },
+ { input: -90, expected: kValue.f64.negative.pi.half },
+ { input: -60, expected: kValue.f64.negative.pi.third },
+ { input: -45, expected: kValue.f64.negative.pi.quarter },
+ { input: -30, expected: kValue.f64.negative.pi.sixth },
+ { input: 30, expected: kValue.f64.positive.pi.sixth },
+ { input: 45, expected: kValue.f64.positive.pi.quarter },
+ { input: 60, expected: kValue.f64.positive.pi.third },
+ { input: 90, expected: kValue.f64.positive.pi.half },
+ { input: 135, expected: kValue.f64.positive.pi.three_quarters },
+ { input: 180, expected: kValue.f64.positive.pi.whole },
+ ] as ScalarToIntervalCase[],
+} as const;
+
+g.test('radiansInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const trait = p.trait;
+ const constants = FP[trait].constants();
+ // prettier-ignore
+ return [
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+ { input: 0, expected: 0 },
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ ...kRadiansIntervalCases[trait]
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.radiansInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.radiansInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// Large but still representable integers
+const kRoundIntervalCases = {
+ f32: [
+ { input: 2 ** 30, expected: 2 ** 30 },
+ { input: -(2 ** 30), expected: -(2 ** 30) },
+ { input: 0x80000000, expected: 0x80000000 }, // https://github.com/gpuweb/cts/issues/2766
+ ],
+ f16: [
+ { input: 2 ** 14, expected: 2 ** 14 },
+ { input: -(2 ** 14), expected: -(2 ** 14) },
+ { input: 0x8000, expected: 0x8000 }, // https://github.com/gpuweb/cts/issues/2766
+ ],
+} as const;
+
+g.test('roundInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const constants = FP[p.trait].constants();
+ // prettier-ignore
+ return [
+ { input: 0, expected: 0 },
+ { input: 0.1, expected: 0 },
+        { input: 0.5, expected: 0 }, // Testing tie breaking (see the note after this test)
+ { input: 0.9, expected: 1 },
+ { input: 1.0, expected: 1 },
+ { input: 1.1, expected: 1 },
+ { input: 1.5, expected: 2 }, // Testing tie breaking
+ { input: 1.9, expected: 2 },
+ { input: -0.1, expected: 0 },
+ { input: -0.5, expected: 0 }, // Testing tie breaking
+ { input: -0.9, expected: -1 },
+ { input: -1.0, expected: -1 },
+ { input: -1.1, expected: -1 },
+ { input: -1.5, expected: -2 }, // Testing tie breaking
+ { input: -1.9, expected: -2 },
+
+ // Edge cases
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ { input: constants.positive.max, expected: constants.positive.max },
+ { input: constants.positive.min, expected: 0 },
+ { input: constants.negative.min, expected: constants.negative.min },
+ { input: constants.negative.max, expected: 0 },
+ ...kRoundIntervalCases[p.trait],
+
+        // Subnormals
+ { input: constants.positive.subnormal.max, expected: 0 },
+ { input: constants.positive.subnormal.min, expected: 0 },
+ { input: constants.negative.subnormal.min, expected: 0 },
+ { input: constants.negative.subnormal.max, expected: 0 },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.roundInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.roundInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
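+
+// A worked note on the tie-breaking cases above: WGSL round() rounds halfway cases to the
+// nearest even integer, so round(0.5) = 0 and round(-0.5) = 0 (0 is even), while
+// round(1.5) = 2 and round(-1.5) = -2 (skipping the odd neighbours 1 and -1).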
+
+g.test('saturateInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const constants = FP[p.trait].constants();
+ // prettier-ignore
+ return [
+ // Normals
+ { input: 0, expected: 0 },
+ { input: 0.1, expected: kConstantCorrectlyRoundedExpectation[p.trait]['0.1'] },
+ { input: 1, expected: 1.0 },
+ { input: -0.1, expected: 0 },
+ { input: -1, expected: 0 },
+ { input: -10, expected: 0 },
+ { input: 10, expected: 1.0 },
+ { input: 11.1, expected: 1.0 },
+ { input: constants.positive.max, expected: 1.0 },
+ { input: constants.positive.min, expected: constants.positive.min },
+ { input: constants.negative.max, expected: 0.0 },
+ { input: constants.negative.min, expected: 0.0 },
+
+ // Subnormals
+ { input: constants.positive.subnormal.max, expected: [0.0, constants.positive.subnormal.max] },
+ { input: constants.positive.subnormal.min, expected: [0.0, constants.positive.subnormal.min] },
+ { input: constants.negative.subnormal.min, expected: [constants.negative.subnormal.min, 0.0] },
+ { input: constants.negative.subnormal.max, expected: [constants.negative.subnormal.max, 0.0] },
+
+ // Infinities
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.saturateInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+      `${t.params.trait}.saturateInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+g.test('signInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const constants = FP[p.trait].constants();
+ // prettier-ignore
+ return [
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ { input: constants.negative.min, expected: -1 },
+ { input: -10, expected: -1 },
+ { input: -1, expected: -1 },
+ { input: -0.1, expected: -1 },
+ { input: constants.negative.max, expected: -1 },
+ { input: constants.negative.subnormal.min, expected: [-1, 0] },
+ { input: constants.negative.subnormal.max, expected: [-1, 0] },
+ { input: 0, expected: 0 },
+ { input: constants.positive.subnormal.max, expected: [0, 1] },
+ { input: constants.positive.subnormal.min, expected: [0, 1] },
+ { input: constants.positive.min, expected: 1 },
+ { input: 0.1, expected: 1 },
+ { input: 1, expected: 1 },
+ { input: 10, expected: 1 },
+ { input: constants.positive.max, expected: 1 },
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.signInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.signInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+g.test('sinInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const constants = FP[p.trait].constants();
+ // prettier-ignore
+ return [
+        // This test does not include some common cases, e.g. f(x = ±π) = 0.
+        // The difference between the true value of π and its nearest float is large
+        // enough that, combined with the high slope of sin near ±π, the result is not
+        // 0 but a value on the order of 10^-8, which is difficult to express in a
+        // human-readable manner (see the note after this test).
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ { input: constants.negative.min, expected: kUnboundedBounds },
+ { input: constants.negative.pi.half, expected: [-1, kPlusOneULPFunctions[p.trait](-1)] },
+ { input: 0, expected: 0 },
+ { input: constants.positive.pi.half, expected: [kMinusOneULPFunctions[p.trait](1), 1] },
+ { input: constants.positive.max, expected: kUnboundedBounds },
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+
+ const error = (_: number): number => {
+ return t.params.trait === 'f32' ? 2 ** -11 : 2 ** -7;
+ };
+
+ const expected = trait.toInterval(applyError(t.params.expected, error));
+
+ const got = trait.sinInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.sinInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
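+
+// A worked illustration of the exclusion noted in the sinInterval cases above (assuming
+// Math.sin as the f64 reference): the f32 closest to π is kValue.f32.positive.pi.whole =
+// reinterpretU32AsF32(0x40490fdb) ≈ π + 8.742e-8, and since sin has slope -1 at π,
+//   Math.sin(reinterpretU32AsF32(0x40490fdb))  // ≈ -8.742e-8, not 0
+// which is why f(x = ±π) = 0 is not listed as a simple expectation.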
+
+// Some of these are hard coded, since the error intervals are difficult to express in a closed
+// human-readable form due to the inherited nature of the errors.
+// prettier-ignore
+const kSinhIntervalCases = {
+ f32: [
+ { input: -1, expected: [reinterpretU32AsF32(0xbf966d05), reinterpretU32AsF32(0xbf966cf8)] }, // ~-1.175...
+ { input: 0, expected: [reinterpretU32AsF32(0xb4600000), reinterpretU32AsF32(0x34600000)] }, // ~0
+ { input: 1, expected: [reinterpretU32AsF32(0x3f966cf8), reinterpretU32AsF32(0x3f966d05)] }, // ~1.175...
+ ] as ScalarToIntervalCase[],
+ f16: [
+ { input: -1, expected: [reinterpretU16AsF16(0xbcb8), reinterpretU16AsF16(0xbcaf)] }, // ~-1.175...
+ { input: 0, expected: [reinterpretU16AsF16(0x9200), reinterpretU16AsF16(0x1200)] }, // ~0
+ { input: 1, expected: [reinterpretU16AsF16(0x3caf), reinterpretU16AsF16(0x3cb8)] }, // ~1.175...
+ ] as ScalarToIntervalCase[],
+} as const;
+
+g.test('sinhInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ ...kSinhIntervalCases[p.trait],
+
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ { input: constants.negative.min, expected: kUnboundedBounds },
+ { input: constants.positive.max, expected: kUnboundedBounds },
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.sinhInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.sinhInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// The sqrt interval is inherited from 1.0 / inverseSqrt(x), so errors come from:
+// 1. Rounding of the input x, if any;
+// 2. 2 ULP from inverseSqrt;
+// 3. 2.5 ULP from the division.
+// The last 2.5 ULP is applied in the test body and not included in the expected values here.
+// prettier-ignore
+const kSqrtIntervalCases = {
+ f32: [
+ // 0.01 rounded to f32 0x3C23D70A or 0x3C23D70B.
+ // For inverseSqrt interval, floor_f32(1.0/sqrt(0x3C23D70B))-2ULP=0x411FFFFD,
+ // ceil_f32(1.0/sqrt(0x3C23D70A))+2ULP=0x41200003.
+ // For division, 1.0/0x41200003=0.09999997138977868544997855067803 rounded to f32 0x3DCCCCC8 or 0x3DCCCCC9,
+ // 1.0/0x411FFFFD=0.100000028610237685454662304067 rounded to f32 0x3DCCCCD0 or 0x3DCCCCD1.
+ { input: 0.01, expected: [reinterpretU32AsF32(0x3DCCCCC8), reinterpretU32AsF32(0x3DCCCCD1)] }, // ~0.1
+ // For inverseSqrt interval, 1.0/sqrt(1.0)-2ULP=0x3F7FFFFE, 1.0/sqrt(1.0)+2ULP=0x3F800001.
+ // For division, 1.0/0x3F800001=0.9999998807907246108530328709735 rounded to f32 0x3F7FFFFE or 0x3F7FFFFF,
+ // 1.0/0x3F7FFFFE=1.0000001192093038108564210027667 rounded to f32 0x3F800001 or 0x3F800002.
+ { input: 1, expected: [reinterpretU32AsF32(0x3F7FFFFE), reinterpretU32AsF32(0x3F800002)] }, // ~1
+ // For inverseSqrt interval, 1.0/sqrt(4.0)-2ULP=0x3EFFFFFE, 1.0/sqrt(4.0)+2ULP=0x3F000001.
+ // For division, 1.0/0x3F000001=1.999999761581449221706065741947 rounded to f32 0x3FFFFFFE or 0x3FFFFFFF,
+ // 1.0/0x3EFFFFFE=2.0000002384186076217128420055334 rounded to f32 0x40000001 or 0x40000002.
+ { input: 4, expected: [reinterpretU32AsF32(0x3FFFFFFE), reinterpretU32AsF32(0x40000002)] }, // ~2
+ // For inverseSqrt interval, floor_f32(1.0/sqrt(100.0))-2ULP=0x3DCCCCCA,
+ // ceil_f32(1.0/sqrt(100.0))+2ULP=0x3DCCCCCF.
+ // For division, 1.0/0x3DCCCCCF=9.9999983608725376739278142322684 rounded to f32 0x411FFFFE or 0x411FFFFF,
+ // 1.0/0x3DCCCCCA=10.000002086163002207516386565905 rounded to f32 0x41200002 or 0x41200003.
+ { input: 100, expected: [reinterpretU32AsF32(0x411FFFFE), reinterpretU32AsF32(0x41200003)] }, // ~10
+ ] as ScalarToIntervalCase[],
+ f16: [
+ // 0.01 rounded to f16 0x211E or 0x211F.
+ // For inverseSqrt interval, floor_f16(1.0/sqrt(0x211F))-2ULP=0x48FD,
+ // ceil_f16(1.0/sqrt(0x211E))+2ULP=0x4903.
+ // For division, 1.0/0x4903=0.09976617303195635229929851909587 rounded to f16 0x2E62 or 0x2E63,
+ // 1.0/0x48FD=0.10023492560689115113547376664056 rounded to f16 0x2E6A or 0x2E6B.
+ { input: 0.01, expected: [reinterpretU16AsF16(0x2E62), reinterpretU16AsF16(0x2E6B)] }, // ~0.1
+ // For inverseSqrt interval, 1.0/sqrt(1.0)-2ULP=0x3BFE, 1.0/sqrt(1.0)+2ULP=0x3C01.
+ // For division, 1.0/0x3C01=0.99902439024390243902439024390244 rounded to f16 0x3BFE or 0x3BFF,
+ // 1.0/0x3BFE=1.000977517106549364613880742913 rounded to f16 0x3C01 or 0x3C02.
+ { input: 1, expected: [reinterpretU16AsF16(0x3BFE), reinterpretU16AsF16(0x3C02)] }, // ~1
+ // For inverseSqrt interval, 1.0/sqrt(4.0)-2ULP=0x37FE, 1.0/sqrt(4.0)+2ULP=0x3801.
+ // For division, 1.0/0x3801=1.9980487804878048780487804878049 rounded to f16 0x3FFE or 0x3FFF,
+ // 1.0/0x37FE=2.001955034213098729227761485826 rounded to f16 0x4001 or 0x4002.
+ { input: 4, expected: [reinterpretU16AsF16(0x3FFE), reinterpretU16AsF16(0x4002)] }, // ~2
+ // For inverseSqrt interval, floor_f16(1.0/sqrt(100.0))-2ULP=0x2E64,
+ // ceil_f16(1.0/sqrt(100.0))+2ULP=0x2E69.
+ // For division, 1.0/0x2E69=9.9841560024374942258493264279108 rounded to f16 0x48FD or 0x48FE,
+ // 1.0/0x2E64=10.014669926650366748166259168704 rounded to f16 0x4901 or 0x4902.
+ { input: 100, expected: [reinterpretU16AsF16(0x48FD), reinterpretU16AsF16(0x4902)] }, // ~10
+ ] as ScalarToIntervalCase[],
+} as const;
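+
+// A minimal sketch of how the remaining 2.5 ULP division error is folded in by the test
+// below (assuming applyError widens each endpoint by the supplied error function), using
+// the f32 `input: 1` case above:
+//   const lo = reinterpretU32AsF32(0x3F7FFFFE);
+//   const hi = reinterpretU32AsF32(0x3F800002);
+//   const acceptance = FP.f32.toInterval([lo - 2.5 * FP.f32.oneULP(lo), hi + 2.5 * FP.f32.oneULP(hi)]);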
+
+g.test('sqrtInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+        // Cases where the input and/or result are not exactly representable
+ ...kSqrtIntervalCases[p.trait],
+
+ // Cases out of definition domain
+ { input: -1, expected: kUnboundedBounds },
+ { input: 0, expected: kUnboundedBounds },
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+
+    // The expected error interval is inherited from 1.0 / inverseSqrt(x); the 2.5 ULP for
+    // the division is handled here.
+ const error = (n: number): number => {
+ return 2.5 * trait.oneULP(n);
+ };
+
+ const expected = trait.toInterval(applyError(t.params.expected, error));
+
+ const got = trait.sqrtInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `FP.${t.params.trait}.sqrtInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// All of these are hard-coded, since the error intervals are difficult to express in a closed
+// human-readable form.
+// Some easy-looking cases like f(x = ±π) = 0 are actually quite difficult. This is because the
+// interval is calculated from the results of sin(x)/cos(x), which becomes very messy at x = ±π,
+// since π is irrational and thus has no exact representation as a float.
+//
+// Even at 0, which has a precise f32/f16 value, the results of sin(0) and cos(0) are still
+// intervals due to the inherited nature of errors, so the proper acceptance interval is
+// calculated by dividing one interval by another and applying an error function to the result.
+//
+// This complexity is why the entire interval framework was developed.
+//
+// The examples here have been manually traced to confirm the expectation values are correct.
+// prettier-ignore
+const kTanIntervalCases = {
+ f32: [
+ { input: kValue.f32.negative.pi.whole, expected: [reinterpretU64AsF64(0xbf40_02bc_9000_0000n), reinterpretU64AsF64(0x3f40_0144_f000_0000n)] }, // ~0.0
+ { input: kValue.f32.negative.pi.three_quarters, expected: [reinterpretU64AsF64(0x3fef_f4b1_3000_0000n), reinterpretU64AsF64(0x3ff0_05a9_9000_0000n)] }, // ~1.0
+ { input: kValue.f32.negative.pi.third, expected: [reinterpretU64AsF64(0xbffb_c16b_d000_0000n), reinterpretU64AsF64(0xbffb_ab8f_9000_0000n)] }, // ~-√3
+ { input: kValue.f32.negative.pi.quarter, expected: [reinterpretU64AsF64(0xbff0_05a9_b000_0000n), reinterpretU64AsF64(0xbfef_f4b1_5000_0000n)] }, // ~-1.0
+ { input: kValue.f32.negative.pi.sixth, expected: [reinterpretU64AsF64(0xbfe2_80f1_f000_0000n), reinterpretU64AsF64(0xbfe2_725e_d000_0000n)] }, // ~-1/√3
+ { input: 0, expected: [reinterpretU64AsF64(0xbf40_0200_b000_0000n), reinterpretU64AsF64(0x3f40_0200_b000_0000n)] }, // ~0.0
+ { input: kValue.f32.positive.pi.sixth, expected: [reinterpretU64AsF64(0x3fe2_725e_d000_0000n), reinterpretU64AsF64(0x3fe2_80f1_f000_0000n)] }, // ~1/√3
+ { input: kValue.f32.positive.pi.quarter, expected: [reinterpretU64AsF64(0x3fef_f4b1_5000_0000n), reinterpretU64AsF64(0x3ff0_05a9_b000_0000n)] }, // ~1.0
+ { input: kValue.f32.positive.pi.third, expected: [reinterpretU64AsF64(0x3ffb_ab8f_9000_0000n), reinterpretU64AsF64(0x3ffb_c16b_d000_0000n)] }, // ~√3
+ { input: kValue.f32.positive.pi.three_quarters, expected: [reinterpretU64AsF64(0xbff0_05a9_9000_0000n), reinterpretU64AsF64(0xbfef_f4b1_3000_0000n)] }, // ~-1.0
+ { input: kValue.f32.positive.pi.whole, expected: [reinterpretU64AsF64(0xbf40_0144_f000_0000n), reinterpretU64AsF64(0x3f40_02bc_9000_0000n)] }, // ~0.0
+ ] as ScalarToIntervalCase[],
+ f16: [
+ { input: kValue.f16.negative.pi.whole, expected: [reinterpretU64AsF64(0xbf7c_5600_0000_0000n), reinterpretU64AsF64(0x3f82_2e00_0000_0000n)] }, // ~0.0
+ { input: kValue.f16.negative.pi.three_quarters, expected: [reinterpretU64AsF64(0x3fef_4600_0000_0000n), reinterpretU64AsF64(0x3ff0_7200_0000_0000n)] }, // ~1.0
+ { input: kValue.f16.negative.pi.third, expected: [reinterpretU64AsF64(0xbffc_7600_0000_0000n), reinterpretU64AsF64(0xbffa_f600_0000_0000n)] }, // ~-√3
+ { input: kValue.f16.negative.pi.quarter, expected: [reinterpretU64AsF64(0xbff0_6600_0000_0000n), reinterpretU64AsF64(0xbfef_3600_0000_0000n)] }, // ~-1.0
+ { input: kValue.f16.negative.pi.sixth, expected: [reinterpretU64AsF64(0xbfe2_fe00_0000_0000n), reinterpretU64AsF64(0xbfe1_f600_0000_0000n)] }, // ~-1/√3
+ { input: 0, expected: [reinterpretU64AsF64(0xbf80_2e00_0000_0000n), reinterpretU64AsF64(0x3f80_2e00_0000_0000n)] }, // ~0.0
+ { input: kValue.f16.positive.pi.sixth, expected: [reinterpretU64AsF64(0x3fe1_f600_0000_0000n), reinterpretU64AsF64(0x3fe2_fe00_0000_0000n)] }, // ~1/√3
+ { input: kValue.f16.positive.pi.quarter, expected: [reinterpretU64AsF64(0x3fef_3600_0000_0000n), reinterpretU64AsF64(0x3ff0_6600_0000_0000n)] }, // ~1.0
+ { input: kValue.f16.positive.pi.third, expected: [reinterpretU64AsF64(0x3ffa_f600_0000_0000n), reinterpretU64AsF64(0x3ffc_7600_0000_0000n)] }, // ~√3
+ { input: kValue.f16.positive.pi.three_quarters, expected: [reinterpretU64AsF64(0xbff0_7200_0000_0000n), reinterpretU64AsF64(0xbfef_4600_0000_0000n)] }, // ~-1.0
+ { input: kValue.f16.positive.pi.whole, expected: [reinterpretU64AsF64(0xbf82_2e00_0000_0000n), reinterpretU64AsF64(0x3f7c_5600_0000_0000n)] }, // ~0.0
+ ] as ScalarToIntervalCase[],
+} as const;
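+
+// An illustrative sketch of the derivation described above, not necessarily how tanInterval
+// is implemented internally (cosInterval is assumed here as the counterpart of sinInterval):
+// the acceptance interval at x can be thought of as the division of the sin and cos
+// intervals, with the division error applied on top, e.g. for f32 at x = 0:
+//   const sin0 = FP.f32.sinInterval(0); // a small interval around 0
+//   const cos0 = FP.f32.cosInterval(0); // a small interval around 1
+//   // tanInterval(0) then lies within an interval derived from dividing sin0 by cos0.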
+
+g.test('tanInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ ...kTanIntervalCases[p.trait],
+
+        // Cases that result in an unbounded interval.
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ { input: constants.negative.min, expected: kUnboundedBounds },
+ { input: constants.negative.pi.half, expected: kUnboundedBounds },
+ { input: constants.positive.pi.half, expected: kUnboundedBounds },
+ { input: constants.positive.max, expected: kUnboundedBounds },
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.tanInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.tanInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// Some of these are hard coded, since the error intervals are difficult to express in a closed
+// human-readable form due to the inherited nature of the errors.
+// prettier-ignore
+const kTanhIntervalCases = {
+ f32: [
+ { input: -1, expected: [reinterpretU64AsF64(0xbfe8_5efd_1000_0000n), reinterpretU64AsF64(0xbfe8_5ef8_9000_0000n)] }, // ~-0.7615...
+ { input: 0, expected: [reinterpretU64AsF64(0xbe8c_0000_b000_0000n), reinterpretU64AsF64(0x3e8c_0000_b000_0000n)] }, // ~0
+ { input: 1, expected: [reinterpretU64AsF64(0x3fe8_5ef8_9000_0000n), reinterpretU64AsF64(0x3fe8_5efd_1000_0000n)] }, // ~0.7615...
+ ] as ScalarToIntervalCase[],
+ f16: [
+ { input: -1, expected: [reinterpretU64AsF64(0xbfe8_9600_0000_0000n), reinterpretU64AsF64(0xbfe8_2e00_0000_0000n)] }, // ~-0.7615...
+ { input: 0, expected: [reinterpretU64AsF64(0xbf48_0e00_0000_0000n), reinterpretU64AsF64(0x3f48_0e00_0000_0000n)] }, // ~0
+ { input: 1, expected: [reinterpretU64AsF64(0x3fe8_2e00_0000_0000n), reinterpretU64AsF64(0x3fe8_9600_0000_0000n)] }, // ~0.7615...
+ ] as ScalarToIntervalCase[],
+} as const;
+
+g.test('tanhInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ ...kTanhIntervalCases[p.trait],
+
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ { input: constants.negative.min, expected: kUnboundedBounds },
+ { input: constants.positive.max, expected: kUnboundedBounds },
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.tanhInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.tanhInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+g.test('truncInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ // Normals
+ { input: 0, expected: 0 },
+ { input: 0.1, expected: 0 },
+ { input: 0.9, expected: 0 },
+ { input: 1.0, expected: 1 },
+ { input: 1.1, expected: 1 },
+ { input: 1.9, expected: 1 },
+ { input: -0.1, expected: 0 },
+ { input: -0.9, expected: 0 },
+ { input: -1.0, expected: -1 },
+ { input: -1.1, expected: -1 },
+ { input: -1.9, expected: -1 },
+
+ // Subnormals
+ { input: constants.positive.subnormal.max, expected: 0 },
+ { input: constants.positive.subnormal.min, expected: 0 },
+ { input: constants.negative.subnormal.min, expected: 0 },
+ { input: constants.negative.subnormal.max, expected: 0 },
+
+ // Edge cases
+ { input: constants.positive.infinity, expected: kUnboundedBounds },
+ { input: constants.negative.infinity, expected: kUnboundedBounds },
+ { input: constants.positive.max, expected: constants.positive.max },
+ { input: constants.positive.min, expected: 0 },
+ { input: constants.negative.min, expected: constants.negative.min },
+ { input: constants.negative.max, expected: 0 },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.truncInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `FP.${t.params.trait}.truncInterval(${t.params.input}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+interface ScalarPairToIntervalCase {
+ // input is a pair of independent values, not a range, so should not be
+ // converted to a FPInterval.
+ input: [number, number];
+ expected: number | IntervalBounds;
+}
+
+// prettier-ignore
+const kAdditionInterval64BitsNormalCases = {
+ f32: [
+ // 0.1 falls between f32 0x3DCCCCCC and 0x3DCCCCCD, -0.1 falls between f32 0xBDCCCCCD and 0xBDCCCCCC
+ // f32 0x3DCCCCCC+0x3DCCCCCC=0x3E4CCCCC, 0x3DCCCCCD+0x3DCCCCCD=0x3E4CCCCD
+ { input: [0.1, 0.1], expected: [reinterpretU32AsF32(0x3e4ccccc), reinterpretU32AsF32(0x3e4ccccd)] }, // ~0.2
+    // f32 0xBDCCCCCD+0xBDCCCCCD=0xBE4CCCCD, 0xBDCCCCCC+0xBDCCCCCC=0xBE4CCCCC
+ { input: [-0.1, -0.1], expected: [reinterpretU32AsF32(0xbe4ccccd), reinterpretU32AsF32(0xbe4ccccc)] }, // ~-0.2
+ // 0.1+(-0.1) expect f32 interval [0x3DCCCCCC+0xBDCCCCCD, 0x3DCCCCCD+0xBDCCCCCC]
+ { input: [0.1, -0.1], expected: [reinterpretU32AsF32(0x3dcccccc)+reinterpretU32AsF32(0xbdcccccd), reinterpretU32AsF32(0x3dcccccd)+reinterpretU32AsF32(0xbdcccccc)] }, // ~0.0
+ // -0.1+0.1 expect f32 interval [0xBDCCCCCD+0x3DCCCCCC, 0xBDCCCCCC+0x3DCCCCCD]
+ { input: [-0.1, 0.1], expected: [reinterpretU32AsF32(0xbdcccccd)+reinterpretU32AsF32(0x3dcccccc), reinterpretU32AsF32(0xbdcccccc)+reinterpretU32AsF32(0x3dcccccd)] }, // ~0.0
+ ] as ScalarPairToIntervalCase[],
+ f16: [
+ // 0.1 falls between f16 0x2E66 and 0x2E67, -0.1 falls between f16 0xAE67 and 0xAE66
+ // f16 0x2E66+0x2E66=0x3266, 0x2E67+0x2E67=0x3267
+ { input: [0.1, 0.1], expected: [reinterpretU16AsF16(0x3266), reinterpretU16AsF16(0x3267)] }, // ~0.2
+ // f16 0xAE67+0xAE67=0xB267, 0xAE66+0xAE66=0xB266
+ { input: [-0.1, -0.1], expected: [reinterpretU16AsF16(0xb267), reinterpretU16AsF16(0xb266)] }, // ~-0.2
+ // 0.1+(-0.1) expect f16 interval [0x2E66+0xAE67, 0x2E67+0xAE66]
+ { input: [0.1, -0.1], expected: [reinterpretU16AsF16(0x2e66)+reinterpretU16AsF16(0xae67), reinterpretU16AsF16(0x2e67)+reinterpretU16AsF16(0xae66)] }, // ~0.0
+ // -0.1+0.1 expect f16 interval [0xAE67+0x2E66, 0xAE66+0x2E67]
+ { input: [-0.1, 0.1], expected: [reinterpretU16AsF16(0xae67)+reinterpretU16AsF16(0x2e66), reinterpretU16AsF16(0xae66)+reinterpretU16AsF16(0x2e67)] }, // ~0.0
+ ] as ScalarPairToIntervalCase[],
+ abstract: [
+    // 0.1 isn't exactly representable in f64, but is quantized to an exact
+    // value when stored in a 'number' (0x3FB999999999999A).
+    // This is why the expectations below are not intervals.
+ // f64 0x3FB999999999999A+0x3FB999999999999A = 0x3FC999999999999A
+ { input: [0.1, 0.1], expected: reinterpretU64AsF64(0x3FC999999999999An) }, // ~0.2
+ // f64 0xBFB999999999999A+0xBFB999999999999A = 0xBFC999999999999A
+ { input: [-0.1, -0.1], expected: reinterpretU64AsF64(0xBFC999999999999An) }, // ~-0.2
+ { input: [0.1, -0.1], expected: 0 },
+ { input: [-0.1, 0.1], expected: 0 },
+ ] as ScalarPairToIntervalCase[],
+} as const;
+
+g.test('additionInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarPairToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ // Representable normals
+ { input: [0, 0], expected: 0 },
+ { input: [1, 0], expected: 1 },
+ { input: [0, 1], expected: 1 },
+ { input: [-1, 0], expected: -1 },
+ { input: [0, -1], expected: -1 },
+ { input: [1, 1], expected: 2 },
+ { input: [1, -1], expected: 0 },
+ { input: [-1, 1], expected: 0 },
+ { input: [-1, -1], expected: -2 },
+
+ // 0.1 should be correctly rounded
+ { input: [0.1, 0], expected: kConstantCorrectlyRoundedExpectation[p.trait]['0.1'] },
+ { input: [0, 0.1], expected: kConstantCorrectlyRoundedExpectation[p.trait]['0.1'] },
+ // -0.1 should be correctly rounded
+ { input: [-0.1, 0], expected: kConstantCorrectlyRoundedExpectation[p.trait]['-0.1'] },
+ { input: [0, -0.1], expected: kConstantCorrectlyRoundedExpectation[p.trait]['-0.1'] },
+
+        // 64-bit normals that cannot be exactly represented
+ ...kAdditionInterval64BitsNormalCases[p.trait],
+
+ // Subnormals
+ { input: [constants.positive.subnormal.max, 0], expected: [0, constants.positive.subnormal.max] },
+ { input: [0, constants.positive.subnormal.max], expected: [0, constants.positive.subnormal.max] },
+ { input: [constants.positive.subnormal.min, 0], expected: [0, constants.positive.subnormal.min] },
+ { input: [0, constants.positive.subnormal.min], expected: [0, constants.positive.subnormal.min] },
+ { input: [constants.negative.subnormal.max, 0], expected: [constants.negative.subnormal.max, 0] },
+ { input: [0, constants.negative.subnormal.max], expected: [constants.negative.subnormal.max, 0] },
+ { input: [constants.negative.subnormal.min, 0], expected: [constants.negative.subnormal.min, 0] },
+ { input: [0, constants.negative.subnormal.min], expected: [constants.negative.subnormal.min, 0] },
+
+ // Infinities
+ { input: [0, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [constants.positive.infinity, 0], expected: kUnboundedBounds },
+ { input: [constants.positive.infinity, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [0, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, 0], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [constants.positive.infinity, constants.negative.infinity], expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const [x, y] = t.params.input;
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.additionInterval(x, y);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.additionInterval(${x}, ${y}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// Cases for Atan2Interval. The positive x & y quadrant is tested in more detail, and the other
+// quadrants are spot-checked to confirm the values point in the right direction.
+// Note: atan2's parameters are ordered (y, x), not (x, y).
+// prettier-ignore
+const kAtan2IntervalCases = {
+  // atan2 has a 4096 ULP error boundary for f32.
+ f32: [
+ // positive y, positive x
+ // √3 rounded to f32 0x3FDDB3D7, atan2(1, 0x3FDDB3D7)=0.52359877749051820266056630237827 ~ pi/6 rounded to f32 0x3F060A91 or 0x3F060A92,
+ // kValue.f32.positive.pi.sixth is 0x3F060A92.
+ { input: [1, reinterpretU32AsF32(0x3fddb3d7)], expected: [kMinusNULPFunctions['f32'](kValue.f32.positive.pi.sixth, 4097), kPlusNULPFunctions['f32'](kValue.f32.positive.pi.sixth, 4096)] },
+ // atan2(1, 1)=0.78539816339744830961566084581988 ~ pi/4 rounded to f32 0x3F490FDA or 0x3F490FDB,
+ // kValue.f32.positive.pi.quarter is 0x3F490FDB.
+ { input: [1, 1], expected: [kMinusNULPFunctions['f32'](kValue.f32.positive.pi.quarter, 4097), kPlusNULPFunctions['f32'](kValue.f32.positive.pi.quarter, 4096)] },
+ // √3 rounded to f32 0x3FDDB3D7, atan2(0x3FDDB3D7, 1) = 1.0471975493043784165707553892615 ~ pi/3 rounded to f32 0x3F860A91 or 0x3F860A92,
+ // kValue.f32.positive.pi.third is 0x3F860A92.
+ { input: [reinterpretU32AsF32(0x3fddb3d7), 1], expected: [kMinusNULPFunctions['f32'](kValue.f32.positive.pi.third, 4097), kPlusNULPFunctions['f32'](kValue.f32.positive.pi.third, 4096)] },
+
+ // positive y, negative x
+ // atan2(1, -1)=pi*3/4=2.3561944901923449288469825374591 rounded to f32 0x4016CBE3 or 0x4016CBE4,
+ // kValue.f32.positive.pi.three_quarters is 0x4016CBE4.
+ { input: [1, -1], expected: [kMinusNULPFunctions['f32'](kValue.f32.positive.pi.three_quarters, 4097), kPlusNULPFunctions['f32'](kValue.f32.positive.pi.three_quarters, 4096)] },
+
+ // negative y, negative x
+ // atan2(-1, -1)=-pi*3/4=-2.3561944901923449288469825374591 rounded to f32 0xC016CBE4 or 0xC016CBE3,
+ // kValue.f32.negative.pi.three_quarters is 0xC016CBE4.
+ { input: [-1, -1], expected: [kMinusNULPFunctions['f32'](kValue.f32.negative.pi.three_quarters, 4096), kPlusNULPFunctions['f32'](kValue.f32.negative.pi.three_quarters, 4097)] },
+
+ // negative y, positive x
+ // atan2(-1, 1)=-pi/4=-0.78539816339744830961566084581988 rounded to f32 0xBF490FDB or 0xBF490FDA,
+ // kValue.f32.negative.pi.quarter is 0xBF490FDB.
+ { input: [-1, 1], expected: [kMinusNULPFunctions['f32'](kValue.f32.negative.pi.quarter, 4096), kPlusNULPFunctions['f32'](kValue.f32.negative.pi.quarter, 4097)] },
+
+    // When y/x ~ 0, test that the ULP error is applied to the result of atan2, not to the intermediate y/x value.
+ // y/x ~ 0, y<0, x<0, atan2(y,x) ~ -pi rounded to f32 0xC0490FDB or 0xC0490FDA,
+ // kValue.f32.negative.pi.whole is 0xC0490FDB.
+ {input: [kValue.f32.negative.max, -1], expected: [kMinusNULPFunctions['f32'](kValue.f32.negative.pi.whole, 4096), kPlusNULPFunctions['f32'](kValue.f32.negative.pi.whole, 4097)] },
+ // y/x ~ 0, y>0, x<0, atan2(y,x) ~ pi rounded to f32 0x40490FDA or 0x40490FDB,
+ // kValue.f32.positive.pi.whole is 0x40490FDB.
+ {input: [kValue.f32.positive.min, -1], expected: [kMinusNULPFunctions['f32'](kValue.f32.positive.pi.whole, 4097), kPlusNULPFunctions['f32'](kValue.f32.positive.pi.whole, 4096)] },
+ ] as ScalarPairToIntervalCase[],
+  // atan2 has a 5 ULP error boundary for f16.
+ f16: [
+ // positive y, positive x
+ // √3 rounded to f16 0x3EED, atan2(1, 0x3EED)=0.52375018906301191131992842392268 ~ pi/6 rounded to f16 0x3830 or 0x3831,
+ // kValue.f16.positive.pi.sixth is 0x3830.
+ { input: [1, reinterpretU16AsF16(0x3eed)], expected: [kMinusNULPFunctions['f16'](kValue.f16.positive.pi.sixth, 5), kPlusNULPFunctions['f16'](kValue.f16.positive.pi.sixth, 6)] },
+ // atan2(1, 1)=0.78539816339744830961566084581988 ~ pi/4 rounded to f16 0x3A48 or 0x3A49,
+ // kValue.f16.positive.pi.quarter is 0x3A48.
+ { input: [1, 1], expected: [kMinusNULPFunctions['f16'](kValue.f16.positive.pi.quarter, 5), kPlusNULPFunctions['f16'](kValue.f16.positive.pi.quarter, 6)] },
+ // √3 rounded to f16 0x3EED, atan2(0x3EED, 1) = 1.0470461377318847079113932677171 ~ pi/3 rounded to f16 0x3C30 or 0x3C31,
+ // kValue.f16.positive.pi.third is 0x3C30.
+ { input: [reinterpretU16AsF16(0x3eed), 1], expected: [kMinusNULPFunctions['f16'](kValue.f16.positive.pi.third, 5), kPlusNULPFunctions['f16'](kValue.f16.positive.pi.third, 6)] },
+
+ // positive y, negative x
+ // atan2(1, -1)=pi*3/4=2.3561944901923449288469825374591 rounded to f16 0x40B6 or 0x40B7,
+ // kValue.f16.positive.pi.three_quarters is 0x40B6.
+ { input: [1, -1], expected: [kMinusNULPFunctions['f16'](kValue.f16.positive.pi.three_quarters, 5), kPlusNULPFunctions['f16'](kValue.f16.positive.pi.three_quarters, 6)] },
+
+ // negative y, negative x
+ // atan2(-1, -1)=-pi*3/4=-2.3561944901923449288469825374591 rounded to f16 0xC0B7 or 0xC0B6,
+ // kValue.f16.negative.pi.three_quarters is 0xC0B6.
+ { input: [-1, -1], expected: [kMinusNULPFunctions['f16'](kValue.f16.negative.pi.three_quarters, 6), kPlusNULPFunctions['f16'](kValue.f16.negative.pi.three_quarters, 5)] },
+
+ // negative y, positive x
+ // atan2(-1, 1)=-pi/4=-0.78539816339744830961566084581988 rounded to f16 0xBA49 or 0xBA48,
+ // kValue.f16.negative.pi.quarter is 0xBA48.
+ { input: [-1, 1], expected: [kMinusNULPFunctions['f16'](kValue.f16.negative.pi.quarter, 6), kPlusNULPFunctions['f16'](kValue.f16.negative.pi.quarter, 5)] },
+
+    // When y/x ~ 0, test that the ULP error is applied to the result of atan2, not to the intermediate y/x value.
+ // y/x ~ 0, y<0, x<0, atan2(y,x) ~ -pi rounded to f16 0xC249 or 0xC248,
+ // kValue.f16.negative.pi.whole is 0xC248.
+ {input: [kValue.f16.negative.max, -1], expected: [kMinusNULPFunctions['f16'](kValue.f16.negative.pi.whole, 6), kPlusNULPFunctions['f16'](kValue.f16.negative.pi.whole, 5)] },
+ // y/x ~ 0, y>0, x<0, atan2(y,x) ~ pi rounded to f16 0x4248 or 0x4249,
+ // kValue.f16.positive.pi.whole is 0x4248.
+ {input: [kValue.f16.positive.min, -1], expected: [kMinusNULPFunctions['f16'](kValue.f16.positive.pi.whole, 5), kPlusNULPFunctions['f16'](kValue.f16.positive.pi.whole, 6)] },
+ ] as ScalarPairToIntervalCase[],
+} as const;
+
+g.test('atan2Interval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarPairToIntervalCase>(p => {
+ const constants = FP[p.trait].constants();
+ // prettier-ignore
+ return [
+ ...kAtan2IntervalCases[p.trait],
+
+        // Cases where y is out of bounds.
+ // positive y, positive x
+ { input: [Number.POSITIVE_INFINITY, 1], expected: kUnboundedBounds },
+ // positive y, negative x
+ { input: [Number.POSITIVE_INFINITY, -1], expected: kUnboundedBounds },
+ // negative y, negative x
+ { input: [Number.NEGATIVE_INFINITY, -1], expected: kUnboundedBounds },
+ // negative y, positive x
+ { input: [Number.NEGATIVE_INFINITY, 1], expected: kUnboundedBounds },
+
+ // Discontinuity @ origin (0,0)
+ { input: [0, 0], expected: kUnboundedBounds },
+ { input: [0, constants.positive.subnormal.max], expected: kUnboundedBounds },
+ { input: [0, constants.negative.subnormal.min], expected: kUnboundedBounds },
+ { input: [0, constants.positive.min], expected: kUnboundedBounds },
+ { input: [0, constants.negative.max], expected: kUnboundedBounds },
+ { input: [0, constants.positive.max], expected: kUnboundedBounds },
+ { input: [0, constants.negative.min], expected: kUnboundedBounds },
+ { input: [0, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [0, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [0, 1], expected: kUnboundedBounds },
+ { input: [constants.positive.subnormal.max, 1], expected: kUnboundedBounds },
+ { input: [constants.negative.subnormal.min, 1], expected: kUnboundedBounds },
+
+ // Very large |x| values should cause kUnboundedBounds to be returned, due to the restrictions on division
+ { input: [1, constants.positive.max], expected: kUnboundedBounds },
+ { input: [1, constants.positive.nearest_max], expected: kUnboundedBounds },
+ { input: [1, constants.negative.min], expected: kUnboundedBounds },
+ { input: [1, constants.negative.nearest_min], expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const [y, x] = t.params.input;
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.atan2Interval(y, x);
+ t.expect(
+ objectEquals(expected, got),
+      `${t.params.trait}.atan2Interval(${y}, ${x}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+g.test('distanceIntervalScalar')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarPairToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ { input: [1.0, 0], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ { input: [0.0, 1.0], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ { input: [-0.0, -1.0], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ { input: [0.0, -1.0], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ { input: [0.1, 0], expected: kRootSumSquareExpectionInterval[p.trait]['[0.1]'] }, // ~0.1
+ { input: [0, 0.1], expected: kRootSumSquareExpectionInterval[p.trait]['[0.1]'] }, // ~0.1
+ { input: [-0.1, 0], expected: kRootSumSquareExpectionInterval[p.trait]['[0.1]'] }, // ~0.1
+ { input: [0, -0.1], expected: kRootSumSquareExpectionInterval[p.trait]['[0.1]'] }, // ~0.1
+ { input: [10.0, 0], expected: kRootSumSquareExpectionInterval[p.trait]['[10]'] }, // ~10
+ { input: [0, 10.0], expected: kRootSumSquareExpectionInterval[p.trait]['[10]'] }, // ~10
+ { input: [-10.0, 0], expected: kRootSumSquareExpectionInterval[p.trait]['[10]'] }, // ~10
+ { input: [0, -10.0], expected: kRootSumSquareExpectionInterval[p.trait]['[10]'] }, // ~10
+
+          // distance(x, y), where x - y = 0, has an acceptance interval of kUnboundedBounds,
+          // because distance(x, y) = length(x - y), and length(0) = kUnboundedBounds
+          // (see the note after this test).
+ { input: [0, 0], expected: kUnboundedBounds },
+ { input: [1.0, 1.0], expected: kUnboundedBounds },
+ { input: [-1.0, -1.0], expected: kUnboundedBounds },
+
+ // Subnormal Cases
+ { input: [constants.negative.subnormal.min, 0], expected: kUnboundedBounds },
+ { input: [constants.negative.subnormal.max, 0], expected: kUnboundedBounds },
+ { input: [constants.positive.subnormal.min, 0], expected: kUnboundedBounds },
+ { input: [constants.positive.subnormal.max, 0], expected: kUnboundedBounds },
+
+ // Edge cases
+ { input: [constants.positive.infinity, 0], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, 0], expected: kUnboundedBounds },
+ { input: [constants.negative.min, 0], expected: kUnboundedBounds },
+ { input: [constants.negative.max, 0], expected: kUnboundedBounds },
+ { input: [constants.positive.min, 0], expected: kUnboundedBounds },
+ { input: [constants.positive.max, 0], expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.distanceInterval(...t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.distanceInterval(${t.params.input[0]}, ${t.params.input[1]}) returned ${got}. Expected ${expected}`
+ );
+ });
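+
+// Note on the kUnboundedBounds cases above: distance(x, y) is length(x - y), and the scalar
+// length is a root-sum-square, i.e. the sqrt of a sum of squares (hence
+// kRootSumSquareExpectionInterval). As the sqrtInterval cases show, sqrt at 0 is unbounded
+// because sqrt is inherited from 1.0 / inverseSqrt(x), so any pair with x - y = 0 (and,
+// presumably, any subnormal difference that may flush to zero) yields an unbounded
+// acceptance interval.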
+
+// prettier-ignore
+const kDivisionInterval64BitsNormalCases = {
+ f32: [
+ // Zero divided by any non-zero finite value results in zero.
+ { input: [0, 0.1], expected: 0 },
+ { input: [0, -0.1], expected: 0 },
+ // 0.1 rounded to f32 0x3DCCCCCC or 0x3DCCCCCD,
+ // 1.0/0x3DCCCCCD = 9.9999998509883902204460179966303 rounded to f32 0x411FFFFF or 0x41200000,
+ // 1.0/0x3DCCCCCC = 10.000000596046483527138934924167 rounded to f32 0x41200000 or 0x41200001.
+ { input: [1, 0.1], expected: [reinterpretU32AsF32(0x411fffff), reinterpretU32AsF32(0x41200001)] }, // ~10.0
+ // The same for -1/-0.1
+ { input: [-1, -0.1], expected: [reinterpretU32AsF32(0x411fffff), reinterpretU32AsF32(0x41200001)] }, // ~10.0
+ // -10.000000596046483527138934924167 rounded to f32 0xC1200001 or 0xC1200000,
+ // -9.9999998509883902204460179966303 rounded to f32 0xC1200000 or 0xC11FFFFF.
+ { input: [-1, 0.1], expected: [reinterpretU32AsF32(0xc1200001), reinterpretU32AsF32(0xc11fffff)] }, // ~-10.0
+ { input: [1, -0.1], expected: [reinterpretU32AsF32(0xc1200001), reinterpretU32AsF32(0xc11fffff)] }, // ~-10.0
+    // Cases where the expected interval is larger than ±1 ULP.
+ // 0.000001 rounded to f32 0x358637BD or 0x358637BE,
+ // 1.0/0x358637BE = 999999.88883793195700674522548684 rounded to f32 0x497423FE or 0x497423FF,
+ // 1.0/0x358637BD = 1000000.0025247573063743994399971 rounded to f32 0x49742400 or 0x49742401.
+ { input: [1, 0.000001], expected: [reinterpretU32AsF32(0x497423fe), reinterpretU32AsF32(0x49742401)] }, // ~1000000.0
+ { input: [1, -0.000001], expected: [reinterpretU32AsF32(0xc9742401), reinterpretU32AsF32(0xc97423fe)] }, // ~-1000000.0
+ ] as ScalarPairToIntervalCase[],
+ f16: [
+ // Zero divided by any non-zero finite value results in zero.
+ { input: [0, 0.1], expected: 0 },
+ { input: [0, -0.1], expected: 0 },
+ // 0.1 rounded to f16 0x2E66 or 0x2E67,
+ // 1.0/0x2E67 = 9.9963392312385600976205003050641 rounded to f16 0x48FF or 0x4900,
+ // 1.0/0x2E66 = 10.002442002442002442002442002442 rounded to f16 0x4900 or 0x4901.
+ { input: [1, 0.1], expected: [reinterpretU16AsF16(0x48ff), reinterpretU16AsF16(0x4901)] }, // ~10.0
+ // The same for -1/-0.1
+ { input: [-1, -0.1], expected: [reinterpretU16AsF16(0x48ff), reinterpretU16AsF16(0x4901)] }, // ~10.0
+ // -10.002442002442002442002442002442 rounded to f16 0xC901 or 0xC900,
+ // -9.9963392312385600976205003050641 rounded to f16 0xC900 or 0xC8FF.
+ { input: [-1, 0.1], expected: [reinterpretU16AsF16(0xc901), reinterpretU16AsF16(0xc8ff)] }, // ~-10.0
+ { input: [1, -0.1], expected: [reinterpretU16AsF16(0xc901), reinterpretU16AsF16(0xc8ff)] }, // ~-10.0
+    // Cases where the expected interval is larger than ±1 ULP.
+ // 0.001 rounded to f16 0x1418 or 0x1419,
+ // 1.0/0x1419 = 999.59580552907535977846384072716 rounded to f16 0x63CF or 0x63D0,
+ // 1.0/0x1418 = 1000.5496183206106870229007633588 rounded to f16 0x63D1 or 0x63D2.
+ { input: [1, 0.001], expected: [reinterpretU16AsF16(0x63cf), reinterpretU16AsF16(0x63d2)] }, // ~1000.0
+ { input: [1, -0.001], expected: [reinterpretU16AsF16(0xe3d2), reinterpretU16AsF16(0xe3cf)] }, // ~-1000.0
+ ] as ScalarPairToIntervalCase[],
+} as const;
+
+g.test('divisionInterval')
+ .params(u =>
+ u
+ .combine('trait', ['abstract', 'f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarPairToIntervalCase>(p => {
+      // This is a ULP-based interval, so abstract should behave like f32;
+      // swizzle the trait as needed.
+ const trait = p.trait === 'abstract' ? 'f32' : p.trait;
+ const fp = FP[trait];
+ const constants = fp.constants();
+ // prettier-ignore
+ return [
+ // Representable normals
+ { input: [0, 1], expected: 0 },
+ { input: [0, -1], expected: 0 },
+ { input: [1, 1], expected: 1 },
+ { input: [1, -1], expected: -1 },
+ { input: [-1, 1], expected: -1 },
+ { input: [-1, -1], expected: 1 },
+ { input: [4, 2], expected: 2 },
+ { input: [-4, 2], expected: -2 },
+ { input: [4, -2], expected: -2 },
+ { input: [-4, -2], expected: 2 },
+
+        // 64-bit normals that cannot be exactly represented
+ ...kDivisionInterval64BitsNormalCases[trait],
+
+ // Denominator out of range
+ { input: [1, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [1, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [constants.positive.infinity, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [1, constants.positive.max], expected: kUnboundedBounds },
+ { input: [1, constants.negative.min], expected: kUnboundedBounds },
+ { input: [1, 0], expected: kUnboundedBounds },
+ { input: [1, constants.positive.subnormal.max], expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+    // This is a ULP-based interval, so abstract should behave like f32;
+    // swizzle the trait as needed when calculating the expected result.
+ const trait = t.params.trait === 'abstract' ? 'f32' : t.params.trait;
+ const fp = FP[trait];
+
+ const error = (n: number): number => {
+ return 2.5 * fp.oneULP(n);
+ };
+
+ const [x, y] = t.params.input;
+
+ // Do not swizzle here, so the correct implementation under test is called.
+ const expected = FP[t.params.trait].toInterval(applyError(t.params.expected, error));
+ const got = FP[t.params.trait].divisionInterval(x, y);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.divisionInterval(${x}, ${y}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+const kLdexpIntervalCases = {
+ f32: [
+ // 64-bit normals
+ { input: [1.0000000001, 1], expected: [2, kPlusNULPFunctions['f32'](2, 2)] }, // ~2, additional ULP error due to first param not being f32 precise
+ { input: [-1.0000000001, 1], expected: [kMinusNULPFunctions['f32'](-2, 2), -2] }, // ~-2, additional ULP error due to first param not being f32 precise
+ // Edge Cases
+ // f32 0b0_01111111_11111111111111111111111 = 1.9999998807907104,
+ // 1.9999998807907104 * 2 ** 127 = f32.positive.max
+ { input: [1.9999998807907104, 127], expected: kValue.f32.positive.max },
+ // f32.positive.min = 1 * 2 ** -126
+ { input: [1, -126], expected: kValue.f32.positive.min },
+ // f32.positive.subnormal.max = 0.9999998807907104 * 2 ** -126
+ { input: [0.9999998807907104, -126], expected: [0, kValue.f32.positive.subnormal.max] },
+ // f32.positive.subnormal.min = 1.1920928955078125e-07 * 2 ** -126
+ { input: [1.1920928955078125e-7, -126], expected: [0, kValue.f32.positive.subnormal.min] },
+ { input: [-1.1920928955078125e-7, -126], expected: [kValue.f32.negative.subnormal.max, 0] },
+ { input: [-0.9999998807907104, -126], expected: [kValue.f32.negative.subnormal.min, 0] },
+ { input: [-1, -126], expected: kValue.f32.negative.max },
+ { input: [-1.9999998807907104, 127], expected: kValue.f32.negative.min },
+    // When e2 + bias <= 0, expect correctly rounded intervals (see the worked note after this table).
+ { input: [2 ** 120, -130], expected: 2 ** -10 },
+ // Out of Bounds
+ { input: [1, 128], expected: kUnboundedBounds },
+ { input: [-1, 128], expected: kUnboundedBounds },
+ { input: [100, 126], expected: kUnboundedBounds },
+ { input: [-100, 126], expected: kUnboundedBounds },
+ { input: [2 ** 100, 100], expected: kUnboundedBounds },
+ ] as ScalarPairToIntervalCase[],
+ f16: [
+ // 64-bit normals
+ { input: [1.0000000001, 1], expected: [2, kPlusNULPFunctions['f16'](2, 2)] }, // ~2, additional ULP error due to first param not being f16 precise
+ { input: [-1.0000000001, 1], expected: [kMinusNULPFunctions['f16'](-2, 2), -2] }, // ~-2, additional ULP error due to first param not being f16 precise
+ // Edge Cases
+ // f16 0b0_01111_1111111111 = 1.9990234375, 1.9990234375 * 2 ** 15 = f16.positive.max
+ { input: [1.9990234375, 15], expected: kValue.f16.positive.max },
+ // f16.positive.min = 1 * 2 ** -14
+ { input: [1, -14], expected: kValue.f16.positive.min },
+ // f16.positive.subnormal.max = 0.9990234375 * 2 ** -14
+ { input: [0.9990234375, -14], expected: [0, kValue.f16.positive.subnormal.max] },
+ // f16.positive.subnormal.min = 1 * 2 ** -10 * 2 ** -14 = 0.0009765625 * 2 ** -14
+ { input: [0.0009765625, -14], expected: [0, kValue.f16.positive.subnormal.min] },
+ { input: [-0.0009765625, -14], expected: [kValue.f16.negative.subnormal.max, 0] },
+ { input: [-0.9990234375, -14], expected: [kValue.f16.negative.subnormal.min, 0] },
+ { input: [-1, -14], expected: kValue.f16.negative.max },
+ { input: [-1.9990234375, 15], expected: kValue.f16.negative.min },
+ // e2 + bias <= 0, expect correctly rounded intervals.
+ { input: [2 ** 12, -18], expected: 2 ** -6 },
+ // Out of Bounds
+ { input: [1, 16], expected: kUnboundedBounds },
+ { input: [-1, 16], expected: kUnboundedBounds },
+ { input: [100, 14], expected: kUnboundedBounds },
+ { input: [-100, 14], expected: kUnboundedBounds },
+ { input: [2 ** 10, 10], expected: kUnboundedBounds },
+ ] as ScalarPairToIntervalCase[],
+} as const;
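+
+// A worked note for the `e2 + bias <= 0` cases above, assuming the standard exponent biases
+// of 127 for f32 and 15 for f16:
+//   f32: e2 + bias = -130 + 127 = -3 <= 0, and 2 ** 120 * 2 ** -130 = 2 ** -10 is exactly
+//        representable, so the expectation is the point value 2 ** -10.
+//   f16: e2 + bias = -18 + 15 = -3 <= 0, and 2 ** 12 * 2 ** -18 = 2 ** -6 is exactly
+//        representable, so the expectation is the point value 2 ** -6.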
+
+g.test('ldexpInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarPairToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+        // Always exactly representable cases
+ { input: [0, 0], expected: 0 },
+ { input: [0, 1], expected: 0 },
+ { input: [0, -1], expected: 0 },
+ { input: [1, 1], expected: 2 },
+ { input: [1, -1], expected: 0.5 },
+ { input: [-1, 1], expected: -2 },
+ { input: [-1, -1], expected: -0.5 },
+
+ ...kLdexpIntervalCases[p.trait],
+
+        // Extremely negative e2: any float value should be scaled to 0.0 as the ground truth,
+        // since f64 e1 * 2 ** e2 would be 0.0 for e2 = -2147483648.
+ { input: [constants.positive.max, kValue.i32.negative.min], expected: 0 },
+ { input: [constants.negative.min, kValue.i32.negative.min], expected: 0 },
+ // Out of Bounds
+ { input: [constants.positive.max, kValue.i32.positive.max], expected: kUnboundedBounds },
+ { input: [constants.negative.min, kValue.i32.positive.max], expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const [x, y] = t.params.input;
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.ldexpInterval(x, y);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.ldexpInterval(${x}, ${y}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+g.test('maxInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarPairToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ // Representable normals
+ { input: [0, 0], expected: 0 },
+ { input: [1, 0], expected: 1 },
+ { input: [0, 1], expected: 1 },
+ { input: [-1, 0], expected: 0 },
+ { input: [0, -1], expected: 0 },
+ { input: [1, 1], expected: 1 },
+ { input: [1, -1], expected: 1 },
+ { input: [-1, 1], expected: 1 },
+ { input: [-1, -1], expected: -1 },
+
+ // 0.1 and -0.1 should be correctly rounded
+ { input: [-0.1, 0], expected: 0 },
+ { input: [0, -0.1], expected: 0 },
+ { input: [0.1, 0], expected: kConstantCorrectlyRoundedExpectation[p.trait]['0.1'] }, // ~0.1
+ { input: [0, 0.1], expected: kConstantCorrectlyRoundedExpectation[p.trait]['0.1'] }, // ~0.1
+ { input: [0.1, 0.1], expected: kConstantCorrectlyRoundedExpectation[p.trait]['0.1'] }, // ~0.1
+ { input: [0.1, -0.1], expected: kConstantCorrectlyRoundedExpectation[p.trait]['0.1'] }, // ~0.1
+ { input: [-0.1, 0.1], expected: kConstantCorrectlyRoundedExpectation[p.trait]['0.1'] }, // ~0.1
+ { input: [-0.1, -0.1], expected: kConstantCorrectlyRoundedExpectation[p.trait]['-0.1'] }, // ~-0.1
+
+ // Representable subnormals
+ { input: [constants.positive.subnormal.max, 0], expected: [0, constants.positive.subnormal.max] },
+ { input: [0, constants.positive.subnormal.max], expected: [0, constants.positive.subnormal.max] },
+ { input: [constants.positive.subnormal.min, 0], expected: [0, constants.positive.subnormal.min] },
+ { input: [0, constants.positive.subnormal.min], expected: [0, constants.positive.subnormal.min] },
+ { input: [constants.negative.subnormal.max, 0], expected: [constants.negative.subnormal.max, 0] },
+ { input: [0, constants.negative.subnormal.max], expected: [constants.negative.subnormal.max, 0] },
+ { input: [constants.negative.subnormal.min, 0], expected: [constants.negative.subnormal.min, 0] },
+ { input: [0, constants.negative.subnormal.min], expected: [constants.negative.subnormal.min, 0] },
+ { input: [1, constants.positive.subnormal.max], expected: 1 },
+ { input: [constants.negative.subnormal.min, constants.positive.subnormal.max], expected: [constants.negative.subnormal.min, constants.positive.subnormal.max] },
+
+ // Infinities
+ { input: [0, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [constants.positive.infinity, 0], expected: kUnboundedBounds },
+ { input: [constants.positive.infinity, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [0, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, 0], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [constants.positive.infinity, constants.negative.infinity], expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const [x, y] = t.params.input;
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.maxInterval(x, y);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.maxInterval(${x}, ${y}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+g.test('minInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarPairToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ // Representable normals
+ { input: [0, 0], expected: 0 },
+ { input: [1, 0], expected: 0 },
+ { input: [0, 1], expected: 0 },
+ { input: [-1, 0], expected: -1 },
+ { input: [0, -1], expected: -1 },
+ { input: [1, 1], expected: 1 },
+ { input: [1, -1], expected: -1 },
+ { input: [-1, 1], expected: -1 },
+ { input: [-1, -1], expected: -1 },
+
+          // 64-bit normals that are not exactly representable
+ { input: [0.1, 0], expected: 0 },
+ { input: [0, 0.1], expected: 0 },
+ { input: [-0.1, 0], expected: kConstantCorrectlyRoundedExpectation[p.trait]['-0.1'] }, // ~-0.1
+ { input: [0, -0.1], expected: kConstantCorrectlyRoundedExpectation[p.trait]['-0.1'] }, // ~-0.1
+ { input: [0.1, 0.1], expected: kConstantCorrectlyRoundedExpectation[p.trait]['0.1'] }, // ~0.1
+ { input: [0.1, -0.1], expected: kConstantCorrectlyRoundedExpectation[p.trait]['-0.1'] }, // ~-0.1
+ { input: [-0.1, 0.1], expected: kConstantCorrectlyRoundedExpectation[p.trait]['-0.1'] }, // ~-0.1
+ { input: [-0.1, -0.1], expected: kConstantCorrectlyRoundedExpectation[p.trait]['-0.1'] }, // ~-0.1
+
+ // Representable subnormals
+ { input: [constants.positive.subnormal.max, 0], expected: [0, constants.positive.subnormal.max] },
+ { input: [0, constants.positive.subnormal.max], expected: [0, constants.positive.subnormal.max] },
+ { input: [constants.positive.subnormal.min, 0], expected: [0, constants.positive.subnormal.min] },
+ { input: [0, constants.positive.subnormal.min], expected: [0, constants.positive.subnormal.min] },
+ { input: [constants.negative.subnormal.max, 0], expected: [constants.negative.subnormal.max, 0] },
+ { input: [0, constants.negative.subnormal.max], expected: [constants.negative.subnormal.max, 0] },
+ { input: [constants.negative.subnormal.min, 0], expected: [constants.negative.subnormal.min, 0] },
+ { input: [0, constants.negative.subnormal.min], expected: [constants.negative.subnormal.min, 0] },
+ { input: [-1, constants.positive.subnormal.max], expected: -1 },
+ { input: [constants.negative.subnormal.min, constants.positive.subnormal.max], expected: [constants.negative.subnormal.min, constants.positive.subnormal.max] },
+
+ // Infinities
+ { input: [0, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [constants.positive.infinity, 0], expected: kUnboundedBounds },
+ { input: [constants.positive.infinity, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [0, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, 0], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [constants.positive.infinity, constants.negative.infinity], expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const [x, y] = t.params.input;
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.minInterval(x, y);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.minInterval(${x}, ${y}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// prettier-ignore
+const kMultiplicationInterval64BitsNormalCases = {
+ f32: [
+ // 0.1*0.1, 0.1 falls between f32 0x3DCCCCCC and 0x3DCCCCCD,
+ // min result 0x3DCCCCCC*0x3DCCCCCC=0.00999999880790713952713681734167 rounded to f32 0x3C23D708 or 0x3C23D709,
+ // max result 0x3DCCCCCD*0x3DCCCCCD=0.01000000029802322622044605108385 rounded to f32 0x3C23D70A or 0x3C23D70B.
+ { input: [0.1, 0.1], expected: [reinterpretU32AsF32(0x3c23d708), reinterpretU32AsF32(0x3c23d70b)] }, // ~0.01
+ { input: [-0.1, -0.1], expected: [reinterpretU32AsF32(0x3c23d708), reinterpretU32AsF32(0x3c23d70b)] }, // ~0.01
+ // -0.01000000029802322622044605108385 rounded to f32 0xBC23D70B or 0xBC23D70A,
+ // -0.00999999880790713952713681734167 rounded to f32 0xBC23D709 or 0xBC23D708.
+ { input: [0.1, -0.1], expected: [reinterpretU32AsF32(0xbc23d70b), reinterpretU32AsF32(0xbc23d708)] }, // ~-0.01
+ { input: [-0.1, 0.1], expected: [reinterpretU32AsF32(0xbc23d70b), reinterpretU32AsF32(0xbc23d708)] }, // ~-0.01
+ ] as ScalarPairToIntervalCase[],
+ f16: [
+ // 0.1*0.1, 0.1 falls between f16 0x2E66 and 0x2E67,
+ // min result 0x2E66*0x2E66=0.00999511778354644775390625 rounded to f16 0x211E or 0x211F,
+ // max result 0x2E67*0x2E67=0.0100073255598545074462890625 rounded to f16 0x211F or 0x2120.
+ { input: [0.1, 0.1], expected: [reinterpretU16AsF16(0x211e), reinterpretU16AsF16(0x2120)] }, // ~0.01
+ { input: [-0.1, -0.1], expected: [reinterpretU16AsF16(0x211e), reinterpretU16AsF16(0x2120)] }, // ~0.01
+ // -0.0100073255598545074462890625 rounded to f16 0xA120 or 0xA11F,
+ // -0.00999511778354644775390625 rounded to f16 0xA11F or 0xA11E.
+ { input: [0.1, -0.1], expected: [reinterpretU16AsF16(0xa120), reinterpretU16AsF16(0xa11e)] }, // ~-0.01
+ { input: [-0.1, 0.1], expected: [reinterpretU16AsF16(0xa120), reinterpretU16AsF16(0xa11e)] }, // ~-0.01
+ ] as ScalarPairToIntervalCase[],
+ abstract: [
+    // 0.1 isn't exactly representable in f64, but it is quantized to an exact
+    // value when stored in a 'number' (0x3FB999999999999A).
+    // This is why the expectations below are not intervals.
+ // f64 0.1 * 0.1 = 0x3f847ae147ae147c,
+ { input: [0.1, 0.1], expected: reinterpretU64AsF64(0x3f847ae147ae147cn) }, // ~0.01
+ { input: [-0.1, -0.1], expected: reinterpretU64AsF64(0x3f847ae147ae147cn) }, // ~0.01
+ { input: [0.1, -0.1], expected: reinterpretU64AsF64(0xbf847ae147ae147cn) }, // ~-0.01
+ { input: [-0.1, 0.1], expected: reinterpretU64AsF64(0xbf847ae147ae147cn) }, // ~-0.01
+ ] as ScalarPairToIntervalCase[],
+} as const;
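+
+// Illustrative sketch, not part of the CTS: reproducing the f32 bounds for
+// 0.1 * 0.1 above with plain JS. The two nearest f32 values to 0.1 come from
+// Math.fround plus a bit-level predecessor, and the acceptance interval spans
+// the round-down of the smaller product to the round-up of the larger one.
+// The helpers below are local to this sketch and assume positive inputs.
+{
+  const storage = new Float32Array(1);
+  const storageBits = new Uint32Array(storage.buffer);
+  const toBits = (x: number): number => { storage[0] = x; return storageBits[0]; };
+  const fromBits = (b: number): number => { storageBits[0] = b; return storage[0]; };
+  const roundDownF32 = (x: number): number => {
+    const q = Math.fround(x);
+    return q <= x ? q : fromBits(toBits(q) - 1); // positive x only
+  };
+  const roundUpF32 = (x: number): number => {
+    const q = Math.fround(x);
+    return q >= x ? q : fromBits(toBits(q) + 1); // positive x only
+  };
+  const above = Math.fround(0.1);            // 0x3DCCCCCD, just above 0.1
+  const below = fromBits(toBits(above) - 1); // 0x3DCCCCCC, just below 0.1
+  // Matches [reinterpretU32AsF32(0x3c23d708), reinterpretU32AsF32(0x3c23d70b)] above.
+  const bounds = [roundDownF32(below * below), roundUpF32(above * above)];
+}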
+
+g.test('multiplicationInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarPairToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ // Representable normals
+ { input: [0, 0], expected: 0 },
+ { input: [1, 0], expected: 0 },
+ { input: [0, 1], expected: 0 },
+ { input: [-1, 0], expected: 0 },
+ { input: [0, -1], expected: 0 },
+ { input: [1, 1], expected: 1 },
+ { input: [1, -1], expected: -1 },
+ { input: [-1, 1], expected: -1 },
+ { input: [-1, -1], expected: 1 },
+ { input: [2, 1], expected: 2 },
+ { input: [1, -2], expected: -2 },
+ { input: [-2, 1], expected: -2 },
+ { input: [-2, -1], expected: 2 },
+ { input: [2, 2], expected: 4 },
+ { input: [2, -2], expected: -4 },
+ { input: [-2, 2], expected: -4 },
+ { input: [-2, -2], expected: 4 },
+
+          // 64-bit normals that cannot be exactly represented
+          // Any finite value multiplied by zero results in zero
+ { input: [0.1, 0], expected: 0 },
+ { input: [0, 0.1], expected: 0 },
+ { input: [-0.1, 0], expected: 0 },
+ { input: [0, -0.1], expected: 0 },
+          // Finite values multiplied by +/-1.0
+ { input: [0.1, 1], expected: kConstantCorrectlyRoundedExpectation[p.trait]['0.1'] },
+ { input: [-1, -0.1], expected: kConstantCorrectlyRoundedExpectation[p.trait]['0.1'] },
+ { input: [-0.1, 1], expected: kConstantCorrectlyRoundedExpectation[p.trait]['-0.1'] },
+ { input: [-1, 0.1], expected: kConstantCorrectlyRoundedExpectation[p.trait]['-0.1'] },
+ // Other cases
+ ...kMultiplicationInterval64BitsNormalCases[p.trait],
+
+ // Infinities
+ { input: [0, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [1, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [-1, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [constants.positive.infinity, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [0, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [1, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [-1, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [constants.positive.infinity, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, constants.positive.infinity], expected: kUnboundedBounds },
+
+ // Edges
+ { input: [constants.positive.max, constants.positive.max], expected: kUnboundedBounds },
+ { input: [constants.negative.min, constants.negative.min], expected: kUnboundedBounds },
+ { input: [constants.positive.max, constants.negative.min], expected: kUnboundedBounds },
+ { input: [constants.negative.min, constants.positive.max], expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const [x, y] = t.params.input;
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.multiplicationInterval(x, y);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.multiplicationInterval(${x}, ${y}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// Some of these are hard coded, since the error intervals are difficult to express in a closed
+// human-readable form due to the inherited nature of the errors.
+// prettier-ignore
+const kPowIntervalCases = {
+ f32 : [
+ { input: [1, 0], expected: [kMinusNULPFunctions['f32'](1, 3), reinterpretU64AsF64(0x3ff0_0000_3000_0000n)] }, // ~1
+ { input: [2, 0], expected: [kMinusNULPFunctions['f32'](1, 3), reinterpretU64AsF64(0x3ff0_0000_3000_0000n)] }, // ~1
+ { input: [kValue.f32.positive.max, 0], expected: [kMinusNULPFunctions['f32'](1, 3), reinterpretU64AsF64(0x3ff0_0000_3000_0000n)] }, // ~1
+ { input: [1, 1], expected: [reinterpretU64AsF64(0x3fef_fffe_dfff_fe00n), reinterpretU64AsF64(0x3ff0_0000_c000_0200n)] }, // ~1
+ { input: [1, 100], expected: [reinterpretU64AsF64(0x3fef_ffba_3fff_3800n), reinterpretU64AsF64(0x3ff0_0023_2000_c800n)] }, // ~1
+ { input: [2, 1], expected: [reinterpretU64AsF64(0x3fff_fffe_a000_0200n), reinterpretU64AsF64(0x4000_0001_0000_0200n)] }, // ~2
+ { input: [2, 2], expected: [reinterpretU64AsF64(0x400f_fffd_a000_0400n), reinterpretU64AsF64(0x4010_0001_a000_0400n)] }, // ~4
+ { input: [10, 10], expected: [reinterpretU64AsF64(0x4202_a04f_51f7_7000n), reinterpretU64AsF64(0x4202_a070_ee08_e000n)] }, // ~10000000000
+ { input: [10, 1], expected: [reinterpretU64AsF64(0x4023_fffe_0b65_8b00n), reinterpretU64AsF64(0x4024_0002_149a_7c00n)] }, // ~10
+ ] as ScalarPairToIntervalCase[],
+ f16 : [
+ { input: [1, 0], expected: [reinterpretU64AsF64(0x3fef_fc00_0000_0000n), reinterpretU64AsF64(0x3ff0_0200_0000_0000n)] }, // ~1
+ { input: [2, 0], expected: [reinterpretU64AsF64(0x3fef_fc00_0000_0000n), reinterpretU64AsF64(0x3ff0_0200_0000_0000n)] }, // ~1
+ { input: [kValue.f16.positive.max, 0], expected: [reinterpretU64AsF64(0x3fef_fc00_0000_0000n), reinterpretU64AsF64(0x3ff0_0200_0000_0000n)] }, // ~1
+ { input: [1, 1], expected: [reinterpretU64AsF64(0x3fef_cbf0_0000_0000n), reinterpretU64AsF64(0x3ff0_1c10_0000_0000n)] }, // ~1
+ { input: [1, 100], expected: [reinterpretU64AsF64(0x3fe2_91c0_0000_0000n), reinterpretU64AsF64(0x3ffb_8a40_0000_0000n)] }, // ~1
+ { input: [2, 1], expected: [reinterpretU64AsF64(0x3fff_c410_0000_0000n), reinterpretU64AsF64(0x4000_2410_0000_0000n)] }, // ~2
+ { input: [2, 2], expected: [reinterpretU64AsF64(0x400f_9020_0000_0000n), reinterpretU64AsF64(0x4010_4420_0000_0000n)] }, // ~4
+ { input: [5, 5], expected: [reinterpretU64AsF64(0x40a7_5f70_0000_0000n), reinterpretU64AsF64(0x40a9_5520_0000_0000n)] }, // ~3125
+ { input: [10, 1], expected: [reinterpretU64AsF64(0x4023_c57c_0000_0000n), reinterpretU64AsF64(0x4024_36a0_0000_0000n)] }, // ~10
+ ] as ScalarPairToIntervalCase[],
+} as const;
+
+g.test('powInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarPairToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ { input: [-1, 0], expected: kUnboundedBounds },
+ { input: [0, 0], expected: kUnboundedBounds },
+ { input: [0, 1], expected: kUnboundedBounds },
+ { input: [1, constants.positive.max], expected: kUnboundedBounds },
+ { input: [constants.positive.max, 1], expected: kUnboundedBounds },
+
+ ...kPowIntervalCases[p.trait],
+ ];
+ })
+ )
+ .fn(t => {
+ const [x, y] = t.params.input;
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.powInterval(x, y);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.powInterval(${x}, ${y}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// prettier-ignore
+const kRemainderCases = {
+ f32: [
+ { input: [1, 0.1], expected: [reinterpretU32AsF32(0xb4000000), reinterpretU32AsF32(0x3dccccd8)] }, // ~[0, 0.1]
+ { input: [-1, 0.1], expected: [reinterpretU32AsF32(0xbdccccd8), reinterpretU32AsF32(0x34000000)] }, // ~[-0.1, 0]
+ { input: [1, -0.1], expected: [reinterpretU32AsF32(0xb4000000), reinterpretU32AsF32(0x3dccccd8)] }, // ~[0, 0.1]
+ { input: [-1, -0.1], expected: [reinterpretU32AsF32(0xbdccccd8), reinterpretU32AsF32(0x34000000)] }, // ~[-0.1, 0]
+ ] as ScalarPairToIntervalCase[],
+ f16: [
+ { input: [1, 0.1], expected: [reinterpretU16AsF16(0x9400), reinterpretU16AsF16(0x2e70)] }, // ~[0, 0.1]
+ { input: [-1, 0.1], expected: [reinterpretU16AsF16(0xae70), reinterpretU16AsF16(0x1400)] }, // ~[-0.1, 0]
+ { input: [1, -0.1], expected: [reinterpretU16AsF16(0x9400), reinterpretU16AsF16(0x2e70)] }, // ~[0, 0.1]
+ { input: [-1, -0.1], expected: [reinterpretU16AsF16(0xae70), reinterpretU16AsF16(0x1400)] }, // ~[-0.1, 0]
+ ] as ScalarPairToIntervalCase[],
+} as const;
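+
+// Illustrative sketch, not part of the CTS: why 1 % 0.1 above is accepted
+// anywhere in roughly [0, 0.1]. WGSL defines float % as x - y * trunc(x / y),
+// and the two f32 neighbors of 0.1 straddle the exact value, so the quotient
+// truncates to either 9 or 10 and the remainder lands near 0.1 or near 0.
+{
+  const remainder = (x: number, y: number): number => x - y * Math.trunc(x / y);
+  const above = Math.fround(0.1);  // 0x3DCCCCCD ≈ 0.10000000149
+  const below = above - 2 ** -27;  // 0x3DCCCCCC, one f32 ULP below 0.1
+  const nearPointOne = remainder(1, above); // trunc(1 / above) === 9, result ≈ 0.09999999
+  const nearZero = remainder(1, below);     // trunc(1 / below) === 10, result ≈ 0.00000006
+}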
+
+g.test('remainderInterval')
+ .params(u =>
+ u
+ .combine('trait', ['abstract', 'f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarPairToIntervalCase>(p => {
+ const trait = kFPTraitForULP[p.trait];
+ const constants = FP[trait].constants();
+
+ // prettier-ignore
+ return [
+ ...kRemainderCases[trait],
+ // Normals
+ { input: [0, 1], expected: 0 },
+ { input: [0, -1], expected: 0 },
+ { input: [1, 1], expected: [0, 1] },
+ { input: [1, -1], expected: [0, 1] },
+ { input: [-1, 1], expected: [-1, 0] },
+ { input: [-1, -1], expected: [-1, 0] },
+ { input: [4, 2], expected: [0, 2] },
+ { input: [-4, 2], expected: [-2, 0] },
+ { input: [4, -2], expected: [0, 2] },
+ { input: [-4, -2], expected: [-2, 0] },
+ { input: [2, 4], expected: [2, 2] },
+ { input: [-2, 4], expected: -2 },
+ { input: [2, -4], expected: 2 },
+ { input: [-2, -4], expected: [-2, -2] },
+ { input: [0, 0.1], expected: 0 },
+ { input: [0, -0.1], expected: 0 },
+ { input: [8.5, 2], expected: 0.5 },
+ { input: [1.125, 1], expected: 0.125 },
+
+ // Denominator out of range
+ { input: [1, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [1, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [constants.positive.infinity, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [1, constants.positive.max], expected: kUnboundedBounds },
+ { input: [1, constants.negative.min], expected: kUnboundedBounds },
+ { input: [1, 0], expected: kUnboundedBounds },
+ { input: [1, constants.positive.subnormal.max], expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const [x, y] = t.params.input;
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.remainderInterval(x, y);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.remainderInterval(${x}, ${y}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+g.test('stepInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarPairToIntervalCase>(p => {
+ const constants = FP[p.trait].constants();
+ // prettier-ignore
+ return [
+ // 32-bit normals
+ { input: [0, 0], expected: 1 },
+ { input: [1, 1], expected: 1 },
+ { input: [0, 1], expected: 1 },
+ { input: [1, 0], expected: 0 },
+ { input: [-1, -1], expected: 1 },
+ { input: [0, -1], expected: 0 },
+ { input: [-1, 0], expected: 1 },
+ { input: [-1, 1], expected: 1 },
+ { input: [1, -1], expected: 0 },
+
+ // 64-bit normals
+ { input: [0.1, 0.1], expected: [0, 1] },
+ { input: [0, 0.1], expected: 1 },
+ { input: [0.1, 0], expected: 0 },
+ { input: [0.1, 1], expected: 1 },
+ { input: [1, 0.1], expected: 0 },
+ { input: [-0.1, -0.1], expected: [0, 1] },
+ { input: [0, -0.1], expected: 0 },
+ { input: [-0.1, 0], expected: 1 },
+ { input: [-0.1, -1], expected: 0 },
+ { input: [-1, -0.1], expected: 1 },
+
+ // Subnormals
+ { input: [0, constants.positive.subnormal.max], expected: 1 },
+ { input: [0, constants.positive.subnormal.min], expected: 1 },
+ { input: [0, constants.negative.subnormal.max], expected: [0, 1] },
+ { input: [0, constants.negative.subnormal.min], expected: [0, 1] },
+ { input: [1, constants.positive.subnormal.max], expected: 0 },
+ { input: [1, constants.positive.subnormal.min], expected: 0 },
+ { input: [1, constants.negative.subnormal.max], expected: 0 },
+ { input: [1, constants.negative.subnormal.min], expected: 0 },
+ { input: [-1, constants.positive.subnormal.max], expected: 1 },
+ { input: [-1, constants.positive.subnormal.min], expected: 1 },
+ { input: [-1, constants.negative.subnormal.max], expected: 1 },
+ { input: [-1, constants.negative.subnormal.min], expected: 1 },
+ { input: [constants.positive.subnormal.max, 0], expected: [0, 1] },
+ { input: [constants.positive.subnormal.min, 0], expected: [0, 1] },
+ { input: [constants.negative.subnormal.max, 0], expected: 1 },
+ { input: [constants.negative.subnormal.min, 0], expected: 1 },
+ { input: [constants.positive.subnormal.max, 1], expected: 1 },
+ { input: [constants.positive.subnormal.min, 1], expected: 1 },
+ { input: [constants.negative.subnormal.max, 1], expected: 1 },
+ { input: [constants.negative.subnormal.min, 1], expected: 1 },
+ { input: [constants.positive.subnormal.max, -1], expected: 0 },
+ { input: [constants.positive.subnormal.min, -1], expected: 0 },
+ { input: [constants.negative.subnormal.max, -1], expected: 0 },
+ { input: [constants.negative.subnormal.min, -1], expected: 0 },
+ { input: [constants.negative.subnormal.min, constants.positive.subnormal.max], expected: 1 },
+ { input: [constants.positive.subnormal.max, constants.negative.subnormal.min], expected: [0, 1] },
+
+ // Infinities
+ { input: [0, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [constants.positive.infinity, 0], expected: kUnboundedBounds },
+ { input: [constants.positive.infinity, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [0, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, 0], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [constants.positive.infinity, constants.negative.infinity], expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const [edge, x] = t.params.input;
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.stepInterval(edge, x);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.stepInterval(${edge}, ${x}) returned ${got}. Expected ${expected}`
+ );
+ });
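+
+// Illustrative sketch, not part of the CTS: step(edge, x) is 0.0 when
+// x < edge and 1.0 otherwise. The [0, 1] rows above arise when rounding or
+// subnormal flushing can flip that comparison; for (0.1, 0.1), edge and x may
+// each land on either f32 neighbor of 0.1, so both outcomes are acceptable.
+{
+  const step = (edge: number, x: number): number => (x < edge ? 0 : 1);
+  const above = Math.fround(0.1);  // 0x3DCCCCCD
+  const below = above - 2 ** -27;  // 0x3DCCCCCC, one f32 ULP below
+  const couldBeOne = step(below, above);  // 1: edge rounded down, x rounded up
+  const couldBeZero = step(above, below); // 0: edge rounded up, x rounded down
+}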
+
+// prettier-ignore
+const kSubtractionInterval64BitsNormalCases = {
+ f32: [
+ // 0.1 falls between f32 0x3DCCCCCC and 0x3DCCCCCD, -0.1 falls between f32 0xBDCCCCCD and 0xBDCCCCCC
+ // Expect f32 interval [0x3DCCCCCC-0x3DCCCCCD, 0x3DCCCCCD-0x3DCCCCCC]
+ { input: [0.1, 0.1], expected: [reinterpretU32AsF32(0x3dcccccc)-reinterpretU32AsF32(0x3dcccccd), reinterpretU32AsF32(0x3dcccccd)-reinterpretU32AsF32(0x3dcccccc)] },
+ // Expect f32 interval [0xBDCCCCCD-0xBDCCCCCC, 0xBDCCCCCC-0xBDCCCCCD]
+ { input: [-0.1, -0.1], expected: [reinterpretU32AsF32(0xbdcccccd)-reinterpretU32AsF32(0xbdcccccc), reinterpretU32AsF32(0xbdcccccc)-reinterpretU32AsF32(0xbdcccccd)] },
+ // Expect f32 interval [0x3DCCCCCC-0xBDCCCCCC, 0x3DCCCCCD-0xBDCCCCCD]
+ { input: [0.1, -0.1], expected: [reinterpretU32AsF32(0x3dcccccc)-reinterpretU32AsF32(0xbdcccccc), reinterpretU32AsF32(0x3dcccccd)-reinterpretU32AsF32(0xbdcccccd)] },
+ // Expect f32 interval [0xBDCCCCCD-0x3DCCCCCD, 0xBDCCCCCC-0x3DCCCCCC]
+ { input: [-0.1, 0.1], expected: [reinterpretU32AsF32(0xbdcccccd)-reinterpretU32AsF32(0x3dcccccd), reinterpretU32AsF32(0xbdcccccc)-reinterpretU32AsF32(0x3dcccccc)] },
+ ] as ScalarPairToIntervalCase[],
+ f16: [
+ // 0.1 falls between f16 0x2E66 and 0x2E67, -0.1 falls between f16 0xAE67 and 0xAE66
+ // Expect f16 interval [0x2E66-0x2E67, 0x2E67-0x2E66]
+ { input: [0.1, 0.1], expected: [reinterpretU16AsF16(0x2e66)-reinterpretU16AsF16(0x2e67), reinterpretU16AsF16(0x2e67)-reinterpretU16AsF16(0x2e66)] },
+ // Expect f16 interval [0xAE67-0xAE66, 0xAE66-0xAE67]
+ { input: [-0.1, -0.1], expected: [reinterpretU16AsF16(0xae67)-reinterpretU16AsF16(0xae66), reinterpretU16AsF16(0xae66)-reinterpretU16AsF16(0xae67)] },
+ // Expect f16 interval [0x2E66-0xAE66, 0x2E67-0xAE67]
+ { input: [0.1, -0.1], expected: [reinterpretU16AsF16(0x2e66)-reinterpretU16AsF16(0xae66), reinterpretU16AsF16(0x2e67)-reinterpretU16AsF16(0xae67)] },
+ // Expect f16 interval [0xAE67-0x2E67, 0xAE66-0x2E66]
+ { input: [-0.1, 0.1], expected: [reinterpretU16AsF16(0xae67)-reinterpretU16AsF16(0x2e67), reinterpretU16AsF16(0xae66)-reinterpretU16AsF16(0x2e66)] },
+ ] as ScalarPairToIntervalCase[],
+ abstract: [
+    // 0.1 isn't exactly representable in f64, but it is quantized to an exact
+    // value when stored in a 'number' (0x3FB999999999999A).
+    // This is why the expectations below are not intervals.
+ { input: [0.1, 0.1], expected: 0 },
+ { input: [-0.1, -0.1], expected: 0 },
+ // f64 0x3FB999999999999A - 0xBFB999999999999A = 0x3FC999999999999A
+ { input: [0.1, -0.1], expected: reinterpretU64AsF64(0x3fc999999999999an) }, // ~0.2
+ // f64 0xBFB999999999999A - 0x3FB999999999999A = 0xBFC999999999999A
+ { input: [-0.1, 0.1], expected: reinterpretU64AsF64(0xbfc999999999999an) }, // ~-0.2,
+ ] as ScalarPairToIntervalCase[],
+} as const;
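+
+// Illustrative sketch, not part of the CTS: the f32 rows for 0.1 - 0.1 above
+// simply span the differences of 0.1's two f32 neighbors. The neighbors are
+// within a factor of two of each other, so their difference is exact in f32
+// (Sterbenz) and the interval endpoints need no further widening.
+{
+  const above = Math.fround(0.1); // 0x3DCCCCCD
+  const below = above - 2 ** -27; // 0x3DCCCCCC, one f32 ULP below
+  // Matches the first f32 expectation in kSubtractionInterval64BitsNormalCases.
+  const bounds = [below - above, above - below]; // [-(2 ** -27), 2 ** -27]
+}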
+
+g.test('subtractionInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarPairToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ // Representable normals
+ { input: [0, 0], expected: 0 },
+ { input: [1, 0], expected: 1 },
+ { input: [0, 1], expected: -1 },
+ { input: [-1, 0], expected: -1 },
+ { input: [0, -1], expected: 1 },
+ { input: [1, 1], expected: 0 },
+ { input: [1, -1], expected: 2 },
+ { input: [-1, 1], expected: -2 },
+ { input: [-1, -1], expected: 0 },
+
+          // 64-bit normals that cannot be exactly represented in f32/f16
+ { input: [0.1, 0], expected: kConstantCorrectlyRoundedExpectation[p.trait]['0.1'] },
+ { input: [0, -0.1], expected: kConstantCorrectlyRoundedExpectation[p.trait]['0.1'] },
+ { input: [-0.1, 0], expected: kConstantCorrectlyRoundedExpectation[p.trait]['-0.1'] },
+ { input: [0, 0.1], expected: kConstantCorrectlyRoundedExpectation[p.trait]['-0.1'] },
+ ...kSubtractionInterval64BitsNormalCases[p.trait],
+
+ // Subnormals
+ { input: [constants.positive.subnormal.max, 0], expected: [0, constants.positive.subnormal.max] },
+ { input: [0, constants.positive.subnormal.max], expected: [constants.negative.subnormal.min, 0] },
+ { input: [constants.positive.subnormal.min, 0], expected: [0, constants.positive.subnormal.min] },
+ { input: [0, constants.positive.subnormal.min], expected: [constants.negative.subnormal.max, 0] },
+ { input: [constants.negative.subnormal.max, 0], expected: [constants.negative.subnormal.max, 0] },
+ { input: [0, constants.negative.subnormal.max], expected: [0, constants.positive.subnormal.min] },
+ { input: [constants.negative.subnormal.min, 0], expected: [constants.negative.subnormal.min, 0] },
+ { input: [0, constants.negative.subnormal.min], expected: [0, constants.positive.subnormal.max] },
+
+ // Infinities
+ { input: [0, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [constants.positive.infinity, 0], expected: kUnboundedBounds },
+ { input: [constants.positive.infinity, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [0, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, 0], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [constants.positive.infinity, constants.negative.infinity], expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const [x, y] = t.params.input;
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.subtractionInterval(x, y);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.subtractionInterval(${x}, ${y}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+interface ScalarTripleToIntervalCase {
+ input: [number, number, number];
+ expected: number | IntervalBounds;
+}
+
+g.test('clampMedianInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarTripleToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ // Normals
+ { input: [0, 0, 0], expected: 0 },
+ { input: [1, 0, 0], expected: 0 },
+ { input: [0, 1, 0], expected: 0 },
+ { input: [0, 0, 1], expected: 0 },
+ { input: [1, 0, 1], expected: 1 },
+ { input: [1, 1, 0], expected: 1 },
+ { input: [0, 1, 1], expected: 1 },
+ { input: [1, 1, 1], expected: 1 },
+ { input: [1, 10, 100], expected: 10 },
+ { input: [10, 1, 100], expected: 10 },
+ { input: [100, 1, 10], expected: 10 },
+ { input: [-10, 1, 100], expected: 1 },
+ { input: [10, 1, -100], expected: 1 },
+ { input: [-10, 1, -100], expected: -10 },
+ { input: [-10, -10, -10], expected: -10 },
+
+ // Subnormals
+ { input: [constants.positive.subnormal.max, 0, 0], expected: 0 },
+ { input: [0, constants.positive.subnormal.max, 0], expected: 0 },
+ { input: [0, 0, constants.positive.subnormal.max], expected: 0 },
+ { input: [constants.positive.subnormal.max, 0, constants.positive.subnormal.max], expected: [0, constants.positive.subnormal.max] },
+ { input: [constants.positive.subnormal.max, constants.positive.subnormal.max, 0], expected: [0, constants.positive.subnormal.max] },
+ { input: [0, constants.positive.subnormal.max, constants.positive.subnormal.max], expected: [0, constants.positive.subnormal.max] },
+ { input: [constants.positive.subnormal.max, constants.positive.subnormal.max, constants.positive.subnormal.max], expected: [0, constants.positive.subnormal.max] },
+ { input: [constants.positive.subnormal.max, constants.positive.subnormal.min, constants.negative.subnormal.max], expected: [0, constants.positive.subnormal.min] },
+ { input: [constants.positive.subnormal.max, constants.negative.subnormal.min, constants.negative.subnormal.max], expected: [constants.negative.subnormal.max, 0] },
+ { input: [constants.positive.max, constants.positive.max, constants.positive.subnormal.min], expected: constants.positive.max },
+
+ // Infinities
+ { input: [0, 1, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [0, constants.positive.infinity, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, constants.positive.infinity, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, constants.positive.infinity, constants.negative.infinity], expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const [x, y, z] = t.params.input;
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.clampMedianInterval(x, y, z);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.clampMedianInterval(${x}, ${y}, ${z}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+g.test('clampMinMaxInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarTripleToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ // Normals
+ { input: [0, 0, 0], expected: 0 },
+ { input: [1, 0, 0], expected: 0 },
+ { input: [0, 1, 0], expected: 0 },
+ { input: [0, 0, 1], expected: 0 },
+ { input: [1, 0, 1], expected: 1 },
+ { input: [1, 1, 0], expected: 0 },
+ { input: [0, 1, 1], expected: 1 },
+ { input: [1, 1, 1], expected: 1 },
+ { input: [1, 10, 100], expected: 10 },
+ { input: [10, 1, 100], expected: 10 },
+ { input: [100, 1, 10], expected: 10 },
+ { input: [-10, 1, 100], expected: 1 },
+ { input: [10, 1, -100], expected: -100 },
+ { input: [-10, 1, -100], expected: -100 },
+ { input: [-10, -10, -10], expected: -10 },
+
+ // Subnormals
+ { input: [constants.positive.subnormal.max, 0, 0], expected: [0, constants.positive.subnormal.max] },
+ { input: [0, constants.positive.subnormal.max, 0], expected: [0, constants.positive.subnormal.max] },
+ { input: [0, 0, constants.positive.subnormal.max], expected: [0, constants.positive.subnormal.max] },
+ { input: [constants.positive.subnormal.max, 0, constants.positive.subnormal.max], expected: [0, constants.positive.subnormal.max] },
+ { input: [constants.positive.subnormal.max, constants.positive.subnormal.max, 0], expected: [0, constants.positive.subnormal.max] },
+ { input: [0, constants.positive.subnormal.max, constants.positive.subnormal.max], expected: [0, constants.positive.subnormal.max] },
+ { input: [constants.positive.subnormal.max, constants.positive.subnormal.max, constants.positive.subnormal.max], expected: [0, constants.positive.subnormal.max] },
+ { input: [constants.positive.subnormal.max, constants.positive.subnormal.min, constants.negative.subnormal.max], expected: [constants.negative.subnormal.max, constants.positive.subnormal.max] },
+ { input: [constants.positive.subnormal.max, constants.negative.subnormal.min, constants.negative.subnormal.max], expected: [constants.negative.subnormal.min, constants.positive.subnormal.max] },
+ { input: [constants.positive.max, constants.positive.max, constants.positive.subnormal.min], expected: [0, constants.positive.subnormal.min] },
+
+ // Infinities
+ { input: [0, 1, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [0, constants.positive.infinity, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, constants.positive.infinity, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, constants.positive.infinity, constants.negative.infinity], expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const [x, y, z] = t.params.input;
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.clampMinMaxInterval(x, y, z);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.clampMinMaxInterval(${x}, ${y}, ${z}) returned ${got}. Expected ${expected}`
+ );
+ });
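+
+// Illustrative sketch, not part of the CTS: the two clamp formulations the
+// tables above exercise. They agree whenever low <= high, but diverge for
+// degenerate arguments such as (e, low, high) = (1, 1, 0).
+{
+  const clampMedian = (e: number, low: number, high: number) =>
+    [e, low, high].sort((a, b) => a - b)[1]; // median of the three values
+  const clampMinMax = (e: number, low: number, high: number): number =>
+    Math.min(Math.max(e, low), high);
+  const viaMedian = clampMedian(1, 1, 0); // 1, as in the clampMedianInterval table
+  const viaMinMax = clampMinMax(1, 1, 0); // 0, as in the clampMinMaxInterval table
+}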
+
+// prettier-ignore
+const kFmaIntervalCases = {
+ f32: [
+    // positive.subnormal.max * positive.subnormal.max is much smaller than positive.subnormal.min but larger than 0, so it rounds to [0, positive.subnormal.min]
+ { input: [kValue.f32.positive.subnormal.max, kValue.f32.positive.subnormal.max, 0], expected: [0, kValue.f32.positive.subnormal.min] },
+ // positive.subnormal.max * positive.subnormal.max rounded to 0 or positive.subnormal.min,
+ // 0 + constants.positive.subnormal.max rounded to [0, constants.positive.subnormal.max],
+ // positive.subnormal.min + constants.positive.subnormal.max = constants.positive.min.
+ { input: [kValue.f32.positive.subnormal.max, kValue.f32.positive.subnormal.max, kValue.f32.positive.subnormal.max], expected: [0, kValue.f32.positive.min] },
+ // positive.subnormal.max * positive.subnormal.max rounded to 0 or positive.subnormal.min,
+    // negative.subnormal.max may be flushed to 0,
+ // minimum case: 0 + negative.subnormal.max rounded to [negative.subnormal.max, 0],
+ // maximum case: positive.subnormal.min + 0 rounded to [0, positive.subnormal.min].
+ { input: [kValue.f32.positive.subnormal.max, kValue.f32.positive.subnormal.min, kValue.f32.negative.subnormal.max], expected: [kValue.f32.negative.subnormal.max, kValue.f32.positive.subnormal.min] },
+ // positive.subnormal.max * negative.subnormal.min rounded to -0.0 or negative.subnormal.max = -1 * [subnormal ulp],
+    // negative.subnormal.max = -1 * [subnormal ulp] may be flushed to -0.0,
+ // minimum case: -1 * [subnormal ulp] + -1 * [subnormal ulp] rounded to [-2 * [subnormal ulp], 0],
+ // maximum case: -0.0 + -0.0 = 0.
+ { input: [kValue.f32.positive.subnormal.max, kValue.f32.negative.subnormal.min, kValue.f32.negative.subnormal.max], expected: [-2 * FP['f32'].oneULP(0, 'no-flush'), 0] },
+ ] as ScalarTripleToIntervalCase[],
+ f16: [
+    // positive.subnormal.max * positive.subnormal.max is much smaller than positive.subnormal.min but larger than 0, so it rounds to [0, positive.subnormal.min]
+ { input: [kValue.f16.positive.subnormal.max, kValue.f16.positive.subnormal.max, 0], expected: [0, kValue.f16.positive.subnormal.min] },
+ // positive.subnormal.max * positive.subnormal.max rounded to 0 or positive.subnormal.min,
+ // 0 + constants.positive.subnormal.max rounded to [0, constants.positive.subnormal.max],
+ // positive.subnormal.min + constants.positive.subnormal.max = constants.positive.min.
+ { input: [kValue.f16.positive.subnormal.max, kValue.f16.positive.subnormal.max, kValue.f16.positive.subnormal.max], expected: [0, kValue.f16.positive.min] },
+ // positive.subnormal.max * positive.subnormal.max rounded to 0 or positive.subnormal.min,
+    // negative.subnormal.max may be flushed to 0,
+ // minimum case: 0 + negative.subnormal.max rounded to [negative.subnormal.max, 0],
+ // maximum case: positive.subnormal.min + 0 rounded to [0, positive.subnormal.min].
+ { input: [kValue.f16.positive.subnormal.max, kValue.f16.positive.subnormal.min, kValue.f16.negative.subnormal.max], expected: [kValue.f16.negative.subnormal.max, kValue.f16.positive.subnormal.min] },
+ // positive.subnormal.max * negative.subnormal.min rounded to -0.0 or negative.subnormal.max = -1 * [subnormal ulp],
+    // negative.subnormal.max = -1 * [subnormal ulp] may be flushed to -0.0,
+ // minimum case: -1 * [subnormal ulp] + -1 * [subnormal ulp] rounded to [-2 * [subnormal ulp], 0],
+ // maximum case: -0.0 + -0.0 = 0.
+    { input: [kValue.f16.positive.subnormal.max, kValue.f16.negative.subnormal.min, kValue.f16.negative.subnormal.max], expected: [-2 * FP['f16'].oneULP(0, 'no-flush'), 0] },
+  ] as ScalarTripleToIntervalCase[],
+ abstract: [
+    // These operations break down in the CTS, because `number` is an f64 under the hood, so
+    // precision is sometimes lost: intermediate results closer to 0 than the smallest subnormal
+    // become precisely 0.
+ // See https://github.com/gpuweb/cts/issues/2993 for details
+ { input: [kValue.f64.positive.subnormal.max, kValue.f64.positive.subnormal.max, 0], expected: 0 },
+ { input: [kValue.f64.positive.subnormal.max, kValue.f64.positive.subnormal.max, kValue.f64.positive.subnormal.max], expected: [0, kValue.f64.positive.subnormal.max] },
+ { input: [kValue.f64.positive.subnormal.max, kValue.f64.positive.subnormal.min, kValue.f64.negative.subnormal.max], expected: [kValue.f64.negative.subnormal.max, 0] },
+ { input: [kValue.f64.positive.subnormal.max, kValue.f64.negative.subnormal.min, kValue.f64.negative.subnormal.max], expected: [kValue.f64.negative.subnormal.max, 0] },
+ ] as ScalarTripleToIntervalCase[],
+} as const;
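+
+// Illustrative sketch, not part of the CTS: the subnormal fma rows above hinge
+// on the fact that squaring the largest f32 subnormal lands far below the
+// smallest subnormal, so the correctly rounded product is either 0 (rounding
+// down) or positive.subnormal.min (rounding up) before the addend is applied.
+{
+  const subnormalMax = (2 ** 23 - 1) * 2 ** -149; // f32 positive.subnormal.max
+  const subnormalMin = 2 ** -149;                 // f32 positive.subnormal.min
+  const product = subnormalMax * subnormalMax;    // ≈ 1.38e-76, exact in f64
+  const underflowsInF32 = product > 0 && product < subnormalMin; // true
+}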
+
+g.test('fmaInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarTripleToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ // Normals
+ { input: [0, 0, 0], expected: 0 },
+ { input: [1, 0, 0], expected: 0 },
+ { input: [0, 1, 0], expected: 0 },
+ { input: [0, 0, 1], expected: 1 },
+ { input: [1, 0, 1], expected: 1 },
+ { input: [1, 1, 0], expected: 1 },
+ { input: [0, 1, 1], expected: 1 },
+ { input: [1, 1, 1], expected: 2 },
+ { input: [1, 10, 100], expected: 110 },
+ { input: [10, 1, 100], expected: 110 },
+ { input: [100, 1, 10], expected: 110 },
+ { input: [-10, 1, 100], expected: 90 },
+ { input: [10, 1, -100], expected: -90 },
+ { input: [-10, 1, -100], expected: -110 },
+ { input: [-10, -10, -10], expected: 90 },
+
+ // Subnormals
+ { input: [constants.positive.subnormal.max, 0, 0], expected: 0 },
+ { input: [0, constants.positive.subnormal.max, 0], expected: 0 },
+ { input: [0, 0, constants.positive.subnormal.max], expected: [0, constants.positive.subnormal.max] },
+ { input: [constants.positive.subnormal.max, 0, constants.positive.subnormal.max], expected: [0, constants.positive.subnormal.max] },
+ { input: [0, constants.positive.subnormal.max, constants.positive.subnormal.max], expected: [0, constants.positive.subnormal.max] },
+
+ // Infinities
+ { input: [0, 1, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [0, constants.positive.infinity, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, constants.positive.infinity, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, constants.positive.infinity, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [constants.positive.max, constants.positive.max, constants.positive.subnormal.min], expected: kUnboundedBounds },
+ ...kFmaIntervalCases[p.trait],
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.fmaInterval(...t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.fmaInterval(${t.params.input.join(
+ ','
+ )}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// Some of these are hard coded, since the error intervals are difficult to express in a closed
+// human-readable form due to the inherited nature of the errors.
+// prettier-ignore
+const kMixImpreciseIntervalCases = {
+ f32: [
+ // [0.0, 1.0] cases
+ { input: [0.0, 1.0, 0.1], expected: [reinterpretU64AsF64(0x3fb9_9999_8000_0000n), reinterpretU64AsF64(0x3fb9_9999_a000_0000n)] }, // ~0.1
+ { input: [0.0, 1.0, 0.9], expected: [reinterpretU64AsF64(0x3fec_cccc_c000_0000n), reinterpretU64AsF64(0x3fec_cccc_e000_0000n)] }, // ~0.9
+ // [1.0, 0.0] cases
+ { input: [1.0, 0.0, 0.1], expected: [reinterpretU64AsF64(0x3fec_cccc_c000_0000n), reinterpretU64AsF64(0x3fec_cccc_e000_0000n)] }, // ~0.9
+ { input: [1.0, 0.0, 0.9], expected: [reinterpretU64AsF64(0x3fb9_9999_0000_0000n), reinterpretU64AsF64(0x3fb9_999a_0000_0000n)] }, // ~0.1
+ // [0.0, 10.0] cases
+ { input: [0.0, 10.0, 0.1], expected: [reinterpretU64AsF64(0x3fef_ffff_e000_0000n), reinterpretU64AsF64(0x3ff0_0000_2000_0000n)] }, // ~1
+ { input: [0.0, 10.0, 0.9], expected: [reinterpretU64AsF64(0x4021_ffff_e000_0000n), reinterpretU64AsF64(0x4022_0000_2000_0000n)] }, // ~9
+ // [2.0, 10.0] cases
+ { input: [2.0, 10.0, 0.1], expected: [reinterpretU64AsF64(0x4006_6666_6000_0000n), reinterpretU64AsF64(0x4006_6666_8000_0000n)] }, // ~2.8
+ { input: [2.0, 10.0, 0.9], expected: [reinterpretU64AsF64(0x4022_6666_6000_0000n), reinterpretU64AsF64(0x4022_6666_8000_0000n)] }, // ~9.2
+ // [-1.0, 1.0] cases
+ { input: [-1.0, 1.0, 0.1], expected: [reinterpretU64AsF64(0xbfe9_9999_a000_0000n), reinterpretU64AsF64(0xbfe9_9999_8000_0000n)] }, // ~-0.8
+ { input: [-1.0, 1.0, 0.9], expected: [reinterpretU64AsF64(0x3fe9_9999_8000_0000n), reinterpretU64AsF64(0x3fe9_9999_c000_0000n)] }, // ~0.8
+
+    // Showing how the precise and imprecise versions differ.
+    // Note that this expectation is 0 only in f32, because 10.0 is negligible compared to
+    // |f32.negative.min|, so 10 - f32.negative.min == -f32.negative.min even in f64. For f16
+    // there is no exactly-representable f16 value v that makes v - f16.negative.min equal
+    // -f16.negative.min in f64; that would require |v| to be smaller than 2**-37.
+    { input: [kValue.f32.negative.min, 10.0, 1.0], expected: 0.0 },
+    // -10.0 behaves the same way: it is also negligible compared to |f32.negative.min|.
+ { input: [kValue.f32.negative.min, -10.0, 1.0], expected: 0.0 },
+ ] as ScalarTripleToIntervalCase[],
+ f16: [
+ // [0.0, 1.0] cases
+ { input: [0.0, 1.0, 0.1], expected: [reinterpretU64AsF64(0x3fb9_9800_0000_0000n), reinterpretU64AsF64(0x3fb9_9c00_0000_0000n)] }, // ~0.1
+ { input: [0.0, 1.0, 0.9], expected: [reinterpretU64AsF64(0x3fec_cc00_0000_0000n), reinterpretU64AsF64(0x3fec_d000_0000_0000n)] }, // ~0.9
+ // [1.0, 0.0] cases
+ { input: [1.0, 0.0, 0.1], expected: [reinterpretU64AsF64(0x3fec_cc00_0000_0000n), reinterpretU64AsF64(0x3fec_d000_0000_0000n)] }, // ~0.9
+ { input: [1.0, 0.0, 0.9], expected: [reinterpretU64AsF64(0x3fb9_8000_0000_0000n), reinterpretU64AsF64(0x3fb9_a000_0000_0000n)] }, // ~0.1
+ // [0.0, 10.0] cases
+ { input: [0.0, 10.0, 0.1], expected: [reinterpretU64AsF64(0x3fef_fc00_0000_0000n), reinterpretU64AsF64(0x3ff0_0400_0000_0000n)] }, // ~1
+ { input: [0.0, 10.0, 0.9], expected: [reinterpretU64AsF64(0x4021_fc00_0000_0000n), reinterpretU64AsF64(0x4022_0400_0000_0000n)] }, // ~9
+ // [2.0, 10.0] cases
+ { input: [2.0, 10.0, 0.1], expected: [reinterpretU64AsF64(0x4006_6400_0000_0000n), reinterpretU64AsF64(0x4006_6800_0000_0000n)] }, // ~2.8
+ { input: [2.0, 10.0, 0.9], expected: [reinterpretU64AsF64(0x4022_6400_0000_0000n), reinterpretU64AsF64(0x4022_6800_0000_0000n)] }, // ~9.2
+ // [-1.0, 1.0] cases
+ { input: [-1.0, 1.0, 0.1], expected: [reinterpretU64AsF64(0xbfe9_9c00_0000_0000n), reinterpretU64AsF64(0xbfe9_9800_0000_0000n)] }, // ~-0.8
+ { input: [-1.0, 1.0, 0.9], expected: [reinterpretU64AsF64(0x3fe9_9800_0000_0000n), reinterpretU64AsF64(0x3fe9_a000_0000_0000n)] }, // ~0.8
+
+    // Showing how the precise and imprecise versions differ.
+    // In the imprecise version we compute (y - x) with y = 10 and x = -65504; the result is
+    // 65514, which overflows f16.
+    { input: [kValue.f16.negative.min, 10.0, 1.0], expected: kUnboundedBounds },
+    // (y - x) * 1.0 with y = -10 and x = -65504 gives 65494, which rounds in f16 to 65472 or
+    // 65504, so the final result is -65504 + 65472 = -32 or -65504 + 65504 = 0.
+ { input: [kValue.f16.negative.min, -10.0, 1.0], expected: [-32, 0] },
+ ] as ScalarTripleToIntervalCase[],
+} as const;
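+
+// Illustrative sketch, not part of the CTS: the two mix formulations that the
+// imprecise and precise tables contrast. With x = f16.negative.min = -65504,
+// y = 10 and z = 1, the imprecise form materializes (y - x) = 65514, which
+// overflows f16, while the precise form keeps its intermediates within the
+// operands' magnitudes for z in [0, 1] and yields exactly y at z = 1.
+{
+  const mixImprecise = (x: number, y: number, z: number): number => x + (y - x) * z;
+  const mixPrecise = (x: number, y: number, z: number): number => x * (1 - z) + y * z;
+  const intermediate = 10 - (-65504);        // 65514 > 65504, hence kUnboundedBounds above
+  const precise = mixPrecise(-65504, 10, 1); // 10, no intermediate exceeds 65504
+  const imprecise = mixImprecise(-65504, 10, 1); // 10 in f64; in f16 the 65514 already overflowed
+}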
+
+g.test('mixImpreciseInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarTripleToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ ...kMixImpreciseIntervalCases[p.trait],
+
+ // [0.0, 1.0] cases
+ { input: [0.0, 1.0, -1.0], expected: -1.0 },
+ { input: [0.0, 1.0, 0.0], expected: 0.0 },
+ { input: [0.0, 1.0, 0.5], expected: 0.5 },
+ { input: [0.0, 1.0, 1.0], expected: 1.0 },
+ { input: [0.0, 1.0, 2.0], expected: 2.0 },
+
+ // [1.0, 0.0] cases
+ { input: [1.0, 0.0, -1.0], expected: 2.0 },
+ { input: [1.0, 0.0, 0.0], expected: 1.0 },
+ { input: [1.0, 0.0, 0.5], expected: 0.5 },
+ { input: [1.0, 0.0, 1.0], expected: 0.0 },
+ { input: [1.0, 0.0, 2.0], expected: -1.0 },
+
+ // [0.0, 10.0] cases
+ { input: [0.0, 10.0, -1.0], expected: -10.0 },
+ { input: [0.0, 10.0, 0.0], expected: 0.0 },
+ { input: [0.0, 10.0, 0.5], expected: 5.0 },
+ { input: [0.0, 10.0, 1.0], expected: 10.0 },
+ { input: [0.0, 10.0, 2.0], expected: 20.0 },
+
+ // [2.0, 10.0] cases
+ { input: [2.0, 10.0, -1.0], expected: -6.0 },
+ { input: [2.0, 10.0, 0.0], expected: 2.0 },
+ { input: [2.0, 10.0, 0.5], expected: 6.0 },
+ { input: [2.0, 10.0, 1.0], expected: 10.0 },
+ { input: [2.0, 10.0, 2.0], expected: 18.0 },
+
+ // [-1.0, 1.0] cases
+ { input: [-1.0, 1.0, -2.0], expected: -5.0 },
+ { input: [-1.0, 1.0, 0.0], expected: -1.0 },
+ { input: [-1.0, 1.0, 0.5], expected: 0.0 },
+ { input: [-1.0, 1.0, 1.0], expected: 1.0 },
+ { input: [-1.0, 1.0, 2.0], expected: 3.0 },
+
+ // Infinities
+ { input: [0.0, constants.positive.infinity, 0.5], expected: kUnboundedBounds },
+ { input: [constants.positive.infinity, 0.0, 0.5], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, 1.0, 0.5], expected: kUnboundedBounds },
+ { input: [1.0, constants.negative.infinity, 0.5], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, constants.positive.infinity, 0.5], expected: kUnboundedBounds },
+ { input: [constants.positive.infinity, constants.negative.infinity, 0.5], expected: kUnboundedBounds },
+ { input: [0.0, 1.0, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [1.0, 0.0, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [0.0, 1.0, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [1.0, 0.0, constants.positive.infinity], expected: kUnboundedBounds },
+
+          // The [negative.min, +/-10.0, 1.0] cases have different results for each trait in the
+          // imprecise version; see kMixImpreciseIntervalCases above.
+ ];
+ })
+ )
+ .fn(t => {
+ const [x, y, z] = t.params.input;
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.mixImpreciseInterval(x, y, z);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.mixImpreciseInterval(${x}, ${y}, ${z}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// Some of these are hard coded, since the error intervals are difficult to express in a closed
+// human-readable form due to the inherited nature of the errors.
+// prettier-ignore
+const kMixPreciseIntervalCases = {
+ f32: [
+ // [0.0, 1.0] cases
+ { input: [0.0, 1.0, 0.1], expected: [reinterpretU64AsF64(0x3fb9_9999_8000_0000n), reinterpretU64AsF64(0x3fb9_9999_a000_0000n)] }, // ~0.1
+ { input: [0.0, 1.0, 0.9], expected: [reinterpretU64AsF64(0x3fec_cccc_c000_0000n), reinterpretU64AsF64(0x3fec_cccc_e000_0000n)] }, // ~0.9
+ // [1.0, 0.0] cases
+ { input: [1.0, 0.0, 0.1], expected: [reinterpretU64AsF64(0x3fec_cccc_c000_0000n), reinterpretU64AsF64(0x3fec_cccc_e000_0000n)] }, // ~0.9
+ { input: [1.0, 0.0, 0.9], expected: [reinterpretU64AsF64(0x3fb9_9999_0000_0000n), reinterpretU64AsF64(0x3fb9_999a_0000_0000n)] }, // ~0.1
+ // [0.0, 10.0] cases
+ { input: [0.0, 10.0, 0.1], expected: [reinterpretU64AsF64(0x3fef_ffff_e000_0000n), reinterpretU64AsF64(0x3ff0_0000_2000_0000n)] }, // ~1
+ { input: [0.0, 10.0, 0.9], expected: [reinterpretU64AsF64(0x4021_ffff_e000_0000n), reinterpretU64AsF64(0x4022_0000_2000_0000n)] }, // ~9
+ // [2.0, 10.0] cases
+ { input: [2.0, 10.0, 0.1], expected: [reinterpretU64AsF64(0x4006_6666_4000_0000n), reinterpretU64AsF64(0x4006_6666_8000_0000n)] }, // ~2.8
+ { input: [2.0, 10.0, 0.9], expected: [reinterpretU64AsF64(0x4022_6666_4000_0000n), reinterpretU64AsF64(0x4022_6666_a000_0000n)] }, // ~9.2
+ // [-1.0, 1.0] cases
+ { input: [-1.0, 1.0, 0.1], expected: [reinterpretU64AsF64(0xbfe9_9999_c000_0000n), reinterpretU64AsF64(0xbfe9_9999_8000_0000n)] }, // ~-0.8
+ { input: [-1.0, 1.0, 0.9], expected: [reinterpretU64AsF64(0x3fe9_9999_8000_0000n), reinterpretU64AsF64(0x3fe9_9999_c000_0000n)] }, // ~0.8
+ ] as ScalarTripleToIntervalCase[],
+ f16: [
+ // [0.0, 1.0] cases
+ { input: [0.0, 1.0, 0.1], expected: [reinterpretU64AsF64(0x3fb9_9800_0000_0000n), reinterpretU64AsF64(0x3fb9_9c00_0000_0000n)] }, // ~0.1
+ { input: [0.0, 1.0, 0.9], expected: [reinterpretU64AsF64(0x3fec_cc00_0000_0000n), reinterpretU64AsF64(0x3fec_d000_0000_0000n)] }, // ~0.9
+ // [1.0, 0.0] cases
+ { input: [1.0, 0.0, 0.1], expected: [reinterpretU64AsF64(0x3fec_cc00_0000_0000n), reinterpretU64AsF64(0x3fec_d000_0000_0000n)] }, // ~0.9
+ { input: [1.0, 0.0, 0.9], expected: [reinterpretU64AsF64(0x3fb9_8000_0000_0000n), reinterpretU64AsF64(0x3fb9_a000_0000_0000n)] }, // ~0.1
+ // [0.0, 10.0] cases
+ { input: [0.0, 10.0, 0.1], expected: [reinterpretU64AsF64(0x3fef_fc00_0000_0000n), reinterpretU64AsF64(0x3ff0_0400_0000_0000n)] }, // ~1
+ { input: [0.0, 10.0, 0.9], expected: [reinterpretU64AsF64(0x4021_fc00_0000_0000n), reinterpretU64AsF64(0x4022_0400_0000_0000n)] }, // ~9
+ // [2.0, 10.0] cases
+ { input: [2.0, 10.0, 0.1], expected: [reinterpretU64AsF64(0x4006_6400_0000_0000n), reinterpretU64AsF64(0x4006_6c00_0000_0000n)] }, // ~2.8
+ { input: [2.0, 10.0, 0.9], expected: [reinterpretU64AsF64(0x4022_6000_0000_0000n), reinterpretU64AsF64(0x4022_6c00_0000_0000n)] }, // ~9.2
+ // [-1.0, 1.0] cases
+ { input: [-1.0, 1.0, 0.1], expected: [reinterpretU64AsF64(0xbfe9_a000_0000_0000n), reinterpretU64AsF64(0xbfe9_9800_0000_0000n)] }, // ~-0.8
+ { input: [-1.0, 1.0, 0.9], expected: [reinterpretU64AsF64(0x3fe9_9800_0000_0000n), reinterpretU64AsF64(0x3fe9_a000_0000_0000n)] }, // ~0.8
+ ] as ScalarTripleToIntervalCase[],
+} as const;
+
+g.test('mixPreciseInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarTripleToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ ...kMixPreciseIntervalCases[p.trait],
+
+ // [0.0, 1.0] cases
+ { input: [0.0, 1.0, -1.0], expected: -1.0 },
+ { input: [0.0, 1.0, 0.0], expected: 0.0 },
+ { input: [0.0, 1.0, 0.5], expected: 0.5 },
+ { input: [0.0, 1.0, 1.0], expected: 1.0 },
+ { input: [0.0, 1.0, 2.0], expected: 2.0 },
+
+ // [1.0, 0.0] cases
+ { input: [1.0, 0.0, -1.0], expected: 2.0 },
+ { input: [1.0, 0.0, 0.0], expected: 1.0 },
+ { input: [1.0, 0.0, 0.5], expected: 0.5 },
+ { input: [1.0, 0.0, 1.0], expected: 0.0 },
+ { input: [1.0, 0.0, 2.0], expected: -1.0 },
+
+ // [0.0, 10.0] cases
+ { input: [0.0, 10.0, -1.0], expected: -10.0 },
+ { input: [0.0, 10.0, 0.0], expected: 0.0 },
+ { input: [0.0, 10.0, 0.5], expected: 5.0 },
+ { input: [0.0, 10.0, 1.0], expected: 10.0 },
+ { input: [0.0, 10.0, 2.0], expected: 20.0 },
+
+ // [2.0, 10.0] cases
+ { input: [2.0, 10.0, -1.0], expected: -6.0 },
+ { input: [2.0, 10.0, 0.0], expected: 2.0 },
+ { input: [2.0, 10.0, 0.5], expected: 6.0 },
+ { input: [2.0, 10.0, 1.0], expected: 10.0 },
+ { input: [2.0, 10.0, 2.0], expected: 18.0 },
+
+ // [-1.0, 1.0] cases
+ { input: [-1.0, 1.0, -2.0], expected: -5.0 },
+ { input: [-1.0, 1.0, 0.0], expected: -1.0 },
+ { input: [-1.0, 1.0, 0.5], expected: 0.0 },
+ { input: [-1.0, 1.0, 1.0], expected: 1.0 },
+ { input: [-1.0, 1.0, 2.0], expected: 3.0 },
+
+ // Infinities
+ { input: [0.0, constants.positive.infinity, 0.5], expected: kUnboundedBounds },
+ { input: [constants.positive.infinity, 0.0, 0.5], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, 1.0, 0.5], expected: kUnboundedBounds },
+ { input: [1.0, constants.negative.infinity, 0.5], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, constants.positive.infinity, 0.5], expected: kUnboundedBounds },
+ { input: [constants.positive.infinity, constants.negative.infinity, 0.5], expected: kUnboundedBounds },
+ { input: [0.0, 1.0, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [1.0, 0.0, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [0.0, 1.0, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [1.0, 0.0, constants.positive.infinity], expected: kUnboundedBounds },
+
+ // Showing how precise and imprecise versions diff
+ { input: [constants.negative.min, 10.0, 1.0], expected: 10.0 },
+ { input: [constants.negative.min, -10.0, 1.0], expected: -10.0 },
+ ];
+ })
+ )
+ .fn(t => {
+ const [x, y, z] = t.params.input;
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.mixPreciseInterval(x, y, z);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.mixPreciseInterval(${x}, ${y}, ${z}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// Some of these are hard coded, since the error intervals are difficult to express in a closed
+// human-readable form due to the inherited nature of the errors.
+// prettier-ignore
+const kSmoothStepIntervalCases = {
+ f32: [
+ // Normals
+ { input: [0, 1, 0], expected: [0, kValue.f32.positive.subnormal.min] },
+ { input: [0, 1, 1], expected: [reinterpretU32AsF32(0x3f7ffffa), reinterpretU32AsF32(0x3f800003)] }, // ~1
+ { input: [0, 2, 1], expected: [reinterpretU32AsF32(0x3efffff8), reinterpretU32AsF32(0x3f000007)] }, // ~0.5
+ { input: [0, 2, 0.5], expected: [reinterpretU32AsF32(0x3e1ffffb), reinterpretU32AsF32(0x3e200007)] }, // ~0.15625...
+ { input: [2, 0, 1], expected: [reinterpretU32AsF32(0x3efffff8), reinterpretU32AsF32(0x3f000007)] }, // ~0.5
+ { input: [2, 0, 1.5], expected: [reinterpretU32AsF32(0x3e1ffffb), reinterpretU32AsF32(0x3e200007)] }, // ~0.15625...
+ { input: [0, 100, 50], expected: [reinterpretU32AsF32(0x3efffff8), reinterpretU32AsF32(0x3f000007)] }, // ~0.5
+ { input: [0, 100, 25], expected: [reinterpretU32AsF32(0x3e1ffffb), reinterpretU32AsF32(0x3e200007)] }, // ~0.15625...
+ { input: [0, -2, -1], expected: [reinterpretU32AsF32(0x3efffff8), reinterpretU32AsF32(0x3f000007)] }, // ~0.5
+ { input: [0, -2, -0.5], expected: [reinterpretU32AsF32(0x3e1ffffb), reinterpretU32AsF32(0x3e200007)] }, // ~0.15625...
+ // Subnormals
+ { input: [kValue.f32.positive.subnormal.max, 2, 1], expected: [reinterpretU32AsF32(0x3efffff8), reinterpretU32AsF32(0x3f000007)] }, // ~0.5
+ { input: [kValue.f32.positive.subnormal.min, 2, 1], expected: [reinterpretU32AsF32(0x3efffff8), reinterpretU32AsF32(0x3f000007)] }, // ~0.5
+ { input: [kValue.f32.negative.subnormal.max, 2, 1], expected: [reinterpretU32AsF32(0x3efffff8), reinterpretU32AsF32(0x3f000007)] }, // ~0.5
+ { input: [kValue.f32.negative.subnormal.min, 2, 1], expected: [reinterpretU32AsF32(0x3efffff8), reinterpretU32AsF32(0x3f000007)] }, // ~0.5
+ { input: [0, 2, kValue.f32.positive.subnormal.max], expected: [0, kValue.f32.positive.subnormal.min] },
+ { input: [0, 2, kValue.f32.positive.subnormal.min], expected: [0, kValue.f32.positive.subnormal.min] },
+ { input: [0, 2, kValue.f32.negative.subnormal.max], expected: [0, kValue.f32.positive.subnormal.min] },
+ { input: [0, 2, kValue.f32.negative.subnormal.min], expected: [0, kValue.f32.positive.subnormal.min] },
+ ] as ScalarTripleToIntervalCase[],
+ f16: [
+ // Normals
+ { input: [0, 1, 0], expected: [0, reinterpretU16AsF16(0x0002)] },
+ { input: [0, 1, 1], expected: [reinterpretU16AsF16(0x3bfa), reinterpretU16AsF16(0x3c03)] }, // ~1
+ { input: [0, 2, 1], expected: [reinterpretU16AsF16(0x37f8), reinterpretU16AsF16(0x3807)] }, // ~0.5
+ { input: [0, 2, 0.5], expected: [reinterpretU16AsF16(0x30fb), reinterpretU16AsF16(0x3107)] }, // ~0.15625...
+ { input: [2, 0, 1], expected: [reinterpretU16AsF16(0x37f8), reinterpretU16AsF16(0x3807)] }, // ~0.5
+ { input: [2, 0, 1.5], expected: [reinterpretU16AsF16(0x30fb), reinterpretU16AsF16(0x3107)] }, // ~0.15625...
+ { input: [0, 100, 50], expected: [reinterpretU16AsF16(0x37f8), reinterpretU16AsF16(0x3807)] }, // ~0.5
+ { input: [0, 100, 25], expected: [reinterpretU16AsF16(0x30fb), reinterpretU16AsF16(0x3107)] }, // ~0.15625...
+ { input: [0, -2, -1], expected: [reinterpretU16AsF16(0x37f8), reinterpretU16AsF16(0x3807)] }, // ~0.5
+ { input: [0, -2, -0.5], expected: [reinterpretU16AsF16(0x30fb), reinterpretU16AsF16(0x3107)] }, // ~0.15625...
+ // Subnormals
+ { input: [kValue.f16.positive.subnormal.max, 2, 1], expected: [reinterpretU16AsF16(0x37f4), reinterpretU16AsF16(0x380b)] }, // ~0.5
+ { input: [kValue.f16.positive.subnormal.min, 2, 1], expected: [reinterpretU16AsF16(0x37f4), reinterpretU16AsF16(0x380b)] }, // ~0.5
+ { input: [kValue.f16.negative.subnormal.max, 2, 1], expected: [reinterpretU16AsF16(0x37f2), reinterpretU16AsF16(0x380c)] }, // ~0.5
+ { input: [kValue.f16.negative.subnormal.min, 2, 1], expected: [reinterpretU16AsF16(0x37f2), reinterpretU16AsF16(0x380c)] }, // ~0.5
+ { input: [0, 2, kValue.f16.positive.subnormal.max], expected: [0, reinterpretU16AsF16(0x0002)] },
+ { input: [0, 2, kValue.f16.positive.subnormal.min], expected: [0, reinterpretU16AsF16(0x0002)] },
+ { input: [0, 2, kValue.f32.negative.subnormal.max], expected: [0, reinterpretU16AsF16(0x0002)] },
+ { input: [0, 2, kValue.f32.negative.subnormal.min], expected: [0, reinterpretU16AsF16(0x0002)] },
+ ] as ScalarTripleToIntervalCase[],
+} as const;
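+
+// Illustrative sketch, not part of the CTS: smoothstep(low, high, x) computes
+// t = clamp((x - low) / (high - low), 0.0, 1.0) and returns t * t * (3 - 2 * t),
+// which is where the ~0.5 and ~0.15625 center points in the tables above come
+// from.
+{
+  const smoothstep = (low: number, high: number, x: number): number => {
+    const t = Math.min(Math.max((x - low) / (high - low), 0), 1);
+    return t * t * (3 - 2 * t);
+  };
+  const atMidpoint = smoothstep(0, 2, 1);  // 0.5
+  const atQuarter = smoothstep(0, 2, 0.5); // 0.15625
+}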
+
+g.test('smoothStepInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<ScalarTripleToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ ...kSmoothStepIntervalCases[p.trait],
+
+ // Normals
+ { input: [0, 1, 10], expected: 1 },
+ { input: [0, 1, -10], expected: 0 },
+
+ // Subnormals
+ { input: [0, constants.positive.subnormal.max, 1], expected: kUnboundedBounds },
+ { input: [0, constants.positive.subnormal.min, 1], expected: kUnboundedBounds },
+ { input: [0, constants.negative.subnormal.max, 1], expected: kUnboundedBounds },
+ { input: [0, constants.negative.subnormal.min, 1], expected: kUnboundedBounds },
+
+ // Infinities
+ { input: [0, 2, constants.positive.infinity], expected: kUnboundedBounds },
+ { input: [0, 2, constants.negative.infinity], expected: kUnboundedBounds },
+ { input: [constants.positive.infinity, 2, 1], expected: kUnboundedBounds },
+ { input: [constants.negative.infinity, 2, 1], expected: kUnboundedBounds },
+ { input: [0, constants.positive.infinity, 1], expected: kUnboundedBounds },
+ { input: [0, constants.negative.infinity, 1], expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const [low, high, x] = t.params.input;
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.smoothStepInterval(low, high, x);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.smoothStepInterval(${low}, ${high}, ${x}) returned ${got}. Expected ${expected}`
+ );
+ });
+
+interface ScalarToVectorCase {
+ input: number;
+ expected: (number | IntervalBounds)[];
+}
+
+g.test('unpack2x16floatInterval')
+ .paramsSubcasesOnly<ScalarToVectorCase>(
+ // prettier-ignore
+ [
+ // f16 normals
+ { input: 0x00000000, expected: [0, 0] },
+ { input: 0x80000000, expected: [0, 0] },
+ { input: 0x00008000, expected: [0, 0] },
+ { input: 0x80008000, expected: [0, 0] },
+ { input: 0x00003c00, expected: [1, 0] },
+ { input: 0x3c000000, expected: [0, 1] },
+ { input: 0x3c003c00, expected: [1, 1] },
+ { input: 0xbc00bc00, expected: [-1, -1] },
+ { input: 0x49004900, expected: [10, 10] },
+ { input: 0xc900c900, expected: [-10, -10] },
+
+ // f16 subnormals
+ { input: 0x000003ff, expected: [[0, kValue.f16.positive.subnormal.max], 0] },
+ { input: 0x000083ff, expected: [[kValue.f16.negative.subnormal.min, 0], 0] },
+
+ // f16 out of bounds
+ { input: 0x7c000000, expected: [kUnboundedBounds, kUnboundedBounds] },
+ { input: 0xffff0000, expected: [kUnboundedBounds, kUnboundedBounds] },
+ ]
+ )
+ .fn(t => {
+ const expected = FP.f32.toVector(t.params.expected);
+ const got = FP.f32.unpack2x16floatInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `unpack2x16floatInterval(${t.params.input}) returned [${got}]. Expected [${expected}]`
+ );
+ });
+
+// Scope for unpack2x16snormInterval tests, so they can define constants for their
+// magic numbers without polluting the global namespace or requiring unwieldy long
+// names.
+{
+ const kZeroBounds: IntervalBounds = [
+ reinterpretU32AsF32(0x81400000),
+ reinterpretU32AsF32(0x01400000),
+ ];
+ const kOneBoundsSnorm: IntervalBounds = [
+ reinterpretU64AsF64(0x3fef_ffff_a000_0000n),
+ reinterpretU64AsF64(0x3ff0_0000_3000_0000n),
+ ];
+ const kNegOneBoundsSnorm: IntervalBounds = [
+ reinterpretU64AsF64(0xbff0_0000_3000_0000n),
+ reinterpretU64AsF64(0xbfef_ffff_a000_0000n),
+ ];
+
+ const kHalfBounds2x16snorm: IntervalBounds = [
+ reinterpretU64AsF64(0x3fe0_001f_a000_0000n),
+ reinterpretU64AsF64(0x3fe0_0020_8000_0000n),
+ ]; // ~0.5..., due to lack of precision in i16
+ const kNegHalfBounds2x16snorm: IntervalBounds = [
+ reinterpretU64AsF64(0xbfdf_ffc0_6000_0000n),
+ reinterpretU64AsF64(0xbfdf_ffbf_8000_0000n),
+ ]; // ~-0.5..., due to lack of precision in i16
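+ // Illustrative arithmetic, assuming the WGSL unpack2x16snorm formula max(e / 32767, -1):
+ // 0x4000 -> 16384 / 32767 = ~0.500015 and 0xc001 -> -16383 / 32767 = ~-0.499985, which is
+ // why the "half" bounds above straddle values slightly different from +/-0.5.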
+
+ g.test('unpack2x16snormInterval')
+ .paramsSubcasesOnly<ScalarToVectorCase>(
+ // prettier-ignore
+ [
+ { input: 0x00000000, expected: [kZeroBounds, kZeroBounds] },
+ { input: 0x00007fff, expected: [kOneBoundsSnorm, kZeroBounds] },
+ { input: 0x7fff0000, expected: [kZeroBounds, kOneBoundsSnorm] },
+ { input: 0x7fff7fff, expected: [kOneBoundsSnorm, kOneBoundsSnorm] },
+ { input: 0x80018001, expected: [kNegOneBoundsSnorm, kNegOneBoundsSnorm] },
+ { input: 0x40004000, expected: [kHalfBounds2x16snorm, kHalfBounds2x16snorm] },
+ { input: 0xc001c001, expected: [kNegHalfBounds2x16snorm, kNegHalfBounds2x16snorm] },
+ ]
+ )
+ .fn(t => {
+ const expected = FP.f32.toVector(t.params.expected);
+ const got = FP.f32.unpack2x16snormInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `unpack2x16snormInterval(${t.params.input}) returned [${got}]. Expected [${expected}]`
+ );
+ });
+}
+
+// Scope for unpack2x16unormInterval tests, so they can define constants for their
+// magic numbers without polluting the global namespace or requiring unwieldy long
+// names.
+{
+ const kZeroBounds: IntervalBounds = [
+ reinterpretU32AsF32(0x8140_0000),
+ reinterpretU32AsF32(0x0140_0000),
+ ]; // ~0
+ const kOneBounds: IntervalBounds = [
+ reinterpretU64AsF64(0x3fef_ffff_a000_0000n),
+ reinterpretU64AsF64(0x3ff0_0000_3000_0000n),
+ ]; // ~1
+ const kHalfBounds: IntervalBounds = [
+ reinterpretU64AsF64(0x3fe0_000f_a000_0000n),
+ reinterpretU64AsF64(0x3fe0_0010_8000_0000n),
+ ]; // ~0.5..., due to lack of precision in u16
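+ // Illustrative arithmetic, assuming the WGSL unpack2x16unorm formula e / 65535:
+ // 0x8000 -> 32768 / 65535 = ~0.5000076, which is why kHalfBounds sits slightly above 0.5.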
+
+ g.test('unpack2x16unormInterval')
+ .paramsSubcasesOnly<ScalarToVectorCase>(
+ // prettier-ignore
+ [
+ { input: 0x00000000, expected: [kZeroBounds, kZeroBounds] },
+ { input: 0x0000ffff, expected: [kOneBounds, kZeroBounds] },
+ { input: 0xffff0000, expected: [kZeroBounds, kOneBounds] },
+ { input: 0xffffffff, expected: [kOneBounds, kOneBounds] },
+ { input: 0x80008000, expected: [kHalfBounds, kHalfBounds] },
+ ]
+ )
+ .fn(t => {
+ const expected = FP.f32.toVector(t.params.expected);
+ const got = FP.f32.unpack2x16unormInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `unpack2x16unormInterval(${t.params.input})\n\tReturned [${got}]\n\tExpected [${expected}]`
+ );
+ });
+}
+
+// Scope for unpack4x8snormInterval tests, so they can define constants for their
+// magic numbers without polluting the global namespace or requiring unwieldy long
+// names.
+{
+ const kZeroBounds: IntervalBounds = [
+ reinterpretU32AsF32(0x8140_0000),
+ reinterpretU32AsF32(0x0140_0000),
+ ]; // ~0
+ const kOneBounds: IntervalBounds = [
+ reinterpretU64AsF64(0x3fef_ffff_a000_0000n),
+ reinterpretU64AsF64(0x3ff0_0000_3000_0000n),
+ ]; // ~1
+ const kNegOneBounds: IntervalBounds = [
+ reinterpretU64AsF64(0xbff0_0000_3000_0000n),
+ reinterpretU64AsF64(0xbfef_ffff_a000_0000n),
+ ]; // ~-1
+ const kHalfBounds: IntervalBounds = [
+ reinterpretU64AsF64(0x3fe0_2040_2000_0000n),
+ reinterpretU64AsF64(0x3fe0_2041_0000_0000n),
+ ]; // ~0.50394..., due to lack of precision in i8
+ const kNegHalfBounds: IntervalBounds = [
+ reinterpretU64AsF64(0xbfdf_bf7f_6000_0000n),
+ reinterpretU64AsF64(0xbfdf_bf7e_8000_0000n),
+ ]; // ~-0.49606..., due to lack of precision in i8
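+ // Illustrative arithmetic, assuming the WGSL unpack4x8snorm formula max(e / 127, -1):
+ // 0x40 -> 64 / 127 = ~0.50394 and 0xc1 -> -63 / 127 = ~-0.49606, which is why the "half"
+ // bounds above straddle values slightly different from +/-0.5.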
+
+ g.test('unpack4x8snormInterval')
+ .paramsSubcasesOnly<ScalarToVectorCase>(
+ // prettier-ignore
+ [
+ { input: 0x00000000, expected: [kZeroBounds, kZeroBounds, kZeroBounds, kZeroBounds] },
+ { input: 0x0000007f, expected: [kOneBounds, kZeroBounds, kZeroBounds, kZeroBounds] },
+ { input: 0x00007f00, expected: [kZeroBounds, kOneBounds, kZeroBounds, kZeroBounds] },
+ { input: 0x007f0000, expected: [kZeroBounds, kZeroBounds, kOneBounds, kZeroBounds] },
+ { input: 0x7f000000, expected: [kZeroBounds, kZeroBounds, kZeroBounds, kOneBounds] },
+ { input: 0x00007f7f, expected: [kOneBounds, kOneBounds, kZeroBounds, kZeroBounds] },
+ { input: 0x7f7f0000, expected: [kZeroBounds, kZeroBounds, kOneBounds, kOneBounds] },
+ { input: 0x7f007f00, expected: [kZeroBounds, kOneBounds, kZeroBounds, kOneBounds] },
+ { input: 0x007f007f, expected: [kOneBounds, kZeroBounds, kOneBounds, kZeroBounds] },
+ { input: 0x7f7f7f7f, expected: [kOneBounds, kOneBounds, kOneBounds, kOneBounds] },
+ {
+ input: 0x81818181,
+ expected: [kNegOneBounds, kNegOneBounds, kNegOneBounds, kNegOneBounds]
+ },
+ {
+ input: 0x40404040,
+ expected: [kHalfBounds, kHalfBounds, kHalfBounds, kHalfBounds]
+ },
+ {
+ input: 0xc1c1c1c1,
+ expected: [kNegHalfBounds, kNegHalfBounds, kNegHalfBounds, kNegHalfBounds]
+ },
+ ]
+ )
+ .fn(t => {
+ const expected = FP.f32.toVector(t.params.expected);
+ const got = FP.f32.unpack4x8snormInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `unpack4x8snormInterval(${t.params.input})\n\tReturned [${got}]\n\tExpected [${expected}]`
+ );
+ });
+}
+
+// Scope for unpack4x8unormInterval tests, so they can define constants for their
+// magic numbers without polluting the global namespace or requiring unwieldy long
+// names.
+{
+ const kZeroBounds: IntervalBounds = [
+ reinterpretU32AsF32(0x8140_0000),
+ reinterpretU32AsF32(0x0140_0000),
+ ]; // ~0
+ const kOneBounds: IntervalBounds = [
+ reinterpretU64AsF64(0x3fef_ffff_a000_0000n),
+ reinterpretU64AsF64(0x3ff0_0000_3000_0000n),
+ ]; // ~1
+ const kHalfBounds: IntervalBounds = [
+ reinterpretU64AsF64(0x3fe0_100f_a000_0000n),
+ reinterpretU64AsF64(0x3fe0_1010_8000_0000n),
+ ]; // ~0.50196..., due to lack of precision in u8
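+ // Illustrative arithmetic, assuming the WGSL unpack4x8unorm formula e / 255:
+ // 0x80 -> 128 / 255 = ~0.50196, which is why kHalfBounds sits slightly above 0.5.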
+
+ g.test('unpack4x8unormInterval')
+ .paramsSubcasesOnly<ScalarToVectorCase>(
+ // prettier-ignore
+ [
+ { input: 0x00000000, expected: [kZeroBounds, kZeroBounds, kZeroBounds, kZeroBounds] },
+ { input: 0x000000ff, expected: [kOneBounds, kZeroBounds, kZeroBounds, kZeroBounds] },
+ { input: 0x0000ff00, expected: [kZeroBounds, kOneBounds, kZeroBounds, kZeroBounds] },
+ { input: 0x00ff0000, expected: [kZeroBounds, kZeroBounds, kOneBounds, kZeroBounds] },
+ { input: 0xff000000, expected: [kZeroBounds, kZeroBounds, kZeroBounds, kOneBounds] },
+ { input: 0x0000ffff, expected: [kOneBounds, kOneBounds, kZeroBounds, kZeroBounds] },
+ { input: 0xffff0000, expected: [kZeroBounds, kZeroBounds, kOneBounds, kOneBounds] },
+ { input: 0xff00ff00, expected: [kZeroBounds, kOneBounds, kZeroBounds, kOneBounds] },
+ { input: 0x00ff00ff, expected: [kOneBounds, kZeroBounds, kOneBounds, kZeroBounds] },
+ { input: 0xffffffff, expected: [kOneBounds, kOneBounds, kOneBounds, kOneBounds] },
+ {
+ input: 0x80808080,
+ expected: [kHalfBounds, kHalfBounds, kHalfBounds, kHalfBounds]
+ },
+ ]
+ )
+ .fn(t => {
+ const expected = FP.f32.toVector(t.params.expected);
+ const got = FP.f32.unpack4x8unormInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `unpack4x8unormInterval(${t.params.input})\n\tReturned [${got}]\n\tExpected [${expected}]`
+ );
+ });
+}
+
+interface VectorToIntervalCase {
+ input: number[];
+ expected: number | IntervalBounds;
+}
+
+g.test('lengthIntervalVector')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<VectorToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ // vec2
+ {input: [1.0, 0.0], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ {input: [0.0, 1.0], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ {input: [1.0, 1.0], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0, 1.0]'] }, // ~√2
+ {input: [-1.0, -1.0], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0, 1.0]'] }, // ~√2
+ {input: [-1.0, 1.0], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0, 1.0]'] }, // ~√2
+ {input: [0.1, 0.0], expected: kRootSumSquareExpectionInterval[p.trait]['[0.1]'] }, // ~0.1
+
+ // vec3
+ {input: [1.0, 0.0, 0.0], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ {input: [0.0, 1.0, 0.0], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ {input: [0.0, 0.0, 1.0], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ {input: [1.0, 1.0, 1.0], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0, 1.0, 1.0]'] }, // ~√3
+ {input: [-1.0, -1.0, -1.0], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0, 1.0, 1.0]'] }, // ~√3
+ {input: [1.0, -1.0, -1.0], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0, 1.0, 1.0]'] }, // ~√3
+ {input: [0.1, 0.0, 0.0], expected: kRootSumSquareExpectionInterval[p.trait]['[0.1]'] }, // ~0.1
+
+ // vec4
+ {input: [1.0, 0.0, 0.0, 0.0], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ {input: [0.0, 1.0, 0.0, 0.0], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ {input: [0.0, 0.0, 1.0, 0.0], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ {input: [0.0, 0.0, 0.0, 1.0], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ {input: [1.0, 1.0, 1.0, 1.0], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0, 1.0, 1.0, 1.0]'] }, // ~2
+ {input: [-1.0, -1.0, -1.0, -1.0], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0, 1.0, 1.0, 1.0]'] }, // ~2
+ {input: [-1.0, 1.0, -1.0, 1.0], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0, 1.0, 1.0, 1.0]'] }, // ~2
+ {input: [0.1, 0.0, 0.0, 0.0], expected: kRootSumSquareExpectionInterval[p.trait]['[0.1]'] }, // ~0.1
+
+ // Test that dot going OOB in the intermediate calculations propagates
+ { input: [constants.positive.nearest_max, constants.positive.max, constants.negative.min], expected: kUnboundedBounds },
+ { input: [constants.positive.max, constants.positive.nearest_max, constants.negative.min], expected: kUnboundedBounds },
+ { input: [constants.negative.min, constants.positive.max, constants.positive.nearest_max], expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.lengthInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.lengthInterval([${t.params.input}]) returned ${got}. Expected ${expected}`
+ );
+ });
+
+interface VectorPairToIntervalCase {
+ input: [number[], number[]];
+ expected: number | IntervalBounds;
+}
+
+g.test('distanceIntervalVector')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<VectorPairToIntervalCase>(p => {
+ // prettier-ignore
+ return [
+ // distance(x, y), where x - y = 0 has an acceptance interval of kUnboundedBounds,
+ // because distance(x, y) = length(x - y), and length(0) = kUnboundedBounds.
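+ // e.g. for the first vec2 case below, x - y = [0.0, 0.0], and length([0.0, 0.0]) has an
+ // unbounded acceptance interval.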
+
+ // vec2
+ { input: [[1.0, 0.0], [1.0, 0.0]], expected: kUnboundedBounds },
+ { input: [[1.0, 0.0], [0.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ { input: [[0.0, 0.0], [1.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ { input: [[-1.0, 0.0], [0.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ { input: [[0.0, 0.0], [-1.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ { input: [[0.0, 1.0], [-1.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0, 1.0]'] }, // ~√2
+ { input: [[0.1, 0.0], [0.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[0.1]'] }, // ~0.1
+
+ // vec3
+ { input: [[1.0, 0.0, 0.0], [1.0, 0.0, 0.0]], expected: kUnboundedBounds },
+ { input: [[1.0, 0.0, 0.0], [0.0, 0.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ { input: [[0.0, 1.0, 0.0], [0.0, 0.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ { input: [[0.0, 0.0, 1.0], [0.0, 0.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ { input: [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ { input: [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ { input: [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ { input: [[1.0, 1.0, 1.0], [0.0, 0.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0, 1.0, 1.0]'] }, // ~√3
+ { input: [[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0, 1.0, 1.0]'] }, // ~√3
+ { input: [[-1.0, -1.0, -1.0], [0.0, 0.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0, 1.0, 1.0]'] }, // ~√3
+ { input: [[0.0, 0.0, 0.0], [-1.0, -1.0, -1.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0, 1.0, 1.0]'] }, // ~√3
+ { input: [[0.1, 0.0, 0.0], [0.0, 0.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[0.1]'] }, // ~0.1
+ { input: [[0.0, 0.0, 0.0], [0.1, 0.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[0.1]'] }, // ~0.1
+
+ // vec4
+ { input: [[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]], expected: kUnboundedBounds },
+ { input: [[1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ { input: [[0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ { input: [[0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ { input: [[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ { input: [[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ { input: [[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ { input: [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ { input: [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0]'] }, // ~1
+ { input: [[1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0, 1.0, 1.0, 1.0]'] }, // ~2
+ { input: [[0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0, 1.0, 1.0, 1.0]'] }, // ~2
+ { input: [[-1.0, 1.0, -1.0, 1.0], [0.0, 0.0, 0.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0, 1.0, 1.0, 1.0]'] }, // ~2
+ { input: [[0.0, 0.0, 0.0, 0.0], [1.0, -1.0, 1.0, -1.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[1.0, 1.0, 1.0, 1.0]'] }, // ~2
+ { input: [[0.1, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[0.1]'] }, // ~0.1
+ { input: [[0.0, 0.0, 0.0, 0.0], [0.1, 0.0, 0.0, 0.0]], expected: kRootSumSquareExpectionInterval[p.trait]['[0.1]'] }, // ~0.1
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.distanceInterval(...t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.distanceInterval([${t.params.input[0]}], [${t.params.input[1]}]) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// prettier-ignore
+const kDotIntervalCases = {
+ f32: [
+ // Inputs with large values that cancel out to a finite result. In these cases, 2.0*2.0 = 4.0
+ // and 3.0*3.0 = 9.0 are much smaller than kValue.f32.positive.max, so
+ // kValue.f32.positive.max + 9.0 = kValue.f32.positive.max in f32 and even in f64. Thus, if the
+ // large positive and negative terms cancel each other first, the result is
+ // 2.0*2.0 + 3.0*3.0 = 13 in magnitude; otherwise the small products are absorbed and the
+ // result is 0.0, 4.0, or 9.0 in magnitude.
+ // https://github.com/gpuweb/cts/issues/2155
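+ // Illustrative summation orders for the first case below (assumptions about how an
+ // implementation may accumulate the dot product, not requirements):
+ //   (-max + max) + (-4.0) + (-9.0) = -13.0, when the +/-max terms cancel first;
+ //   ((-max + -4.0) + max) + -9.0 = -9.0, since -4.0 is absorbed by -max in f32;
+ // other orders yield 0.0 or -4.0, hence the acceptance interval [-13, 0].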
+ { input: [[kValue.f32.positive.max, 1.0, 2.0, 3.0], [-1.0, kValue.f32.positive.max, -2.0, -3.0]], expected: [-13, 0] },
+ { input: [[kValue.f32.positive.max, 1.0, 2.0, 3.0], [1.0, kValue.f32.negative.min, 2.0, 3.0]], expected: [0, 13] },
+ ] as VectorPairToIntervalCase[],
+ f16: [
+ // Inputs with large values that cancel out to a finite result. In these cases, 2.0*2.0 = 4.0
+ // and 3.0*3.0 = 9.0 are not negligible compared to kValue.f16.positive.max = 65504, so
+ // kValue.f16.positive.max + 9.0 = 65513 is exactly representable in f32 and f64 and exceeds
+ // the largest finite f16 value. Thus, if the large positive and negative terms do not cancel
+ // each other first, the computation overflows f16 and results in unbounded bounds.
+ // https://github.com/gpuweb/cts/issues/2155
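+ // Illustrative intermediate (assuming one possible accumulation order): -65504.0 + -4.0 =
+ // -65508.0 exceeds the largest finite f16 magnitude (65504), so any order that does not
+ // cancel the +/-max terms first goes out of bounds, giving an unbounded interval.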
+ { input: [[kValue.f16.positive.max, 1.0, 2.0, 3.0], [-1.0, kValue.f16.positive.max, -2.0, -3.0]], expected: kUnboundedBounds },
+ { input: [[kValue.f16.positive.max, 1.0, 2.0, 3.0], [1.0, kValue.f16.negative.min, 2.0, 3.0]], expected: kUnboundedBounds },
+ ] as VectorPairToIntervalCase[],
+} as const;
+
+g.test('dotInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<VectorPairToIntervalCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ // vec2
+ { input: [[1.0, 0.0], [1.0, 0.0]], expected: 1.0 },
+ { input: [[0.0, 1.0], [0.0, 1.0]], expected: 1.0 },
+ { input: [[1.0, 1.0], [1.0, 1.0]], expected: 2.0 },
+ { input: [[-1.0, -1.0], [-1.0, -1.0]], expected: 2.0 },
+ { input: [[-1.0, 1.0], [1.0, -1.0]], expected: -2.0 },
+ { input: [[0.1, 0.0], [1.0, 0.0]], expected: kConstantCorrectlyRoundedExpectation[p.trait]['0.1']}, // correctly rounded interval of 0.1
+
+ // vec3
+ { input: [[1.0, 0.0, 0.0], [1.0, 0.0, 0.0]], expected: 1.0 },
+ { input: [[0.0, 1.0, 0.0], [0.0, 1.0, 0.0]], expected: 1.0 },
+ { input: [[0.0, 0.0, 1.0], [0.0, 0.0, 1.0]], expected: 1.0 },
+ { input: [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], expected: 3.0 },
+ { input: [[-1.0, -1.0, -1.0], [-1.0, -1.0, -1.0]], expected: 3.0 },
+ { input: [[1.0, -1.0, -1.0], [-1.0, 1.0, -1.0]], expected: -1.0 },
+ { input: [[0.1, 0.0, 0.0], [1.0, 0.0, 0.0]], expected: kConstantCorrectlyRoundedExpectation[p.trait]['0.1']}, // correctly rounded interval of 0.1
+
+ // vec4
+ { input: [[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]], expected: 1.0 },
+ { input: [[0.0, 1.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]], expected: 1.0 },
+ { input: [[0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 1.0, 0.0]], expected: 1.0 },
+ { input: [[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0]], expected: 1.0 },
+ { input: [[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], expected: 4.0 },
+ { input: [[-1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0]], expected: 4.0 },
+ { input: [[-1.0, 1.0, -1.0, 1.0], [1.0, -1.0, 1.0, -1.0]], expected: -4.0 },
+ { input: [[0.1, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]], expected: kConstantCorrectlyRoundedExpectation[p.trait]['0.1']}, // correctly rounded interval of 0.1
+
+ ...kDotIntervalCases[p.trait],
+
+ // Test that going out of bounds in the intermediate calculations is caught correctly.
+ { input: [[constants.positive.nearest_max, constants.positive.max, constants.negative.min], [1.0, 1.0, 1.0]], expected: kUnboundedBounds },
+ { input: [[constants.positive.nearest_max, constants.negative.min, constants.positive.max], [1.0, 1.0, 1.0]], expected: kUnboundedBounds },
+ { input: [[constants.positive.max, constants.positive.nearest_max, constants.negative.min], [1.0, 1.0, 1.0]], expected: kUnboundedBounds },
+ { input: [[constants.negative.min, constants.positive.nearest_max, constants.positive.max], [1.0, 1.0, 1.0]], expected: kUnboundedBounds },
+ { input: [[constants.positive.max, constants.negative.min, constants.positive.nearest_max], [1.0, 1.0, 1.0]], expected: kUnboundedBounds },
+ { input: [[constants.negative.min, constants.positive.max, constants.positive.nearest_max], [1.0, 1.0, 1.0]], expected: kUnboundedBounds },
+ ];
+ })
+ )
+ .fn(t => {
+ const [x, y] = t.params.input;
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.dotInterval(x, y);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.dotInterval([${x}], [${y}]) returned ${got}. Expected ${expected}`
+ );
+ });
+
+interface VectorToVectorCase {
+ input: number[];
+ expected: (number | IntervalBounds)[];
+}
+
+// prettier-ignore
+const kNormalizeIntervalCases = {
+ f32: [
+ // vec2
+ {input: [1.0, 0.0], expected: [[reinterpretU64AsF64(0x3fef_fffe_7000_0000n), reinterpretU64AsF64(0x3ff0_0000_b000_0000n)], [reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)]] }, // [ ~1.0, ~0.0]
+ {input: [0.0, 1.0], expected: [[reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)], [reinterpretU64AsF64(0x3fef_fffe_7000_0000n), reinterpretU64AsF64(0x3ff0_0000_b000_0000n)]] }, // [ ~0.0, ~1.0]
+ {input: [-1.0, 0.0], expected: [[reinterpretU64AsF64(0xbff0_0000_b000_0000n), reinterpretU64AsF64(0xbfef_fffe_7000_0000n)], [reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)]] }, // [ ~1.0, ~0.0]
+ {input: [1.0, 1.0], expected: [[reinterpretU64AsF64(0x3fe6_a09d_5000_0000n), reinterpretU64AsF64(0x3fe6_a09f_9000_0000n)], [reinterpretU64AsF64(0x3fe6_a09d_5000_0000n), reinterpretU64AsF64(0x3fe6_a09f_9000_0000n)]] }, // [ ~1/√2, ~1/√2]
+
+ // vec3
+ {input: [1.0, 0.0, 0.0], expected: [[reinterpretU64AsF64(0x3fef_fffe_7000_0000n), reinterpretU64AsF64(0x3ff0_0000_b000_0000n)], [reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)], [reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)]] }, // [ ~1.0, ~0.0, ~0.0]
+ {input: [0.0, 1.0, 0.0], expected: [[reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)], [reinterpretU64AsF64(0x3fef_fffe_7000_0000n), reinterpretU64AsF64(0x3ff0_0000_b000_0000n)], [reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)]] }, // [ ~0.0, ~1.0, ~0.0]
+ {input: [0.0, 0.0, 1.0], expected: [[reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)], [reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)], [reinterpretU64AsF64(0x3fef_fffe_7000_0000n), reinterpretU64AsF64(0x3ff0_0000_b000_0000n)]] }, // [ ~0.0, ~0.0, ~1.0]
+ {input: [-1.0, 0.0, 0.0], expected: [[reinterpretU64AsF64(0xbff0_0000_b000_0000n), reinterpretU64AsF64(0xbfef_fffe_7000_0000n)], [reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)], [reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)]] }, // [ ~1.0, ~0.0, ~0.0]
+ {input: [1.0, 1.0, 1.0], expected: [[reinterpretU64AsF64(0x3fe2_79a6_5000_0000n), reinterpretU64AsF64(0x3fe2_79a8_5000_0000n)], [reinterpretU64AsF64(0x3fe2_79a6_5000_0000n), reinterpretU64AsF64(0x3fe2_79a8_5000_0000n)], [reinterpretU64AsF64(0x3fe2_79a6_5000_0000n), reinterpretU64AsF64(0x3fe2_79a8_5000_0000n)]] }, // [ ~1/√3, ~1/√3, ~1/√3]
+
+ // vec4
+ {input: [1.0, 0.0, 0.0, 0.0], expected: [[reinterpretU64AsF64(0x3fef_fffe_7000_0000n), reinterpretU64AsF64(0x3ff0_0000_b000_0000n)], [reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)], [reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)], [reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)]] }, // [ ~1.0, ~0.0, ~0.0, ~0.0]
+ {input: [0.0, 1.0, 0.0, 0.0], expected: [[reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)], [reinterpretU64AsF64(0x3fef_fffe_7000_0000n), reinterpretU64AsF64(0x3ff0_0000_b000_0000n)], [reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)], [reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)]] }, // [ ~0.0, ~1.0, ~0.0, ~0.0]
+ {input: [0.0, 0.0, 1.0, 0.0], expected: [[reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)], [reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)], [reinterpretU64AsF64(0x3fef_fffe_7000_0000n), reinterpretU64AsF64(0x3ff0_0000_b000_0000n)], [reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)]] }, // [ ~0.0, ~0.0, ~1.0, ~0.0]
+ {input: [0.0, 0.0, 0.0, 1.0], expected: [[reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)], [reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)], [reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)], [reinterpretU64AsF64(0x3fef_fffe_7000_0000n), reinterpretU64AsF64(0x3ff0_0000_b000_0000n)]] }, // [ ~0.0, ~0.0, ~0.0, ~1.0]
+ {input: [-1.0, 0.0, 0.0, 0.0], expected: [[reinterpretU64AsF64(0xbff0_0000_b000_0000n), reinterpretU64AsF64(0xbfef_fffe_7000_0000n)], [reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)], [reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)], [reinterpretU32AsF32(0x81200000), reinterpretU32AsF32(0x01200000)]] }, // [ ~1.0, ~0.0, ~0.0, ~0.0]
+ {input: [1.0, 1.0, 1.0, 1.0], expected: [[reinterpretU64AsF64(0x3fdf_fffe_7000_0000n), reinterpretU64AsF64(0x3fe0_0000_b000_0000n)], [reinterpretU64AsF64(0x3fdf_fffe_7000_0000n), reinterpretU64AsF64(0x3fe0_0000_b000_0000n)], [reinterpretU64AsF64(0x3fdf_fffe_7000_0000n), reinterpretU64AsF64(0x3fe0_0000_b000_0000n)], [reinterpretU64AsF64(0x3fdf_fffe_7000_0000n), reinterpretU64AsF64(0x3fe0_0000_b000_0000n)]] }, // [ ~1/√4, ~1/√4, ~1/√4, ~1/√4]
+ ] as VectorToVectorCase[],
+ f16: [
+ // vec2
+ {input: [1.0, 0.0], expected: [[reinterpretU64AsF64(0x3fef_ce00_0000_0000n), reinterpretU64AsF64(0x3ff0_1600_0000_0000n)], [reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)]] }, // [ ~1.0, ~0.0]
+ {input: [0.0, 1.0], expected: [[reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)], [reinterpretU64AsF64(0x3fef_ce00_0000_0000n), reinterpretU64AsF64(0x3ff0_1600_0000_0000n)]] }, // [ ~0.0, ~1.0]
+ {input: [-1.0, 0.0], expected: [[reinterpretU64AsF64(0xbff0_1600_0000_0000n), reinterpretU64AsF64(0xbfef_ce00_0000_0000n)], [reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)]] }, // [ ~1.0, ~0.0]
+ {input: [1.0, 1.0], expected: [[reinterpretU64AsF64(0x3fe6_7e00_0000_0000n), reinterpretU64AsF64(0x3fe6_c600_0000_0000n)], [reinterpretU64AsF64(0x3fe6_7e00_0000_0000n), reinterpretU64AsF64(0x3fe6_c600_0000_0000n)]] }, // [ ~1/√2, ~1/√2]
+
+ // vec3
+ {input: [1.0, 0.0, 0.0], expected: [[reinterpretU64AsF64(0x3fef_ce00_0000_0000n), reinterpretU64AsF64(0x3ff0_1600_0000_0000n)], [reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)], [reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)]] }, // [ ~1.0, ~0.0, ~0.0]
+ {input: [0.0, 1.0, 0.0], expected: [[reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)], [reinterpretU64AsF64(0x3fef_ce00_0000_0000n), reinterpretU64AsF64(0x3ff0_1600_0000_0000n)], [reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)]] }, // [ ~0.0, ~1.0, ~0.0]
+ {input: [0.0, 0.0, 1.0], expected: [[reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)], [reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)], [reinterpretU64AsF64(0x3fef_ce00_0000_0000n), reinterpretU64AsF64(0x3ff0_1600_0000_0000n)]] }, // [ ~0.0, ~0.0, ~1.0]
+ {input: [-1.0, 0.0, 0.0], expected: [[reinterpretU64AsF64(0xbff0_1600_0000_0000n), reinterpretU64AsF64(0xbfef_ce00_0000_0000n)], [reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)], [reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)]] }, // [ ~1.0, ~0.0, ~0.0]
+ {input: [1.0, 1.0, 1.0], expected: [[reinterpretU64AsF64(0x3fe2_5a00_0000_0000n), reinterpretU64AsF64(0x3fe2_9a00_0000_0000n)], [reinterpretU64AsF64(0x3fe2_5a00_0000_0000n), reinterpretU64AsF64(0x3fe2_9a00_0000_0000n)], [reinterpretU64AsF64(0x3fe2_5a00_0000_0000n), reinterpretU64AsF64(0x3fe2_9a00_0000_0000n)]] }, // [ ~1/√3, ~1/√3, ~1/√3]
+
+ // vec4
+ {input: [1.0, 0.0, 0.0, 0.0], expected: [[reinterpretU64AsF64(0x3fef_ce00_0000_0000n), reinterpretU64AsF64(0x3ff0_1600_0000_0000n)], [reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)], [reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)], [reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)]] }, // [ ~1.0, ~0.0, ~0.0, ~0.0]
+ {input: [0.0, 1.0, 0.0, 0.0], expected: [[reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)], [reinterpretU64AsF64(0x3fef_ce00_0000_0000n), reinterpretU64AsF64(0x3ff0_1600_0000_0000n)], [reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)], [reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)]] }, // [ ~0.0, ~1.0, ~0.0, ~0.0]
+ {input: [0.0, 0.0, 1.0, 0.0], expected: [[reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)], [reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)], [reinterpretU64AsF64(0x3fef_ce00_0000_0000n), reinterpretU64AsF64(0x3ff0_1600_0000_0000n)], [reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)]] }, // [ ~0.0, ~0.0, ~1.0, ~0.0]
+ {input: [0.0, 0.0, 0.0, 1.0], expected: [[reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)], [reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)], [reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)], [reinterpretU64AsF64(0x3fef_ce00_0000_0000n), reinterpretU64AsF64(0x3ff0_1600_0000_0000n)]] }, // [ ~0.0, ~0.0, ~0.0, ~1.0]
+ {input: [-1.0, 0.0, 0.0, 0.0], expected: [[reinterpretU64AsF64(0xbff0_1600_0000_0000n), reinterpretU64AsF64(0xbfef_ce00_0000_0000n)], [reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)], [reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)], [reinterpretU64AsF64(0xbf24_0000_0000_0000n), reinterpretU64AsF64(0x3f24_0000_0000_0000n)]] }, // [ ~1.0, ~0.0, ~0.0, ~0.0]
+ {input: [1.0, 1.0, 1.0, 1.0], expected: [[reinterpretU64AsF64(0x3fdf_ce00_0000_0000n), reinterpretU64AsF64(0x3fe0_1600_0000_0000n)], [reinterpretU64AsF64(0x3fdf_ce00_0000_0000n), reinterpretU64AsF64(0x3fe0_1600_0000_0000n)], [reinterpretU64AsF64(0x3fdf_ce00_0000_0000n), reinterpretU64AsF64(0x3fe0_1600_0000_0000n)], [reinterpretU64AsF64(0x3fdf_ce00_0000_0000n), reinterpretU64AsF64(0x3fe0_1600_0000_0000n)]] }, // [ ~1/√4, ~1/√4, ~1/√4, ~1/√4]
+ ] as VectorToVectorCase[],
+} as const;
+
+g.test('normalizeInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<VectorToVectorCase>(p => kNormalizeIntervalCases[p.trait])
+ )
+ .fn(t => {
+ const x = t.params.input;
+ const trait = FP[t.params.trait];
+ const expected = trait.toVector(t.params.expected);
+ const got = trait.normalizeInterval(x);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.normalizeInterval([${x}]) returned ${got}. Expected ${expected}`
+ );
+ });
+
+interface VectorPairToVectorCase {
+ input: [number[], number[]];
+ expected: (number | IntervalBounds)[];
+}
+
+// prettier-ignore
+const kCrossIntervalCases = {
+ f32: [
+ { input: [
+ [kValue.f32.positive.subnormal.max, kValue.f32.negative.subnormal.max, kValue.f32.negative.subnormal.min],
+ [kValue.f32.negative.subnormal.min, kValue.f32.positive.subnormal.min, kValue.f32.negative.subnormal.max]
+ ],
+ expected: [
+ [0.0, reinterpretU32AsF32(0x00000002)], // ~0
+ [0.0, reinterpretU32AsF32(0x00000002)], // ~0
+ [kValue.f32.negative.subnormal.max, kValue.f32.positive.subnormal.min] // ~0
+ ]
+ },
+ { input: [
+ [0.1, -0.1, -0.1],
+ [-0.1, 0.1, -0.1]
+ ],
+ expected: [
+ [reinterpretU32AsF32(0x3ca3d708), reinterpretU32AsF32(0x3ca3d70b)], // ~0.02
+ [reinterpretU32AsF32(0x3ca3d708), reinterpretU32AsF32(0x3ca3d70b)], // ~0.02
+ [reinterpretU32AsF32(0xb1400000), reinterpretU32AsF32(0x31400000)], // ~0
+ ]
+ },
+ ] as VectorPairToVectorCase[],
+ f16: [
+ { input: [
+ [kValue.f16.positive.subnormal.max, kValue.f16.negative.subnormal.max, kValue.f16.negative.subnormal.min],
+ [kValue.f16.negative.subnormal.min, kValue.f16.positive.subnormal.min, kValue.f16.negative.subnormal.max]
+ ],
+ expected: [
+ [0.0, reinterpretU16AsF16(0x0002)], // ~0
+ [0.0, reinterpretU16AsF16(0x0002)], // ~0
+ [kValue.f16.negative.subnormal.max, kValue.f16.positive.subnormal.min] // ~0
+ ]
+ },
+ { input: [
+ [0.1, -0.1, -0.1],
+ [-0.1, 0.1, -0.1]
+ ],
+ expected: [
+ [reinterpretU16AsF16(0x251e), reinterpretU16AsF16(0x2520)], // ~0.02
+ [reinterpretU16AsF16(0x251e), reinterpretU16AsF16(0x2520)], // ~0.02
+ [reinterpretU16AsF16(0x8100), reinterpretU16AsF16(0x0100)] // ~0
+ ]
+ },
+ ] as VectorPairToVectorCase[],
+ abstract: [
+ { input: [
+ [kValue.f64.positive.subnormal.max, kValue.f64.negative.subnormal.max, kValue.f64.negative.subnormal.min],
+ [kValue.f64.negative.subnormal.min, kValue.f64.positive.subnormal.min, kValue.f64.negative.subnormal.max]
+ ],
+ expected: [0.0, 0.0, 0.0]
+ },
+ { input: [
+ [0.1, -0.1, -0.1],
+ [-0.1, 0.1, -0.1]
+ ],
+ expected: [
+ reinterpretU64AsF64(0x3f94_7ae1_47ae_147cn), // ~0.02
+ reinterpretU64AsF64(0x3f94_7ae1_47ae_147cn), // ~0.02
+ 0.0
+ ]
+ },
+ ] as VectorPairToVectorCase[],
+} as const;
+
+g.test('crossInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<VectorPairToVectorCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ // parallel vectors, AXB == 0
+ { input: [[1.0, 0.0, 0.0], [1.0, 0.0, 0.0]], expected: [0.0, 0.0, 0.0] },
+ { input: [[0.0, 1.0, 0.0], [0.0, 1.0, 0.0]], expected: [0.0, 0.0, 0.0] },
+ { input: [[0.0, 0.0, 1.0], [0.0, 0.0, 1.0]], expected: [0.0, 0.0, 0.0] },
+ { input: [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], expected: [0.0, 0.0, 0.0] },
+ { input: [[-1.0, -1.0, -1.0], [-1.0, -1.0, -1.0]], expected: [0.0, 0.0, 0.0] },
+ { input: [[0.1, 0.0, 0.0], [1.0, 0.0, 0.0]], expected: [0.0, 0.0, 0.0] },
+ { input: [[constants.positive.subnormal.max, 0.0, 0.0], [1.0, 0.0, 0.0]], expected: [0.0, 0.0, 0.0] },
+
+ // non-parallel vectors, AXB != 0
+ { input: [[1.0, -1.0, -1.0], [-1.0, 1.0, -1.0]], expected: [2.0, 2.0, 0.0] },
+ { input: [[1.0, 2, 3], [1.0, 5.0, 7.0]], expected: [-1, -4, 3] },
+ ...kCrossIntervalCases[p.trait],
+ ];
+ })
+ )
+ .fn(t => {
+ const [x, y] = t.params.input;
+ const trait = FP[t.params.trait];
+ const expected = trait.toVector(t.params.expected);
+ const got = trait.crossInterval(x, y);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.crossInterval([${x}], [${y}]) returned ${got}. Expected ${expected}`
+ );
+ });
+
+// prettier-ignore
+const kReflectIntervalCases = {
+ f32: [
+ // vec2s
+ { input: [[0.1, 0.1], [1.0, 1.0]], expected: [[reinterpretU32AsF32(0xbe99999a), reinterpretU32AsF32(0xbe999998)], [reinterpretU32AsF32(0xbe99999a), reinterpretU32AsF32(0xbe999998)]] }, // [~-0.3, ~-0.3]
+ { input: [[kValue.f32.positive.subnormal.max, kValue.f32.negative.subnormal.max], [1.0, 1.0]], expected: [[reinterpretU32AsF32(0x80fffffe), reinterpretU32AsF32(0x00800001)], [reinterpretU32AsF32(0x80ffffff), reinterpretU32AsF32(0x00000002)]] }, // [~0.0, ~0.0]
+ // vec3s
+ { input: [[0.1, 0.1, 0.1], [1.0, 1.0, 1.0]], expected: [[reinterpretU32AsF32(0xbf000001), reinterpretU32AsF32(0xbefffffe)], [reinterpretU32AsF32(0xbf000001), reinterpretU32AsF32(0xbefffffe)], [reinterpretU32AsF32(0xbf000001), reinterpretU32AsF32(0xbefffffe)]] }, // [~-0.5, ~-0.5, ~-0.5]
+ { input: [[kValue.f32.positive.subnormal.max, kValue.f32.negative.subnormal.max, 0.0], [1.0, 1.0, 1.0]], expected: [[reinterpretU32AsF32(0x80fffffe), reinterpretU32AsF32(0x00800001)], [reinterpretU32AsF32(0x80ffffff), reinterpretU32AsF32(0x00000002)], [reinterpretU32AsF32(0x80fffffe), reinterpretU32AsF32(0x00000002)]] }, // [~0.0, ~0.0, ~0.0]
+ // vec4s
+ { input: [[0.1, 0.1, 0.1, 0.1], [1.0, 1.0, 1.0, 1.0]], expected: [[reinterpretU32AsF32(0xbf333335), reinterpretU32AsF32(0xbf333332)], [reinterpretU32AsF32(0xbf333335), reinterpretU32AsF32(0xbf333332)], [reinterpretU32AsF32(0xbf333335), reinterpretU32AsF32(0xbf333332)], [reinterpretU32AsF32(0xbf333335), reinterpretU32AsF32(0xbf333332)]] }, // [~-0.7, ~-0.7, ~-0.7, ~-0.7]
+ { input: [[kValue.f32.positive.subnormal.max, kValue.f32.negative.subnormal.max, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0]], expected: [[reinterpretU32AsF32(0x80fffffe), reinterpretU32AsF32(0x00800001)], [reinterpretU32AsF32(0x80ffffff), reinterpretU32AsF32(0x00000002)], [reinterpretU32AsF32(0x80fffffe), reinterpretU32AsF32(0x00000002)], [reinterpretU32AsF32(0x80fffffe), reinterpretU32AsF32(0x00000002)]] }, // [~0.0, ~0.0, ~0.0, ~0.0]
+ ] as VectorPairToVectorCase[],
+ f16: [
+ // vec2s
+ { input: [[0.1, 0.1], [1.0, 1.0]], expected: [[reinterpretU16AsF16(0xb4ce), reinterpretU16AsF16(0xb4cc)], [reinterpretU16AsF16(0xb4ce), reinterpretU16AsF16(0xb4cc)]] }, // [~-0.3, ~-0.3]
+ { input: [[kValue.f16.positive.subnormal.max, kValue.f16.negative.subnormal.max], [1.0, 1.0]], expected: [[reinterpretU16AsF16(0x87fe), reinterpretU16AsF16(0x0401)], [reinterpretU16AsF16(0x87ff), reinterpretU16AsF16(0x0002)]] }, // [~0.0, ~0.0]
+ // vec3s
+ { input: [[0.1, 0.1, 0.1], [1.0, 1.0, 1.0]], expected: [[reinterpretU16AsF16(0xb802), reinterpretU16AsF16(0xb7fe)], [reinterpretU16AsF16(0xb802), reinterpretU16AsF16(0xb7fe)], [reinterpretU16AsF16(0xb802), reinterpretU16AsF16(0xb7fe)]] }, // [~-0.5, ~-0.5, ~-0.5]
+ { input: [[kValue.f16.positive.subnormal.max, kValue.f16.negative.subnormal.max, 0.0], [1.0, 1.0, 1.0]], expected: [[reinterpretU16AsF16(0x87fe), reinterpretU16AsF16(0x0401)], [reinterpretU16AsF16(0x87ff), reinterpretU16AsF16(0x0002)], [reinterpretU16AsF16(0x87fe), reinterpretU16AsF16(0x0002)]] }, // [~0.0, ~0.0, ~0.0]
+ // vec4s
+ { input: [[0.1, 0.1, 0.1, 0.1], [1.0, 1.0, 1.0, 1.0]], expected: [[reinterpretU16AsF16(0xb99c), reinterpretU16AsF16(0xb998)], [reinterpretU16AsF16(0xb99c), reinterpretU16AsF16(0xb998)], [reinterpretU16AsF16(0xb99c), reinterpretU16AsF16(0xb998)], [reinterpretU16AsF16(0xb99c), reinterpretU16AsF16(0xb998)]] }, // [~-0.7, ~-0.7, ~-0.7, ~-0.7]
+ { input: [[kValue.f16.positive.subnormal.max, kValue.f16.negative.subnormal.max, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0]], expected: [[reinterpretU16AsF16(0x87fe), reinterpretU16AsF16(0x0401)], [reinterpretU16AsF16(0x87ff), reinterpretU16AsF16(0x0002)], [reinterpretU16AsF16(0x87fe), reinterpretU16AsF16(0x0002)], [reinterpretU16AsF16(0x87fe), reinterpretU16AsF16(0x0002)]] }, // [~0.0, ~0.0, ~0.0, ~0.0]
+ ] as VectorPairToVectorCase[],
+} as const;
+
+g.test('reflectInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<VectorPairToVectorCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ ...kReflectIntervalCases[p.trait],
+
+ // vec2s
+ { input: [[1.0, 0.0], [1.0, 0.0]], expected: [-1.0, 0.0] },
+ { input: [[1.0, 0.0], [0.0, 1.0]], expected: [1.0, 0.0] },
+ { input: [[0.0, 1.0], [0.0, 1.0]], expected: [0.0, -1.0] },
+ { input: [[0.0, 1.0], [1.0, 0.0]], expected: [0.0, 1.0] },
+ { input: [[1.0, 1.0], [1.0, 1.0]], expected: [-3.0, -3.0] },
+ { input: [[-1.0, -1.0], [1.0, 1.0]], expected: [3.0, 3.0] },
+ // vec3s
+ { input: [[1.0, 0.0, 0.0], [1.0, 0.0, 0.0]], expected: [-1.0, 0.0, 0.0] },
+ { input: [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]], expected: [0.0, 1.0, 0.0] },
+ { input: [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]], expected: [0.0, 0.0, 1.0] },
+ { input: [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], expected: [1.0, 0.0, 0.0] },
+ { input: [[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]], expected: [1.0, 0.0, 0.0] },
+ { input: [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], expected: [-5.0, -5.0, -5.0] },
+ { input: [[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]], expected: [5.0, 5.0, 5.0] },
+ // vec4s
+ { input: [[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]], expected: [-1.0, 0.0, 0.0, 0.0] },
+ { input: [[0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]], expected: [0.0, 1.0, 0.0, 0.0] },
+ { input: [[0.0, 0.0, 1.0, 0.0], [1.0, 0.0, 0.0, 0.0]], expected: [0.0, 0.0, 1.0, 0.0] },
+ { input: [[0.0, 0.0, 0.0, 1.0], [1.0, 0.0, 0.0, 0.0]], expected: [0.0, 0.0, 0.0, 1.0] },
+ { input: [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]], expected: [1.0, 0.0, 0.0, 0.0] },
+ { input: [[1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]], expected: [1.0, 0.0, 0.0, 0.0] },
+ { input: [[1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]], expected: [1.0, 0.0, 0.0, 0.0] },
+ { input: [[-1.0, -1.0, -1.0, -1.0], [1.0, 1.0, 1.0, 1.0]], expected: [7.0, 7.0, 7.0, 7.0] },
+ // Test that dot going OOB in the intermediate calculations propagates
+ { input: [[constants.positive.nearest_max, constants.positive.max, constants.negative.min], [1.0, 1.0, 1.0]], expected: [kUnboundedBounds, kUnboundedBounds, kUnboundedBounds] },
+ { input: [[constants.positive.nearest_max, constants.negative.min, constants.positive.max], [1.0, 1.0, 1.0]], expected: [kUnboundedBounds, kUnboundedBounds, kUnboundedBounds] },
+ { input: [[constants.positive.max, constants.positive.nearest_max, constants.negative.min], [1.0, 1.0, 1.0]], expected: [kUnboundedBounds, kUnboundedBounds, kUnboundedBounds] },
+ { input: [[constants.negative.min, constants.positive.nearest_max, constants.positive.max], [1.0, 1.0, 1.0]], expected: [kUnboundedBounds, kUnboundedBounds, kUnboundedBounds] },
+ { input: [[constants.positive.max, constants.negative.min, constants.positive.nearest_max], [1.0, 1.0, 1.0]], expected: [kUnboundedBounds, kUnboundedBounds, kUnboundedBounds] },
+ { input: [[constants.negative.min, constants.positive.max, constants.positive.nearest_max], [1.0, 1.0, 1.0]], expected: [kUnboundedBounds, kUnboundedBounds, kUnboundedBounds] },
+
+ // Test that post-dot going OOB propagates
+ { input: [[constants.positive.max, 1.0, 2.0, 3.0], [-1.0, constants.positive.max, -2.0, -3.0]], expected: [kUnboundedBounds, kUnboundedBounds, kUnboundedBounds, kUnboundedBounds] },
+ ];
+ })
+ )
+ .fn(t => {
+ const [x, y] = t.params.input;
+ const trait = FP[t.params.trait];
+ const expected = trait.toVector(t.params.expected);
+ const got = trait.reflectInterval(x, y);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.reflectInterval([${x}], [${y}]) returned ${JSON.stringify(
+ got
+ )}. Expected ${JSON.stringify(expected)}`
+ );
+ });
+
+interface MatrixToScalarCase {
+ input: number[][];
+ expected: number | IntervalBounds;
+}
+
+g.test('determinantInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .combineWithParams<MatrixToScalarCase>([
+ // Extreme values, i.e. subnormals, very large magnitudes, and values that lead to
+ // non-precise products, are intentionally not tested, since the accuracy of
+ // determinant is restricted to well-behaved inputs. Handling all cases would
+ // require ~23! options to be calculated in the 4x4 case, so it is not
+ // feasible.
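+ // Worked 2x2 example (illustrative): for the first case below,
+ // det = 1 * 4 - 2 * 3 = -2; the result is the same whether the inner arrays are read as
+ // rows or as columns, since det(M) == det(transpose(M)).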
+ {
+ input: [
+ [1, 2],
+ [3, 4],
+ ],
+ expected: -2,
+ },
+ {
+ input: [
+ [-1, 2],
+ [-3, 4],
+ ],
+ expected: 2,
+ },
+ {
+ input: [
+ [11, 22],
+ [33, 44],
+ ],
+ expected: -242,
+ },
+ {
+ input: [
+ [5, 6],
+ [8, 9],
+ ],
+ expected: -3,
+ },
+ {
+ input: [
+ [4, 6],
+ [7, 9],
+ ],
+ expected: -6,
+ },
+ {
+ input: [
+ [4, 5],
+ [7, 8],
+ ],
+ expected: -3,
+ },
+ {
+ input: [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ ],
+ expected: 0,
+ },
+ {
+ input: [
+ [-1, 2, 3],
+ [-4, 5, 6],
+ [-7, 8, 9],
+ ],
+ expected: 0,
+ },
+ {
+ input: [
+ [4, 1, -1],
+ [-3, 0, 5],
+ [5, 3, 2],
+ ],
+ expected: -20,
+ },
+ {
+ input: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ],
+ expected: 0,
+ },
+ {
+ input: [
+ [4, 0, 0, 0],
+ [3, 1, -1, 3],
+ [2, -3, 3, 1],
+ [2, 3, 3, 1],
+ ],
+ expected: -240,
+ },
+ ])
+ )
+ .fn(t => {
+ const input = t.params.input;
+ const trait = FP[t.params.trait];
+ const expected = trait.toInterval(t.params.expected);
+ const got = trait.determinantInterval(input);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.determinantInterval([${JSON.stringify(
+ input
+ )}]) returned '${got}'. Expected '${expected}'`
+ );
+ });
+
+interface MatrixToMatrixCase {
+ input: number[][];
+ expected: (number | IntervalBounds)[][];
+}
+
+g.test('transposeInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<MatrixToMatrixCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ return [
+ {
+ input: [
+ [1, 2],
+ [3, 4],
+ ],
+ expected: [
+ [1, 3],
+ [2, 4],
+ ],
+ },
+ {
+ input: [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ ],
+ expected: [
+ [1, 3, 5],
+ [2, 4, 6],
+ ],
+ },
+ {
+ input: [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ [7, 8],
+ ],
+ expected: [
+ [1, 3, 5, 7],
+ [2, 4, 6, 8],
+ ],
+ },
+ {
+ input: [
+ [1, 2, 3],
+ [4, 5, 6],
+ ],
+ expected: [
+ [1, 4],
+ [2, 5],
+ [3, 6],
+ ],
+ },
+ {
+ input: [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ ],
+ expected: [
+ [1, 4, 7],
+ [2, 5, 8],
+ [3, 6, 9],
+ ],
+ },
+ {
+ input: [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ [10, 11, 12],
+ ],
+ expected: [
+ [1, 4, 7, 10],
+ [2, 5, 8, 11],
+ [3, 6, 9, 12],
+ ],
+ },
+ {
+ input: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ ],
+ expected: [
+ [1, 5],
+ [2, 6],
+ [3, 7],
+ [4, 8],
+ ],
+ },
+ {
+ input: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ ],
+ expected: [
+ [1, 5, 9],
+ [2, 6, 10],
+ [3, 7, 11],
+ [4, 8, 12],
+ ],
+ },
+ {
+ input: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ],
+ expected: [
+ [1, 5, 9, 13],
+ [2, 6, 10, 14],
+ [3, 7, 11, 15],
+ [4, 8, 12, 16],
+ ],
+ },
+ {
+ input: [
+ [constants.positive.subnormal.max, constants.positive.subnormal.min],
+ [constants.negative.subnormal.min, constants.negative.subnormal.max],
+ ],
+ expected: [
+ [
+ [0, constants.positive.subnormal.max],
+ [constants.negative.subnormal.min, 0],
+ ],
+ [
+ [0, constants.positive.subnormal.min],
+ [constants.negative.subnormal.max, 0],
+ ],
+ ],
+ },
+ ];
+ })
+ )
+ .fn(t => {
+ const input = t.params.input;
+ const trait = FP[t.params.trait];
+ const expected = trait.toMatrix(t.params.expected);
+ const got = trait.transposeInterval(input);
+ t.expect(
+ objectEquals(expected, got),
+ `FP.${t.params.trait}.transposeInterval([${JSON.stringify(
+ input
+ )}]) returned '[${JSON.stringify(got)}]'. Expected '[${JSON.stringify(expected)}]'`
+ );
+ });
+
+interface MatrixPairToMatrixCase {
+ input: [number[][], number[][]];
+ expected: (number | IntervalBounds)[][];
+}
+
+g.test('additionMatrixMatrixInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .combineWithParams<MatrixPairToMatrixCase>([
+ // Only testing that different shapes of matrices are handled correctly
+ // here, to reduce test duplication.
+ // additionMatrixMatrixInterval uses AdditionIntervalOp for calculating intervals,
+ // so the testing for additionInterval covers the actual interval
+ // calculations.
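+ // e.g. for the first case below, elements are added component-wise:
+ // expected[1][0] = 3 + 30 = 33.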
+ {
+ input: [
+ [
+ [1, 2],
+ [3, 4],
+ ],
+ [
+ [10, 20],
+ [30, 40],
+ ],
+ ],
+ expected: [
+ [11, 22],
+ [33, 44],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ ],
+ [
+ [10, 20],
+ [30, 40],
+ [50, 60],
+ ],
+ ],
+ expected: [
+ [11, 22],
+ [33, 44],
+ [55, 66],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ [7, 8],
+ ],
+ [
+ [10, 20],
+ [30, 40],
+ [50, 60],
+ [70, 80],
+ ],
+ ],
+ expected: [
+ [11, 22],
+ [33, 44],
+ [55, 66],
+ [77, 88],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3],
+ [4, 5, 6],
+ ],
+ [
+ [10, 20, 30],
+ [40, 50, 60],
+ ],
+ ],
+ expected: [
+ [11, 22, 33],
+ [44, 55, 66],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ ],
+ [
+ [10, 20, 30],
+ [40, 50, 60],
+ [70, 80, 90],
+ ],
+ ],
+ expected: [
+ [11, 22, 33],
+ [44, 55, 66],
+ [77, 88, 99],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ [10, 11, 12],
+ ],
+ [
+ [10, 20, 30],
+ [40, 50, 60],
+ [70, 80, 90],
+ [1000, 1100, 1200],
+ ],
+ ],
+ expected: [
+ [11, 22, 33],
+ [44, 55, 66],
+ [77, 88, 99],
+ [1010, 1111, 1212],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ ],
+ [
+ [10, 20, 30, 40],
+ [50, 60, 70, 80],
+ ],
+ ],
+ expected: [
+ [11, 22, 33, 44],
+ [55, 66, 77, 88],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ ],
+ [
+ [10, 20, 30, 40],
+ [50, 60, 70, 80],
+ [90, 1000, 1100, 1200],
+ ],
+ ],
+ expected: [
+ [11, 22, 33, 44],
+ [55, 66, 77, 88],
+ [99, 1010, 1111, 1212],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ],
+ [
+ [10, 20, 30, 40],
+ [50, 60, 70, 80],
+ [90, 1000, 1100, 1200],
+ [1300, 1400, 1500, 1600],
+ ],
+ ],
+ expected: [
+ [11, 22, 33, 44],
+ [55, 66, 77, 88],
+ [99, 1010, 1111, 1212],
+ [1313, 1414, 1515, 1616],
+ ],
+ },
+ ])
+ )
+ .fn(t => {
+ const [x, y] = t.params.input;
+ const trait = FP[t.params.trait];
+ const expected = trait.toMatrix(t.params.expected);
+ const got = trait.additionMatrixMatrixInterval(x, y);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.additionMatrixMatrixInterval([${JSON.stringify(x)}], [${JSON.stringify(
+ y
+ )}]) returned '[${JSON.stringify(got)}]'. Expected '[${JSON.stringify(expected)}]'`
+ );
+ });
+
+g.test('subtractionMatrixMatrixInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .combineWithParams<MatrixPairToMatrixCase>([
+ // Only testing that different shapes of matrices are handled correctly
+ // here, to reduce test duplication.
+ // subtractionMatrixMatrixInterval uses the same interval op as subtractionInterval
+ // for calculating intervals, so the testing for subtractionInterval covers the actual
+ // interval calculations.
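+ // e.g. for the first case below, elements are subtracted component-wise:
+ // expected[1][0] = 3 - (-30) = 33.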
+ {
+ input: [
+ [
+ [1, 2],
+ [3, 4],
+ ],
+ [
+ [-10, -20],
+ [-30, -40],
+ ],
+ ],
+ expected: [
+ [11, 22],
+ [33, 44],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ ],
+ [
+ [-10, -20],
+ [-30, -40],
+ [-50, -60],
+ ],
+ ],
+ expected: [
+ [11, 22],
+ [33, 44],
+ [55, 66],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ [7, 8],
+ ],
+ [
+ [-10, -20],
+ [-30, -40],
+ [-50, -60],
+ [-70, -80],
+ ],
+ ],
+ expected: [
+ [11, 22],
+ [33, 44],
+ [55, 66],
+ [77, 88],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3],
+ [4, 5, 6],
+ ],
+ [
+ [-10, -20, -30],
+ [-40, -50, -60],
+ ],
+ ],
+ expected: [
+ [11, 22, 33],
+ [44, 55, 66],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ ],
+ [
+ [-10, -20, -30],
+ [-40, -50, -60],
+ [-70, -80, -90],
+ ],
+ ],
+ expected: [
+ [11, 22, 33],
+ [44, 55, 66],
+ [77, 88, 99],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ [10, 11, 12],
+ ],
+ [
+ [-10, -20, -30],
+ [-40, -50, -60],
+ [-70, -80, -90],
+ [-1000, -1100, -1200],
+ ],
+ ],
+ expected: [
+ [11, 22, 33],
+ [44, 55, 66],
+ [77, 88, 99],
+ [1010, 1111, 1212],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ ],
+ [
+ [-10, -20, -30, -40],
+ [-50, -60, -70, -80],
+ ],
+ ],
+ expected: [
+ [11, 22, 33, 44],
+ [55, 66, 77, 88],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ ],
+ [
+ [-10, -20, -30, -40],
+ [-50, -60, -70, -80],
+ [-90, -1000, -1100, -1200],
+ ],
+ ],
+ expected: [
+ [11, 22, 33, 44],
+ [55, 66, 77, 88],
+ [99, 1010, 1111, 1212],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ],
+ [
+ [-10, -20, -30, -40],
+ [-50, -60, -70, -80],
+ [-90, -1000, -1100, -1200],
+ [-1300, -1400, -1500, -1600],
+ ],
+ ],
+ expected: [
+ [11, 22, 33, 44],
+ [55, 66, 77, 88],
+ [99, 1010, 1111, 1212],
+ [1313, 1414, 1515, 1616],
+ ],
+ },
+ ])
+ )
+ .fn(t => {
+ const [x, y] = t.params.input;
+ const trait = FP[t.params.trait];
+ const expected = trait.toMatrix(t.params.expected);
+ const got = trait.subtractionMatrixMatrixInterval(x, y);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.subtractionMatrixMatrixInterval([${JSON.stringify(x)}], [${JSON.stringify(
+ y
+ )}]) returned '[${JSON.stringify(got)}]'. Expected '[${JSON.stringify(expected)}]'`
+ );
+ });
+
+g.test('multiplicationMatrixMatrixInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .combineWithParams<MatrixPairToMatrixCase>([
+ // Only testing that different shapes of matrices are handled correctly
+ // here, to reduce test duplication.
+ // multiplicationMatrixMatrixInterval uses transposeInterval &
+ // dotInterval for calculating intervals, so the testing for those functions
+ // covers the actual interval calculations.
+ // Keep all expected results as integers no larger than 2047 to ensure that every result is
+ // exactly representable in both f32 and f16.
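+ // Worked example (illustrative, reading each number[][] as an array of column vectors):
+ // for the first case below, the product entry at column 1, row 0 is
+ // dot(row 0 of x, column 1 of y) = dot([1, 3], [33, 44]) = 33 + 132 = 165,
+ // matching expected[1][0].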
+ {
+ input: [
+ [
+ [1, 2],
+ [3, 4],
+ ],
+ [
+ [11, 22],
+ [33, 44],
+ ],
+ ],
+ expected: [
+ [77, 110],
+ [165, 242],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2],
+ [3, 4],
+ ],
+ [
+ [11, 22],
+ [33, 44],
+ [55, 66],
+ ],
+ ],
+ expected: [
+ [77, 110],
+ [165, 242],
+ [253, 374],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2],
+ [3, 4],
+ ],
+ [
+ [11, 22],
+ [33, 44],
+ [55, 66],
+ [77, 88],
+ ],
+ ],
+ expected: [
+ [77, 110],
+ [165, 242],
+ [253, 374],
+ [341, 506],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3],
+ [4, 5, 6],
+ ],
+ [
+ [11, 22],
+ [33, 44],
+ ],
+ ],
+ expected: [
+ [99, 132, 165],
+ [209, 286, 363],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3],
+ [4, 5, 6],
+ ],
+ [
+ [11, 22],
+ [33, 44],
+ [55, 66],
+ ],
+ ],
+ expected: [
+ [99, 132, 165],
+ [209, 286, 363],
+ [319, 440, 561],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3],
+ [4, 5, 6],
+ ],
+ [
+ [11, 22],
+ [33, 44],
+ [55, 66],
+ [77, 88],
+ ],
+ ],
+ expected: [
+ [99, 132, 165],
+ [209, 286, 363],
+ [319, 440, 561],
+ [429, 594, 759],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ ],
+ [
+ [11, 22],
+ [33, 44],
+ ],
+ ],
+ expected: [
+ [121, 154, 187, 220],
+ [253, 330, 407, 484],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ ],
+ [
+ [11, 22],
+ [33, 44],
+ [55, 66],
+ ],
+ ],
+ expected: [
+ [121, 154, 187, 220],
+ [253, 330, 407, 484],
+ [385, 506, 627, 748],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ ],
+ [
+ [11, 22],
+ [33, 44],
+ [55, 66],
+ [77, 88],
+ ],
+ ],
+ expected: [
+ [121, 154, 187, 220],
+ [253, 330, 407, 484],
+ [385, 506, 627, 748],
+ [517, 682, 847, 1012],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ ],
+ [
+ [11, 22, 33],
+ [44, 55, 66],
+ ],
+ ],
+ expected: [
+ [242, 308],
+ [539, 704],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ ],
+ [
+ [11, 22, 33],
+ [44, 55, 66],
+ [77, 88, 99],
+ ],
+ ],
+ expected: [
+ [242, 308],
+ [539, 704],
+ [836, 1100],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ ],
+ [
+ [11, 22, 33],
+ [44, 55, 66],
+ [77, 88, 99],
+ [10, 11, 12],
+ ],
+ ],
+ expected: [
+ [242, 308],
+ [539, 704],
+ [836, 1100],
+ [103, 136],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ ],
+ [
+ [11, 22, 33],
+ [44, 55, 66],
+ ],
+ ],
+ expected: [
+ [330, 396, 462],
+ [726, 891, 1056],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ ],
+ [
+ [11, 22, 33],
+ [44, 55, 66],
+ [77, 88, 99],
+ ],
+ ],
+ expected: [
+ [330, 396, 462],
+ [726, 891, 1056],
+ [1122, 1386, 1650],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ ],
+ [
+ [11, 22, 33],
+ [44, 55, 66],
+ [77, 88, 99],
+ [10, 11, 12],
+ ],
+ ],
+ expected: [
+ [330, 396, 462],
+ [726, 891, 1056],
+ [1122, 1386, 1650],
+ [138, 171, 204],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ ],
+ [
+ [11, 12, 13],
+ [21, 22, 23],
+ ],
+ ],
+ expected: [
+ [188, 224, 260, 296],
+ [338, 404, 470, 536],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ ],
+ [
+ [11, 12, 13],
+ [21, 22, 23],
+ [31, 32, 33],
+ ],
+ ],
+ expected: [
+ [188, 224, 260, 296],
+ [338, 404, 470, 536],
+ [488, 584, 680, 776],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ ],
+ [
+ [11, 12, 13],
+ [21, 22, 23],
+ [31, 32, 33],
+ [41, 42, 43],
+ ],
+ ],
+ expected: [
+ [188, 224, 260, 296],
+ [338, 404, 470, 536],
+ [488, 584, 680, 776],
+ [638, 764, 890, 1016],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ [7, 8],
+ ],
+ [
+ [11, 22, 33, 44],
+ [55, 66, 77, 88],
+ ],
+ ],
+ expected: [
+ [550, 660],
+ [1254, 1540],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ [7, 8],
+ ],
+ [
+ [11, 12, 13, 14],
+ [21, 22, 23, 24],
+ [31, 32, 33, 34],
+ ],
+ ],
+ expected: [
+ [210, 260],
+ [370, 460],
+ [530, 660],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ [7, 8],
+ ],
+ [
+ [11, 12, 13, 14],
+ [21, 22, 23, 24],
+ [31, 32, 33, 34],
+ [41, 42, 43, 44],
+ ],
+ ],
+ expected: [
+ [210, 260],
+ [370, 460],
+ [530, 660],
+ [690, 860],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ [10, 11, 12],
+ ],
+ [
+ [11, 12, 13, 14],
+ [21, 22, 23, 24],
+ ],
+ ],
+ expected: [
+ [290, 340, 390],
+ [510, 600, 690],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ [10, 11, 12],
+ ],
+ [
+ [11, 12, 13, 14],
+ [21, 22, 23, 24],
+ [31, 32, 33, 34],
+ ],
+ ],
+ expected: [
+ [290, 340, 390],
+ [510, 600, 690],
+ [730, 860, 990],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ [10, 11, 12],
+ ],
+ [
+ [11, 12, 13, 14],
+ [21, 22, 23, 24],
+ [31, 32, 33, 34],
+ [41, 42, 43, 44],
+ ],
+ ],
+ expected: [
+ [290, 340, 390],
+ [510, 600, 690],
+ [730, 860, 990],
+ [950, 1120, 1290],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ],
+ [
+ [11, 12, 13, 14],
+ [21, 22, 23, 24],
+ ],
+ ],
+ expected: [
+ [370, 420, 470, 520],
+ [650, 740, 830, 920],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ],
+ [
+ [11, 12, 13, 14],
+ [21, 22, 23, 24],
+ [31, 32, 33, 34],
+ ],
+ ],
+ expected: [
+ [370, 420, 470, 520],
+ [650, 740, 830, 920],
+ [930, 1060, 1190, 1320],
+ ],
+ },
+ {
+ input: [
+ [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ],
+ [
+ [11, 12, 13, 14],
+ [21, 22, 23, 24],
+ [31, 32, 33, 34],
+ [41, 42, 43, 44],
+ ],
+ ],
+ expected: [
+ [370, 420, 470, 520],
+ [650, 740, 830, 920],
+ [930, 1060, 1190, 1320],
+ [1210, 1380, 1550, 1720],
+ ],
+ },
+ ])
+ )
+ .fn(t => {
+ const [x, y] = t.params.input;
+ const trait = FP[t.params.trait];
+ const expected = trait.toMatrix(t.params.expected);
+ const got = trait.multiplicationMatrixMatrixInterval(x, y);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.multiplicationMatrixMatrixInterval([${JSON.stringify(
+ x
+ )}], [${JSON.stringify(y)}]) returned '[${JSON.stringify(got)}]'. Expected '[${JSON.stringify(
+ expected
+ )}]'`
+ );
+ });
+
+interface MatrixScalarToMatrixCase {
+ matrix: number[][];
+ scalar: number;
+ expected: (number | IntervalBounds)[][];
+}
+
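+// The cases below (from the linked gpuweb/cts issue) pair the most negative
+// finite value with a negative subnormal scalar. Since subnormals may be
+// flushed to zero, the scalar acts as either 0 or its subnormal value, so the
+// affected element is an interval spanning roughly [0, 4] rather than a single
+// value.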
+const kMultiplicationMatrixScalarIntervalCases = {
+ f32: [
+ // From https://github.com/gpuweb/cts/issues/3044
+ {
+ matrix: [
+ [kValue.f32.negative.min, 0],
+ [0, 0],
+ ],
+ scalar: kValue.f32.negative.subnormal.min,
+ expected: [
+ [[0, reinterpretU32AsF32(0x407ffffe)], 0], // [[0, 3.9999995...], 0],
+ [0, 0],
+ ],
+ },
+ ] as MatrixScalarToMatrixCase[],
+ f16: [
+ // From https://github.com/gpuweb/cts/issues/3044
+ {
+ matrix: [
+ [kValue.f16.negative.min, 0],
+ [0, 0],
+ ],
+ scalar: kValue.f16.negative.subnormal.min,
+ expected: [
+ [[0, reinterpretU16AsF16(0x43fe)], 0], // [[0, 3.99609375], 0]
+ [0, 0],
+ ],
+ },
+ ] as MatrixScalarToMatrixCase[],
+} as const;
+
+g.test('multiplicationMatrixScalarInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<MatrixScalarToMatrixCase>(p => {
+ // Primarily testing that different shapes of matrices are handled correctly
+      // here, to reduce test duplication. Additional testing for an edge case
+      // discovered in https://github.com/gpuweb/cts/issues/3044.
+      //
+      // multiplicationMatrixScalarInterval uses multiplicationInterval for
+      // calculating intervals, so the testing for multiplicationInterval
+      // covers the actual interval calculations.
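+      // Conceptually, result[c][r] ~ multiplicationInterval(matrix[c][r], scalar),
+      // i.e. the interval is computed elementwise.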
+ return [
+ {
+ matrix: [
+ [1, 2],
+ [3, 4],
+ ],
+ scalar: 10,
+ expected: [
+ [10, 20],
+ [30, 40],
+ ],
+ },
+ {
+ matrix: [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ ],
+ scalar: 10,
+ expected: [
+ [10, 20],
+ [30, 40],
+ [50, 60],
+ ],
+ },
+ {
+ matrix: [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ [7, 8],
+ ],
+ scalar: 10,
+ expected: [
+ [10, 20],
+ [30, 40],
+ [50, 60],
+ [70, 80],
+ ],
+ },
+ {
+ matrix: [
+ [1, 2, 3],
+ [4, 5, 6],
+ ],
+ scalar: 10,
+ expected: [
+ [10, 20, 30],
+ [40, 50, 60],
+ ],
+ },
+ {
+ matrix: [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ ],
+ scalar: 10,
+ expected: [
+ [10, 20, 30],
+ [40, 50, 60],
+ [70, 80, 90],
+ ],
+ },
+ {
+ matrix: [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ [10, 11, 12],
+ ],
+ scalar: 10,
+ expected: [
+ [10, 20, 30],
+ [40, 50, 60],
+ [70, 80, 90],
+ [100, 110, 120],
+ ],
+ },
+ {
+ matrix: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ ],
+ scalar: 10,
+ expected: [
+ [10, 20, 30, 40],
+ [50, 60, 70, 80],
+ ],
+ },
+ {
+ matrix: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ ],
+ scalar: 10,
+ expected: [
+ [10, 20, 30, 40],
+ [50, 60, 70, 80],
+ [90, 100, 110, 120],
+ ],
+ },
+ {
+ matrix: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ],
+ scalar: 10,
+ expected: [
+ [10, 20, 30, 40],
+ [50, 60, 70, 80],
+ [90, 100, 110, 120],
+ [130, 140, 150, 160],
+ ],
+ },
+ ...kMultiplicationMatrixScalarIntervalCases[p.trait],
+ ];
+ })
+ )
+ .fn(t => {
+ const matrix = t.params.matrix;
+ const scalar = t.params.scalar;
+ const trait = FP[t.params.trait];
+ const expected = trait.toMatrix(t.params.expected);
+ const got = trait.multiplicationMatrixScalarInterval(matrix, scalar);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.multiplicationMatrixScalarInterval([${JSON.stringify(
+ matrix
+ )}], ${scalar}) returned '[${JSON.stringify(got)}]'. Expected '[${JSON.stringify(expected)}]'`
+ );
+ });
+
+// There are no explicit tests for multiplicationScalarMatrixInterval, since it
+// is just a pass-through to multiplicationMatrixScalarInterval
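+// (presumably multiplicationScalarMatrixInterval(scalar, matrix) just forwards
+// to multiplicationMatrixScalarInterval(matrix, scalar))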
+
+interface MatrixVectorToVectorCase {
+ matrix: number[][];
+ vector: number[];
+ expected: (number | IntervalBounds)[];
+}
+
+g.test('multiplicationMatrixVectorInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .combineWithParams<MatrixVectorToVectorCase>([
+ // Only testing that different shapes of matrices are handled correctly
+ // here, to reduce test duplication.
+ // multiplicationMatrixVectorInterval uses DotIntervalOp &
+ // TransposeIntervalOp for calculating intervals, so the testing for
+ // dotInterval & transposeInterval covers the actual interval
+ // calculations.
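+      // Conceptually, result[r] ~ dotInterval(transposeInterval(matrix)[r], vector),
+      // i.e. each matrix row dotted with the vector.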
+ {
+ matrix: [
+ [1, 2],
+ [3, 4],
+ ],
+ vector: [11, 22],
+ expected: [77, 110],
+ },
+ {
+ matrix: [
+ [1, 2, 3],
+ [4, 5, 6],
+ ],
+ vector: [11, 22],
+ expected: [99, 132, 165],
+ },
+ {
+ matrix: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ ],
+ vector: [11, 22],
+ expected: [121, 154, 187, 220],
+ },
+ {
+ matrix: [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ ],
+ vector: [11, 22, 33],
+ expected: [242, 308],
+ },
+ {
+ matrix: [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ ],
+ vector: [11, 22, 33],
+ expected: [330, 396, 462],
+ },
+ {
+ matrix: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ ],
+ vector: [11, 22, 33],
+ expected: [418, 484, 550, 616],
+ },
+ {
+ matrix: [
+ [1, 2],
+ [3, 4],
+ [5, 6],
+ [7, 8],
+ ],
+ vector: [11, 22, 33, 44],
+ expected: [550, 660],
+ },
+ {
+ matrix: [
+ [1, 2, 3],
+ [4, 5, 6],
+ [7, 8, 9],
+ [10, 11, 12],
+ ],
+ vector: [11, 22, 33, 44],
+ expected: [770, 880, 990],
+ },
+ {
+ matrix: [
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16],
+ ],
+ vector: [11, 22, 33, 44],
+ expected: [990, 1100, 1210, 1320],
+ },
+ ])
+ )
+ .fn(t => {
+ const matrix = t.params.matrix;
+ const vector = t.params.vector;
+ const trait = FP[t.params.trait];
+ const expected = trait.toVector(t.params.expected);
+ const got = trait.multiplicationMatrixVectorInterval(matrix, vector);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.multiplicationMatrixVectorInterval([${JSON.stringify(
+ matrix
+ )}], [${JSON.stringify(vector)}]) returned '[${JSON.stringify(
+ got
+ )}]'. Expected '[${JSON.stringify(expected)}]'`
+ );
+ });
+
+interface VectorMatrixToVectorCase {
+ vector: number[];
+ matrix: number[][];
+ expected: (number | IntervalBounds)[];
+}
+
+g.test('multiplicationVectorMatrixInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .combineWithParams<VectorMatrixToVectorCase>([
+ // Only testing that different shapes of matrices are handled correctly
+ // here, to reduce test duplication.
+ // multiplicationVectorMatrixInterval uses DotIntervalOp for calculating
+ // intervals, so the testing for dotInterval covers the actual interval
+ // calculations.
+      // Keep all expected results integers no larger than 2047 to ensure that
+      // every result is exactly representable in both f32 and f16.
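+      // Conceptually, result[c] ~ dotInterval(vector, matrix[c]),
+      // i.e. the vector dotted with each matrix column.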
+ {
+ vector: [1, 2],
+ matrix: [
+ [11, 22],
+ [33, 44],
+ ],
+ expected: [55, 121],
+ },
+ {
+ vector: [1, 2],
+ matrix: [
+ [11, 22],
+ [33, 44],
+ [55, 66],
+ ],
+ expected: [55, 121, 187],
+ },
+ {
+ vector: [1, 2],
+ matrix: [
+ [11, 22],
+ [33, 44],
+ [55, 66],
+ [77, 88],
+ ],
+ expected: [55, 121, 187, 253],
+ },
+ {
+ vector: [1, 2, 3],
+ matrix: [
+ [11, 12, 13],
+ [21, 22, 23],
+ ],
+ expected: [74, 134],
+ },
+ {
+ vector: [1, 2, 3],
+ matrix: [
+ [11, 12, 13],
+ [21, 22, 23],
+ [31, 32, 33],
+ ],
+ expected: [74, 134, 194],
+ },
+ {
+ vector: [1, 2, 3],
+ matrix: [
+ [11, 12, 13],
+ [21, 22, 23],
+ [31, 32, 33],
+ [41, 42, 43],
+ ],
+ expected: [74, 134, 194, 254],
+ },
+ {
+ vector: [1, 2, 3, 4],
+ matrix: [
+ [11, 12, 13, 14],
+ [21, 22, 23, 24],
+ ],
+ expected: [130, 230],
+ },
+ {
+ vector: [1, 2, 3, 4],
+ matrix: [
+ [11, 12, 13, 14],
+ [21, 22, 23, 24],
+ [31, 32, 33, 34],
+ ],
+ expected: [130, 230, 330],
+ },
+ {
+ vector: [1, 2, 3, 4],
+ matrix: [
+ [11, 12, 13, 14],
+ [21, 22, 23, 24],
+ [31, 32, 33, 34],
+ [41, 42, 43, 44],
+ ],
+ expected: [130, 230, 330, 430],
+ },
+ ])
+ )
+ .fn(t => {
+ const vector = t.params.vector;
+ const matrix = t.params.matrix;
+ const trait = FP[t.params.trait];
+ const expected = trait.toVector(t.params.expected);
+ const got = trait.multiplicationVectorMatrixInterval(vector, matrix);
+ t.expect(
+ objectEquals(expected, got),
+ `${t.params.trait}.multiplicationVectorMatrixInterval([${JSON.stringify(
+ vector
+ )}], [${JSON.stringify(matrix)}]) returned '[${JSON.stringify(
+ got
+ )}]'. Expected '[${JSON.stringify(expected)}]'`
+ );
+ });
+
+// API - Acceptance Intervals w/ bespoke implementations
+
+interface FaceForwardCase {
+ input: [number[], number[], number[]];
+ expected: ((number | IntervalBounds)[] | undefined)[];
+}
+
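+// Reference behaviour exercised below: faceForwardIntervals(x, y, z) corresponds
+// to faceForward(x, y, z), which yields x when dot(y, z) < 0 and -x otherwise.
+// The `expected` field therefore lists the possible result vectors: a single
+// vector in the unambiguous cases, both candidates when the dot interval spans 0,
+// and undefined entries when the dot computation goes out of bounds.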
+g.test('faceForwardIntervals')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<FaceForwardCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ // vec2
+ { input: [[1.0, 0.0], [1.0, 0.0], [1.0, 0.0]], expected: [[-1.0, 0.0]] },
+ { input: [[-1.0, 0.0], [1.0, 0.0], [1.0, 0.0]], expected: [[1.0, 0.0]] },
+ { input: [[1.0, 0.0], [-1.0, 1.0], [1.0, -1.0]], expected: [[1.0, 0.0]] },
+ { input: [[-1.0, 0.0], [-1.0, 1.0], [1.0, -1.0]], expected: [[-1.0, 0.0]] },
+ { input: [[10.0, 0.0], [10.0, 0.0], [10.0, 0.0]], expected: [[-10.0, 0.0]] },
+ { input: [[-10.0, 0.0], [10.0, 0.0], [10.0, 0.0]], expected: [[10.0, 0.0]] },
+ { input: [[10.0, 0.0], [-10.0, 10.0], [10.0, -10.0]], expected: [[10.0, 0.0]] },
+ { input: [[-10.0, 0.0], [-10.0, 10.0], [10.0, -10.0]], expected: [[-10.0, 0.0]] },
+ { input: [[0.1, 0.0], [0.1, 0.0], [0.1, 0.0]], expected: [[kConstantCorrectlyRoundedExpectation[p.trait]['-0.1'], 0.0]] },
+ { input: [[-0.1, 0.0], [0.1, 0.0], [0.1, 0.0]], expected: [[kConstantCorrectlyRoundedExpectation[p.trait]['0.1'], 0.0]] },
+ { input: [[0.1, 0.0], [-0.1, 0.1], [0.1, -0.1]], expected: [[kConstantCorrectlyRoundedExpectation[p.trait]['0.1'], 0.0]] },
+ { input: [[-0.1, 0.0], [-0.1, 0.1], [0.1, -0.1]], expected: [[kConstantCorrectlyRoundedExpectation[p.trait]['-0.1'], 0.0]] },
+
+ // vec3
+ { input: [[1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0]], expected: [[-1.0, 0.0, 0.0]] },
+ { input: [[-1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0]], expected: [[1.0, 0.0, 0.0]] },
+ { input: [[1.0, 0.0, 0.0], [-1.0, 1.0, 0.0], [1.0, -1.0, 0.0]], expected: [[1.0, 0.0, 0.0]] },
+ { input: [[-1.0, 0.0, 0.0], [-1.0, 1.0, 0.0], [1.0, -1.0, 0.0]], expected: [[-1.0, 0.0, 0.0]] },
+ { input: [[10.0, 0.0, 0.0], [10.0, 0.0, 0.0], [10.0, 0.0, 0.0]], expected: [[-10.0, 0.0, 0.0]] },
+ { input: [[-10.0, 0.0, 0.0], [10.0, 0.0, 0.0], [10.0, 0.0, 0.0]], expected: [[10.0, 0.0, 0.0]] },
+ { input: [[10.0, 0.0, 0.0], [-10.0, 10.0, 0.0], [10.0, -10.0, 0.0]], expected: [[10.0, 0.0, 0.0]] },
+ { input: [[-10.0, 0.0, 0.0], [-10.0, 10.0, 0.0], [10.0, -10.0, 0.0]], expected: [[-10.0, 0.0, 0.0]] },
+ { input: [[0.1, 0.0, 0.0], [0.1, 0.0, 0.0], [0.1, 0.0, 0.0]], expected: [[kConstantCorrectlyRoundedExpectation[p.trait]['-0.1'], 0.0, 0.0]] },
+ { input: [[-0.1, 0.0, 0.0], [0.1, 0.0, 0.0], [0.1, 0.0, 0.0]], expected: [[kConstantCorrectlyRoundedExpectation[p.trait]['0.1'], 0.0, 0.0]] },
+ { input: [[0.1, 0.0, 0.0], [-0.1, 0.0, 0.0], [0.1, -0.0, 0.0]], expected: [[kConstantCorrectlyRoundedExpectation[p.trait]['0.1'], 0.0, 0.0]] },
+ { input: [[-0.1, 0.0, 0.0], [-0.1, 0.0, 0.0], [0.1, -0.0, 0.0]], expected: [[kConstantCorrectlyRoundedExpectation[p.trait]['-0.1'], 0.0, 0.0]] },
+
+ // vec4
+ { input: [[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]], expected: [[-1.0, 0.0, 0.0, 0.0]] },
+ { input: [[-1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]], expected: [[1.0, 0.0, 0.0, 0.0]] },
+ { input: [[1.0, 0.0, 0.0, 0.0], [-1.0, 1.0, 0.0, 0.0], [1.0, -1.0, 0.0, 0.0]], expected: [[1.0, 0.0, 0.0, 0.0]] },
+ { input: [[-1.0, 0.0, 0.0, 0.0], [-1.0, 1.0, 0.0, 0.0], [1.0, -1.0, 0.0, 0.0]], expected: [[-1.0, 0.0, 0.0, 0.0]] },
+ { input: [[10.0, 0.0, 0.0, 0.0], [10.0, 0.0, 0.0, 0.0], [10.0, 0.0, 0.0, 0.0]], expected: [[-10.0, 0.0, 0.0, 0.0]] },
+ { input: [[-10.0, 0.0, 0.0, 0.0], [10.0, 0.0, 0.0, 0.0], [10.0, 0.0, 0.0, 0.0]], expected: [[10.0, 0.0, 0.0, 0.0]] },
+ { input: [[10.0, 0.0, 0.0, 0.0], [-10.0, 10.0, 0.0, 0.0], [10.0, -10.0, 0.0, 0.0]], expected: [[10.0, 0.0, 0.0, 0.0]] },
+ { input: [[-10.0, 0.0, 0.0, 0.0], [-10.0, 10.0, 0.0, 0.0], [10.0, -10.0, 0.0, 0.0]], expected: [[-10.0, 0.0, 0.0, 0.0]] },
+ { input: [[0.1, 0.0, 0.0, 0.0], [0.1, 0.0, 0.0, 0.0], [0.1, 0.0, 0.0, 0.0]], expected: [[kConstantCorrectlyRoundedExpectation[p.trait]['-0.1'], 0.0, 0.0, 0.0]] },
+ { input: [[-0.1, 0.0, 0.0, 0.0], [0.1, 0.0, 0.0, 0.0], [0.1, 0.0, 0.0, 0.0]], expected: [[kConstantCorrectlyRoundedExpectation[p.trait]['0.1'], 0.0, 0.0, 0.0]] },
+ { input: [[0.1, 0.0, 0.0, 0.0], [-0.1, 0.0, 0.0, 0.0], [0.1, -0.0, 0.0, 0.0]], expected: [[kConstantCorrectlyRoundedExpectation[p.trait]['0.1'], 0.0, 0.0, 0.0]] },
+ { input: [[-0.1, 0.0, 0.0, 0.0], [-0.1, 0.0, 0.0, 0.0], [0.1, -0.0, 0.0, 0.0]], expected: [[kConstantCorrectlyRoundedExpectation[p.trait]['-0.1'], 0.0, 0.0, 0.0]] },
+
+ // dot(y, z) === 0
+ { input: [[1.0, 1.0], [1.0, 0.0], [0.0, 1.0]], expected: [[-1.0, -1.0]] },
+
+ // subnormals, also dot(y, z) spans 0
+ { input: [[constants.positive.subnormal.max, 0.0], [constants.positive.subnormal.min, 0.0], [constants.negative.subnormal.min, 0.0]], expected: [[[0.0, constants.positive.subnormal.max], 0.0], [[constants.negative.subnormal.min, 0], 0.0]] },
+
+ // dot going OOB returns [undefined, x, -x]
+ { input: [[1.0, 1.0], [constants.positive.max, constants.positive.max], [constants.positive.max, constants.positive.max]], expected: [undefined, [1, 1], [-1, -1]] },
+ ];
+ })
+ )
+ .fn(t => {
+ const [x, y, z] = t.params.input;
+ const trait = FP[t.params.trait];
+ const expected = t.params.expected.map(e => (e !== undefined ? trait.toVector(e) : undefined));
+ const got = trait.faceForwardIntervals(x, y, z);
+ t.expect(
+ objectEquals(expected, got),
+      `${t.params.trait}.faceForwardIntervals([${x}], [${y}], [${z}]) returned [${got}]. Expected [${expected}]`
+ );
+ });
+
+interface ModfCase {
+ input: number;
+ fract: number | IntervalBounds;
+ whole: number | IntervalBounds;
+}
+
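+// Reference behaviour exercised below: modf(x) splits x into a fractional part
+// and a whole part that sum back to x, e.g. modf(-2.5) gives fract -0.5 and
+// whole -2.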
+g.test('modfInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'abstract'] as const)
+ .beginSubcases()
+ .expandWithParams<ModfCase>(p => {
+ const constants = FP[p.trait].constants();
+ // prettier-ignore
+ return [
+ // Normals
+ { input: 0, fract: 0, whole: 0 },
+ { input: 1, fract: 0, whole: 1 },
+ { input: -1, fract: 0, whole: -1 },
+ { input: 0.5, fract: 0.5, whole: 0 },
+ { input: -0.5, fract: -0.5, whole: 0 },
+ { input: 2.5, fract: 0.5, whole: 2 },
+ { input: -2.5, fract: -0.5, whole: -2 },
+ { input: 10.0, fract: 0, whole: 10 },
+ { input: -10.0, fract: 0, whole: -10 },
+
+ // Subnormals
+ { input: constants.positive.subnormal.min, fract: [0, constants.positive.subnormal.min], whole: 0 },
+ { input: constants.positive.subnormal.max, fract: [0, constants.positive.subnormal.max], whole: 0 },
+ { input: constants.negative.subnormal.min, fract: [constants.negative.subnormal.min, 0], whole: 0 },
+ { input: constants.negative.subnormal.max, fract: [constants.negative.subnormal.max, 0], whole: 0 },
+
+ // Boundaries
+ { input: constants.negative.min, fract: 0, whole: constants.negative.min },
+ { input: constants.negative.max, fract: constants.negative.max, whole: 0 },
+ { input: constants.positive.min, fract: constants.positive.min, whole: 0 },
+ { input: constants.positive.max, fract: 0, whole: constants.positive.max },
+ ];
+ })
+ )
+ .fn(t => {
+ const trait = FP[t.params.trait];
+ const expected = {
+ fract: trait.toInterval(t.params.fract),
+ whole: trait.toInterval(t.params.whole),
+ };
+
+ const got = trait.modfInterval(t.params.input);
+ t.expect(
+ objectEquals(expected, got),
+      `${t.params.trait}.modfInterval(${t.params.input}) returned { fract: [${got.fract}], whole: [${got.whole}] }. Expected { fract: [${expected.fract}], whole: [${expected.whole}] }`
+ );
+ });
+
+interface RefractCase {
+ input: [number[], number[], number];
+ expected: (number | IntervalBounds)[];
+}
+
+// Scope for refractInterval tests so that they can have constants for magic
+// numbers that don't pollute the global namespace or have unwieldy long names.
+{
+ const kNegativeOneBounds = {
+ f32: [
+ reinterpretU64AsF64(0xbff0_0000_c000_0000n),
+ reinterpretU64AsF64(0xbfef_ffff_4000_0000n),
+ ] as IntervalBounds,
+ f16: [reinterpretU16AsF16(0xbc06), reinterpretU16AsF16(0xbbfa)] as IntervalBounds,
+ } as const;
+
+ // prettier-ignore
+ const kRefractIntervalCases = {
+ f32: [
+ // k > 0
+ // vec2
+ { input: [[1, -2], [3, 4], 5], expected: [[reinterpretU32AsF32(0x40ce87a4), reinterpretU32AsF32(0x40ce8840)], // ~6.454...
+ [reinterpretU32AsF32(0xc100fae8), reinterpretU32AsF32(0xc100fa80)]] }, // ~-8.061...
+ // vec3
+ { input: [[1, -2, 3], [-4, 5, -6], 7], expected: [[reinterpretU32AsF32(0x40d24480), reinterpretU32AsF32(0x40d24c00)], // ~6.571...
+ [reinterpretU32AsF32(0xc1576f80), reinterpretU32AsF32(0xc1576ad0)], // ~-13.464...
+ [reinterpretU32AsF32(0x41a2d9b0), reinterpretU32AsF32(0x41a2dc80)]] }, // ~20.356...
+ // vec4
+ { input: [[1, -2, 3, -4], [-5, 6, -7, 8], 9], expected: [[reinterpretU32AsF32(0x410ae480), reinterpretU32AsF32(0x410af240)], // ~8.680...
+ [reinterpretU32AsF32(0xc18cf7c0), reinterpretU32AsF32(0xc18cef80)], // ~-17.620...
+ [reinterpretU32AsF32(0x41d46cc0), reinterpretU32AsF32(0x41d47660)], // ~26.553...
+ [reinterpretU32AsF32(0xc20dfa80), reinterpretU32AsF32(0xc20df500)]] }, // ~-35.494...
+ ] as RefractCase[],
+ f16: [
+ // k > 0
+ // vec2
+ { input: [[1, -2], [3, 4], 5], expected: [[reinterpretU16AsF16(0x4620), reinterpretU16AsF16(0x46bc)], // ~6.454...
+ [reinterpretU16AsF16(0xc840), reinterpretU16AsF16(0xc7b0)]] }, // ~-8.061...
+ // vec3
+ { input: [[1, -2, 3], [-4, 5, -6], 7], expected: [[reinterpretU16AsF16(0x4100), reinterpretU16AsF16(0x4940)], // ~6.571...
+ [reinterpretU16AsF16(0xcc98), reinterpretU16AsF16(0xc830)], // ~-13.464...
+ [reinterpretU16AsF16(0x4b20), reinterpretU16AsF16(0x4e90)]] }, // ~20.356...
+ // vec4
+ // x = [1, -2, 3, -4], y = [-5, 6, -7, 8], z = 9,
+      // dot(y, x) = -70, k = 1.0 - 9 * 9 * (1.0 - 70 * 70) = 396820, which overflows f16.
+ { input: [[1, -2, 3, -4], [-5, 6, -7, 8], 9], expected: [kUnboundedBounds, kUnboundedBounds, kUnboundedBounds, kUnboundedBounds] },
+ // x = [1, -2, 3, -4], y = [-5, 4, -3, 2], z = 2.5,
+ // dot(y, x) = -30, k = 1.0 - 2.5 * 2.5 * (1.0 - 30 * 30) = 5619.75.
+      // a = z * dot(y, x) + sqrt(k) = ~-0.035, result is about z * x - a * y = [~2.325, ~-4.86, ~7.395, ~-9.93]
+ { input: [[1, -2, 3, -4], [-5, 4, -3, 2], 2.5], expected: [[reinterpretU16AsF16(0x3900), reinterpretU16AsF16(0x4410)], // ~2.325
+ [reinterpretU16AsF16(0xc640), reinterpretU16AsF16(0xc300)], // ~-4.86
+ [reinterpretU16AsF16(0x4660), reinterpretU16AsF16(0x4838)], // ~7.4025
+ [reinterpretU16AsF16(0xc950), reinterpretU16AsF16(0xc8a0)]] }, // ~-9.93
+ ] as RefractCase[],
+ } as const;
+
+ g.test('refractInterval')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16'] as const)
+ .beginSubcases()
+ .expandWithParams<RefractCase>(p => {
+ const trait = FP[p.trait];
+ const constants = trait.constants();
+ // prettier-ignore
+ return [
+ ...kRefractIntervalCases[p.trait],
+
+ // k < 0
+ { input: [[1, 1], [0.1, 0], 10], expected: [0, 0] },
+
+ // k contains 0
+ { input: [[1, 1], [0.1, 0], 1.005038], expected: [kUnboundedBounds, kUnboundedBounds] },
+
+ // k > 0
+ // vec2
+ { input: [[1, 1], [1, 0], 1], expected: [kNegativeOneBounds[p.trait], 1] },
+ // vec3
+ { input: [[1, 1, 1], [1, 0, 0], 1], expected: [kNegativeOneBounds[p.trait], 1, 1] },
+ // vec4
+ { input: [[1, 1, 1, 1], [1, 0, 0, 0], 1], expected: [kNegativeOneBounds[p.trait], 1, 1, 1] },
+
+          // Test that the dot product going out-of-bounds in the intermediate calculations propagates
+ { input: [[constants.positive.nearest_max, constants.positive.max, constants.negative.min], [1.0, 1.0, 1.0], 1], expected: [kUnboundedBounds, kUnboundedBounds, kUnboundedBounds] },
+ { input: [[constants.positive.nearest_max, constants.negative.min, constants.positive.max], [1.0, 1.0, 1.0], 1], expected: [kUnboundedBounds, kUnboundedBounds, kUnboundedBounds] },
+ { input: [[constants.positive.max, constants.positive.nearest_max, constants.negative.min], [1.0, 1.0, 1.0], 1], expected: [kUnboundedBounds, kUnboundedBounds, kUnboundedBounds] },
+ { input: [[constants.negative.min, constants.positive.nearest_max, constants.positive.max], [1.0, 1.0, 1.0], 1], expected: [kUnboundedBounds, kUnboundedBounds, kUnboundedBounds] },
+ { input: [[constants.positive.max, constants.negative.min, constants.positive.nearest_max], [1.0, 1.0, 1.0], 1], expected: [kUnboundedBounds, kUnboundedBounds, kUnboundedBounds] },
+ { input: [[constants.negative.min, constants.positive.max, constants.positive.nearest_max], [1.0, 1.0, 1.0], 1], expected: [kUnboundedBounds, kUnboundedBounds, kUnboundedBounds] },
+ ];
+ })
+ )
+ .fn(t => {
+ const [i, s, r] = t.params.input;
+ const trait = FP[t.params.trait];
+ const expected = trait.toVector(t.params.expected);
+ const got = trait.refractInterval(i, s, r);
+ t.expect(
+ objectEquals(expected, got),
+        `${t.params.trait}.refractInterval([${i}], [${s}], ${r}) returned [${got}]. Expected [${expected}]`
+ );
+ });
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/unittests/getStackTrace.spec.ts b/dom/webgpu/tests/cts/checkout/src/unittests/getStackTrace.spec.ts
new file mode 100644
index 0000000000..5090fe3f9d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/unittests/getStackTrace.spec.ts
@@ -0,0 +1,138 @@
+export const description = `
+Tests for getStackTrace.
+`;
+
+import { makeTestGroup } from '../common/framework/test_group.js';
+import { extractImportantStackTrace } from '../common/internal/stack.js';
+
+import { UnitTest } from './unit_test.js';
+
+export const g = makeTestGroup(UnitTest);
+
+g.test('stacks')
+ .paramsSimple([
+ {
+ case: 'node_fail',
+ _expectedLines: 3,
+ _stack: `Error:
+ at CaseRecorder.fail (/Users/kainino/src/cts/src/common/framework/logger.ts:99:30)
+ at RunCaseSpecific.exports.g.test.t [as fn] (/Users/kainino/src/cts/src/unittests/logger.spec.ts:80:7)
+ at RunCaseSpecific.run (/Users/kainino/src/cts/src/common/framework/test_group.ts:121:18)
+ at processTicksAndRejections (internal/process/task_queues.js:86:5)`,
+ },
+ {
+ // MAINTENANCE_TODO: make sure this test case actually matches what happens on windows
+ case: 'node_fail_backslash',
+ _expectedLines: 3,
+ _stack: `Error:
+ at CaseRecorder.fail (C:\\Users\\kainino\\src\\cts\\src\\common\\framework\\logger.ts:99:30)
+ at RunCaseSpecific.exports.g.test.t [as fn] (C:\\Users\\kainino\\src\\cts\\src\\unittests\\logger.spec.ts:80:7)
+ at RunCaseSpecific.run (C:\\Users\\kainino\\src\\cts\\src\\common\\framework\\test_group.ts:121:18)
+ at processTicksAndRejections (internal\\process\\task_queues.js:86:5)`,
+ },
+ {
+ case: 'node_fail_processTicksAndRejections',
+ _expectedLines: 5,
+ _stack: `Error: expectation had no effect: suite1:foo:
+ at Object.generateMinimalQueryList (/Users/kainino/src/cts/src/common/framework/generate_minimal_query_list.ts:72:24)
+ at testGenerateMinimalQueryList (/Users/kainino/src/cts/src/unittests/loading.spec.ts:289:25)
+ at processTicksAndRejections (internal/process/task_queues.js:93:5)
+ at RunCaseSpecific.fn (/Users/kainino/src/cts/src/unittests/loading.spec.ts:300:3)
+ at RunCaseSpecific.run (/Users/kainino/src/cts/src/common/framework/test_group.ts:144:9)
+ at /Users/kainino/src/cts/src/common/runtime/cmdline.ts:62:25
+ at async Promise.all (index 29)
+ at /Users/kainino/src/cts/src/common/runtime/cmdline.ts:78:5`,
+ },
+ {
+ case: 'node_throw',
+ _expectedLines: 2,
+ _stack: `Error: hello
+ at RunCaseSpecific.g.test.t [as fn] (/Users/kainino/src/cts/src/unittests/test_group.spec.ts:51:11)
+ at RunCaseSpecific.run (/Users/kainino/src/cts/src/common/framework/test_group.ts:121:18)
+ at processTicksAndRejections (internal/process/task_queues.js:86:5)`,
+ },
+ {
+ case: 'firefox_fail',
+ _expectedLines: 3,
+ _stack: `fail@http://localhost:8080/out/common/framework/logger.js:104:30
+expect@http://localhost:8080/out/common/framework/default_fixture.js:59:16
+@http://localhost:8080/out/unittests/util.spec.js:35:5
+run@http://localhost:8080/out/common/framework/test_group.js:119:18`,
+ },
+ {
+ case: 'firefox_throw',
+ _expectedLines: 1,
+ _stack: `@http://localhost:8080/out/unittests/test_group.spec.js:48:11
+run@http://localhost:8080/out/common/framework/test_group.js:119:18`,
+ },
+ {
+ case: 'safari_fail',
+ _expectedLines: 3,
+ _stack: `fail@http://localhost:8080/out/common/framework/logger.js:104:39
+expect@http://localhost:8080/out/common/framework/default_fixture.js:59:20
+http://localhost:8080/out/unittests/util.spec.js:35:11
+http://localhost:8080/out/common/framework/test_group.js:119:20
+asyncFunctionResume@[native code]
+[native code]
+promiseReactionJob@[native code]`,
+ },
+ {
+ case: 'safari_throw',
+ _expectedLines: 1,
+ _stack: `http://localhost:8080/out/unittests/test_group.spec.js:48:20
+http://localhost:8080/out/common/framework/test_group.js:119:20
+asyncFunctionResume@[native code]
+[native code]
+promiseReactionJob@[native code]`,
+ },
+ {
+ case: 'chrome_fail',
+ _expectedLines: 4,
+ _stack: `Error
+ at CaseRecorder.fail (http://localhost:8080/out/common/framework/logger.js:104:30)
+ at DefaultFixture.expect (http://localhost:8080/out/common/framework/default_fixture.js:59:16)
+ at RunCaseSpecific.fn (http://localhost:8080/out/unittests/util.spec.js:35:5)
+ at RunCaseSpecific.run (http://localhost:8080/out/common/framework/test_group.js:119:18)
+ at async runCase (http://localhost:8080/out/common/runtime/standalone.js:37:17)
+ at async http://localhost:8080/out/common/runtime/standalone.js:102:7`,
+ },
+ {
+ case: 'chrome_throw',
+ _expectedLines: 6,
+ _stack: `Error: hello
+ at RunCaseSpecific.fn (http://localhost:8080/out/unittests/test_group.spec.js:48:11)
+ at RunCaseSpecific.run (http://localhost:8080/out/common/framework/test_group.js:119:18)"
+ at async Promise.all (index 0)
+ at async TestGroupTest.run (http://localhost:8080/out/unittests/test_group_test.js:6:5)
+ at async RunCaseSpecific.fn (http://localhost:8080/out/unittests/test_group.spec.js:53:15)
+ at async RunCaseSpecific.run (http://localhost:8080/out/common/framework/test_group.js:119:7)
+ at async runCase (http://localhost:8080/out/common/runtime/standalone.js:37:17)
+ at async http://localhost:8080/out/common/runtime/standalone.js:102:7`,
+ },
+ {
+ case: 'multiple_lines',
+ _expectedLines: 8,
+ _stack: `Error: hello
+ at RunCaseSpecific.fn (http://localhost:8080/out/unittests/test_group.spec.js:48:11)
+ at RunCaseSpecific.fn (http://localhost:8080/out/unittests/test_group.spec.js:48:11)
+ at RunCaseSpecific.fn (http://localhost:8080/out/unittests/test_group.spec.js:48:11)
+ at RunCaseSpecific.run (http://localhost:8080/out/common/framework/test_group.js:119:18)"
+ at async Promise.all (index 0)
+ at async TestGroupTest.run (http://localhost:8080/out/unittests/test_group_test.js:6:5)
+ at async RunCaseSpecific.fn (http://localhost:8080/out/unittests/test_group.spec.js:53:15)
+ at async RunCaseSpecific.run (http://localhost:8080/out/common/framework/test_group.js:119:7)
+ at async runCase (http://localhost:8080/out/common/runtime/standalone.js:37:17)
+ at async http://localhost:8080/out/common/runtime/standalone.js:102:7`,
+ },
+ ])
+ .fn(t => {
+ const ex = new Error();
+ ex.stack = t.params._stack;
+ t.expect(ex.stack === t.params._stack);
+ const stringified = extractImportantStackTrace(ex);
+ const parts = stringified.split('\n');
+
+ t.expect(parts.length === t.params._expectedLines);
+ const last = parts[parts.length - 1];
+ t.expect(last.indexOf('/unittests/') !== -1 || last.indexOf('\\unittests\\') !== -1);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/unittests/listing.ts b/dom/webgpu/tests/cts/checkout/src/unittests/listing.ts
new file mode 100644
index 0000000000..823639c692
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/unittests/listing.ts
@@ -0,0 +1,5 @@
+/* eslint-disable import/no-restricted-paths */
+import { TestSuiteListing } from '../common/internal/test_suite_listing.js';
+import { makeListing } from '../common/tools/crawl.js';
+
+export const listing: Promise<TestSuiteListing> = makeListing(__filename);
diff --git a/dom/webgpu/tests/cts/checkout/src/unittests/loaders_and_trees.spec.ts b/dom/webgpu/tests/cts/checkout/src/unittests/loaders_and_trees.spec.ts
new file mode 100644
index 0000000000..a22c06e669
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/unittests/loaders_and_trees.spec.ts
@@ -0,0 +1,978 @@
+export const description = `
+Tests for queries/filtering, loading, and running.
+`;
+
+import { Fixture } from '../common/framework/fixture.js';
+import { makeTestGroup } from '../common/framework/test_group.js';
+import { TestFileLoader, SpecFile } from '../common/internal/file_loader.js';
+import { Logger } from '../common/internal/logging/logger.js';
+import { Status } from '../common/internal/logging/result.js';
+import { parseQuery } from '../common/internal/query/parseQuery.js';
+import {
+ TestQuery,
+ TestQuerySingleCase,
+ TestQueryMultiCase,
+ TestQueryMultiTest,
+ TestQueryMultiFile,
+ TestQueryWithExpectation,
+} from '../common/internal/query/query.js';
+import { makeTestGroupForUnitTesting } from '../common/internal/test_group.js';
+import { TestSuiteListing, TestSuiteListingEntry } from '../common/internal/test_suite_listing.js';
+import { ExpandThroughLevel, TestTreeLeaf } from '../common/internal/tree.js';
+import { assert, objectEquals } from '../common/util/util.js';
+
+import { UnitTest } from './unit_test.js';
+
+const listingData: { [k: string]: TestSuiteListingEntry[] } = {
+ suite1: [
+ { file: [], readme: 'desc 1a' },
+ { file: ['foo'] },
+ { file: ['bar'], readme: 'desc 1h' },
+ { file: ['bar', 'biz'] },
+ { file: ['bar', 'buzz', 'buzz'] },
+ { file: ['baz'] },
+ { file: ['empty'], readme: 'desc 1z' }, // directory with no files
+ ],
+ suite2: [{ file: [], readme: 'desc 2a' }, { file: ['foof'] }],
+};
+
+const specsData: { [k: string]: SpecFile } = {
+ 'suite1/foo.spec.js': {
+ description: 'desc 1b',
+ g: (() => {
+ const g = makeTestGroupForUnitTesting(UnitTest);
+ g.test('hello').fn(() => {});
+ g.test('bonjour').fn(() => {});
+ g.test('hola')
+ .desc('TODO TODO')
+ .fn(() => {});
+ return g;
+ })(),
+ },
+ 'suite1/bar/biz.spec.js': {
+ description: 'desc 1f TODO TODO',
+ g: makeTestGroupForUnitTesting(UnitTest), // file with no tests
+ },
+ 'suite1/bar/buzz/buzz.spec.js': {
+ description: 'desc 1d TODO',
+ g: (() => {
+ const g = makeTestGroupForUnitTesting(UnitTest);
+ g.test('zap').fn(() => {});
+ return g;
+ })(),
+ },
+ 'suite1/baz.spec.js': {
+ description: 'desc 1e',
+ g: (() => {
+ const g = makeTestGroupForUnitTesting(UnitTest);
+ g.test('wye')
+ .paramsSimple([{}, { x: 1 }])
+ .fn(() => {});
+ g.test('zed')
+ .paramsSimple([
+ { a: 1, b: 2, _c: 0 },
+ { b: 3, a: 1, _c: 0 },
+ ])
+ .fn(() => {});
+ g.test('batched')
+ // creates two cases: one for subcases 1,2 and one for subcase 3
+ .paramsSubcasesOnly(u => u.combine('x', [1, 2, 3]))
+ .batch(2)
+ .fn(() => {});
+ return g;
+ })(),
+ },
+ 'suite2/foof.spec.js': {
+ description: 'desc 2b',
+ g: (() => {
+ const g = makeTestGroupForUnitTesting(UnitTest);
+ g.test('blah').fn(t => {
+ t.debug('OK');
+ });
+ g.test('bleh')
+ .paramsSimple([{ a: 1 }])
+ .fn(t => {
+ t.debug('OK');
+ t.debug('OK');
+ });
+ g.test('bluh,a').fn(t => {
+ t.fail('goodbye');
+ });
+ return g;
+ })(),
+ },
+};
+
+class FakeTestFileLoader extends TestFileLoader {
+ listing(suite: string): Promise<TestSuiteListing> {
+ return Promise.resolve(listingData[suite]);
+ }
+
+ import(path: string): Promise<SpecFile> {
+ assert(path in specsData, '[test] mock file ' + path + ' does not exist');
+ return Promise.resolve(specsData[path]);
+ }
+}
+
+class LoadingTest extends UnitTest {
+ loader: FakeTestFileLoader = new FakeTestFileLoader();
+ events: (string | null)[] = [];
+ private isListenersAdded = false;
+
+ collectEvents(): void {
+ this.events = [];
+ if (!this.isListenersAdded) {
+ this.isListenersAdded = true;
+ this.loader.addEventListener('import', ev => this.events.push(ev.data.url));
+ this.loader.addEventListener('finish', _ev => this.events.push(null));
+ }
+ }
+
+ async load(query: string): Promise<TestTreeLeaf[]> {
+ return Array.from(await this.loader.loadCases(parseQuery(query)));
+ }
+
+ async loadNames(query: string): Promise<string[]> {
+ return (await this.load(query)).map(c => c.query.toString());
+ }
+}
+
+export const g = makeTestGroup(LoadingTest);
+
+g.test('suite').fn(t => {
+ t.shouldReject('Error', t.load('suite1'));
+ t.shouldReject('Error', t.load('suite1:'));
+});
+
+g.test('group').fn(async t => {
+ t.collectEvents();
+ t.expect((await t.load('suite1:*')).length === 10);
+ t.expect(
+ objectEquals(t.events, [
+ 'suite1/foo.spec.js',
+ 'suite1/bar/biz.spec.js',
+ 'suite1/bar/buzz/buzz.spec.js',
+ 'suite1/baz.spec.js',
+ null,
+ ])
+ );
+
+ t.collectEvents();
+ t.expect((await t.load('suite1:foo,*')).length === 3); // x:foo,* matches x:foo:
+ t.expect(objectEquals(t.events, ['suite1/foo.spec.js', null]));
+
+ t.collectEvents();
+ t.expect((await t.load('suite1:bar,*')).length === 1);
+ t.expect(
+ objectEquals(t.events, ['suite1/bar/biz.spec.js', 'suite1/bar/buzz/buzz.spec.js', null])
+ );
+
+ t.collectEvents();
+ t.expect((await t.load('suite1:bar,buzz,buzz,*')).length === 1);
+ t.expect(objectEquals(t.events, ['suite1/bar/buzz/buzz.spec.js', null]));
+
+ t.shouldReject('Error', t.load('suite1:f*'));
+
+ {
+ const s = new TestQueryMultiFile('suite1', ['bar', 'buzz']).toString();
+ t.collectEvents();
+ t.expect((await t.load(s)).length === 1);
+ t.expect(objectEquals(t.events, ['suite1/bar/buzz/buzz.spec.js', null]));
+ }
+});
+
+g.test('test').fn(async t => {
+ t.shouldReject('Error', t.load('suite1::'));
+ t.shouldReject('Error', t.load('suite1:bar:'));
+ t.shouldReject('Error', t.load('suite1:bar,:'));
+
+ t.shouldReject('Error', t.load('suite1::*'));
+ t.shouldReject('Error', t.load('suite1:bar,:*'));
+ t.shouldReject('Error', t.load('suite1:bar:*'));
+
+ t.expect((await t.load('suite1:foo:*')).length === 3);
+ t.expect((await t.load('suite1:bar,buzz,buzz:*')).length === 1);
+ t.expect((await t.load('suite1:baz:*')).length === 6);
+
+ t.expect((await t.load('suite2:foof:bluh,*')).length === 1);
+ t.expect((await t.load('suite2:foof:bluh,a,*')).length === 1);
+
+ {
+ const s = new TestQueryMultiTest('suite2', ['foof'], ['bluh']).toString();
+ t.expect((await t.load(s)).length === 1);
+ }
+});
+
+g.test('case').fn(async t => {
+ t.shouldReject('Error', t.load('suite1:foo::'));
+ t.shouldReject('Error', t.load('suite1:bar:zed,:'));
+
+ t.shouldReject('Error', t.load('suite1:foo:h*'));
+
+ t.shouldReject('Error', t.load('suite1:foo::*'));
+ t.shouldReject('Error', t.load('suite1:baz::*'));
+ t.shouldReject('Error', t.load('suite1:baz:zed,:*'));
+
+ t.shouldReject('Error', t.load('suite1:baz:zed:'));
+ t.shouldReject('Error', t.load('suite1:baz:zed:a=1'));
+ t.shouldReject('Error', t.load('suite1:baz:zed:a=1;b=2*'));
+ t.shouldReject('Error', t.load('suite1:baz:zed:a=1;b=2;'));
+ t.shouldReject('SyntaxError', t.load('suite1:baz:zed:a=1;b=2,')); // tries to parse '2,' as JSON
+ t.shouldReject('Error', t.load('suite1:baz:zed:a=1,b=2')); // '=' not allowed in value '1,b=2'
+ t.shouldReject('Error', t.load('suite1:baz:zed:b=2*'));
+ t.shouldReject('Error', t.load('suite1:baz:zed:b=2;a=1;_c=0'));
+ t.shouldReject('Error', t.load('suite1:baz:zed:a=1,*'));
+
+ t.expect((await t.load('suite1:baz:zed:*')).length === 2);
+ t.expect((await t.load('suite1:baz:zed:a=1;*')).length === 2);
+ t.expect((await t.load('suite1:baz:zed:a=1;b=2')).length === 1);
+ t.expect((await t.load('suite1:baz:zed:a=1;b=2;*')).length === 1);
+ t.expect((await t.load('suite1:baz:zed:b=2;*')).length === 1);
+ t.expect((await t.load('suite1:baz:zed:b=2;a=1')).length === 1);
+ t.expect((await t.load('suite1:baz:zed:b=2;a=1;*')).length === 1);
+ t.expect((await t.load('suite1:baz:zed:b=3;a=1')).length === 1);
+ t.expect((await t.load('suite1:baz:zed:a=1;b=3')).length === 1);
+ t.expect((await t.load('suite1:foo:hello:')).length === 1);
+
+ {
+ const s = new TestQueryMultiCase('suite1', ['baz'], ['zed'], { a: 1, b: 2 }).toString();
+ t.expect((await t.load(s)).length === 1);
+ }
+ {
+ const s = new TestQuerySingleCase('suite1', ['baz'], ['zed'], { a: 1, b: 2 }).toString();
+ t.expect((await t.load(s)).length === 1);
+ }
+});
+
+g.test('batching').fn(async t => {
+ t.expect((await t.load('suite1:baz:batched,*')).length === 2);
+ t.expect((await t.load('suite1:baz:batched:*')).length === 2);
+ t.expect((await t.load('suite1:baz:batched:batch__=1;*')).length === 1);
+ t.expect((await t.load('suite1:baz:batched:batch__=1')).length === 1);
+});
+
+async function runTestcase(
+ t: Fixture,
+ log: Logger,
+ testcases: TestTreeLeaf[],
+ i: number,
+ query: TestQuery,
+ expectations: TestQueryWithExpectation[],
+ status: Status,
+ logs: (s: string[]) => boolean
+) {
+ t.expect(objectEquals(testcases[i].query, query));
+ const name = testcases[i].query.toString();
+ const [rec, res] = log.record(name);
+ await testcases[i].run(rec, expectations);
+
+ t.expect(log.results.get(name) === res);
+ t.expect(res.status === status);
+ t.expect(res.timems >= 0);
+ assert(res.logs !== undefined); // only undefined while pending
+ t.expect(logs(res.logs.map(l => JSON.stringify(l))));
+}
+
+g.test('end2end').fn(async t => {
+ const l = await t.load('suite2:foof:*');
+ assert(l.length === 3, 'listing length');
+
+ const log = new Logger({ overrideDebugMode: true });
+
+ await runTestcase(
+ t,
+ log,
+ l,
+ 0,
+ new TestQuerySingleCase('suite2', ['foof'], ['blah'], {}),
+ [],
+ 'pass',
+ logs => objectEquals(logs, ['"DEBUG: OK"'])
+ );
+ await runTestcase(
+ t,
+ log,
+ l,
+ 1,
+ new TestQuerySingleCase('suite2', ['foof'], ['bleh'], { a: 1 }),
+ [],
+ 'pass',
+ logs => objectEquals(logs, ['"DEBUG: OK"', '"DEBUG: OK"'])
+ );
+ await runTestcase(
+ t,
+ log,
+ l,
+ 2,
+ new TestQuerySingleCase('suite2', ['foof'], ['bluh', 'a'], {}),
+ [],
+ 'fail',
+ logs =>
+ logs.length === 1 &&
+ logs[0].startsWith('"EXPECTATION FAILED: goodbye\\n') &&
+ logs[0].indexOf('loaders_and_trees.spec.') !== -1
+ );
+});
+
+g.test('expectations,single_case').fn(async t => {
+ const log = new Logger({ overrideDebugMode: true });
+ const zedCases = await t.load('suite1:baz:zed:*');
+
+ // Single-case. Covers one case.
+ const zedExpectationsSkipA1B2 = [
+ {
+ query: new TestQuerySingleCase('suite1', ['baz'], ['zed'], { a: 1, b: 2 }),
+ expectation: 'skip' as const,
+ },
+ ];
+
+ await runTestcase(
+ t,
+ log,
+ zedCases,
+ 0,
+ new TestQuerySingleCase('suite1', ['baz'], ['zed'], { a: 1, b: 2 }),
+ zedExpectationsSkipA1B2,
+ 'skip',
+ logs => logs.length === 1 && logs[0].startsWith('"SKIP: Skipped by expectations"')
+ );
+
+ await runTestcase(
+ t,
+ log,
+ zedCases,
+ 1,
+ new TestQuerySingleCase('suite1', ['baz'], ['zed'], { a: 1, b: 3 }),
+ zedExpectationsSkipA1B2,
+ 'pass',
+ logs => logs.length === 0
+ );
+});
+
+g.test('expectations,single_case,none').fn(async t => {
+ const log = new Logger({ overrideDebugMode: true });
+ const zedCases = await t.load('suite1:baz:zed:*');
+ // Single-case. Doesn't cover any cases.
+ const zedExpectationsSkipA1B0 = [
+ {
+ query: new TestQuerySingleCase('suite1', ['baz'], ['zed'], { a: 1, b: 0 }),
+ expectation: 'skip' as const,
+ },
+ ];
+
+ await runTestcase(
+ t,
+ log,
+ zedCases,
+ 0,
+ new TestQuerySingleCase('suite1', ['baz'], ['zed'], { a: 1, b: 2 }),
+ zedExpectationsSkipA1B0,
+ 'pass',
+ logs => logs.length === 0
+ );
+
+ await runTestcase(
+ t,
+ log,
+ zedCases,
+ 1,
+ new TestQuerySingleCase('suite1', ['baz'], ['zed'], { a: 1, b: 3 }),
+ zedExpectationsSkipA1B0,
+ 'pass',
+ logs => logs.length === 0
+ );
+});
+
+g.test('expectations,multi_case').fn(async t => {
+ const log = new Logger({ overrideDebugMode: true });
+ const zedCases = await t.load('suite1:baz:zed:*');
+ // Multi-case, not all cases covered.
+ const zedExpectationsSkipB3 = [
+ {
+ query: new TestQueryMultiCase('suite1', ['baz'], ['zed'], { b: 3 }),
+ expectation: 'skip' as const,
+ },
+ ];
+
+ await runTestcase(
+ t,
+ log,
+ zedCases,
+ 0,
+ new TestQuerySingleCase('suite1', ['baz'], ['zed'], { a: 1, b: 2 }),
+ zedExpectationsSkipB3,
+ 'pass',
+ logs => logs.length === 0
+ );
+
+ await runTestcase(
+ t,
+ log,
+ zedCases,
+ 1,
+ new TestQuerySingleCase('suite1', ['baz'], ['zed'], { a: 1, b: 3 }),
+ zedExpectationsSkipB3,
+ 'skip',
+ logs => logs.length === 1 && logs[0].startsWith('"SKIP: Skipped by expectations"')
+ );
+});
+
+g.test('expectations,multi_case_all').fn(async t => {
+ const log = new Logger({ overrideDebugMode: true });
+ const zedCases = await t.load('suite1:baz:zed:*');
+ // Multi-case, all cases covered.
+ const zedExpectationsSkipA1 = [
+ {
+ query: new TestQueryMultiCase('suite1', ['baz'], ['zed'], { a: 1 }),
+ expectation: 'skip' as const,
+ },
+ ];
+
+ await runTestcase(
+ t,
+ log,
+ zedCases,
+ 0,
+ new TestQuerySingleCase('suite1', ['baz'], ['zed'], { a: 1, b: 2 }),
+ zedExpectationsSkipA1,
+ 'skip',
+ logs => logs.length === 1 && logs[0].startsWith('"SKIP: Skipped by expectations"')
+ );
+
+ await runTestcase(
+ t,
+ log,
+ zedCases,
+ 1,
+ new TestQuerySingleCase('suite1', ['baz'], ['zed'], { a: 1, b: 3 }),
+ zedExpectationsSkipA1,
+ 'skip',
+ logs => logs.length === 1 && logs[0].startsWith('"SKIP: Skipped by expectations"')
+ );
+});
+
+g.test('expectations,multi_case_none').fn(async t => {
+ const log = new Logger({ overrideDebugMode: true });
+ const zedCases = await t.load('suite1:baz:zed:*');
+ // Multi-case, no params, all cases covered.
+ const zedExpectationsSkipZed = [
+ {
+ query: new TestQueryMultiCase('suite1', ['baz'], ['zed'], {}),
+ expectation: 'skip' as const,
+ },
+ ];
+
+ await runTestcase(
+ t,
+ log,
+ zedCases,
+ 0,
+ new TestQuerySingleCase('suite1', ['baz'], ['zed'], { a: 1, b: 2 }),
+ zedExpectationsSkipZed,
+ 'skip',
+ logs => logs.length === 1 && logs[0].startsWith('"SKIP: Skipped by expectations"')
+ );
+
+ await runTestcase(
+ t,
+ log,
+ zedCases,
+ 1,
+ new TestQuerySingleCase('suite1', ['baz'], ['zed'], { a: 1, b: 3 }),
+ zedExpectationsSkipZed,
+ 'skip',
+ logs => logs.length === 1 && logs[0].startsWith('"SKIP: Skipped by expectations"')
+ );
+});
+
+g.test('expectations,multi_test').fn(async t => {
+ const log = new Logger({ overrideDebugMode: true });
+ const suite1Cases = await t.load('suite1:*');
+
+ // Multi-test, all cases covered.
+ const expectationsSkipAllInBaz = [
+ {
+ query: new TestQueryMultiTest('suite1', ['baz'], []),
+ expectation: 'skip' as const,
+ },
+ ];
+
+ await runTestcase(
+ t,
+ log,
+ suite1Cases,
+ 4,
+ new TestQuerySingleCase('suite1', ['baz'], ['wye'], {}),
+ expectationsSkipAllInBaz,
+ 'skip',
+ logs => logs.length === 1 && logs[0].startsWith('"SKIP: Skipped by expectations"')
+ );
+
+ await runTestcase(
+ t,
+ log,
+ suite1Cases,
+ 6,
+ new TestQuerySingleCase('suite1', ['baz'], ['zed'], { a: 1, b: 2 }),
+ expectationsSkipAllInBaz,
+ 'skip',
+ logs => logs.length === 1 && logs[0].startsWith('"SKIP: Skipped by expectations"')
+ );
+});
+
+g.test('expectations,multi_test,none').fn(async t => {
+ const log = new Logger({ overrideDebugMode: true });
+ const suite1Cases = await t.load('suite1:*');
+
+ // Multi-test, no cases covered.
+ const expectationsSkipAllInFoo = [
+ {
+ query: new TestQueryMultiTest('suite1', ['foo'], []),
+ expectation: 'skip' as const,
+ },
+ ];
+
+ await runTestcase(
+ t,
+ log,
+ suite1Cases,
+ 4,
+ new TestQuerySingleCase('suite1', ['baz'], ['wye'], {}),
+ expectationsSkipAllInFoo,
+ 'pass',
+ logs => logs.length === 0
+ );
+
+ await runTestcase(
+ t,
+ log,
+ suite1Cases,
+ 6,
+ new TestQuerySingleCase('suite1', ['baz'], ['zed'], { a: 1, b: 2 }),
+ expectationsSkipAllInFoo,
+ 'pass',
+ logs => logs.length === 0
+ );
+});
+
+g.test('expectations,multi_file').fn(async t => {
+ const log = new Logger({ overrideDebugMode: true });
+ const suite1Cases = await t.load('suite1:*');
+
+ // Multi-file
+ const expectationsSkipAll = [
+ {
+ query: new TestQueryMultiFile('suite1', []),
+ expectation: 'skip' as const,
+ },
+ ];
+
+ await runTestcase(
+ t,
+ log,
+ suite1Cases,
+ 0,
+ new TestQuerySingleCase('suite1', ['foo'], ['hello'], {}),
+ expectationsSkipAll,
+ 'skip',
+ logs => logs.length === 1 && logs[0].startsWith('"SKIP: Skipped by expectations"')
+ );
+
+ await runTestcase(
+ t,
+ log,
+ suite1Cases,
+ 3,
+ new TestQuerySingleCase('suite1', ['bar', 'buzz', 'buzz'], ['zap'], {}),
+ expectationsSkipAll,
+ 'skip',
+ logs => logs.length === 1 && logs[0].startsWith('"SKIP: Skipped by expectations"')
+ );
+});
+
+g.test('expectations,catches_failure').fn(async t => {
+ const log = new Logger({ overrideDebugMode: true });
+ const suite2Cases = await t.load('suite2:*');
+
+ // Catches failure
+ const expectedFailures = [
+ {
+ query: new TestQueryMultiCase('suite2', ['foof'], ['bluh', 'a'], {}),
+ expectation: 'fail' as const,
+ },
+ ];
+
+ await runTestcase(
+ t,
+ log,
+ suite2Cases,
+ 0,
+ new TestQuerySingleCase('suite2', ['foof'], ['blah'], {}),
+ expectedFailures,
+ 'pass',
+ logs => objectEquals(logs, ['"DEBUG: OK"'])
+ );
+
+ // Status is passed, but failure is logged.
+ await runTestcase(
+ t,
+ log,
+ suite2Cases,
+ 2,
+ new TestQuerySingleCase('suite2', ['foof'], ['bluh', 'a'], {}),
+ expectedFailures,
+ 'pass',
+ logs => logs.length === 1 && logs[0].startsWith('"EXPECTATION FAILED: goodbye\\n')
+ );
+});
+
+g.test('expectations,skip_dominates_failure').fn(async t => {
+ const log = new Logger({ overrideDebugMode: true });
+ const suite2Cases = await t.load('suite2:*');
+
+ const expectedFailures = [
+ {
+ query: new TestQueryMultiCase('suite2', ['foof'], ['bluh', 'a'], {}),
+ expectation: 'fail' as const,
+ },
+ {
+ query: new TestQueryMultiCase('suite2', ['foof'], ['bluh', 'a'], {}),
+ expectation: 'skip' as const,
+ },
+ ];
+
+ await runTestcase(
+ t,
+ log,
+ suite2Cases,
+ 2,
+ new TestQuerySingleCase('suite2', ['foof'], ['bluh', 'a'], {}),
+ expectedFailures,
+ 'skip',
+ logs => logs.length === 1 && logs[0].startsWith('"SKIP: Skipped by expectations"')
+ );
+});
+
+g.test('expectations,skip_inside_failure').fn(async t => {
+ const log = new Logger({ overrideDebugMode: true });
+ const suite2Cases = await t.load('suite2:*');
+
+ const expectedFailures = [
+ {
+ query: new TestQueryMultiFile('suite2', []),
+ expectation: 'fail' as const,
+ },
+ {
+ query: new TestQueryMultiCase('suite2', ['foof'], ['blah'], {}),
+ expectation: 'skip' as const,
+ },
+ ];
+
+ await runTestcase(
+ t,
+ log,
+ suite2Cases,
+ 0,
+ new TestQuerySingleCase('suite2', ['foof'], ['blah'], {}),
+ expectedFailures,
+ 'skip',
+ logs => logs.length === 1 && logs[0].startsWith('"SKIP: Skipped by expectations"')
+ );
+
+ await runTestcase(
+ t,
+ log,
+ suite2Cases,
+ 2,
+ new TestQuerySingleCase('suite2', ['foof'], ['bluh', 'a'], {}),
+ expectedFailures,
+ 'pass',
+ logs => logs.length === 1 && logs[0].startsWith('"EXPECTATION FAILED: goodbye\\n')
+ );
+});
+
+async function testIterateCollapsed(
+ t: LoadingTest,
+ alwaysExpandThroughLevel: ExpandThroughLevel,
+ expectations: string[],
+ expectedResult: 'throws' | string[] | [string, number | undefined][],
+ includeEmptySubtrees = false
+) {
+ t.debug(`expandThrough=${alwaysExpandThroughLevel} expectations=${expectations}`);
+ const treePromise = t.loader.loadTree(new TestQueryMultiFile('suite1', []), {
+ subqueriesToExpand: expectations,
+ });
+ if (expectedResult === 'throws') {
+ t.shouldReject('Error', treePromise, {
+ // Some errors here use StacklessError to print nicer command line outputs.
+ allowMissingStack: true,
+ });
+ return;
+ }
+ const tree = await treePromise;
+ const actualIter = tree.iterateCollapsedNodes({
+ includeEmptySubtrees,
+ alwaysExpandThroughLevel,
+ });
+ const testingTODOs = expectedResult.length > 0 && expectedResult[0] instanceof Array;
+ const actual = Array.from(actualIter, ({ query, subtreeCounts }) =>
+ testingTODOs ? [query.toString(), subtreeCounts?.nodesWithTODO] : query.toString()
+ );
+ if (!objectEquals(actual, expectedResult)) {
+ t.fail(
+ `iterateCollapsed failed:
+ got ${JSON.stringify(actual)}
+ exp ${JSON.stringify(expectedResult)}
+${tree.toString()}`
+ );
+ }
+}
+
+g.test('print').fn(async t => {
+ const tree = await t.loader.loadTree(new TestQueryMultiFile('suite1', []));
+ tree.toString();
+});
+
+g.test('iterateCollapsed').fn(async t => {
+ await testIterateCollapsed(
+ t,
+ 1,
+ [],
+ [
+ ['suite1:foo:*', 1], // to-do propagated up from foo:hola
+ ['suite1:bar,buzz,buzz:*', 1], // to-do in file description
+ ['suite1:baz:*', 0],
+ ]
+ );
+ await testIterateCollapsed(
+ t,
+ 2,
+ [],
+ [
+ ['suite1:foo:hello:*', 0],
+ ['suite1:foo:bonjour:*', 0],
+ ['suite1:foo:hola:*', 1], // to-do in test description
+ ['suite1:bar,buzz,buzz:zap:*', 0],
+ ['suite1:baz:wye:*', 0],
+ ['suite1:baz:zed:*', 0],
+ ['suite1:baz:batched:*', 0],
+ ]
+ );
+ await testIterateCollapsed(
+ t,
+ 3,
+ [],
+ [
+ ['suite1:foo:hello:', undefined],
+ ['suite1:foo:bonjour:', undefined],
+ ['suite1:foo:hola:', undefined],
+ ['suite1:bar,buzz,buzz:zap:', undefined],
+ ['suite1:baz:wye:', undefined],
+ ['suite1:baz:wye:x=1', undefined],
+ ['suite1:baz:zed:a=1;b=2', undefined],
+ ['suite1:baz:zed:b=3;a=1', undefined],
+ ['suite1:baz:batched:batch__=0', undefined],
+ ['suite1:baz:batched:batch__=1', undefined],
+ ]
+ );
+
+ // Expectations lists that have no effect
+ await testIterateCollapsed(
+ t,
+ 1,
+ ['suite1:foo:*'],
+ ['suite1:foo:*', 'suite1:bar,buzz,buzz:*', 'suite1:baz:*']
+ );
+ await testIterateCollapsed(
+ t,
+ 1,
+ ['suite1:bar,buzz,buzz:*'],
+ ['suite1:foo:*', 'suite1:bar,buzz,buzz:*', 'suite1:baz:*']
+ );
+ await testIterateCollapsed(
+ t,
+ 2,
+ ['suite1:baz:wye:*'],
+ [
+ 'suite1:foo:hello:*',
+ 'suite1:foo:bonjour:*',
+ 'suite1:foo:hola:*',
+ 'suite1:bar,buzz,buzz:zap:*',
+ 'suite1:baz:wye:*',
+ 'suite1:baz:zed:*',
+ 'suite1:baz:batched:*',
+ ]
+ );
+ // Test with includeEmptySubtrees=true
+ await testIterateCollapsed(
+ t,
+ 1,
+ [],
+ [
+ 'suite1:foo:*',
+ 'suite1:bar,biz:*',
+ 'suite1:bar,buzz,buzz:*',
+ 'suite1:baz:*',
+ 'suite1:empty,*',
+ ],
+ true
+ );
+ await testIterateCollapsed(
+ t,
+ 2,
+ [],
+ [
+ 'suite1:foo:hello:*',
+ 'suite1:foo:bonjour:*',
+ 'suite1:foo:hola:*',
+ 'suite1:bar,biz:*',
+ 'suite1:bar,buzz,buzz:zap:*',
+ 'suite1:baz:wye:*',
+ 'suite1:baz:zed:*',
+ 'suite1:baz:batched:*',
+ 'suite1:empty,*',
+ ],
+ true
+ );
+
+ // Expectations lists that have some effect
+ await testIterateCollapsed(
+ t,
+ 1,
+ ['suite1:baz:wye:*'],
+ [
+ 'suite1:foo:*',
+ 'suite1:bar,buzz,buzz:*',
+ 'suite1:baz:wye:*',
+ 'suite1:baz:zed,*',
+ 'suite1:baz:batched,*',
+ ]
+ );
+ await testIterateCollapsed(
+ t,
+ 1,
+ ['suite1:baz:zed:*'],
+ [
+ 'suite1:foo:*',
+ 'suite1:bar,buzz,buzz:*',
+ 'suite1:baz:wye,*',
+ 'suite1:baz:zed:*',
+ 'suite1:baz:batched,*',
+ ]
+ );
+ await testIterateCollapsed(
+ t,
+ 1,
+ ['suite1:baz:wye:*', 'suite1:baz:zed:*'],
+ [
+ 'suite1:foo:*',
+ 'suite1:bar,buzz,buzz:*',
+ 'suite1:baz:wye:*',
+ 'suite1:baz:zed:*',
+ 'suite1:baz:batched,*',
+ ]
+ );
+ await testIterateCollapsed(
+ t,
+ 1,
+ ['suite1:baz:wye:'],
+ [
+ 'suite1:foo:*',
+ 'suite1:bar,buzz,buzz:*',
+ 'suite1:baz:wye:',
+ 'suite1:baz:wye:x=1;*',
+ 'suite1:baz:zed,*',
+ 'suite1:baz:batched,*',
+ ]
+ );
+ await testIterateCollapsed(
+ t,
+ 1,
+ ['suite1:baz:wye:x=1'],
+ [
+ 'suite1:foo:*',
+ 'suite1:bar,buzz,buzz:*',
+ 'suite1:baz:wye:',
+ 'suite1:baz:wye:x=1',
+ 'suite1:baz:zed,*',
+ 'suite1:baz:batched,*',
+ ]
+ );
+ await testIterateCollapsed(
+ t,
+ 1,
+ ['suite1:foo:*', 'suite1:baz:wye:'],
+ [
+ 'suite1:foo:*',
+ 'suite1:bar,buzz,buzz:*',
+ 'suite1:baz:wye:',
+ 'suite1:baz:wye:x=1;*',
+ 'suite1:baz:zed,*',
+ 'suite1:baz:batched,*',
+ ]
+ );
+ await testIterateCollapsed(
+ t,
+ 2,
+ ['suite1:baz:wye:'],
+ [
+ 'suite1:foo:hello:*',
+ 'suite1:foo:bonjour:*',
+ 'suite1:foo:hola:*',
+ 'suite1:bar,buzz,buzz:zap:*',
+ 'suite1:baz:wye:',
+ 'suite1:baz:wye:x=1;*',
+ 'suite1:baz:zed:*',
+ 'suite1:baz:batched:*',
+ ]
+ );
+ await testIterateCollapsed(
+ t,
+ 2,
+ ['suite1:baz:wye:x=1'],
+ [
+ 'suite1:foo:hello:*',
+ 'suite1:foo:bonjour:*',
+ 'suite1:foo:hola:*',
+ 'suite1:bar,buzz,buzz:zap:*',
+ 'suite1:baz:wye:',
+ 'suite1:baz:wye:x=1',
+ 'suite1:baz:zed:*',
+ 'suite1:baz:batched:*',
+ ]
+ );
+ await testIterateCollapsed(
+ t,
+ 2,
+ ['suite1:foo:hello:*', 'suite1:baz:wye:'],
+ [
+ 'suite1:foo:hello:*',
+ 'suite1:foo:bonjour:*',
+ 'suite1:foo:hola:*',
+ 'suite1:bar,buzz,buzz:zap:*',
+ 'suite1:baz:wye:',
+ 'suite1:baz:wye:x=1;*',
+ 'suite1:baz:zed:*',
+ 'suite1:baz:batched:*',
+ ]
+ );
+
+ // Invalid expectation queries
+ await testIterateCollapsed(t, 1, ['*'], 'throws');
+ await testIterateCollapsed(t, 1, ['garbage'], 'throws');
+ await testIterateCollapsed(t, 1, ['garbage*'], 'throws');
+ await testIterateCollapsed(t, 1, ['suite1*'], 'throws');
+ await testIterateCollapsed(t, 1, ['suite1:foo*'], 'throws');
+ await testIterateCollapsed(t, 1, ['suite1:foo:he*'], 'throws');
+
+ // Expectation queries that are valid but match nothing
+ await testIterateCollapsed(t, 1, ['garbage:*'], 'throws');
+ await testIterateCollapsed(t, 1, ['suite1:doesntexist:*'], 'throws');
+ await testIterateCollapsed(t, 1, ['suite2:foo:*'], 'throws');
+ // Can't expand subqueries bigger than one file.
+ await testIterateCollapsed(t, 1, ['suite1:*'], 'throws');
+ await testIterateCollapsed(t, 1, ['suite1:bar,*'], 'throws');
+ await testIterateCollapsed(t, 1, ['suite1:*'], 'throws');
+ await testIterateCollapsed(t, 1, ['suite1:bar:hello,*'], 'throws');
+ await testIterateCollapsed(t, 1, ['suite1:baz,*'], 'throws');
+});
diff --git a/dom/webgpu/tests/cts/checkout/src/unittests/logger.spec.ts b/dom/webgpu/tests/cts/checkout/src/unittests/logger.spec.ts
new file mode 100644
index 0000000000..abc27e2876
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/unittests/logger.spec.ts
@@ -0,0 +1,173 @@
+export const description = `
+Unit tests for the namespaced logging system.
+
+Also serves as a larger test of async test functions and of the logging system.
+`;
+
+import { SkipTestCase } from '../common/framework/fixture.js';
+import { makeTestGroup } from '../common/framework/test_group.js';
+import { Logger } from '../common/internal/logging/logger.js';
+import { assert } from '../common/util/util.js';
+
+import { UnitTest } from './unit_test.js';
+
+export const g = makeTestGroup(UnitTest);
+
+g.test('construct').fn(t => {
+ const mylog = new Logger({ overrideDebugMode: true });
+ const [, res1] = mylog.record('one');
+ const [, res2] = mylog.record('two');
+
+ t.expect(mylog.results.get('one') === res1);
+ t.expect(mylog.results.get('two') === res2);
+ t.expect(res1.logs === undefined);
+ t.expect(res1.status === 'running');
+ t.expect(res1.timems < 0);
+ t.expect(res2.logs === undefined);
+ t.expect(res2.status === 'running');
+ t.expect(res2.timems < 0);
+});
+
+g.test('empty').fn(t => {
+ const mylog = new Logger({ overrideDebugMode: true });
+ const [rec, res] = mylog.record('one');
+
+ rec.start();
+ t.expect(res.status === 'running');
+ rec.finish();
+
+ t.expect(res.status === 'notrun');
+ t.expect(res.timems >= 0);
+});
+
+g.test('passed').fn(t => {
+ const mylog = new Logger({ overrideDebugMode: true });
+ const [rec, res] = mylog.record('one');
+
+ rec.start();
+ rec.passed();
+ rec.finish();
+
+ t.expect(res.status === 'pass');
+ t.expect(res.timems >= 0);
+});
+
+g.test('pass').fn(t => {
+ const mylog = new Logger({ overrideDebugMode: true });
+ const [rec, res] = mylog.record('one');
+
+ rec.start();
+ rec.debug(new Error('hello'));
+ t.expect(res.status === 'running');
+ rec.finish();
+
+ t.expect(res.status === 'pass');
+ t.expect(res.timems >= 0);
+});
+
+g.test('skip').fn(t => {
+ const mylog = new Logger({ overrideDebugMode: true });
+ const [rec, res] = mylog.record('one');
+
+ rec.start();
+ rec.skipped(new SkipTestCase());
+ rec.finish();
+
+ t.expect(res.status === 'skip');
+ t.expect(res.timems >= 0);
+});
+
+// Tests that if there are some skips and at least one pass, the overall status is 'pass'.
+g.test('skip_pass').fn(t => {
+ const mylog = new Logger({ overrideDebugMode: true });
+ const [rec, res] = mylog.record('one');
+
+ rec.start();
+ rec.skipped(new SkipTestCase());
+ rec.debug(new Error('hello'));
+ rec.skipped(new SkipTestCase());
+ rec.finish();
+
+ t.expect(res.status === 'pass');
+ t.expect(res.timems >= 0);
+});
+
+g.test('warn').fn(t => {
+ const mylog = new Logger({ overrideDebugMode: true });
+ const [rec, res] = mylog.record('one');
+
+ rec.start();
+ rec.warn(new Error('hello'));
+ rec.skipped(new SkipTestCase());
+ rec.finish();
+
+ t.expect(res.status === 'warn');
+ t.expect(res.timems >= 0);
+});
+
+g.test('fail,expectationFailed').fn(t => {
+ const mylog = new Logger({ overrideDebugMode: true });
+ const [rec, res] = mylog.record('one');
+
+ rec.start();
+ rec.expectationFailed(new Error('bye'));
+ rec.warn(new Error());
+ rec.skipped(new SkipTestCase());
+ rec.finish();
+
+ t.expect(res.status === 'fail');
+ t.expect(res.timems >= 0);
+});
+
+g.test('fail,validationFailed').fn(t => {
+ const mylog = new Logger({ overrideDebugMode: true });
+ const [rec, res] = mylog.record('one');
+
+ rec.start();
+ rec.validationFailed(new Error('bye'));
+ rec.warn(new Error());
+ rec.skipped(new SkipTestCase());
+ rec.finish();
+
+ t.expect(res.status === 'fail');
+ t.expect(res.timems >= 0);
+});
+
+g.test('fail,threw').fn(t => {
+ const mylog = new Logger({ overrideDebugMode: true });
+ const [rec, res] = mylog.record('one');
+
+ rec.start();
+ rec.threw(new Error('bye'));
+ rec.warn(new Error());
+ rec.skipped(new SkipTestCase());
+ rec.finish();
+
+ t.expect(res.status === 'fail');
+ t.expect(res.timems >= 0);
+});
+
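+// The recorder below emits two debug entries plus a failure, a warning, and a skip;
+// with debug logging enabled all 5 entries are kept, otherwise the debug entries are
+// dropped and only 3 remain.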
+g.test('debug')
+ .paramsSimple([
+ { debug: true, _logsCount: 5 }, //
+ { debug: false, _logsCount: 3 },
+ ])
+ .fn(t => {
+ const { debug, _logsCount } = t.params;
+
+ const mylog = new Logger({ overrideDebugMode: debug });
+ const [rec, res] = mylog.record('one');
+
+ rec.start();
+ rec.debug(new Error('hello'));
+ rec.expectationFailed(new Error('bye'));
+ rec.warn(new Error());
+ rec.skipped(new SkipTestCase());
+ rec.debug(new Error('foo'));
+ rec.finish();
+
+ t.expect(res.status === 'fail');
+ t.expect(res.timems >= 0);
+ assert(res.logs !== undefined);
+ t.expect(res.logs.length === _logsCount);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/unittests/maths.spec.ts b/dom/webgpu/tests/cts/checkout/src/unittests/maths.spec.ts
new file mode 100644
index 0000000000..357c574281
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/unittests/maths.spec.ts
@@ -0,0 +1,1924 @@
+export const description = `
+Unit tests for math utility functions.
+`;
+
+import { makeTestGroup } from '../common/framework/test_group.js';
+import { objectEquals } from '../common/util/util.js';
+import { kBit, kValue } from '../webgpu/util/constants.js';
+import {
+ f16,
+ f32,
+ f64,
+ float16ToUint16,
+ float32ToUint32,
+ uint16ToFloat16,
+ uint32ToFloat32,
+} from '../webgpu/util/conversion.js';
+import {
+ biasedRange,
+ calculatePermutations,
+ cartesianProduct,
+ correctlyRoundedF16,
+ correctlyRoundedF32,
+ FlushMode,
+ frexp,
+ fullF16Range,
+ fullF32Range,
+ fullI32Range,
+ lerp,
+ linearRange,
+ nextAfterF16,
+ nextAfterF32,
+ nextAfterF64,
+ NextDirection,
+ oneULPF16,
+ oneULPF32,
+ oneULPF64,
+ lerpBigInt,
+ linearRangeBigInt,
+} from '../webgpu/util/math.js';
+import {
+ reinterpretU16AsF16,
+ reinterpretU32AsF32,
+ reinterpretU64AsF64,
+} from '../webgpu/util/reinterpret.js';
+
+import { UnitTest } from './unit_test.js';
+
+export const g = makeTestGroup(UnitTest);
+
+/**
+ * Utility wrapper around oneULPF32 to test whether a value is within 1 ULP of an expected value
+ *
+ * @param got number to test
+ * @param expected number to be within 1 ULP of
+ * @param mode whether oneULPF32 should flush subnormals to zero (FTZ)
+ * @returns if got is within 1 ULP of expected
+ */
+function withinOneULPF32(got: number, expected: number, mode: FlushMode): boolean {
+ const ulp = oneULPF32(expected, mode);
+ return got >= expected - ulp && got <= expected + ulp;
+}
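+// For example, oneULPF32(1.0, 'flush') is 2 ** -24 (reinterpretU32AsF32(0x33800000),
+// see the oneULPF32 cases below), so for expected = 1.0 a got value is accepted
+// exactly when it lies in [1 - 2 ** -24, 1 + 2 ** -24].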
+
+/**
+ * @returns true if arrays are equal within 1ULP, doing element-wise comparison
+ * as needed, and considering NaNs to be equal.
+ *
+ * Depends on the correctness of oneULP, which is tested in this file.
+ *
+ * @param got array of numbers to compare for equality
+ * @param expect array of numbers to compare against
+ * @param mode should different subnormals be considered the same, i.e. should
+ * FTZ occur during comparison
+ */
+function compareArrayOfNumbersF32(
+ got: readonly number[],
+ expect: readonly number[],
+ mode: FlushMode = 'flush'
+): boolean {
+ return (
+ got.length === expect.length &&
+ got.every((value, index) => {
+ const expected = expect[index];
+ return (
+ (Number.isNaN(value) && Number.isNaN(expected)) || withinOneULPF32(value, expected, mode)
+ );
+ })
+ );
+}
+
+/** @returns the hex value representation of a f64, from its numeric representation */
+function float64ToUint64(value: number): bigint {
+ return new BigUint64Array(new Float64Array([value]).buffer)[0];
+}
+
+/** @returns the numeric representation of a f64, from its hex value representation */
+function uint64ToFloat64(bits: bigint): number {
+ return new Float64Array(new BigUint64Array([bits]).buffer)[0];
+}
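+// e.g. float64ToUint64(1.0) === 0x3ff0_0000_0000_0000n, and
+// uint64ToFloat64(0x3ff0_0000_0000_0000n) === 1.0.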
+
+interface nextAfterCase {
+ val: number;
+ dir: NextDirection;
+ result: number;
+}
+
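+// The nextAfter cases below cover both FTZ modes: in 'flush' mode subnormal inputs
+// and subnormal results are treated as zero, so stepping away from 0 lands on
+// positive.min / negative.max, while in 'no-flush' mode it lands on the nearest
+// subnormal.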
+g.test('nextAfterF64FlushToZero')
+ .paramsSubcasesOnly<nextAfterCase>(
+ // prettier-ignore
+ [
+ // Edge Cases
+ { val: Number.NaN, dir: 'positive', result: Number.NaN },
+ { val: Number.NaN, dir: 'negative', result: Number.NaN },
+ { val: Number.POSITIVE_INFINITY, dir: 'positive', result: kValue.f64.positive.infinity },
+ { val: Number.POSITIVE_INFINITY, dir: 'negative', result: kValue.f64.positive.infinity },
+ { val: Number.NEGATIVE_INFINITY, dir: 'positive', result: kValue.f64.negative.infinity },
+ { val: Number.NEGATIVE_INFINITY, dir: 'negative', result: kValue.f64.negative.infinity },
+
+ // Zeroes
+ { val: +0, dir: 'positive', result: kValue.f64.positive.min },
+ { val: +0, dir: 'negative', result: kValue.f64.negative.max },
+ { val: -0, dir: 'positive', result: kValue.f64.positive.min },
+ { val: -0, dir: 'negative', result: kValue.f64.negative.max },
+
+ // Subnormals
+ { val: kValue.f64.positive.subnormal.min, dir: 'positive', result: kValue.f64.positive.min },
+ { val: kValue.f64.positive.subnormal.min, dir: 'negative', result: kValue.f64.negative.max },
+ { val: kValue.f64.positive.subnormal.max, dir: 'positive', result: kValue.f64.positive.min },
+ { val: kValue.f64.positive.subnormal.max, dir: 'negative', result: kValue.f64.negative.max },
+ { val: kValue.f64.negative.subnormal.min, dir: 'positive', result: kValue.f64.positive.min },
+ { val: kValue.f64.negative.subnormal.min, dir: 'negative', result: kValue.f64.negative.max },
+ { val: kValue.f64.negative.subnormal.max, dir: 'positive', result: kValue.f64.positive.min },
+ { val: kValue.f64.negative.subnormal.max, dir: 'negative', result: kValue.f64.negative.max },
+
+ // Normals
+ { val: kValue.f64.positive.max, dir: 'positive', result: kValue.f64.positive.infinity },
+ { val: kValue.f64.positive.max, dir: 'negative', result: kValue.f64.positive.nearest_max },
+ { val: kValue.f64.positive.min, dir: 'positive', result: reinterpretU64AsF64(0x0010_0000_0000_0001n ) },
+ { val: kValue.f64.positive.min, dir: 'negative', result: 0 },
+ { val: kValue.f64.negative.max, dir: 'positive', result: 0 },
+ { val: kValue.f64.negative.max, dir: 'negative', result: reinterpretU64AsF64(0x8010_0000_0000_0001n) },
+ { val: kValue.f64.negative.min, dir: 'positive', result: kValue.f64.negative.nearest_min },
+ { val: kValue.f64.negative.min, dir: 'negative', result: kValue.f64.negative.infinity },
+ { val: reinterpretU64AsF64(0x0380_0000_0000_0000n), dir: 'positive', result: reinterpretU64AsF64(0x0380_0000_0000_0001n) },
+ { val: reinterpretU64AsF64(0x0380_0000_0000_0000n), dir: 'negative', result: reinterpretU64AsF64(0x037f_ffff_ffff_ffffn) },
+ { val: reinterpretU64AsF64(0x8380_0000_0000_0000n), dir: 'positive', result: reinterpretU64AsF64(0x837f_ffff_ffff_ffffn) },
+ { val: reinterpretU64AsF64(0x8380_0000_0000_0000n), dir: 'negative', result: reinterpretU64AsF64(0x8380_0000_0000_0001n) },
+ ]
+ )
+ .fn(t => {
+ const val = t.params.val;
+ const dir = t.params.dir;
+ const expect = t.params.result;
+ const got = nextAfterF64(val, dir, 'flush');
+ t.expect(
+ got === expect || (Number.isNaN(got) && Number.isNaN(expect)),
+ `nextAfterF64(${f64(val)}, '${dir}', 'flush') returned ${f64(got)}. Expected ${f64(expect)}`
+ );
+ });
+
+g.test('nextAfterF64NoFlush')
+ .paramsSubcasesOnly<nextAfterCase>(
+ // prettier-ignore
+ [
+ // Edge Cases
+ { val: Number.NaN, dir: 'positive', result: Number.NaN },
+ { val: Number.NaN, dir: 'negative', result: Number.NaN },
+ { val: Number.POSITIVE_INFINITY, dir: 'positive', result: kValue.f64.positive.infinity },
+ { val: Number.POSITIVE_INFINITY, dir: 'negative', result: kValue.f64.positive.infinity },
+ { val: Number.NEGATIVE_INFINITY, dir: 'positive', result: kValue.f64.negative.infinity },
+ { val: Number.NEGATIVE_INFINITY, dir: 'negative', result: kValue.f64.negative.infinity },
+
+ // Zeroes
+ { val: +0, dir: 'positive', result: kValue.f64.positive.subnormal.min },
+ { val: +0, dir: 'negative', result: kValue.f64.negative.subnormal.max },
+ { val: -0, dir: 'positive', result: kValue.f64.positive.subnormal.min },
+ { val: -0, dir: 'negative', result: kValue.f64.negative.subnormal.max },
+
+ // Subnormals
+ { val: kValue.f64.positive.subnormal.min, dir: 'positive', result: reinterpretU64AsF64(0x0000_0000_0000_0002n) },
+ { val: kValue.f64.positive.subnormal.min, dir: 'negative', result: 0 },
+ { val: kValue.f64.positive.subnormal.max, dir: 'positive', result: kValue.f64.positive.min },
+ { val: kValue.f64.positive.subnormal.max, dir: 'negative', result: reinterpretU64AsF64(0x000f_ffff_ffff_fffen) },
+ { val: kValue.f64.negative.subnormal.min, dir: 'positive', result: reinterpretU64AsF64(0x800f_ffff_ffff_fffen) },
+ { val: kValue.f64.negative.subnormal.min, dir: 'negative', result: kValue.f64.negative.max },
+ { val: kValue.f64.negative.subnormal.max, dir: 'positive', result: 0 },
+ { val: kValue.f64.negative.subnormal.max, dir: 'negative', result: reinterpretU64AsF64(0x8000_0000_0000_0002n) },
+
+ // Normals
+ { val: kValue.f64.positive.max, dir: 'positive', result: kValue.f64.positive.infinity },
+ { val: kValue.f64.positive.max, dir: 'negative', result: kValue.f64.positive.nearest_max },
+ { val: kValue.f64.positive.min, dir: 'positive', result: reinterpretU64AsF64(0x0010_0000_0000_0001n ) },
+ { val: kValue.f64.positive.min, dir: 'negative', result: reinterpretU64AsF64(0x000f_ffff_ffff_ffffn) },
+ { val: kValue.f64.negative.max, dir: 'positive', result: reinterpretU64AsF64(0x800f_ffff_ffff_ffffn) },
+ { val: kValue.f64.negative.max, dir: 'negative', result: reinterpretU64AsF64(0x8010_0000_0000_0001n) },
+ { val: kValue.f64.negative.min, dir: 'positive', result: kValue.f64.negative.nearest_min },
+ { val: kValue.f64.negative.min, dir: 'negative', result: kValue.f64.negative.infinity },
+ { val: reinterpretU64AsF64(0x0380_0000_0000_0000n), dir: 'positive', result: reinterpretU64AsF64(0x0380_0000_0000_0001n) },
+ { val: reinterpretU64AsF64(0x0380_0000_0000_0000n), dir: 'negative', result: reinterpretU64AsF64(0x037f_ffff_ffff_ffffn) },
+ { val: reinterpretU64AsF64(0x8380_0000_0000_0000n), dir: 'positive', result: reinterpretU64AsF64(0x837f_ffff_ffff_ffffn) },
+ { val: reinterpretU64AsF64(0x8380_0000_0000_0000n), dir: 'negative', result: reinterpretU64AsF64(0x8380_0000_0000_0001n) },
+ ]
+ )
+ .fn(t => {
+ const val = t.params.val;
+ const dir = t.params.dir;
+ const expect = t.params.result;
+ const got = nextAfterF64(val, dir, 'no-flush');
+ t.expect(
+ got === expect || (Number.isNaN(got) && Number.isNaN(expect)),
+ `nextAfterF64(${f64(val)}, '${dir}', 'no-flush') returned ${f64(got)}. Expected ${f64(
+ expect
+ )}`
+ );
+ });
+
+g.test('nextAfterF32FlushToZero')
+ .paramsSubcasesOnly<nextAfterCase>(
+ // prettier-ignore
+ [
+ // Edge Cases
+ { val: Number.NaN, dir: 'positive', result: Number.NaN },
+ { val: Number.NaN, dir: 'negative', result: Number.NaN },
+ { val: Number.POSITIVE_INFINITY, dir: 'positive', result: kValue.f32.positive.infinity },
+ { val: Number.POSITIVE_INFINITY, dir: 'negative', result: kValue.f32.positive.infinity },
+ { val: Number.NEGATIVE_INFINITY, dir: 'positive', result: kValue.f32.negative.infinity },
+ { val: Number.NEGATIVE_INFINITY, dir: 'negative', result: kValue.f32.negative.infinity },
+
+ // Zeroes
+ { val: +0, dir: 'positive', result: kValue.f32.positive.min },
+ { val: +0, dir: 'negative', result: kValue.f32.negative.max },
+ { val: -0, dir: 'positive', result: kValue.f32.positive.min },
+ { val: -0, dir: 'negative', result: kValue.f32.negative.max },
+
+ // Subnormals
+ { val: kValue.f32.positive.subnormal.min, dir: 'positive', result: kValue.f32.positive.min },
+ { val: kValue.f32.positive.subnormal.min, dir: 'negative', result: kValue.f32.negative.max },
+ { val: kValue.f32.positive.subnormal.max, dir: 'positive', result: kValue.f32.positive.min },
+ { val: kValue.f32.positive.subnormal.max, dir: 'negative', result: kValue.f32.negative.max },
+ { val: kValue.f32.negative.subnormal.min, dir: 'positive', result: kValue.f32.positive.min },
+ { val: kValue.f32.negative.subnormal.min, dir: 'negative', result: kValue.f32.negative.max },
+ { val: kValue.f32.negative.subnormal.max, dir: 'positive', result: kValue.f32.positive.min },
+ { val: kValue.f32.negative.subnormal.max, dir: 'negative', result: kValue.f32.negative.max },
+
+ // Normals
+ { val: kValue.f32.positive.max, dir: 'positive', result: kValue.f32.positive.infinity },
+ { val: kValue.f32.positive.max, dir: 'negative', result: kValue.f32.positive.nearest_max },
+ { val: kValue.f32.positive.min, dir: 'positive', result: reinterpretU32AsF32(0x00800001) },
+ { val: kValue.f32.positive.min, dir: 'negative', result: 0 },
+ { val: kValue.f32.negative.max, dir: 'positive', result: 0 },
+ { val: kValue.f32.negative.max, dir: 'negative', result: reinterpretU32AsF32(0x80800001) },
+ { val: kValue.f32.negative.min, dir: 'positive', result: reinterpretU32AsF32(0xff7ffffe) },
+ { val: kValue.f32.negative.min, dir: 'negative', result: kValue.f32.negative.infinity },
+ { val: reinterpretU32AsF32(0x03800000), dir: 'positive', result: reinterpretU32AsF32(0x03800001) },
+ { val: reinterpretU32AsF32(0x03800000), dir: 'negative', result: reinterpretU32AsF32(0x037fffff) },
+ { val: reinterpretU32AsF32(0x83800000), dir: 'positive', result: reinterpretU32AsF32(0x837fffff) },
+ { val: reinterpretU32AsF32(0x83800000), dir: 'negative', result: reinterpretU32AsF32(0x83800001) },
+
+ // Not precisely expressible as f32
+ { val: 0.001, dir: 'positive', result: reinterpretU32AsF32(0x3a83126f) }, // positive normal
+ { val: 0.001, dir: 'negative', result: reinterpretU32AsF32(0x3a83126e) }, // positive normal
+ { val: -0.001, dir: 'positive', result: reinterpretU32AsF32(0xba83126e) }, // negative normal
+ { val: -0.001, dir: 'negative', result: reinterpretU32AsF32(0xba83126f) }, // negative normal
+ { val: 2.82E-40, dir: 'positive', result: kValue.f32.positive.min }, // positive subnormal
+ { val: 2.82E-40, dir: 'negative', result: kValue.f32.negative.max }, // positive subnormal
+ { val: -2.82E-40, dir: 'positive', result: kValue.f32.positive.min }, // negative subnormal
+ { val: -2.82E-40, dir: 'negative', result: kValue.f32.negative.max }, // negative subnormal
+ ]
+ )
+ .fn(t => {
+ const val = t.params.val;
+ const dir = t.params.dir;
+ const expect = t.params.result;
+ const got = nextAfterF32(val, dir, 'flush');
+ t.expect(
+ got === expect || (Number.isNaN(got) && Number.isNaN(expect)),
+ `nextAfterF32(${f64(val)}, '${dir}', 'flush') returned ${f32(got)}. Expected ${f32(expect)}`
+ );
+ });
+
+g.test('nextAfterF32NoFlush')
+ .paramsSubcasesOnly<nextAfterCase>(
+ // prettier-ignore
+ [
+ // Edge Cases
+ { val: Number.NaN, dir: 'positive', result: Number.NaN },
+ { val: Number.NaN, dir: 'negative', result: Number.NaN },
+ { val: Number.POSITIVE_INFINITY, dir: 'positive', result: kValue.f32.positive.infinity },
+ { val: Number.POSITIVE_INFINITY, dir: 'negative', result: kValue.f32.positive.infinity },
+ { val: Number.NEGATIVE_INFINITY, dir: 'positive', result: kValue.f32.negative.infinity },
+ { val: Number.NEGATIVE_INFINITY, dir: 'negative', result: kValue.f32.negative.infinity },
+
+ // Zeroes
+ { val: +0, dir: 'positive', result: kValue.f32.positive.subnormal.min },
+ { val: +0, dir: 'negative', result: kValue.f32.negative.subnormal.max },
+ { val: -0, dir: 'positive', result: kValue.f32.positive.subnormal.min },
+ { val: -0, dir: 'negative', result: kValue.f32.negative.subnormal.max },
+
+ // Subnormals
+ { val: kValue.f32.positive.subnormal.min, dir: 'positive', result: reinterpretU32AsF32(0x00000002) },
+ { val: kValue.f32.positive.subnormal.min, dir: 'negative', result: 0 },
+ { val: kValue.f32.positive.subnormal.max, dir: 'positive', result: kValue.f32.positive.min },
+ { val: kValue.f32.positive.subnormal.max, dir: 'negative', result: reinterpretU32AsF32(0x007ffffe) },
+ { val: kValue.f32.negative.subnormal.min, dir: 'positive', result: reinterpretU32AsF32(0x807ffffe) },
+ { val: kValue.f32.negative.subnormal.min, dir: 'negative', result: kValue.f32.negative.max },
+ { val: kValue.f32.negative.subnormal.max, dir: 'positive', result: 0 },
+ { val: kValue.f32.negative.subnormal.max, dir: 'negative', result: reinterpretU32AsF32(0x80000002) },
+
+ // Normals
+ { val: kValue.f32.positive.max, dir: 'positive', result: kValue.f32.positive.infinity },
+ { val: kValue.f32.positive.max, dir: 'negative', result: kValue.f32.positive.nearest_max },
+ { val: kValue.f32.positive.min, dir: 'positive', result: reinterpretU32AsF32(0x00800001) },
+ { val: kValue.f32.positive.min, dir: 'negative', result: kValue.f32.positive.subnormal.max },
+ { val: kValue.f32.negative.max, dir: 'positive', result: kValue.f32.negative.subnormal.min },
+ { val: kValue.f32.negative.max, dir: 'negative', result: reinterpretU32AsF32(0x80800001) },
+ { val: kValue.f32.negative.min, dir: 'positive', result: kValue.f32.negative.nearest_min },
+ { val: kValue.f32.negative.min, dir: 'negative', result: kValue.f32.negative.infinity },
+ { val: reinterpretU32AsF32(0x03800000), dir: 'positive', result: reinterpretU32AsF32(0x03800001) },
+ { val: reinterpretU32AsF32(0x03800000), dir: 'negative', result: reinterpretU32AsF32(0x037fffff) },
+ { val: reinterpretU32AsF32(0x83800000), dir: 'positive', result: reinterpretU32AsF32(0x837fffff) },
+ { val: reinterpretU32AsF32(0x83800000), dir: 'negative', result: reinterpretU32AsF32(0x83800001) },
+
+ // Not precisely expressible as f32
+ { val: 0.001, dir: 'positive', result: reinterpretU32AsF32(0x3a83126f) }, // positive normal
+ { val: 0.001, dir: 'negative', result: reinterpretU32AsF32(0x3a83126e) }, // positive normal
+ { val: -0.001, dir: 'positive', result: reinterpretU32AsF32(0xba83126e) }, // negative normal
+ { val: -0.001, dir: 'negative', result: reinterpretU32AsF32(0xba83126f) }, // negative normal
+ { val: 2.82E-40, dir: 'positive', result: reinterpretU32AsF32(0x0003121a) }, // positive subnormal
+ { val: 2.82E-40, dir: 'negative', result: reinterpretU32AsF32(0x00031219) }, // positive subnormal
+ { val: -2.82E-40, dir: 'positive', result: reinterpretU32AsF32(0x80031219) }, // negative subnormal
+ { val: -2.82E-40, dir: 'negative', result: reinterpretU32AsF32(0x8003121a) }, // negative subnormal
+ ]
+ )
+ .fn(t => {
+ const val = t.params.val;
+ const dir = t.params.dir;
+ const expect = t.params.result;
+ const got = nextAfterF32(val, dir, 'no-flush');
+ t.expect(
+ got === expect || (Number.isNaN(got) && Number.isNaN(expect)),
+ `nextAfterF32(${f64(val)}, '${dir}', 'no-flush') returned ${f32(got)}. Expected ${f32(
+ expect
+ )}`
+ );
+ });
+
+g.test('nextAfterF16FlushToZero')
+ .paramsSubcasesOnly<nextAfterCase>(
+ // prettier-ignore
+ [
+ // Edge Cases
+ { val: Number.NaN, dir: 'positive', result: Number.NaN },
+ { val: Number.NaN, dir: 'negative', result: Number.NaN },
+ { val: Number.POSITIVE_INFINITY, dir: 'positive', result: kValue.f16.positive.infinity },
+ { val: Number.POSITIVE_INFINITY, dir: 'negative', result: kValue.f16.positive.infinity },
+ { val: Number.NEGATIVE_INFINITY, dir: 'positive', result: kValue.f16.negative.infinity },
+ { val: Number.NEGATIVE_INFINITY, dir: 'negative', result: kValue.f16.negative.infinity },
+
+ // Zeroes
+ { val: +0, dir: 'positive', result: kValue.f16.positive.min },
+ { val: +0, dir: 'negative', result: kValue.f16.negative.max },
+ { val: -0, dir: 'positive', result: kValue.f16.positive.min },
+ { val: -0, dir: 'negative', result: kValue.f16.negative.max },
+
+ // Subnormals
+ { val: kValue.f16.positive.subnormal.min, dir: 'positive', result: kValue.f16.positive.min },
+ { val: kValue.f16.positive.subnormal.min, dir: 'negative', result: kValue.f16.negative.max },
+ { val: kValue.f16.positive.subnormal.max, dir: 'positive', result: kValue.f16.positive.min },
+ { val: kValue.f16.positive.subnormal.max, dir: 'negative', result: kValue.f16.negative.max },
+ { val: kValue.f16.negative.subnormal.min, dir: 'positive', result: kValue.f16.positive.min },
+ { val: kValue.f16.negative.subnormal.min, dir: 'negative', result: kValue.f16.negative.max },
+ { val: kValue.f16.negative.subnormal.max, dir: 'positive', result: kValue.f16.positive.min },
+ { val: kValue.f16.negative.subnormal.max, dir: 'negative', result: kValue.f16.negative.max },
+
+ // Normals
+ { val: kValue.f16.positive.max, dir: 'positive', result: kValue.f16.positive.infinity },
+ { val: kValue.f16.positive.max, dir: 'negative', result: reinterpretU16AsF16(0x7bfe) },
+ { val: kValue.f16.positive.min, dir: 'positive', result: reinterpretU16AsF16(0x0401) },
+ { val: kValue.f16.positive.min, dir: 'negative', result: 0 },
+ { val: kValue.f16.negative.max, dir: 'positive', result: 0 },
+ { val: kValue.f16.negative.max, dir: 'negative', result: reinterpretU16AsF16(0x8401) },
+ { val: kValue.f16.negative.min, dir: 'positive', result: reinterpretU16AsF16(0xfbfe) },
+ { val: kValue.f16.negative.min, dir: 'negative', result: kValue.f16.negative.infinity },
+ { val: reinterpretU16AsF16(0x1380), dir: 'positive', result: reinterpretU16AsF16(0x1381) },
+ { val: reinterpretU16AsF16(0x1380), dir: 'negative', result: reinterpretU16AsF16(0x137f) },
+ { val: reinterpretU16AsF16(0x9380), dir: 'positive', result: reinterpretU16AsF16(0x937f) },
+ { val: reinterpretU16AsF16(0x9380), dir: 'negative', result: reinterpretU16AsF16(0x9381) },
+
+ // Not precisely expressible as f16
+ { val: 0.01, dir: 'positive', result: reinterpretU16AsF16(0x211f) }, // positive normal
+ { val: 0.01, dir: 'negative', result: reinterpretU16AsF16(0x211e) }, // positive normal
+ { val: -0.01, dir: 'positive', result: reinterpretU16AsF16(0xa11e) }, // negative normal
+ { val: -0.01, dir: 'negative', result: reinterpretU16AsF16(0xa11f) }, // negative normal
+ { val: 2.82E-40, dir: 'positive', result: kValue.f16.positive.min }, // positive subnormal
+ { val: 2.82E-40, dir: 'negative', result: kValue.f16.negative.max }, // positive subnormal
+ { val: -2.82E-40, dir: 'positive', result: kValue.f16.positive.min }, // negative subnormal
+ { val: -2.82E-40, dir: 'negative', result: kValue.f16.negative.max }, // negative subnormal
+ ]
+ )
+ .fn(t => {
+ const val = t.params.val;
+ const dir = t.params.dir;
+ const expect = t.params.result;
+ const got = nextAfterF16(val, dir, 'flush');
+ t.expect(
+ got === expect || (Number.isNaN(got) && Number.isNaN(expect)),
+ `nextAfterF16(${f64(val)}, '${dir}', 'flush') returned ${f16(got)}. Expected ${f16(expect)}`
+ );
+ });
+
+g.test('nextAfterF16NoFlush')
+ .paramsSubcasesOnly<nextAfterCase>(
+ // prettier-ignore
+ [
+ // Edge Cases
+ { val: Number.NaN, dir: 'positive', result: Number.NaN },
+ { val: Number.NaN, dir: 'negative', result: Number.NaN },
+ { val: Number.POSITIVE_INFINITY, dir: 'positive', result: kValue.f16.positive.infinity },
+ { val: Number.POSITIVE_INFINITY, dir: 'negative', result: kValue.f16.positive.infinity },
+ { val: Number.NEGATIVE_INFINITY, dir: 'positive', result: kValue.f16.negative.infinity },
+ { val: Number.NEGATIVE_INFINITY, dir: 'negative', result: kValue.f16.negative.infinity },
+
+ // Zeroes
+ { val: +0, dir: 'positive', result: kValue.f16.positive.subnormal.min },
+ { val: +0, dir: 'negative', result: kValue.f16.negative.subnormal.max },
+ { val: -0, dir: 'positive', result: kValue.f16.positive.subnormal.min },
+ { val: -0, dir: 'negative', result: kValue.f16.negative.subnormal.max },
+
+ // Subnormals
+ { val: kValue.f16.positive.subnormal.min, dir: 'positive', result: reinterpretU16AsF16(0x0002) },
+ { val: kValue.f16.positive.subnormal.min, dir: 'negative', result: 0 },
+ { val: kValue.f16.positive.subnormal.max, dir: 'positive', result: kValue.f16.positive.min },
+ { val: kValue.f16.positive.subnormal.max, dir: 'negative', result: reinterpretU16AsF16(0x03fe) },
+ { val: kValue.f16.negative.subnormal.min, dir: 'positive', result: reinterpretU16AsF16(0x83fe) },
+ { val: kValue.f16.negative.subnormal.min, dir: 'negative', result: kValue.f16.negative.max },
+ { val: kValue.f16.negative.subnormal.max, dir: 'positive', result: 0 },
+ { val: kValue.f16.negative.subnormal.max, dir: 'negative', result: reinterpretU16AsF16(0x8002) },
+
+ // Normals
+ { val: kValue.f16.positive.max, dir: 'positive', result: kValue.f16.positive.infinity },
+ { val: kValue.f16.positive.max, dir: 'negative', result: reinterpretU16AsF16(0x7bfe) },
+ { val: kValue.f16.positive.min, dir: 'positive', result: reinterpretU16AsF16(0x0401) },
+ { val: kValue.f16.positive.min, dir: 'negative', result: kValue.f16.positive.subnormal.max },
+ { val: kValue.f16.negative.max, dir: 'positive', result: kValue.f16.negative.subnormal.min },
+ { val: kValue.f16.negative.max, dir: 'negative', result: reinterpretU16AsF16(0x8401) },
+ { val: kValue.f16.negative.min, dir: 'positive', result: reinterpretU16AsF16(0xfbfe) },
+ { val: kValue.f16.negative.min, dir: 'negative', result: kValue.f16.negative.infinity },
+ { val: reinterpretU16AsF16(0x1380), dir: 'positive', result: reinterpretU16AsF16(0x1381) },
+ { val: reinterpretU16AsF16(0x1380), dir: 'negative', result: reinterpretU16AsF16(0x137f) },
+ { val: reinterpretU16AsF16(0x9380), dir: 'positive', result: reinterpretU16AsF16(0x937f) },
+ { val: reinterpretU16AsF16(0x9380), dir: 'negative', result: reinterpretU16AsF16(0x9381) },
+
+ // Not precisely expressible as f16
+ { val: 0.01, dir: 'positive', result: reinterpretU16AsF16(0x211f) }, // positive normal
+ { val: 0.01, dir: 'negative', result: reinterpretU16AsF16(0x211e) }, // positive normal
+ { val: -0.01, dir: 'positive', result: reinterpretU16AsF16(0xa11e) }, // negative normal
+ { val: -0.01, dir: 'negative', result: reinterpretU16AsF16(0xa11f) }, // negative normal
+ { val: 2.82E-40, dir: 'positive', result: kValue.f16.positive.subnormal.min }, // positive subnormal
+ { val: 2.82E-40, dir: 'negative', result: 0 }, // positive subnormal
+ { val: -2.82E-40, dir: 'positive', result: 0 }, // negative subnormal
+ { val: -2.82E-40, dir: 'negative', result: kValue.f16.negative.subnormal.max }, // negative subnormal
+ ]
+ )
+ .fn(t => {
+ const val = t.params.val;
+ const dir = t.params.dir;
+ const expect = t.params.result;
+ const got = nextAfterF16(val, dir, 'no-flush');
+ t.expect(
+ got === expect || (Number.isNaN(got) && Number.isNaN(expect)),
+ `nextAfterF16(${f64(val)}, '${dir}', 'no-flush') returned ${f16(got)}. Expected ${f16(
+ expect
+ )}`
+ );
+ });
+
+interface OneULPCase {
+ target: number;
+ expect: number;
+}
+
+g.test('oneULPF64FlushToZero')
+ .paramsSimple<OneULPCase>([
+ // Edge Cases
+ { target: Number.NaN, expect: Number.NaN },
+ { target: Number.POSITIVE_INFINITY, expect: reinterpretU64AsF64(0x7ca0_0000_0000_0000n) },
+ { target: Number.NEGATIVE_INFINITY, expect: reinterpretU64AsF64(0x7ca0_0000_0000_0000n) },
+
+ // Zeroes
+ { target: +0, expect: reinterpretU64AsF64(0x0010_0000_0000_0000n) },
+ { target: -0, expect: reinterpretU64AsF64(0x0010_0000_0000_0000n) },
+
+ // Subnormals
+ {
+ target: kValue.f64.positive.subnormal.min,
+ expect: reinterpretU64AsF64(0x0010_0000_0000_0000n),
+ },
+ {
+ target: kValue.f64.positive.subnormal.max,
+ expect: reinterpretU64AsF64(0x0010_0000_0000_0000n),
+ },
+ {
+ target: kValue.f64.negative.subnormal.min,
+ expect: reinterpretU64AsF64(0x0010_0000_0000_0000n),
+ },
+ {
+ target: kValue.f64.negative.subnormal.max,
+ expect: reinterpretU64AsF64(0x0010_0000_0000_0000n),
+ },
+
+ // Normals
+ { target: kValue.f64.positive.min, expect: reinterpretU64AsF64(0x0000_0000_0000_0001n) },
+ { target: 1, expect: reinterpretU64AsF64(0x3ca0_0000_0000_0000n) },
+ { target: 2, expect: reinterpretU64AsF64(0x3cb0_0000_0000_0000n) },
+ { target: 4, expect: reinterpretU64AsF64(0x3cc0_0000_0000_0000n) },
+ { target: 1000000, expect: reinterpretU64AsF64(0x3de0_0000_0000_0000n) },
+ { target: kValue.f64.positive.max, expect: reinterpretU64AsF64(0x7ca0_0000_0000_0000n) },
+ { target: kValue.f64.negative.max, expect: reinterpretU64AsF64(0x0000_0000_0000_0001n) },
+ { target: -1, expect: reinterpretU64AsF64(0x3ca0_0000_0000_0000n) },
+ { target: -2, expect: reinterpretU64AsF64(0x3cb0_0000_0000_0000n) },
+ { target: -4, expect: reinterpretU64AsF64(0x3cc0_0000_0000_0000n) },
+ { target: -1000000, expect: reinterpretU64AsF64(0x3de0_0000_0000_0000n) },
+ { target: kValue.f64.negative.min, expect: reinterpretU64AsF64(0x7ca0_0000_0000_0000n) },
+ ])
+ .fn(t => {
+ const target = t.params.target;
+ const got = oneULPF64(target, 'flush');
+ const expect = t.params.expect;
+ t.expect(
+ got === expect || (Number.isNaN(got) && Number.isNaN(expect)),
+ `oneULPF64(${f64(target)}, 'flush') returned ${f64(got)}. Expected ${f64(expect)}`
+ );
+ });
+
+g.test('oneULPF64NoFlush')
+ .paramsSimple<OneULPCase>([
+ // Edge Cases
+ { target: Number.NaN, expect: Number.NaN },
+ { target: Number.POSITIVE_INFINITY, expect: reinterpretU64AsF64(0x7ca0_0000_0000_0000n) },
+ { target: Number.NEGATIVE_INFINITY, expect: reinterpretU64AsF64(0x7ca0_0000_0000_0000n) },
+
+ // Zeroes
+ { target: +0, expect: reinterpretU64AsF64(0x0000_0000_0000_0001n) },
+ { target: -0, expect: reinterpretU64AsF64(0x0000_0000_0000_0001n) },
+
+ // Subnormals
+ {
+ target: kValue.f64.positive.subnormal.min,
+ expect: reinterpretU64AsF64(0x0000_0000_0000_0001n),
+ },
+ {
+ target: kValue.f64.positive.subnormal.max,
+ expect: reinterpretU64AsF64(0x0000_0000_0000_0001n),
+ },
+ {
+ target: kValue.f64.negative.subnormal.min,
+ expect: reinterpretU64AsF64(0x0000_0000_0000_0001n),
+ },
+ {
+ target: kValue.f64.negative.subnormal.max,
+ expect: reinterpretU64AsF64(0x0000_0000_0000_0001n),
+ },
+
+ // Normals
+ { target: kValue.f64.positive.min, expect: reinterpretU64AsF64(0x0000_0000_0000_0001n) },
+ { target: 1, expect: reinterpretU64AsF64(0x3ca0_0000_0000_0000n) },
+ { target: 2, expect: reinterpretU64AsF64(0x3cb0_0000_0000_0000n) },
+ { target: 4, expect: reinterpretU64AsF64(0x3cc0_0000_0000_0000n) },
+ { target: 1000000, expect: reinterpretU64AsF64(0x3de0_0000_0000_0000n) },
+ { target: kValue.f64.positive.max, expect: reinterpretU64AsF64(0x7ca0_0000_0000_0000n) },
+ { target: kValue.f64.negative.max, expect: reinterpretU64AsF64(0x0000_0000_0000_0001n) },
+ { target: -1, expect: reinterpretU64AsF64(0x3ca0_0000_0000_0000n) },
+ { target: -2, expect: reinterpretU64AsF64(0x3cb0_0000_0000_0000n) },
+ { target: -4, expect: reinterpretU64AsF64(0x3cc0_0000_0000_0000n) },
+ { target: -1000000, expect: reinterpretU64AsF64(0x3de0_0000_0000_0000n) },
+ { target: kValue.f64.negative.min, expect: reinterpretU64AsF64(0x7ca0_0000_0000_0000n) },
+ ])
+ .fn(t => {
+ const target = t.params.target;
+ const got = oneULPF64(target, 'no-flush');
+ const expect = t.params.expect;
+ t.expect(
+ got === expect || (Number.isNaN(got) && Number.isNaN(expect)),
+ `oneULPF64(${f64(target)}, 'no-flush') returned ${f64(got)}. Expected ${f64(expect)}`
+ );
+ });
+
+g.test('oneULPF64')
+ .paramsSimple<OneULPCase>([
+ // Edge Cases
+ { target: Number.NaN, expect: Number.NaN },
+ { target: Number.POSITIVE_INFINITY, expect: reinterpretU64AsF64(0x7ca0_0000_0000_0000n) },
+ { target: Number.NEGATIVE_INFINITY, expect: reinterpretU64AsF64(0x7ca0_0000_0000_0000n) },
+
+ // Zeroes
+ { target: +0, expect: reinterpretU64AsF64(0x0010_0000_0000_0000n) },
+ { target: -0, expect: reinterpretU64AsF64(0x0010_0000_0000_0000n) },
+
+ // Subnormals
+ {
+ target: kValue.f64.positive.subnormal.min,
+ expect: reinterpretU64AsF64(0x0010_0000_0000_0000n),
+ },
+ {
+ target: kValue.f64.positive.subnormal.max,
+ expect: reinterpretU64AsF64(0x0010_0000_0000_0000n),
+ },
+ {
+ target: kValue.f64.negative.subnormal.min,
+ expect: reinterpretU64AsF64(0x0010_0000_0000_0000n),
+ },
+ {
+ target: kValue.f64.negative.subnormal.max,
+ expect: reinterpretU64AsF64(0x0010_0000_0000_0000n),
+ },
+
+ // Normals
+ { target: kValue.f64.positive.min, expect: reinterpretU64AsF64(0x0000_0000_0000_0001n) },
+ { target: 1, expect: reinterpretU64AsF64(0x3ca0_0000_0000_0000n) },
+ { target: 2, expect: reinterpretU64AsF64(0x3cb0_0000_0000_0000n) },
+ { target: 4, expect: reinterpretU64AsF64(0x3cc0_0000_0000_0000n) },
+ { target: 1000000, expect: reinterpretU64AsF64(0x3de0_0000_0000_0000n) },
+ { target: kValue.f64.positive.max, expect: reinterpretU64AsF64(0x7ca0_0000_0000_0000n) },
+ { target: kValue.f64.negative.max, expect: reinterpretU64AsF64(0x0000_0000_0000_0001n) },
+ { target: -1, expect: reinterpretU64AsF64(0x3ca0_0000_0000_0000n) },
+ { target: -2, expect: reinterpretU64AsF64(0x3cb0_0000_0000_0000n) },
+ { target: -4, expect: reinterpretU64AsF64(0x3cc0_0000_0000_0000n) },
+ { target: -1000000, expect: reinterpretU64AsF64(0x3de0_0000_0000_0000n) },
+ { target: kValue.f64.negative.min, expect: reinterpretU64AsF64(0x7ca0_0000_0000_0000n) },
+ ])
+ .fn(t => {
+ const target = t.params.target;
+ const got = oneULPF64(target);
+ const expect = t.params.expect;
+ t.expect(
+ got === expect || (Number.isNaN(got) && Number.isNaN(expect)),
+ `oneULPF64(${f64(target)}) returned ${f64(got)}. Expected ${f64(expect)}`
+ );
+ });
+
+g.test('oneULPF32FlushToZero')
+ .paramsSimple<OneULPCase>([
+ // Edge Cases
+ { target: Number.NaN, expect: Number.NaN },
+ { target: Number.POSITIVE_INFINITY, expect: reinterpretU32AsF32(0x73800000) },
+ { target: Number.NEGATIVE_INFINITY, expect: reinterpretU32AsF32(0x73800000) },
+
+ // Zeroes
+ { target: +0, expect: reinterpretU32AsF32(0x00800000) },
+ { target: -0, expect: reinterpretU32AsF32(0x00800000) },
+
+ // Subnormals
+ { target: kValue.f32.positive.subnormal.min, expect: reinterpretU32AsF32(0x00800000) },
+ { target: 2.82e-40, expect: reinterpretU32AsF32(0x00800000) }, // positive subnormal
+ { target: kValue.f32.positive.subnormal.max, expect: reinterpretU32AsF32(0x00800000) },
+ { target: kValue.f32.negative.subnormal.min, expect: reinterpretU32AsF32(0x00800000) },
+ { target: -2.82e-40, expect: reinterpretU32AsF32(0x00800000) }, // negative subnormal
+ { target: kValue.f32.negative.subnormal.max, expect: reinterpretU32AsF32(0x00800000) },
+
+ // Normals
+ { target: kValue.f32.positive.min, expect: reinterpretU32AsF32(0x00000001) },
+ { target: 1, expect: reinterpretU32AsF32(0x33800000) },
+ { target: 2, expect: reinterpretU32AsF32(0x34000000) },
+ { target: 4, expect: reinterpretU32AsF32(0x34800000) },
+ { target: 1000000, expect: reinterpretU32AsF32(0x3d800000) },
+ { target: kValue.f32.positive.max, expect: reinterpretU32AsF32(0x73800000) },
+ { target: kValue.f32.negative.max, expect: reinterpretU32AsF32(0x00000001) },
+ { target: -1, expect: reinterpretU32AsF32(0x33800000) },
+ { target: -2, expect: reinterpretU32AsF32(0x34000000) },
+ { target: -4, expect: reinterpretU32AsF32(0x34800000) },
+ { target: -1000000, expect: reinterpretU32AsF32(0x3d800000) },
+ { target: kValue.f32.negative.min, expect: reinterpretU32AsF32(0x73800000) },
+
+ // No precise f32 value
+ { target: 0.001, expect: reinterpretU32AsF32(0x2f000000) }, // positive normal
+ { target: -0.001, expect: reinterpretU32AsF32(0x2f000000) }, // negative normal
+ { target: 1e40, expect: reinterpretU32AsF32(0x73800000) }, // positive out of range
+ { target: -1e40, expect: reinterpretU32AsF32(0x73800000) }, // negative out of range
+ ])
+ .fn(t => {
+ const target = t.params.target;
+ const got = oneULPF32(target, 'flush');
+ const expect = t.params.expect;
+ t.expect(
+ got === expect || (Number.isNaN(got) && Number.isNaN(expect)),
+ `oneULPF32(${target}, 'flush') returned ${got}. Expected ${expect}`
+ );
+ });
+
+g.test('oneULPF32NoFlush')
+ .paramsSimple<OneULPCase>([
+ // Edge Cases
+ { target: Number.NaN, expect: Number.NaN },
+ { target: Number.POSITIVE_INFINITY, expect: reinterpretU32AsF32(0x73800000) },
+ { target: Number.NEGATIVE_INFINITY, expect: reinterpretU32AsF32(0x73800000) },
+
+ // Zeroes
+ { target: +0, expect: reinterpretU32AsF32(0x00000001) },
+ { target: -0, expect: reinterpretU32AsF32(0x00000001) },
+
+ // Subnormals
+ { target: kValue.f32.positive.subnormal.min, expect: reinterpretU32AsF32(0x00000001) },
+ { target: -2.82e-40, expect: reinterpretU32AsF32(0x00000001) }, // negative subnormal
+ { target: kValue.f32.positive.subnormal.max, expect: reinterpretU32AsF32(0x00000001) },
+ { target: kValue.f32.negative.subnormal.min, expect: reinterpretU32AsF32(0x00000001) },
+ { target: 2.82e-40, expect: reinterpretU32AsF32(0x00000001) }, // positive subnormal
+ { target: kValue.f32.negative.subnormal.max, expect: reinterpretU32AsF32(0x00000001) },
+
+ // Normals
+ { target: kValue.f32.positive.min, expect: reinterpretU32AsF32(0x00000001) },
+ { target: 1, expect: reinterpretU32AsF32(0x33800000) },
+ { target: 2, expect: reinterpretU32AsF32(0x34000000) },
+ { target: 4, expect: reinterpretU32AsF32(0x34800000) },
+ { target: 1000000, expect: reinterpretU32AsF32(0x3d800000) },
+ { target: kValue.f32.positive.max, expect: reinterpretU32AsF32(0x73800000) },
+ { target: kValue.f32.negative.max, expect: reinterpretU32AsF32(0x00000001) },
+ { target: -1, expect: reinterpretU32AsF32(0x33800000) },
+ { target: -2, expect: reinterpretU32AsF32(0x34000000) },
+ { target: -4, expect: reinterpretU32AsF32(0x34800000) },
+ { target: -1000000, expect: reinterpretU32AsF32(0x3d800000) },
+ { target: kValue.f32.negative.min, expect: reinterpretU32AsF32(0x73800000) },
+
+ // No precise f32 value
+ { target: 0.001, expect: reinterpretU32AsF32(0x2f000000) }, // positive normal
+ { target: -0.001, expect: reinterpretU32AsF32(0x2f000000) }, // negative normal
+ { target: 1e40, expect: reinterpretU32AsF32(0x73800000) }, // positive out of range
+ { target: -1e40, expect: reinterpretU32AsF32(0x73800000) }, // negative out of range
+ ])
+ .fn(t => {
+ const target = t.params.target;
+ const got = oneULPF32(target, 'no-flush');
+ const expect = t.params.expect;
+ t.expect(
+ got === expect || (Number.isNaN(got) && Number.isNaN(expect)),
+ `oneULPF32(${target}, 'no-flush') returned ${got}. Expected ${expect}`
+ );
+ });
+
+g.test('oneULPF32')
+ .paramsSimple<OneULPCase>([
+ // Edge Cases
+ { target: Number.NaN, expect: Number.NaN },
+ { target: Number.NEGATIVE_INFINITY, expect: reinterpretU32AsF32(0x73800000) },
+ { target: Number.POSITIVE_INFINITY, expect: reinterpretU32AsF32(0x73800000) },
+
+ // Zeroes
+ { target: +0, expect: reinterpretU32AsF32(0x00800000) },
+ { target: -0, expect: reinterpretU32AsF32(0x00800000) },
+
+ // Subnormals
+ { target: kValue.f32.negative.subnormal.max, expect: reinterpretU32AsF32(0x00800000) },
+ { target: -2.82e-40, expect: reinterpretU32AsF32(0x00800000) },
+ { target: kValue.f32.negative.subnormal.min, expect: reinterpretU32AsF32(0x00800000) },
+ { target: kValue.f32.positive.subnormal.max, expect: reinterpretU32AsF32(0x00800000) },
+ { target: 2.82e-40, expect: reinterpretU32AsF32(0x00800000) },
+ { target: kValue.f32.positive.subnormal.min, expect: reinterpretU32AsF32(0x00800000) },
+
+ // Normals
+ { target: kValue.f32.positive.min, expect: reinterpretU32AsF32(0x00000001) },
+ { target: 1, expect: reinterpretU32AsF32(0x33800000) },
+ { target: 2, expect: reinterpretU32AsF32(0x34000000) },
+ { target: 4, expect: reinterpretU32AsF32(0x34800000) },
+ { target: 1000000, expect: reinterpretU32AsF32(0x3d800000) },
+ { target: kValue.f32.positive.max, expect: reinterpretU32AsF32(0x73800000) },
+ { target: kValue.f32.negative.max, expect: reinterpretU32AsF32(0x00000001) },
+ { target: -1, expect: reinterpretU32AsF32(0x33800000) },
+ { target: -2, expect: reinterpretU32AsF32(0x34000000) },
+ { target: -4, expect: reinterpretU32AsF32(0x34800000) },
+ { target: -1000000, expect: reinterpretU32AsF32(0x3d800000) },
+ { target: kValue.f32.negative.min, expect: reinterpretU32AsF32(0x73800000) },
+
+ // No precise f32 value
+ { target: -0.001, expect: reinterpretU32AsF32(0x2f000000) }, // negative normal
+ { target: -1e40, expect: reinterpretU32AsF32(0x73800000) }, // negative out of range
+ { target: 0.001, expect: reinterpretU32AsF32(0x2f000000) }, // positive normal
+ { target: 1e40, expect: reinterpretU32AsF32(0x73800000) }, // positive out of range
+ ])
+ .fn(t => {
+ const target = t.params.target;
+ const got = oneULPF32(target);
+ const expect = t.params.expect;
+ t.expect(
+ got === expect || (Number.isNaN(got) && Number.isNaN(expect)),
+ `oneULPF32(${target}) returned ${got}. Expected ${expect}`
+ );
+ });
+
+g.test('oneULPF16FlushToZero')
+ .paramsSubcasesOnly<OneULPCase>([
+ // Edge Cases
+ { target: Number.NaN, expect: Number.NaN },
+ { target: Number.POSITIVE_INFINITY, expect: reinterpretU16AsF16(0x5000) },
+ { target: Number.NEGATIVE_INFINITY, expect: reinterpretU16AsF16(0x5000) },
+
+ // Zeroes, expect positive.min in flush mode
+ { target: +0, expect: reinterpretU16AsF16(0x0400) },
+ { target: -0, expect: reinterpretU16AsF16(0x0400) },
+
+ // Subnormals
+ { target: kValue.f16.positive.subnormal.min, expect: reinterpretU16AsF16(0x0400) },
+ { target: 1.91e-6, expect: reinterpretU16AsF16(0x0400) }, // positive subnormal
+ { target: kValue.f16.positive.subnormal.max, expect: reinterpretU16AsF16(0x0400) },
+ { target: kValue.f16.negative.subnormal.min, expect: reinterpretU16AsF16(0x0400) },
+ { target: -1.91e-6, expect: reinterpretU16AsF16(0x0400) }, // negative subnormal
+ { target: kValue.f16.negative.subnormal.max, expect: reinterpretU16AsF16(0x0400) },
+
+ // Normals
+ { target: kValue.f16.positive.min, expect: reinterpretU16AsF16(0x0001) },
+ { target: 1, expect: reinterpretU16AsF16(0x1000) },
+ { target: 2, expect: reinterpretU16AsF16(0x1400) },
+ { target: 4, expect: reinterpretU16AsF16(0x1800) },
+ { target: 1000, expect: reinterpretU16AsF16(0x3800) },
+ { target: kValue.f16.positive.max, expect: reinterpretU16AsF16(0x5000) },
+ { target: kValue.f16.negative.max, expect: reinterpretU16AsF16(0x0001) },
+ { target: -1, expect: reinterpretU16AsF16(0x1000) },
+ { target: -2, expect: reinterpretU16AsF16(0x1400) },
+ { target: -4, expect: reinterpretU16AsF16(0x1800) },
+ { target: -1000, expect: reinterpretU16AsF16(0x3800) },
+ { target: kValue.f16.negative.min, expect: reinterpretU16AsF16(0x5000) },
+
+ // No precise f16 value
+ { target: 0.001, expect: reinterpretU16AsF16(0x0010) }, // positive normal
+ { target: -0.001, expect: reinterpretU16AsF16(0x0010) }, // negative normal
+ { target: 1e8, expect: reinterpretU16AsF16(0x5000) }, // positive out of range
+ { target: -1e8, expect: reinterpretU16AsF16(0x5000) }, // negative out of range
+ ])
+ .fn(t => {
+ const target = t.params.target;
+ const got = oneULPF16(target, 'flush');
+ const expect = t.params.expect;
+ t.expect(
+ got === expect || (Number.isNaN(got) && Number.isNaN(expect)),
+ `oneULPF16(${target}, 'flush') returned ${got}. Expected ${expect}`
+ );
+ });
+
+g.test('oneULPF16NoFlush')
+ .paramsSubcasesOnly<OneULPCase>([
+ // Edge Cases
+ { target: Number.NaN, expect: Number.NaN },
+ { target: Number.POSITIVE_INFINITY, expect: reinterpretU16AsF16(0x5000) },
+ { target: Number.NEGATIVE_INFINITY, expect: reinterpretU16AsF16(0x5000) },
+
+ // Zeroes, expect positive.subnormal.min in no-flush mode
+ { target: +0, expect: reinterpretU16AsF16(0x0001) },
+ { target: -0, expect: reinterpretU16AsF16(0x0001) },
+
+ // Subnormals
+ { target: kValue.f16.positive.subnormal.min, expect: reinterpretU16AsF16(0x0001) },
+ { target: 1.91e-6, expect: reinterpretU16AsF16(0x0001) }, // positive subnormal
+ { target: kValue.f16.positive.subnormal.max, expect: reinterpretU16AsF16(0x0001) },
+ { target: kValue.f16.negative.subnormal.min, expect: reinterpretU16AsF16(0x0001) },
+ { target: -1.91e-6, expect: reinterpretU16AsF16(0x0001) }, // negative subnormal
+ { target: kValue.f16.negative.subnormal.max, expect: reinterpretU16AsF16(0x0001) },
+
+ // Normals
+ { target: kValue.f16.positive.min, expect: reinterpretU16AsF16(0x0001) },
+ { target: 1, expect: reinterpretU16AsF16(0x1000) },
+ { target: 2, expect: reinterpretU16AsF16(0x1400) },
+ { target: 4, expect: reinterpretU16AsF16(0x1800) },
+ { target: 1000, expect: reinterpretU16AsF16(0x3800) },
+ { target: kValue.f16.positive.max, expect: reinterpretU16AsF16(0x5000) },
+ { target: kValue.f16.negative.max, expect: reinterpretU16AsF16(0x0001) },
+ { target: -1, expect: reinterpretU16AsF16(0x1000) },
+ { target: -2, expect: reinterpretU16AsF16(0x1400) },
+ { target: -4, expect: reinterpretU16AsF16(0x1800) },
+ { target: -1000, expect: reinterpretU16AsF16(0x3800) },
+ { target: kValue.f16.negative.min, expect: reinterpretU16AsF16(0x5000) },
+
+ // No precise f16 value
+ { target: 0.001, expect: reinterpretU16AsF16(0x0010) }, // positive normal
+ { target: -0.001, expect: reinterpretU16AsF16(0x0010) }, // negative normal
+ { target: 1e8, expect: reinterpretU16AsF16(0x5000) }, // positive out of range
+ { target: -1e8, expect: reinterpretU16AsF16(0x5000) }, // negative out of range
+ ])
+ .fn(t => {
+ const target = t.params.target;
+ const got = oneULPF16(target, 'no-flush');
+ const expect = t.params.expect;
+ t.expect(
+ got === expect || (Number.isNaN(got) && Number.isNaN(expect)),
+ `oneULPF16(${target}, 'no-flush') returned ${got}. Expected ${expect}`
+ );
+ });
+
+g.test('oneULPF16')
+ .paramsSubcasesOnly<OneULPCase>([
+ // Edge Cases
+ { target: Number.NaN, expect: Number.NaN },
+ { target: Number.POSITIVE_INFINITY, expect: reinterpretU16AsF16(0x5000) },
+ { target: Number.NEGATIVE_INFINITY, expect: reinterpretU16AsF16(0x5000) },
+
+ // Zeroes, expect positive.min in flush mode
+ { target: +0, expect: reinterpretU16AsF16(0x0400) },
+ { target: -0, expect: reinterpretU16AsF16(0x0400) },
+
+ // Subnormals
+ { target: kValue.f16.positive.subnormal.min, expect: reinterpretU16AsF16(0x0400) },
+ { target: 1.91e-6, expect: reinterpretU16AsF16(0x0400) }, // positive subnormal
+ { target: kValue.f16.positive.subnormal.max, expect: reinterpretU16AsF16(0x0400) },
+ { target: kValue.f16.negative.subnormal.min, expect: reinterpretU16AsF16(0x0400) },
+ { target: -1.91e-6, expect: reinterpretU16AsF16(0x0400) }, // negative subnormal
+ { target: kValue.f16.negative.subnormal.max, expect: reinterpretU16AsF16(0x0400) },
+
+ // Normals
+ { target: kValue.f16.positive.min, expect: reinterpretU16AsF16(0x0001) },
+ { target: 1, expect: reinterpretU16AsF16(0x1000) },
+ { target: 2, expect: reinterpretU16AsF16(0x1400) },
+ { target: 4, expect: reinterpretU16AsF16(0x1800) },
+ { target: 1000, expect: reinterpretU16AsF16(0x3800) },
+ { target: kValue.f16.positive.max, expect: reinterpretU16AsF16(0x5000) },
+ { target: kValue.f16.negative.max, expect: reinterpretU16AsF16(0x0001) },
+ { target: -1, expect: reinterpretU16AsF16(0x1000) },
+ { target: -2, expect: reinterpretU16AsF16(0x1400) },
+ { target: -4, expect: reinterpretU16AsF16(0x1800) },
+ { target: -1000, expect: reinterpretU16AsF16(0x3800) },
+ { target: kValue.f16.negative.min, expect: reinterpretU16AsF16(0x5000) },
+
+ // No precise f16 value
+ { target: 0.001, expect: reinterpretU16AsF16(0x0010) }, // positive normal
+ { target: -0.001, expect: reinterpretU16AsF16(0x0010) }, // negative normal
+ { target: 1e8, expect: reinterpretU16AsF16(0x5000) }, // positive out of range
+ { target: -1e8, expect: reinterpretU16AsF16(0x5000) }, // negative out of range
+ ])
+ .fn(t => {
+ const target = t.params.target;
+ const got = oneULPF16(target);
+ const expect = t.params.expect;
+ t.expect(
+ got === expect || (Number.isNaN(got) && Number.isNaN(expect)),
+ `oneULPF16(${target}) returned ${got}. Expected ${expect}`
+ );
+ });
+
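+// `expected` lists the acceptable correctly rounded results for `value`: a single
+// entry when only one result is acceptable, otherwise the two bracketing candidates
+// in ascending order.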
+interface correctlyRoundedCase {
+ value: number;
+ expected: Array<number>;
+}
+
+g.test('correctlyRoundedF32')
+ .paramsSubcasesOnly<correctlyRoundedCase>(
+ // prettier-ignore
+ [
+ // Edge Cases
+ { value: kValue.f32.positive.max, expected: [kValue.f32.positive.max] },
+ { value: kValue.f32.negative.min, expected: [kValue.f32.negative.min] },
+ { value: kValue.f32.positive.max + oneULPF64(kValue.f32.positive.max), expected: [kValue.f32.positive.max, Number.POSITIVE_INFINITY] },
+ { value: kValue.f32.negative.min - oneULPF64(kValue.f32.negative.min), expected: [Number.NEGATIVE_INFINITY, kValue.f32.negative.min] },
+ { value: 2 ** (kValue.f32.emax + 1) - oneULPF64(kValue.f32.positive.max), expected: [kValue.f32.positive.max, Number.POSITIVE_INFINITY] },
+ { value: -(2 ** (kValue.f32.emax + 1)) + oneULPF64(kValue.f32.positive.max), expected: [Number.NEGATIVE_INFINITY, kValue.f32.negative.min] },
+ { value: 2 ** (kValue.f32.emax + 1), expected: [Number.POSITIVE_INFINITY] },
+ { value: -(2 ** (kValue.f32.emax + 1)), expected: [Number.NEGATIVE_INFINITY] },
+ { value: kValue.f32.positive.infinity, expected: [Number.POSITIVE_INFINITY] },
+ { value: kValue.f32.negative.infinity, expected: [Number.NEGATIVE_INFINITY] },
+
+ // 32-bit subnormals
+ { value: kValue.f32.positive.subnormal.min, expected: [kValue.f32.positive.subnormal.min] },
+ { value: kValue.f32.positive.subnormal.max, expected: [kValue.f32.positive.subnormal.max] },
+ { value: kValue.f32.negative.subnormal.min, expected: [kValue.f32.negative.subnormal.min] },
+ { value: kValue.f32.negative.subnormal.max, expected: [kValue.f32.negative.subnormal.max] },
+
+ // 64-bit subnormals
+ { value: reinterpretU64AsF64(0x0000_0000_0000_0001n), expected: [0, kValue.f32.positive.subnormal.min] },
+ { value: reinterpretU64AsF64(0x0000_0000_0000_0002n), expected: [0, kValue.f32.positive.subnormal.min] },
+ { value: reinterpretU64AsF64(0x800f_ffff_ffff_ffffn), expected: [kValue.f32.negative.subnormal.max, 0] },
+ { value: reinterpretU64AsF64(0x800f_ffff_ffff_fffen), expected: [kValue.f32.negative.subnormal.max, 0] },
+
+ // 32-bit normals
+ { value: 0, expected: [0] },
+ { value: kValue.f32.positive.min, expected: [kValue.f32.positive.min] },
+ { value: kValue.f32.negative.max, expected: [kValue.f32.negative.max] },
+ { value: reinterpretU32AsF32(0x03800000), expected: [reinterpretU32AsF32(0x03800000)] },
+ { value: reinterpretU32AsF32(0x03800001), expected: [reinterpretU32AsF32(0x03800001)] },
+ { value: reinterpretU32AsF32(0x83800000), expected: [reinterpretU32AsF32(0x83800000)] },
+ { value: reinterpretU32AsF32(0x83800001), expected: [reinterpretU32AsF32(0x83800001)] },
+
+ // 64-bit normals
+ { value: reinterpretU64AsF64(0x3ff0_0000_0000_0001n), expected: [reinterpretU32AsF32(0x3f800000), reinterpretU32AsF32(0x3f800001)] },
+ { value: reinterpretU64AsF64(0x3ff0_0000_0000_0002n), expected: [reinterpretU32AsF32(0x3f800000), reinterpretU32AsF32(0x3f800001)] },
+ { value: reinterpretU64AsF64(0x3ff0_0010_0000_0010n), expected: [reinterpretU32AsF32(0x3f800080), reinterpretU32AsF32(0x3f800081)] },
+ { value: reinterpretU64AsF64(0x3ff0_0020_0000_0020n), expected: [reinterpretU32AsF32(0x3f800100), reinterpretU32AsF32(0x3f800101)] },
+ { value: reinterpretU64AsF64(0xbff0_0000_0000_0001n), expected: [reinterpretU32AsF32(0xbf800001), reinterpretU32AsF32(0xbf800000)] },
+ { value: reinterpretU64AsF64(0xbff0_0000_0000_0002n), expected: [reinterpretU32AsF32(0xbf800001), reinterpretU32AsF32(0xbf800000)] },
+ { value: reinterpretU64AsF64(0xbff0_0010_0000_0010n), expected: [reinterpretU32AsF32(0xbf800081), reinterpretU32AsF32(0xbf800080)] },
+ { value: reinterpretU64AsF64(0xbff0_0020_0000_0020n), expected: [reinterpretU32AsF32(0xbf800101), reinterpretU32AsF32(0xbf800100)] },
+ ]
+ )
+ .fn(t => {
+ const value = t.params.value;
+ const expected = t.params.expected;
+
+ const got = correctlyRoundedF32(value);
+ t.expect(
+ objectEquals(expected, got),
+ `correctlyRoundedF32(${f64(value)}) returned [${got.map(f32)}]. Expected [${expected.map(
+ f32
+ )}]`
+ );
+ });
+
+g.test('correctlyRoundedF16')
+ .paramsSubcasesOnly<correctlyRoundedCase>(
+ // prettier-ignore
+ [
+ // Edge Cases
+ { value: kValue.f16.positive.max, expected: [kValue.f16.positive.max] },
+ { value: kValue.f16.negative.min, expected: [kValue.f16.negative.min] },
+ { value: kValue.f16.positive.max + oneULPF64(kValue.f16.positive.max), expected: [kValue.f16.positive.max, Number.POSITIVE_INFINITY] },
+ { value: kValue.f16.negative.min - oneULPF64(kValue.f16.negative.min), expected: [Number.NEGATIVE_INFINITY, kValue.f16.negative.min] },
+ { value: 2 ** (kValue.f16.emax + 1) - oneULPF64(kValue.f16.positive.max), expected: [kValue.f16.positive.max, Number.POSITIVE_INFINITY] },
+ { value: -(2 ** (kValue.f16.emax + 1)) + oneULPF64(kValue.f16.positive.max), expected: [Number.NEGATIVE_INFINITY, kValue.f16.negative.min] },
+ { value: 2 ** (kValue.f16.emax + 1), expected: [Number.POSITIVE_INFINITY] },
+ { value: -(2 ** (kValue.f16.emax + 1)), expected: [Number.NEGATIVE_INFINITY] },
+ { value: kValue.f16.positive.infinity, expected: [Number.POSITIVE_INFINITY] },
+ { value: kValue.f16.negative.infinity, expected: [Number.NEGATIVE_INFINITY] },
+
+ // 16-bit subnormals
+ { value: kValue.f16.positive.subnormal.min, expected: [kValue.f16.positive.subnormal.min] },
+ { value: kValue.f16.positive.subnormal.max, expected: [kValue.f16.positive.subnormal.max] },
+ { value: kValue.f16.negative.subnormal.min, expected: [kValue.f16.negative.subnormal.min] },
+ { value: kValue.f16.negative.subnormal.max, expected: [kValue.f16.negative.subnormal.max] },
+
+ // 32-bit subnormals
+ { value: kValue.f32.positive.subnormal.min, expected: [0, kValue.f16.positive.subnormal.min] },
+ { value: kValue.f32.positive.subnormal.max, expected: [0, kValue.f16.positive.subnormal.min] },
+ { value: kValue.f32.negative.subnormal.max, expected: [kValue.f16.negative.subnormal.max, 0] },
+ { value: kValue.f32.negative.subnormal.min, expected: [kValue.f16.negative.subnormal.max, 0] },
+
+ // 16-bit normals
+ { value: 0, expected: [0] },
+ { value: kValue.f16.positive.min, expected: [kValue.f16.positive.min] },
+ { value: kValue.f16.negative.max, expected: [kValue.f16.negative.max] },
+ { value: reinterpretU16AsF16(0x1380), expected: [reinterpretU16AsF16(0x1380)] },
+ { value: reinterpretU16AsF16(0x1381), expected: [reinterpretU16AsF16(0x1381)] },
+ { value: reinterpretU16AsF16(0x9380), expected: [reinterpretU16AsF16(0x9380)] },
+ { value: reinterpretU16AsF16(0x9381), expected: [reinterpretU16AsF16(0x9381)] },
+
+ // 32-bit normals
+ { value: reinterpretU32AsF32(0x3a700001), expected: [reinterpretU16AsF16(0x1380), reinterpretU16AsF16(0x1381)] },
+ { value: reinterpretU32AsF32(0x3a700002), expected: [reinterpretU16AsF16(0x1380), reinterpretU16AsF16(0x1381)] },
+ { value: reinterpretU32AsF32(0xba700001), expected: [reinterpretU16AsF16(0x9381), reinterpretU16AsF16(0x9380)] },
+ { value: reinterpretU32AsF32(0xba700002), expected: [reinterpretU16AsF16(0x9381), reinterpretU16AsF16(0x9380)] },
+ ]
+ )
+ .fn(t => {
+ const value = t.params.value;
+ const expected = t.params.expected;
+
+ const got = correctlyRoundedF16(value);
+ t.expect(
+ objectEquals(expected, got),
+ `correctlyRoundedF16(${f64(value)}) returned [${got.map(f16)}]. Expected [${expected.map(
+ f16
+ )}]`
+ );
+ });
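
For readers unfamiliar with the acceptance intervals above: for a finite f64 input strictly inside the f32 range, the correctly-rounded result set is the input itself when it is exactly representable, and otherwise the two adjacent f32 values that bracket it. The sketch below illustrates only that bracketing step; the helper names are hypothetical and it deliberately skips the overflow-to-infinity and NaN rules exercised by the edge cases, so it is not the suite's implementation.

// Illustrative sketch only (not the CTS helper): bracket a finite f64 value
// that lies strictly inside the f32 range between adjacent f32 values.
function f32ToBits(x: number): number {
  const buf = new DataView(new ArrayBuffer(4));
  buf.setFloat32(0, x);
  return buf.getUint32(0);
}

function f32FromBits(bits: number): number {
  const buf = new DataView(new ArrayBuffer(4));
  buf.setUint32(0, bits);
  return buf.getFloat32(0);
}

// Map f32 bit patterns to an integer that is monotonic in the float's value,
// so that +/-1 in this space steps to the adjacent f32.
function f32BitsToOrdered(bits: number): number {
  return (bits & 0x80000000) !== 0 ? -(bits & 0x7fffffff) : bits;
}

function orderedToF32Bits(ord: number): number {
  return ord < 0 ? (-ord | 0x80000000) >>> 0 : ord;
}

function bracketAsF32(value: number): number[] {
  const rounded = Math.fround(value); // round-to-nearest-even to f32
  if (rounded === value) {
    return [value]; // exactly representable: a single acceptable result
  }
  const ord = f32BitsToOrdered(f32ToBits(rounded));
  const neighbour = f32FromBits(orderedToF32Bits(rounded < value ? ord + 1 : ord - 1));
  return rounded < value ? [rounded, neighbour] : [neighbour, rounded];
}

// e.g. bracketAsF32(reinterpretU64AsF64(0x3ff0_0000_0000_0001n)) yields the two f32
// neighbours of 1.0, as in the 64-bit normal rows above.
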
+
+interface frexpCase {
+ input: number;
+ fract: number;
+ exp: number;
+}
+
+// prettier-ignore
+const kFrexpCases = {
+ f32: [
+ { input: kValue.f32.positive.max, fract: 0.9999999403953552, exp: 128 },
+ { input: kValue.f32.positive.min, fract: 0.5, exp: -125 },
+ { input: kValue.f32.negative.max, fract: -0.5, exp: -125 },
+ { input: kValue.f32.negative.min, fract: -0.9999999403953552, exp: 128 },
+ { input: kValue.f32.positive.subnormal.max, fract: 0.9999998807907104, exp: -126 },
+ { input: kValue.f32.positive.subnormal.min, fract: 0.5, exp: -148 },
+ { input: kValue.f32.negative.subnormal.max, fract: -0.5, exp: -148 },
+ { input: kValue.f32.negative.subnormal.min, fract: -0.9999998807907104, exp: -126 },
+ ] as frexpCase[],
+ f16: [
+ { input: kValue.f16.positive.max, fract: 0.99951171875, exp: 16 },
+ { input: kValue.f16.positive.min, fract: 0.5, exp: -13 },
+ { input: kValue.f16.negative.max, fract: -0.5, exp: -13 },
+ { input: kValue.f16.negative.min, fract: -0.99951171875, exp: 16 },
+ { input: kValue.f16.positive.subnormal.max, fract: 0.9990234375, exp: -14 },
+ { input: kValue.f16.positive.subnormal.min, fract: 0.5, exp: -23 },
+ { input: kValue.f16.negative.subnormal.max, fract: -0.5, exp: -23 },
+ { input: kValue.f16.negative.subnormal.min, fract: -0.9990234375, exp: -14 },
+ ] as frexpCase[],
+ f64: [
+ { input: kValue.f64.positive.max, fract: reinterpretU64AsF64(0x3fef_ffff_ffff_ffffn) /* ~0.9999999999999999 */, exp: 1024 },
+ { input: kValue.f64.positive.min, fract: 0.5, exp: -1021 },
+ { input: kValue.f64.negative.max, fract: -0.5, exp: -1021 },
+ { input: kValue.f64.negative.min, fract: reinterpretU64AsF64(0xbfef_ffff_ffff_ffffn) /* ~-0.9999999999999999 */, exp: 1024 },
+ { input: kValue.f64.positive.subnormal.max, fract: reinterpretU64AsF64(0x3fef_ffff_ffff_fffen) /* ~0.9999999999999998 */, exp: -1022 },
+ { input: kValue.f64.positive.subnormal.min, fract: 0.5, exp: -1073 },
+ { input: kValue.f64.negative.subnormal.max, fract: -0.5, exp: -1073 },
+ { input: kValue.f64.negative.subnormal.min, fract: reinterpretU64AsF64(0xbfef_ffff_ffff_fffen) /* ~-0.9999999999999998 */, exp: -1022 },
+ ] as frexpCase[],
+} as const;
+
+g.test('frexp')
+ .params(u =>
+ u
+ .combine('trait', ['f32', 'f16', 'f64'] as const)
+ .beginSubcases()
+ .expandWithParams<frexpCase>(p => {
+ // prettier-ignore
+ return [
+ // +/- 0.0
+ { input: 0, fract: 0, exp: 0 },
+ { input: -0, fract: -0, exp: 0 },
+ // Normal float values that can be exactly represented by all float types
+ { input: 0.171875, fract: 0.6875, exp: -2 },
+ { input: -0.171875, fract: -0.6875, exp: -2 },
+ { input: 0.5, fract: 0.5, exp: 0 },
+ { input: -0.5, fract: -0.5, exp: 0 },
+ { input: 1, fract: 0.5, exp: 1 },
+ { input: -1, fract: -0.5, exp: 1 },
+ { input: 2, fract: 0.5, exp: 2 },
+ { input: -2, fract: -0.5, exp: 2 },
+ { input: 10000, fract: 0.6103515625, exp: 14 },
+ { input: -10000, fract: -0.6103515625, exp: 14 },
+          // Normal and subnormal cases that are different for each type
+ ...kFrexpCases[p.trait],
+ // Inf and NaN
+ { input: Number.POSITIVE_INFINITY, fract: Number.POSITIVE_INFINITY, exp: 0 },
+ { input: Number.NEGATIVE_INFINITY, fract: Number.NEGATIVE_INFINITY, exp: 0 },
+ { input: Number.NaN, fract: Number.NaN, exp: 0 },
+ ];
+ })
+ )
+ .fn(test => {
+ const input = test.params.input;
+ const got = frexp(input, test.params.trait);
+ const expect = { fract: test.params.fract, exp: test.params.exp };
+
+ test.expect(
+ objectEquals(got, expect),
+ `frexp(${input}, ${test.params.trait}) returned { fract: ${got.fract}, exp: ${got.exp} }. Expected { fract: ${expect.fract}, exp: ${expect.exp} }`
+ );
+ });
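
Every finite row above obeys the usual frexp contract, input === fract * 2 ** exp with 0.5 <= |fract| < 1 for non-zero inputs (and fract === exp === 0 for +/-0). A quick stand-alone check of one shared row, using a helper name of our own for illustration and restricted to rows where 2 ** exp stays finite:

// Sanity-check the frexp invariant for a finite row:
// input === fract * 2 ** exp, with 0.5 <= |fract| < 1 for non-zero inputs.
function checkFrexpRow(input: number, fract: number, exp: number): boolean {
  return input === fract * 2 ** exp;
}

console.assert(checkFrexpRow(10000, 0.6103515625, 14)); // 0.6103515625 * 16384 === 10000
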
+
+interface lerpCase {
+ a: number;
+ b: number;
+ t: number;
+ result: number;
+}
+
+g.test('lerp')
+ .paramsSimple<lerpCase>([
+ // Infinite cases
+ { a: 0.0, b: Number.POSITIVE_INFINITY, t: 0.5, result: Number.NaN },
+ { a: Number.POSITIVE_INFINITY, b: 0.0, t: 0.5, result: Number.NaN },
+ { a: Number.NEGATIVE_INFINITY, b: 1.0, t: 0.5, result: Number.NaN },
+ { a: 1.0, b: Number.NEGATIVE_INFINITY, t: 0.5, result: Number.NaN },
+ { a: Number.NEGATIVE_INFINITY, b: Number.POSITIVE_INFINITY, t: 0.5, result: Number.NaN },
+ { a: Number.POSITIVE_INFINITY, b: Number.NEGATIVE_INFINITY, t: 0.5, result: Number.NaN },
+ { a: 0.0, b: 1.0, t: Number.NEGATIVE_INFINITY, result: Number.NaN },
+ { a: 1.0, b: 0.0, t: Number.NEGATIVE_INFINITY, result: Number.NaN },
+ { a: 0.0, b: 1.0, t: Number.POSITIVE_INFINITY, result: Number.NaN },
+ { a: 1.0, b: 0.0, t: Number.POSITIVE_INFINITY, result: Number.NaN },
+
+ // [0.0, 1.0] cases
+ { a: 0.0, b: 1.0, t: -1.0, result: -1.0 },
+ { a: 0.0, b: 1.0, t: 0.0, result: 0.0 },
+ { a: 0.0, b: 1.0, t: 0.1, result: 0.1 },
+ { a: 0.0, b: 1.0, t: 0.01, result: 0.01 },
+ { a: 0.0, b: 1.0, t: 0.001, result: 0.001 },
+ { a: 0.0, b: 1.0, t: 0.25, result: 0.25 },
+ { a: 0.0, b: 1.0, t: 0.5, result: 0.5 },
+ { a: 0.0, b: 1.0, t: 0.9, result: 0.9 },
+ { a: 0.0, b: 1.0, t: 0.99, result: 0.99 },
+ { a: 0.0, b: 1.0, t: 0.999, result: 0.999 },
+ { a: 0.0, b: 1.0, t: 1.0, result: 1.0 },
+ { a: 0.0, b: 1.0, t: 2.0, result: 2.0 },
+
+ // [1.0, 0.0] cases
+ { a: 1.0, b: 0.0, t: -1.0, result: 2.0 },
+ { a: 1.0, b: 0.0, t: 0.0, result: 1.0 },
+ { a: 1.0, b: 0.0, t: 0.1, result: 0.9 },
+ { a: 1.0, b: 0.0, t: 0.01, result: 0.99 },
+ { a: 1.0, b: 0.0, t: 0.001, result: 0.999 },
+ { a: 1.0, b: 0.0, t: 0.25, result: 0.75 },
+ { a: 1.0, b: 0.0, t: 0.5, result: 0.5 },
+ { a: 1.0, b: 0.0, t: 0.9, result: 0.1 },
+ { a: 1.0, b: 0.0, t: 0.99, result: 0.01 },
+ { a: 1.0, b: 0.0, t: 0.999, result: 0.001 },
+ { a: 1.0, b: 0.0, t: 1.0, result: 0.0 },
+ { a: 1.0, b: 0.0, t: 2.0, result: -1.0 },
+
+ // [0.0, 10.0] cases
+ { a: 0.0, b: 10.0, t: -1.0, result: -10.0 },
+ { a: 0.0, b: 10.0, t: 0.0, result: 0.0 },
+ { a: 0.0, b: 10.0, t: 0.1, result: 1.0 },
+ { a: 0.0, b: 10.0, t: 0.01, result: 0.1 },
+ { a: 0.0, b: 10.0, t: 0.001, result: 0.01 },
+ { a: 0.0, b: 10.0, t: 0.25, result: 2.5 },
+ { a: 0.0, b: 10.0, t: 0.5, result: 5.0 },
+ { a: 0.0, b: 10.0, t: 0.9, result: 9.0 },
+ { a: 0.0, b: 10.0, t: 0.99, result: 9.9 },
+ { a: 0.0, b: 10.0, t: 0.999, result: 9.99 },
+ { a: 0.0, b: 10.0, t: 1.0, result: 10.0 },
+ { a: 0.0, b: 10.0, t: 2.0, result: 20.0 },
+
+ // [10.0, 0.0] cases
+ { a: 10.0, b: 0.0, t: -1.0, result: 20.0 },
+ { a: 10.0, b: 0.0, t: 0.0, result: 10.0 },
+    { a: 10.0, b: 0.0, t: 0.1, result: 9.0 },
+ { a: 10.0, b: 0.0, t: 0.01, result: 9.9 },
+ { a: 10.0, b: 0.0, t: 0.001, result: 9.99 },
+ { a: 10.0, b: 0.0, t: 0.25, result: 7.5 },
+ { a: 10.0, b: 0.0, t: 0.5, result: 5.0 },
+ { a: 10.0, b: 0.0, t: 0.9, result: 1.0 },
+ { a: 10.0, b: 0.0, t: 0.99, result: 0.1 },
+ { a: 10.0, b: 0.0, t: 0.999, result: 0.01 },
+ { a: 10.0, b: 0.0, t: 1.0, result: 0.0 },
+ { a: 10.0, b: 0.0, t: 2.0, result: -10.0 },
+
+ // [2.0, 10.0] cases
+ { a: 2.0, b: 10.0, t: -1.0, result: -6.0 },
+ { a: 2.0, b: 10.0, t: 0.0, result: 2.0 },
+ { a: 2.0, b: 10.0, t: 0.1, result: 2.8 },
+ { a: 2.0, b: 10.0, t: 0.01, result: 2.08 },
+ { a: 2.0, b: 10.0, t: 0.001, result: 2.008 },
+ { a: 2.0, b: 10.0, t: 0.25, result: 4.0 },
+ { a: 2.0, b: 10.0, t: 0.5, result: 6.0 },
+ { a: 2.0, b: 10.0, t: 0.9, result: 9.2 },
+ { a: 2.0, b: 10.0, t: 0.99, result: 9.92 },
+ { a: 2.0, b: 10.0, t: 0.999, result: 9.992 },
+ { a: 2.0, b: 10.0, t: 1.0, result: 10.0 },
+ { a: 2.0, b: 10.0, t: 2.0, result: 18.0 },
+
+ // [10.0, 2.0] cases
+ { a: 10.0, b: 2.0, t: -1.0, result: 18.0 },
+ { a: 10.0, b: 2.0, t: 0.0, result: 10.0 },
+ { a: 10.0, b: 2.0, t: 0.1, result: 9.2 },
+ { a: 10.0, b: 2.0, t: 0.01, result: 9.92 },
+ { a: 10.0, b: 2.0, t: 0.001, result: 9.992 },
+ { a: 10.0, b: 2.0, t: 0.25, result: 8.0 },
+ { a: 10.0, b: 2.0, t: 0.5, result: 6.0 },
+ { a: 10.0, b: 2.0, t: 0.9, result: 2.8 },
+ { a: 10.0, b: 2.0, t: 0.99, result: 2.08 },
+ { a: 10.0, b: 2.0, t: 0.999, result: 2.008 },
+ { a: 10.0, b: 2.0, t: 1.0, result: 2.0 },
+ { a: 10.0, b: 2.0, t: 2.0, result: -6.0 },
+
+ // [-1.0, 1.0] cases
+ { a: -1.0, b: 1.0, t: -2.0, result: -5.0 },
+ { a: -1.0, b: 1.0, t: 0.0, result: -1.0 },
+ { a: -1.0, b: 1.0, t: 0.1, result: -0.8 },
+ { a: -1.0, b: 1.0, t: 0.01, result: -0.98 },
+ { a: -1.0, b: 1.0, t: 0.001, result: -0.998 },
+ { a: -1.0, b: 1.0, t: 0.25, result: -0.5 },
+ { a: -1.0, b: 1.0, t: 0.5, result: 0.0 },
+ { a: -1.0, b: 1.0, t: 0.9, result: 0.8 },
+ { a: -1.0, b: 1.0, t: 0.99, result: 0.98 },
+ { a: -1.0, b: 1.0, t: 0.999, result: 0.998 },
+ { a: -1.0, b: 1.0, t: 1.0, result: 1.0 },
+ { a: -1.0, b: 1.0, t: 2.0, result: 3.0 },
+
+ // [1.0, -1.0] cases
+ { a: 1.0, b: -1.0, t: -2.0, result: 5.0 },
+ { a: 1.0, b: -1.0, t: 0.0, result: 1.0 },
+ { a: 1.0, b: -1.0, t: 0.1, result: 0.8 },
+ { a: 1.0, b: -1.0, t: 0.01, result: 0.98 },
+ { a: 1.0, b: -1.0, t: 0.001, result: 0.998 },
+ { a: 1.0, b: -1.0, t: 0.25, result: 0.5 },
+ { a: 1.0, b: -1.0, t: 0.5, result: 0.0 },
+ { a: 1.0, b: -1.0, t: 0.9, result: -0.8 },
+ { a: 1.0, b: -1.0, t: 0.99, result: -0.98 },
+ { a: 1.0, b: -1.0, t: 0.999, result: -0.998 },
+ { a: 1.0, b: -1.0, t: 1.0, result: -1.0 },
+ { a: 1.0, b: -1.0, t: 2.0, result: -3.0 },
+
+ // [-1.0, 0.0] cases
+ { a: -1.0, b: 0.0, t: -1.0, result: -2.0 },
+ { a: -1.0, b: 0.0, t: 0.0, result: -1.0 },
+ { a: -1.0, b: 0.0, t: 0.1, result: -0.9 },
+ { a: -1.0, b: 0.0, t: 0.01, result: -0.99 },
+ { a: -1.0, b: 0.0, t: 0.001, result: -0.999 },
+ { a: -1.0, b: 0.0, t: 0.25, result: -0.75 },
+ { a: -1.0, b: 0.0, t: 0.5, result: -0.5 },
+ { a: -1.0, b: 0.0, t: 0.9, result: -0.1 },
+ { a: -1.0, b: 0.0, t: 0.99, result: -0.01 },
+ { a: -1.0, b: 0.0, t: 0.999, result: -0.001 },
+ { a: -1.0, b: 0.0, t: 1.0, result: 0.0 },
+ { a: -1.0, b: 0.0, t: 2.0, result: 1.0 },
+
+ // [0.0, -1.0] cases
+ { a: 0.0, b: -1.0, t: -1.0, result: 1.0 },
+ { a: 0.0, b: -1.0, t: 0.0, result: 0.0 },
+ { a: 0.0, b: -1.0, t: 0.1, result: -0.1 },
+ { a: 0.0, b: -1.0, t: 0.01, result: -0.01 },
+ { a: 0.0, b: -1.0, t: 0.001, result: -0.001 },
+ { a: 0.0, b: -1.0, t: 0.25, result: -0.25 },
+ { a: 0.0, b: -1.0, t: 0.5, result: -0.5 },
+ { a: 0.0, b: -1.0, t: 0.9, result: -0.9 },
+ { a: 0.0, b: -1.0, t: 0.99, result: -0.99 },
+ { a: 0.0, b: -1.0, t: 0.999, result: -0.999 },
+ { a: 0.0, b: -1.0, t: 1.0, result: -1.0 },
+ { a: 0.0, b: -1.0, t: 2.0, result: -2.0 },
+ ])
+ .fn(test => {
+ const a = test.params.a;
+ const b = test.params.b;
+ const t = test.params.t;
+ const got = lerp(a, b, t);
+ const expect = test.params.result;
+
+ test.expect(
+ (Number.isNaN(got) && Number.isNaN(expect)) || withinOneULPF32(got, expect, 'flush'),
+ `lerp(${a}, ${b}, ${t}) returned ${got}. Expected ${expect}`
+ );
+ });
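
The table above fully pins down the behaviour being tested: any non-finite endpoint yields NaN, and otherwise t is applied as a weight between a and b, so that t === 1.0 returns exactly b and a non-finite t propagates to NaN. The following is only a sketch consistent with those rows, not the framework's lerp, which may use a more elaborate numerically stable form.

// Sketch only, consistent with the cases above (not the CTS implementation).
function lerpSketch(a: number, b: number, t: number): number {
  if (!Number.isFinite(a) || !Number.isFinite(b)) {
    return Number.NaN; // infinite endpoints have no meaningful interpolation
  }
  // The weighted form returns exactly b at t === 1.0, and an infinite t
  // produces NaN through the Infinity * 0 term.
  return (1 - t) * a + t * b;
}

// e.g. lerpSketch(2.0, 10.0, 0.25) === 4.0, matching the [2.0, 10.0] row above.
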
+
+interface lerpBigIntCase {
+ a: bigint;
+ b: bigint;
+ idx: number;
+ steps: number;
+ result: bigint;
+}
+
+g.test('lerpBigInt')
+ .paramsSimple<lerpBigIntCase>([
+ // [0n, 1000n] cases
+ { a: 0n, b: 1000n, idx: 0, steps: 1, result: 0n },
+ { a: 0n, b: 1000n, idx: 0, steps: 2, result: 0n },
+ { a: 0n, b: 1000n, idx: 1, steps: 2, result: 1000n },
+ { a: 0n, b: 1000n, idx: 0, steps: 1000, result: 0n },
+ { a: 0n, b: 1000n, idx: 500, steps: 1000, result: 500n },
+ { a: 0n, b: 1000n, idx: 999, steps: 1000, result: 1000n },
+
+ // [1000n, 0n] cases
+ { a: 1000n, b: 0n, idx: 0, steps: 1, result: 1000n },
+ { a: 1000n, b: 0n, idx: 0, steps: 2, result: 1000n },
+ { a: 1000n, b: 0n, idx: 1, steps: 2, result: 0n },
+ { a: 1000n, b: 0n, idx: 0, steps: 1000, result: 1000n },
+ { a: 1000n, b: 0n, idx: 500, steps: 1000, result: 500n },
+ { a: 1000n, b: 0n, idx: 999, steps: 1000, result: 0n },
+
+ // [0n, -1000n] cases
+ { a: 0n, b: -1000n, idx: 0, steps: 1, result: 0n },
+ { a: 0n, b: -1000n, idx: 0, steps: 2, result: 0n },
+ { a: 0n, b: -1000n, idx: 1, steps: 2, result: -1000n },
+ { a: 0n, b: -1000n, idx: 0, steps: 1000, result: 0n },
+ { a: 0n, b: -1000n, idx: 500, steps: 1000, result: -500n },
+ { a: 0n, b: -1000n, idx: 999, steps: 1000, result: -1000n },
+
+ // [-1000n, 0n] cases
+ { a: -1000n, b: 0n, idx: 0, steps: 1, result: -1000n },
+ { a: -1000n, b: 0n, idx: 0, steps: 2, result: -1000n },
+ { a: -1000n, b: 0n, idx: 1, steps: 2, result: 0n },
+ { a: -1000n, b: 0n, idx: 0, steps: 1000, result: -1000n },
+ { a: -1000n, b: 0n, idx: 500, steps: 1000, result: -500n },
+ { a: -1000n, b: 0n, idx: 999, steps: 1000, result: 0n },
+
+ // [100n, 1000n] cases
+ { a: 100n, b: 1000n, idx: 0, steps: 1, result: 100n },
+ { a: 100n, b: 1000n, idx: 0, steps: 2, result: 100n },
+ { a: 100n, b: 1000n, idx: 1, steps: 2, result: 1000n },
+ { a: 100n, b: 1000n, idx: 0, steps: 9, result: 100n },
+ { a: 100n, b: 1000n, idx: 4, steps: 9, result: 550n },
+ { a: 100n, b: 1000n, idx: 8, steps: 9, result: 1000n },
+
+ // [1000n, 100n] cases
+ { a: 1000n, b: 100n, idx: 0, steps: 1, result: 1000n },
+ { a: 1000n, b: 100n, idx: 0, steps: 2, result: 1000n },
+ { a: 1000n, b: 100n, idx: 1, steps: 2, result: 100n },
+ { a: 1000n, b: 100n, idx: 0, steps: 9, result: 1000n },
+ { a: 1000n, b: 100n, idx: 4, steps: 9, result: 550n },
+ { a: 1000n, b: 100n, idx: 8, steps: 9, result: 100n },
+
+    // [-1000n, 1000n] cases
+ { a: -1000n, b: 1000n, idx: 0, steps: 1, result: -1000n },
+ { a: -1000n, b: 1000n, idx: 0, steps: 2, result: -1000n },
+ { a: -1000n, b: 1000n, idx: 1, steps: 2, result: 1000n },
+ { a: -1000n, b: 1000n, idx: 0, steps: 9, result: -1000n },
+ { a: -1000n, b: 1000n, idx: 4, steps: 9, result: 0n },
+ { a: -1000n, b: 1000n, idx: 8, steps: 9, result: 1000n },
+ ])
+ .fn(test => {
+ const a = test.params.a;
+ const b = test.params.b;
+ const idx = test.params.idx;
+ const steps = test.params.steps;
+ const got = lerpBigInt(a, b, idx, steps);
+ const expect = test.params.result;
+
+ test.expect(
+ got === expect,
+ `lerpBigInt(${a}, ${b}, ${idx}, ${steps}) returned ${got}. Expected ${expect}`
+ );
+ });
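
An integer analogue consistent with the expectations above maps idx in [0, steps) onto the inclusive range [a, b], leaning on the fact that BigInt division truncates toward zero. This is a hypothetical sketch; the suite's lerpBigInt may differ in detail.

// Sketch only: bigint interpolation consistent with the cases above.
function lerpBigIntSketch(a: bigint, b: bigint, idx: number, steps: number): bigint {
  if (steps <= 1) {
    return a; // a single step can only produce the start point
  }
  // BigInt division truncates toward zero, matching e.g. the idx: 500 rows.
  return a + ((b - a) * BigInt(idx)) / (BigInt(steps) - 1n);
}

// e.g. lerpBigIntSketch(100n, 1000n, 4, 9) === 550n, as in the row above.
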
+
+interface rangeCase {
+ a: number;
+ b: number;
+ num_steps: number;
+ result: number[];
+}
+
+g.test('linearRange')
+ .paramsSimple<rangeCase>(
+ // prettier-ignore
+ [
+ { a: 0.0, b: Number.POSITIVE_INFINITY, num_steps: 10, result: new Array<number>(10).fill(Number.NaN) },
+ { a: Number.POSITIVE_INFINITY, b: 0.0, num_steps: 10, result: new Array<number>(10).fill(Number.NaN) },
+ { a: Number.NEGATIVE_INFINITY, b: 1.0, num_steps: 10, result: new Array<number>(10).fill(Number.NaN) },
+ { a: 1.0, b: Number.NEGATIVE_INFINITY, num_steps: 10, result: new Array<number>(10).fill(Number.NaN) },
+ { a: Number.NEGATIVE_INFINITY, b: Number.POSITIVE_INFINITY, num_steps: 10, result: new Array<number>(10).fill(Number.NaN) },
+ { a: Number.POSITIVE_INFINITY, b: Number.NEGATIVE_INFINITY, num_steps: 10, result: new Array<number>(10).fill(Number.NaN) },
+ { a: 0.0, b: 0.0, num_steps: 10, result: new Array<number>(10).fill(0.0) },
+ { a: 10.0, b: 10.0, num_steps: 10, result: new Array<number>(10).fill(10.0) },
+ { a: 0.0, b: 10.0, num_steps: 1, result: [0.0] },
+      { a: 10.0, b: 0.0, num_steps: 1, result: [10.0] },
+ { a: 0.0, b: 10.0, num_steps: 11, result: [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0] },
+ { a: 10.0, b: 0.0, num_steps: 11, result: [10.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0, 0.0] },
+ { a: 0.0, b: 1000.0, num_steps: 11, result: [0.0, 100.0, 200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0, 1000.0] },
+ { a: 1000.0, b: 0.0, num_steps: 11, result: [1000.0, 900.0, 800.0, 700.0, 600.0, 500.0, 400.0, 300.0, 200.0, 100.0, 0.0] },
+ { a: 1.0, b: 5.0, num_steps: 5, result: [1.0, 2.0, 3.0, 4.0, 5.0] },
+ { a: 5.0, b: 1.0, num_steps: 5, result: [5.0, 4.0, 3.0, 2.0, 1.0] },
+ { a: 0.0, b: 1.0, num_steps: 11, result: [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0] },
+ { a: 1.0, b: 0.0, num_steps: 11, result: [1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0] },
+ { a: 0.0, b: 1.0, num_steps: 5, result: [0.0, 0.25, 0.5, 0.75, 1.0] },
+ { a: 1.0, b: 0.0, num_steps: 5, result: [1.0, 0.75, 0.5, 0.25, 0.0] },
+ { a: -1.0, b: 1.0, num_steps: 11, result: [-1.0, -0.8, -0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6, 0.8, 1.0] },
+ { a: 1.0, b: -1.0, num_steps: 11, result: [1.0, 0.8, 0.6, 0.4, 0.2, 0.0, -0.2, -0.4, -0.6, -0.8, -1.0] },
+ { a: -1.0, b: 0, num_steps: 11, result: [-1.0, -0.9, -0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, 0.0] },
+ { a: 0.0, b: -1.0, num_steps: 11, result: [0.0, -0.1, -0.2, -0.3, -0.4, -0.5, -0.6, -0.7, -0.8, -0.9, -1.0] },
+ ]
+ )
+ .fn(test => {
+ const a = test.params.a;
+ const b = test.params.b;
+ const num_steps = test.params.num_steps;
+ const got = linearRange(a, b, num_steps);
+ const expect = test.params.result;
+
+ test.expect(
+ compareArrayOfNumbersF32(got, expect, 'no-flush'),
+ `linearRange(${a}, ${b}, ${num_steps}) returned ${got}. Expected ${expect}`
+ );
+ });
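
linearRange, as characterised above, is the same interpolation sampled at evenly spaced t values: num_steps === 1 collapses to [a], and a non-finite endpoint turns every element into NaN. A self-contained sketch under those assumptions (not the suite's code):

// Sketch only: num_steps evenly spaced samples over [a, b], inclusive.
function linearRangeSketch(a: number, b: number, num_steps: number): number[] {
  if (num_steps <= 0) {
    return [];
  }
  if (num_steps === 1) {
    return [a]; // matches the single-step rows above
  }
  if (!Number.isFinite(a) || !Number.isFinite(b)) {
    return new Array<number>(num_steps).fill(Number.NaN);
  }
  return Array.from({ length: num_steps }, (_, i) => {
    const t = i / (num_steps - 1);
    return (1 - t) * a + t * b;
  });
}
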
+
+g.test('biasedRange')
+ .paramsSimple<rangeCase>(
+ // prettier-ignore
+ [
+ { a: 0.0, b: Number.POSITIVE_INFINITY, num_steps: 10, result: new Array<number>(10).fill(Number.NaN) },
+ { a: Number.POSITIVE_INFINITY, b: 0.0, num_steps: 10, result: new Array<number>(10).fill(Number.NaN) },
+ { a: Number.NEGATIVE_INFINITY, b: 1.0, num_steps: 10, result: new Array<number>(10).fill(Number.NaN) },
+ { a: 1.0, b: Number.NEGATIVE_INFINITY, num_steps: 10, result: new Array<number>(10).fill(Number.NaN) },
+ { a: Number.NEGATIVE_INFINITY, b: Number.POSITIVE_INFINITY, num_steps: 10, result: new Array<number>(10).fill(Number.NaN) },
+ { a: Number.POSITIVE_INFINITY, b: Number.NEGATIVE_INFINITY, num_steps: 10, result: new Array<number>(10).fill(Number.NaN) },
+ { a: 0.0, b: 0.0, num_steps: 10, result: new Array<number>(10).fill(0.0) },
+ { a: 10.0, b: 10.0, num_steps: 10, result: new Array<number>(10).fill(10.0) },
+ { a: 0.0, b: 10.0, num_steps: 1, result: [0.0] },
+ { a: 10.0, b: 0.0, num_steps: 1, result: [10.0] },
+ { a: 0.0, b: 10.0, num_steps: 11, result: [0.0, 0.1, 0.4, 0.9, 1.6, 2.5, 3.6, 4.9, 6.4, 8.1, 10.0] },
+ { a: 10.0, b: 0.0, num_steps: 11, result: [10.0, 9.9, 9.6, 9.1, 8.4, 7.5, 6.4, 5.1, 3.6, 1.9, 0.0] },
+ { a: 0.0, b: 1000.0, num_steps: 11, result: [0.0, 10.0, 40.0, 90.0, 160.0, 250.0, 360.0, 490.0, 640.0, 810.0, 1000.0] },
+ { a: 1000.0, b: 0.0, num_steps: 11, result: [1000.0, 990.0, 960.0, 910.0, 840.0, 750.0, 640.0, 510.0, 360.0, 190.0, 0.0] },
+ { a: 1.0, b: 5.0, num_steps: 5, result: [1.0, 1.25, 2.0, 3.25, 5.0] },
+ { a: 5.0, b: 1.0, num_steps: 5, result: [5.0, 4.75, 4.0, 2.75, 1.0] },
+ { a: 0.0, b: 1.0, num_steps: 11, result: [0.0, 0.01, 0.04, 0.09, 0.16, 0.25, 0.36, 0.49, 0.64, 0.81, 1.0] },
+ { a: 1.0, b: 0.0, num_steps: 11, result: [1.0, 0.99, 0.96, 0.91, 0.84, 0.75, 0.64, 0.51, 0.36, 0.19, 0.0] },
+ { a: 0.0, b: 1.0, num_steps: 5, result: [0.0, 0.0625, 0.25, 0.5625, 1.0] },
+ { a: 1.0, b: 0.0, num_steps: 5, result: [1.0, 0.9375, 0.75, 0.4375, 0.0] },
+      { a: -1.0, b: 1.0, num_steps: 11, result: [-1.0, -0.98, -0.92, -0.82, -0.68, -0.5, -0.28, -0.02, 0.28, 0.62, 1.0] },
+      { a: 1.0, b: -1.0, num_steps: 11, result: [1.0, 0.98, 0.92, 0.82, 0.68, 0.5, 0.28, 0.02, -0.28, -0.62, -1.0] },
+      { a: -1.0, b: 0, num_steps: 11, result: [-1.0, -0.99, -0.96, -0.91, -0.84, -0.75, -0.64, -0.51, -0.36, -0.19, 0.0] },
+ { a: 0.0, b: -1.0, num_steps: 11, result: [0.0, -0.01, -0.04, -0.09, -0.16, -0.25, -0.36, -0.49, -0.64, -0.81, -1.0] },
+ ]
+ )
+ .fn(test => {
+ const a = test.params.a;
+ const b = test.params.b;
+ const num_steps = test.params.num_steps;
+ const got = biasedRange(a, b, num_steps);
+ const expect = test.params.result;
+
+ test.expect(
+ compareArrayOfNumbersF32(got, expect, 'no-flush'),
+ `biasedRange(${a}, ${b}, ${num_steps}) returned ${got}. Expected ${expect}`
+ );
+ });
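
biasedRange differs only in the sampling schedule: the expectations above correspond to a quadratic bias toward a, i.e. t = (i / (num_steps - 1))**2, which clusters samples near the start of the range. A sketch under that assumption:

// Sketch only: like linearRange, but samples biased toward `a` by squaring t.
function biasedRangeSketch(a: number, b: number, num_steps: number): number[] {
  if (num_steps <= 0) {
    return [];
  }
  if (num_steps === 1) {
    return [a];
  }
  if (!Number.isFinite(a) || !Number.isFinite(b)) {
    return new Array<number>(num_steps).fill(Number.NaN);
  }
  return Array.from({ length: num_steps }, (_, i) => {
    const t = (i / (num_steps - 1)) ** 2; // quadratic bias toward a
    return (1 - t) * a + t * b;
  });
}

// e.g. biasedRangeSketch(0.0, 1.0, 5) yields [0.0, 0.0625, 0.25, 0.5625, 1.0], as above.
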
+
+interface rangeBigIntCase {
+ a: bigint;
+ b: bigint;
+ num_steps: number;
+ result: bigint[];
+}
+
+g.test('linearRangeBigInt')
+ .paramsSimple<rangeBigIntCase>(
+ // prettier-ignore
+ [
+ { a: 0n, b: 0n, num_steps: 10, result: new Array<bigint>(10).fill(0n) },
+ { a: 10n, b: 10n, num_steps: 10, result: new Array<bigint>(10).fill(10n) },
+ { a: 0n, b: 10n, num_steps: 1, result: [0n] },
+ { a: 10n, b: 0n, num_steps: 1, result: [10n] },
+ { a: 0n, b: 10n, num_steps: 11, result: [0n, 1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n, 10n] },
+ { a: 10n, b: 0n, num_steps: 11, result: [10n, 9n, 8n, 7n, 6n, 5n, 4n, 3n, 2n, 1n, 0n] },
+ { a: 0n, b: 1000n, num_steps: 11, result: [0n, 100n, 200n, 300n, 400n, 500n, 600n, 700n, 800n, 900n, 1000n] },
+ { a: 1000n, b: 0n, num_steps: 11, result: [1000n, 900n, 800n, 700n, 600n, 500n, 400n, 300n, 200n, 100n, 0n] },
+ { a: 1n, b: 5n, num_steps: 5, result: [1n, 2n, 3n, 4n, 5n] },
+ { a: 5n, b: 1n, num_steps: 5, result: [5n, 4n, 3n, 2n, 1n] },
+ { a: 0n, b: 10n, num_steps: 5, result: [0n, 2n, 5n, 7n, 10n] },
+ { a: 10n, b: 0n, num_steps: 5, result: [10n, 8n, 5n, 3n, 0n] },
+ { a: -10n, b: 10n, num_steps: 11, result: [-10n, -8n, -6n, -4n, -2n, 0n, 2n, 4n, 6n, 8n, 10n] },
+ { a: 10n, b: -10n, num_steps: 11, result: [10n, 8n, 6n, 4n, 2n, 0n, -2n, -4n, -6n, -8n, -10n] },
+ { a: -10n, b: 0n, num_steps: 11, result: [-10n, -9n, -8n, -7n, -6n, -5n, -4n, -3n, -2n, -1n, 0n] },
+ { a: 0n, b: -10n, num_steps: 11, result: [0n, -1n, -2n, -3n, -4n, -5n, -6n, -7n, -8n, -9n, -10n] },
+ ]
+ )
+ .fn(test => {
+ const a = test.params.a;
+ const b = test.params.b;
+ const num_steps = test.params.num_steps;
+ const got = linearRangeBigInt(a, b, num_steps);
+ const expect = test.params.result;
+
+ test.expect(
+ objectEquals(got, expect),
+ `linearRangeBigInt(${a}, ${b}, ${num_steps}) returned ${got}. Expected ${expect}`
+ );
+ });
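
The bigint variant follows the same pattern, one truncating interpolation per index. Again a sketch, mirroring the lerpBigInt sketch earlier rather than the suite's implementation:

// Sketch only: num_steps evenly spaced bigint samples over [a, b], inclusive.
function linearRangeBigIntSketch(a: bigint, b: bigint, num_steps: number): bigint[] {
  if (num_steps <= 0) {
    return [];
  }
  if (num_steps === 1) {
    return [a];
  }
  return Array.from({ length: num_steps }, (_, i) =>
    // Truncating BigInt division, as in the lerpBigInt sketch above.
    a + ((b - a) * BigInt(i)) / (BigInt(num_steps) - 1n)
  );
}

// e.g. linearRangeBigIntSketch(0n, 10n, 5) yields [0n, 2n, 5n, 7n, 10n], as above.
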
+
+interface fullF32RangeCase {
+ neg_norm: number;
+ neg_sub: number;
+ pos_sub: number;
+ pos_norm: number;
+ expect: Array<number>;
+}
+
+g.test('fullF32Range')
+ .paramsSimple<fullF32RangeCase>(
+ // prettier-ignore
+ [
+ { neg_norm: 0, neg_sub: 0, pos_sub: 0, pos_norm: 0, expect: [ -0.0, 0.0 ] },
+ { neg_norm: 1, neg_sub: 0, pos_sub: 0, pos_norm: 0, expect: [ kValue.f32.negative.min, -0.0, 0.0] },
+ { neg_norm: 2, neg_sub: 0, pos_sub: 0, pos_norm: 0, expect: [ kValue.f32.negative.min, kValue.f32.negative.max, -0.0, 0.0 ] },
+ { neg_norm: 3, neg_sub: 0, pos_sub: 0, pos_norm: 0, expect: [ kValue.f32.negative.min, -1.9999998807907104, kValue.f32.negative.max, -0.0, 0.0 ] },
+ { neg_norm: 0, neg_sub: 1, pos_sub: 0, pos_norm: 0, expect: [ kValue.f32.negative.subnormal.min, -0.0, 0.0 ] },
+ { neg_norm: 0, neg_sub: 2, pos_sub: 0, pos_norm: 0, expect: [ kValue.f32.negative.subnormal.min, kValue.f32.negative.subnormal.max, -0.0, 0.0 ] },
+ { neg_norm: 0, neg_sub: 0, pos_sub: 1, pos_norm: 0, expect: [ -0.0, 0.0, kValue.f32.positive.subnormal.min ] },
+ { neg_norm: 0, neg_sub: 0, pos_sub: 2, pos_norm: 0, expect: [ -0.0, 0.0, kValue.f32.positive.subnormal.min, kValue.f32.positive.subnormal.max ] },
+ { neg_norm: 0, neg_sub: 0, pos_sub: 0, pos_norm: 1, expect: [ -0.0, 0.0, kValue.f32.positive.min ] },
+ { neg_norm: 0, neg_sub: 0, pos_sub: 0, pos_norm: 2, expect: [ -0.0, 0.0, kValue.f32.positive.min, kValue.f32.positive.max ] },
+ { neg_norm: 0, neg_sub: 0, pos_sub: 0, pos_norm: 3, expect: [ -0.0, 0.0, kValue.f32.positive.min, 1.9999998807907104, kValue.f32.positive.max ] },
+ { neg_norm: 1, neg_sub: 1, pos_sub: 1, pos_norm: 1, expect: [ kValue.f32.negative.min, kValue.f32.negative.subnormal.min, -0.0, 0.0, kValue.f32.positive.subnormal.min, kValue.f32.positive.min ] },
+ { neg_norm: 2, neg_sub: 2, pos_sub: 2, pos_norm: 2, expect: [ kValue.f32.negative.min, kValue.f32.negative.max, kValue.f32.negative.subnormal.min, kValue.f32.negative.subnormal.max, -0.0, 0.0, kValue.f32.positive.subnormal.min, kValue.f32.positive.subnormal.max, kValue.f32.positive.min, kValue.f32.positive.max ] },
+ ]
+ )
+ .fn(test => {
+ const neg_norm = test.params.neg_norm;
+ const neg_sub = test.params.neg_sub;
+ const pos_sub = test.params.pos_sub;
+ const pos_norm = test.params.pos_norm;
+ const got = fullF32Range({ neg_norm, neg_sub, pos_sub, pos_norm });
+ const expect = test.params.expect;
+
+ test.expect(
+ compareArrayOfNumbersF32(got, expect, 'no-flush'),
+ `fullF32Range(${neg_norm}, ${neg_sub}, ${pos_sub}, ${pos_norm}) returned [${got}]. Expected [${expect}]`
+ );
+ });
+
+interface fullF16RangeCase {
+ neg_norm: number;
+ neg_sub: number;
+ pos_sub: number;
+ pos_norm: number;
+ expect: Array<number>;
+}
+
+g.test('fullF16Range')
+ .paramsSimple<fullF16RangeCase>(
+ // prettier-ignore
+ [
+ { neg_norm: 0, neg_sub: 0, pos_sub: 0, pos_norm: 0, expect: [ -0.0, 0.0 ] },
+ { neg_norm: 1, neg_sub: 0, pos_sub: 0, pos_norm: 0, expect: [ kValue.f16.negative.min, -0.0, 0.0] },
+ { neg_norm: 2, neg_sub: 0, pos_sub: 0, pos_norm: 0, expect: [ kValue.f16.negative.min, kValue.f16.negative.max, -0.0, 0.0 ] },
+ { neg_norm: 3, neg_sub: 0, pos_sub: 0, pos_norm: 0, expect: [ kValue.f16.negative.min, -1.9990234375, kValue.f16.negative.max, -0.0, 0.0 ] },
+ { neg_norm: 0, neg_sub: 1, pos_sub: 0, pos_norm: 0, expect: [ kValue.f16.negative.subnormal.min, -0.0, 0.0 ] },
+ { neg_norm: 0, neg_sub: 2, pos_sub: 0, pos_norm: 0, expect: [ kValue.f16.negative.subnormal.min, kValue.f16.negative.subnormal.max, -0.0, 0.0 ] },
+ { neg_norm: 0, neg_sub: 0, pos_sub: 1, pos_norm: 0, expect: [ -0.0, 0.0, kValue.f16.positive.subnormal.min ] },
+ { neg_norm: 0, neg_sub: 0, pos_sub: 2, pos_norm: 0, expect: [ -0.0, 0.0, kValue.f16.positive.subnormal.min, kValue.f16.positive.subnormal.max ] },
+ { neg_norm: 0, neg_sub: 0, pos_sub: 0, pos_norm: 1, expect: [ -0.0, 0.0, kValue.f16.positive.min ] },
+ { neg_norm: 0, neg_sub: 0, pos_sub: 0, pos_norm: 2, expect: [ -0.0, 0.0, kValue.f16.positive.min, kValue.f16.positive.max ] },
+ { neg_norm: 0, neg_sub: 0, pos_sub: 0, pos_norm: 3, expect: [ -0.0, 0.0, kValue.f16.positive.min, 1.9990234375, kValue.f16.positive.max ] },
+ { neg_norm: 1, neg_sub: 1, pos_sub: 1, pos_norm: 1, expect: [ kValue.f16.negative.min, kValue.f16.negative.subnormal.min, -0.0, 0.0, kValue.f16.positive.subnormal.min, kValue.f16.positive.min ] },
+ { neg_norm: 2, neg_sub: 2, pos_sub: 2, pos_norm: 2, expect: [ kValue.f16.negative.min, kValue.f16.negative.max, kValue.f16.negative.subnormal.min, kValue.f16.negative.subnormal.max, -0.0, 0.0, kValue.f16.positive.subnormal.min, kValue.f16.positive.subnormal.max, kValue.f16.positive.min, kValue.f16.positive.max ] },
+ ]
+ )
+ .fn(test => {
+ const neg_norm = test.params.neg_norm;
+ const neg_sub = test.params.neg_sub;
+ const pos_sub = test.params.pos_sub;
+ const pos_norm = test.params.pos_norm;
+ const got = fullF16Range({ neg_norm, neg_sub, pos_sub, pos_norm });
+ const expect = test.params.expect;
+
+ test.expect(
+ compareArrayOfNumbersF32(got, expect),
+ `fullF16Range(${neg_norm}, ${neg_sub}, ${pos_sub}, ${pos_norm}) returned [${got}]. Expected [${expect}]`
+ );
+ });
+
+interface fullI32RangeCase {
+ neg_count: number;
+ pos_count: number;
+ expect: Array<number>;
+}
+
+g.test('fullI32Range')
+ .paramsSimple<fullI32RangeCase>(
+ // prettier-ignore
+ [
+ { neg_count: 0, pos_count: 0, expect: [0] },
+ { neg_count: 1, pos_count: 0, expect: [kValue.i32.negative.min, 0] },
+ { neg_count: 2, pos_count: 0, expect: [kValue.i32.negative.min, -1, 0] },
+ { neg_count: 3, pos_count: 0, expect: [kValue.i32.negative.min, -1610612736, -1, 0] },
+ { neg_count: 0, pos_count: 1, expect: [0, 1] },
+ { neg_count: 0, pos_count: 2, expect: [0, 1, kValue.i32.positive.max] },
+ { neg_count: 0, pos_count: 3, expect: [0, 1, 536870912, kValue.i32.positive.max] },
+ { neg_count: 1, pos_count: 1, expect: [kValue.i32.negative.min, 0, 1] },
+ { neg_count: 2, pos_count: 2, expect: [kValue.i32.negative.min, -1, 0, 1, kValue.i32.positive.max ] },
+ ]
+ )
+ .fn(test => {
+ const neg_count = test.params.neg_count;
+ const pos_count = test.params.pos_count;
+ const got = fullI32Range({ negative: neg_count, positive: pos_count });
+ const expect = test.params.expect;
+
+ test.expect(
+ compareArrayOfNumbersF32(got, expect),
+ `fullI32Range(${neg_count}, ${pos_count}) returned [${got}]. Expected [${expect}]`
+ );
+ });
+
+interface limitsBigIntBitsF64Case {
+ bits: bigint;
+ value: number;
+}
+
+// Test to confirm kBit and kValue constants are equivalent for f64
+g.test('f64LimitsEquivalency')
+ .paramsSimple<limitsBigIntBitsF64Case>([
+ { bits: kBit.f64.positive.max, value: kValue.f64.positive.max },
+ { bits: kBit.f64.positive.min, value: kValue.f64.positive.min },
+ { bits: kBit.f64.positive.nearest_max, value: kValue.f64.positive.nearest_max },
+ { bits: kBit.f64.positive.less_than_one, value: kValue.f64.positive.less_than_one },
+ { bits: kBit.f64.positive.pi.whole, value: kValue.f64.positive.pi.whole },
+ { bits: kBit.f64.positive.pi.three_quarters, value: kValue.f64.positive.pi.three_quarters },
+ { bits: kBit.f64.positive.pi.half, value: kValue.f64.positive.pi.half },
+ { bits: kBit.f64.positive.pi.third, value: kValue.f64.positive.pi.third },
+ { bits: kBit.f64.positive.pi.quarter, value: kValue.f64.positive.pi.quarter },
+ { bits: kBit.f64.positive.pi.sixth, value: kValue.f64.positive.pi.sixth },
+ { bits: kBit.f64.positive.e, value: kValue.f64.positive.e },
+ { bits: kBit.f64.max_ulp, value: kValue.f64.max_ulp },
+ { bits: kBit.f64.negative.max, value: kValue.f64.negative.max },
+ { bits: kBit.f64.negative.min, value: kValue.f64.negative.min },
+ { bits: kBit.f64.negative.nearest_min, value: kValue.f64.negative.nearest_min },
+ { bits: kBit.f64.negative.pi.whole, value: kValue.f64.negative.pi.whole },
+ { bits: kBit.f64.negative.pi.three_quarters, value: kValue.f64.negative.pi.three_quarters },
+ { bits: kBit.f64.negative.pi.half, value: kValue.f64.negative.pi.half },
+ { bits: kBit.f64.negative.pi.third, value: kValue.f64.negative.pi.third },
+ { bits: kBit.f64.negative.pi.quarter, value: kValue.f64.negative.pi.quarter },
+ { bits: kBit.f64.negative.pi.sixth, value: kValue.f64.negative.pi.sixth },
+ { bits: kBit.f64.positive.subnormal.max, value: kValue.f64.positive.subnormal.max },
+ { bits: kBit.f64.positive.subnormal.min, value: kValue.f64.positive.subnormal.min },
+ { bits: kBit.f64.negative.subnormal.max, value: kValue.f64.negative.subnormal.max },
+ { bits: kBit.f64.negative.subnormal.min, value: kValue.f64.negative.subnormal.min },
+ { bits: kBit.f64.positive.infinity, value: kValue.f64.positive.infinity },
+ { bits: kBit.f64.negative.infinity, value: kValue.f64.negative.infinity },
+ ])
+ .fn(test => {
+ const bits = test.params.bits;
+ const value = test.params.value;
+
+ const val_to_bits = bits === float64ToUint64(value);
+ const bits_to_val = value === uint64ToFloat64(bits);
+ test.expect(
+ val_to_bits && bits_to_val,
+ `bits = ${bits}, value = ${value}, returned val_to_bits as ${val_to_bits}, and bits_to_val as ${bits_to_val}, they are expected to be equivalent`
+ );
+ });
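
The equivalency check above depends on float64ToUint64 / uint64ToFloat64 doing a pure bit reinterpretation. For reference, one common way to express such a round-trip in JavaScript is through a shared DataView; this is an illustrative sketch with hypothetical names, not necessarily how the suite's utilities are implemented:

// Illustrative sketch of f64 <-> u64 bit reinterpretation (names hypothetical).
const f64Buf = new DataView(new ArrayBuffer(8));

function float64ToUint64Sketch(value: number): bigint {
  f64Buf.setFloat64(0, value);
  return f64Buf.getBigUint64(0);
}

function uint64ToFloat64Sketch(bits: bigint): number {
  f64Buf.setBigUint64(0, bits);
  return f64Buf.getFloat64(0);
}

// Round-trip example: uint64ToFloat64Sketch(float64ToUint64Sketch(Math.PI)) === Math.PI.
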
+
+interface limitsNumberBitsCase {
+ bits: number;
+ value: number;
+}
+
+// Test to confirm kBit and kValue constants are equivalent for f32
+g.test('f32LimitsEquivalency')
+ .paramsSimple<limitsNumberBitsCase>([
+ { bits: kBit.f32.positive.max, value: kValue.f32.positive.max },
+ { bits: kBit.f32.positive.min, value: kValue.f32.positive.min },
+ { bits: kBit.f32.positive.nearest_max, value: kValue.f32.positive.nearest_max },
+ { bits: kBit.f32.positive.less_than_one, value: kValue.f32.positive.less_than_one },
+ { bits: kBit.f32.positive.pi.whole, value: kValue.f32.positive.pi.whole },
+ { bits: kBit.f32.positive.pi.three_quarters, value: kValue.f32.positive.pi.three_quarters },
+ { bits: kBit.f32.positive.pi.half, value: kValue.f32.positive.pi.half },
+ { bits: kBit.f32.positive.pi.third, value: kValue.f32.positive.pi.third },
+ { bits: kBit.f32.positive.pi.quarter, value: kValue.f32.positive.pi.quarter },
+ { bits: kBit.f32.positive.pi.sixth, value: kValue.f32.positive.pi.sixth },
+ { bits: kBit.f32.positive.e, value: kValue.f32.positive.e },
+ { bits: kBit.f32.max_ulp, value: kValue.f32.max_ulp },
+ { bits: kBit.f32.negative.max, value: kValue.f32.negative.max },
+ { bits: kBit.f32.negative.min, value: kValue.f32.negative.min },
+ { bits: kBit.f32.negative.nearest_min, value: kValue.f32.negative.nearest_min },
+ { bits: kBit.f32.negative.pi.whole, value: kValue.f32.negative.pi.whole },
+ { bits: kBit.f32.negative.pi.three_quarters, value: kValue.f32.negative.pi.three_quarters },
+ { bits: kBit.f32.negative.pi.half, value: kValue.f32.negative.pi.half },
+ { bits: kBit.f32.negative.pi.third, value: kValue.f32.negative.pi.third },
+ { bits: kBit.f32.negative.pi.quarter, value: kValue.f32.negative.pi.quarter },
+ { bits: kBit.f32.negative.pi.sixth, value: kValue.f32.negative.pi.sixth },
+ { bits: kBit.f32.positive.subnormal.max, value: kValue.f32.positive.subnormal.max },
+ { bits: kBit.f32.positive.subnormal.min, value: kValue.f32.positive.subnormal.min },
+ { bits: kBit.f32.negative.subnormal.max, value: kValue.f32.negative.subnormal.max },
+ { bits: kBit.f32.negative.subnormal.min, value: kValue.f32.negative.subnormal.min },
+ { bits: kBit.f32.positive.infinity, value: kValue.f32.positive.infinity },
+ { bits: kBit.f32.negative.infinity, value: kValue.f32.negative.infinity },
+ ])
+ .fn(test => {
+ const bits = test.params.bits;
+ const value = test.params.value;
+
+ const val_to_bits = bits === float32ToUint32(value);
+ const bits_to_val = value === uint32ToFloat32(bits);
+ test.expect(
+ val_to_bits && bits_to_val,
+ `bits = ${bits}, value = ${value}, returned val_to_bits as ${val_to_bits}, and bits_to_val as ${bits_to_val}, they are expected to be equivalent`
+ );
+ });
+
+// Test to confirm kBit and kValue constants are equivalent for f16
+g.test('f16LimitsEquivalency')
+ .paramsSimple<limitsNumberBitsCase>([
+ { bits: kBit.f16.positive.max, value: kValue.f16.positive.max },
+ { bits: kBit.f16.positive.min, value: kValue.f16.positive.min },
+ { bits: kBit.f16.positive.nearest_max, value: kValue.f16.positive.nearest_max },
+ { bits: kBit.f16.positive.less_than_one, value: kValue.f16.positive.less_than_one },
+ { bits: kBit.f16.positive.pi.whole, value: kValue.f16.positive.pi.whole },
+ { bits: kBit.f16.positive.pi.three_quarters, value: kValue.f16.positive.pi.three_quarters },
+ { bits: kBit.f16.positive.pi.half, value: kValue.f16.positive.pi.half },
+ { bits: kBit.f16.positive.pi.third, value: kValue.f16.positive.pi.third },
+ { bits: kBit.f16.positive.pi.quarter, value: kValue.f16.positive.pi.quarter },
+ { bits: kBit.f16.positive.pi.sixth, value: kValue.f16.positive.pi.sixth },
+ { bits: kBit.f16.positive.e, value: kValue.f16.positive.e },
+ { bits: kBit.f16.max_ulp, value: kValue.f16.max_ulp },
+ { bits: kBit.f16.negative.max, value: kValue.f16.negative.max },
+ { bits: kBit.f16.negative.min, value: kValue.f16.negative.min },
+ { bits: kBit.f16.negative.nearest_min, value: kValue.f16.negative.nearest_min },
+ { bits: kBit.f16.negative.pi.whole, value: kValue.f16.negative.pi.whole },
+ { bits: kBit.f16.negative.pi.three_quarters, value: kValue.f16.negative.pi.three_quarters },
+ { bits: kBit.f16.negative.pi.half, value: kValue.f16.negative.pi.half },
+ { bits: kBit.f16.negative.pi.third, value: kValue.f16.negative.pi.third },
+ { bits: kBit.f16.negative.pi.quarter, value: kValue.f16.negative.pi.quarter },
+ { bits: kBit.f16.negative.pi.sixth, value: kValue.f16.negative.pi.sixth },
+ { bits: kBit.f16.positive.subnormal.max, value: kValue.f16.positive.subnormal.max },
+ { bits: kBit.f16.positive.subnormal.min, value: kValue.f16.positive.subnormal.min },
+ { bits: kBit.f16.negative.subnormal.max, value: kValue.f16.negative.subnormal.max },
+ { bits: kBit.f16.negative.subnormal.min, value: kValue.f16.negative.subnormal.min },
+ { bits: kBit.f16.positive.infinity, value: kValue.f16.positive.infinity },
+ { bits: kBit.f16.negative.infinity, value: kValue.f16.negative.infinity },
+ ])
+ .fn(test => {
+ const bits = test.params.bits;
+ const value = test.params.value;
+
+ const val_to_bits = bits === float16ToUint16(value);
+ const bits_to_val = value === uint16ToFloat16(bits);
+ test.expect(
+ val_to_bits && bits_to_val,
+ `bits = ${bits}, value = ${value}, returned val_to_bits as ${val_to_bits}, and bits_to_val as ${bits_to_val}, they are expected to be equivalent`
+ );
+ });
+
+interface cartesianProductCase<T> {
+ inputs: T[][];
+ result: T[][];
+}
+
+g.test('cartesianProductNumber')
+ .paramsSimple<cartesianProductCase<number>>(
+ // prettier-ignore
+ [
+ { inputs: [[0], [1]], result: [[0, 1]] },
+ { inputs: [[0, 1], [2]], result: [[0, 2],
+ [1, 2]] },
+ { inputs: [[0], [1, 2]], result: [[0, 1],
+ [0, 2]] },
+ { inputs: [[0, 1], [2, 3]], result: [[0,2],
+ [1, 2],
+ [0, 3],
+ [1, 3]] },
+ { inputs: [[0, 1, 2], [3, 4, 5]], result: [[0, 3],
+ [1, 3],
+ [2, 3],
+ [0, 4],
+ [1, 4],
+ [2, 4],
+ [0, 5],
+ [1, 5],
+ [2, 5]] },
+ { inputs: [[0, 1], [2, 3], [4, 5]], result: [[0, 2, 4],
+ [1, 2, 4],
+ [0, 3, 4],
+ [1, 3, 4],
+ [0, 2, 5],
+ [1, 2, 5],
+ [0, 3, 5],
+ [1, 3, 5]] },
+
+ ]
+ )
+ .fn(test => {
+ const inputs = test.params.inputs;
+ const got = cartesianProduct(...inputs);
+ const expect = test.params.result;
+
+ test.expect(
+ objectEquals(got, expect),
+ `cartesianProduct(${JSON.stringify(inputs)}) returned ${JSON.stringify(
+ got
+ )}. Expected ${JSON.stringify(expect)} `
+ );
+ });
+
+g.test('cartesianProductArray')
+ .paramsSimple<cartesianProductCase<number[]>>(
+ // prettier-ignore
+ [
+ { inputs: [[[0, 1], [2, 3]], [[4, 5], [6, 7]]], result: [[[0, 1], [4, 5]],
+ [[2, 3], [4, 5]],
+ [[0, 1], [6, 7]],
+ [[2, 3], [6, 7]]]},
+ { inputs: [[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8, 9]]], result: [[[0, 1], [4, 5], [8, 9]],
+ [[2, 3], [4, 5], [8, 9]],
+ [[0, 1], [6, 7], [8, 9]],
+ [[2, 3], [6, 7], [8, 9]]]},
+ { inputs: [[[0, 1, 2], [3, 4, 5], [6, 7, 8]], [[2, 1, 0], [5, 4, 3], [8, 7, 6]]], result: [[[0, 1, 2], [2, 1, 0]],
+ [[3, 4, 5], [2, 1, 0]],
+ [[6, 7, 8], [2, 1, 0]],
+ [[0, 1, 2], [5, 4, 3]],
+ [[3, 4, 5], [5, 4, 3]],
+ [[6, 7, 8], [5, 4, 3]],
+ [[0, 1, 2], [8, 7, 6]],
+ [[3, 4, 5], [8, 7, 6]],
+ [[6, 7, 8], [8, 7, 6]]]}
+
+ ]
+ )
+ .fn(test => {
+ const inputs = test.params.inputs;
+ const got = cartesianProduct(...inputs);
+ const expect = test.params.result;
+
+ test.expect(
+ objectEquals(got, expect),
+ `cartesianProduct(${JSON.stringify(inputs)}) returned ${JSON.stringify(
+ got
+ )}. Expected ${JSON.stringify(expect)} `
+ );
+ });
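
The expected orderings in both tests above (the first input varying fastest) fall out of building the product incrementally, appending each value of the next input to every tuple accumulated so far. A sketch of that construction, under the assumption that this is how the ordering arises; the suite's cartesianProduct may be written differently:

// Sketch only: cartesian product with the first input varying fastest,
// matching the expected orderings above.
function cartesianProductSketch<T>(...inputs: T[][]): T[][] {
  let result: T[][] = [[]];
  for (const input of inputs) {
    const next: T[][] = [];
    for (const value of input) {
      for (const prefix of result) {
        next.push([...prefix, value]);
      }
    }
    result = next;
  }
  return result;
}

// e.g. cartesianProductSketch([0, 1], [2, 3]) yields [[0, 2], [1, 2], [0, 3], [1, 3]].
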
+
+interface calculatePermutationsCase<T> {
+ input: T[];
+ result: T[][];
+}
+
+g.test('calculatePermutations')
+ .paramsSimple<calculatePermutationsCase<number>>(
+ // prettier-ignore
+ [
+ { input: [0, 1], result: [[0, 1],
+ [1, 0]] },
+ { input: [0, 1, 2], result: [[0, 1, 2],
+ [0, 2, 1],
+ [1, 0, 2],
+ [1, 2, 0],
+ [2, 0, 1],
+ [2, 1, 0]] },
+ { input: [0, 1, 2, 3], result: [[0, 1, 2, 3],
+ [0, 1, 3, 2],
+ [0, 2, 1, 3],
+ [0, 2, 3, 1],
+ [0, 3, 1, 2],
+ [0, 3, 2, 1],
+ [1, 0, 2, 3],
+ [1, 0, 3, 2],
+ [1, 2, 0, 3],
+ [1, 2, 3, 0],
+ [1, 3, 0, 2],
+ [1, 3, 2, 0],
+ [2, 0, 1, 3],
+ [2, 0, 3, 1],
+ [2, 1, 0, 3],
+ [2, 1, 3, 0],
+ [2, 3, 0, 1],
+ [2, 3, 1, 0],
+ [3, 0, 1, 2],
+ [3, 0, 2, 1],
+ [3, 1, 0, 2],
+ [3, 1, 2, 0],
+ [3, 2, 0, 1],
+ [3, 2, 1, 0]] },
+ ]
+ )
+ .fn(test => {
+ const input = test.params.input;
+ const got = calculatePermutations(input);
+ const expect = test.params.result;
+
+ test.expect(
+ objectEquals(got, expect),
+ `calculatePermutations(${JSON.stringify(input)}) returned ${JSON.stringify(
+ got
+ )}. Expected ${JSON.stringify(expect)} `
+ );
+ });
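
The permutation order in the expectations (each element taken as the head in input order, followed by permutations of the remainder) matches the usual recursive construction. A sketch under that assumption, not the suite's implementation:

// Sketch only: permutations in the order the expectations above use
// (each element as head, in input order, then permute the remainder).
function calculatePermutationsSketch<T>(input: T[]): T[][] {
  if (input.length <= 1) {
    return [input.slice()];
  }
  const result: T[][] = [];
  input.forEach((head, i) => {
    const rest = [...input.slice(0, i), ...input.slice(i + 1)];
    for (const tail of calculatePermutationsSketch(rest)) {
      result.push([head, ...tail]);
    }
  });
  return result;
}

// e.g. calculatePermutationsSketch([0, 1, 2])[0] is [0, 1, 2] and the last entry is [2, 1, 0].
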
diff --git a/dom/webgpu/tests/cts/checkout/src/unittests/params_builder_and_utils.spec.ts b/dom/webgpu/tests/cts/checkout/src/unittests/params_builder_and_utils.spec.ts
new file mode 100644
index 0000000000..47e2eb335f
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/unittests/params_builder_and_utils.spec.ts
@@ -0,0 +1,549 @@
+export const description = `
+Unit tests for parameterization helpers.
+`;
+
+import { TestParams } from '../common/framework/fixture.js';
+import {
+ kUnitCaseParamsBuilder,
+ CaseSubcaseIterable,
+ ParamsBuilderBase,
+ builderIterateCasesWithSubcases,
+} from '../common/framework/params_builder.js';
+import { makeTestGroup } from '../common/framework/test_group.js';
+import {
+ mergeParams,
+ mergeParamsChecked,
+ publicParamsEquals,
+} from '../common/internal/params_utils.js';
+import { assert, objectEquals } from '../common/util/util.js';
+
+import { UnitTest } from './unit_test.js';
+
+class ParamsTest extends UnitTest {
+ expectParams<CaseP extends {}, SubcaseP extends {}>(
+ act: ParamsBuilderBase<CaseP, SubcaseP>,
+ exp: CaseSubcaseIterable<{}, {}>,
+ caseFilter: TestParams | null = null
+ ): void {
+ const a = Array.from(builderIterateCasesWithSubcases(act, caseFilter)).map(
+ ([caseP, subcases]) => [caseP, subcases ? Array.from(subcases) : undefined]
+ );
+ const e = Array.from(exp);
+ this.expect(
+ objectEquals(a, e),
+ `
+got ${JSON.stringify(a)}
+expected ${JSON.stringify(e)}`
+ );
+ }
+}
+
+export const g = makeTestGroup(ParamsTest);
+
+const u = kUnitCaseParamsBuilder;
+
+g.test('combine').fn(t => {
+ t.expectParams<{ hello: number }, {}>(u.combine('hello', [1, 2, 3]), [
+ [{ hello: 1 }, undefined],
+ [{ hello: 2 }, undefined],
+ [{ hello: 3 }, undefined],
+ ]);
+ t.expectParams<{ hello: number }, {}>(
+ u.combine('hello', [1, 2, 3]),
+ [
+ [{ hello: 1 }, undefined],
+ [{ hello: 2 }, undefined],
+ [{ hello: 3 }, undefined],
+ ],
+ {}
+ );
+ t.expectParams<{ hello: number }, {}>(
+ u.combine('hello', [1, 2, 3]),
+ [[{ hello: 2 }, undefined]],
+ { hello: 2 }
+ );
+ t.expectParams<{ hello: 1 | 2 | 3 }, {}>(u.combine('hello', [1, 2, 3] as const), [
+ [{ hello: 1 }, undefined],
+ [{ hello: 2 }, undefined],
+ [{ hello: 3 }, undefined],
+ ]);
+ t.expectParams<{}, { hello: number }>(u.beginSubcases().combine('hello', [1, 2, 3]), [
+ [{}, [{ hello: 1 }, { hello: 2 }, { hello: 3 }]],
+ ]);
+ t.expectParams<{}, { hello: number }>(
+ u.beginSubcases().combine('hello', [1, 2, 3]),
+ [[{}, [{ hello: 1 }, { hello: 2 }, { hello: 3 }]]],
+ {}
+ );
+ t.expectParams<{}, { hello: number }>(u.beginSubcases().combine('hello', [1, 2, 3]), [], {
+ hello: 2,
+ });
+ t.expectParams<{}, { hello: 1 | 2 | 3 }>(u.beginSubcases().combine('hello', [1, 2, 3] as const), [
+ [{}, [{ hello: 1 }, { hello: 2 }, { hello: 3 }]],
+ ]);
+});
+
+g.test('empty').fn(t => {
+ t.expectParams<{}, {}>(u, [
+ [{}, undefined], //
+ ]);
+ t.expectParams<{}, {}>(u.beginSubcases(), [
+ [{}, [{}]], //
+ ]);
+});
+
+g.test('combine,zeroes_and_ones').fn(t => {
+ t.expectParams<{}, {}>(u.combineWithParams([]).combineWithParams([]), []);
+ t.expectParams<{}, {}>(u.combineWithParams([]).combineWithParams([{}]), []);
+ t.expectParams<{}, {}>(u.combineWithParams([{}]).combineWithParams([]), []);
+ t.expectParams<{}, {}>(u.combineWithParams([{}]).combineWithParams([{}]), [
+ [{}, undefined], //
+ ]);
+
+ t.expectParams<{}, {}>(u.combine('x', []).combine('y', []), []);
+ t.expectParams<{}, {}>(u.combine('x', []).combine('y', [1]), []);
+ t.expectParams<{}, {}>(u.combine('x', [1]).combine('y', []), []);
+ t.expectParams<{}, {}>(u.combine('x', [1]).combine('y', [1]), [
+ [{ x: 1, y: 1 }, undefined], //
+ ]);
+});
+
+g.test('combine,mixed').fn(t => {
+ t.expectParams<{ x: number; y: string; p: number | undefined; q: number | undefined }, {}>(
+ u
+ .combine('x', [1, 2])
+ .combine('y', ['a', 'b'])
+ .combineWithParams([{ p: 4 }, { q: 5 }])
+ .combineWithParams([{}]),
+ [
+ [{ x: 1, y: 'a', p: 4 }, undefined],
+ [{ x: 1, y: 'a', q: 5 }, undefined],
+ [{ x: 1, y: 'b', p: 4 }, undefined],
+ [{ x: 1, y: 'b', q: 5 }, undefined],
+ [{ x: 2, y: 'a', p: 4 }, undefined],
+ [{ x: 2, y: 'a', q: 5 }, undefined],
+ [{ x: 2, y: 'b', p: 4 }, undefined],
+ [{ x: 2, y: 'b', q: 5 }, undefined],
+ ]
+ );
+});
+
+g.test('filter').fn(t => {
+ t.expectParams<{ a: boolean; x: number | undefined; y: number | undefined }, {}>(
+ u
+ .combineWithParams([
+ { a: true, x: 1 },
+ { a: false, y: 2 },
+ ])
+ .filter(p => p.a),
+ [
+ [{ a: true, x: 1 }, undefined], //
+ ]
+ );
+
+ t.expectParams<{ a: boolean; x: number | undefined; y: number | undefined }, {}>(
+ u
+ .combineWithParams([
+ { a: true, x: 1 },
+ { a: false, y: 2 },
+ ])
+ .beginSubcases()
+ .filter(p => p.a),
+ [
+ [{ a: true, x: 1 }, [{}]], //
+ // Case with no subcases is filtered out.
+ ]
+ );
+
+ t.expectParams<{}, { a: boolean; x: number | undefined; y: number | undefined }>(
+ u
+ .beginSubcases()
+ .combineWithParams([
+ { a: true, x: 1 },
+ { a: false, y: 2 },
+ ])
+ .filter(p => p.a),
+ [
+ [{}, [{ a: true, x: 1 }]], //
+ ]
+ );
+});
+
+g.test('unless').fn(t => {
+ t.expectParams<{ a: boolean; x: number | undefined; y: number | undefined }, {}>(
+ u
+ .combineWithParams([
+ { a: true, x: 1 },
+ { a: false, y: 2 },
+ ])
+ .unless(p => p.a),
+ [
+ [{ a: false, y: 2 }, undefined], //
+ ]
+ );
+
+ t.expectParams<{ a: boolean; x: number | undefined; y: number | undefined }, {}>(
+ u
+ .combineWithParams([
+ { a: true, x: 1 },
+ { a: false, y: 2 },
+ ])
+ .beginSubcases()
+ .unless(p => p.a),
+ [
+ // Case with no subcases is filtered out.
+ [{ a: false, y: 2 }, [{}]], //
+ ]
+ );
+
+ t.expectParams<{}, { a: boolean; x: number | undefined; y: number | undefined }>(
+ u
+ .beginSubcases()
+ .combineWithParams([
+ { a: true, x: 1 },
+ { a: false, y: 2 },
+ ])
+ .unless(p => p.a),
+ [
+ [{}, [{ a: false, y: 2 }]], //
+ ]
+ );
+});
+
+g.test('expandP').fn(t => {
+ // simple
+ t.expectParams<{}, {}>(
+ u.expandWithParams(function* () {}),
+ []
+ );
+ t.expectParams<{}, {}>(
+ u.expandWithParams(function* () {
+ yield {};
+ }),
+ [[{}, undefined]]
+ );
+ t.expectParams<{ z: number | undefined; w: number | undefined }, {}>(
+ u.expandWithParams(function* () {
+ yield* kUnitCaseParamsBuilder.combine('z', [3, 4]);
+ yield { w: 5 };
+ }),
+ [
+ [{ z: 3 }, undefined],
+ [{ z: 4 }, undefined],
+ [{ w: 5 }, undefined],
+ ]
+ );
+ t.expectParams<{ z: number | undefined; w: number | undefined }, {}>(
+ u.expandWithParams(function* () {
+ yield* kUnitCaseParamsBuilder.combine('z', [3, 4]);
+ yield { w: 5 };
+ }),
+ [
+ [{ z: 3 }, undefined],
+ [{ z: 4 }, undefined],
+ [{ w: 5 }, undefined],
+ ],
+ {}
+ );
+ t.expectParams<{ z: number | undefined; w: number | undefined }, {}>(
+ u.expandWithParams(function* () {
+ yield* kUnitCaseParamsBuilder.combine('z', [3, 4]);
+ yield { w: 5 };
+ }),
+ [[{ z: 4 }, undefined]],
+ { z: 4 }
+ );
+ t.expectParams<{ z: number | undefined; w: number | undefined }, {}>(
+ u.expandWithParams(function* () {
+ yield* kUnitCaseParamsBuilder.combine('z', [3, 4]);
+ yield { w: 5 };
+ }),
+ [[{ z: 3 }, undefined]],
+ { z: 3 }
+ );
+ t.expectParams<{}, { z: number | undefined; w: number | undefined }>(
+ u.beginSubcases().expandWithParams(function* () {
+ yield* kUnitCaseParamsBuilder.combine('z', [3, 4]);
+ yield { w: 5 };
+ }),
+ [[{}, [{ z: 3 }, { z: 4 }, { w: 5 }]]]
+ );
+
+ t.expectParams<{ x: [] | {} }, {}>(
+ u.expand('x', () => [[], {}] as const),
+ [
+ [{ x: [] }, undefined],
+ [{ x: {} }, undefined],
+ ]
+ );
+ t.expectParams<{ x: [] | {} }, {}>(
+ u.expand('x', () => [[], {}] as const),
+ [[{ x: [] }, undefined]],
+ { x: [] }
+ );
+ t.expectParams<{ x: [] | {} }, {}>(
+ u.expand('x', () => [[], {}] as const),
+ [[{ x: {} }, undefined]],
+ { x: {} }
+ );
+
+ // more complex
+ {
+ const p = u
+ .combineWithParams([
+ { a: true, x: 1 },
+ { a: false, y: 2 },
+ ])
+ .expandWithParams(function* (p) {
+ if (p.a) {
+ yield { z: 3 };
+ yield { z: 4 };
+ } else {
+ yield { w: 5 };
+ }
+ });
+ type T = {
+ a: boolean;
+ x: number | undefined;
+ y: number | undefined;
+ z: number | undefined;
+ w: number | undefined;
+ };
+ t.expectParams<T, {}>(p, [
+ [{ a: true, x: 1, z: 3 }, undefined],
+ [{ a: true, x: 1, z: 4 }, undefined],
+ [{ a: false, y: 2, w: 5 }, undefined],
+ ]);
+ t.expectParams<T, {}>(
+ p,
+ [
+ [{ a: true, x: 1, z: 3 }, undefined],
+ [{ a: true, x: 1, z: 4 }, undefined],
+ [{ a: false, y: 2, w: 5 }, undefined],
+ ],
+ {}
+ );
+ t.expectParams<T, {}>(
+ p,
+ [
+ [{ a: true, x: 1, z: 3 }, undefined],
+ [{ a: true, x: 1, z: 4 }, undefined],
+ ],
+ { a: true }
+ );
+ t.expectParams<T, {}>(p, [[{ a: false, y: 2, w: 5 }, undefined]], { a: false });
+ }
+
+ t.expectParams<
+ { a: boolean; x: number | undefined; y: number | undefined },
+ { z: number | undefined; w: number | undefined }
+ >(
+ u
+ .combineWithParams([
+ { a: true, x: 1 },
+ { a: false, y: 2 },
+ ])
+ .beginSubcases()
+ .expandWithParams(function* (p) {
+ if (p.a) {
+ yield { z: 3 };
+ yield { z: 4 };
+ } else {
+ yield { w: 5 };
+ }
+ }),
+ [
+ [{ a: true, x: 1 }, [{ z: 3 }, { z: 4 }]],
+ [{ a: false, y: 2 }, [{ w: 5 }]],
+ ]
+ );
+});
+
+g.test('expand').fn(t => {
+ // simple
+ t.expectParams<{}, {}>(
+ u.expand('x', function* () {}),
+ []
+ );
+ t.expectParams<{ z: number }, {}>(
+ u.expand('z', function* () {
+ yield 3;
+ yield 4;
+ }),
+ [
+ [{ z: 3 }, undefined],
+ [{ z: 4 }, undefined],
+ ]
+ );
+ t.expectParams<{ z: number }, {}>(
+ u.expand('z', function* () {
+ yield 3;
+ yield 4;
+ }),
+ [
+ [{ z: 3 }, undefined],
+ [{ z: 4 }, undefined],
+ ],
+ {}
+ );
+ t.expectParams<{ z: number }, {}>(
+ u.expand('z', function* () {
+ yield 3;
+ yield 4;
+ }),
+ [[{ z: 3 }, undefined]],
+ { z: 3 }
+ );
+ t.expectParams<{}, { z: number }>(
+ u.beginSubcases().expand('z', function* () {
+ yield 3;
+ yield 4;
+ }),
+ [[{}, [{ z: 3 }, { z: 4 }]]]
+ );
+
+ // more complex
+ t.expectParams<{ a: boolean; x: number | undefined; y: number | undefined; z: number }, {}>(
+ u
+ .combineWithParams([
+ { a: true, x: 1 },
+ { a: false, y: 2 },
+ ])
+ .expand('z', function* (p) {
+ if (p.a) {
+ yield 3;
+ } else {
+ yield 5;
+ }
+ }),
+ [
+ [{ a: true, x: 1, z: 3 }, undefined],
+ [{ a: false, y: 2, z: 5 }, undefined],
+ ]
+ );
+ t.expectParams<{ a: boolean; x: number | undefined; y: number | undefined }, { z: number }>(
+ u
+ .combineWithParams([
+ { a: true, x: 1 },
+ { a: false, y: 2 },
+ ])
+ .beginSubcases()
+ .expand('z', function* (p) {
+ if (p.a) {
+ yield 3;
+ } else {
+ yield 5;
+ }
+ }),
+ [
+ [{ a: true, x: 1 }, [{ z: 3 }]],
+ [{ a: false, y: 2 }, [{ z: 5 }]],
+ ]
+ );
+});
+
+g.test('invalid,shadowing').fn(t => {
+ // Existing CaseP is shadowed by a new CaseP.
+ {
+ const p = u
+ .combineWithParams([
+ { a: true, x: 1 },
+ { a: false, x: 2 },
+ ])
+ .expandWithParams(function* (p) {
+ if (p.a) {
+ yield { x: 3 };
+ } else {
+ yield { w: 5 };
+ }
+ });
+ // Iterating causes merging e.g. ({x:1}, {x:3}), which fails.
+ t.shouldThrow('Error', () => {
+ Array.from(p.iterateCasesWithSubcases(null));
+ });
+ }
+ // Existing SubcaseP is shadowed by a new SubcaseP.
+ {
+ const p = u
+ .beginSubcases()
+ .combineWithParams([
+ { a: true, x: 1 },
+ { a: false, x: 2 },
+ ])
+ .expandWithParams(function* (p) {
+ if (p.a) {
+ yield { x: 3 };
+ } else {
+ yield { w: 5 };
+ }
+ });
+ // Iterating causes merging e.g. ({x:1}, {x:3}), which fails.
+ t.shouldThrow('Error', () => {
+ Array.from(p.iterateCasesWithSubcases(null));
+ });
+ }
+ // Existing CaseP is shadowed by a new SubcaseP.
+ {
+ const p = u
+ .combineWithParams([
+ { a: true, x: 1 },
+ { a: false, x: 2 },
+ ])
+ .beginSubcases()
+ .expandWithParams(function* (p) {
+ if (p.a) {
+ yield { x: 3 };
+ } else {
+ yield { w: 5 };
+ }
+ });
+ const cases = Array.from(p.iterateCasesWithSubcases(null));
+ // Iterating cases is fine...
+ for (const [caseP, subcases] of cases) {
+ assert(subcases !== undefined);
+ // Iterating subcases is fine...
+ for (const subcaseP of subcases) {
+ if (caseP.a) {
+ assert(subcases !== undefined);
+
+ // Only errors once we try to merge e.g. ({x:1}, {x:3}).
+ mergeParams(caseP, subcaseP);
+ t.shouldThrow('Error', () => {
+ mergeParamsChecked(caseP, subcaseP);
+ });
+ }
+ }
+ }
+ }
+});
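
The shadowing test hinges on the contract difference between mergeParams and mergeParamsChecked: the former lets a later key silently shadow an earlier one, while the latter throws on any duplicate key. Hypothetical helpers with that behaviour (sketches for illustration; the framework's actual implementations may differ) could look like:

// Hypothetical sketches consistent with the shadowing behaviour tested above.
function mergeParamsSketch<A extends {}, B extends {}>(a: A, b: B): A & B {
  // Later keys silently shadow earlier ones.
  return { ...a, ...b };
}

function mergeParamsCheckedSketch<A extends {}, B extends {}>(a: A, b: B): A & B {
  for (const key of Object.keys(b)) {
    if (key in a) {
      throw new Error(`duplicate key '${key}' when merging params`);
    }
  }
  return { ...a, ...b };
}
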
+
+g.test('undefined').fn(t => {
+ t.expect(!publicParamsEquals({ a: undefined }, {}));
+ t.expect(!publicParamsEquals({}, { a: undefined }));
+});
+
+g.test('private').fn(t => {
+ t.expect(publicParamsEquals({ _a: 0 }, {}));
+ t.expect(publicParamsEquals({}, { _a: 0 }));
+});
+
+g.test('value,array').fn(t => {
+ t.expectParams<{ a: number[] }, {}>(u.combineWithParams([{ a: [1, 2] }]), [
+ [{ a: [1, 2] }, undefined], //
+ ]);
+ t.expectParams<{}, { a: number[] }>(u.beginSubcases().combineWithParams([{ a: [1, 2] }]), [
+ [{}, [{ a: [1, 2] }]], //
+ ]);
+});
+
+g.test('value,object').fn(t => {
+ t.expectParams<{ a: { [k: string]: number } }, {}>(u.combineWithParams([{ a: { x: 1 } }]), [
+ [{ a: { x: 1 } }, undefined], //
+ ]);
+ t.expectParams<{}, { a: { [k: string]: number } }>(
+ u.beginSubcases().combineWithParams([{ a: { x: 1 } }]),
+ [
+ [{}, [{ a: { x: 1 } }]], //
+ ]
+ );
+});
diff --git a/dom/webgpu/tests/cts/checkout/src/unittests/params_builder_toplevel.spec.ts b/dom/webgpu/tests/cts/checkout/src/unittests/params_builder_toplevel.spec.ts
new file mode 100644
index 0000000000..08a84b23e7
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/unittests/params_builder_toplevel.spec.ts
@@ -0,0 +1,112 @@
+export const description = `
+Unit tests for parameterization.
+`;
+
+import { TestParams } from '../common/framework/fixture.js';
+import { kUnitCaseParamsBuilder } from '../common/framework/params_builder.js';
+import { makeTestGroup } from '../common/framework/test_group.js';
+import { makeTestGroupForUnitTesting } from '../common/internal/test_group.js';
+
+import { TestGroupTest } from './test_group_test.js';
+import { UnitTest } from './unit_test.js';
+
+export const g = makeTestGroup(TestGroupTest);
+
+g.test('combine_none,arg_unit')
+ .params(u => u.combineWithParams([]))
+ .fn(t => {
+ t.fail("this test shouldn't run");
+ });
+
+g.test('combine_none,arg_ignored')
+ .params(() => kUnitCaseParamsBuilder.combineWithParams([]))
+ .fn(t => {
+ t.fail("this test shouldn't run");
+ });
+
+g.test('combine_none,plain_builder')
+ .params(kUnitCaseParamsBuilder.combineWithParams([]))
+ .fn(t => {
+ t.fail("this test shouldn't run");
+ });
+
+g.test('combine_none,plain_array')
+ .paramsSimple([])
+ .fn(t => {
+ t.fail("this test shouldn't run");
+ });
+
+g.test('combine_one,case')
+ .params(u =>
+ u //
+ .combineWithParams([{ x: 1 }])
+ )
+ .fn(t => {
+ t.expect(t.params.x === 1);
+ });
+
+g.test('combine_one,subcase')
+ .paramsSubcasesOnly(u =>
+ u //
+ .combineWithParams([{ x: 1 }])
+ )
+ .fn(t => {
+ t.expect(t.params.x === 1);
+ });
+
+g.test('filter')
+ .params(u =>
+ u
+ .combineWithParams([
+ { a: true, x: 1 }, //
+ { a: false, y: 2 },
+ ])
+ .filter(p => p.a)
+ )
+ .fn(t => {
+ t.expect(t.params.a);
+ });
+
+g.test('unless')
+ .params(u =>
+ u
+ .combineWithParams([
+ { a: true, x: 1 }, //
+ { a: false, y: 2 },
+ ])
+ .unless(p => p.a)
+ )
+ .fn(t => {
+ t.expect(!t.params.a);
+ });
+
+g.test('generator').fn(t0 => {
+ const g = makeTestGroupForUnitTesting(UnitTest);
+
+ const ran: TestParams[] = [];
+
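+  // combineWithParams also accepts a lazily-evaluated iterable (here an object
+  // with a generator [Symbol.iterator]), not just a plain array of params.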
+ g.test('generator')
+ .params(u =>
+ u.combineWithParams({
+ *[Symbol.iterator]() {
+ for (let x = 0; x < 3; ++x) {
+ for (let y = 0; y < 2; ++y) {
+ yield { x, y };
+ }
+ }
+ },
+ })
+ )
+ .fn(t => {
+ ran.push(t.params);
+ });
+
+ t0.expectCases(g, [
+ { test: ['generator'], params: { x: 0, y: 0 } },
+ { test: ['generator'], params: { x: 0, y: 1 } },
+ { test: ['generator'], params: { x: 1, y: 0 } },
+ { test: ['generator'], params: { x: 1, y: 1 } },
+ { test: ['generator'], params: { x: 2, y: 0 } },
+ { test: ['generator'], params: { x: 2, y: 1 } },
+ ]);
+});
diff --git a/dom/webgpu/tests/cts/checkout/src/unittests/preprocessor.spec.ts b/dom/webgpu/tests/cts/checkout/src/unittests/preprocessor.spec.ts
new file mode 100644
index 0000000000..040629355d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/unittests/preprocessor.spec.ts
@@ -0,0 +1,207 @@
+export const description = `
+Test for "pp" preprocessor.
+`;
+
+import { makeTestGroup } from '../common/framework/test_group.js';
+import { pp } from '../common/util/preprocessor.js';
+
+import { UnitTest } from './unit_test.js';
+
+class F extends UnitTest {
+ test(act: string, exp: string): void {
+    this.expect(act === exp, 'got: ' + act.replace(/\n/g, '⏎'));
+ }
+}
+
+export const g = makeTestGroup(F);
+
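+// Note on directive naming: the number of leading underscores appears to encode
+// nesting depth, so pp._if/_elif/_else/_endif are top-level directives while
+// pp.__if/__elif/__else/__endif operate one level deeper (as exercised by the
+// 'nested,*' and 'errors,*' cases below).
+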
+g.test('empty').fn(t => {
+ t.test(pp``, '');
+ t.test(pp`\n`, '\n');
+ t.test(pp`\n\n`, '\n\n');
+});
+
+g.test('plain').fn(t => {
+ t.test(pp`a`, 'a');
+ t.test(pp`\na`, '\na');
+ t.test(pp`\n\na`, '\n\na');
+ t.test(pp`\na\n`, '\na\n');
+ t.test(pp`a\n\n`, 'a\n\n');
+});
+
+g.test('substitutions,1').fn(t => {
+ const act = pp`a ${3} b`;
+ const exp = 'a 3 b';
+ t.test(act, exp);
+});
+
+g.test('substitutions,2').fn(t => {
+ const act = pp`a ${'x'}`;
+ const exp = 'a x';
+ t.test(act, exp);
+});
+
+g.test('substitutions,3').fn(t => {
+ const act = pp`a ${'x'} b`;
+ const exp = 'a x b';
+ t.test(act, exp);
+});
+
+g.test('substitutions,4').fn(t => {
+ const act = pp`
+a
+${pp._if(false)}
+${'x'}
+${pp._endif}
+b`;
+ const exp = '\na\n\nb';
+ t.test(act, exp);
+});
+
+g.test('if,true').fn(t => {
+ const act = pp`
+a
+${pp._if(true)}c${pp._endif}
+d
+`;
+ const exp = '\na\nc\nd\n';
+ t.test(act, exp);
+});
+
+g.test('if,false').fn(t => {
+ const act = pp`
+a
+${pp._if(false)}c${pp._endif}
+d
+`;
+ const exp = '\na\n\nd\n';
+ t.test(act, exp);
+});
+
+g.test('else,1').fn(t => {
+ const act = pp`
+a
+${pp._if(true)}
+b
+${pp._else}
+c
+${pp._endif}
+d
+`;
+ const exp = '\na\n\nb\n\nd\n';
+ t.test(act, exp);
+});
+
+g.test('else,2').fn(t => {
+ const act = pp`
+a
+${pp._if(false)}
+b
+${pp._else}
+c
+${pp._endif}
+d
+`;
+ const exp = '\na\n\nc\n\nd\n';
+ t.test(act, exp);
+});
+
+g.test('elif,1').fn(t => {
+ const act = pp`
+a
+${pp._if(false)}
+b
+${pp._elif(true)}
+e
+${pp._else}
+c
+${pp._endif}
+d
+`;
+ const exp = '\na\n\ne\n\nd\n';
+ t.test(act, exp);
+});
+
+g.test('elif,2').fn(t => {
+ const act = pp`
+a
+${pp._if(true)}
+b
+${pp._elif(true)}
+e
+${pp._else}
+c
+${pp._endif}
+d
+`;
+ const exp = '\na\n\nb\n\nd\n';
+ t.test(act, exp);
+});
+
+g.test('nested,1').fn(t => {
+ const act = pp`
+a
+${pp._if(false)}
+b
+${pp.__if(true)}
+e
+${pp.__endif}
+c
+${pp._endif}
+d
+`;
+ const exp = '\na\n\nd\n';
+ t.test(act, exp);
+});
+
+g.test('nested,2').fn(t => {
+ const act = pp`
+a
+${pp._if(false)}
+b
+${pp._else}
+h
+${pp.__if(false)}
+e
+${pp.__elif(true)}
+f
+${pp.__else}
+g
+${pp.__endif}
+c
+${pp._endif}
+d
+`;
+ const exp = '\na\n\nh\n\nf\n\nc\n\nd\n';
+ t.test(act, exp);
+});
+
+g.test('errors,pass').fn(() => {
+ pp`${pp._if(true)}${pp._endif}`;
+ pp`${pp._if(true)}${pp._else}${pp._endif}`;
+ pp`${pp._if(true)}${pp.__if(true)}${pp.__endif}${pp._endif}`;
+});
+
+g.test('errors,fail').fn(t => {
+ const e = (fn: () => void) => t.shouldThrow('Error', fn);
+ e(() => pp`${pp._if(true)}`);
+ e(() => pp`${pp._elif(true)}`);
+ e(() => pp`${pp._else}`);
+ e(() => pp`${pp._endif}`);
+ e(() => pp`${pp.__if(true)}`);
+ e(() => pp`${pp.__elif(true)}`);
+ e(() => pp`${pp.__else}`);
+ e(() => pp`${pp.__endif}`);
+
+ e(() => pp`${pp._if(true)}${pp._elif(true)}`);
+ e(() => pp`${pp._if(true)}${pp._elif(true)}${pp._else}`);
+ e(() => pp`${pp._if(true)}${pp._else}`);
+ e(() => pp`${pp._else}${pp._endif}`);
+
+ e(() => pp`${pp._if(true)}${pp.__endif}`);
+ e(() => pp`${pp.__if(true)}${pp.__endif}`);
+ e(() => pp`${pp.__if(true)}${pp._endif}`);
+
+ e(() => pp`${pp._if(true)}${pp._else}${pp._else}${pp._endif}`);
+ e(() => pp`${pp._if(true)}${pp.__if(true)}${pp.__else}${pp.__else}${pp.__endif}${pp._endif}`);
+});
diff --git a/dom/webgpu/tests/cts/checkout/src/unittests/prng.spec.ts b/dom/webgpu/tests/cts/checkout/src/unittests/prng.spec.ts
new file mode 100644
index 0000000000..6317a98eea
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/unittests/prng.spec.ts
@@ -0,0 +1,74 @@
+export const description = `
+Unit tests for the pseudo-random number generator
+`;
+
+import { makeTestGroup } from '../common/framework/test_group.js';
+import { fullU32Range } from '../webgpu/util/math.js';
+import { PRNG } from '../webgpu/util/prng.js';
+
+import { UnitTest } from './unit_test.js';
+
+export const g = makeTestGroup(UnitTest);
+
+// There exist more formal tests for the quality of random number generators,
+// but those are out of scope here; the implementation is instead checked
+// against the original C implementation's reference values below.
+// These tests are just intended to be smoke tests for the implementation.
+
+// Test against the reference u32 values from the original C implementation
+// https://github.com/MersenneTwister-Lab/TinyMT/blob/master/tinymt/check32.out.txt
+g.test('check').fn(t => {
+ const p = new PRNG(1);
+ // prettier-ignore
+ const expected = [
+ 2545341989, 981918433, 3715302833, 2387538352, 3591001365,
+ 3820442102, 2114400566, 2196103051, 2783359912, 764534509,
+ 643179475, 1822416315, 881558334, 4207026366, 3690273640,
+ 3240535687, 2921447122, 3984931427, 4092394160, 44209675,
+ 2188315343, 2908663843, 1834519336, 3774670961, 3019990707,
+ 4065554902, 1239765502, 4035716197, 3412127188, 552822483,
+ 161364450, 353727785, 140085994, 149132008, 2547770827,
+ 4064042525, 4078297538, 2057335507, 622384752, 2041665899,
+ 2193913817, 1080849512, 33160901, 662956935, 642999063,
+ 3384709977, 1723175122, 3866752252, 521822317, 2292524454,
+ ];
+ expected.forEach((_, i) => {
+ const val = p.randomU32();
+ t.expect(
+ val === expected[i],
+      `PRNG(1) failed to produce the ${i}th expected item: got ${val} instead of ${expected[i]}`
+ );
+ });
+});
+
+// Prove that the generator is deterministic for at least 1000 values with different
+// seeds.
+g.test('deterministic_random').fn(t => {
+ fullU32Range().forEach(seed => {
+ const lhs = new PRNG(seed);
+ const rhs = new PRNG(seed);
+ for (let i = 0; i < 1000; i++) {
+ const lhs_val = lhs.random();
+ const rhs_val = rhs.random();
+ t.expect(
+ lhs_val === rhs_val,
+ `For seed ${seed}, the ${i}th item, PRNG was non-deterministic (${lhs_val} vs ${rhs_val})`
+ );
+ }
+ });
+});
+
+g.test('deterministic_randomU32').fn(t => {
+ fullU32Range().forEach(seed => {
+ const lhs = new PRNG(seed);
+ const rhs = new PRNG(seed);
+ for (let i = 0; i < 1000; i++) {
+ const lhs_val = lhs.randomU32();
+ const rhs_val = rhs.randomU32();
+ t.expect(
+ lhs_val === rhs_val,
+ `For seed ${seed}, the ${i}th item, PRNG was non-deterministic (${lhs_val} vs ${rhs_val})`
+ );
+ }
+ });
+});
diff --git a/dom/webgpu/tests/cts/checkout/src/unittests/query_compare.spec.ts b/dom/webgpu/tests/cts/checkout/src/unittests/query_compare.spec.ts
new file mode 100644
index 0000000000..b53b76a4df
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/unittests/query_compare.spec.ts
@@ -0,0 +1,144 @@
+export const description = `
+Tests for TestQuery comparison
+`;
+
+import { makeTestGroup } from '../common/framework/test_group.js';
+import { compareQueries, Ordering } from '../common/internal/query/compare.js';
+import {
+ TestQuery,
+ TestQuerySingleCase,
+ TestQueryMultiFile,
+ TestQueryMultiTest,
+ TestQueryMultiCase,
+} from '../common/internal/query/query.js';
+
+import { UnitTest } from './unit_test.js';
+
+class F extends UnitTest {
+ expectQ(a: TestQuery, exp: '<' | '=' | '>' | '!', b: TestQuery) {
+ const [expOrdering, expInvOrdering] =
+ exp === '<'
+ ? [Ordering.StrictSubset, Ordering.StrictSuperset]
+ : exp === '='
+ ? [Ordering.Equal, Ordering.Equal]
+ : exp === '>'
+ ? [Ordering.StrictSuperset, Ordering.StrictSubset]
+ : [Ordering.Unordered, Ordering.Unordered];
+ {
+ const act = compareQueries(a, b);
+ this.expect(act === expOrdering, `${a} ${b} got ${act}, exp ${expOrdering}`);
+ }
+ {
+      const act = compareQueries(b, a);
+      this.expect(act === expInvOrdering, `${b} ${a} got ${act}, exp ${expInvOrdering}`);
+ }
+ }
+
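+  /**
+   * Expects each query to equal itself and each earlier query to be a strict
+   * superset of every later query in the list.
+   */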
+ expectWellOrdered(...qs: TestQuery[]) {
+ for (let i = 0; i < qs.length; ++i) {
+ this.expectQ(qs[i], '=', qs[i]);
+ for (let j = i + 1; j < qs.length; ++j) {
+ this.expectQ(qs[i], '>', qs[j]);
+ }
+ }
+ }
+
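+  /**
+   * Expects each query to equal itself and every pair of distinct queries in
+   * the list to be unordered with respect to each other.
+   */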
+ expectUnordered(...qs: TestQuery[]) {
+ for (let i = 0; i < qs.length; ++i) {
+ this.expectQ(qs[i], '=', qs[i]);
+ for (let j = i + 1; j < qs.length; ++j) {
+ this.expectQ(qs[i], '!', qs[j]);
+ }
+ }
+ }
+}
+
+export const g = makeTestGroup(F);
+
+// suite:* > suite:a,* > suite:a,b,* > suite:a,b:*
+// suite:a,b:* > suite:a,b:c,* > suite:a,b:c,d,* > suite:a,b:c,d:*
+// suite:a,b:c,d:* > suite:a,b:c,d:x=1;* > suite:a,b:c,d:x=1;y=2;* > suite:a,b:c,d:x=1;y=2
+// suite:a;* (unordered) suite:b;*
+g.test('well_ordered').fn(t => {
+ t.expectWellOrdered(
+ new TestQueryMultiFile('suite', []),
+ new TestQueryMultiFile('suite', ['a']),
+ new TestQueryMultiFile('suite', ['a', 'b']),
+ new TestQueryMultiTest('suite', ['a', 'b'], []),
+ new TestQueryMultiTest('suite', ['a', 'b'], ['c']),
+ new TestQueryMultiTest('suite', ['a', 'b'], ['c', 'd']),
+ new TestQueryMultiCase('suite', ['a', 'b'], ['c', 'd'], {}),
+ new TestQueryMultiCase('suite', ['a', 'b'], ['c', 'd'], { x: 1 }),
+ new TestQueryMultiCase('suite', ['a', 'b'], ['c', 'd'], { x: 1, y: 2 }),
+ new TestQuerySingleCase('suite', ['a', 'b'], ['c', 'd'], { x: 1, y: 2 })
+ );
+ t.expectWellOrdered(
+ new TestQueryMultiFile('suite', []),
+ new TestQueryMultiFile('suite', ['a']),
+ new TestQueryMultiFile('suite', ['a', 'b']),
+ new TestQueryMultiTest('suite', ['a', 'b'], []),
+ new TestQueryMultiTest('suite', ['a', 'b'], ['c']),
+ new TestQueryMultiTest('suite', ['a', 'b'], ['c', 'd']),
+ new TestQueryMultiCase('suite', ['a', 'b'], ['c', 'd'], {}),
+ new TestQuerySingleCase('suite', ['a', 'b'], ['c', 'd'], {})
+ );
+});
+
+g.test('unordered').fn(t => {
+ t.expectUnordered(
+ new TestQueryMultiFile('suite', ['a']), //
+ new TestQueryMultiFile('suite', ['x'])
+ );
+ t.expectUnordered(
+ new TestQueryMultiFile('suite', ['a', 'b']),
+ new TestQueryMultiFile('suite', ['a', 'x'])
+ );
+ t.expectUnordered(
+ new TestQueryMultiTest('suite', ['a', 'b'], ['c']),
+ new TestQueryMultiTest('suite', ['a', 'b'], ['x']),
+ new TestQueryMultiTest('suite', ['a'], []),
+ new TestQueryMultiTest('suite', ['a', 'x'], [])
+ );
+ t.expectUnordered(
+ new TestQueryMultiTest('suite', ['a', 'b'], ['c', 'd']),
+ new TestQueryMultiTest('suite', ['a', 'b'], ['c', 'x']),
+ new TestQueryMultiTest('suite', ['a'], []),
+ new TestQueryMultiTest('suite', ['a', 'x'], [])
+ );
+ t.expectUnordered(
+ new TestQueryMultiTest('suite', ['a', 'b'], ['c', 'd']),
+ new TestQueryMultiTest('suite', ['a', 'b'], ['c', 'x']),
+ new TestQueryMultiTest('suite', ['a'], []),
+ new TestQueryMultiTest('suite', ['a', 'x'], ['c'])
+ );
+ t.expectUnordered(
+ new TestQueryMultiCase('suite', ['a', 'b'], ['c', 'd'], { x: 1 }),
+ new TestQueryMultiCase('suite', ['a', 'b'], ['c', 'd'], { x: 9 }),
+ new TestQueryMultiCase('suite', ['a', 'b'], ['c'], { x: 9 })
+ );
+ t.expectUnordered(
+ new TestQueryMultiCase('suite', ['a', 'b'], ['c', 'd'], { x: 1, y: 2 }),
+ new TestQueryMultiCase('suite', ['a', 'b'], ['c', 'd'], { x: 1, y: 8 }),
+ new TestQueryMultiCase('suite', ['a', 'b'], ['c'], { x: 1, y: 8 })
+ );
+ t.expectUnordered(
+ new TestQuerySingleCase('suite', ['a', 'b'], ['c', 'd'], { x: 1, y: 2 }),
+ new TestQuerySingleCase('suite', ['a', 'b'], ['c', 'd'], { x: 1, y: 8 }),
+ new TestQuerySingleCase('suite', ['a', 'b'], ['c'], { x: 1, y: 8 })
+ );
+ t.expectUnordered(
+ new TestQuerySingleCase('suite1', ['bar', 'buzz', 'buzz'], ['zap'], {}),
+ new TestQueryMultiTest('suite1', ['bar'], [])
+ );
+ // Expect that 0.0 and -0.0 are treated as different queries
+ t.expectUnordered(
+ new TestQueryMultiCase('suite', ['a', 'b'], ['c', 'd'], { x: 0.0 }),
+ new TestQueryMultiCase('suite', ['a', 'b'], ['c', 'd'], { x: -0.0 })
+ );
+ t.expectUnordered(
+ new TestQuerySingleCase('suite', ['a', 'b'], ['c', 'd'], { x: 0.0, y: 0.0 }),
+ new TestQuerySingleCase('suite', ['a', 'b'], ['c', 'd'], { x: 0.0, y: -0.0 }),
+ new TestQuerySingleCase('suite', ['a', 'b'], ['c', 'd'], { x: -0.0, y: 0.0 }),
+ new TestQuerySingleCase('suite', ['a', 'b'], ['c', 'd'], { x: -0.0, y: -0.0 })
+ );
+});
diff --git a/dom/webgpu/tests/cts/checkout/src/unittests/query_string.spec.ts b/dom/webgpu/tests/cts/checkout/src/unittests/query_string.spec.ts
new file mode 100644
index 0000000000..040acd1b87
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/unittests/query_string.spec.ts
@@ -0,0 +1,268 @@
+export const description = `
+Unit tests for TestQuery strings.
+`;
+
+import { makeTestGroup } from '../common/framework/test_group.js';
+import { compareQueries, Ordering } from '../common/internal/query/compare.js';
+import {
+ TestQuery,
+ TestQuerySingleCase,
+ TestQueryMultiCase,
+ TestQueryMultiTest,
+ TestQueryMultiFile,
+ relativeQueryString,
+} from '../common/internal/query/query.js';
+
+import { UnitTest } from './unit_test.js';
+
+class T extends UnitTest {
+ expectQueryString(q: TestQuery, exp: string): void {
+ const s = q.toString();
+ this.expect(s === exp, `got ${s} expected ${exp}`);
+ }
+
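+  /**
+   * Checks relativeQueryString(parent, child) against exp, and (for non-equal
+   * queries) that computing the relative string in the reverse direction throws.
+   */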
+ expectRelativeQueryString(parent: TestQuery, child: TestQuery, exp: string): void {
+ const s = relativeQueryString(parent, child);
+ this.expect(s === exp, `got ${s} expected ${exp}`);
+
+ if (compareQueries(parent, child) !== Ordering.Equal) {
+ // Test in reverse
+ this.shouldThrow('Error', () => {
+ relativeQueryString(child, parent);
+ });
+ }
+ }
+}
+
+export const g = makeTestGroup(T);
+
+g.test('stringifyQuery,single_case').fn(t => {
+ t.expectQueryString(
+ new TestQuerySingleCase('a', ['b_1', '2_c'], ['d_3', '4_e'], {
+ f: 'g',
+ _pri1: 0,
+ x: 3,
+ _pri2: 1,
+ }),
+ 'a:b_1,2_c:d_3,4_e:f="g";x=3'
+ );
+});
+
+g.test('stringifyQuery,single_case,json').fn(t => {
+ t.expectQueryString(
+ new TestQuerySingleCase('a', ['b_1', '2_c'], ['d_3', '4_e'], {
+ f: 'g',
+ x: { p: 2, q: 'Q' },
+ }),
+ 'a:b_1,2_c:d_3,4_e:f="g";x={"p":2,"q":"Q"}'
+ );
+});
+
+g.test('stringifyQuery,multi_case').fn(t => {
+ t.expectQueryString(
+ new TestQueryMultiCase('a', ['b_1', '2_c'], ['d_3', '4_e'], {
+ f: 'g',
+ _pri1: 0,
+ a: 3,
+ _pri2: 1,
+ }),
+ 'a:b_1,2_c:d_3,4_e:f="g";a=3;*'
+ );
+
+ t.expectQueryString(
+ new TestQueryMultiCase('a', ['b_1', '2_c'], ['d_3', '4_e'], {}),
+ 'a:b_1,2_c:d_3,4_e:*'
+ );
+});
+
+g.test('stringifyQuery,multi_test').fn(t => {
+ t.expectQueryString(
+ new TestQueryMultiTest('a', ['b_1', '2_c'], ['d_3', '4_e']),
+ 'a:b_1,2_c:d_3,4_e,*'
+ );
+
+ t.expectQueryString(
+ new TestQueryMultiTest('a', ['b_1', '2_c'], []), //
+ 'a:b_1,2_c:*'
+ );
+});
+
+g.test('stringifyQuery,multi_file').fn(t => {
+ t.expectQueryString(
+ new TestQueryMultiFile('a', ['b_1', '2_c']), //
+ 'a:b_1,2_c,*'
+ );
+
+ t.expectQueryString(
+ new TestQueryMultiFile('a', []), //
+ 'a:*'
+ );
+});
+
+g.test('relativeQueryString,equal_or_child').fn(t => {
+ // Depth difference = 0
+ t.expectRelativeQueryString(
+ new TestQueryMultiFile('a', []), //
+ new TestQueryMultiFile('a', []), //
+ ''
+ );
+ t.expectRelativeQueryString(
+ new TestQueryMultiFile('a', ['b', 'c']), //
+ new TestQueryMultiFile('a', ['b', 'c']), //
+ ''
+ );
+ t.expectRelativeQueryString(
+ new TestQueryMultiTest('a', ['b', 'c'], ['d', 'e']), //
+ new TestQueryMultiTest('a', ['b', 'c'], ['d', 'e']), //
+ ''
+ );
+ t.expectRelativeQueryString(
+ new TestQueryMultiCase('a', ['b', 'c'], ['d', 'e'], { f: 0 }), //
+ new TestQueryMultiCase('a', ['b', 'c'], ['d', 'e'], { f: 0 }), //
+ ''
+ );
+ t.expectRelativeQueryString(
+ new TestQuerySingleCase('a', ['b', 'c'], ['d', 'e'], { f: 0, g: 1 }), //
+ new TestQuerySingleCase('a', ['b', 'c'], ['d', 'e'], { f: 0, g: 1 }), //
+ ''
+ );
+
+ // Depth difference = 1
+ t.expectRelativeQueryString(
+ new TestQueryMultiFile('a', []), //
+ new TestQueryMultiFile('a', ['b']), //
+ ':b,*'
+ );
+ t.expectRelativeQueryString(
+ new TestQueryMultiFile('a', ['b']), //
+ new TestQueryMultiFile('a', ['b', 'c']), //
+ ',c,*'
+ );
+ t.expectRelativeQueryString(
+ new TestQueryMultiFile('a', ['b', 'c']), //
+ new TestQueryMultiTest('a', ['b', 'c'], []), //
+ ':*'
+ );
+ t.expectRelativeQueryString(
+ new TestQueryMultiTest('a', ['b', 'c'], []), //
+ new TestQueryMultiTest('a', ['b', 'c'], ['d']), //
+ ':d,*'
+ );
+ t.expectRelativeQueryString(
+ new TestQueryMultiTest('a', ['b', 'c'], ['d']), //
+ new TestQueryMultiTest('a', ['b', 'c'], ['d', 'e']), //
+ ',e,*'
+ );
+ t.expectRelativeQueryString(
+ new TestQueryMultiTest('a', ['b', 'c'], ['d', 'e']), //
+ new TestQueryMultiCase('a', ['b', 'c'], ['d', 'e'], {}), //
+ ':*'
+ );
+ t.expectRelativeQueryString(
+ new TestQueryMultiCase('a', ['b', 'c'], ['d', 'e'], {}), //
+ new TestQueryMultiCase('a', ['b', 'c'], ['d', 'e'], { f: 0 }), //
+ ':f=0;*'
+ );
+ t.expectRelativeQueryString(
+ new TestQueryMultiCase('a', ['b', 'c'], ['d', 'e'], { f: 0 }), //
+ new TestQueryMultiCase('a', ['b', 'c'], ['d', 'e'], { f: 0, g: 1 }), //
+ ';g=1;*'
+ );
+ t.expectRelativeQueryString(
+ new TestQueryMultiCase('a', ['b', 'c'], ['d', 'e'], { f: 0, g: 1 }), //
+ new TestQuerySingleCase('a', ['b', 'c'], ['d', 'e'], { f: 0, g: 1 }), //
+ ''
+ );
+
+ // Depth difference = 2
+ t.expectRelativeQueryString(
+ new TestQueryMultiFile('a', []), //
+ new TestQueryMultiFile('a', ['b', 'c']), //
+ ':b,c,*'
+ );
+ t.expectRelativeQueryString(
+ new TestQueryMultiFile('a', ['b', 'c']), //
+ new TestQueryMultiTest('a', ['b', 'c'], ['d']), //
+ ':d,*'
+ );
+ t.expectRelativeQueryString(
+ new TestQueryMultiTest('a', ['b', 'c'], ['d']), //
+ new TestQueryMultiCase('a', ['b', 'c'], ['d', 'e'], {}), //
+ ',e:*'
+ );
+ t.expectRelativeQueryString(
+ new TestQueryMultiCase('a', ['b', 'c'], ['d', 'e'], {}), //
+ new TestQueryMultiCase('a', ['b', 'c'], ['d', 'e'], { f: 0, g: 1 }), //
+ ':f=0;g=1;*'
+ );
+ t.expectRelativeQueryString(
+ new TestQueryMultiCase('a', ['b', 'c'], ['d', 'e'], { f: 0, g: 1 }), //
+ new TestQuerySingleCase('a', ['b', 'c'], ['d', 'e'], { f: 0, g: 1, h: 2 }), //
+ ';h=2'
+ );
+ // Depth difference = 2
+ t.expectRelativeQueryString(
+ new TestQueryMultiFile('a', ['b']), //
+ new TestQueryMultiTest('a', ['b', 'c'], []), //
+ ',c:*'
+ );
+ t.expectRelativeQueryString(
+ new TestQueryMultiTest('a', ['b', 'c'], []), //
+ new TestQueryMultiTest('a', ['b', 'c'], ['d', 'e']), //
+ ':d,e,*'
+ );
+ t.expectRelativeQueryString(
+ new TestQueryMultiTest('a', ['b', 'c'], ['d', 'e']), //
+ new TestQueryMultiCase('a', ['b', 'c'], ['d', 'e'], { f: 0 }), //
+ ':f=0;*'
+ );
+ t.expectRelativeQueryString(
+ new TestQueryMultiCase('a', ['b', 'c'], ['d', 'e'], { f: 0 }), //
+ new TestQuerySingleCase('a', ['b', 'c'], ['d', 'e'], { f: 0, g: 1 }), //
+ ';g=1'
+ );
+
+ // Depth difference = 4
+ t.expectRelativeQueryString(
+ new TestQueryMultiFile('a', []), //
+ new TestQueryMultiTest('a', ['b', 'c'], ['d']), //
+ ':b,c:d,*'
+ );
+ t.expectRelativeQueryString(
+ new TestQueryMultiTest('a', ['b', 'c'], ['d']), //
+ new TestQueryMultiCase('a', ['b', 'c'], ['d', 'e'], { f: 0, g: 1 }), //
+ ',e:f=0;g=1;*'
+ );
+ // Depth difference = 4
+ t.expectRelativeQueryString(
+ new TestQueryMultiFile('a', ['b']), //
+ new TestQueryMultiTest('a', ['b', 'c'], ['d', 'e']), //
+ ',c:d,e,*'
+ );
+ t.expectRelativeQueryString(
+ new TestQueryMultiTest('a', ['b', 'c'], ['d', 'e']), //
+ new TestQuerySingleCase('a', ['b', 'c'], ['d', 'e'], { f: 0, g: 1 }), //
+ ':f=0;g=1'
+ );
+});
+
+g.test('relativeQueryString,unrelated').fn(t => {
+ t.shouldThrow('Error', () => {
+ relativeQueryString(
+ new TestQueryMultiFile('a', ['b', 'x']), //
+ new TestQueryMultiFile('a', ['b', 'c']) //
+ );
+ });
+ t.shouldThrow('Error', () => {
+ relativeQueryString(
+ new TestQueryMultiTest('a', ['b', 'c'], ['d', 'x']), //
+ new TestQueryMultiTest('a', ['b', 'c'], ['d', 'e']) //
+ );
+ });
+ t.shouldThrow('Error', () => {
+ relativeQueryString(
+ new TestQueryMultiCase('a', ['b', 'c'], ['d', 'e'], { f: 0 }), //
+ new TestQueryMultiCase('a', ['b', 'c'], ['d', 'e'], { f: 1 }) //
+ );
+ });
+});
diff --git a/dom/webgpu/tests/cts/checkout/src/unittests/serialization.spec.ts b/dom/webgpu/tests/cts/checkout/src/unittests/serialization.spec.ts
new file mode 100644
index 0000000000..9717ba3ecf
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/unittests/serialization.spec.ts
@@ -0,0 +1,413 @@
+export const description = `Unit tests for data cache serialization`;
+
+import { getIsBuildingDataCache, setIsBuildingDataCache } from '../common/framework/data_cache.js';
+import { makeTestGroup } from '../common/internal/test_group.js';
+import { objectEquals } from '../common/util/util.js';
+import {
+ deserializeExpectation,
+ serializeExpectation,
+} from '../webgpu/shader/execution/expression/case_cache.js';
+import BinaryStream from '../webgpu/util/binary_stream.js';
+import {
+ anyOf,
+ deserializeComparator,
+ serializeComparator,
+ skipUndefined,
+} from '../webgpu/util/compare.js';
+import { kValue } from '../webgpu/util/constants.js';
+import {
+ bool,
+ deserializeValue,
+ f16,
+ f32,
+ i16,
+ i32,
+ i8,
+ serializeValue,
+ toMatrix,
+ u16,
+ u32,
+ u8,
+ vec2,
+ vec3,
+ vec4,
+} from '../webgpu/util/conversion.js';
+import { deserializeFPInterval, FP, serializeFPInterval } from '../webgpu/util/floating_point.js';
+
+import { UnitTest } from './unit_test.js';
+
+export const g = makeTestGroup(UnitTest);
+
+g.test('value').fn(t => {
+ for (const value of [
+ u32(kValue.u32.min + 0),
+ u32(kValue.u32.min + 1),
+ u32(kValue.u32.min + 2),
+ u32(kValue.u32.max - 2),
+ u32(kValue.u32.max - 1),
+ u32(kValue.u32.max - 0),
+
+ u16(kValue.u16.min + 0),
+ u16(kValue.u16.min + 1),
+ u16(kValue.u16.min + 2),
+ u16(kValue.u16.max - 2),
+ u16(kValue.u16.max - 1),
+ u16(kValue.u16.max - 0),
+
+ u8(kValue.u8.min + 0),
+ u8(kValue.u8.min + 1),
+ u8(kValue.u8.min + 2),
+ u8(kValue.u8.max - 2),
+ u8(kValue.u8.max - 1),
+ u8(kValue.u8.max - 0),
+
+ i32(kValue.i32.negative.min + 0),
+ i32(kValue.i32.negative.min + 1),
+ i32(kValue.i32.negative.min + 2),
+ i32(kValue.i32.negative.max - 2),
+ i32(kValue.i32.negative.max - 1),
+ i32(kValue.i32.positive.min - 0),
+ i32(kValue.i32.positive.min + 1),
+ i32(kValue.i32.positive.min + 2),
+ i32(kValue.i32.positive.max - 2),
+ i32(kValue.i32.positive.max - 1),
+ i32(kValue.i32.positive.max - 0),
+
+ i16(kValue.i16.negative.min + 0),
+ i16(kValue.i16.negative.min + 1),
+ i16(kValue.i16.negative.min + 2),
+ i16(kValue.i16.negative.max - 2),
+ i16(kValue.i16.negative.max - 1),
+ i16(kValue.i16.positive.min + 0),
+ i16(kValue.i16.positive.min + 1),
+ i16(kValue.i16.positive.min + 2),
+ i16(kValue.i16.positive.max - 2),
+ i16(kValue.i16.positive.max - 1),
+ i16(kValue.i16.positive.max - 0),
+
+ i8(kValue.i8.negative.min + 0),
+ i8(kValue.i8.negative.min + 1),
+ i8(kValue.i8.negative.min + 2),
+ i8(kValue.i8.negative.max - 2),
+ i8(kValue.i8.negative.max - 1),
+ i8(kValue.i8.positive.min + 0),
+ i8(kValue.i8.positive.min + 1),
+ i8(kValue.i8.positive.min + 2),
+ i8(kValue.i8.positive.max - 2),
+ i8(kValue.i8.positive.max - 1),
+ i8(kValue.i8.positive.max - 0),
+
+ f32(0),
+ f32(-0),
+ f32(1),
+ f32(-1),
+ f32(0.5),
+ f32(-0.5),
+ f32(kValue.f32.positive.max),
+ f32(kValue.f32.positive.min),
+ f32(kValue.f32.positive.subnormal.max),
+ f32(kValue.f32.positive.subnormal.min),
+ f32(kValue.f32.negative.subnormal.max),
+ f32(kValue.f32.negative.subnormal.min),
+ f32(kValue.f32.positive.infinity),
+ f32(kValue.f32.negative.infinity),
+
+ f16(0),
+ f16(-0),
+ f16(1),
+ f16(-1),
+ f16(0.5),
+ f16(-0.5),
+ f16(kValue.f16.positive.max),
+ f16(kValue.f16.positive.min),
+ f16(kValue.f16.positive.subnormal.max),
+ f16(kValue.f16.positive.subnormal.min),
+ f16(kValue.f16.negative.subnormal.max),
+ f16(kValue.f16.negative.subnormal.min),
+ f16(kValue.f16.positive.infinity),
+ f16(kValue.f16.negative.infinity),
+
+ bool(true),
+ bool(false),
+
+ vec2(f32(1), f32(2)),
+ vec3(u32(1), u32(2), u32(3)),
+ vec4(bool(false), bool(true), bool(false), bool(true)),
+
+ toMatrix(
+ [
+ [0.0, 1.0],
+ [2.0, 3.0],
+ ],
+ f32
+ ),
+ toMatrix(
+ [
+ [0.0, 1.0, 2.0],
+ [3.0, 4.0, 5.0],
+ ],
+ f16
+ ),
+ toMatrix(
+ [
+ [0.0, 1.0, 2.0, 3.0],
+ [4.0, 5.0, 6.0, 7.0],
+ ],
+ f32
+ ),
+ toMatrix(
+ [
+ [0.0, 1.0],
+ [2.0, 3.0],
+ [4.0, 5.0],
+ ],
+ f16
+ ),
+ toMatrix(
+ [
+ [0.0, 1.0, 2.0],
+ [3.0, 4.0, 5.0],
+ [6.0, 7.0, 8.0],
+ ],
+ f32
+ ),
+ toMatrix(
+ [
+ [0.0, 1.0, 2.0, 3.0],
+ [4.0, 5.0, 6.0, 7.0],
+ [8.0, 9.0, 10.0, 11.0],
+ ],
+ f16
+ ),
+ toMatrix(
+ [
+ [0.0, 1.0],
+ [2.0, 3.0],
+ [4.0, 5.0],
+ [6.0, 7.0],
+ ],
+ f32
+ ),
+ toMatrix(
+ [
+ [0.0, 1.0, 2.0],
+ [3.0, 4.0, 5.0],
+ [6.0, 7.0, 8.0],
+ [9.0, 10.0, 11.0],
+ ],
+ f16
+ ),
+ toMatrix(
+ [
+ [0.0, 1.0, 2.0, 3.0],
+ [4.0, 5.0, 6.0, 7.0],
+ [8.0, 9.0, 10.0, 11.0],
+ [12.0, 13.0, 14.0, 15.0],
+ ],
+ f32
+ ),
+ ]) {
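+    // Round-trip each value through a BinaryStream (serialize into a scratch
+    // buffer, then deserialize from it) and check structural equality.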
+ const s = new BinaryStream(new Uint8Array(1024).buffer);
+ serializeValue(s, value);
+ const d = new BinaryStream(s.buffer().buffer);
+ const deserialized = deserializeValue(d);
+ t.expect(
+ objectEquals(value, deserialized),
+ `${value.type} ${value} -> serialize -> deserialize -> ${deserialized}
+buffer: ${s.buffer()}`
+ );
+ }
+});
+
+g.test('fpinterval_f32').fn(t => {
+ for (const interval of [
+ FP.f32.toInterval(0),
+ FP.f32.toInterval(-0),
+ FP.f32.toInterval(1),
+ FP.f32.toInterval(-1),
+ FP.f32.toInterval(0.5),
+ FP.f32.toInterval(-0.5),
+ FP.f32.toInterval(kValue.f32.positive.max),
+ FP.f32.toInterval(kValue.f32.positive.min),
+ FP.f32.toInterval(kValue.f32.positive.subnormal.max),
+ FP.f32.toInterval(kValue.f32.positive.subnormal.min),
+ FP.f32.toInterval(kValue.f32.negative.subnormal.max),
+ FP.f32.toInterval(kValue.f32.negative.subnormal.min),
+ FP.f32.toInterval(kValue.f32.positive.infinity),
+ FP.f32.toInterval(kValue.f32.negative.infinity),
+
+ FP.f32.toInterval([-0, 0]),
+ FP.f32.toInterval([-1, 1]),
+ FP.f32.toInterval([-0.5, 0.5]),
+ FP.f32.toInterval([kValue.f32.positive.min, kValue.f32.positive.max]),
+ FP.f32.toInterval([kValue.f32.positive.subnormal.min, kValue.f32.positive.subnormal.max]),
+ FP.f32.toInterval([kValue.f32.negative.subnormal.min, kValue.f32.negative.subnormal.max]),
+ FP.f32.toInterval([kValue.f32.negative.infinity, kValue.f32.positive.infinity]),
+ ]) {
+ const s = new BinaryStream(new Uint8Array(1024).buffer);
+ serializeFPInterval(s, interval);
+ const d = new BinaryStream(s.buffer().buffer);
+ const deserialized = deserializeFPInterval(d);
+ t.expect(
+ objectEquals(interval, deserialized),
+ `interval ${interval} -> serialize -> deserialize -> ${deserialized}`
+ );
+ }
+});
+
+g.test('fpinterval_f16').fn(t => {
+ for (const interval of [
+ FP.f16.toInterval(0),
+ FP.f16.toInterval(-0),
+ FP.f16.toInterval(1),
+ FP.f16.toInterval(-1),
+ FP.f16.toInterval(0.5),
+ FP.f16.toInterval(-0.5),
+ FP.f16.toInterval(kValue.f16.positive.max),
+ FP.f16.toInterval(kValue.f16.positive.min),
+ FP.f16.toInterval(kValue.f16.positive.subnormal.max),
+ FP.f16.toInterval(kValue.f16.positive.subnormal.min),
+ FP.f16.toInterval(kValue.f16.negative.subnormal.max),
+ FP.f16.toInterval(kValue.f16.negative.subnormal.min),
+ FP.f16.toInterval(kValue.f16.positive.infinity),
+ FP.f16.toInterval(kValue.f16.negative.infinity),
+
+ FP.f16.toInterval([-0, 0]),
+ FP.f16.toInterval([-1, 1]),
+ FP.f16.toInterval([-0.5, 0.5]),
+ FP.f16.toInterval([kValue.f16.positive.min, kValue.f16.positive.max]),
+ FP.f16.toInterval([kValue.f16.positive.subnormal.min, kValue.f16.positive.subnormal.max]),
+ FP.f16.toInterval([kValue.f16.negative.subnormal.min, kValue.f16.negative.subnormal.max]),
+ FP.f16.toInterval([kValue.f16.negative.infinity, kValue.f16.positive.infinity]),
+ ]) {
+ const s = new BinaryStream(new Uint8Array(1024).buffer);
+ serializeFPInterval(s, interval);
+ const d = new BinaryStream(s.buffer().buffer);
+ const deserialized = deserializeFPInterval(d);
+ t.expect(
+ objectEquals(interval, deserialized),
+ `interval ${interval} -> serialize -> deserialize -> ${deserialized}`
+ );
+ }
+});
+
+g.test('fpinterval_abstract').fn(t => {
+ for (const interval of [
+ FP.abstract.toInterval(0),
+ FP.abstract.toInterval(-0),
+ FP.abstract.toInterval(1),
+ FP.abstract.toInterval(-1),
+ FP.abstract.toInterval(0.5),
+ FP.abstract.toInterval(-0.5),
+ FP.abstract.toInterval(kValue.f64.positive.max),
+ FP.abstract.toInterval(kValue.f64.positive.min),
+ FP.abstract.toInterval(kValue.f64.positive.subnormal.max),
+ FP.abstract.toInterval(kValue.f64.positive.subnormal.min),
+ FP.abstract.toInterval(kValue.f64.negative.subnormal.max),
+ FP.abstract.toInterval(kValue.f64.negative.subnormal.min),
+ FP.abstract.toInterval(kValue.f64.positive.infinity),
+ FP.abstract.toInterval(kValue.f64.negative.infinity),
+
+ FP.abstract.toInterval([-0, 0]),
+ FP.abstract.toInterval([-1, 1]),
+ FP.abstract.toInterval([-0.5, 0.5]),
+ FP.abstract.toInterval([kValue.f64.positive.min, kValue.f64.positive.max]),
+ FP.abstract.toInterval([kValue.f64.positive.subnormal.min, kValue.f64.positive.subnormal.max]),
+ FP.abstract.toInterval([kValue.f64.negative.subnormal.min, kValue.f64.negative.subnormal.max]),
+ FP.abstract.toInterval([kValue.f64.negative.infinity, kValue.f64.positive.infinity]),
+ ]) {
+ const s = new BinaryStream(new Uint8Array(1024).buffer);
+ serializeFPInterval(s, interval);
+ const d = new BinaryStream(s.buffer().buffer);
+ const deserialized = deserializeFPInterval(d);
+ t.expect(
+ objectEquals(interval, deserialized),
+ `interval ${interval} -> serialize -> deserialize -> ${deserialized}`
+ );
+ }
+});
+
+g.test('expression_expectation').fn(t => {
+ for (const expectation of [
+ // Value
+ f32(123),
+ vec2(f32(1), f32(2)),
+ // Interval
+ FP.f32.toInterval([-0.5, 0.5]),
+ FP.f32.toInterval([kValue.f32.positive.min, kValue.f32.positive.max]),
+ // Intervals
+ [FP.f32.toInterval([-8.0, 0.5]), FP.f32.toInterval([2.0, 4.0])],
+ ]) {
+ const s = new BinaryStream(new Uint8Array(1024).buffer);
+ serializeExpectation(s, expectation);
+ const d = new BinaryStream(s.buffer().buffer);
+ const deserialized = deserializeExpectation(d);
+ t.expect(
+ objectEquals(expectation, deserialized),
+ `expectation ${expectation} -> serialize -> deserialize -> ${deserialized}`
+ );
+ }
+});
+
+/**
+ * Temporarily enables building of the data cache while running `f`.
+ * Required for Comparators to serialize.
+ */
+function enableBuildingDataCache(f: () => void) {
+ const wasBuildingDataCache = getIsBuildingDataCache();
+ setIsBuildingDataCache(true);
+ f();
+ setIsBuildingDataCache(wasBuildingDataCache);
+}
+
+g.test('anyOf').fn(t => {
+ enableBuildingDataCache(() => {
+ for (const c of [
+ {
+ comparator: anyOf(i32(123)),
+ testCases: [f32(0), f32(10), f32(122), f32(123), f32(124), f32(200)],
+ },
+ ]) {
+ const s = new BinaryStream(new Uint8Array(1024).buffer);
+ serializeComparator(s, c.comparator);
+ const d = new BinaryStream(s.buffer().buffer);
+ const deserialized = deserializeComparator(d);
+ for (const val of c.testCases) {
+ const got = deserialized.compare(val);
+ const expect = c.comparator.compare(val);
+ t.expect(
+ got.matched === expect.matched,
+          `comparator(${val}): got: ${got.matched}, expect: ${expect.matched}`
+ );
+ }
+ }
+ });
+});
+
+g.test('skipUndefined').fn(t => {
+ enableBuildingDataCache(() => {
+ for (const c of [
+ {
+ comparator: skipUndefined(i32(123)),
+ testCases: [f32(0), f32(10), f32(122), f32(123), f32(124), f32(200)],
+ },
+ {
+ comparator: skipUndefined(undefined),
+ testCases: [f32(0), f32(10), f32(122), f32(123), f32(124), f32(200)],
+ },
+ ]) {
+ const s = new BinaryStream(new Uint8Array(1024).buffer);
+ serializeComparator(s, c.comparator);
+ const d = new BinaryStream(s.buffer().buffer);
+ const deserialized = deserializeComparator(d);
+ for (const val of c.testCases) {
+ const got = deserialized.compare(val);
+ const expect = c.comparator.compare(val);
+ t.expect(
+ got.matched === expect.matched,
+          `comparator(${val}): got: ${got.matched}, expect: ${expect.matched}`
+ );
+ }
+ }
+ });
+});
diff --git a/dom/webgpu/tests/cts/checkout/src/unittests/test_group.spec.ts b/dom/webgpu/tests/cts/checkout/src/unittests/test_group.spec.ts
new file mode 100644
index 0000000000..aca8d298e6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/unittests/test_group.spec.ts
@@ -0,0 +1,437 @@
+/* eslint-disable @typescript-eslint/require-await */
+export const description = `
+Unit tests for TestGroup.
+`;
+
+import { Fixture } from '../common/framework/fixture.js';
+import { makeTestGroup } from '../common/framework/test_group.js';
+import { TestQueryMultiFile } from '../common/internal/query/query.js';
+import { kQueryMaxLength, makeTestGroupForUnitTesting } from '../common/internal/test_group.js';
+import { assert } from '../common/util/util.js';
+
+import { TestGroupTest } from './test_group_test.js';
+import { UnitTest } from './unit_test.js';
+
+export const g = makeTestGroup(TestGroupTest);
+
+g.test('UnitTest_fixture').fn(async t0 => {
+ let seen = 0;
+ function count(_t: Fixture): void {
+ seen++;
+ }
+
+ const g = makeTestGroupForUnitTesting(UnitTest);
+
+ g.test('test').fn(count);
+ g.test('testp')
+ .paramsSimple([{ a: 1 }])
+ .fn(count);
+
+ await t0.run(g);
+ t0.expect(seen === 2);
+});
+
+g.test('custom_fixture').fn(async t0 => {
+ let seen = 0;
+ class Counter extends UnitTest {
+ count(): void {
+ seen++;
+ }
+ }
+
+ const g = makeTestGroupForUnitTesting(Counter);
+
+ g.test('test').fn(t => {
+ t.count();
+ });
+ g.test('testp')
+ .paramsSimple([{ a: 1 }])
+ .fn(t => {
+ t.count();
+ });
+
+ await t0.run(g);
+ t0.expect(seen === 2);
+});
+
+g.test('stack').fn(async t0 => {
+ const g = makeTestGroupForUnitTesting(UnitTest);
+
+ const doNestedThrow1 = () => {
+ throw new Error('goodbye');
+ };
+
+ const doNestedThrow2 = () => doNestedThrow1();
+
+ g.test('fail').fn(t => {
+ t.fail();
+ });
+ g.test('throw').fn(_t => {
+ throw new Error('hello');
+ });
+ g.test('throw_nested').fn(_t => {
+ doNestedThrow2();
+ });
+
+ const res = await t0.run(g);
+
+ const search = /unittests[/\\]test_group\.spec\.[tj]s/;
+ t0.expect(res.size > 0);
+ for (const { logs } of res.values()) {
+ assert(logs !== undefined, 'expected logs');
+ t0.expect(logs.some(l => search.test(l.toJSON())));
+ t0.expect(search.test(logs[logs.length - 1].toJSON()));
+ }
+});
+
+g.test('no_fn').fn(t => {
+ const g = makeTestGroupForUnitTesting(UnitTest);
+
+ g.test('missing');
+
+ t.shouldThrow('Error', () => {
+ g.validate(new TestQueryMultiFile('s', ['f']));
+ });
+});
+
+g.test('duplicate_test_name').fn(t => {
+ const g = makeTestGroupForUnitTesting(UnitTest);
+ g.test('abc').fn(() => {});
+
+ t.shouldThrow('Error', () => {
+ g.test('abc').fn(() => {});
+ });
+});
+
+g.test('duplicate_test_params,none').fn(() => {
+ {
+ const g = makeTestGroupForUnitTesting(UnitTest);
+ g.test('abc')
+ .paramsSimple([])
+ .fn(() => {});
+ g.validate(new TestQueryMultiFile('s', ['f']));
+ }
+
+ {
+ const g = makeTestGroupForUnitTesting(UnitTest);
+ g.test('abc').fn(() => {});
+ g.validate(new TestQueryMultiFile('s', ['f']));
+ }
+
+ {
+ const g = makeTestGroupForUnitTesting(UnitTest);
+ g.test('abc')
+ .paramsSimple([
+ { a: 1 }, //
+ ])
+ .fn(() => {});
+ g.validate(new TestQueryMultiFile('s', ['f']));
+ }
+});
+
+g.test('duplicate_test_params,basic').fn(t => {
+ {
+ const g = makeTestGroupForUnitTesting(UnitTest);
+ const builder = g.test('abc');
+ t.shouldThrow('Error', () => {
+ builder.paramsSimple([
+ { a: 1 }, //
+ { a: 1 },
+ ]);
+ g.validate(new TestQueryMultiFile('s', ['f']));
+ });
+ }
+ {
+ const g = makeTestGroupForUnitTesting(UnitTest);
+ g.test('abc')
+ .params(u =>
+ u.expandWithParams(() => [
+ { a: 1 }, //
+ { a: 1 },
+ ])
+ )
+ .fn(() => {});
+ t.shouldThrow('Error', () => {
+ g.validate(new TestQueryMultiFile('s', ['f']));
+ });
+ }
+ {
+ const g = makeTestGroupForUnitTesting(UnitTest);
+ g.test('abc')
+ .paramsSimple([
+ { a: 1, b: 3 }, //
+ { b: 3, a: 1 },
+ ])
+ .fn(() => {});
+ t.shouldThrow('Error', () => {
+ g.validate(new TestQueryMultiFile('s', ['f']));
+ });
+ }
+});
+
+g.test('duplicate_test_params,with_different_private_params').fn(t => {
+ {
+ const g = makeTestGroupForUnitTesting(UnitTest);
+ const builder = g.test('abc');
+ t.shouldThrow('Error', () => {
+ builder.paramsSimple([
+ { a: 1, _b: 1 }, //
+ { a: 1, _b: 2 },
+ ]);
+ });
+ }
+ {
+ const g = makeTestGroupForUnitTesting(UnitTest);
+ g.test('abc')
+ .params(u =>
+ u.expandWithParams(() => [
+ { a: 1, _b: 1 }, //
+ { a: 1, _b: 2 },
+ ])
+ )
+ .fn(() => {});
+ t.shouldThrow('Error', () => {
+ g.validate(new TestQueryMultiFile('s', ['f']));
+ });
+ }
+});
+
+g.test('invalid_test_name').fn(t => {
+ const g = makeTestGroupForUnitTesting(UnitTest);
+
+ const badChars = Array.from('"`~@#$+=\\|!^&*[]<>{}-\'. ');
+ for (const char of badChars) {
+ const name = 'a' + char + 'b';
+ t.shouldThrow(
+ 'Error',
+ () => {
+ g.test(name).fn(() => {});
+ },
+ { message: name }
+ );
+ }
+});
+
+g.test('long_test_query,long_test_name').fn(t => {
+ const g = makeTestGroupForUnitTesting(UnitTest);
+
+ const long = Array(kQueryMaxLength - 5).join('a');
+
+ const fileQuery = new TestQueryMultiFile('s', ['f']);
+ g.test(long).unimplemented();
+ g.validate(fileQuery);
+
+ g.test(long + 'a').unimplemented();
+ t.shouldThrow(
+ 'Error',
+ () => {
+ g.validate(fileQuery);
+ },
+ { message: long }
+ );
+});
+
+g.test('long_case_query,long_test_name').fn(t => {
+ const g = makeTestGroupForUnitTesting(UnitTest);
+
+ const long = Array(kQueryMaxLength - 5).join('a');
+
+ const fileQuery = new TestQueryMultiFile('s', ['f']);
+ g.test(long).fn(() => {});
+ g.validate(fileQuery);
+
+ g.test(long + 'a').fn(() => {});
+ t.shouldThrow(
+ 'Error',
+ () => {
+ g.validate(fileQuery);
+ },
+ { message: long }
+ );
+});
+
+g.test('long_case_query,long_case_name').fn(t => {
+ const g = makeTestGroupForUnitTesting(UnitTest);
+
+ const long = Array(kQueryMaxLength - 9).join('a');
+
+ const fileQuery = new TestQueryMultiFile('s', ['f']);
+ g.test('t')
+ .paramsSimple([{ x: long }])
+ .fn(() => {});
+ g.validate(fileQuery);
+
+ g.test('u')
+ .paramsSimple([{ x: long + 'a' }])
+ .fn(() => {});
+ t.shouldThrow(
+ 'Error',
+ () => {
+ g.validate(fileQuery);
+ },
+ { message: long }
+ );
+});
+
+g.test('param_value,valid').fn(() => {
+ const g = makeTestGroup(UnitTest);
+ g.test('a').paramsSimple([{ x: JSON.stringify({ a: 1, b: 2 }) }]);
+});
+
+g.test('param_value,invalid').fn(t => {
+ for (const badChar of ';=*') {
+ const g = makeTestGroupForUnitTesting(UnitTest);
+ const builder = g.test('a');
+ t.shouldThrow('Error', () => {
+ builder.paramsSimple([{ badChar }]);
+ });
+ }
+});
+
+g.test('subcases').fn(async t0 => {
+ const g = makeTestGroupForUnitTesting(UnitTest);
+ g.test('a')
+ .paramsSubcasesOnly(u =>
+ u //
+ .combineWithParams([{ a: 1 }])
+ )
+ .fn(t => {
+ t.expect(t.params.a === 1, 'a must be 1');
+ });
+
+ function* gen({ a, b }: { a?: number; b?: number }) {
+ if (b === 2) {
+ yield { ret: 2 };
+ } else if (a === 1) {
+ yield { ret: 1 };
+ } else {
+ yield { ret: -1 };
+ }
+ }
+ g.test('b')
+ .params(u =>
+ u
+ .combineWithParams([{ a: 1 }, { b: 2 }])
+ .beginSubcases()
+ .expandWithParams(gen)
+ )
+ .fn(t => {
+ const { a, b, ret } = t.params;
+ t.expect((a === 1 && ret === 1) || (b === 2 && ret === 2));
+ });
+
+ const result = await t0.run(g);
+ t0.expect(Array.from(result.values()).every(v => v.status === 'pass'));
+});
+
+g.test('subcases,skip')
+ .desc(
+    'If all subcases are skipped then the status is "skip". If at least one subcase passes, the status is "pass"'
+ )
+ .params(u => u.combine('allSkip', [false, true]))
+ .fn(async t0 => {
+ const { allSkip } = t0.params;
+ const g = makeTestGroupForUnitTesting(UnitTest);
+ g.test('a')
+ .params(u => u.beginSubcases().combine('do', ['pass', 'skip', 'pass']))
+ .fn(t => {
+ t.skipIf(allSkip || t.params.do === 'skip');
+ });
+ const result = await t0.run(g);
+ const values = Array.from(result.values());
+ t0.expect(values.length === 1);
+ const expectedStatus = allSkip ? 'skip' : 'pass';
+ t0.expect(
+ values[0].status === expectedStatus,
+      `expect: ${values[0].status} === ${expectedStatus}, allSkip: ${allSkip}`
+ );
+ });
+
+g.test('exceptions')
+ .params(u =>
+ u
+ .combine('useSubcases', [false, true]) //
+ .combine('useDOMException', [false, true])
+ )
+ .fn(async t0 => {
+ const { useSubcases, useDOMException } = t0.params;
+ const g = makeTestGroupForUnitTesting(UnitTest);
+
+ const b1 = g.test('a');
+ let b2;
+ if (useSubcases) {
+ b2 = b1.paramsSubcasesOnly(u => u);
+ } else {
+ b2 = b1.params(u => u);
+ }
+ b2.fn(_t => {
+ if (useDOMException) {
+ throw new DOMException('Message!', 'Name!');
+ } else {
+ throw new Error('Message!');
+ }
+ });
+
+ const result = await t0.run(g);
+ const values = Array.from(result.values());
+ t0.expect(values.length === 1);
+ t0.expect(values[0].status === 'fail');
+ });
+
+g.test('throws').fn(async t0 => {
+ const g = makeTestGroupForUnitTesting(UnitTest);
+
+ g.test('a').fn(_t => {
+ throw new Error();
+ });
+
+ const result = await t0.run(g);
+ const values = Array.from(result.values());
+ t0.expect(values.length === 1);
+ t0.expect(values[0].status === 'fail');
+});
+
+g.test('shouldThrow').fn(async t0 => {
+ t0.shouldThrow('TypeError', () => {
+ throw new TypeError();
+ });
+
+ const g = makeTestGroupForUnitTesting(UnitTest);
+
+ g.test('a').fn(t => {
+ t.shouldThrow('Error', () => {
+ throw new TypeError();
+ });
+ });
+
+ const result = await t0.run(g);
+ const values = Array.from(result.values());
+ t0.expect(values.length === 1);
+ t0.expect(values[0].status === 'fail');
+});
+
+g.test('shouldReject').fn(async t0 => {
+ t0.shouldReject(
+ 'TypeError',
+ (async () => {
+ throw new TypeError();
+ })()
+ );
+
+ const g = makeTestGroupForUnitTesting(UnitTest);
+
+ g.test('a').fn(t => {
+ t.shouldReject(
+ 'Error',
+ (async () => {
+ throw new TypeError();
+ })()
+ );
+ });
+
+ const result = await t0.run(g);
+ // Fails even though shouldReject doesn't fail until after the test function ends
+ const values = Array.from(result.values());
+ t0.expect(values.length === 1);
+ t0.expect(values[0].status === 'fail');
+});
diff --git a/dom/webgpu/tests/cts/checkout/src/unittests/test_group_test.ts b/dom/webgpu/tests/cts/checkout/src/unittests/test_group_test.ts
new file mode 100644
index 0000000000..5fdc02177b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/unittests/test_group_test.ts
@@ -0,0 +1,34 @@
+import { Logger, LogResults } from '../common/internal/logging/logger.js';
+import { TestQuerySingleCase } from '../common/internal/query/query.js';
+import { IterableTestGroup, TestCaseID } from '../common/internal/test_group.js';
+import { objectEquals } from '../common/util/util.js';
+
+import { UnitTest } from './unit_test.js';
+
+export class TestGroupTest extends UnitTest {
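+  /** Runs every case in the group under a fresh Logger and returns the per-case results. */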
+ async run(g: IterableTestGroup): Promise<LogResults> {
+ const logger = new Logger({ overrideDebugMode: true });
+ for (const t of g.iterate()) {
+ for (const rc of t.iterate(null)) {
+ const query = new TestQuerySingleCase('xx', ['yy'], rc.id.test, rc.id.params);
+ const [rec] = logger.record(query.toString());
+ await rc.run(rec, query, []);
+ }
+ }
+ return logger.results;
+ }
+
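+  /** Expects the group to generate exactly the given case IDs, in order. */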
+ expectCases(g: IterableTestGroup, cases: TestCaseID[]): void {
+ const gcases = [];
+ for (const t of g.iterate()) {
+ gcases.push(...Array.from(t.iterate(null), c => c.id));
+ }
+ this.expect(
+ objectEquals(gcases, cases),
+ `expected
+ ${JSON.stringify(cases)}
+got
+ ${JSON.stringify(gcases)}`
+ );
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/unittests/test_query.spec.ts b/dom/webgpu/tests/cts/checkout/src/unittests/test_query.spec.ts
new file mode 100644
index 0000000000..4a744c49e9
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/unittests/test_query.spec.ts
@@ -0,0 +1,143 @@
+export const description = `
+Tests for TestQuery
+`;
+
+import { makeTestGroup } from '../common/framework/test_group.js';
+import { parseQuery } from '../common/internal/query/parseQuery.js';
+import {
+ TestQueryMultiFile,
+ TestQueryMultiTest,
+ TestQueryMultiCase,
+ TestQuerySingleCase,
+ TestQuery,
+} from '../common/internal/query/query.js';
+
+import { UnitTest } from './unit_test.js';
+
+class F extends UnitTest {
+ expectToString(q: TestQuery, exp: string) {
+ this.expect(q.toString() === exp);
+ }
+
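+  /**
+   * Structurally compares two queries: level, suite, file/test path parts, and
+   * case params (treating NaN-vs-NaN as equal and distinguishing -0 from 0).
+   */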
+ expectQueriesEqual(q1: TestQuery, q2: TestQuery) {
+ this.expect(q1.level === q2.level);
+
+ if (q1.level >= 1) {
+ this.expect(q1.isMultiFile === q2.isMultiFile);
+ this.expect(q1.suite === q2.suite);
+ this.expect(q1.filePathParts.length === q2.filePathParts.length);
+ for (let i = 0; i < q1.filePathParts.length; i++) {
+ this.expect(q1.filePathParts[i] === q2.filePathParts[i]);
+ }
+ }
+
+ if (q1.level >= 2) {
+ const p1 = q1 as TestQueryMultiTest;
+ const p2 = q2 as TestQueryMultiTest;
+
+ this.expect(p1.isMultiTest === p2.isMultiTest);
+ this.expect(p1.testPathParts.length === p2.testPathParts.length);
+ for (let i = 0; i < p1.testPathParts.length; i++) {
+ this.expect(p1.testPathParts[i] === p2.testPathParts[i]);
+ }
+ }
+
+ if (q1.level >= 3) {
+ const p1 = q1 as TestQueryMultiCase;
+ const p2 = q2 as TestQueryMultiCase;
+
+ this.expect(p1.isMultiCase === p2.isMultiCase);
+ this.expect(Object.keys(p1.params).length === Object.keys(p2.params).length);
+ for (const key of Object.keys(p1.params)) {
+ this.expect(key in p2.params);
+ const v1 = p1.params[key];
+ const v2 = p2.params[key];
+ this.expect(
+ v1 === v2 ||
+ (typeof v1 === 'number' && isNaN(v1)) === (typeof v2 === 'number' && isNaN(v2))
+ );
+ this.expect(Object.is(v1, -0) === Object.is(v2, -0));
+ }
+ }
+ }
+
+ expectQueryParse(s: string, q: TestQuery) {
+ this.expectQueriesEqual(q, parseQuery(s));
+ }
+}
+
+export const g = makeTestGroup(F);
+
+g.test('constructor').fn(t => {
+ t.shouldThrow('Error', () => new TestQueryMultiTest('suite', [], []));
+
+ t.shouldThrow('Error', () => new TestQueryMultiCase('suite', ['a'], [], {}));
+ t.shouldThrow('Error', () => new TestQueryMultiCase('suite', [], ['c'], {}));
+ t.shouldThrow('Error', () => new TestQueryMultiCase('suite', [], [], {}));
+
+ t.shouldThrow('Error', () => new TestQuerySingleCase('suite', ['a'], [], {}));
+ t.shouldThrow('Error', () => new TestQuerySingleCase('suite', [], ['c'], {}));
+ t.shouldThrow('Error', () => new TestQuerySingleCase('suite', [], [], {}));
+});
+
+g.test('toString').fn(t => {
+ t.expectToString(new TestQueryMultiFile('s', []), 's:*');
+ t.expectToString(new TestQueryMultiFile('s', ['a']), 's:a,*');
+ t.expectToString(new TestQueryMultiFile('s', ['a', 'b']), 's:a,b,*');
+ t.expectToString(new TestQueryMultiTest('s', ['a', 'b'], []), 's:a,b:*');
+ t.expectToString(new TestQueryMultiTest('s', ['a', 'b'], ['c']), 's:a,b:c,*');
+ t.expectToString(new TestQueryMultiTest('s', ['a', 'b'], ['c', 'd']), 's:a,b:c,d,*');
+ t.expectToString(new TestQueryMultiCase('s', ['a', 'b'], ['c', 'd'], {}), 's:a,b:c,d:*');
+ t.expectToString(
+ new TestQueryMultiCase('s', ['a', 'b'], ['c', 'd'], { x: 1 }),
+ 's:a,b:c,d:x=1;*'
+ );
+ t.expectToString(
+ new TestQueryMultiCase('s', ['a', 'b'], ['c', 'd'], { x: 1, y: 2 }),
+ 's:a,b:c,d:x=1;y=2;*'
+ );
+ t.expectToString(
+ new TestQuerySingleCase('s', ['a', 'b'], ['c', 'd'], { x: 1, y: 2 }),
+ 's:a,b:c,d:x=1;y=2'
+ );
+ t.expectToString(new TestQuerySingleCase('s', ['a', 'b'], ['c', 'd'], {}), 's:a,b:c,d:');
+
+  // Test handling of magic param values that convert to NaN/undefined/Infinity/etc.
+ t.expectToString(new TestQuerySingleCase('s', ['a'], ['b'], { c: NaN }), 's:a:b:c="_nan_"');
+ t.expectToString(
+ new TestQuerySingleCase('s', ['a'], ['b'], { c: undefined }),
+ 's:a:b:c="_undef_"'
+ );
+ t.expectToString(new TestQuerySingleCase('s', ['a'], ['b'], { c: -0 }), 's:a:b:c="_negzero_"');
+});
+
+g.test('parseQuery').fn(t => {
+ t.expectQueryParse('s:*', new TestQueryMultiFile('s', []));
+ t.expectQueryParse('s:a,*', new TestQueryMultiFile('s', ['a']));
+ t.expectQueryParse('s:a,b,*', new TestQueryMultiFile('s', ['a', 'b']));
+ t.expectQueryParse('s:a,b:*', new TestQueryMultiTest('s', ['a', 'b'], []));
+ t.expectQueryParse('s:a,b:c,*', new TestQueryMultiTest('s', ['a', 'b'], ['c']));
+ t.expectQueryParse('s:a,b:c,d,*', new TestQueryMultiTest('s', ['a', 'b'], ['c', 'd']));
+ t.expectQueryParse('s:a,b:c,d:*', new TestQueryMultiCase('s', ['a', 'b'], ['c', 'd'], {}));
+ t.expectQueryParse(
+ 's:a,b:c,d:x=1;*',
+ new TestQueryMultiCase('s', ['a', 'b'], ['c', 'd'], { x: 1 })
+ );
+ t.expectQueryParse(
+ 's:a,b:c,d:x=1;y=2;*',
+ new TestQueryMultiCase('s', ['a', 'b'], ['c', 'd'], { x: 1, y: 2 })
+ );
+ t.expectQueryParse(
+ 's:a,b:c,d:x=1;y=2',
+ new TestQuerySingleCase('s', ['a', 'b'], ['c', 'd'], { x: 1, y: 2 })
+ );
+ t.expectQueryParse('s:a,b:c,d:', new TestQuerySingleCase('s', ['a', 'b'], ['c', 'd'], {}));
+
+  // Test handling of magic param values that convert to NaN/undefined/Infinity/etc.
+ t.expectQueryParse('s:a:b:c="_nan_"', new TestQuerySingleCase('s', ['a'], ['b'], { c: NaN }));
+ t.expectQueryParse(
+ 's:a:b:c="_undef_"',
+ new TestQuerySingleCase('s', ['a'], ['b'], { c: undefined })
+ );
+ t.expectQueryParse('s:a:b:c="_negzero_"', new TestQuerySingleCase('s', ['a'], ['b'], { c: -0 }));
+});
diff --git a/dom/webgpu/tests/cts/checkout/src/unittests/texture_ok.spec.ts b/dom/webgpu/tests/cts/checkout/src/unittests/texture_ok.spec.ts
new file mode 100644
index 0000000000..f1e6971a74
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/unittests/texture_ok.spec.ts
@@ -0,0 +1,161 @@
+export const description = `
+Test for texture_ok utils.
+`;
+
+import { makeTestGroup } from '../common/framework/test_group.js';
+import { typedArrayFromParam, typedArrayParam } from '../common/util/util.js';
+import { RegularTextureFormat } from '../webgpu/format_info.js';
+import { TexelView } from '../webgpu/util/texture/texel_view.js';
+import { findFailedPixels } from '../webgpu/util/texture/texture_ok.js';
+
+import { UnitTest } from './unit_test.js';
+
+class F extends UnitTest {
+ test(act: string, exp: string): void {
+    this.expect(act === exp, 'got: ' + act.replace(/\n/g, '⏎'));
+ }
+}
+
+export const g = makeTestGroup(F);
+g.test('findFailedPixels')
+ .desc(
+ `
+    Test that findFailedPixels passes what is expected to pass and fails what is
+    expected to fail. For example, NaN === NaN should be true in a texture format
+    that allows NaN, and two different representations of the same rgb9e5ufloat
+    value should compare as equal, etc.
+ `
+ )
+ .params(u =>
+ u.combineWithParams([
+ // Sanity Check
+ {
+ format: 'rgba8unorm' as RegularTextureFormat,
+ actual: typedArrayParam('Uint8Array', [0x00, 0x40, 0x80, 0xff]),
+ expected: typedArrayParam('Uint8Array', [0x00, 0x40, 0x80, 0xff]),
+ isSame: true,
+ },
+ // Slightly different values
+ {
+ format: 'rgba8unorm' as RegularTextureFormat,
+ actual: typedArrayParam('Uint8Array', [0x00, 0x40, 0x80, 0xff]),
+ expected: typedArrayParam('Uint8Array', [0x00, 0x40, 0x81, 0xff]),
+ isSame: false,
+ },
+ // Different representations of the same value
+ {
+ format: 'rgb9e5ufloat' as RegularTextureFormat,
+ actual: typedArrayParam('Uint8Array', [0x78, 0x56, 0x34, 0x12]),
+ expected: typedArrayParam('Uint8Array', [0xf0, 0xac, 0x68, 0x0c]),
+ isSame: true,
+ },
+ // Slightly different values
+ {
+ format: 'rgb9e5ufloat' as RegularTextureFormat,
+ actual: typedArrayParam('Uint8Array', [0x78, 0x56, 0x34, 0x12]),
+ expected: typedArrayParam('Uint8Array', [0xf1, 0xac, 0x68, 0x0c]),
+ isSame: false,
+ },
+ // Test NaN === NaN
+ {
+ format: 'r32float' as RegularTextureFormat,
+ actual: typedArrayParam('Float32Array', [parseFloat('abc')]),
+ expected: typedArrayParam('Float32Array', [parseFloat('def')]),
+ isSame: true,
+ },
+ // Sanity Check
+ {
+ format: 'r32float' as RegularTextureFormat,
+ actual: typedArrayParam('Float32Array', [1.23]),
+ expected: typedArrayParam('Float32Array', [1.23]),
+ isSame: true,
+ },
+ // Slightly different values.
+ {
+ format: 'r32float' as RegularTextureFormat,
+ actual: typedArrayParam('Uint32Array', [0x3f9d70a4]),
+ expected: typedArrayParam('Uint32Array', [0x3f9d70a5]),
+ isSame: false,
+ },
+ // Slightly different
+ {
+ format: 'rg11b10ufloat' as RegularTextureFormat,
+ actual: typedArrayParam('Uint32Array', [0x3ce]),
+ expected: typedArrayParam('Uint32Array', [0x3cf]),
+ isSame: false,
+ },
+ // Positive.Infinity === Positive.Infinity (red)
+ {
+ format: 'rg11b10ufloat' as RegularTextureFormat,
+ actual: typedArrayParam('Uint32Array', [0b11111000000]),
+ expected: typedArrayParam('Uint32Array', [0b11111000000]),
+ isSame: true,
+ },
+ // Positive.Infinity === Positive.Infinity (green)
+ {
+ format: 'rg11b10ufloat' as RegularTextureFormat,
+ actual: typedArrayParam('Uint32Array', [0b11111000000_00000000000]),
+ expected: typedArrayParam('Uint32Array', [0b11111000000_00000000000]),
+ isSame: true,
+ },
+ // Positive.Infinity === Positive.Infinity (blue)
+ {
+ format: 'rg11b10ufloat' as RegularTextureFormat,
+ actual: typedArrayParam('Uint32Array', [0b1111100000_00000000000_00000000000]),
+ expected: typedArrayParam('Uint32Array', [0b1111100000_00000000000_00000000000]),
+ isSame: true,
+ },
+ // NaN === NaN (red)
+ {
+ format: 'rg11b10ufloat' as RegularTextureFormat,
+ actual: typedArrayParam('Uint32Array', [0b11111000001]),
+ expected: typedArrayParam('Uint32Array', [0b11111000010]),
+ isSame: true,
+ },
+ // NaN === NaN (green)
+ {
+ format: 'rg11b10ufloat' as RegularTextureFormat,
+ actual: typedArrayParam('Uint32Array', [0b11111000100_00000000000]),
+ expected: typedArrayParam('Uint32Array', [0b11111001000_00000000000]),
+ isSame: true,
+ },
+ // NaN === NaN (blue)
+ {
+ format: 'rg11b10ufloat' as RegularTextureFormat,
+ actual: typedArrayParam('Uint32Array', [0b1111110000_00000000000_00000000000]),
+ expected: typedArrayParam('Uint32Array', [0b1111101000_00000000000_00000000000]),
+ isSame: true,
+ },
+ ])
+ )
+ .fn(t => {
+ const { format, actual, expected, isSame } = t.params;
+ const actualData = new Uint8Array(typedArrayFromParam(actual).buffer);
+ const expectedData = new Uint8Array(typedArrayFromParam(expected).buffer);
+
+ const actTexelView = TexelView.fromTextureDataByReference(format, actualData, {
+ bytesPerRow: actualData.byteLength,
+ rowsPerImage: 1,
+ subrectOrigin: [0, 0, 0],
+ subrectSize: [1, 1, 1],
+ });
+ const expTexelView = TexelView.fromTextureDataByReference(format, expectedData, {
+ bytesPerRow: expectedData.byteLength,
+ rowsPerImage: 1,
+ subrectOrigin: [0, 0, 0],
+ subrectSize: [1, 1, 1],
+ });
+
+ const zero = { x: 0, y: 0, z: 0 };
+ const failedPixelsMessage = findFailedPixels(
+ format,
+ zero,
+ { width: 1, height: 1, depthOrArrayLayers: 1 },
+ { actTexelView, expTexelView },
+ {
+ maxFractionalDiff: 0,
+ }
+ );
+
+ t.expect(isSame === !failedPixelsMessage, failedPixelsMessage);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/unittests/unit_test.ts b/dom/webgpu/tests/cts/checkout/src/unittests/unit_test.ts
new file mode 100644
index 0000000000..876780e151
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/unittests/unit_test.ts
@@ -0,0 +1,3 @@
+import { Fixture } from '../common/framework/fixture.js';
+
+export class UnitTest extends Fixture {}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/README.txt
new file mode 100644
index 0000000000..c1b25dbb1c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/README.txt
@@ -0,0 +1 @@
+WebGPU conformance test suite.
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/api/README.txt
new file mode 100644
index 0000000000..867a090a73
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/README.txt
@@ -0,0 +1 @@
+Tests for full coverage of the JavaScript API surface of WebGPU.
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/README.txt
new file mode 100644
index 0000000000..a6231af8b1
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/README.txt
@@ -0,0 +1,2 @@
+Tests that check the result of performing valid WebGPU operations, taking advantage of
+parameterization to exercise interactions between features.
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/adapter/requestAdapter.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/adapter/requestAdapter.spec.ts
new file mode 100644
index 0000000000..629b8213c1
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/adapter/requestAdapter.spec.ts
@@ -0,0 +1,124 @@
+export const description = `
+Tests for GPU.requestAdapter.
+
+Test all possible options to requestAdapter.
+default, low-power, and high-performance should all always return adapters.
+forceFallbackAdapter may or may not return an adapter.
+
+GPU.requestAdapter can technically return null for any reason,
+but we need to test functionality, so the test requires an adapter except
+when forceFallbackAdapter is true.
+
+The test runs a simple compute shader that fills a buffer with consecutive
+values and then checks the result to test the adapter for basic functionality.
+`;
+
+import { Fixture } from '../../../../common/framework/fixture.js';
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { getGPU } from '../../../../common/util/navigator_gpu.js';
+import { assert, objectEquals, iterRange } from '../../../../common/util/util.js';
+
+export const g = makeTestGroup(Fixture);
+
+const powerPreferenceModes: Array<GPUPowerPreference | undefined> = [
+ undefined,
+ 'low-power',
+ 'high-performance',
+];
+const forceFallbackOptions: Array<boolean | undefined> = [undefined, false, true];
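+// Illustrative only: the 3 x 3 parameter grid below expands to nine requestAdapter calls, e.g.
+// requestAdapter({}), requestAdapter({ powerPreference: 'low-power' }), and
+// requestAdapter({ powerPreference: 'high-performance', forceFallbackAdapter: true }).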
+
+async function testAdapter(adapter: GPUAdapter | null) {
+ assert(adapter !== null, 'Failed to get adapter.');
+ const device = await adapter.requestDevice();
+
+ assert(device !== null, 'Failed to get device.');
+
+ const kOffset = 1230000;
+ const pipeline = device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: device.createShaderModule({
+ code: `
+ struct Buffer { data: array<u32>, };
+
+ @group(0) @binding(0) var<storage, read_write> buffer: Buffer;
+ @compute @workgroup_size(1u) fn main(
+ @builtin(global_invocation_id) id: vec3<u32>) {
+ buffer.data[id.x] = id.x + ${kOffset}u;
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ });
+
+ const kNumElements = 64;
+ const kBufferSize = kNumElements * 4;
+ const buffer = device.createBuffer({
+ size: kBufferSize,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+
+ const resultBuffer = device.createBuffer({
+ size: kBufferSize,
+ usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
+ });
+
+ const bindGroup = device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer } }],
+ });
+
+ const encoder = device.createCommandEncoder();
+
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(kNumElements);
+ pass.end();
+
+ encoder.copyBufferToBuffer(buffer, 0, resultBuffer, 0, kBufferSize);
+
+ device.queue.submit([encoder.finish()]);
+
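+  // The shader writes id.x + kOffset at each index, so the expected readback is the consecutive
+  // run [kOffset, kOffset + 1, ..., kOffset + kNumElements - 1].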
+ const expected = new Uint32Array([...iterRange(kNumElements, x => x + kOffset)]);
+
+ await resultBuffer.mapAsync(GPUMapMode.READ);
+ const actual = new Uint32Array(resultBuffer.getMappedRange());
+
+ assert(objectEquals(actual, expected), 'compute pipeline ran');
+
+ resultBuffer.destroy();
+ buffer.destroy();
+ device.destroy();
+}
+
+g.test('requestAdapter')
+ .desc(`request adapter with all possible options and check for basic functionality`)
+ .params(u =>
+ u
+ .combine('powerPreference', powerPreferenceModes)
+ .combine('forceFallbackAdapter', forceFallbackOptions)
+ )
+ .fn(async t => {
+ const { powerPreference, forceFallbackAdapter } = t.params;
+ const adapter = await getGPU(t.rec).requestAdapter({
+ ...(powerPreference !== undefined && { powerPreference }),
+ ...(forceFallbackAdapter !== undefined && { forceFallbackAdapter }),
+ });
+
+ // failing to create an adapter when forceFallbackAdapter is true is ok.
+ if (forceFallbackAdapter && !adapter) {
+ t.skip('No adapter available');
+ return;
+ }
+
+ await testAdapter(adapter);
+ });
+
+g.test('requestAdapter_no_parameters')
+ .desc(`request adapter with no parameters`)
+ .fn(async t => {
+ const adapter = await getGPU(t.rec).requestAdapter();
+ await testAdapter(adapter);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/adapter/requestAdapterInfo.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/adapter/requestAdapterInfo.spec.ts
new file mode 100644
index 0000000000..8a85d74773
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/adapter/requestAdapterInfo.spec.ts
@@ -0,0 +1,54 @@
+export const description = `
+Tests various ways of calling GPUAdapter.requestAdapterInfo.
+
+TODO:
+- Find a way to perform tests with and without user activation
+`;
+
+import { Fixture } from '../../../../common/framework/fixture.js';
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { getGPU } from '../../../../common/util/navigator_gpu.js';
+import { assert } from '../../../../common/util/util.js';
+
+export const g = makeTestGroup(Fixture);
+
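+// Matches the empty string, or groups of lowercase alphanumerics separated by single hyphens
+// (e.g. 'gen-12lp'); this is the "normalized identifier" format the tests below check for.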
+const normalizedIdentifierRegex = /^$|^[a-z0-9]+(-[a-z0-9]+)*$/;
+
+g.test('adapter_info')
+ .desc(
+ `
+ Test that calling requestAdapterInfo with no arguments:
+ - Returns a GPUAdapterInfo structure
+ - Every member in the structure except description is properly formatted`
+ )
+ .fn(async t => {
+ const gpu = getGPU(t.rec);
+ const adapter = await gpu.requestAdapter();
+ assert(adapter !== null);
+
+ const adapterInfo = await adapter.requestAdapterInfo();
+
+ t.expect(
+ normalizedIdentifierRegex.test(adapterInfo.vendor),
+ `adapterInfo.vendor should be a normalized identifier. But it's '${adapterInfo.vendor}'`
+ );
+
+ t.expect(
+ normalizedIdentifierRegex.test(adapterInfo.architecture),
+ `adapterInfo.architecture should be a normalized identifier. But it's '${adapterInfo.architecture}'`
+ );
+
+ t.expect(
+ normalizedIdentifierRegex.test(adapterInfo.device),
+ `adapterInfo.device should be a normalized identifier. But it's '${adapterInfo.device}'`
+ );
+ });
+
+g.test('adapter_info_with_hints')
+ .desc(
+ `
+ Test that calling requestAdapterInfo with hints:
+ - Rejects without user activation
+  - Succeeds with user activation`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/adapter/requestDevice.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/adapter/requestDevice.spec.ts
new file mode 100644
index 0000000000..314da6356e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/adapter/requestDevice.spec.ts
@@ -0,0 +1,376 @@
+export const description = `
+Test GPUAdapter.requestDevice.
+
+Note: tests explicitly destroy created devices so that they don't have to wait for GC to clean up
+potentially limited native resources.
+`;
+
+import { Fixture } from '../../../../common/framework/fixture.js';
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { getGPU } from '../../../../common/util/navigator_gpu.js';
+import { assert, assertReject, raceWithRejectOnTimeout } from '../../../../common/util/util.js';
+import {
+ getDefaultLimitsForAdapter,
+ kFeatureNames,
+ kLimits,
+ kLimitClasses,
+} from '../../../capability_info.js';
+import { clamp, isPowerOfTwo } from '../../../util/math.js';
+
+export const g = makeTestGroup(Fixture);
+
+g.test('default')
+ .desc(
+ `
+ Test requesting the device with a variation of default parameters.
+ - No features listed in default device
+ - Default limits`
+ )
+ .paramsSubcasesOnly(u =>
+ u.combine('args', [
+ [],
+ [undefined],
+ [{}],
+ [{ requiredFeatures: [], requiredLimits: {} }],
+ ] as const)
+ )
+ .fn(async t => {
+ const { args } = t.params;
+ const gpu = getGPU(t.rec);
+ const adapter = await gpu.requestAdapter();
+ assert(adapter !== null);
+ const device = await adapter.requestDevice(...args);
+ assert(device !== null);
+
+ // Default device should have no features.
+ t.expect(device.features.size === 0, 'Default device should not have any features');
+ // All limits should be defaults.
+ const limitInfo = getDefaultLimitsForAdapter(adapter);
+ for (const limit of kLimits) {
+ t.expect(
+ device.limits[limit] === limitInfo[limit].default,
+ `Expected ${limit} == default: ${device.limits[limit]} != ${limitInfo[limit].default}`
+ );
+ }
+
+ device.destroy();
+ });
+
+g.test('invalid')
+ .desc(
+ `
+ Test that requesting device on an invalid adapter resolves with lost device.
+  - Induce an invalid adapter via a device lost from a device.destroy()
+  - Check the device is lost with reason 'destroyed'
+  - Try creating another device on the now-stale adapter
+  - Check that this returns a device whose lost reason is 'unknown'
+ `
+ )
+ .fn(async t => {
+ const gpu = getGPU(t.rec);
+ const adapter = await gpu.requestAdapter();
+ assert(adapter !== null);
+
+ {
+ // Request a device and destroy it immediately afterwards.
+ const device = await adapter.requestDevice();
+ assert(device !== null);
+ device.destroy();
+ const lostInfo = await device.lost;
+ t.expect(lostInfo.reason === 'destroyed');
+ }
+
+ // The adapter should now be invalid since a device was lost. Requesting another device should
+ // return an already lost device.
+ const kTimeoutMS = 1000;
+ const device = await adapter.requestDevice();
+ const lost = await raceWithRejectOnTimeout(device.lost, kTimeoutMS, 'device was not lost');
+ t.expect(lost.reason === 'unknown');
+ });
+
+g.test('stale')
+ .desc(
+ `
+ Test that adapter.requestDevice() can successfully return a device once, and once only.
+ - Tests that we can successfully resolve after serial and concurrent rejections.
+  - Tests that consecutive valid attempts only succeed the first time, returning a lost device otherwise.`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('initialError', [undefined, 'TypeError', 'OperationError'])
+ .combine('awaitInitialError', [true, false])
+ .combine('awaitSuccess', [true, false])
+ .unless(
+ ({ initialError, awaitInitialError }) => initialError === undefined && awaitInitialError
+ )
+ )
+ .fn(async t => {
+ const gpu = getGPU(t.rec);
+ const adapter = await gpu.requestAdapter();
+ assert(adapter !== null);
+
+ const { initialError, awaitInitialError, awaitSuccess } = t.params;
+
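+    // Flow: optionally trigger a request that must reject (TypeError or OperationError), either
+    // awaited or left pending; then make the one request that should succeed; finally a further
+    // request must resolve to an already-lost device, showing the adapter has become stale.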
+ switch (initialError) {
+ case undefined:
+ break;
+ case 'TypeError':
+ // Cause a type error by requesting with an unknown feature.
+ if (awaitInitialError) {
+ await assertReject(
+ 'TypeError',
+ adapter.requestDevice({ requiredFeatures: ['unknown-feature' as GPUFeatureName] })
+ );
+ } else {
+ t.shouldReject(
+ 'TypeError',
+ adapter.requestDevice({ requiredFeatures: ['unknown-feature' as GPUFeatureName] })
+ );
+ }
+ break;
+ case 'OperationError':
+ // Cause an operation error by requesting with an alignment limit that is not a power of 2.
+ if (awaitInitialError) {
+ await assertReject(
+ 'OperationError',
+ adapter.requestDevice({ requiredLimits: { minUniformBufferOffsetAlignment: 255 } })
+ );
+ } else {
+ t.shouldReject(
+ 'OperationError',
+ adapter.requestDevice({ requiredLimits: { minUniformBufferOffsetAlignment: 255 } })
+ );
+ }
+ break;
+ }
+
+ let device: GPUDevice | undefined = undefined;
+ const promise = adapter.requestDevice();
+ if (awaitSuccess) {
+ device = await promise;
+ assert(device !== null);
+ } else {
+ t.shouldResolve(
+ (async () => {
+ const device = await promise;
+ device.destroy();
+ })()
+ );
+ }
+
+ const kTimeoutMS = 1000;
+ const lostDevice = await adapter.requestDevice();
+ const lost = await raceWithRejectOnTimeout(
+ lostDevice.lost,
+ kTimeoutMS,
+ 'adapter was not stale'
+ );
+ t.expect(lost.reason === 'unknown');
+
+ // Make sure to destroy the valid device after trying to get a second one. Otherwise, the second
+ // device may fail because the adapter is put into an invalid state from the destroy.
+ if (device) {
+ device.destroy();
+ }
+ });
+
+g.test('features,unknown')
+ .desc(
+ `
+ Test requesting device with an unknown feature.`
+ )
+ .fn(async t => {
+ const gpu = getGPU(t.rec);
+ const adapter = await gpu.requestAdapter();
+ assert(adapter !== null);
+
+ t.shouldReject(
+ 'TypeError',
+ adapter.requestDevice({ requiredFeatures: ['unknown-feature' as GPUFeatureName] })
+ );
+ });
+
+g.test('features,known')
+ .desc(
+ `
+ Test requesting device with all features.
+ - Succeeds with device supporting feature if adapter supports the feature.
+ - Rejects if the adapter does not support the feature.`
+ )
+ .params(u => u.combine('feature', kFeatureNames))
+ .fn(async t => {
+ const { feature } = t.params;
+
+ const gpu = getGPU(t.rec);
+ const adapter = await gpu.requestAdapter();
+ assert(adapter !== null);
+
+ const promise = adapter.requestDevice({ requiredFeatures: [feature] });
+ if (adapter.features.has(feature)) {
+ const device = await promise;
+ t.expect(device.features.has(feature), 'Device should include the required feature');
+ } else {
+ t.shouldReject('TypeError', promise);
+ }
+ });
+
+g.test('limits,unknown')
+ .desc(
+ `
+ Test that specifying limits that aren't part of the supported limit set causes
+ requestDevice to reject.`
+ )
+ .fn(async t => {
+ const gpu = getGPU(t.rec);
+ const adapter = await gpu.requestAdapter();
+ assert(adapter !== null);
+
+ const requiredLimits: Record<string, number> = { unknownLimitName: 9000 };
+
+ t.shouldReject('OperationError', adapter.requestDevice({ requiredLimits }));
+ });
+
+g.test('limits,supported')
+ .desc(
+ `
+ Test that each supported limit can be specified with valid values.
+ - Tests each limit with the default values given by the spec
+ - Tests each limit with the supported values given by the adapter`
+ )
+ .params(u =>
+ u.combine('limit', kLimits).beginSubcases().combine('limitValue', ['default', 'adapter'])
+ )
+ .fn(async t => {
+ const { limit, limitValue } = t.params;
+
+ const gpu = getGPU(t.rec);
+ const adapter = await gpu.requestAdapter();
+ assert(adapter !== null);
+
+ const limitInfo = getDefaultLimitsForAdapter(adapter);
+ let value: number = -1;
+ switch (limitValue) {
+ case 'default':
+ value = limitInfo[limit].default;
+ break;
+ case 'adapter':
+ value = adapter.limits[limit];
+ break;
+ }
+
+ const device = await adapter.requestDevice({ requiredLimits: { [limit]: value } });
+ assert(device !== null);
+ t.expect(
+ device.limits[limit] === value,
+      "Device's reported limit should match the required limit"
+ );
+ device.destroy();
+ });
+
+g.test('limit,better_than_supported')
+ .desc(
+ `
+ Test that specifying a better limit than what the adapter supports causes requestDevice to
+ reject.
+ - Tests each limit
+ - Tests requesting better limits by various amounts`
+ )
+ .params(u =>
+ u
+ .combine('limit', kLimits)
+ .beginSubcases()
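+      // A "better" value is larger than the adapter limit for 'maximum'-class limits, and a
+      // smaller alignment than the adapter supports for 'alignment'-class limits.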
+ .expandWithParams(p => {
+ switch (kLimitClasses[p.limit]) {
+ case 'maximum':
+ return [
+ { mul: 1, add: 1 },
+ { mul: 1, add: 100 },
+ ];
+ case 'alignment':
+ return [
+ { mul: 1, add: -1 },
+ { mul: 1 / 2, add: 0 },
+ { mul: 1 / 1024, add: 0 },
+ ];
+ }
+ })
+ )
+ .fn(async t => {
+ const { limit, mul, add } = t.params;
+
+ const gpu = getGPU(t.rec);
+ const adapter = await gpu.requestAdapter();
+ assert(adapter !== null);
+
+ const limitInfo = getDefaultLimitsForAdapter(adapter);
+ const value = adapter.limits[limit] * mul + add;
+ const requiredLimits = {
+ [limit]: clamp(value, { min: 0, max: limitInfo[limit].maximumValue }),
+ };
+
+ t.shouldReject('OperationError', adapter.requestDevice({ requiredLimits }));
+ });
+
+g.test('limit,worse_than_default')
+ .desc(
+ `
+  Test that specifying a limit worse than the default value required by the spec causes the value
+  to clamp.
+ - Tests each limit
+ - Tests requesting worse limits by various amounts`
+ )
+ .params(u =>
+ u
+ .combine('limit', kLimits)
+ .beginSubcases()
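+      // A "worse" value is smaller than the default for 'maximum'-class limits, and a larger
+      // (or non-power-of-two) alignment for 'alignment'-class limits.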
+ .expandWithParams(p => {
+ switch (kLimitClasses[p.limit]) {
+ case 'maximum':
+ return [
+ { mul: 1, add: -1 },
+ { mul: 1, add: -100 },
+ ];
+ case 'alignment':
+ return [
+ { mul: 1, add: 1 },
+ { mul: 2, add: 0 },
+ { mul: 1024, add: 0 },
+ ];
+ }
+ })
+ )
+ .fn(async t => {
+ const { limit, mul, add } = t.params;
+
+ const gpu = getGPU(t.rec);
+ const adapter = await gpu.requestAdapter();
+ assert(adapter !== null);
+
+ const limitInfo = getDefaultLimitsForAdapter(adapter);
+ const value = limitInfo[limit].default * mul + add;
+ const requiredLimits = {
+ [limit]: clamp(value, { min: 0, max: limitInfo[limit].maximumValue }),
+ };
+
+ let success;
+ switch (limitInfo[limit].class) {
+ case 'alignment':
+ success = isPowerOfTwo(value);
+ break;
+ case 'maximum':
+ success = true;
+ break;
+ }
+
+ if (success) {
+ const device = await adapter.requestDevice({ requiredLimits });
+ assert(device !== null);
+ t.expect(
+ device.limits[limit] === limitInfo[limit].default,
+      "Device's reported limit should match the default limit"
+ );
+ device.destroy();
+ } else {
+ t.shouldReject('OperationError', adapter.requestDevice({ requiredLimits }));
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/async_ordering/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/async_ordering/README.txt
new file mode 100644
index 0000000000..ad69ba7bf4
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/async_ordering/README.txt
@@ -0,0 +1,12 @@
+Test ordering of async resolutions between promises returned by the following calls (and possibly
+between multiple of the same call), where there are constraints on the ordering.
+Spec issue: https://github.com/gpuweb/gpuweb/issues/962
+
+TODO: plan and implement
+- createReadyPipeline() (not sure if this actually has any ordering constraints)
+- cmdbuf.executionTime
+- device.popErrorScope()
+- device.lost
+- queue.onSubmittedWorkDone()
+- buffer.mapAsync()
+- shadermodule.getCompilationInfo()
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/README.txt
new file mode 100644
index 0000000000..b5d99e646b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/README.txt
@@ -0,0 +1 @@
+GPUBuffer tests.
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/map.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/map.spec.ts
new file mode 100644
index 0000000000..4e81395269
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/map.spec.ts
@@ -0,0 +1,510 @@
+export const description = `
+Test the operation of buffer mapping, specifically the data contents written via
+map-write/mappedAtCreation, and the contents of buffers returned by getMappedRange on
+buffers which are mapped-read/mapped-write/mappedAtCreation.
+
+range: used for getMappedRange
+mapRegion: used for mapAsync
+
+mapRegionBoundModes is used to get mapRegion from range:
+ - default-expand: expand mapRegion to the buffer bounds by setting offset/size to undefined
+ - explicit-expand: expand mapRegion to the buffer bounds by explicitly calculating offset/size
+ - minimal: make mapRegion the same as range, which is the minimal region that makes the getMappedRange input valid
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { assert, memcpy } from '../../../../common/util/util.js';
+import { checkElementsEqual } from '../../../util/check_contents.js';
+
+import { MappingTest } from './mapping_test.js';
+
+export const g = makeTestGroup(MappingTest);
+
+const kSubcases = [
+ { size: 0, range: [] },
+ { size: 0, range: [undefined] },
+ { size: 0, range: [undefined, undefined] },
+ { size: 0, range: [0] },
+ { size: 0, range: [0, undefined] },
+ { size: 0, range: [0, 0] },
+ { size: 12, range: [] },
+ { size: 12, range: [undefined] },
+ { size: 12, range: [undefined, undefined] },
+ { size: 12, range: [0] },
+ { size: 12, range: [0, undefined] },
+ { size: 12, range: [0, 12] },
+ { size: 12, range: [0, 0] },
+ { size: 12, range: [8] },
+ { size: 12, range: [8, undefined] },
+ { size: 12, range: [8, 4] },
+ { size: 28, range: [8, 8] },
+ { size: 28, range: [8, 12] },
+ { size: 512 * 1024, range: [] },
+] as const;
+
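+// Resolves a possibly-partial [offset?, size?] range against the buffer size into a concrete
+// [offset, size] pair: the offset defaults to 0 and the size to the rest of the buffer.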
+function reifyMapRange(bufferSize: number, range: readonly [number?, number?]): [number, number] {
+ const offset = range[0] ?? 0;
+ return [offset, range[1] ?? bufferSize - offset];
+}
+
+const mapRegionBoundModes = ['default-expand', 'explicit-expand', 'minimal'] as const;
+type MapRegionBoundMode = (typeof mapRegionBoundModes)[number];
+
+function getRegionForMap(
+ bufferSize: number,
+ range: [number, number],
+ {
+ mapAsyncRegionLeft,
+ mapAsyncRegionRight,
+ }: {
+ mapAsyncRegionLeft: MapRegionBoundMode;
+ mapAsyncRegionRight: MapRegionBoundMode;
+ }
+) {
+ const regionLeft = mapAsyncRegionLeft === 'minimal' ? range[0] : 0;
+ const regionRight = mapAsyncRegionRight === 'minimal' ? range[0] + range[1] : bufferSize;
+ return [
+ mapAsyncRegionLeft === 'default-expand' ? undefined : regionLeft,
+ mapAsyncRegionRight === 'default-expand' ? undefined : regionRight - regionLeft,
+ ] as const;
+}
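+// Worked example (illustrative only, not used by the tests): for a 16-byte buffer and range
+// [4, 8], the bound-mode pairs produce these mapAsync (offset, size) arguments:
+//   default-expand / default-expand   -> (undefined, undefined)  // whole buffer, implicit
+//   explicit-expand / explicit-expand -> (0, 16)                 // whole buffer, explicit
+//   minimal / minimal                 -> (4, 8)                  // exactly the mapped range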
+
+g.test('mapAsync,write')
+ .desc(
+ `Use map-write to write to various ranges of variously-sized buffers, then expectContents
+(which does copyBufferToBuffer + map-read) to ensure the contents were written.`
+ )
+ .params(u =>
+ u
+ .combine('mapAsyncRegionLeft', mapRegionBoundModes)
+ .combine('mapAsyncRegionRight', mapRegionBoundModes)
+ .beginSubcases()
+ .combineWithParams(kSubcases)
+ )
+ .fn(async t => {
+ const { size, range } = t.params;
+ const [rangeOffset, rangeSize] = reifyMapRange(size, range);
+
+ const buffer = t.device.createBuffer({
+ size,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.MAP_WRITE,
+ });
+
+ const mapRegion = getRegionForMap(size, [rangeOffset, rangeSize], t.params);
+ await buffer.mapAsync(GPUMapMode.WRITE, ...mapRegion);
+ const arrayBuffer = buffer.getMappedRange(...range);
+ t.checkMapWrite(buffer, rangeOffset, arrayBuffer, rangeSize);
+ });
+
+g.test('mapAsync,write,unchanged_ranges_preserved')
+ .desc(
+ `Use mappedAtCreation or mapAsync to write to various ranges of variously-sized buffers, then
+use mapAsync to map a different range and zero it out. Finally use expectGPUBufferValuesEqual
+(which does copyBufferToBuffer + map-read) to verify that contents originally written outside the
+second mapped range were not altered.`
+ )
+ .params(u =>
+ u
+ .beginSubcases()
+ .combine('mappedAtCreation', [false, true])
+ .combineWithParams([
+ { size: 12, range1: [], range2: [8] },
+ { size: 12, range1: [], range2: [0, 8] },
+ { size: 12, range1: [0, 8], range2: [8] },
+ { size: 12, range1: [8], range2: [0, 8] },
+ { size: 28, range1: [], range2: [8, 8] },
+ { size: 28, range1: [8, 16], range2: [16, 8] },
+ { size: 32, range1: [16, 12], range2: [8, 16] },
+ { size: 32, range1: [8, 8], range2: [24, 4] },
+ ] as const)
+ )
+ .fn(async t => {
+ const { size, range1, range2, mappedAtCreation } = t.params;
+ const [rangeOffset1, rangeSize1] = reifyMapRange(size, range1);
+ const [rangeOffset2, rangeSize2] = reifyMapRange(size, range2);
+
+ const buffer = t.device.createBuffer({
+ mappedAtCreation,
+ size,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.MAP_WRITE,
+ });
+
+ // If the buffer is not mappedAtCreation map it now.
+ if (!mappedAtCreation) {
+ await buffer.mapAsync(GPUMapMode.WRITE);
+ }
+
+ // Set the initial contents of the buffer.
+ const init = buffer.getMappedRange(...range1);
+
+ assert(init.byteLength === rangeSize1);
+ const expectedBuffer = new ArrayBuffer(size);
+ const expected = new Uint32Array(
+ expectedBuffer,
+ rangeOffset1,
+ rangeSize1 / Uint32Array.BYTES_PER_ELEMENT
+ );
+ const data = new Uint32Array(init);
+ for (let i = 0; i < data.length; ++i) {
+ data[i] = expected[i] = i + 1;
+ }
+ buffer.unmap();
+
+ // Write to a second range of the buffer
+ await buffer.mapAsync(GPUMapMode.WRITE, ...range2);
+ const init2 = buffer.getMappedRange(...range2);
+
+ assert(init2.byteLength === rangeSize2);
+ const expected2 = new Uint32Array(
+ expectedBuffer,
+ rangeOffset2,
+ rangeSize2 / Uint32Array.BYTES_PER_ELEMENT
+ );
+ const data2 = new Uint32Array(init2);
+ for (let i = 0; i < data2.length; ++i) {
+ data2[i] = expected2[i] = 0;
+ }
+ buffer.unmap();
+
+ // Verify that the range of the buffer which was not overwritten was preserved.
+ t.expectGPUBufferValuesEqual(buffer, expected, rangeOffset1);
+ });
+
+g.test('mapAsync,read')
+ .desc(
+ `Use mappedAtCreation to initialize various ranges of variously-sized buffers, then
+map-read and check the read-back result.`
+ )
+ .params(u =>
+ u
+ .combine('mapAsyncRegionLeft', mapRegionBoundModes)
+ .combine('mapAsyncRegionRight', mapRegionBoundModes)
+ .beginSubcases()
+ .combineWithParams(kSubcases)
+ )
+ .fn(async t => {
+ const { size, range } = t.params;
+ const [rangeOffset, rangeSize] = reifyMapRange(size, range);
+
+ const buffer = t.device.createBuffer({
+ mappedAtCreation: true,
+ size,
+ usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ,
+ });
+ const init = buffer.getMappedRange(...range);
+
+ assert(init.byteLength === rangeSize);
+ const expected = new Uint32Array(new ArrayBuffer(rangeSize));
+ const data = new Uint32Array(init);
+ for (let i = 0; i < data.length; ++i) {
+ data[i] = expected[i] = i + 1;
+ }
+ buffer.unmap();
+
+ const mapRegion = getRegionForMap(size, [rangeOffset, rangeSize], t.params);
+ await buffer.mapAsync(GPUMapMode.READ, ...mapRegion);
+ const actual = new Uint8Array(buffer.getMappedRange(...range));
+ t.expectOK(checkElementsEqual(actual, new Uint8Array(expected.buffer)));
+ });
+
+g.test('mapAsync,read,typedArrayAccess')
+ .desc(`Use various TypedArray types to read back from a mapped buffer`)
+ .params(u =>
+ u
+ .combine('mapAsyncRegionLeft', mapRegionBoundModes)
+ .combine('mapAsyncRegionRight', mapRegionBoundModes)
+ .beginSubcases()
+ .combineWithParams([
+ { size: 80, range: [] },
+ { size: 160, range: [] },
+ { size: 160, range: [0, 80] },
+ { size: 160, range: [80] },
+ { size: 160, range: [40, 120] },
+ { size: 160, range: [40] },
+ ] as const)
+ )
+ .fn(async t => {
+ const { size, range } = t.params;
+ const [rangeOffset, rangeSize] = reifyMapRange(size, range);
+
+ // Fill an array buffer with a variety of values of different types.
+ const expectedArrayBuffer = new ArrayBuffer(80);
+ const uint8Expected = new Uint8Array(expectedArrayBuffer, 0, 2);
+ uint8Expected[0] = 1;
+ uint8Expected[1] = 255;
+
+ const int8Expected = new Int8Array(expectedArrayBuffer, 2, 2);
+ int8Expected[0] = -1;
+ int8Expected[1] = 127;
+
+ const uint16Expected = new Uint16Array(expectedArrayBuffer, 4, 2);
+ uint16Expected[0] = 1;
+ uint16Expected[1] = 65535;
+
+ const int16Expected = new Int16Array(expectedArrayBuffer, 8, 2);
+ int16Expected[0] = -1;
+ int16Expected[1] = 32767;
+
+ const uint32Expected = new Uint32Array(expectedArrayBuffer, 12, 2);
+ uint32Expected[0] = 1;
+ uint32Expected[1] = 4294967295;
+
+ const int32Expected = new Int32Array(expectedArrayBuffer, 20, 2);
+    int32Expected[0] = -1;
+    int32Expected[1] = 2147483647;
+
+ const float32Expected = new Float32Array(expectedArrayBuffer, 28, 3);
+ float32Expected[0] = 1;
+ float32Expected[1] = -1;
+ float32Expected[2] = 12345.6789;
+
+ const float64Expected = new Float64Array(expectedArrayBuffer, 40, 5);
+ float64Expected[0] = 1;
+ float64Expected[1] = -1;
+ float64Expected[2] = 12345.6789;
+ float64Expected[3] = Number.MAX_VALUE;
+ float64Expected[4] = Number.MIN_VALUE;
+
+ const buffer = t.device.createBuffer({
+ mappedAtCreation: true,
+ size,
+ usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ,
+ });
+ const init = buffer.getMappedRange(...range);
+
+ // Copy the expected values into the mapped range.
+ assert(init.byteLength === rangeSize);
+ memcpy({ src: expectedArrayBuffer }, { dst: init });
+ buffer.unmap();
+
+ const mapRegion = getRegionForMap(size, [rangeOffset, rangeSize], t.params);
+ await buffer.mapAsync(GPUMapMode.READ, ...mapRegion);
+ const mappedArrayBuffer = buffer.getMappedRange(...range);
+ t.expectOK(checkElementsEqual(new Uint8Array(mappedArrayBuffer, 0, 2), uint8Expected));
+ t.expectOK(checkElementsEqual(new Int8Array(mappedArrayBuffer, 2, 2), int8Expected));
+ t.expectOK(checkElementsEqual(new Uint16Array(mappedArrayBuffer, 4, 2), uint16Expected));
+ t.expectOK(checkElementsEqual(new Int16Array(mappedArrayBuffer, 8, 2), int16Expected));
+ t.expectOK(checkElementsEqual(new Uint32Array(mappedArrayBuffer, 12, 2), uint32Expected));
+ t.expectOK(checkElementsEqual(new Int32Array(mappedArrayBuffer, 20, 2), int32Expected));
+ t.expectOK(checkElementsEqual(new Float32Array(mappedArrayBuffer, 28, 3), float32Expected));
+ t.expectOK(checkElementsEqual(new Float64Array(mappedArrayBuffer, 40, 5), float64Expected));
+ });
+
+g.test('mappedAtCreation')
+ .desc(
+ `Use mappedAtCreation to write to various ranges of variously-sized buffers created either
+with or without the MAP_WRITE usage (since this could affect the mappedAtCreation upload path),
+then expectContents (which does copyBufferToBuffer + map-read) to ensure the contents were written.`
+ )
+ .params(u =>
+ u //
+ .combine('mappable', [false, true])
+ .beginSubcases()
+ .combineWithParams(kSubcases)
+ )
+ .fn(t => {
+ const { size, range, mappable } = t.params;
+ const [, rangeSize] = reifyMapRange(size, range);
+
+ const buffer = t.device.createBuffer({
+ mappedAtCreation: true,
+ size,
+ usage: GPUBufferUsage.COPY_SRC | (mappable ? GPUBufferUsage.MAP_WRITE : 0),
+ });
+ const arrayBuffer = buffer.getMappedRange(...range);
+ t.checkMapWrite(buffer, range[0] ?? 0, arrayBuffer, rangeSize);
+ });
+
+g.test('remapped_for_write')
+ .desc(
+ `Use mappedAtCreation or mapAsync to write to various ranges of variously-sized buffers created
+with the MAP_WRITE usage, then mapAsync again and ensure that the previously written values are
+still present in the mapped buffer.`
+ )
+ .params(u =>
+ u //
+ .combine('mapAsyncRegionLeft', mapRegionBoundModes)
+ .combine('mapAsyncRegionRight', mapRegionBoundModes)
+ .beginSubcases()
+ .combine('mappedAtCreation', [false, true])
+ .combineWithParams(kSubcases)
+ )
+ .fn(async t => {
+ const { size, range, mappedAtCreation } = t.params;
+ const [rangeOffset, rangeSize] = reifyMapRange(size, range);
+
+ const buffer = t.device.createBuffer({
+ mappedAtCreation,
+ size,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.MAP_WRITE,
+ });
+
+ // If the buffer is not mappedAtCreation map it now.
+ if (!mappedAtCreation) {
+ await buffer.mapAsync(GPUMapMode.WRITE);
+ }
+
+ // Set the initial contents of the buffer.
+ const init = buffer.getMappedRange(...range);
+
+ assert(init.byteLength === rangeSize);
+ const expected = new Uint32Array(new ArrayBuffer(rangeSize));
+ const data = new Uint32Array(init);
+ for (let i = 0; i < data.length; ++i) {
+ data[i] = expected[i] = i + 1;
+ }
+ buffer.unmap();
+
+    // Check that upon remapping the buffer for WRITE the values in the buffer are
+    // still the same.
+ const mapRegion = getRegionForMap(size, [rangeOffset, rangeSize], t.params);
+ await buffer.mapAsync(GPUMapMode.WRITE, ...mapRegion);
+ const actual = new Uint8Array(buffer.getMappedRange(...range));
+ t.expectOK(checkElementsEqual(actual, new Uint8Array(expected.buffer)));
+ });
+
+g.test('mappedAtCreation,mapState')
+  .desc('Test that the exposed map state of a buffer created with mappedAtCreation has the expected values.')
+ .params(u =>
+ u
+ .combine('usageType', ['invalid', 'read', 'write'])
+ .combine('afterUnmap', [false, true])
+ .combine('afterDestroy', [false, true])
+ )
+ .fn(t => {
+ const { usageType, afterUnmap, afterDestroy } = t.params;
+ const usage =
+ usageType === 'read'
+ ? GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ
+ : usageType === 'write'
+ ? GPUBufferUsage.COPY_SRC | GPUBufferUsage.MAP_WRITE
+ : 0;
+ const validationError = usage === 0;
+ const size = 8;
+ const range = [0, 8];
+
+ let buffer: GPUBuffer;
+ t.expectValidationError(() => {
+ buffer = t.device.createBuffer({
+ mappedAtCreation: true,
+ size,
+ usage,
+ });
+ }, validationError);
+
+ // mapState must be "mapped" regardless of validation error
+ t.expect(buffer!.mapState === 'mapped');
+
+ // getMappedRange must not change the map state
+ buffer!.getMappedRange(...range);
+ t.expect(buffer!.mapState === 'mapped');
+
+ if (afterUnmap) {
+ buffer!.unmap();
+ t.expect(buffer!.mapState === 'unmapped');
+ }
+
+ if (afterDestroy) {
+ buffer!.destroy();
+ t.expect(buffer!.mapState === 'unmapped');
+ }
+ });
+
+g.test('mapAsync,mapState')
+  .desc('Test that the exposed map state of a buffer mapped with mapAsync has the expected values.')
+ .params(u =>
+ u
+ .combine('usageType', ['invalid', 'read', 'write'])
+ .combine('mapModeType', ['READ', 'WRITE'] as const)
+ .combine('beforeUnmap', [false, true])
+ .combine('beforeDestroy', [false, true])
+ .combine('afterUnmap', [false, true])
+ .combine('afterDestroy', [false, true])
+ )
+ .fn(async t => {
+ const { usageType, mapModeType, beforeUnmap, beforeDestroy, afterUnmap, afterDestroy } =
+ t.params;
+ const size = 8;
+ const range = [0, 8];
+ const usage =
+ usageType === 'read'
+ ? GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ
+ : usageType === 'write'
+ ? GPUBufferUsage.COPY_SRC | GPUBufferUsage.MAP_WRITE
+ : 0;
+ const bufferCreationValidationError = usage === 0;
+ const mapMode = GPUMapMode[mapModeType];
+
+ let buffer: GPUBuffer;
+ t.expectValidationError(() => {
+ buffer = t.device.createBuffer({
+ mappedAtCreation: false,
+ size,
+ usage,
+ });
+ }, bufferCreationValidationError);
+
+ t.expect(buffer!.mapState === 'unmapped');
+
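+  // Expected transitions: 'unmapped' -> 'pending' once mapAsync is called, then 'mapped' if the
+  // promise resolves, or back to 'unmapped' if it rejects or the buffer is unmapped or destroyed
+  // while still pending.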
+ {
+ const mapAsyncValidationError =
+ bufferCreationValidationError ||
+ (mapMode === GPUMapMode.READ && !(usage & GPUBufferUsage.MAP_READ)) ||
+ (mapMode === GPUMapMode.WRITE && !(usage & GPUBufferUsage.MAP_WRITE));
+ let promise: Promise<void>;
+ t.expectValidationError(() => {
+ promise = buffer!.mapAsync(mapMode);
+ }, mapAsyncValidationError);
+ t.expect(buffer!.mapState === 'pending');
+
+ try {
+ if (beforeUnmap) {
+ buffer!.unmap();
+ t.expect(buffer!.mapState === 'unmapped');
+ }
+ if (beforeDestroy) {
+ buffer!.destroy();
+ t.expect(buffer!.mapState === 'unmapped');
+ }
+
+ await promise!;
+ t.expect(buffer!.mapState === 'mapped');
+
+ // getMappedRange must not change the map state
+ buffer!.getMappedRange(...range);
+ t.expect(buffer!.mapState === 'mapped');
+ } catch {
+ // unmapped before resolve, destroyed before resolve, or mapAsync validation error
+ // will end up with rejection and 'unmapped'
+ t.expect(buffer!.mapState === 'unmapped');
+ }
+ }
+
+  // If the buffer is already mapped, test mapAsync on the already-mapped buffer.
+ if (buffer!.mapState === 'mapped') {
+    // mapAsync on an already-mapped buffer must be rejected with a validation error
+    // and the map state must remain 'mapped'.
+ let promise: Promise<void>;
+ t.expectValidationError(() => {
+ promise = buffer!.mapAsync(GPUMapMode.WRITE);
+ }, true);
+ t.expect(buffer!.mapState === 'mapped');
+
+ try {
+ await promise!;
+ t.fail('mapAsync on already mapped buffer must not succeed.');
+ } catch {
+ t.expect(buffer!.mapState === 'mapped');
+ }
+ }
+
+ if (afterUnmap) {
+ buffer!.unmap();
+ t.expect(buffer!.mapState === 'unmapped');
+ }
+
+ if (afterDestroy) {
+ buffer!.destroy();
+ t.expect(buffer!.mapState === 'unmapped');
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/map_ArrayBuffer.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/map_ArrayBuffer.spec.ts
new file mode 100644
index 0000000000..fc0bfac39d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/map_ArrayBuffer.spec.ts
@@ -0,0 +1,89 @@
+export const description = `
+Tests for the behavior of ArrayBuffers returned by getMappedRange.
+
+TODO: Add tests that transfer to another thread instead of just using MessageChannel.
+TODO: Add tests for any other Web APIs that can detach ArrayBuffers.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { timeout } from '../../../../common/util/timeout.js';
+import { GPUTest } from '../../../gpu_test.js';
+import { checkElementsEqual } from '../../../util/check_contents.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('postMessage')
+ .desc(
+ `Using postMessage to send a getMappedRange-returned ArrayBuffer throws a TypeError
+ if it was included in the transfer list. Otherwise, it makes a copy.
+ Test combinations of transfer={false, true}, mapMode={read,write}.`
+ )
+ .params(u =>
+ u //
+ .combine('transfer', [false, true])
+ .combine('mapMode', ['READ', 'WRITE'] as const)
+ )
+ .fn(async t => {
+ const { transfer, mapMode } = t.params;
+ const kSize = 1024;
+
+ // Populate initial data.
+ const initialData = new Uint32Array(new ArrayBuffer(kSize));
+ for (let i = 0; i < initialData.length; ++i) {
+ initialData[i] = i;
+ }
+
+ const buf = t.makeBufferWithContents(
+ initialData,
+ mapMode === 'WRITE' ? GPUBufferUsage.MAP_WRITE : GPUBufferUsage.MAP_READ
+ );
+
+ await buf.mapAsync(GPUMapMode[mapMode]);
+ const ab1 = buf.getMappedRange();
+ t.expect(ab1.byteLength === kSize, 'ab1 should have the size of the buffer');
+
+ const mc = new MessageChannel();
+ const ab2Promise = new Promise<ArrayBuffer>(resolve => {
+ mc.port2.onmessage = ev => {
+ if (transfer) {
+ t.fail(
+ `postMessage with ab1 in transfer list should not be received. Unexpected message: ${ev.data}`
+ );
+ } else {
+ resolve(ev.data);
+ }
+ };
+ });
+
+ if (transfer) {
+ t.shouldThrow('TypeError', () => mc.port1.postMessage(ab1, [ab1]));
+ // Wait to make sure the postMessage isn't received.
+ await new Promise(resolve => timeout(resolve, 100));
+ } else {
+ mc.port1.postMessage(ab1);
+ }
+ t.expect(ab1.byteLength === kSize, 'after postMessage, ab1 should not be detached');
+
+ if (!transfer) {
+ const ab2 = await ab2Promise;
+ t.expect(ab2.byteLength === kSize, 'ab2 should be the same size');
+ const ab2Data = new Uint32Array(ab2, 0, initialData.length);
+ // ab2 should have the same initial contents.
+ t.expectOK(checkElementsEqual(ab2Data, initialData));
+
+ // Mutations to ab2 should not be visible in ab1.
+ const ab1Data = new Uint32Array(ab1, 0, initialData.length);
+ const abs2NewData = initialData.slice().reverse();
+ for (let i = 0; i < ab2Data.length; ++i) {
+ ab2Data[i] = abs2NewData[i];
+ }
+ t.expectOK(checkElementsEqual(ab1Data, initialData));
+ t.expectOK(checkElementsEqual(ab2Data, abs2NewData));
+ }
+
+ buf.unmap();
+ t.expect(ab1.byteLength === 0, 'after unmap, ab1 should be detached');
+
+ // Transferring an already-detached ArrayBuffer is a DataCloneError.
+ t.shouldThrow('DataCloneError', () => mc.port1.postMessage(ab1, [ab1]));
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/map_detach.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/map_detach.spec.ts
new file mode 100644
index 0000000000..a8619aa1aa
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/map_detach.spec.ts
@@ -0,0 +1,79 @@
+export const description = `
+ Tests that TypedArrays created when mapping a GPUBuffer are detached when the
+ buffer is unmapped or destroyed.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { getGPU } from '../../../../common/util/navigator_gpu.js';
+import { assert } from '../../../../common/util/util.js';
+import { GPUConst } from '../../../constants.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('while_mapped')
+ .desc(
+ `
+  Test that mapped buffers are able to properly detach.
+  - Tests {mappable, unmappable mappedAtCreation, mappable mappedAtCreation}
+ - Tests while {mapped, mapped at creation, mapped at creation then unmapped and mapped again}
+ - When {unmap, destroy, unmap && destroy, device.destroy} is called`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('mappedAtCreation', [false, true])
+ .combineWithParams([
+ { usage: GPUConst.BufferUsage.COPY_SRC },
+ { usage: GPUConst.BufferUsage.MAP_WRITE | GPUConst.BufferUsage.COPY_SRC },
+ { usage: GPUConst.BufferUsage.COPY_DST | GPUConst.BufferUsage.MAP_READ },
+ {
+ usage: GPUConst.BufferUsage.MAP_WRITE | GPUConst.BufferUsage.COPY_SRC,
+ mapMode: GPUConst.MapMode.WRITE,
+ },
+ {
+ usage: GPUConst.BufferUsage.COPY_DST | GPUConst.BufferUsage.MAP_READ,
+ mapMode: GPUConst.MapMode.READ,
+ },
+ ])
+ .combineWithParams([
+ { unmap: true, destroy: false },
+ { unmap: false, destroy: true },
+ { unmap: true, destroy: true },
+ { unmap: false, destroy: false, deviceDestroy: true },
+ ])
+ .unless(p => p.mappedAtCreation === false && p.mapMode === undefined)
+ )
+ .fn(async t => {
+ const { usage, mapMode, mappedAtCreation, unmap, destroy, deviceDestroy } = t.params;
+
+ let device: GPUDevice = t.device;
+ if (deviceDestroy) {
+ const adapter = await getGPU(t.rec).requestAdapter();
+ assert(adapter !== null);
+ device = await adapter.requestDevice();
+ }
+ const buffer = device.createBuffer({
+ size: 4,
+ usage,
+ mappedAtCreation,
+ });
+
+ if (mapMode !== undefined) {
+ if (mappedAtCreation) {
+ buffer.unmap();
+ }
+ await buffer.mapAsync(mapMode);
+ }
+
+ const arrayBuffer = buffer.getMappedRange();
+ const view = new Uint8Array(arrayBuffer);
+ t.expect(arrayBuffer.byteLength === 4);
+ t.expect(view.length === 4);
+
+ if (unmap) buffer.unmap();
+ if (destroy) buffer.destroy();
+ if (deviceDestroy) device.destroy();
+
+ t.expect(arrayBuffer.byteLength === 0, 'ArrayBuffer should be detached');
+ t.expect(view.byteLength === 0, 'ArrayBufferView should be detached');
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/map_oom.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/map_oom.spec.ts
new file mode 100644
index 0000000000..76f1e11c26
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/map_oom.spec.ts
@@ -0,0 +1,50 @@
+export const description =
+ 'Test out-of-memory conditions creating large mappable/mappedAtCreation buffers.';
+
+import { kUnitCaseParamsBuilder } from '../../../../common/framework/params_builder.js';
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { kBufferUsages } from '../../../capability_info.js';
+import { GPUTest } from '../../../gpu_test.js';
+import { kMaxSafeMultipleOf8 } from '../../../util/math.js';
+
+const oomAndSizeParams = kUnitCaseParamsBuilder
+ .combine('oom', [false, true])
+ .expand('size', ({ oom }) => {
+ return oom
+ ? [
+ kMaxSafeMultipleOf8,
+ 0x20_0000_0000, // 128 GB
+ ]
+ : [16];
+ });
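+// The OOM sizes are chosen to be unallocatable on any real device: kMaxSafeMultipleOf8 is
+// assumed to be the largest multiple of 8 that is still a safe JS integer, and 0x20_0000_0000
+// bytes is 128 GiB. The non-OOM case uses a small 16-byte buffer.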
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('mappedAtCreation')
+ .desc(
+    `Test that creating a very large mappedAtCreation buffer should throw a RangeError only
+ because such a large allocation cannot be created when we initialize an active buffer mapping.
+`
+ )
+ .params(
+ oomAndSizeParams //
+ .beginSubcases()
+ .combine('usage', kBufferUsages)
+ )
+ .fn(t => {
+ const { oom, usage, size } = t.params;
+
+ const f = () => t.device.createBuffer({ mappedAtCreation: true, size, usage });
+
+ if (oom) {
+ // getMappedRange is normally valid on OOM buffers, but this one fails because the
+ // (default) range is too large to create the returned ArrayBuffer.
+ t.shouldThrow('RangeError', f);
+ } else {
+ const buffer = f();
+ const mapping = buffer.getMappedRange();
+ t.expect(mapping.byteLength === size, 'Mapping should be successful');
+ buffer.unmap();
+ t.expect(mapping.byteLength === 0, 'Mapping should be detached');
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/mapping_test.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/mapping_test.ts
new file mode 100644
index 0000000000..733e2dcb69
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/mapping_test.ts
@@ -0,0 +1,39 @@
+import { assert } from '../../../../common/util/util.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+export class MappingTest extends GPUTest {
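+  // Verifies the mapped range starts out zero-filled, writes an increasing u32 pattern into it,
+  // unmaps, and then checks the written values by reading the buffer back
+  // (copyBufferToBuffer + map-read).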
+ checkMapWrite(
+ buffer: GPUBuffer,
+ offset: number,
+ mappedContents: ArrayBuffer,
+ size: number
+ ): void {
+ this.checkMapWriteZeroed(mappedContents, size);
+
+ const mappedView = new Uint32Array(mappedContents);
+ const expected = new Uint32Array(new ArrayBuffer(size));
+ assert(mappedView.byteLength === size);
+ for (let i = 0; i < mappedView.length; ++i) {
+ mappedView[i] = expected[i] = i + 1;
+ }
+ buffer.unmap();
+
+ this.expectGPUBufferValuesEqual(buffer, expected, offset);
+ }
+
+ checkMapWriteZeroed(arrayBuffer: ArrayBuffer, expectedSize: number): void {
+ this.expect(arrayBuffer.byteLength === expectedSize);
+ const view = new Uint8Array(arrayBuffer);
+ this.expectZero(view);
+ }
+
+ expectZero(actual: Uint8Array): void {
+ const size = actual.byteLength;
+ for (let i = 0; i < size; ++i) {
+ if (actual[i] !== 0) {
+ this.fail(`at [${i}], expected zero, got ${actual[i]}`);
+ break;
+ }
+ }
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/threading.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/threading.spec.ts
new file mode 100644
index 0000000000..b69404508d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/buffers/threading.spec.ts
@@ -0,0 +1,29 @@
+export const description = `
+Tests for valid operations with various client-side thread-shared state of GPUBuffers.
+
+States to test:
+- mapping pending
+- mapped
+- mapped at creation
+- mapped at creation, then unmapped
+- mapped at creation, then unmapped, then re-mapped
+- destroyed
+
+TODO: Look for more things to test.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('serialize')
+ .desc(
+ `Copy a GPUBuffer to another thread while it is in various states on
+{the sending thread, yet another thread}.`
+ )
+ .unimplemented();
+
+g.test('destroyed')
+ .desc(`Destroy on one thread while in various states in another thread.`)
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/basic.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/basic.spec.ts
new file mode 100644
index 0000000000..971c8a6757
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/basic.spec.ts
@@ -0,0 +1,98 @@
+export const description = `
+Basic command buffer tests: empty submission and buffer/texture copy round trips.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { memcpy } from '../../../../common/util/util.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('empty').fn(t => {
+ const encoder = t.device.createCommandEncoder();
+ const cmd = encoder.finish();
+ t.device.queue.submit([cmd]);
+});
+
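+// b2t2b: round-trips 4 bytes buffer -> texture -> buffer and checks the data survives both copies.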
+g.test('b2t2b').fn(t => {
+ const data = new Uint32Array([0x01020304]);
+
+ const src = t.device.createBuffer({
+ mappedAtCreation: true,
+ size: 4,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+ memcpy({ src: data }, { dst: src.getMappedRange() });
+ src.unmap();
+
+ const dst = t.device.createBuffer({
+ size: 4,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+
+ const mid = t.device.createTexture({
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ format: 'rgba8uint',
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ encoder.copyBufferToTexture(
+ { buffer: src, bytesPerRow: 256 },
+ { texture: mid, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
+ { width: 1, height: 1, depthOrArrayLayers: 1 }
+ );
+ encoder.copyTextureToBuffer(
+ { texture: mid, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
+ { buffer: dst, bytesPerRow: 256 },
+ { width: 1, height: 1, depthOrArrayLayers: 1 }
+ );
+ t.device.queue.submit([encoder.finish()]);
+
+ t.expectGPUBufferValuesEqual(dst, data);
+});
+
+g.test('b2t2t2b').fn(t => {
+ const data = new Uint32Array([0x01020304]);
+
+ const src = t.device.createBuffer({
+ mappedAtCreation: true,
+ size: 4,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+ memcpy({ src: data }, { dst: src.getMappedRange() });
+ src.unmap();
+
+ const dst = t.device.createBuffer({
+ size: 4,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+
+ const midDesc: GPUTextureDescriptor = {
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ format: 'rgba8uint',
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ };
+ const mid1 = t.device.createTexture(midDesc);
+ const mid2 = t.device.createTexture(midDesc);
+
+ const encoder = t.device.createCommandEncoder();
+ encoder.copyBufferToTexture(
+ { buffer: src, bytesPerRow: 256 },
+ { texture: mid1, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
+ { width: 1, height: 1, depthOrArrayLayers: 1 }
+ );
+ encoder.copyTextureToTexture(
+ { texture: mid1, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
+ { texture: mid2, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
+ { width: 1, height: 1, depthOrArrayLayers: 1 }
+ );
+ encoder.copyTextureToBuffer(
+ { texture: mid2, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
+ { buffer: dst, bytesPerRow: 256 },
+ { width: 1, height: 1, depthOrArrayLayers: 1 }
+ );
+ t.device.queue.submit([encoder.finish()]);
+
+ t.expectGPUBufferValuesEqual(dst, data);
+});
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/clearBuffer.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/clearBuffer.spec.ts
new file mode 100644
index 0000000000..13c1ccc1ba
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/clearBuffer.spec.ts
@@ -0,0 +1,54 @@
+export const description = `
+API operations tests for clearBuffer.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('clear')
+ .desc(
+    `Validate the correctness of the clear by filling the buffer with testable data, doing
+  clearBuffer(), and verifying the content of the whole buffer with MapRead:
+ Clear {4 bytes, part of, the whole} buffer {with, without} a non-zero valid offset that
+ - covers the whole buffer
+ - covers the beginning of the buffer
+ - covers the end of the buffer
+ - covers neither the beginning nor the end of the buffer`
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('offset', [0, 4, 8, 16, undefined])
+ .combine('size', [0, 4, 8, 16, undefined])
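+      // The buffer is sized either exactly to offset + size (using defaults of 0 and 16) or
+      // 8 bytes larger, so the cleared region may or may not reach the end of the buffer.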
+ .expand('bufferSize', p => [
+ (p.offset ?? 0) + (p.size ?? 16),
+ (p.offset ?? 0) + (p.size ?? 16) + 8,
+ ])
+ )
+ .fn(t => {
+ const { offset, size, bufferSize } = t.params;
+
+ const bufferData = new Uint8Array(bufferSize);
+ for (let i = 0; i < bufferSize; ++i) {
+ bufferData[i] = i + 1;
+ }
+
+ const buffer = t.makeBufferWithContents(
+ bufferData,
+ GPUBufferUsage.COPY_DST | GPUBufferUsage.COPY_SRC
+ );
+
+ const encoder = t.device.createCommandEncoder();
+ encoder.clearBuffer(buffer, offset, size);
+ t.device.queue.submit([encoder.finish()]);
+
+ const expectOffset = offset ?? 0;
+ const expectSize = size ?? bufferSize - expectOffset;
+
+ for (let i = 0; i < expectSize; ++i) {
+ bufferData[expectOffset + i] = 0;
+ }
+
+ t.expectGPUBufferValuesEqual(buffer, bufferData);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/copyBufferToBuffer.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/copyBufferToBuffer.spec.ts
new file mode 100644
index 0000000000..d8edb94763
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/copyBufferToBuffer.spec.ts
@@ -0,0 +1,108 @@
+export const description = 'copyBufferToBuffer operation tests';
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('single')
+ .desc(
+ `Validate the correctness of the copy by filling the srcBuffer with testable data, doing
+ CopyBufferToBuffer() copy, and verifying the content of the whole dstBuffer with MapRead:
+ Copy {4 bytes, part of, the whole} srcBuffer to the dstBuffer {with, without} a non-zero valid
+ srcOffset that
+ - covers the whole dstBuffer
+ - covers the beginning of the dstBuffer
+ - covers the end of the dstBuffer
+ - covers neither the beginning nor the end of the dstBuffer`
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('srcOffset', [0, 4, 8, 16])
+ .combine('dstOffset', [0, 4, 8, 16])
+ .combine('copySize', [0, 4, 8, 16])
+ .expand('srcBufferSize', p => [p.srcOffset + p.copySize, p.srcOffset + p.copySize + 8])
+ .expand('dstBufferSize', p => [p.dstOffset + p.copySize, p.dstOffset + p.copySize + 8])
+ )
+ .fn(t => {
+ const { srcOffset, dstOffset, copySize, srcBufferSize, dstBufferSize } = t.params;
+
+ const srcData = new Uint8Array(srcBufferSize);
+ for (let i = 0; i < srcBufferSize; ++i) {
+ srcData[i] = i + 1;
+ }
+
+ const src = t.makeBufferWithContents(srcData, GPUBufferUsage.COPY_SRC);
+
+ const dst = t.device.createBuffer({
+ size: dstBufferSize,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+ t.trackForCleanup(dst);
+
+ const encoder = t.device.createCommandEncoder();
+ encoder.copyBufferToBuffer(src, srcOffset, dst, dstOffset, copySize);
+ t.device.queue.submit([encoder.finish()]);
+
+ const expectedDstData = new Uint8Array(dstBufferSize);
+ for (let i = 0; i < copySize; ++i) {
+ expectedDstData[dstOffset + i] = srcData[srcOffset + i];
+ }
+
+ t.expectGPUBufferValuesEqual(dst, expectedDstData);
+ });
+
+g.test('state_transitions')
+ .desc(
+ `Test proper state transitions/barriers happen between copy commands.
+ Copy part of src to dst, then a different part of dst to src, and check contents of both.`
+ )
+ .fn(t => {
+ const srcData = new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8]);
+ const dstData = new Uint8Array([10, 20, 30, 40, 50, 60, 70, 80]);
+
+ const src = t.makeBufferWithContents(
+ srcData,
+ GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST
+ );
+ const dst = t.makeBufferWithContents(
+ dstData,
+ GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST
+ );
+
+ const encoder = t.device.createCommandEncoder();
+ encoder.copyBufferToBuffer(src, 0, dst, 4, 4);
+ encoder.copyBufferToBuffer(dst, 0, src, 4, 4);
+ t.device.queue.submit([encoder.finish()]);
+
+ const expectedSrcData = new Uint8Array([1, 2, 3, 4, 10, 20, 30, 40]);
+ const expectedDstData = new Uint8Array([10, 20, 30, 40, 1, 2, 3, 4]);
+ t.expectGPUBufferValuesEqual(src, expectedSrcData);
+ t.expectGPUBufferValuesEqual(dst, expectedDstData);
+ });
+
+g.test('copy_order')
+ .desc(
+ `Test copy commands in one command buffer occur in the correct order.
+ First copies one region from src to dst, then another region from src to an overlapping region
+ of dst, then checks the dst buffer's contents.`
+ )
+ .fn(t => {
+ const srcData = new Uint32Array([1, 2, 3, 4, 5, 6, 7, 8]);
+
+ const src = t.makeBufferWithContents(srcData, GPUBufferUsage.COPY_SRC);
+
+ const dst = t.device.createBuffer({
+ size: srcData.length * 4,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+ t.trackForCleanup(dst);
+
+ const encoder = t.device.createCommandEncoder();
+ encoder.copyBufferToBuffer(src, 0, dst, 0, 16);
+ encoder.copyBufferToBuffer(src, 16, dst, 8, 16);
+ t.device.queue.submit([encoder.finish()]);
+
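+    // The second copy overwrites dst words 2..5 with 5..8, replacing the first copy's 3 and 4;
+    // words 6 and 7 were never written and stay zero.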
+ const expectedDstData = new Uint32Array([1, 2, 5, 6, 7, 8, 0, 0]);
+ t.expectGPUBufferValuesEqual(dst, expectedDstData);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/copyTextureToTexture.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/copyTextureToTexture.spec.ts
new file mode 100644
index 0000000000..4c55b5162f
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/copyTextureToTexture.spec.ts
@@ -0,0 +1,1686 @@
+export const description = `copyTextureToTexture operation tests`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { assert, memcpy, unreachable } from '../../../../common/util/util.js';
+import {
+ kBufferSizeAlignment,
+ kMinDynamicBufferOffsetAlignment,
+ kTextureDimensions,
+} from '../../../capability_info.js';
+import {
+ kTextureFormatInfo,
+ kRegularTextureFormats,
+ kCompressedTextureFormats,
+ kDepthStencilFormats,
+ textureDimensionAndFormatCompatible,
+ depthStencilFormatAspectSize,
+ DepthStencilFormat,
+ ColorTextureFormat,
+ isCompressedTextureFormat,
+ viewCompatible,
+} from '../../../format_info.js';
+import { GPUTest, TextureTestMixin } from '../../../gpu_test.js';
+import { makeBufferWithContents } from '../../../util/buffer.js';
+import { checkElementsEqual, checkElementsEqualEither } from '../../../util/check_contents.js';
+import { align } from '../../../util/math.js';
+import { physicalMipSize } from '../../../util/texture/base.js';
+import { DataArrayGenerator } from '../../../util/texture/data_generation.js';
+import { kBytesPerRowAlignment, dataBytesForCopyOrFail } from '../../../util/texture/layout.js';
+
+const dataGenerator = new DataArrayGenerator();
+
+class F extends TextureTestMixin(GPUTest) {
+ GetInitialDataPerMipLevel(
+ dimension: GPUTextureDimension,
+ textureSize: Required<GPUExtent3DDict>,
+ format: ColorTextureFormat,
+ mipLevel: number
+ ): Uint8Array {
+ const textureSizeAtLevel = physicalMipSize(textureSize, format, dimension, mipLevel);
+ const bytesPerBlock = kTextureFormatInfo[format].color.bytes;
+ const blockWidthInTexel = kTextureFormatInfo[format].blockWidth;
+ const blockHeightInTexel = kTextureFormatInfo[format].blockHeight;
+ const blocksPerSubresource =
+ (textureSizeAtLevel.width / blockWidthInTexel) *
+ (textureSizeAtLevel.height / blockHeightInTexel);
+
+ const byteSize = bytesPerBlock * blocksPerSubresource * textureSizeAtLevel.depthOrArrayLayers;
+ return dataGenerator.generateView(byteSize);
+ }
+
+ GetInitialStencilDataPerMipLevel(
+ textureSize: Required<GPUExtent3DDict>,
+ format: DepthStencilFormat,
+ mipLevel: number
+ ): Uint8Array {
+ const textureSizeAtLevel = physicalMipSize(textureSize, format, '2d', mipLevel);
+ const aspectBytesPerBlock = depthStencilFormatAspectSize(format, 'stencil-only');
+ const byteSize =
+ aspectBytesPerBlock *
+ textureSizeAtLevel.width *
+ textureSizeAtLevel.height *
+ textureSizeAtLevel.depthOrArrayLayers;
+ return dataGenerator.generateView(byteSize);
+ }
+
+ DoCopyTextureToTextureTest(
+ dimension: GPUTextureDimension,
+ srcTextureSize: Required<GPUExtent3DDict>,
+ dstTextureSize: Required<GPUExtent3DDict>,
+ srcFormat: ColorTextureFormat,
+ dstFormat: ColorTextureFormat,
+ copyBoxOffsets: {
+ srcOffset: { x: number; y: number; z: number };
+ dstOffset: { x: number; y: number; z: number };
+ copyExtent: Required<GPUExtent3DDict>;
+ },
+ srcCopyLevel: number,
+ dstCopyLevel: number
+ ): void {
+ this.skipIfTextureFormatNotSupported(srcFormat, dstFormat);
+
+ // If we're in compatibility mode and it's a compressed texture
+ // then we need to render the texture to test the results of the copy.
+ const extraTextureUsageFlags =
+ isCompressedTextureFormat(dstFormat) && this.isCompatibility
+ ? GPUTextureUsage.TEXTURE_BINDING
+ : 0;
+ const mipLevelCount = dimension === '1d' ? 1 : 4;
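+ // (1d textures are required to have exactly one mip level, so multiple levels are only used for
+ // 2d and 3d textures.)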
+
+ // Create srcTexture and dstTexture
+ const srcTextureDesc: GPUTextureDescriptor = {
+ dimension,
+ size: srcTextureSize,
+ format: srcFormat,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ mipLevelCount,
+ };
+ const srcTexture = this.device.createTexture(srcTextureDesc);
+ this.trackForCleanup(srcTexture);
+ const dstTextureDesc: GPUTextureDescriptor = {
+ dimension,
+ size: dstTextureSize,
+ format: dstFormat,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST | extraTextureUsageFlags,
+ mipLevelCount,
+ };
+ const dstTexture = this.device.createTexture(dstTextureDesc);
+ this.trackForCleanup(dstTexture);
+
+ // Fill the whole subresource of srcTexture at srcCopyLevel with initialSrcData.
+ const initialSrcData = this.GetInitialDataPerMipLevel(
+ dimension,
+ srcTextureSize,
+ srcFormat,
+ srcCopyLevel
+ );
+ const srcTextureSizeAtLevel = physicalMipSize(
+ srcTextureSize,
+ srcFormat,
+ dimension,
+ srcCopyLevel
+ );
+ const bytesPerBlock = kTextureFormatInfo[srcFormat].color.bytes;
+ const blockWidth = kTextureFormatInfo[srcFormat].blockWidth;
+ const blockHeight = kTextureFormatInfo[srcFormat].blockHeight;
+ const srcBlocksPerRow = srcTextureSizeAtLevel.width / blockWidth;
+ const srcBlockRowsPerImage = srcTextureSizeAtLevel.height / blockHeight;
+ this.device.queue.writeTexture(
+ { texture: srcTexture, mipLevel: srcCopyLevel },
+ initialSrcData,
+ {
+ bytesPerRow: srcBlocksPerRow * bytesPerBlock,
+ rowsPerImage: srcBlockRowsPerImage,
+ },
+ srcTextureSizeAtLevel
+ );
+
+ // Copy the region specified by copyBoxOffsets from srcTexture to dstTexture.
+ const dstTextureSizeAtLevel = physicalMipSize(
+ dstTextureSize,
+ dstFormat,
+ dimension,
+ dstCopyLevel
+ );
+ const minWidth = Math.min(srcTextureSizeAtLevel.width, dstTextureSizeAtLevel.width);
+ const minHeight = Math.min(srcTextureSizeAtLevel.height, dstTextureSizeAtLevel.height);
+ const minDepth = Math.min(
+ srcTextureSizeAtLevel.depthOrArrayLayers,
+ dstTextureSizeAtLevel.depthOrArrayLayers
+ );
+
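+ // copyBoxOffsets describes the copy box relative to the smaller of the two mip sizes: srcOffset
+ // and dstOffset are given in texel blocks (scaled by the block size below), and copyExtent is a
+ // signed delta applied to that shared minimum extent. For example, with 1x1 blocks, a 32x32
+ // minimum extent, srcOffset {x: 1} and copyExtent {width: -1}, the applied copy is 30x32 texels,
+ // read from (1, 0) in src and written to (0, 0) in dst.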
+ const appliedSrcOffset = {
+ x: Math.min(copyBoxOffsets.srcOffset.x * blockWidth, minWidth),
+ y: Math.min(copyBoxOffsets.srcOffset.y * blockHeight, minHeight),
+ z: Math.min(copyBoxOffsets.srcOffset.z, minDepth),
+ };
+ const appliedDstOffset = {
+ x: Math.min(copyBoxOffsets.dstOffset.x * blockWidth, minWidth),
+ y: Math.min(copyBoxOffsets.dstOffset.y * blockHeight, minHeight),
+ z: Math.min(copyBoxOffsets.dstOffset.z, minDepth),
+ };
+
+ const appliedCopyWidth = Math.max(
+ minWidth +
+ copyBoxOffsets.copyExtent.width * blockWidth -
+ Math.max(appliedSrcOffset.x, appliedDstOffset.x),
+ 0
+ );
+ const appliedCopyHeight = Math.max(
+ minHeight +
+ copyBoxOffsets.copyExtent.height * blockHeight -
+ Math.max(appliedSrcOffset.y, appliedDstOffset.y),
+ 0
+ );
+ assert(appliedCopyWidth % blockWidth === 0 && appliedCopyHeight % blockHeight === 0);
+
+ const appliedCopyDepth = Math.max(
+ 0,
+ minDepth +
+ copyBoxOffsets.copyExtent.depthOrArrayLayers -
+ Math.max(appliedSrcOffset.z, appliedDstOffset.z)
+ );
+ assert(appliedCopyDepth >= 0);
+
+ const appliedSize = {
+ width: appliedCopyWidth,
+ height: appliedCopyHeight,
+ depthOrArrayLayers: appliedCopyDepth,
+ };
+
+ {
+ const encoder = this.device.createCommandEncoder();
+ encoder.copyTextureToTexture(
+ { texture: srcTexture, mipLevel: srcCopyLevel, origin: appliedSrcOffset },
+ { texture: dstTexture, mipLevel: dstCopyLevel, origin: appliedDstOffset },
+ appliedSize
+ );
+ this.device.queue.submit([encoder.finish()]);
+ }
+
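+ // Size the readback buffer for copying the whole dst subresource back with copyTextureToBuffer:
+ // bytesPerRow must be 256-byte aligned, so every block row except the last occupies a full
+ // aligned stride, while the final row only needs its unpadded length, rounded up to 4 bytes for
+ // readback alignment.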
+ const dstBlocksPerRow = dstTextureSizeAtLevel.width / blockWidth;
+ const dstBlockRowsPerImage = dstTextureSizeAtLevel.height / blockHeight;
+ const bytesPerDstAlignedBlockRow = align(dstBlocksPerRow * bytesPerBlock, 256);
+ const dstBufferSize =
+ (dstBlockRowsPerImage * dstTextureSizeAtLevel.depthOrArrayLayers - 1) *
+ bytesPerDstAlignedBlockRow +
+ align(dstBlocksPerRow * bytesPerBlock, 4);
+
+ if (isCompressedTextureFormat(dstTexture.format) && this.isCompatibility) {
+ assert(viewCompatible(srcFormat, dstFormat));
+ // compare by rendering. We need the expected texture to match
+ // the dstTexture so we'll create a texture where we supply
+ // all of the data in JavaScript.
+ const expectedTexture = this.device.createTexture({
+ size: [dstTexture.width, dstTexture.height, dstTexture.depthOrArrayLayers],
+ mipLevelCount: dstTexture.mipLevelCount,
+ format: dstTexture.format,
+ usage: dstTexture.usage,
+ });
+ const expectedData = new Uint8Array(dstBufferSize);
+
+ // Execute the equivalent of `copyTextureToTexture`, copying
+ // from `initialSrcData` to `expectedData`.
+ this.updateLinearTextureDataSubBox(dstFormat, appliedSize, {
+ src: {
+ dataLayout: {
+ bytesPerRow: srcBlocksPerRow * bytesPerBlock,
+ rowsPerImage: srcBlockRowsPerImage,
+ offset: 0,
+ },
+ origin: appliedSrcOffset,
+ data: initialSrcData,
+ },
+ dest: {
+ dataLayout: {
+ bytesPerRow: dstBlocksPerRow * bytesPerBlock,
+ rowsPerImage: dstBlockRowsPerImage,
+ offset: 0,
+ },
+ origin: appliedDstOffset,
+ data: expectedData,
+ },
+ });
+
+ // Upload `expectedData` to `expectedTexture`. If `copyTextureToTexture`
+ // worked then the contents of `dstTexture` should match `expectedTexture`
+ this.queue.writeTexture(
+ { texture: expectedTexture, mipLevel: dstCopyLevel },
+ expectedData,
+ {
+ bytesPerRow: dstBlocksPerRow * bytesPerBlock,
+ rowsPerImage: dstBlockRowsPerImage,
+ },
+ dstTextureSizeAtLevel
+ );
+
+ this.expectTexturesToMatchByRendering(
+ dstTexture,
+ expectedTexture,
+ dstCopyLevel,
+ appliedDstOffset,
+ appliedSize
+ );
+ return;
+ }
+
+ // Copy the whole content of dstTexture at dstCopyLevel to dstBuffer.
+ const dstBufferDesc: GPUBufferDescriptor = {
+ size: dstBufferSize,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ };
+ const dstBuffer = this.device.createBuffer(dstBufferDesc);
+ this.trackForCleanup(dstBuffer);
+
+ {
+ const encoder = this.device.createCommandEncoder();
+ encoder.copyTextureToBuffer(
+ { texture: dstTexture, mipLevel: dstCopyLevel },
+ {
+ buffer: dstBuffer,
+ bytesPerRow: bytesPerDstAlignedBlockRow,
+ rowsPerImage: dstBlockRowsPerImage,
+ },
+ dstTextureSizeAtLevel
+ );
+ this.device.queue.submit([encoder.finish()]);
+ }
+
+ // Fill expectedUint8DataWithPadding with the expected data of dstTexture. The other values in
+ // expectedUint8DataWithPadding are kept 0 to check if the texels untouched by the copy are 0
+ // (their previous values).
+ const expectedUint8DataWithPadding = new Uint8Array(dstBufferSize);
+ const expectedUint8Data = new Uint8Array(initialSrcData);
+
+ const appliedCopyBlocksPerRow = appliedCopyWidth / blockWidth;
+ const appliedCopyBlockRowsPerImage = appliedCopyHeight / blockHeight;
+ const srcCopyOffsetInBlocks = {
+ x: appliedSrcOffset.x / blockWidth,
+ y: appliedSrcOffset.y / blockHeight,
+ z: appliedSrcOffset.z,
+ };
+ const dstCopyOffsetInBlocks = {
+ x: appliedDstOffset.x / blockWidth,
+ y: appliedDstOffset.y / blockHeight,
+ z: appliedDstOffset.z,
+ };
+
+ for (let z = 0; z < appliedCopyDepth; ++z) {
+ const srcOffsetZ = srcCopyOffsetInBlocks.z + z;
+ const dstOffsetZ = dstCopyOffsetInBlocks.z + z;
+ for (let y = 0; y < appliedCopyBlockRowsPerImage; ++y) {
+ const dstOffsetYInBlocks = dstCopyOffsetInBlocks.y + y;
+ const expectedDataWithPaddingOffset =
+ bytesPerDstAlignedBlockRow * (dstBlockRowsPerImage * dstOffsetZ + dstOffsetYInBlocks) +
+ dstCopyOffsetInBlocks.x * bytesPerBlock;
+
+ const srcOffsetYInBlocks = srcCopyOffsetInBlocks.y + y;
+ const expectedDataOffset =
+ bytesPerBlock *
+ srcBlocksPerRow *
+ (srcBlockRowsPerImage * srcOffsetZ + srcOffsetYInBlocks) +
+ srcCopyOffsetInBlocks.x * bytesPerBlock;
+
+ const bytesInRow = appliedCopyBlocksPerRow * bytesPerBlock;
+ memcpy(
+ { src: expectedUint8Data, start: expectedDataOffset, length: bytesInRow },
+ { dst: expectedUint8DataWithPadding, start: expectedDataWithPaddingOffset }
+ );
+ }
+ }
+
+ let alternateExpectedData = expectedUint8DataWithPadding;
+ // For 8-bit snorm formats, allow an alternative encoding of -1.
+ // MAINTENANCE_TODO: Use textureContentIsOKByT2B with TexelView.
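+ // In 8-bit snorm encoding, both 0x80 (-128) and 0x81 (-127) decode to -1.0, so a byte-level
+ // comparison has to accept either value wherever the source data contained -1.0.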
+ if (srcFormat.includes('snorm')) {
+ switch (srcFormat) {
+ case 'r8snorm':
+ case 'rg8snorm':
+ case 'rgba8snorm':
+ alternateExpectedData = alternateExpectedData.slice();
+ for (let i = 0; i < alternateExpectedData.length; ++i) {
+ if (alternateExpectedData[i] === 128) {
+ alternateExpectedData[i] = 129;
+ } else if (alternateExpectedData[i] === 129) {
+ alternateExpectedData[i] = 128;
+ }
+ }
+ break;
+ case 'bc4-r-snorm':
+ case 'bc5-rg-snorm':
+ case 'eac-r11snorm':
+ case 'eac-rg11snorm':
+ break;
+ default:
+ unreachable();
+ }
+ }
+
+ // Verify that the content of the whole subresource of dstTexture at dstCopyLevel (now in
+ // dstBuffer) is as expected.
+ this.expectGPUBufferValuesPassCheck(
+ dstBuffer,
+ alternateExpectedData === expectedUint8DataWithPadding
+ ? vals => checkElementsEqual(vals, expectedUint8DataWithPadding)
+ : vals =>
+ checkElementsEqualEither(vals, [expectedUint8DataWithPadding, alternateExpectedData]),
+ {
+ srcByteOffset: 0,
+ type: Uint8Array,
+ typedLength: expectedUint8DataWithPadding.length,
+ }
+ );
+ }
+
+ InitializeStencilAspect(
+ sourceTexture: GPUTexture,
+ initialStencilData: Uint8Array,
+ srcCopyLevel: number,
+ srcCopyBaseArrayLayer: number,
+ copySize: readonly [number, number, number]
+ ): void {
+ this.queue.writeTexture(
+ {
+ texture: sourceTexture,
+ mipLevel: srcCopyLevel,
+ aspect: 'stencil-only',
+ origin: { x: 0, y: 0, z: srcCopyBaseArrayLayer },
+ },
+ initialStencilData,
+ { bytesPerRow: copySize[0], rowsPerImage: copySize[1] },
+ copySize
+ );
+ }
+
+ VerifyStencilAspect(
+ destinationTexture: GPUTexture,
+ initialStencilData: Uint8Array,
+ dstCopyLevel: number,
+ dstCopyBaseArrayLayer: number,
+ copySize: readonly [number, number, number]
+ ): void {
+ const bytesPerRow = align(copySize[0], kBytesPerRowAlignment);
+ const rowsPerImage = copySize[1];
+ const outputBufferSize = align(
+ dataBytesForCopyOrFail({
+ layout: { bytesPerRow, rowsPerImage },
+ format: 'stencil8',
+ copySize,
+ method: 'CopyT2B',
+ }),
+ kBufferSizeAlignment
+ );
+ const outputBuffer = this.device.createBuffer({
+ size: outputBufferSize,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+ this.trackForCleanup(outputBuffer);
+ const encoder = this.device.createCommandEncoder();
+ encoder.copyTextureToBuffer(
+ {
+ texture: destinationTexture,
+ aspect: 'stencil-only',
+ mipLevel: dstCopyLevel,
+ origin: { x: 0, y: 0, z: dstCopyBaseArrayLayer },
+ },
+ { buffer: outputBuffer, bytesPerRow, rowsPerImage },
+ copySize
+ );
+ this.queue.submit([encoder.finish()]);
+
+ const expectedStencilData = new Uint8Array(outputBufferSize);
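+ // Re-pack the tightly packed initialStencilData (1 byte per stencil texel) into the
+ // 256-byte-aligned row layout that the copyTextureToBuffer above wrote into outputBuffer.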
+ for (let z = 0; z < copySize[2]; ++z) {
+ const initialOffsetPerLayer = z * copySize[0] * copySize[1];
+ const expectedOffsetPerLayer = z * bytesPerRow * rowsPerImage;
+ for (let y = 0; y < copySize[1]; ++y) {
+ const initialOffsetPerRow = initialOffsetPerLayer + y * copySize[0];
+ const expectedOffsetPerRow = expectedOffsetPerLayer + y * bytesPerRow;
+ memcpy(
+ { src: initialStencilData, start: initialOffsetPerRow, length: copySize[0] },
+ { dst: expectedStencilData, start: expectedOffsetPerRow }
+ );
+ }
+ }
+ this.expectGPUBufferValuesEqual(outputBuffer, expectedStencilData);
+ }
+
+ GetRenderPipelineForT2TCopyWithDepthTests(
+ bindGroupLayout: GPUBindGroupLayout,
+ hasColorAttachment: boolean,
+ depthStencil: GPUDepthStencilState
+ ): GPURenderPipeline {
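+ // The vertex shader below writes a per-layer depth pattern: two of the quad's corners use
+ // 0.5 + 0.2 * sin(copyLayer) while the others use 0.0 and 1.0, so every array layer gets a
+ // distinguishable depth gradient. The same helper is also called with depthCompare == 'equal'
+ // and depth writes disabled when verifying the copied depth values.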
+ const renderPipelineDescriptor: GPURenderPipelineDescriptor = {
+ layout: this.device.createPipelineLayout({ bindGroupLayouts: [bindGroupLayout] }),
+ vertex: {
+ module: this.device.createShaderModule({
+ code: `
+ struct Params {
+ copyLayer: f32
+ };
+ @group(0) @binding(0) var<uniform> param: Params;
+ @vertex
+ fn main(@builtin(vertex_index) VertexIndex : u32)-> @builtin(position) vec4<f32> {
+ var depthValue = 0.5 + 0.2 * sin(param.copyLayer);
+ var pos : array<vec3<f32>, 6> = array<vec3<f32>, 6>(
+ vec3<f32>(-1.0, 1.0, depthValue),
+ vec3<f32>(-1.0, -1.0, 0.0),
+ vec3<f32>( 1.0, 1.0, 1.0),
+ vec3<f32>(-1.0, -1.0, 0.0),
+ vec3<f32>( 1.0, 1.0, 1.0),
+ vec3<f32>( 1.0, -1.0, depthValue));
+ return vec4<f32>(pos[VertexIndex], 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ },
+ depthStencil,
+ };
+ if (hasColorAttachment) {
+ renderPipelineDescriptor.fragment = {
+ module: this.device.createShaderModule({
+ code: `
+ @fragment
+ fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ };
+ }
+ return this.device.createRenderPipeline(renderPipelineDescriptor);
+ }
+
+ GetBindGroupLayoutForT2TCopyWithDepthTests(): GPUBindGroupLayout {
+ return this.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.VERTEX,
+ buffer: {
+ type: 'uniform',
+ minBindingSize: 4,
+ hasDynamicOffset: true,
+ },
+ },
+ ],
+ });
+ }
+
+ GetBindGroupForT2TCopyWithDepthTests(
+ bindGroupLayout: GPUBindGroupLayout,
+ totalCopyArrayLayers: number
+ ): GPUBindGroup {
+ // Prepare the uniform buffer that contains all the copy layers to generate different depth
+ // values for different copy layers.
+ assert(totalCopyArrayLayers > 0);
+ const uniformBufferSize = kMinDynamicBufferOffsetAlignment * (totalCopyArrayLayers - 1) + 4;
+ const uniformBufferData = new Float32Array(uniformBufferSize / 4);
+ for (let i = 1; i < totalCopyArrayLayers; ++i) {
+ uniformBufferData[(kMinDynamicBufferOffsetAlignment / 4) * i] = i;
+ }
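+ // Each copy layer's value lives at byte offset i * kMinDynamicBufferOffsetAlignment, so the
+ // render passes can select it per layer via setBindGroup's dynamic offset; the bind group itself
+ // only exposes a 4-byte window of the buffer.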
+ const uniformBuffer = makeBufferWithContents(
+ this.device,
+ uniformBufferData,
+ GPUBufferUsage.COPY_DST | GPUBufferUsage.UNIFORM
+ );
+ return this.device.createBindGroup({
+ layout: bindGroupLayout,
+ entries: [
+ {
+ binding: 0,
+ resource: {
+ buffer: uniformBuffer,
+ size: 4,
+ },
+ },
+ ],
+ });
+ }
+
+ /** Initialize the depth aspect of sourceTexture with draw calls */
+ InitializeDepthAspect(
+ sourceTexture: GPUTexture,
+ depthFormat: GPUTextureFormat,
+ srcCopyLevel: number,
+ srcCopyBaseArrayLayer: number,
+ copySize: readonly [number, number, number]
+ ): void {
+ // Prepare a renderPipeline with depthCompareFunction == 'always' and depthWriteEnabled == true
+ // for the initializations of the depth attachment.
+ const bindGroupLayout = this.GetBindGroupLayoutForT2TCopyWithDepthTests();
+ const renderPipeline = this.GetRenderPipelineForT2TCopyWithDepthTests(bindGroupLayout, false, {
+ format: depthFormat,
+ depthWriteEnabled: true,
+ depthCompare: 'always',
+ });
+ const bindGroup = this.GetBindGroupForT2TCopyWithDepthTests(bindGroupLayout, copySize[2]);
+
+ const hasStencil = kTextureFormatInfo[sourceTexture.format].stencil;
+ const encoder = this.device.createCommandEncoder();
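+ // Each array layer is initialized in its own render pass, since a depth attachment view can only
+ // target a single array layer and mip level.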
+ for (let srcCopyLayer = 0; srcCopyLayer < copySize[2]; ++srcCopyLayer) {
+ const renderPass = encoder.beginRenderPass({
+ colorAttachments: [],
+ depthStencilAttachment: {
+ view: sourceTexture.createView({
+ baseArrayLayer: srcCopyLayer + srcCopyBaseArrayLayer,
+ arrayLayerCount: 1,
+ baseMipLevel: srcCopyLevel,
+ mipLevelCount: 1,
+ }),
+ depthClearValue: 0.0,
+ depthLoadOp: 'clear',
+ depthStoreOp: 'store',
+ stencilLoadOp: hasStencil ? 'load' : undefined,
+ stencilStoreOp: hasStencil ? 'store' : undefined,
+ },
+ });
+ renderPass.setBindGroup(0, bindGroup, [srcCopyLayer * kMinDynamicBufferOffsetAlignment]);
+ renderPass.setPipeline(renderPipeline);
+ renderPass.draw(6);
+ renderPass.end();
+ }
+ this.queue.submit([encoder.finish()]);
+ }
+
+ VerifyDepthAspect(
+ destinationTexture: GPUTexture,
+ depthFormat: GPUTextureFormat,
+ dstCopyLevel: number,
+ dstCopyBaseArrayLayer: number,
+ copySize: [number, number, number]
+ ): void {
+ // Prepare a renderPipeline with depthCompareFunction == 'equal' and depthWriteEnabled == false
+ // for the comparison of the depth attachment.
+ const bindGroupLayout = this.GetBindGroupLayoutForT2TCopyWithDepthTests();
+ const renderPipeline = this.GetRenderPipelineForT2TCopyWithDepthTests(bindGroupLayout, true, {
+ format: depthFormat,
+ depthWriteEnabled: false,
+ depthCompare: 'equal',
+ });
+ const bindGroup = this.GetBindGroupForT2TCopyWithDepthTests(bindGroupLayout, copySize[2]);
+
+ const outputColorTexture = this.trackForCleanup(
+ this.device.createTexture({
+ format: 'rgba8unorm',
+ size: copySize,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ })
+ );
+ const hasStencil = kTextureFormatInfo[destinationTexture.format].stencil;
+ const encoder = this.device.createCommandEncoder();
+ for (let dstCopyLayer = 0; dstCopyLayer < copySize[2]; ++dstCopyLayer) {
+ // If the depth value is not expected, the color of outputColorTexture will remain Red after
+ // the render pass.
+ const renderPass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: outputColorTexture.createView({
+ baseArrayLayer: dstCopyLayer,
+ arrayLayerCount: 1,
+ }),
+ clearValue: { r: 1.0, g: 0.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ depthStencilAttachment: {
+ view: destinationTexture.createView({
+ baseArrayLayer: dstCopyLayer + dstCopyBaseArrayLayer,
+ arrayLayerCount: 1,
+ baseMipLevel: dstCopyLevel,
+ mipLevelCount: 1,
+ }),
+ depthLoadOp: 'load',
+ depthStoreOp: 'store',
+ stencilLoadOp: hasStencil ? 'load' : undefined,
+ stencilStoreOp: hasStencil ? 'store' : undefined,
+ },
+ });
+ renderPass.setBindGroup(0, bindGroup, [dstCopyLayer * kMinDynamicBufferOffsetAlignment]);
+ renderPass.setPipeline(renderPipeline);
+ renderPass.draw(6);
+ renderPass.end();
+ }
+ this.queue.submit([encoder.finish()]);
+
+ this.expectSingleColor(outputColorTexture, 'rgba8unorm', {
+ size: copySize,
+ exp: { R: 0.0, G: 1.0, B: 0.0, A: 1.0 },
+ });
+ }
+}
+
+const kCopyBoxOffsetsForWholeDepth = [
+ // From (0, 0) of src to (0, 0) of dst.
+ {
+ srcOffset: { x: 0, y: 0, z: 0 },
+ dstOffset: { x: 0, y: 0, z: 0 },
+ copyExtent: { width: 0, height: 0, depthOrArrayLayers: 0 },
+ },
+ // From (0, 0) of src to (blockWidth, 0) of dst.
+ {
+ srcOffset: { x: 0, y: 0, z: 0 },
+ dstOffset: { x: 1, y: 0, z: 0 },
+ copyExtent: { width: 0, height: 0, depthOrArrayLayers: 0 },
+ },
+ // From (0, 0) of src to (0, blockHeight) of dst.
+ {
+ srcOffset: { x: 0, y: 0, z: 0 },
+ dstOffset: { x: 0, y: 1, z: 0 },
+ copyExtent: { width: 0, height: 0, depthOrArrayLayers: 0 },
+ },
+ // From (blockWidth, 0) of src to (0, 0) of dst.
+ {
+ srcOffset: { x: 1, y: 0, z: 0 },
+ dstOffset: { x: 0, y: 0, z: 0 },
+ copyExtent: { width: 0, height: 0, depthOrArrayLayers: 0 },
+ },
+ // From (0, blockHeight) of src to (0, 0) of dst.
+ {
+ srcOffset: { x: 0, y: 1, z: 0 },
+ dstOffset: { x: 0, y: 0, z: 0 },
+ copyExtent: { width: 0, height: 0, depthOrArrayLayers: 0 },
+ },
+ // From (blockWidth, 0) of src to (0, 0) of dst, and the copy extent will not cover the last
+ // texel block column of both source and destination texture.
+ {
+ srcOffset: { x: 1, y: 0, z: 0 },
+ dstOffset: { x: 0, y: 0, z: 0 },
+ copyExtent: { width: -1, height: 0, depthOrArrayLayers: 0 },
+ },
+ // From (0, blockHeight) of src to (0, 0) of dst, and the copy extent will not cover the last
+ // texel block row of both source and destination texture.
+ {
+ srcOffset: { x: 0, y: 1, z: 0 },
+ dstOffset: { x: 0, y: 0, z: 0 },
+ copyExtent: { width: 0, height: -1, depthOrArrayLayers: 0 },
+ },
+] as const;
+
+const kCopyBoxOffsetsFor2DArrayTextures = [
+ // Copy all array slices from the source texture to the destination texture. The copy extent
+ // will cover the whole subresource of either the source or the destination texture.
+ ...kCopyBoxOffsetsForWholeDepth,
+
+ // Copy 1 texture slice from the 1st slice of the source texture to the 1st slice of the
+ // destination texture.
+ {
+ srcOffset: { x: 0, y: 0, z: 0 },
+ dstOffset: { x: 0, y: 0, z: 0 },
+ copyExtent: { width: 0, height: 0, depthOrArrayLayers: -2 },
+ },
+ // Copy 1 texture slice from the 2nd slice of the source texture to the 2nd slice of the
+ // destination texture.
+ {
+ srcOffset: { x: 0, y: 0, z: 1 },
+ dstOffset: { x: 0, y: 0, z: 1 },
+ copyExtent: { width: 0, height: 0, depthOrArrayLayers: -3 },
+ },
+ // Copy 1 texture slice from the 1st slice of the source texture to the 2nd slice of the
+ // destination texture.
+ {
+ srcOffset: { x: 0, y: 0, z: 0 },
+ dstOffset: { x: 0, y: 0, z: 1 },
+ copyExtent: { width: 0, height: 0, depthOrArrayLayers: -1 },
+ },
+ // Copy 1 texture slice from the 2nd slice of the source texture to the 1st slice of the
+ // destination texture.
+ {
+ srcOffset: { x: 0, y: 0, z: 1 },
+ dstOffset: { x: 0, y: 0, z: 0 },
+ copyExtent: { width: 0, height: 0, depthOrArrayLayers: -1 },
+ },
+ // Copy 2 texture slices from the 1st slice of the source texture to the 1st slice of the
+ // destination texture.
+ {
+ srcOffset: { x: 0, y: 0, z: 0 },
+ dstOffset: { x: 0, y: 0, z: 0 },
+ copyExtent: { width: 0, height: 0, depthOrArrayLayers: -3 },
+ },
+ // Copy 3 texture slices from the 2nd slice of the source texture to the 2nd slice of the
+ // destination texture.
+ {
+ srcOffset: { x: 0, y: 0, z: 1 },
+ dstOffset: { x: 0, y: 0, z: 1 },
+ copyExtent: { width: 0, height: 0, depthOrArrayLayers: -1 },
+ },
+] as const;
+
+export const g = makeTestGroup(F);
+
+g.test('color_textures,non_compressed,non_array')
+ .desc(
+ `
+ Validate the correctness of the copy by filling the srcTexture with testable data in any
+ non-compressed color format supported by WebGPU, doing a CopyTextureToTexture() copy, and
+ verifying the content of the whole dstTexture.
+
+ Copy {1 texel block, part of, the whole} srcTexture to the dstTexture {with, without} a non-zero
+ valid srcOffset that
+ - covers the whole dstTexture subresource
+ - covers the corners of the dstTexture
+ - doesn't cover any texels that are on the edge of the dstTexture
+ - covers the mipmap level > 0
+
+ Tests for all pairs of valid source/destination formats, and all texture dimensions.
+ `
+ )
+ .params(u =>
+ u
+ .combine('srcFormat', kRegularTextureFormats)
+ .combine('dstFormat', kRegularTextureFormats)
+ .filter(({ srcFormat, dstFormat }) => {
+ const srcBaseFormat = kTextureFormatInfo[srcFormat].baseFormat;
+ const dstBaseFormat = kTextureFormatInfo[dstFormat].baseFormat;
+ return (
+ srcFormat === dstFormat ||
+ (srcBaseFormat !== undefined &&
+ dstBaseFormat !== undefined &&
+ srcBaseFormat === dstBaseFormat)
+ );
+ })
+ .combine('dimension', kTextureDimensions)
+ .filter(
+ ({ dimension, srcFormat, dstFormat }) =>
+ textureDimensionAndFormatCompatible(dimension, srcFormat) &&
+ textureDimensionAndFormatCompatible(dimension, dstFormat)
+ )
+ .beginSubcases()
+ .expandWithParams(p => {
+ const params = [
+ {
+ srcTextureSize: { width: 32, height: 32, depthOrArrayLayers: 1 },
+ dstTextureSize: { width: 32, height: 32, depthOrArrayLayers: 1 },
+ },
+ {
+ srcTextureSize: { width: 31, height: 33, depthOrArrayLayers: 1 },
+ dstTextureSize: { width: 31, height: 33, depthOrArrayLayers: 1 },
+ },
+ {
+ srcTextureSize: { width: 32, height: 32, depthOrArrayLayers: 1 },
+ dstTextureSize: { width: 64, height: 64, depthOrArrayLayers: 1 },
+ },
+ {
+ srcTextureSize: { width: 32, height: 32, depthOrArrayLayers: 1 },
+ dstTextureSize: { width: 63, height: 61, depthOrArrayLayers: 1 },
+ },
+ ];
+ if (p.dimension === '1d') {
+ for (const param of params) {
+ param.srcTextureSize.height = 1;
+ param.dstTextureSize.height = 1;
+ }
+ }
+
+ return params;
+ })
+ .combine('copyBoxOffsets', kCopyBoxOffsetsForWholeDepth)
+ .unless(
+ p =>
+ p.dimension === '1d' &&
+ (p.copyBoxOffsets.copyExtent.height !== 0 ||
+ p.copyBoxOffsets.srcOffset.y !== 0 ||
+ p.copyBoxOffsets.dstOffset.y !== 0)
+ )
+ .combine('srcCopyLevel', [0, 3])
+ .combine('dstCopyLevel', [0, 3])
+ .unless(p => p.dimension === '1d' && (p.srcCopyLevel !== 0 || p.dstCopyLevel !== 0))
+ )
+ .fn(t => {
+ const {
+ dimension,
+ srcTextureSize,
+ dstTextureSize,
+ srcFormat,
+ dstFormat,
+ copyBoxOffsets,
+ srcCopyLevel,
+ dstCopyLevel,
+ } = t.params;
+
+ t.DoCopyTextureToTextureTest(
+ dimension,
+ srcTextureSize,
+ dstTextureSize,
+ srcFormat,
+ dstFormat,
+ copyBoxOffsets,
+ srcCopyLevel,
+ dstCopyLevel
+ );
+ });
+
+g.test('color_textures,compressed,non_array')
+ .desc(
+ `
+ Validate the correctness of the copy by filling the srcTexture with testable data in any
+ compressed color format supported by WebGPU, doing a CopyTextureToTexture() copy, and
+ verifying the content of the whole dstTexture.
+
+ Tests for all pairs of valid source/destination formats, and all texture dimensions.
+ `
+ )
+ .params(u =>
+ u
+ .combine('srcFormat', kCompressedTextureFormats)
+ .combine('dstFormat', kCompressedTextureFormats)
+ .filter(({ srcFormat, dstFormat }) => {
+ const srcBaseFormat = kTextureFormatInfo[srcFormat].baseFormat;
+ const dstBaseFormat = kTextureFormatInfo[dstFormat].baseFormat;
+ return (
+ srcFormat === dstFormat ||
+ (srcBaseFormat !== undefined &&
+ dstBaseFormat !== undefined &&
+ srcBaseFormat === dstBaseFormat)
+ );
+ })
+ .combine('dimension', kTextureDimensions)
+ .filter(
+ ({ dimension, srcFormat, dstFormat }) =>
+ textureDimensionAndFormatCompatible(dimension, srcFormat) &&
+ textureDimensionAndFormatCompatible(dimension, dstFormat)
+ )
+ .beginSubcases()
+ .combine('textureSizeInBlocks', [
+ // The heights and widths in blocks are all powers of 2
+ { src: { width: 16, height: 8 }, dst: { width: 16, height: 8 } },
+ // The virtual width of the source texture at mipmap level 2 is not a multiple of the block width
+ { src: { width: 15, height: 8 }, dst: { width: 16, height: 8 } },
+ // The virtual width of the destination texture at mipmap level 2 is not a multiple of the
+ // block width
+ { src: { width: 16, height: 8 }, dst: { width: 15, height: 8 } },
+ // The virtual height of the source texture at mipmap level 2 is not a multiple of the
+ // block height
+ { src: { width: 16, height: 13 }, dst: { width: 16, height: 8 } },
+ // The virtual height of the destination texture at mipmap level 2 is not a multiple of the
+ // block height
+ { src: { width: 16, height: 8 }, dst: { width: 16, height: 13 } },
+ // None of the widths or heights in blocks is a power of 2
+ { src: { width: 15, height: 13 }, dst: { width: 15, height: 13 } },
+ ])
+ .combine('copyBoxOffsets', kCopyBoxOffsetsForWholeDepth)
+ .combine('srcCopyLevel', [0, 2])
+ .combine('dstCopyLevel', [0, 2])
+ )
+ .beforeAllSubcases(t => {
+ const { srcFormat, dstFormat } = t.params;
+ t.skipIfCopyTextureToTextureNotSupportedForFormat(srcFormat, dstFormat);
+ t.selectDeviceOrSkipTestCase([
+ kTextureFormatInfo[srcFormat].feature,
+ kTextureFormatInfo[dstFormat].feature,
+ ]);
+ })
+ .fn(t => {
+ const {
+ dimension,
+ textureSizeInBlocks,
+ srcFormat,
+ dstFormat,
+ copyBoxOffsets,
+ srcCopyLevel,
+ dstCopyLevel,
+ } = t.params;
+ const srcBlockWidth = kTextureFormatInfo[srcFormat].blockWidth;
+ const srcBlockHeight = kTextureFormatInfo[srcFormat].blockHeight;
+ const dstBlockWidth = kTextureFormatInfo[dstFormat].blockWidth;
+ const dstBlockHeight = kTextureFormatInfo[dstFormat].blockHeight;
+
+ t.DoCopyTextureToTextureTest(
+ dimension,
+ {
+ width: textureSizeInBlocks.src.width * srcBlockWidth,
+ height: textureSizeInBlocks.src.height * srcBlockHeight,
+ depthOrArrayLayers: 1,
+ },
+ {
+ width: textureSizeInBlocks.dst.width * dstBlockWidth,
+ height: textureSizeInBlocks.dst.height * dstBlockHeight,
+ depthOrArrayLayers: 1,
+ },
+ srcFormat,
+ dstFormat,
+ copyBoxOffsets,
+ srcCopyLevel,
+ dstCopyLevel
+ );
+ });
+
+g.test('color_textures,non_compressed,array')
+ .desc(
+ `
+ Validate the correctness of the texture-to-texture copy on 2D array textures by filling the
+ srcTexture with testable data in any non-compressed color format supported by WebGPU, doing a
+ CopyTextureToTexture() copy, and verifying the content of the whole dstTexture.
+ `
+ )
+ .params(u =>
+ u
+ .combine('srcFormat', kRegularTextureFormats)
+ .combine('dstFormat', kRegularTextureFormats)
+ .filter(({ srcFormat, dstFormat }) => {
+ const srcBaseFormat = kTextureFormatInfo[srcFormat].baseFormat;
+ const dstBaseFormat = kTextureFormatInfo[dstFormat].baseFormat;
+ return (
+ srcFormat === dstFormat ||
+ (srcBaseFormat !== undefined &&
+ dstBaseFormat !== undefined &&
+ srcBaseFormat === dstBaseFormat)
+ );
+ })
+ .combine('dimension', ['2d', '3d'] as const)
+ .filter(
+ ({ dimension, srcFormat, dstFormat }) =>
+ textureDimensionAndFormatCompatible(dimension, srcFormat) &&
+ textureDimensionAndFormatCompatible(dimension, dstFormat)
+ )
+ .beginSubcases()
+ .combine('textureSize', [
+ {
+ srcTextureSize: { width: 64, height: 32, depthOrArrayLayers: 5 },
+ dstTextureSize: { width: 64, height: 32, depthOrArrayLayers: 5 },
+ },
+ {
+ srcTextureSize: { width: 31, height: 33, depthOrArrayLayers: 5 },
+ dstTextureSize: { width: 31, height: 33, depthOrArrayLayers: 5 },
+ },
+ {
+ srcTextureSize: { width: 31, height: 32, depthOrArrayLayers: 33 },
+ dstTextureSize: { width: 31, height: 32, depthOrArrayLayers: 33 },
+ },
+ ])
+
+ .combine('copyBoxOffsets', kCopyBoxOffsetsFor2DArrayTextures)
+ .combine('srcCopyLevel', [0, 3])
+ .combine('dstCopyLevel', [0, 3])
+ )
+ .fn(t => {
+ const {
+ dimension,
+ textureSize,
+ srcFormat,
+ dstFormat,
+ copyBoxOffsets,
+ srcCopyLevel,
+ dstCopyLevel,
+ } = t.params;
+
+ t.DoCopyTextureToTextureTest(
+ dimension,
+ textureSize.srcTextureSize,
+ textureSize.dstTextureSize,
+ srcFormat,
+ dstFormat,
+ copyBoxOffsets,
+ srcCopyLevel,
+ dstCopyLevel
+ );
+ });
+
+g.test('color_textures,compressed,array')
+ .desc(
+ `
+ Validate the correctness of the texture-to-texture copy on 2D array textures by filling the
+ srcTexture with testable data in any compressed color format supported by WebGPU, doing a
+ CopyTextureToTexture() copy, and verifying the content of the whole dstTexture.
+
+ Tests for all pairs of valid source/destination formats, and all texture dimensions.
+ `
+ )
+ .params(u =>
+ u
+ .combine('srcFormat', kCompressedTextureFormats)
+ .combine('dstFormat', kCompressedTextureFormats)
+ .filter(({ srcFormat, dstFormat }) => {
+ const srcBaseFormat = kTextureFormatInfo[srcFormat].baseFormat;
+ const dstBaseFormat = kTextureFormatInfo[dstFormat].baseFormat;
+ return (
+ srcFormat === dstFormat ||
+ (srcBaseFormat !== undefined &&
+ dstBaseFormat !== undefined &&
+ srcBaseFormat === dstBaseFormat)
+ );
+ })
+ .combine('dimension', ['2d', '3d'] as const)
+ .filter(
+ ({ dimension, srcFormat, dstFormat }) =>
+ textureDimensionAndFormatCompatible(dimension, srcFormat) &&
+ textureDimensionAndFormatCompatible(dimension, dstFormat)
+ )
+ .beginSubcases()
+ .combine('textureSizeInBlocks', [
+ // The heights and widths in blocks are all powers of 2
+ { src: { width: 2, height: 2 }, dst: { width: 2, height: 2 } },
+ // None of the widths or heights in blocks is a power of 2
+ { src: { width: 15, height: 13 }, dst: { width: 15, height: 13 } },
+ ])
+ .combine('copyBoxOffsets', kCopyBoxOffsetsFor2DArrayTextures)
+ .combine('srcCopyLevel', [0, 2])
+ .combine('dstCopyLevel', [0, 2])
+ )
+ .beforeAllSubcases(t => {
+ const { srcFormat, dstFormat } = t.params;
+ t.skipIfCopyTextureToTextureNotSupportedForFormat(srcFormat, dstFormat);
+ t.selectDeviceOrSkipTestCase([
+ kTextureFormatInfo[srcFormat].feature,
+ kTextureFormatInfo[dstFormat].feature,
+ ]);
+ })
+ .fn(t => {
+ const {
+ dimension,
+ textureSizeInBlocks,
+ srcFormat,
+ dstFormat,
+ copyBoxOffsets,
+ srcCopyLevel,
+ dstCopyLevel,
+ } = t.params;
+ const srcBlockWidth = kTextureFormatInfo[srcFormat].blockWidth;
+ const srcBlockHeight = kTextureFormatInfo[srcFormat].blockHeight;
+ const dstBlockWidth = kTextureFormatInfo[dstFormat].blockWidth;
+ const dstBlockHeight = kTextureFormatInfo[dstFormat].blockHeight;
+
+ t.DoCopyTextureToTextureTest(
+ dimension,
+ {
+ width: textureSizeInBlocks.src.width * srcBlockWidth,
+ height: textureSizeInBlocks.src.height * srcBlockHeight,
+ depthOrArrayLayers: 5,
+ },
+ {
+ width: textureSizeInBlocks.dst.width * dstBlockWidth,
+ height: textureSizeInBlocks.dst.height * dstBlockHeight,
+ depthOrArrayLayers: 5,
+ },
+ srcFormat,
+ dstFormat,
+ copyBoxOffsets,
+ srcCopyLevel,
+ dstCopyLevel
+ );
+ });
+
+g.test('zero_sized')
+ .desc(
+ `
+ Validate the correctness of zero-sized copies (should be no-ops).
+
+ - For each texture dimension.
+ - Copies that are zero-sized in only one dimension {x, y, z}, each touching the {lower, upper} end
+ of that dimension.
+ `
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combineWithParams([
+ { dimension: '1d', textureSize: { width: 32, height: 1, depthOrArrayLayers: 1 } },
+ { dimension: '2d', textureSize: { width: 32, height: 32, depthOrArrayLayers: 5 } },
+ { dimension: '3d', textureSize: { width: 32, height: 32, depthOrArrayLayers: 5 } },
+ ] as const)
+ .combine('copyBoxOffset', [
+ // copyExtent.width === 0
+ {
+ srcOffset: { x: 0, y: 0, z: 0 },
+ dstOffset: { x: 0, y: 0, z: 0 },
+ copyExtent: { width: -64, height: 0, depthOrArrayLayers: 0 },
+ },
+ // copyExtent.width === 0 && srcOffset.x === textureWidth
+ {
+ srcOffset: { x: 64, y: 0, z: 0 },
+ dstOffset: { x: 0, y: 0, z: 0 },
+ copyExtent: { width: -64, height: 0, depthOrArrayLayers: 0 },
+ },
+ // copyExtent.width === 0 && dstOffset.x === textureWidth
+ {
+ srcOffset: { x: 0, y: 0, z: 0 },
+ dstOffset: { x: 64, y: 0, z: 0 },
+ copyExtent: { width: -64, height: 0, depthOrArrayLayers: 0 },
+ },
+ // copyExtent.height === 0
+ {
+ srcOffset: { x: 0, y: 0, z: 0 },
+ dstOffset: { x: 0, y: 0, z: 0 },
+ copyExtent: { width: 0, height: -32, depthOrArrayLayers: 0 },
+ },
+ // copyExtent.height === 0 && srcOffset.y === textureHeight
+ {
+ srcOffset: { x: 0, y: 32, z: 0 },
+ dstOffset: { x: 0, y: 0, z: 0 },
+ copyExtent: { width: 0, height: -32, depthOrArrayLayers: 0 },
+ },
+ // copyExtent.height === 0 && dstOffset.y === textureHeight
+ {
+ srcOffset: { x: 0, y: 0, z: 0 },
+ dstOffset: { x: 0, y: 32, z: 0 },
+ copyExtent: { width: 0, height: -32, depthOrArrayLayers: 0 },
+ },
+ // copyExtent.depthOrArrayLayers === 0
+ {
+ srcOffset: { x: 0, y: 0, z: 0 },
+ dstOffset: { x: 0, y: 0, z: 0 },
+ copyExtent: { width: 0, height: 0, depthOrArrayLayers: -5 },
+ },
+ // copyExtent.depthOrArrayLayers === 0 && srcOffset.z === textureDepth
+ {
+ srcOffset: { x: 0, y: 0, z: 5 },
+ dstOffset: { x: 0, y: 0, z: 0 },
+ copyExtent: { width: 0, height: 0, depthOrArrayLayers: 0 },
+ },
+ // copyExtent.depthOrArrayLayers === 0 && dstOffset.z === textureDepth
+ {
+ srcOffset: { x: 0, y: 0, z: 0 },
+ dstOffset: { x: 0, y: 0, z: 5 },
+ copyExtent: { width: 0, height: 0, depthOrArrayLayers: 0 },
+ },
+ ])
+ .unless(
+ p =>
+ p.dimension === '1d' &&
+ (p.copyBoxOffset.copyExtent.height !== 0 ||
+ p.copyBoxOffset.srcOffset.y !== 0 ||
+ p.copyBoxOffset.dstOffset.y !== 0)
+ )
+ .combine('srcCopyLevel', [0, 3])
+ .combine('dstCopyLevel', [0, 3])
+ .unless(p => p.dimension === '1d' && (p.srcCopyLevel !== 0 || p.dstCopyLevel !== 0))
+ )
+ .fn(t => {
+ const { dimension, textureSize, copyBoxOffset, srcCopyLevel, dstCopyLevel } = t.params;
+
+ const srcFormat = 'rgba8unorm';
+ const dstFormat = 'rgba8unorm';
+
+ t.DoCopyTextureToTextureTest(
+ dimension,
+ textureSize,
+ textureSize,
+ srcFormat,
+ dstFormat,
+ copyBoxOffset,
+ srcCopyLevel,
+ dstCopyLevel
+ );
+ });
+
+g.test('copy_depth_stencil')
+ .desc(
+ `
+ Validate the correctness of copyTextureToTexture() with depth and stencil aspect.
+
+ For all the texture formats with stencil aspect:
+ - Initialize the stencil aspect of the source texture with writeTexture().
+ - Copy the stencil aspect from the source texture into the destination texture
+ - Copy the stencil aspect of the destination texture into another staging buffer and check its
+ content
+ - Test the copies from / into zero / non-zero array layer / mipmap levels
+ - Test copying multiple array layers
+
+ For all the texture formats with depth aspect:
+ - Initialize the depth aspect of the source texture with a draw call
+ - Copy the depth aspect from the source texture into the destination texture
+ - Validate the content in the destination texture with the depth comparison function 'equal'
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kDepthStencilFormats)
+ .beginSubcases()
+ .combine('srcTextureSize', [
+ { width: 32, height: 16, depthOrArrayLayers: 1 },
+ { width: 32, height: 16, depthOrArrayLayers: 4 },
+ { width: 24, height: 48, depthOrArrayLayers: 5 },
+ ])
+ .combine('srcCopyLevel', [0, 2])
+ .combine('dstCopyLevel', [0, 2])
+ .combine('srcCopyBaseArrayLayer', [0, 1])
+ .combine('dstCopyBaseArrayLayer', [0, 1])
+ .filter(t => {
+ return (
+ t.srcTextureSize.depthOrArrayLayers > t.srcCopyBaseArrayLayer &&
+ t.srcTextureSize.depthOrArrayLayers > t.dstCopyBaseArrayLayer
+ );
+ })
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ t.selectDeviceForTextureFormatOrSkipTestCase(format);
+ })
+ .fn(t => {
+ const {
+ format,
+ srcTextureSize,
+ srcCopyLevel,
+ dstCopyLevel,
+ srcCopyBaseArrayLayer,
+ dstCopyBaseArrayLayer,
+ } = t.params;
+
+ const copySize: [number, number, number] = [
+ srcTextureSize.width >> srcCopyLevel,
+ srcTextureSize.height >> srcCopyLevel,
+ srcTextureSize.depthOrArrayLayers - Math.max(srcCopyBaseArrayLayer, dstCopyBaseArrayLayer),
+ ];
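+ // The copy covers the full mip extent at srcCopyLevel and every array layer that both the source
+ // (starting at srcCopyBaseArrayLayer) and the destination (starting at dstCopyBaseArrayLayer)
+ // can still provide.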
+ const sourceTexture = t.trackForCleanup(
+ t.device.createTexture({
+ format,
+ size: srcTextureSize,
+ usage:
+ GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
+ mipLevelCount: srcCopyLevel + 1,
+ })
+ );
+ const destinationTexture = t.trackForCleanup(
+ t.device.createTexture({
+ format,
+ size: [
+ copySize[0] << dstCopyLevel,
+ copySize[1] << dstCopyLevel,
+ srcTextureSize.depthOrArrayLayers,
+ ] as const,
+ usage:
+ GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
+ mipLevelCount: dstCopyLevel + 1,
+ })
+ );
+
+ let initialStencilData: undefined | Uint8Array = undefined;
+ if (kTextureFormatInfo[format].stencil) {
+ initialStencilData = t.GetInitialStencilDataPerMipLevel(srcTextureSize, format, srcCopyLevel);
+ t.InitializeStencilAspect(
+ sourceTexture,
+ initialStencilData,
+ srcCopyLevel,
+ srcCopyBaseArrayLayer,
+ copySize
+ );
+ }
+ if (kTextureFormatInfo[format].depth) {
+ t.InitializeDepthAspect(sourceTexture, format, srcCopyLevel, srcCopyBaseArrayLayer, copySize);
+ }
+
+ const encoder = t.device.createCommandEncoder();
+ encoder.copyTextureToTexture(
+ {
+ texture: sourceTexture,
+ mipLevel: srcCopyLevel,
+ origin: { x: 0, y: 0, z: srcCopyBaseArrayLayer },
+ },
+ {
+ texture: destinationTexture,
+ mipLevel: dstCopyLevel,
+ origin: { x: 0, y: 0, z: dstCopyBaseArrayLayer },
+ },
+ copySize
+ );
+ t.queue.submit([encoder.finish()]);
+
+ if (kTextureFormatInfo[format].stencil) {
+ assert(initialStencilData !== undefined);
+ t.VerifyStencilAspect(
+ destinationTexture,
+ initialStencilData,
+ dstCopyLevel,
+ dstCopyBaseArrayLayer,
+ copySize
+ );
+ }
+ if (kTextureFormatInfo[format].depth) {
+ t.VerifyDepthAspect(
+ destinationTexture,
+ format,
+ dstCopyLevel,
+ dstCopyBaseArrayLayer,
+ copySize
+ );
+ }
+ });
+
+g.test('copy_multisampled_color')
+ .desc(
+ `
+ Validate the correctness of copyTextureToTexture() with multisampled color formats.
+
+ - Initialize the source texture with a triangle in a render pass.
+ - Copy from the source texture into the destination texture with CopyTextureToTexture().
+ - Compare every sub-pixel of source texture and destination texture in another render pass:
+ - If they are different, then output RED; otherwise output GREEN
+ - Verify the pixels in the output texture are all GREEN.
+ - Note that in the current WebGPU spec, the mipmap level count and array layer count of a multisampled
+ texture can only be 1.
+ `
+ )
+ .fn(t => {
+ const textureSize = [32, 16, 1] as const;
+ const kColorFormat = 'rgba8unorm';
+ const kSampleCount = 4;
+
+ const sourceTexture = t.device.createTexture({
+ format: kColorFormat,
+ size: textureSize,
+ usage:
+ GPUTextureUsage.COPY_SRC |
+ GPUTextureUsage.TEXTURE_BINDING |
+ GPUTextureUsage.RENDER_ATTACHMENT,
+ sampleCount: kSampleCount,
+ });
+ t.trackForCleanup(sourceTexture);
+ const destinationTexture = t.device.createTexture({
+ format: kColorFormat,
+ size: textureSize,
+ usage:
+ GPUTextureUsage.COPY_DST |
+ GPUTextureUsage.TEXTURE_BINDING |
+ GPUTextureUsage.RENDER_ATTACHMENT,
+ sampleCount: kSampleCount,
+ });
+ t.trackForCleanup(destinationTexture);
+
+ // Initialize sourceTexture with a draw call.
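+ // (A multisampled texture cannot be the destination of writeTexture() or a buffer-to-texture
+ // copy, so it has to be initialized by rendering.)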
+ const renderPipelineForInit = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ @vertex
+ fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+ var pos = array<vec2<f32>, 3>(
+ vec2<f32>(-1.0, 1.0),
+ vec2<f32>( 1.0, 1.0),
+ vec2<f32>( 1.0, -1.0)
+ );
+ return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `
+ @fragment
+ fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(0.3, 0.5, 0.8, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: kColorFormat }],
+ },
+ multisample: {
+ count: kSampleCount,
+ },
+ });
+ const initEncoder = t.device.createCommandEncoder();
+ const renderPassForInit = initEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: sourceTexture.createView(),
+ clearValue: [1.0, 0.0, 0.0, 1.0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ renderPassForInit.setPipeline(renderPipelineForInit);
+ renderPassForInit.draw(3);
+ renderPassForInit.end();
+ t.queue.submit([initEncoder.finish()]);
+
+ // Do the texture-to-texture copy
+ const copyEncoder = t.device.createCommandEncoder();
+ copyEncoder.copyTextureToTexture(
+ {
+ texture: sourceTexture,
+ },
+ {
+ texture: destinationTexture,
+ },
+ textureSize
+ );
+ t.queue.submit([copyEncoder.finish()]);
+
+ // Verify if all the sub-pixel values at the same location of sourceTexture and
+ // destinationTexture are equal.
+ const renderPipelineForValidation = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ @vertex
+ fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+ var pos = array<vec2<f32>, 6>(
+ vec2<f32>(-1.0, 1.0),
+ vec2<f32>(-1.0, -1.0),
+ vec2<f32>( 1.0, 1.0),
+ vec2<f32>(-1.0, -1.0),
+ vec2<f32>( 1.0, 1.0),
+ vec2<f32>( 1.0, -1.0));
+ return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `
+ @group(0) @binding(0) var sourceTexture : texture_multisampled_2d<f32>;
+ @group(0) @binding(1) var destinationTexture : texture_multisampled_2d<f32>;
+ @fragment
+ fn main(@builtin(position) coord_in: vec4<f32>) -> @location(0) vec4<f32> {
+ var coord_in_vec2 = vec2<i32>(i32(coord_in.x), i32(coord_in.y));
+ for (var sampleIndex = 0; sampleIndex < ${kSampleCount};
+ sampleIndex = sampleIndex + 1) {
+ var sourceSubPixel : vec4<f32> =
+ textureLoad(sourceTexture, coord_in_vec2, sampleIndex);
+ var destinationSubPixel : vec4<f32> =
+ textureLoad(destinationTexture, coord_in_vec2, sampleIndex);
+ if (!all(sourceSubPixel == destinationSubPixel)) {
+ return vec4<f32>(1.0, 0.0, 0.0, 1.0);
+ }
+ }
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: kColorFormat }],
+ },
+ });
+ const bindGroup = t.device.createBindGroup({
+ layout: renderPipelineForValidation.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: sourceTexture.createView(),
+ },
+ {
+ binding: 1,
+ resource: destinationTexture.createView(),
+ },
+ ],
+ });
+ const expectedOutputTexture = t.device.createTexture({
+ format: kColorFormat,
+ size: textureSize,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ t.trackForCleanup(expectedOutputTexture);
+ const validationEncoder = t.device.createCommandEncoder();
+ const renderPassForValidation = validationEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: expectedOutputTexture.createView(),
+ clearValue: [1.0, 0.0, 0.0, 1.0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ renderPassForValidation.setPipeline(renderPipelineForValidation);
+ renderPassForValidation.setBindGroup(0, bindGroup);
+ renderPassForValidation.draw(6);
+ renderPassForValidation.end();
+ t.queue.submit([validationEncoder.finish()]);
+
+ t.expectSingleColor(expectedOutputTexture, 'rgba8unorm', {
+ size: [textureSize[0], textureSize[1], textureSize[2]],
+ exp: { R: 0.0, G: 1.0, B: 0.0, A: 1.0 },
+ });
+ });
+
+g.test('copy_multisampled_depth')
+ .desc(
+ `
+ Validate the correctness of copyTextureToTexture() with multisampled depth formats.
+
+ - Initialize the source texture with a triangle in a render pass.
+ - Copy from the source texture into the destination texture with CopyTextureToTexture().
+ - Validate the content in the destination texture with the depth comparison function 'equal'.
+ - Note that in the current WebGPU spec, the mipmap level count and array layer count of a multisampled
+ texture can only be 1.
+ `
+ )
+ .fn(t => {
+ const textureSize = [32, 16, 1] as const;
+ const kDepthFormat = 'depth24plus';
+ const kSampleCount = 4;
+
+ const sourceTexture = t.device.createTexture({
+ format: kDepthFormat,
+ size: textureSize,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ sampleCount: kSampleCount,
+ });
+ t.trackForCleanup(sourceTexture);
+ const destinationTexture = t.device.createTexture({
+ format: kDepthFormat,
+ size: textureSize,
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
+ sampleCount: kSampleCount,
+ });
+ t.trackForCleanup(destinationTexture);
+
+ const vertexState: GPUVertexState = {
+ module: t.device.createShaderModule({
+ code: `
+ @vertex
+ fn main(@builtin(vertex_index) VertexIndex : u32)-> @builtin(position) vec4<f32> {
+ var pos : array<vec3<f32>, 6> = array<vec3<f32>, 6>(
+ vec3<f32>(-1.0, 1.0, 0.5),
+ vec3<f32>(-1.0, -1.0, 0.0),
+ vec3<f32>( 1.0, 1.0, 1.0),
+ vec3<f32>(-1.0, -1.0, 0.0),
+ vec3<f32>( 1.0, 1.0, 1.0),
+ vec3<f32>( 1.0, -1.0, 0.5));
+ return vec4<f32>(pos[VertexIndex], 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ };
+
+ // Initialize the depth aspect of source texture with a draw call
+ const renderPipelineForInit = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: vertexState,
+ depthStencil: {
+ format: kDepthFormat,
+ depthCompare: 'always',
+ depthWriteEnabled: true,
+ },
+ multisample: {
+ count: kSampleCount,
+ },
+ });
+
+ const encoderForInit = t.device.createCommandEncoder();
+ const renderPassForInit = encoderForInit.beginRenderPass({
+ colorAttachments: [],
+ depthStencilAttachment: {
+ view: sourceTexture.createView(),
+ depthClearValue: 0.0,
+ depthLoadOp: 'clear',
+ depthStoreOp: 'store',
+ },
+ });
+ renderPassForInit.setPipeline(renderPipelineForInit);
+ renderPassForInit.draw(6);
+ renderPassForInit.end();
+ t.queue.submit([encoderForInit.finish()]);
+
+ // Do the texture-to-texture copy
+ const copyEncoder = t.device.createCommandEncoder();
+ copyEncoder.copyTextureToTexture(
+ {
+ texture: sourceTexture,
+ },
+ {
+ texture: destinationTexture,
+ },
+ textureSize
+ );
+ t.queue.submit([copyEncoder.finish()]);
+
+ // Verify the depth values in destinationTexture are what we expected with
+ // depthCompareFunction == 'equal' and depthWriteEnabled == false in the render pipeline
+ const kColorFormat = 'rgba8unorm';
+ const renderPipelineForVerify = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: vertexState,
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `
+ @fragment
+ fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: kColorFormat }],
+ },
+ depthStencil: {
+ format: kDepthFormat,
+ depthCompare: 'equal',
+ depthWriteEnabled: false,
+ },
+ multisample: {
+ count: kSampleCount,
+ },
+ });
+ const multisampledColorTexture = t.device.createTexture({
+ format: kColorFormat,
+ size: textureSize,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ sampleCount: kSampleCount,
+ });
+ t.trackForCleanup(multisampledColorTexture);
+ const colorTextureAsResolveTarget = t.device.createTexture({
+ format: kColorFormat,
+ size: textureSize,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ t.trackForCleanup(colorTextureAsResolveTarget);
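+ // The multisampled color attachment is resolved into colorTextureAsResolveTarget and its own
+ // contents are discarded; only the resolved single-sample image is checked below.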
+
+ const encoderForVerify = t.device.createCommandEncoder();
+ const renderPassForVerify = encoderForVerify.beginRenderPass({
+ colorAttachments: [
+ {
+ view: multisampledColorTexture.createView(),
+ clearValue: { r: 1.0, g: 0.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'discard',
+ resolveTarget: colorTextureAsResolveTarget.createView(),
+ },
+ ],
+ depthStencilAttachment: {
+ view: destinationTexture.createView(),
+ depthLoadOp: 'load',
+ depthStoreOp: 'store',
+ },
+ });
+ renderPassForVerify.setPipeline(renderPipelineForVerify);
+ renderPassForVerify.draw(6);
+ renderPassForVerify.end();
+ t.queue.submit([encoderForVerify.finish()]);
+
+ t.expectSingleColor(colorTextureAsResolveTarget, kColorFormat, {
+ size: [textureSize[0], textureSize[1], textureSize[2]],
+ exp: { R: 0.0, G: 1.0, B: 0.0, A: 1.0 },
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/image_copy.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/image_copy.spec.ts
new file mode 100644
index 0000000000..4eebc3d611
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/image_copy.spec.ts
@@ -0,0 +1,2098 @@
+export const description = `writeTexture + copyBufferToTexture + copyTextureToBuffer operation tests.
+
+* copy_with_various_rows_per_image_and_bytes_per_row: test that copying data with various bytesPerRow (including { ==, > } bytesInACompleteRow) and\
+ rowsPerImage (including { ==, > } copyExtent.height) values and minimum required bytes in copy works for every format. Also covers special code paths:
+ - bufferSize - offset < bytesPerImage * copyExtent.depthOrArrayLayers
+ - when bytesPerRow is not a multiple of 512 and copyExtent.depthOrArrayLayers > 1: copyExtent.depthOrArrayLayers % 2 == { 0, 1 }
+ - bytesPerRow == bytesInACompleteCopyImage
+
+* copy_with_various_offsets_and_data_sizes: test that copying data with various offset (including { ==, > } 0 and is/isn't power of 2) values and additional\
+ data paddings works for every format with 2d and 2d-array textures. Also covers special code paths:
+ - offset + bytesInCopyExtentPerRow { ==, > } bytesPerRow
+ - offset > bytesInACompleteCopyImage
+
+* copy_with_various_origins_and_copy_extents: test that copying slices of a texture works with various origin (including { origin.x, origin.y, origin.z }\
+ { ==, > } 0 and is/isn't power of 2) and copyExtent (including { copyExtent.x, copyExtent.y, copyExtent.z } { ==, > } 0 and is/isn't power of 2) values\
+ (also including {origin._ + copyExtent._ { ==, < } the subresource size of textureCopyView) works for all formats. origin and copyExtent values are passed\
+ as [number, number, number] instead of GPUExtent3DDict.
+
+* copy_various_mip_levels: test that copying various mip levels works for all formats. Also covers special code paths:
+ - the physical size of the subresource is not equal to the logical size
+ - bufferSize - offset < bytesPerImage * copyExtent.depthOrArrayLayers and copyExtent needs to be clamped
+
+* copy_with_no_image_or_slice_padding_and_undefined_values: test that when copying a single row we can set any bytesPerRow value and when copying a single\
+ slice we can set rowsPerImage to 0. Also test setting offset, rowsPerImage, mipLevel, origin, origin.{x,y,z} to undefined.
+
+* TODO:
+ - add another initMethod which renders the texture [3]
+ - test copyT2B with buffer size not divisible by 4 (not done because expectContents 4-byte alignment)
+ - Convert the float32 values in initialData into the ones compatible to the depth aspect of
+ depthFormats when depth16unorm is supported by the browsers in
+ DoCopyTextureToBufferWithDepthAspectTest().
+
+TODO: Expand tests of GPUExtent3D [1]
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import {
+ assert,
+ ErrorWithExtra,
+ memcpy,
+ TypedArrayBufferView,
+ unreachable,
+} from '../../../../common/util/util.js';
+import {
+ kMinDynamicBufferOffsetAlignment,
+ kBufferSizeAlignment,
+ kTextureDimensions,
+} from '../../../capability_info.js';
+import {
+ kTextureFormatInfo,
+ kDepthStencilFormats,
+ kColorTextureFormats,
+ depthStencilBufferTextureCopySupported,
+ textureDimensionAndFormatCompatible,
+ depthStencilFormatAspectSize,
+ DepthStencilFormat,
+ ColorTextureFormat,
+ RegularTextureFormat,
+ isCompressedTextureFormat,
+} from '../../../format_info.js';
+import { GPUTest, TextureTestMixin } from '../../../gpu_test.js';
+import { makeBufferWithContents } from '../../../util/buffer.js';
+import { checkElementsEqual } from '../../../util/check_contents.js';
+import { align } from '../../../util/math.js';
+import { physicalMipSizeFromTexture } from '../../../util/texture/base.js';
+import { DataArrayGenerator } from '../../../util/texture/data_generation.js';
+import {
+ bytesInACompleteRow,
+ dataBytesForCopyOrFail,
+ getTextureCopyLayout,
+ kBytesPerRowAlignment,
+ TextureCopyLayout,
+} from '../../../util/texture/layout.js';
+import { TexelView } from '../../../util/texture/texel_view.js';
+import { findFailedPixels } from '../../../util/texture/texture_ok.js';
+
+interface TextureCopyViewWithRequiredOrigin {
+ texture: GPUTexture;
+ mipLevel: number | undefined;
+ origin: Required<GPUOrigin3DDict>;
+}
+
+/** Describes the function used to copy the initial data into the texture. */
+type InitMethod = 'WriteTexture' | 'CopyB2T';
+/**
+ * - PartialCopyT2B: do CopyT2B to check that the part of the texture we copied to with InitMethod
+ * matches the data we were copying and that we don't overwrite any data in the target buffer that
+ * we're not supposed to - that's primarily for testing CopyT2B functionality.
+ * - FullCopyT2B: do CopyT2B on the whole texture and check whether the part we copied to matches
+ * the data we were copying and that nothing else was modified - that's primarily for testing
+ * WriteTexture and CopyB2T.
+ *
+ * Note: in compatibility mode, copyTextureToBuffer is not supported for compressed textures.
+ * In this case, we render the texture as well as a texture with the contents we expect in the
+ * copy, and then expect the rendered results to match.
+ */
+type CheckMethod = 'PartialCopyT2B' | 'FullCopyT2B';
+
+/**
+ * This describes in what form the arguments will be passed to WriteTexture/CopyB2T/CopyT2B. If
+ * 'undefined', then arguments equal to their default values are passed as undefined instead. If
+ * 'arrays', then `GPUOrigin3D` and `GPUExtent3D` are passed as `[number, number, number]`.
+ *
+ * [1]: Try to expand this with something like:
+ * ```ts
+ * function encodeExtent3D(
+ * mode: 'partial-array' | 'full-array' | 'extra-array' | 'partial-dict' | 'full-dict',
+ * value: GPUExtent3D
+ * ): GPUExtent3D { ... }
+ * ```
+ */
+type ChangeBeforePass = 'none' | 'undefined' | 'arrays';
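+
+// Illustrative example (added for clarity, not part of the upstream test logic): for a copy at
+// origin { x: 0, y: 0, z: 1 } with size { width: 4, height: 4, depthOrArrayLayers: 1 }:
+//   'none'      -> origin { x: 0, y: 0, z: 1 }, size { width: 4, height: 4, depthOrArrayLayers: 1 }
+//   'undefined' -> origin { x: undefined, y: undefined, z: 1 }; only members equal to their
+//                  defaults become undefined, and the size dictionary is left unchanged
+//   'arrays'    -> origin [0, 0, 1], size [4, 4, 1]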
+
+/** Each combination of methods assumes that the ones before it were tested and work correctly. */
+const kMethodsToTest = [
+ // First we make sure that WriteTexture works for all formats:
+ { initMethod: 'WriteTexture', checkMethod: 'FullCopyT2B' },
+ // Then we make sure that CopyB2T works for all formats:
+ { initMethod: 'CopyB2T', checkMethod: 'FullCopyT2B' },
+ // Then we make sure that CopyT2B works for all formats:
+ { initMethod: 'WriteTexture', checkMethod: 'PartialCopyT2B' },
+] as const;
+
+const dataGenerator = new DataArrayGenerator();
+const altDataGenerator = new DataArrayGenerator();
+
+class ImageCopyTest extends TextureTestMixin(GPUTest) {
+ /**
+ * This is used for testing passing undefined members of `GPUImageDataLayout` instead of actual
+ * values where possible. Arguments are passed as values rather than objects so that they are
+ * passed by copy and not by reference.
+ */
+ undefDataLayoutIfNeeded(
+ offset: number | undefined,
+ rowsPerImage: number | undefined,
+ bytesPerRow: number | undefined,
+ changeBeforePass: ChangeBeforePass
+ ): GPUImageDataLayout {
+ if (changeBeforePass === 'undefined') {
+ if (offset === 0) {
+ offset = undefined;
+ }
+ if (bytesPerRow === 0) {
+ bytesPerRow = undefined;
+ }
+ if (rowsPerImage === 0) {
+ rowsPerImage = undefined;
+ }
+ }
+ return { offset, bytesPerRow, rowsPerImage };
+ }
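+
+ // Example of the helper above (illustrative only): undefDataLayoutIfNeeded(0, 0, 256, 'undefined')
+ // returns { offset: undefined, bytesPerRow: 256, rowsPerImage: undefined }, since only members
+ // whose value is 0 are replaced with undefined.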
+
+ /**
+ * This is used for testing passing undefined members of `GPUImageCopyTexture` instead of actual
+ * values where possible and also for testing passing the origin as `[number, number, number]`.
+ * Passing arguments as values and not as objects so that they are passed by copy and not by
+ * reference.
+ */
+ undefOrArrayCopyViewIfNeeded(
+ texture: GPUTexture,
+ origin_x: number | undefined,
+ origin_y: number | undefined,
+ origin_z: number | undefined,
+ mipLevel: number | undefined,
+ changeBeforePass: ChangeBeforePass
+ ): GPUImageCopyTexture {
+ let origin: GPUOrigin3D | undefined = { x: origin_x, y: origin_y, z: origin_z };
+
+ if (changeBeforePass === 'undefined') {
+ if (origin_x === 0 && origin_y === 0 && origin_z === 0) {
+ origin = undefined;
+ } else {
+ if (origin_x === 0) {
+ origin_x = undefined;
+ }
+ if (origin_y === 0) {
+ origin_y = undefined;
+ }
+ if (origin_z === 0) {
+ origin_z = undefined;
+ }
+ origin = { x: origin_x, y: origin_y, z: origin_z };
+ }
+
+ if (mipLevel === 0) {
+ mipLevel = undefined;
+ }
+ }
+
+ if (changeBeforePass === 'arrays') {
+ origin = [origin_x!, origin_y!, origin_z!];
+ }
+
+ return { texture, origin, mipLevel };
+ }
+
+ /**
+ * This is used for testing passing `GPUExtent3D` as `[number, number, number]` instead of
+ * `GPUExtent3DDict`. Arguments are passed as values rather than objects so that they are passed
+ * by copy and not by reference.
+ */
+ arrayCopySizeIfNeeded(
+ width: number,
+ height: number,
+ depthOrArrayLayers: number,
+ changeBeforePass: ChangeBeforePass
+ ): GPUExtent3D {
+ if (changeBeforePass === 'arrays') {
+ return [width, height, depthOrArrayLayers];
+ } else {
+ return { width, height, depthOrArrayLayers };
+ }
+ }
+
+ /**
+ * Compares data in `expected` to data in `buffer`.
+ * Areas defined by size and dataLayout are compared by interpreting the data as appropriate
+ * for the texture format. For example, with 'rgb9e5ufloat' multiple encodings can
+ * represent the same number, such as doubling the exponent while halving the
+ * mantissa. Areas outside the area defined by size and dataLayout are expected to match
+ * by binary comparison.
+ */
+ expectGPUBufferValuesEqualWhenInterpretedAsTextureFormat(
+ expected: Uint8Array,
+ buffer: GPUBuffer,
+ format: ColorTextureFormat,
+ size: Required<GPUExtent3DDict>,
+ dataLayout: Required<GPUImageDataLayout>
+ ) {
+ if (isCompressedTextureFormat(format)) {
+ this.expectGPUBufferValuesEqual(buffer, expected);
+ return;
+ }
+ const regularFormat = format as RegularTextureFormat;
+ // data is in a format like this
+ //
+ // ....
+ // ttttt..
+ // ttttt..
+ // ttttt..
+ // .......
+ // ttttt..
+ // ttttt..
+ // ttttt...
+ //
+ // where the first `....` represents the portion of the buffer before
+ // `dataLayout.offset`. `ttttt` represents width (size[0]) and `..`
+ // represents the portion when `dataLayout.bytesPerRow` is greater than the
+ // data needed for width texels. `......` represents when height (size[1])
+ // is less than `dataLayout.rowsPerImage`. `...` represents any data past
+ // ((height - 1) * depth * bytesPerRow + bytesPerRow) and the end of the
+ // buffer
+ const checkByTextureFormat = (actual: Uint8Array) => {
+ const zero = { x: 0, y: 0, z: 0 };
+
+ // compare texel areas
+ {
+ const actTexelView = TexelView.fromTextureDataByReference(regularFormat, actual, {
+ bytesPerRow: dataLayout.bytesPerRow,
+ rowsPerImage: dataLayout.rowsPerImage,
+ subrectOrigin: [0, 0, 0],
+ subrectSize: size,
+ });
+ const expTexelView = TexelView.fromTextureDataByReference(regularFormat, expected, {
+ bytesPerRow: dataLayout.bytesPerRow,
+ rowsPerImage: dataLayout.rowsPerImage,
+ subrectOrigin: [0, 0, 0],
+ subrectSize: size,
+ });
+
+ const failedPixelsMessage = findFailedPixels(
+ regularFormat,
+ zero,
+ size,
+ { actTexelView, expTexelView },
+ {
+ maxFractionalDiff: 0,
+ }
+ );
+
+ if (failedPixelsMessage !== undefined) {
+ const msg = 'Texture level had unexpected contents:\n' + failedPixelsMessage;
+ return new ErrorWithExtra(msg, () => ({
+ expTexelView,
+ actTexelView,
+ }));
+ }
+ }
+
+ // compare non texel areas
+ {
+ const rowLength = bytesInACompleteRow(size.width, format);
+ let lastOffset = 0;
+ for (const texel of this.iterateBlockRows(size, format)) {
+ const offset = this.getTexelOffsetInBytes(dataLayout, format, texel, zero);
+ const actualPart = actual.subarray(lastOffset, offset);
+ const expectedPart = expected.subarray(lastOffset, offset);
+ const error = checkElementsEqual(actualPart, expectedPart);
+ if (error) {
+ return error;
+ }
+ assert(offset >= lastOffset); // make sure iterateBlockRows always goes forward
+ lastOffset = offset + rowLength;
+ }
+ // compare end of buffers
+ {
+ const actualPart = actual.subarray(lastOffset, actual.length);
+ const expectedPart = expected.subarray(lastOffset, expected.length);
+ return checkElementsEqual(actualPart, expectedPart);
+ }
+ }
+ };
+
+ this.expectGPUBufferValuesPassCheck(buffer, checkByTextureFormat, {
+ srcByteOffset: 0,
+ type: Uint8Array,
+ typedLength: expected.length,
+ method: 'copy',
+ mode: 'fail',
+ });
+ }
+
+ /** Run a CopyT2B command with appropriate arguments corresponding to `ChangeBeforePass` */
+ copyTextureToBufferWithAppliedArguments(
+ buffer: GPUBuffer,
+ { offset, rowsPerImage, bytesPerRow }: Required<GPUImageDataLayout>,
+ { width, height, depthOrArrayLayers }: Required<GPUExtent3DDict>,
+ { texture, mipLevel, origin }: TextureCopyViewWithRequiredOrigin,
+ changeBeforePass: ChangeBeforePass
+ ): void {
+ const { x, y, z } = origin;
+
+ const appliedCopyView = this.undefOrArrayCopyViewIfNeeded(
+ texture,
+ x,
+ y,
+ z,
+ mipLevel,
+ changeBeforePass
+ );
+ const appliedDataLayout = this.undefDataLayoutIfNeeded(
+ offset,
+ rowsPerImage,
+ bytesPerRow,
+ changeBeforePass
+ );
+ const appliedCheckSize = this.arrayCopySizeIfNeeded(
+ width,
+ height,
+ depthOrArrayLayers,
+ changeBeforePass
+ );
+
+ const encoder = this.device.createCommandEncoder();
+ encoder.copyTextureToBuffer(
+ appliedCopyView,
+ { buffer, ...appliedDataLayout },
+ appliedCheckSize
+ );
+ this.device.queue.submit([encoder.finish()]);
+ }
+
+ /** Put data into a part of the texture with an appropriate method. */
+ uploadLinearTextureDataToTextureSubBox(
+ textureCopyView: TextureCopyViewWithRequiredOrigin,
+ textureDataLayout: GPUImageDataLayout & { bytesPerRow: number },
+ copySize: Required<GPUExtent3DDict>,
+ partialData: Uint8Array,
+ method: InitMethod,
+ changeBeforePass: ChangeBeforePass
+ ): void {
+ const { texture, mipLevel, origin } = textureCopyView;
+ const { offset, rowsPerImage, bytesPerRow } = textureDataLayout;
+ const { x, y, z } = origin;
+ const { width, height, depthOrArrayLayers } = copySize;
+
+ const appliedCopyView = this.undefOrArrayCopyViewIfNeeded(
+ texture,
+ x,
+ y,
+ z,
+ mipLevel,
+ changeBeforePass
+ );
+ const appliedDataLayout = this.undefDataLayoutIfNeeded(
+ offset,
+ rowsPerImage,
+ bytesPerRow,
+ changeBeforePass
+ );
+ const appliedCopySize = this.arrayCopySizeIfNeeded(
+ width,
+ height,
+ depthOrArrayLayers,
+ changeBeforePass
+ );
+
+ switch (method) {
+ case 'WriteTexture': {
+ this.device.queue.writeTexture(
+ appliedCopyView,
+ partialData,
+ appliedDataLayout,
+ appliedCopySize
+ );
+
+ break;
+ }
+ case 'CopyB2T': {
+ const buffer = this.makeBufferWithContents(partialData, GPUBufferUsage.COPY_SRC);
+ const encoder = this.device.createCommandEncoder();
+ encoder.copyBufferToTexture(
+ { buffer, ...appliedDataLayout },
+ appliedCopyView,
+ appliedCopySize
+ );
+ this.device.queue.submit([encoder.finish()]);
+
+ break;
+ }
+ default:
+ unreachable();
+ }
+ }
+
+ generateMatchingTextureInJSRenderAndCompareContents(
+ {
+ texture: actualTexture,
+ mipLevel: mipLevelOrUndefined,
+ origin,
+ }: TextureCopyViewWithRequiredOrigin,
+ copySize: Required<GPUExtent3DDict>,
+ format: ColorTextureFormat,
+ expected: Uint8Array,
+ expectedDataLayout: Required<GPUImageDataLayout>
+ ): void {
+ const size = [
+ actualTexture.width,
+ actualTexture.height,
+ actualTexture.depthOrArrayLayers,
+ ] as const;
+ const expectedTexture = this.device.createTexture({
+ label: 'expectedTexture',
+ size,
+ dimension: actualTexture.dimension,
+ format,
+ mipLevelCount: actualTexture.mipLevelCount,
+ usage: actualTexture.usage,
+ });
+ this.trackForCleanup(expectedTexture);
+
+ const mipLevel = mipLevelOrUndefined || 0;
+ const fullMipLevelTextureCopyLayout = getTextureCopyLayout(
+ format,
+ actualTexture.dimension,
+ size,
+ {
+ mipLevel,
+ }
+ );
+
+ // allocate data for entire mip level.
+ const expectedTextureMipLevelData = new Uint8Array(
+ align(fullMipLevelTextureCopyLayout.byteLength, 4)
+ );
+ const mipSize = physicalMipSizeFromTexture(expectedTexture, mipLevel);
+
+ // update the data for the entire mip level with the data
+ // that would be copied to the "actual" texture
+ this.updateLinearTextureDataSubBox(format, copySize, {
+ src: {
+ dataLayout: expectedDataLayout,
+ origin: { x: 0, y: 0, z: 0 },
+ data: expected,
+ },
+ dest: {
+ dataLayout: { offset: 0, ...fullMipLevelTextureCopyLayout },
+ origin,
+ data: expectedTextureMipLevelData,
+ },
+ });
+
+ // MAINTENANCE_TODO: If we're testing writeTexture should this use copyBufferToTexture instead?
+ this.queue.writeTexture(
+ { texture: expectedTexture, mipLevel },
+ expectedTextureMipLevelData,
+ { ...fullMipLevelTextureCopyLayout, offset: 0 },
+ mipSize
+ );
+
+ this.expectTexturesToMatchByRendering(
+ actualTexture,
+ expectedTexture,
+ mipLevel,
+ origin,
+ copySize
+ );
+ }
+
+ /**
+ * We check an appropriate part of the texture against the given data.
+ * Used directly with PartialCopyT2B check method (for a subpart of the texture)
+ * and by `copyWholeTextureToBufferAndCheckContentsWithUpdatedData` with FullCopyT2B check method
+ * (for the whole texture).
+ */
+ copyPartialTextureToBufferAndCheckContents(
+ { texture, mipLevel, origin }: TextureCopyViewWithRequiredOrigin,
+ checkSize: Required<GPUExtent3DDict>,
+ format: ColorTextureFormat,
+ expected: Uint8Array,
+ expectedDataLayout: Required<GPUImageDataLayout>,
+ changeBeforePass: ChangeBeforePass = 'none'
+ ): void {
+ // The alignment is necessary because we need to copy and map data from this buffer.
+ const bufferSize = align(expected.byteLength, 4);
+ // The start value ensures generated data here doesn't match the expected data.
+ const bufferData = altDataGenerator.generateAndCopyView(bufferSize, 17);
+
+ const buffer = this.makeBufferWithContents(
+ bufferData,
+ GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST
+ );
+ // At this point both buffer and bufferData have the same random data in
+ // them. We'll use copyTextureToBuffer to update buffer with data from the
+ // texture and updateLinearTextureDataSubBox to update bufferData with the
+ // data we originally uploaded to the texture.
+
+ // buffer has ...... in it.
+ // Copy to buffer the portion of texture that was previously uploaded.
+ // After execution buffer has t.t.t. because the rows are padded.
+ this.copyTextureToBufferWithAppliedArguments(
+ buffer,
+ expectedDataLayout,
+ checkSize,
+ { texture, mipLevel, origin },
+ changeBeforePass
+ );
+
+ // We originally copied expected to texture using expectedDataLayout.
+ // We're copying back out of texture above.
+
+ // bufferData has ...... in it.
+ // Update bufferData to have the same contents as buffer.
+ // When done, bufferData now has t.t.t. because the rows are padded.
+ this.updateLinearTextureDataSubBox(format, checkSize, {
+ src: {
+ dataLayout: expectedDataLayout,
+ origin: { x: 0, y: 0, z: 0 },
+ data: expected,
+ },
+ dest: {
+ dataLayout: expectedDataLayout,
+ origin: { x: 0, y: 0, z: 0 },
+ data: bufferData,
+ },
+ });
+
+ this.expectGPUBufferValuesEqualWhenInterpretedAsTextureFormat(
+ bufferData,
+ buffer,
+ format,
+ checkSize,
+ expectedDataLayout
+ );
+ }
+
+ /**
+ * Used for checking whether the whole texture was updated correctly by
+ * `uploadLinearTextureDataToTextureSubBox`. Takes fullData returned by
+ * `copyWholeTextureToNewBuffer` before the copy operation, which is the original texture data,
+ * then updates it with `updateLinearTextureDataSubBox` and checks the texture against the
+ * updated data after the copy operation.
+ */
+ copyWholeTextureToBufferAndCheckContentsWithUpdatedData(
+ { texture, mipLevel, origin }: TextureCopyViewWithRequiredOrigin,
+ fullTextureCopyLayout: TextureCopyLayout,
+ texturePartialDataLayout: Required<GPUImageDataLayout>,
+ copySize: Required<GPUExtent3DDict>,
+ format: ColorTextureFormat,
+ fullData: GPUBuffer,
+ partialData: Uint8Array
+ ): void {
+ const { mipSize, bytesPerRow, rowsPerImage, byteLength } = fullTextureCopyLayout;
+ const readbackPromise = this.readGPUBufferRangeTyped(fullData, {
+ type: Uint8Array,
+ typedLength: byteLength,
+ });
+
+ const destinationOrigin = { x: 0, y: 0, z: 0 };
+
+ // We add an eventual async expectation which will update the full data and then add
+ // other eventual async expectations to ensure it will be correct.
+ this.eventualAsyncExpectation(async () => {
+ const readback = await readbackPromise;
+ this.updateLinearTextureDataSubBox(format, copySize, {
+ dest: {
+ dataLayout: { offset: 0, ...fullTextureCopyLayout },
+ origin,
+ data: readback.data,
+ },
+ src: {
+ dataLayout: texturePartialDataLayout,
+ origin: { x: 0, y: 0, z: 0 },
+ data: partialData,
+ },
+ });
+ this.copyPartialTextureToBufferAndCheckContents(
+ { texture, mipLevel, origin: destinationOrigin },
+ { width: mipSize[0], height: mipSize[1], depthOrArrayLayers: mipSize[2] },
+ format,
+ readback.data,
+ { bytesPerRow, rowsPerImage, offset: 0 }
+ );
+ readback.cleanup();
+ });
+ }
+
+ /**
+ * Tests copy between linear data and texture by creating a texture, putting some data into it
+ * with WriteTexture/CopyB2T, then getting data for the whole texture/for a part of it back and
+ * comparing it with the expectation.
+ */
+ uploadTextureAndVerifyCopy({
+ textureDataLayout,
+ copySize,
+ dataSize,
+ mipLevel = 0,
+ origin = { x: 0, y: 0, z: 0 },
+ textureSize,
+ format,
+ dimension,
+ initMethod,
+ checkMethod,
+ changeBeforePass = 'none',
+ }: {
+ textureDataLayout: Required<GPUImageDataLayout>;
+ copySize: Required<GPUExtent3DDict>;
+ dataSize: number;
+ mipLevel?: number;
+ origin?: Required<GPUOrigin3DDict>;
+ textureSize: readonly [number, number, number];
+ format: ColorTextureFormat;
+ dimension: GPUTextureDimension;
+ initMethod: InitMethod;
+ checkMethod: CheckMethod;
+ changeBeforePass?: ChangeBeforePass;
+ }): void {
+ const texture = this.device.createTexture({
+ size: textureSize as [number, number, number],
+ format,
+ dimension,
+ mipLevelCount: mipLevel + 1,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST | GPUTextureUsage.TEXTURE_BINDING,
+ });
+ this.trackForCleanup(texture);
+
+ const data = dataGenerator.generateView(dataSize);
+
+ switch (checkMethod) {
+ case 'PartialCopyT2B': {
+ this.uploadLinearTextureDataToTextureSubBox(
+ { texture, mipLevel, origin },
+ textureDataLayout,
+ copySize,
+ data,
+ initMethod,
+ changeBeforePass
+ );
+
+ if (this.canCallCopyTextureToBufferWithTextureFormat(texture.format)) {
+ this.copyPartialTextureToBufferAndCheckContents(
+ { texture, mipLevel, origin },
+ copySize,
+ format,
+ data,
+ textureDataLayout,
+ changeBeforePass
+ );
+ } else {
+ this.generateMatchingTextureInJSRenderAndCompareContents(
+ { texture, mipLevel, origin },
+ copySize,
+ format,
+ data,
+ textureDataLayout
+ );
+ }
+ break;
+ }
+ case 'FullCopyT2B': {
+ this.uploadLinearTextureDataToTextureSubBox(
+ { texture, mipLevel, origin },
+ textureDataLayout,
+ copySize,
+ data,
+ initMethod,
+ changeBeforePass
+ );
+
+ if (this.canCallCopyTextureToBufferWithTextureFormat(texture.format)) {
+ const fullTextureCopyLayout = getTextureCopyLayout(format, dimension, textureSize, {
+ mipLevel,
+ });
+
+ const fullData = this.copyWholeTextureToNewBuffer(
+ { texture, mipLevel },
+ fullTextureCopyLayout
+ );
+
+ this.copyWholeTextureToBufferAndCheckContentsWithUpdatedData(
+ { texture, mipLevel, origin },
+ fullTextureCopyLayout,
+ textureDataLayout,
+ copySize,
+ format,
+ fullData,
+ data
+ );
+ } else {
+ this.generateMatchingTextureInJSRenderAndCompareContents(
+ { texture, mipLevel, origin },
+ copySize,
+ format,
+ data,
+ textureDataLayout
+ //fullTextureCopyLayout,
+ //fullData,
+ );
+ }
+ break;
+ }
+ default:
+ unreachable();
+ }
+ }
+
+ DoUploadToStencilTest(
+ format: DepthStencilFormat,
+ textureSize: readonly [number, number, number],
+ uploadMethod: 'WriteTexture' | 'CopyB2T',
+ bytesPerRow: number,
+ rowsPerImage: number,
+ initialDataSize: number,
+ initialDataOffset: number,
+ mipLevel: number
+ ): void {
+ const srcTexture = this.device.createTexture({
+ size: textureSize,
+ usage:
+ GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ format,
+ mipLevelCount: mipLevel + 1,
+ });
+ this.trackForCleanup(srcTexture);
+
+ const copySize = [textureSize[0] >> mipLevel, textureSize[1] >> mipLevel, textureSize[2]];
+ const initialData = dataGenerator.generateView(
+ align(initialDataSize, kBufferSizeAlignment),
+ 0,
+ initialDataOffset
+ );
+ switch (uploadMethod) {
+ case 'WriteTexture':
+ this.queue.writeTexture(
+ { texture: srcTexture, aspect: 'stencil-only', mipLevel },
+ initialData,
+ {
+ offset: initialDataOffset,
+ bytesPerRow,
+ rowsPerImage,
+ },
+ copySize
+ );
+ break;
+ case 'CopyB2T':
+ {
+ const stagingBuffer = makeBufferWithContents(
+ this.device,
+ initialData,
+ GPUBufferUsage.COPY_SRC
+ );
+ const encoder = this.device.createCommandEncoder();
+ encoder.copyBufferToTexture(
+ { buffer: stagingBuffer, offset: initialDataOffset, bytesPerRow, rowsPerImage },
+ { texture: srcTexture, aspect: 'stencil-only', mipLevel },
+ copySize
+ );
+ this.queue.submit([encoder.finish()]);
+ }
+ break;
+ default:
+ unreachable();
+ }
+
+ this.checkStencilTextureContent(
+ srcTexture,
+ textureSize,
+ format,
+ initialData,
+ initialDataOffset,
+ bytesPerRow,
+ rowsPerImage,
+ mipLevel
+ );
+ }
+
+ DoCopyFromStencilTest(
+ format: DepthStencilFormat,
+ textureSize: readonly [number, number, number],
+ bytesPerRow: number,
+ rowsPerImage: number,
+ offset: number,
+ mipLevel: number
+ ): void {
+ const srcTexture = this.device.createTexture({
+ size: textureSize,
+ usage:
+ GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ format,
+ mipLevelCount: mipLevel + 1,
+ });
+ this.trackForCleanup(srcTexture);
+
+ // Initialize srcTexture with queue.writeTexture()
+ const copySize = [textureSize[0] >> mipLevel, textureSize[1] >> mipLevel, textureSize[2]];
+ const initialData = dataGenerator.generateView(
+ align(copySize[0] * copySize[1] * copySize[2], kBufferSizeAlignment)
+ );
+ this.queue.writeTexture(
+ { texture: srcTexture, aspect: 'stencil-only', mipLevel },
+ initialData,
+ { bytesPerRow: copySize[0], rowsPerImage: copySize[1] },
+ copySize
+ );
+
+ // Copy the stencil aspect from srcTexture into outputBuffer.
+ const outputBufferSize = align(
+ offset +
+ dataBytesForCopyOrFail({
+ layout: { bytesPerRow, rowsPerImage },
+ format: 'stencil8',
+ copySize,
+ method: 'CopyT2B',
+ }),
+ kBufferSizeAlignment
+ );
+ const outputBuffer = this.device.createBuffer({
+ size: outputBufferSize,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+ this.trackForCleanup(outputBuffer);
+ const encoder = this.device.createCommandEncoder();
+ encoder.copyTextureToBuffer(
+ { texture: srcTexture, aspect: 'stencil-only', mipLevel },
+ { buffer: outputBuffer, offset, bytesPerRow, rowsPerImage },
+ copySize
+ );
+ this.queue.submit([encoder.finish()]);
+
+ // Validate the data in outputBuffer is what we expect.
+ const expectedData = new Uint8Array(outputBufferSize);
+ for (let z = 0; z < copySize[2]; ++z) {
+ const baseExpectedOffset = offset + z * bytesPerRow * rowsPerImage;
+ const baseInitialDataOffset = z * copySize[0] * copySize[1];
+ for (let y = 0; y < copySize[1]; ++y) {
+ memcpy(
+ {
+ src: initialData,
+ start: baseInitialDataOffset + y * copySize[0],
+ length: copySize[0],
+ },
+ { dst: expectedData, start: baseExpectedOffset + y * bytesPerRow }
+ );
+ }
+ }
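+ // Layout sketch (illustrative): with copySize = [4, 4, 2], bytesPerRow = 256, rowsPerImage = 4
+ // and offset = 0, row y of slice z (copySize[0] = 4 tightly packed stencil bytes) lands at
+ // byte z * 1024 + y * 256 of expectedData; the padding bytes in between are expected to stay 0.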
+ this.expectGPUBufferValuesEqual(outputBuffer, expectedData);
+ }
+
+ // MAINTENANCE_TODO(#881): Migrate this into the texture_ok helpers.
+ checkStencilTextureContent(
+ stencilTexture: GPUTexture,
+ stencilTextureSize: readonly [number, number, number],
+ stencilTextureFormat: GPUTextureFormat,
+ expectedStencilTextureData: Uint8Array,
+ expectedStencilTextureDataOffset: number,
+ expectedStencilTextureDataBytesPerRow: number,
+ expectedStencilTextureDataRowsPerImage: number,
+ stencilTextureMipLevel: number
+ ): void {
+ const stencilBitCount = 8;
+
+ // Prepare the uniform buffer that stores the bit indices (from 0 to 7) at stride 256 (required
+ // by Dynamic Buffer Offset).
+ const uniformBufferSize = kMinDynamicBufferOffsetAlignment * (stencilBitCount - 1) + 4;
+ const uniformBufferData = new Uint32Array(uniformBufferSize / 4);
+ for (let i = 1; i < stencilBitCount; ++i) {
+ uniformBufferData[(kMinDynamicBufferOffsetAlignment / 4) * i] = i;
+ }
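+ // Illustrative note (assuming kMinDynamicBufferOffsetAlignment === 256): bit index i is stored at
+ // byte offset i * 256, so the draw for bit i below can bind the same buffer with dynamic offset
+ // i * 256 (e.g. bit 3 lives at byte 768) and read only its own 4-byte value.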
+ const uniformBuffer = makeBufferWithContents(
+ this.device,
+ uniformBufferData,
+ GPUBufferUsage.COPY_DST | GPUBufferUsage.UNIFORM
+ );
+
+ // Prepare the base render pipeline descriptor (all the settings except stencilReadMask).
+ const bindGroupLayout = this.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.FRAGMENT,
+ buffer: {
+ type: 'uniform',
+ minBindingSize: 4,
+ hasDynamicOffset: true,
+ },
+ },
+ ],
+ });
+ const renderPipelineDescriptorBase: GPURenderPipelineDescriptor = {
+ layout: this.device.createPipelineLayout({ bindGroupLayouts: [bindGroupLayout] }),
+ vertex: {
+ module: this.device.createShaderModule({
+ code: `
+ @vertex
+ fn main(@builtin(vertex_index) VertexIndex : u32)-> @builtin(position) vec4<f32> {
+ var pos : array<vec2<f32>, 6> = array<vec2<f32>, 6>(
+ vec2<f32>(-1.0, 1.0),
+ vec2<f32>(-1.0, -1.0),
+ vec2<f32>( 1.0, 1.0),
+ vec2<f32>(-1.0, -1.0),
+ vec2<f32>( 1.0, 1.0),
+ vec2<f32>( 1.0, -1.0));
+ return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ },
+
+ fragment: {
+ module: this.device.createShaderModule({
+ code: `
+ struct Params {
+ stencilBitIndex: u32
+ };
+ @group(0) @binding(0) var<uniform> param: Params;
+ @fragment
+ fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(f32(1u << param.stencilBitIndex) / 255.0, 0.0, 0.0, 0.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [
+ {
+ // As we implement "rendering one bit in each draw() call" with blending operation
+ // 'add', the format of outputTexture must support blending.
+ format: 'r8unorm',
+ blend: {
+ color: { srcFactor: 'one', dstFactor: 'one', operation: 'add' },
+ alpha: {},
+ },
+ },
+ ],
+ },
+
+ primitive: {
+ topology: 'triangle-list',
+ },
+
+ depthStencil: {
+ depthWriteEnabled: false,
+ depthCompare: 'always',
+ format: stencilTextureFormat,
+ stencilFront: {
+ compare: 'equal',
+ },
+ stencilBack: {
+ compare: 'equal',
+ },
+ },
+ };
+
+ // Prepare the bindGroup that contains uniformBuffer.
+ const bindGroup = this.device.createBindGroup({
+ layout: bindGroupLayout,
+ entries: [
+ {
+ binding: 0,
+ resource: {
+ buffer: uniformBuffer,
+ size: 4,
+ },
+ },
+ ],
+ });
+
+ // "Copy" the stencil value into the color attachment with 8 draws in one render pass. Each draw
+ // will "Copy" one bit of the stencil value into the color attachment. The bit of the stencil
+ // value is specified by setStencilReference().
+ const copyFromOutputTextureLayout = getTextureCopyLayout(
+ stencilTextureFormat,
+ '2d',
+ [stencilTextureSize[0], stencilTextureSize[1], 1],
+ {
+ mipLevel: stencilTextureMipLevel,
+ aspect: 'stencil-only',
+ }
+ );
+ const outputTextureSize = [
+ copyFromOutputTextureLayout.mipSize[0],
+ copyFromOutputTextureLayout.mipSize[1],
+ 1,
+ ];
+ const outputTexture = this.device.createTexture({
+ format: 'r8unorm',
+ size: outputTextureSize,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ this.trackForCleanup(outputTexture);
+
+ for (
+ let stencilTextureLayer = 0;
+ stencilTextureLayer < stencilTextureSize[2];
+ ++stencilTextureLayer
+ ) {
+ const encoder = this.device.createCommandEncoder();
+ const depthStencilAttachment: GPURenderPassDepthStencilAttachment = {
+ view: stencilTexture.createView({
+ baseMipLevel: stencilTextureMipLevel,
+ mipLevelCount: 1,
+ baseArrayLayer: stencilTextureLayer,
+ arrayLayerCount: 1,
+ }),
+ };
+ if (kTextureFormatInfo[stencilTextureFormat].depth) {
+ depthStencilAttachment.depthClearValue = 0;
+ depthStencilAttachment.depthLoadOp = 'clear';
+ depthStencilAttachment.depthStoreOp = 'store';
+ }
+ if (kTextureFormatInfo[stencilTextureFormat].stencil) {
+ depthStencilAttachment.stencilLoadOp = 'load';
+ depthStencilAttachment.stencilStoreOp = 'store';
+ }
+ const renderPass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: outputTexture.createView(),
+ clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 0.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ depthStencilAttachment,
+ });
+
+ for (let stencilBitIndex = 0; stencilBitIndex < stencilBitCount; ++stencilBitIndex) {
+ const renderPipelineDescriptor = renderPipelineDescriptorBase;
+ assert(renderPipelineDescriptor.depthStencil !== undefined);
+ renderPipelineDescriptor.depthStencil.stencilReadMask = 1 << stencilBitIndex;
+ const renderPipeline = this.device.createRenderPipeline(renderPipelineDescriptor);
+
+ renderPass.setPipeline(renderPipeline);
+ renderPass.setStencilReference(1 << stencilBitIndex);
+ renderPass.setBindGroup(0, bindGroup, [stencilBitIndex * kMinDynamicBufferOffsetAlignment]);
+ renderPass.draw(6);
+ }
+ renderPass.end();
+
+ // Check outputTexture by copying the content of outputTexture into outputStagingBuffer and
+ // checking all the data in outputStagingBuffer.
+ const outputStagingBuffer = this.device.createBuffer({
+ size: copyFromOutputTextureLayout.byteLength,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+ this.trackForCleanup(outputStagingBuffer);
+ encoder.copyTextureToBuffer(
+ {
+ texture: outputTexture,
+ },
+ {
+ buffer: outputStagingBuffer,
+ bytesPerRow: copyFromOutputTextureLayout.bytesPerRow,
+ rowsPerImage: copyFromOutputTextureLayout.rowsPerImage,
+ },
+ outputTextureSize
+ );
+
+ this.queue.submit([encoder.finish()]);
+
+ // Check the valid data in outputStagingBuffer once per row.
+ for (let y = 0; y < copyFromOutputTextureLayout.mipSize[1]; ++y) {
+ const dataStart =
+ expectedStencilTextureDataOffset +
+ expectedStencilTextureDataBytesPerRow *
+ expectedStencilTextureDataRowsPerImage *
+ stencilTextureLayer +
+ expectedStencilTextureDataBytesPerRow * y;
+ this.expectGPUBufferValuesEqual(
+ outputStagingBuffer,
+ expectedStencilTextureData.slice(
+ dataStart,
+ dataStart + copyFromOutputTextureLayout.mipSize[0]
+ ),
+ copyFromOutputTextureLayout.bytesPerRow * y
+ );
+ }
+ }
+ }
+
+ // MAINTENANCE_TODO(#881): Consider if this can be simplified/encapsulated using TexelView.
+ initializeDepthAspectWithRendering(
+ depthTexture: GPUTexture,
+ depthFormat: GPUTextureFormat,
+ copySize: readonly [number, number, number],
+ copyMipLevel: number,
+ initialData: Float32Array
+ ): void {
+ assert(!!kTextureFormatInfo[depthFormat].depth);
+
+ const inputTexture = this.device.createTexture({
+ size: copySize,
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.TEXTURE_BINDING,
+ format: 'r32float',
+ });
+ this.trackForCleanup(inputTexture);
+ this.queue.writeTexture(
+ { texture: inputTexture },
+ initialData,
+ {
+ bytesPerRow: copySize[0] * 4,
+ rowsPerImage: copySize[1],
+ },
+ copySize
+ );
+
+ const renderPipeline = this.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: this.device.createShaderModule({
+ code: `
+ @vertex
+ fn main(@builtin(vertex_index) VertexIndex : u32)-> @builtin(position) vec4<f32> {
+ var pos : array<vec2<f32>, 6> = array<vec2<f32>, 6>(
+ vec2<f32>(-1.0, 1.0),
+ vec2<f32>(-1.0, -1.0),
+ vec2<f32>( 1.0, 1.0),
+ vec2<f32>(-1.0, -1.0),
+ vec2<f32>( 1.0, 1.0),
+ vec2<f32>( 1.0, -1.0));
+ return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: this.device.createShaderModule({
+ code: `
+ @group(0) @binding(0) var inputTexture: texture_2d<f32>;
+ @fragment fn main(@builtin(position) fragcoord : vec4<f32>) ->
+ @builtin(frag_depth) f32 {
+ var depthValue : vec4<f32> = textureLoad(inputTexture, vec2<i32>(fragcoord.xy), 0);
+ return depthValue.x;
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [],
+ },
+ primitive: {
+ topology: 'triangle-list',
+ },
+ depthStencil: {
+ format: depthFormat,
+ depthWriteEnabled: true,
+ depthCompare: 'always',
+ },
+ });
+
+ const encoder = this.device.createCommandEncoder();
+ for (let z = 0; z < copySize[2]; ++z) {
+ const depthStencilAttachment: GPURenderPassDepthStencilAttachment = {
+ view: depthTexture.createView({
+ dimension: '2d',
+ baseArrayLayer: z,
+ arrayLayerCount: 1,
+ baseMipLevel: copyMipLevel,
+ mipLevelCount: 1,
+ }),
+ };
+ if (kTextureFormatInfo[depthFormat].depth) {
+ depthStencilAttachment.depthClearValue = 0.0;
+ depthStencilAttachment.depthLoadOp = 'clear';
+ depthStencilAttachment.depthStoreOp = 'store';
+ }
+ if (kTextureFormatInfo[depthFormat].stencil) {
+ depthStencilAttachment.stencilLoadOp = 'load';
+ depthStencilAttachment.stencilStoreOp = 'store';
+ }
+ const renderPass = encoder.beginRenderPass({
+ colorAttachments: [],
+ depthStencilAttachment,
+ });
+ renderPass.setPipeline(renderPipeline);
+
+ const bindGroup = this.device.createBindGroup({
+ layout: renderPipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: inputTexture.createView({
+ dimension: '2d',
+ baseArrayLayer: z,
+ arrayLayerCount: 1,
+ baseMipLevel: 0,
+ mipLevelCount: 1,
+ }),
+ },
+ ],
+ });
+ renderPass.setBindGroup(0, bindGroup);
+ renderPass.draw(6);
+ renderPass.end();
+ }
+
+ this.queue.submit([encoder.finish()]);
+ }
+
+ DoCopyTextureToBufferWithDepthAspectTest(
+ format: DepthStencilFormat,
+ copySize: readonly [number, number, number],
+ bytesPerRowPadding: number,
+ rowsPerImagePadding: number,
+ offset: number,
+ dataPaddingInBytes: number,
+ mipLevel: number
+ ): void {
+ // [2]: need to convert the float32 values in initialData into the ones compatible
+ // to the depth aspect of depthFormats when depth16unorm is supported by the browsers.
+
+ // Generate the initial depth data uploaded to the texture as float32.
+ const initialData = new Float32Array(copySize[0] * copySize[1] * copySize[2]);
+ for (let i = 0; i < initialData.length; ++i) {
+ const baseValue = 0.05 * i;
+
+ // We expect there are both 1's and 0's in initialData.
+ initialData[i] = i % 40 === 0 ? 1 : baseValue - Math.floor(baseValue);
+ assert(initialData[i] >= 0 && initialData[i] <= 1);
+ }
+
+ // The data uploaded to the texture, using the byte pattern of the format.
+ let formatInitialData: TypedArrayBufferView = initialData;
+
+ // For unorm depth formats, replace the uploaded depth data with quantized data to avoid
+ // rounding issues when converting from 32float to 16unorm.
+ if (format === 'depth16unorm') {
+ const u16Data = new Uint16Array(initialData.length);
+ for (let i = 0; i < initialData.length; i++) {
+ u16Data[i] = initialData[i] * 65535;
+ initialData[i] = u16Data[i] / 65535.0;
+ }
+ formatInitialData = u16Data;
+ }
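+ // Worked example of the quantization above (illustrative): an initial value of 0.05 becomes
+ // 0.05 * 65535 = 3276.75, stored in the Uint16Array as 3276; initialData is then rewritten to
+ // 3276 / 65535 ~= 0.04999, so the float data and the uint16 upload describe the same depth value.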
+
+ // Initialize the depth aspect of the source texture
+ const depthTexture = this.device.createTexture({
+ format,
+ size: [copySize[0] << mipLevel, copySize[1] << mipLevel, copySize[2]] as const,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ mipLevelCount: mipLevel + 1,
+ });
+ this.trackForCleanup(depthTexture);
+ this.initializeDepthAspectWithRendering(depthTexture, format, copySize, mipLevel, initialData);
+
+ // Copy the depth aspect of the texture into the destination buffer.
+ const aspectBytesPerBlock = depthStencilFormatAspectSize(format, 'depth-only');
+ const bytesPerRow =
+ align(aspectBytesPerBlock * copySize[0], kBytesPerRowAlignment) +
+ bytesPerRowPadding * kBytesPerRowAlignment;
+ const rowsPerImage = copySize[1] + rowsPerImagePadding;
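+ // Worked example (illustrative, assuming kBytesPerRowAlignment === 256): for depth32float
+ // (4 bytes per depth texel), copySize[0] === 3 and bytesPerRowPadding === 6, this gives
+ // bytesPerRow = align(12, 256) + 6 * 256 = 256 + 1536 = 1792.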
+
+ const destinationBufferSize = align(
+ bytesPerRow * rowsPerImage * copySize[2] +
+ bytesPerRow * (copySize[1] - 1) +
+ aspectBytesPerBlock * copySize[0] +
+ offset +
+ dataPaddingInBytes,
+ kBufferSizeAlignment
+ );
+ const destinationBuffer = this.device.createBuffer({
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ size: destinationBufferSize,
+ });
+ this.trackForCleanup(destinationBuffer);
+ const copyEncoder = this.device.createCommandEncoder();
+ copyEncoder.copyTextureToBuffer(
+ {
+ texture: depthTexture,
+ mipLevel,
+ aspect: 'depth-only',
+ },
+ {
+ buffer: destinationBuffer,
+ offset,
+ bytesPerRow,
+ rowsPerImage,
+ },
+ copySize
+ );
+ this.queue.submit([copyEncoder.finish()]);
+
+ // Validate the data in destinationBuffer is what we expect.
+ const expectedData = new Uint8Array(destinationBufferSize);
+ for (let z = 0; z < copySize[2]; ++z) {
+ const baseExpectedOffset = z * bytesPerRow * rowsPerImage + offset;
+ const baseInitialDataOffset = z * copySize[0] * copySize[1];
+ for (let y = 0; y < copySize[1]; ++y) {
+ memcpy(
+ {
+ src: formatInitialData,
+ start: baseInitialDataOffset + y * copySize[0],
+ length: copySize[0],
+ },
+ { dst: expectedData, start: baseExpectedOffset + y * bytesPerRow }
+ );
+ }
+ }
+ this.expectGPUBufferValuesEqual(destinationBuffer, expectedData);
+ }
+}
+
+/**
+ * This is a helper function used for filtering test parameters
+ *
+ * [3]: Modify this after introducing tests with rendering.
+ */
+function formatCanBeTested({ format }: { format: ColorTextureFormat }): boolean {
+ return kTextureFormatInfo[format].color.copyDst && kTextureFormatInfo[format].color.copySrc;
+}
+
+export const g = makeTestGroup(ImageCopyTest);
+
+const kRowsPerImageAndBytesPerRowParams = {
+ paddings: [
+ { bytesPerRowPadding: 0, rowsPerImagePadding: 0 }, // no padding
+ { bytesPerRowPadding: 0, rowsPerImagePadding: 6 }, // rowsPerImage padding
+ { bytesPerRowPadding: 6, rowsPerImagePadding: 0 }, // bytesPerRow padding
+ { bytesPerRowPadding: 15, rowsPerImagePadding: 17 }, // both paddings
+ ],
+
+ copySizes: [
+ // In the two cases below, for (WriteTexture, PartialCopyT2B) and (CopyB2T, FullCopyT2B)
+ // sets of methods we will have bytesPerRow = 256 and copyDepth % 2 == { 0, 1 }
+ // respectively. This covers a special code path for D3D12.
+ { copyWidthInBlocks: 3, copyHeightInBlocks: 4, copyDepth: 5 }, // standard copy
+ { copyWidthInBlocks: 5, copyHeightInBlocks: 4, copyDepth: 2 }, // standard copy
+
+ { copyWidthInBlocks: 0, copyHeightInBlocks: 4, copyDepth: 5 }, // empty copy because of width
+ { copyWidthInBlocks: 3, copyHeightInBlocks: 0, copyDepth: 5 }, // empty copy because of height
+ { copyWidthInBlocks: 3, copyHeightInBlocks: 4, copyDepth: 0 }, // empty copy because of depthOrArrayLayers
+ { copyWidthInBlocks: 256, copyHeightInBlocks: 3, copyDepth: 2 }, // copyWidth is 256-aligned
+ { copyWidthInBlocks: 1, copyHeightInBlocks: 3, copyDepth: 5 }, // copyWidth = 1
+
+ // The two cases below cover another special code path for D3D12.
+ // - For (WriteTexture, FullCopyT2B) with r8unorm:
+ // bytesPerRow = 15 = 3 * 5 = bytesInACompleteCopyImage.
+ { copyWidthInBlocks: 32, copyHeightInBlocks: 1, copyDepth: 8 }, // copyHeight = 1
+ // - For (CopyB2T, FullCopyT2B) and (WriteTexture, PartialCopyT2B) with r8unorm:
+ // bytesPerRow = 256 = 8 * 32 = bytesInACompleteCopyImage.
+ { copyWidthInBlocks: 5, copyHeightInBlocks: 4, copyDepth: 1 }, // copyDepth = 1
+
+ { copyWidthInBlocks: 7, copyHeightInBlocks: 1, copyDepth: 1 }, // copyHeight = 1 and copyDepth = 1
+ ],
+
+ // Copy sizes that are suitable for 1D texture and check both some copy sizes and empty copies.
+ copySizes1D: [
+ { copyWidthInBlocks: 3, copyHeightInBlocks: 1, copyDepth: 1 },
+ { copyWidthInBlocks: 5, copyHeightInBlocks: 1, copyDepth: 1 },
+
+ { copyWidthInBlocks: 3, copyHeightInBlocks: 0, copyDepth: 1 },
+ { copyWidthInBlocks: 0, copyHeightInBlocks: 1, copyDepth: 1 },
+ { copyWidthInBlocks: 5, copyHeightInBlocks: 1, copyDepth: 0 },
+ ],
+};
+
+g.test('rowsPerImage_and_bytesPerRow')
+ .desc(
+ `Test that copying data with various bytesPerRow and rowsPerImage values and minimum required
+bytes in copy works for every format.
+
+ Covers a special code path for Metal:
+ bufferSize - offset < bytesPerImage * copyExtent.depthOrArrayLayers
+ Covers a special code path for D3D12:
+ when bytesPerRow is not a multiple of 512 and copyExtent.depthOrArrayLayers > 1: copyExtent.depthOrArrayLayers % 2 == { 0, 1 }
+ bytesPerRow == bytesInACompleteCopyImage
+
+ TODO: Cover the special code paths for 3D textures in D3D12.
+ `
+ )
+ .params(u =>
+ u
+ .combineWithParams(kMethodsToTest)
+ .combine('format', kColorTextureFormats)
+ .filter(formatCanBeTested)
+ .combine('dimension', kTextureDimensions)
+ .filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format))
+ .beginSubcases()
+ .combineWithParams(kRowsPerImageAndBytesPerRowParams.paddings)
+ .expandWithParams(p => {
+ if (p.dimension === '1d') {
+ return kRowsPerImageAndBytesPerRowParams.copySizes1D;
+ }
+ return kRowsPerImageAndBytesPerRowParams.copySizes;
+ })
+ )
+ .beforeAllSubcases(t => {
+ const info = kTextureFormatInfo[t.params.format];
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const {
+ bytesPerRowPadding,
+ rowsPerImagePadding,
+ copyWidthInBlocks,
+ copyHeightInBlocks,
+ copyDepth,
+ format,
+ dimension,
+ initMethod,
+ checkMethod,
+ } = t.params;
+ const info = kTextureFormatInfo[format];
+ // For CopyB2T and CopyT2B we need bytesPerRow to be 256-aligned; to make this
+ // happen we align the bytesInACompleteRow value and multiply
+ // bytesPerRowPadding by 256.
+ const bytesPerRowAlignment =
+ initMethod === 'WriteTexture' && checkMethod === 'FullCopyT2B' ? 1 : 256;
+
+ const copyWidth = copyWidthInBlocks * info.blockWidth;
+ const copyHeight = copyHeightInBlocks * info.blockHeight;
+ const rowsPerImage = copyHeightInBlocks + rowsPerImagePadding;
+ const bytesPerRow =
+ align(bytesInACompleteRow(copyWidth, format), bytesPerRowAlignment) +
+ bytesPerRowPadding * bytesPerRowAlignment;
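+ // Worked example (illustrative): for r8unorm with copyWidthInBlocks = 3 and bytesPerRowPadding = 6,
+ // the 256-aligned cases give bytesPerRow = align(3, 256) + 6 * 256 = 256 + 1536 = 1792, while the
+ // (WriteTexture, FullCopyT2B) case uses alignment 1 and the same parameters give 3 + 6 = 9.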
+ const copySize = { width: copyWidth, height: copyHeight, depthOrArrayLayers: copyDepth };
+
+ const dataSize = dataBytesForCopyOrFail({
+ layout: { offset: 0, bytesPerRow, rowsPerImage },
+ format,
+ copySize,
+ method: initMethod,
+ });
+
+ t.uploadTextureAndVerifyCopy({
+ textureDataLayout: { offset: 0, bytesPerRow, rowsPerImage },
+ copySize,
+ dataSize,
+ textureSize: [
+ Math.max(copyWidth, info.blockWidth),
+ Math.max(copyHeight, info.blockHeight),
+ Math.max(copyDepth, 1),
+ ] /* making sure the texture is non-empty */,
+ format,
+ dimension,
+ initMethod,
+ checkMethod,
+ });
+ });
+
+const kOffsetsAndSizesParams = {
+ offsetsAndPaddings: [
+ { offsetInBlocks: 0, dataPaddingInBytes: 0 }, // no offset and no padding
+ { offsetInBlocks: 1, dataPaddingInBytes: 0 }, // offset = 1
+ { offsetInBlocks: 2, dataPaddingInBytes: 0 }, // offset = 2
+ { offsetInBlocks: 15, dataPaddingInBytes: 0 }, // offset = 15
+ { offsetInBlocks: 16, dataPaddingInBytes: 0 }, // offset = 16
+ { offsetInBlocks: 242, dataPaddingInBytes: 0 }, // for rgba8unorm format: offset + bytesInCopyExtentPerRow = 242 + 12 = 256 = bytesPerRow
+ { offsetInBlocks: 243, dataPaddingInBytes: 0 }, // for rgba8unorm format: offset + bytesInCopyExtentPerRow = 243 + 12 > 256 = bytesPerRow
+ { offsetInBlocks: 768, dataPaddingInBytes: 0 }, // for copyDepth = 1, blockWidth = 1 and bytesPerBlock = 1: offset = 768 = 3 * 256 = bytesInACompleteCopyImage
+ { offsetInBlocks: 769, dataPaddingInBytes: 0 }, // for copyDepth = 1, blockWidth = 1 and bytesPerBlock = 1: offset = 769 > 768 = bytesInACompleteCopyImage
+ { offsetInBlocks: 0, dataPaddingInBytes: 1 }, // dataPaddingInBytes > 0
+ { offsetInBlocks: 1, dataPaddingInBytes: 8 }, // offset > 0 and dataPaddingInBytes > 0
+ ],
+ copyDepth: [1, 2],
+};
+
+g.test('offsets_and_sizes')
+ .desc(
+ `Test that copying data with various offset values and additional data paddings
+works for every format with 2d and 2d-array textures.
+
+ Covers two special code paths for D3D12:
+ offset + bytesInCopyExtentPerRow { ==, > } bytesPerRow
+ offset > bytesInACompleteCopyImage
+
+ TODO: Cover the special code paths for 3D textures in D3D12.
+ TODO: Make a variant for depth-stencil formats.
+`
+ )
+ .params(u =>
+ u
+ .combineWithParams(kMethodsToTest)
+ .combine('format', kColorTextureFormats)
+ .filter(formatCanBeTested)
+ .combine('dimension', kTextureDimensions)
+ .filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format))
+ .beginSubcases()
+ .combineWithParams(kOffsetsAndSizesParams.offsetsAndPaddings)
+ .combine('copyDepth', kOffsetsAndSizesParams.copyDepth) // 2d and 2d-array textures
+ .unless(p => p.dimension === '1d' && p.copyDepth !== 1)
+ )
+ .beforeAllSubcases(t => {
+ const info = kTextureFormatInfo[t.params.format];
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const {
+ offsetInBlocks,
+ dataPaddingInBytes,
+ copyDepth,
+ format,
+ dimension,
+ initMethod,
+ checkMethod,
+ } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ const offset = offsetInBlocks * info.color.bytes;
+ const copySize = {
+ width: 3 * info.blockWidth,
+ height: 3 * info.blockHeight,
+ depthOrArrayLayers: copyDepth,
+ };
+ let textureHeight = 4 * info.blockHeight;
+ let rowsPerImage = 3;
+ const bytesPerRow = 256;
+
+ if (dimension === '1d') {
+ copySize.height = 1;
+ textureHeight = info.blockHeight;
+ rowsPerImage = 1;
+ }
+ const textureSize = [4 * info.blockWidth, textureHeight, copyDepth] as const;
+
+ const minDataSize = dataBytesForCopyOrFail({
+ layout: { offset, bytesPerRow, rowsPerImage },
+ format,
+ copySize,
+ method: initMethod,
+ });
+ const dataSize = minDataSize + dataPaddingInBytes;
+
+ // We're copying a (3 x 3 x copyDepth) (in texel blocks) part of a (4 x 4 x copyDepth)
+ // (in texel blocks) texture with no origin.
+ t.uploadTextureAndVerifyCopy({
+ textureDataLayout: { offset, bytesPerRow, rowsPerImage },
+ copySize,
+ dataSize,
+ textureSize,
+ format,
+ dimension,
+ initMethod,
+ checkMethod,
+ });
+ });
+
+g.test('origins_and_extents')
+ .desc(
+ `Test that copying slices of a texture works with various origin and copyExtent values
+for all formats. We pass origin and copyExtent as [number, number, number].`
+ )
+ .params(u =>
+ u
+ .combineWithParams(kMethodsToTest)
+ .combine('format', kColorTextureFormats)
+ .filter(formatCanBeTested)
+ .combine('dimension', kTextureDimensions)
+ .filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format))
+ .beginSubcases()
+ .combine('originValueInBlocks', [0, 7, 8])
+ .combine('copySizeValueInBlocks', [0, 7, 8])
+ .combine('textureSizePaddingValueInBlocks', [0, 7, 8])
+ .unless(
+ p =>
+ // we can't create an empty texture
+ p.copySizeValueInBlocks + p.originValueInBlocks + p.textureSizePaddingValueInBlocks === 0
+ )
+ .combine('coordinateToTest', [0, 1, 2] as const)
+ .unless(p => p.dimension === '1d' && p.coordinateToTest !== 0)
+ )
+ .beforeAllSubcases(t => {
+ const info = kTextureFormatInfo[t.params.format];
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const {
+ originValueInBlocks,
+ copySizeValueInBlocks,
+ textureSizePaddingValueInBlocks,
+ format,
+ dimension,
+ initMethod,
+ checkMethod,
+ } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ let originBlocks = [1, 1, 1];
+ let copySizeBlocks = [2, 2, 2];
+ let texSizeBlocks = [3, 3, 3];
+ if (dimension === '1d') {
+ originBlocks = [1, 0, 0];
+ copySizeBlocks = [2, 1, 1];
+ texSizeBlocks = [3, 1, 1];
+ }
+
+ {
+ const ctt = t.params.coordinateToTest;
+ originBlocks[ctt] = originValueInBlocks;
+ copySizeBlocks[ctt] = copySizeValueInBlocks;
+ texSizeBlocks[ctt] =
+ originBlocks[ctt] + copySizeBlocks[ctt] + textureSizePaddingValueInBlocks;
+ }
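+ // Worked example (illustrative): with coordinateToTest = 1, originValueInBlocks = 7,
+ // copySizeValueInBlocks = 8 and textureSizePaddingValueInBlocks = 0 on a 2d texture, this yields
+ // originBlocks = [1, 7, 1], copySizeBlocks = [2, 8, 2] and texSizeBlocks = [3, 15, 3].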
+
+ const origin: Required<GPUOrigin3DDict> = {
+ x: originBlocks[0] * info.blockWidth,
+ y: originBlocks[1] * info.blockHeight,
+ z: originBlocks[2],
+ };
+ const copySize = {
+ width: copySizeBlocks[0] * info.blockWidth,
+ height: copySizeBlocks[1] * info.blockHeight,
+ depthOrArrayLayers: copySizeBlocks[2],
+ };
+ const textureSize = [
+ texSizeBlocks[0] * info.blockWidth,
+ texSizeBlocks[1] * info.blockHeight,
+ texSizeBlocks[2],
+ ] as const;
+
+ const rowsPerImage = copySizeBlocks[1];
+ const bytesPerRow = align(copySizeBlocks[0] * info.color.bytes, 256);
+
+ const dataSize = dataBytesForCopyOrFail({
+ layout: { offset: 0, bytesPerRow, rowsPerImage },
+ format,
+ copySize,
+ method: initMethod,
+ });
+
+ // For testing width: we copy a (_ x 2 x 2) (in texel blocks) part of a (_ x 3 x 3)
+ // (in texel blocks) texture with origin (_, 1, 1) (in texel blocks).
+ // Similarly for other coordinates.
+ t.uploadTextureAndVerifyCopy({
+ textureDataLayout: { offset: 0, bytesPerRow, rowsPerImage },
+ copySize,
+ dataSize,
+ origin,
+ textureSize,
+ format,
+ dimension,
+ initMethod,
+ checkMethod,
+ changeBeforePass: 'arrays',
+ });
+ });
+
+/**
+ * Generates textureSizes which all correspond to the same physicalSizeAtMipLevel, including
+ * virtual sizes at the mip level that differ from the physical one.
+ */
+function* generateTestTextureSizes({
+ format,
+ dimension,
+ mipLevel,
+ _mipSizeInBlocks,
+}: {
+ format: ColorTextureFormat;
+ dimension: GPUTextureDimension;
+ mipLevel: number;
+ _mipSizeInBlocks: Required<GPUExtent3DDict>;
+}): Generator<[number, number, number]> {
+ assert(dimension !== '1d'); // textureSize[1] would be wrong for 1D mipped textures.
+ const info = kTextureFormatInfo[format];
+
+ const widthAtThisLevel = _mipSizeInBlocks.width * info.blockWidth;
+ const heightAtThisLevel = _mipSizeInBlocks.height * info.blockHeight;
+ const textureSize: [number, number, number] = [
+ widthAtThisLevel << mipLevel,
+ heightAtThisLevel << mipLevel,
+ _mipSizeInBlocks.depthOrArrayLayers << (dimension === '3d' ? mipLevel : 0),
+ ];
+ yield textureSize;
+
+ // We choose width and height of the texture so that the values are divisible by blockWidth and
+ // blockHeight respectively and so that the virtual size at mip level corresponds to the same
+ // physical size.
+ // Virtual size at mip level with modified width has width = (physical size width) - (blockWidth / 2).
+ // Virtual size at mip level with modified height has height = (physical size height) - (blockHeight / 2).
+ const widthAtPrevLevel = widthAtThisLevel << 1;
+ const heightAtPrevLevel = heightAtThisLevel << 1;
+ assert(mipLevel > 0);
+ assert(widthAtPrevLevel >= info.blockWidth && heightAtPrevLevel >= info.blockHeight);
+ const modifiedWidth = (widthAtPrevLevel - info.blockWidth) << (mipLevel - 1);
+ const modifiedHeight = (heightAtPrevLevel - info.blockHeight) << (mipLevel - 1);
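+ // Illustrative example: for a block-compressed format with blockWidth = 4, mipLevel = 1 and
+ // _mipSizeInBlocks.width = 8, widthAtThisLevel = 32 and textureSize[0] = 64; modifiedWidth is
+ // (64 - 4) << 0 = 60, whose mip level 1 has virtual width 30 but the same block-aligned physical
+ // width of 32.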
+
+ const modifyWidth = info.blockWidth > 1 && modifiedWidth !== textureSize[0];
+ const modifyHeight = info.blockHeight > 1 && modifiedHeight !== textureSize[1];
+
+ if (modifyWidth) {
+ yield [modifiedWidth, textureSize[1], textureSize[2]];
+ }
+ if (modifyHeight) {
+ yield [textureSize[0], modifiedHeight, textureSize[2]];
+ }
+ if (modifyWidth && modifyHeight) {
+ yield [modifiedWidth, modifiedHeight, textureSize[2]];
+ }
+
+ if (dimension === '3d') {
+ yield [textureSize[0], textureSize[1], textureSize[2] + 1];
+ }
+}
+
+g.test('mip_levels')
+ .desc(
+ `Test that copying various mip levels works. Covers special code paths:
+ - The physical size of the subresource is not equal to the logical size.
+ - bufferSize - offset < bytesPerImage * copyExtent.depthOrArrayLayers, and copyExtent needs to be clamped for all block formats.
+ - For 3D textures test copying to a sub-range of the depth.
+
+Tests both 2D and 3D textures. 1D textures are skipped because they can only have one mip level.
+
+TODO: Make a variant for depth-stencil formats.
+ `
+ )
+ .params(u =>
+ u
+ .combineWithParams(kMethodsToTest)
+ .combine('format', kColorTextureFormats)
+ .filter(formatCanBeTested)
+ .combine('dimension', ['2d', '3d'] as const)
+ .filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format))
+ .beginSubcases()
+ .combineWithParams([
+ // origin + copySize = texturePhysicalSizeAtMipLevel for all coordinates, 2d texture
+ {
+ copySizeInBlocks: { width: 5, height: 4, depthOrArrayLayers: 1 },
+ originInBlocks: { x: 3, y: 2, z: 0 },
+ _mipSizeInBlocks: { width: 8, height: 6, depthOrArrayLayers: 1 },
+ mipLevel: 1,
+ },
+ // origin + copySize = texturePhysicalSizeAtMipLevel for all coordinates, 2d-array texture
+ {
+ copySizeInBlocks: { width: 5, height: 4, depthOrArrayLayers: 2 },
+ originInBlocks: { x: 3, y: 2, z: 1 },
+ _mipSizeInBlocks: { width: 8, height: 6, depthOrArrayLayers: 3 },
+ mipLevel: 2,
+ },
+ // origin.x + copySize.width = texturePhysicalSizeAtMipLevel.width
+ {
+ copySizeInBlocks: { width: 5, height: 4, depthOrArrayLayers: 2 },
+ originInBlocks: { x: 3, y: 2, z: 1 },
+ _mipSizeInBlocks: { width: 8, height: 7, depthOrArrayLayers: 4 },
+ mipLevel: 3,
+ },
+ // origin.y + copySize.height = texturePhysicalSizeAtMipLevel.height
+ {
+ copySizeInBlocks: { width: 5, height: 4, depthOrArrayLayers: 2 },
+ originInBlocks: { x: 3, y: 2, z: 1 },
+ _mipSizeInBlocks: { width: 9, height: 6, depthOrArrayLayers: 4 },
+ mipLevel: 4,
+ },
+ // origin.z + copySize.depthOrArrayLayers = texturePhysicalSizeAtMipLevel.depthOrArrayLayers
+ {
+ copySizeInBlocks: { width: 5, height: 4, depthOrArrayLayers: 2 },
+ originInBlocks: { x: 3, y: 2, z: 1 },
+ _mipSizeInBlocks: { width: 9, height: 7, depthOrArrayLayers: 3 },
+ mipLevel: 4,
+ },
+ // origin + copySize < texturePhysicalSizeAtMipLevel for all coordinates
+ {
+ copySizeInBlocks: { width: 5, height: 4, depthOrArrayLayers: 2 },
+ originInBlocks: { x: 3, y: 2, z: 1 },
+ _mipSizeInBlocks: { width: 9, height: 7, depthOrArrayLayers: 4 },
+ mipLevel: 4,
+ },
+ ])
+ .expand('textureSize', generateTestTextureSizes)
+ )
+ .beforeAllSubcases(t => {
+ const info = kTextureFormatInfo[t.params.format];
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const {
+ copySizeInBlocks,
+ originInBlocks,
+ textureSize,
+ mipLevel,
+ format,
+ dimension,
+ initMethod,
+ checkMethod,
+ } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ const origin = {
+ x: originInBlocks.x * info.blockWidth,
+ y: originInBlocks.y * info.blockHeight,
+ z: originInBlocks.z,
+ };
+ const copySize = {
+ width: copySizeInBlocks.width * info.blockWidth,
+ height: copySizeInBlocks.height * info.blockHeight,
+ depthOrArrayLayers: copySizeInBlocks.depthOrArrayLayers,
+ };
+
+ const rowsPerImage = copySizeInBlocks.height + 1;
+ const bytesPerRow = align(copySize.width, 256);
+
+ const dataSize = dataBytesForCopyOrFail({
+ layout: { offset: 0, bytesPerRow, rowsPerImage },
+ format,
+ copySize,
+ method: initMethod,
+ });
+
+ t.uploadTextureAndVerifyCopy({
+ textureDataLayout: { offset: 0, bytesPerRow, rowsPerImage },
+ copySize,
+ dataSize,
+ origin,
+ mipLevel,
+ textureSize,
+ format,
+ dimension,
+ initMethod,
+ checkMethod,
+ });
+ });
+
+const UND = undefined;
+g.test('undefined_params')
+ .desc(
+ `Tests undefined values of bytesPerRow, rowsPerImage, and origin.x/y/z.
+ Ensures bytesPerRow/rowsPerImage=undefined are valid and behave as expected.
+ Ensures origin.x/y/z undefined default to 0.`
+ )
+ .params(u =>
+ u
+ .combineWithParams(kMethodsToTest)
+ .combine('dimension', kTextureDimensions)
+ .beginSubcases()
+ .combineWithParams([
+ // copying one row: bytesPerRow and rowsPerImage can be undefined
+ { copySize: [3, 1, 1], origin: [UND, UND, UND], bytesPerRow: UND, rowsPerImage: UND },
+ // copying one slice: rowsPerImage can be undefined
+ { copySize: [3, 1, 1], origin: [UND, UND, UND], bytesPerRow: 256, rowsPerImage: UND },
+ { copySize: [3, 3, 1], origin: [UND, UND, UND], bytesPerRow: 256, rowsPerImage: UND },
+ // copying two slices
+ { copySize: [3, 3, 2], origin: [UND, UND, UND], bytesPerRow: 256, rowsPerImage: 3 },
+ // origin.x = undefined
+ { copySize: [1, 1, 1], origin: [UND, 1, 1], bytesPerRow: UND, rowsPerImage: UND },
+ // origin.y = undefined
+ { copySize: [1, 1, 1], origin: [1, UND, 1], bytesPerRow: UND, rowsPerImage: UND },
+ // origin.z = undefined
+ { copySize: [1, 1, 1], origin: [1, 1, UND], bytesPerRow: UND, rowsPerImage: UND },
+ ])
+ .expandWithParams(p => [
+ {
+ _textureSize: [
+ 100,
+ p.copySize[1] + (p.origin[1] ?? 0),
+ p.copySize[2] + (p.origin[2] ?? 0),
+ ] as const,
+ },
+ ])
+ .unless(p => p.dimension === '1d' && (p._textureSize[1] > 1 || p._textureSize[2] > 1))
+ )
+ .fn(t => {
+ const {
+ dimension,
+ _textureSize,
+ bytesPerRow,
+ rowsPerImage,
+ copySize,
+ origin,
+ initMethod,
+ checkMethod,
+ } = t.params;
+
+ t.uploadTextureAndVerifyCopy({
+ textureDataLayout: {
+ offset: 0,
+ // Zero will get turned back into undefined later.
+ bytesPerRow: bytesPerRow ?? 0,
+ // Zero will get turned back into undefined later.
+ rowsPerImage: rowsPerImage ?? 0,
+ },
+ copySize: { width: copySize[0], height: copySize[1], depthOrArrayLayers: copySize[2] },
+ dataSize: 2000,
+ textureSize: _textureSize,
+ // Zeros will get turned back into undefined later.
+ origin: { x: origin[0] ?? 0, y: origin[1] ?? 0, z: origin[2] ?? 0 },
+ format: 'rgba8unorm',
+ dimension,
+ initMethod,
+ checkMethod,
+ changeBeforePass: 'undefined',
+ });
+ });
+
+function CopyMethodSupportedWithDepthStencilFormat(
+ aspect: 'depth-only' | 'stencil-only',
+ format: DepthStencilFormat,
+ copyMethod: 'WriteTexture' | 'CopyB2T' | 'CopyT2B'
+): boolean {
+ {
+ return (
+ (aspect === 'stencil-only' && !!kTextureFormatInfo[format].stencil) ||
+ (aspect === 'depth-only' &&
+ !!kTextureFormatInfo[format].depth &&
+ copyMethod === 'CopyT2B' &&
+ depthStencilBufferTextureCopySupported('CopyT2B', format, aspect))
+ );
+ }
+}
+
+g.test('rowsPerImage_and_bytesPerRow_depth_stencil')
+ .desc(
+ `Test that copying data with various bytesPerRow and rowsPerImage values and minimum required
+bytes in copy works for copyBufferToTexture(), copyTextureToBuffer() and writeTexture() with stencil
+aspect and copyTextureToBuffer() with depth aspect.
+
+ Covers a special code path for Metal:
+ bufferSize - offset < bytesPerImage * copyExtent.depthOrArrayLayers
+ Covers a special code path for D3D12:
+ when bytesPerRow is not a multiple of 512 and copyExtent.depthOrArrayLayers > 1:
+ copyExtent.depthOrArrayLayers % 2 == { 0, 1 }
+ bytesPerRow == bytesInACompleteCopyImage
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kDepthStencilFormats)
+ .combine('copyMethod', ['WriteTexture', 'CopyB2T', 'CopyT2B'] as const)
+ .combine('aspect', ['depth-only', 'stencil-only'] as const)
+ .filter(t => CopyMethodSupportedWithDepthStencilFormat(t.aspect, t.format, t.copyMethod))
+ .beginSubcases()
+ .combineWithParams(kRowsPerImageAndBytesPerRowParams.paddings)
+ .combineWithParams(kRowsPerImageAndBytesPerRowParams.copySizes)
+ .filter(t => {
+ return t.copyWidthInBlocks * t.copyHeightInBlocks * t.copyDepth > 0;
+ })
+ .combine('mipLevel', [0, 2])
+ )
+ .beforeAllSubcases(t => {
+ const info = kTextureFormatInfo[t.params.format];
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const {
+ format,
+ copyMethod,
+ aspect,
+ bytesPerRowPadding,
+ rowsPerImagePadding,
+ copyWidthInBlocks,
+ copyHeightInBlocks,
+ copyDepth,
+ mipLevel,
+ } = t.params;
+ const bytesPerBlock = depthStencilFormatAspectSize(format, aspect);
+ const rowsPerImage = copyHeightInBlocks + rowsPerImagePadding;
+
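+    // writeTexture imposes no bytesPerRow alignment, whereas buffer<->texture copies require
+    // bytesPerRow to be a multiple of kBytesPerRowAlignment (256 bytes).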
+ const bytesPerRowAlignment = copyMethod === 'WriteTexture' ? 1 : kBytesPerRowAlignment;
+ const bytesPerRow =
+ align(bytesPerBlock * copyWidthInBlocks, bytesPerRowAlignment) +
+ bytesPerRowPadding * bytesPerRowAlignment;
+
+ const copySize = [copyWidthInBlocks, copyHeightInBlocks, copyDepth] as const;
+ const textureSize = [
+ copyWidthInBlocks << mipLevel,
+ copyHeightInBlocks << mipLevel,
+ copyDepth,
+ ] as const;
+ if (copyMethod === 'CopyT2B') {
+ if (aspect === 'depth-only') {
+ t.DoCopyTextureToBufferWithDepthAspectTest(
+ format,
+ copySize,
+ bytesPerRowPadding,
+ rowsPerImagePadding,
+ 0,
+ 0,
+ mipLevel
+ );
+ } else {
+ t.DoCopyFromStencilTest(format, textureSize, bytesPerRow, rowsPerImage, 0, mipLevel);
+ }
+ } else {
+ assert(
+ aspect === 'stencil-only' && (copyMethod === 'CopyB2T' || copyMethod === 'WriteTexture')
+ );
+ const initialDataSize = dataBytesForCopyOrFail({
+ layout: { bytesPerRow, rowsPerImage },
+ format: 'stencil8',
+ copySize,
+ method: copyMethod,
+ });
+
+ t.DoUploadToStencilTest(
+ format,
+ textureSize,
+ copyMethod,
+ bytesPerRow,
+ rowsPerImage,
+ initialDataSize,
+ 0,
+ mipLevel
+ );
+ }
+ });
+
+g.test('offsets_and_sizes_copy_depth_stencil')
+ .desc(
+ `Test that copying data with various offset values and additional data paddings
+works for copyBufferToTexture(), copyTextureToBuffer() and writeTexture() with stencil aspect and
+copyTextureToBuffer() with depth aspect.
+
+ Covers two special code paths for D3D12:
+ offset + bytesInCopyExtentPerRow { ==, > } bytesPerRow
+ offset > bytesInACompleteCopyImage
+`
+ )
+ .params(u =>
+ u
+ .combine('format', kDepthStencilFormats)
+ .combine('copyMethod', ['WriteTexture', 'CopyB2T', 'CopyT2B'] as const)
+ .combine('aspect', ['depth-only', 'stencil-only'] as const)
+ .filter(t => CopyMethodSupportedWithDepthStencilFormat(t.aspect, t.format, t.copyMethod))
+ .beginSubcases()
+ .combineWithParams(kOffsetsAndSizesParams.offsetsAndPaddings)
+ .filter(t => t.offsetInBlocks % 4 === 0)
+ .combine('copyDepth', kOffsetsAndSizesParams.copyDepth)
+ .combine('mipLevel', [0, 2])
+ )
+ .beforeAllSubcases(t => {
+ const info = kTextureFormatInfo[t.params.format];
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { format, copyMethod, aspect, offsetInBlocks, dataPaddingInBytes, copyDepth, mipLevel } =
+ t.params;
+ const bytesPerBlock = depthStencilFormatAspectSize(format, aspect);
+ const initialDataOffset = offsetInBlocks * bytesPerBlock;
+ const copySize = [3, 3, copyDepth] as const;
+ const rowsPerImage = 3;
+ const bytesPerRow = 256;
+
+ const textureSize = [copySize[0] << mipLevel, copySize[1] << mipLevel, copyDepth] as const;
+ if (copyMethod === 'CopyT2B') {
+ if (aspect === 'depth-only') {
+ t.DoCopyTextureToBufferWithDepthAspectTest(format, copySize, 0, 0, 0, 0, mipLevel);
+ } else {
+ t.DoCopyFromStencilTest(
+ format,
+ textureSize,
+ bytesPerRow,
+ rowsPerImage,
+ initialDataOffset,
+ mipLevel
+ );
+ }
+ } else {
+ assert(
+ aspect === 'stencil-only' && (copyMethod === 'CopyB2T' || copyMethod === 'WriteTexture')
+ );
+ const minDataSize = dataBytesForCopyOrFail({
+ layout: { offset: initialDataOffset, bytesPerRow, rowsPerImage },
+ format: 'stencil8',
+ copySize,
+ method: copyMethod,
+ });
+ const initialDataSize = minDataSize + dataPaddingInBytes;
+ t.DoUploadToStencilTest(
+ format,
+ textureSize,
+ copyMethod,
+ bytesPerRow,
+ rowsPerImage,
+ initialDataSize,
+ initialDataOffset,
+ mipLevel
+ );
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/programmable/programmable_state_test.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/programmable/programmable_state_test.ts
new file mode 100644
index 0000000000..19cf91419c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/programmable/programmable_state_test.ts
@@ -0,0 +1,157 @@
+import { unreachable } from '../../../../../common/util/util.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { EncoderType } from '../../../../util/command_buffer_maker.js';
+
+interface BindGroupIndices {
+ a: number;
+ b: number;
+ out: number;
+}
+
+export class ProgrammableStateTest extends GPUTest {
+ private commonBindGroupLayouts: Map<string, GPUBindGroupLayout> = new Map();
+
+ getBindGroupLayout(type: GPUBufferBindingType): GPUBindGroupLayout {
+ if (!this.commonBindGroupLayouts.has(type)) {
+ this.commonBindGroupLayouts.set(
+ type,
+ this.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE | GPUShaderStage.FRAGMENT,
+ buffer: { type },
+ },
+ ],
+ })
+ );
+ }
+ return this.commonBindGroupLayouts.get(type)!;
+ }
+
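+  // The a/b/out indices used by these tests are always a permutation of {0, 1, 2}, so the
+  // sparse assignments below end up producing a dense array of layouts.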
+ getBindGroupLayouts(indices: BindGroupIndices): GPUBindGroupLayout[] {
+ const bindGroupLayouts: GPUBindGroupLayout[] = [];
+ bindGroupLayouts[indices.a] = this.getBindGroupLayout('read-only-storage');
+ bindGroupLayouts[indices.b] = this.getBindGroupLayout('read-only-storage');
+ bindGroupLayouts[indices.out] = this.getBindGroupLayout('storage');
+ return bindGroupLayouts;
+ }
+
+ createBindGroup(buffer: GPUBuffer, type: GPUBufferBindingType): GPUBindGroup {
+ return this.device.createBindGroup({
+ layout: this.getBindGroupLayout(type),
+ entries: [{ binding: 0, resource: { buffer } }],
+ });
+ }
+
+ setBindGroup(
+ encoder: GPUBindingCommandsMixin,
+ index: number,
+ factory: (index: number) => GPUBindGroup
+ ) {
+ encoder.setBindGroup(index, factory(index));
+ }
+
+  // Create a compute or render pipeline (depending on the encoder type) that performs an
+  // operation on data from two bind groups and writes the result to a third bind group.
+  // The `algorithm` expression is interpolated directly into the WGSL source.
+ createBindingStatePipeline<T extends EncoderType>(
+ encoderType: T,
+ groups: BindGroupIndices,
+ algorithm: string = 'a.value - b.value'
+ ): GPUComputePipeline | GPURenderPipeline {
+ switch (encoderType) {
+ case 'compute pass': {
+ const wgsl = `struct Data {
+ value : i32
+ };
+
+ @group(${groups.a}) @binding(0) var<storage> a : Data;
+ @group(${groups.b}) @binding(0) var<storage> b : Data;
+ @group(${groups.out}) @binding(0) var<storage, read_write> out : Data;
+
+ @compute @workgroup_size(1) fn main() {
+ out.value = ${algorithm};
+ return;
+ }
+ `;
+
+ return this.device.createComputePipeline({
+ layout: this.device.createPipelineLayout({
+ bindGroupLayouts: this.getBindGroupLayouts(groups),
+ }),
+ compute: {
+ module: this.device.createShaderModule({
+ code: wgsl,
+ }),
+ entryPoint: 'main',
+ },
+ });
+ }
+ case 'render pass':
+ case 'render bundle': {
+ const wgslShaders = {
+ vertex: `
+ @vertex fn vert_main() -> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.5, 0.5, 0.0, 1.0);
+ }
+ `,
+
+ fragment: `
+ struct Data {
+ value : i32
+ };
+
+ @group(${groups.a}) @binding(0) var<storage> a : Data;
+ @group(${groups.b}) @binding(0) var<storage> b : Data;
+ @group(${groups.out}) @binding(0) var<storage, read_write> out : Data;
+
+ @fragment fn frag_main() -> @location(0) vec4<f32> {
+ out.value = ${algorithm};
+ return vec4<f32>(1.0, 0.0, 0.0, 1.0);
+ }
+ `,
+ };
+
+ return this.device.createRenderPipeline({
+ layout: this.device.createPipelineLayout({
+ bindGroupLayouts: this.getBindGroupLayouts(groups),
+ }),
+ vertex: {
+ module: this.device.createShaderModule({
+ code: wgslShaders.vertex,
+ }),
+ entryPoint: 'vert_main',
+ },
+ fragment: {
+ module: this.device.createShaderModule({
+ code: wgslShaders.fragment,
+ }),
+ entryPoint: 'frag_main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ primitive: { topology: 'point-list' },
+ });
+ }
+ default:
+ unreachable();
+ }
+ }
+
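+  // GPUBindingCommandsMixin does not expose setPipeline, so dispatch on the concrete encoder type.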
+ setPipeline(pass: GPUBindingCommandsMixin, pipeline: GPUComputePipeline | GPURenderPipeline) {
+ if (pass instanceof GPUComputePassEncoder) {
+ pass.setPipeline(pipeline as GPUComputePipeline);
+ } else if (pass instanceof GPURenderPassEncoder || pass instanceof GPURenderBundleEncoder) {
+ pass.setPipeline(pipeline as GPURenderPipeline);
+ }
+ }
+
+ dispatchOrDraw(pass: GPUBindingCommandsMixin) {
+ if (pass instanceof GPUComputePassEncoder) {
+ pass.dispatchWorkgroups(1);
+ } else if (pass instanceof GPURenderPassEncoder) {
+ pass.draw(1);
+ } else if (pass instanceof GPURenderBundleEncoder) {
+ pass.draw(1);
+ }
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/programmable/state_tracking.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/programmable/state_tracking.spec.ts
new file mode 100644
index 0000000000..fe8ef3d437
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/programmable/state_tracking.spec.ts
@@ -0,0 +1,306 @@
+export const description = `
+Ensure state is set correctly. Tries to stress state caching (setting different states multiple
+times in different orders) for setBindGroup and setPipeline.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUConst } from '../../../../constants.js';
+import { kProgrammableEncoderTypes } from '../../../../util/command_buffer_maker.js';
+
+import { ProgrammableStateTest } from './programmable_state_test.js';
+
+export const g = makeTestGroup(ProgrammableStateTest);
+
+const kBufferUsage = GPUConst.BufferUsage.COPY_SRC | GPUConst.BufferUsage.STORAGE;
+
+g.test('bind_group_indices')
+ .desc(
+ `
+ Test that bind group indices can be declared in any order, regardless of their order in the shader.
+ - Test places the value of buffer a - buffer b into the out buffer, then reads the result.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('encoderType', kProgrammableEncoderTypes)
+ .beginSubcases()
+ .combine('groupIndices', [
+ { a: 0, b: 1, out: 2 },
+ { a: 1, b: 2, out: 0 },
+ { a: 2, b: 0, out: 1 },
+ { a: 0, b: 2, out: 1 },
+ { a: 2, b: 1, out: 0 },
+ { a: 1, b: 0, out: 2 },
+ ])
+ )
+ .fn(t => {
+ const { encoderType, groupIndices } = t.params;
+
+ const pipeline = t.createBindingStatePipeline(encoderType, groupIndices);
+
+ const out = t.makeBufferWithContents(new Int32Array([0]), kBufferUsage);
+ const bindGroups = {
+ a: t.createBindGroup(
+ t.makeBufferWithContents(new Int32Array([3]), kBufferUsage),
+ 'read-only-storage'
+ ),
+ b: t.createBindGroup(
+ t.makeBufferWithContents(new Int32Array([2]), kBufferUsage),
+ 'read-only-storage'
+ ),
+ out: t.createBindGroup(out, 'storage'),
+ };
+
+ const { encoder, validateFinishAndSubmit } = t.createEncoder(encoderType);
+
+ t.setPipeline(encoder, pipeline);
+ encoder.setBindGroup(groupIndices.a, bindGroups.a);
+ encoder.setBindGroup(groupIndices.b, bindGroups.b);
+ encoder.setBindGroup(groupIndices.out, bindGroups.out);
+ t.dispatchOrDraw(encoder);
+ validateFinishAndSubmit(true, true);
+
+ t.expectGPUBufferValuesEqual(out, new Int32Array([1]));
+ });
+
+g.test('bind_group_order')
+ .desc(
+ `
+ Test that the order in which you set the bind groups doesn't matter.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('encoderType', kProgrammableEncoderTypes)
+ .beginSubcases()
+ .combine('setOrder', [
+ ['a', 'b', 'out'],
+ ['b', 'out', 'a'],
+ ['out', 'a', 'b'],
+ ['b', 'a', 'out'],
+ ['a', 'out', 'b'],
+ ['out', 'b', 'a'],
+ ] as const)
+ )
+ .fn(t => {
+ const { encoderType, setOrder } = t.params;
+
+ const groupIndices = { a: 0, b: 1, out: 2 };
+ const pipeline = t.createBindingStatePipeline(encoderType, groupIndices);
+
+ const out = t.makeBufferWithContents(new Int32Array([0]), kBufferUsage);
+ const bindGroups = {
+ a: t.createBindGroup(
+ t.makeBufferWithContents(new Int32Array([3]), kBufferUsage),
+ 'read-only-storage'
+ ),
+ b: t.createBindGroup(
+ t.makeBufferWithContents(new Int32Array([2]), kBufferUsage),
+ 'read-only-storage'
+ ),
+ out: t.createBindGroup(out, 'storage'),
+ };
+
+ const { encoder, validateFinishAndSubmit } = t.createEncoder(encoderType);
+ t.setPipeline(encoder, pipeline);
+
+ for (const bindingName of setOrder) {
+ encoder.setBindGroup(groupIndices[bindingName], bindGroups[bindingName]);
+ }
+
+ t.dispatchOrDraw(encoder);
+ validateFinishAndSubmit(true, true);
+
+ t.expectGPUBufferValuesEqual(out, new Int32Array([1]));
+ });
+
+g.test('bind_group_before_pipeline')
+ .desc(
+ `
+ Test that setting bind groups prior to setting the pipeline is still valid.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('encoderType', kProgrammableEncoderTypes)
+ .beginSubcases()
+ .combineWithParams([
+ { setBefore: ['a', 'b'], setAfter: ['out'] },
+ { setBefore: ['a'], setAfter: ['b', 'out'] },
+ { setBefore: ['out', 'b'], setAfter: ['a'] },
+ { setBefore: ['a', 'b', 'out'], setAfter: [] },
+ ] as const)
+ )
+ .fn(t => {
+ const { encoderType, setBefore, setAfter } = t.params;
+ const groupIndices = { a: 0, b: 1, out: 2 };
+ const pipeline = t.createBindingStatePipeline(encoderType, groupIndices);
+
+ const out = t.makeBufferWithContents(new Int32Array([0]), kBufferUsage);
+ const bindGroups = {
+ a: t.createBindGroup(
+ t.makeBufferWithContents(new Int32Array([3]), kBufferUsage),
+ 'read-only-storage'
+ ),
+ b: t.createBindGroup(
+ t.makeBufferWithContents(new Int32Array([2]), kBufferUsage),
+ 'read-only-storage'
+ ),
+ out: t.createBindGroup(out, 'storage'),
+ };
+
+ const { encoder, validateFinishAndSubmit } = t.createEncoder(encoderType);
+
+ for (const bindingName of setBefore) {
+ encoder.setBindGroup(groupIndices[bindingName], bindGroups[bindingName]);
+ }
+
+ t.setPipeline(encoder, pipeline);
+
+ for (const bindingName of setAfter) {
+ encoder.setBindGroup(groupIndices[bindingName], bindGroups[bindingName]);
+ }
+
+ t.dispatchOrDraw(encoder);
+ validateFinishAndSubmit(true, true);
+
+ t.expectGPUBufferValuesEqual(out, new Int32Array([1]));
+ });
+
+g.test('one_bind_group_multiple_slots')
+ .desc(
+ `
+ Test that a single bind group may be bound to more than one slot.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('encoderType', kProgrammableEncoderTypes)
+ )
+ .fn(t => {
+ const { encoderType } = t.params;
+ const pipeline = t.createBindingStatePipeline(encoderType, { a: 0, b: 1, out: 2 });
+
+ const out = t.makeBufferWithContents(new Int32Array([1]), kBufferUsage);
+ const bindGroups = {
+ ab: t.createBindGroup(
+ t.makeBufferWithContents(new Int32Array([3]), kBufferUsage),
+ 'read-only-storage'
+ ),
+ out: t.createBindGroup(out, 'storage'),
+ };
+
+ const { encoder, validateFinishAndSubmit } = t.createEncoder(encoderType);
+ t.setPipeline(encoder, pipeline);
+
+ encoder.setBindGroup(0, bindGroups.ab);
+ encoder.setBindGroup(1, bindGroups.ab);
+ encoder.setBindGroup(2, bindGroups.out);
+
+ t.dispatchOrDraw(encoder);
+ validateFinishAndSubmit(true, true);
+
+ t.expectGPUBufferValuesEqual(out, new Int32Array([0]));
+ });
+
+g.test('bind_group_multiple_sets')
+ .desc(
+ `
+ Test that the last bind group set to a given slot is used when dispatching.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('encoderType', kProgrammableEncoderTypes)
+ )
+ .fn(t => {
+ const { encoderType } = t.params;
+ const pipeline = t.createBindingStatePipeline(encoderType, { a: 0, b: 1, out: 2 });
+
+ const badOut = t.makeBufferWithContents(new Int32Array([-1]), kBufferUsage);
+ const out = t.makeBufferWithContents(new Int32Array([0]), kBufferUsage);
+ const bindGroups = {
+ a: t.createBindGroup(
+ t.makeBufferWithContents(new Int32Array([3]), kBufferUsage),
+ 'read-only-storage'
+ ),
+ b: t.createBindGroup(
+ t.makeBufferWithContents(new Int32Array([2]), kBufferUsage),
+ 'read-only-storage'
+ ),
+ c: t.createBindGroup(
+ t.makeBufferWithContents(new Int32Array([5]), kBufferUsage),
+ 'read-only-storage'
+ ),
+ badOut: t.createBindGroup(badOut, 'storage'),
+ out: t.createBindGroup(out, 'storage'),
+ };
+
+ const { encoder, validateFinishAndSubmit } = t.createEncoder(encoderType);
+
+ encoder.setBindGroup(1, bindGroups.c);
+
+ t.setPipeline(encoder, pipeline);
+
+ encoder.setBindGroup(0, bindGroups.c);
+ encoder.setBindGroup(0, bindGroups.a);
+
+ encoder.setBindGroup(2, bindGroups.badOut);
+
+ encoder.setBindGroup(1, bindGroups.b);
+ encoder.setBindGroup(2, bindGroups.out);
+
+ t.dispatchOrDraw(encoder);
+ validateFinishAndSubmit(true, true);
+
+ t.expectGPUBufferValuesEqual(out, new Int32Array([1]));
+ t.expectGPUBufferValuesEqual(badOut, new Int32Array([-1]));
+ });
+
+g.test('compatible_pipelines')
+ .desc('Test that bind groups can be shared between compatible pipelines.')
+ .params(u =>
+ u //
+ .combine('encoderType', kProgrammableEncoderTypes)
+ )
+ .fn(t => {
+ const { encoderType } = t.params;
+ const pipelineA = t.createBindingStatePipeline(encoderType, { a: 0, b: 1, out: 2 });
+ const pipelineB = t.createBindingStatePipeline(
+ encoderType,
+ { a: 0, b: 1, out: 2 },
+ 'a.value + b.value'
+ );
+
+ const outA = t.makeBufferWithContents(new Int32Array([0]), kBufferUsage);
+ const outB = t.makeBufferWithContents(new Int32Array([0]), kBufferUsage);
+ const bindGroups = {
+ a: t.createBindGroup(
+ t.makeBufferWithContents(new Int32Array([3]), kBufferUsage),
+ 'read-only-storage'
+ ),
+ b: t.createBindGroup(
+ t.makeBufferWithContents(new Int32Array([2]), kBufferUsage),
+ 'read-only-storage'
+ ),
+ outA: t.createBindGroup(outA, 'storage'),
+ outB: t.createBindGroup(outB, 'storage'),
+ };
+
+ const { encoder, validateFinishAndSubmit } = t.createEncoder(encoderType);
+ encoder.setBindGroup(0, bindGroups.a);
+ encoder.setBindGroup(1, bindGroups.b);
+
+ t.setPipeline(encoder, pipelineA);
+ encoder.setBindGroup(2, bindGroups.outA);
+ t.dispatchOrDraw(encoder);
+
+ t.setPipeline(encoder, pipelineB);
+ encoder.setBindGroup(2, bindGroups.outB);
+ t.dispatchOrDraw(encoder);
+
+ validateFinishAndSubmit(true, true);
+
+ t.expectGPUBufferValuesEqual(outA, new Int32Array([1]));
+ t.expectGPUBufferValuesEqual(outB, new Int32Array([5]));
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/queries/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/queries/README.txt
new file mode 100644
index 0000000000..8e7c22c315
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/queries/README.txt
@@ -0,0 +1,5 @@
+TODO: test the behavior of creating/using/resolving queries.
+- timestamp
+- nested (e.g. timestamp inside occlusion query), if any such cases are valid. Try
+ writing to the same query set (at same or different indices), if valid. Check results make sense.
+- start a query (all types) with no draw calls
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/queries/occlusionQuery.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/queries/occlusionQuery.spec.ts
new file mode 100644
index 0000000000..39b7a377fe
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/queries/occlusionQuery.spec.ts
@@ -0,0 +1,1033 @@
+export const description = `
+API operations tests for occlusion queries.
+
+- test query with
+ - scissor
+ - sample mask
+ - alpha to coverage
+ - stencil
+ - depth test
+- test empty query (no draw) (should be cleared?)
+- test via render bundle
+- test resolveQuerySet with non-zero firstIndex
+- test no queries is zero
+- test 0x0 -> 0x3 sample mask
+- test 0 -> 1 alpha to coverage
+- test resolving twice in same pass keeps values
+- test resolving twice across pass keeps values
+- test resolveQuerySet destinationOffset
+`;
+
+import { kUnitCaseParamsBuilder } from '../../../../../common/framework/params_builder.js';
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import {
+ assert,
+ TypedArrayBufferView,
+ range,
+ unreachable,
+} from '../../../../../common/util/util.js';
+import { kMaxQueryCount } from '../../../../capability_info.js';
+import { DepthStencilFormat } from '../../../../format_info.js';
+import { GPUTest } from '../../../../gpu_test.js';
+
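+// resolveQuerySet requires the destination buffer offset to be 256-byte aligned, and each
+// query result is written as a 64-bit value.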
+const kRequiredQueryBufferOffsetAlignment = 256;
+const kBytesPerQuery = 8;
+const kTextureSize = [4, 4];
+
+const kRenderModes = ['direct', 'render-bundle'] as const;
+type RenderMode = (typeof kRenderModes)[number];
+
+const kBufferOffsets = ['zero', 'non-zero'] as const;
+type BufferOffset = (typeof kBufferOffsets)[number];
+
+type SetupParams = {
+ numQueries: number;
+ depthStencilFormat?: DepthStencilFormat;
+ sampleCount?: number;
+ sampleMask?: number;
+ alpha?: number;
+ writeMask?: number;
+ bufferOffset?: BufferOffset;
+ querySetOffset?: BufferOffset;
+ renderMode?: RenderMode;
+};
+
+// MAINTENANCE_TODO: Refactor these helper classes to use GPUTestBase.createEncoder
+//
+// The refactor would require some new features in CommandBufferMaker such as:
+//
+// * Multi render bundle in single render pass support
+//
+// * Some way to allow calling render pass commands on render bundle encoder.
+// Potentially have a special abstract encoder that wraps the two and defers
+// relevant calls appropriately.
+
+/**
+ * This class is used by the RenderPassHelper below to
+ * abstract calling these 4 functions on a RenderPassEncoder or a RenderBundleEncoder.
+ */
+interface QueryHelper {
+ setPipeline(pipeline: GPURenderPipeline): void;
+ setVertexBuffer(buffer: GPUBuffer): void;
+ draw(count: number): void;
+ end(): void;
+}
+
+interface QueryStarter {
+ begin(endFn: () => void): QueryHelper;
+}
+
+/**
+ * This class sequences operations on a render pass encoder or a render bundle encoder
+ * in the order they must happen, so the same test code works across both paths.
+ */
+class RenderPassHelper {
+ _pass: GPURenderPassEncoder;
+ _helper: QueryStarter;
+ _queryHelper?: QueryHelper;
+
+ constructor(pass: GPURenderPassEncoder, helper: QueryStarter) {
+ this._pass = pass;
+ this._helper = helper;
+ }
+ setScissorRect(x: number, y: number, width: number, height: number) {
+ assert(!this._queryHelper);
+ this._pass.setScissorRect(x, y, width, height);
+ }
+ setStencilReference(ref: number) {
+ assert(!this._queryHelper);
+ this._pass.setStencilReference(ref);
+ }
+ beginOcclusionQuery(queryIndex: number) {
+ assert(!this._queryHelper);
+ this._pass.beginOcclusionQuery(queryIndex);
+ this._queryHelper = this._helper.begin(() => {
+ assert(!!this._queryHelper);
+ this._queryHelper = undefined;
+ this._pass.endOcclusionQuery();
+ });
+ return this._queryHelper;
+ }
+}
+
+/**
+ * Helper class for using a render pass encoder directly
+ */
+class QueryHelperDirect implements QueryHelper {
+ _pass?: GPURenderPassEncoder;
+ _endFn: () => void;
+
+ constructor(pass: GPURenderPassEncoder, endFn: () => void) {
+ this._pass = pass;
+ this._endFn = endFn;
+ }
+ setPipeline(pipeline: GPURenderPipeline): void {
+ assert(!!this._pass);
+ this._pass.setPipeline(pipeline);
+ }
+ setVertexBuffer(buffer: GPUBuffer): void {
+ assert(!!this._pass);
+ this._pass.setVertexBuffer(0, buffer);
+ }
+ draw(count: number): void {
+ assert(!!this._pass);
+ this._pass.draw(count);
+ }
+ end() {
+ // make this object impossible to use after calling end
+ const fn = this._endFn;
+ this._endFn = unreachable;
+ this._pass = undefined;
+ fn();
+ }
+}
+
+/**
+ * Helper class for starting a query on a render pass encoder directly
+ */
+class QueryStarterDirect implements QueryStarter {
+ _pass: GPURenderPassEncoder;
+ _helper?: QueryHelperDirect;
+
+ constructor(pass: GPURenderPassEncoder) {
+ this._pass = pass;
+ }
+ begin(endFn: () => void) {
+ assert(!this._helper);
+ this._helper = new QueryHelperDirect(this._pass, () => {
+ this._helper = undefined;
+ endFn();
+ });
+ return this._helper;
+ }
+}
+
+/**
+ * Helper class for using a render bundle encoder.
+ */
+class QueryHelperRenderBundle implements QueryHelper {
+ _encoder?: GPURenderBundleEncoder;
+ _endFn: () => void;
+
+ constructor(pass: GPURenderBundleEncoder, endFn: () => void) {
+ this._encoder = pass;
+ this._endFn = endFn;
+ }
+ setPipeline(pipeline: GPURenderPipeline): void {
+ assert(!!this._encoder);
+ this._encoder.setPipeline(pipeline);
+ }
+ setVertexBuffer(buffer: GPUBuffer): void {
+ assert(!!this._encoder);
+ this._encoder.setVertexBuffer(0, buffer);
+ }
+ draw(count: number): void {
+ assert(!!this._encoder);
+ this._encoder.draw(count);
+ }
+ end() {
+ // make this object impossible to use after calling end
+ const fn = this._endFn;
+ this._endFn = unreachable;
+ this._encoder = undefined;
+ fn();
+ }
+}
+
+/**
+ * Helper class for starting a query on a render bundle encoder
+ */
+class QueryStarterRenderBundle implements QueryStarter {
+ _device: GPUDevice;
+ _pass: GPURenderPassEncoder;
+ _renderBundleEncoderDescriptor: GPURenderBundleEncoderDescriptor;
+ _encoder?: GPURenderBundleEncoder;
+ _helper?: QueryHelperRenderBundle;
+
+ constructor(
+ device: GPUDevice,
+ pass: GPURenderPassEncoder,
+ renderPassDescriptor: GPURenderPassDescriptor
+ ) {
+ this._device = device;
+ this._pass = pass;
+ const colorAttachment = (
+ renderPassDescriptor.colorAttachments as GPURenderPassColorAttachment[]
+ )[0];
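+    // Derive the bundle descriptor from the render pass set up by OcclusionQueryTest.setup():
+    // depth tests use 'depth24plus', stencil tests use 'stencil8', and a resolve target on the
+    // color attachment implies a sample count of 4.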
+ this._renderBundleEncoderDescriptor = {
+ colorFormats: ['rgba8unorm'],
+ depthStencilFormat: renderPassDescriptor.depthStencilAttachment?.depthLoadOp
+ ? 'depth24plus'
+ : renderPassDescriptor.depthStencilAttachment?.stencilLoadOp
+ ? 'stencil8'
+ : undefined,
+ sampleCount: colorAttachment.resolveTarget ? 4 : 1,
+ };
+ }
+ begin(endFn: () => void) {
+ assert(!this._encoder);
+ this._encoder = this._device.createRenderBundleEncoder(this._renderBundleEncoderDescriptor);
+ this._helper = new QueryHelperRenderBundle(this._encoder, () => {
+ assert(!!this._encoder);
+ assert(!!this._helper);
+ this._pass.executeBundles([this._encoder.finish()]);
+ this._helper = undefined;
+ this._encoder = undefined;
+ endFn();
+ });
+ return this._helper;
+ }
+ setPipeline(pipeline: GPURenderPipeline): void {
+ assert(!!this._encoder);
+ this._encoder.setPipeline(pipeline);
+ }
+ setVertexBuffer(buffer: GPUBuffer): void {
+ assert(!!this._encoder);
+ this._encoder.setVertexBuffer(0, buffer);
+ }
+ draw(count: number) {
+ assert(!!this._encoder);
+ this._encoder.draw(count);
+ }
+}
+
+class OcclusionQueryTest extends GPUTest {
+ createBuffer(desc: GPUBufferDescriptor) {
+ return this.trackForCleanup(this.device.createBuffer(desc));
+ }
+ createTexture(desc: GPUTextureDescriptor) {
+ return this.trackForCleanup(this.device.createTexture(desc));
+ }
+ createQuerySet(desc: GPUQuerySetDescriptor) {
+ return this.trackForCleanup(this.device.createQuerySet(desc));
+ }
+ createVertexBuffer(data: TypedArrayBufferView) {
+ return this.makeBufferWithContents(data, GPUBufferUsage.VERTEX);
+ }
+ createSingleTriangleVertexBuffer(z: number) {
+ // prettier-ignore
+ return this.createVertexBuffer(new Float32Array([
+ -0.5, -0.5, z,
+ 0.5, -0.5, z,
+ -0.5, 0.5, z,
+ ]));
+ }
+ async readBufferAsBigUint64(buffer: GPUBuffer) {
+ await buffer.mapAsync(GPUMapMode.READ);
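+    // slice(0) copies the data out before unmap() detaches the mapped ArrayBuffer.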
+ const result = new BigUint64Array(buffer.getMappedRange().slice(0));
+ buffer.unmap();
+ return result;
+ }
+ setup(params: SetupParams) {
+ const {
+ numQueries,
+ depthStencilFormat,
+ sampleMask = 0xffffffff,
+ alpha,
+ sampleCount,
+ writeMask = 0xf,
+ bufferOffset,
+ renderMode,
+ } = params;
+ const { device } = this;
+
+ const queryResolveBufferOffset =
+ bufferOffset === 'non-zero' ? kRequiredQueryBufferOffsetAlignment : 0;
+ const queryResolveBuffer = this.createBuffer({
+ size: numQueries * 8 + queryResolveBufferOffset,
+ usage: GPUBufferUsage.QUERY_RESOLVE | GPUBufferUsage.COPY_SRC,
+ });
+
+ const readBuffer = this.createBuffer({
+ size: numQueries * kBytesPerQuery,
+ usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
+ });
+
+ const vertexBuffer = this.createSingleTriangleVertexBuffer(0);
+
+ const renderTargetTexture = this.createTexture({
+ format: 'rgba8unorm',
+ size: kTextureSize,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const multisampleRenderTarget = sampleCount
+ ? this.createTexture({
+ size: kTextureSize,
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ sampleCount,
+ })
+ : null;
+
+ const depthStencilTexture = depthStencilFormat
+ ? this.createTexture({
+ format: depthStencilFormat,
+ size: kTextureSize,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ })
+ : undefined;
+
+ const module = device.createShaderModule({
+ code: `
+ @vertex fn vs(@location(0) pos: vec4f) -> @builtin(position) vec4f {
+ return pos;
+ }
+
+ @fragment fn fs() -> @location(0) vec4f {
+ return vec4f(0, 0, 0, ${alpha === undefined ? 1 : alpha});
+ }
+ `,
+ });
+
+ const haveDepth = !!depthStencilFormat && depthStencilFormat.includes('depth');
+ const haveStencil = !!depthStencilFormat && depthStencilFormat.includes('stencil');
+ assert(!(haveDepth && haveStencil), 'code does not handle mixed depth-stencil');
+
+ const pipeline = device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'vs',
+ buffers: [
+ {
+ arrayStride: 3 * 4,
+ attributes: [
+ {
+ shaderLocation: 0,
+ offset: 0,
+ format: 'float32x3',
+ },
+ ],
+ },
+ ],
+ },
+ fragment: {
+ module,
+ entryPoint: 'fs',
+ targets: [{ format: 'rgba8unorm', writeMask }],
+ },
+ ...(sampleCount && {
+ multisample: {
+ count: sampleCount,
+ mask: alpha === undefined ? sampleMask : 0xffffffff,
+ alphaToCoverageEnabled: alpha !== undefined,
+ },
+ }),
+ ...(depthStencilTexture && {
+ depthStencil: {
+ format: depthStencilFormat as GPUTextureFormat,
+ depthWriteEnabled: haveDepth,
+ depthCompare: haveDepth ? 'less-equal' : 'always',
+ ...(haveStencil && {
+ stencilFront: {
+ compare: 'equal',
+ },
+ }),
+ },
+ }),
+ });
+
+ const querySetOffset = params?.querySetOffset === 'non-zero' ? 7 : 0;
+ const occlusionQuerySet = this.createQuerySet({
+ type: 'occlusion',
+ count: numQueries + querySetOffset,
+ });
+
+ const renderPassDescriptor: GPURenderPassDescriptor = {
+ colorAttachments: sampleCount
+ ? [
+ {
+ view: multisampleRenderTarget!.createView(),
+ resolveTarget: renderTargetTexture.createView(),
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ]
+ : [
+ {
+ view: renderTargetTexture.createView(),
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ ...(haveDepth && {
+ depthStencilAttachment: {
+ view: depthStencilTexture!.createView(),
+ depthLoadOp: 'clear',
+ depthStoreOp: 'store',
+ depthClearValue: 0.5,
+ },
+ }),
+ ...(haveStencil && {
+ depthStencilAttachment: {
+ view: depthStencilTexture!.createView(),
+ stencilClearValue: 0,
+ stencilLoadOp: 'clear',
+ stencilStoreOp: 'store',
+ },
+ }),
+ occlusionQuerySet,
+ };
+
+ return {
+ readBuffer,
+ vertexBuffer,
+ queryResolveBuffer,
+ queryResolveBufferOffset,
+ occlusionQuerySet,
+ renderTargetTexture,
+ renderPassDescriptor,
+ pipeline,
+ depthStencilTexture,
+ querySetOffset,
+ renderMode,
+ };
+ }
+ async runQueryTest(
+ resources: ReturnType<OcclusionQueryTest['setup']>,
+ renderPassDescriptor: GPURenderPassDescriptor | null,
+ encodePassFn: (helper: RenderPassHelper, queryIndex: number) => void,
+ checkQueryIndexResultFn: (passed: boolean, queryIndex: number) => void
+ ) {
+ const { device } = this;
+ const {
+ readBuffer,
+ queryResolveBuffer,
+ queryResolveBufferOffset,
+ occlusionQuerySet,
+ querySetOffset,
+ renderMode = 'direct',
+ } = resources;
+ const numQueries = occlusionQuerySet.count - querySetOffset;
+ const queryIndices = range(numQueries, (i: number) => i + querySetOffset);
+
+ const encoder = device.createCommandEncoder();
+ if (renderPassDescriptor) {
+ const pass = encoder.beginRenderPass(renderPassDescriptor);
+ const helper = new RenderPassHelper(
+ pass,
+ renderMode === 'direct'
+ ? new QueryStarterDirect(pass)
+ : new QueryStarterRenderBundle(device, pass, renderPassDescriptor)
+ );
+
+ for (const queryIndex of queryIndices) {
+ encodePassFn(helper, queryIndex);
+ }
+ pass.end();
+ }
+
+ encoder.resolveQuerySet(
+ occlusionQuerySet,
+ querySetOffset,
+ numQueries,
+ queryResolveBuffer,
+ queryResolveBufferOffset
+ );
+ encoder.copyBufferToBuffer(
+ queryResolveBuffer,
+ queryResolveBufferOffset,
+ readBuffer,
+ 0,
+ readBuffer.size
+ );
+ device.queue.submit([encoder.finish()]);
+
+ const result = await this.readBufferAsBigUint64(readBuffer);
+ for (const queryIndex of queryIndices) {
+ const resultNdx = queryIndex - querySetOffset;
+ const passed = !!result[resultNdx];
+ checkQueryIndexResultFn(passed, queryIndex);
+ }
+
+ return result;
+ }
+}
+
+const kQueryTestBaseParams = kUnitCaseParamsBuilder
+ .combine('writeMask', [0xf, 0x0])
+ .combine('renderMode', kRenderModes)
+ .combine('bufferOffset', kBufferOffsets)
+ .combine('querySetOffset', kBufferOffsets);
+
+export const g = makeTestGroup(OcclusionQueryTest);
+
+g.test('occlusion_query,initial')
+ .desc(`Test getting contents of QuerySet without any queries.`)
+ .fn(async t => {
+ const kNumQueries = kMaxQueryCount;
+ const resources = t.setup({ numQueries: kNumQueries });
+ await t.runQueryTest(
+ resources,
+ null,
+ () => {},
+ (passed: boolean) => {
+ t.expect(!passed);
+ }
+ );
+ });
+
+g.test('occlusion_query,basic')
+ .desc('Test all queries pass')
+ .params(kQueryTestBaseParams)
+ .fn(async t => {
+ const { writeMask, renderMode, bufferOffset, querySetOffset } = t.params;
+ const kNumQueries = 30;
+ const resources = t.setup({
+ writeMask,
+ renderMode,
+ bufferOffset,
+ querySetOffset,
+ numQueries: kNumQueries,
+ });
+ const { renderPassDescriptor, vertexBuffer, pipeline } = resources;
+
+ await t.runQueryTest(
+ resources,
+ renderPassDescriptor,
+ (helper, queryIndex) => {
+ const queryHelper = helper.beginOcclusionQuery(queryIndex);
+ queryHelper.setPipeline(pipeline);
+ queryHelper.setVertexBuffer(vertexBuffer);
+ queryHelper.draw(3);
+ queryHelper.end();
+ },
+ (passed, queryIndex) => {
+ const expectPassed = true;
+ t.expect(
+ !!passed === expectPassed,
+ `queryIndex: ${queryIndex}, was: ${!!passed}, expected: ${expectPassed}`
+ );
+ }
+ );
+ });
+
+g.test('occlusion_query,empty')
+ .desc(
+ `
+  Test that beginOcclusionQuery/endOcclusionQuery with nothing in between clears the queries.
+
+  Calls beginOcclusionQuery/draw/endOcclusionQuery in a way that should produce passing fragments
+  and validates that they passed. Then executes the same queries (same QuerySet) without drawing.
+  Those queries should not have passed.
+ `
+ )
+ .fn(async t => {
+ const kNumQueries = 30;
+ const resources = t.setup({ numQueries: kNumQueries });
+ const { vertexBuffer, renderPassDescriptor, pipeline } = resources;
+
+ const makeQueryRunner = (draw: boolean) => {
+ return (helper: RenderPassHelper, queryIndex: number) => {
+ const queryHelper = helper.beginOcclusionQuery(queryIndex);
+ queryHelper.setPipeline(pipeline);
+ queryHelper.setVertexBuffer(vertexBuffer);
+ if (draw) {
+ queryHelper.draw(3);
+ }
+ queryHelper.end();
+ };
+ };
+
+ const makeQueryChecker = (draw: boolean) => {
+ return (passed: boolean, queryIndex: number) => {
+ const expectPassed = draw;
+ t.expect(
+ !!passed === expectPassed,
+ `draw: ${draw}, queryIndex: ${queryIndex}, was: ${!!passed}, expected: ${expectPassed}`
+ );
+ };
+ };
+
+ await t.runQueryTest(
+ resources,
+ renderPassDescriptor,
+ makeQueryRunner(true),
+ makeQueryChecker(true)
+ );
+ await t.runQueryTest(
+ resources,
+ renderPassDescriptor,
+ makeQueryRunner(false),
+ makeQueryChecker(false)
+ );
+ });
+
+g.test('occlusion_query,scissor')
+ .desc(
+ `
+ Test beginOcclusionQuery/endOcclusionQuery using scissor to occlude
+ `
+ )
+ .params(kQueryTestBaseParams)
+ .fn(async t => {
+ const { writeMask, renderMode, bufferOffset, querySetOffset } = t.params;
+ const kNumQueries = 30;
+ const resources = t.setup({
+ writeMask,
+ renderMode,
+ bufferOffset,
+ querySetOffset,
+ numQueries: kNumQueries,
+ });
+ const { renderPassDescriptor, renderTargetTexture, vertexBuffer, pipeline } = resources;
+
+ const getScissorRect = (i: number) => {
+ const { width, height } = renderTargetTexture;
+ switch (i % 4) {
+ case 0: // whole target
+ return {
+ x: 0,
+ y: 0,
+ width,
+ height,
+ occluded: false,
+ name: 'whole target',
+ };
+ case 1: // center
+ return {
+ x: width / 4,
+ y: height / 4,
+ width: width / 2,
+ height: height / 2,
+ occluded: false,
+ name: 'center',
+ };
+ case 2: // none
+ return {
+ x: width / 4,
+ y: height / 4,
+ width: 0,
+ height: 0,
+ occluded: true,
+ name: 'none',
+ };
+ case 3: // top 1/4
+ return {
+ x: 0,
+ y: 0,
+ width,
+ height: height / 2,
+ occluded: true,
+ name: 'top quarter',
+ };
+ default:
+ unreachable();
+ }
+ };
+
+ await t.runQueryTest(
+ resources,
+ renderPassDescriptor,
+ (helper, queryIndex) => {
+ const { x, y, width, height } = getScissorRect(queryIndex);
+ helper.setScissorRect(x, y, width, height);
+ const queryHelper = helper.beginOcclusionQuery(queryIndex);
+ queryHelper.setPipeline(pipeline);
+ queryHelper.setVertexBuffer(vertexBuffer);
+ queryHelper.draw(3);
+ queryHelper.end();
+ },
+ (passed, queryIndex) => {
+ const { occluded, name: scissorCase } = getScissorRect(queryIndex);
+ const expectPassed = !occluded;
+ t.expect(
+ !!passed === expectPassed,
+        `queryIndex: ${queryIndex}, scissorCase: ${scissorCase}, was: ${!!passed}, expected: ${expectPassed}`
+ );
+ }
+ );
+ });
+
+g.test('occlusion_query,depth')
+ .desc(
+ `
+ Test beginOcclusionQuery/endOcclusionQuery using depth test to occlude
+
+  Compares depth against 0.5, alternating between vertex buffers at depths 0 and 1.
+  When the depth test passes, we expect a non-zero number of passing fragments.
+ `
+ )
+ .params(kQueryTestBaseParams)
+ .fn(async t => {
+ const { writeMask, renderMode, bufferOffset, querySetOffset } = t.params;
+ const kNumQueries = 30;
+ const resources = t.setup({
+ writeMask,
+ renderMode,
+ bufferOffset,
+ querySetOffset,
+ numQueries: kNumQueries,
+ depthStencilFormat: 'depth24plus',
+ });
+ const { vertexBuffer: vertexBufferAtZ0, renderPassDescriptor, pipeline } = resources;
+ const vertexBufferAtZ1 = t.createSingleTriangleVertexBuffer(1);
+
+ await t.runQueryTest(
+ resources,
+ renderPassDescriptor,
+ (helper, queryIndex) => {
+ const queryHelper = helper.beginOcclusionQuery(queryIndex);
+ queryHelper.setPipeline(pipeline);
+ queryHelper.setVertexBuffer(queryIndex % 2 ? vertexBufferAtZ1 : vertexBufferAtZ0);
+ queryHelper.draw(3);
+ queryHelper.end();
+ },
+ (passed, queryIndex) => {
+ const expectPassed = queryIndex % 2 === 0;
+ t.expect(
+ !!passed === expectPassed,
+        `queryIndex: ${queryIndex}, was: ${!!passed}, expected: ${expectPassed}`
+ );
+ }
+ );
+ });
+
+g.test('occlusion_query,stencil')
+ .desc(
+ `
+ Test beginOcclusionQuery/endOcclusionQuery using stencil to occlude
+
+  Compares stencil against 0, with alternating stencil reference values of
+  0 and 1. When the stencil test passes, we expect a non-zero number of passing fragments.
+ `
+ )
+ .params(kQueryTestBaseParams)
+ .fn(async t => {
+ const { writeMask, renderMode, bufferOffset, querySetOffset } = t.params;
+ const kNumQueries = 30;
+ const resources = t.setup({
+ writeMask,
+ renderMode,
+ bufferOffset,
+ querySetOffset,
+ numQueries: kNumQueries,
+ depthStencilFormat: 'stencil8',
+ });
+ const { vertexBuffer, renderPassDescriptor, pipeline } = resources;
+
+ await t.runQueryTest(
+ resources,
+ renderPassDescriptor,
+ (helper, queryIndex) => {
+ helper.setStencilReference(queryIndex % 2);
+ const queryHelper = helper.beginOcclusionQuery(queryIndex);
+ queryHelper.setPipeline(pipeline);
+ queryHelper.setVertexBuffer(vertexBuffer);
+ queryHelper.draw(3);
+ queryHelper.end();
+ },
+ (passed, queryIndex) => {
+ const expectPassed = queryIndex % 2 === 0;
+ t.expect(
+ !!passed === expectPassed,
+        `queryIndex: ${queryIndex}, was: ${!!passed}, expected: ${expectPassed}`
+ );
+ }
+ );
+ });
+
+g.test('occlusion_query,sample_mask')
+ .desc(
+ `
+ Test beginOcclusionQuery/endOcclusionQuery using sample_mask to occlude
+
+  Set sampleMask to 0, 2, 4, or 6 and draw quads in the top-right or bottom-left corner of the texel.
+  If the corner we draw to corresponds to a sample enabled by the mask, we expect a non-zero number
+  of passing fragments.
+
+ See: https://learn.microsoft.com/en-us/windows/win32/api/d3d11/ne-d3d11-d3d11_standard_multisample_quality_levels
+ `
+ )
+ .params(kQueryTestBaseParams.combine('sampleMask', [0, 2, 4, 6]))
+ .fn(async t => {
+ const { writeMask, renderMode, bufferOffset, querySetOffset, sampleMask } = t.params;
+ const kNumQueries = 30;
+ const sampleCount = 4;
+ const resources = t.setup({
+ writeMask,
+ renderMode,
+ bufferOffset,
+ querySetOffset,
+ numQueries: kNumQueries,
+ sampleCount,
+ sampleMask,
+ });
+ const { renderPassDescriptor, pipeline } = resources;
+
+ const createQuad = (offset: number) => {
+ // prettier-ignore
+ return t.createVertexBuffer(new Float32Array([
+ offset + 0 , offset + 0 , 0,
+ offset + 0.25, offset + 0 , 0,
+ offset + 0 , offset + 0.25, 0,
+ offset + 0 , offset + 0.25, 0,
+ offset + 0.25, offset + 0 , 0,
+ offset + 0.25, offset + 0.25, 0,
+ ]));
+ };
+
+ const vertexBufferBL = createQuad(0);
+ const vertexBufferTR = createQuad(0.25);
+
+ await t.runQueryTest(
+ resources,
+ renderPassDescriptor,
+ (helper, queryIndex) => {
+ const queryHelper = helper.beginOcclusionQuery(queryIndex);
+ queryHelper.setPipeline(pipeline);
+ queryHelper.setVertexBuffer(queryIndex % 2 ? vertexBufferTR : vertexBufferBL);
+ queryHelper.draw(6);
+ queryHelper.end();
+ },
+ (passed, queryIndex) => {
+      // Above we draw to a specific corner (sample) of a multi-sampled texel.
+      // drawMask is the "sampleMask" representation of that corner.
+ // In other words, if drawMask is 2 (we drew to the top right) and
+ // sampleMask is 2 (drawing is allowed to the top right) then we expect
+ // passing fragments.
+ const drawMask = queryIndex % 2 ? 2 : 4;
+ const expectPassed = !!(sampleMask & drawMask);
+ t.expect(
+ !!passed === expectPassed,
+        `queryIndex: ${queryIndex}, was: ${!!passed}, expected: ${expectPassed}`
+ );
+ }
+ );
+ });
+
+g.test('occlusion_query,alpha_to_coverage')
+ .desc(
+ `
+ Test beginOcclusionQuery/endOcclusionQuery using alphaToCoverage to occlude
+
+  Set alpha to 0, 0.25, 0.5, 0.75, and 1, and draw quads in the 4 corners of a texel.
+  Some should be culled. We count how many passed via queries. It's undefined which
+  will pass, but it is defined how many will pass for a given alpha value.
+
+  Note: The result seems to be well defined, but if we find devices/drivers that
+  don't follow this exactly, we can relax the check on the expected number of passed
+  queries.
+
+ See: https://bgolus.medium.com/anti-aliased-alpha-test-the-esoteric-alpha-to-coverage-8b177335ae4f
+ `
+ )
+ .params(kQueryTestBaseParams.combine('alpha', [0, 0.25, 0.5, 0.75, 1.0]))
+ .fn(async t => {
+ const { writeMask, renderMode, bufferOffset, querySetOffset, alpha } = t.params;
+ const kNumQueries = 32;
+ const sampleCount = 4;
+ const resources = t.setup({
+ writeMask,
+ renderMode,
+ bufferOffset,
+ querySetOffset,
+ numQueries: kNumQueries,
+ sampleCount,
+ alpha,
+ });
+ const { renderPassDescriptor, pipeline } = resources;
+
+ const createQuad = (xOffset: number, yOffset: number) => {
+ // prettier-ignore
+ return t.createVertexBuffer(new Float32Array([
+ xOffset + 0 , yOffset + 0 , 0,
+ xOffset + 0.25, yOffset + 0 , 0,
+ xOffset + 0 , yOffset + 0.25, 0,
+ xOffset + 0 , yOffset + 0.25, 0,
+ xOffset + 0.25, yOffset + 0 , 0,
+ xOffset + 0.25, yOffset + 0.25, 0,
+ ]));
+ };
+
+ const vertexBuffers = [
+ createQuad(0, 0),
+ createQuad(0.25, 0),
+ createQuad(0, 0.25),
+ createQuad(0.25, 0.25),
+ ];
+
+ const numPassedPerGroup: number[] = new Array(kNumQueries / 4).fill(0);
+
+ // These tests can't use queryIndex to decide what to draw because which mask
+ // a particular alpha converts to is implementation defined. When querySetOffset is
+ // non-zero the queryIndex will go 7, 8, 9, 10, ... but we need to guarantee
+ // 4 queries per pixel and group those results so `queryIndex / 4 | 0` won't work.
+  // Instead we count the queries to get 4 draws per group, one to each quadrant of a pixel.
+  // Then we total up the passes for each group of 4 results using resultCount.
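+  // For example, with querySetOffset = 7 and kNumQueries = 32, queryIndex runs 7..38 while
+  // queryCount runs 0..31, so queryCount % 4 cycles cleanly through the four quadrants.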
+ let queryCount = 0;
+ let resultCount = 0;
+ await t.runQueryTest(
+ resources,
+ renderPassDescriptor,
+ (helper, queryIndex) => {
+ const queryHelper = helper.beginOcclusionQuery(queryIndex);
+ queryHelper.setPipeline(pipeline);
+ queryHelper.setVertexBuffer(vertexBuffers[queryCount++ % 4]);
+ queryHelper.draw(6);
+ queryHelper.end();
+ },
+ passed => {
+ const groupIndex = (resultCount++ / 4) | 0;
+ numPassedPerGroup[groupIndex] += passed ? 1 : 0;
+ }
+ );
+
+ const expected = (alpha / 0.25) | 0;
+ numPassedPerGroup.forEach((numPassed, queryGroup) => {
+ t.expect(
+ numPassed === expected,
+ `queryGroup: ${queryGroup}, was: ${numPassed}, expected: ${expected}`
+ );
+ });
+ });
+
+g.test('occlusion_query,multi_resolve')
+ .desc('Test calling resolveQuerySet more than once does not change results')
+ .fn(async t => {
+ const { device } = t;
+ const kNumQueries = 30;
+ const {
+ pipeline,
+ vertexBuffer,
+ occlusionQuerySet,
+ renderPassDescriptor,
+ renderTargetTexture,
+ queryResolveBuffer,
+ readBuffer,
+ } = t.setup({ numQueries: kNumQueries });
+
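+    // A GPUBuffer exposes size and usage, which is all GPUBufferDescriptor needs here, so an
+    // existing buffer can serve as the descriptor for identically configured readback buffers.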
+ const readBuffer2 = t.createBuffer(readBuffer);
+ const readBuffer3 = t.createBuffer(readBuffer);
+
+ const renderSomething = (encoder: GPUCommandEncoder) => {
+ const pass = encoder.beginRenderPass(renderPassDescriptor);
+ pass.setPipeline(pipeline);
+ pass.setVertexBuffer(0, vertexBuffer);
+ pass.setScissorRect(0, 0, renderTargetTexture.width, renderTargetTexture.height);
+ pass.draw(3);
+ pass.end();
+ };
+
+ {
+ const encoder = device.createCommandEncoder();
+ {
+ const pass = encoder.beginRenderPass(renderPassDescriptor);
+ pass.setPipeline(pipeline);
+ pass.setVertexBuffer(0, vertexBuffer);
+
+ for (let i = 0; i < kNumQueries; ++i) {
+ pass.beginOcclusionQuery(i);
+ if (i % 2) {
+ pass.setScissorRect(0, 0, renderTargetTexture.width, renderTargetTexture.height);
+ } else {
+ pass.setScissorRect(0, 0, 0, 0);
+ }
+ pass.draw(3);
+ pass.endOcclusionQuery();
+ }
+ pass.end();
+ }
+
+ // Intentionally call resolveQuerySet twice
+ encoder.resolveQuerySet(occlusionQuerySet, 0, kNumQueries, queryResolveBuffer, 0);
+ encoder.resolveQuerySet(occlusionQuerySet, 0, kNumQueries, queryResolveBuffer, 0);
+ encoder.copyBufferToBuffer(queryResolveBuffer, 0, readBuffer, 0, readBuffer.size);
+
+ // Rendering stuff unrelated should not affect results.
+ renderSomething(encoder);
+
+ encoder.resolveQuerySet(occlusionQuerySet, 0, kNumQueries, queryResolveBuffer, 0);
+ encoder.copyBufferToBuffer(queryResolveBuffer, 0, readBuffer2, 0, readBuffer2.size);
+ device.queue.submit([encoder.finish()]);
+ }
+
+ // Encode something else and draw again, then read the results
+ // They should not be affected.
+ {
+ const encoder = device.createCommandEncoder();
+ renderSomething(encoder);
+
+ encoder.resolveQuerySet(occlusionQuerySet, 0, kNumQueries, queryResolveBuffer, 0);
+ encoder.copyBufferToBuffer(queryResolveBuffer, 0, readBuffer3, 0, readBuffer3.size);
+ device.queue.submit([encoder.finish()]);
+ }
+
+ const results = await Promise.all([
+ t.readBufferAsBigUint64(readBuffer),
+ t.readBufferAsBigUint64(readBuffer2),
+ t.readBufferAsBigUint64(readBuffer3),
+ ]);
+
+ results.forEach((result, r) => {
+ for (let i = 0; i < kNumQueries; ++i) {
+ const passed = !!result[i];
+ const expectPassed = !!(i % 2);
+ t.expect(
+ passed === expectPassed,
+ `result(${r}): queryIndex: ${i}, passed: ${passed}, expected: ${expectPassed}`
+ );
+ }
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/render/dynamic_state.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/render/dynamic_state.spec.ts
new file mode 100644
index 0000000000..d342fb6a46
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/render/dynamic_state.spec.ts
@@ -0,0 +1,19 @@
+export const description = `
+Tests of the behavior of the viewport/scissor/blend/reference states.
+
+TODO:
+- {viewport, scissor rect, blend color, stencil reference}:
+ Test rendering result with {various values}.
+ - Set the state in different ways to make sure it gets the correct value in the end: {
+ - state unset (= default)
+ - state explicitly set once to {default value, another value}
+ - persistence: [set, draw, draw] (fn should differentiate from [set, draw] + [draw])
+ - overwriting: [set(1), draw, set(2), draw] (fn should differentiate from [set(1), set(2), draw, draw])
+ - overwriting: [set(1), set(2), draw] (fn should differentiate from [set(1), draw] but not [set(2), draw])
+ - }
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/render/state_tracking.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/render/state_tracking.spec.ts
new file mode 100644
index 0000000000..049c7749b2
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/command_buffer/render/state_tracking.spec.ts
@@ -0,0 +1,624 @@
+export const description = `
+Ensure state is set correctly. Tries to stress state caching (setting different states multiple
+times in different orders) for setIndexBuffer and setVertexBuffer.
+Equivalent tests for setBindGroup and setPipeline are in programmable/state_tracking.spec.ts.
+Equivalent tests for viewport/scissor/blend/reference are in render/dynamic_state.spec.ts
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest, TextureTestMixin } from '../../../../gpu_test.js';
+import { TexelView } from '../../../../util/texture/texel_view.js';
+
+class VertexAndIndexStateTrackingTest extends TextureTestMixin(GPUTest) {
+ GetRenderPipelineForTest(arrayStride: number): GPURenderPipeline {
+ return this.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: this.device.createShaderModule({
+ code: `
+ struct Inputs {
+ @location(0) vertexPosition : f32,
+ @location(1) vertexColor : vec4<f32>,
+ };
+ struct Outputs {
+ @builtin(position) position : vec4<f32>,
+ @location(0) color : vec4<f32>,
+ };
+ @vertex
+ fn main(input : Inputs)-> Outputs {
+ var outputs : Outputs;
+ outputs.position =
+ vec4<f32>(input.vertexPosition, 0.5, 0.0, 1.0);
+ outputs.color = input.vertexColor;
+ return outputs;
+ }`,
+ }),
+ entryPoint: 'main',
+ buffers: [
+ {
+ arrayStride,
+ attributes: [
+ {
+ format: 'float32',
+ offset: 0,
+ shaderLocation: 0,
+ },
+ {
+ format: 'unorm8x4',
+ offset: 4,
+ shaderLocation: 1,
+ },
+ ],
+ },
+ ],
+ },
+ fragment: {
+ module: this.device.createShaderModule({
+ code: `
+ struct Input {
+ @location(0) color : vec4<f32>
+ };
+ @fragment
+ fn main(input : Input) -> @location(0) vec4<f32> {
+ return input.color;
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ primitive: {
+ topology: 'point-list',
+ },
+ });
+ }
+
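+  // One float32 position (4 bytes) followed by one unorm8x4 color (4 bytes).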
+ kVertexAttributeSize = 8;
+}
+
+export const g = makeTestGroup(VertexAndIndexStateTrackingTest);
+
+g.test('set_index_buffer_without_changing_buffer')
+ .desc(
+ `
+ Test that setting index buffer states (index format, offset, size) multiple times in different
+  orders still produces correct results for each draw call.
+`
+ )
+ .fn(t => {
+ // Initialize the index buffer with 5 uint16 indices (0, 1, 2, 3, 4).
+ const indexBuffer = t.makeBufferWithContents(
+ new Uint16Array([0, 1, 2, 3, 4]),
+ GPUBufferUsage.INDEX
+ );
+
+    // Initialize the vertex buffer with the required vertex attributes (position: float32, color: unorm8x4).
+ // Note that the maximum index in the test is 0x10000.
+ const kVertexAttributesCount = 0x10000 + 1;
+ const vertexBuffer = t.device.createBuffer({
+ usage: GPUBufferUsage.VERTEX,
+ size: t.kVertexAttributeSize * kVertexAttributesCount,
+ mappedAtCreation: true,
+ });
+ t.trackForCleanup(vertexBuffer);
+ const vertexAttributes = vertexBuffer.getMappedRange();
+ const kPositions = [-0.8, -0.4, 0.0, 0.4, 0.8, -0.4];
+ const kColors = [
+ new Uint8Array([255, 0, 0, 255]),
+ new Uint8Array([255, 255, 255, 255]),
+ new Uint8Array([0, 0, 255, 255]),
+ new Uint8Array([255, 0, 255, 255]),
+ new Uint8Array([0, 255, 255, 255]),
+ new Uint8Array([0, 255, 0, 255]),
+ ];
+    // Set the vertex attributes at indices 0..4 (the ones referenced by the uint16 indices).
+ // Note that the vertex attribute at index 1 will not be used.
+ for (let i = 0; i < kPositions.length - 1; ++i) {
+ const baseOffset = t.kVertexAttributeSize * i;
+ const vertexPosition = new Float32Array(vertexAttributes, baseOffset, 1);
+ vertexPosition[0] = kPositions[i];
+ const vertexColor = new Uint8Array(vertexAttributes, baseOffset + 4, 4);
+ vertexColor.set(kColors[i]);
+ }
+ // Set vertex attributes at index 0x10000.
+ const lastOffset = t.kVertexAttributeSize * (kVertexAttributesCount - 1);
+ const lastVertexPosition = new Float32Array(vertexAttributes, lastOffset, 1);
+ lastVertexPosition[0] = kPositions[kPositions.length - 1];
+ const lastVertexColor = new Uint8Array(vertexAttributes, lastOffset + 4, 4);
+ lastVertexColor.set(kColors[kColors.length - 1]);
+
+ vertexBuffer.unmap();
+
+ const renderPipeline = t.GetRenderPipelineForTest(t.kVertexAttributeSize);
+
+ const outputTextureSize = [kPositions.length - 1, 1, 1];
+ const outputTexture = t.device.createTexture({
+ format: 'rgba8unorm',
+ size: outputTextureSize,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const renderPass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: outputTexture.createView(),
+ clearValue: [0, 0, 0, 1],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ renderPass.setPipeline(renderPipeline);
+ renderPass.setVertexBuffer(0, vertexBuffer);
+
+ // 1st draw: indexFormat = 'uint32', offset = 0, size = 4 (index value: 0x10000)
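+    // The first two uint16 indices (0, 1) reinterpreted as one little-endian uint32 yield 0x10000,
+    // selecting the vertex attribute written at index 0x10000 above.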
+ renderPass.setIndexBuffer(indexBuffer, 'uint32', 0, 4);
+ renderPass.drawIndexed(1);
+
+ // 2nd draw: indexFormat = 'uint16', offset = 0, size = 4 (index value: 0)
+ renderPass.setIndexBuffer(indexBuffer, 'uint16', 0, 4);
+ renderPass.drawIndexed(1);
+
+ // 3rd draw: indexFormat = 'uint16', offset = 4, size = 2 (index value: 2)
+ renderPass.setIndexBuffer(indexBuffer, 'uint16', 0, 2);
+ renderPass.setIndexBuffer(indexBuffer, 'uint16', 4, 2);
+ renderPass.drawIndexed(1);
+
+    // 4th draw: indexFormat = 'uint16', offset = 6, size = 4 (index values: 3, 4)
+ renderPass.setIndexBuffer(indexBuffer, 'uint16', 6, 2);
+ renderPass.setIndexBuffer(indexBuffer, 'uint16', 6, 4);
+ renderPass.drawIndexed(2);
+
+ renderPass.end();
+ t.queue.submit([encoder.finish()]);
+
+ t.expectTexelViewComparisonIsOkInTexture(
+ { texture: outputTexture },
+ TexelView.fromTexelsAsBytes('rgba8unorm', coord =>
+ coord.x === 1 ? kColors[kPositions.length - 1] : kColors[coord.x]
+ ),
+ outputTextureSize
+ );
+ });
+
+g.test('set_vertex_buffer_without_changing_buffer')
+ .desc(
+ `
+ Test that setting vertex buffer states (offset, size) multiple times in different orders still
+  produces correct results for each draw call.
+ - Tries several different sequences of setVertexBuffer+draw commands, each of which draws vertices
+ in all 4 output pixels, and check they were drawn correctly.
+`
+ )
+ .fn(t => {
+ const kPositions = [-0.875, -0.625, -0.375, -0.125, 0.125, 0.375, 0.625, 0.875];
+ const kColors = [
+ new Uint8Array([255, 0, 0, 255]),
+ new Uint8Array([0, 255, 0, 255]),
+ new Uint8Array([0, 0, 255, 255]),
+ new Uint8Array([51, 0, 0, 255]),
+ new Uint8Array([0, 51, 0, 255]),
+ new Uint8Array([0, 0, 51, 255]),
+ new Uint8Array([255, 0, 255, 255]),
+ new Uint8Array([255, 255, 0, 255]),
+ ];
+
+    // Initialize the vertex buffer with the required vertex attributes (position: float32, color: unorm8x4).
+ const kVertexAttributesCount = 8;
+ const vertexBuffer = t.device.createBuffer({
+ usage: GPUBufferUsage.VERTEX,
+ size: t.kVertexAttributeSize * kVertexAttributesCount,
+ mappedAtCreation: true,
+ });
+ t.trackForCleanup(vertexBuffer);
+ const vertexAttributes = vertexBuffer.getMappedRange();
+ for (let i = 0; i < kPositions.length; ++i) {
+ const baseOffset = t.kVertexAttributeSize * i;
+ const vertexPosition = new Float32Array(vertexAttributes, baseOffset, 1);
+ vertexPosition[0] = kPositions[i];
+ const vertexColor = new Uint8Array(vertexAttributes, baseOffset + 4, 4);
+ vertexColor.set(kColors[i]);
+ }
+
+ vertexBuffer.unmap();
+
+ const renderPipeline = t.GetRenderPipelineForTest(t.kVertexAttributeSize);
+
+ const outputTextureSize = [kPositions.length, 1, 1];
+ const outputTexture = t.device.createTexture({
+ format: 'rgba8unorm',
+ size: outputTextureSize,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const renderPass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: outputTexture.createView(),
+ clearValue: [0, 0, 0, 1],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ renderPass.setPipeline(renderPipeline);
+
+ // Change 'size' in setVertexBuffer()
+ renderPass.setVertexBuffer(0, vertexBuffer, 0, t.kVertexAttributeSize);
+ renderPass.setVertexBuffer(0, vertexBuffer, 0, t.kVertexAttributeSize * 2);
+ renderPass.draw(2);
+
+ // Change 'offset' in setVertexBuffer()
+ renderPass.setVertexBuffer(
+ 0,
+ vertexBuffer,
+ t.kVertexAttributeSize * 2,
+ t.kVertexAttributeSize * 2
+ );
+ renderPass.draw(2);
+
+ // Change 'size' again in setVertexBuffer()
+ renderPass.setVertexBuffer(
+ 0,
+ vertexBuffer,
+ t.kVertexAttributeSize * 4,
+ t.kVertexAttributeSize * 2
+ );
+ renderPass.setVertexBuffer(
+ 0,
+ vertexBuffer,
+ t.kVertexAttributeSize * 4,
+ t.kVertexAttributeSize * 4
+ );
+ renderPass.draw(4);
+
+ renderPass.end();
+ t.queue.submit([encoder.finish()]);
+
+ t.expectTexelViewComparisonIsOkInTexture(
+ { texture: outputTexture },
+ TexelView.fromTexelsAsBytes('rgba8unorm', coord => kColors[coord.x]),
+ outputTextureSize
+ );
+ });
+
+g.test('change_pipeline_before_and_after_vertex_buffer')
+ .desc(
+ `
+  Test that changing the pipeline {before,after} setting the vertex buffers still keeps each draw
+  call correct. (In D3D12, the vertex buffer stride is part of SetVertexBuffer rather than of the
+  pipeline.)
+`
+ )
+ .fn(t => {
+ const kPositions = [-0.8, -0.4, 0.0, 0.4, 0.8, 0.9];
+ const kColors = [
+ new Uint8Array([255, 0, 0, 255]),
+ new Uint8Array([255, 255, 255, 255]),
+ new Uint8Array([0, 255, 0, 255]),
+ new Uint8Array([0, 0, 255, 255]),
+ new Uint8Array([255, 0, 255, 255]),
+ new Uint8Array([0, 255, 255, 255]),
+ ];
+
+    // Initialize the vertex buffer with the required vertex attributes (position: float32, color: unorm8x4)
+ const vertexBuffer = t.device.createBuffer({
+ usage: GPUBufferUsage.VERTEX,
+ size: t.kVertexAttributeSize * kPositions.length,
+ mappedAtCreation: true,
+ });
+ t.trackForCleanup(vertexBuffer);
+ // Note that kPositions[1], kColors[1], kPositions[5] and kColors[5] are not used.
+ const vertexAttributes = vertexBuffer.getMappedRange();
+ for (let i = 0; i < kPositions.length; ++i) {
+ const baseOffset = t.kVertexAttributeSize * i;
+ const vertexPosition = new Float32Array(vertexAttributes, baseOffset, 1);
+ vertexPosition[0] = kPositions[i];
+ const vertexColor = new Uint8Array(vertexAttributes, baseOffset + 4, 4);
+ vertexColor.set(kColors[i]);
+ }
+ vertexBuffer.unmap();
+
+ // Create two render pipelines with different vertex attribute strides
+ const renderPipeline1 = t.GetRenderPipelineForTest(t.kVertexAttributeSize);
+ const renderPipeline2 = t.GetRenderPipelineForTest(t.kVertexAttributeSize * 2);
+
+ const kPointsCount = kPositions.length - 1;
+ const outputTextureSize = [kPointsCount, 1, 1];
+ const outputTexture = t.device.createTexture({
+ format: 'rgba8unorm',
+ size: outputTextureSize,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const renderPass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: outputTexture.createView(),
+ clearValue: [0, 0, 0, 1],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+
+ // Update render pipeline before setVertexBuffer. The applied vertex attribute stride should be
+ // 2 * kVertexAttributeSize.
+ renderPass.setPipeline(renderPipeline1);
+ renderPass.setPipeline(renderPipeline2);
+ renderPass.setVertexBuffer(0, vertexBuffer);
+ renderPass.draw(2);
+
+ // Update render pipeline after setVertexBuffer. The applied vertex attribute stride should be
+ // kVertexAttributeSize.
+ renderPass.setVertexBuffer(0, vertexBuffer, 3 * t.kVertexAttributeSize);
+ renderPass.setPipeline(renderPipeline1);
+ renderPass.draw(2);
+
+ renderPass.end();
+
+ t.queue.submit([encoder.finish()]);
+
+ t.expectTexelViewComparisonIsOkInTexture(
+ { texture: outputTexture },
+ TexelView.fromTexelsAsBytes('rgba8unorm', coord =>
+ coord.x === 1 ? new Uint8Array([0, 0, 0, 255]) : kColors[coord.x]
+ ),
+ outputTextureSize
+ );
+ });
+
+g.test('set_vertex_buffer_but_not_used_in_draw')
+ .desc(
+ `
+  Test that drawing after having set vertex buffer slots not used by the pipeline works correctly.
+  - The render pass contains 2 draw calls. The first draw call uses 2 vertex buffers (position and
+  color); the second draw call uses only 1 vertex buffer (for color), with the vertex positions
+  defined as constants in the vertex shader. The test verifies that both draw calls work correctly.
+ `
+ )
+ .fn(t => {
+ const kPositions = new Float32Array([-0.75, -0.25]);
+ const kColors = new Uint8Array([255, 0, 0, 255, 0, 255, 0, 255]);
+
+    // Initialize the vertex buffers with the required vertex attributes (position: float32, color: unorm8x4)
+ const kAttributeStride = 4;
+ const positionBuffer = t.makeBufferWithContents(kPositions, GPUBufferUsage.VERTEX);
+ const colorBuffer = t.makeBufferWithContents(kColors, GPUBufferUsage.VERTEX);
+
+ const fragmentState: GPUFragmentState = {
+ module: t.device.createShaderModule({
+ code: `
+ struct Input {
+ @location(0) color : vec4<f32>
+ };
+ @fragment
+ fn main(input : Input) -> @location(0) vec4<f32> {
+ return input.color;
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ };
+
+ // Create renderPipeline1 that uses both positionBuffer and colorBuffer.
+ const renderPipeline1 = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ struct Inputs {
+ @location(0) vertexColor : vec4<f32>,
+ @location(1) vertexPosition : f32,
+ };
+ struct Outputs {
+ @builtin(position) position : vec4<f32>,
+ @location(0) color : vec4<f32>,
+ };
+ @vertex
+ fn main(input : Inputs)-> Outputs {
+ var outputs : Outputs;
+ outputs.position =
+ vec4<f32>(input.vertexPosition, 0.5, 0.0, 1.0);
+ outputs.color = input.vertexColor;
+ return outputs;
+ }`,
+ }),
+ entryPoint: 'main',
+ buffers: [
+ {
+ arrayStride: kAttributeStride,
+ attributes: [
+ {
+ format: 'unorm8x4',
+ offset: 0,
+ shaderLocation: 0,
+ },
+ ],
+ },
+ {
+ arrayStride: kAttributeStride,
+ attributes: [
+ {
+ format: 'float32',
+ offset: 0,
+ shaderLocation: 1,
+ },
+ ],
+ },
+ ],
+ },
+ fragment: fragmentState,
+ primitive: {
+ topology: 'point-list',
+ },
+ });
+
+ const renderPipeline2 = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ struct Inputs {
+ @builtin(vertex_index) vertexIndex : u32,
+ @location(0) vertexColor : vec4<f32>,
+ };
+ struct Outputs {
+ @builtin(position) position : vec4<f32>,
+ @location(0) color : vec4<f32>,
+ };
+ @vertex
+ fn main(input : Inputs)-> Outputs {
+ var kPositions = array<f32, 2> (0.25, 0.75);
+ var outputs : Outputs;
+ outputs.position =
+ vec4(kPositions[input.vertexIndex], 0.5, 0.0, 1.0);
+ outputs.color = input.vertexColor;
+ return outputs;
+ }`,
+ }),
+ entryPoint: 'main',
+ buffers: [
+ {
+ arrayStride: kAttributeStride,
+ attributes: [
+ {
+ format: 'unorm8x4',
+ offset: 0,
+ shaderLocation: 0,
+ },
+ ],
+ },
+ ],
+ },
+ fragment: fragmentState,
+ primitive: {
+ topology: 'point-list',
+ },
+ });
+
+ const kPointsCount = 4;
+ const outputTextureSize = [kPointsCount, 1, 1];
+ const outputTexture = t.device.createTexture({
+ format: 'rgba8unorm',
+ size: [kPointsCount, 1, 1],
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const renderPass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: outputTexture.createView(),
+ clearValue: [0, 0, 0, 1],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+
+ renderPass.setVertexBuffer(0, colorBuffer);
+ renderPass.setVertexBuffer(1, positionBuffer);
+ renderPass.setPipeline(renderPipeline1);
+ renderPass.draw(2);
+
+ renderPass.setPipeline(renderPipeline2);
+ renderPass.draw(2);
+
+ renderPass.end();
+
+ t.queue.submit([encoder.finish()]);
+
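+    // The 4 pixel centers correspond to positions -0.75 / -0.25 (first draw) and the shader
+    // constants 0.25 / 0.75 (second draw); each draw emits red then green, so the expected
+    // pattern across the texture is red, green, red, green.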
+ const kExpectedColors = [
+ kColors.subarray(0, 4),
+ kColors.subarray(4),
+ kColors.subarray(0, 4),
+ kColors.subarray(4),
+ ];
+
+ t.expectTexelViewComparisonIsOkInTexture(
+ { texture: outputTexture },
+ TexelView.fromTexelsAsBytes('rgba8unorm', coord => kExpectedColors[coord.x]),
+ outputTextureSize
+ );
+ });
+
+g.test('set_index_buffer_before_non_indexed_draw')
+ .desc(
+ `
+ Test that setting / not setting the index buffer does not impact a non-indexed draw.
+ `
+ )
+ .fn(t => {
+ const kPositions = [-0.75, -0.25, 0.25, 0.75];
+ const kColors = [
+ new Uint8Array([255, 0, 0, 255]),
+ new Uint8Array([0, 255, 0, 255]),
+ new Uint8Array([0, 0, 255, 255]),
+ new Uint8Array([255, 0, 255, 255]),
+ ];
+
+    // Initialize the vertex buffer with the required vertex attributes (position: float32, color: unorm8x4)
+ const vertexBuffer = t.device.createBuffer({
+ usage: GPUBufferUsage.VERTEX,
+ size: t.kVertexAttributeSize * kPositions.length,
+ mappedAtCreation: true,
+ });
+ t.trackForCleanup(vertexBuffer);
+ const vertexAttributes = vertexBuffer.getMappedRange();
+ for (let i = 0; i < kPositions.length; ++i) {
+ const baseOffset = t.kVertexAttributeSize * i;
+ const vertexPosition = new Float32Array(vertexAttributes, baseOffset, 1);
+ vertexPosition[0] = kPositions[i];
+ const vertexColor = new Uint8Array(vertexAttributes, baseOffset + 4, 4);
+ vertexColor.set(kColors[i]);
+ }
+ vertexBuffer.unmap();
+
+ // Initialize the index buffer with 2 uint16 indices (2, 3).
+ const indexBuffer = t.makeBufferWithContents(new Uint16Array([2, 3]), GPUBufferUsage.INDEX);
+
+ const renderPipeline = t.GetRenderPipelineForTest(t.kVertexAttributeSize);
+
+ const kPointsCount = 4;
+ const outputTextureSize = [kPointsCount, 1, 1];
+ const outputTexture = t.device.createTexture({
+ format: 'rgba8unorm',
+ size: [kPointsCount, 1, 1],
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const renderPass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: outputTexture.createView(),
+ clearValue: [0, 0, 0, 1],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+
+    // The first draw call is an indexed one (the third and fourth colors are involved)
+ renderPass.setVertexBuffer(0, vertexBuffer);
+ renderPass.setIndexBuffer(indexBuffer, 'uint16');
+ renderPass.setPipeline(renderPipeline);
+ renderPass.drawIndexed(2);
+
+    // The second draw call is a non-indexed one (the first and second colors are involved)
+ renderPass.draw(2);
+
+ renderPass.end();
+
+ t.queue.submit([encoder.finish()]);
+
+ t.expectTexelViewComparisonIsOkInTexture(
+ { texture: outputTexture },
+ TexelView.fromTexelsAsBytes('rgba8unorm', coord => kColors[coord.x]),
+ outputTextureSize
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/compute/basic.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/compute/basic.spec.ts
new file mode 100644
index 0000000000..5d73a34199
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/compute/basic.spec.ts
@@ -0,0 +1,162 @@
+export const description = `
+Basic command buffer compute tests.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+import { checkElementsEqualGenerated } from '../../../util/check_contents.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('memcpy').fn(t => {
+ const data = new Uint32Array([0x01020304]);
+
+ const src = t.makeBufferWithContents(data, GPUBufferUsage.COPY_DST | GPUBufferUsage.STORAGE);
+
+ const dst = t.device.createBuffer({
+ size: 4,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.STORAGE,
+ });
+
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({
+ code: `
+ struct Data {
+ value : u32
+ };
+
+ @group(0) @binding(0) var<storage, read> src : Data;
+ @group(0) @binding(1) var<storage, read_write> dst : Data;
+
+ @compute @workgroup_size(1) fn main() {
+ dst.value = src.value;
+ return;
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ });
+
+ const bg = t.device.createBindGroup({
+ entries: [
+ { binding: 0, resource: { buffer: src, offset: 0, size: 4 } },
+ { binding: 1, resource: { buffer: dst, offset: 0, size: 4 } },
+ ],
+ layout: pipeline.getBindGroupLayout(0),
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bg);
+ pass.dispatchWorkgroups(1);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+
+ t.expectGPUBufferValuesEqual(dst, data);
+});
+
+g.test('large_dispatch')
+ .desc(`Test reasonably-sized large dispatches (see also: stress tests).`)
+ .params(u =>
+ u
+      // Reasonably-sized powers of two, plus some odd non-power-of-two sizes.
+ .combine('dispatchSize', [256, 2048, 315, 628, 2179, 'maximum'] as const)
+ // Test some reasonable workgroup sizes.
+ .beginSubcases()
+ // 0 == x axis; 1 == y axis; 2 == z axis.
+ .combine('largeDimension', [0, 1, 2] as const)
+ .expand('workgroupSize', () => [1, 2, 8, 32, 'maximum'] as const)
+ )
+ .fn(t => {
+ // The output storage buffer is filled with this value.
+ const val = 0x01020304;
+ const badVal = 0xbaadf00d;
+
+ const kMaxComputeWorkgroupSize = [
+ t.device.limits.maxComputeWorkgroupSizeX,
+ t.device.limits.maxComputeWorkgroupSizeY,
+ t.device.limits.maxComputeWorkgroupSizeZ,
+ ];
+
+ const wgSize =
+ t.params.workgroupSize === 'maximum'
+ ? kMaxComputeWorkgroupSize[t.params.largeDimension]
+ : t.params.workgroupSize;
+ const dispatchSize =
+ t.params.dispatchSize === 'maximum'
+ ? t.device.limits.maxComputeWorkgroupsPerDimension
+ : t.params.dispatchSize;
+ const bufferLength = dispatchSize * wgSize;
+ const bufferByteSize = Uint32Array.BYTES_PER_ELEMENT * bufferLength;
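+    // With the default limits (assumed here: 65535 workgroups per dimension and a maximum
+    // workgroup size of 256 along one axis), the 'maximum'/'maximum' case allocates roughly
+    // 65535 * 256 * 4 bytes, i.e. about 64 MiB, for the output buffer.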
+ const dst = t.device.createBuffer({
+ size: bufferByteSize,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.STORAGE,
+ });
+
+ // Only use one large dimension and workgroup size in the dispatch
+ // call to keep the size of the test reasonable.
+ const dims = [1, 1, 1];
+ dims[t.params.largeDimension] = dispatchSize;
+ const wgSizes = [1, 1, 1];
+ wgSizes[t.params.largeDimension] = wgSize;
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({
+ code: `
+ struct OutputBuffer {
+ value : array<u32>
+ };
+
+ @group(0) @binding(0) var<storage, read_write> dst : OutputBuffer;
+
+ @compute @workgroup_size(${wgSizes[0]}, ${wgSizes[1]}, ${wgSizes[2]})
+ fn main(
+ @builtin(global_invocation_id) GlobalInvocationID : vec3<u32>
+ ) {
+ var xExtent : u32 = ${dims[0]}u * ${wgSizes[0]}u;
+ var yExtent : u32 = ${dims[1]}u * ${wgSizes[1]}u;
+ var zExtent : u32 = ${dims[2]}u * ${wgSizes[2]}u;
+ var index : u32 = (
+ GlobalInvocationID.z * xExtent * yExtent +
+ GlobalInvocationID.y * xExtent +
+ GlobalInvocationID.x);
+ var val : u32 = ${val}u;
+ // Trivial error checking in the indexing and invocation.
+ if (GlobalInvocationID.x > xExtent ||
+ GlobalInvocationID.y > yExtent ||
+ GlobalInvocationID.z > zExtent) {
+ val = ${badVal}u;
+ }
+ dst.value[index] = val;
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ });
+
+ const bg = t.device.createBindGroup({
+ entries: [{ binding: 0, resource: { buffer: dst, offset: 0, size: bufferByteSize } }],
+ layout: pipeline.getBindGroupLayout(0),
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bg);
+ pass.dispatchWorkgroups(dims[0], dims[1], dims[2]);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+
+ t.expectGPUBufferValuesPassCheck(dst, a => checkElementsEqualGenerated(a, _i => val), {
+ type: Uint32Array,
+ typedLength: bufferLength,
+ });
+
+ dst.destroy();
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/compute_pipeline/entry_point_name.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/compute_pipeline/entry_point_name.spec.ts
new file mode 100644
index 0000000000..a62031c3fd
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/compute_pipeline/entry_point_name.spec.ts
@@ -0,0 +1,12 @@
+export const description = `
+TODO:
+- Test some weird but valid values for entry point name (both module and pipeline creation
+ should succeed).
+- Test using each of many entry points in the module (should succeed).
+- Test using an entry point with the wrong stage (should fail).
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/compute_pipeline/overrides.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/compute_pipeline/overrides.spec.ts
new file mode 100644
index 0000000000..7c6ecf4192
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/compute_pipeline/overrides.spec.ts
@@ -0,0 +1,503 @@
+export const description = `
+Tests for compute pipelines using overridable constants.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { range } from '../../../../common/util/util.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+class F extends GPUTest {
+ async ExpectShaderOutputWithConstants(
+ isAsync: boolean,
+ expected: Uint32Array | Float32Array,
+ constants: Record<string, GPUPipelineConstantValue>,
+ code: string
+ ) {
+ const dst = this.device.createBuffer({
+ size: expected.byteLength,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.STORAGE,
+ });
+
+ const descriptor: GPUComputePipelineDescriptor = {
+ layout: 'auto',
+ compute: {
+ module: this.device.createShaderModule({
+ code,
+ }),
+ entryPoint: 'main',
+ constants,
+ },
+ };
+
+ const promise = isAsync
+ ? this.device.createComputePipelineAsync(descriptor)
+ : Promise.resolve(this.device.createComputePipeline(descriptor));
+
+ const pipeline = await promise;
+ const bindGroup = this.device.createBindGroup({
+ entries: [{ binding: 0, resource: { buffer: dst, offset: 0, size: expected.byteLength } }],
+ layout: pipeline.getBindGroupLayout(0),
+ });
+
+ const encoder = this.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(1);
+ pass.end();
+ this.device.queue.submit([encoder.finish()]);
+
+ this.expectGPUBufferValuesEqual(dst, expected);
+ }
+}
+
+export const g = makeTestGroup(F);
+
+g.test('basic')
+ .desc(
+    `Test that the correct constant override values (or the default values, when no override is provided at pipeline creation time) are used as the output to the storage buffer.`
+ )
+ .params(u => u.combine('isAsync', [true, false]))
+ .fn(async t => {
+ const count = 11;
+ await t.ExpectShaderOutputWithConstants(
+ t.params.isAsync,
+ new Uint32Array(range(count, i => i)),
+ {
+ c0: 0,
+ c1: 1,
+ c2: 2,
+ c3: 3,
+ // c4 is using default value
+ c5: 5,
+ c6: 6,
+ // c7 is using default value
+ c8: 8,
+ c9: 9,
+ // c10 is using default value
+ },
+ `
+ override c0: bool; // type: bool
+ override c1: bool = false; // default override
+ override c2: f32; // type: float32
+ override c3: f32 = 0.0; // default override
+ override c4: f32 = 4.0; // default
+ override c5: i32; // type: int32
+ override c6: i32 = 0; // default override
+ override c7: i32 = 7; // default
+ override c8: u32; // type: uint32
+ override c9: u32 = 0u; // default override
+ override c10: u32 = 10u; // default
+
+ struct Buf {
+ data : array<u32, ${count}>
+ }
+
+ @group(0) @binding(0) var<storage, read_write> buf : Buf;
+
+ @compute @workgroup_size(1) fn main() {
+ buf.data[0] = u32(c0);
+ buf.data[1] = u32(c1);
+ buf.data[2] = u32(c2);
+ buf.data[3] = u32(c3);
+ buf.data[4] = u32(c4);
+ buf.data[5] = u32(c5);
+ buf.data[6] = u32(c6);
+ buf.data[7] = u32(c7);
+ buf.data[8] = u32(c8);
+ buf.data[9] = u32(c9);
+ buf.data[10] = u32(c10);
+ }
+ `
+ );
+ });
+
+g.test('numeric_id')
+ .desc(
+    `Test that the correct values are used as output to the storage buffer for constants specified by numeric id instead of by name.`
+ )
+ .params(u => u.combine('isAsync', [true, false]))
+ .fn(async t => {
+ await t.ExpectShaderOutputWithConstants(
+ t.params.isAsync,
+ new Uint32Array([1, 2, 3]),
+ {
+ 1001: 1,
+ 1: 2,
+ // 1003 is using default value
+ },
+ `
+ @id(1001) override c1: u32; // some big numeric id
+ @id(1) override c2: u32 = 0u; // id == 1 might collide with some generated constant id
+ @id(1003) override c3: u32 = 3u; // default
+
+ struct Buf {
+ data : array<u32, 3>
+ }
+
+ @group(0) @binding(0) var<storage, read_write> buf : Buf;
+
+ @compute @workgroup_size(1) fn main() {
+ buf.data[0] = c1;
+ buf.data[1] = c2;
+ buf.data[2] = c3;
+ }
+ `
+ );
+ });
+
+g.test('precision')
+ .desc(
+    `Test that floating-point precision is preserved for constants when the compute shader writes them to the storage buffer.`
+ )
+ .params(u => u.combine('isAsync', [true, false]))
+ .fn(async t => {
+ const c1 = 3.14159;
+ const c2 = 3.141592653589793;
+ await t.ExpectShaderOutputWithConstants(
+ t.params.isAsync,
+      // These values get rounded to f32 by createComputePipeline, so the values coming out of the shader won't be exactly the ones written here; the Float32Array below applies the same rounding to the expected values.
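+      // For example, Math.fround(3.141592653589793) gives the nearest f32 value
+      // (about 3.1415927), which is what the shader is expected to write back.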
+ new Float32Array([c1, c2]),
+ {
+ c1,
+ c2,
+ },
+ `
+ override c1: f32;
+ override c2: f32;
+
+ struct Buf {
+ data : array<f32, 2>
+ }
+
+ @group(0) @binding(0) var<storage, read_write> buf : Buf;
+
+ @compute @workgroup_size(1) fn main() {
+ buf.data[0] = c1;
+ buf.data[1] = c2;
+ }
+ `
+ );
+ });
+
+g.test('workgroup_size')
+ .desc(
+    `Test that constants can be used as the workgroup size. The compute shader writes (max local invocation id + 1) along the chosen dimension to the storage buffer, which should equal the workgroup size given by the constant.`
+ )
+ .params(u =>
+ u //
+ .combine('isAsync', [true, false])
+ .combine('type', ['u32', 'i32'])
+ .combine('size', [3, 16, 64])
+ .combine('v', ['x', 'y', 'z'])
+ )
+ .fn(async t => {
+ const { isAsync, type, size, v } = t.params;
+ const workgroup_size_str = v === 'x' ? 'd' : v === 'y' ? '1, d' : '1, 1, d';
+ await t.ExpectShaderOutputWithConstants(
+ isAsync,
+ new Uint32Array([size]),
+ {
+ d: size,
+ },
+ `
+ override d: ${type};
+
+ struct Buf {
+ data : array<u32, 1>
+ }
+
+ @group(0) @binding(0) var<storage, read_write> buf : Buf;
+
+ @compute @workgroup_size(${workgroup_size_str}) fn main(
+ @builtin(local_invocation_id) local_invocation_id : vec3<u32>
+ ) {
+ if (local_invocation_id.${v} >= u32(d - 1)) {
+ buf.data[0] = local_invocation_id.${v} + 1;
+ }
+ }
+ `
+ );
+ });
+
+g.test('shared_shader_module')
+ .desc(
+    `Test that when the same shader module is shared by different pipelines, the correct constant values are used as output to the storage buffer. The constant values of one pipeline should not affect other pipelines sharing the same shader module.`
+ )
+ .params(u => u.combine('isAsync', [true, false]))
+ .fn(async t => {
+ const module = t.device.createShaderModule({
+ code: `
+ override a: u32;
+
+ struct Buf {
+ data : array<u32, 1>
+ }
+
+ @group(0) @binding(0) var<storage, read_write> buf : Buf;
+
+ @compute @workgroup_size(1) fn main() {
+ buf.data[0] = a;
+ }`,
+ });
+
+ const expects = [new Uint32Array([1]), new Uint32Array([2])];
+ const buffers = [
+ t.device.createBuffer({
+ size: Uint32Array.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.STORAGE,
+ }),
+ t.device.createBuffer({
+ size: Uint32Array.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.STORAGE,
+ }),
+ ];
+
+ const descriptors: GPUComputePipelineDescriptor[] = [
+ {
+ layout: 'auto',
+ compute: {
+ module,
+ entryPoint: 'main',
+ constants: {
+ a: 1,
+ },
+ },
+ },
+ {
+ layout: 'auto',
+ compute: {
+ module,
+ entryPoint: 'main',
+ constants: {
+ a: 2,
+ },
+ },
+ },
+ ];
+
+ const promises = t.params.isAsync
+ ? Promise.all([
+ t.device.createComputePipelineAsync(descriptors[0]),
+ t.device.createComputePipelineAsync(descriptors[1]),
+ ])
+ : Promise.resolve([
+ t.device.createComputePipeline(descriptors[0]),
+ t.device.createComputePipeline(descriptors[1]),
+ ]);
+
+ const pipelines = await promises;
+ const bindGroups = [
+ t.device.createBindGroup({
+ entries: [
+ {
+ binding: 0,
+ resource: { buffer: buffers[0], offset: 0, size: Uint32Array.BYTES_PER_ELEMENT },
+ },
+ ],
+ layout: pipelines[0].getBindGroupLayout(0),
+ }),
+ t.device.createBindGroup({
+ entries: [
+ {
+ binding: 0,
+ resource: { buffer: buffers[1], offset: 0, size: Uint32Array.BYTES_PER_ELEMENT },
+ },
+ ],
+ layout: pipelines[1].getBindGroupLayout(0),
+ }),
+ ];
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipelines[0]);
+ pass.setBindGroup(0, bindGroups[0]);
+ pass.dispatchWorkgroups(1);
+ pass.setPipeline(pipelines[1]);
+ pass.setBindGroup(0, bindGroups[1]);
+ pass.dispatchWorkgroups(1);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+
+ t.expectGPUBufferValuesEqual(buffers[0], expects[0]);
+ t.expectGPUBufferValuesEqual(buffers[1], expects[1]);
+ });
+
+g.test('multi_entry_points')
+ .desc(
+    `Test that constants used for different entry points are used correctly as output to the storage buffer. They should have no impact on pipelines using entry points that don't reference them.`
+ )
+ .params(u => u.combine('isAsync', [true, false]))
+ .fn(async t => {
+ const module = t.device.createShaderModule({
+ code: `
+ override c1: u32;
+ override c2: u32;
+ override c3: u32;
+
+ struct Buf {
+ data : array<u32, 1>
+ }
+
+ @group(0) @binding(0) var<storage, read_write> buf : Buf;
+
+ @compute @workgroup_size(1) fn main1() {
+ buf.data[0] = c1;
+ }
+
+ @compute @workgroup_size(1) fn main2() {
+ buf.data[0] = c2;
+ }
+
+ @compute @workgroup_size(c3) fn main3() {
+ buf.data[0] = 3u;
+ }`,
+ });
+
+ const expects = [
+ new Uint32Array([1]),
+ new Uint32Array([2]),
+ new Uint32Array([3]),
+ new Uint32Array([4]),
+ ];
+
+ const buffers = [
+ t.device.createBuffer({
+ size: Uint32Array.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.STORAGE,
+ }),
+ t.device.createBuffer({
+ size: Uint32Array.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.STORAGE,
+ }),
+ t.device.createBuffer({
+ size: Uint32Array.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.STORAGE,
+ }),
+ t.device.createBuffer({
+ size: Uint32Array.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.STORAGE,
+ }),
+ ];
+
+ const descriptors: GPUComputePipelineDescriptor[] = [
+ {
+ layout: 'auto',
+ compute: {
+ module,
+ entryPoint: 'main1',
+ constants: {
+ c1: 1,
+ },
+ },
+ },
+ {
+ layout: 'auto',
+ compute: {
+ module,
+ entryPoint: 'main2',
+ constants: {
+ c2: 2,
+ },
+ },
+ },
+ {
+ layout: 'auto',
+ compute: {
+ module,
+ entryPoint: 'main3',
+ constants: {
+ // c3 is used as workgroup size
+ c3: 1,
+ },
+ },
+ },
+ {
+ layout: 'auto',
+ compute: {
+ module,
+ entryPoint: 'main1',
+ constants: {
+ // assign a different value to c1
+ c1: 4,
+ },
+ },
+ },
+ ];
+
+ const promises = t.params.isAsync
+ ? Promise.all([
+ t.device.createComputePipelineAsync(descriptors[0]),
+ t.device.createComputePipelineAsync(descriptors[1]),
+ t.device.createComputePipelineAsync(descriptors[2]),
+ t.device.createComputePipelineAsync(descriptors[3]),
+ ])
+ : Promise.resolve([
+ t.device.createComputePipeline(descriptors[0]),
+ t.device.createComputePipeline(descriptors[1]),
+ t.device.createComputePipeline(descriptors[2]),
+ t.device.createComputePipeline(descriptors[3]),
+ ]);
+
+ const pipelines = await promises;
+ const bindGroups = [
+ t.device.createBindGroup({
+ entries: [
+ {
+ binding: 0,
+ resource: { buffer: buffers[0], offset: 0, size: Uint32Array.BYTES_PER_ELEMENT },
+ },
+ ],
+ layout: pipelines[0].getBindGroupLayout(0),
+ }),
+ t.device.createBindGroup({
+ entries: [
+ {
+ binding: 0,
+ resource: { buffer: buffers[1], offset: 0, size: Uint32Array.BYTES_PER_ELEMENT },
+ },
+ ],
+ layout: pipelines[1].getBindGroupLayout(0),
+ }),
+ t.device.createBindGroup({
+ entries: [
+ {
+ binding: 0,
+ resource: { buffer: buffers[2], offset: 0, size: Uint32Array.BYTES_PER_ELEMENT },
+ },
+ ],
+ layout: pipelines[2].getBindGroupLayout(0),
+ }),
+ t.device.createBindGroup({
+ entries: [
+ {
+ binding: 0,
+ resource: { buffer: buffers[3], offset: 0, size: Uint32Array.BYTES_PER_ELEMENT },
+ },
+ ],
+ layout: pipelines[3].getBindGroupLayout(0),
+ }),
+ ];
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipelines[0]);
+ pass.setBindGroup(0, bindGroups[0]);
+ pass.dispatchWorkgroups(1);
+ pass.setPipeline(pipelines[1]);
+ pass.setBindGroup(0, bindGroups[1]);
+ pass.dispatchWorkgroups(1);
+ pass.setPipeline(pipelines[2]);
+ pass.setBindGroup(0, bindGroups[2]);
+ pass.dispatchWorkgroups(1);
+ pass.setPipeline(pipelines[3]);
+ pass.setBindGroup(0, bindGroups[3]);
+ pass.dispatchWorkgroups(1);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+
+ t.expectGPUBufferValuesEqual(buffers[0], expects[0]);
+ t.expectGPUBufferValuesEqual(buffers[1], expects[1]);
+ t.expectGPUBufferValuesEqual(buffers[2], expects[2]);
+ t.expectGPUBufferValuesEqual(buffers[3], expects[3]);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/device/lost.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/device/lost.spec.ts
new file mode 100644
index 0000000000..e79c111d38
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/device/lost.spec.ts
@@ -0,0 +1,92 @@
+export const description = `
+Tests for GPUDevice.lost.
+`;
+
+import { Fixture } from '../../../../common/framework/fixture.js';
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { attemptGarbageCollection } from '../../../../common/util/collect_garbage.js';
+import { getGPU } from '../../../../common/util/navigator_gpu.js';
+import {
+ assert,
+ assertNotSettledWithinTime,
+ raceWithRejectOnTimeout,
+} from '../../../../common/util/util.js';
+
+class DeviceLostTests extends Fixture {
+ // Default timeout for waiting for device lost is 2 seconds.
+ readonly kDeviceLostTimeoutMS = 2000;
+
+ getDeviceLostWithTimeout(lost: Promise<GPUDeviceLostInfo>): Promise<GPUDeviceLostInfo> {
+ return raceWithRejectOnTimeout(lost, this.kDeviceLostTimeoutMS, 'device was not lost');
+ }
+
+ expectDeviceDestroyed(device: GPUDevice): void {
+ this.eventualAsyncExpectation(async niceStack => {
+ try {
+ const lost = await this.getDeviceLostWithTimeout(device.lost);
+ this.expect(lost.reason === 'destroyed', 'device was lost from destroy');
+ } catch (ex) {
+ niceStack.message = 'device was not lost';
+ this.rec.expectationFailed(niceStack);
+ }
+ });
+ }
+}
+
+export const g = makeTestGroup(DeviceLostTests);
+
+g.test('not_lost_on_gc')
+ .desc(
+ `'lost' is never resolved by GPUDevice being garbage collected (with attemptGarbageCollection).`
+ )
+ .fn(async t => {
+ // Wraps a lost promise object creation in a function scope so that the device has the best
+ // chance of being gone and ready for GC before trying to resolve the lost promise.
+ const { lost } = await (async () => {
+ const adapter = await getGPU(t.rec).requestAdapter();
+ assert(adapter !== null);
+ const lost = (await adapter.requestDevice()).lost;
+ return { lost };
+ })();
+ await assertNotSettledWithinTime(lost, t.kDeviceLostTimeoutMS, 'device was unexpectedly lost');
+
+ await attemptGarbageCollection();
+ });
+
+g.test('lost_on_destroy')
+ .desc(`'lost' is resolved, with reason='destroyed', on GPUDevice.destroy().`)
+ .fn(async t => {
+ const adapter = await getGPU(t.rec).requestAdapter();
+ assert(adapter !== null);
+ const device: GPUDevice = await adapter.requestDevice();
+ t.expectDeviceDestroyed(device);
+ device.destroy();
+ });
+
+g.test('same_object')
+ .desc(`'lost' provides the same Promise and GPUDeviceLostInfo objects each time it's accessed.`)
+ .fn(async t => {
+ const adapter = await getGPU(t.rec).requestAdapter();
+ assert(adapter !== null);
+ const device: GPUDevice = await adapter.requestDevice();
+
+ // The promises should be the same promise object.
+ const lostPromise1 = device.lost;
+ const lostPromise2 = device.lost;
+ t.expect(lostPromise1 === lostPromise2);
+
+ // Promise object should still be the same after destroy.
+ device.destroy();
+ const lostPromise3 = device.lost;
+ t.expect(lostPromise1 === lostPromise3);
+
+ // The results should also be the same result object.
+ const lost1 = await t.getDeviceLostWithTimeout(lostPromise1);
+ const lost2 = await t.getDeviceLostWithTimeout(lostPromise2);
+ const lost3 = await t.getDeviceLostWithTimeout(lostPromise3);
+ // Promise object should still be the same after we've been notified about device loss.
+ const lostPromise4 = device.lost;
+ t.expect(lostPromise1 === lostPromise4);
+ const lost4 = await t.getDeviceLostWithTimeout(lostPromise4);
+ t.expect(lost1 === lost2 && lost2 === lost3 && lost3 === lost4);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/labels.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/labels.spec.ts
new file mode 100644
index 0000000000..d7785f91dc
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/labels.spec.ts
@@ -0,0 +1,280 @@
+export const description = `
+Tests for object labels.
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { keysOf } from '../../../common/util/data_tables.js';
+import { getGPU } from '../../../common/util/navigator_gpu.js';
+import { GPUTest } from '../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+type TestFunction = (t: GPUTest, label: string) => Promise<void> | void;
+const kTestFunctions: { [name: string]: TestFunction } = {
+ createBuffer: (t: GPUTest, label: string) => {
+ const buffer = t.device.createBuffer({ size: 16, usage: GPUBufferUsage.COPY_DST, label });
+ t.expect(buffer.label === label);
+ buffer.destroy();
+ t.expect(buffer.label === label);
+ },
+
+ requestDevice: async (t: GPUTest, label: string) => {
+ const gpu = getGPU(t.rec);
+ const adapter = await gpu.requestAdapter();
+ t.expect(!!adapter);
+ const device = await adapter!.requestDevice({ label });
+ t.expect(!!device);
+ t.expect(device.label === label);
+ device.destroy();
+ t.expect(device.label === label);
+ },
+
+ createTexture: (t: GPUTest, label: string) => {
+ const texture = t.device.createTexture({
+ label,
+ size: [1, 1, 1],
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ t.expect(texture.label === label);
+ texture.destroy();
+ t.expect(texture.label === label);
+ },
+
+ createSampler: (t: GPUTest, label: string) => {
+ const sampler = t.device.createSampler({ label });
+ t.expect(sampler.label === label);
+ },
+
+ createBindGroupLayout: (t: GPUTest, label: string) => {
+ const bindGroupLayout = t.device.createBindGroupLayout({ label, entries: [] });
+ t.expect(bindGroupLayout.label === label);
+ },
+
+ createPipelineLayout: (t: GPUTest, label: string) => {
+ const pipelineLayout = t.device.createPipelineLayout({ label, bindGroupLayouts: [] });
+ t.expect(pipelineLayout.label === label);
+ },
+
+ createBindGroup: (t: GPUTest, label: string) => {
+ const layout = t.device.createBindGroupLayout({ entries: [] });
+ const bindGroup = t.device.createBindGroup({ label, layout, entries: [] });
+ t.expect(bindGroup.label === label);
+ },
+
+ createShaderModule: (t: GPUTest, label: string) => {
+ const shaderModule = t.device.createShaderModule({
+ label,
+ code: `
+ @vertex fn vs() -> @builtin(position) vec4f {
+ return vec4f(0, 0, 0, 1);
+ }
+ `,
+ });
+ t.expect(shaderModule.label === label);
+ },
+
+ createComputePipeline: (t: GPUTest, label: string) => {
+ const module = t.device.createShaderModule({
+ code: `
+ @compute @workgroup_size(1u) fn foo() {}
+ `,
+ });
+ const computePipeline = t.device.createComputePipeline({
+ label,
+ layout: 'auto',
+ compute: {
+ module,
+ entryPoint: 'foo',
+ },
+ });
+ t.expect(computePipeline.label === label);
+ },
+
+ createRenderPipeline: (t: GPUTest, label: string) => {
+ const module = t.device.createShaderModule({
+ code: `
+ @vertex fn foo() -> @builtin(position) vec4f {
+ return vec4f(0, 0, 0, 1);
+ }
+ `,
+ });
+ const renderPipeline = t.device.createRenderPipeline({
+ label,
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'foo',
+ },
+ });
+ t.expect(renderPipeline.label === label);
+ },
+
+ createComputePipelineAsync: async (t: GPUTest, label: string) => {
+ const module = t.device.createShaderModule({
+ code: `
+ @compute @workgroup_size(1u) fn foo() {}
+ `,
+ });
+ const computePipeline = await t.device.createComputePipelineAsync({
+ label,
+ layout: 'auto',
+ compute: {
+ module,
+ entryPoint: 'foo',
+ },
+ });
+ t.expect(computePipeline.label === label);
+ },
+
+ createRenderPipelineAsync: async (t: GPUTest, label: string) => {
+ const module = t.device.createShaderModule({
+ label,
+ code: `
+ @vertex fn foo() -> @builtin(position) vec4f {
+ return vec4f(0, 0, 0, 1);
+ }
+ `,
+ });
+ const renderPipeline = await t.device.createRenderPipelineAsync({
+ label,
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'foo',
+ },
+ });
+ t.expect(renderPipeline.label === label);
+ },
+
+ createCommandEncoder: (t: GPUTest, label: string) => {
+ const encoder = t.device.createCommandEncoder({ label });
+ t.expect(encoder.label === label);
+ },
+
+ createRenderBundleEncoder: (t: GPUTest, label: string) => {
+ const encoder = t.device.createRenderBundleEncoder({
+ label,
+ colorFormats: ['rgba8unorm'],
+ });
+ t.expect(encoder.label === label);
+ },
+
+ createQuerySet: (t: GPUTest, label: string) => {
+ const querySet = t.device.createQuerySet({
+ label,
+ type: 'occlusion',
+ count: 1,
+ });
+ t.expect(querySet.label === label);
+ querySet.destroy();
+ t.expect(querySet.label === label);
+ },
+
+ beginRenderPass: (t: GPUTest, label: string) => {
+ const texture = t.device.createTexture({
+ label,
+ size: [1, 1, 1],
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ const label2 = `${label}-2`;
+ const encoder = t.device.createCommandEncoder();
+ encoder.label = label2;
+ const renderPass = encoder.beginRenderPass({
+ label,
+ colorAttachments: [{ view: texture.createView(), loadOp: 'clear', storeOp: 'store' }],
+ });
+ t.expect(renderPass.label === label);
+ renderPass.end();
+ t.expect(renderPass.label === label);
+ encoder.finish();
+ t.expect(renderPass.label === label);
+ t.expect(encoder.label === label2);
+ texture.destroy();
+ },
+
+ beginComputePass: (t: GPUTest, label: string) => {
+ const label2 = `${label}-2`;
+ const encoder = t.device.createCommandEncoder();
+ encoder.label = label2;
+ const computePass = encoder.beginComputePass({ label });
+ t.expect(computePass.label === label);
+ computePass.end();
+ t.expect(computePass.label === label);
+ encoder.finish();
+ t.expect(computePass.label === label);
+ t.expect(encoder.label === label2);
+ },
+
+ finish: (t: GPUTest, label: string) => {
+ const encoder = t.device.createCommandEncoder();
+ const commandBuffer = encoder.finish({ label });
+ t.expect(commandBuffer.label === label);
+ },
+
+ createView: (t: GPUTest, label: string) => {
+ const texture = t.device.createTexture({
+ size: [1, 1, 1],
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ const view = texture.createView({ label });
+ t.expect(view.label === label);
+ texture.destroy();
+ t.expect(view.label === label);
+ },
+};
+
+g.test('object_has_descriptor_label')
+ .desc(
+ `
+ For every create function, the descriptor.label is carried over to the object.label.
+
+ TODO: test importExternalTexture
+  TODO: make a best effort at generating an error that is likely to use the label. There's nothing to check for,
+ but it may surface bugs related to unusual labels.
+ `
+ )
+ .params(u =>
+ u
+ .combine('name', keysOf(kTestFunctions))
+ .beginSubcases()
+ .combine('label', ['label', '\0', 'null\0in\0label', '🌞👆'])
+ )
+ .fn(async t => {
+ const { name, label } = t.params;
+ const result = kTestFunctions[name](t, label);
+ if (result instanceof Promise) {
+ await result;
+ }
+ });
+
+g.test('wrappers_do_not_share_labels')
+ .desc('test that different wrapper objects for the same GPU object do not share labels')
+ .fn(t => {
+ const module = t.device.createShaderModule({
+ code: `
+ @group(0) @binding(0) var<uniform> pos: vec4f;
+ @vertex fn main() -> @builtin(position) vec4f {
+ return pos;
+ }
+ `,
+ });
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'main',
+ },
+ });
+ const layout1 = pipeline.getBindGroupLayout(0);
+ const layout2 = pipeline.getBindGroupLayout(0);
+ t.expect(layout1 !== layout2);
+
+ layout1.label = 'foo';
+ layout2.label = 'bar';
+
+ t.expect(layout1.label === 'foo');
+ t.expect(layout2.label === 'bar');
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_allocation/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_allocation/README.txt
new file mode 100644
index 0000000000..a8a8eb1d68
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_allocation/README.txt
@@ -0,0 +1,7 @@
+Try to stress memory allocators in the implementation and driver.
+
+TODO: plan and implement
+- Tests which (pseudo-randomly?) allocate a bunch of memory and then assert things about the memory
+ (it's not aliased, it's valid to read and write in various ways, accesses read/write the correct data)
+ - Possibly also with OOB accesses/robust buffer access?
+- Tests which are targeted against particular known implementation details
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_sync/buffer/buffer_sync_test.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_sync/buffer/buffer_sync_test.ts
new file mode 100644
index 0000000000..a9d4cb7d57
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_sync/buffer/buffer_sync_test.ts
@@ -0,0 +1,942 @@
+import { assert, unreachable } from '../../../../../common/util/util.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { checkElementsEqualEither } from '../../../../util/check_contents.js';
+import { OperationContext, OperationContextHelper } from '../operation_context_helper.js';
+
+export const kAllWriteOps = ['storage', 'b2b-copy', 't2b-copy', 'write-buffer'] as const;
+
+export const kAllReadOps = [
+ 'input-vertex',
+ 'input-index',
+ 'input-indirect',
+ 'input-indirect-index',
+ 'input-indirect-dispatch',
+
+ 'constant-uniform',
+
+ 'storage-read',
+
+ 'b2b-copy',
+ 'b2t-copy',
+] as const;
+
+export type ReadOp = (typeof kAllReadOps)[number];
+export type WriteOp = (typeof kAllWriteOps)[number];
+
+export type Op = ReadOp | WriteOp;
+
+interface OpInfo {
+ readonly contexts: OperationContext[];
+}
+
+const kOpInfo: {
+ readonly [k in Op]: OpInfo;
+} = {
+ 'write-buffer': {
+ contexts: ['queue'],
+ },
+ 'b2t-copy': {
+ contexts: ['command-encoder'],
+ },
+ 'b2b-copy': {
+ contexts: ['command-encoder'],
+ },
+ 't2b-copy': {
+ contexts: ['command-encoder'],
+ },
+ storage: {
+ contexts: ['compute-pass-encoder', 'render-pass-encoder', 'render-bundle-encoder'],
+ },
+ 'storage-read': {
+ contexts: ['compute-pass-encoder', 'render-pass-encoder', 'render-bundle-encoder'],
+ },
+ 'input-vertex': {
+ contexts: ['render-pass-encoder', 'render-bundle-encoder'],
+ },
+ 'input-index': {
+ contexts: ['render-pass-encoder', 'render-bundle-encoder'],
+ },
+ 'input-indirect': {
+ contexts: ['render-pass-encoder', 'render-bundle-encoder'],
+ },
+ 'input-indirect-index': {
+ contexts: ['render-pass-encoder', 'render-bundle-encoder'],
+ },
+ 'input-indirect-dispatch': {
+ contexts: ['compute-pass-encoder'],
+ },
+ 'constant-uniform': {
+ contexts: ['render-pass-encoder', 'render-bundle-encoder'],
+ },
+};
+
+export function checkOpsValidForContext(
+ ops: [Op, Op],
+ context: [OperationContext, OperationContext]
+) {
+ const valid =
+ kOpInfo[ops[0]].contexts.includes(context[0]) && kOpInfo[ops[1]].contexts.includes(context[1]);
+ if (!valid) return false;
+
+ if (
+ context[0] === 'render-bundle-encoder' ||
+ context[0] === 'render-pass-encoder' ||
+ context[1] === 'render-bundle-encoder' ||
+ context[1] === 'render-pass-encoder'
+ ) {
+    // In a render pass, it is invalid to use the same resource with both a writable usage and
+    // any other usage. For storage+storage usage, the application is opting into racy behavior,
+    // so that case is also skipped since its results cannot be tested reliably.
+ const checkImpl = (op1: Op, op2: Op) => {
+ switch (op1) {
+ case 'storage':
+ switch (op2) {
+ case 'storage':
+ case 'storage-read':
+ case 'input-vertex':
+ case 'input-index':
+ case 'input-indirect':
+ case 'input-indirect-index':
+ case 'constant-uniform':
+ // Write+other, or racy.
+ return false;
+ case 'b2t-copy':
+ case 't2b-copy':
+ case 'b2b-copy':
+ case 'write-buffer':
+ // These don't occur in a render pass.
+ return true;
+ }
+ break;
+ case 'input-vertex':
+ case 'input-index':
+ case 'input-indirect':
+ case 'input-indirect-index':
+ case 'constant-uniform':
+ case 'b2t-copy':
+ case 't2b-copy':
+ case 'b2b-copy':
+ case 'write-buffer':
+ // These are not write usages, or don't occur in a render pass.
+ break;
+ }
+ return true;
+ };
+ return checkImpl(ops[0], ops[1]) && checkImpl(ops[1], ops[0]);
+ }
+ return true;
+}
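+// For example (a hypothetical pairing, not taken from the test parameters):
+// checkOpsValidForContext(['storage', 'storage-read'], ['render-pass-encoder', 'render-pass-encoder'])
+// returns false, since a render pass may not use one buffer with both a writable storage usage
+// and another usage, while (['write-buffer', 'b2b-copy'], ['queue', 'command-encoder']) returns
+// true because neither operation runs inside a render pass.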
+
+const kDummyVertexShader = `
+@vertex fn vert_main() -> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.5, 0.5, 0.0, 1.0);
+}
+`;
+
+// Note: If it would be useful to have any of these helpers be separate from the fixture,
+// they can be refactored into standalone functions.
+export class BufferSyncTest extends GPUTest {
+ // Vertex and index buffers used in read render pass
+ vertexBuffer?: GPUBuffer;
+ indexBuffer?: GPUBuffer;
+
+  // Temp buffers and textures holding the values for buffer/texture copy write ops.
+  // There can be at most 2 write ops.
+ tmpValueBuffers: (GPUBuffer | undefined)[] = [undefined, undefined];
+ tmpValueTextures: (GPUTexture | undefined)[] = [undefined, undefined];
+
+  // These intermediate buffers/textures are created before any read/write op to avoid extra
+  // memory synchronization between ops introduced by awaiting buffer/texture creation.
+  // Create the extra buffers/textures needed by the write operation.
+ async createIntermediateBuffersAndTexturesForWriteOp(
+ writeOp: WriteOp,
+ slot: number,
+ value: number
+ ) {
+ switch (writeOp) {
+ case 'b2b-copy':
+ this.tmpValueBuffers[slot] = await this.createBufferWithValue(value);
+ break;
+ case 't2b-copy':
+ this.tmpValueTextures[slot] = await this.createTextureWithValue(value);
+ break;
+ default:
+ break;
+ }
+ }
+
+  // Create the extra buffers/textures needed by the read operation.
+  async createBuffersForReadOp(readOp: ReadOp, srcValue: number, opValue: number) {
+    // This helps create the values that will be written into the dst buffer by the read op.
+ switch (readOp) {
+ case 'input-index':
+ // The index buffer will be the src buffer of the read op.
+ // The src value for readOp will be 0
+ // If the index buffer value is 0, the src value is written into the dst buffer.
+ // If the index buffer value is 1, the op value is written into the dst buffer.
+ this.vertexBuffer = await this.createBufferWithValues([srcValue, opValue]);
+ break;
+ case 'input-indirect':
+ // The indirect buffer for the draw cmd will be the src buffer of the read op.
+        // If the first value in the indirect buffer is 1, then the op value in the vertex buffer will be written into the dst buffer.
+        // If the first value in the indirect buffer is 0, then nothing will be written into the dst buffer.
+ this.vertexBuffer = await this.createBufferWithValues([opValue]);
+ break;
+ case 'input-indirect-index':
+ // The indirect buffer for draw indexed cmd will be the src buffer of the read op.
+        // If the first value in the indirect buffer is 1, then the opValue in the vertex buffer will be written into the dst buffer.
+        // If the first value in the indirect buffer is 0, then nothing will be written into the dst buffer.
+ this.vertexBuffer = await this.createBufferWithValues([opValue]);
+ this.indexBuffer = await this.createBufferWithValues([0]);
+ break;
+ default:
+ break;
+ }
+
+ let srcBuffer: GPUBuffer;
+ switch (readOp) {
+ case 'input-indirect':
+ // vertexCount = {0, 1}
+ // instanceCount = 1
+ // firstVertex = 0
+ // firstInstance = 0
+ srcBuffer = await this.createBufferWithValues([srcValue, 1, 0, 0]);
+ break;
+ case 'input-indirect-index':
+ // indexCount = {0, 1}
+ // instanceCount = 1
+ // firstIndex = 0
+ // baseVertex = 0
+ // firstInstance = 0
+ srcBuffer = await this.createBufferWithValues([srcValue, 1, 0, 0, 0]);
+ break;
+ case 'input-indirect-dispatch':
+ // workgroupCountX = {0, 1}
+ // workgroupCountY = 1
+ // workgroupCountZ = 1
+ srcBuffer = await this.createBufferWithValues([srcValue, 1, 1]);
+ break;
+ default:
+ srcBuffer = await this.createBufferWithValue(srcValue);
+ break;
+ }
+
+ const dstBuffer = this.trackForCleanup(
+ this.device.createBuffer({
+ size: Uint32Array.BYTES_PER_ELEMENT,
+ usage:
+ GPUBufferUsage.COPY_SRC |
+ GPUBufferUsage.COPY_DST |
+ GPUBufferUsage.STORAGE |
+ GPUBufferUsage.VERTEX |
+ GPUBufferUsage.INDEX |
+ GPUBufferUsage.INDIRECT |
+ GPUBufferUsage.UNIFORM,
+ })
+ );
+
+ return { srcBuffer, dstBuffer };
+ }
+
+ // Create a buffer with 1 uint32 element, and initialize it to a specified value.
+ async createBufferWithValue(initValue: number): Promise<GPUBuffer> {
+ const buffer = this.trackForCleanup(
+ this.device.createBuffer({
+ mappedAtCreation: true,
+ size: Uint32Array.BYTES_PER_ELEMENT,
+ usage:
+ GPUBufferUsage.COPY_SRC |
+ GPUBufferUsage.COPY_DST |
+ GPUBufferUsage.STORAGE |
+ GPUBufferUsage.VERTEX |
+ GPUBufferUsage.INDEX |
+ GPUBufferUsage.INDIRECT |
+ GPUBufferUsage.UNIFORM,
+ })
+ );
+ new Uint32Array(buffer.getMappedRange()).fill(initValue);
+ buffer.unmap();
+ await this.queue.onSubmittedWorkDone();
+ return buffer;
+ }
+
+ // Create a buffer, and initialize it to the specified values.
+ async createBufferWithValues(initValues: number[]): Promise<GPUBuffer> {
+ const buffer = this.trackForCleanup(
+ this.device.createBuffer({
+ mappedAtCreation: true,
+ size: Uint32Array.BYTES_PER_ELEMENT * initValues.length,
+ usage:
+ GPUBufferUsage.COPY_SRC |
+ GPUBufferUsage.COPY_DST |
+ GPUBufferUsage.STORAGE |
+ GPUBufferUsage.VERTEX |
+ GPUBufferUsage.INDEX |
+ GPUBufferUsage.INDIRECT |
+ GPUBufferUsage.UNIFORM,
+ })
+ );
+ const bufferView = new Uint32Array(buffer.getMappedRange());
+ bufferView.set(initValues);
+ buffer.unmap();
+ await this.queue.onSubmittedWorkDone();
+ return buffer;
+ }
+
+ // Create a 1x1 texture, and initialize it to a specified value for all elements.
+ async createTextureWithValue(initValue: number): Promise<GPUTexture> {
+ // This is not hot in profiles; optimize if this gets used more heavily.
+ const data = new Uint32Array(1).fill(initValue);
+ const texture = this.trackForCleanup(
+ this.device.createTexture({
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ format: 'r32uint',
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ })
+ );
+ this.device.queue.writeTexture(
+ { texture, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
+ data,
+ { offset: 0, bytesPerRow: 256, rowsPerImage: 1 },
+ { width: 1, height: 1, depthOrArrayLayers: 1 }
+ );
+ await this.queue.onSubmittedWorkDone();
+ return texture;
+ }
+
+ createBindGroup(
+ pipeline: GPURenderPipeline | GPUComputePipeline,
+ buffer: GPUBuffer
+ ): GPUBindGroup {
+ return this.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer } }],
+ });
+ }
+
+ // Create a compute pipeline and write given data into storage buffer.
+ createStorageWriteComputePipeline(value: number): GPUComputePipeline {
+ const wgslCompute = `
+ struct Data {
+ a : u32
+ };
+
+ @group(0) @binding(0) var<storage, read_write> data : Data;
+ @compute @workgroup_size(1) fn main() {
+ data.a = ${value}u;
+ }
+ `;
+
+ return this.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: this.device.createShaderModule({
+ code: wgslCompute,
+ }),
+ entryPoint: 'main',
+ },
+ });
+ }
+
+ createTrivialRenderPipeline(wgslShaders: { vertex: string; fragment: string }) {
+ return this.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: this.device.createShaderModule({
+ code: wgslShaders.vertex,
+ }),
+ entryPoint: 'vert_main',
+ },
+ fragment: {
+ module: this.device.createShaderModule({
+ code: wgslShaders.fragment,
+ }),
+ entryPoint: 'frag_main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ primitive: { topology: 'point-list' },
+ });
+ }
+
+ // Create a render pipeline and write given data into storage buffer at fragment stage.
+ createStorageWriteRenderPipeline(value: number): GPURenderPipeline {
+ const wgslShaders = {
+ vertex: kDummyVertexShader,
+ fragment: `
+ struct Data {
+ a : u32
+ };
+
+ @group(0) @binding(0) var<storage, read_write> data : Data;
+ @fragment fn frag_main() -> @location(0) vec4<f32> {
+ data.a = ${value}u;
+          return vec4<f32>(); // result doesn't matter
+ }
+ `,
+ };
+
+ return this.createTrivialRenderPipeline(wgslShaders);
+ }
+
+ beginSimpleRenderPass(encoder: GPUCommandEncoder): GPURenderPassEncoder {
+ const view = this.trackForCleanup(
+ this.device.createTexture({
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ })
+ ).createView();
+ return encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view,
+ clearValue: { r: 0.0, g: 1.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ }
+
+ // Write buffer via draw call in render pass. Use bundle if needed.
+ encodeWriteAsStorageBufferInRenderPass(
+ renderer: GPURenderPassEncoder | GPURenderBundleEncoder,
+ buffer: GPUBuffer,
+ value: number
+ ) {
+ const pipeline = this.createStorageWriteRenderPipeline(value);
+ const bindGroup = this.createBindGroup(pipeline, buffer);
+
+ renderer.setBindGroup(0, bindGroup);
+ renderer.setPipeline(pipeline);
+ renderer.draw(1, 1, 0, 0);
+ }
+
+ // Write buffer via dispatch call in compute pass.
+ encodeWriteAsStorageBufferInComputePass(
+ pass: GPUComputePassEncoder,
+ buffer: GPUBuffer,
+ value: number
+ ) {
+ const pipeline = this.createStorageWriteComputePipeline(value);
+ const bindGroup = this.createBindGroup(pipeline, buffer);
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(1);
+ }
+
+ // Write buffer via BufferToBuffer copy.
+ encodeWriteByB2BCopy(encoder: GPUCommandEncoder, buffer: GPUBuffer, slot: number) {
+ const tmpBuffer = this.tmpValueBuffers[slot];
+ assert(tmpBuffer !== undefined);
+    // The write operation via b2b copy is only encoded into the command encoder; it doesn't execute immediately.
+ encoder.copyBufferToBuffer(tmpBuffer, 0, buffer, 0, Uint32Array.BYTES_PER_ELEMENT);
+ }
+
+ // Write buffer via TextureToBuffer copy.
+ encodeWriteByT2BCopy(encoder: GPUCommandEncoder, buffer: GPUBuffer, slot: number) {
+ const tmpTexture = this.tmpValueTextures[slot];
+ assert(tmpTexture !== undefined);
+    // The write operation via t2b copy is only encoded into the command encoder; it doesn't execute immediately.
+ encoder.copyTextureToBuffer(
+ { texture: tmpTexture, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
+ { buffer, bytesPerRow: 256 },
+ { width: 1, height: 1, depthOrArrayLayers: 1 }
+ );
+ }
+
+ // Write buffer via writeBuffer API on queue
+ writeByWriteBuffer(buffer: GPUBuffer, value: number) {
+ // This is not hot in profiles; optimize if this gets used more heavily.
+ const data = new Uint32Array(1).fill(value);
+ this.device.queue.writeBuffer(buffer, 0, data);
+ }
+
+ // Issue write operation via render pass, compute pass, copy, etc.
+ encodeWriteOp(
+ helper: OperationContextHelper,
+ operation: WriteOp,
+ context: OperationContext,
+ buffer: GPUBuffer,
+ writeOpSlot: number,
+ value: number
+ ) {
+ helper.ensureContext(context);
+
+ switch (operation) {
+ case 'write-buffer':
+ this.writeByWriteBuffer(buffer, value);
+ break;
+ case 'storage':
+ switch (context) {
+ case 'render-pass-encoder':
+ assert(helper.renderPassEncoder !== undefined);
+ this.encodeWriteAsStorageBufferInRenderPass(helper.renderPassEncoder, buffer, value);
+ break;
+ case 'render-bundle-encoder':
+ assert(helper.renderBundleEncoder !== undefined);
+ this.encodeWriteAsStorageBufferInRenderPass(helper.renderBundleEncoder, buffer, value);
+ break;
+ case 'compute-pass-encoder':
+ assert(helper.computePassEncoder !== undefined);
+ this.encodeWriteAsStorageBufferInComputePass(helper.computePassEncoder, buffer, value);
+ break;
+ default:
+ unreachable();
+ }
+ break;
+ case 'b2b-copy':
+ assert(helper.commandEncoder !== undefined);
+ this.encodeWriteByB2BCopy(helper.commandEncoder, buffer, writeOpSlot);
+ break;
+ case 't2b-copy':
+ assert(helper.commandEncoder !== undefined);
+ this.encodeWriteByT2BCopy(helper.commandEncoder, buffer, writeOpSlot);
+ break;
+ default:
+ unreachable();
+ }
+ }
+
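+ // Informal usage sketch (not part of the suite; `t`, `helper`, and `buffer` are assumed to be a
+ // BufferSyncTest, an OperationContextHelper, and a storage-usable GPUBuffer, respectively):
+ //   t.encodeWriteOp(helper, 'storage', 'render-pass-encoder', buffer, /* writeOpSlot */ 0, /* value */ 1);
+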
+ // Create a compute pipeline: read from the src buffer and write it into the storage dst buffer.
+ createStorageReadComputePipeline(): GPUComputePipeline {
+ const wgslCompute = `
+ struct Data {
+ a : u32
+ };
+
+ @group(0) @binding(0) var<storage, read> srcData : Data;
+ @group(0) @binding(1) var<storage, read_write> dstData : Data;
+
+ @compute @workgroup_size(1) fn main() {
+ dstData.a = srcData.a;
+ }
+ `;
+
+ return this.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: this.device.createShaderModule({
+ code: wgslCompute,
+ }),
+ entryPoint: 'main',
+ },
+ });
+ }
+
+ createBindGroupSrcDstBuffer(
+ pipeline: GPURenderPipeline | GPUComputePipeline,
+ srcBuffer: GPUBuffer,
+ dstBuffer: GPUBuffer
+ ): GPUBindGroup {
+ return this.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: { buffer: srcBuffer } },
+ { binding: 1, resource: { buffer: dstBuffer } },
+ ],
+ });
+ }
+
+ // Create a render pipeline: read from vertex/index buffer and write it into the storage dst buffer at fragment stage.
+ createVertexReadRenderPipeline(): GPURenderPipeline {
+ const wgslShaders = {
+ vertex: `
+ struct VertexOutput {
+ @builtin(position) position : vec4<f32>,
+ @location(0) @interpolate(flat) data : u32,
+ };
+
+ @vertex fn vert_main(@location(0) input: u32) -> VertexOutput {
+ var output : VertexOutput;
+ output.position = vec4<f32>(0.5, 0.5, 0.0, 1.0);
+ output.data = input;
+ return output;
+ }
+ `,
+ fragment: `
+ struct Data {
+ a : u32
+ };
+
+ @group(0) @binding(0) var<storage, read_write> data : Data;
+
+ @fragment fn frag_main(@location(0) @interpolate(flat) input : u32) -> @location(0) vec4<f32> {
+ data.a = input;
+ return vec4<f32>(); // result doesn't matter
+ }
+ `,
+ };
+
+ return this.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: this.device.createShaderModule({
+ code: wgslShaders.vertex,
+ }),
+ entryPoint: 'vert_main',
+ buffers: [
+ {
+ arrayStride: Uint32Array.BYTES_PER_ELEMENT,
+ attributes: [
+ {
+ shaderLocation: 0,
+ offset: 0,
+ format: 'uint32',
+ },
+ ],
+ },
+ ],
+ },
+ fragment: {
+ module: this.device.createShaderModule({
+ code: wgslShaders.fragment,
+ }),
+ entryPoint: 'frag_main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ primitive: { topology: 'point-list' },
+ });
+ }
+
+ // Create a render pipeline: read from uniform buffer and write it into the storage dst buffer at fragment stage.
+ createUniformReadRenderPipeline(): GPURenderPipeline {
+ const wgslShaders = {
+ vertex: kDummyVertexShader,
+ fragment: `
+ struct Data {
+ a : u32
+ };
+
+ @group(0) @binding(0) var<uniform> constant: Data;
+ @group(0) @binding(1) var<storage, read_write> data : Data;
+
+ @fragment fn frag_main() -> @location(0) vec4<f32> {
+ data.a = constant.a;
+ return vec4<f32>(); // result doesn't matter
+ }
+ `,
+ };
+
+ return this.createTrivialRenderPipeline(wgslShaders);
+ }
+
+ // Create a render pipeline: read from storage src buffer and write it into the storage dst buffer at fragment stage.
+ createStorageReadRenderPipeline(): GPURenderPipeline {
+ const wgslShaders = {
+ vertex: kDummyVertexShader,
+ fragment: `
+ struct Data {
+ a : u32
+ };
+
+ @group(0) @binding(0) var<storage, read> srcData : Data;
+ @group(0) @binding(1) var<storage, read_write> dstData : Data;
+
+ @fragment fn frag_main() -> @location(0) vec4<f32> {
+ dstData.a = srcData.a;
+ return vec4<f32>(); // result doesn't matter
+ }
+ `,
+ };
+
+ return this.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: this.device.createShaderModule({
+ code: wgslShaders.vertex,
+ }),
+ entryPoint: 'vert_main',
+ },
+ fragment: {
+ module: this.device.createShaderModule({
+ code: wgslShaders.fragment,
+ }),
+ entryPoint: 'frag_main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ primitive: { topology: 'point-list' },
+ });
+ }
+
+ // Read from the src buffer and write its value into the dst buffer via a dispatch call in a compute pass.
+ encodeReadAsStorageBufferInComputePass(
+ pass: GPUComputePassEncoder,
+ srcBuffer: GPUBuffer,
+ dstBuffer: GPUBuffer
+ ) {
+ const pipeline = this.createStorageReadComputePipeline();
+ const bindGroup = this.createBindGroupSrcDstBuffer(pipeline, srcBuffer, dstBuffer);
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(1);
+ }
+
+ // Read the src buffer as the indirect buffer for dispatchWorkgroupsIndirect; the dispatch writes the given value into the dst buffer.
+ encodeReadAsIndirectBufferInComputePass(
+ pass: GPUComputePassEncoder,
+ srcBuffer: GPUBuffer,
+ dstBuffer: GPUBuffer,
+ value: number
+ ) {
+ const pipeline = this.createStorageWriteComputePipeline(value);
+ const bindGroup = this.createBindGroup(pipeline, dstBuffer);
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroupsIndirect(srcBuffer, 0);
+ }
+
+ // Read as vertex input and write buffer via draw call in render pass. Use bundle if needed.
+ encodeReadAsVertexBufferInRenderPass(
+ renderer: GPURenderPassEncoder | GPURenderBundleEncoder,
+ srcBuffer: GPUBuffer,
+ dstBuffer: GPUBuffer
+ ) {
+ const pipeline = this.createVertexReadRenderPipeline();
+ const bindGroup = this.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer: dstBuffer } }],
+ });
+
+ renderer.setBindGroup(0, bindGroup);
+ renderer.setPipeline(pipeline);
+ renderer.setVertexBuffer(0, srcBuffer);
+ renderer.draw(1);
+ }
+
+ // Read as index input and write buffer via draw call in render pass. Use bundle if needed.
+ encodeReadAsIndexBufferInRenderPass(
+ renderer: GPURenderPassEncoder | GPURenderBundleEncoder,
+ srcBuffer: GPUBuffer,
+ dstBuffer: GPUBuffer,
+ vertexBuffer: GPUBuffer
+ ) {
+ const pipeline = this.createVertexReadRenderPipeline();
+ const bindGroup = this.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer: dstBuffer } }],
+ });
+
+ renderer.setBindGroup(0, bindGroup);
+ renderer.setPipeline(pipeline);
+ renderer.setVertexBuffer(0, vertexBuffer);
+ renderer.setIndexBuffer(srcBuffer, 'uint32');
+ renderer.drawIndexed(1);
+ }
+
+ // Read as indirect input and write buffer via draw call in render pass. Use bundle if needed.
+ encodeReadAsIndirectBufferInRenderPass(
+ renderer: GPURenderPassEncoder | GPURenderBundleEncoder,
+ srcBuffer: GPUBuffer,
+ dstBuffer: GPUBuffer,
+ vertexBuffer: GPUBuffer
+ ) {
+ const pipeline = this.createVertexReadRenderPipeline();
+ const bindGroup = this.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer: dstBuffer } }],
+ });
+
+ renderer.setBindGroup(0, bindGroup);
+ renderer.setPipeline(pipeline);
+ renderer.setVertexBuffer(0, vertexBuffer);
+ renderer.drawIndirect(srcBuffer, 0);
+ }
+
+ // Read as indexed indirect input and write buffer via draw call in render pass. Use bundle if needed.
+ encodeReadAsIndexedIndirectBufferInRenderPass(
+ renderer: GPURenderPassEncoder | GPURenderBundleEncoder,
+ srcBuffer: GPUBuffer,
+ dstBuffer: GPUBuffer,
+ vertexBuffer: GPUBuffer,
+ indexBuffer: GPUBuffer
+ ) {
+ const pipeline = this.createVertexReadRenderPipeline();
+ const bindGroup = this.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer: dstBuffer } }],
+ });
+
+ renderer.setBindGroup(0, bindGroup);
+ renderer.setPipeline(pipeline);
+ renderer.setVertexBuffer(0, vertexBuffer);
+ renderer.setIndexBuffer(indexBuffer, 'uint32');
+ renderer.drawIndexedIndirect(srcBuffer, 0);
+ }
+
+ // Read as uniform buffer and write buffer via draw call in render pass. Use bundle if needed.
+ encodeReadAsUniformBufferInRenderPass(
+ renderer: GPURenderPassEncoder | GPURenderBundleEncoder,
+ srcBuffer: GPUBuffer,
+ dstBuffer: GPUBuffer
+ ) {
+ const pipeline = this.createUniformReadRenderPipeline();
+ const bindGroup = this.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: { buffer: srcBuffer } },
+ { binding: 1, resource: { buffer: dstBuffer } },
+ ],
+ });
+
+ renderer.setBindGroup(0, bindGroup);
+ renderer.setPipeline(pipeline);
+ renderer.draw(1);
+ }
+
+ // Read as storage buffer and write buffer via draw call in render pass. Use bundle if needed.
+ encodeReadAsStorageBufferInRenderPass(
+ renderer: GPURenderPassEncoder | GPURenderBundleEncoder,
+ srcBuffer: GPUBuffer,
+ dstBuffer: GPUBuffer
+ ) {
+ const pipeline = this.createStorageReadRenderPipeline();
+ const bindGroup = this.createBindGroupSrcDstBuffer(pipeline, srcBuffer, dstBuffer);
+
+ renderer.setBindGroup(0, bindGroup);
+ renderer.setPipeline(pipeline);
+ renderer.draw(1, 1, 0, 0);
+ }
+
+ // Read and write via BufferToBuffer copy.
+ encodeReadByB2BCopy(encoder: GPUCommandEncoder, srcBuffer: GPUBuffer, dstBuffer: GPUBuffer) {
+ // The b2b copy is just encoded into the command encoder; it doesn't write immediately.
+ encoder.copyBufferToBuffer(srcBuffer, 0, dstBuffer, 0, Uint32Array.BYTES_PER_ELEMENT);
+ }
+
+ // Read the src buffer via a BufferToTexture copy, then write the dst buffer via a TextureToBuffer copy.
+ encodeReadByB2TCopy(encoder: GPUCommandEncoder, srcBuffer: GPUBuffer, dstBuffer: GPUBuffer) {
+ const tmpTexture = this.trackForCleanup(
+ this.device.createTexture({
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ format: 'r32uint',
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ })
+ );
+
+ // The b2t copy is just encoded into the command encoder; it doesn't write immediately.
+ encoder.copyBufferToTexture(
+ { buffer: srcBuffer, bytesPerRow: 256 },
+ { texture: tmpTexture, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
+ { width: 1, height: 1, depthOrArrayLayers: 1 }
+ );
+ // The t2b copy is just encoded into the command encoder; it doesn't write immediately.
+ encoder.copyTextureToBuffer(
+ { texture: tmpTexture, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
+ { buffer: dstBuffer, bytesPerRow: 256 },
+ { width: 1, height: 1, depthOrArrayLayers: 1 }
+ );
+ }
+
+ encodeReadOp(
+ helper: OperationContextHelper,
+ operation: ReadOp,
+ context: OperationContext,
+ srcBuffer: GPUBuffer,
+ dstBuffer: GPUBuffer
+ ) {
+ helper.ensureContext(context);
+
+ const renderer =
+ context === 'render-bundle-encoder' ? helper.renderBundleEncoder : helper.renderPassEncoder;
+ const computePass = context === 'compute-pass-encoder' ? helper.computePassEncoder : undefined;
+
+ switch (operation) {
+ case 'input-vertex':
+ // The srcBuffer is used as vertexBuffer.
+ // draw writes the same value in srcBuffer[0] to dstBuffer[0].
+ assert(renderer !== undefined);
+ this.encodeReadAsVertexBufferInRenderPass(renderer, srcBuffer, dstBuffer);
+ break;
+ case 'input-index':
+ // The srcBuffer is used as indexBuffer.
+ // With this vertexBuffer, drawIndexed writes the same value in srcBuffer[0] to dstBuffer[0].
+ assert(renderer !== undefined);
+ assert(this.vertexBuffer !== undefined);
+ this.encodeReadAsIndexBufferInRenderPass(renderer, srcBuffer, dstBuffer, this.vertexBuffer);
+ break;
+ case 'input-indirect':
+ // The srcBuffer is used as indirectBuffer for drawIndirect.
+ // srcBuffer[0] = 0 or 1 (vertexCount), which will decide the value written into dstBuffer to be either 0 or 1.
+ assert(renderer !== undefined);
+ assert(this.vertexBuffer !== undefined);
+ this.encodeReadAsIndirectBufferInRenderPass(
+ renderer,
+ srcBuffer,
+ dstBuffer,
+ this.vertexBuffer
+ );
+ break;
+ case 'input-indirect-index':
+ // The srcBuffer is used as indirectBuffer for drawIndexedIndirect.
+ // srcBuffer[0] = 0 or 1 (indexCount), which will decide the value written into dstBuffer to be either 0 or 1.
+ assert(renderer !== undefined);
+ assert(this.vertexBuffer !== undefined);
+ assert(this.indexBuffer !== undefined);
+ this.encodeReadAsIndexedIndirectBufferInRenderPass(
+ renderer,
+ srcBuffer,
+ dstBuffer,
+ this.vertexBuffer,
+ this.indexBuffer
+ );
+ break;
+ case 'input-indirect-dispatch':
+ // The srcBuffer is used as indirectBuffer for dispatch.
+ // srcBuffer[0] = 0 or 1 (workgroupCountX), which will decide the value written into dstBuffer to be either 0 or 1.
+ assert(computePass !== undefined);
+ this.encodeReadAsIndirectBufferInComputePass(computePass, srcBuffer, dstBuffer, 1);
+ break;
+ case 'constant-uniform':
+ // The srcBuffer is used as uniform buffer.
+ assert(renderer !== undefined);
+ this.encodeReadAsUniformBufferInRenderPass(renderer, srcBuffer, dstBuffer);
+ break;
+ case 'storage-read':
+ switch (context) {
+ case 'render-pass-encoder':
+ case 'render-bundle-encoder':
+ assert(renderer !== undefined);
+ this.encodeReadAsStorageBufferInRenderPass(renderer, srcBuffer, dstBuffer);
+ break;
+ case 'compute-pass-encoder':
+ assert(computePass !== undefined);
+ this.encodeReadAsStorageBufferInComputePass(computePass, srcBuffer, dstBuffer);
+ break;
+ default:
+ unreachable();
+ }
+ break;
+ case 'b2b-copy':
+ assert(helper.commandEncoder !== undefined);
+ this.encodeReadByB2BCopy(helper.commandEncoder, srcBuffer, dstBuffer);
+ break;
+ case 'b2t-copy':
+ assert(helper.commandEncoder !== undefined);
+ this.encodeReadByB2TCopy(helper.commandEncoder, srcBuffer, dstBuffer);
+ break;
+ default:
+ unreachable();
+ }
+ }
+
+ verifyData(buffer: GPUBuffer, expectedValue: number) {
+ // This is not hot in profiles; optimize if this gets used more heavily.
+ const bufferData = new Uint32Array(1);
+ bufferData[0] = expectedValue;
+ this.expectGPUBufferValuesEqual(buffer, bufferData);
+ }
+
+ verifyDataTwoValidValues(buffer: GPUBuffer, expectedValue1: number, expectedValue2: number) {
+ // This is not hot in profiles; optimize if this gets used more heavily.
+ const bufferData1 = new Uint32Array(1);
+ bufferData1[0] = expectedValue1;
+ const bufferData2 = new Uint32Array(1);
+ bufferData2[0] = expectedValue2;
+ this.expectGPUBufferValuesPassCheck(
+ buffer,
+ a => checkElementsEqualEither(a, [bufferData1, bufferData2]),
+ { type: Uint32Array, typedLength: 1 }
+ );
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_sync/buffer/multiple_buffers.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_sync/buffer/multiple_buffers.spec.ts
new file mode 100644
index 0000000000..081384cd37
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_sync/buffer/multiple_buffers.spec.ts
@@ -0,0 +1,354 @@
+export const description = `
+Memory Synchronization Tests for multiple buffers: read before write, read after write, and write after write.
+
+- Create multiple src buffers and initialize them to 0; wait on the fence to ensure the data is initialized.
+Write Op: write a value (say 1) into the src buffer via render pass, compute pass, copy, write buffer, etc.
+Read Op: read the value from the src buffer and write it to the dst buffer via render pass (vertex, index, indirect input, uniform, storage), compute pass, copy, etc.
+Wait on another fence, then call expectContents to verify the dst buffer value.
+ - x= write op: {storage buffer in {compute, render, render-via-bundle}, t2b copy dst, b2b copy dst, writeBuffer}
+ - x= read op: {index buffer, vertex buffer, indirect buffer (draw, draw indexed, dispatch), uniform buffer, {readonly, readwrite} storage buffer in {compute, render, render-via-bundle}, b2b copy src, b2t copy src}
+ - x= read-write sequence: {read then write, write then read, write then write}
+ - x= op context: {queue, command-encoder, compute-pass-encoder, render-pass-encoder, render-bundle-encoder}, x= op boundary: {queue-op, command-buffer, pass, execute-bundles, render-bundle}
+ - Not every context/boundary combination is valid; the checkOpsValidForContext func filters out the invalid ones.
+ - If two writes are in the same pass, the render result has looser guarantees.
+
+TODO: Tests with more than one buffer to try to stress implementations a little bit more.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import {
+ kOperationBoundaries,
+ kBoundaryInfo,
+ OperationContextHelper,
+} from '../operation_context_helper.js';
+
+import {
+ kAllReadOps,
+ kAllWriteOps,
+ BufferSyncTest,
+ checkOpsValidForContext,
+} from './buffer_sync_test.js';
+
+// The src value is what is stored in the src buffer before any operation.
+const kSrcValue = 0;
+// The op value is what the read/write operation writes into the target buffer.
+const kOpValue = 1;
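+
+// At a glance, the expected first element of the verified buffer in each sequence tested below:
+//   rw: read encoded before write -> dst buffer holds kSrcValue (0)
+//   wr: write encoded before read -> dst buffer holds kOpValue (1)
+//   ww: two writes, second wins   -> buffer holds the second value (2)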
+
+export const g = makeTestGroup(BufferSyncTest);
+
+g.test('rw')
+ .desc(
+ `
+ Perform a 'read' operation on multiple buffers, followed by a 'write' operation.
+ Operations are separated by a 'boundary' (pass, encoder, queue-op, etc.).
+ Test that the results are synchronized.
+ The read should not see the contents written by the subsequent write.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('boundary', kOperationBoundaries)
+ .expand('_context', p => kBoundaryInfo[p.boundary].contexts)
+ .expandWithParams(function* ({ _context }) {
+ for (const readOp of kAllReadOps) {
+ for (const writeOp of kAllWriteOps) {
+ if (checkOpsValidForContext([readOp, writeOp], _context)) {
+ yield {
+ readOp,
+ readContext: _context[0],
+ writeOp,
+ writeContext: _context[1],
+ };
+ }
+ }
+ }
+ })
+ )
+ .fn(async t => {
+ const { readContext, readOp, writeContext, writeOp, boundary } = t.params;
+ const helper = new OperationContextHelper(t);
+
+ const srcBuffers: GPUBuffer[] = [];
+ const dstBuffers: GPUBuffer[] = [];
+
+ const kBufferCount = 4;
+ for (let i = 0; i < kBufferCount; i++) {
+ const { srcBuffer, dstBuffer } = await t.createBuffersForReadOp(readOp, kSrcValue, kOpValue);
+ srcBuffers.push(srcBuffer);
+ dstBuffers.push(dstBuffer);
+ }
+
+ await t.createIntermediateBuffersAndTexturesForWriteOp(writeOp, 0, kOpValue);
+
+ // The read op will read from the src buffers and write to the dst buffers based on what it reads.
+ // A boundary separates the read operations from the write operations. The write op will also
+ // write the given op value into each src buffer. Since the write op happens after the read op,
+ // we expect each src value to end up in its corresponding dst buffer.
+ for (let i = 0; i < kBufferCount; i++) {
+ t.encodeReadOp(helper, readOp, readContext, srcBuffers[i], dstBuffers[i]);
+ }
+
+ helper.ensureBoundary(boundary);
+
+ for (let i = 0; i < kBufferCount; i++) {
+ t.encodeWriteOp(helper, writeOp, writeContext, srcBuffers[i], 0, kOpValue);
+ }
+
+ helper.ensureSubmit();
+
+ for (let i = 0; i < kBufferCount; i++) {
+ // Only verify the value of the first element of each dstBuffer.
+ t.verifyData(dstBuffers[i], kSrcValue);
+ }
+ });
+
+g.test('wr')
+ .desc(
+ `
+ Perform a 'write' operation on multiple buffers, followed by a 'read' operation.
+ Operations are separated by a 'boundary' (pass, encoder, queue-op, etc.).
+ Test that the results are synchronized.
+ The read should see exactly the contents written by the previous write.`
+ )
+ .params(u =>
+ u //
+ .combine('boundary', kOperationBoundaries)
+ .expand('_context', p => kBoundaryInfo[p.boundary].contexts)
+ .expandWithParams(function* ({ _context }) {
+ for (const readOp of kAllReadOps) {
+ for (const writeOp of kAllWriteOps) {
+ if (checkOpsValidForContext([readOp, writeOp], _context)) {
+ yield {
+ readOp,
+ readContext: _context[0],
+ writeOp,
+ writeContext: _context[1],
+ };
+ }
+ }
+ }
+ })
+ )
+ .fn(async t => {
+ const { readContext, readOp, writeContext, writeOp, boundary } = t.params;
+ const helper = new OperationContextHelper(t);
+
+ const srcBuffers: GPUBuffer[] = [];
+ const dstBuffers: GPUBuffer[] = [];
+
+ const kBufferCount = 4;
+
+ for (let i = 0; i < kBufferCount; i++) {
+ const { srcBuffer, dstBuffer } = await t.createBuffersForReadOp(readOp, kSrcValue, kOpValue);
+
+ srcBuffers.push(srcBuffer);
+ dstBuffers.push(dstBuffer);
+ }
+
+ await t.createIntermediateBuffersAndTexturesForWriteOp(writeOp, 0, kOpValue);
+
+ // The write op will write the given op value into the src buffers.
+ // The read op will read from the src buffers and write to the dst buffers based on what it reads.
+ // Since the write op happens before the read op, we expect the op value to be in the dst
+ // buffers.
+ for (let i = 0; i < kBufferCount; i++) {
+ t.encodeWriteOp(helper, writeOp, writeContext, srcBuffers[i], 0, kOpValue);
+ }
+
+ helper.ensureBoundary(boundary);
+
+ for (let i = 0; i < kBufferCount; i++) {
+ t.encodeReadOp(helper, readOp, readContext, srcBuffers[i], dstBuffers[i]);
+ }
+
+ helper.ensureSubmit();
+
+ for (let i = 0; i < kBufferCount; i++) {
+ // Only verify the value of the first element of the dstBuffer
+ t.verifyData(dstBuffers[i], kOpValue);
+ }
+ });
+
+g.test('ww')
+ .desc(
+ `
+ Perform a 'first' write operation on multiple buffers, followed by a 'second' write operation.
+ Operations are separated by a 'boundary' (pass, encoder, queue-op, etc.).
+ Test that the results are synchronized.
+ The second write should overwrite the contents of the first.`
+ )
+ .params(u =>
+ u //
+ .combine('boundary', kOperationBoundaries)
+ .expand('_context', p => kBoundaryInfo[p.boundary].contexts)
+ .expandWithParams(function* ({ _context }) {
+ for (const firstWriteOp of kAllWriteOps) {
+ for (const secondWriteOp of kAllWriteOps) {
+ if (checkOpsValidForContext([firstWriteOp, secondWriteOp], _context)) {
+ yield {
+ writeOps: [firstWriteOp, secondWriteOp],
+ contexts: _context,
+ };
+ }
+ }
+ }
+ })
+ )
+ .fn(async t => {
+ const { writeOps, contexts, boundary } = t.params;
+ const helper = new OperationContextHelper(t);
+
+ const buffers: GPUBuffer[] = [];
+
+ const kBufferCount = 4;
+
+ for (let i = 0; i < kBufferCount; i++) {
+ const buffer = await t.createBufferWithValue(0);
+
+ buffers.push(buffer);
+ }
+
+ await t.createIntermediateBuffersAndTexturesForWriteOp(writeOps[0], 0, 1);
+ await t.createIntermediateBuffersAndTexturesForWriteOp(writeOps[1], 1, 2);
+
+ for (let i = 0; i < kBufferCount; i++) {
+ t.encodeWriteOp(helper, writeOps[0], contexts[0], buffers[i], 0, 1);
+ }
+
+ helper.ensureBoundary(boundary);
+
+ for (let i = 0; i < kBufferCount; i++) {
+ t.encodeWriteOp(helper, writeOps[1], contexts[1], buffers[i], 1, 2);
+ }
+
+ helper.ensureSubmit();
+
+ for (let i = 0; i < kBufferCount; i++) {
+ t.verifyData(buffers[i], 2);
+ }
+ });
+
+g.test('multiple_pairs_of_draws_in_one_render_pass')
+ .desc(
+ `
+ Test write-after-write operations on multiple buffers within one render pass. The first write
+ writes buffer index * 2 + 1 into each storage buffer. The second write writes
+ buffer index * 2 + 2 into the same buffers within the same pass. The expected data in each buffer
+ is either buffer index * 2 + 1 or buffer index * 2 + 2. Each draw may optionally use a render bundle.
+ `
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('firstDrawUseBundle', [false, true])
+ .combine('secondDrawUseBundle', [false, true])
+ )
+ .fn(async t => {
+ const { firstDrawUseBundle, secondDrawUseBundle } = t.params;
+
+ const encoder = t.device.createCommandEncoder();
+ const passEncoder = t.beginSimpleRenderPass(encoder);
+
+ const kBufferCount = 4;
+ const buffers: GPUBuffer[] = [];
+ for (let b = 0; b < kBufferCount; ++b) {
+ const buffer = await t.createBufferWithValue(0);
+ buffers.push(buffer);
+
+ const useBundle = [firstDrawUseBundle, secondDrawUseBundle];
+ for (let i = 0; i < 2; ++i) {
+ const renderEncoder = useBundle[i]
+ ? t.device.createRenderBundleEncoder({
+ colorFormats: ['rgba8unorm'],
+ })
+ : passEncoder;
+ const pipeline = t.createStorageWriteRenderPipeline(2 * b + i + 1);
+ const bindGroup = t.createBindGroup(pipeline, buffer);
+ renderEncoder.setPipeline(pipeline);
+ renderEncoder.setBindGroup(0, bindGroup);
+ renderEncoder.draw(1, 1, 0, 0);
+ if (useBundle[i])
+ passEncoder.executeBundles([(renderEncoder as GPURenderBundleEncoder).finish()]);
+ }
+ }
+
+ passEncoder.end();
+ t.device.queue.submit([encoder.finish()]);
+ for (let b = 0; b < kBufferCount; ++b) {
+ t.verifyDataTwoValidValues(buffers[b], 2 * b + 1, 2 * b + 2);
+ }
+ });
+
+g.test('multiple_pairs_of_draws_in_one_render_bundle')
+ .desc(
+ `
+ Test write-after-write operations on multiple buffers within one render bundle. The first write
+ writes buffer index * 2 + 1 into each storage buffer. The second write writes
+ buffer index * 2 + 2 into the same buffers within the same bundle. The expected data in each buffer
+ is either buffer index * 2 + 1 or buffer index * 2 + 2.
+ `
+ )
+ .fn(async t => {
+ const encoder = t.device.createCommandEncoder();
+ const passEncoder = t.beginSimpleRenderPass(encoder);
+ const renderEncoder = t.device.createRenderBundleEncoder({
+ colorFormats: ['rgba8unorm'],
+ });
+
+ const kBufferCount = 4;
+ const buffers: GPUBuffer[] = [];
+ for (let b = 0; b < kBufferCount; ++b) {
+ const buffer = await t.createBufferWithValue(0);
+ buffers.push(buffer);
+
+ for (let i = 0; i < 2; ++i) {
+ const pipeline = t.createStorageWriteRenderPipeline(2 * b + i + 1);
+ const bindGroup = t.createBindGroup(pipeline, buffer);
+ renderEncoder.setPipeline(pipeline);
+ renderEncoder.setBindGroup(0, bindGroup);
+ renderEncoder.draw(1, 1, 0, 0);
+ }
+ }
+
+ passEncoder.executeBundles([renderEncoder.finish()]);
+ passEncoder.end();
+ t.device.queue.submit([encoder.finish()]);
+ for (let b = 0; b < kBufferCount; ++b) {
+ t.verifyDataTwoValidValues(buffers[b], 2 * b + 1, 2 * b + 2);
+ }
+ });
+
+g.test('multiple_pairs_of_dispatches_in_one_compute_pass')
+ .desc(
+ `
+ Test write-after-write operations on multiple buffers within one compute pass. The first write
+ writes buffer index * 2 + 1 into each storage buffer. The second write writes
+ buffer index * 2 + 2 into the same buffers within the same pass. The expected data in each buffer
+ is buffer index * 2 + 2.
+ `
+ )
+ .fn(async t => {
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+
+ const kBufferCount = 4;
+ const buffers: GPUBuffer[] = [];
+ for (let b = 0; b < kBufferCount; ++b) {
+ const buffer = await t.createBufferWithValue(0);
+ buffers.push(buffer);
+
+ for (let i = 0; i < 2; ++i) {
+ const pipeline = t.createStorageWriteComputePipeline(2 * b + i + 1);
+ const bindGroup = t.createBindGroup(pipeline, buffer);
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(1);
+ }
+ }
+
+ pass.end();
+
+ t.device.queue.submit([encoder.finish()]);
+ for (let b = 0; b < kBufferCount; ++b) {
+ t.verifyData(buffers[b], 2 * b + 2);
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_sync/buffer/single_buffer.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_sync/buffer/single_buffer.spec.ts
new file mode 100644
index 0000000000..817d6465cc
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_sync/buffer/single_buffer.spec.ts
@@ -0,0 +1,257 @@
+export const description = `
+Memory Synchronization Tests for Buffer: read before write, read after write, and write after write.
+
+- Create a src buffer and initialize it to 0; wait on the fence to ensure the data is initialized.
+Write Op: write a value (say 1) into the src buffer via render pass, compute pass, copy, write buffer, etc.
+Read Op: read the value from the src buffer and write it to the dst buffer via render pass (vertex, index, indirect input, uniform, storage), compute pass, copy, etc.
+Wait on another fence, then call expectContents to verify the dst buffer value.
+ - x= write op: {storage buffer in {compute, render, render-via-bundle}, t2b copy dst, b2b copy dst, writeBuffer}
+ - x= read op: {index buffer, vertex buffer, indirect buffer (draw, draw indexed, dispatch), uniform buffer, {readonly, readwrite} storage buffer in {compute, render, render-via-bundle}, b2b copy src, b2t copy src}
+ - x= read-write sequence: {read then write, write then read, write then write}
+ - x= op context: {queue, command-encoder, compute-pass-encoder, render-pass-encoder, render-bundle-encoder}, x= op boundary: {queue-op, command-buffer, pass, execute-bundles, render-bundle}
+ - Not every context/boundary combination is valid; the checkOpsValidForContext func filters out the invalid ones.
+ - If two writes are in the same pass, the render result has looser guarantees.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import {
+ kOperationBoundaries,
+ kBoundaryInfo,
+ OperationContextHelper,
+} from '../operation_context_helper.js';
+
+import {
+ kAllReadOps,
+ kAllWriteOps,
+ BufferSyncTest,
+ checkOpsValidForContext,
+} from './buffer_sync_test.js';
+
+// The src value is what is stored in the src buffer before any operation.
+const kSrcValue = 0;
+// The op value is what the read/write operation writes into the target buffer.
+const kOpValue = 1;
+
+export const g = makeTestGroup(BufferSyncTest);
+
+g.test('rw')
+ .desc(
+ `
+ Perform a 'read' operation on a buffer, followed by a 'write' operation.
+ Operations are separated by a 'boundary' (pass, encoder, queue-op, etc.).
+ Test that the results are synchronized.
+ The read should not see the contents written by the subsequent write.`
+ )
+ .params(u =>
+ u //
+ .combine('boundary', kOperationBoundaries)
+ .expand('_context', p => kBoundaryInfo[p.boundary].contexts)
+ .expandWithParams(function* ({ _context }) {
+ for (const readOp of kAllReadOps) {
+ for (const writeOp of kAllWriteOps) {
+ if (checkOpsValidForContext([readOp, writeOp], _context)) {
+ yield {
+ readOp,
+ readContext: _context[0],
+ writeOp,
+ writeContext: _context[1],
+ };
+ }
+ }
+ }
+ })
+ )
+ .fn(async t => {
+ const { readContext, readOp, writeContext, writeOp, boundary } = t.params;
+ const helper = new OperationContextHelper(t);
+
+ const { srcBuffer, dstBuffer } = await t.createBuffersForReadOp(readOp, kSrcValue, kOpValue);
+ await t.createIntermediateBuffersAndTexturesForWriteOp(writeOp, 0, kOpValue);
+
+ // The read op will read from the src buffer and write to the dst buffer based on what it reads.
+ // The write op will also write the given op value into the src buffer.
+ // Since the write op happens after the read op, we expect the src value to be in the dst buffer.
+ t.encodeReadOp(helper, readOp, readContext, srcBuffer, dstBuffer);
+ helper.ensureBoundary(boundary);
+ t.encodeWriteOp(helper, writeOp, writeContext, srcBuffer, 0, kOpValue);
+ helper.ensureSubmit();
+ // Only verify the value of the first element of the dstBuffer
+ t.verifyData(dstBuffer, kSrcValue);
+ });
+
+g.test('wr')
+ .desc(
+ `
+ Perform a 'write' operation on a buffer, followed by a 'read' operation.
+ Operations are separated by a 'boundary' (pass, encoder, queue-op, etc.).
+ Test that the results are synchronized.
+ The read should see exactly the contents written by the previous write.`
+ )
+ .params(u =>
+ u //
+ .combine('boundary', kOperationBoundaries)
+ .expand('_context', p => kBoundaryInfo[p.boundary].contexts)
+ .expandWithParams(function* ({ _context }) {
+ for (const readOp of kAllReadOps) {
+ for (const writeOp of kAllWriteOps) {
+ if (checkOpsValidForContext([readOp, writeOp], _context)) {
+ yield {
+ readOp,
+ readContext: _context[0],
+ writeOp,
+ writeContext: _context[1],
+ };
+ }
+ }
+ }
+ })
+ )
+ .fn(async t => {
+ const { readContext, readOp, writeContext, writeOp, boundary } = t.params;
+ const helper = new OperationContextHelper(t);
+
+ const { srcBuffer, dstBuffer } = await t.createBuffersForReadOp(readOp, kSrcValue, kOpValue);
+ await t.createIntermediateBuffersAndTexturesForWriteOp(writeOp, 0, kOpValue);
+
+ // The write op will write the given op value into the src buffer.
+ // The read op will read from the src buffer and write to the dst buffer based on what it reads.
+ // Since the write op happens before the read op, we expect the op value to be in the dst buffer.
+ t.encodeWriteOp(helper, writeOp, writeContext, srcBuffer, 0, kOpValue);
+ helper.ensureBoundary(boundary);
+ t.encodeReadOp(helper, readOp, readContext, srcBuffer, dstBuffer);
+ helper.ensureSubmit();
+ // Only verify the value of the first element of the dstBuffer
+ t.verifyData(dstBuffer, kOpValue);
+ });
+
+g.test('ww')
+ .desc(
+ `
+ Perform a 'first' write operation on a buffer, followed by a 'second' write operation.
+ Operations are separated by a 'boundary' (pass, encoder, queue-op, etc.).
+ Test that the results are synchronized.
+ The second write should overwrite the contents of the first.`
+ )
+ .params(u =>
+ u //
+ .combine('boundary', kOperationBoundaries)
+ .expand('_context', p => kBoundaryInfo[p.boundary].contexts)
+ .expandWithParams(function* ({ _context }) {
+ for (const firstWriteOp of kAllWriteOps) {
+ for (const secondWriteOp of kAllWriteOps) {
+ if (checkOpsValidForContext([firstWriteOp, secondWriteOp], _context)) {
+ yield {
+ writeOps: [firstWriteOp, secondWriteOp],
+ contexts: _context,
+ };
+ }
+ }
+ }
+ })
+ )
+ .fn(async t => {
+ const { writeOps, contexts, boundary } = t.params;
+ const helper = new OperationContextHelper(t);
+
+ const buffer = await t.createBufferWithValue(0);
+ await t.createIntermediateBuffersAndTexturesForWriteOp(writeOps[0], 0, 1);
+ await t.createIntermediateBuffersAndTexturesForWriteOp(writeOps[1], 1, 2);
+
+ t.encodeWriteOp(helper, writeOps[0], contexts[0], buffer, 0, 1);
+ helper.ensureBoundary(boundary);
+ t.encodeWriteOp(helper, writeOps[1], contexts[1], buffer, 1, 2);
+ helper.ensureSubmit();
+ t.verifyData(buffer, 2);
+ });
+
+// Cases with loose render result guarantees.
+
+g.test('two_draws_in_the_same_render_pass')
+ .desc(
+ `Test write-after-write operations in the same render pass. The first write will write 1 into
+ a storage buffer. The second write will write 2 into the same buffer in the same pass. The expected
+ data in the buffer is either 1 or 2. Each draw may optionally use a render bundle.`
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('firstDrawUseBundle', [false, true])
+ .combine('secondDrawUseBundle', [false, true])
+ )
+ .fn(async t => {
+ const { firstDrawUseBundle, secondDrawUseBundle } = t.params;
+ const buffer = await t.createBufferWithValue(0);
+ const encoder = t.device.createCommandEncoder();
+ const passEncoder = t.beginSimpleRenderPass(encoder);
+
+ const useBundle = [firstDrawUseBundle, secondDrawUseBundle];
+ for (let i = 0; i < 2; ++i) {
+ const renderEncoder = useBundle[i]
+ ? t.device.createRenderBundleEncoder({
+ colorFormats: ['rgba8unorm'],
+ })
+ : passEncoder;
+ const pipeline = t.createStorageWriteRenderPipeline(i + 1);
+ const bindGroup = t.createBindGroup(pipeline, buffer);
+ renderEncoder.setPipeline(pipeline);
+ renderEncoder.setBindGroup(0, bindGroup);
+ renderEncoder.draw(1, 1, 0, 0);
+ if (useBundle[i])
+ passEncoder.executeBundles([(renderEncoder as GPURenderBundleEncoder).finish()]);
+ }
+
+ passEncoder.end();
+ t.device.queue.submit([encoder.finish()]);
+ t.verifyDataTwoValidValues(buffer, 1, 2);
+ });
+
+g.test('two_draws_in_the_same_render_bundle')
+ .desc(
+ `Test write-after-write operations in the same render bundle. The first write will write 1 into
+ a storage buffer. The second write will write 2 into the same buffer in the same bundle. The expected
+ data in the buffer is either 1 or 2.`
+ )
+ .fn(async t => {
+ const buffer = await t.createBufferWithValue(0);
+ const encoder = t.device.createCommandEncoder();
+ const passEncoder = t.beginSimpleRenderPass(encoder);
+ const renderEncoder = t.device.createRenderBundleEncoder({
+ colorFormats: ['rgba8unorm'],
+ });
+
+ for (let i = 0; i < 2; ++i) {
+ const pipeline = t.createStorageWriteRenderPipeline(i + 1);
+ const bindGroup = t.createBindGroup(pipeline, buffer);
+ renderEncoder.setPipeline(pipeline);
+ renderEncoder.setBindGroup(0, bindGroup);
+ renderEncoder.draw(1, 1, 0, 0);
+ }
+
+ passEncoder.executeBundles([renderEncoder.finish()]);
+ passEncoder.end();
+ t.device.queue.submit([encoder.finish()]);
+ t.verifyDataTwoValidValues(buffer, 1, 2);
+ });
+
+g.test('two_dispatches_in_the_same_compute_pass')
+ .desc(
+ `Test write-after-write operations in the same compute pass. The first write will write 1 into
+ a storage buffer. The second write will write 2 into the same buffer in the same pass. The expected
+ data in the buffer is 2.`
+ )
+ .fn(async t => {
+ const buffer = await t.createBufferWithValue(0);
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+
+ for (let i = 0; i < 2; ++i) {
+ const pipeline = t.createStorageWriteComputePipeline(i + 1);
+ const bindGroup = t.createBindGroup(pipeline, buffer);
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(1);
+ }
+
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+ t.verifyData(buffer, 2);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_sync/operation_context_helper.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_sync/operation_context_helper.ts
new file mode 100644
index 0000000000..8a397de563
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_sync/operation_context_helper.ts
@@ -0,0 +1,330 @@
+import { assert, unreachable } from '../../../../common/util/util.js';
+import { EncodableTextureFormat } from '../../../format_info.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+/**
+ * Boundary between the first operation, and the second operation.
+ */
+export const kOperationBoundaries = [
+ 'queue-op', // Operations are performed in different queue operations (submit, writeTexture).
+ 'command-buffer', // Operations are in different command buffers.
+ 'pass', // Operations are in different passes.
+ 'execute-bundles', // Operations are in different executeBundles(...) calls
+ 'render-bundle', // Operations are in different render bundles.
+ 'dispatch', // Operations are in different dispatches.
+ 'draw', // Operations are in different draws.
+] as const;
+export type OperationBoundary = (typeof kOperationBoundaries)[number];
+
+/**
+ * Context a particular operation is permitted in.
+ * These contexts should be sorted such that the first is the most top-level
+ * context, and the last is most nested (inside a render bundle, in a render pass, ...).
+ */
+export const kOperationContexts = [
+ 'queue', // Operation occurs on the GPUQueue object
+ 'command-encoder', // Operation may be encoded in a GPUCommandEncoder.
+ 'compute-pass-encoder', // Operation may be encoded in a GPUComputePassEncoder.
+ 'render-pass-encoder', // Operation may be encoded in a GPURenderPassEncoder.
+ 'render-bundle-encoder', // Operation may be encoded in a GPURenderBundleEncoder.
+] as const;
+export type OperationContext = (typeof kOperationContexts)[number];
+
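+// Note: ensureContext (below) relies on this ordering and treats the context with the smaller
+// index as the common ancestor when transitioning between two contexts. For example (informal):
+//   kOperationContexts[
+//     Math.min(
+//       kOperationContexts.indexOf('render-pass-encoder'),
+//       kOperationContexts.indexOf('command-encoder')
+//     )
+//   ] === 'command-encoder'
+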
+interface BoundaryInfo {
+ readonly contexts: [OperationContext, OperationContext][];
+ // Add fields as needed
+}
+
+function combineContexts(
+ as: readonly OperationContext[],
+ bs: readonly OperationContext[]
+): [OperationContext, OperationContext][] {
+ const result: [OperationContext, OperationContext][] = [];
+ for (const a of as) {
+ for (const b of bs) {
+ result.push([a, b]);
+ }
+ }
+ return result;
+}
+
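+// combineContexts builds the full Cartesian product of ordered context pairs, e.g. (informal):
+//   combineContexts(['queue', 'command-encoder'], ['queue', 'command-encoder'])
+//   // -> [['queue', 'queue'], ['queue', 'command-encoder'],
+//   //     ['command-encoder', 'queue'], ['command-encoder', 'command-encoder']]
+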
+const queueContexts = combineContexts(kOperationContexts, kOperationContexts);
+const commandBufferContexts = combineContexts(
+ kOperationContexts.filter(c => c !== 'queue'),
+ kOperationContexts.filter(c => c !== 'queue')
+);
+
+/**
+ * Mapping of OperationBoundary to a set of OperationContext pairs.
+ * The boundary is capable of separating operations in those two contexts.
+ */
+export const kBoundaryInfo: {
+ readonly [k in OperationBoundary]: BoundaryInfo;
+} = {
+ 'queue-op': {
+ contexts: queueContexts,
+ },
+ 'command-buffer': {
+ contexts: commandBufferContexts,
+ },
+ pass: {
+ contexts: [
+ ['compute-pass-encoder', 'compute-pass-encoder'],
+ ['compute-pass-encoder', 'render-pass-encoder'],
+ ['render-pass-encoder', 'compute-pass-encoder'],
+ ['render-pass-encoder', 'render-pass-encoder'],
+ ['render-bundle-encoder', 'render-pass-encoder'],
+ ['render-pass-encoder', 'render-bundle-encoder'],
+ ['render-bundle-encoder', 'render-bundle-encoder'],
+ ],
+ },
+ 'execute-bundles': {
+ contexts: [['render-bundle-encoder', 'render-bundle-encoder']],
+ },
+ 'render-bundle': {
+ contexts: [
+ ['render-bundle-encoder', 'render-pass-encoder'],
+ ['render-pass-encoder', 'render-bundle-encoder'],
+ ['render-bundle-encoder', 'render-bundle-encoder'],
+ ],
+ },
+ dispatch: {
+ contexts: [['compute-pass-encoder', 'compute-pass-encoder']],
+ },
+ draw: {
+ contexts: [
+ ['render-pass-encoder', 'render-pass-encoder'],
+ ['render-bundle-encoder', 'render-pass-encoder'],
+ ['render-pass-encoder', 'render-bundle-encoder'],
+ ],
+ },
+};
+
+export class OperationContextHelper {
+ // We start at the queue context which is top-level.
+ protected currentContext: OperationContext = 'queue';
+
+ // Set based on the current context.
+ queue: GPUQueue;
+ commandEncoder?: GPUCommandEncoder;
+ computePassEncoder?: GPUComputePassEncoder;
+ renderPassEncoder?: GPURenderPassEncoder;
+ renderBundleEncoder?: GPURenderBundleEncoder;
+
+ protected t: GPUTest;
+ protected device: GPUDevice;
+
+ protected commandBuffers: GPUCommandBuffer[] = [];
+ protected renderBundles: GPURenderBundle[] = [];
+
+ public readonly kTextureSize = [4, 4] as const;
+ public readonly kTextureFormat: EncodableTextureFormat = 'rgba8unorm';
+
+ constructor(t: GPUTest) {
+ this.t = t;
+ this.device = t.device;
+ this.queue = t.device.queue;
+ }
+
+ // Ensure that all encoded commands are finished and submitted.
+ ensureSubmit() {
+ this.ensureContext('queue');
+ this.flushCommandBuffers();
+ }
+
+ private popContext(): GPURenderBundle | GPUCommandBuffer | null {
+ switch (this.currentContext) {
+ case 'queue':
+ unreachable();
+ break;
+ case 'command-encoder': {
+ assert(this.commandEncoder !== undefined);
+ const commandBuffer = this.commandEncoder.finish();
+ this.commandEncoder = undefined;
+ this.currentContext = 'queue';
+ return commandBuffer;
+ }
+ case 'compute-pass-encoder':
+ assert(this.computePassEncoder !== undefined);
+ this.computePassEncoder.end();
+ this.computePassEncoder = undefined;
+ this.currentContext = 'command-encoder';
+ break;
+ case 'render-pass-encoder':
+ assert(this.renderPassEncoder !== undefined);
+ this.renderPassEncoder.end();
+ this.renderPassEncoder = undefined;
+ this.currentContext = 'command-encoder';
+ break;
+ case 'render-bundle-encoder': {
+ assert(this.renderBundleEncoder !== undefined);
+ const renderBundle = this.renderBundleEncoder.finish();
+ this.renderBundleEncoder = undefined;
+ this.currentContext = 'render-pass-encoder';
+ return renderBundle;
+ }
+ }
+ return null;
+ }
+
+ private makeDummyAttachment(): GPURenderPassColorAttachment {
+ const texture = this.t.trackForCleanup(
+ this.device.createTexture({
+ format: this.kTextureFormat,
+ size: this.kTextureSize,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ })
+ );
+ return {
+ view: texture.createView(),
+ loadOp: 'load',
+ storeOp: 'store',
+ };
+ }
+
+ ensureContext(context: OperationContext) {
+ // Find the common ancestor so we can transition from currentContext -> context.
+ const ancestorContext =
+ kOperationContexts[
+ Math.min(
+ kOperationContexts.indexOf(context),
+ kOperationContexts.indexOf(this.currentContext)
+ )
+ ];
+
+ // Pop the context until we're at the common ancestor.
+ while (this.currentContext !== ancestorContext) {
+ // About to pop the render pass encoder. Execute any outstanding render bundles.
+ if (this.currentContext === 'render-pass-encoder') {
+ this.flushRenderBundles();
+ }
+
+ const result = this.popContext();
+ if (result) {
+ if (result instanceof GPURenderBundle) {
+ this.renderBundles.push(result);
+ } else {
+ this.commandBuffers.push(result);
+ }
+ }
+ }
+
+ if (this.currentContext === context) {
+ return;
+ }
+
+ switch (context) {
+ case 'queue':
+ unreachable();
+ break;
+ case 'command-encoder':
+ assert(this.currentContext === 'queue');
+ this.commandEncoder = this.device.createCommandEncoder();
+ break;
+ case 'compute-pass-encoder':
+ switch (this.currentContext) {
+ case 'queue':
+ this.commandEncoder = this.device.createCommandEncoder();
+ // fallthrough
+ case 'command-encoder':
+ assert(this.commandEncoder !== undefined);
+ this.computePassEncoder = this.commandEncoder.beginComputePass();
+ break;
+ case 'compute-pass-encoder':
+ case 'render-bundle-encoder':
+ case 'render-pass-encoder':
+ unreachable();
+ }
+ break;
+ case 'render-pass-encoder':
+ switch (this.currentContext) {
+ case 'queue':
+ this.commandEncoder = this.device.createCommandEncoder();
+ // fallthrough
+ case 'command-encoder':
+ assert(this.commandEncoder !== undefined);
+ this.renderPassEncoder = this.commandEncoder.beginRenderPass({
+ colorAttachments: [this.makeDummyAttachment()],
+ });
+ break;
+ case 'render-pass-encoder':
+ case 'render-bundle-encoder':
+ case 'compute-pass-encoder':
+ unreachable();
+ }
+ break;
+ case 'render-bundle-encoder':
+ switch (this.currentContext) {
+ case 'queue':
+ this.commandEncoder = this.device.createCommandEncoder();
+ // fallthrough
+ case 'command-encoder':
+ assert(this.commandEncoder !== undefined);
+ this.renderPassEncoder = this.commandEncoder.beginRenderPass({
+ colorAttachments: [this.makeDummyAttachment()],
+ });
+ // fallthrough
+ case 'render-pass-encoder':
+ this.renderBundleEncoder = this.device.createRenderBundleEncoder({
+ colorFormats: [this.kTextureFormat],
+ });
+ break;
+ case 'render-bundle-encoder':
+ case 'compute-pass-encoder':
+ unreachable();
+ }
+ break;
+ }
+ this.currentContext = context;
+ }
+
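+ // Informal example of the transition logic above: calling ensureContext('command-encoder') while
+ // currently at 'render-bundle-encoder' first finishes the bundle encoder (collecting a
+ // GPURenderBundle), then executes any outstanding bundles on the render pass and ends it,
+ // leaving the helper back at the 'command-encoder' context.
+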
+ private flushRenderBundles() {
+ assert(this.renderPassEncoder !== undefined);
+ if (this.renderBundles.length) {
+ this.renderPassEncoder.executeBundles(this.renderBundles);
+ this.renderBundles = [];
+ }
+ }
+
+ private flushCommandBuffers() {
+ if (this.commandBuffers.length) {
+ this.queue.submit(this.commandBuffers);
+ this.commandBuffers = [];
+ }
+ }
+
+ ensureBoundary(boundary: OperationBoundary) {
+ switch (boundary) {
+ case 'command-buffer':
+ this.ensureContext('queue');
+ break;
+ case 'queue-op':
+ this.ensureContext('queue');
+ // Submit any GPUCommandBuffers so the next one is in a separate submit.
+ this.flushCommandBuffers();
+ break;
+ case 'dispatch':
+ // Nothing to do to separate dispatches.
+ assert(this.currentContext === 'compute-pass-encoder');
+ break;
+ case 'draw':
+ // Nothing to do to separate draws.
+ assert(
+ this.currentContext === 'render-pass-encoder' ||
+ this.currentContext === 'render-bundle-encoder'
+ );
+ break;
+ case 'pass':
+ this.ensureContext('command-encoder');
+ break;
+ case 'render-bundle':
+ this.ensureContext('render-pass-encoder');
+ break;
+ case 'execute-bundles':
+ this.ensureContext('render-pass-encoder');
+ // Execute any GPURenderBundles so the next one is in a separate executeBundles.
+ this.flushRenderBundles();
+ break;
+ }
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_sync/texture/same_subresource.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_sync/texture/same_subresource.spec.ts
new file mode 100644
index 0000000000..3eed7a04ee
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_sync/texture/same_subresource.spec.ts
@@ -0,0 +1,709 @@
+export const description = `
+Memory Synchronization Tests for Texture: read before write, read after write, and write after write to the same subresource.
+
+- TODO: Test synchronization between multiple queues.
+- TODO: Test depth/stencil attachments.
+- TODO: Use non-solid-color texture contents [2]
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { assert, memcpy, unreachable } from '../../../../../common/util/util.js';
+import { EncodableTextureFormat } from '../../../../format_info.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { align } from '../../../../util/math.js';
+import { getTextureCopyLayout } from '../../../../util/texture/layout.js';
+import {
+ kTexelRepresentationInfo,
+ PerTexelComponent,
+} from '../../../../util/texture/texel_data.js';
+import {
+ kOperationBoundaries,
+ OperationContext,
+ kBoundaryInfo,
+ OperationContextHelper,
+} from '../operation_context_helper.js';
+
+import {
+ kAllReadOps,
+ kAllWriteOps,
+ checkOpsValidForContext,
+ Op,
+ kOpInfo,
+} from './texture_sync_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+const fullscreenQuadWGSL = `
+ struct VertexOutput {
+ @builtin(position) Position : vec4<f32>
+ };
+
+ @vertex fn vert_main(@builtin(vertex_index) VertexIndex : u32) -> VertexOutput {
+ var pos = array<vec2<f32>, 6>(
+ vec2<f32>( 1.0, 1.0),
+ vec2<f32>( 1.0, -1.0),
+ vec2<f32>(-1.0, -1.0),
+ vec2<f32>( 1.0, 1.0),
+ vec2<f32>(-1.0, -1.0),
+ vec2<f32>(-1.0, 1.0));
+
+ var output : VertexOutput;
+ output.Position = vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+ return output;
+ }
+`;
+
+class TextureSyncTestHelper extends OperationContextHelper {
+ private texture: GPUTexture;
+
+ public override readonly kTextureSize = [4, 4] as const;
+ public override readonly kTextureFormat: EncodableTextureFormat = 'rgba8unorm';
+
+ constructor(
+ t: GPUTest,
+ textureCreationParams: {
+ usage: GPUTextureUsageFlags;
+ }
+ ) {
+ super(t);
+ this.texture = t.trackForCleanup(
+ t.device.createTexture({
+ size: this.kTextureSize,
+ format: this.kTextureFormat,
+ ...textureCreationParams,
+ })
+ );
+ }
+
+ /**
+ * Perform a read operation on the test texture.
+ * @return GPUTexture copy containing the contents.
+ */
+ performReadOp({ op, in: context }: { op: Op; in: OperationContext }): GPUTexture {
+ this.ensureContext(context);
+ switch (op) {
+ case 't2t-copy': {
+ const texture = this.t.trackForCleanup(
+ this.device.createTexture({
+ size: this.kTextureSize,
+ format: this.kTextureFormat,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ })
+ );
+
+ assert(this.commandEncoder !== undefined);
+ this.commandEncoder.copyTextureToTexture(
+ {
+ texture: this.texture,
+ },
+ { texture },
+ this.kTextureSize
+ );
+ return texture;
+ }
+ case 't2b-copy': {
+ const { byteLength, bytesPerRow } = getTextureCopyLayout(this.kTextureFormat, '2d', [
+ ...this.kTextureSize,
+ 1,
+ ]);
+ const buffer = this.t.trackForCleanup(
+ this.device.createBuffer({
+ size: byteLength,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ })
+ );
+
+ const texture = this.t.trackForCleanup(
+ this.device.createTexture({
+ size: this.kTextureSize,
+ format: this.kTextureFormat,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ })
+ );
+
+ assert(this.commandEncoder !== undefined);
+ this.commandEncoder.copyTextureToBuffer(
+ {
+ texture: this.texture,
+ },
+ { buffer, bytesPerRow },
+ this.kTextureSize
+ );
+ this.commandEncoder.copyBufferToTexture(
+ { buffer, bytesPerRow },
+ { texture },
+ this.kTextureSize
+ );
+ return texture;
+ }
+ case 'sample': {
+ const texture = this.t.trackForCleanup(
+ this.device.createTexture({
+ size: this.kTextureSize,
+ format: this.kTextureFormat,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.STORAGE_BINDING,
+ })
+ );
+
+ const bindGroupLayout = this.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE,
+ texture: {
+ sampleType: 'unfilterable-float',
+ },
+ },
+ {
+ binding: 1,
+ visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE,
+ storageTexture: {
+ access: 'write-only',
+ format: this.kTextureFormat,
+ },
+ },
+ ],
+ });
+
+ const bindGroup = this.device.createBindGroup({
+ layout: bindGroupLayout,
+ entries: [
+ {
+ binding: 0,
+ resource: this.texture.createView(),
+ },
+ {
+ binding: 1,
+ resource: texture.createView(),
+ },
+ ],
+ });
+
+ switch (context) {
+ case 'render-pass-encoder':
+ case 'render-bundle-encoder': {
+ const module = this.device.createShaderModule({
+ code: `${fullscreenQuadWGSL}
+
+ @group(0) @binding(0) var inputTex: texture_2d<f32>;
+ @group(0) @binding(1) var outputTex: texture_storage_2d<rgba8unorm, write>;
+
+ @fragment fn frag_main(@builtin(position) fragCoord: vec4<f32>) -> @location(0) vec4<f32> {
+ let coord = vec2<i32>(fragCoord.xy);
+ textureStore(outputTex, coord, textureLoad(inputTex, coord, 0));
+ return vec4<f32>();
+ }
+ `,
+ });
+ const renderPipeline = this.device.createRenderPipeline({
+ layout: this.device.createPipelineLayout({
+ bindGroupLayouts: [bindGroupLayout],
+ }),
+ vertex: {
+ module,
+ entryPoint: 'vert_main',
+ },
+ fragment: {
+ module,
+ entryPoint: 'frag_main',
+
+ // Unused attachment since we can't use textureStore in the vertex shader.
+ // Set writeMask to zero.
+ targets: [
+ {
+ format: this.kTextureFormat,
+ writeMask: 0,
+ },
+ ],
+ },
+ });
+
+ switch (context) {
+ case 'render-bundle-encoder':
+ assert(this.renderBundleEncoder !== undefined);
+ this.renderBundleEncoder.setPipeline(renderPipeline);
+ this.renderBundleEncoder.setBindGroup(0, bindGroup);
+ this.renderBundleEncoder.draw(6);
+ break;
+ case 'render-pass-encoder':
+ assert(this.renderPassEncoder !== undefined);
+ this.renderPassEncoder.setPipeline(renderPipeline);
+ this.renderPassEncoder.setBindGroup(0, bindGroup);
+ this.renderPassEncoder.draw(6);
+ break;
+ }
+ break;
+ }
+ case 'compute-pass-encoder': {
+ const module = this.device.createShaderModule({
+ code: `
+ @group(0) @binding(0) var inputTex: texture_2d<f32>;
+ @group(0) @binding(1) var outputTex: texture_storage_2d<rgba8unorm, write>;
+
+ @compute @workgroup_size(8, 8)
+ fn main(@builtin(global_invocation_id) gid : vec3<u32>) {
+ if (any(gid.xy >= vec2<u32>(textureDimensions(inputTex)))) {
+ return;
+ }
+ let coord = vec2<i32>(gid.xy);
+ textureStore(outputTex, coord, textureLoad(inputTex, coord, 0));
+ }
+ `,
+ });
+ const computePipeline = this.device.createComputePipeline({
+ layout: this.device.createPipelineLayout({
+ bindGroupLayouts: [bindGroupLayout],
+ }),
+ compute: {
+ module,
+ entryPoint: 'main',
+ },
+ });
+
+ assert(this.computePassEncoder !== undefined);
+ this.computePassEncoder.setPipeline(computePipeline);
+ this.computePassEncoder.setBindGroup(0, bindGroup);
+ this.computePassEncoder.dispatchWorkgroups(
+ Math.ceil(this.kTextureSize[0] / 8),
+ Math.ceil(this.kTextureSize[1] / 8)
+ );
+ break;
+ }
+ default:
+ unreachable();
+ }
+
+ return texture;
+ }
+ case 'b2t-copy':
+ case 'attachment-resolve':
+ case 'attachment-store':
+ unreachable();
+ }
+ unreachable();
+ }
+
+ performWriteOp(
+ { op, in: context }: { op: Op; in: OperationContext },
+ data: PerTexelComponent<number>
+ ) {
+ this.ensureContext(context);
+ switch (op) {
+ case 'attachment-store': {
+ assert(this.commandEncoder !== undefined);
+ this.renderPassEncoder = this.commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: this.texture.createView(),
+ // [2] Use non-solid-color texture values
+ clearValue: [data.R ?? 0, data.G ?? 0, data.B ?? 0, data.A ?? 0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ this.currentContext = 'render-pass-encoder';
+ break;
+ }
+ case 'write-texture': {
+ // [2] Use non-solid-color texture values
+ const rep = kTexelRepresentationInfo[this.kTextureFormat];
+ const texelData = rep.pack(rep.encode(data));
+ const numTexels = this.kTextureSize[0] * this.kTextureSize[1];
+ const fullTexelData = new ArrayBuffer(texelData.byteLength * numTexels);
+ for (let i = 0; i < numTexels; ++i) {
+ memcpy({ src: texelData }, { dst: fullTexelData, start: i * texelData.byteLength });
+ }
+
+ this.queue.writeTexture(
+ { texture: this.texture },
+ fullTexelData,
+ {
+ bytesPerRow: texelData.byteLength * this.kTextureSize[0],
+ },
+ this.kTextureSize
+ );
+ break;
+ }
+ case 't2t-copy': {
+ const texture = this.device.createTexture({
+ size: this.kTextureSize,
+ format: this.kTextureFormat,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ });
+
+ // [2] Use non-solid-color texture values
+ const rep = kTexelRepresentationInfo[this.kTextureFormat];
+ const texelData = rep.pack(rep.encode(data));
+ const numTexels = this.kTextureSize[0] * this.kTextureSize[1];
+ const fullTexelData = new ArrayBuffer(texelData.byteLength * numTexels);
+ for (let i = 0; i < numTexels; ++i) {
+ memcpy({ src: texelData }, { dst: fullTexelData, start: i * texelData.byteLength });
+ }
+
+ this.queue.writeTexture(
+ { texture },
+ fullTexelData,
+ {
+ bytesPerRow: texelData.byteLength * this.kTextureSize[0],
+ },
+ this.kTextureSize
+ );
+
+ assert(this.commandEncoder !== undefined);
+ this.commandEncoder.copyTextureToTexture(
+ { texture },
+ { texture: this.texture },
+ this.kTextureSize
+ );
+ break;
+ }
+ case 'b2t-copy': {
+ // [2] Use non-solid-color texture values
+ const rep = kTexelRepresentationInfo[this.kTextureFormat];
+ const texelData = rep.pack(rep.encode(data));
+ const bytesPerRow = align(texelData.byteLength, 256);
+ const fullTexelData = new ArrayBuffer(bytesPerRow * this.kTextureSize[1]);
+ for (let i = 0; i < this.kTextureSize[1]; ++i) {
+ for (let j = 0; j < this.kTextureSize[0]; ++j) {
+ memcpy(
+ { src: texelData },
+ {
+ dst: fullTexelData,
+ start: i * bytesPerRow + j * texelData.byteLength,
+ }
+ );
+ }
+ }
+
+ const buffer = this.t.trackForCleanup(
+ this.device.createBuffer({
+ size: fullTexelData.byteLength,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ })
+ );
+
+ this.queue.writeBuffer(buffer, 0, fullTexelData);
+
+ assert(this.commandEncoder !== undefined);
+ this.commandEncoder.copyBufferToTexture(
+ { buffer, bytesPerRow },
+ { texture: this.texture },
+ this.kTextureSize
+ );
+ break;
+ }
+ case 'attachment-resolve': {
+ assert(this.commandEncoder !== undefined);
+ const renderTarget = this.t.trackForCleanup(
+ this.device.createTexture({
+ format: this.kTextureFormat,
+ size: this.kTextureSize,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ sampleCount: 4,
+ })
+ );
+ this.renderPassEncoder = this.commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ resolveTarget: this.texture.createView(),
+ // [2] Use non-solid-color texture values
+ clearValue: [data.R ?? 0, data.G ?? 0, data.B ?? 0, data.A ?? 0],
+ loadOp: 'clear',
+ storeOp: 'discard',
+ },
+ ],
+ });
+ this.currentContext = 'render-pass-encoder';
+ break;
+ }
+ case 'storage': {
+ const bindGroupLayout = this.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.FRAGMENT | GPUShaderStage.COMPUTE,
+ storageTexture: {
+ access: 'write-only',
+ format: this.kTextureFormat,
+ },
+ },
+ ],
+ });
+
+ const bindGroup = this.device.createBindGroup({
+ layout: bindGroupLayout,
+ entries: [
+ {
+ binding: 0,
+ resource: this.texture.createView(),
+ },
+ ],
+ });
+
+ // [2] Use non-solid-color texture values
+ const storedValue = `vec4<f32>(${[data.R ?? 0, data.G ?? 0, data.B ?? 0, data.A ?? 0]
+ .map(x => x.toFixed(5))
+ .join(', ')})`;
+
+ switch (context) {
+ case 'render-pass-encoder':
+ case 'render-bundle-encoder': {
+ const module = this.device.createShaderModule({
+ code: `${fullscreenQuadWGSL}
+
+ @group(0) @binding(0) var outputTex: texture_storage_2d<rgba8unorm, write>;
+
+ @fragment fn frag_main(@builtin(position) fragCoord: vec4<f32>) -> @location(0) vec4<f32> {
+ textureStore(outputTex, vec2<i32>(fragCoord.xy), ${storedValue});
+ return vec4<f32>();
+ }
+ `,
+ });
+ const renderPipeline = this.device.createRenderPipeline({
+ layout: this.device.createPipelineLayout({
+ bindGroupLayouts: [bindGroupLayout],
+ }),
+ vertex: {
+ module,
+ entryPoint: 'vert_main',
+ },
+ fragment: {
+ module,
+ entryPoint: 'frag_main',
+
+ // Unused attachment since we can't use textureStore in the vertex shader.
+ // Set writeMask to zero.
+ targets: [
+ {
+ format: this.kTextureFormat,
+ writeMask: 0,
+ },
+ ],
+ },
+ });
+
+ switch (context) {
+ case 'render-bundle-encoder':
+ assert(this.renderBundleEncoder !== undefined);
+ this.renderBundleEncoder.setPipeline(renderPipeline);
+ this.renderBundleEncoder.setBindGroup(0, bindGroup);
+ this.renderBundleEncoder.draw(6);
+ break;
+ case 'render-pass-encoder':
+ assert(this.renderPassEncoder !== undefined);
+ this.renderPassEncoder.setPipeline(renderPipeline);
+ this.renderPassEncoder.setBindGroup(0, bindGroup);
+ this.renderPassEncoder.draw(6);
+ break;
+ }
+ break;
+ }
+ case 'compute-pass-encoder': {
+ const module = this.device.createShaderModule({
+ code: `
+ @group(0) @binding(0) var outputTex: texture_storage_2d<rgba8unorm, write>;
+
+ @compute @workgroup_size(8, 8)
+ fn main(@builtin(global_invocation_id) gid : vec3<u32>) {
+ if (any(gid.xy >= vec2<u32>(textureDimensions(outputTex)))) {
+ return;
+ }
+ let coord = vec2<i32>(gid.xy);
+ textureStore(outputTex, coord, ${storedValue});
+ }
+ `,
+ });
+ const computePipeline = this.device.createComputePipeline({
+ layout: this.device.createPipelineLayout({
+ bindGroupLayouts: [bindGroupLayout],
+ }),
+ compute: {
+ module,
+ entryPoint: 'main',
+ },
+ });
+
+ assert(this.computePassEncoder !== undefined);
+ this.computePassEncoder.setPipeline(computePipeline);
+ this.computePassEncoder.setBindGroup(0, bindGroup);
+ this.computePassEncoder.dispatchWorkgroups(
+ Math.ceil(this.kTextureSize[0] / 8),
+ Math.ceil(this.kTextureSize[1] / 8)
+ );
+ break;
+ }
+ default:
+ unreachable();
+ }
+ break;
+ }
+ case 't2b-copy':
+ case 'sample':
+ unreachable();
+ }
+ }
+}
+
+g.test('rw')
+ .desc(
+ `
+  Perform a 'read' operation on a texture subresource, followed by a 'write' operation.
+ Operations are separated by a 'boundary' (pass, encoder, queue-op, etc.).
+ Test that the results are synchronized.
+ The read should not see the contents written by the subsequent write.`
+ )
+ .params(u =>
+ u
+ .combine('boundary', kOperationBoundaries)
+ .expand('_context', p => kBoundaryInfo[p.boundary].contexts)
+ .expandWithParams(function* ({ _context }) {
+ for (const read of kAllReadOps) {
+ for (const write of kAllWriteOps) {
+ if (checkOpsValidForContext([read, write], _context)) {
+ yield {
+ read: { op: read, in: _context[0] },
+ write: { op: write, in: _context[1] },
+ };
+ }
+ }
+ }
+ })
+ )
+ .fn(t => {
+ const helper = new TextureSyncTestHelper(t, {
+ usage:
+ GPUTextureUsage.COPY_DST |
+ kOpInfo[t.params.read.op].readUsage |
+ kOpInfo[t.params.write.op].writeUsage,
+ });
+ // [2] Use non-solid-color texture value.
+ const texelValue1 = { R: 0, G: 1, B: 0, A: 1 } as const;
+ const texelValue2 = { R: 1, G: 0, B: 0, A: 1 } as const;
+
+ // Initialize the texture with something.
+ helper.performWriteOp({ op: 'write-texture', in: 'queue' }, texelValue1);
+ const readbackTexture = helper.performReadOp(t.params.read);
+ helper.ensureBoundary(t.params.boundary);
+ helper.performWriteOp(t.params.write, texelValue2);
+ helper.ensureSubmit();
+
+ // Contents should be the first value written, not the second.
+ t.expectSingleColor(readbackTexture, helper.kTextureFormat, {
+ size: [...helper.kTextureSize, 1],
+ exp: texelValue1,
+ });
+ });
+
+g.test('wr')
+ .desc(
+ `
+ Perform a 'write' operation on a texture subresource, followed by a 'read' operation.
+ Operations are separated by a 'boundary' (pass, encoder, queue-op, etc.).
+ Test that the results are synchronized.
+ The read should see exactly the contents written by the previous write.
+
+ - TODO: Use non-solid-color texture contents [2]`
+ )
+ .params(u =>
+ u
+ .combine('boundary', kOperationBoundaries)
+ .expand('_context', p => kBoundaryInfo[p.boundary].contexts)
+ .expandWithParams(function* ({ _context }) {
+ for (const read of kAllReadOps) {
+ for (const write of kAllWriteOps) {
+ if (checkOpsValidForContext([write, read], _context)) {
+ yield {
+ write: { op: write, in: _context[0] },
+ read: { op: read, in: _context[1] },
+ };
+ }
+ }
+ }
+ })
+ )
+ .fn(t => {
+ const helper = new TextureSyncTestHelper(t, {
+ usage: kOpInfo[t.params.read.op].readUsage | kOpInfo[t.params.write.op].writeUsage,
+ });
+ // [2] Use non-solid-color texture value.
+ const texelValue = { R: 0, G: 1, B: 0, A: 1 } as const;
+
+ helper.performWriteOp(t.params.write, texelValue);
+ helper.ensureBoundary(t.params.boundary);
+ const readbackTexture = helper.performReadOp(t.params.read);
+ helper.ensureSubmit();
+
+ // Contents should be exactly the values written.
+ t.expectSingleColor(readbackTexture, helper.kTextureFormat, {
+ size: [...helper.kTextureSize, 1],
+ exp: texelValue,
+ });
+ });
+
+g.test('ww')
+ .desc(
+ `
+ Perform a 'first' write operation on a texture subresource, followed by a 'second' write operation.
+ Operations are separated by a 'boundary' (pass, encoder, queue-op, etc.).
+ Test that the results are synchronized.
+ The second write should overwrite the contents of the first.`
+ )
+ .params(u =>
+ u
+ .combine('boundary', kOperationBoundaries)
+ .expand('_context', p => kBoundaryInfo[p.boundary].contexts)
+ .expandWithParams(function* ({ _context }) {
+ for (const first of kAllWriteOps) {
+ for (const second of kAllWriteOps) {
+ if (checkOpsValidForContext([first, second], _context)) {
+ yield {
+ first: { op: first, in: _context[0] },
+ second: { op: second, in: _context[1] },
+ };
+ }
+ }
+ }
+ })
+ )
+ .fn(t => {
+ const helper = new TextureSyncTestHelper(t, {
+ usage:
+ GPUTextureUsage.COPY_SRC |
+ kOpInfo[t.params.first.op].writeUsage |
+ kOpInfo[t.params.second.op].writeUsage,
+ });
+ // [2] Use non-solid-color texture value.
+ const texelValue1 = { R: 1, G: 0, B: 0, A: 1 } as const;
+ const texelValue2 = { R: 0, G: 1, B: 0, A: 1 } as const;
+
+ helper.performWriteOp(t.params.first, texelValue1);
+ helper.ensureBoundary(t.params.boundary);
+ helper.performWriteOp(t.params.second, texelValue2);
+ helper.ensureSubmit();
+
+ // Read back the contents so we can test the result.
+ const readbackTexture = helper.performReadOp({ op: 't2t-copy', in: 'command-encoder' });
+ helper.ensureSubmit();
+
+ // Contents should be the second value written.
+ t.expectSingleColor(readbackTexture, helper.kTextureFormat, {
+ size: [...helper.kTextureSize, 1],
+ exp: texelValue2,
+ });
+ });
+
+g.test('rw,single_pass,load_store')
+ .desc(
+ `
+ TODO: Test memory synchronization when loading from a texture subresource in a single pass and storing to it.`
+ )
+ .unimplemented();
+
+g.test('rw,single_pass,load_resolve')
+ .desc(
+ `
+ TODO: Test memory synchronization when loading from a texture subresource in a single pass and resolving to it.`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_sync/texture/texture_sync_test.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_sync/texture/texture_sync_test.ts
new file mode 100644
index 0000000000..401fc6ef46
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/memory_sync/texture/texture_sync_test.ts
@@ -0,0 +1,124 @@
+import { GPUConst } from '../../../../constants.js';
+import { OperationContext } from '../operation_context_helper.js';
+
+export const kAllWriteOps = [
+ 'write-texture',
+ 'b2t-copy',
+ 't2t-copy',
+ 'storage',
+ 'attachment-store',
+ 'attachment-resolve',
+] as const;
+export type WriteOp = (typeof kAllWriteOps)[number];
+
+export const kAllReadOps = ['t2b-copy', 't2t-copy', 'sample'] as const;
+export type ReadOp = (typeof kAllReadOps)[number];
+
+export type Op = ReadOp | WriteOp;
+
+interface OpInfo {
+ readonly contexts: OperationContext[];
+ readonly readUsage: GPUTextureUsageFlags;
+ readonly writeUsage: GPUTextureUsageFlags;
+ // Add fields as needed
+}
+
+/**
+ * Mapping of Op to the OperationContext(s) it is valid in
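+ *
+ * readUsage/writeUsage are OR'd together by the tests to build the texture usage flags: for
+ * example, a texture read via 'sample' and written via 'attachment-store' needs at least
+ * SAMPLED | RENDER_ATTACHMENT usage.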
+ */
+export const kOpInfo: {
+ readonly [k in Op]: OpInfo;
+} = {
+ 'write-texture': {
+ contexts: ['queue'],
+ readUsage: 0,
+ writeUsage: GPUConst.TextureUsage.COPY_DST,
+ },
+ 'b2t-copy': {
+ contexts: ['command-encoder'],
+ readUsage: 0,
+ writeUsage: GPUConst.TextureUsage.COPY_DST,
+ },
+ 't2t-copy': {
+ contexts: ['command-encoder'],
+ readUsage: GPUConst.TextureUsage.COPY_SRC,
+ writeUsage: GPUConst.TextureUsage.COPY_DST,
+ },
+ 't2b-copy': {
+ contexts: ['command-encoder'],
+ readUsage: GPUConst.TextureUsage.COPY_SRC,
+ writeUsage: 0,
+ },
+ storage: {
+ contexts: ['compute-pass-encoder', 'render-pass-encoder', 'render-bundle-encoder'],
+ readUsage: 0,
+ writeUsage: GPUConst.TextureUsage.STORAGE,
+ },
+ sample: {
+ contexts: ['compute-pass-encoder', 'render-pass-encoder', 'render-bundle-encoder'],
+ readUsage: GPUConst.TextureUsage.SAMPLED,
+ writeUsage: 0,
+ },
+ 'attachment-store': {
+ contexts: ['command-encoder'],
+ readUsage: 0,
+ writeUsage: GPUConst.TextureUsage.RENDER_ATTACHMENT,
+ },
+ 'attachment-resolve': {
+ contexts: ['command-encoder'],
+ readUsage: 0,
+ writeUsage: GPUConst.TextureUsage.RENDER_ATTACHMENT,
+ },
+};
+
+export function checkOpsValidForContext(
+ ops: [Op, Op],
+ context: [OperationContext, OperationContext]
+) {
+ const valid =
+ kOpInfo[ops[0]].contexts.includes(context[0]) && kOpInfo[ops[1]].contexts.includes(context[1]);
+ if (!valid) return false;
+
+ if (
+ context[0] === 'render-bundle-encoder' ||
+ context[0] === 'render-pass-encoder' ||
+ context[1] === 'render-bundle-encoder' ||
+ context[1] === 'render-pass-encoder'
+ ) {
+ // In a render pass, it is invalid to use a resource as both writable and another usage.
+ // Also, for storage+storage usage, the application is opting into racy behavior.
+ // The storage+storage case is also skipped as the results cannot be reliably tested.
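+    // For example, a ('storage', 'sample') pair inside a render pass is rejected here, while a
+    // ('t2t-copy', 'storage') pair split across command-encoder and render-pass contexts is allowed.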
+ const checkImpl = (op1: Op, op2: Op) => {
+ switch (op1) {
+ case 'attachment-resolve':
+ case 'attachment-store':
+ case 'storage':
+ switch (op2) {
+ case 'attachment-resolve':
+ case 'attachment-store':
+ case 'storage':
+ case 'sample':
+ // Write+other, or racy.
+ return false;
+ case 'b2t-copy':
+ case 't2b-copy':
+ case 't2t-copy':
+ case 'write-texture':
+ // These don't occur in a render pass.
+ return true;
+ }
+ break;
+ case 'b2t-copy':
+ case 'sample':
+ case 't2b-copy':
+ case 't2t-copy':
+ case 'write-texture':
+ // These are not write usages, or don't occur in a render pass.
+ break;
+ }
+ return true;
+ };
+ return checkImpl(ops[0], ops[1]) && checkImpl(ops[1], ops[0]);
+ }
+ return true;
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/onSubmittedWorkDone.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/onSubmittedWorkDone.spec.ts
new file mode 100644
index 0000000000..8b0647e7ef
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/onSubmittedWorkDone.spec.ts
@@ -0,0 +1,56 @@
+export const description = `
+Tests for the behavior of GPUQueue.onSubmittedWorkDone().
+
+Note that any promise timeouts will be detected by the framework.
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { range } from '../../../common/util/util.js';
+import { GPUTest } from '../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('without_work')
+ .desc(`Await onSubmittedWorkDone once without having submitted any work.`)
+ .fn(async t => {
+ await t.queue.onSubmittedWorkDone();
+ });
+
+g.test('with_work')
+ .desc(`Await onSubmittedWorkDone once after submitting some work (writeBuffer).`)
+ .fn(async t => {
+ const buffer = t.device.createBuffer({ size: 4, usage: GPUBufferUsage.COPY_DST });
+ t.queue.writeBuffer(buffer, 0, new Uint8Array(4));
+ await t.queue.onSubmittedWorkDone();
+ });
+
+g.test('many,serial')
+ .desc(`Await 1000 onSubmittedWorkDone calls in serial.`)
+ .fn(async t => {
+ for (let i = 0; i < 1000; ++i) {
+ await t.queue.onSubmittedWorkDone();
+ }
+ });
+
+g.test('many,parallel')
+ .desc(`Await 1000 onSubmittedWorkDone calls in parallel with Promise.all().`)
+ .fn(async t => {
+ const promises = range(1000, () => t.queue.onSubmittedWorkDone());
+ await Promise.all(promises);
+ });
+
+g.test('many,parallel_order')
+ .desc(`Issue 200 onSubmittedWorkDone calls and make sure they resolve in the right order.`)
+ .fn(async t => {
+ const promises = [];
+ let lastResolved = -1;
+ for (const i of range(200, i => i)) {
+ promises.push(
+ t.queue.onSubmittedWorkDone().then(() => {
+ t.expect(i === lastResolved + 1);
+ lastResolved++;
+ })
+ );
+ }
+ await Promise.all(promises);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/pipeline/default_layout.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/pipeline/default_layout.spec.ts
new file mode 100644
index 0000000000..f303b2737f
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/pipeline/default_layout.spec.ts
@@ -0,0 +1,27 @@
+export const description = `
+Tests for default pipeline layouts.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('getBindGroupLayout_js_object')
+ .desc(
+ `Test that getBindGroupLayout returns [TODO: the same or a different, needs spec] object
+each time.`
+ )
+ .unimplemented();
+
+g.test('incompatible_with_explicit')
+ .desc(`Test that default bind group layouts are never compatible with explicitly created ones.`)
+ .unimplemented();
+
+g.test('layout')
+ .desc(
+ `Test that bind group layouts of the default pipeline layout are correct by passing various
+shaders and then checking their computed bind group layouts are compatible with particular bind
+groups.`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/queue/writeBuffer.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/queue/writeBuffer.spec.ts
new file mode 100644
index 0000000000..742adb3653
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/queue/writeBuffer.spec.ts
@@ -0,0 +1,235 @@
+export const description = 'Operation tests for GPUQueue.writeBuffer()';
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { memcpy, range } from '../../../../common/util/util.js';
+import { GPUTest } from '../../../gpu_test.js';
+import { align } from '../../../util/math.js';
+
+const kTypedArrays = [
+ 'Uint8Array',
+ 'Uint16Array',
+ 'Uint32Array',
+ 'Int8Array',
+ 'Int16Array',
+ 'Int32Array',
+ 'Float32Array',
+ 'Float64Array',
+] as const;
+
+type WriteBufferSignature = {
+ bufferOffset: number;
+ data: readonly number[];
+ arrayType: (typeof kTypedArrays)[number];
+ useArrayBuffer: boolean;
+ dataOffset?: number; // In elements when useArrayBuffer === false, bytes otherwise
+ dataSize?: number; // In elements when useArrayBuffer === false, bytes otherwise
+};
+
+class F extends GPUTest {
+ calculateRequiredBufferSize(writes: WriteBufferSignature[]): number {
+ let bufferSize = 0;
+ // Calculate size of final buffer
+ for (const { bufferOffset, data, arrayType, useArrayBuffer, dataOffset, dataSize } of writes) {
+ const TypedArrayConstructor = globalThis[arrayType];
+
+      // When data is passed as an ArrayBuffer, dataOffset and dataSize are in bytes rather than in
+      // elements. bytesPerElement converts dataOffset and dataSize from elements to bytes when
+      // useArrayBuffer === false.
+ const bytesPerElement = useArrayBuffer ? 1 : TypedArrayConstructor.BYTES_PER_ELEMENT;
+
+ // Calculate the number of bytes written to the buffer. data is always an array of elements.
+ let bytesWritten =
+ data.length * TypedArrayConstructor.BYTES_PER_ELEMENT - (dataOffset || 0) * bytesPerElement;
+
+ if (dataSize) {
+ // When defined, dataSize clamps the number of bytes written
+ bytesWritten = Math.min(bytesWritten, dataSize * bytesPerElement);
+ }
+
+ // The minimum buffer size required for the write to succeed is the number of bytes written +
+ // the bufferOffset
+ const requiredBufferSize = bufferOffset + bytesWritten;
+
+ // Find the largest required size by all writes
+ bufferSize = Math.max(bufferSize, requiredBufferSize);
+ }
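+    // For example, in the Float64Array case of the 'array_types' test below (16 elements,
+    // dataOffset = 1 element, dataSize = 8 elements, bufferOffset = 0), bytesWritten is
+    // min(16*8 - 1*8, 8*8) = 64, so the required size is 64 bytes (already a multiple of 4).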
+ // writeBuffer requires buffers to be a multiple of 4
+ return align(bufferSize, 4);
+ }
+
+ testWriteBuffer(...writes: WriteBufferSignature[]) {
+ const bufferSize = this.calculateRequiredBufferSize(writes);
+
+ // Initialize buffer to non-zero data (0xff) for easier debug.
+ const expectedData = new Uint8Array(bufferSize).fill(0xff);
+
+ const buffer = this.makeBufferWithContents(
+ expectedData,
+ GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST
+ );
+
+ for (const { bufferOffset, data, arrayType, useArrayBuffer, dataOffset, dataSize } of writes) {
+ const TypedArrayConstructor = globalThis[arrayType];
+ const writeData = new TypedArrayConstructor(data);
+ const writeSrc = useArrayBuffer ? writeData.buffer : writeData;
+ this.queue.writeBuffer(buffer, bufferOffset, writeSrc, dataOffset, dataSize);
+ memcpy(
+ { src: writeSrc, start: dataOffset, length: dataSize },
+ { dst: expectedData, start: bufferOffset }
+ );
+ }
+
+ this.debug(`expectedData: [${expectedData.join(', ')}]`);
+ this.expectGPUBufferValuesEqual(buffer, expectedData);
+ }
+}
+
+export const g = makeTestGroup(F);
+
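+// Shared test data: [0, 1, 2, ..., 15].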
+const kTestData = range<number>(16, i => i);
+
+g.test('array_types')
+ .desc('Tests that writeBuffer correctly handles different TypedArrays and ArrayBuffer.')
+ .params(u =>
+ u //
+ .combine('arrayType', kTypedArrays)
+ .combine('useArrayBuffer', [false, true])
+ )
+ .fn(t => {
+ const { arrayType, useArrayBuffer } = t.params;
+ const dataOffset = 1;
+ const dataSize = 8;
+ t.testWriteBuffer({
+ bufferOffset: 0,
+ arrayType,
+ data: kTestData,
+ dataOffset,
+ dataSize,
+ useArrayBuffer,
+ });
+ });
+
+g.test('multiple_writes_at_different_offsets_and_sizes')
+ .desc(
+ `
+Tests that writeBuffer correctly handles different offsets and writes. This includes:
+- Non-overlapping TypedArrays and ArrayBuffers
+- Overlapping TypedArrays and ArrayBuffers
+- Writing zero data
+- Writing on zero sized buffers
+- Unaligned source
+- Multiple overlapping writes with decreasing sizes
+ `
+ )
+ .paramsSubcasesOnly([
+ {
+ // Concatenate 2 Uint32Arrays
+ writes: [
+ {
+ bufferOffset: 0,
+ data: kTestData,
+ arrayType: 'Uint32Array',
+ useArrayBuffer: false,
+ dataOffset: 2,
+ dataSize: 2,
+ }, // [2, 3]
+ {
+ bufferOffset: 2 * Uint32Array.BYTES_PER_ELEMENT,
+ data: kTestData,
+ arrayType: 'Uint32Array',
+ useArrayBuffer: false,
+ dataOffset: 0,
+ dataSize: 2,
+ }, // [0, 1]
+      ], // Expected [2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]
+ },
+ {
+ // Concatenate 2 Uint8Arrays
+ writes: [
+ { bufferOffset: 0, data: [0, 1, 2, 3], arrayType: 'Uint8Array', useArrayBuffer: false },
+ { bufferOffset: 4, data: [4, 5, 6, 7], arrayType: 'Uint8Array', useArrayBuffer: false },
+ ], // Expected [0, 1, 2, 3, 4, 5, 6, 7]
+ },
+ {
+ // Overlap in the middle
+ writes: [
+ { bufferOffset: 0, data: kTestData, arrayType: 'Uint8Array', useArrayBuffer: false },
+ { bufferOffset: 4, data: [0], arrayType: 'Uint32Array', useArrayBuffer: false },
+      ], // Expected [0, 1, 2, 3, 0, 0, 0, 0, 8, 9, 10, 11, 12, 13, 14, 15]
+ },
+ {
+ // Overlapping arrayLists
+ writes: [
+ {
+ bufferOffset: 0,
+ data: kTestData,
+ arrayType: 'Uint32Array',
+ useArrayBuffer: true,
+ dataOffset: 2,
+ dataSize: 4 * Uint32Array.BYTES_PER_ELEMENT,
+ },
+ { bufferOffset: 4, data: [0x04030201], arrayType: 'Uint32Array', useArrayBuffer: true },
+ ], // Expected [0, 0, 1, 0, 1, 2, 3, 4, 0, 0, 3, 0, 0, 0, 4, 0]
+ },
+ {
+ // Write over with empty buffer
+ writes: [
+ { bufferOffset: 0, data: kTestData, arrayType: 'Uint8Array', useArrayBuffer: false },
+ { bufferOffset: 0, data: [], arrayType: 'Uint8Array', useArrayBuffer: false },
+      ], // Expected [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
+ },
+ {
+ // Zero buffer
+ writes: [{ bufferOffset: 0, data: [], arrayType: 'Uint8Array', useArrayBuffer: false }],
+ }, // Expected []
+ {
+ // Unaligned source
+ writes: [
+ {
+ bufferOffset: 0,
+ data: [0x77, ...kTestData],
+ arrayType: 'Uint8Array',
+ useArrayBuffer: false,
+ dataOffset: 1,
+ },
+      ], // Expected [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
+ },
+ {
+ // Multiple overlapping writes
+ writes: [
+ {
+ bufferOffset: 0,
+ data: [0x05050505, 0x05050505, 0x05050505, 0x05050505, 0x05050505],
+ arrayType: 'Uint32Array',
+ useArrayBuffer: false,
+ },
+ {
+ bufferOffset: 0,
+ data: [0x04040404, 0x04040404, 0x04040404, 0x04040404],
+ arrayType: 'Uint32Array',
+ useArrayBuffer: false,
+ },
+ {
+ bufferOffset: 0,
+ data: [0x03030303, 0x03030303, 0x03030303],
+ arrayType: 'Uint32Array',
+ useArrayBuffer: false,
+ },
+ {
+ bufferOffset: 0,
+ data: [0x02020202, 0x02020202],
+ arrayType: 'Uint32Array',
+ useArrayBuffer: false,
+ },
+ {
+ bufferOffset: 0,
+ data: [0x01010101],
+ arrayType: 'Uint32Array',
+ useArrayBuffer: false,
+ },
+ ], // Expected [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5]
+ },
+ ] as const)
+ .fn(t => {
+ t.testWriteBuffer(...t.params.writes);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/reflection.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/reflection.spec.ts
new file mode 100644
index 0000000000..e9f7b9726c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/reflection.spec.ts
@@ -0,0 +1,137 @@
+export const description = `
+Tests that object attributes which reflect the object's creation properties are properly set.
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { GPUConst } from '../../constants.js';
+import { GPUTest } from '../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('buffer_reflection_attributes')
+ .desc(`For every buffer attribute, the corresponding descriptor value is carried over.`)
+ .paramsSubcasesOnly(u =>
+ u.combine('descriptor', [
+ { size: 4, usage: GPUConst.BufferUsage.VERTEX },
+ {
+ size: 16,
+ usage:
+ GPUConst.BufferUsage.STORAGE |
+ GPUConst.BufferUsage.COPY_SRC |
+ GPUConst.BufferUsage.UNIFORM,
+ },
+ { size: 32, usage: GPUConst.BufferUsage.MAP_READ | GPUConst.BufferUsage.COPY_DST },
+ {
+ size: 32,
+ usage: GPUConst.BufferUsage.MAP_READ | GPUConst.BufferUsage.MAP_WRITE,
+ invalid: true,
+ },
+ ] as const)
+ )
+ .fn(t => {
+ const { descriptor } = t.params;
+
+ t.expectValidationError(() => {
+ const buffer = t.device.createBuffer(descriptor);
+
+ t.expect(buffer.size === descriptor.size);
+ t.expect(buffer.usage === descriptor.usage);
+ }, descriptor.invalid === true);
+ });
+
+g.test('texture_reflection_attributes')
+ .desc(`For every texture attribute, the corresponding descriptor value is carried over.`)
+ .paramsSubcasesOnly(u =>
+ u.combine('descriptor', [
+ {
+ size: { width: 4, height: 4 },
+ format: 'rgba8unorm',
+ usage: GPUConst.TextureUsage.TEXTURE_BINDING,
+ },
+ {
+ size: { width: 8, height: 8, depthOrArrayLayers: 8 },
+ format: 'bgra8unorm',
+ usage: GPUConst.TextureUsage.RENDER_ATTACHMENT | GPUConst.TextureUsage.COPY_SRC,
+ },
+ {
+ size: [4, 4],
+ format: 'rgba8unorm',
+ usage: GPUConst.TextureUsage.TEXTURE_BINDING,
+ mipLevelCount: 2,
+ },
+ {
+ size: [16, 16, 16],
+ format: 'rgba8unorm',
+ usage: GPUConst.TextureUsage.TEXTURE_BINDING,
+ dimension: '3d',
+ },
+ {
+ size: [32],
+ format: 'rgba8unorm',
+ usage: GPUConst.TextureUsage.TEXTURE_BINDING,
+ dimension: '1d',
+ },
+ {
+ size: { width: 4, height: 4 },
+ format: 'rgba8unorm',
+ usage: GPUConst.TextureUsage.RENDER_ATTACHMENT,
+ sampleCount: 4,
+ },
+ {
+ size: { width: 4, height: 4 },
+ format: 'rgba8unorm',
+ usage: GPUConst.TextureUsage.TEXTURE_BINDING,
+ sampleCount: 4,
+ invalid: true,
+ },
+ ] as const)
+ )
+ .fn(t => {
+ const { descriptor } = t.params;
+
+ let width: number;
+ let height: number;
+ let depthOrArrayLayers: number;
+ if (Array.isArray(descriptor.size)) {
+ width = descriptor.size[0];
+ height = descriptor.size[1] || 1;
+ depthOrArrayLayers = descriptor.size[2] || 1;
+ } else {
+ width = (descriptor.size as GPUExtent3DDict).width;
+ height = (descriptor.size as GPUExtent3DDict).height || 1;
+ depthOrArrayLayers = (descriptor.size as GPUExtent3DDict).depthOrArrayLayers || 1;
+ }
+
+ t.expectValidationError(() => {
+ const texture = t.device.createTexture(descriptor);
+
+ t.expect(texture.width === width);
+ t.expect(texture.height === height);
+ t.expect(texture.depthOrArrayLayers === depthOrArrayLayers);
+ t.expect(texture.format === descriptor.format);
+ t.expect(texture.usage === descriptor.usage);
+ t.expect(texture.dimension === (descriptor.dimension || '2d'));
+ t.expect(texture.mipLevelCount === (descriptor.mipLevelCount || 1));
+ t.expect(texture.sampleCount === (descriptor.sampleCount || 1));
+ }, descriptor.invalid === true);
+ });
+
+g.test('query_set_reflection_attributes')
+  .desc(`For every query set attribute, the corresponding descriptor value is carried over.`)
+ .paramsSubcasesOnly(u =>
+ u.combine('descriptor', [
+ { type: 'occlusion', count: 4 },
+ { type: 'occlusion', count: 16 },
+ { type: 'occlusion', count: 8193, invalid: true },
+ ] as const)
+ )
+ .fn(t => {
+ const { descriptor } = t.params;
+
+ t.expectValidationError(() => {
+ const querySet = t.device.createQuerySet(descriptor);
+
+ t.expect(querySet.type === descriptor.type);
+ t.expect(querySet.count === descriptor.count);
+ }, descriptor.invalid === true);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pass/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pass/README.txt
new file mode 100644
index 0000000000..aedaaa2d83
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pass/README.txt
@@ -0,0 +1 @@
+Render pass stuff other than commands (which are in command_buffer/).
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pass/clear_value.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pass/clear_value.spec.ts
new file mode 100644
index 0000000000..9c473e65af
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pass/clear_value.spec.ts
@@ -0,0 +1,188 @@
+export const description = `
+Tests for render pass clear values.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { assert } from '../../../../common/util/util.js';
+import {
+ kTextureFormatInfo,
+ kDepthStencilFormats,
+ depthStencilFormatAspectSize,
+} from '../../../format_info.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('stored')
+ .desc(`Test render pass clear values are stored at the end of an empty pass.`)
+ .unimplemented();
+
+g.test('loaded')
+ .desc(
+ `Test render pass clear values are visible during the pass by doing some trivial blending
+with the attachment (e.g. add [0,0,0,0] to the color and verify the stored result).`
+ )
+ .unimplemented();
+
+g.test('srgb')
+ .desc(
+ `Test that clear values on '-srgb' type attachments are interpreted as unencoded (linear),
+not decoded from srgb to linear.`
+ )
+ .unimplemented();
+
+g.test('layout')
+ .desc(
+ `Test that bind group layouts of the default pipeline layout are correct by passing various
+shaders and then checking their computed bind group layouts are compatible with particular bind
+groups.`
+ )
+ .unimplemented();
+
+g.test('stencil_clear_value')
+ .desc(
+ `Test that when stencilLoadOp is "clear", the stencil aspect should be correctly cleared by
+ GPURenderPassDepthStencilAttachment.stencilClearValue, which will be converted to the type of
+ the stencil aspect of view by taking the same number of LSBs as the number of bits in the
+ stencil aspect of one texel block of view.`
+ )
+ .params(u =>
+ u
+ .combine('stencilFormat', kDepthStencilFormats)
+ .combine('stencilClearValue', [0, 1, 0xff, 0x100 + 2, 0x10000 + 3])
+ .combine('applyStencilClearValueAsStencilReferenceValue', [true, false])
+ .filter(t => !!kTextureFormatInfo[t.stencilFormat].stencil)
+ )
+ .beforeAllSubcases(t => {
+ const { stencilFormat } = t.params;
+ const info = kTextureFormatInfo[stencilFormat];
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { stencilFormat, stencilClearValue, applyStencilClearValueAsStencilReferenceValue } =
+ t.params;
+
+ const kSize = [1, 1, 1] as const;
+ const colorFormat = 'rgba8unorm';
+ const stencilTexture = t.device.createTexture({
+ format: stencilFormat,
+ size: kSize,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ });
+ const colorTexture = t.device.createTexture({
+ format: colorFormat,
+ size: kSize,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ });
+ const renderPipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ @vertex
+ fn main(@builtin(vertex_index) VertexIndex : u32)-> @builtin(position) vec4<f32> {
+ var pos : array<vec2<f32>, 6> = array<vec2<f32>, 6>(
+ vec2<f32>(-1.0, 1.0),
+ vec2<f32>(-1.0, -1.0),
+ vec2<f32>( 1.0, 1.0),
+ vec2<f32>(-1.0, -1.0),
+ vec2<f32>( 1.0, 1.0),
+ vec2<f32>( 1.0, -1.0));
+ return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `
+ @fragment
+ fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: colorFormat }],
+ },
+ depthStencil: {
+ format: stencilFormat,
+ depthCompare: 'always',
+ depthWriteEnabled: false,
+ stencilFront: {
+ compare: 'equal',
+ },
+ stencilBack: {
+ compare: 'equal',
+ },
+ },
+ primitive: {
+ topology: 'triangle-list',
+ },
+ });
+
+ const stencilAspectSizeInBytes = depthStencilFormatAspectSize(stencilFormat, 'stencil-only');
+ assert(stencilAspectSizeInBytes > 0);
+ const expectedStencilValue = stencilClearValue & ((stencilAspectSizeInBytes << 8) - 1);
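+    // With an 8-bit stencil aspect (stencilAspectSizeInBytes === 1) this masks to the low 8 bits,
+    // e.g. a stencilClearValue of 0x10000 + 3 is expected to be stored as 3.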
+
+    // The stencil reference set via setStencilReference is also masked to the lowest valid bits, so
+    // whatever we set in the higher bits (whether stencilClearValue and stencilReference differ
+    // there or not) is masked away; the test passes if and only if the valid lowest bits match.
+ const stencilReference = applyStencilClearValueAsStencilReferenceValue
+ ? stencilClearValue
+ : expectedStencilValue;
+
+ const encoder = t.device.createCommandEncoder();
+
+ const depthStencilAttachment: GPURenderPassDepthStencilAttachment = {
+ view: stencilTexture.createView(),
+ depthClearValue: 0,
+ stencilLoadOp: 'clear',
+ stencilStoreOp: 'store',
+ stencilClearValue,
+ };
+ if (kTextureFormatInfo[stencilFormat].depth) {
+ depthStencilAttachment.depthClearValue = 0;
+ depthStencilAttachment.depthLoadOp = 'clear';
+ depthStencilAttachment.depthStoreOp = 'store';
+ }
+ const renderPassEncoder = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorTexture.createView(),
+ loadOp: 'clear',
+ storeOp: 'store',
+ clearValue: [1, 0, 0, 1] as const,
+ },
+ ],
+ depthStencilAttachment,
+ });
+ renderPassEncoder.setPipeline(renderPipeline);
+ renderPassEncoder.setStencilReference(stencilReference);
+ renderPassEncoder.draw(6);
+ renderPassEncoder.end();
+
+ const destinationBuffer = t.device.createBuffer({
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ size: 4,
+ });
+ t.trackForCleanup(destinationBuffer);
+ encoder.copyTextureToBuffer(
+ {
+ texture: stencilTexture,
+ aspect: 'stencil-only',
+ },
+ {
+ buffer: destinationBuffer,
+ },
+ [1, 1, 1]
+ );
+
+ t.queue.submit([encoder.finish()]);
+
+ t.expectSingleColor(colorTexture, colorFormat, {
+ size: [1, 1, 1],
+ exp: { R: 0, G: 1, B: 0, A: 1 },
+ });
+ t.expectGPUBufferValuesEqual(destinationBuffer, new Uint8Array([expectedStencilValue]));
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pass/resolve.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pass/resolve.spec.ts
new file mode 100644
index 0000000000..46b03e7b39
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pass/resolve.spec.ts
@@ -0,0 +1,183 @@
+export const description = `API Operation Tests for RenderPass StoreOp.
+Tests a render pass with a resolveTarget resolves correctly for many combinations of:
+ - number of color attachments, some with and some without a resolveTarget
+ - renderPass storeOp set to {'store', 'discard'}
+ - resolveTarget mip level {0, >0} (TODO?: different mip level from colorAttachment)
+ - resolveTarget {2d array layer, TODO: 3d slice} {0, >0} with {2d, TODO: 3d} resolveTarget
+ (TODO?: different z from colorAttachment)
+ - TODO: test all renderable color formats
+ - TODO: test that any not-resolved attachments are rendered to correctly.
+ - TODO: test different loadOps
+ - TODO?: resolveTarget mip level {0, >0} (TODO?: different mip level from colorAttachment)
+ - TODO?: resolveTarget {2d array layer, TODO: 3d slice} {0, >0} with {2d, TODO: 3d} resolveTarget
+ (different z from colorAttachment)
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest, TextureTestMixin } from '../../../gpu_test.js';
+
+const kSlotsToResolve = [
+ [0, 2],
+ [1, 3],
+ [0, 1, 2, 3],
+];
+
+const kSize = 4;
+const kFormat: GPUTextureFormat = 'rgba8unorm';
+
+export const g = makeTestGroup(TextureTestMixin(GPUTest));
+
+g.test('render_pass_resolve')
+ .params(u =>
+ u
+ .combine('storeOperation', ['discard', 'store'] as const)
+ .beginSubcases()
+ .combine('numColorAttachments', [2, 4] as const)
+ .combine('slotsToResolve', kSlotsToResolve)
+ .combine('resolveTargetBaseMipLevel', [0, 1] as const)
+ .combine('resolveTargetBaseArrayLayer', [0, 1] as const)
+ )
+ .fn(t => {
+ const targets: GPUColorTargetState[] = [];
+ for (let i = 0; i < t.params.numColorAttachments; i++) {
+ targets.push({ format: kFormat });
+ }
+
+ // These shaders will draw a white triangle into a texture. After draw, the top left
+ // half of the texture will be white, and the bottom right half will be unchanged. When this
+    // texture is resolved, each portion of the texture will contain a distinct color, and the line
+    // of pixels between the portions will contain the midpoint color due to the multisample resolve.
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ @vertex fn main(
+ @builtin(vertex_index) VertexIndex : u32
+ ) -> @builtin(position) vec4<f32> {
+ var pos : array<vec2<f32>, 3> = array<vec2<f32>, 3>(
+ vec2<f32>(-1.0, -1.0),
+ vec2<f32>(-1.0, 1.0),
+ vec2<f32>( 1.0, 1.0));
+ return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `
+ struct Output {
+ @location(0) fragColor0 : vec4<f32>,
+ @location(1) fragColor1 : vec4<f32>,
+ @location(2) fragColor2 : vec4<f32>,
+ @location(3) fragColor3 : vec4<f32>,
+ };
+
+ @fragment fn main() -> Output {
+ return Output(
+ vec4<f32>(1.0, 1.0, 1.0, 1.0),
+ vec4<f32>(1.0, 1.0, 1.0, 1.0),
+ vec4<f32>(1.0, 1.0, 1.0, 1.0),
+ vec4<f32>(1.0, 1.0, 1.0, 1.0)
+ );
+ }`,
+ }),
+ entryPoint: 'main',
+ targets,
+ },
+ primitive: { topology: 'triangle-list' },
+ multisample: { count: 4 },
+ });
+
+ const resolveTargets: GPUTexture[] = [];
+ const renderPassColorAttachments: GPURenderPassColorAttachment[] = [];
+
+    // The resolve target mip level must be the same size as the color attachment. If we're resolving
+    // to mip level 1, the resolve target's base (level 0) size must be 2x the color attachment size.
+ const kResolveTargetSize = kSize << t.params.resolveTargetBaseMipLevel;
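+    // e.g. with kSize = 4 and resolveTargetBaseMipLevel = 1, the resolve target is created at 8x8
+    // so that its mip level 1 is 4x4, matching the 4x4 color attachment.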
+
+ for (let i = 0; i < t.params.numColorAttachments; i++) {
+ const colorAttachment = t.device.createTexture({
+ format: kFormat,
+ size: { width: kSize, height: kSize, depthOrArrayLayers: 1 },
+ sampleCount: 4,
+ mipLevelCount: 1,
+ usage:
+ GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ if (t.params.slotsToResolve.includes(i)) {
+ const colorAttachment = t.device.createTexture({
+ format: kFormat,
+ size: { width: kSize, height: kSize, depthOrArrayLayers: 1 },
+ sampleCount: 4,
+ mipLevelCount: 1,
+ usage:
+ GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const resolveTarget = t.device.createTexture({
+ format: kFormat,
+ size: {
+ width: kResolveTargetSize,
+ height: kResolveTargetSize,
+ depthOrArrayLayers: t.params.resolveTargetBaseArrayLayer + 1,
+ },
+ sampleCount: 1,
+ mipLevelCount: t.params.resolveTargetBaseMipLevel + 1,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ // Clear to black for the load operation. After the draw, the top left half of the attachment
+ // will be white and the bottom right half will be black.
+ renderPassColorAttachments.push({
+ view: colorAttachment.createView(),
+ clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 0.0 },
+ loadOp: 'clear',
+ storeOp: t.params.storeOperation,
+ resolveTarget: resolveTarget.createView({
+ baseMipLevel: t.params.resolveTargetBaseMipLevel,
+ baseArrayLayer: t.params.resolveTargetBaseArrayLayer,
+ }),
+ });
+
+ resolveTargets.push(resolveTarget);
+ } else {
+ renderPassColorAttachments.push({
+ view: colorAttachment.createView(),
+ clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 0.0 },
+ loadOp: 'clear',
+ storeOp: t.params.storeOperation,
+ });
+ }
+ }
+
+ const encoder = t.device.createCommandEncoder();
+
+ const pass = encoder.beginRenderPass({
+ colorAttachments: renderPassColorAttachments,
+ });
+ pass.setPipeline(pipeline);
+ pass.draw(3);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+
+ // Verify the resolve targets contain the correct values. Note that we use z to specify the
+ // array layer from which to pull the pixels for testing.
+ const z = t.params.resolveTargetBaseArrayLayer;
+ for (const resolveTarget of resolveTargets) {
+ t.expectSinglePixelComparisonsAreOkInTexture(
+ { texture: resolveTarget, mipLevel: t.params.resolveTargetBaseMipLevel },
+ [
+ // Top left pixel should be {1.0, 1.0, 1.0, 1.0}.
+ { coord: { x: 0, y: 0, z }, exp: { R: 1.0, G: 1.0, B: 1.0, A: 1.0 } },
+ // Bottom right pixel should be {0, 0, 0, 0}.
+ { coord: { x: kSize - 1, y: kSize - 1, z }, exp: { R: 0, G: 0, B: 0, A: 0 } },
+ // Top right pixel should be {0.5, 0.5, 0.5, 0.5} due to the multisampled resolve.
+ { coord: { x: kSize - 1, y: 0, z }, exp: { R: 0.5, G: 0.5, B: 0.5, A: 0.5 } },
+ ]
+ );
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pass/storeOp.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pass/storeOp.spec.ts
new file mode 100644
index 0000000000..873f473ad5
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pass/storeOp.spec.ts
@@ -0,0 +1,354 @@
+export const description = `API Operation Tests for RenderPass StoreOp.
+
+ Test Coverage:
+
+ - Tests that color and depth-stencil store operations {'discard', 'store'} work correctly for a
+ render pass with both a color attachment and depth-stencil attachment.
+ TODO: use depth24plus-stencil8
+
+ - Tests that store operations {'discard', 'store'} work correctly for a render pass with multiple
+ color attachments.
+ TODO: test with more interesting loadOp values
+
+ - Tests that store operations {'discard', 'store'} work correctly for a render pass with a color
+ attachment for:
+ - All renderable color formats
+ - mip level set to {'0', mip > '0'}
+ - array layer set to {'0', layer > '1'} for 2D textures
+ TODO: depth slice set to {'0', slice > '0'} for 3D textures
+
+ - Tests that store operations {'discard', 'store'} work correctly for a render pass with a
+ depth-stencil attachment for:
+ - All renderable depth-stencil formats
+ - mip level set to {'0', mip > '0'}
+ - array layer set to {'0', layer > '1'} for 2D textures
+ TODO: test depth24plus and depth24plus-stencil8 formats
+ TODO: test that depth and stencil aspects are set separately
+ TODO: depth slice set to {'0', slice > '0'} for 3D textures
+ TODO: test with more interesting loadOp values`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import {
+ kTextureFormatInfo,
+ kEncodableTextureFormats,
+ kSizedDepthStencilFormats,
+} from '../../../format_info.js';
+import { GPUTest } from '../../../gpu_test.js';
+import { PerTexelComponent } from '../../../util/texture/texel_data.js';
+
+// Test with a zero and non-zero mip.
+const kMipLevel: number[] = [0, 1];
+const kMipLevelCount = 2;
+
+// Test with different numbers of color attachments.
+type NumColorAttachments = 1 | 2 | 3 | 4;
+const kNumColorAttachments: NumColorAttachments[] = [1, 2, 3, 4];
+
+// Test with a zero and non-zero array layer.
+const kArrayLayers: number[] = [0, 1];
+
+const kStoreOps: GPUStoreOp[] = ['discard', 'store'];
+
+const kHeight = 2;
+const kWidth = 2;
+
+export const g = makeTestGroup(GPUTest);
+
+// Tests a render pass with both a color and depth stencil attachment to ensure store operations are
+// set independently.
+g.test('render_pass_store_op,color_attachment_with_depth_stencil_attachment')
+ .params(u =>
+ u //
+ .combine('colorStoreOperation', kStoreOps)
+ .combine('depthStencilStoreOperation', kStoreOps)
+ )
+ .fn(t => {
+ // Create a basic color attachment.
+ const kColorFormat: GPUTextureFormat = 'rgba8unorm';
+ const colorAttachment = t.device.createTexture({
+ format: kColorFormat,
+ size: { width: kWidth, height: kHeight, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const colorAttachmentView = colorAttachment.createView();
+
+ // Create a basic depth/stencil attachment.
+ const kDepthStencilFormat: GPUTextureFormat = 'depth32float';
+ const depthStencilAttachment = t.device.createTexture({
+ format: kDepthStencilFormat,
+ size: { width: kWidth, height: kHeight, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ // Color load operation will clear to {1.0, 1.0, 1.0, 1.0}.
+    // Depth load operation will clear to 1.0.
+    // Store operations are determined by the test params.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorAttachmentView,
+ clearValue: { r: 1.0, g: 1.0, b: 1.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: t.params.colorStoreOperation,
+ },
+ ],
+ depthStencilAttachment: {
+ view: depthStencilAttachment.createView(),
+ depthClearValue: 1.0,
+ depthLoadOp: 'clear',
+ depthStoreOp: t.params.depthStencilStoreOperation,
+ },
+ });
+ pass.end();
+
+ t.device.queue.submit([encoder.finish()]);
+
+ // Check that the correct store operation occurred.
+ let expectedColorValue: PerTexelComponent<number> = {};
+ if (t.params.colorStoreOperation === 'discard') {
+      // If colorStoreOperation was 'discard', the texture should now contain {0.0, 0.0, 0.0, 0.0}.
+ expectedColorValue = { R: 0.0, G: 0.0, B: 0.0, A: 0.0 };
+ } else if (t.params.colorStoreOperation === 'store') {
+      // If colorStoreOperation was 'store', the texture should still contain {1.0, 1.0, 1.0, 1.0}.
+ expectedColorValue = { R: 1.0, G: 1.0, B: 1.0, A: 1.0 };
+ }
+ t.expectSingleColor(colorAttachment, kColorFormat, {
+ size: [kHeight, kWidth, 1],
+ exp: expectedColorValue,
+ });
+
+ // Check that the correct store operation occurred.
+ let expectedDepthValue: PerTexelComponent<number> = {};
+ if (t.params.depthStencilStoreOperation === 'discard') {
+      // If depthStencilStoreOperation was 'discard', the texture's depth component should now be
+      // 0.0 (depth32float has no stencil aspect).
+ expectedDepthValue = { Depth: 0.0 };
+ } else if (t.params.depthStencilStoreOperation === 'store') {
+      // If depthStencilStoreOperation was 'store', the texture's depth component should still be
+      // 1.0.
+ expectedDepthValue = { Depth: 1.0 };
+ }
+ t.expectSingleColor(depthStencilAttachment, kDepthStencilFormat, {
+ size: [kHeight, kWidth, 1],
+ exp: expectedDepthValue,
+ layout: { mipLevel: 0, aspect: 'depth-only' },
+ });
+ });
+
+// Tests that render pass color attachment store operations work correctly for all renderable color
+// formats, mip levels and array layers.
+g.test('render_pass_store_op,color_attachment_only')
+ .params(u =>
+ u
+ .combine('colorFormat', kEncodableTextureFormats)
+ // Filter out any non-renderable formats
+ .filter(({ colorFormat }) => !!kTextureFormatInfo[colorFormat].colorRender)
+ .combine('storeOperation', kStoreOps)
+ .beginSubcases()
+ .combine('mipLevel', kMipLevel)
+ .combine('arrayLayer', kArrayLayers)
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.colorFormat);
+ })
+ .fn(t => {
+ const colorAttachment = t.device.createTexture({
+ format: t.params.colorFormat,
+ size: { width: kWidth, height: kHeight, depthOrArrayLayers: t.params.arrayLayer + 1 },
+ mipLevelCount: kMipLevelCount,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const colorViewDesc: GPUTextureViewDescriptor = {
+ baseArrayLayer: t.params.arrayLayer,
+ baseMipLevel: t.params.mipLevel,
+ mipLevelCount: 1,
+ arrayLayerCount: 1,
+ };
+
+ const colorAttachmentView = colorAttachment.createView(colorViewDesc);
+
+ // Color load operation will clear to {1.0, 0.0, 0.0, 1.0}.
+ // Color store operation is determined by the test params.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorAttachmentView,
+ clearValue: { r: 1.0, g: 0.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: t.params.storeOperation,
+ },
+ ],
+ });
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+
+ // Check that the correct store operation occurred.
+ let expectedValue: PerTexelComponent<number> = {};
+ if (t.params.storeOperation === 'discard') {
+      // If storeOperation was 'discard', the texture should now contain {0.0, 0.0, 0.0, 0.0}.
+ expectedValue = { R: 0.0, G: 0.0, B: 0.0, A: 0.0 };
+ } else if (t.params.storeOperation === 'store') {
+      // If storeOperation was 'store', the texture should still contain {1.0, 0.0, 0.0, 1.0}.
+ expectedValue = { R: 1.0, G: 0.0, B: 0.0, A: 1.0 };
+ }
+
+ t.expectSingleColor(colorAttachment, t.params.colorFormat, {
+ size: [kHeight, kWidth, 1],
+ slice: t.params.arrayLayer,
+ exp: expectedValue,
+ layout: { mipLevel: t.params.mipLevel },
+ });
+ });
+
+// Test with multiple color attachments to ensure each attachment's storeOp is set independently.
+g.test('render_pass_store_op,multiple_color_attachments')
+ .params(u =>
+ u
+ .combine('storeOperation1', kStoreOps)
+ .combine('storeOperation2', kStoreOps)
+ .beginSubcases()
+ .combine('colorAttachments', kNumColorAttachments)
+ )
+ .fn(t => {
+ const kColorFormat: GPUTextureFormat = 'rgba8unorm';
+ const colorAttachments: GPUTexture[] = [];
+
+ for (let i = 0; i < t.params.colorAttachments; i++) {
+ colorAttachments.push(
+ t.device.createTexture({
+ format: kColorFormat,
+ size: { width: kWidth, height: kHeight, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ })
+ );
+ }
+
+    // Color load operation will clear to {1.0, 1.0, 1.0, 1.0}.
+    // Color store operation is determined by the test params: storeOperation1 for even-numbered
+    // attachments and storeOperation2 for odd-numbered attachments.
+ const renderPassColorAttachments: GPURenderPassColorAttachment[] = [];
+ for (let i = 0; i < t.params.colorAttachments; i++) {
+ renderPassColorAttachments.push({
+ view: colorAttachments[i].createView(),
+ clearValue: { r: 1.0, g: 1.0, b: 1.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: i % 2 === 0 ? t.params.storeOperation1 : t.params.storeOperation2,
+ });
+ }
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: renderPassColorAttachments,
+ });
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+
+ // Check that the correct store operation occurred.
+ let expectedValue: PerTexelComponent<number> = {};
+ for (let i = 0; i < t.params.colorAttachments; i++) {
+ if (renderPassColorAttachments[i].storeOp === 'discard') {
+        // If the storeOp was 'discard', the texture should now contain {0.0, 0.0, 0.0, 0.0}.
+ expectedValue = { R: 0.0, G: 0.0, B: 0.0, A: 0.0 };
+ } else if (renderPassColorAttachments[i].storeOp === 'store') {
+        // If the storeOp was 'store', the texture should still contain {1.0, 1.0, 1.0, 1.0}.
+ expectedValue = { R: 1.0, G: 1.0, B: 1.0, A: 1.0 };
+ }
+ t.expectSingleColor(colorAttachments[i], kColorFormat, {
+ size: [kHeight, kWidth, 1],
+ exp: expectedValue,
+ });
+ }
+ });
+
+g.test('render_pass_store_op,depth_stencil_attachment_only')
+ .desc(
+ `
+Tests that render pass depth-stencil store operations work correctly for all sized depth-stencil
+formats, mip levels and array layers.
+
+- x= all (sized) depth stencil formats, all store ops, multiple mip levels, multiple array layers
+
+TODO: Also test unsized depth/stencil formats [1]
+ `
+ )
+ .params(u =>
+ u
+ .combine('depthStencilFormat', kSizedDepthStencilFormats) // [1]
+ .combine('storeOperation', kStoreOps)
+ .beginSubcases()
+ .combine('mipLevel', kMipLevel)
+ .combine('arrayLayer', kArrayLayers)
+ )
+ .fn(t => {
+ const depthStencilTexture = t.device.createTexture({
+ format: t.params.depthStencilFormat,
+ size: { width: kWidth, height: kHeight, depthOrArrayLayers: t.params.arrayLayer + 1 },
+ mipLevelCount: kMipLevelCount,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const depthStencilViewDesc: GPUTextureViewDescriptor = {
+ baseArrayLayer: t.params.arrayLayer,
+ baseMipLevel: t.params.mipLevel,
+ mipLevelCount: 1,
+ arrayLayerCount: 1,
+ };
+
+ const depthStencilAttachmentView = depthStencilTexture.createView(depthStencilViewDesc);
+
+ // Depth-stencil load operation will clear to depth = 1.0, stencil = 1.0.
+    // Depth-stencil store operation is determined by the test params.
+ const encoder = t.device.createCommandEncoder();
+ const depthStencilAttachment: GPURenderPassDepthStencilAttachment = {
+ view: depthStencilAttachmentView,
+ };
+ if (kTextureFormatInfo[t.params.depthStencilFormat].depth) {
+ depthStencilAttachment.depthClearValue = 1.0;
+ depthStencilAttachment.depthLoadOp = 'clear';
+ depthStencilAttachment.depthStoreOp = t.params.storeOperation;
+ }
+ if (kTextureFormatInfo[t.params.depthStencilFormat].stencil) {
+ depthStencilAttachment.stencilClearValue = 1;
+ depthStencilAttachment.stencilLoadOp = 'clear';
+ depthStencilAttachment.stencilStoreOp = t.params.storeOperation;
+ }
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [],
+ depthStencilAttachment,
+ });
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+
+ let expectedDepthValue: PerTexelComponent<number> = {};
+ let expectedStencilValue: PerTexelComponent<number> = {};
+ if (t.params.storeOperation === 'discard') {
+      // If storeOperation was 'discard', the texture's depth/stencil components should now be 0.
+ expectedDepthValue = { Depth: 0.0 };
+ expectedStencilValue = { Stencil: 0 };
+ } else if (t.params.storeOperation === 'store') {
+      // If storeOperation was 'store', the texture's depth/stencil components should still be 1.
+ expectedDepthValue = { Depth: 1.0 };
+ expectedStencilValue = { Stencil: 1 };
+ }
+
+ if (kTextureFormatInfo[t.params.depthStencilFormat].depth) {
+ t.expectSingleColor(depthStencilTexture, t.params.depthStencilFormat, {
+ size: [kHeight, kWidth, 1],
+ slice: t.params.arrayLayer,
+ exp: expectedDepthValue,
+ layout: { mipLevel: t.params.mipLevel, aspect: 'depth-only' },
+ });
+ }
+ if (kTextureFormatInfo[t.params.depthStencilFormat].stencil) {
+ t.expectSingleColor(depthStencilTexture, t.params.depthStencilFormat, {
+ size: [kHeight, kWidth, 1],
+ slice: t.params.arrayLayer,
+ exp: expectedStencilValue,
+ layout: { mipLevel: t.params.mipLevel, aspect: 'stencil-only' },
+ });
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pass/storeop2.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pass/storeop2.spec.ts
new file mode 100644
index 0000000000..f98435fc70
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pass/storeop2.spec.ts
@@ -0,0 +1,83 @@
+export const description = `
+renderPass store op test: the drawn quad is either stored or discarded based on the storeOp
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('storeOp_controls_whether_1x1_drawn_quad_is_stored')
+ .desc(
+ `
+TODO: is this duplicated with api,operation,render_pass,storeOp?
+TODO: needs review and rename
+`
+ )
+ .paramsSimple([
+ { storeOp: 'store', _expected: 1 }, //
+ { storeOp: 'discard', _expected: 0 },
+ ] as const)
+ .fn(t => {
+ const renderTexture = t.device.createTexture({
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ format: 'r8unorm',
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ // create render pipeline
+ const renderPipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ @vertex fn main(
+ @builtin(vertex_index) VertexIndex : u32
+ ) -> @builtin(position) vec4<f32> {
+ var pos : array<vec2<f32>, 3> = array<vec2<f32>, 3>(
+ vec2<f32>( 1.0, -1.0),
+ vec2<f32>( 1.0, 1.0),
+ vec2<f32>(-1.0, 1.0));
+ return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `
+ @fragment fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(1.0, 0.0, 0.0, 1.0);
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'r8unorm' }],
+ },
+ primitive: { topology: 'triangle-list' },
+ });
+
+ // encode pass and submit
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTexture.createView(),
+ storeOp: t.params.storeOp,
+ clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 0.0 },
+ loadOp: 'clear',
+ },
+ ],
+ });
+ pass.setPipeline(renderPipeline);
+ pass.draw(3);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+
+ // expect the render target to hold the drawn value (1) when stored, or 0 when discarded
+ t.expectSingleColor(renderTexture, 'r8unorm', {
+ size: [1, 1, 1],
+ exp: { R: t.params._expected },
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pipeline/culling_tests.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pipeline/culling_tests.spec.ts
new file mode 100644
index 0000000000..3236acfaf5
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pipeline/culling_tests.spec.ts
@@ -0,0 +1,359 @@
+export const description = `Test culling and rasterization state.
+
+Test coverage:
+Test all culling combinations of GPUFrontFace and GPUCullMode show the correct output.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { kTextureFormatInfo, SizedTextureFormat } from '../../../format_info.js';
+import { GPUTest, TextureTestMixin } from '../../../gpu_test.js';
+
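+// A face is culled when culling is enabled and its winding is the one selected for culling:
+// with cullMode 'front' that is the face whose winding equals frontFace, and with cullMode
+// 'back' it is the face whose winding differs from frontFace.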
+function faceIsCulled(face: 'cw' | 'ccw', frontFace: GPUFrontFace, cullMode: GPUCullMode): boolean {
+ return cullMode !== 'none' && (frontFace === face) === (cullMode === 'front');
+}
+
+function faceColor(face: 'cw' | 'ccw', frontFace: GPUFrontFace, cullMode: GPUCullMode): Uint8Array {
+ // front facing color is green, non front facing is red, background is blue
+ const isCulled = faceIsCulled(face, frontFace, cullMode);
+ if (!isCulled && face === frontFace) {
+ return new Uint8Array([0x00, 0xff, 0x00, 0xff]);
+ } else if (isCulled) {
+ return new Uint8Array([0x00, 0x00, 0xff, 0xff]);
+ } else {
+ return new Uint8Array([0xff, 0x00, 0x00, 0xff]);
+ }
+}
+
+class CullingTest extends TextureTestMixin(GPUTest) {
+ checkCornerPixels(
+ texture: GPUTexture,
+ expectedTopLeftColor: Uint8Array,
+ expectedBottomRightColor: Uint8Array
+ ) {
+ this.expectSinglePixelComparisonsAreOkInTexture({ texture }, [
+ { coord: { x: 0, y: 0 }, exp: expectedTopLeftColor },
+ { coord: { x: texture.width - 1, y: texture.height - 1 }, exp: expectedBottomRightColor },
+ ]);
+ }
+
+ drawFullClipSpaceTriangleAndCheckCornerPixels(
+ texture: GPUTexture,
+ format: SizedTextureFormat,
+ topology: GPUPrimitiveTopology,
+ color: Uint8Array,
+ depthStencil: GPUDepthStencilState,
+ depthStencilAttachment: GPURenderPassDepthStencilAttachment,
+ expectedTopLeftColor: Uint8Array,
+ expectedBottomRightColor: Uint8Array
+ ) {
+ const { device } = this;
+ const encoder = device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: texture.createView(),
+ loadOp: 'load',
+ storeOp: 'store',
+ },
+ ],
+ depthStencilAttachment,
+ });
+
+ pass.setPipeline(
+ device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: device.createShaderModule({
+ code: `
+ @vertex fn main(
+ @builtin(vertex_index) VertexIndex : u32
+ ) -> @builtin(position) vec4<f32> {
+ var pos : array<vec2<f32>, 3> = array<vec2<f32>, 3>(
+ vec2<f32>(-1.0, -1.0),
+ vec2<f32>( 3.0, -1.0),
+ vec2<f32>(-1.0, 3.0));
+ return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: device.createShaderModule({
+ code: `
+ @fragment fn main() -> @location(0) vec4<f32> {
+ return vec4f(${Array.from(color).map(v => v / 255)});
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format }],
+ },
+ primitive: {
+ topology,
+ },
+ depthStencil,
+ })
+ );
+ pass.draw(3);
+ pass.end();
+
+ device.queue.submit([encoder.finish()]);
+
+ this.checkCornerPixels(texture, expectedTopLeftColor, expectedBottomRightColor);
+ }
+}
+
+export const g = makeTestGroup(CullingTest);
+
+g.test('culling')
+ .desc(
+ `
+ Test 2 triangles with different winding orders:
+
+ - Test that the counterclockwise triangle has correct output for:
+ - All FrontFaces (ccw, cw)
+ - All CullModes (none, front, back)
+ - All depth stencil attachment types (none, depth24plus, depth32float, depth24plus-stencil8)
+ - Some primitive topologies (triangle-list, triangle-strip)
+
+ - Test that the clockwise triangle has correct output for:
+ - All FrontFaces (ccw, cw)
+ - All CullModes (none, front, back)
+ - All depth stencil attachment types (none, depth24plus, depth32float, depth24plus-stencil8)
+ - Some primitive topologies (triangle-list, triangle-strip)
+ `
+ )
+ .params(u =>
+ u
+ .combine('frontFace', ['ccw', 'cw'] as const)
+ .combine('cullMode', ['none', 'front', 'back'] as const)
+ .beginSubcases()
+ .combine('depthStencilFormat', [
+ null,
+ 'depth24plus',
+ 'depth32float',
+ 'depth24plus-stencil8',
+ ] as const)
+ .combine('topology', ['triangle-list', 'triangle-strip'] as const)
+ )
+ .fn(t => {
+ const { frontFace, cullMode, depthStencilFormat, topology } = t.params;
+ const size = 4;
+ const format = 'rgba8unorm';
+
+ const texture = t.device.createTexture({
+ size: { width: size, height: size, depthOrArrayLayers: 1 },
+ format,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ });
+
+ const haveStencil = depthStencilFormat && kTextureFormatInfo[depthStencilFormat].stencil;
+ let depthTexture: GPUTexture | undefined = undefined;
+ let depthStencilAttachment: GPURenderPassDepthStencilAttachment | undefined = undefined;
+ let depthStencil: GPUDepthStencilState | undefined = undefined;
+ if (depthStencilFormat) {
+ depthTexture = t.device.createTexture({
+ size: { width: size, height: size, depthOrArrayLayers: 1 },
+ format: depthStencilFormat,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ depthStencilAttachment = {
+ view: depthTexture.createView(),
+ depthClearValue: 1.0,
+ depthLoadOp: 'clear',
+ depthStoreOp: 'store',
+ };
+
+ depthStencil = {
+ format: depthStencilFormat as GPUTextureFormat,
+ depthCompare: 'less',
+ depthWriteEnabled: true,
+ };
+
+ if (haveStencil) {
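+ // Increment the stencil for every sample covered by the first draw so that the third draw,
+ // which compares the stencil with 'equal' against the default reference of 0, only passes
+ // where the first triangles did not render.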
+ depthStencilAttachment.stencilLoadOp = 'clear';
+ depthStencilAttachment.stencilStoreOp = 'store';
+ depthStencil.stencilFront = { passOp: 'increment-clamp' };
+ depthStencil.stencilBack = { passOp: 'increment-clamp' };
+ }
+ }
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: texture.createView(),
+ clearValue: [0, 0, 1, 1],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ depthStencilAttachment,
+ });
+
+ // Draw triangles with different winding orders:
+ //
+ // for triangle-list, 2 triangles
+ // 1. The top-left one is counterclockwise (CCW)
+ // 2. The bottom-right one is clockwise (CW)
+ //
+ // 0---2---+
+ // | | |
+ // | | |
+ // 1---+---4
+ // | | |
+ // | | |
+ // +---3---5
+ //
+ // for triangle-strip, 4 triangles
+ // note: for triangle-strip the index order swaps every other triangle
+ // so the order is 012, 213, 234, 435
+ //
+ // 1. The top left is counterclockwise (CCW)
+ // 2. zero size
+ // 3. zero size
+ // 4. The bottom right one is clockwise (CW)
+ //
+ // 0
+ // |
+ // |
+ // +---+---+
+ // | | |
+ // | | |
+ // 1---+---23--+---5
+ // | | |
+ // | | |
+ // +---+---+
+ // |
+ // |
+ // 4
+ pass.setPipeline(
+ t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ @vertex fn main(
+ @builtin(vertex_index) VertexIndex : u32
+ ) -> @builtin(position) vec4<f32> {
+ var pos : array<vec2<f32>, 6> = array<vec2<f32>, 6>(
+ ${
+ topology === 'triangle-list'
+ ? `
+ vec2<f32>(-1.0, 1.0),
+ vec2<f32>(-1.0, 0.0),
+ vec2<f32>( 0.0, 1.0),
+ vec2<f32>( 0.0, -1.0),
+ vec2<f32>( 1.0, 0.0),
+ vec2<f32>( 1.0, -1.0));
+ `
+ : `
+ vec2<f32>( 0.0, 2.0),
+ vec2<f32>(-2.0, 0.0),
+ vec2<f32>( 0.0, 0.0),
+ vec2<f32>( 0.0, 0.0),
+ vec2<f32>( 0.0, -2.0),
+ vec2<f32>( 2.0, 0.0));
+ `
+ }
+ return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `
+ @fragment fn main(
+ @builtin(front_facing) FrontFacing : bool
+ ) -> @location(0) vec4<f32> {
+ var color : vec4<f32>;
+ if (FrontFacing) {
+ color = vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ } else {
+ color = vec4<f32>(1.0, 0.0, 0.0, 1.0);
+ }
+ return color;
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format }],
+ },
+ primitive: {
+ topology,
+ frontFace,
+ cullMode,
+ },
+ depthStencil,
+ })
+ );
+ pass.draw(6);
+ pass.end();
+
+ t.device.queue.submit([encoder.finish()]);
+
+ // front facing color is green, non front facing is red, background is blue
+ const kCCWTriangleTopLeftColor = faceColor('ccw', frontFace, cullMode);
+ const kCWTriangleBottomRightColor = faceColor('cw', frontFace, cullMode);
+ t.checkCornerPixels(texture, kCCWTriangleTopLeftColor, kCWTriangleBottomRightColor);
+
+ if (depthTexture) {
+ // draw a triangle that covers all of clip space in yellow at the same depth
+ // as the previous triangles with the depth test set to 'less'. We should only
+ // draw yellow where the previous triangles did not.
+ depthStencilAttachment!.depthLoadOp = 'load';
+
+ if (haveStencil) {
+ depthStencilAttachment!.stencilLoadOp = 'load';
+ depthStencil!.stencilFront!.passOp = 'keep';
+ depthStencil!.stencilBack!.passOp = 'keep';
+ }
+
+ const k2ndDrawColor = new Uint8Array([255, 255, 0, 255]);
+
+ const isTopLeftCulled = faceIsCulled('ccw', frontFace, cullMode);
+ const kExpectedTopLeftColor = isTopLeftCulled ? k2ndDrawColor : kCCWTriangleTopLeftColor;
+
+ const isBottomRightCulled = faceIsCulled('cw', frontFace, cullMode);
+ const kExpectedBottomRightColor = isBottomRightCulled
+ ? k2ndDrawColor
+ : kCWTriangleBottomRightColor;
+
+ t.drawFullClipSpaceTriangleAndCheckCornerPixels(
+ texture,
+ format,
+ topology,
+ k2ndDrawColor,
+ depthStencil as GPUDepthStencilState,
+ depthStencilAttachment as GPURenderPassDepthStencilAttachment,
+ kExpectedTopLeftColor,
+ kExpectedBottomRightColor
+ );
+
+ if (haveStencil) {
+ // draw a triangle that covers all of clip space in cyan with the stencil
+ // compare set to 'equal'. The reference value defaults to 0 so we should
+ // only render cyan where the first two triangles did not.
+ depthStencil!.depthCompare = 'always';
+ depthStencil!.stencilFront!.compare = 'equal';
+ depthStencil!.stencilBack!.compare = 'equal';
+
+ const k3rdDrawColor = new Uint8Array([0, 255, 255, 255]);
+ const kExpectedTopLeftColor = isTopLeftCulled ? k3rdDrawColor : kCCWTriangleTopLeftColor;
+ const kExpectedBottomRightColor = isBottomRightCulled
+ ? k3rdDrawColor
+ : kCWTriangleBottomRightColor;
+
+ t.drawFullClipSpaceTriangleAndCheckCornerPixels(
+ texture,
+ format,
+ topology,
+ k3rdDrawColor,
+ depthStencil as GPUDepthStencilState,
+ depthStencilAttachment as GPURenderPassDepthStencilAttachment,
+ kExpectedTopLeftColor,
+ kExpectedBottomRightColor
+ );
+ }
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pipeline/overrides.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pipeline/overrides.spec.ts
new file mode 100644
index 0000000000..8778e80062
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pipeline/overrides.spec.ts
@@ -0,0 +1,453 @@
+export const description = `
+Testing render pipeline using overridable constants in vertex stage and fragment stage.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+import { PerTexelComponent } from '../../../util/texture/texel_data.js';
+
+class F extends GPUTest {
+ async ExpectShaderOutputWithConstants(
+ isAsync: boolean,
+ format: GPUTextureFormat,
+ expected: PerTexelComponent<number>,
+ vertex: GPUVertexState,
+ fragment: GPUFragmentState
+ ) {
+ const renderTarget = this.device.createTexture({
+ format,
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const descriptor: GPURenderPipelineDescriptor = {
+ layout: 'auto',
+ vertex,
+ fragment,
+ primitive: {
+ topology: 'triangle-list',
+ frontFace: 'ccw',
+ cullMode: 'back',
+ },
+ };
+
+ const promise = isAsync
+ ? this.device.createRenderPipelineAsync(descriptor)
+ : Promise.resolve(this.device.createRenderPipeline(descriptor));
+
+ const pipeline = await promise;
+ const encoder = this.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ storeOp: 'store',
+ clearValue: {
+ r: kClearValueResult.R,
+ g: kClearValueResult.G,
+ b: kClearValueResult.B,
+ a: kClearValueResult.A,
+ },
+ loadOp: 'clear',
+ },
+ ],
+ });
+ pass.setPipeline(pipeline);
+ pass.draw(3);
+ pass.end();
+ this.device.queue.submit([encoder.finish()]);
+
+ this.expectSingleColor(renderTarget, format, {
+ size: [1, 1, 1],
+ exp: expected,
+ });
+ }
+}
+
+export const g = makeTestGroup(F);
+
+const kClearValueResult = { R: 0.2, G: 0.4, B: 0.6, A: 0.8 };
+const kDefaultValueResult = { R: 1.0, G: 1.0, B: 1.0, A: 1.0 };
+
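+// With the default overrides (xright = ytop = 3.0) this triangle covers all of clip space, so the
+// fragment output reaches the render target. Overriding xright to a negative value moves every
+// vertex off-screen, and overriding ytop to a negative value flips the winding so the triangle is
+// back-face culled (the pipelines in this file use frontFace 'ccw' with cullMode 'back'); in both
+// cases the attachment keeps its clear value (kClearValueResult).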
+const kFullScreenTriangleVertexShader = `
+override xright: f32 = 3.0;
+override ytop: f32 = 3.0;
+
+@vertex fn main(
+ @builtin(vertex_index) VertexIndex : u32
+ ) -> @builtin(position) vec4<f32> {
+ var pos : array<vec2<f32>, 3> = array<vec2<f32>, 3>(
+ vec2<f32>(-1.0, ytop),
+ vec2<f32>(-1.0, -ytop),
+ vec2<f32>(xright, 0.0));
+ return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+}
+`;
+
+const kFullScreenTriangleFragmentShader = `
+override R: f32 = 1.0;
+override G: f32 = 1.0;
+override B: f32 = 1.0;
+override A: f32 = 1.0;
+
+@fragment fn main()
+ -> @location(0) vec4<f32> {
+ return vec4<f32>(R, G, B, A);
+}
+`;
+
+g.test('basic')
+ .desc(
+ `Test that the correct override constant values, or the default values when no overrides are provided at pipeline creation time, are used in the vertex and fragment shaders.`
+ )
+ .params(u =>
+ u
+ .combine('isAsync', [true, false])
+ .beginSubcases()
+ .combineWithParams([
+ {
+ expected: kDefaultValueResult,
+ vertexConstants: {},
+ fragmentConstants: {},
+ },
+ {
+ expected: kClearValueResult,
+ vertexConstants: {
+ xright: -3.0,
+ } as Record<string, GPUPipelineConstantValue>,
+ fragmentConstants: {},
+ },
+ {
+ expected: kClearValueResult,
+ vertexConstants: {
+ ytop: -3.0,
+ } as Record<string, GPUPipelineConstantValue>,
+ fragmentConstants: {},
+ },
+ {
+ expected: kDefaultValueResult,
+ vertexConstants: {
+ xright: 4.0,
+ ytop: 4.0,
+ } as Record<string, GPUPipelineConstantValue>,
+ fragmentConstants: {},
+ },
+ {
+ expected: { R: 0.0, G: 1.0, B: 0.0, A: 1.0 },
+ vertexConstants: {},
+ fragmentConstants: { R: 0.0, B: 0.0 } as Record<string, GPUPipelineConstantValue>,
+ },
+ {
+ expected: { R: 0.0, G: 0.0, B: 0.0, A: 0.0 },
+ vertexConstants: {},
+ fragmentConstants: { R: 0.0, G: 0.0, B: 0.0, A: 0.0 } as Record<
+ string,
+ GPUPipelineConstantValue
+ >,
+ },
+ ])
+ )
+ .fn(async t => {
+ const format = 'bgra8unorm';
+ await t.ExpectShaderOutputWithConstants(
+ t.params.isAsync,
+ format,
+ t.params.expected,
+ {
+ module: t.device.createShaderModule({
+ code: kFullScreenTriangleVertexShader,
+ }),
+ entryPoint: 'main',
+ constants: t.params.vertexConstants,
+ },
+ {
+ module: t.device.createShaderModule({
+ code: kFullScreenTriangleFragmentShader,
+ }),
+ entryPoint: 'main',
+ constants: t.params.fragmentConstants,
+ targets: [{ format }],
+ }
+ );
+ });
+
+g.test('precision')
+ .desc(`Test that floating-point precision is preserved for override constants`)
+ .params(u =>
+ u
+ .combine('isAsync', [true, false])
+ .beginSubcases()
+ .combineWithParams([
+ {
+ expected: { R: 3.14159, G: 1.0, B: 1.0, A: 1.0 },
+ vertexConstants: {},
+ fragmentConstants: { R: 3.14159 } as Record<string, GPUPipelineConstantValue>,
+ },
+ {
+ expected: { R: 3.141592653589793, G: 1.0, B: 1.0, A: 1.0 },
+ vertexConstants: {},
+ fragmentConstants: { R: 3.141592653589793 } as Record<string, GPUPipelineConstantValue>,
+ },
+ ])
+ )
+ .fn(async t => {
+ const format = 'rgba32float';
+ await t.ExpectShaderOutputWithConstants(
+ t.params.isAsync,
+ format,
+ t.params.expected,
+ {
+ module: t.device.createShaderModule({
+ code: kFullScreenTriangleVertexShader,
+ }),
+ entryPoint: 'main',
+ constants: t.params.vertexConstants,
+ },
+ {
+ module: t.device.createShaderModule({
+ code: kFullScreenTriangleFragmentShader,
+ }),
+ entryPoint: 'main',
+ constants: t.params.fragmentConstants,
+ targets: [{ format }],
+ }
+ );
+ });
+
+g.test('shared_shader_module')
+ .desc(
+ `Test that when the same shader module is shared by different pipelines, the constant values are still applied correctly.`
+ )
+ .params(u =>
+ u
+ .combine('isAsync', [true, false])
+ .beginSubcases()
+ .combineWithParams([
+ {
+ expected0: kClearValueResult,
+ vertexConstants0: {
+ xright: -3.0,
+ } as Record<string, GPUPipelineConstantValue>,
+ fragmentConstants0: {},
+
+ expected1: kDefaultValueResult,
+ vertexConstants1: {},
+ fragmentConstants1: {},
+ },
+ {
+ expected0: { R: 0.0, G: 0.0, B: 0.0, A: 0.0 },
+ vertexConstants0: {},
+ fragmentConstants0: { R: 0.0, G: 0.0, B: 0.0, A: 0.0 } as Record<
+ string,
+ GPUPipelineConstantValue
+ >,
+
+ expected1: kDefaultValueResult,
+ vertexConstants1: {},
+ fragmentConstants1: {},
+ },
+ {
+ expected0: { R: 1.0, G: 0.0, B: 1.0, A: 0.0 },
+ vertexConstants0: {},
+ fragmentConstants0: { R: 1.0, G: 0.0, B: 1.0, A: 0.0 } as Record<
+ string,
+ GPUPipelineConstantValue
+ >,
+
+ expected1: { R: 0.0, G: 1.0, B: 0.0, A: 1.0 },
+ vertexConstants1: {},
+ fragmentConstants1: { R: 0.0, G: 1.0, B: 0.0, A: 1.0 } as Record<
+ string,
+ GPUPipelineConstantValue
+ >,
+ },
+ ])
+ )
+ .fn(async t => {
+ const format = 'bgra8unorm';
+ const vertexModule = t.device.createShaderModule({
+ code: kFullScreenTriangleVertexShader,
+ });
+
+ const fragmentModule = t.device.createShaderModule({
+ code: kFullScreenTriangleFragmentShader,
+ });
+
+ const createPipelineFn = async (
+ vertexConstants: Record<string, GPUPipelineConstantValue>,
+ fragmentConstants: Record<string, GPUPipelineConstantValue>
+ ) => {
+ const descriptor: GPURenderPipelineDescriptor = {
+ layout: 'auto',
+ vertex: {
+ module: vertexModule,
+ entryPoint: 'main',
+ constants: vertexConstants,
+ },
+ fragment: {
+ module: fragmentModule,
+ entryPoint: 'main',
+ targets: [{ format }],
+ constants: fragmentConstants,
+ },
+ primitive: {
+ topology: 'triangle-list',
+ frontFace: 'ccw',
+ cullMode: 'back',
+ },
+ };
+
+ return t.params.isAsync
+ ? t.device.createRenderPipelineAsync(descriptor)
+ : t.device.createRenderPipeline(descriptor);
+ };
+
+ const pipeline0 = await createPipelineFn(
+ t.params.vertexConstants0,
+ t.params.fragmentConstants0
+ );
+ const pipeline1 = await createPipelineFn(
+ t.params.vertexConstants1,
+ t.params.fragmentConstants1
+ );
+
+ const renderTarget0 = t.device.createTexture({
+ format,
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ const renderTarget1 = t.device.createTexture({
+ format,
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const encoder = t.device.createCommandEncoder();
+
+ const pass0 = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget0.createView(),
+ storeOp: 'store',
+ clearValue: {
+ r: kClearValueResult.R,
+ g: kClearValueResult.G,
+ b: kClearValueResult.B,
+ a: kClearValueResult.A,
+ },
+ loadOp: 'clear',
+ },
+ ],
+ });
+ pass0.setPipeline(pipeline0);
+ pass0.draw(3);
+ pass0.end();
+
+ const pass1 = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget1.createView(),
+ storeOp: 'store',
+ clearValue: {
+ r: kClearValueResult.R,
+ g: kClearValueResult.G,
+ b: kClearValueResult.B,
+ a: kClearValueResult.A,
+ },
+ loadOp: 'clear',
+ },
+ ],
+ });
+ pass1.setPipeline(pipeline1);
+ pass1.draw(3);
+ pass1.end();
+
+ t.device.queue.submit([encoder.finish()]);
+
+ t.expectSingleColor(renderTarget0, format, {
+ size: [1, 1, 1],
+ exp: t.params.expected0,
+ });
+ t.expectSingleColor(renderTarget1, format, {
+ size: [1, 1, 1],
+ exp: t.params.expected1,
+ });
+ });
+
+g.test('multi_entry_points')
+ .desc(
+ `Test that when the same shader module is shared by the vertex and fragment stages, the constant values are still applied correctly.`
+ )
+ .params(u =>
+ u
+ .combine('isAsync', [true, false])
+ .beginSubcases()
+ .combineWithParams([
+ {
+ expected: { R: 0.8, G: 0.4, B: 0.2, A: 1.0 },
+ vertexConstants: { A: 4.0, B: 4.0 } as Record<string, GPUPipelineConstantValue>,
+ fragmentConstants: { A: 0.8, B: 0.4, C: 0.2, D: 1.0 } as Record<
+ string,
+ GPUPipelineConstantValue
+ >,
+ },
+ {
+ expected: { R: 0.8, G: 0.4, B: 0.2, A: 1.0 },
+ vertexConstants: {},
+ fragmentConstants: { A: 0.8, B: 0.4, C: 0.2, D: 1.0 } as Record<
+ string,
+ GPUPipelineConstantValue
+ >,
+ },
+ {
+ expected: kClearValueResult,
+ vertexConstants: { A: -3.0 },
+ fragmentConstants: { A: 0.8, B: 0.4, C: 0.2, D: 1.0 } as Record<
+ string,
+ GPUPipelineConstantValue
+ >,
+ },
+ ])
+ )
+ .fn(async t => {
+ const format = 'bgra8unorm';
+ const module = t.device.createShaderModule({
+ code: `
+ override A: f32 = 3.0;
+ override B: f32 = 3.0;
+ override C: f32;
+ override D: f32;
+
+ @vertex fn vertexMain(
+ @builtin(vertex_index) VertexIndex : u32
+ ) -> @builtin(position) vec4<f32> {
+ var pos : array<vec2<f32>, 3> = array<vec2<f32>, 3>(
+ vec2<f32>(-1.0, A),
+ vec2<f32>(-1.0, -A),
+ vec2<f32>(B, 0.0));
+ return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+ }
+
+ @fragment fn fragmentMain()
+ -> @location(0) vec4<f32> {
+ return vec4<f32>(A, B, C, D);
+ }
+ `,
+ });
+ await t.ExpectShaderOutputWithConstants(
+ t.params.isAsync,
+ format,
+ t.params.expected,
+ {
+ module,
+ entryPoint: 'vertexMain',
+ constants: t.params.vertexConstants,
+ },
+ {
+ module,
+ entryPoint: 'fragmentMain',
+ constants: t.params.fragmentConstants,
+ targets: [{ format }],
+ }
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pipeline/pipeline_output_targets.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pipeline/pipeline_output_targets.spec.ts
new file mode 100644
index 0000000000..bfe7a6ca6b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pipeline/pipeline_output_targets.spec.ts
@@ -0,0 +1,450 @@
+export const description = `
+- Test pipeline outputs with different color attachment number, formats, component counts, etc.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { range } from '../../../../common/util/util.js';
+import {
+ computeBytesPerSampleFromFormats,
+ kRenderableColorTextureFormats,
+ kTextureFormatInfo,
+} from '../../../format_info.js';
+import { GPUTest, TextureTestMixin } from '../../../gpu_test.js';
+import { getFragmentShaderCodeWithOutput, getPlainTypeInfo } from '../../../util/shader.js';
+import { kTexelRepresentationInfo } from '../../../util/texture/texel_data.js';
+
+const kVertexShader = `
+@vertex fn main(
+@builtin(vertex_index) VertexIndex : u32
+) -> @builtin(position) vec4<f32> {
+ var pos : array<vec2<f32>, 3> = array<vec2<f32>, 3>(
+ vec2<f32>(-1.0, -3.0),
+ vec2<f32>(3.0, 1.0),
+ vec2<f32>(-1.0, 1.0));
+ return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+}
+`;
+
+export const g = makeTestGroup(TextureTestMixin(GPUTest));
+
+// Values to write into each attachment
+// We make values different for each attachment index and each channel
+// to make sure they don't get mixed up.
+
+// Clamp alpha to 3 so we never compare a large expected value against rgb10a2uint's 2-bit alpha channel, whose maximum value is 3.
+// MAINTENANCE_TODO: Make TexelRepresentation.numericRange per-component and use that.
+const attachmentsIntWriteValues = [
+ { R: 1, G: 2, B: 3, A: 1 },
+ { R: 5, G: 6, B: 7, A: 2 },
+ { R: 9, G: 10, B: 11, A: 3 },
+ { R: 13, G: 14, B: 15, A: 0 },
+];
+const attachmentsFloatWriteValues = [
+ { R: 0.12, G: 0.34, B: 0.56, A: 0 },
+ { R: 0.78, G: 0.9, B: 0.19, A: 1 },
+ { R: 0.28, G: 0.37, B: 0.46, A: 0.3 },
+ { R: 0.55, G: 0.64, B: 0.73, A: 1 },
+];
+
+g.test('color,attachments')
+ .desc(`Test that a pipeline with sparse color attachments writes values correctly.`)
+ .params(u =>
+ u
+ .combine('format', kRenderableColorTextureFormats)
+ .beginSubcases()
+ .combine('attachmentCount', [2, 3, 4])
+ .expand('emptyAttachmentId', p => range(p.attachmentCount, i => i))
+ )
+ .beforeAllSubcases(t => {
+ const info = kTextureFormatInfo[t.params.format];
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { format, attachmentCount, emptyAttachmentId } = t.params;
+ const componentCount = kTexelRepresentationInfo[format].componentOrder.length;
+ const info = kTextureFormatInfo[format];
+
+ // Skip formats that are not color-renderable or whose combined bytes per sample exceed the device limit.
+ const pixelByteCost = kTextureFormatInfo[format].colorRender?.byteCost;
+ t.skipIf(
+ pixelByteCost === undefined ||
+ computeBytesPerSampleFromFormats(range(attachmentCount, () => format)) >
+ t.device.limits.maxColorAttachmentBytesPerSample
+ );
+
+ const writeValues =
+ info.color.type === 'sint' || info.color.type === 'uint'
+ ? attachmentsIntWriteValues
+ : attachmentsFloatWriteValues;
+
+ const renderTargets = range(attachmentCount, () =>
+ t.device.createTexture({
+ format,
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ })
+ );
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: kVertexShader,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: getFragmentShaderCodeWithOutput(
+ range(attachmentCount, i =>
+ i === emptyAttachmentId
+ ? null
+ : {
+ values: [
+ writeValues[i].R,
+ writeValues[i].G,
+ writeValues[i].B,
+ writeValues[i].A,
+ ],
+ plainType: getPlainTypeInfo(info.color.type),
+ componentCount,
+ }
+ )
+ ),
+ }),
+ entryPoint: 'main',
+ targets: range(attachmentCount, i => (i === emptyAttachmentId ? null : { format })),
+ },
+ primitive: { topology: 'triangle-list' },
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: range(attachmentCount, i =>
+ i === emptyAttachmentId
+ ? null
+ : {
+ view: renderTargets[i].createView(),
+ storeOp: 'store',
+ clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 0.5 },
+ loadOp: 'clear',
+ }
+ ),
+ });
+ pass.setPipeline(pipeline);
+ pass.draw(3);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+
+ for (let i = 0; i < attachmentCount; i++) {
+ if (i === emptyAttachmentId) {
+ continue;
+ }
+ t.expectSinglePixelComparisonsAreOkInTexture({ texture: renderTargets[i] }, [
+ { coord: { x: 0, y: 0 }, exp: writeValues[i] },
+ ]);
+ }
+ });
+
+g.test('color,component_count')
+ .desc(
+ `Test that extra components of the output (e.g. f32, vec2<f32>, vec3<f32>, vec4<f32>) are discarded.`
+ )
+ .params(u =>
+ u
+ .combine('format', kRenderableColorTextureFormats)
+ .beginSubcases()
+ .combine('componentCount', [1, 2, 3, 4])
+ .filter(x => x.componentCount >= kTexelRepresentationInfo[x.format].componentOrder.length)
+ )
+ .beforeAllSubcases(t => {
+ const info = kTextureFormatInfo[t.params.format];
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { format, componentCount } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ // expected RGBA values
+ // extra channels are discarded
+ const values = [0, 1, 0, 1];
+
+ const renderTarget = t.device.createTexture({
+ format,
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: kVertexShader,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: getFragmentShaderCodeWithOutput([
+ {
+ values,
+ plainType: getPlainTypeInfo(info.color.type),
+ componentCount,
+ },
+ ]),
+ }),
+ entryPoint: 'main',
+ targets: [{ format }],
+ },
+ primitive: { topology: 'triangle-list' },
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ storeOp: 'store',
+ clearValue: { r: 1.0, g: 0.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ },
+ ],
+ });
+ pass.setPipeline(pipeline);
+ pass.draw(3);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+
+ t.expectSingleColor(renderTarget, format, {
+ size: [1, 1, 1],
+ exp: { R: values[0], G: values[1], B: values[2], A: values[3] },
+ });
+ });
+
+g.test('color,component_count,blend')
+ .desc(
+ `Test that blending behaves correctly when:
+- fragment output has no alpha, but the src alpha is not used for the blend operation indicated by blend factors
+- attachment format has no alpha, and the dst alpha is assumed to be 1
+
+The attachment has a load value of [1, 0, 0, 1]
+`
+ )
+ .params(u =>
+ u
+ .combine('format', ['r8unorm', 'rg8unorm', 'rgba8unorm', 'bgra8unorm'] as const)
+ .beginSubcases()
+ // _result is expected values in the color attachment (extra channels are discarded)
+ // output is the fragment shader output vector
+ // 0.498 -> 0x7f, 0.502 -> 0x80
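+ // For example, for output [0, 1, 0, 0.498] with colorSrcFactor 'one' and colorDstFactor
+ // 'one-minus-src-alpha', the attachment (loaded as [1, 0, 0, 1]) blends to
+ // rgb = [0, 1, 0] * 1 + [1, 0, 0] * (1 - 0.498) = [0.502, 1, 0]; with alphaSrcFactor 'one'
+ // and alphaDstFactor 'zero', alpha = 0.498 * 1 + 1 * 0 = 0.498, giving _result [0.502, 1, 0, 0.498].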
+ .combineWithParams([
+ // fragment output has no alpha
+ {
+ _result: [0, 0, 0, 0],
+ output: [0],
+ colorSrcFactor: 'one',
+ colorDstFactor: 'zero',
+ alphaSrcFactor: 'zero',
+ alphaDstFactor: 'zero',
+ },
+ {
+ _result: [0, 0, 0, 0],
+ output: [0],
+ colorSrcFactor: 'dst-alpha',
+ colorDstFactor: 'zero',
+ alphaSrcFactor: 'zero',
+ alphaDstFactor: 'zero',
+ },
+ {
+ _result: [1, 0, 0, 0],
+ output: [0],
+ colorSrcFactor: 'one-minus-dst-alpha',
+ colorDstFactor: 'dst-alpha',
+ alphaSrcFactor: 'zero',
+ alphaDstFactor: 'one',
+ },
+ {
+ _result: [0.498, 0, 0, 0],
+ output: [0.498],
+ colorSrcFactor: 'dst-alpha',
+ colorDstFactor: 'zero',
+ alphaSrcFactor: 'zero',
+ alphaDstFactor: 'one',
+ },
+ {
+ _result: [0, 1, 0, 0],
+ output: [0, 1],
+ colorSrcFactor: 'one',
+ colorDstFactor: 'zero',
+ alphaSrcFactor: 'zero',
+ alphaDstFactor: 'zero',
+ },
+ {
+ _result: [0, 1, 0, 0],
+ output: [0, 1],
+ colorSrcFactor: 'dst-alpha',
+ colorDstFactor: 'zero',
+ alphaSrcFactor: 'zero',
+ alphaDstFactor: 'zero',
+ },
+ {
+ _result: [1, 0, 0, 0],
+ output: [0, 1],
+ colorSrcFactor: 'one-minus-dst-alpha',
+ colorDstFactor: 'dst-alpha',
+ alphaSrcFactor: 'zero',
+ alphaDstFactor: 'one',
+ },
+ {
+ _result: [0, 1, 0, 0],
+ output: [0, 1, 0],
+ colorSrcFactor: 'one',
+ colorDstFactor: 'zero',
+ alphaSrcFactor: 'zero',
+ alphaDstFactor: 'zero',
+ },
+ {
+ _result: [0, 1, 0, 0],
+ output: [0, 1, 0],
+ colorSrcFactor: 'dst-alpha',
+ colorDstFactor: 'zero',
+ alphaSrcFactor: 'zero',
+ alphaDstFactor: 'zero',
+ },
+ {
+ _result: [1, 0, 0, 0],
+ output: [0, 1, 0],
+ colorSrcFactor: 'one-minus-dst-alpha',
+ colorDstFactor: 'dst-alpha',
+ alphaSrcFactor: 'zero',
+ alphaDstFactor: 'one',
+ },
+ // fragment output has alpha
+ {
+ _result: [0.502, 1, 0, 0.498],
+ output: [0, 1, 0, 0.498],
+ colorSrcFactor: 'one',
+ colorDstFactor: 'one-minus-src-alpha',
+ alphaSrcFactor: 'one',
+ alphaDstFactor: 'zero',
+ },
+ {
+ _result: [0.502, 0.498, 0, 0.498],
+ output: [0, 1, 0, 0.498],
+ colorSrcFactor: 'src-alpha',
+ colorDstFactor: 'one-minus-src-alpha',
+ alphaSrcFactor: 'one',
+ alphaDstFactor: 'zero',
+ },
+ {
+ _result: [0, 1, 0, 0.498],
+ output: [0, 1, 0, 0.498],
+ colorSrcFactor: 'dst-alpha',
+ colorDstFactor: 'zero',
+ alphaSrcFactor: 'one',
+ alphaDstFactor: 'zero',
+ },
+ {
+ _result: [0, 1, 0, 0.498],
+ output: [0, 1, 0, 0.498],
+ colorSrcFactor: 'dst-alpha',
+ colorDstFactor: 'zero',
+ alphaSrcFactor: 'zero',
+ alphaDstFactor: 'src',
+ },
+ {
+ _result: [1, 0, 0, 1],
+ output: [0, 1, 0, 0.498],
+ colorSrcFactor: 'one-minus-dst-alpha',
+ colorDstFactor: 'dst-alpha',
+ alphaSrcFactor: 'zero',
+ alphaDstFactor: 'dst-alpha',
+ },
+ ] as const)
+ .filter(x => x.output.length >= kTexelRepresentationInfo[x.format].componentOrder.length)
+ )
+ .beforeAllSubcases(t => {
+ const info = kTextureFormatInfo[t.params.format];
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const {
+ format,
+ _result,
+ output,
+ colorSrcFactor,
+ colorDstFactor,
+ alphaSrcFactor,
+ alphaDstFactor,
+ } = t.params;
+ const componentCount = output.length;
+ const info = kTextureFormatInfo[format];
+
+ const renderTarget = t.device.createTexture({
+ format,
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: kVertexShader,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: getFragmentShaderCodeWithOutput([
+ {
+ values: output,
+ plainType: getPlainTypeInfo(info.color.type),
+ componentCount,
+ },
+ ]),
+ }),
+ entryPoint: 'main',
+ targets: [
+ {
+ format,
+ blend: {
+ color: {
+ srcFactor: colorSrcFactor,
+ dstFactor: colorDstFactor,
+ operation: 'add',
+ },
+ alpha: {
+ srcFactor: alphaSrcFactor,
+ dstFactor: alphaDstFactor,
+ operation: 'add',
+ },
+ },
+ },
+ ],
+ },
+ primitive: { topology: 'triangle-list' },
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ storeOp: 'store',
+ clearValue: { r: 1.0, g: 0.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ },
+ ],
+ });
+ pass.setPipeline(pipeline);
+ pass.draw(3);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+
+ t.expectSingleColor(renderTarget, format, {
+ size: [1, 1, 1],
+ exp: { R: _result[0], G: _result[1], B: _result[2], A: _result[3] },
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pipeline/primitive_topology.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pipeline/primitive_topology.spec.ts
new file mode 100644
index 0000000000..21817d0164
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pipeline/primitive_topology.spec.ts
@@ -0,0 +1,488 @@
+export const description = `Test primitive topology rendering.
+
+Draw a primitive using 6 vertices with each topology and check whether the expected pixels are covered.
+
+Vertex sequence and coordinates are the same for each topology:
+ - Vertex buffer = [v1, v2, v3, v4, v5, v6]
+ - Topology = [point-list, line-list, line-strip, triangle-list, triangle-strip]
+
+Test locations are framebuffer coordinates:
+ - Pixel value { valid: green, invalid: black, format: 'rgba8unorm'}
+ - Test point is valid if the pixel value equals the covered pixel value at the test location.
+ - Primitive restart occurs for strips (line-strip and triangle-strip) between [v3, v4].
+
+ Topology: point-list Valid test location(s) Invalid test location(s)
+
+ v2 v4 v6 Every vertex. Line-strip locations.
+ Triangle-list locations.
+ Triangle-strip locations.
+
+ v1 v3 v5
+
+ Topology: line-list (3 lines)
+
+ v2 v4 v6 Center of three line segments: Line-strip locations.
+ * * * {v1,V2}, {v3,v4}, and {v4,v5}. Triangle-list locations.
+ * * * Triangle-strip locations.
+ * * *
+ v1 v3 v5
+
+ Topology: line-strip (5 lines)
+
+ v2 v4 v6
+ ** ** *
+ * * * * * Line-list locations Triangle-list locations.
+ * ** ** + Center of two line segments: Triangle-strip locations.
+ v1 v3 v5 {v2,v3} and {v4,v5}.
+ With primitive restart:
+ Line segment {v3, v4}.
+
+ Topology: triangle-list (2 triangles)
+
+ v2 v4 v6
+ ** ****** Center of two triangle(s): Triangle-strip locations.
+ **** **** {v1,v2,v3} and {v4,v5,v6}.
+ ****** **
+ v1 v3 v5
+
+ Topology: triangle-strip (4 triangles)
+
+ v2 v4 v6
+ ** ****** ** ****** Triangle-list locations None.
+ **** **** **** **** + Center of two triangle(s):
+ ****** ** ****** ** {v2,v3,v4} and {v3,v4,v5}. With primitive restart:
+ v1 v3 v5 Triangle {v2, v3, v4}
+ and {v3, v4, v5}.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest, TextureTestMixin } from '../../../gpu_test.js';
+import { PerPixelComparison } from '../../../util/texture/texture_ok.js';
+
+const kRTSize: number = 56;
+const kColorFormat = 'rgba8unorm';
+const kValidPixelColor = new Uint8Array([0x00, 0xff, 0x00, 0xff]); // green
+const kInvalidPixelColor = new Uint8Array([0x00, 0x00, 0x00, 0x00]); // black
+
+class Point2D {
+ x: number;
+ y: number;
+ z: number;
+ w: number;
+
+ constructor(x: number, y: number) {
+ this.x = x;
+ this.y = y;
+ this.z = 0;
+ this.w = 1;
+ }
+
+ toNDC(): Point2D {
+ // NDC coordinate space is y-up, so we negate the y mapping.
+ // To ensure the resulting vertex in NDC is placed at the center of the pixel, we
+ // must offset the pixel coordinates by 0.5.
+ return new Point2D((2 * (this.x + 0.5)) / kRTSize - 1, (-2 * (this.y + 0.5)) / kRTSize + 1);
+ }
+
+ static getMidpoint(a: Point2D, b: Point2D) {
+ return new Point2D((a.x + b.x) / 2, (a.y + b.y) / 2);
+ }
+
+ static getCentroid(a: Point2D, b: Point2D, c: Point2D) {
+ return new Point2D((a.x + b.x + c.x) / 3, (a.y + b.y + c.y) / 3);
+ }
+}
+
+type TestLocation = PerPixelComparison<Uint8Array>;
+
+const VertexLocations = [
+ new Point2D(8, 24), // v1
+ new Point2D(16, 8), // v2
+ new Point2D(24, 24), // v3
+ new Point2D(32, 8), // v4
+ new Point2D(40, 24), // v5
+ new Point2D(48, 8), // v6
+];
+
+function getPointTestLocations(expectedColor: Uint8Array): TestLocation[] {
+ // Test points are always equal to vertex locations.
+ const testLocations: TestLocation[] = [];
+ for (const location of VertexLocations) {
+ testLocations.push({ coord: location, exp: expectedColor });
+ }
+ return testLocations;
+}
+
+function getLineTestLocations(expectedColor: Uint8Array): TestLocation[] {
+ // Midpoints of 3 line segments
+ return [
+ {
+ // Line {v1, v2}
+ coord: Point2D.getMidpoint(VertexLocations[0], VertexLocations[1]),
+ exp: expectedColor,
+ },
+ {
+ // Line {v3, v4}
+ coord: Point2D.getMidpoint(VertexLocations[2], VertexLocations[3]),
+ exp: expectedColor,
+ },
+ {
+ // Line {v5, v6}
+ coord: Point2D.getMidpoint(VertexLocations[4], VertexLocations[5]),
+ exp: expectedColor,
+ },
+ ];
+}
+
+function getPrimitiveRestartLineTestLocations(expectedColor: Uint8Array): TestLocation[] {
+ // Midpoints of 2 line segments
+ return [
+ {
+ // Line {v1, v2}
+ coord: Point2D.getMidpoint(VertexLocations[0], VertexLocations[1]),
+ exp: expectedColor,
+ },
+ {
+ // Line {v5, v6}
+ coord: Point2D.getMidpoint(VertexLocations[4], VertexLocations[5]),
+ exp: expectedColor,
+ },
+ ];
+}
+
+function getLineStripTestLocations(expectedColor: Uint8Array): TestLocation[] {
+ // Midpoints of 2 line segments
+ return [
+ {
+ // Line {v2, v3}
+ coord: Point2D.getMidpoint(VertexLocations[1], VertexLocations[2]),
+ exp: expectedColor,
+ },
+ {
+ // Line {v4, v5}
+ coord: Point2D.getMidpoint(VertexLocations[3], VertexLocations[4]),
+ exp: expectedColor,
+ },
+ ];
+}
+
+function getTriangleListTestLocations(expectedColor: Uint8Array): TestLocation[] {
+ // Center of two triangles
+ return [
+ {
+ // Triangle {v1, v2, v3}
+ coord: Point2D.getCentroid(VertexLocations[0], VertexLocations[1], VertexLocations[2]),
+ exp: expectedColor,
+ },
+ {
+ // Triangle {v4, v5, v6}
+ coord: Point2D.getCentroid(VertexLocations[3], VertexLocations[4], VertexLocations[5]),
+ exp: expectedColor,
+ },
+ ];
+}
+
+function getTriangleStripTestLocations(expectedColor: Uint8Array): TestLocation[] {
+ // Center of two triangles
+ return [
+ {
+ // Triangle {v2, v3, v4}
+ coord: Point2D.getCentroid(VertexLocations[1], VertexLocations[2], VertexLocations[3]),
+ exp: expectedColor,
+ },
+ {
+ // Triangle {v3, v4, v5}
+ coord: Point2D.getCentroid(VertexLocations[2], VertexLocations[3], VertexLocations[4]),
+ exp: expectedColor,
+ },
+ ];
+}
+
+function getDefaultTestLocations({
+ topology,
+ primitiveRestart = false,
+ invalidateLastInList = false,
+}: {
+ topology: GPUPrimitiveTopology;
+ primitiveRestart?: boolean;
+ invalidateLastInList?: boolean;
+}) {
+ function maybeInvalidateLast(locations: TestLocation[]) {
+ if (!invalidateLastInList) return locations;
+
+ return locations.map((tl, i) => {
+ if (i === locations.length - 1) {
+ return {
+ coord: tl.coord,
+ exp: kInvalidPixelColor,
+ };
+ } else {
+ return tl;
+ }
+ });
+ }
+
+ let testLocations: TestLocation[];
+ switch (topology) {
+ case 'point-list':
+ testLocations = [
+ ...getPointTestLocations(kValidPixelColor),
+ ...getLineStripTestLocations(kInvalidPixelColor),
+ ...getTriangleListTestLocations(kInvalidPixelColor),
+ ...getTriangleStripTestLocations(kInvalidPixelColor),
+ ];
+ break;
+ case 'line-list':
+ testLocations = [
+ ...maybeInvalidateLast(getLineTestLocations(kValidPixelColor)),
+ ...getLineStripTestLocations(kInvalidPixelColor),
+ ...getTriangleListTestLocations(kInvalidPixelColor),
+ ...getTriangleStripTestLocations(kInvalidPixelColor),
+ ];
+ break;
+ case 'line-strip':
+ testLocations = [
+ ...(primitiveRestart
+ ? getPrimitiveRestartLineTestLocations(kValidPixelColor)
+ : getLineTestLocations(kValidPixelColor)),
+ ...getLineStripTestLocations(kValidPixelColor),
+ ...getTriangleListTestLocations(kInvalidPixelColor),
+ ...getTriangleStripTestLocations(kInvalidPixelColor),
+ ];
+ break;
+ case 'triangle-list':
+ testLocations = [
+ ...maybeInvalidateLast(getTriangleListTestLocations(kValidPixelColor)),
+ ...getTriangleStripTestLocations(kInvalidPixelColor),
+ ];
+ break;
+ case 'triangle-strip':
+ testLocations = [
+ ...getTriangleListTestLocations(kValidPixelColor),
+ ...getTriangleStripTestLocations(primitiveRestart ? kInvalidPixelColor : kValidPixelColor),
+ ];
+ break;
+ }
+ return testLocations;
+}
+
+function generateVertexBuffer(vertexLocations: Point2D[]): Float32Array {
+ const vertexCoords = new Float32Array(vertexLocations.length * 4);
+ for (let i = 0; i < vertexLocations.length; i++) {
+ const point = vertexLocations[i].toNDC();
+ vertexCoords[i * 4 + 0] = point.x;
+ vertexCoords[i * 4 + 1] = point.y;
+ vertexCoords[i * 4 + 2] = point.z;
+ vertexCoords[i * 4 + 3] = point.w;
+ }
+ return vertexCoords;
+}
+
+const kDefaultDrawCount = 6;
+class PrimitiveTopologyTest extends TextureTestMixin(GPUTest) {
+ makeAttachmentTexture(): GPUTexture {
+ return this.device.createTexture({
+ format: kColorFormat,
+ size: { width: kRTSize, height: kRTSize, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ });
+ }
+
+ run({
+ topology,
+ indirect,
+ testLocations,
+ primitiveRestart = false,
+ drawCount = kDefaultDrawCount,
+ }: {
+ topology: GPUPrimitiveTopology;
+ indirect: boolean;
+ testLocations: TestLocation[];
+ primitiveRestart?: boolean;
+ drawCount?: number;
+ }): void {
+ const colorAttachment = this.makeAttachmentTexture();
+
+ // The color load op clears the color attachment to zero.
+ const encoder = this.device.createCommandEncoder();
+ const renderPass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorAttachment.createView(),
+ clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 0.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+
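+ // Strip topologies need stripIndexFormat set on the pipeline because the primitive-restart
+ // cases below issue indexed draws; the format also selects the restart sentinel
+ // (0xFFFFFFFF for 'uint32').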
+ let stripIndexFormat = undefined;
+ if (topology === 'triangle-strip' || topology === 'line-strip') {
+ stripIndexFormat = 'uint32' as const;
+ }
+
+ // Draw a primitive using 6 vertices based on the type.
+ // Pixels are generated based on vertex position.
+ // If point, 1 pixel is generated at each vertex location.
+ // Otherwise, >1 pixels could be generated.
+ // Output color is solid green.
+ renderPass.setPipeline(
+ this.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: this.device.createShaderModule({
+ code: `
+ @vertex fn main(
+ @location(0) pos : vec4<f32>
+ ) -> @builtin(position) vec4<f32> {
+ return pos;
+ }`,
+ }),
+ entryPoint: 'main',
+ buffers: [
+ {
+ arrayStride: 4 * Float32Array.BYTES_PER_ELEMENT,
+ attributes: [
+ {
+ format: 'float32x4',
+ offset: 0,
+ shaderLocation: 0,
+ },
+ ],
+ },
+ ],
+ },
+ fragment: {
+ module: this.device.createShaderModule({
+ code: `
+ @fragment fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: kColorFormat }],
+ },
+ primitive: {
+ topology,
+ stripIndexFormat,
+ },
+ })
+ );
+
+ // Create vertices for the primitive in a vertex buffer and bind it.
+ const vertexCoords = generateVertexBuffer(VertexLocations);
+ const vertexBuffer = this.makeBufferWithContents(vertexCoords, GPUBufferUsage.VERTEX);
+ renderPass.setVertexBuffer(0, vertexBuffer);
+
+ // Restart the strip between [v3, <restart>, v4].
+ if (primitiveRestart) {
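+ // In a Uint32Array, -1 wraps to 0xFFFFFFFF, the primitive-restart value for the
+ // 'uint32' index format.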
+ const indexBuffer = this.makeBufferWithContents(
+ new Uint32Array([0, 1, 2, -1, 3, 4, 5]),
+ GPUBufferUsage.INDEX
+ );
+ renderPass.setIndexBuffer(indexBuffer, 'uint32');
+
+ if (indirect) {
+ renderPass.drawIndexedIndirect(
+ this.makeBufferWithContents(
+ new Uint32Array([drawCount + 1, 1, 0, 0, 0]),
+ GPUBufferUsage.INDIRECT
+ ),
+ 0
+ );
+ } else {
+ renderPass.drawIndexed(drawCount + 1); // extra index for restart
+ }
+ } else {
+ if (indirect) {
+ renderPass.drawIndirect(
+ this.makeBufferWithContents(
+ new Uint32Array([drawCount, 1, 0, 0]),
+ GPUBufferUsage.INDIRECT
+ ),
+ 0
+ );
+ } else {
+ renderPass.draw(drawCount);
+ }
+ }
+
+ renderPass.end();
+
+ this.device.queue.submit([encoder.finish()]);
+ this.expectSinglePixelComparisonsAreOkInTexture({ texture: colorAttachment }, testLocations);
+ }
+}
+
+export const g = makeTestGroup(PrimitiveTopologyTest);
+
+const topologies: GPUPrimitiveTopology[] = [
+ 'point-list',
+ 'line-list',
+ 'line-strip',
+ 'triangle-list',
+ 'triangle-strip',
+];
+
+g.test('basic')
+ .desc(
+ `Compute test locations for valid and invalid pixels for each topology.
+ If the primitive covers the pixel, the color value will be |kValidPixelColor|.
+ Otherwise, a non-covered pixel will be |kInvalidPixelColor|.
+
+ Params:
+ - topology= {...all topologies}
+ - indirect= {true, false}
+ - primitiveRestart= { true, false } - always false for non-strip topologies
+ `
+ )
+ .params(u =>
+ u //
+ .combine('topology', topologies)
+ .combine('indirect', [false, true])
+ .combine('primitiveRestart', [false, true])
+ .unless(
+ p => p.primitiveRestart && p.topology !== 'line-strip' && p.topology !== 'triangle-strip'
+ )
+ )
+ .fn(t => {
+ t.run({
+ ...t.params,
+ testLocations: getDefaultTestLocations(t.params),
+ });
+ });
+
+g.test('unaligned_vertex_count')
+ .desc(
+ `Test that drawing with a vertex count that is not a multiple of the number of vertices required by the given list topology is not an error. The last, incomplete primitive is not drawn.
+
+ Params:
+ - topology= {line-list, triangle-list}
+ - indirect= {true, false}
+ - drawCount - number of vertices to draw. A value smaller than the test's default of ${kDefaultDrawCount}.
+ One smaller for line-list. One or two smaller for triangle-list.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('topology', ['line-list', 'triangle-list'] as const)
+ .combine('indirect', [false, true])
+ .expand('drawCount', function* (p) {
+ switch (p.topology) {
+ case 'line-list':
+ yield kDefaultDrawCount - 1;
+ break;
+ case 'triangle-list':
+ yield kDefaultDrawCount - 1;
+ yield kDefaultDrawCount - 2;
+ break;
+ }
+ })
+ )
+ .fn(t => {
+ const testLocations = getDefaultTestLocations({ ...t.params, invalidateLastInList: true });
+ t.run({
+ ...t.params,
+ testLocations,
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pipeline/sample_mask.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pipeline/sample_mask.spec.ts
new file mode 100644
index 0000000000..00069b777f
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pipeline/sample_mask.spec.ts
@@ -0,0 +1,806 @@
+export const description = `
+Tests that the final sample mask is the logical AND of all the relevant masks, including
+the rasterization mask, sample mask, fragment output mask, and alpha to coverage mask (when alphaToCoverageEnabled === true).
+
+Also tested:
+- The positions of samples in the standard sample patterns.
+- Per-sample interpolation sampling: @interpolate(perspective, sample).
+
+TODO: add a test without a 0th color attachment (sparse color attachment), with different color attachments and alpha value output.
+The cross-platform behavior is unknown. It could be any of:
+- coverage is always 100%
+- coverage is always 0%
+- it uses the first non-null attachment
+- it's an error
+Details can be found at: https://github.com/gpuweb/cts/issues/2201
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { assert, range } from '../../../../common/util/util.js';
+import { GPUTest, TextureTestMixin } from '../../../gpu_test.js';
+import { checkElementsPassPredicate, checkElementsEqual } from '../../../util/check_contents.js';
+import { TypeF32, TypeU32 } from '../../../util/conversion.js';
+import { TexelView } from '../../../util/texture/texel_view.js';
+
+const kColors = [
+ // Red
+ new Uint8Array([0xff, 0, 0, 0xff]),
+ // Green
+ new Uint8Array([0, 0xff, 0, 0xff]),
+ // Blue
+ new Uint8Array([0, 0, 0xff, 0xff]),
+ // Yellow
+ new Uint8Array([0xff, 0xff, 0, 0xff]),
+];
+
+const kDepthClearValue = 1.0;
+const kDepthWriteValue = 0.0;
+const kStencilClearValue = 0;
+const kStencilReferenceValue = 0xff;
+
+// Format of the render target and resolve target
+const format = 'rgba8unorm';
+
+// Format of depth stencil attachment
+const depthStencilFormat = 'depth24plus-stencil8';
+
+const kRenderTargetSize = 1;
+
+function hasSample(
+ rasterizationMask: number,
+ sampleMask: number,
+ fragmentShaderOutputMask: number,
+ sampleIndex: number = 0
+): boolean {
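+ // A sample is covered only if the corresponding bit survives the bitwise AND of the
+ // rasterization mask, the pipeline sample mask, and the fragment-output (or
+ // alpha-to-coverage) mask.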
+ return (rasterizationMask & sampleMask & fragmentShaderOutputMask & (1 << sampleIndex)) > 0;
+}
+
+function getExpectedColorData(
+ sampleCount: number,
+ rasterizationMask: number,
+ sampleMask: number,
+ fragmentShaderOutputMaskOrAlphaToCoverageMask: number
+) {
+ const expectedData = new Float32Array(sampleCount * 4);
+ if (sampleCount === 1) {
+ if (hasSample(rasterizationMask, sampleMask, fragmentShaderOutputMaskOrAlphaToCoverageMask)) {
+ // Texel 3 is sampled at the pixel center
+ expectedData[0] = kColors[3][0] / 0xff;
+ expectedData[1] = kColors[3][1] / 0xff;
+ expectedData[2] = kColors[3][2] / 0xff;
+ expectedData[3] = kColors[3][3] / 0xff;
+ }
+ } else {
+ for (let i = 0; i < sampleCount; i++) {
+ if (
+ hasSample(rasterizationMask, sampleMask, fragmentShaderOutputMaskOrAlphaToCoverageMask, i)
+ ) {
+ const o = i * 4;
+ expectedData[o + 0] = kColors[i][0] / 0xff;
+ expectedData[o + 1] = kColors[i][1] / 0xff;
+ expectedData[o + 2] = kColors[i][2] / 0xff;
+ expectedData[o + 3] = kColors[i][3] / 0xff;
+ }
+ }
+ }
+ return expectedData;
+}
+
+function getExpectedDepthData(
+ sampleCount: number,
+ rasterizationMask: number,
+ sampleMask: number,
+ fragmentShaderOutputMaskOrAlphaToCoverageMask: number
+) {
+ const expectedData = new Float32Array(sampleCount);
+ for (let i = 0; i < sampleCount; i++) {
+ const s = hasSample(
+ rasterizationMask,
+ sampleMask,
+ fragmentShaderOutputMaskOrAlphaToCoverageMask,
+ i
+ );
+ expectedData[i] = s ? kDepthWriteValue : kDepthClearValue;
+ }
+ return expectedData;
+}
+
+function getExpectedStencilData(
+ sampleCount: number,
+ rasterizationMask: number,
+ sampleMask: number,
+ fragmentShaderOutputMaskOrAlphaToCoverageMask: number
+) {
+ const expectedData = new Uint32Array(sampleCount);
+ for (let i = 0; i < sampleCount; i++) {
+ const s = hasSample(
+ rasterizationMask,
+ sampleMask,
+ fragmentShaderOutputMaskOrAlphaToCoverageMask,
+ i
+ );
+ expectedData[i] = s ? kStencilReferenceValue : kStencilClearValue;
+ }
+ return expectedData;
+}
+
+const kSampleMaskTestShader = `
+struct Varyings {
+ @builtin(position) Position : vec4<f32>,
+ @location(0) @interpolate(flat) uvFlat : vec2<f32>,
+ @location(1) @interpolate(perspective, sample) uvInterpolated : vec2<f32>,
+}
+
+//
+// Vertex shader
+//
+
+@vertex
+fn vmain(@builtin(vertex_index) VertexIndex : u32,
+ @builtin(instance_index) InstanceIndex : u32) -> Varyings {
+ // Standard sample locations within a pixel, where the pixel ranges from (-1,-1) to (1,1), and is
+ // centered at (0,0) (NDC - the test uses a 1x1 render target).
+ // https://learn.microsoft.com/en-us/windows/win32/api/d3d11/ne-d3d11-d3d11_standard_multisample_quality_levels
+ var sampleCenters = array(
+ // sampleCount = 1
+ vec2f(0, 0),
+ // sampleCount = 4
+ vec2f(-2, 6) / 8,
+ vec2f( 6, 2) / 8,
+ vec2f(-6, -2) / 8,
+ vec2f( 2, -6) / 8,
+ );
+ // A tiny quad to draw around the sample center to ensure we hit only the expected point.
+ let kTinyQuadRadius = 1.0 / 32;
+ var tinyQuad = array(
+ vec2f( kTinyQuadRadius, kTinyQuadRadius),
+ vec2f( kTinyQuadRadius, -kTinyQuadRadius),
+ vec2f(-kTinyQuadRadius, -kTinyQuadRadius),
+ vec2f( kTinyQuadRadius, kTinyQuadRadius),
+ vec2f(-kTinyQuadRadius, -kTinyQuadRadius),
+ vec2f(-kTinyQuadRadius, kTinyQuadRadius),
+ );
+
+ var uvsFlat = array(
+ // sampleCount = 1
+ // Note: avoids hitting the point between the 4 texels.
+ vec2f(0.51, 0.51),
+ // sampleCount = 4
+ vec2f(0.25, 0.25),
+ vec2f(0.75, 0.25),
+ vec2f(0.25, 0.75),
+ vec2f(0.75, 0.75),
+ );
+ var uvsInterpolated = array(
+ // center quad
+ // Note: the interpolated point will be exactly in the middle of the 4 texels.
+ // The test expects to get texel 1,1 (the 3rd texel) in this case.
+ vec2f(1.0, 0.0),
+ vec2f(1.0, 1.0),
+ vec2f(0.0, 1.0),
+ vec2f(1.0, 0.0),
+ vec2f(0.0, 1.0),
+ vec2f(0.0, 0.0),
+
+ // top-left quad (texel 0)
+ vec2f(0.5, 0.0),
+ vec2f(0.5, 0.5),
+ vec2f(0.0, 0.5),
+ vec2f(0.5, 0.0),
+ vec2f(0.0, 0.5),
+ vec2f(0.0, 0.0),
+
+ // top-right quad (texel 1)
+ vec2f(1.0, 0.0),
+ vec2f(1.0, 0.5),
+ vec2f(0.5, 0.5),
+ vec2f(1.0, 0.0),
+ vec2f(0.5, 0.5),
+ vec2f(0.5, 0.0),
+
+ // bottom-left quad (texel 2)
+ vec2f(0.5, 0.5),
+ vec2f(0.5, 1.0),
+ vec2f(0.0, 1.0),
+ vec2f(0.5, 0.5),
+ vec2f(0.0, 1.0),
+ vec2f(0.0, 0.5),
+
+ // bottom-right quad (texel 3)
+ vec2f(1.0, 0.5),
+ vec2f(1.0, 1.0),
+ vec2f(0.5, 1.0),
+ vec2f(1.0, 0.5),
+ vec2f(0.5, 1.0),
+ vec2f(0.5, 0.5)
+ );
+
+ var output : Varyings;
+ let pos = sampleCenters[InstanceIndex] + tinyQuad[VertexIndex];
+ output.Position = vec4(pos, ${kDepthWriteValue}, 1.0);
+ output.uvFlat = uvsFlat[InstanceIndex];
+ output.uvInterpolated = uvsInterpolated[InstanceIndex * 6 + VertexIndex];
+ return output;
+}
+
+//
+// Fragment shaders
+//
+
+@group(0) @binding(0) var mySampler: sampler;
+@group(0) @binding(1) var myTexture: texture_2d<f32>;
+
+// For test named 'fragment_output_mask'
+
+@group(0) @binding(2) var<uniform> fragMask: u32;
+struct FragmentOutput1 {
+ @builtin(sample_mask) mask : u32,
+ @location(0) color : vec4<f32>,
+}
+@fragment fn fmain__fragment_output_mask__flat(varyings: Varyings) -> FragmentOutput1 {
+ return FragmentOutput1(fragMask, textureSample(myTexture, mySampler, varyings.uvFlat));
+}
+@fragment fn fmain__fragment_output_mask__interp(varyings: Varyings) -> FragmentOutput1 {
+ return FragmentOutput1(fragMask, textureSample(myTexture, mySampler, varyings.uvInterpolated));
+}
+
+// For test named 'alpha_to_coverage_mask'
+
+struct FragmentOutput2 {
+ @location(0) color0 : vec4<f32>,
+ @location(1) color1 : vec4<f32>,
+}
+@group(0) @binding(2) var<uniform> alpha: vec2<f32>;
+@fragment fn fmain__alpha_to_coverage_mask__flat(varyings: Varyings) -> FragmentOutput2 {
+ var c = textureSample(myTexture, mySampler, varyings.uvFlat);
+ return FragmentOutput2(vec4(c.xyz, alpha[0]), vec4(c.xyz, alpha[1]));
+}
+@fragment fn fmain__alpha_to_coverage_mask__interp(varyings: Varyings) -> FragmentOutput2 {
+ var c = textureSample(myTexture, mySampler, varyings.uvInterpolated);
+ return FragmentOutput2(vec4(c.xyz, alpha[0]), vec4(c.xyz, alpha[1]));
+}
+`;
+
+class F extends TextureTestMixin(GPUTest) {
+ private sampleTexture: GPUTexture | undefined;
+ private sampler: GPUSampler | undefined;
+
+ override async init() {
+ await super.init();
+ if (this.isCompatibility) {
+ this.skip('WGSL sample_mask is not supported in compatibility mode');
+ }
+ // Create a 2x2 color texture to sample from
+ // texel 0 - Red
+ // texel 1 - Green
+ // texel 2 - Blue
+ // texel 3 - Yellow
+ const kSampleTextureSize = 2;
+ this.sampleTexture = this.createTextureFromTexelView(
+ TexelView.fromTexelsAsBytes(format, coord => {
+ const id = coord.x + coord.y * kSampleTextureSize;
+ return kColors[id];
+ }),
+ {
+ size: [kSampleTextureSize, kSampleTextureSize, 1],
+ usage:
+ GPUTextureUsage.TEXTURE_BINDING |
+ GPUTextureUsage.COPY_DST |
+ GPUTextureUsage.RENDER_ATTACHMENT,
+ }
+ );
+
+ this.sampler = this.device.createSampler({
+ magFilter: 'nearest',
+ minFilter: 'nearest',
+ });
+ }
+
+ GetTargetTexture(
+ sampleCount: number,
+ rasterizationMask: number,
+ pipeline: GPURenderPipeline,
+ uniformBuffer: GPUBuffer,
+ colorTargetsCount: number = 1
+ ): { color: GPUTexture; depthStencil: GPUTexture } {
+ assert(this.sampleTexture !== undefined);
+ assert(this.sampler !== undefined);
+
+ const uniformBindGroup = this.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: this.sampler,
+ },
+ {
+ binding: 1,
+ resource: this.sampleTexture.createView(),
+ },
+ {
+ binding: 2,
+ resource: {
+ buffer: uniformBuffer,
+ },
+ },
+ ],
+ });
+
+ const renderTargetTextures = [];
+ const resolveTargetTextures: (GPUTexture | null)[] = [];
+ for (let i = 0; i < colorTargetsCount; i++) {
+ const renderTargetTexture = this.device.createTexture({
+ format,
+ size: {
+ width: kRenderTargetSize,
+ height: kRenderTargetSize,
+ depthOrArrayLayers: 1,
+ },
+ sampleCount,
+ mipLevelCount: 1,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,
+ });
+ renderTargetTextures.push(renderTargetTexture);
+
+ const resolveTargetTexture =
+ sampleCount === 1
+ ? null
+ : this.device.createTexture({
+ format,
+ size: {
+ width: kRenderTargetSize,
+ height: kRenderTargetSize,
+ depthOrArrayLayers: 1,
+ },
+ sampleCount: 1,
+ mipLevelCount: 1,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ resolveTargetTextures.push(resolveTargetTexture);
+ }
+
+ const depthStencilTexture = this.device.createTexture({
+ size: {
+ width: kRenderTargetSize,
+ height: kRenderTargetSize,
+ },
+ format: depthStencilFormat,
+ sampleCount,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,
+ });
+
+ const renderPassDescriptor: GPURenderPassDescriptor = {
+ colorAttachments: renderTargetTextures.map((renderTargetTexture, index) => {
+ return {
+ view: renderTargetTexture.createView(),
+ resolveTarget: resolveTargetTextures[index]?.createView(),
+ clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 0.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ };
+ }),
+ depthStencilAttachment: {
+ view: depthStencilTexture.createView(),
+ depthClearValue: kDepthClearValue,
+ depthLoadOp: 'clear',
+ depthStoreOp: 'store',
+ stencilClearValue: kStencilClearValue,
+ stencilLoadOp: 'clear',
+ stencilStoreOp: 'store',
+ },
+ };
+ const commandEncoder = this.device.createCommandEncoder();
+ const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
+ passEncoder.setPipeline(pipeline);
+ passEncoder.setBindGroup(0, uniformBindGroup);
+ passEncoder.setStencilReference(kStencilReferenceValue);
+
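+    // Each draw covers exactly one sample: firstInstance selects which tiny quad is drawn.
+    // Instance 0 sits at the pixel center (used for sampleCount = 1); instances 1..4 sit on the
+    // four standard sample positions (used for sampleCount = 4).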
+ if (sampleCount === 1) {
+ if ((rasterizationMask & 1) !== 0) {
+ // draw center quad
+ passEncoder.draw(6, 1, 0, 0);
+ }
+ } else {
+ assert(sampleCount === 4);
+ if ((rasterizationMask & 1) !== 0) {
+ // draw top-left quad
+ passEncoder.draw(6, 1, 0, 1);
+ }
+ if ((rasterizationMask & 2) !== 0) {
+ // draw top-right quad
+ passEncoder.draw(6, 1, 0, 2);
+ }
+ if ((rasterizationMask & 4) !== 0) {
+ // draw bottom-left quad
+ passEncoder.draw(6, 1, 0, 3);
+ }
+ if ((rasterizationMask & 8) !== 0) {
+ // draw bottom-right quad
+ passEncoder.draw(6, 1, 0, 4);
+ }
+ }
+ passEncoder.end();
+ this.device.queue.submit([commandEncoder.finish()]);
+
+ return {
+ color: renderTargetTextures[0],
+ depthStencil: depthStencilTexture,
+ };
+ }
+
+ CheckColorAttachmentResult(
+ texture: GPUTexture,
+ sampleCount: number,
+ rasterizationMask: number,
+ sampleMask: number,
+ fragmentShaderOutputMask: number
+ ) {
+ const buffer = this.copySinglePixelTextureToBufferUsingComputePass(
+      TypeF32, // corresponds to the 'rgba8unorm' format
+ 4,
+ texture.createView(),
+ sampleCount
+ );
+
+ const expected = getExpectedColorData(
+ sampleCount,
+ rasterizationMask,
+ sampleMask,
+ fragmentShaderOutputMask
+ );
+ this.expectGPUBufferValuesEqual(buffer, expected);
+ }
+
+ CheckDepthStencilResult(
+ aspect: 'depth-only' | 'stencil-only',
+ depthStencilTexture: GPUTexture,
+ sampleCount: number,
+ rasterizationMask: number,
+ sampleMask: number,
+ fragmentShaderOutputMask: number
+ ) {
+ const buffer = this.copySinglePixelTextureToBufferUsingComputePass(
+ // Use f32 as the scalar type for depth (depth24plus, depth32float)
+ // Use u32 as the scalar type for stencil (stencil8)
+ aspect === 'depth-only' ? TypeF32 : TypeU32,
+ 1,
+ depthStencilTexture.createView({ aspect }),
+ sampleCount
+ );
+
+ const expected =
+ aspect === 'depth-only'
+ ? getExpectedDepthData(sampleCount, rasterizationMask, sampleMask, fragmentShaderOutputMask)
+ : getExpectedStencilData(
+ sampleCount,
+ rasterizationMask,
+ sampleMask,
+ fragmentShaderOutputMask
+ );
+ this.expectGPUBufferValuesEqual(buffer, expected);
+ }
+}
+
+export const g = makeTestGroup(F);
+
+g.test('fragment_output_mask')
+ .desc(
+ `
+Tests that the final sample mask is the logical AND of all the relevant masks -- meaning that the samples
+not included in the final mask are discarded on any attachments including
+- color outputs
+- depth tests
+- stencil operations
+
+The test draws 0, 1, or more textured quads; each sample in the standard 4-sample pattern results in a different color:
+- Sample 0, Texel 0, top-left: Red
+- Sample 1, Texel 1, top-right: Green
+- Sample 2, Texel 2, bottom-left: Blue
+- Sample 3, Texel 3, bottom-right: Yellow
+
+The test checks each sample value of the render target texture and depth stencil texture using a compute pass to
+textureLoad each sample index from the texture and write to a storage buffer to compare with expected values.
+
+- for sampleCount = { 1, 4 } and various combinations of:
+ - rasterization mask = { 0, ..., 2 ** sampleCount - 1 }
+ - sample mask = { 0, 0b0001, 0b0010, 0b0111, 0b1011, 0b1101, 0b1110, 0b1111, 0b11110 }
+ - fragment shader output @builtin(sample_mask) = { 0, 0b0001, 0b0010, 0b0111, 0b1011, 0b1101, 0b1110, 0b1111, 0b11110 }
+- [choosing 0b11110 because the 5th bit should be ignored]
+`
+ )
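+  // A minimal sketch (illustrative only; getExpectedColorData and the other expectation helpers
+  // are assumed to do the equivalent internally) of the mask combination under test, for
+  // sampleCount = 4:
+  //
+  //   const finalMask =
+  //     rasterizationMask & sampleMask & fragmentShaderOutputMask & 0b1111;
+  //
+  // A sample contributes to the color, depth, and stencil attachments only if its bit is set.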
+ .params(u =>
+ u
+ .combine('interpolated', [false, true])
+ .combine('sampleCount', [1, 4] as const)
+ .expand('rasterizationMask', function* (p) {
+ const maxMask = 2 ** p.sampleCount - 1;
+ for (let i = 0; i <= maxMask; i++) {
+ yield i;
+ }
+ })
+ .beginSubcases()
+ .combine('sampleMask', [
+ 0, 0b0001, 0b0010, 0b0111, 0b1011, 0b1101, 0b1110, 0b1111, 0b11110,
+ ] as const)
+ .combine('fragmentShaderOutputMask', [
+ 0, 0b0001, 0b0010, 0b0111, 0b1011, 0b1101, 0b1110, 0b1111, 0b11110,
+ ] as const)
+ )
+ .fn(t => {
+ const { sampleCount, rasterizationMask, sampleMask, fragmentShaderOutputMask } = t.params;
+
+ const fragmentMaskUniformBuffer = t.device.createBuffer({
+ size: 4,
+ usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST | GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(fragmentMaskUniformBuffer);
+ t.device.queue.writeBuffer(
+ fragmentMaskUniformBuffer,
+ 0,
+ new Uint32Array([fragmentShaderOutputMask])
+ );
+
+ const module = t.device.createShaderModule({ code: kSampleMaskTestShader });
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: { module, entryPoint: 'vmain' },
+ fragment: {
+ module,
+ entryPoint: `fmain__fragment_output_mask__${t.params.interpolated ? 'interp' : 'flat'}`,
+ targets: [{ format }],
+ },
+ primitive: { topology: 'triangle-list' },
+ multisample: {
+ count: sampleCount,
+ mask: sampleMask,
+ alphaToCoverageEnabled: false,
+ },
+ depthStencil: {
+ format: depthStencilFormat,
+ depthWriteEnabled: true,
+ depthCompare: 'always',
+
+ stencilFront: {
+ compare: 'always',
+ passOp: 'replace',
+ },
+ stencilBack: {
+ compare: 'always',
+ passOp: 'replace',
+ },
+ },
+ });
+
+ const { color, depthStencil } = t.GetTargetTexture(
+ sampleCount,
+ rasterizationMask,
+ pipeline,
+ fragmentMaskUniformBuffer
+ );
+
+ t.CheckColorAttachmentResult(
+ color,
+ sampleCount,
+ rasterizationMask,
+ sampleMask,
+ fragmentShaderOutputMask
+ );
+
+ t.CheckDepthStencilResult(
+ 'depth-only',
+ depthStencil,
+ sampleCount,
+ rasterizationMask,
+ sampleMask,
+ fragmentShaderOutputMask
+ );
+
+ t.CheckDepthStencilResult(
+ 'stencil-only',
+ depthStencil,
+ sampleCount,
+ rasterizationMask,
+ sampleMask,
+ fragmentShaderOutputMask
+ );
+ });
+
+g.test('alpha_to_coverage_mask')
+ .desc(
+ `
+Test that alpha_to_coverage_mask is working properly with the alpha output of color target[0].
+
+- for sampleCount = 4, alphaToCoverageEnabled = true and various combinations of:
+ - rasterization masks
+ - increasing alpha0 values of the color0 output including { < 0, = 0, = 1/16, = 2/16, ..., = 15/16, = 1, > 1 }
+ - alpha1 values of the color1 output = { 0, 0.5, 1.0 }.
+- test that for a single pixel in the { color0, color1 } and { color0, depth, stencil } outputs the final sample mask is applied to it, moreover:
+ - if alpha0 is 0.0 or less then alpha to coverage mask is 0x0,
+ - if alpha0 is 1.0 or greater then alpha to coverage mask is 0xFFFFFFFF,
+ - that the number of bits in the alpha to coverage mask is non-decreasing,
+ - that the computation of alpha to coverage mask doesn't depend on any other color output than color0,
+ - (not included in the spec): that once a sample is included in the alpha to coverage sample mask
+ it will be included for any alpha greater than or equal to the current value.
+
+The algorithm for producing the alpha-to-coverage mask is platform-dependent. The test draws a different color
+at each sample point. For any two alpha values (alpha and alpha') where 0 < alpha' < alpha < 1, the color values (color and color') must satisfy
+color' <= color.
+`
+ )
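+  // Since the alpha-to-coverage algorithm is implementation-defined, exact masks cannot be
+  // predicted for 0 < alpha0 < 1; the test instead relies on monotonicity. A minimal sketch of the
+  // underlying subset property (illustrative only, not a CTS helper):
+  //
+  //   const coverageIsNonDecreasing = (maskAtLowerAlpha: number, maskAtHigherAlpha: number) =>
+  //     (maskAtLowerAlpha & maskAtHigherAlpha) === maskAtLowerAlpha;
+  //
+  // which the per-sample >= / <= comparisons in the .fn() below check indirectly.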
+ .params(u =>
+ u
+ .combine('interpolated', [false, true])
+ .combine('sampleCount', [4] as const)
+ .expand('rasterizationMask', function* (p) {
+ const maxMask = 2 ** p.sampleCount - 1;
+ for (let i = 0; i <= maxMask; i++) {
+ yield i;
+ }
+ })
+ .beginSubcases()
+ .combine('alpha1', [0.0, 0.5, 1.0] as const)
+ )
+ .fn(async t => {
+ const { sampleCount, rasterizationMask, alpha1 } = t.params;
+ const sampleMask = 0xffffffff;
+
+ const alphaValues = new Float32Array(4); // [alpha0, alpha1, 0, 0]
+ const alphaValueUniformBuffer = t.device.createBuffer({
+ size: alphaValues.byteLength,
+ usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST | GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(alphaValueUniformBuffer);
+
+ const module = t.device.createShaderModule({ code: kSampleMaskTestShader });
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: { module, entryPoint: 'vmain' },
+ fragment: {
+ module,
+ entryPoint: `fmain__alpha_to_coverage_mask__${t.params.interpolated ? 'interp' : 'flat'}`,
+ targets: [{ format }, { format }],
+ },
+ primitive: { topology: 'triangle-list' },
+ multisample: {
+ count: sampleCount,
+ mask: sampleMask,
+ alphaToCoverageEnabled: true,
+ },
+ depthStencil: {
+ format: depthStencilFormat,
+ depthWriteEnabled: true,
+ depthCompare: 'always',
+
+ stencilFront: {
+ compare: 'always',
+ passOp: 'replace',
+ },
+ stencilBack: {
+ compare: 'always',
+ passOp: 'replace',
+ },
+ },
+ });
+
+ // { < 0, = 0, = 1/16, = 2/16, ..., = 15/16, = 1, > 1 }
+ const alpha0ParamsArray = [-0.1, ...range(16, i => i / 16), 1.0, 1.1];
+
+ const colorResultPromises = [];
+ const depthResultPromises = [];
+ const stencilResultPromises = [];
+
+ for (const alpha0 of alpha0ParamsArray) {
+ alphaValues[0] = alpha0;
+ alphaValues[1] = alpha1;
+ t.device.queue.writeBuffer(alphaValueUniformBuffer, 0, alphaValues);
+
+ const { color, depthStencil } = t.GetTargetTexture(
+ sampleCount,
+ rasterizationMask,
+ pipeline,
+ alphaValueUniformBuffer,
+ 2
+ );
+
+ const colorBuffer = t.copySinglePixelTextureToBufferUsingComputePass(
+        TypeF32, // corresponds to the 'rgba8unorm' format
+ 4,
+ color.createView(),
+ sampleCount
+ );
+ const colorResult = t.readGPUBufferRangeTyped(colorBuffer, {
+ type: Float32Array,
+ typedLength: colorBuffer.size / Float32Array.BYTES_PER_ELEMENT,
+ });
+ colorResultPromises.push(colorResult);
+
+ const depthBuffer = t.copySinglePixelTextureToBufferUsingComputePass(
+        TypeF32, // corresponds to the 'depth24plus-stencil8' format
+ 1,
+ depthStencil.createView({ aspect: 'depth-only' }),
+ sampleCount
+ );
+ const depthResult = t.readGPUBufferRangeTyped(depthBuffer, {
+ type: Float32Array,
+ typedLength: depthBuffer.size / Float32Array.BYTES_PER_ELEMENT,
+ });
+ depthResultPromises.push(depthResult);
+
+ const stencilBuffer = t.copySinglePixelTextureToBufferUsingComputePass(
+        TypeU32, // corresponds to the 'depth24plus-stencil8' format
+ 1,
+ depthStencil.createView({ aspect: 'stencil-only' }),
+ sampleCount
+ );
+ const stencilResult = t.readGPUBufferRangeTyped(stencilBuffer, {
+ type: Uint32Array,
+ typedLength: stencilBuffer.size / Uint32Array.BYTES_PER_ELEMENT,
+ });
+ stencilResultPromises.push(stencilResult);
+ }
+
+ const resultsArray = await Promise.all([
+ Promise.all(colorResultPromises),
+ Promise.all(depthResultPromises),
+ Promise.all(stencilResultPromises),
+ ]);
+
+ const checkResults = (
+ results: { data: Float32Array | Uint32Array; cleanup(): void }[],
+ getExpectedDataFn: (
+ sampleCount: number,
+ rasterizationMask: number,
+ sampleMask: number,
+ alphaToCoverageMask: number
+ ) => Float32Array | Uint32Array,
+      // The alpha-to-coverage mask should be non-decreasing as the alpha value goes up, so the
+      // resulting color and stencil values correlate positively with alpha while the resulting
+      // depth value correlates negatively with alpha.
+ positiveCorrelation: boolean
+ ) => {
+ for (let i = 0; i < results.length; i++) {
+ const result = results[i];
+ const alpha0 = alpha0ParamsArray[i];
+
+ if (alpha0 <= 0) {
+ const expected = getExpectedDataFn(sampleCount, rasterizationMask, sampleMask, 0x0);
+ const check = checkElementsEqual(result.data, expected);
+ t.expectOK(check);
+ } else if (alpha0 >= 1) {
+ const expected = getExpectedDataFn(
+ sampleCount,
+ rasterizationMask,
+ sampleMask,
+ 0xffffffff
+ );
+ const check = checkElementsEqual(result.data, expected);
+ t.expectOK(check);
+ } else {
+ assert(i > 0);
+ const prevResult = results[i - 1];
+ const check = checkElementsPassPredicate(
+ result.data,
+ (index, value) =>
+ positiveCorrelation
+ ? value >= prevResult.data[index]
+ : value <= prevResult.data[index],
+ {}
+ );
+ t.expectOK(check);
+ }
+ }
+
+ for (const result of results) {
+ result.cleanup();
+ }
+ };
+
+ // Check color results
+ checkResults(resultsArray[0], getExpectedColorData, true);
+
+ // Check depth results
+ checkResults(resultsArray[1], getExpectedDepthData, false);
+
+ // Check stencil results
+ checkResults(resultsArray[2], getExpectedStencilData, true);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pipeline/vertex_only_render_pipeline.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pipeline/vertex_only_render_pipeline.spec.ts
new file mode 100644
index 0000000000..ef2d108f1e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/render_pipeline/vertex_only_render_pipeline.spec.ts
@@ -0,0 +1,29 @@
+export const description = `
+Test vertex-only render pipeline.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+class F extends GPUTest {}
+
+export const g = makeTestGroup(F);
+
+g.test('draw_depth_and_stencil_with_vertex_only_pipeline')
+ .desc(
+ `
+TODO:
+- Test drawing depth and stencil with vertex-only render pipelines by
+ 1. Create a color attachment and depth-stencil attachment of 4 pixels in a line, clear the color
+ to RGBA(0.0, 0.0, 0.0, 0.0), depth to 0.0 and stencil to 0x0
+ 2. Use a depth and stencil test disabled vertex-only render pipeline to modify the depth of middle
+ 2 pixels to 0.5, while leaving stencil unchanged
+ 3. Use another depth and stencil test disabled vertex-only render pipeline to modify the stencil
+ of right 2 pixels to 0x1, while leaving depth unchanged
+ 4. Use a complete render pipeline to draw all 4 pixels with color RGBA(0.0, 1.0, 0.0, 1.0), but
+    with the depth test requiring depth no less than 0.5 and the stencil test requiring stencil equal to 0x1
+ 5. Validate that only the third pixel is of color RGBA(0.0, 1.0, 0.0, 1.0), and all other pixels
+ are RGBA(0.0, 0.0, 0.0, 0.0).
+`
+ )
+ .unimplemented();
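+
+// A vertex-only render pipeline is simply one created without a `fragment` stage. A minimal
+// sketch of the kind of descriptor the unimplemented test above would need (all names and values
+// here are illustrative):
+//
+//   const vertexOnlyPipeline = device.createRenderPipeline({
+//     layout: 'auto',
+//     vertex: { module: shaderModule, entryPoint: 'vs_main' },
+//     depthStencil: {
+//       format: 'depth24plus-stencil8',
+//       depthWriteEnabled: true,
+//       depthCompare: 'always',
+//     },
+//     primitive: { topology: 'triangle-list' },
+//     // No `fragment` member: the pipeline only produces depth/stencil output.
+//   });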
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/basic.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/basic.spec.ts
new file mode 100644
index 0000000000..8472943238
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/basic.spec.ts
@@ -0,0 +1,353 @@
+export const description = `
+Basic command buffer rendering tests.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { now } from '../../../../common/util/util.js';
+import { GPUTest } from '../../../gpu_test.js';
+import { checkElementsEqual } from '../../../util/check_contents.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('clear').fn(t => {
+ const dst = t.device.createBuffer({
+ size: 4,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+
+ const colorAttachment = t.device.createTexture({
+ format: 'rgba8unorm',
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ const colorAttachmentView = colorAttachment.createView();
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorAttachmentView,
+ clearValue: { r: 0.0, g: 1.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ pass.end();
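+  // copyTextureToBuffer requires bytesPerRow, when specified, to be a multiple of 256, so 256 is
+  // used even though a single rgba8unorm texel only needs 4 bytes.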
+ encoder.copyTextureToBuffer(
+ { texture: colorAttachment, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
+ { buffer: dst, bytesPerRow: 256 },
+ { width: 1, height: 1, depthOrArrayLayers: 1 }
+ );
+ t.device.queue.submit([encoder.finish()]);
+
+ t.expectGPUBufferValuesEqual(dst, new Uint8Array([0x00, 0xff, 0x00, 0xff]));
+});
+
+g.test('fullscreen_quad').fn(t => {
+ const dst = t.device.createBuffer({
+ size: 4,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+
+ const colorAttachment = t.device.createTexture({
+ format: 'rgba8unorm',
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ const colorAttachmentView = colorAttachment.createView();
+
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ @vertex fn main(
+ @builtin(vertex_index) VertexIndex : u32
+ ) -> @builtin(position) vec4<f32> {
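+            // A single oversized triangle that fully covers the [-1, 1] clip-space square, so one
+            // 3-vertex draw fills the whole 1x1 render target.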
+ var pos : array<vec2<f32>, 3> = array<vec2<f32>, 3>(
+ vec2<f32>(-1.0, -3.0),
+ vec2<f32>(3.0, 1.0),
+ vec2<f32>(-1.0, 1.0));
+ return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `
+ @fragment fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ primitive: { topology: 'triangle-list' },
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorAttachmentView,
+ storeOp: 'store',
+ clearValue: { r: 1.0, g: 0.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ },
+ ],
+ });
+ pass.setPipeline(pipeline);
+ pass.draw(3);
+ pass.end();
+ encoder.copyTextureToBuffer(
+ { texture: colorAttachment, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
+ { buffer: dst, bytesPerRow: 256 },
+ { width: 1, height: 1, depthOrArrayLayers: 1 }
+ );
+ t.device.queue.submit([encoder.finish()]);
+
+ t.expectGPUBufferValuesEqual(dst, new Uint8Array([0x00, 0xff, 0x00, 0xff]));
+});
+
+g.test('large_draw')
+ .desc(
+ `Test reasonably-sized large {draw, drawIndexed} (see also stress tests).
+
+ Tests that draw calls behave reasonably with large vertex counts for
+ non-indexed draws, large index counts for indexed draws, and large instance
+ counts in both cases. Various combinations of these counts are tested with
+ both direct and indirect draw calls.
+
+  Draw call sizes are increased incrementally over these parameters until we either run out of
+  values or the completion of a draw call exceeds a fixed time limit of 100ms.
+
+  To validate that the drawn vertices actually made it through the pipeline on
+ each draw call, we render a 3x3 target with the positions of the first and
+ last vertices of the first and last instances in different respective corners,
+ and everything else positioned to cover only one of the intermediate
+ fragments. If the output image is completely yellow, then we can reasonably
+ infer that all vertices were drawn.
+
+ Params:
+ - indexed= {true, false} - whether to test indexed or non-indexed draw calls
+ - indirect= {true, false} - whether to use indirect or direct draw calls`
+ )
+ .params(u =>
+ u //
+ .combine('indexed', [true, false])
+ .combine('indirect', [true, false])
+ )
+ .fn(async t => {
+ const { indexed, indirect } = t.params;
+
+ const kBytesPerRow = 256;
+ const dst = t.device.createBuffer({
+ size: 3 * kBytesPerRow,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+
+ const paramsBuffer = t.device.createBuffer({
+ size: 8,
+ usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
+ });
+
+ const indirectBuffer = t.device.createBuffer({
+ size: 20,
+ usage: GPUBufferUsage.INDIRECT | GPUBufferUsage.COPY_DST,
+ });
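+    // 20 bytes is large enough for both indirect argument layouts: drawIndirect reads 4 u32s
+    // (vertexCount, instanceCount, firstVertex, firstInstance) and drawIndexedIndirect reads 5
+    // (indexCount, instanceCount, firstIndex, baseVertex, firstInstance).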
+ const writeIndirectParams = (count: number, instanceCount: number) => {
+ const params = new Uint32Array(5);
+ params[0] = count; // Vertex or index count
+ params[1] = instanceCount;
+ params[2] = 0; // First vertex or index
+ params[3] = 0; // First instance (non-indexed) or base vertex (indexed)
+ params[4] = 0; // First instance (indexed)
+ t.device.queue.writeBuffer(indirectBuffer, 0, params, 0, 5);
+ };
+
+ let indexBuffer: null | GPUBuffer = null;
+ if (indexed) {
+ const kMaxIndices = 16 * 1024 * 1024;
+ indexBuffer = t.device.createBuffer({
+ size: kMaxIndices * Uint32Array.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.INDEX | GPUBufferUsage.COPY_DST,
+ mappedAtCreation: true,
+ });
+ t.trackForCleanup(indexBuffer);
+ const indexData = new Uint32Array(indexBuffer.getMappedRange());
+ for (let i = 0; i < kMaxIndices; ++i) {
+ indexData[i] = i;
+ }
+ indexBuffer.unmap();
+ }
+
+ const colorAttachment = t.device.createTexture({
+ format: 'rgba8unorm',
+ size: { width: 3, height: 3, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ const colorAttachmentView = colorAttachment.createView();
+
+ const bgLayout = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.VERTEX,
+ buffer: {},
+ },
+ ],
+ });
+
+ const bindGroup = t.device.createBindGroup({
+ layout: bgLayout,
+ entries: [
+ {
+ binding: 0,
+ resource: { buffer: paramsBuffer },
+ },
+ ],
+ });
+
+ const pipeline = t.device.createRenderPipeline({
+ layout: t.device.createPipelineLayout({ bindGroupLayouts: [bgLayout] }),
+
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ struct Params {
+ numVertices: u32,
+ numInstances: u32,
+ };
+
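+          // The pixel centers of the 3x3 target lie at NDC coordinates -2/3, 0, and +2/3;
+          // selectValue maps the first index to -2/3, the last to +2/3, and everything in
+          // between to 0 (the middle pixel).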
+ fn selectValue(index: u32, maxIndex: u32) -> f32 {
+ let highOrMid = select(0.0, 2.0 / 3.0, index == maxIndex - 1u);
+ return select(highOrMid, -2.0 / 3.0, index == 0u);
+ }
+
+ @group(0) @binding(0) var<uniform> params: Params;
+
+ @vertex fn main(
+ @builtin(vertex_index) v: u32,
+ @builtin(instance_index) i: u32)
+ -> @builtin(position) vec4<f32> {
+ let x = selectValue(v, params.numVertices);
+ let y = -selectValue(i, params.numInstances);
+ return vec4<f32>(x, y, 0.0, 1.0);
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `
+ @fragment fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(1.0, 1.0, 0.0, 1.0);
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ primitive: { topology: 'point-list' },
+ });
+
+ const runPipeline = (numVertices: number, numInstances: number) => {
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorAttachmentView,
+ storeOp: 'store',
+ clearValue: { r: 0.0, g: 0.0, b: 1.0, a: 1.0 },
+ loadOp: 'clear',
+ },
+ ],
+ });
+
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ if (indexBuffer !== null) {
+ pass.setIndexBuffer(indexBuffer, 'uint32');
+ }
+
+ if (indirect) {
+ writeIndirectParams(numVertices, numInstances);
+ if (indexed) {
+ pass.drawIndexedIndirect(indirectBuffer, 0);
+ } else {
+ pass.drawIndirect(indirectBuffer, 0);
+ }
+ } else {
+ if (indexed) {
+ pass.drawIndexed(numVertices, numInstances);
+ } else {
+ pass.draw(numVertices, numInstances);
+ }
+ }
+ pass.end();
+ encoder.copyTextureToBuffer(
+ { texture: colorAttachment, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
+ { buffer: dst, bytesPerRow: kBytesPerRow },
+ { width: 3, height: 3, depthOrArrayLayers: 1 }
+ );
+
+ const params = new Uint32Array([numVertices, numInstances]);
+ t.device.queue.writeBuffer(paramsBuffer, 0, params, 0, 2);
+ t.device.queue.submit([encoder.finish()]);
+
+ const yellow = [0xff, 0xff, 0x00, 0xff];
+ const allYellow = new Uint8Array([...yellow, ...yellow, ...yellow]);
+ for (const row of [0, 1, 2]) {
+ t.expectGPUBufferValuesPassCheck(dst, data => checkElementsEqual(data, allYellow), {
+ srcByteOffset: row * 256,
+ type: Uint8Array,
+ typedLength: 12,
+ });
+ }
+ };
+
+ // If any iteration takes longer than this, we stop incrementing along that
+ // branch and move on to the next instance count. Note that the max
+ // supported vertex count for any iteration is 2**24 due to our choice of
+ // index buffer size.
+ const maxDurationMs = 100;
+ const counts = [
+ {
+ numInstances: 4,
+ vertexCounts: [2 ** 10, 2 ** 16, 2 ** 18, 2 ** 20, 2 ** 22, 2 ** 24],
+ },
+ {
+ numInstances: 2 ** 8,
+ vertexCounts: [2 ** 10, 2 ** 16, 2 ** 18, 2 ** 20, 2 ** 22],
+ },
+ {
+ numInstances: 2 ** 10,
+ vertexCounts: [2 ** 8, 2 ** 10, 2 ** 12, 2 ** 16, 2 ** 18, 2 ** 20],
+ },
+ {
+ numInstances: 2 ** 16,
+ vertexCounts: [2 ** 4, 2 ** 8, 2 ** 10, 2 ** 12, 2 ** 14],
+ },
+ {
+ numInstances: 2 ** 20,
+ vertexCounts: [2 ** 4, 2 ** 8, 2 ** 10],
+ },
+ ];
+ for (const { numInstances, vertexCounts } of counts) {
+ for (const numVertices of vertexCounts) {
+ const start = now();
+ runPipeline(numVertices, numInstances);
+ await t.device.queue.onSubmittedWorkDone();
+ const duration = now() - start;
+ if (duration >= maxDurationMs) {
+ break;
+ }
+ }
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/color_target_state.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/color_target_state.spec.ts
new file mode 100644
index 0000000000..1290c6bc99
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/color_target_state.spec.ts
@@ -0,0 +1,818 @@
+export const description = `
+Test blending results.
+
+TODO:
+- Test result for all combinations of args (make sure each case is distinguishable from others)
+- Test underflow/overflow has consistent behavior
+- ?
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { assert, TypedArrayBufferView, unreachable } from '../../../../common/util/util.js';
+import { kBlendFactors, kBlendOperations } from '../../../capability_info.js';
+import { GPUConst } from '../../../constants.js';
+import { kEncodableTextureFormats, kTextureFormatInfo } from '../../../format_info.js';
+import { GPUTest, TextureTestMixin } from '../../../gpu_test.js';
+import { clamp } from '../../../util/math.js';
+import { TexelView } from '../../../util/texture/texel_view.js';
+
+class BlendingTest extends GPUTest {
+ createRenderPipelineForTest(colorTargetState: GPUColorTargetState): GPURenderPipeline {
+ return this.device.createRenderPipeline({
+ layout: 'auto',
+ fragment: {
+ targets: [colorTargetState],
+ module: this.device.createShaderModule({
+ code: `
+ struct Params {
+ color : vec4<f32>
+ }
+ @group(0) @binding(0) var<uniform> params : Params;
+ @fragment fn main() -> @location(0) vec4<f32> {
+ return params.color;
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ vertex: {
+ module: this.device.createShaderModule({
+ code: `
+ @vertex fn main(
+ @builtin(vertex_index) VertexIndex : u32
+ ) -> @builtin(position) vec4<f32> {
+ var pos = array<vec2<f32>, 3>(
+ vec2<f32>(-1.0, -1.0),
+ vec2<f32>(3.0, -1.0),
+ vec2<f32>(-1.0, 3.0));
+ return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ });
+ }
+
+ createBindGroupForTest(layout: GPUBindGroupLayout, data: TypedArrayBufferView): GPUBindGroup {
+ return this.device.createBindGroup({
+ layout,
+ entries: [
+ {
+ binding: 0,
+ resource: {
+ buffer: this.makeBufferWithContents(data, GPUBufferUsage.UNIFORM),
+ },
+ },
+ ],
+ });
+ }
+}
+
+export const g = makeTestGroup(TextureTestMixin(BlendingTest));
+
+function mapColor(
+ col: GPUColorDict,
+ f: (v: number, k: keyof GPUColorDict) => number
+): GPUColorDict {
+ return {
+ r: f(col.r, 'r'),
+ g: f(col.g, 'g'),
+ b: f(col.b, 'b'),
+ a: f(col.a, 'a'),
+ };
+}
+
+function computeBlendFactor(
+ src: GPUColorDict,
+ dst: GPUColorDict,
+ blendColor: GPUColorDict | undefined,
+ factor: GPUBlendFactor
+): GPUColorDict {
+ switch (factor) {
+ case 'zero':
+ return { r: 0, g: 0, b: 0, a: 0 };
+ case 'one':
+ return { r: 1, g: 1, b: 1, a: 1 };
+ case 'src':
+ return { ...src };
+ case 'one-minus-src':
+ return mapColor(src, v => 1 - v);
+ case 'src-alpha':
+ return mapColor(src, () => src.a);
+ case 'one-minus-src-alpha':
+ return mapColor(src, () => 1 - src.a);
+ case 'dst':
+ return { ...dst };
+ case 'one-minus-dst':
+ return mapColor(dst, v => 1 - v);
+ case 'dst-alpha':
+ return mapColor(dst, () => dst.a);
+ case 'one-minus-dst-alpha':
+ return mapColor(dst, () => 1 - dst.a);
+ case 'src-alpha-saturated': {
+ const f = Math.min(src.a, 1 - dst.a);
+ return { r: f, g: f, b: f, a: 1 };
+ }
+ case 'constant':
+ assert(blendColor !== undefined);
+ return { ...blendColor };
+ case 'one-minus-constant':
+ assert(blendColor !== undefined);
+ return mapColor(blendColor, v => 1 - v);
+ default:
+ unreachable();
+ }
+}
+
+function computeBlendOperation(
+ src: GPUColorDict,
+ srcFactor: GPUColorDict,
+ dst: GPUColorDict,
+ dstFactor: GPUColorDict,
+ operation: GPUBlendOperation
+) {
+ switch (operation) {
+ case 'add':
+ return mapColor(src, (_, k) => srcFactor[k] * src[k] + dstFactor[k] * dst[k]);
+ case 'max':
+ return mapColor(src, (_, k) => Math.max(src[k], dst[k]));
+ case 'min':
+ return mapColor(src, (_, k) => Math.min(src[k], dst[k]));
+ case 'reverse-subtract':
+ return mapColor(src, (_, k) => dstFactor[k] * dst[k] - srcFactor[k] * src[k]);
+ case 'subtract':
+ return mapColor(src, (_, k) => srcFactor[k] * src[k] - dstFactor[k] * dst[k]);
+ }
+}
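+
+// Worked example (values are illustrative and not used by the test): classic "source-over"
+// blending, i.e. srcFactor = 'src-alpha', dstFactor = 'one-minus-src-alpha', operation = 'add':
+//
+//   const src = { r: 0.5, g: 0.0, b: 0.0, a: 0.5 };
+//   const dst = { r: 0.0, g: 0.0, b: 1.0, a: 1.0 };
+//   const out = computeBlendOperation(
+//     src,
+//     computeBlendFactor(src, dst, undefined, 'src-alpha'),
+//     dst,
+//     computeBlendFactor(src, dst, undefined, 'one-minus-src-alpha'),
+//     'add'
+//   );
+//   // out = { r: 0.25, g: 0.0, b: 0.5, a: 0.75 }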
+
+g.test('blending,GPUBlendComponent')
+ .desc(
+ `Test all combinations of parameters for GPUBlendComponent.
+
+ Tests that parameters are correctly passed to the backend API and blend computations
+ are done correctly by blending a single pixel. The test uses rgba16float as the format
+ to avoid checking clamping behavior (tested in api,operation,rendering,blending:clamp,*).
+
+ Params:
+ - component= {color, alpha} - whether to test blending the color or the alpha component.
+ - srcFactor= {...all GPUBlendFactors}
+ - dstFactor= {...all GPUBlendFactors}
+ - operation= {...all GPUBlendOperations}`
+ )
+ .params(u =>
+ u //
+ .combine('component', ['color', 'alpha'] as const)
+ .combine('srcFactor', kBlendFactors)
+ .combine('dstFactor', kBlendFactors)
+ .combine('operation', kBlendOperations)
+ .filter(t => {
+ if (t.operation === 'min' || t.operation === 'max') {
+ return t.srcFactor === 'one' && t.dstFactor === 'one';
+ }
+ return true;
+ })
+ .beginSubcases()
+ .combine('srcColor', [{ r: 0.11, g: 0.61, b: 0.81, a: 0.44 }])
+ .combine('dstColor', [
+ { r: 0.51, g: 0.22, b: 0.71, a: 0.33 },
+ { r: 0.09, g: 0.73, b: 0.93, a: 0.81 },
+ ])
+ .expand('blendConstant', p => {
+ const needsBlendConstant =
+ p.srcFactor === 'one-minus-constant' ||
+ p.srcFactor === 'constant' ||
+ p.dstFactor === 'one-minus-constant' ||
+ p.dstFactor === 'constant';
+ return needsBlendConstant ? [{ r: 0.91, g: 0.82, b: 0.73, a: 0.64 }] : [undefined];
+ })
+ )
+ .fn(t => {
+ const textureFormat: GPUTextureFormat = 'rgba16float';
+ const srcColor = t.params.srcColor;
+ const dstColor = t.params.dstColor;
+ const blendConstant = t.params.blendConstant;
+
+ const srcFactor = computeBlendFactor(srcColor, dstColor, blendConstant, t.params.srcFactor);
+ const dstFactor = computeBlendFactor(srcColor, dstColor, blendConstant, t.params.dstFactor);
+
+ const expectedColor = computeBlendOperation(
+ srcColor,
+ srcFactor,
+ dstColor,
+ dstFactor,
+ t.params.operation
+ );
+
+ switch (t.params.component) {
+ case 'color':
+ expectedColor.a = srcColor.a;
+ break;
+ case 'alpha':
+ expectedColor.r = srcColor.r;
+ expectedColor.g = srcColor.g;
+ expectedColor.b = srcColor.b;
+ break;
+ }
+
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ fragment: {
+ targets: [
+ {
+ format: textureFormat,
+ blend: {
+ // Set both color/alpha to defaults...
+ color: {},
+ alpha: {},
+ // ... but then override the component we're testing.
+ [t.params.component]: {
+ srcFactor: t.params.srcFactor,
+ dstFactor: t.params.dstFactor,
+ operation: t.params.operation,
+ },
+ },
+ },
+ ],
+ module: t.device.createShaderModule({
+ code: `
+struct Uniform {
+ color: vec4<f32>
+};
+@group(0) @binding(0) var<uniform> u : Uniform;
+
+@fragment fn main() -> @location(0) vec4<f32> {
+ return u.color;
+}
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+@vertex fn main() -> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+}
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ primitive: {
+ topology: 'point-list',
+ },
+ });
+
+ const renderTarget = t.device.createTexture({
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ size: [1, 1, 1],
+ format: textureFormat,
+ });
+
+ const commandEncoder = t.device.createCommandEncoder();
+ const renderPass = commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ clearValue: dstColor,
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ renderPass.setPipeline(pipeline);
+ if (blendConstant) {
+ renderPass.setBlendConstant(blendConstant);
+ }
+ renderPass.setBindGroup(
+ 0,
+ t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: {
+ buffer: t.makeBufferWithContents(
+ new Float32Array([srcColor.r, srcColor.g, srcColor.b, srcColor.a]),
+ GPUBufferUsage.UNIFORM
+ ),
+ },
+ },
+ ],
+ })
+ );
+ renderPass.draw(1);
+ renderPass.end();
+
+ t.device.queue.submit([commandEncoder.finish()]);
+
+ t.expectSinglePixelComparisonsAreOkInTexture(
+ { texture: renderTarget },
+ [
+ {
+ coord: { x: 0, y: 0 },
+ exp: { R: expectedColor.r, G: expectedColor.g, B: expectedColor.b, A: expectedColor.a },
+ },
+ ],
+ { maxFractionalDiff: 0.003 }
+ );
+ });
+
+const kBlendableFormats = kEncodableTextureFormats.filter(f => {
+ const info = kTextureFormatInfo[f];
+ return info.renderable && info.sampleType === 'float';
+});
+
+g.test('blending,formats')
+ .desc(
+    `Test that blending works for all formats that support it, and that blending is not applied
+ for formats that do not. Blending should be done in linear space for srgb formats.`
+ )
+ .params(u =>
+ u //
+ .combine('format', kBlendableFormats)
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ })
+ .fn(t => {
+ const { format } = t.params;
+
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ fragment: {
+ targets: [
+ {
+ format,
+ blend: {
+ color: { srcFactor: 'one', dstFactor: 'one', operation: 'add' },
+ alpha: { srcFactor: 'one', dstFactor: 'one', operation: 'add' },
+ },
+ },
+ ],
+ module: t.device.createShaderModule({
+ code: `
+@fragment fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(0.4, 0.4, 0.4, 0.4);
+}
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+@vertex fn main() -> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+}
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ primitive: {
+ topology: 'point-list',
+ },
+ });
+
+ const renderTarget = t.device.createTexture({
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ size: [1, 1, 1],
+ format,
+ });
+
+ const commandEncoder = t.device.createCommandEncoder();
+ const renderPass = commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ clearValue: { r: 0.2, g: 0.2, b: 0.2, a: 0.2 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ renderPass.setPipeline(pipeline);
+ renderPass.draw(1);
+ renderPass.end();
+ t.device.queue.submit([commandEncoder.finish()]);
+
+ const expColor = { R: 0.6, G: 0.6, B: 0.6, A: 0.6 };
+ const expTexelView = TexelView.fromTexelsAsColors(format, _coords => expColor);
+ t.expectTexelViewComparisonIsOkInTexture({ texture: renderTarget }, expTexelView, [1, 1, 1]);
+ });
+
+g.test('blend_constant,initial')
+ .desc(`Test that the blend constant is set to [0,0,0,0] at the beginning of a pass.`)
+ .fn(t => {
+ const format = 'rgba8unorm';
+ const kSize = 1;
+ const kWhiteColorData = new Float32Array([255, 255, 255, 255]);
+
+ const blendComponent = { srcFactor: 'constant', dstFactor: 'one', operation: 'add' } as const;
+ const testPipeline = t.createRenderPipelineForTest({
+ format,
+ blend: { color: blendComponent, alpha: blendComponent },
+ });
+
+ const renderTarget = t.device.createTexture({
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ size: [kSize, kSize],
+ format,
+ });
+
+ const commandEncoder = t.device.createCommandEncoder();
+ const renderPass = commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ loadOp: 'load',
+ storeOp: 'store',
+ },
+ ],
+ });
+ renderPass.setPipeline(testPipeline);
+ renderPass.setBindGroup(
+ 0,
+ t.createBindGroupForTest(testPipeline.getBindGroupLayout(0), kWhiteColorData)
+ );
+ renderPass.draw(3);
+ // Draw [1,1,1,1] with `src * constant + dst * 1`.
+ // The blend constant defaults to [0,0,0,0], so the result is
+ // `[1,1,1,1] * [0,0,0,0] + [0,0,0,0] * 1` = [0,0,0,0].
+ renderPass.end();
+ t.device.queue.submit([commandEncoder.finish()]);
+
+    // Check that the initial blend constant is black (0,0,0,0), even though testPipeline's
+    // uniform buffer holds white color data.
+ const expColor = { R: 0, G: 0, B: 0, A: 0 };
+ const expTexelView = TexelView.fromTexelsAsColors(format, _coords => expColor);
+ t.expectTexelViewComparisonIsOkInTexture({ texture: renderTarget }, expTexelView, [
+ kSize,
+ kSize,
+ ]);
+ });
+
+g.test('blend_constant,setting')
+ .desc(`Test that setting the blend constant to the RGBA values works at the beginning of a pass.`)
+ .paramsSubcasesOnly([
+ { r: 1.0, g: 1.0, b: 1.0, a: 1.0 },
+ { r: 0.5, g: 1.0, b: 0.5, a: 0.0 },
+ { r: 0.0, g: 0.0, b: 0.0, a: 0.0 },
+ ])
+ .fn(t => {
+ const { r, g, b, a } = t.params;
+
+ const format = 'rgba8unorm';
+ const kSize = 1;
+ const kWhiteColorData = new Float32Array([255, 255, 255, 255]);
+
+ const blendComponent = { srcFactor: 'constant', dstFactor: 'one', operation: 'add' } as const;
+ const testPipeline = t.createRenderPipelineForTest({
+ format,
+ blend: { color: blendComponent, alpha: blendComponent },
+ });
+
+ const renderTarget = t.device.createTexture({
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ size: [kSize, kSize],
+ format,
+ });
+
+ const commandEncoder = t.device.createCommandEncoder();
+ const renderPass = commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ loadOp: 'load',
+ storeOp: 'store',
+ },
+ ],
+ });
+ renderPass.setPipeline(testPipeline);
+ renderPass.setBlendConstant({ r, g, b, a });
+ renderPass.setBindGroup(
+ 0,
+ t.createBindGroupForTest(testPipeline.getBindGroupLayout(0), kWhiteColorData)
+ );
+ renderPass.draw(3);
+    // Draw [1,1,1,1] with `src * constant + dst * 1`. The blend constant is set to [r,g,b,a], so
+    // the result is `[1,1,1,1] * [r,g,b,a] + [0,0,0,0] * 1` = [r,g,b,a].
+ renderPass.end();
+ t.device.queue.submit([commandEncoder.finish()]);
+
+ // Check that the blend constant is the same as the given constant after setting the constant
+ // via setBlendConstant.
+ const expColor = { R: r, G: g, B: b, A: a };
+ const expTexelView = TexelView.fromTexelsAsColors(format, _coords => expColor);
+
+ t.expectTexelViewComparisonIsOkInTexture({ texture: renderTarget }, expTexelView, [
+ kSize,
+ kSize,
+ ]);
+ });
+
+g.test('blend_constant,not_inherited')
+ .desc(`Test that the blending constant is not inherited between render passes.`)
+ .fn(t => {
+ const format = 'rgba8unorm';
+ const kSize = 1;
+ const kWhiteColorData = new Float32Array([255, 255, 255, 255]);
+
+ const blendComponent = { srcFactor: 'constant', dstFactor: 'one', operation: 'add' } as const;
+ const testPipeline = t.createRenderPipelineForTest({
+ format,
+ blend: { color: blendComponent, alpha: blendComponent },
+ });
+
+ const renderTarget = t.device.createTexture({
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ size: [kSize, kSize],
+ format,
+ });
+
+ const commandEncoder = t.device.createCommandEncoder();
+ {
+ const renderPass = commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ loadOp: 'load',
+ storeOp: 'store',
+ },
+ ],
+ });
+ renderPass.setPipeline(testPipeline);
+ renderPass.setBlendConstant({ r: 1.0, g: 1.0, b: 1.0, a: 1.0 }); // Set to white color.
+ renderPass.setBindGroup(
+ 0,
+ t.createBindGroupForTest(testPipeline.getBindGroupLayout(0), kWhiteColorData)
+ );
+ renderPass.draw(3);
+      // Draw [1,1,1,1] with `src * constant + dst * 1`. The blend constant is set to [1,1,1,1],
+      // so the result is `[1,1,1,1] * [1,1,1,1] + [0,0,0,0] * 1` = [1,1,1,1].
+ renderPass.end();
+ }
+ {
+ const renderPass = commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ renderPass.setPipeline(testPipeline);
+ renderPass.setBindGroup(
+ 0,
+ t.createBindGroupForTest(testPipeline.getBindGroupLayout(0), kWhiteColorData)
+ );
+ renderPass.draw(3);
+ // Draw [1,1,1,1] with `src * constant + dst * 1`. The blend constant defaults to [0,0,0,0],
+ // so the result is `[1,1,1,1] * [0,0,0,0] + [0,0,0,0] * 1` = [0,0,0,0].
+ renderPass.end();
+ }
+ t.device.queue.submit([commandEncoder.finish()]);
+
+ // Check that the blend constant is not inherited from the first render pass.
+ const expColor = { R: 0, G: 0, B: 0, A: 0 };
+ const expTexelView = TexelView.fromTexelsAsColors(format, _coords => expColor);
+
+ t.expectTexelViewComparisonIsOkInTexture({ texture: renderTarget }, expTexelView, [
+ kSize,
+ kSize,
+ ]);
+ });
+
+const kColorWriteCombinations: readonly GPUColorWriteFlags[] = [
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+];
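+// The 16 values above enumerate every subset of the channel bits:
+// GPUColorWrite.RED = 0x1, GREEN = 0x2, BLUE = 0x4, ALPHA = 0x8 (ALL = 0xF).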
+
+g.test('color_write_mask,channel_work')
+ .desc(
+ `
+ Test that the color write mask works with the zero channel, a single channel, multiple channels,
+ and all channels.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('mask', kColorWriteCombinations)
+ )
+ .fn(t => {
+ const { mask } = t.params;
+
+ const format = 'rgba8unorm';
+ const kSize = 1;
+
+ let r = 0,
+ g = 0,
+ b = 0,
+ a = 0;
+ if (mask & GPUConst.ColorWrite.RED) {
+ r = 1;
+ }
+ if (mask & GPUConst.ColorWrite.GREEN) {
+ g = 1;
+ }
+ if (mask & GPUConst.ColorWrite.BLUE) {
+ b = 1;
+ }
+ if (mask & GPUConst.ColorWrite.ALPHA) {
+ a = 1;
+ }
+
+ const testPipeline = t.createRenderPipelineForTest({
+ format,
+ writeMask: mask,
+ });
+
+ const renderTarget = t.device.createTexture({
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ size: [kSize, kSize],
+ format,
+ });
+
+ const kBaseColorData = new Float32Array([32, 64, 128, 192]);
+
+ const commandEncoder = t.device.createCommandEncoder();
+ {
+ const renderPass = commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ loadOp: 'load',
+ storeOp: 'store',
+ },
+ ],
+ });
+ renderPass.setPipeline(testPipeline);
+ renderPass.setBindGroup(
+ 0,
+ t.createBindGroupForTest(testPipeline.getBindGroupLayout(0), kBaseColorData)
+ );
+ renderPass.draw(3);
+ renderPass.end();
+ }
+ t.device.queue.submit([commandEncoder.finish()]);
+
+ const expColor = { R: r, G: g, B: b, A: a };
+ const expTexelView = TexelView.fromTexelsAsColors(format, _coords => expColor);
+
+ t.expectTexelViewComparisonIsOkInTexture({ texture: renderTarget }, expTexelView, [
+ kSize,
+ kSize,
+ ]);
+ });
+
+g.test('color_write_mask,blending_disabled')
+ .desc(
+ `Test that the color write mask works when blending is disabled or set to the defaults
+    (which produces the same blending result).`
+ )
+ .params(u => u.combine('disabled', [false, true]))
+ .fn(t => {
+ const format = 'rgba8unorm';
+ const kSize = 1;
+
+ const blend = t.params.disabled ? undefined : { color: {}, alpha: {} };
+
+ const testPipeline = t.createRenderPipelineForTest({
+ format,
+ blend,
+ writeMask: GPUColorWrite.RED,
+ });
+
+ const renderTarget = t.device.createTexture({
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ size: [kSize, kSize],
+ format,
+ });
+
+ const kBaseColorData = new Float32Array([32, 64, 128, 192]);
+
+ const commandEncoder = t.device.createCommandEncoder();
+ {
+ const renderPass = commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ loadOp: 'load',
+ storeOp: 'store',
+ },
+ ],
+ });
+ renderPass.setPipeline(testPipeline);
+ renderPass.setBindGroup(
+ 0,
+ t.createBindGroupForTest(testPipeline.getBindGroupLayout(0), kBaseColorData)
+ );
+ // Draw [1,1,1,1] with `src * 1 + dst * 0`. So the
+ // result is `[1,1,1,1] * [1,1,1,1] + [0,0,0,0] * 0` = [1,1,1,1].
+ renderPass.draw(3);
+ renderPass.end();
+ }
+ t.device.queue.submit([commandEncoder.finish()]);
+
+ const expColor = { R: 1, G: 0, B: 0, A: 0 };
+ const expTexelView = TexelView.fromTexelsAsColors(format, _coords => expColor);
+
+ t.expectTexelViewComparisonIsOkInTexture({ texture: renderTarget }, expTexelView, [
+ kSize,
+ kSize,
+ ]);
+ });
+
+g.test('blending,clamping')
+ .desc(
+ `
+ Test that clamping occurs at the correct points in the blend process: src value, src factor, dst
+ factor, and output.
+ - TODO: Need to test snorm formats.
+ - TODO: Need to test src value, srcFactor and dstFactor.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('format', ['rgba8unorm', 'rg16float'] as const)
+ .combine('srcValue', [0.4, 0.6, 0.8, 1.0])
+ .combine('dstValue', [0.2, 0.4])
+ )
+ .fn(t => {
+ const { format, srcValue, dstValue } = t.params;
+
+ const blendComponent = { srcFactor: 'one', dstFactor: 'one', operation: 'add' } as const;
+
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ fragment: {
+ targets: [
+ {
+ format,
+ blend: {
+ color: blendComponent,
+ alpha: blendComponent,
+ },
+ },
+ ],
+ module: t.device.createShaderModule({
+ code: `
+@fragment fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(${srcValue}, ${srcValue}, ${srcValue}, ${srcValue});
+}
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+@vertex fn main() -> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+}
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ primitive: {
+ topology: 'point-list',
+ },
+ });
+
+ const renderTarget = t.device.createTexture({
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ size: [1, 1, 1],
+ format,
+ });
+
+ const commandEncoder = t.device.createCommandEncoder();
+ const renderPass = commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ clearValue: { r: dstValue, g: dstValue, b: dstValue, a: dstValue },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ renderPass.setPipeline(pipeline);
+ renderPass.draw(1);
+ renderPass.end();
+ t.device.queue.submit([commandEncoder.finish()]);
+
+ let expValue: number;
+ switch (format) {
+ case 'rgba8unorm': // unorm types should clamp if the sum of srcValue and dstValue exceeds 1.
+ expValue = clamp(srcValue + dstValue, { min: 0, max: 1 });
+ break;
+      case 'rg16float': // float formats do not clamp.
+ expValue = srcValue + dstValue;
+ break;
+ }
+
+ const expColor = { R: expValue, G: expValue, B: expValue, A: expValue };
+ const expTexelView = TexelView.fromTexelsAsColors(format, _coords => expColor);
+
+ t.expectTexelViewComparisonIsOkInTexture({ texture: renderTarget }, expTexelView, [1, 1, 1]);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/depth.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/depth.spec.ts
new file mode 100644
index 0000000000..3b2227db98
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/depth.spec.ts
@@ -0,0 +1,546 @@
+export const description = `
+Test related to depth buffer, depth op, compare func, etc.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { TypedArrayBufferView } from '../../../../common/util/util.js';
+import { kDepthStencilFormats, kTextureFormatInfo } from '../../../format_info.js';
+import { GPUTest, TextureTestMixin } from '../../../gpu_test.js';
+import { TexelView } from '../../../util/texture/texel_view.js';
+
+const backgroundColor = [0x00, 0x00, 0x00, 0xff];
+const triangleColor = [0xff, 0xff, 0xff, 0xff];
+
+const kBaseColor = new Float32Array([1.0, 1.0, 1.0, 1.0]);
+const kRedStencilColor = new Float32Array([1.0, 0.0, 0.0, 1.0]);
+const kGreenStencilColor = new Float32Array([0.0, 1.0, 0.0, 1.0]);
+
+type TestStates = {
+ state: GPUDepthStencilState;
+ color: Float32Array;
+ depth: number;
+};
+
+class DepthTest extends TextureTestMixin(GPUTest) {
+ runDepthStateTest(testStates: TestStates[], expectedColor: Float32Array) {
+ const renderTargetFormat = 'rgba8unorm';
+
+ const renderTarget = this.trackForCleanup(
+ this.device.createTexture({
+ format: renderTargetFormat,
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ })
+ );
+
+ const depthStencilFormat: GPUTextureFormat = 'depth24plus-stencil8';
+ const depthTexture = this.trackForCleanup(
+ this.device.createTexture({
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ format: depthStencilFormat,
+ sampleCount: 1,
+ mipLevelCount: 1,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_DST,
+ })
+ );
+
+ const depthStencilAttachment: GPURenderPassDepthStencilAttachment = {
+ view: depthTexture.createView(),
+ depthLoadOp: 'load',
+ depthStoreOp: 'store',
+ stencilLoadOp: 'load',
+ stencilStoreOp: 'store',
+ };
+
+ const encoder = this.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ storeOp: 'store',
+ loadOp: 'load',
+ },
+ ],
+ depthStencilAttachment,
+ });
+
+    // Draw a point for each test state, using its depth state, color, and depth.
+ for (const test of testStates) {
+ const testPipeline = this.createRenderPipelineForTest(test.state, test.depth);
+ pass.setPipeline(testPipeline);
+ pass.setBindGroup(
+ 0,
+ this.createBindGroupForTest(testPipeline.getBindGroupLayout(0), test.color)
+ );
+ pass.draw(1);
+ }
+
+ pass.end();
+ this.device.queue.submit([encoder.finish()]);
+
+ const expColor = {
+ R: expectedColor[0],
+ G: expectedColor[1],
+ B: expectedColor[2],
+ A: expectedColor[3],
+ };
+ const expTexelView = TexelView.fromTexelsAsColors(renderTargetFormat, _coords => expColor);
+
+ this.expectTexelViewComparisonIsOkInTexture({ texture: renderTarget }, expTexelView, [1, 1]);
+ }
+
+ createRenderPipelineForTest(
+ depthStencil: GPUDepthStencilState,
+ depth: number
+ ): GPURenderPipeline {
+ return this.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: this.device.createShaderModule({
+ code: `
+ @vertex
+ fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, ${depth}, 1.0);
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ targets: [{ format: 'rgba8unorm' }],
+ module: this.device.createShaderModule({
+ code: `
+ struct Params {
+ color : vec4<f32>
+ }
+ @group(0) @binding(0) var<uniform> params : Params;
+
+ @fragment fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(params.color);
+ }`,
+ }),
+ entryPoint: 'main',
+ },
+ primitive: { topology: 'point-list' },
+ depthStencil,
+ });
+ }
+
+ createBindGroupForTest(layout: GPUBindGroupLayout, data: TypedArrayBufferView): GPUBindGroup {
+ return this.device.createBindGroup({
+ layout,
+ entries: [
+ {
+ binding: 0,
+ resource: {
+ buffer: this.makeBufferWithContents(data, GPUBufferUsage.UNIFORM),
+ },
+ },
+ ],
+ });
+ }
+}
+
+export const g = makeTestGroup(DepthTest);
+
+g.test('depth_disabled')
+ .desc('Tests render results with depth test disabled.')
+ .fn(t => {
+    const depthStencilFormat: GPUTextureFormat = 'depth24plus-stencil8';
+    const state = {
+      format: depthStencilFormat,
+ depthWriteEnabled: false,
+ depthCompare: 'always' as GPUCompareFunction,
+ };
+
+ const testStates = [
+ { state, color: kBaseColor, depth: 0.0 },
+ { state, color: kRedStencilColor, depth: 0.5 },
+ { state, color: kGreenStencilColor, depth: 1.0 },
+ ];
+
+    // Test all orderings and ensure that the last point drawn is the one visible, regardless of
+    // depth testing.
+ for (let last = 0; last < 3; ++last) {
+ const i = (last + 1) % 3;
+ const j = (last + 2) % 3;
+
+ t.runDepthStateTest([testStates[i], testStates[j], testStates[last]], testStates[last].color);
+ t.runDepthStateTest([testStates[j], testStates[i], testStates[last]], testStates[last].color);
+ }
+ });
+
+g.test('depth_write_disabled')
+ .desc(
+ `
+ Test that depthWriteEnabled behaves as expected.
+ If enabled, a depth value of 0.0 is written.
+ If disabled, it's not written, so it keeps the previous value of 1.0.
+ Use a depthCompare: 'equal' check at the end to check the value.
+ `
+ )
+ .params(u =>
+ u //
+ .combineWithParams([
+ { depthWriteEnabled: false, lastDepth: 0.0, _expectedColor: kRedStencilColor },
+ { depthWriteEnabled: true, lastDepth: 0.0, _expectedColor: kGreenStencilColor },
+ { depthWriteEnabled: false, lastDepth: 1.0, _expectedColor: kGreenStencilColor },
+ { depthWriteEnabled: true, lastDepth: 1.0, _expectedColor: kRedStencilColor },
+ ])
+ )
+ .fn(t => {
+ const { depthWriteEnabled, lastDepth, _expectedColor } = t.params;
+
+    const depthStencilFormat: GPUTextureFormat = 'depth24plus-stencil8';
+
+ const stencilState = {
+ compare: 'always',
+ failOp: 'keep',
+ depthFailOp: 'keep',
+ passOp: 'keep',
+ } as const;
+
+ const baseState = {
+      format: depthStencilFormat,
+ depthWriteEnabled: true,
+ depthCompare: 'always',
+ stencilFront: stencilState,
+ stencilBack: stencilState,
+ stencilReadMask: 0xff,
+ stencilWriteMask: 0xff,
+ } as const;
+
+ const depthWriteState = {
+      format: depthStencilFormat,
+ depthWriteEnabled,
+ depthCompare: 'always',
+ stencilFront: stencilState,
+ stencilBack: stencilState,
+ stencilReadMask: 0xff,
+ stencilWriteMask: 0xff,
+ } as const;
+
+ const checkState = {
+      format: depthStencilFormat,
+ depthWriteEnabled: false,
+ depthCompare: 'equal',
+ stencilFront: stencilState,
+ stencilBack: stencilState,
+ stencilReadMask: 0xff,
+ stencilWriteMask: 0xff,
+ } as const;
+
+ const testStates = [
+ // Draw a base point with depth write enabled.
+ { state: baseState, color: kBaseColor, depth: 1.0 },
+ // Draw a second point without depth write enabled.
+ { state: depthWriteState, color: kRedStencilColor, depth: 0.0 },
+ // Draw a third point which should occlude the second even though it is behind it.
+ { state: checkState, color: kGreenStencilColor, depth: lastDepth },
+ ];
+
+ t.runDepthStateTest(testStates, _expectedColor);
+ });
+
+g.test('depth_test_fail')
+ .desc(
+ `
+    Test render results for depth test failure cases, using the 'less' depthCompare operation with
+    depthWriteEnabled set to true.
+ `
+ )
+ .params(u =>
+ u //
+ .combineWithParams([
+ { secondDepth: 1.0, lastDepth: 2.0, _expectedColor: kBaseColor }, // fail -> fail.
+ { secondDepth: 0.0, lastDepth: 2.0, _expectedColor: kRedStencilColor }, // pass -> fail.
+ { secondDepth: 2.0, lastDepth: 0.9, _expectedColor: kGreenStencilColor }, // fail -> pass.
+ ] as const)
+ )
+ .fn(t => {
+ const { secondDepth, lastDepth, _expectedColor } = t.params;
+
+ const depthStencilFormat: GPUTextureFormat = 'depth24plus-stencil8';
+
+ const baseState = {
+ format: depthStencilFormat,
+ depthWriteEnabled: true,
+ depthCompare: 'always',
+ stencilReadMask: 0xff,
+ stencilWriteMask: 0xff,
+ } as const;
+
+ const depthTestState = {
+ format: depthStencilFormat,
+ depthWriteEnabled: true,
+ depthCompare: 'less',
+ stencilReadMask: 0xff,
+ stencilWriteMask: 0xff,
+ } as const;
+
+ const testStates = [
+ { state: baseState, color: kBaseColor, depth: 1.0 },
+ { state: depthTestState, color: kRedStencilColor, depth: secondDepth },
+ { state: depthTestState, color: kGreenStencilColor, depth: lastDepth },
+ ];
+
+ t.runDepthStateTest(testStates, _expectedColor);
+ });
+
+// Use a depth value that's not exactly 0.5, because 0.5 is exactly between two depth16unorm
+// values and can get rounded either way (and possibly differently between the shader output and
+// clearDepthValue).
+const kMiddleDepthValue = 0.5001;
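+// Illustrative arithmetic (assuming the usual unorm16 conversion round(d * 65535)): d = 0.5
+// maps to 32767.5, exactly halfway between codes 32767 and 32768, so it can round either way;
+// d = 0.5001 maps to about 32774.05, which consistently becomes code 32774 (~0.50011).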
+
+g.test('depth_compare_func')
+ .desc(
+ `Tests that each depth compare function works properly. Clears the depth attachment to various values, and renders a point at a depth near 0.5 with various depthCompare modes.`
+ )
+ .params(u =>
+ u
+ .combine(
+ 'format',
+ kDepthStencilFormats.filter(format => kTextureFormatInfo[format].depth)
+ )
+ .combineWithParams([
+ { depthCompare: 'never', depthClearValue: 1.0, _expected: backgroundColor },
+ { depthCompare: 'never', depthClearValue: kMiddleDepthValue, _expected: backgroundColor },
+ { depthCompare: 'never', depthClearValue: 0.0, _expected: backgroundColor },
+ { depthCompare: 'less', depthClearValue: 1.0, _expected: triangleColor },
+ { depthCompare: 'less', depthClearValue: kMiddleDepthValue, _expected: backgroundColor },
+ { depthCompare: 'less', depthClearValue: 0.0, _expected: backgroundColor },
+ { depthCompare: 'less-equal', depthClearValue: 1.0, _expected: triangleColor },
+ {
+ depthCompare: 'less-equal',
+ depthClearValue: kMiddleDepthValue,
+ _expected: triangleColor,
+ },
+ { depthCompare: 'less-equal', depthClearValue: 0.0, _expected: backgroundColor },
+ { depthCompare: 'equal', depthClearValue: 1.0, _expected: backgroundColor },
+ { depthCompare: 'equal', depthClearValue: kMiddleDepthValue, _expected: triangleColor },
+ { depthCompare: 'equal', depthClearValue: 0.0, _expected: backgroundColor },
+ { depthCompare: 'not-equal', depthClearValue: 1.0, _expected: triangleColor },
+ {
+ depthCompare: 'not-equal',
+ depthClearValue: kMiddleDepthValue,
+ _expected: backgroundColor,
+ },
+ { depthCompare: 'not-equal', depthClearValue: 0.0, _expected: triangleColor },
+ { depthCompare: 'greater-equal', depthClearValue: 1.0, _expected: backgroundColor },
+ {
+ depthCompare: 'greater-equal',
+ depthClearValue: kMiddleDepthValue,
+ _expected: triangleColor,
+ },
+ { depthCompare: 'greater-equal', depthClearValue: 0.0, _expected: triangleColor },
+ { depthCompare: 'greater', depthClearValue: 1.0, _expected: backgroundColor },
+ { depthCompare: 'greater', depthClearValue: kMiddleDepthValue, _expected: backgroundColor },
+ { depthCompare: 'greater', depthClearValue: 0.0, _expected: triangleColor },
+ { depthCompare: 'always', depthClearValue: 1.0, _expected: triangleColor },
+ { depthCompare: 'always', depthClearValue: kMiddleDepthValue, _expected: triangleColor },
+ { depthCompare: 'always', depthClearValue: 0.0, _expected: triangleColor },
+ ] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceForTextureFormatOrSkipTestCase(t.params.format);
+ })
+ .fn(t => {
+ const { depthCompare, depthClearValue, _expected, format } = t.params;
+
+ const colorAttachmentFormat = 'rgba8unorm';
+ const colorAttachment = t.device.createTexture({
+ format: colorAttachmentFormat,
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ const colorAttachmentView = colorAttachment.createView();
+
+ const depthTexture = t.device.createTexture({
+ size: { width: 1, height: 1 },
+ format,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,
+ });
+ const depthTextureView = depthTexture.createView();
+
+ const pipelineDescriptor: GPURenderPipelineDescriptor = {
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ @vertex fn main(
+ @builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.5, 0.5, ${kMiddleDepthValue}, 1.0);
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `
+ @fragment fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(1.0, 1.0, 1.0, 1.0);
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: colorAttachmentFormat }],
+ },
+ primitive: { topology: 'point-list' },
+ depthStencil: {
+ depthWriteEnabled: true,
+ depthCompare,
+ format,
+ },
+ };
+ const pipeline = t.device.createRenderPipeline(pipelineDescriptor);
+
+ const encoder = t.device.createCommandEncoder();
+ const depthStencilAttachment: GPURenderPassDepthStencilAttachment = {
+ view: depthTextureView,
+ depthClearValue,
+ depthLoadOp: 'clear',
+ depthStoreOp: 'store',
+ };
+ if (kTextureFormatInfo[format].stencil) {
+ depthStencilAttachment.stencilClearValue = 0;
+ depthStencilAttachment.stencilLoadOp = 'clear';
+ depthStencilAttachment.stencilStoreOp = 'store';
+ }
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorAttachmentView,
+ storeOp: 'store',
+ clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ },
+ ],
+ depthStencilAttachment,
+ });
+ pass.setPipeline(pipeline);
+ pass.draw(1);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+
+ t.expectSinglePixelComparisonsAreOkInTexture({ texture: colorAttachment }, [
+ {
+ coord: { x: 0, y: 0 },
+ exp: new Uint8Array(_expected),
+ },
+ ]);
+ });
+
+g.test('reverse_depth')
+ .desc(
+ `Tests simple rendering with a reversed depth buffer and ensures the depth test works properly: fragments are in the correct order and out-of-range fragments are clipped.
+ Note that in a real use case the depth range remapping is done by a modified projection matrix
+(see https://developer.nvidia.com/content/depth-precision-visualized).`
+ )
+ .params(u => u.combine('reversed', [false, true]))
+ .fn(t => {
+ const colorAttachmentFormat = 'rgba8unorm';
+ const colorAttachment = t.device.createTexture({
+ format: colorAttachmentFormat,
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ const colorAttachmentView = colorAttachment.createView();
+
+ const depthBufferFormat = 'depth32float';
+ const depthTexture = t.device.createTexture({
+ size: { width: 1, height: 1 },
+ format: depthBufferFormat,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,
+ });
+ const depthTextureView = depthTexture.createView();
+
+ const pipelineDescriptor: GPURenderPipelineDescriptor = {
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ struct Output {
+ @builtin(position) Position : vec4<f32>,
+ @location(0) color : vec4<f32>,
+ };
+
+ @vertex fn main(
+ @builtin(vertex_index) VertexIndex : u32,
+ @builtin(instance_index) InstanceIndex : u32) -> Output {
+ // TODO: remove this workaround for broken unary array access in Tint.
+ var zv : array<vec2<f32>, 4> = array<vec2<f32>, 4>(
+ vec2<f32>(0.2, 0.2),
+ vec2<f32>(0.3, 0.3),
+ vec2<f32>(-0.1, -0.1),
+ vec2<f32>(1.1, 1.1));
+ let z : f32 = zv[InstanceIndex].x;
+
+ var output : Output;
+ output.Position = vec4<f32>(0.5, 0.5, z, 1.0);
+ var colors : array<vec4<f32>, 4> = array<vec4<f32>, 4>(
+ vec4<f32>(1.0, 0.0, 0.0, 1.0),
+ vec4<f32>(0.0, 1.0, 0.0, 1.0),
+ vec4<f32>(0.0, 0.0, 1.0, 1.0),
+ vec4<f32>(1.0, 1.0, 1.0, 1.0)
+ );
+ output.color = colors[InstanceIndex];
+ return output;
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `
+ @fragment fn main(
+ @location(0) color : vec4<f32>
+ ) -> @location(0) vec4<f32> {
+ return color;
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: colorAttachmentFormat }],
+ },
+ primitive: { topology: 'point-list' },
+ depthStencil: {
+ depthWriteEnabled: true,
+ depthCompare: t.params.reversed ? 'greater' : 'less',
+ format: depthBufferFormat,
+ },
+ };
+ const pipeline = t.device.createRenderPipeline(pipelineDescriptor);
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorAttachmentView,
+ storeOp: 'store',
+ clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },
+ loadOp: 'clear',
+ },
+ ],
+ depthStencilAttachment: {
+ view: depthTextureView,
+
+ depthClearValue: t.params.reversed ? 0.0 : 1.0,
+ depthLoadOp: 'clear',
+ depthStoreOp: 'store',
+ },
+ });
+ pass.setPipeline(pipeline);
+ pass.draw(1, 4);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+
+ t.expectSinglePixelComparisonsAreOkInTexture({ texture: colorAttachment }, [
+ {
+ coord: { x: 0, y: 0 },
+ exp: new Uint8Array(
+ t.params.reversed ? [0x00, 0xff, 0x00, 0xff] : [0xff, 0x00, 0x00, 0xff]
+ ),
+ },
+ ]);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/depth_bias.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/depth_bias.spec.ts
new file mode 100644
index 0000000000..03caff3b25
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/depth_bias.spec.ts
@@ -0,0 +1,352 @@
+export const description = `
+Tests render results with different depth bias values like 'positive', 'negative',
+'slope', 'clamp', etc.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { unreachable } from '../../../../common/util/util.js';
+import {
+ kTextureFormatInfo,
+ DepthStencilFormat,
+ EncodableTextureFormat,
+} from '../../../format_info.js';
+import { GPUTest, TextureTestMixin } from '../../../gpu_test.js';
+import { TexelView } from '../../../util/texture/texel_view.js';
+
+enum QuadAngle {
+ Flat,
+ TiltedX,
+}
+
+// Floating point depth buffers use the following formula to calculate bias
+// bias = depthBias * 2 ** (exponent(max z of primitive) - number of bits in mantissa) +
+// slopeScale * maxSlope
+// https://docs.microsoft.com/en-us/windows/win32/direct3d11/d3d10-graphics-programming-guide-output-merger-stage-depth-bias
+// https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/vkCmdSetDepthBias.html
+// https://developer.apple.com/documentation/metal/mtlrendercommandencoder/1516269-setdepthbias
+//
+// To get a final bias of 0.25 for primitives with z = 0.25, we can use
+// depthBias = 0.25 / (2 ** (-2 - 23)) = 8388608.
+const kPointTwoFiveBiasForPointTwoFiveZOnFloat = 8388608;
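+// A minimal sketch of the calculation above (hypothetical helper, not used by these tests):
+// it evaluates the float depth bias formula for depth32float, which has 23 mantissa bits
+// (the slopeScale term is omitted here).
+function floatDepthBiasFor(desiredBias: number, maxZ: number): number {
+  // IEEE exponent of maxZ for normal values, e.g. exponent(0.25) === -2.
+  const exponent = Math.floor(Math.log2(maxZ));
+  // bias = depthBias * 2 ** (exponent - 23)  =>  depthBias = bias / 2 ** (exponent - 23).
+  return desiredBias / 2 ** (exponent - 23);
+}
+// floatDepthBiasFor(0.25, 0.25) === 8388608, matching kPointTwoFiveBiasForPointTwoFiveZOnFloat.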
+
+class DepthBiasTest extends TextureTestMixin(GPUTest) {
+ runDepthBiasTestInternal(
+ depthFormat: DepthStencilFormat,
+ {
+ quadAngle,
+ bias,
+ biasSlopeScale,
+ biasClamp,
+ initialDepth,
+ }: {
+ quadAngle: QuadAngle;
+ bias: number;
+ biasSlopeScale: number;
+ biasClamp: number;
+ initialDepth: number;
+ }
+ ): { renderTarget: GPUTexture; depthTexture: GPUTexture } {
+ const renderTargetFormat = 'rgba8unorm';
+ const depthFormatInfo = kTextureFormatInfo[depthFormat];
+
+ let vertexShaderCode: string;
+ switch (quadAngle) {
+ case QuadAngle.Flat:
+ // Draw a square at z = 0.25.
+ vertexShaderCode = `
+ @vertex
+ fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+ var pos = array<vec2<f32>, 6>(
+ vec2<f32>(-1.0, -1.0),
+ vec2<f32>( 1.0, -1.0),
+ vec2<f32>(-1.0, 1.0),
+ vec2<f32>(-1.0, 1.0),
+ vec2<f32>( 1.0, -1.0),
+ vec2<f32>( 1.0, 1.0));
+ return vec4<f32>(pos[VertexIndex], 0.25, 1.0);
+ }
+ `;
+ break;
+ case QuadAngle.TiltedX:
+ // Draw a square ranging from 0 to 0.5, bottom to top.
+ vertexShaderCode = `
+ @vertex
+ fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+ var pos = array<vec3<f32>, 6>(
+ vec3<f32>(-1.0, -1.0, 0.0),
+ vec3<f32>( 1.0, -1.0, 0.0),
+ vec3<f32>(-1.0, 1.0, 0.5),
+ vec3<f32>(-1.0, 1.0, 0.5),
+ vec3<f32>( 1.0, -1.0, 0.0),
+ vec3<f32>( 1.0, 1.0, 0.5));
+ return vec4<f32>(pos[VertexIndex], 1.0);
+ }
+ `;
+ break;
+ default:
+ unreachable();
+ }
+
+ const renderTarget = this.trackForCleanup(
+ this.device.createTexture({
+ format: renderTargetFormat,
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ })
+ );
+
+ const depthTexture = this.trackForCleanup(
+ this.device.createTexture({
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ format: depthFormat,
+ sampleCount: 1,
+ mipLevelCount: 1,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ })
+ );
+
+ const depthStencilAttachment: GPURenderPassDepthStencilAttachment = {
+ view: depthTexture.createView(),
+ depthLoadOp: depthFormatInfo.depth ? 'clear' : undefined,
+ depthStoreOp: depthFormatInfo.depth ? 'store' : undefined,
+ stencilLoadOp: depthFormatInfo.stencil ? 'clear' : undefined,
+ stencilStoreOp: depthFormatInfo.stencil ? 'store' : undefined,
+ depthClearValue: initialDepth,
+ };
+
+ const encoder = this.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ storeOp: 'store',
+ loadOp: 'load',
+ },
+ ],
+ depthStencilAttachment,
+ });
+
+ let depthCompare: GPUCompareFunction = 'always';
+ if (depthFormat !== 'depth32float') {
+ depthCompare = 'greater';
+ }
+
+ const testState = {
+ format: depthFormat,
+ depthCompare,
+ depthWriteEnabled: true,
+ depthBias: bias,
+ depthBiasSlopeScale: biasSlopeScale,
+ depthBiasClamp: biasClamp,
+ } as const;
+
+ // Draw a square with the given depth state and bias values.
+ const testPipeline = this.createRenderPipelineForTest(vertexShaderCode, testState);
+ pass.setPipeline(testPipeline);
+ pass.draw(6);
+ pass.end();
+ this.device.queue.submit([encoder.finish()]);
+
+ return { renderTarget, depthTexture };
+ }
+
+ runDepthBiasTest(
+ depthFormat: EncodableTextureFormat & DepthStencilFormat,
+ {
+ quadAngle,
+ bias,
+ biasSlopeScale,
+ biasClamp,
+ _expectedDepth,
+ }: {
+ quadAngle: QuadAngle;
+ bias: number;
+ biasSlopeScale: number;
+ biasClamp: number;
+ _expectedDepth: number;
+ }
+ ) {
+ const { depthTexture } = this.runDepthBiasTestInternal(depthFormat, {
+ quadAngle,
+ bias,
+ biasSlopeScale,
+ biasClamp,
+ initialDepth: 0,
+ });
+
+ const expColor = { Depth: _expectedDepth };
+ const expTexelView = TexelView.fromTexelsAsColors(depthFormat, _coords => expColor);
+ this.expectTexelViewComparisonIsOkInTexture({ texture: depthTexture }, expTexelView, [1, 1]);
+ }
+
+ runDepthBiasTestFor24BitFormat(
+ depthFormat: DepthStencilFormat,
+ {
+ quadAngle,
+ bias,
+ biasSlopeScale,
+ biasClamp,
+ _expectedColor,
+ }: {
+ quadAngle: QuadAngle;
+ bias: number;
+ biasSlopeScale: number;
+ biasClamp: number;
+ _expectedColor: Float32Array;
+ }
+ ) {
+ const { renderTarget } = this.runDepthBiasTestInternal(depthFormat, {
+ quadAngle,
+ bias,
+ biasSlopeScale,
+ biasClamp,
+ initialDepth: 0.4,
+ });
+
+ const renderTargetFormat = 'rgba8unorm';
+ const expColor = {
+ R: _expectedColor[0],
+ G: _expectedColor[1],
+ B: _expectedColor[2],
+ A: _expectedColor[3],
+ };
+ const expTexelView = TexelView.fromTexelsAsColors(renderTargetFormat, _coords => expColor);
+ this.expectTexelViewComparisonIsOkInTexture({ texture: renderTarget }, expTexelView, [1, 1]);
+ }
+
+ createRenderPipelineForTest(
+ vertex: string,
+ depthStencil: GPUDepthStencilState
+ ): GPURenderPipeline {
+ return this.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: this.device.createShaderModule({
+ code: vertex,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ targets: [{ format: 'rgba8unorm' }],
+ module: this.device.createShaderModule({
+ code: `
+ @fragment fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(1.0, 0.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ },
+ depthStencil,
+ });
+ }
+}
+
+export const g = makeTestGroup(DepthBiasTest);
+
+g.test('depth_bias')
+ .desc(
+ `
+ Tests that a square with different depth bias values like 'positive', 'negative',
+ 'slope', 'clamp', etc. is drawn as expected.
+ `
+ )
+ .params(u =>
+ u //
+ .combineWithParams([
+ {
+ quadAngle: QuadAngle.Flat,
+ bias: kPointTwoFiveBiasForPointTwoFiveZOnFloat,
+ biasSlopeScale: 0,
+ biasClamp: 0,
+ _expectedDepth: 0.5,
+ },
+ {
+ quadAngle: QuadAngle.Flat,
+ bias: kPointTwoFiveBiasForPointTwoFiveZOnFloat,
+ biasSlopeScale: 0,
+ biasClamp: 0.125,
+ _expectedDepth: 0.375,
+ },
+ {
+ quadAngle: QuadAngle.Flat,
+ bias: -kPointTwoFiveBiasForPointTwoFiveZOnFloat,
+ biasSlopeScale: 0,
+ biasClamp: 0.125,
+ _expectedDepth: 0,
+ },
+ {
+ quadAngle: QuadAngle.Flat,
+ bias: -kPointTwoFiveBiasForPointTwoFiveZOnFloat,
+ biasSlopeScale: 0,
+ biasClamp: -0.125,
+ _expectedDepth: 0.125,
+ },
+ {
+ quadAngle: QuadAngle.TiltedX,
+ bias: 0,
+ biasSlopeScale: 0,
+ biasClamp: 0,
+ _expectedDepth: 0.25,
+ },
+ {
+ quadAngle: QuadAngle.TiltedX,
+ bias: 0,
+ biasSlopeScale: 1,
+ biasClamp: 0,
+ _expectedDepth: 0.75,
+ },
+ {
+ quadAngle: QuadAngle.TiltedX,
+ bias: 0,
+ biasSlopeScale: -0.5,
+ biasClamp: 0,
+ _expectedDepth: 0,
+ },
+ ] as const)
+ )
+ .fn(t => {
+ t.runDepthBiasTest('depth32float', t.params);
+ });
+
+g.test('depth_bias_24bit_format')
+ .desc(
+ `
+ Tests that a square with different depth bias values like 'positive', 'negative',
+ 'slope', 'clamp', etc. is drawn as expected with a 24-bit depth format.
+
+ TODO: Enhance these tests by reading back the depth (emulating the copy using texture sampling)
+ and checking the result directly, like the non-24-bit depth tests, instead of just relying on
+ whether the depth test passes or fails.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('format', ['depth24plus', 'depth24plus-stencil8'] as const)
+ .combineWithParams([
+ {
+ quadAngle: QuadAngle.Flat,
+ bias: 0.25 * (1 << 25),
+ biasSlopeScale: 0,
+ biasClamp: 0,
+ _expectedColor: new Float32Array([1.0, 0.0, 0.0, 1.0]),
+ },
+ {
+ quadAngle: QuadAngle.TiltedX,
+ bias: 0.25 * (1 << 25),
+ biasSlopeScale: 1,
+ biasClamp: 0,
+ _expectedColor: new Float32Array([1.0, 0.0, 0.0, 1.0]),
+ },
+ {
+ quadAngle: QuadAngle.Flat,
+ bias: 0.25 * (1 << 25),
+ biasSlopeScale: 0,
+ biasClamp: 0.1,
+ _expectedColor: new Float32Array([0.0, 0.0, 0.0, 0.0]),
+ },
+ ] as const)
+ )
+ .fn(t => {
+ const { format } = t.params;
+ t.runDepthBiasTestFor24BitFormat(format, t.params);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/depth_clip_clamp.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/depth_clip_clamp.spec.ts
new file mode 100644
index 0000000000..65e2e8af1f
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/depth_clip_clamp.spec.ts
@@ -0,0 +1,524 @@
+export const description = `
+Tests for depth clipping, depth clamping (at various points in the pipeline), and maybe extended
+depth ranges as well.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { kDepthStencilFormats, kTextureFormatInfo } from '../../../format_info.js';
+import { GPUTest } from '../../../gpu_test.js';
+import {
+ checkElementsBetween,
+ checkElementsPassPredicate,
+ CheckElementsSupplementalTableRows,
+} from '../../../util/check_contents.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('depth_clamp_and_clip')
+ .desc(
+ `
+Depth written to the depth attachment should always be in the range of the viewport depth,
+even if it was written by the fragment shader (using frag_depth). If depth clipping is enabled,
+primitives should be clipped to the viewport depth before rasterization; if not, these fragments
+should be rasterized, and the fragment shader should receive out-of-viewport position.z values.
+
+To test this, render NxN points covering N vertex depth values crossed with (if writeDepth=true) N
+frag_depth values, with the viewport depth set to [0.25,0.75].
+
+While rendering, check the fragment input position.z has the expected value (for all fragments that
+were produced by the rasterizer) by writing the diff to a storage buffer, which is later checked to
+be all (near) 0.
+
+Then, run another pass (which outputs every point at z=0.5 to avoid clipping) to verify the depth
+buffer contents by outputting the expected depth with depthCompare:'not-equal': any fragments that
+have unexpected values then get drawn to the color buffer, which is later checked to be empty.`
+ )
+ .params(u =>
+ u //
+ .combine('format', kDepthStencilFormats)
+ .filter(p => !!kTextureFormatInfo[p.format].depth)
+ .combine('unclippedDepth', [undefined, false, true])
+ .combine('writeDepth', [false, true])
+ .combine('multisampled', [false, true])
+ )
+ .beforeAllSubcases(t => {
+ const info = kTextureFormatInfo[t.params.format];
+
+ t.selectDeviceOrSkipTestCase([
+ t.params.unclippedDepth ? 'depth-clip-control' : undefined,
+ info.feature,
+ ]);
+ })
+ .fn(async t => {
+ const { format, unclippedDepth, writeDepth, multisampled } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ /** Number of depth values to test for both vertex output and frag_depth output. */
+ const kNumDepthValues = 8;
+ /** Test every combination of vertex output and frag_depth output. */
+ const kNumTestPoints = kNumDepthValues * kNumDepthValues;
+ const kViewportMinDepth = 0.25;
+ const kViewportMaxDepth = 0.75;
+
+ const shaderSource = `
+ // Test depths, with viewport range corresponding to [0,1].
+ var<private> kDepths: array<f32, ${kNumDepthValues}> = array<f32, ${kNumDepthValues}>(
+ -1.0, -0.5, 0.0, 0.25, 0.75, 1.0, 1.5, 2.0);
+
+ const vpMin: f32 = ${kViewportMinDepth};
+ const vpMax: f32 = ${kViewportMaxDepth};
+
+ // Draw the points in a straight horizontal row, one per pixel.
+ fn vertexX(idx: u32) -> f32 {
+ return (f32(idx) + 0.5) * 2.0 / ${kNumTestPoints}.0 - 1.0;
+ }
+
+ // Test vertex shader's position.z output.
+ // Here, the viewport range corresponds to position.z in [0,1].
+ fn vertexZ(idx: u32) -> f32 {
+ return kDepths[idx / ${kNumDepthValues}u];
+ }
+
+ // Test fragment shader's expected position.z input.
+ // Here, the viewport range corresponds to position.z in [vpMin,vpMax], but
+ // unclipped values extend beyond that range.
+ fn expectedFragPosZ(idx: u32) -> f32 {
+ return vpMin + vertexZ(idx) * (vpMax - vpMin);
+ }
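+ // For example, vertexZ = 2.0 gives 0.25 + 2.0 * (0.75 - 0.25) = 1.25, which is outside
+ // [vpMin, vpMax]; such position.z values only reach the fragment shader when depth
+ // clipping is disabled (unclippedDepth).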
+
+ //////// "Test" entry points
+
+ struct VFTest {
+ @builtin(position) pos: vec4<f32>,
+ @location(0) @interpolate(flat) vertexIndex: u32,
+ };
+
+ @vertex
+ fn vtest(@builtin(vertex_index) idx: u32) -> VFTest {
+ var vf: VFTest;
+ vf.pos = vec4<f32>(vertexX(idx), 0.0, vertexZ(idx), 1.0);
+ vf.vertexIndex = idx;
+ return vf;
+ }
+
+ struct Output {
+ // Each fragment (that didn't get clipped) writes into one element of this output.
+ // (Anything that doesn't get written is already zero.)
+ fragInputZDiff: array<f32, ${kNumTestPoints}>
+ };
+ @group(0) @binding(0) var <storage, read_write> output: Output;
+
+ fn checkZ(vf: VFTest) {
+ output.fragInputZDiff[vf.vertexIndex] = vf.pos.z - expectedFragPosZ(vf.vertexIndex);
+ }
+
+ @fragment
+ fn ftest_WriteDepth(vf: VFTest) -> @builtin(frag_depth) f32 {
+ checkZ(vf);
+ return kDepths[vf.vertexIndex % ${kNumDepthValues}u];
+ }
+
+ @fragment
+ fn ftest_NoWriteDepth(vf: VFTest) {
+ checkZ(vf);
+ }
+
+ //////// "Check" entry points
+
+ struct VFCheck {
+ @builtin(position) pos: vec4<f32>,
+ @location(0) @interpolate(flat) vertexIndex: u32,
+ };
+
+ @vertex
+ fn vcheck(@builtin(vertex_index) idx: u32) -> VFCheck {
+ var vf: VFCheck;
+ // Depth=0.5 because we want to render every point, not get clipped.
+ vf.pos = vec4<f32>(vertexX(idx), 0.0, 0.5, 1.0);
+ vf.vertexIndex = idx;
+ return vf;
+ }
+
+ struct FCheck {
+ @builtin(frag_depth) depth: f32,
+ @location(0) color: f32,
+ };
+
+ @fragment
+ fn fcheck(vf: VFCheck) -> FCheck {
+ let vertZ = vertexZ(vf.vertexIndex);
+ let outOfRange = vertZ < 0.0 || vertZ > 1.0;
+ let expFragPosZ = expectedFragPosZ(vf.vertexIndex);
+
+ let writtenDepth = kDepths[vf.vertexIndex % ${kNumDepthValues}u];
+
+ let expectedDepthWriteInput = ${writeDepth ? 'writtenDepth' : 'expFragPosZ'};
+ var expectedDepthBufferValue = clamp(expectedDepthWriteInput, vpMin, vpMax);
+ if (${!unclippedDepth} && outOfRange) {
+ // Test fragment should have been clipped; expect the depth attachment to
+ // have its clear value (0.5).
+ expectedDepthBufferValue = 0.5;
+ }
+
+ var f: FCheck;
+ f.depth = expectedDepthBufferValue;
+ f.color = 1.0; // Color written if the resulting depth is unexpected.
+ return f;
+ }
+ `;
+ const module = t.device.createShaderModule({ code: shaderSource });
+
+ // Draw points at different vertex depths and fragment depths into the depth attachment,
+ // with a viewport of [0.25,0.75].
+ const testPipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: { module, entryPoint: 'vtest' },
+ primitive: {
+ topology: 'point-list',
+ unclippedDepth,
+ },
+ depthStencil: { format, depthWriteEnabled: true, depthCompare: 'always' },
+ multisample: multisampled ? { count: 4 } : undefined,
+ fragment: {
+ module,
+ entryPoint: writeDepth ? 'ftest_WriteDepth' : 'ftest_NoWriteDepth',
+ targets: [],
+ },
+ });
+
+ // Use depth comparison to check that the depth attachment now has the expected values.
+ const checkPipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: { module, entryPoint: 'vcheck' },
+ primitive: { topology: 'point-list' },
+ depthStencil: {
+ format,
+ // NOTE: This check is probably very susceptible to floating point error. If it fails, maybe
+ // replace it with two checks (less + greater) with an epsilon applied in the check shader?
+ depthCompare: 'not-equal', // Expect every depth value to be exactly equal.
+ depthWriteEnabled: true, // If the check failed, overwrite with the expected result.
+ },
+ multisample: multisampled ? { count: 4 } : undefined,
+ fragment: { module, entryPoint: 'fcheck', targets: [{ format: 'r8unorm' }] },
+ });
+
+ const dsTexture = t.device.createTexture({
+ format,
+ size: [kNumTestPoints],
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ sampleCount: multisampled ? 4 : 1,
+ });
+ const dsTextureView = dsTexture.createView();
+
+ const checkTextureDesc = {
+ format: 'r8unorm' as const,
+ size: [kNumTestPoints],
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ };
+ const checkTexture = t.device.createTexture(checkTextureDesc);
+ const checkTextureView = checkTexture.createView();
+ const checkTextureMSView = multisampled
+ ? t.device.createTexture({ ...checkTextureDesc, sampleCount: 4 }).createView()
+ : undefined;
+
+ const dsActual =
+ !multisampled && info.bytesPerBlock
+ ? t.device.createBuffer({
+ size: kNumTestPoints * info.bytesPerBlock,
+ usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ,
+ })
+ : undefined;
+ const dsExpected =
+ !multisampled && info.bytesPerBlock
+ ? t.device.createBuffer({
+ size: kNumTestPoints * info.bytesPerBlock,
+ usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ,
+ })
+ : undefined;
+ const checkBuffer = t.device.createBuffer({
+ size: kNumTestPoints,
+ usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ,
+ });
+
+ const fragInputZFailedBuffer = t.device.createBuffer({
+ size: 4 * kNumTestPoints,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+ const testBindGroup = t.device.createBindGroup({
+ layout: testPipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer: fragInputZFailedBuffer } }],
+ });
+
+ const enc = t.device.createCommandEncoder();
+ {
+ const pass = enc.beginRenderPass({
+ colorAttachments: [],
+ depthStencilAttachment: {
+ view: dsTextureView,
+ depthClearValue: 0.5, // Will see this depth value if the fragment was clipped.
+ depthLoadOp: 'clear',
+ depthStoreOp: 'store',
+ stencilClearValue: info.stencil ? 0 : undefined,
+ stencilLoadOp: info.stencil ? 'clear' : undefined,
+ stencilStoreOp: info.stencil ? 'discard' : undefined,
+ },
+ });
+ pass.setPipeline(testPipeline);
+ pass.setBindGroup(0, testBindGroup);
+ pass.setViewport(0, 0, kNumTestPoints, 1, kViewportMinDepth, kViewportMaxDepth);
+ pass.draw(kNumTestPoints);
+ pass.end();
+ }
+ if (dsActual) {
+ enc.copyTextureToBuffer({ texture: dsTexture }, { buffer: dsActual }, [kNumTestPoints]);
+ }
+ {
+ const clearValue = [0, 0, 0, 0]; // Will see this color if the check passed.
+ const pass = enc.beginRenderPass({
+ colorAttachments: [
+ checkTextureMSView
+ ? {
+ view: checkTextureMSView,
+ resolveTarget: checkTextureView,
+ clearValue,
+ loadOp: 'clear',
+ storeOp: 'discard',
+ }
+ : { view: checkTextureView, clearValue, loadOp: 'clear', storeOp: 'store' },
+ ],
+ depthStencilAttachment: {
+ view: dsTextureView,
+ depthLoadOp: 'load',
+ depthStoreOp: 'store',
+ stencilClearValue: info.stencil ? 0 : undefined,
+ stencilLoadOp: info.stencil ? 'clear' : undefined,
+ stencilStoreOp: info.stencil ? 'discard' : undefined,
+ },
+ });
+ pass.setPipeline(checkPipeline);
+ pass.setViewport(0, 0, kNumTestPoints, 1, 0.0, 1.0);
+ pass.draw(kNumTestPoints);
+ pass.end();
+ }
+ enc.copyTextureToBuffer({ texture: checkTexture }, { buffer: checkBuffer }, [kNumTestPoints]);
+ if (dsExpected) {
+ enc.copyTextureToBuffer({ texture: dsTexture }, { buffer: dsExpected }, [kNumTestPoints]);
+ }
+ t.device.queue.submit([enc.finish()]);
+
+ t.expectGPUBufferValuesPassCheck(
+ fragInputZFailedBuffer,
+ a => checkElementsBetween(a, [() => -1e-5, () => 1e-5]),
+ { type: Float32Array, typedLength: kNumTestPoints }
+ );
+
+ const kCheckPassedValue = 0;
+ const predicatePrinter: CheckElementsSupplementalTableRows = [
+ { leftHeader: 'expected ==', getValueForCell: _index => kCheckPassedValue },
+ ];
+ if (dsActual && dsExpected && format === 'depth32float') {
+ await Promise.all([dsActual.mapAsync(GPUMapMode.READ), dsExpected.mapAsync(GPUMapMode.READ)]);
+ const act = new Float32Array(dsActual.getMappedRange());
+ const exp = new Float32Array(dsExpected.getMappedRange());
+ predicatePrinter.push(
+ { leftHeader: 'act ==', getValueForCell: index => act[index].toFixed(2) },
+ { leftHeader: 'exp ==', getValueForCell: index => exp[index].toFixed(2) }
+ );
+ }
+ t.expectGPUBufferValuesPassCheck(
+ checkBuffer,
+ a =>
+ checkElementsPassPredicate(a, (_index, value) => value === kCheckPassedValue, {
+ predicatePrinter,
+ }),
+ { type: Uint8Array, typedLength: kNumTestPoints, method: 'map' }
+ );
+ });
+
+g.test('depth_test_input_clamped')
+ .desc(
+ `
+Input to the depth test should always be in the range of viewport depth, even if it was written by
+the fragment shader (using frag_depth).
+
+To test this, first initialize the depth buffer with N expected values (by writing frag_depth, with
+the default viewport). These expected values are clamped by the shader to [0.25, 0.75].
+
+Then, run another pass with the viewport depth set to [0.25,0.75], and output various (unclamped)
+frag_depth values from its fragment shader with depthCompare:'not-equal'. These should get clamped;
+any fragments that have unexpected values then get drawn to the color buffer, which is later checked
+to be empty.`
+ )
+ .params(u =>
+ u //
+ .combine('format', kDepthStencilFormats)
+ .filter(p => !!kTextureFormatInfo[p.format].depth)
+ .combine('unclippedDepth', [false, true])
+ .combine('multisampled', [false, true])
+ )
+ .beforeAllSubcases(t => {
+ const info = kTextureFormatInfo[t.params.format];
+
+ t.selectDeviceOrSkipTestCase([
+ t.params.unclippedDepth ? 'depth-clip-control' : undefined,
+ info.feature,
+ ]);
+ })
+ .fn(t => {
+ const { format, unclippedDepth, multisampled } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ const kNumDepthValues = 8;
+ const kViewportMinDepth = 0.25;
+ const kViewportMaxDepth = 0.75;
+
+ const shaderSource = `
+ // Test depths, with viewport range corresponding to [0,1].
+ var<private> kDepths: array<f32, ${kNumDepthValues}> = array<f32, ${kNumDepthValues}>(
+ -1.0, -0.5, 0.0, 0.25, 0.75, 1.0, 1.5, 2.0);
+
+ const vpMin: f32 = ${kViewportMinDepth};
+ const vpMax: f32 = ${kViewportMaxDepth};
+
+ // Draw the points in a straight horizontal row, one per pixel.
+ fn vertexX(idx: u32) -> f32 {
+ return (f32(idx) + 0.5) * 2.0 / ${kNumDepthValues}.0 - 1.0;
+ }
+
+ struct VF {
+ @builtin(position) pos: vec4<f32>,
+ @location(0) @interpolate(flat) vertexIndex: u32,
+ };
+
+ @vertex
+ fn vmain(@builtin(vertex_index) idx: u32) -> VF {
+ var vf: VF;
+ // Depth=0.5 because we want to render every point, not get clipped.
+ vf.pos = vec4<f32>(vertexX(idx), 0.0, 0.5, 1.0);
+ vf.vertexIndex = idx;
+ return vf;
+ }
+
+ @fragment
+ fn finit(vf: VF) -> @builtin(frag_depth) f32 {
+ // Expected values of the ftest pipeline.
+ return clamp(kDepths[vf.vertexIndex], vpMin, vpMax);
+ }
+
+ struct FTest {
+ @builtin(frag_depth) depth: f32,
+ @location(0) color: f32,
+ };
+
+ @fragment
+ fn ftest(vf: VF) -> FTest {
+ var f: FTest;
+ f.depth = kDepths[vf.vertexIndex]; // Should get clamped to the viewport.
+ f.color = 1.0; // Color written if the resulting depth is unexpected.
+ return f;
+ }
+ `;
+
+ const module = t.device.createShaderModule({ code: shaderSource });
+
+ // Initialize depth attachment with expected values, in [0.25,0.75].
+ const initPipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: { module, entryPoint: 'vmain' },
+ primitive: { topology: 'point-list' },
+ depthStencil: { format, depthWriteEnabled: true, depthCompare: 'always' },
+ multisample: multisampled ? { count: 4 } : undefined,
+ fragment: { module, entryPoint: 'finit', targets: [] },
+ });
+
+ // With a viewport set to [0.25,0.75], output values in [0.0,1.0] and check they're clamped
+ // before the depth test, regardless of whether unclippedDepth is enabled.
+ const testPipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: { module, entryPoint: 'vmain' },
+ primitive: {
+ topology: 'point-list',
+ unclippedDepth,
+ },
+ depthStencil: { format, depthCompare: 'not-equal', depthWriteEnabled: false },
+ multisample: multisampled ? { count: 4 } : undefined,
+ fragment: { module, entryPoint: 'ftest', targets: [{ format: 'r8unorm' }] },
+ });
+
+ const dsTexture = t.device.createTexture({
+ format,
+ size: [kNumDepthValues],
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ sampleCount: multisampled ? 4 : 1,
+ });
+ const dsTextureView = dsTexture.createView();
+
+ const testTextureDesc = {
+ format: 'r8unorm' as const,
+ size: [kNumDepthValues],
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ };
+ const testTexture = t.device.createTexture(testTextureDesc);
+ const testTextureView = testTexture.createView();
+ const testTextureMSView = multisampled
+ ? t.device.createTexture({ ...testTextureDesc, sampleCount: 4 }).createView()
+ : undefined;
+
+ const resultBuffer = t.device.createBuffer({
+ size: kNumDepthValues,
+ usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ,
+ });
+
+ const enc = t.device.createCommandEncoder();
+ {
+ const pass = enc.beginRenderPass({
+ colorAttachments: [],
+ depthStencilAttachment: {
+ view: dsTextureView,
+ depthClearValue: 1.0,
+ depthLoadOp: 'clear',
+ depthStoreOp: 'store',
+ stencilClearValue: info.stencil ? 0 : undefined,
+ stencilLoadOp: info.stencil ? 'clear' : undefined,
+ stencilStoreOp: info.stencil ? 'discard' : undefined,
+ },
+ });
+ pass.setPipeline(initPipeline);
+ pass.draw(kNumDepthValues);
+ pass.end();
+ }
+ {
+ const clearValue = [0, 0, 0, 0]; // Will see this color if the test passed.
+ const pass = enc.beginRenderPass({
+ colorAttachments: [
+ testTextureMSView
+ ? {
+ view: testTextureMSView,
+ resolveTarget: testTextureView,
+ clearValue,
+ loadOp: 'clear',
+ storeOp: 'discard',
+ }
+ : { view: testTextureView, clearValue, loadOp: 'clear', storeOp: 'store' },
+ ],
+ depthStencilAttachment: {
+ view: dsTextureView,
+ depthLoadOp: 'load',
+ depthStoreOp: 'store',
+ stencilClearValue: info.stencil ? 0 : undefined,
+ stencilLoadOp: info.stencil ? 'clear' : undefined,
+ stencilStoreOp: info.stencil ? 'discard' : undefined,
+ },
+ });
+ pass.setPipeline(testPipeline);
+ pass.setViewport(0, 0, kNumDepthValues, 1, kViewportMinDepth, kViewportMaxDepth);
+ pass.draw(kNumDepthValues);
+ pass.end();
+ }
+ enc.copyTextureToBuffer({ texture: testTexture }, { buffer: resultBuffer }, [kNumDepthValues]);
+ t.device.queue.submit([enc.finish()]);
+
+ t.expectGPUBufferValuesEqual(resultBuffer, new Uint8Array(kNumDepthValues), 0, {
+ method: 'map',
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/draw.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/draw.spec.ts
new file mode 100644
index 0000000000..6ed4be08fd
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/draw.spec.ts
@@ -0,0 +1,768 @@
+export const description = `
+Tests for the general aspects of draw/drawIndexed/drawIndirect/drawIndexedIndirect.
+
+Primitive topology tested in api/operation/render_pipeline/primitive_topology.spec.ts.
+Index format tested in api/operation/command_buffer/render/state_tracking.spec.ts.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import {
+ assert,
+ TypedArrayBufferView,
+ TypedArrayBufferViewConstructor,
+} from '../../../../common/util/util.js';
+import { GPUTest, TextureTestMixin } from '../../../gpu_test.js';
+import { PerPixelComparison } from '../../../util/texture/texture_ok.js';
+
+class DrawTest extends TextureTestMixin(GPUTest) {
+ checkTriangleDraw(opts: {
+ firstIndex: number | undefined;
+ count: number;
+ firstInstance: number | undefined;
+ instanceCount: number | undefined;
+ indexed: boolean;
+ indirect: boolean;
+ vertexBufferOffset: number;
+ indexBufferOffset: number | undefined;
+ baseVertex: number | undefined;
+ }): void {
+ // Set fallbacks when parameters are undefined in order to calculate the expected values.
+ const defaulted = {
+ firstIndex: opts.firstIndex ?? 0,
+ count: opts.count,
+ firstInstance: opts.firstInstance ?? 0,
+ instanceCount: opts.instanceCount ?? 1,
+ indexed: opts.indexed,
+ indirect: opts.indirect,
+ vertexBufferOffset: opts.vertexBufferOffset,
+ indexBufferOffset: opts.indexBufferOffset ?? 0,
+ baseVertex: opts.baseVertex ?? 0,
+ };
+
+ const renderTargetSize = [72, 36];
+
+ // The test will split up the render target into a grid where triangles of
+ // increasing primitive id will be placed along the X axis, and triangles
+ // of increasing instance id will be placed along the Y axis. The size of the
+ // grid is based on the max primitive id and instance id used.
+ const numX = 6;
+ const numY = 6;
+ const tileSizeX = renderTargetSize[0] / numX;
+ const tileSizeY = renderTargetSize[1] / numY;
+
+ // |\
+ // | \
+ // |______\
+ // Unit triangle shaped like this. 0-1 Y-down.
+ /* prettier-ignore */
+ const triangleVertices = [
+ 0.0, 0.0,
+ 0.0, 1.0,
+ 1.0, 1.0,
+ ];
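+ // For example, with numX = numY = 6 and a 72x36 render target, each grid tile is 12x6 pixels;
+ // the triangle for primitive id 2 / instance id 1 is mapped by the vertex shader below into
+ // the tile whose top-left corner is at pixel (24, 6).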
+
+ const renderTarget = this.device.createTexture({
+ size: renderTargetSize,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ format: 'rgba8unorm',
+ });
+
+ const vertexModule = this.device.createShaderModule({
+ code: `
+struct Inputs {
+ @builtin(vertex_index) vertex_index : u32,
+ @builtin(instance_index) instance_id : u32,
+ @location(0) vertexPosition : vec2<f32>,
+};
+
+@vertex fn vert_main(input : Inputs
+ ) -> @builtin(position) vec4<f32> {
+ // Divide by 3u (the number of vertices in a triangle) to convert the vertex index
+ // into a primitive id.
+ var vertex_id : u32 = input.vertex_index / 3u;
+
+ var x : f32 = (input.vertexPosition.x + f32(vertex_id)) / ${numX}.0;
+ var y : f32 = (input.vertexPosition.y + f32(input.instance_id)) / ${numY}.0;
+
+ // (0,1) y-down space to (-1,1) y-up NDC
+ x = 2.0 * x - 1.0;
+ y = -2.0 * y + 1.0;
+ return vec4<f32>(x, y, 0.0, 1.0);
+}
+`,
+ });
+
+ const fragmentModule = this.device.createShaderModule({
+ code: `
+struct Output {
+ value : u32
+};
+
+@group(0) @binding(0) var<storage, read_write> output : Output;
+
+@fragment fn frag_main() -> @location(0) vec4<f32> {
+ output.value = 1u;
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+}
+`,
+ });
+
+ const pipeline = this.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: vertexModule,
+ entryPoint: 'vert_main',
+ buffers: [
+ {
+ attributes: [
+ {
+ shaderLocation: 0,
+ format: 'float32x2',
+ offset: 0,
+ },
+ ],
+ arrayStride: 2 * Float32Array.BYTES_PER_ELEMENT,
+ },
+ ],
+ },
+ fragment: {
+ module: fragmentModule,
+ entryPoint: 'frag_main',
+ targets: [
+ {
+ format: 'rgba8unorm',
+ },
+ ],
+ },
+ });
+
+ const resultBuffer = this.device.createBuffer({
+ size: Uint32Array.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+
+ const resultBindGroup = this.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: {
+ buffer: resultBuffer,
+ },
+ },
+ ],
+ });
+
+ const commandEncoder = this.device.createCommandEncoder();
+ const renderPass = commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ clearValue: [0, 0, 0, 0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+
+ renderPass.setPipeline(pipeline);
+ renderPass.setBindGroup(0, resultBindGroup);
+
+ if (defaulted.indexed) {
+ // INDEXED DRAW
+ assert(defaulted.baseVertex !== undefined);
+ assert(defaulted.indexBufferOffset !== undefined);
+
+ renderPass.setIndexBuffer(
+ this.makeBufferWithContents(
+ /* prettier-ignore */ new Uint32Array([
+ // Offset the index buffer contents by empty data.
+ ...new Array(defaulted.indexBufferOffset / Uint32Array.BYTES_PER_ELEMENT),
+
+ 0, 1, 2, //
+ 3, 4, 5, //
+ 6, 7, 8, //
+ ]),
+ GPUBufferUsage.INDEX
+ ),
+ 'uint32',
+ defaulted.indexBufferOffset
+ );
+
+ renderPass.setVertexBuffer(
+ 0,
+ this.makeBufferWithContents(
+ /* prettier-ignore */ new Float32Array([
+ // Offset the vertex buffer contents by empty data.
+ ...new Array(defaulted.vertexBufferOffset / Float32Array.BYTES_PER_ELEMENT),
+
+ // selected with base_vertex=0
+ // count=6
+ ...triangleVertices, // | count=6;first=3
+ ...triangleVertices, // | |
+ ...triangleVertices, // |
+
+ // selected with base_vertex=9
+ // count=6
+ ...triangleVertices, // | count=6;first=3
+ ...triangleVertices, // | |
+ ...triangleVertices, // |
+ ]),
+ GPUBufferUsage.VERTEX
+ ),
+ defaulted.vertexBufferOffset
+ );
+
+ if (defaulted.indirect) {
+ const args = [
+ defaulted.count,
+ defaulted.instanceCount,
+ defaulted.firstIndex,
+ defaulted.baseVertex,
+ defaulted.firstInstance,
+ ] as const;
+ renderPass.drawIndexedIndirect(
+ this.makeBufferWithContents(new Uint32Array(args), GPUBufferUsage.INDIRECT),
+ 0
+ );
+ } else {
+ const args = [
+ opts.count,
+ opts.instanceCount,
+ opts.firstIndex,
+ opts.baseVertex,
+ opts.firstInstance,
+ ] as const;
+ renderPass.drawIndexed.apply(renderPass, [...args]);
+ }
+ } else {
+ // NON-INDEXED DRAW
+ renderPass.setVertexBuffer(
+ 0,
+ this.makeBufferWithContents(
+ /* prettier-ignore */ new Float32Array([
+ // Offset the vertex buffer contents by empty data.
+ ...new Array(defaulted.vertexBufferOffset / Float32Array.BYTES_PER_ELEMENT),
+
+ // count=6
+ ...triangleVertices, // | count=6;first=3
+ ...triangleVertices, // | |
+ ...triangleVertices, // |
+ ]),
+ GPUBufferUsage.VERTEX
+ ),
+ defaulted.vertexBufferOffset
+ );
+
+ if (defaulted.indirect) {
+ const args = [
+ defaulted.count,
+ defaulted.instanceCount,
+ defaulted.firstIndex,
+ defaulted.firstInstance,
+ ] as const;
+ renderPass.drawIndirect(
+ this.makeBufferWithContents(new Uint32Array(args), GPUBufferUsage.INDIRECT),
+ 0
+ );
+ } else {
+ const args = [opts.count, opts.instanceCount, opts.firstIndex, opts.firstInstance] as const;
+ renderPass.draw.apply(renderPass, [...args]);
+ }
+ }
+
+ renderPass.end();
+ this.queue.submit([commandEncoder.finish()]);
+
+ const green = new Uint8Array([0, 255, 0, 255]);
+ const transparentBlack = new Uint8Array([0, 0, 0, 0]);
+
+ const didDraw = defaulted.count && defaulted.instanceCount;
+
+ this.expectGPUBufferValuesEqual(resultBuffer, new Uint32Array([didDraw ? 1 : 0]));
+
+ const baseVertexCount = defaulted.baseVertex ?? 0;
+ const pixelComparisons: PerPixelComparison<Uint8Array>[] = [];
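+ // Worked example (illustrative): for an indexed draw with firstIndex=3, baseVertex=9, and
+ // count=6, the shader sees vertex indices 12..17, i.e. primitive ids 4 and 5, so only those
+ // two columns of the grid should be green (further restricted by the instance range below).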
+ for (let primitiveId = 0; primitiveId < numX; ++primitiveId) {
+ for (let instanceId = 0; instanceId < numY; ++instanceId) {
+ let expectedColor = didDraw ? green : transparentBlack;
+ if (
+ primitiveId * 3 < defaulted.firstIndex + baseVertexCount ||
+ primitiveId * 3 >= defaulted.firstIndex + baseVertexCount + defaulted.count
+ ) {
+ expectedColor = transparentBlack;
+ }
+
+ if (
+ instanceId < defaulted.firstInstance ||
+ instanceId >= defaulted.firstInstance + defaulted.instanceCount
+ ) {
+ expectedColor = transparentBlack;
+ }
+
+ pixelComparisons.push({
+ coord: { x: (1 / 3 + primitiveId) * tileSizeX, y: (2 / 3 + instanceId) * tileSizeY },
+ exp: expectedColor,
+ });
+ }
+ }
+ this.expectSinglePixelComparisonsAreOkInTexture({ texture: renderTarget }, pixelComparisons);
+ }
+}
+
+export const g = makeTestGroup(DrawTest);
+
+g.test('arguments')
+ .desc(
+ `Test that draw arguments are passed correctly by drawing triangles in a grid.
+Horizontally across the texture are triangles with increasing "primitive id".
+Vertically down the screen are triangles with increasing instance id.
+Increasing the |first| param should skip some of the beginning triangles on the horizontal axis.
+Increasing the |first_instance| param should skip some of the beginning triangles on the vertical axis.
+The vertex buffer contains two sets of disjoint triangles, and base_vertex is used to select the second set.
+The test checks that the center of all of the expected triangles is drawn, and the others are empty.
+The fragment shader also writes out to a storage buffer. If the draw is zero-sized, check that no value is written.
+
+Params:
+ - first= {0, 3} - either the firstVertex or firstIndex
+ - count= {0, 3, 6} - either the vertexCount or indexCount
+ - first_instance= {0, 2}
+ - instance_count= {0, 1, 4}
+ - indexed= {true, false}
+ - indirect= {true, false}
+ - vertex_buffer_offset= {0, 32}
+ - index_buffer_offset= {0, 16} - only for indexed draws
+ - base_vertex= {0, 9} - only for indexed draws
+ `
+ )
+ .params(u =>
+ u
+ .combine('first', [0, 3] as const)
+ .combine('count', [0, 3, 6] as const)
+ .combine('first_instance', [0, 2] as const)
+ .combine('instance_count', [0, 1, 4] as const)
+ .combine('indexed', [false, true])
+ .combine('indirect', [false, true])
+ .combine('vertex_buffer_offset', [0, 32] as const)
+ .expand('index_buffer_offset', p => (p.indexed ? ([0, 16] as const) : [undefined]))
+ .expand('base_vertex', p => (p.indexed ? ([0, 9] as const) : [undefined]))
+ )
+ .beforeAllSubcases(t => {
+ if (t.params.first_instance > 0 && t.params.indirect) {
+ t.selectDeviceOrSkipTestCase('indirect-first-instance');
+ }
+ })
+ .fn(t => {
+ t.checkTriangleDraw({
+ firstIndex: t.params.first,
+ count: t.params.count,
+ firstInstance: t.params.first_instance,
+ instanceCount: t.params.instance_count,
+ indexed: t.params.indexed,
+ indirect: t.params.indirect,
+ vertexBufferOffset: t.params.vertex_buffer_offset,
+ indexBufferOffset: t.params.index_buffer_offset,
+ baseVertex: t.params.base_vertex,
+ });
+ });
+
+g.test('default_arguments')
+ .desc(
+ `
+ Test that default arguments are applied correctly by drawing triangles in a grid when the
+ arguments are not defined. This test is based on the 'arguments' test, with 'undefined' values in the parameters.
+ - mode= {draw, drawIndexed}
+ - arg= {instance_count, first_index, first_instance, base_vertex}
+ `
+ )
+ .params(u =>
+ u
+ .combine('mode', ['draw', 'drawIndexed'])
+ .beginSubcases()
+ .combine('instance_count', [undefined, 4] as const)
+ .combine('first_index', [undefined, 3] as const)
+ .combine('first_instance', [undefined, 2] as const)
+ .expand('base_vertex', p =>
+ p.mode === 'drawIndexed' ? ([undefined, 9] as const) : [undefined]
+ )
+ )
+ .fn(t => {
+ const kVertexCount = 3;
+ const kVertexBufferOffset = 32;
+ const kIndexBufferOffset = 16;
+
+ t.checkTriangleDraw({
+ firstIndex: t.params.first_index,
+ count: kVertexCount,
+ firstInstance: t.params.first_instance,
+ instanceCount: t.params.instance_count,
+ indexed: t.params.mode === 'drawIndexed',
+ indirect: false, // indirect
+ vertexBufferOffset: kVertexBufferOffset,
+ indexBufferOffset: kIndexBufferOffset,
+ baseVertex: t.params.base_vertex,
+ });
+ });
+
+g.test('vertex_attributes,basic')
+ .desc(
+ `Test basic fetching of vertex attributes.
+ Each vertex attribute is a single value and is written out into a storage buffer.
+ Tests that vertices with offsets/strides for instanced/non-instanced attributes are
+ fetched correctly. Not all vertex formats are tested.
+
+ Params:
+ - vertex_attribute_count= {1, 4, 8, 16}
+ - vertex_buffer_count={1, 4, 8} - where # attributes is > 0
+ - vertex_format={uint32, float32}
+ - step_mode= {undefined, vertex, instance, mixed} - where mixed only applies for vertex_buffer_count > 1
+ `
+ )
+ .params(u =>
+ u
+ .combine('vertex_attribute_count', [1, 4, 8, 16])
+ .combine('vertex_buffer_count', [1, 4, 8])
+ .combine('vertex_format', ['uint32', 'float32'] as const)
+ .combine('step_mode', [undefined, 'vertex', 'instance', 'mixed'] as const)
+ .unless(p => p.vertex_attribute_count < p.vertex_buffer_count)
+ .unless(p => p.step_mode === 'mixed' && p.vertex_buffer_count <= 1)
+ )
+ .fn(t => {
+ const vertexCount = 4;
+ const instanceCount = 4;
+
+ // In compat mode, @builtin(vertex_index) and @builtin(instance_index) each take an attribute.
+ const maxAttributes = t.device.limits.maxVertexAttributes - (t.isCompatibility ? 2 : 0);
+ const numAttributes = Math.min(maxAttributes, t.params.vertex_attribute_count);
+ const maxAttributesPerVertexBuffer = Math.ceil(numAttributes / t.params.vertex_buffer_count);
+
+ let shaderLocation = 0;
+ let attributeValue = 0;
+ const bufferLayouts: GPUVertexBufferLayout[] = [];
+
+ let ExpectedDataConstructor: TypedArrayBufferViewConstructor;
+ switch (t.params.vertex_format) {
+ case 'uint32':
+ ExpectedDataConstructor = Uint32Array;
+ break;
+ case 'float32':
+ ExpectedDataConstructor = Float32Array;
+ break;
+ }
+
+ // Populate |bufferLayouts|, |vertexBufferData|, and |vertexBuffers|.
+ // We will use this to both create the render pipeline, and produce the
+ // expected data on the CPU.
+ // Attributes in each buffer will be interleaved.
+ const vertexBuffers: GPUBuffer[] = [];
+ const vertexBufferData: TypedArrayBufferView[] = [];
+ for (let b = 0; b < t.params.vertex_buffer_count; ++b) {
+ const vertexBufferValues: number[] = [];
+
+ let offset = 0;
+ let stepMode = t.params.step_mode;
+
+ // If stepMode is mixed, alternate between vertex and instance.
+ if (stepMode === 'mixed') {
+ stepMode = (['vertex', 'instance'] as const)[b % 2];
+ }
+
+ let vertexOrInstanceCount: number;
+ switch (stepMode) {
+ case undefined:
+ case 'vertex':
+ vertexOrInstanceCount = vertexCount;
+ break;
+ case 'instance':
+ vertexOrInstanceCount = instanceCount;
+ break;
+ }
+
+ const attributes: GPUVertexAttribute[] = [];
+ const numAttributesForBuffer = Math.min(
+ maxAttributesPerVertexBuffer,
+ maxAttributes - b * maxAttributesPerVertexBuffer
+ );
+
+ for (let a = 0; a < numAttributesForBuffer; ++a) {
+ const attribute: GPUVertexAttribute = {
+ format: t.params.vertex_format,
+ shaderLocation,
+ offset,
+ };
+ attributes.push(attribute);
+
+ offset += ExpectedDataConstructor.BYTES_PER_ELEMENT;
+ shaderLocation += 1;
+ }
+
+ for (let v = 0; v < vertexOrInstanceCount; ++v) {
+ for (let a = 0; a < numAttributesForBuffer; ++a) {
+ vertexBufferValues.push(attributeValue);
+ attributeValue += 1.234; // Values will get rounded later if we make a Uint32Array.
+ }
+ }
+
+ bufferLayouts.push({
+ attributes,
+ arrayStride: offset,
+ stepMode,
+ });
+
+ const data = new ExpectedDataConstructor(vertexBufferValues);
+ vertexBufferData.push(data);
+ vertexBuffers.push(t.makeBufferWithContents(data, GPUBufferUsage.VERTEX));
+ }
+
+ // Create an array of shader locations [0, 1, 2, 3, ...] for easy iteration.
+ const vertexInputShaderLocations = new Array(shaderLocation).fill(0).map((_, i) => i);
+
+ // Create the expected data buffer.
+ const expectedData = new ExpectedDataConstructor(
+ vertexCount * instanceCount * vertexInputShaderLocations.length
+ );
+
+ // Populate the expected data. This is a CPU-side version of what we expect the shader
+ // to do.
+ for (let vertexIndex = 0; vertexIndex < vertexCount; ++vertexIndex) {
+ for (let instanceIndex = 0; instanceIndex < instanceCount; ++instanceIndex) {
+ bufferLayouts.forEach((bufferLayout, b) => {
+ for (const attribute of bufferLayout.attributes) {
+ const primitiveId = vertexCount * instanceIndex + vertexIndex;
+ const outputIndex =
+ primitiveId * vertexInputShaderLocations.length + attribute.shaderLocation;
+
+ let vertexOrInstanceIndex: number;
+ switch (bufferLayout.stepMode) {
+ case undefined:
+ case 'vertex':
+ vertexOrInstanceIndex = vertexIndex;
+ break;
+ case 'instance':
+ vertexOrInstanceIndex = instanceIndex;
+ break;
+ }
+
+ const view = new ExpectedDataConstructor(
+ vertexBufferData[b].buffer,
+ bufferLayout.arrayStride * vertexOrInstanceIndex + attribute.offset,
+ 1
+ );
+ expectedData[outputIndex] = view[0];
+ }
+ });
+ }
+ }
+
+ let wgslFormat: string;
+ switch (t.params.vertex_format) {
+ case 'uint32':
+ wgslFormat = 'u32';
+ break;
+ case 'float32':
+ wgslFormat = 'f32';
+ break;
+ }
+
+ // The maximum inter-stage shader location is 14: we consume one for primitiveId, 12 for
+ // locations 0 to 11, and combine the remaining vertex inputs into one location (one
+ // vec4<wgslFormat> when vertex_attribute_count === 16).
+ const interStageScalarShaderLocation = Math.min(shaderLocation, 12);
+ const interStageScalarShaderLocations = new Array(interStageScalarShaderLocation)
+ .fill(0)
+ .map((_, i) => i);
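+ // For example, with 16 vertex attributes: locations 0..11 carry attrib0..attrib11 as flat
+ // scalars, location 12 carries primitiveId, and location 13 carries attrib12..attrib15
+ // packed into a single vec4.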
+
+ let accumulateVariableDeclarationsInVertexShader = '';
+ let accumulateVariableAssignmentsInVertexShader = '';
+ let accumulateVariableDeclarationsInFragmentShader = '';
+ let accumulateVariableAssignmentsInFragmentShader = '';
+ // The remaining 3 vertex attributes
+ if (numAttributes === 16) {
+ accumulateVariableDeclarationsInVertexShader = `
+ @location(13) @interpolate(flat) outAttrib13 : vec4<${wgslFormat}>,
+ `;
+ accumulateVariableAssignmentsInVertexShader = `
+ output.outAttrib13 =
+ vec4<${wgslFormat}>(input.attrib12, input.attrib13, input.attrib14, input.attrib15);
+ `;
+ accumulateVariableDeclarationsInFragmentShader = `
+ @location(13) @interpolate(flat) attrib13 : vec4<${wgslFormat}>,
+ `;
+ accumulateVariableAssignmentsInFragmentShader = `
+ outBuffer.primitives[input.primitiveId].attrib12 = input.attrib13.x;
+ outBuffer.primitives[input.primitiveId].attrib13 = input.attrib13.y;
+ outBuffer.primitives[input.primitiveId].attrib14 = input.attrib13.z;
+ outBuffer.primitives[input.primitiveId].attrib15 = input.attrib13.w;
+ `;
+ } else if (numAttributes === 14) {
+ accumulateVariableDeclarationsInVertexShader = `
+ @location(13) @interpolate(flat) outAttrib13 : vec4<${wgslFormat}>,
+ `;
+ accumulateVariableAssignmentsInVertexShader = `
+ output.outAttrib13 =
+ vec4<${wgslFormat}>(input.attrib12, input.attrib13, 0, 0);
+ `;
+ accumulateVariableDeclarationsInFragmentShader = `
+ @location(13) @interpolate(flat) attrib13 : vec4<${wgslFormat}>,
+ `;
+ accumulateVariableAssignmentsInFragmentShader = `
+ outBuffer.primitives[input.primitiveId].attrib12 = input.attrib13.x;
+ outBuffer.primitives[input.primitiveId].attrib13 = input.attrib13.y;
+ `;
+ }
+
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+struct Inputs {
+ @builtin(vertex_index) vertexIndex : u32,
+ @builtin(instance_index) instanceIndex : u32,
+${vertexInputShaderLocations.map(i => ` @location(${i}) attrib${i} : ${wgslFormat},`).join('\n')}
+};
+
+struct Outputs {
+ @builtin(position) Position : vec4<f32>,
+${interStageScalarShaderLocations
+ .map(i => ` @location(${i}) @interpolate(flat) outAttrib${i} : ${wgslFormat},`)
+ .join('\n')}
+ @location(${interStageScalarShaderLocations.length}) @interpolate(flat) primitiveId : u32,
+${accumulateVariableDeclarationsInVertexShader}
+};
+
+@vertex fn main(input : Inputs) -> Outputs {
+ var output : Outputs;
+${interStageScalarShaderLocations.map(i => ` output.outAttrib${i} = input.attrib${i};`).join('\n')}
+${accumulateVariableAssignmentsInVertexShader}
+
+ output.primitiveId = input.instanceIndex * ${instanceCount}u + input.vertexIndex;
+ output.Position = vec4<f32>(0.0, 0.0, 0.5, 1.0);
+ return output;
+}
+ `,
+ }),
+ entryPoint: 'main',
+ buffers: bufferLayouts,
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `
+struct Inputs {
+${interStageScalarShaderLocations
+ .map(i => ` @location(${i}) @interpolate(flat) attrib${i} : ${wgslFormat},`)
+ .join('\n')}
+ @location(${interStageScalarShaderLocations.length}) @interpolate(flat) primitiveId : u32,
+${accumulateVariableDeclarationsInFragmentShader}
+};
+
+struct OutPrimitive {
+${vertexInputShaderLocations.map(i => ` attrib${i} : ${wgslFormat},`).join('\n')}
+};
+struct OutBuffer {
+ primitives : array<OutPrimitive>
+};
+@group(0) @binding(0) var<storage, read_write> outBuffer : OutBuffer;
+
+@fragment fn main(input : Inputs) {
+${interStageScalarShaderLocations
+ .map(i => ` outBuffer.primitives[input.primitiveId].attrib${i} = input.attrib${i};`)
+ .join('\n')}
+${accumulateVariableAssignmentsInFragmentShader}
+}
+ `,
+ }),
+ entryPoint: 'main',
+ targets: [
+ {
+ format: 'rgba8unorm',
+ writeMask: 0,
+ },
+ ],
+ },
+ primitive: {
+ topology: 'point-list',
+ },
+ });
+
+ const resultBuffer = t.device.createBuffer({
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ size: vertexCount * instanceCount * vertexInputShaderLocations.length * 4,
+ });
+
+ const resultBindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: {
+ buffer: resultBuffer,
+ },
+ },
+ ],
+ });
+
+ const commandEncoder = t.device.createCommandEncoder();
+ const renderPass = commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ // Dummy render attachment - not used (WebGPU doesn't allow using a render pass with no
+ // attachments)
+ view: t.device
+ .createTexture({
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ size: [1],
+ format: 'rgba8unorm',
+ })
+ .createView(),
+ clearValue: [0, 0, 0, 0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+
+ renderPass.setPipeline(pipeline);
+ renderPass.setBindGroup(0, resultBindGroup);
+ for (let i = 0; i < t.params.vertex_buffer_count; ++i) {
+ renderPass.setVertexBuffer(i, vertexBuffers[i]);
+ }
+ renderPass.draw(vertexCount, instanceCount);
+ renderPass.end();
+ t.device.queue.submit([commandEncoder.finish()]);
+
+ t.expectGPUBufferValuesEqual(resultBuffer, expectedData);
+ });
+
+g.test('vertex_attributes,formats')
+ .desc(
+ `Test all vertex formats are fetched correctly.
+
+ Runs a basic vertex shader which loads vertex data from two attributes which
+ may have different formats. Write data out to a storage buffer and check that
+ it was loaded correctly.
+
+ Params:
+ - vertex_format_1={...all_vertex_formats}
+ - vertex_format_2={...all_vertex_formats}
+ `
+ )
+ .unimplemented();
+
+g.test(`largeish_buffer`)
+ .desc(
+ `
+ Test that a very large range of a buffer is bound.
+ For a render pipeline that uses a vertex step mode and an instance step mode vertex buffer, test
+ that:
+ - For draw, drawIndirect, drawIndexed and drawIndexedIndirect:
+ - The bound range of vertex step mode vertex buffer is significantly larger than necessary
+ - The bound range of instance step mode vertex buffer is significantly larger than necessary
+ - A large buffer is bound to an unused slot
+ - For drawIndexed and drawIndexedIndirect:
+ - The bound range of index buffer is significantly larger than necessary
+ - For drawIndirect and drawIndexedIndirect:
+ - The indirect buffer is significantly larger than necessary
+`
+ )
+ .unimplemented();
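Side note on the vertex_attributes,basics test earlier in this file: its inter-stage packing can be hard to follow from the string templates alone. The standalone TypeScript sketch below (not part of the CTS; the helper name is illustrative only) models the same assumption the test makes — at most 12 scalar inter-stage locations, the next location reserved for primitiveId, and any remaining attributes packed into a single vec4.

// Hypothetical helper mirroring the packing used by the test: attributes 0..11
// each get a flat scalar location, primitiveId takes the next location, and any
// leftover attributes travel together in one vec4 at the following location.
function packInterStageLocations(numAttributes: number): {
  scalarLocations: number[];
  primitiveIdLocation: number;
  packedVec4Attributes: number[];
} {
  const scalarCount = Math.min(numAttributes, 12);
  const scalarLocations = Array.from({ length: scalarCount }, (_, i) => i);
  const primitiveIdLocation = scalarCount;
  const packedVec4Attributes = Array.from(
    { length: Math.max(0, numAttributes - scalarCount) },
    (_, i) => scalarCount + i
  );
  return { scalarLocations, primitiveIdLocation, packedVec4Attributes };
}

// With 16 attributes: locations 0..11 carry attrib0..attrib11, location 12
// carries primitiveId, and attrib12..attrib15 share a vec4 at location 13.
console.log(packInterStageLocations(16));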
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/indirect_draw.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/indirect_draw.spec.ts
new file mode 100644
index 0000000000..f5952dabf7
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/indirect_draw.spec.ts
@@ -0,0 +1,242 @@
+export const description = `
+Tests for the indirect-specific aspects of drawIndirect/drawIndexedIndirect.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import {
+ kDrawIndirectParametersSize,
+ kDrawIndexedIndirectParametersSize,
+} from '../../../capability_info.js';
+import { GPUTest, TextureTestMixin } from '../../../gpu_test.js';
+
+const filled = new Uint8Array([0, 255, 0, 255]);
+const notFilled = new Uint8Array([0, 0, 0, 0]);
+
+const kRenderTargetFormat = 'rgba8unorm';
+
+class F extends GPUTest {
+ MakeIndexBuffer(): GPUBuffer {
+ return this.makeBufferWithContents(
+ /* prettier-ignore */ new Uint32Array([
+ 0, 1, 2, // The bottom left triangle
+ 1, 2, 3, // The top right triangle
+ ]),
+ GPUBufferUsage.INDEX
+ );
+ }
+
+ MakeVertexBuffer(isIndexed: boolean): GPUBuffer {
+ /* prettier-ignore */
+ const vertices = isIndexed
+ ? [
+ -1.0, -1.0,
+ -1.0, 1.0,
+ 1.0, -1.0,
+ 1.0, 1.0,
+ ]
+ : [
+ // The bottom left triangle
+ -1.0, 1.0,
+ 1.0, -1.0,
+ -1.0, -1.0,
+
+ // The top right triangle
+ -1.0, 1.0,
+ 1.0, -1.0,
+ 1.0, 1.0,
+ ];
+ return this.makeBufferWithContents(new Float32Array(vertices), GPUBufferUsage.VERTEX);
+ }
+
+ MakeIndirectBuffer(isIndexed: boolean, indirectOffset: number): GPUBuffer {
+ const o = indirectOffset / Uint32Array.BYTES_PER_ELEMENT;
+
+ const parametersSize = isIndexed
+ ? kDrawIndexedIndirectParametersSize
+ : kDrawIndirectParametersSize;
+ const arraySize = o + parametersSize * 2;
+
+ const indirectBuffer = [...Array(arraySize)].map(() => Math.floor(Math.random() * 100));
+
+ if (isIndexed) {
+ // draw args that will draw the left bottom triangle (expected call)
+ indirectBuffer[o] = 3; // indexCount
+ indirectBuffer[o + 1] = 1; // instanceCount
+ indirectBuffer[o + 2] = 0; // firstIndex
+ indirectBuffer[o + 3] = 0; // baseVertex
+ indirectBuffer[o + 4] = 0; // firstInstance
+
+ // draw args that will draw both triangles
+ indirectBuffer[o + 5] = 6; // indexCount
+ indirectBuffer[o + 6] = 1; // instanceCount
+ indirectBuffer[o + 7] = 0; // firstIndex
+ indirectBuffer[o + 8] = 0; // baseVertex
+ indirectBuffer[o + 9] = 0; // firstInstance
+
+ if (o >= parametersSize) {
+ // draw args that will draw the right top triangle
+ indirectBuffer[o - 5] = 3; // indexCount
+ indirectBuffer[o - 4] = 1; // instanceCount
+ indirectBuffer[o - 3] = 3; // firstIndex
+ indirectBuffer[o - 2] = 0; // baseVertex
+ indirectBuffer[o - 1] = 0; // firstInstance
+ }
+
+ if (o >= parametersSize * 2) {
+ // draw args that will draw nothing
+ indirectBuffer[0] = 0; // indexCount
+ indirectBuffer[1] = 0; // instanceCount
+ indirectBuffer[2] = 0; // firstIndex
+ indirectBuffer[3] = 0; // baseVertex
+ indirectBuffer[4] = 0; // firstInstance
+ }
+ } else {
+ // draw args that will draw the left bottom triangle (expected call)
+ indirectBuffer[o] = 3; // vertexCount
+ indirectBuffer[o + 1] = 1; // instanceCount
+ indirectBuffer[o + 2] = 0; // firstVertex
+ indirectBuffer[o + 3] = 0; // firstInstance
+
+ // draw args that will draw both triangles
+ indirectBuffer[o + 4] = 6; // vertexCount
+ indirectBuffer[o + 5] = 1; // instanceCount
+ indirectBuffer[o + 6] = 0; // firstVertex
+ indirectBuffer[o + 7] = 0; // firstInstance
+
+ if (o >= parametersSize) {
+ // draw args that will draw the right top triangle
+ indirectBuffer[o - 4] = 3; // vertexCount
+ indirectBuffer[o - 3] = 1; // instanceCount
+ indirectBuffer[o - 2] = 3; // firstVertex
+ indirectBuffer[o - 1] = 0; // firstInstance
+ }
+
+ if (o >= parametersSize * 2) {
+ // draw args that will draw nothing
+ indirectBuffer[0] = 0; // vertexCount
+ indirectBuffer[1] = 0; // instanceCount
+ indirectBuffer[2] = 0; // firstVertex
+ indirectBuffer[3] = 0; // firstInstance
+ }
+ }
+
+ return this.makeBufferWithContents(new Uint32Array(indirectBuffer), GPUBufferUsage.INDIRECT);
+ }
+}
+
+export const g = makeTestGroup(TextureTestMixin(F));
+
+g.test('basics')
+ .desc(
+ `Test that the indirect draw parameters are tightly packed for drawIndirect and drawIndexedIndirect.
+An indirectBuffer is created based on indirectOffset. The draw args selected by indirectOffset
+draw the bottom-left triangle.
+The rest of the indirectBuffer is populated with random numbers, or with draw args that draw the
+top-right triangle, both triangles, or nothing, any of which would fail the color check.
+The test checks the render target to see that only the bottom-left area is filled, meaning the
+expected draw args were read correctly from the indirectBuffer at indirectOffset.
+
+Params:
+ - draw{Indirect, IndexedIndirect}
+ - indirectOffset= {0, 4, k * sizeof(args struct), k * sizeof(args struct) + 4}
+ `
+ )
+ .params(u =>
+ u
+ .combine('isIndexed', [true, false])
+ .beginSubcases()
+ .expand('indirectOffset', p => {
+ const indirectDrawParametersSize = p.isIndexed
+ ? kDrawIndexedIndirectParametersSize * Uint32Array.BYTES_PER_ELEMENT
+ : kDrawIndirectParametersSize * Uint32Array.BYTES_PER_ELEMENT;
+ return [
+ 0,
+ Uint32Array.BYTES_PER_ELEMENT,
+ 1 * indirectDrawParametersSize,
+ 1 * indirectDrawParametersSize + Uint32Array.BYTES_PER_ELEMENT,
+ 3 * indirectDrawParametersSize,
+ 3 * indirectDrawParametersSize + Uint32Array.BYTES_PER_ELEMENT,
+ 99 * indirectDrawParametersSize,
+ 99 * indirectDrawParametersSize + Uint32Array.BYTES_PER_ELEMENT,
+ ] as const;
+ })
+ )
+ .fn(t => {
+ const { isIndexed, indirectOffset } = t.params;
+
+ const vertexBuffer = t.MakeVertexBuffer(isIndexed);
+ const indirectBuffer = t.MakeIndirectBuffer(isIndexed, indirectOffset);
+
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `@vertex fn main(@location(0) pos : vec2<f32>) -> @builtin(position) vec4<f32> {
+ return vec4<f32>(pos, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ buffers: [
+ {
+ attributes: [
+ {
+ shaderLocation: 0,
+ format: 'float32x2',
+ offset: 0,
+ },
+ ],
+ arrayStride: 2 * Float32Array.BYTES_PER_ELEMENT,
+ },
+ ],
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `@fragment fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [
+ {
+ format: kRenderTargetFormat,
+ },
+ ],
+ },
+ });
+
+ const renderTarget = t.device.createTexture({
+ size: [4, 4],
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ format: kRenderTargetFormat,
+ });
+
+ const commandEncoder = t.device.createCommandEncoder();
+ const renderPass = commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ clearValue: [0, 0, 0, 0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ renderPass.setPipeline(pipeline);
+ renderPass.setVertexBuffer(0, vertexBuffer, 0);
+
+ if (isIndexed) {
+ renderPass.setIndexBuffer(t.MakeIndexBuffer(), 'uint32', 0);
+ renderPass.drawIndexedIndirect(indirectBuffer, indirectOffset);
+ } else {
+ renderPass.drawIndirect(indirectBuffer, indirectOffset);
+ }
+ renderPass.end();
+ t.queue.submit([commandEncoder.finish()]);
+
+ t.expectSinglePixelComparisonsAreOkInTexture({ texture: renderTarget }, [
+ // The bottom left area is filled
+ { coord: { x: 0, y: 1 }, exp: filled },
+ // The top right area is not filled
+ { coord: { x: 1, y: 0 }, exp: notFilled },
+ ]);
+ });
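For reference, the 'basics' test above relies on drawIndirect consuming four tightly packed u32 values (vertexCount, instanceCount, firstVertex, firstInstance) and drawIndexedIndirect consuming five (indexCount, instanceCount, firstIndex, baseVertex, firstInstance), starting at indirectOffset. A minimal standalone sketch (not part of the CTS; the function names are illustrative) of encoding such arguments into a Uint32Array:

// Encode drawIndirect args into a Uint32Array at a byte offset, mirroring the
// layout MakeIndirectBuffer builds above.
function encodeDrawIndirectArgs(
  target: Uint32Array,
  byteOffset: number,
  args: [number, number, number, number] // vertexCount, instanceCount, firstVertex, firstInstance
): void {
  target.set(args, byteOffset / Uint32Array.BYTES_PER_ELEMENT);
}

// drawIndexedIndirect uses five values instead of four.
function encodeDrawIndexedIndirectArgs(
  target: Uint32Array,
  byteOffset: number,
  args: [number, number, number, number, number] // indexCount, instanceCount, firstIndex, baseVertex, firstInstance
): void {
  target.set(args, byteOffset / Uint32Array.BYTES_PER_ELEMENT);
}

// Example: one triangle, one instance, encoded at byte offset 16.
const indirect = new Uint32Array(16);
encodeDrawIndirectArgs(indirect, 16, [3, 1, 0, 0]);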
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/robust_access_index.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/robust_access_index.spec.ts
new file mode 100644
index 0000000000..68d7bc795d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/robust_access_index.spec.ts
@@ -0,0 +1,8 @@
+export const description = `
+TODO: Test that drawIndexedIndirect accesses the index buffer robustly.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/stencil.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/stencil.spec.ts
new file mode 100644
index 0000000000..5ce1e32b17
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/rendering/stencil.spec.ts
@@ -0,0 +1,584 @@
+export const description = `
+Test related to stencil states, stencil op, compare func, etc.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { TypedArrayBufferView } from '../../../../common/util/util.js';
+import {
+ kDepthStencilFormats,
+ kTextureFormatInfo,
+ DepthStencilFormat,
+} from '../../../format_info.js';
+import { GPUTest, TextureTestMixin } from '../../../gpu_test.js';
+import { TexelView } from '../../../util/texture/texel_view.js';
+
+const kStencilFormats = kDepthStencilFormats.filter(format => kTextureFormatInfo[format].stencil);
+
+const kBaseColor = new Float32Array([1.0, 1.0, 1.0, 1.0]);
+const kRedStencilColor = new Float32Array([1.0, 0.0, 0.0, 1.0]);
+const kGreenStencilColor = new Float32Array([0.0, 1.0, 0.0, 1.0]);
+
+type TestStates = {
+ state: GPUDepthStencilState;
+ color: Float32Array;
+ stencil: number | undefined;
+};
+
+class StencilTest extends TextureTestMixin(GPUTest) {
+ checkStencilOperation(
+ depthStencilFormat: DepthStencilFormat,
+ testStencilState: GPUStencilFaceState,
+ initialStencil: number,
+ _expectedStencil: number,
+ depthCompare: GPUCompareFunction = 'always'
+ ) {
+ const kReferenceStencil = 3;
+
+ const baseStencilState = {
+ compare: 'always',
+ failOp: 'keep',
+ passOp: 'replace',
+ } as const;
+
+ const stencilState = {
+ compare: 'equal',
+ failOp: 'keep',
+ passOp: 'keep',
+ } as const;
+
+ const baseState = {
+ format: depthStencilFormat,
+ depthWriteEnabled: false,
+ depthCompare: 'always',
+ stencilFront: baseStencilState,
+ stencilBack: baseStencilState,
+ } as const;
+
+ const testState = {
+ format: depthStencilFormat,
+ depthWriteEnabled: false,
+ depthCompare,
+ stencilFront: testStencilState,
+ stencilBack: testStencilState,
+ } as const;
+
+ const testState2 = {
+ format: depthStencilFormat,
+ depthWriteEnabled: false,
+ depthCompare: 'always',
+ stencilFront: stencilState,
+ stencilBack: stencilState,
+ } as const;
+
+ const testStates = [
+ // Draw the base triangle with the initial stencil reference. This sets the stencil buffer to initialStencil.
+ { state: baseState, color: kBaseColor, stencil: initialStencil },
+ { state: testState, color: kRedStencilColor, stencil: kReferenceStencil },
+ { state: testState2, color: kGreenStencilColor, stencil: _expectedStencil },
+ ];
+ this.runStencilStateTest(depthStencilFormat, testStates, kGreenStencilColor);
+ }
+
+ checkStencilCompareFunction(
+ depthStencilFormat: DepthStencilFormat,
+ compareFunction: GPUCompareFunction,
+ stencilRefValue: number,
+ expectedColor: Float32Array
+ ) {
+ const baseStencilState = {
+ compare: 'always',
+ failOp: 'keep',
+ passOp: 'replace',
+ } as const;
+
+ const stencilState = {
+ compare: compareFunction,
+ failOp: 'keep',
+ passOp: 'keep',
+ } as const;
+
+ const baseState = {
+ format: depthStencilFormat,
+ depthWriteEnabled: false,
+ depthCompare: 'always',
+ stencilFront: baseStencilState,
+ stencilBack: baseStencilState,
+ } as const;
+
+ const testState = {
+ format: depthStencilFormat,
+ depthWriteEnabled: false,
+ depthCompare: 'always',
+ stencilFront: stencilState,
+ stencilBack: stencilState,
+ } as const;
+
+ const testStates = [
+ // Draw the base triangle with stencil reference 1. This clears the stencil buffer to 1.
+ { state: baseState, color: kBaseColor, stencil: 1 },
+ { state: testState, color: kGreenStencilColor, stencil: stencilRefValue },
+ ];
+ this.runStencilStateTest(depthStencilFormat, testStates, expectedColor);
+ }
+
+ runStencilStateTest(
+ depthStencilFormat: DepthStencilFormat,
+ testStates: TestStates[],
+ expectedColor: Float32Array,
+ isSingleEncoderMultiplePass: boolean = false
+ ) {
+ const renderTargetFormat = 'rgba8unorm';
+ const renderTarget = this.trackForCleanup(
+ this.device.createTexture({
+ format: renderTargetFormat,
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ })
+ );
+
+ const depthTexture = this.trackForCleanup(
+ this.device.createTexture({
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ format: depthStencilFormat,
+ sampleCount: 1,
+ mipLevelCount: 1,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_DST,
+ })
+ );
+
+ const hasDepth = kTextureFormatInfo[depthStencilFormat].depth;
+ const depthStencilAttachment: GPURenderPassDepthStencilAttachment = {
+ view: depthTexture.createView(),
+ depthLoadOp: hasDepth ? 'load' : undefined,
+ depthStoreOp: hasDepth ? 'store' : undefined,
+ stencilLoadOp: 'load',
+ stencilStoreOp: 'store',
+ };
+
+ const encoder = this.device.createCommandEncoder();
+ let pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ storeOp: 'store',
+ loadOp: 'load',
+ },
+ ],
+ depthStencilAttachment,
+ });
+
+ if (isSingleEncoderMultiplePass) {
+ pass.end();
+ }
+
+ // Draw a triangle with the given stencil reference and the comparison function.
+ // The color will be kGreenStencilColor if the stencil test passes, and kBaseColor if not.
+ for (const test of testStates) {
+ if (isSingleEncoderMultiplePass) {
+ pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ storeOp: 'store',
+ loadOp: 'load',
+ },
+ ],
+ depthStencilAttachment,
+ });
+ }
+ const testPipeline = this.createRenderPipelineForTest(test.state);
+ pass.setPipeline(testPipeline);
+ if (test.stencil !== undefined) {
+ pass.setStencilReference(test.stencil);
+ }
+ pass.setBindGroup(
+ 0,
+ this.createBindGroupForTest(testPipeline.getBindGroupLayout(0), test.color)
+ );
+ pass.draw(1);
+
+ if (isSingleEncoderMultiplePass) {
+ pass.end();
+ }
+ }
+
+ if (!isSingleEncoderMultiplePass) {
+ pass.end();
+ }
+ this.device.queue.submit([encoder.finish()]);
+
+ const expColor = {
+ R: expectedColor[0],
+ G: expectedColor[1],
+ B: expectedColor[2],
+ A: expectedColor[3],
+ };
+ const expTexelView = TexelView.fromTexelsAsColors(renderTargetFormat, _coords => expColor);
+ this.expectTexelViewComparisonIsOkInTexture({ texture: renderTarget }, expTexelView, [1, 1]);
+ }
+
+ createRenderPipelineForTest(depthStencil: GPUDepthStencilState): GPURenderPipeline {
+ return this.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: this.device.createShaderModule({
+ code: `
+ @vertex
+ fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ targets: [{ format: 'rgba8unorm' }],
+ module: this.device.createShaderModule({
+ code: `
+ struct Params {
+ color : vec4<f32>
+ }
+ @group(0) @binding(0) var<uniform> params : Params;
+
+ @fragment fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(params.color);
+ }`,
+ }),
+ entryPoint: 'main',
+ },
+ primitive: { topology: 'point-list' },
+ depthStencil,
+ });
+ }
+
+ createBindGroupForTest(layout: GPUBindGroupLayout, data: TypedArrayBufferView): GPUBindGroup {
+ return this.device.createBindGroup({
+ layout,
+ entries: [
+ {
+ binding: 0,
+ resource: {
+ buffer: this.makeBufferWithContents(data, GPUBufferUsage.UNIFORM),
+ },
+ },
+ ],
+ });
+ }
+}
+
+export const g = makeTestGroup(StencilTest);
+
+g.test('stencil_compare_func')
+ .desc(
+ `
+ Tests that stencil comparison functions with the stencil reference value work as expected.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('format', kStencilFormats)
+ .combineWithParams([
+ { stencilCompare: 'always', stencilRefValue: 0, _expectedColor: kGreenStencilColor },
+ { stencilCompare: 'always', stencilRefValue: 1, _expectedColor: kGreenStencilColor },
+ { stencilCompare: 'always', stencilRefValue: 2, _expectedColor: kGreenStencilColor },
+ { stencilCompare: 'equal', stencilRefValue: 0, _expectedColor: kBaseColor },
+ { stencilCompare: 'equal', stencilRefValue: 1, _expectedColor: kGreenStencilColor },
+ { stencilCompare: 'equal', stencilRefValue: 2, _expectedColor: kBaseColor },
+ { stencilCompare: 'greater', stencilRefValue: 0, _expectedColor: kBaseColor },
+ { stencilCompare: 'greater', stencilRefValue: 1, _expectedColor: kBaseColor },
+ { stencilCompare: 'greater', stencilRefValue: 2, _expectedColor: kGreenStencilColor },
+ { stencilCompare: 'greater-equal', stencilRefValue: 0, _expectedColor: kBaseColor },
+ { stencilCompare: 'greater-equal', stencilRefValue: 1, _expectedColor: kGreenStencilColor },
+ { stencilCompare: 'greater-equal', stencilRefValue: 2, _expectedColor: kGreenStencilColor },
+ { stencilCompare: 'less', stencilRefValue: 0, _expectedColor: kGreenStencilColor },
+ { stencilCompare: 'less', stencilRefValue: 1, _expectedColor: kBaseColor },
+ { stencilCompare: 'less', stencilRefValue: 2, _expectedColor: kBaseColor },
+ { stencilCompare: 'less-equal', stencilRefValue: 0, _expectedColor: kGreenStencilColor },
+ { stencilCompare: 'less-equal', stencilRefValue: 1, _expectedColor: kGreenStencilColor },
+ { stencilCompare: 'less-equal', stencilRefValue: 2, _expectedColor: kBaseColor },
+ { stencilCompare: 'never', stencilRefValue: 0, _expectedColor: kBaseColor },
+ { stencilCompare: 'never', stencilRefValue: 1, _expectedColor: kBaseColor },
+ { stencilCompare: 'never', stencilRefValue: 2, _expectedColor: kBaseColor },
+ { stencilCompare: 'not-equal', stencilRefValue: 0, _expectedColor: kGreenStencilColor },
+ { stencilCompare: 'not-equal', stencilRefValue: 1, _expectedColor: kBaseColor },
+ { stencilCompare: 'not-equal', stencilRefValue: 2, _expectedColor: kGreenStencilColor },
+ ] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceForTextureFormatOrSkipTestCase(t.params.format);
+ })
+ .fn(t => {
+ const { format, stencilCompare, stencilRefValue, _expectedColor } = t.params;
+
+ t.checkStencilCompareFunction(format, stencilCompare, stencilRefValue, _expectedColor);
+ });
+
+g.test('stencil_passOp_operation')
+ .desc(
+ `
+ Test that the stencil operation is executed on stencil pass. A triangle is drawn with the 'always'
+ comparison function, so it should pass. Then, test that each pass stencil operation works with the
+ given stencil values as expected. For example,
+ - If the pass operation is 'keep', it keeps the initial stencil value.
+ - If the pass operation is 'replace', it replaces the initial stencil value with the reference
+ stencil value.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('format', kStencilFormats)
+ .combineWithParams([
+ { passOp: 'keep', initialStencil: 1, _expectedStencil: 1 },
+ { passOp: 'zero', initialStencil: 1, _expectedStencil: 0 },
+ { passOp: 'replace', initialStencil: 1, _expectedStencil: 3 },
+ { passOp: 'invert', initialStencil: 0xf0, _expectedStencil: 0x0f },
+ { passOp: 'increment-clamp', initialStencil: 1, _expectedStencil: 2 },
+ { passOp: 'increment-clamp', initialStencil: 0xff, _expectedStencil: 0xff },
+ { passOp: 'increment-wrap', initialStencil: 1, _expectedStencil: 2 },
+ { passOp: 'increment-wrap', initialStencil: 0xff, _expectedStencil: 0 },
+ { passOp: 'decrement-clamp', initialStencil: 1, _expectedStencil: 0 },
+ { passOp: 'decrement-clamp', initialStencil: 0, _expectedStencil: 0 },
+ { passOp: 'decrement-wrap', initialStencil: 1, _expectedStencil: 0 },
+ { passOp: 'decrement-wrap', initialStencil: 0, _expectedStencil: 0xff },
+ ] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceForTextureFormatOrSkipTestCase(t.params.format);
+ })
+ .fn(t => {
+ const { format, passOp, initialStencil, _expectedStencil } = t.params;
+
+ const stencilState = {
+ compare: 'always',
+ failOp: 'keep',
+ passOp,
+ } as const;
+
+ t.checkStencilOperation(format, stencilState, initialStencil, _expectedStencil);
+ });
+
+g.test('stencil_failOp_operation')
+ .desc(
+ `
+ Test that the stencil operation is executed on stencil fail. A triangle is drawn with the 'never'
+ comparison function, so it should fail. Then, test that each fail stencil operation works with the
+ given stencil values as expected. For example,
+ - If the fail operation is 'keep', it keeps the initial stencil value.
+ - If the fail operation is 'replace', it replaces the initial stencil value with the reference
+ stencil value.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('format', kStencilFormats)
+ .combineWithParams([
+ { failOp: 'keep', initialStencil: 1, _expectedStencil: 1 },
+ { failOp: 'zero', initialStencil: 1, _expectedStencil: 0 },
+ { failOp: 'replace', initialStencil: 1, _expectedStencil: 3 },
+ { failOp: 'invert', initialStencil: 0xf0, _expectedStencil: 0x0f },
+ { failOp: 'increment-clamp', initialStencil: 1, _expectedStencil: 2 },
+ { failOp: 'increment-clamp', initialStencil: 0xff, _expectedStencil: 0xff },
+ { failOp: 'increment-wrap', initialStencil: 1, _expectedStencil: 2 },
+ { failOp: 'increment-wrap', initialStencil: 0xff, _expectedStencil: 0 },
+ { failOp: 'decrement-clamp', initialStencil: 1, _expectedStencil: 0 },
+ { failOp: 'decrement-clamp', initialStencil: 0, _expectedStencil: 0 },
+ { failOp: 'decrement-wrap', initialStencil: 2, _expectedStencil: 1 },
+ { failOp: 'decrement-wrap', initialStencil: 1, _expectedStencil: 0 },
+ { failOp: 'decrement-wrap', initialStencil: 0, _expectedStencil: 0xff },
+ ] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceForTextureFormatOrSkipTestCase(t.params.format);
+ })
+ .fn(t => {
+ const { format, failOp, initialStencil, _expectedStencil } = t.params;
+
+ const stencilState = {
+ compare: 'never',
+ failOp,
+ passOp: 'keep',
+ } as const;
+
+ // Draw the base triangle with the initial stencil value, then draw the test triangle, whose
+ // comparison never passes. Therefore red is never drawn, and the stencil contents may be
+ // updated according to failOp.
+ t.checkStencilOperation(format, stencilState, initialStencil, _expectedStencil);
+ });
+
+g.test('stencil_depthFailOp_operation')
+ .desc(
+ `
+ Test that the stencil operation is executed on depthCompare fail. A triangle is drawn with the
+ 'never' depthCompare, so it should fail the depth test. Then, test that each 'depthFailOp' stencil operation
+ works with the given stencil values as expected. For example,
+ - If the depthFailOp operation is 'keep', it keeps the initial stencil value.
+ - If the depthFailOp operation is 'replace', it replaces the initial stencil value with the
+ reference stencil value.
+ `
+ )
+ .params(u =>
+ u //
+ .combine(
+ 'format',
+ kDepthStencilFormats.filter(format => {
+ const info = kTextureFormatInfo[format];
+ return info.depth && info.stencil;
+ })
+ )
+ .combineWithParams([
+ { depthFailOp: 'keep', initialStencil: 1, _expectedStencil: 1 },
+ { depthFailOp: 'zero', initialStencil: 1, _expectedStencil: 0 },
+ { depthFailOp: 'replace', initialStencil: 1, _expectedStencil: 3 },
+ { depthFailOp: 'invert', initialStencil: 0xf0, _expectedStencil: 0x0f },
+ { depthFailOp: 'increment-clamp', initialStencil: 1, _expectedStencil: 2 },
+ { depthFailOp: 'increment-clamp', initialStencil: 0xff, _expectedStencil: 0xff },
+ { depthFailOp: 'increment-wrap', initialStencil: 1, _expectedStencil: 2 },
+ { depthFailOp: 'increment-wrap', initialStencil: 0xff, _expectedStencil: 0 },
+ { depthFailOp: 'decrement-clamp', initialStencil: 1, _expectedStencil: 0 },
+ { depthFailOp: 'decrement-clamp', initialStencil: 0, _expectedStencil: 0 },
+ { depthFailOp: 'decrement-wrap', initialStencil: 2, _expectedStencil: 1 },
+ { depthFailOp: 'decrement-wrap', initialStencil: 1, _expectedStencil: 0 },
+ { depthFailOp: 'decrement-wrap', initialStencil: 0, _expectedStencil: 0xff },
+ ] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceForTextureFormatOrSkipTestCase(t.params.format);
+ })
+ .fn(t => {
+ const { format, depthFailOp, initialStencil, _expectedStencil } = t.params;
+
+ const stencilState = {
+ compare: 'always',
+ failOp: 'keep',
+ passOp: 'keep',
+ depthFailOp,
+ } as const;
+
+ // Call checkStencilOperation function with enabling the depthTest to test that the depthFailOp
+ // stencil operation works as expected.
+ t.checkStencilOperation(format, stencilState, initialStencil, _expectedStencil, 'never');
+ });
+
+g.test('stencil_read_write_mask')
+ .desc(
+ `
+ Tests that setting stencil read/write masks works. The base triangle sets the stencil to 3,
+ and then we try to draw a triangle with different stencil reference values.
+ - When the 'write' mask is 1,
+ * If the stencil of the triangle is 1, it draws because
+ 'base stencil(3) & write mask(1) == triangle stencil(1)'.
+ * If the stencil of the triangle is 2, it does not draw because
+ 'base stencil(3) & write mask(1) != triangle stencil(2)'.
+
+ - When the 'read' mask is 2,
+ * If the stencil of the triangle is 1, it does not draw because
+ 'base stencil(3) & read mask(2) != triangle stencil(1)'.
+ * If the stencil of the triangle is 2, it draws because
+ 'base stencil(3) & read mask(2) == triangle stencil(2)'.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('format', kStencilFormats)
+ .combineWithParams([
+ { maskType: 'write', stencilRefValue: 1, _expectedColor: kRedStencilColor },
+ { maskType: 'write', stencilRefValue: 2, _expectedColor: kBaseColor },
+ { maskType: 'read', stencilRefValue: 1, _expectedColor: kBaseColor },
+ { maskType: 'read', stencilRefValue: 2, _expectedColor: kRedStencilColor },
+ ])
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceForTextureFormatOrSkipTestCase(t.params.format);
+ })
+ .fn(t => {
+ const { format, maskType, stencilRefValue, _expectedColor } = t.params;
+
+ const baseStencilState = {
+ compare: 'always',
+ failOp: 'keep',
+ passOp: 'replace',
+ } as const;
+
+ const stencilState = {
+ compare: 'equal',
+ failOp: 'keep',
+ passOp: 'keep',
+ } as const;
+
+ const baseState = {
+ format,
+ depthWriteEnabled: false,
+ depthCompare: 'always',
+ stencilFront: baseStencilState,
+ stencilBack: baseStencilState,
+ stencilReadMask: 0xff,
+ stencilWriteMask: maskType === 'write' ? 0x1 : 0xff,
+ } as const;
+
+ const testState = {
+ format,
+ depthWriteEnabled: false,
+ depthCompare: 'always',
+ stencilFront: stencilState,
+ stencilBack: stencilState,
+ stencilReadMask: maskType === 'read' ? 0x2 : 0xff,
+ stencilWriteMask: 0xff,
+ } as const;
+
+ const testStates = [
+ // Draw the base triangle with stencil reference 3. This clears the stencil buffer to 3.
+ { state: baseState, color: kBaseColor, stencil: 3 },
+ { state: testState, color: kRedStencilColor, stencil: stencilRefValue },
+ ];
+
+ t.runStencilStateTest(format, testStates, _expectedColor);
+ });
+
+g.test('stencil_reference_initialized')
+ .desc('Test that the stencil reference is initialized to zero for each new render pass.')
+ .params(u => u.combine('format', kStencilFormats))
+ .beforeAllSubcases(t => {
+ t.selectDeviceForTextureFormatOrSkipTestCase(t.params.format);
+ })
+ .fn(t => {
+ const { format } = t.params;
+
+ const baseStencilState = {
+ compare: 'always',
+ passOp: 'replace',
+ } as const;
+
+ const testStencilState = {
+ compare: 'equal',
+ passOp: 'keep',
+ } as const;
+
+ const hasDepth = !!kTextureFormatInfo[format].depth;
+
+ const baseState = {
+ format,
+ depthWriteEnabled: hasDepth,
+ depthCompare: 'always',
+ stencilFront: baseStencilState,
+ stencilBack: baseStencilState,
+ } as const;
+
+ const testState = {
+ format,
+ depthWriteEnabled: hasDepth,
+ depthCompare: 'always',
+ stencilFront: testStencilState,
+ stencilBack: testStencilState,
+ } as const;
+
+ // First pass sets the stencil to 0x1, the second pass sets the stencil to its default
+ // value, and the third pass tests if the stencil is zero.
+ const testStates = [
+ { state: baseState, color: kBaseColor, stencil: 0x1 },
+ { state: baseState, color: kRedStencilColor, stencil: undefined },
+ { state: testState, color: kGreenStencilColor, stencil: 0x0 },
+ ];
+
+ // The third draw should pass the stencil test since the second pass set it to default zero.
+ t.runStencilStateTest(format, testStates, kGreenStencilColor, true);
+ });
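The read/write-mask behaviour exercised by stencil_read_write_mask above can be summarized on the CPU. The sketch below is a simplified standalone model (not part of the CTS; function names are illustrative): the read mask is applied to both the reference and the stored stencil value before comparison, and the write mask limits which bits an operation may change.

// Simplified model of the stencil test for the 'equal' compare function.
function stencilEqualPasses(reference: number, stored: number, readMask: number): boolean {
  return (reference & readMask) === (stored & readMask);
}

// Simplified model of writing a new stencil value under a write mask: only the
// bits enabled in the mask are taken from the new value.
function applyStencilWrite(stored: number, newValue: number, writeMask: number): number {
  return (stored & ~writeMask) | (newValue & writeMask);
}

// The base draw writes reference 3 with writeMask 0x1, so only bit 0 is stored
// and the stencil ends up as 1.
const afterBase = applyStencilWrite(0, 3, 0x01);
// A later draw with reference 1 and a full read mask then passes 'equal'...
console.log(stencilEqualPasses(1, afterBase, 0xff)); // true
// ...while reference 2 does not, matching the expectations in the params table.
console.log(stencilEqualPasses(2, afterBase, 0xff)); // false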
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/resource_init/buffer.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/resource_init/buffer.spec.ts
new file mode 100644
index 0000000000..973340e3bf
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/resource_init/buffer.spec.ts
@@ -0,0 +1,899 @@
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { unreachable } from '../../../../common/util/util.js';
+import { GPUConst } from '../../../constants.js';
+import { GPUTest } from '../../../gpu_test.js';
+import { getTextureCopyLayout } from '../../../util/texture/layout.js';
+import { PerTexelComponent } from '../../../util/texture/texel_data.js';
+
+export const description = `
+Test that uninitialized buffers are initialized to zero when read
+(or read-written, e.g. with depth write or atomics).
+
+Note that:
+- We don't need 'copy_buffer_to_buffer_copy_destination' here because there has already been an
+ operation test 'command_buffer.copyBufferToBuffer.single' that provides the same functionality.
+`;
+
+const kMapModeOptions = [GPUConst.MapMode.READ, GPUConst.MapMode.WRITE];
+const kBufferUsagesForMappedAtCreationTests = [
+ GPUConst.BufferUsage.COPY_DST | GPUConst.BufferUsage.MAP_READ,
+ GPUConst.BufferUsage.COPY_SRC | GPUConst.BufferUsage.MAP_WRITE,
+ GPUConst.BufferUsage.COPY_SRC,
+];
+
+class F extends GPUTest {
+ GetBufferUsageFromMapMode(mapMode: GPUMapModeFlags): number {
+ switch (mapMode) {
+ case GPUMapMode.READ:
+ return GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ;
+ case GPUMapMode.WRITE:
+ return GPUBufferUsage.COPY_SRC | GPUBufferUsage.MAP_WRITE;
+ default:
+ unreachable();
+ return 0;
+ }
+ }
+
+ CheckGPUBufferContent(
+ buffer: GPUBuffer,
+ bufferUsage: GPUBufferUsageFlags,
+ expectedData: Uint8Array
+ ): void {
+ const mappable = bufferUsage & GPUBufferUsage.MAP_READ;
+ this.expectGPUBufferValuesEqual(buffer, expectedData, 0, { method: mappable ? 'map' : 'copy' });
+ }
+
+ TestBufferZeroInitInBindGroup(
+ computeShaderModule: GPUShaderModule,
+ buffer: GPUBuffer,
+ bufferOffset: number,
+ boundBufferSize: number
+ ): void {
+ const computePipeline = this.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: computeShaderModule,
+ entryPoint: 'main',
+ },
+ });
+ const outputTexture = this.device.createTexture({
+ format: 'rgba8unorm',
+ size: [1, 1, 1],
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.STORAGE_BINDING,
+ });
+ this.trackForCleanup(outputTexture);
+ const bindGroup = this.device.createBindGroup({
+ layout: computePipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: {
+ buffer,
+ offset: bufferOffset,
+ size: boundBufferSize,
+ },
+ },
+ {
+ binding: 1,
+ resource: outputTexture.createView(),
+ },
+ ],
+ });
+
+ const encoder = this.device.createCommandEncoder();
+ const computePass = encoder.beginComputePass();
+ computePass.setBindGroup(0, bindGroup);
+ computePass.setPipeline(computePipeline);
+ computePass.dispatchWorkgroups(1);
+ computePass.end();
+ this.queue.submit([encoder.finish()]);
+
+ this.CheckBufferAndOutputTexture(buffer, boundBufferSize + bufferOffset, outputTexture);
+ }
+
+ CreateRenderPipelineForTest(
+ vertexShaderModule: GPUShaderModule,
+ testVertexBuffer: boolean
+ ): GPURenderPipeline {
+ const renderPipelineDescriptor: GPURenderPipelineDescriptor = {
+ layout: 'auto',
+ vertex: {
+ module: vertexShaderModule,
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: this.device.createShaderModule({
+ code: `
+ @fragment
+ fn main(@location(0) i_color : vec4<f32>) -> @location(0) vec4<f32> {
+ return i_color;
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ primitive: {
+ topology: 'point-list',
+ },
+ };
+ if (testVertexBuffer) {
+ renderPipelineDescriptor.vertex.buffers = [
+ {
+ arrayStride: 16,
+ attributes: [{ format: 'float32x4', offset: 0, shaderLocation: 0 }],
+ },
+ ];
+ }
+
+ return this.device.createRenderPipeline(renderPipelineDescriptor);
+ }
+
+ RecordInitializeTextureColor(
+ encoder: GPUCommandEncoder,
+ texture: GPUTexture,
+ color: GPUColor
+ ): void {
+ const renderPass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: texture.createView(),
+ clearValue: color,
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ renderPass.end();
+ }
+
+ CheckBufferAndOutputTexture(
+ buffer: GPUBuffer,
+ bufferSize: number,
+ outputTexture: GPUTexture,
+ outputTextureSize: [number, number, number] = [1, 1, 1],
+ outputTextureColor: PerTexelComponent<number> = { R: 0.0, G: 1.0, B: 0.0, A: 1.0 }
+ ): void {
+ this.expectSingleColor(outputTexture, 'rgba8unorm', {
+ size: outputTextureSize,
+ exp: outputTextureColor,
+ });
+
+ const expectedBufferData = new Uint8Array(bufferSize);
+ this.expectGPUBufferValuesEqual(buffer, expectedBufferData);
+ }
+}
+
+export const g = makeTestGroup(F);
+
+g.test('partial_write_buffer')
+ .desc(
+ `Verify when we upload data to a part of a buffer with writeBuffer() just after the creation of
+the buffer, the remaining part of that buffer will be initialized to 0.`
+ )
+ .paramsSubcasesOnly(u => u.combine('offset', [0, 8, -12]))
+ .fn(t => {
+ const { offset } = t.params;
+ const bufferSize = 32;
+ const appliedOffset = offset >= 0 ? offset : bufferSize + offset;
+
+ const buffer = t.device.createBuffer({
+ size: bufferSize,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+ t.trackForCleanup(buffer);
+
+ const copySize = 12;
+ const writeData = new Uint8Array(copySize);
+ const expectedData = new Uint8Array(bufferSize);
+ for (let i = 0; i < copySize; ++i) {
+ expectedData[appliedOffset + i] = writeData[i] = i + 1;
+ }
+ t.queue.writeBuffer(buffer, appliedOffset, writeData, 0);
+
+ t.expectGPUBufferValuesEqual(buffer, expectedData);
+ });
+
+g.test('map_whole_buffer')
+ .desc(
+ `Verify when we map the whole range of a mappable GPUBuffer to a typed array buffer just after
+creating the GPUBuffer, the contents of both the typed array buffer and the GPUBuffer itself
+have already been initialized to 0.`
+ )
+ .params(u => u.combine('mapMode', kMapModeOptions))
+ .fn(async t => {
+ const { mapMode } = t.params;
+
+ const bufferSize = 32;
+ const bufferUsage = t.GetBufferUsageFromMapMode(mapMode);
+ const buffer = t.device.createBuffer({
+ size: bufferSize,
+ usage: bufferUsage,
+ });
+ t.trackForCleanup(buffer);
+
+ await buffer.mapAsync(mapMode);
+ const readData = new Uint8Array(buffer.getMappedRange());
+ for (let i = 0; i < bufferSize; ++i) {
+ t.expect(readData[i] === 0);
+ }
+ buffer.unmap();
+
+ const expectedData = new Uint8Array(bufferSize);
+ t.CheckGPUBufferContent(buffer, bufferUsage, expectedData);
+ });
+
+g.test('map_partial_buffer')
+ .desc(
+ `Verify when we map a subrange of a mappable GPUBuffer to a typed array buffer just after the
+creation of the GPUBuffer, the contents of both the typed array buffer and the GPUBuffer have
+already been initialized to 0.`
+ )
+ .params(u => u.combine('mapMode', kMapModeOptions).beginSubcases().combine('offset', [0, 8, -16]))
+ .fn(async t => {
+ const { mapMode, offset } = t.params;
+ const bufferSize = 32;
+ const appliedOffset = offset >= 0 ? offset : bufferSize + offset;
+
+ const bufferUsage = t.GetBufferUsageFromMapMode(mapMode);
+ const buffer = t.device.createBuffer({
+ size: bufferSize,
+ usage: bufferUsage,
+ });
+ t.trackForCleanup(buffer);
+
+ const expectedData = new Uint8Array(bufferSize);
+ {
+ const mapSize = 16;
+ await buffer.mapAsync(mapMode, appliedOffset, mapSize);
+ const mappedData = new Uint8Array(buffer.getMappedRange(appliedOffset, mapSize));
+ for (let i = 0; i < mapSize; ++i) {
+ t.expect(mappedData[i] === 0);
+ if (mapMode === GPUMapMode.WRITE) {
+ mappedData[i] = expectedData[appliedOffset + i] = i + 1;
+ }
+ }
+ buffer.unmap();
+ }
+
+ t.CheckGPUBufferContent(buffer, bufferUsage, expectedData);
+ });
+
+g.test('mapped_at_creation_whole_buffer')
+ .desc(
+ `Verify when we call getMappedRange() at the whole range of a GPUBuffer created with
+mappedAtCreation === true just after its creation, the contents of both the returned typed
+array buffer of getMappedRange() and the GPUBuffer itself have all been initialized to 0.`
+ )
+ .params(u => u.combine('bufferUsage', kBufferUsagesForMappedAtCreationTests))
+ .fn(t => {
+ const { bufferUsage } = t.params;
+
+ const bufferSize = 32;
+ const buffer = t.device.createBuffer({
+ mappedAtCreation: true,
+ size: bufferSize,
+ usage: bufferUsage,
+ });
+ t.trackForCleanup(buffer);
+
+ const mapped = new Uint8Array(buffer.getMappedRange());
+ for (let i = 0; i < bufferSize; ++i) {
+ t.expect(mapped[i] === 0);
+ }
+ buffer.unmap();
+
+ const expectedData = new Uint8Array(bufferSize);
+ t.CheckGPUBufferContent(buffer, bufferUsage, expectedData);
+ });
+
+g.test('mapped_at_creation_partial_buffer')
+ .desc(
+ `Verify when we call getMappedRange() at a subrange of a GPUBuffer created with
+mappedAtCreation === true just after its creation, the contents of both the returned typed
+array buffer of getMappedRange() and the GPUBuffer itself have all been initialized to 0.`
+ )
+ .params(u =>
+ u
+ .combine('bufferUsage', kBufferUsagesForMappedAtCreationTests)
+ .beginSubcases()
+ .combine('offset', [0, 8, -16])
+ )
+ .fn(t => {
+ const { bufferUsage, offset } = t.params;
+ const bufferSize = 32;
+ const appliedOffset = offset >= 0 ? offset : bufferSize + offset;
+
+ const buffer = t.device.createBuffer({
+ mappedAtCreation: true,
+ size: bufferSize,
+ usage: bufferUsage,
+ });
+ t.trackForCleanup(buffer);
+
+ const expectedData = new Uint8Array(bufferSize);
+ {
+ const mappedSize = 12;
+ const mapped = new Uint8Array(buffer.getMappedRange(appliedOffset, mappedSize));
+ for (let i = 0; i < mappedSize; ++i) {
+ t.expect(mapped[i] === 0);
+ if (!(bufferUsage & GPUBufferUsage.MAP_READ)) {
+ mapped[i] = expectedData[appliedOffset + i] = i + 1;
+ }
+ }
+ buffer.unmap();
+ }
+
+ t.CheckGPUBufferContent(buffer, bufferUsage, expectedData);
+ });
+
+g.test('copy_buffer_to_buffer_copy_source')
+ .desc(
+ `Verify that when the first use of a GPUBuffer is as the source buffer of
+CopyBufferToBuffer(), the contents of the GPUBuffer have already been initialized to 0.`
+ )
+ .fn(t => {
+ const bufferSize = 32;
+ const bufferUsage = GPUBufferUsage.COPY_SRC;
+ const buffer = t.device.createBuffer({
+ size: bufferSize,
+ usage: bufferUsage,
+ });
+ t.trackForCleanup(buffer);
+
+ const expectedData = new Uint8Array(bufferSize);
+ // copyBufferToBuffer() is called inside t.CheckGPUBufferContent().
+ t.CheckGPUBufferContent(buffer, bufferUsage, expectedData);
+ });
+
+g.test('copy_buffer_to_texture')
+ .desc(
+ `Verify that when the first use of a GPUBuffer is as the source buffer of
+CopyBufferToTexture(), the contents of the GPUBuffer have already been initialized to 0.`
+ )
+ .paramsSubcasesOnly(u => u.combine('bufferOffset', [0, 8]))
+ .fn(t => {
+ const { bufferOffset } = t.params;
+ const textureSize: [number, number, number] = [8, 8, 1];
+ const dstTextureFormat = 'rgba8unorm';
+
+ const dstTexture = t.device.createTexture({
+ size: textureSize,
+ format: dstTextureFormat,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ });
+ t.trackForCleanup(dstTexture);
+ const layout = getTextureCopyLayout(dstTextureFormat, '2d', textureSize);
+ const srcBufferSize = layout.byteLength + bufferOffset;
+ const srcBufferUsage = GPUBufferUsage.COPY_SRC;
+ const srcBuffer = t.device.createBuffer({
+ size: srcBufferSize,
+ usage: srcBufferUsage,
+ });
+ t.trackForCleanup(srcBuffer);
+
+ const encoder = t.device.createCommandEncoder();
+ encoder.copyBufferToTexture(
+ {
+ buffer: srcBuffer,
+ offset: bufferOffset,
+ bytesPerRow: layout.bytesPerRow,
+ rowsPerImage: layout.rowsPerImage,
+ },
+ { texture: dstTexture },
+ textureSize
+ );
+ t.queue.submit([encoder.finish()]);
+
+ t.CheckBufferAndOutputTexture(srcBuffer, srcBufferSize, dstTexture, textureSize, {
+ R: 0.0,
+ G: 0.0,
+ B: 0.0,
+ A: 0.0,
+ });
+ });
+
+g.test('resolve_query_set_to_partial_buffer')
+ .desc(
+ `Verify when we resolve a query set into a GPUBuffer just after creating that GPUBuffer, the
+remaining part of it will be initialized to 0.`
+ )
+ .paramsSubcasesOnly(u => u.combine('bufferOffset', [0, 256]))
+ .fn(t => {
+ const { bufferOffset } = t.params;
+ const bufferSize = bufferOffset + 8;
+ const bufferUsage = GPUBufferUsage.COPY_SRC | GPUBufferUsage.QUERY_RESOLVE;
+ const dstBuffer = t.device.createBuffer({
+ size: bufferSize,
+ usage: bufferUsage,
+ });
+ t.trackForCleanup(dstBuffer);
+
+ const querySet = t.device.createQuerySet({ type: 'occlusion', count: 1 });
+ const encoder = t.device.createCommandEncoder();
+ encoder.resolveQuerySet(querySet, 0, 1, dstBuffer, bufferOffset);
+ t.queue.submit([encoder.finish()]);
+
+ const expectedBufferData = new Uint8Array(bufferSize);
+ t.CheckGPUBufferContent(dstBuffer, bufferUsage, expectedBufferData);
+ });
+
+g.test('copy_texture_to_partial_buffer')
+ .desc(
+ `Verify when we copy from a GPUTexture into a GPUBuffer just after creating that GPUBuffer, the
+remaining part of it will be initialized to 0.`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('bufferOffset', [0, 8, -16])
+ .combine('arrayLayerCount', [1, 3])
+ .combine('copyMipLevel', [0, 2])
+ .combine('rowsPerImage', [16, 20])
+ .filter(t => {
+ // We don't need to test the copies that will cover the whole GPUBuffer.
+ return !(t.bufferOffset === 0 && t.rowsPerImage === 16);
+ })
+ )
+ .fn(t => {
+ const { bufferOffset, arrayLayerCount, copyMipLevel, rowsPerImage } = t.params;
+ const srcTextureFormat = 'r8uint';
+ const textureSize = [32, 16, arrayLayerCount] as const;
+
+ const srcTexture = t.device.createTexture({
+ format: srcTextureFormat,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ size: textureSize,
+ mipLevelCount: copyMipLevel + 1,
+ });
+ t.trackForCleanup(srcTexture);
+
+ const bytesPerRow = 256;
+ const layout = getTextureCopyLayout(srcTextureFormat, '2d', textureSize, {
+ mipLevel: copyMipLevel,
+ bytesPerRow,
+ rowsPerImage,
+ });
+
+ const dstBufferSize = layout.byteLength + Math.abs(bufferOffset);
+ const dstBuffer = t.device.createBuffer({
+ size: dstBufferSize,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+ t.trackForCleanup(dstBuffer);
+
+ const encoder = t.device.createCommandEncoder();
+
+ // Initialize srcTexture
+ for (let layer = 0; layer < arrayLayerCount; ++layer) {
+ const renderPass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: srcTexture.createView({
+ baseArrayLayer: layer,
+ arrayLayerCount: 1,
+ baseMipLevel: copyMipLevel,
+ }),
+ clearValue: { r: layer + 1, g: 0, b: 0, a: 0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ renderPass.end();
+ }
+
+ // Do texture-to-buffer copy
+ const appliedOffset = Math.max(bufferOffset, 0);
+ encoder.copyTextureToBuffer(
+ { texture: srcTexture, mipLevel: copyMipLevel },
+ { buffer: dstBuffer, offset: appliedOffset, bytesPerRow, rowsPerImage },
+ layout.mipSize
+ );
+ t.queue.submit([encoder.finish()]);
+
+ // Check if the contents of the destination buffer are what we expect.
+ const expectedData = new Uint8Array(dstBufferSize);
+ for (let layer = 0; layer < arrayLayerCount; ++layer) {
+ for (let y = 0; y < layout.mipSize[1]; ++y) {
+ for (let x = 0; x < layout.mipSize[0]; ++x) {
+ expectedData[appliedOffset + layer * bytesPerRow * rowsPerImage + y * bytesPerRow + x] =
+ layer + 1;
+ }
+ }
+ }
+ t.expectGPUBufferValuesEqual(dstBuffer, expectedData);
+ });
+
+g.test('uniform_buffer')
+ .desc(
+ `Verify when we use a GPUBuffer as a uniform buffer just after the creation of that GPUBuffer,
+ all the contents in that GPUBuffer have been initialized to 0.`
+ )
+ .paramsSubcasesOnly(u => u.combine('bufferOffset', [0, 256]))
+ .fn(t => {
+ const { bufferOffset } = t.params;
+
+ const boundBufferSize = 16;
+ const buffer = t.device.createBuffer({
+ size: bufferOffset + boundBufferSize,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.UNIFORM,
+ });
+ t.trackForCleanup(buffer);
+
+ const computeShaderModule = t.device.createShaderModule({
+ code: `
+ struct UBO {
+ value : vec4<u32>
+ };
+ @group(0) @binding(0) var<uniform> ubo : UBO;
+ @group(0) @binding(1) var outImage : texture_storage_2d<rgba8unorm, write>;
+
+ @compute @workgroup_size(1) fn main() {
+ if (all(ubo.value == vec4<u32>(0u, 0u, 0u, 0u))) {
+ textureStore(outImage, vec2<i32>(0, 0), vec4<f32>(0.0, 1.0, 0.0, 1.0));
+ } else {
+ textureStore(outImage, vec2<i32>(0, 0), vec4<f32>(1.0, 0.0, 0.0, 1.0));
+ }
+ }`,
+ });
+
+ // Verify the whole range of the buffer has been initialized to 0 in a compute shader.
+ t.TestBufferZeroInitInBindGroup(computeShaderModule, buffer, bufferOffset, boundBufferSize);
+ });
+
+g.test('readonly_storage_buffer')
+ .desc(
+ `Verify when we use a GPUBuffer as a read-only storage buffer just after the creation of that
+ GPUBuffer, all the contents in that GPUBuffer have been initialized to 0.`
+ )
+ .paramsSubcasesOnly(u => u.combine('bufferOffset', [0, 256]))
+ .fn(t => {
+ const { bufferOffset } = t.params;
+ const boundBufferSize = 16;
+ const buffer = t.device.createBuffer({
+ size: bufferOffset + boundBufferSize,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.STORAGE,
+ });
+ t.trackForCleanup(buffer);
+
+ const computeShaderModule = t.device.createShaderModule({
+ code: `
+ struct SSBO {
+ value : vec4<u32>
+ };
+ @group(0) @binding(0) var<storage, read> ssbo : SSBO;
+ @group(0) @binding(1) var outImage : texture_storage_2d<rgba8unorm, write>;
+
+ @compute @workgroup_size(1) fn main() {
+ if (all(ssbo.value == vec4<u32>(0u, 0u, 0u, 0u))) {
+ textureStore(outImage, vec2<i32>(0, 0), vec4<f32>(0.0, 1.0, 0.0, 1.0));
+ } else {
+ textureStore(outImage, vec2<i32>(0, 0), vec4<f32>(1.0, 0.0, 0.0, 1.0));
+ }
+ }`,
+ });
+
+ // Verify the whole range of the buffer has been initialized to 0 in a compute shader.
+ t.TestBufferZeroInitInBindGroup(computeShaderModule, buffer, bufferOffset, boundBufferSize);
+ });
+
+g.test('storage_buffer')
+ .desc(
+ `Verify when we use a GPUBuffer as a storage buffer just after the creation of that
+ GPUBuffer, all the contents in that GPUBuffer have been initialized to 0.`
+ )
+ .paramsSubcasesOnly(u => u.combine('bufferOffset', [0, 256]))
+ .fn(t => {
+ const { bufferOffset } = t.params;
+ const boundBufferSize = 16;
+ const buffer = t.device.createBuffer({
+ size: bufferOffset + boundBufferSize,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.STORAGE,
+ });
+ t.trackForCleanup(buffer);
+
+ const computeShaderModule = t.device.createShaderModule({
+ code: `
+ struct SSBO {
+ value : vec4<u32>
+ };
+ @group(0) @binding(0) var<storage, read_write> ssbo : SSBO;
+ @group(0) @binding(1) var outImage : texture_storage_2d<rgba8unorm, write>;
+
+ @compute @workgroup_size(1) fn main() {
+ if (all(ssbo.value == vec4<u32>(0u, 0u, 0u, 0u))) {
+ textureStore(outImage, vec2<i32>(0, 0), vec4<f32>(0.0, 1.0, 0.0, 1.0));
+ } else {
+ textureStore(outImage, vec2<i32>(0, 0), vec4<f32>(1.0, 0.0, 0.0, 1.0));
+ }
+ }`,
+ });
+
+ // Verify the whole range of the buffer has been initialized to 0 in a compute shader.
+ t.TestBufferZeroInitInBindGroup(computeShaderModule, buffer, bufferOffset, boundBufferSize);
+ });
+
+g.test('vertex_buffer')
+ .desc(
+ `Verify when we use a GPUBuffer as a vertex buffer just after the creation of that
+ GPUBuffer, all the contents in that GPUBuffer have been initialized to 0.`
+ )
+ .paramsSubcasesOnly(u => u.combine('bufferOffset', [0, 16]))
+ .fn(t => {
+ const { bufferOffset } = t.params;
+
+ const renderPipeline = t.CreateRenderPipelineForTest(
+ t.device.createShaderModule({
+ code: `
+ struct VertexOut {
+ @location(0) color : vec4<f32>,
+ @builtin(position) position : vec4<f32>,
+ };
+
+ @vertex fn main(@location(0) pos : vec4<f32>) -> VertexOut {
+ var output : VertexOut;
+ if (all(pos == vec4<f32>(0.0, 0.0, 0.0, 0.0))) {
+ output.color = vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ } else {
+ output.color = vec4<f32>(1.0, 0.0, 0.0, 1.0);
+ }
+ output.position = vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ return output;
+ }`,
+ }),
+ true
+ );
+
+ const bufferSize = 16 + bufferOffset;
+ const vertexBuffer = t.device.createBuffer({
+ size: bufferSize,
+ usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(vertexBuffer);
+
+ const outputTexture = t.device.createTexture({
+ format: 'rgba8unorm',
+ size: [1, 1, 1],
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ t.trackForCleanup(outputTexture);
+
+ const encoder = t.device.createCommandEncoder();
+ const renderPass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: outputTexture.createView(),
+ clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 0.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ renderPass.setVertexBuffer(0, vertexBuffer, bufferOffset);
+ renderPass.setPipeline(renderPipeline);
+ renderPass.draw(1);
+ renderPass.end();
+ t.queue.submit([encoder.finish()]);
+
+ t.CheckBufferAndOutputTexture(vertexBuffer, bufferSize, outputTexture);
+ });
+
+g.test('index_buffer')
+ .desc(
+ `Verify when we use a GPUBuffer as an index buffer just after the creation of that
+GPUBuffer, all the contents in that GPUBuffer have been initialized to 0.`
+ )
+ .paramsSubcasesOnly(u => u.combine('bufferOffset', [0, 16]))
+ .fn(t => {
+ const { bufferOffset } = t.params;
+
+ const renderPipeline = t.CreateRenderPipelineForTest(
+ t.device.createShaderModule({
+ code: `
+ struct VertexOut {
+ @location(0) color : vec4<f32>,
+ @builtin(position) position : vec4<f32>,
+ };
+
+ @vertex
+ fn main(@builtin(vertex_index) VertexIndex : u32) -> VertexOut {
+ var output : VertexOut;
+ if (VertexIndex == 0u) {
+ output.color = vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ } else {
+ output.color = vec4<f32>(1.0, 0.0, 0.0, 1.0);
+ }
+ output.position = vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ return output;
+ }`,
+ }),
+ false
+ );
+
+ // The size of GPUBuffer must be at least 4.
+ const bufferSize = 4 + bufferOffset;
+ const indexBuffer = t.device.createBuffer({
+ size: bufferSize,
+ usage: GPUBufferUsage.INDEX | GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(indexBuffer);
+
+ const outputTexture = t.device.createTexture({
+ format: 'rgba8unorm',
+ size: [1, 1, 1],
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ t.trackForCleanup(outputTexture);
+
+ const encoder = t.device.createCommandEncoder();
+ const renderPass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: outputTexture.createView(),
+ clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 0.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ renderPass.setPipeline(renderPipeline);
+ renderPass.setIndexBuffer(indexBuffer, 'uint16', bufferOffset, 4);
+ renderPass.drawIndexed(1);
+ renderPass.end();
+ t.queue.submit([encoder.finish()]);
+
+ t.CheckBufferAndOutputTexture(indexBuffer, bufferSize, outputTexture);
+ });
+
+g.test('indirect_buffer_for_draw_indirect')
+ .desc(
+ `Verify when we use a GPUBuffer as an indirect buffer for drawIndirect() or
+drawIndexedIndirect() just after the creation of that GPUBuffer, all the contents in that GPUBuffer
+have been initialized to 0.`
+ )
+ .params(u =>
+ u.combine('test_indexed_draw', [true, false]).beginSubcases().combine('bufferOffset', [0, 16])
+ )
+ .fn(t => {
+ const { test_indexed_draw, bufferOffset } = t.params;
+
+ const renderPipeline = t.CreateRenderPipelineForTest(
+ t.device.createShaderModule({
+ code: `
+ struct VertexOut {
+ @location(0) color : vec4<f32>,
+ @builtin(position) position : vec4<f32>,
+ };
+
+ @vertex fn main() -> VertexOut {
+ var output : VertexOut;
+ output.color = vec4<f32>(1.0, 0.0, 0.0, 1.0);
+ output.position = vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ return output;
+ }`,
+ }),
+ false
+ );
+
+ const kDrawIndirectParametersSize = 16;
+ const kDrawIndexedIndirectParametersSize = 20;
+ const bufferSize =
+ Math.max(kDrawIndirectParametersSize, kDrawIndexedIndirectParametersSize) + bufferOffset;
+ const indirectBuffer = t.device.createBuffer({
+ size: bufferSize,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.INDIRECT,
+ });
+ t.trackForCleanup(indirectBuffer);
+
+ const outputTexture = t.device.createTexture({
+ format: 'rgba8unorm',
+ size: [1, 1, 1],
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ t.trackForCleanup(outputTexture);
+
+ // Initialize outputTexture to green.
+ const encoder = t.device.createCommandEncoder();
+ t.RecordInitializeTextureColor(encoder, outputTexture, { r: 0.0, g: 1.0, b: 0.0, a: 1.0 });
+
+ const renderPass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: outputTexture.createView(),
+ loadOp: 'load',
+ storeOp: 'store',
+ },
+ ],
+ });
+ renderPass.setPipeline(renderPipeline);
+
+ let indexBuffer = undefined;
+ if (test_indexed_draw) {
+ indexBuffer = t.device.createBuffer({
+ size: 4,
+ usage: GPUBufferUsage.INDEX,
+ });
+ t.trackForCleanup(indexBuffer);
+ renderPass.setIndexBuffer(indexBuffer, 'uint16');
+ renderPass.drawIndexedIndirect(indirectBuffer, bufferOffset);
+ } else {
+ renderPass.drawIndirect(indirectBuffer, bufferOffset);
+ }
+
+ renderPass.end();
+ t.queue.submit([encoder.finish()]);
+
+ // The indirect buffer should be lazily cleared to 0, so we actually draw nothing and the color
+ // attachment will keep its original color (green) after we end the render pass.
+ t.CheckBufferAndOutputTexture(indirectBuffer, bufferSize, outputTexture);
+ });
+
+g.test('indirect_buffer_for_dispatch_indirect')
+ .desc(
+    `Verify that when we use a GPUBuffer as an indirect buffer for dispatchWorkgroupsIndirect() just
+    after the creation of that GPUBuffer, all the contents of that GPUBuffer have been initialized
+    to 0.`
+ )
+ .paramsSubcasesOnly(u => u.combine('bufferOffset', [0, 16]))
+ .fn(t => {
+ const { bufferOffset } = t.params;
+
+ const computePipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({
+ code: `
+ @group(0) @binding(0) var outImage : texture_storage_2d<rgba8unorm, write>;
+
+ @compute @workgroup_size(1) fn main() {
+ textureStore(outImage, vec2<i32>(0, 0), vec4<f32>(1.0, 0.0, 0.0, 1.0));
+ }`,
+ }),
+ entryPoint: 'main',
+ },
+ });
+
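+    // dispatchWorkgroupsIndirect() reads 3 x u32 (workgroupCountX, workgroupCountY, workgroupCountZ) = 12 bytes.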
+ const kDispatchIndirectParametersSize = 12;
+ const bufferSize = kDispatchIndirectParametersSize + bufferOffset;
+ const indirectBuffer = t.device.createBuffer({
+ size: bufferSize,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.INDIRECT,
+ });
+ t.trackForCleanup(indirectBuffer);
+
+ const outputTexture = t.device.createTexture({
+ format: 'rgba8unorm',
+ size: [1, 1, 1],
+ usage:
+ GPUTextureUsage.COPY_SRC |
+ GPUTextureUsage.RENDER_ATTACHMENT |
+ GPUTextureUsage.STORAGE_BINDING,
+ });
+ t.trackForCleanup(outputTexture);
+
+ // Initialize outputTexture to green.
+ const encoder = t.device.createCommandEncoder();
+ t.RecordInitializeTextureColor(encoder, outputTexture, { r: 0.0, g: 1.0, b: 0.0, a: 1.0 });
+
+ const bindGroup = t.device.createBindGroup({
+ layout: computePipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: outputTexture.createView(),
+ },
+ ],
+ });
+
+ // The indirect buffer should be lazily cleared to 0, so we actually don't execute the compute
+ // shader and the output texture should keep its original color (green).
+ const computePass = encoder.beginComputePass();
+ computePass.setBindGroup(0, bindGroup);
+ computePass.setPipeline(computePipeline);
+ computePass.dispatchWorkgroupsIndirect(indirectBuffer, bufferOffset);
+ computePass.end();
+ t.queue.submit([encoder.finish()]);
+
+    // The indirect buffer should be lazily cleared to 0, so no workgroups are dispatched and the
+    // output texture will keep its original color (green) after we end the compute pass.
+ t.CheckBufferAndOutputTexture(indirectBuffer, bufferSize, outputTexture);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/resource_init/check_texture/by_copy.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/resource_init/check_texture/by_copy.ts
new file mode 100644
index 0000000000..2a4ca5e6a4
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/resource_init/check_texture/by_copy.ts
@@ -0,0 +1,66 @@
+import { assert } from '../../../../../common/util/util.js';
+import { kTextureFormatInfo, EncodableTextureFormat } from '../../../../format_info.js';
+import { virtualMipSize } from '../../../../util/texture/base.js';
+import { CheckContents } from '../texture_zero.spec.js';
+
+export const checkContentsByBufferCopy: CheckContents = (
+ t,
+ params,
+ texture,
+ state,
+ subresourceRange
+) => {
+ for (const { level: mipLevel, layer } of subresourceRange.each()) {
+ assert(params.format in kTextureFormatInfo);
+ const format = params.format as EncodableTextureFormat;
+
+ t.expectSingleColor(texture, format, {
+ size: [t.textureWidth, t.textureHeight, t.textureDepth],
+ dimension: params.dimension,
+ slice: params.dimension === '2d' ? layer : 0,
+ layout: { mipLevel, aspect: params.aspect },
+ exp: t.stateToTexelComponents[state],
+ });
+ }
+};
+
+export const checkContentsByTextureCopy: CheckContents = (
+ t,
+ params,
+ texture,
+ state,
+ subresourceRange
+) => {
+ for (const { level, layer } of subresourceRange.each()) {
+ assert(params.format in kTextureFormatInfo);
+ const format = params.format as EncodableTextureFormat;
+
+ const [width, height, depth] = virtualMipSize(
+ params.dimension,
+ [t.textureWidth, t.textureHeight, t.textureDepth],
+ level
+ );
+
+ const dst = t.device.createTexture({
+ dimension: params.dimension,
+ size: [width, height, depth],
+ format: params.format,
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC,
+ });
+ t.trackForCleanup(dst);
+
+ const commandEncoder = t.device.createCommandEncoder();
+ commandEncoder.copyTextureToTexture(
+ { texture, mipLevel: level, origin: { x: 0, y: 0, z: layer } },
+ { texture: dst, mipLevel: 0 },
+ { width, height, depthOrArrayLayers: depth }
+ );
+ t.queue.submit([commandEncoder.finish()]);
+
+ t.expectSingleColor(dst, format, {
+ size: [width, height, depth],
+ exp: t.stateToTexelComponents[state],
+ layout: { mipLevel: 0, aspect: params.aspect },
+ });
+ }
+};
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/resource_init/check_texture/by_ds_test.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/resource_init/check_texture/by_ds_test.ts
new file mode 100644
index 0000000000..8646062452
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/resource_init/check_texture/by_ds_test.ts
@@ -0,0 +1,200 @@
+import { assert } from '../../../../../common/util/util.js';
+import { kTextureFormatInfo } from '../../../../format_info.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { virtualMipSize } from '../../../../util/texture/base.js';
+import { CheckContents } from '../texture_zero.spec.js';
+
+function makeFullscreenVertexModule(device: GPUDevice) {
+ return device.createShaderModule({
+ code: `
+ @vertex
+ fn main(@builtin(vertex_index) VertexIndex : u32)
+ -> @builtin(position) vec4<f32> {
+ var pos : array<vec2<f32>, 3> = array<vec2<f32>, 3>(
+ vec2<f32>(-1.0, -3.0),
+ vec2<f32>( 3.0, 1.0),
+ vec2<f32>(-1.0, 1.0));
+ return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+ }
+ `,
+ });
+}
+
+function getDepthTestEqualPipeline(
+ t: GPUTest,
+ format: GPUTextureFormat,
+ sampleCount: number,
+ expected: number
+): GPURenderPipeline {
+ return t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ entryPoint: 'main',
+ module: makeFullscreenVertexModule(t.device),
+ },
+ fragment: {
+ entryPoint: 'main',
+ module: t.device.createShaderModule({
+ code: `
+ struct Outputs {
+ @builtin(frag_depth) FragDepth : f32,
+ @location(0) outSuccess : f32,
+ };
+
+ @fragment
+ fn main() -> Outputs {
+ var output : Outputs;
+ output.FragDepth = f32(${expected});
+ output.outSuccess = 1.0;
+ return output;
+ }
+ `,
+ }),
+ targets: [{ format: 'r8unorm' }],
+ },
+ depthStencil: {
+ format,
+ depthCompare: 'equal',
+ depthWriteEnabled: false,
+ },
+ primitive: { topology: 'triangle-list' },
+ multisample: { count: sampleCount },
+ });
+}
+
+function getStencilTestEqualPipeline(
+ t: GPUTest,
+ format: GPUTextureFormat,
+ sampleCount: number
+): GPURenderPipeline {
+ return t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ entryPoint: 'main',
+ module: makeFullscreenVertexModule(t.device),
+ },
+ fragment: {
+ entryPoint: 'main',
+ module: t.device.createShaderModule({
+ code: `
+ @fragment
+ fn main() -> @location(0) f32 {
+ return 1.0;
+ }
+ `,
+ }),
+ targets: [{ format: 'r8unorm' }],
+ },
+ depthStencil: {
+ depthWriteEnabled: false,
+ depthCompare: 'always',
+ format,
+ stencilFront: { compare: 'equal' },
+ stencilBack: { compare: 'equal' },
+ },
+ primitive: { topology: 'triangle-list' },
+ multisample: { count: sampleCount },
+ });
+}
+
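+// Checks depth/stencil contents by drawing a fullscreen triangle with the depth compare (or stencil
+// compare plus reference value) set to 'equal' against the expected value. The fragment shader
+// writes 1.0 to an r8unorm attachment, so the attachment reads back as all 1s only if every pixel
+// passed the depth/stencil test, i.e. only if the whole subresource holds the expected value.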
+const checkContents: (type: 'depth' | 'stencil', ...args: Parameters<CheckContents>) => void = (
+ type,
+ t,
+ params,
+ texture,
+ state,
+ subresourceRange
+) => {
+ const formatInfo = kTextureFormatInfo[params.format];
+
+ assert(params.dimension === '2d');
+ for (const viewDescriptor of t.generateTextureViewDescriptorsForRendering(
+ 'all',
+ subresourceRange
+ )) {
+ assert(viewDescriptor.baseMipLevel !== undefined);
+ const [width, height] = virtualMipSize(
+ params.dimension,
+ [t.textureWidth, t.textureHeight, 1],
+ viewDescriptor.baseMipLevel
+ );
+
+ const renderTexture = t.device.createTexture({
+ size: [width, height, 1],
+ format: 'r8unorm',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ sampleCount: params.sampleCount,
+ });
+
+ let resolveTexture = undefined;
+ let resolveTarget = undefined;
+ if (params.sampleCount > 1) {
+ resolveTexture = t.device.createTexture({
+ size: [width, height, 1],
+ format: 'r8unorm',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ });
+ resolveTarget = resolveTexture.createView();
+ }
+
+ const commandEncoder = t.device.createCommandEncoder();
+ commandEncoder.pushDebugGroup('checkContentsWithDepthStencil');
+
+ const pass = commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTexture.createView(),
+ resolveTarget,
+ clearValue: [0, 0, 0, 0],
+ loadOp: 'load',
+ storeOp: 'store',
+ },
+ ],
+ depthStencilAttachment: {
+ view: texture.createView(viewDescriptor),
+ depthStoreOp: formatInfo.depth ? 'store' : undefined,
+ depthLoadOp: formatInfo.depth ? 'load' : undefined,
+ stencilStoreOp: formatInfo.stencil ? 'store' : undefined,
+ stencilLoadOp: formatInfo.stencil ? 'load' : undefined,
+ },
+ });
+
+ switch (type) {
+ case 'depth': {
+ const expectedDepth = t.stateToTexelComponents[state].Depth;
+ assert(expectedDepth !== undefined);
+
+ pass.setPipeline(
+ getDepthTestEqualPipeline(t, params.format, params.sampleCount, expectedDepth)
+ );
+ break;
+ }
+
+ case 'stencil': {
+ const expectedStencil = t.stateToTexelComponents[state].Stencil;
+ assert(expectedStencil !== undefined);
+
+ pass.setPipeline(getStencilTestEqualPipeline(t, params.format, params.sampleCount));
+ pass.setStencilReference(expectedStencil);
+ break;
+ }
+ }
+
+ pass.draw(3);
+ pass.end();
+
+ commandEncoder.popDebugGroup();
+ t.queue.submit([commandEncoder.finish()]);
+
+ t.expectSingleColor(resolveTexture || renderTexture, 'r8unorm', {
+ size: [width, height, 1],
+ exp: { R: 1 },
+ });
+ }
+};
+
+export const checkContentsByDepthTest = (...args: Parameters<CheckContents>) =>
+ checkContents('depth', ...args);
+
+export const checkContentsByStencilTest = (...args: Parameters<CheckContents>) =>
+ checkContents('stencil', ...args);
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/resource_init/check_texture/by_sampling.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/resource_init/check_texture/by_sampling.ts
new file mode 100644
index 0000000000..64b4f73b34
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/resource_init/check_texture/by_sampling.ts
@@ -0,0 +1,157 @@
+import { assert, unreachable } from '../../../../../common/util/util.js';
+import { kTextureFormatInfo, EncodableTextureFormat } from '../../../../format_info.js';
+import { virtualMipSize } from '../../../../util/texture/base.js';
+import {
+ kTexelRepresentationInfo,
+ getSingleDataType,
+ getComponentReadbackTraits,
+} from '../../../../util/texture/texel_data.js';
+import { CheckContents } from '../texture_zero.spec.js';
+
+export const checkContentsBySampling: CheckContents = (
+ t,
+ params,
+ texture,
+ state,
+ subresourceRange
+) => {
+ assert(params.format in kTextureFormatInfo);
+ const format = params.format as EncodableTextureFormat;
+ const rep = kTexelRepresentationInfo[format];
+
+ for (const { level, layers } of subresourceRange.mipLevels()) {
+ const [width, height, depth] = virtualMipSize(
+ params.dimension,
+ [t.textureWidth, t.textureHeight, t.textureDepth],
+ level
+ );
+
+ const { ReadbackTypedArray, shaderType } = getComponentReadbackTraits(
+ getSingleDataType(format)
+ );
+
+ const componentOrder = rep.componentOrder;
+ const componentCount = componentOrder.length;
+
+    // For single-component textures, this generates `.r`.
+    // For multi-component textures, it generates an indexed swizzle, e.g.
+    //  .rgba[i], .bgra[i], .rgb[i]
+ const indexExpression =
+ componentCount === 1
+ ? componentOrder[0].toLowerCase()
+ : componentOrder.map(c => c.toLowerCase()).join('') + '[i]';
+
+ const _xd = '_' + params.dimension;
+ const _multisampled = params.sampleCount > 1 ? '_multisampled' : '';
+ const texelIndexExpression =
+ params.dimension === '2d'
+ ? 'vec2<i32>(GlobalInvocationID.xy)'
+ : params.dimension === '3d'
+ ? 'vec3<i32>(GlobalInvocationID.xyz)'
+ : params.dimension === '1d'
+ ? 'i32(GlobalInvocationID.x)'
+ : unreachable();
+ const computePipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ entryPoint: 'main',
+ module: t.device.createShaderModule({
+ code: `
+ struct Constants {
+ level : i32
+ };
+
+ @group(0) @binding(0) var<uniform> constants : Constants;
+ @group(0) @binding(1) var myTexture : texture${_multisampled}${_xd}<${shaderType}>;
+
+ struct Result {
+ values : array<${shaderType}>
+ };
+ @group(0) @binding(3) var<storage, read_write> result : Result;
+
+ @compute @workgroup_size(1)
+ fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>) {
+ let flatIndex : u32 = ${componentCount}u * (
+ ${width}u * ${height}u * GlobalInvocationID.z +
+ ${width}u * GlobalInvocationID.y +
+ GlobalInvocationID.x
+ );
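+              // flatIndex addresses the texel in row-major order (x fastest), scaled by the
+              // component count so that each channel gets its own slot in the result array.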
+ let texel : vec4<${shaderType}> = textureLoad(
+ myTexture, ${texelIndexExpression}, constants.level);
+
+ for (var i : u32 = 0u; i < ${componentCount}u; i = i + 1u) {
+ result.values[flatIndex + i] = texel.${indexExpression};
+ }
+ }`,
+ }),
+ },
+ });
+
+ for (const layer of layers) {
+ const ubo = t.device.createBuffer({
+ mappedAtCreation: true,
+ size: 4,
+ usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
+ });
+ new Int32Array(ubo.getMappedRange(), 0, 1)[0] = level;
+ ubo.unmap();
+
+ const byteLength =
+ width * height * depth * ReadbackTypedArray.BYTES_PER_ELEMENT * rep.componentOrder.length;
+ const resultBuffer = t.device.createBuffer({
+ size: byteLength,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(resultBuffer);
+
+ const bindGroup = t.device.createBindGroup({
+ layout: computePipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: { buffer: ubo },
+ },
+ {
+ binding: 1,
+ resource: texture.createView({
+ baseArrayLayer: layer,
+ arrayLayerCount: 1,
+ dimension: params.dimension,
+ }),
+ },
+ {
+ binding: 3,
+ resource: {
+ buffer: resultBuffer,
+ },
+ },
+ ],
+ });
+
+ const commandEncoder = t.device.createCommandEncoder();
+ const pass = commandEncoder.beginComputePass();
+ pass.setPipeline(computePipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(width, height, depth);
+ pass.end();
+ t.queue.submit([commandEncoder.finish()]);
+ ubo.destroy();
+
+ const expectedValues = new ReadbackTypedArray(new ArrayBuffer(byteLength));
+ const expectedState = t.stateToTexelComponents[state];
+ let i = 0;
+ for (let d = 0; d < depth; ++d) {
+ for (let h = 0; h < height; ++h) {
+ for (let w = 0; w < width; ++w) {
+ for (const c of rep.componentOrder) {
+ const value = expectedState[c];
+ assert(value !== undefined);
+ expectedValues[i++] = value;
+ }
+ }
+ }
+ }
+ t.expectGPUBufferValuesEqual(resultBuffer, expectedValues);
+ }
+ }
+};
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/resource_init/texture_zero.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/resource_init/texture_zero.spec.ts
new file mode 100644
index 0000000000..3f0baeccbd
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/resource_init/texture_zero.spec.ts
@@ -0,0 +1,645 @@
+export const description = `
+Test uninitialized textures are initialized to zero when read.
+
+TODO:
+- test by sampling depth/stencil [1]
+- test by copying out of stencil [2]
+- test compressed texture formats [3]
+`;
+
+// MAINTENANCE_TODO: This is a test file, it probably shouldn't export anything.
+// Everything that's exported should be moved to another file.
+
+import { TestCaseRecorder, TestParams } from '../../../../common/framework/fixture.js';
+import {
+ kUnitCaseParamsBuilder,
+ ParamTypeOf,
+} from '../../../../common/framework/params_builder.js';
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { assert, unreachable } from '../../../../common/util/util.js';
+import { kTextureAspects, kTextureDimensions } from '../../../capability_info.js';
+import { GPUConst } from '../../../constants.js';
+import {
+ kTextureFormatInfo,
+ kUncompressedTextureFormats,
+ textureDimensionAndFormatCompatible,
+ UncompressedTextureFormat,
+ EncodableTextureFormat,
+} from '../../../format_info.js';
+import { GPUTest, GPUTestSubcaseBatchState } from '../../../gpu_test.js';
+import { virtualMipSize } from '../../../util/texture/base.js';
+import { createTextureUploadBuffer } from '../../../util/texture/layout.js';
+import { BeginEndRange, SubresourceRange } from '../../../util/texture/subresource.js';
+import { PerTexelComponent, kTexelRepresentationInfo } from '../../../util/texture/texel_data.js';
+
+export enum UninitializeMethod {
+ Creation = 'Creation', // The texture was just created. It is uninitialized.
+ StoreOpClear = 'StoreOpClear', // The texture was rendered to with GPUStoreOp "clear"
+}
+const kUninitializeMethods = Object.keys(UninitializeMethod) as UninitializeMethod[];
+
+export const enum ReadMethod {
+ Sample = 'Sample', // The texture is sampled from
+ CopyToBuffer = 'CopyToBuffer', // The texture is copied to a buffer
+ CopyToTexture = 'CopyToTexture', // The texture is copied to another texture
+ DepthTest = 'DepthTest', // The texture is read as a depth buffer
+ StencilTest = 'StencilTest', // The texture is read as a stencil buffer
+ ColorBlending = 'ColorBlending', // Read the texture by blending as a color attachment
+ Storage = 'Storage', // Read the texture as a storage texture
+}
+
+// Test with these mip level counts
+type MipLevels = 1 | 5;
+const kMipLevelCounts: MipLevels[] = [1, 5];
+
+// For each mip level count, define the mip ranges to leave uninitialized.
+const kUninitializedMipRangesToTest: { [k in MipLevels]: BeginEndRange[] } = {
+ 1: [{ begin: 0, end: 1 }], // Test the only mip
+ 5: [
+ { begin: 0, end: 2 },
+ { begin: 3, end: 4 },
+ ], // Test a range and a single mip
+};
+
+// Test with these sample counts.
+const kSampleCounts: number[] = [1, 4];
+
+// Test with these layer counts.
+type LayerCounts = 1 | 7;
+
+// For each layer count, define the layers to leave uninitialized.
+const kUninitializedLayerRangesToTest: { [k in LayerCounts]: BeginEndRange[] } = {
+ 1: [{ begin: 0, end: 1 }], // Test the only layer
+ 7: [
+ { begin: 2, end: 4 },
+ { begin: 6, end: 7 },
+ ], // Test a range and a single layer
+};
+
+// Enums to abstract over color / depth / stencil values in textures. Depending on the texture format,
+// the data for each value may have a different representation. These enums are converted to a
+// representation such that their values can be compared. For example, an integer is needed to upload
+// to an unsigned normalized format, but its value is read back as a float in the shader.
+export const enum InitializedState {
+ Canary, // Set on initialized subresources. It should stay the same. On discarded resources, we should observe zero.
+ Zero, // We check that uninitialized subresources are in this state when read back.
+}
+
+const initializedStateAsFloat = {
+ [InitializedState.Zero]: 0,
+ [InitializedState.Canary]: 1,
+};
+
+const initializedStateAsUint = {
+ [InitializedState.Zero]: 0,
+ [InitializedState.Canary]: 1,
+};
+
+const initializedStateAsSint = {
+ [InitializedState.Zero]: 0,
+ [InitializedState.Canary]: -1,
+};
+
+function initializedStateAsColor(
+ state: InitializedState,
+ format: GPUTextureFormat
+): [number, number, number, number] {
+ let value;
+ if (format.indexOf('uint') !== -1) {
+ value = initializedStateAsUint[state];
+ } else if (format.indexOf('sint') !== -1) {
+ value = initializedStateAsSint[state];
+ } else {
+ value = initializedStateAsFloat[state];
+ }
+ return [value, value, value, value];
+}
+
+const initializedStateAsDepth = {
+ [InitializedState.Zero]: 0,
+ [InitializedState.Canary]: 0.8,
+};
+
+const initializedStateAsStencil = {
+ [InitializedState.Zero]: 0,
+ [InitializedState.Canary]: 42,
+};
+
+function getRequiredTextureUsage(
+ format: UncompressedTextureFormat,
+ sampleCount: number,
+ uninitializeMethod: UninitializeMethod,
+ readMethod: ReadMethod
+): GPUTextureUsageFlags {
+ let usage: GPUTextureUsageFlags = GPUConst.TextureUsage.COPY_DST;
+
+ switch (uninitializeMethod) {
+ case UninitializeMethod.Creation:
+ break;
+ case UninitializeMethod.StoreOpClear:
+ usage |= GPUConst.TextureUsage.RENDER_ATTACHMENT;
+ break;
+ default:
+ unreachable();
+ }
+
+ switch (readMethod) {
+ case ReadMethod.CopyToBuffer:
+ case ReadMethod.CopyToTexture:
+ usage |= GPUConst.TextureUsage.COPY_SRC;
+ break;
+ case ReadMethod.Sample:
+ usage |= GPUConst.TextureUsage.TEXTURE_BINDING;
+ break;
+ case ReadMethod.Storage:
+ usage |= GPUConst.TextureUsage.STORAGE_BINDING;
+ break;
+ case ReadMethod.DepthTest:
+ case ReadMethod.StencilTest:
+ case ReadMethod.ColorBlending:
+ usage |= GPUConst.TextureUsage.RENDER_ATTACHMENT;
+ break;
+ default:
+ unreachable();
+ }
+
+ if (sampleCount > 1) {
+    // Copies to multisampled textures are not allowed. We need RENDER_ATTACHMENT to initialize
+    // canary data in multisampled textures.
+ usage |= GPUConst.TextureUsage.RENDER_ATTACHMENT;
+ }
+
+ if (!kTextureFormatInfo[format].copyDst) {
+    // Copies are not possible. We need RENDER_ATTACHMENT to initialize
+    // canary data.
+ assert(kTextureFormatInfo[format].renderable);
+ usage |= GPUConst.TextureUsage.RENDER_ATTACHMENT;
+ }
+
+ return usage;
+}
+
+export class TextureZeroInitTest extends GPUTest {
+ readonly stateToTexelComponents: { [k in InitializedState]: PerTexelComponent<number> };
+
+ private p: TextureZeroParams;
+ constructor(sharedState: GPUTestSubcaseBatchState, rec: TestCaseRecorder, params: TestParams) {
+ super(sharedState, rec, params);
+ this.p = params as TextureZeroParams;
+
+ const stateToTexelComponents = (state: InitializedState) => {
+ const [R, G, B, A] = initializedStateAsColor(state, this.p.format);
+ return {
+ R,
+ G,
+ B,
+ A,
+ Depth: initializedStateAsDepth[state],
+ Stencil: initializedStateAsStencil[state],
+ };
+ };
+
+ this.stateToTexelComponents = {
+ [InitializedState.Zero]: stateToTexelComponents(InitializedState.Zero),
+ [InitializedState.Canary]: stateToTexelComponents(InitializedState.Canary),
+ };
+ }
+
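+  // Example: with mipLevelCount = 5 the base width is 32 (1 << 5), or 63 (2 * 32 - 1) when
+  // nonPowerOfTwo is set, which still accommodates all 5 mip levels.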
+ get textureWidth(): number {
+ let width = 1 << this.p.mipLevelCount;
+ if (this.p.nonPowerOfTwo) {
+ width = 2 * width - 1;
+ }
+ return width;
+ }
+
+ get textureHeight(): number {
+ if (this.p.dimension === '1d') {
+ return 1;
+ }
+
+ let height = 1 << this.p.mipLevelCount;
+ if (this.p.nonPowerOfTwo) {
+ height = 2 * height - 1;
+ }
+ return height;
+ }
+
+ get textureDepth(): number {
+ return this.p.dimension === '3d' ? 11 : 1;
+ }
+
+ get textureDepthOrArrayLayers(): number {
+ return this.p.dimension === '2d' ? this.p.layerCount : this.textureDepth;
+ }
+
+ // Used to iterate subresources and check that their uninitialized contents are zero when accessed
+ *iterateUninitializedSubresources(): Generator<SubresourceRange> {
+ for (const mipRange of kUninitializedMipRangesToTest[this.p.mipLevelCount]) {
+ for (const layerRange of kUninitializedLayerRangesToTest[this.p.layerCount]) {
+ yield new SubresourceRange({ mipRange, layerRange });
+ }
+ }
+ }
+
+ // Used to iterate and initialize other subresources not checked for zero-initialization.
+ // Zero-initialization of uninitialized subresources should not have side effects on already
+ // initialized subresources.
+ *iterateInitializedSubresources(): Generator<SubresourceRange> {
+ const uninitialized: boolean[][] = new Array(this.p.mipLevelCount);
+ for (let level = 0; level < uninitialized.length; ++level) {
+ uninitialized[level] = new Array(this.p.layerCount);
+ }
+ for (const subresources of this.iterateUninitializedSubresources()) {
+ for (const { level, layer } of subresources.each()) {
+ uninitialized[level][layer] = true;
+ }
+ }
+ for (let level = 0; level < uninitialized.length; ++level) {
+ for (let layer = 0; layer < uninitialized[level].length; ++layer) {
+ if (!uninitialized[level][layer]) {
+ yield new SubresourceRange({
+ mipRange: { begin: level, count: 1 },
+ layerRange: { begin: layer, count: 1 },
+ });
+ }
+ }
+ }
+ }
+
+ *generateTextureViewDescriptorsForRendering(
+ aspect: GPUTextureAspect,
+ subresourceRange?: SubresourceRange
+ ): Generator<GPUTextureViewDescriptor> {
+ const viewDescriptor: GPUTextureViewDescriptor = {
+ dimension: '2d',
+ aspect,
+ };
+
+ if (subresourceRange === undefined) {
+ return viewDescriptor;
+ }
+
+ for (const { level, layer } of subresourceRange.each()) {
+ yield {
+ ...viewDescriptor,
+ baseMipLevel: level,
+ mipLevelCount: 1,
+ baseArrayLayer: layer,
+ arrayLayerCount: 1,
+ };
+ }
+ }
+
+ private initializeWithStoreOp(
+ state: InitializedState,
+ texture: GPUTexture,
+ subresourceRange?: SubresourceRange
+ ): void {
+ const commandEncoder = this.device.createCommandEncoder();
+ commandEncoder.pushDebugGroup('initializeWithStoreOp');
+
+ for (const viewDescriptor of this.generateTextureViewDescriptorsForRendering(
+ 'all',
+ subresourceRange
+ )) {
+ if (kTextureFormatInfo[this.p.format].color) {
+ commandEncoder
+ .beginRenderPass({
+ colorAttachments: [
+ {
+ view: texture.createView(viewDescriptor),
+ storeOp: 'store',
+ clearValue: initializedStateAsColor(state, this.p.format),
+ loadOp: 'clear',
+ },
+ ],
+ })
+ .end();
+ } else {
+ const depthStencilAttachment: GPURenderPassDepthStencilAttachment = {
+ view: texture.createView(viewDescriptor),
+ };
+ if (kTextureFormatInfo[this.p.format].depth) {
+ depthStencilAttachment.depthClearValue = initializedStateAsDepth[state];
+ depthStencilAttachment.depthLoadOp = 'clear';
+ depthStencilAttachment.depthStoreOp = 'store';
+ }
+ if (kTextureFormatInfo[this.p.format].stencil) {
+ depthStencilAttachment.stencilClearValue = initializedStateAsStencil[state];
+ depthStencilAttachment.stencilLoadOp = 'clear';
+ depthStencilAttachment.stencilStoreOp = 'store';
+ }
+ commandEncoder
+ .beginRenderPass({
+ colorAttachments: [],
+ depthStencilAttachment,
+ })
+ .end();
+ }
+ }
+
+ commandEncoder.popDebugGroup();
+ this.queue.submit([commandEncoder.finish()]);
+ }
+
+ private initializeWithCopy(
+ texture: GPUTexture,
+ state: InitializedState,
+ subresourceRange: SubresourceRange
+ ): void {
+ assert(this.p.format in kTextureFormatInfo);
+ const format = this.p.format as EncodableTextureFormat;
+
+ const firstSubresource = subresourceRange.each().next().value;
+ assert(typeof firstSubresource !== 'undefined');
+
+ const [largestWidth, largestHeight, largestDepth] = virtualMipSize(
+ this.p.dimension,
+ [this.textureWidth, this.textureHeight, this.textureDepth],
+ firstSubresource.level
+ );
+
+ const rep = kTexelRepresentationInfo[format];
+ const texelData = new Uint8Array(rep.pack(rep.encode(this.stateToTexelComponents[state])));
+ const { buffer, bytesPerRow, rowsPerImage } = createTextureUploadBuffer(
+ texelData,
+ this.device,
+ format,
+ this.p.dimension,
+ [largestWidth, largestHeight, largestDepth]
+ );
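+    // The upload buffer is laid out for the largest (first) mip in the range; the per-level copies
+    // below reuse it with smaller copy extents, so the same data initializes every subresource.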
+
+ const commandEncoder = this.device.createCommandEncoder();
+
+ for (const { level, layer } of subresourceRange.each()) {
+ const [width, height, depth] = virtualMipSize(
+ this.p.dimension,
+ [this.textureWidth, this.textureHeight, this.textureDepth],
+ level
+ );
+
+ commandEncoder.copyBufferToTexture(
+ {
+ buffer,
+ bytesPerRow,
+ rowsPerImage,
+ },
+ { texture, mipLevel: level, origin: { x: 0, y: 0, z: layer } },
+ { width, height, depthOrArrayLayers: depth }
+ );
+ }
+ this.queue.submit([commandEncoder.finish()]);
+ buffer.destroy();
+ }
+
+ initializeTexture(
+ texture: GPUTexture,
+ state: InitializedState,
+ subresourceRange: SubresourceRange
+ ): void {
+ if (this.p.sampleCount > 1 || !kTextureFormatInfo[this.p.format].copyDst) {
+      // Copies to multisampled textures are not yet specified.
+ // Use a storeOp for now.
+ assert(kTextureFormatInfo[this.p.format].renderable);
+ this.initializeWithStoreOp(state, texture, subresourceRange);
+ } else {
+ this.initializeWithCopy(texture, state, subresourceRange);
+ }
+ }
+
+ discardTexture(texture: GPUTexture, subresourceRange: SubresourceRange): void {
+ const commandEncoder = this.device.createCommandEncoder();
+ commandEncoder.pushDebugGroup('discardTexture');
+
+ for (const desc of this.generateTextureViewDescriptorsForRendering('all', subresourceRange)) {
+ if (kTextureFormatInfo[this.p.format].color) {
+ commandEncoder
+ .beginRenderPass({
+ colorAttachments: [
+ {
+ view: texture.createView(desc),
+ storeOp: 'discard',
+ loadOp: 'load',
+ },
+ ],
+ })
+ .end();
+ } else {
+ const depthStencilAttachment: GPURenderPassDepthStencilAttachment = {
+ view: texture.createView(desc),
+ };
+ if (kTextureFormatInfo[this.p.format].depth) {
+ depthStencilAttachment.depthLoadOp = 'load';
+ depthStencilAttachment.depthStoreOp = 'discard';
+ }
+ if (kTextureFormatInfo[this.p.format].stencil) {
+ depthStencilAttachment.stencilLoadOp = 'load';
+ depthStencilAttachment.stencilStoreOp = 'discard';
+ }
+ commandEncoder
+ .beginRenderPass({
+ colorAttachments: [],
+ depthStencilAttachment,
+ })
+ .end();
+ }
+ }
+
+ commandEncoder.popDebugGroup();
+ this.queue.submit([commandEncoder.finish()]);
+ }
+}
+
+const kTestParams = kUnitCaseParamsBuilder
+ .combine('dimension', kTextureDimensions)
+ .combine('readMethod', [
+ ReadMethod.CopyToBuffer,
+ ReadMethod.CopyToTexture,
+ ReadMethod.Sample,
+ ReadMethod.DepthTest,
+ ReadMethod.StencilTest,
+ ])
+ // [3] compressed formats
+ .combine('format', kUncompressedTextureFormats)
+ .filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format))
+ .beginSubcases()
+ .combine('aspect', kTextureAspects)
+ .unless(({ readMethod, format, aspect }) => {
+ const info = kTextureFormatInfo[format];
+ return (
+ (readMethod === ReadMethod.DepthTest && (!info.depth || aspect === 'stencil-only')) ||
+ (readMethod === ReadMethod.StencilTest && (!info.stencil || aspect === 'depth-only')) ||
+ (readMethod === ReadMethod.ColorBlending && !info.color) ||
+ // [1]: Test with depth/stencil sampling
+ (readMethod === ReadMethod.Sample && (!!info.depth || !!info.stencil)) ||
+ (aspect === 'depth-only' && !info.depth) ||
+ (aspect === 'stencil-only' && !info.stencil) ||
+ (aspect === 'all' && !!info.depth && !!info.stencil) ||
+ // Cannot copy from a packed depth format.
+ // [2]: Test copying out of the stencil aspect.
+ ((readMethod === ReadMethod.CopyToBuffer || readMethod === ReadMethod.CopyToTexture) &&
+ (format === 'depth24plus' || format === 'depth24plus-stencil8'))
+ );
+ })
+ .combine('mipLevelCount', kMipLevelCounts)
+ // 1D texture can only have a single mip level
+ .unless(p => p.dimension === '1d' && p.mipLevelCount !== 1)
+ .combine('sampleCount', kSampleCounts)
+ .unless(
+ ({ readMethod, sampleCount }) =>
+ // We can only read from multisampled textures by sampling.
+ sampleCount > 1 &&
+ (readMethod === ReadMethod.CopyToBuffer || readMethod === ReadMethod.CopyToTexture)
+ )
+ // Multisampled textures may only have one mip
+ .unless(({ sampleCount, mipLevelCount }) => sampleCount > 1 && mipLevelCount > 1)
+ .combine('uninitializeMethod', kUninitializeMethods)
+ .unless(({ dimension, readMethod, uninitializeMethod, format, sampleCount }) => {
+ const formatInfo = kTextureFormatInfo[format];
+ return (
+ dimension !== '2d' &&
+ (sampleCount > 1 ||
+ !!formatInfo.depth ||
+ !!formatInfo.stencil ||
+ readMethod === ReadMethod.DepthTest ||
+ readMethod === ReadMethod.StencilTest ||
+ readMethod === ReadMethod.ColorBlending ||
+ uninitializeMethod === UninitializeMethod.StoreOpClear)
+ );
+ })
+ .expandWithParams(function* ({ dimension }) {
+ switch (dimension) {
+ case '2d':
+ yield { layerCount: 1 as LayerCounts };
+ yield { layerCount: 7 as LayerCounts };
+ break;
+ case '1d':
+ case '3d':
+ yield { layerCount: 1 as LayerCounts };
+ break;
+ }
+ })
+  // Multisampled 3D / 2D array textures are not supported.
+ .unless(({ sampleCount, layerCount }) => sampleCount > 1 && layerCount > 1)
+ .unless(({ format, sampleCount, uninitializeMethod, readMethod }) => {
+ const usage = getRequiredTextureUsage(format, sampleCount, uninitializeMethod, readMethod);
+ const info = kTextureFormatInfo[format];
+
+ return (
+ ((usage & GPUConst.TextureUsage.RENDER_ATTACHMENT) !== 0 && !info.renderable) ||
+ ((usage & GPUConst.TextureUsage.STORAGE_BINDING) !== 0 && !info.color?.storage) ||
+ (sampleCount > 1 && !info.multisample)
+ );
+ })
+ .combine('nonPowerOfTwo', [false, true])
+ .combine('canaryOnCreation', [false, true])
+ .filter(({ canaryOnCreation, format }) => {
+ // We can only initialize the texture if it's encodable or renderable.
+ const canInitialize = format in kTextureFormatInfo || kTextureFormatInfo[format].renderable;
+
+ // Filter out cases where we want canary values but can't initialize.
+ return !canaryOnCreation || canInitialize;
+ });
+
+type TextureZeroParams = ParamTypeOf<typeof kTestParams>;
+
+export type CheckContents = (
+ t: TextureZeroInitTest,
+ params: TextureZeroParams,
+ texture: GPUTexture,
+ state: InitializedState,
+ subresourceRange: SubresourceRange
+) => void;
+
+import { checkContentsByBufferCopy, checkContentsByTextureCopy } from './check_texture/by_copy.js';
+import {
+ checkContentsByDepthTest,
+ checkContentsByStencilTest,
+} from './check_texture/by_ds_test.js';
+import { checkContentsBySampling } from './check_texture/by_sampling.js';
+
+const checkContentsImpl: { [k in ReadMethod]: CheckContents } = {
+ Sample: checkContentsBySampling,
+ CopyToBuffer: checkContentsByBufferCopy,
+ CopyToTexture: checkContentsByTextureCopy,
+ DepthTest: checkContentsByDepthTest,
+ StencilTest: checkContentsByStencilTest,
+ ColorBlending: t => t.skip('Not implemented'),
+ Storage: t => t.skip('Not implemented'),
+};
+
+export const g = makeTestGroup(TextureZeroInitTest);
+
+g.test('uninitialized_texture_is_zero')
+ .params(kTestParams)
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ t.selectDeviceOrSkipTestCase(kTextureFormatInfo[t.params.format].feature);
+ })
+ .fn(t => {
+ const usage = getRequiredTextureUsage(
+ t.params.format,
+ t.params.sampleCount,
+ t.params.uninitializeMethod,
+ t.params.readMethod
+ );
+
+ const texture = t.device.createTexture({
+ size: [t.textureWidth, t.textureHeight, t.textureDepthOrArrayLayers],
+ format: t.params.format,
+ dimension: t.params.dimension,
+ usage,
+ mipLevelCount: t.params.mipLevelCount,
+ sampleCount: t.params.sampleCount,
+ });
+ t.trackForCleanup(texture);
+
+ if (t.params.canaryOnCreation) {
+ // Initialize some subresources with canary values
+ for (const subresourceRange of t.iterateInitializedSubresources()) {
+ t.initializeTexture(texture, InitializedState.Canary, subresourceRange);
+ }
+ }
+
+ switch (t.params.uninitializeMethod) {
+ case UninitializeMethod.Creation:
+ break;
+ case UninitializeMethod.StoreOpClear:
+ // Initialize the rest of the resources.
+ for (const subresourceRange of t.iterateUninitializedSubresources()) {
+ t.initializeTexture(texture, InitializedState.Canary, subresourceRange);
+ }
+ // Then use a store op to discard their contents.
+ for (const subresourceRange of t.iterateUninitializedSubresources()) {
+ t.discardTexture(texture, subresourceRange);
+ }
+ break;
+ default:
+ unreachable();
+ }
+
+ // Check that all uninitialized resources are zero.
+ for (const subresourceRange of t.iterateUninitializedSubresources()) {
+ checkContentsImpl[t.params.readMethod](
+ t,
+ t.params,
+ texture,
+ InitializedState.Zero,
+ subresourceRange
+ );
+ }
+
+ if (t.params.canaryOnCreation) {
+      // Check that all other resources are unchanged.
+ for (const subresourceRange of t.iterateInitializedSubresources()) {
+ checkContentsImpl[t.params.readMethod](
+ t,
+ t.params,
+ texture,
+ InitializedState.Canary,
+ subresourceRange
+ );
+ }
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/sampling/anisotropy.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/sampling/anisotropy.spec.ts
new file mode 100644
index 0000000000..6595fa723c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/sampling/anisotropy.spec.ts
@@ -0,0 +1,325 @@
+export const description = `
+Tests the behavior of anisotropic filtering.
+
+TODO:
+Note that anisotropic filtering is never guaranteed to occur, but we might be able to test some
+things. If there are no guarantees we can issue warnings instead of failures. Ideas:
+ - No *more* than the provided maxAnisotropy samples are used, by testing how many unique
+ sample values come out of the sample operation.
+ - Check anisotropy is done in the correct direction (by having a 2D gradient and checking we get
+ more of the color in the correct direction).
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { assert } from '../../../../common/util/util.js';
+import { GPUTest, TextureTestMixin } from '../../../gpu_test.js';
+import { checkElementsEqual } from '../../../util/check_contents.js';
+import { TexelView } from '../../../util/texture/texel_view.js';
+import { PerPixelComparison } from '../../../util/texture/texture_ok.js';
+
+const kRTSize = 16;
+const kBytesPerRow = 256;
+const xMiddle = kRTSize / 2; // we check the pixel value in the middle of the render target
+const kColorAttachmentFormat = 'rgba8unorm';
+const kTextureFormat = 'rgba8unorm';
+const colors = [
+ new Uint8Array([0xff, 0x00, 0x00, 0xff]), // miplevel = 0
+ new Uint8Array([0x00, 0xff, 0x00, 0xff]), // miplevel = 1
+ new Uint8Array([0x00, 0x00, 0xff, 0xff]), // miplevel = 2
+];
+const checkerColors = [
+ new Uint8Array([0xff, 0x00, 0x00, 0xff]),
+ new Uint8Array([0x00, 0xff, 0x00, 0xff]),
+];
+
+// Renders a texture onto a slanted plane placed in a specific way.
+class SamplerAnisotropicFilteringSlantedPlaneTest extends GPUTest {
+ copyRenderTargetToBuffer(rt: GPUTexture): GPUBuffer {
+ const byteLength = kRTSize * kBytesPerRow;
+ const buffer = this.device.createBuffer({
+ size: byteLength,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+
+ const commandEncoder = this.device.createCommandEncoder();
+ commandEncoder.copyTextureToBuffer(
+ { texture: rt, mipLevel: 0, origin: [0, 0, 0] },
+ { buffer, bytesPerRow: kBytesPerRow, rowsPerImage: kRTSize },
+ { width: kRTSize, height: kRTSize, depthOrArrayLayers: 1 }
+ );
+ this.queue.submit([commandEncoder.finish()]);
+
+ return buffer;
+ }
+
+ private pipeline: GPURenderPipeline | undefined;
+ override async init(): Promise<void> {
+ await super.init();
+
+ this.pipeline = this.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: this.device.createShaderModule({
+ code: `
+ struct Outputs {
+ @builtin(position) Position : vec4<f32>,
+ @location(0) fragUV : vec2<f32>,
+ };
+
+ @vertex fn main(
+ @builtin(vertex_index) VertexIndex : u32) -> Outputs {
+ var position : array<vec3<f32>, 6> = array<vec3<f32>, 6>(
+ vec3<f32>(-0.5, 0.5, -0.5),
+ vec3<f32>(0.5, 0.5, -0.5),
+ vec3<f32>(-0.5, 0.5, 0.5),
+ vec3<f32>(-0.5, 0.5, 0.5),
+ vec3<f32>(0.5, 0.5, -0.5),
+ vec3<f32>(0.5, 0.5, 0.5));
+            // uv is pre-scaled to mimic a repeating tiled texture
+ var uv : array<vec2<f32>, 6> = array<vec2<f32>, 6>(
+ vec2<f32>(0.0, 0.0),
+ vec2<f32>(1.0, 0.0),
+ vec2<f32>(0.0, 50.0),
+ vec2<f32>(0.0, 50.0),
+ vec2<f32>(1.0, 0.0),
+ vec2<f32>(1.0, 50.0));
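+            // Stretching v across 0..50 while the plane recedes in depth gives each fragment an
+            // elongated footprint in texture space, which is the case anisotropic filtering targets.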
+ // draw a slanted plane in a specific way
+ let matrix : mat4x4<f32> = mat4x4<f32>(
+ vec4<f32>(-1.7320507764816284, 1.8322050568049563e-16, -6.176817699518044e-17, -6.170640314703498e-17),
+ vec4<f32>(-2.1211504944260596e-16, -1.496108889579773, 0.5043753981590271, 0.5038710236549377),
+ vec4<f32>(0.0, -43.63650894165039, -43.232173919677734, -43.18894577026367),
+ vec4<f32>(0.0, 21.693578720092773, 21.789791107177734, 21.86800193786621));
+
+ var output : Outputs;
+ output.fragUV = uv[VertexIndex];
+ output.Position = matrix * vec4<f32>(position[VertexIndex], 1.0);
+ return output;
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: this.device.createShaderModule({
+ code: `
+ @group(0) @binding(0) var sampler0 : sampler;
+ @group(0) @binding(1) var texture0 : texture_2d<f32>;
+
+ @fragment fn main(
+ @builtin(position) FragCoord : vec4<f32>,
+ @location(0) fragUV: vec2<f32>)
+ -> @location(0) vec4<f32> {
+ return textureSample(texture0, sampler0, fragUV);
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ primitive: { topology: 'triangle-list' },
+ });
+ }
+
+ // return the render target texture object
+ drawSlantedPlane(textureView: GPUTextureView, sampler: GPUSampler): GPUTexture {
+ // make sure it's already initialized
+ assert(this.pipeline !== undefined);
+
+ const bindGroup = this.device.createBindGroup({
+ entries: [
+ { binding: 0, resource: sampler },
+ { binding: 1, resource: textureView },
+ ],
+ layout: this.pipeline.getBindGroupLayout(0),
+ });
+
+ const colorAttachment = this.device.createTexture({
+ format: kColorAttachmentFormat,
+ size: { width: kRTSize, height: kRTSize, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ const colorAttachmentView = colorAttachment.createView();
+
+ const encoder = this.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorAttachmentView,
+ storeOp: 'store',
+ clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ },
+ ],
+ });
+ pass.setPipeline(this.pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.draw(6);
+ pass.end();
+ this.device.queue.submit([encoder.finish()]);
+
+ return colorAttachment;
+ }
+}
+
+export const g = makeTestGroup(TextureTestMixin(SamplerAnisotropicFilteringSlantedPlaneTest));
+
+g.test('anisotropic_filter_checkerboard')
+ .desc(
+    `Anisotropic filter rendering test that draws a slanted plane and samples from a texture
+    that only has a top-level mipmap whose content is a checkerboard.
+    We check that rendering results differ between samplers with different maxAnisotropy values,
+    since the sampling rate differs.
+    We also check that overly large maxAnisotropy values are clamped, so that rendering is the
+    same as with the supported upper limit, say 16.
+ A similar webgl demo is at https://jsfiddle.net/yqnbez24`
+ )
+ .fn(async t => {
+ // init texture with only a top level mipmap
+ const textureSize = 32;
+ const texture = t.device.createTexture({
+ mipLevelCount: 1,
+ size: { width: textureSize, height: textureSize, depthOrArrayLayers: 1 },
+ format: kTextureFormat,
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.TEXTURE_BINDING,
+ });
+
+ const textureEncoder = t.device.createCommandEncoder();
+
+    const bufferSize = kBytesPerRow * textureSize; // RGBA8 for each pixel (256 >= 32 * 4)
+
+ // init checkerboard texture data
+ const data: Uint8Array = new Uint8Array(bufferSize);
+ for (let r = 0; r < textureSize; r++) {
+ const o = r * kBytesPerRow;
+ for (let c = o, end = o + textureSize * 4; c < end; c += 4) {
+ const cid = (r + (c - o) / 4) % 2;
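+      // (c - o) / 4 is the texel's x index, so cid alternates per texel in both x and y.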
+ const color = checkerColors[cid];
+ data[c] = color[0];
+ data[c + 1] = color[1];
+ data[c + 2] = color[2];
+ data[c + 3] = color[3];
+ }
+ }
+ const buffer = t.makeBufferWithContents(
+ data,
+ GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST
+ );
+ const bytesPerRow = kBytesPerRow;
+ const rowsPerImage = textureSize;
+
+ textureEncoder.copyBufferToTexture(
+ {
+ buffer,
+ bytesPerRow,
+ rowsPerImage,
+ },
+ {
+ texture,
+ mipLevel: 0,
+ origin: [0, 0, 0],
+ },
+ [textureSize, textureSize, 1]
+ );
+
+ t.device.queue.submit([textureEncoder.finish()]);
+
+ const textureView = texture.createView();
+ const byteLength = kRTSize * kBytesPerRow;
+ const results = [];
+
+ for (const maxAnisotropy of [1, 16, 1024]) {
+ const sampler = t.device.createSampler({
+ magFilter: 'linear',
+ minFilter: 'linear',
+ mipmapFilter: 'linear',
+ maxAnisotropy,
+ });
+ const result = await t.readGPUBufferRangeTyped(
+ t.copyRenderTargetToBuffer(t.drawSlantedPlane(textureView, sampler)),
+ { type: Uint8Array, typedLength: byteLength }
+ );
+ results.push(result);
+ }
+
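+    // checkElementsEqual() returns undefined when the two arrays match. Matching results for
+    // maxAnisotropy 1 vs 16 only produces a warning (anisotropic filtering is not guaranteed),
+    // while differing results for 16 vs 1024 is a failure, since the test expects out-of-range
+    // values to be clamped to the supported maximum.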
+ const check0 = checkElementsEqual(results[0].data, results[1].data);
+ if (check0 === undefined) {
+ t.warn('Render results with sampler.maxAnisotropy being 1 and 16 should be different.');
+ }
+ const check1 = checkElementsEqual(results[1].data, results[2].data);
+ if (check1 !== undefined) {
+ t.expect(
+ false,
+ 'Render results with sampler.maxAnisotropy being 16 and 1024 should be the same.'
+ );
+ }
+
+ for (const result of results) {
+ result.cleanup();
+ }
+ });
+
+g.test('anisotropic_filter_mipmap_color')
+ .desc(
+    `Anisotropic filter rendering test that draws a slanted plane and samples from a texture
+    containing mipmaps of different colors. Given the same fragment, with dFdx and dFdy of the uv differing,
+    a sampler with a bigger maxAnisotropy value tends to sample more detailed (larger) mip levels.
+    We can then look at the color of the fragment to know which mip level is being sampled from and
+    see whether it fits expectations.
+ A similar webgl demo is at https://jsfiddle.net/t8k7c95o/5/`
+ )
+ .paramsSimple([
+ {
+ maxAnisotropy: 1,
+ _results: [
+ { coord: { x: xMiddle, y: 2 }, expected: colors[2] },
+ { coord: { x: xMiddle, y: 6 }, expected: [colors[0], colors[1]] },
+ ],
+ _generateWarningOnly: false,
+ },
+ {
+ maxAnisotropy: 4,
+ _results: [
+ { coord: { x: xMiddle, y: 2 }, expected: [colors[0], colors[1]] },
+ { coord: { x: xMiddle, y: 6 }, expected: colors[0] },
+ ],
+ _generateWarningOnly: true,
+ },
+ ])
+ .fn(t => {
+ const texture = t.createTextureFromTexelViewsMultipleMipmaps(
+ colors.map(value => TexelView.fromTexelsAsBytes(kTextureFormat, _coords => value)),
+ { size: [4, 4, 1], usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.TEXTURE_BINDING }
+ );
+ const textureView = texture.createView();
+
+ const sampler = t.device.createSampler({
+ magFilter: 'linear',
+ minFilter: 'linear',
+ mipmapFilter: 'linear',
+ maxAnisotropy: t.params.maxAnisotropy,
+ });
+
+ const colorAttachment = t.drawSlantedPlane(textureView, sampler);
+
+ const pixelComparisons: PerPixelComparison<Uint8Array>[] = [];
+ for (const entry of t.params._results) {
+ if (entry.expected instanceof Uint8Array) {
+        // expected to equal exactly one color
+ pixelComparisons.push({ coord: entry.coord, exp: entry.expected });
+ } else {
+ // a lerp between two colors
+ // MAINTENANCE_TODO: Unify comparison to allow for a strict in-between comparison to support
+ // this kind of expectation.
+ t.expectSinglePixelBetweenTwoValuesIn2DTexture(
+ colorAttachment,
+ kColorAttachmentFormat,
+ entry.coord,
+ {
+ exp: entry.expected as [Uint8Array, Uint8Array],
+ generateWarningOnly: t.params._generateWarningOnly,
+ }
+ );
+ }
+ }
+ t.expectSinglePixelComparisonsAreOkInTexture({ texture: colorAttachment }, pixelComparisons);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/sampling/filter_mode.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/sampling/filter_mode.spec.ts
new file mode 100644
index 0000000000..129997382d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/sampling/filter_mode.spec.ts
@@ -0,0 +1,1143 @@
+export const description = `
+Tests the behavior of different filtering modes in minFilter/magFilter/mipmapFilter.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { kAddressModes, kMipmapFilterModes } from '../../../capability_info.js';
+import {
+ EncodableTextureFormat,
+ kRenderableColorTextureFormats,
+ kTextureFormatInfo,
+} from '../../../format_info.js';
+import { GPUTest, TextureTestMixin } from '../../../gpu_test.js';
+import { getTextureCopyLayout } from '../../../util/texture/layout.js';
+import { TexelView } from '../../../util/texture/texel_view.js';
+
+// Simple checkerboard 2x2 texture used as a base for the sampling.
+const kCheckerTextureSize = 2;
+const kCheckerTextureData = [
+ { R: 1.0, G: 1.0, B: 1.0, A: 1.0 },
+ { R: 0.0, G: 0.0, B: 0.0, A: 1.0 },
+ { R: 0.0, G: 0.0, B: 0.0, A: 1.0 },
+ { R: 1.0, G: 1.0, B: 1.0, A: 1.0 },
+];
+
+class FilterModeTest extends TextureTestMixin(GPUTest) {
+ runFilterRenderPipeline(
+ sampler: GPUSampler,
+ module: GPUShaderModule,
+ format: EncodableTextureFormat,
+ renderSize: number[],
+ vertexCount: number,
+ instanceCount: number
+ ) {
+ const sampleTexture = this.createTextureFromTexelView(
+ TexelView.fromTexelsAsColors(format, coord => {
+ const id = coord.x + coord.y * kCheckerTextureSize;
+ return kCheckerTextureData[id];
+ }),
+ {
+ size: [kCheckerTextureSize, kCheckerTextureSize],
+ usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_DST,
+ }
+ );
+ const renderTexture = this.device.createTexture({
+ format,
+ size: renderSize,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ });
+ const pipeline = this.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'vs_main',
+ },
+ fragment: {
+ module,
+ entryPoint: 'fs_main',
+ targets: [{ format }],
+ },
+ });
+ const bindgroup = this.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: sampler },
+ { binding: 1, resource: sampleTexture.createView() },
+ ],
+ });
+ const commandEncoder = this.device.createCommandEncoder();
+ const renderPass = commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTexture.createView(),
+ clearValue: [0, 0, 0, 0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ renderPass.setPipeline(pipeline);
+ renderPass.setBindGroup(0, bindgroup);
+ renderPass.draw(vertexCount, instanceCount);
+ renderPass.end();
+ this.device.queue.submit([commandEncoder.finish()]);
+ return renderTexture;
+ }
+}
+
+export const g = makeTestGroup(FilterModeTest);
+
+/* eslint-disable prettier/prettier */
+
+/* For filter mode 'nearest', we need to check a 6x6 grid of pixels because the 4x4 grids are identical
+ * when using address modes 'clamp-to-edge' and 'mirror-repeat'. The minFilter and magFilter tests are
+ * set up so that they both render the same results. (See the respective test for details.) The following
+ * table shows the expected results:
+ * u
+ *
+ * repeat clamp-to-edge mirror-repeat
+ *
+ * │█│ │█│ │█│ │ │█│█│█│ │ │ │ │ │█│█│ │ │█│
+ * │ │█│ │█│ │█│ │ │ │ │█│█│█│ │█│ │ │█│█│ │
+ * │█│ │█│ │█│ │ │█│█│█│ │ │ │ │ │█│█│ │ │█│
+ * repeat │ │█│ │█│ │█│ │ │ │ │█│█│█│ │█│ │ │█│█│ │
+ * │█│ │█│ │█│ │ │█│█│█│ │ │ │ │ │█│█│ │ │█│
+ * │ │█│ │█│ │█│ │ │ │ │█│█│█│ │█│ │ │█│█│ │
+ *
+ * │█│ │█│ │█│ │ │█│█│█│ │ │ │ │ │█│█│ │ │█│
+ * │█│ │█│ │█│ │ │█│█│█│ │ │ │ │ │█│█│ │ │█│
+ * │█│ │█│ │█│ │ │█│█│█│ │ │ │ │ │█│█│ │ │█│
+ * v clamp-to-edge │ │█│ │█│ │█│ │ │ │ │█│█│█│ │█│ │ │█│█│ │
+ * │ │█│ │█│ │█│ │ │ │ │█│█│█│ │█│ │ │█│█│ │
+ * │ │█│ │█│ │█│ │ │ │ │█│█│█│ │█│ │ │█│█│ │
+ *
+ * │ │█│ │█│ │█│ │ │ │ │█│█│█│ │█│ │ │█│█│ │
+ * │█│ │█│ │█│ │ │█│█│█│ │ │ │ │ │█│█│ │ │█│
+ * │█│ │█│ │█│ │ │█│█│█│ │ │ │ │ │█│█│ │ │█│
+ * mirror-repeat │ │█│ │█│ │█│ │ │ │ │█│█│█│ │█│ │ │█│█│ │
+ * │ │█│ │█│ │█│ │ │ │ │█│█│█│ │█│ │ │█│█│ │
+ * │█│ │█│ │█│ │ │█│█│█│ │ │ │ │ │█│█│ │ │█│
+*/
+const kNearestRenderSize = 6;
+const kNearestRenderDim = [kNearestRenderSize, kNearestRenderSize];
+const kNearestURepeatVRepeat = [
+ [1, 0, 1, 0, 1, 0],
+ [0, 1, 0, 1, 0, 1],
+ [1, 0, 1, 0, 1, 0],
+ [0, 1, 0, 1, 0, 1],
+ [1, 0, 1, 0, 1, 0],
+ [0, 1, 0, 1, 0, 1],
+];
+const kNearestURepeatVClamped = [
+ [1, 0, 1, 0, 1, 0],
+ [1, 0, 1, 0, 1, 0],
+ [1, 0, 1, 0, 1, 0],
+ [0, 1, 0, 1, 0, 1],
+ [0, 1, 0, 1, 0, 1],
+ [0, 1, 0, 1, 0, 1],
+];
+const kNearestURepeatVMirror = [
+ [0, 1, 0, 1, 0, 1],
+ [1, 0, 1, 0, 1, 0],
+ [1, 0, 1, 0, 1, 0],
+ [0, 1, 0, 1, 0, 1],
+ [0, 1, 0, 1, 0, 1],
+ [1, 0, 1, 0, 1, 0],
+];
+const kNearestUClampedVRepeat = [
+ [1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1],
+ [1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1],
+ [1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1],
+];
+const kNearestUClampedVClamped = [
+ [1, 1, 1, 0, 0, 0],
+ [1, 1, 1, 0, 0, 0],
+ [1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1],
+ [0, 0, 0, 1, 1, 1],
+ [0, 0, 0, 1, 1, 1],
+];
+const kNearestUClampedVMirror = [
+ [0, 0, 0, 1, 1, 1],
+ [1, 1, 1, 0, 0, 0],
+ [1, 1, 1, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1],
+ [0, 0, 0, 1, 1, 1],
+ [1, 1, 1, 0, 0, 0],
+];
+const kNearestUMirrorVRepeat = [
+ [0, 1, 1, 0, 0, 1],
+ [1, 0, 0, 1, 1, 0],
+ [0, 1, 1, 0, 0, 1],
+ [1, 0, 0, 1, 1, 0],
+ [0, 1, 1, 0, 0, 1],
+ [1, 0, 0, 1, 1, 0],
+];
+const kNearestUMirrorVClamped = [
+ [0, 1, 1, 0, 0, 1],
+ [0, 1, 1, 0, 0, 1],
+ [0, 1, 1, 0, 0, 1],
+ [1, 0, 0, 1, 1, 0],
+ [1, 0, 0, 1, 1, 0],
+ [1, 0, 0, 1, 1, 0],
+];
+const kNearestUMirrorVMirror = [
+ [1, 0, 0, 1, 1, 0],
+ [0, 1, 1, 0, 0, 1],
+ [0, 1, 1, 0, 0, 1],
+ [1, 0, 0, 1, 1, 0],
+ [1, 0, 0, 1, 1, 0],
+ [0, 1, 1, 0, 0, 1],
+];
+
+/* For filter mode 'linear', the test samples 16 points (to create a 4x4 result) from the effective 8x8
+ * texture produced by expanding the base texture via the address modes (see the tables below). The
+ * sample points are selected such that no combination of address modes results in the same render.
+ * There is exactly one sample point in each sub 2x2 of the 8x8 texture, thereby yielding the 4x4
+ * result. Note that sampling from the 8x8 texture instead of the 6x6 texture is necessary because
+ * that allows us to keep the results in powers of 2 to minimize floating point errors on different
+ * backends.
+ *
+ * The 8x8 effective textures:
+ * u
+ *
+ * repeat clamp-to-edge mirror-repeat
+ * │█│ │█│ │█│ │█│ │ │ │ │ │ │█│█│█│█│ │█│█│ │ │█│█│ │ │
+ * │ │█│ │█│ │█│ │█│ │█│█│█│█│ │ │ │ │ │ │ │█│█│ │ │█│█│
+ * │█│ │█│ │█│ │█│ │ │ │ │ │ │█│█│█│█│ │█│█│ │ │█│█│ │ │
+ * repeat │ │█│ │█│ │█│ │█│ │█│█│█│█│ │ │ │ │ │ │ │█│█│ │ │█│█│
+ * │█│ │█│ │█│ │█│ │ │ │ │ │ │█│█│█│█│ │█│█│ │ │█│█│ │ │
+ * │ │█│ │█│ │█│ │█│ │█│█│█│█│ │ │ │ │ │ │ │█│█│ │ │█│█│
+ * │█│ │█│ │█│ │█│ │ │ │ │ │ │█│█│█│█│ │█│█│ │ │█│█│ │ │
+ * │ │█│ │█│ │█│ │█│ │█│█│█│█│ │ │ │ │ │ │ │█│█│ │ │█│█│
+ *
+ * │ │█│ │█│ │█│ │█│ │█│█│█│█│ │ │ │ │ │ │ │█│█│ │ │█│█│
+ * │ │█│ │█│ │█│ │█│ │█│█│█│█│ │ │ │ │ │ │ │█│█│ │ │█│█│
+ * │ │█│ │█│ │█│ │█│ │█│█│█│█│ │ │ │ │ │ │ │█│█│ │ │█│█│
+ * v clamp-to-edge │ │█│ │█│ │█│ │█│ │█│█│█│█│ │ │ │ │ │ │ │█│█│ │ │█│█│
+ * │█│ │█│ │█│ │█│ │ │ │ │ │ │█│█│█│█│ │█│█│ │ │█│█│ │ │
+ * │█│ │█│ │█│ │█│ │ │ │ │ │ │█│█│█│█│ │█│█│ │ │█│█│ │ │
+ * │█│ │█│ │█│ │█│ │ │ │ │ │ │█│█│█│█│ │█│█│ │ │█│█│ │ │
+ * │█│ │█│ │█│ │█│ │ │ │ │ │ │█│█│█│█│ │█│█│ │ │█│█│ │ │
+ *
+ * │█│ │█│ │█│ │█│ │ │ │ │ │ │█│█│█│█│ │█│█│ │ │█│█│ │ │
+ * │█│ │█│ │█│ │█│ │ │ │ │ │ │█│█│█│█│ │█│█│ │ │█│█│ │ │
+ * │ │█│ │█│ │█│ │█│ │█│█│█│█│ │ │ │ │ │ │ │█│█│ │ │█│█│
+ * mirror-repeat │ │█│ │█│ │█│ │█│ │█│█│█│█│ │ │ │ │ │ │ │█│█│ │ │█│█│
+ * │█│ │█│ │█│ │█│ │ │ │ │ │ │█│█│█│█│ │█│█│ │ │█│█│ │ │
+ * │█│ │█│ │█│ │█│ │ │ │ │ │ │█│█│█│█│ │█│█│ │ │█│█│ │ │
+ * │ │█│ │█│ │█│ │█│ │█│█│█│█│ │ │ │ │ │ │ │█│█│ │ │█│█│
+ * │ │█│ │█│ │█│ │█│ │█│█│█│█│ │ │ │ │ │ │ │█│█│ │ │█│█│
+ *
+ *
+ * Sample points:
+ * The sample points are always at a 25% corner of a pixel such that the contributions come from
+ * the 2x2 (doubly outlined) with ratios 1/16, 3/16, or 9/16.
+ * ╔══╤══╦══╤══╦══╤══╦══╤══╗
+ * ║ │ ║ │ ║ │ ║ │ ║
+ * ╟──┼──╫──┼──╫──┼──╫──┼──╢
+ * ║ │▘ ║ ▝│ ║ │▘ ║ ▝│ ║
+ * ╠══╪══╬══╪══╬══╪══╬══╪══╣
+ * ║ │ ║ │ ║ │ ║ │ ║
+ * ╟──┼──╫──┼──╫──┼──╫──┼──╢
+ * ║ │▘ ║ ▝│ ║ │▘ ║ ▝│ ║
+ * ╠══╪══╬══╪══╬══╪══╬══╪══╣
+ * ║ │▖ ║ ▗│ ║ │▖ ║ ▗│ ║
+ * ╟──┼──╫──┼──╫──┼──╫──┼──╢
+ * ║ │ ║ │ ║ │ ║ │ ║
+ * ╠══╪══╬══╪══╬══╪══╬══╪══╣
+ * ║ │▖ ║ ▗│ ║ │▖ ║ ▗│ ║
+ * ╟──┼──╫──┼──╫──┼──╫──┼──╢
+ * ║ │ ║ │ ║ │ ║ │ ║
+ * ╚══╧══╩══╧══╩══╧══╩══╧══╝
+ */
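+/* At a 25% corner, the bilinear weights along each axis are 3/4 and 1/4, so the four texels of the
+ * doubly outlined 2x2 contribute 3/4 * 3/4 = 9/16, 3/4 * 1/4 = 3/16 (twice), and 1/4 * 1/4 = 1/16.
+ * That is why the expected values in the tables below are integers out of 16 (divided by 16 when
+ * building the expected TexelView). */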
+const kLinearRenderSize = 4;
+const kLinearRenderDim = [kLinearRenderSize, kLinearRenderSize];
+const kLinearURepeatVRepeat = [
+ [10, 6, 10, 6],
+ [10, 6, 10, 6],
+ [6, 10, 6, 10],
+ [6, 10, 6, 10],
+];
+const kLinearURepeatVClamped = [
+ [12, 4, 12, 4],
+ [12, 4, 12, 4],
+ [4, 12, 4, 12],
+ [4, 12, 4, 12],
+];
+const kLinearURepeatVMirror = [
+ [4, 12, 4, 12],
+ [12, 4, 12, 4],
+ [4, 12, 4, 12],
+ [12, 4, 12, 4],
+];
+const kLinearUClampedVRepeat = [
+ [12, 12, 4, 4],
+ [12, 12, 4, 4],
+ [4, 4, 12, 12],
+ [4, 4, 12, 12],
+];
+const kLinearUClampedVClamped = [
+ [16, 16, 0, 0],
+ [16, 16, 0, 0],
+ [0, 0, 16, 16],
+ [0, 0, 16, 16],
+];
+const kLinearUClampedVMirror = [
+ [0, 0, 16, 16],
+ [16, 16, 0, 0],
+ [0, 0, 16, 16],
+ [16, 16, 0, 0],
+];
+const kLinearUMirrorVRepeat = [
+ [4, 12, 4, 12],
+ [4, 12, 4, 12],
+ [12, 4, 12, 4],
+ [12, 4, 12, 4],
+];
+const kLinearUMirrorVClamped = [
+ [0, 16, 0, 16],
+ [0, 16, 0, 16],
+ [16, 0, 16, 0],
+ [16, 0, 16, 0],
+];
+const kLinearUMirrorVMirror = [
+ [16, 0, 16, 0],
+ [0, 16, 0, 16],
+ [16, 0, 16, 0],
+ [0, 16, 0, 16],
+];
+
+/* eslint-enable prettier/prettier */
+
+function expectedNearestColors(
+ format: EncodableTextureFormat,
+ addressModeU: GPUAddressMode,
+ addressModeV: GPUAddressMode
+): TexelView {
+ let expectedColors: number[][];
+ switch (addressModeU) {
+ case 'clamp-to-edge': {
+ switch (addressModeV) {
+ case 'clamp-to-edge':
+ expectedColors = kNearestUClampedVClamped;
+ break;
+ case 'repeat':
+ expectedColors = kNearestUClampedVRepeat;
+ break;
+ case 'mirror-repeat':
+ expectedColors = kNearestUClampedVMirror;
+ break;
+ }
+ break;
+ }
+ case 'repeat':
+ switch (addressModeV) {
+ case 'clamp-to-edge':
+ expectedColors = kNearestURepeatVClamped;
+ break;
+ case 'repeat':
+ expectedColors = kNearestURepeatVRepeat;
+ break;
+ case 'mirror-repeat':
+ expectedColors = kNearestURepeatVMirror;
+ break;
+ }
+ break;
+ case 'mirror-repeat':
+ switch (addressModeV) {
+ case 'clamp-to-edge':
+ expectedColors = kNearestUMirrorVClamped;
+ break;
+ case 'repeat':
+ expectedColors = kNearestUMirrorVRepeat;
+ break;
+ case 'mirror-repeat':
+ expectedColors = kNearestUMirrorVMirror;
+ break;
+ }
+ break;
+ }
+ return TexelView.fromTexelsAsColors(format, coord => {
+ const c = expectedColors[coord.y][coord.x];
+ return { R: c, G: c, B: c, A: 1.0 };
+ });
+}
+function expectedLinearColors(
+ format: EncodableTextureFormat,
+ addressModeU: GPUAddressMode,
+ addressModeV: GPUAddressMode
+): TexelView {
+ let expectedColors: number[][];
+ switch (addressModeU) {
+ case 'clamp-to-edge': {
+ switch (addressModeV) {
+ case 'clamp-to-edge':
+ expectedColors = kLinearUClampedVClamped;
+ break;
+ case 'repeat':
+ expectedColors = kLinearUClampedVRepeat;
+ break;
+ case 'mirror-repeat':
+ expectedColors = kLinearUClampedVMirror;
+ break;
+ }
+ break;
+ }
+ case 'repeat':
+ switch (addressModeV) {
+ case 'clamp-to-edge':
+ expectedColors = kLinearURepeatVClamped;
+ break;
+ case 'repeat':
+ expectedColors = kLinearURepeatVRepeat;
+ break;
+ case 'mirror-repeat':
+ expectedColors = kLinearURepeatVMirror;
+ break;
+ }
+ break;
+ case 'mirror-repeat':
+ switch (addressModeV) {
+ case 'clamp-to-edge':
+ expectedColors = kLinearUMirrorVClamped;
+ break;
+ case 'repeat':
+ expectedColors = kLinearUMirrorVRepeat;
+ break;
+ case 'mirror-repeat':
+ expectedColors = kLinearUMirrorVMirror;
+ break;
+ }
+ break;
+ }
+ return TexelView.fromTexelsAsColors(format, coord => {
+ const c = expectedColors[coord.y][coord.x];
+ return { R: c / 16, G: c / 16, B: c / 16, A: 1.0 };
+ });
+}
+function expectedColors(
+ format: EncodableTextureFormat,
+ filterMode: GPUFilterMode,
+ addressModeU: GPUAddressMode,
+ addressModeV: GPUAddressMode
+): TexelView {
+ switch (filterMode) {
+ case 'nearest':
+ return expectedNearestColors(format, addressModeU, addressModeV);
+ case 'linear':
+ return expectedLinearColors(format, addressModeU, addressModeV);
+ }
+}
+
+/* For the magFilter tests, each rendered pixel is an instanced quad such that the center of the
+ * quad coincides with the center of the pixel. The uv coordinates for each quad are shifted
+ * according to the test so that the center of the quad is at the point we want to sample.
+ *
+ * For the grid offset logic, see this codelab for reference:
+ * https://codelabs.developers.google.com/your-first-webgpu-app#4
+ */
+
+/* The following diagram shows the UV shift (almost to scale) for what the pixel at cell (0,0) looks
+ * like w.r.t. the UV of the texture if we just mapped the entire 2x2 texture to the quad. Note that
+ * the square representing the mapped location on the bottom left is actually slightly smaller than
+ * a pixel in order to ensure that we are magnifying the texture and hence using the magFilter. It
+ * should be fairly straightforward to derive from the picture that each pixel shifts the UV by
+ * (.5, -.5).
+ *
+ * ┌─┬─┬─┬─┬─┬─┐
+ * ├─┼─┼─┼─┼─┼─┤ (0,0) (1,0)
+ * ├─┼─╔═╪═╗─┼─┤ ╔═══╗
+ * ├─┼─╫─┼─╫─┼─┤ ║─┼─║
+ * ├─┼─╚═╪═╝─┼─┤ ╚═══╝ (-.875,1.625) (-.625,1.625)
+ * ╔═╗─┼─┼─┼─┼─┤ (0,1) (1,1) ╔═╗
+ * ╚═╝─┴─┴─┴─┴─┘ ╚═╝
+ * (-.875,1.875) (-.625,1.875)
+ */
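+// With uvBase = uvBases[vi] * 0.25 + (-0.875, 1.625), the quad for cell (0,0) is centered on
+// UV (-0.75, 1.75); the per-cell shift of (0.5, -0.5) then moves the sample point by exactly one
+// texel of the 2x2 texture per cell, stepping across the effective (address-mode expanded)
+// texture one texel at a time.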
+g.test('magFilter,nearest')
+ .desc(
+ `
+ Test that for filterable formats, magFilter 'nearest' mode correctly modifies the sampling.
+ - format= {<filterable formats>}
+ - addressModeU= {'clamp-to-edge', 'repeat', 'mirror-repeat'}
+ - addressModeV= {'clamp-to-edge', 'repeat', 'mirror-repeat'}
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kRenderableColorTextureFormats)
+ .filter(t => {
+ return (
+ kTextureFormatInfo[t.format].color.type === 'float' ||
+ kTextureFormatInfo[t.format].color.type === 'unfilterable-float'
+ );
+ })
+ .beginSubcases()
+ .combine('addressModeU', kAddressModes)
+ .combine('addressModeV', kAddressModes)
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ if (kTextureFormatInfo[t.params.format].color.type === 'unfilterable-float') {
+ t.selectDeviceOrSkipTestCase('float32-filterable');
+ }
+ })
+ .fn(t => {
+ const { format, addressModeU, addressModeV } = t.params;
+ const sampler = t.device.createSampler({
+ addressModeU,
+ addressModeV,
+ magFilter: 'nearest',
+ });
+ const module = t.device.createShaderModule({
+ code: `
+ @group(0) @binding(0) var s : sampler;
+ @group(0) @binding(1) var t : texture_2d<f32>;
+
+ struct VertexOut {
+ @builtin(position) pos: vec4f,
+ @location(0) uv: vec2f,
+ };
+
+ @vertex
+ fn vs_main(@builtin(vertex_index) vi : u32,
+ @builtin(instance_index) ii: u32) -> VertexOut {
+ const grid = vec2f(${kNearestRenderSize}, ${kNearestRenderSize});
+ const posBases = array(
+ vec2f(1, 1), vec2f(1, -1), vec2f(-1, -1),
+ vec2f(1, 1), vec2f(-1, -1), vec2f(-1, 1),
+ );
+ const uvBases = array(
+ vec2f(1., 0.), vec2f(1., 1.), vec2f(0., 1.),
+ vec2f(1., 0.), vec2f(0., 1.), vec2f(0., 0.),
+ );
+
+          // Compute the offset of the instance plane.
+ let cell = vec2f(f32(ii) % grid.x, floor(f32(ii) / grid.y));
+ let cellOffset = cell / grid * 2;
+ let pos = (posBases[vi] + 1) / grid - 1 + cellOffset;
+
+ // Compute the offset of the UVs.
+ let uvBase = uvBases[vi] * 0.25 + vec2f(-0.875, 1.625);
+ const uvPerPixelOffset = vec2f(0.5, -0.5);
+ return VertexOut(vec4f(pos, 0.0, 1.0), uvBase + uvPerPixelOffset * cell);
+ }
+
+ @fragment
+ fn fs_main(@location(0) uv : vec2f) -> @location(0) vec4f {
+ return textureSample(t, s, uv);
+ }
+ `,
+ });
+ const vertexCount = 6;
+ const instanceCount = kNearestRenderDim.reduce((sink, current) => sink * current);
+ const render = t.runFilterRenderPipeline(
+ sampler,
+ module,
+ format,
+ kNearestRenderDim,
+ vertexCount,
+ instanceCount
+ );
+ t.expectTexelViewComparisonIsOkInTexture(
+ { texture: render },
+ expectedColors(format, 'nearest', addressModeU, addressModeV),
+ kNearestRenderDim
+ );
+ });
+
+/* The following diagram shows the UV shift (almost to scale) for what the pixel at cell (0,0) (the
+ * dark square) looks like w.r.t. the UV of the texture if we just mapped the entire 2x2 texture to
+ * the quad. The other small squares mark the other locations at which we sample the texture. The
+ * offsets are defined in the shader.
+ *
+ * ┌────┬────┬────┬────┬────┬────┬────┬────┐
+ * │ │ │ │ │ │ │ │ │
+ * │ │ │ │ │ │ │ │ │
+ * ├────┼────┼────┼────┼────┼────┼────┼────┤
+ * │ │□ │ □│ │ │□ │ □│ │
+ * │ │ │ │ │ │ │ │ │
+ * ├────┼────┼────┼────┼────┼────┼────┼────┤
+ * │ │ │ │ │ │ │ │ │
+ * │ │ │ │ │ │ │ │ │ (0,0) (1,0)
+ * ├────┼────┼────╔════╪════╗────┼────┼────┤ ╔═════════╗
+ * │ │□ │ □║ │ ║□ │ □│ │ ║ │ ║
+ * │ │ │ ║ │ ║ │ │ │ ║ │ ║
+ * ├────┼────┼────╫────┼────╫────┼────┼────┤ ║────┼────║
+ * │ │ │ ║ │ ║ │ │ │ ║ │ ║
+ * │ │□ │ □║ │ ║□ │ □│ │ ║ │ ║
+ * ├────┼────┼────╚════╪════╝────┼────┼────┤ ╚═════════╝
+ * │ │ │ │ │ │ │ │ │ (0,1) (1,1)
+ * │ │ │ │ │ │ │ │ │
+ * ├────┼────┼────┼────┼────┼────┼────┼────┤
+ * │ │ │ │ │ │ │ │ │ (-1,1.75) (-.75,1.75)
+ * │ │■ │ □│ │ │□ │ □│ │ ■
+ * ├────┼────┼────┼────┼────┼────┼────┼────┤ (-1,2) (-.75,2)
+ * │ │ │ │ │ │ │ │ │
+ * │ │ │ │ │ │ │ │ │
+ * └────┴────┴────┴────┴────┴────┴────┴────┘
+ */
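+// The non-uniform uOffsets/vOffsets in the shader move the quad center between the sample
+// locations marked above; combined with the (-1, 1.75) base offset they land on one 25%-corner
+// sample point inside each 2x2 block of the effective 8x8 texture (see the 'Sample points'
+// diagram earlier in this file).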
+g.test('magFilter,linear')
+ .desc(
+ `
+ Test that for filterable formats, magFilter 'linear' mode correctly modifies the sampling.
+ - format= {<filterable formats>}
+ - addressModeU= {'clamp-to-edge', 'repeat', 'mirror-repeat'}
+ - addressModeV= {'clamp-to-edge', 'repeat', 'mirror-repeat'}
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kRenderableColorTextureFormats)
+ .filter(t => {
+ return (
+ kTextureFormatInfo[t.format].color.type === 'float' ||
+ kTextureFormatInfo[t.format].color.type === 'unfilterable-float'
+ );
+ })
+ .beginSubcases()
+ .combine('addressModeU', kAddressModes)
+ .combine('addressModeV', kAddressModes)
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ if (kTextureFormatInfo[t.params.format].color.type === 'unfilterable-float') {
+ t.selectDeviceOrSkipTestCase('float32-filterable');
+ }
+ })
+ .fn(t => {
+ const { format, addressModeU, addressModeV } = t.params;
+ const sampler = t.device.createSampler({
+ addressModeU,
+ addressModeV,
+ magFilter: 'linear',
+ });
+ const module = t.device.createShaderModule({
+ code: `
+ @group(0) @binding(0) var s : sampler;
+ @group(0) @binding(1) var t : texture_2d<f32>;
+
+ struct VertexOut {
+ @builtin(position) pos: vec4f,
+ @location(0) uv: vec2f,
+ };
+
+ @vertex
+ fn vs_main(@builtin(vertex_index) vi : u32,
+ @builtin(instance_index) ii: u32) -> VertexOut {
+ const grid = vec2f(${kLinearRenderSize}, ${kLinearRenderSize});
+ const posBases = array(
+ vec2f(1, 1), vec2f(1, -1), vec2f(-1, -1),
+ vec2f(1, 1), vec2f(-1, -1), vec2f(-1, 1),
+ );
+ const uvBases = array(
+ vec2f(1., 0.), vec2f(1., 1.), vec2f(0., 1.),
+ vec2f(1., 0.), vec2f(0., 1.), vec2f(0., 0.),
+ );
+
+          // Compute the offset of the instance plane.
+ let cell = vec2f(f32(ii) % grid.x, floor(f32(ii) / grid.y));
+ let cellOffset = cell / grid * 2;
+ let pos = (posBases[vi] + 1) / grid - 1 + cellOffset;
+
+ // Compute the offset of the UVs.
+ const uOffsets = array(0., 0.75, 2., 2.75);
+ const vOffsets = array(0., 1., 1.75, 2.75);
+ let uvBase = uvBases[vi] * 0.25 + vec2f(-1., 1.75);
+ let uvPixelOffset = vec2f(uOffsets[u32(cell.x)], -vOffsets[u32(cell.y)]);
+ return VertexOut(vec4f(pos, 0.0, 1.0), uvBase + uvPixelOffset);
+ }
+
+ @fragment
+ fn fs_main(@location(0) uv : vec2f) -> @location(0) vec4f {
+ return textureSample(t, s, uv);
+ }
+ `,
+ });
+ const vertexCount = 6;
+ const instanceCount = kLinearRenderDim.reduce((sink, current) => sink * current);
+ const render = t.runFilterRenderPipeline(
+ sampler,
+ module,
+ format,
+ kLinearRenderDim,
+ vertexCount,
+ instanceCount
+ );
+ t.expectTexelViewComparisonIsOkInTexture(
+ { texture: render },
+ expectedColors(format, 'linear', addressModeU, addressModeV),
+ kLinearRenderDim
+ );
+ });
+
+/* For the minFilter tests, each rendered pixel is a small instanced quad that is UV mapped so that
+ * it covers either the 6x6 or the 8x8 texture from above. Each quad in each cell is then offset
+ * and scaled so that the target sample point coincides with the center of the pixel and the
+ * texture is significantly smaller than the pixel, forcing minFilter mode.
+ *
+ * For the grid offset logic, see this codelab for reference:
+ * https://codelabs.developers.google.com/your-first-webgpu-app#4
+ */
+
+/* The following diagram depicts a single pixel and the sub-pixel sized 6x6 textured quad. The
+ * distances shown in the diagram are pre-grid transformation and relative to the quad. Notice that
+ * for cell (0,0) marked with an x, we need to offset the center by (5/12,5/12), and per cell, the
+ * offset is (-1/6, -1/6).
+ *
+ *
+ * ┌───────────────────────────────────────────────┐
+ * │ │
+ * │ │
+ * │ │
+ * │ │
+ * │ │
+ * │ ┌───┬───┬───┬───┬───┬───┐ │
+ * │ │ │ │ │ │ │ │ │
+ * │ ├───┼───┼───┼───┼───┼───┤ │
+ * │ │ │ │ │ │ │ │ │
+ * │ ├───┼───┼───┼───┼───┼───┤ │
+ * │ │ │ │ │ │ │ │ │
+ * │ ├───┼───┼───x───┼───┼───┤ │ ┐
+ * │ │ │ │ │ │ │ │ │ │
+ * │ ├───┼───┼───┼───┼───┼───┤ │ │ 5/12
+ * │ │ │ │ │ │ │ │ │ ┐ │
+ * │ ├───┼───┼───┼───┼───┼───┤ │ │ 1/6 │
+ * │ │ x │ │ │ │ │ │ │ ┘ ┘
+ * │ └───┴───┴───┴───┴───┴───┘ │
+ * │ │
+ * │ │
+ * │ │
+ * │ │
+ * │ │
+ * └───────────────────────────────────────────────┘
+ */
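+// In pre-grid units the quad spans +/-0.5, so one texel of the mapped 6x6 texture is 1/6 wide and
+// texel centers sit at +/-1/12, +/-3/12, +/-5/12. The constant (5/12, 5/12) offset places one
+// corner texel's center on the pixel center, and stepping the offset by 1/6 per cell moves the
+// sample point one texel at a time.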
+g.test('minFilter,nearest')
+ .desc(
+ `
+ Test that for filterable formats, minFilter 'nearest' mode correctly modifies the sampling.
+ - format= {<filterable formats>}
+ - addressModeU= {'clamp-to-edge', 'repeat', 'mirror-repeat'}
+ - addressModeV= {'clamp-to-edge', 'repeat', 'mirror-repeat'}
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kRenderableColorTextureFormats)
+ .filter(t => {
+ return (
+ kTextureFormatInfo[t.format].color.type === 'float' ||
+ kTextureFormatInfo[t.format].color.type === 'unfilterable-float'
+ );
+ })
+ .beginSubcases()
+ .combine('addressModeU', kAddressModes)
+ .combine('addressModeV', kAddressModes)
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ if (kTextureFormatInfo[t.params.format].color.type === 'unfilterable-float') {
+ t.selectDeviceOrSkipTestCase('float32-filterable');
+ }
+ })
+ .fn(t => {
+ const { format, addressModeU, addressModeV } = t.params;
+ const sampler = t.device.createSampler({
+ addressModeU,
+ addressModeV,
+ minFilter: 'nearest',
+ });
+ const module = t.device.createShaderModule({
+ code: `
+ @group(0) @binding(0) var s : sampler;
+ @group(0) @binding(1) var t : texture_2d<f32>;
+
+ struct VertexOut {
+ @builtin(position) pos: vec4f,
+ @location(0) uv: vec2f,
+ };
+
+ @vertex
+ fn vs_main(@builtin(vertex_index) vi : u32,
+ @builtin(instance_index) ii: u32) -> VertexOut {
+ const grid = vec2f(${kNearestRenderSize}, ${kNearestRenderSize});
+ const posBases = array(
+ vec2f(.5, .5), vec2f(.5, -.5), vec2f(-.5, -.5),
+ vec2f(.5, .5), vec2f(-.5, -.5), vec2f(-.5, .5),
+ );
+ // Choose UVs so that the quad ends up being the 6x6 texture.
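+          // (The UV range [-1, 2] spans 3 repeats of the 2x2 texture, i.e. the 6x6 effective region.)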
+ const uvBases = array(
+ vec2f(2., -1.), vec2f(2., 2.), vec2f(-1., 2.),
+ vec2f(2., -1.), vec2f(-1., 2.), vec2f(-1., -1.),
+ );
+
+ let cell = vec2f(f32(ii) % grid.x, floor(f32(ii) / grid.y));
+
+          // Compute the offset of the instance plane (pre-grid transformation).
+ const constantPlaneOffset = vec2f(5. / 12., 5. / 12.);
+ const perPixelOffset = vec2f(1. / 6., 1. / 6.);
+ let posBase = posBases[vi] + constantPlaneOffset - perPixelOffset * cell;
+
+ // Apply the grid transformation.
+ let cellOffset = cell / grid * 2;
+ let absPos = (posBase + 1) / grid - 1 + cellOffset;
+
+ return VertexOut(vec4f(absPos, 0.0, 1.0), uvBases[vi]);
+ }
+
+ @fragment
+ fn fs_main(@location(0) uv : vec2f) -> @location(0) vec4f {
+ return textureSample(t, s, uv);
+ }
+ `,
+ });
+ const vertexCount = 6;
+ const instanceCount = kNearestRenderDim.reduce((sink, current) => sink * current);
+ const render = t.runFilterRenderPipeline(
+ sampler,
+ module,
+ format,
+ kNearestRenderDim,
+ vertexCount,
+ instanceCount
+ );
+ t.expectTexelViewComparisonIsOkInTexture(
+ { texture: render },
+ expectedColors(format, 'nearest', addressModeU, addressModeV),
+ kNearestRenderDim
+ );
+ });
+
+/* The following diagram shows the sub-pixel quad and the relative distances between the sample
+ * points and the origin. The pixel is not shown in this diagram but is a 2x bounding box around the
+ * quad similar to the one in the diagram for minFilter,nearest above. The dark square is where the
+ * cell (0,0) is, and the offsets are all relative to that point.
+ *
+ * 11/32
+ * ┌─────────────┐
+ *
+ * 3/16 5/16 3/16
+ * ┌───────┬───────────┬───────┐
+ *
+ * ┌────┬────┬────┬────┬────┬────┬────┬────┐
+ * │ │ │ │ │ │ │ │ │
+ * │ │ │ │ │ │ │ │ │
+ * ├────┼────┼────┼────┼────┼────┼────┼────┤
+ * │ │□ │ □│ │ │□ │ □│ │ ┐
+ * │ │ │ │ │ │ │ │ │ │
+ * ├────┼────┼────┼────┼────┼────┼────┼────┤ │
+ * │ │ │ │ │ │ │ │ │ │ 1/4
+ * │ │ │ │ │ │ │ │ │ │
+ * ├────┼────┼────┼────┼────┼────┼────┼────┤ │
+ * │ │□ │ □│ │ │□ │ □│ │ ┤
+ * │ │ │ │ │ │ │ │ │ │
+ * ├────┼────┼────┼────x────┼────┼────┼────┤ │ 3/16 ┐
+ * │ │ │ │ │ │ │ │ │ │ │
+ * │ │□ │ □│ │ │□ │ □│ │ ┤ │
+ * ├────┼────┼────┼────┼────┼────┼────┼────┤ │ │
+ * │ │ │ │ │ │ │ │ │ │ │ 11/32
+ * │ │ │ │ │ │ │ │ │ │ 1/4 │
+ * ├────┼────┼────┼────┼────┼────┼────┼────┤ │ │
+ * │ │ │ │ │ │ │ │ │ │ │
+ * │ │■ │ □│ │ │□ │ □│ │ ┘ ┘
+ * ├────┼────┼────┼────┼────┼────┼────┼────┤
+ * │ │ │ │ │ │ │ │ │
+ * │ │ │ │ │ │ │ │ │
+ * └────┴────┴────┴────┴────┴────┴────┴────┘
+ */
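+// The xOffsets/yOffsets in the shader reproduce the spacings shown above (in pre-grid units, where
+// the half-pixel quad spans +/-0.5 and one texel of the mapped 8x8 texture is 1/8 wide): the x
+// steps are 3/16, 5/16, 3/16, the y steps are 1/4, 3/16, 1/4, and the constant (11/32, 11/32)
+// offset places cell (0,0) on the dark sample point.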
+g.test('minFilter,linear')
+ .desc(
+ `
+ Test that for filterable formats, minFilter 'linear' mode correctly modifies the sampling.
+ - format= {<filterable formats>}
+ - addressModeU= {'clamp-to-edge', 'repeat', 'mirror-repeat'}
+ - addressModeV= {'clamp-to-edge', 'repeat', 'mirror-repeat'}
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kRenderableColorTextureFormats)
+ .filter(t => {
+ return (
+ kTextureFormatInfo[t.format].color.type === 'float' ||
+ kTextureFormatInfo[t.format].color.type === 'unfilterable-float'
+ );
+ })
+ .beginSubcases()
+ .combine('addressModeU', kAddressModes)
+ .combine('addressModeV', kAddressModes)
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ if (kTextureFormatInfo[t.params.format].color.type === 'unfilterable-float') {
+ t.selectDeviceOrSkipTestCase('float32-filterable');
+ }
+ })
+ .fn(t => {
+ const { format, addressModeU, addressModeV } = t.params;
+ const sampler = t.device.createSampler({
+ addressModeU,
+ addressModeV,
+ minFilter: 'linear',
+ });
+ const module = t.device.createShaderModule({
+ code: `
+ @group(0) @binding(0) var s : sampler;
+ @group(0) @binding(1) var t : texture_2d<f32>;
+
+ struct VertexOut {
+ @builtin(position) pos: vec4f,
+ @location(0) uv: vec2f,
+ };
+
+ @vertex
+ fn vs_main(@builtin(vertex_index) vi : u32,
+ @builtin(instance_index) ii: u32) -> VertexOut {
+ const grid = vec2f(${kLinearRenderSize}, ${kLinearRenderSize});
+ const posBases = array(
+ vec2f(.5, .5), vec2f(.5, -.5), vec2f(-.5, -.5),
+ vec2f(.5, .5), vec2f(-.5, -.5), vec2f(-.5, .5),
+ );
+ // Choose UVs so that the quad ends up being the 8x8 texture.
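+          // (The UV range [-1.5, 2.5] spans 4 repeats of the 2x2 texture, i.e. the 8x8 effective region.)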
+ const uvBases = array(
+ vec2f(2.5, -1.5), vec2f(2.5, 2.5), vec2f(-1.5, 2.5),
+ vec2f(2.5, -1.5), vec2f(-1.5, 2.5), vec2f(-1.5, -1.5),
+ );
+
+ let cell = vec2f(f32(ii) % grid.x, floor(f32(ii) / grid.y));
+
+          // Compute the offset of the instance plane (pre-grid transformation).
+ const constantPlaneOffset = vec2f(11. / 32., 11. / 32.);
+ const xOffsets = array(0., 3. / 16., 1. / 2., 11. / 16.);
+ const yOffsets = array(0., 1. / 4., 7. / 16., 11. / 16.);
+ let pixelOffset = vec2f(xOffsets[u32(cell.x)], yOffsets[u32(cell.y)]);
+ let posBase = posBases[vi] + constantPlaneOffset - pixelOffset;
+
+          // Apply the grid transformation.
+ let cellOffset = cell / grid * 2;
+ let absPos = (posBase + 1) / grid - 1 + cellOffset;
+
+ return VertexOut(vec4f(absPos, 0.0, 1.0), uvBases[vi]);
+ }
+
+ @fragment
+ fn fs_main(@location(0) uv : vec2f) -> @location(0) vec4f {
+ return textureSample(t, s, uv);
+ }
+ `,
+ });
+ const vertexCount = 6;
+ const instanceCount = kLinearRenderDim.reduce((sink, current) => sink * current);
+ const render = t.runFilterRenderPipeline(
+ sampler,
+ module,
+ format,
+ kLinearRenderDim,
+ vertexCount,
+ instanceCount
+ );
+ t.expectTexelViewComparisonIsOkInTexture(
+ { texture: render },
+ expectedColors(format, 'linear', addressModeU, addressModeV),
+ kLinearRenderDim
+ );
+ });
+
+g.test('mipmapFilter')
+ .desc(
+ `
+  Test that for filterable formats, mipmapFilter modes correctly modify the sampling.
+ - format= {<filterable formats>}
+ - filterMode= {'nearest', 'linear'}
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kRenderableColorTextureFormats)
+ .filter(t => {
+ return (
+ kTextureFormatInfo[t.format].color.type === 'float' ||
+ kTextureFormatInfo[t.format].color.type === 'unfilterable-float'
+ );
+ })
+ .beginSubcases()
+ .combine('filterMode', kMipmapFilterModes)
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ if (kTextureFormatInfo[t.params.format].color.type === 'unfilterable-float') {
+ t.selectDeviceOrSkipTestCase('float32-filterable');
+ }
+ })
+ .fn(t => {
+ const { format, filterMode } = t.params;
+    // Takes an 8x8/4x4 mipmapped texture and renders it on multiple quads with different UVs such
+    // that each instanced quad, from left to right, emulates moving the quad further and further
+    // from the camera. Each quad is then rendered to a single pixel in a 1-dimensional texture.
+    // Since the 8x8 is fully black and the 4x4 is fully white, we should see the pixels increase
+    // in brightness from left to right when sampling linearly, and jump from black to white when
+    // sampling the nearest mip level.
+ const kTextureSize = 8;
+ const kRenderSize = 8;
+
+ const sampler = t.device.createSampler({
+ mipmapFilter: filterMode,
+ });
+ const sampleTexture = t.createTextureFromTexelViewsMultipleMipmaps(
+ [
+ TexelView.fromTexelsAsColors(format, () => {
+ return { R: 0.0, G: 0.0, B: 0.0, A: 1.0 };
+ }),
+ TexelView.fromTexelsAsColors(format, _coords => {
+ return { R: 1.0, G: 1.0, B: 1.0, A: 1.0 };
+ }),
+ ],
+ {
+ size: [kTextureSize, 1],
+ usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_DST,
+ }
+ );
+ const renderTexture = t.device.createTexture({
+ format,
+ size: [kRenderSize, 1],
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ });
+ const module = t.device.createShaderModule({
+ code: `
+ @group(0) @binding(0) var s : sampler;
+ @group(0) @binding(1) var t : texture_2d<f32>;
+
+ struct VertexOut {
+ @builtin(position) pos: vec4f,
+ @location(0) uv: vec2f,
+ };
+
+ @vertex
+ fn vs_main(@builtin(vertex_index) vi : u32,
+ @builtin(instance_index) ii: u32) -> VertexOut {
+ const grid = vec2f(${kRenderSize}., 1.);
+ const pos = array(
+ vec2f( 1.0, 1.0), vec2f( 1.0, -1.0), vec2f(-1.0, -1.0),
+ vec2f( 1.0, 1.0), vec2f(-1.0, -1.0), vec2f(-1.0, 1.0),
+ );
+ const uv = array(
+ vec2f(1., 0.), vec2f(1., 1.), vec2f(0., 1.),
+ vec2f(1., 0.), vec2f(0., 1.), vec2f(0., 0.),
+ );
+
+ // Compute the offset of the plane.
+ let cell = vec2f(f32(ii) % grid.x, 0.);
+ let cellOffset = cell / grid * 2;
+ let absPos = (pos[vi] + 1) / grid - 1 + cellOffset;
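+            // uvFactor scales the quad's UV footprint from 1/8 at ii = 0 (one mip-0 texel per
+            // pixel, LOD 0) up to 2/8 on the last instance (two texels per pixel, LOD 1), so the
+            // sampled LOD sweeps from 0 to 1 across the instances from left to right.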
+ let uvFactor = (1. / 8.) * (1 + (f32(ii) / (grid.x - 1)));
+ return VertexOut(vec4f(absPos, 0.0, 1.0), uv[vi] * uvFactor);
+ }
+
+ @fragment
+ fn fs_main(@location(0) uv : vec2f) -> @location(0) vec4f {
+ return textureSample(t, s, uv);
+ }
+ `,
+ });
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'vs_main',
+ },
+ fragment: {
+ module,
+ entryPoint: 'fs_main',
+ targets: [{ format }],
+ },
+ });
+ const bindgroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: sampler },
+ { binding: 1, resource: sampleTexture.createView() },
+ ],
+ });
+ const commandEncoder = t.device.createCommandEncoder();
+ const renderPass = commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTexture.createView(),
+ clearValue: [0, 0, 0, 0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ renderPass.setPipeline(pipeline);
+ renderPass.setBindGroup(0, bindgroup);
+ renderPass.draw(6, kRenderSize);
+ renderPass.end();
+ t.device.queue.submit([commandEncoder.finish()]);
+
+    // Since mipmap filtering varies across different backends, we copy the result to a buffer and
+    // verify that it exhibits the expected filtering characteristics rather than checking strict
+    // value equality.
+ const buffer = t.copyWholeTextureToNewBufferSimple(renderTexture, 0);
+ t.expectGPUBufferValuesPassCheck(
+ buffer,
+ actual => {
+ // Convert the buffer to texel view so we can do comparisons.
+ const layout = getTextureCopyLayout(format, '2d', [kRenderSize, 1, 1]);
+ const view = TexelView.fromTextureDataByReference(format, actual, {
+ bytesPerRow: layout.bytesPerRow,
+ rowsPerImage: layout.rowsPerImage,
+ subrectOrigin: [0, 0, 0],
+ subrectSize: [kRenderSize, 1, 1],
+ });
+
+        // We only check the R component, since all components present in the format should be
+        // equal.
+ switch (filterMode) {
+ case 'linear': {
+ // For 'linear' mode, we check that the resulting 1d image is monotonically increasing.
+ for (let x = 1; x < kRenderSize; x++) {
+ const { R: Ri } = view.color({ x: x - 1, y: 0, z: 0 });
+ const { R: Rj } = view.color({ x, y: 0, z: 0 });
+ if (Ri! >= Rj!) {
+ return Error(
+ 'Linear filtering on mipmaps should be a monotonically increasing sequence:\n' +
+ view.toString(
+ { x: 0, y: 0, z: 0 },
+ { width: kRenderSize, height: 1, depthOrArrayLayers: 1 }
+ )
+ );
+ }
+ }
+ break;
+ }
+ case 'nearest': {
+ // For 'nearest' mode, we check that the resulting 1d image changes from 0.0 to 1.0
+ // exactly once.
+ let changes = 0;
+ for (let x = 1; x < kRenderSize; x++) {
+ const { R: Ri } = view.color({ x: x - 1, y: 0, z: 0 });
+ const { R: Rj } = view.color({ x, y: 0, z: 0 });
+ if (Ri! !== Rj!) {
+ changes++;
+ }
+ }
+ if (changes !== 1) {
+ return Error(
+                `Nearest filtering on mipmaps should change exactly once but found (${changes}):\n` +
+ view.toString(
+ { x: 0, y: 0, z: 0 },
+ { width: kRenderSize, height: 1, depthOrArrayLayers: 1 }
+ )
+ );
+ }
+ break;
+ }
+ }
+ return undefined;
+ },
+ { srcByteOffset: 0, type: Uint8Array, typedLength: buffer.size }
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/sampling/lod_clamp.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/sampling/lod_clamp.spec.ts
new file mode 100644
index 0000000000..8ef35422dc
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/sampling/lod_clamp.spec.ts
@@ -0,0 +1,12 @@
+export const description = `
+Tests the behavior of LOD clamping (lodMinClamp, lodMaxClamp).
+
+TODO:
+- Write a test that can test the exact clamping behavior
+- Test a bunch of values, including very large/small ones.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/shader_module/compilation_info.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/shader_module/compilation_info.spec.ts
new file mode 100644
index 0000000000..93fa4575c4
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/shader_module/compilation_info.spec.ts
@@ -0,0 +1,264 @@
+export const description = `
+ShaderModule CompilationInfo tests.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { assert } from '../../../../common/util/util.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+const kValidShaderSources = [
+ {
+ valid: true,
+ name: 'ascii',
+ _code: `
+ @vertex fn main() -> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ }`,
+ },
+ {
+ valid: true,
+ name: 'unicode',
+ _code: `
+ // 頂点シェーダー 👩‍💻
+ @vertex fn main() -> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ }`,
+ },
+];
+
+const kInvalidShaderSources = [
+ {
+ valid: false,
+ name: 'ascii',
+ _errorLine: 4,
+ _code: `
+ @vertex fn main() -> @builtin(position) vec4<f32> {
+ // Expected Error: unknown function 'unknown'
+ return unknown(0.0, 0.0, 0.0, 1.0);
+ }`,
+ },
+ {
+ valid: false,
+ name: 'unicode',
+ _errorLine: 5,
+ _code: `
+ // 頂点シェーダー 👩‍💻
+ @vertex fn main() -> @builtin(position) vec4<f32> {
+ // Expected Error: unknown function 'unknown'
+ return unknown(0.0, 0.0, 0.0, 1.0);
+ }`,
+ },
+ {
+ valid: false,
+ name: 'carriage-return',
+ _errorLine: 5,
+ _code:
+ `
+ @vertex fn main() -> @builtin(position) vec4<f32> {` +
+ '\r\n' +
+ `
+ // Expected Error: unknown function 'unknown'
+ return unknown(0.0, 0.0, 0.0, 1.0);
+ }`,
+ },
+];
+
+const kAllShaderSources = [...kValidShaderSources, ...kInvalidShaderSources];
+
+// This is the source the sourcemap refers to.
+const kOriginalSource = new Array(20)
+ .fill(0)
+ .map((_, i) => `original line ${i}`)
+ .join('\n');
+
+const kSourceMaps: { [name: string]: undefined | object } = {
+ none: undefined,
+ empty: {},
+  // A valid source map. It maps `unknown` on lines 4 and 5 to
+  // `wasUnknown` from lines 20 and 21 respectively.
+ valid: {
+ version: 3,
+ sources: ['myCode'],
+ sourcesContent: [kOriginalSource],
+ names: ['myMain', 'wasUnknown'],
+ mappings: ';kBAYkCA,OACd;SAElB;gBAKOC;gBACAA',
+ },
+ // not a valid sourcemap
+ invalid: {
+ version: -123,
+ notAnything: {},
+ },
+  // The correct format, but the data is for lines 11 and 12 even
+  // though the source only has 5 or 6 lines.
+ nonMatching: {
+ version: 3,
+ sources: ['myCode'],
+ sourcesContent: [kOriginalSource],
+ names: ['myMain'],
+ mappings: ';;;;;;;;;;kBAYkCA,OACd;SAElB',
+ },
+};
+const kSourceMapsKeys = keysOf(kSourceMaps);
+
+g.test('getCompilationInfo_returns')
+ .desc(
+ `
+ Test that getCompilationInfo() can be called on any ShaderModule.
+
+ Note: sourcemaps are not used in the WebGPU API. We are only testing that
+  browsers that happen to use them don't fail or crash if the sourcemap is
+ bad or invalid.
+
+ - Test for both valid and invalid shader modules.
+ - Test for shader modules containing only ASCII and those containing unicode characters.
+ - Test that the compilation info for valid shader modules contains no errors.
+ - Test that the compilation info for invalid shader modules contains at least one error.`
+ )
+ .params(u =>
+ u.combineWithParams(kAllShaderSources).beginSubcases().combine('sourceMapName', kSourceMapsKeys)
+ )
+ .fn(async t => {
+ const { _code, valid, sourceMapName } = t.params;
+
+ const shaderModule = t.expectGPUError(
+ 'validation',
+ () => {
+ const sourceMap = kSourceMaps[sourceMapName];
+ return t.device.createShaderModule({ code: _code, ...(sourceMap && { sourceMap }) });
+ },
+ !valid
+ );
+
+ const info = await shaderModule.getCompilationInfo();
+
+ t.expect(
+ info instanceof GPUCompilationInfo,
+ 'Expected a GPUCompilationInfo object to be returned'
+ );
+
+ // Expect that we get zero error messages from a valid shader.
+ // Message types other than errors are OK.
+ let errorCount = 0;
+ for (const message of info.messages) {
+ if (message.type === 'error') {
+ errorCount++;
+ }
+ }
+ if (valid) {
+ t.expect(errorCount === 0, "Expected zero GPUCompilationMessages of type 'error'");
+ } else {
+ t.expect(errorCount > 0, "Expected at least one GPUCompilationMessages of type 'error'");
+ }
+ });
+
+g.test('line_number_and_position')
+ .desc(
+ `
+ Test that line numbers reported by compilationInfo either point at an appropriate line and
+ position or at 0:0, indicating an unknown position.
+
+ Note: sourcemaps are not used in the WebGPU API. We are only testing that
+  browsers that happen to use them don't fail or crash if the sourcemap is
+ bad or invalid.
+
+  - Test for invalid shader modules containing at least one error.
+ - Test for shader modules containing only ASCII and those containing unicode characters.`
+ )
+ .params(u =>
+ u
+ .combineWithParams(kInvalidShaderSources)
+ .beginSubcases()
+ .combine('sourceMapName', kSourceMapsKeys)
+ )
+ .fn(async t => {
+ const { _code, _errorLine, sourceMapName } = t.params;
+
+ const shaderModule = t.expectGPUError('validation', () => {
+ const sourceMap = kSourceMaps[sourceMapName];
+ return t.device.createShaderModule({ code: _code, ...(sourceMap && { sourceMap }) });
+ });
+
+ const info = await shaderModule.getCompilationInfo();
+
+ let foundAppropriateError = false;
+ for (const message of info.messages) {
+ if (message.type === 'error') {
+ // Some backends may not be able to indicate a precise location for the error. In those
+ // cases a line and position of 0 should be reported.
+ // If a line is reported, it should point at the correct line (1-based).
+ t.expect(
+ (message.lineNum === 0) === (message.linePos === 0),
+ "GPUCompilationMessages that don't report a line number should not report a line position."
+ );
+
+ if (message.lineNum === 0 || message.lineNum === _errorLine) {
+ foundAppropriateError = true;
+
+ // Various backends may choose to report the error at different positions within the line,
+ // so it's difficult to meaningfully validate them.
+ break;
+ }
+ }
+ }
+ t.expect(
+ foundAppropriateError,
+ 'Expected to find an error which corresponded with the erroneous line'
+ );
+ });
+
+g.test('offset_and_length')
+ .desc(
+ `Test that message offsets and lengths are valid and align with any reported lineNum and linePos.
+
+ Note: sourcemaps are not used in the WebGPU API. We are only testing that
+  browsers that happen to use them don't fail or crash if the sourcemap is
+ bad or invalid.
+
+ - Test for valid and invalid shader modules.
+ - Test for shader modules containing only ASCII and those containing unicode characters.`
+ )
+ .params(u =>
+ u.combineWithParams(kAllShaderSources).beginSubcases().combine('sourceMapName', kSourceMapsKeys)
+ )
+ .fn(async t => {
+ const { _code, valid, sourceMapName } = t.params;
+
+ const shaderModule = t.expectGPUError(
+ 'validation',
+ () => {
+ const sourceMap = kSourceMaps[sourceMapName];
+ return t.device.createShaderModule({ code: _code, ...(sourceMap && { sourceMap }) });
+ },
+ !valid
+ );
+
+ const info = await shaderModule.getCompilationInfo();
+
+ for (const message of info.messages) {
+ // Any offsets and lengths should reference valid spans of the shader code.
+ t.expect(message.offset <= _code.length, 'Message offset should be within the shader source');
+ t.expect(
+ message.offset + message.length <= _code.length,
+ 'Message offset and length should be within the shader source'
+ );
+
+    // If a valid line number and position are given, the offset should point to the same
+    // location in the shader source.
+ if (message.lineNum !== 0 && message.linePos !== 0) {
+ let lineOffset = 0;
+ for (let i = 0; i < message.lineNum - 1; ++i) {
+ lineOffset = _code.indexOf('\n', lineOffset);
+ assert(lineOffset !== -1);
+ lineOffset += 1;
+ }
+
+ t.expect(
+ message.offset === lineOffset + message.linePos - 1,
+ 'lineNum and linePos should point to the same location as offset'
+ );
+ }
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/texture_view/format_reinterpretation.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/texture_view/format_reinterpretation.spec.ts
new file mode 100644
index 0000000000..c032415327
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/texture_view/format_reinterpretation.spec.ts
@@ -0,0 +1,358 @@
+export const description = `
+Test texture views can reinterpret the format of the original texture.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import {
+ kRenderableColorTextureFormats,
+ kRegularTextureFormats,
+ viewCompatible,
+ EncodableTextureFormat,
+} from '../../../format_info.js';
+import { GPUTest, TextureTestMixin } from '../../../gpu_test.js';
+import { TexelView } from '../../../util/texture/texel_view.js';
+
+export const g = makeTestGroup(TextureTestMixin(GPUTest));
+
+const kColors = [
+ { R: 1.0, G: 0.0, B: 0.0, A: 0.8 },
+ { R: 0.0, G: 1.0, B: 0.0, A: 0.7 },
+ { R: 0.0, G: 0.0, B: 0.0, A: 0.6 },
+ { R: 0.0, G: 0.0, B: 0.0, A: 0.5 },
+ { R: 1.0, G: 1.0, B: 1.0, A: 0.4 },
+ { R: 0.7, G: 0.0, B: 0.0, A: 0.3 },
+ { R: 0.0, G: 0.8, B: 0.0, A: 0.2 },
+ { R: 0.0, G: 0.0, B: 0.9, A: 0.1 },
+ { R: 0.1, G: 0.2, B: 0.0, A: 0.3 },
+ { R: 0.4, G: 0.3, B: 0.6, A: 0.8 },
+];
+
+const kTextureSize = 16;
+
+function makeInputTexelView(format: EncodableTextureFormat) {
+ return TexelView.fromTexelsAsColors(
+ format,
+ coords => {
+ const pixelPos = coords.y * kTextureSize + coords.x;
+ return kColors[pixelPos % kColors.length];
+ },
+ { clampToFormatRange: true }
+ );
+}
+
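+// Creates a fullscreen-quad blit pipeline. 'multisample.sample' selects between a resolving
+// fragment shader (which averages all samples of a multisampled source) and a plain textureLoad
+// blit, while 'multisample.render' sets the pipeline's multisample count for the render target.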
+function makeBlitPipeline(
+ device: GPUDevice,
+ format: GPUTextureFormat,
+ multisample: { sample: number; render: number }
+) {
+ return device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: device.createShaderModule({
+ code: `
+ @vertex fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+ var pos = array<vec2<f32>, 6>(
+ vec2<f32>(-1.0, -1.0),
+ vec2<f32>(-1.0, 1.0),
+ vec2<f32>( 1.0, -1.0),
+ vec2<f32>(-1.0, 1.0),
+ vec2<f32>( 1.0, -1.0),
+ vec2<f32>( 1.0, 1.0));
+ return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module:
+ multisample.sample > 1
+ ? device.createShaderModule({
+ code: `
+ @group(0) @binding(0) var src: texture_multisampled_2d<f32>;
+ @fragment fn main(@builtin(position) coord: vec4<f32>) -> @location(0) vec4<f32> {
+ var result : vec4<f32>;
+ for (var i = 0; i < ${multisample.sample}; i = i + 1) {
+ result = result + textureLoad(src, vec2<i32>(coord.xy), i);
+ }
+ return result * ${1 / multisample.sample};
+ }`,
+ })
+ : device.createShaderModule({
+ code: `
+ @group(0) @binding(0) var src: texture_2d<f32>;
+ @fragment fn main(@builtin(position) coord: vec4<f32>) -> @location(0) vec4<f32> {
+ return textureLoad(src, vec2<i32>(coord.xy), 0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format }],
+ },
+ multisample: {
+ count: multisample.render,
+ },
+ });
+}
+
+g.test('texture_binding')
+ .desc(`Test that a regular texture allocated as 'format' is correctly sampled as 'viewFormat'.`)
+ .params(u =>
+ u //
+ .combine('format', kRegularTextureFormats)
+ .combine('viewFormat', kRegularTextureFormats)
+ .filter(
+ ({ format, viewFormat }) => format !== viewFormat && viewCompatible(format, viewFormat)
+ )
+ )
+ .beforeAllSubcases(t => {
+ const { format, viewFormat } = t.params;
+ t.skipIfTextureFormatNotSupported(format, viewFormat);
+ })
+ .fn(t => {
+ const { format, viewFormat } = t.params;
+
+ // Make an input texel view.
+ const inputTexelView = makeInputTexelView(format);
+
+    // Create the initial texture with the contents of the input texel view.
+ const texture = t.createTextureFromTexelView(inputTexelView, {
+ size: [kTextureSize, kTextureSize],
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ viewFormats: [viewFormat],
+ });
+
+ // Reinterpret the texture as the view format.
+ // Make a texel view of the format that also reinterprets the data.
+ const reinterpretedView = texture.createView({ format: viewFormat });
+ const reinterpretedTexelView = TexelView.fromTexelsAsBytes(viewFormat, inputTexelView.bytes);
+
+ // Create a pipeline to write data out to rgba8unorm.
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({
+ code: `
+ @group(0) @binding(0) var src: texture_2d<f32>;
+ @group(0) @binding(1) var dst: texture_storage_2d<rgba8unorm, write>;
+ @compute @workgroup_size(1, 1) fn main(
+ @builtin(global_invocation_id) global_id: vec3<u32>,
+ ) {
+ var coord = vec2<i32>(global_id.xy);
+ textureStore(dst, coord, textureLoad(src, coord, 0));
+ }`,
+ }),
+ entryPoint: 'main',
+ },
+ });
+
+ // Create an rgba8unorm output texture.
+ const outputTexture = t.trackForCleanup(
+ t.device.createTexture({
+ format: 'rgba8unorm',
+ size: [kTextureSize, kTextureSize],
+ usage: GPUTextureUsage.STORAGE_BINDING | GPUTextureUsage.COPY_SRC,
+ })
+ );
+
+ // Execute a compute pass to load data from the reinterpreted view and
+ // write out to the rgba8unorm texture.
+ const commandEncoder = t.device.createCommandEncoder();
+ const pass = commandEncoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(
+ 0,
+ t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: reinterpretedView,
+ },
+ {
+ binding: 1,
+ resource: outputTexture.createView(),
+ },
+ ],
+ })
+ );
+ pass.dispatchWorkgroups(kTextureSize, kTextureSize);
+ pass.end();
+ t.device.queue.submit([commandEncoder.finish()]);
+
+ t.expectTexelViewComparisonIsOkInTexture(
+ { texture: outputTexture },
+ TexelView.fromTexelsAsColors('rgba8unorm', reinterpretedTexelView.color, {
+ clampToFormatRange: true,
+ }),
+ [kTextureSize, kTextureSize]
+ );
+ });
+
+g.test('render_and_resolve_attachment')
+ .desc(
+ `Test that a color render attachment allocated as 'format' is correctly rendered to as 'viewFormat',
+and resolved to an attachment allocated as 'format' viewed as 'viewFormat'.
+
+Other combinations aren't possible because the render and resolve targets must both match
+in view format and match in base format.`
+ )
+ .params(u =>
+ u //
+ .combine('format', kRenderableColorTextureFormats)
+ .combine('viewFormat', kRenderableColorTextureFormats)
+ .filter(
+ ({ format, viewFormat }) => format !== viewFormat && viewCompatible(format, viewFormat)
+ )
+ .combine('sampleCount', [1, 4])
+ )
+ .beforeAllSubcases(t => {
+ const { format, viewFormat } = t.params;
+ t.skipIfTextureFormatNotSupported(format, viewFormat);
+ })
+ .fn(t => {
+ const { format, viewFormat, sampleCount } = t.params;
+
+ // Make an input texel view.
+ const inputTexelView = makeInputTexelView(format);
+
+ // Create the renderTexture as |format|.
+ const renderTexture = t.trackForCleanup(
+ t.device.createTexture({
+ format,
+ size: [kTextureSize, kTextureSize],
+ usage:
+ GPUTextureUsage.RENDER_ATTACHMENT |
+ (sampleCount > 1 ? GPUTextureUsage.TEXTURE_BINDING : GPUTextureUsage.COPY_SRC),
+ viewFormats: [viewFormat],
+ sampleCount,
+ })
+ );
+
+ const resolveTexture =
+ sampleCount === 1
+ ? undefined
+ : t.trackForCleanup(
+ t.device.createTexture({
+ format,
+ size: [kTextureSize, kTextureSize],
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ viewFormats: [viewFormat],
+ })
+ );
+
+ // Create the sample source with the contents of the input texel view.
+ // We will sample this texture into |renderTexture|. It uses the same format to keep the same
+ // number of bits of precision.
+ const sampleSource = t.createTextureFromTexelView(inputTexelView, {
+ size: [kTextureSize, kTextureSize],
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ });
+
+ // Reinterpret the renderTexture as |viewFormat|.
+ const reinterpretedRenderView = renderTexture.createView({ format: viewFormat });
+ const reinterpretedResolveView =
+ resolveTexture && resolveTexture.createView({ format: viewFormat });
+
+ // Create a pipeline to blit a src texture to the render attachment.
+ const pipeline = makeBlitPipeline(t.device, viewFormat, {
+ sample: 1,
+ render: sampleCount,
+ });
+
+ // Execute a render pass to sample |sampleSource| into |texture| viewed as |viewFormat|.
+ const commandEncoder = t.device.createCommandEncoder();
+ const pass = commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: reinterpretedRenderView,
+ resolveTarget: reinterpretedResolveView,
+ loadOp: 'load',
+ storeOp: 'store',
+ },
+ ],
+ });
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(
+ 0,
+ t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: sampleSource.createView(),
+ },
+ ],
+ })
+ );
+ pass.draw(6);
+ pass.end();
+
+ // If the render target is multisampled, we'll manually resolve it to check
+ // the contents.
+ const singleSampleRenderTexture = resolveTexture
+ ? t.trackForCleanup(
+ t.device.createTexture({
+ format,
+ size: [kTextureSize, kTextureSize],
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ })
+ )
+ : renderTexture;
+
+ if (resolveTexture) {
+ // Create a pipeline to blit the multisampled render texture to a non-multisample texture.
+ // We are basically performing a manual resolve step to the same format as the original
+ // render texture to check its contents.
+ const pipeline = makeBlitPipeline(t.device, format, { sample: sampleCount, render: 1 });
+ const pass = commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: singleSampleRenderTexture.createView(),
+ loadOp: 'load',
+ storeOp: 'store',
+ },
+ ],
+ });
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(
+ 0,
+ t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: renderTexture.createView(),
+ },
+ ],
+ })
+ );
+ pass.draw(6);
+ pass.end();
+ }
+
+ // Submit the commands.
+ t.device.queue.submit([commandEncoder.finish()]);
+
+ // Check the rendered contents.
+ const renderViewTexels = TexelView.fromTexelsAsColors(viewFormat, inputTexelView.color, {
+ clampToFormatRange: true,
+ });
+ t.expectTexelViewComparisonIsOkInTexture(
+ { texture: singleSampleRenderTexture },
+ renderViewTexels,
+ [kTextureSize, kTextureSize],
+ { maxDiffULPsForNormFormat: 2 }
+ );
+
+ // Check the resolved contents.
+ if (resolveTexture) {
+ const resolveView = TexelView.fromTexelsAsColors(viewFormat, renderViewTexels.color, {
+ clampToFormatRange: true,
+ });
+ t.expectTexelViewComparisonIsOkInTexture(
+ { texture: resolveTexture },
+ resolveView,
+ [kTextureSize, kTextureSize],
+ { maxDiffULPsForNormFormat: 2 }
+ );
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/texture_view/read.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/texture_view/read.spec.ts
new file mode 100644
index 0000000000..ce2e5055a8
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/texture_view/read.spec.ts
@@ -0,0 +1,56 @@
+export const description = `
+Test the result of reading textures through texture views with various options.
+
+All x= every possible view read method: {
+ - {unfiltered, filtered (if valid), comparison (if valid)} sampling
+ - storage read {vertex, fragment, compute}
+ - no-op render pass that loads and then stores
+ - depth comparison
+ - stencil comparison
+}
+
+Format reinterpretation is not tested here. It is in format_reinterpretation.spec.ts.
+
+TODO: Write helper for this if not already available (see resource_init, buffer_sync_test for related code).
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('format')
+ .desc(
+ `Views of every allowed format.
+
+- x= every texture format
+- x= sampleCount {1, 4} if valid
+- x= every possible view read method (see above)
+`
+ )
+ .unimplemented();
+
+g.test('dimension')
+ .desc(
+ `Views of every allowed dimension.
+
+- x= a representative subset of formats
+- x= {every texture dimension} x {every valid view dimension}
+ (per gpuweb#79 no dimension-count reinterpretations, like 2d-array <-> 3d, are possible)
+- x= sampleCount {1, 4} if valid
+- x= every possible view read method (see above)
+`
+ )
+ .unimplemented();
+
+g.test('aspect')
+ .desc(
+ `Views of every allowed aspect of depth/stencil textures.
+
+- x= every depth/stencil format
+- x= {"all", "stencil-only", "depth-only"} where valid for the format
+- x= sampleCount {1, 4} if valid
+- x= every possible view read method (see above)
+`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/texture_view/write.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/texture_view/write.spec.ts
new file mode 100644
index 0000000000..0340121334
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/texture_view/write.spec.ts
@@ -0,0 +1,54 @@
+export const description = `
+Test the result of writing textures through texture views with various options.
+
+All x= every possible view write method: {
+ - storage write {fragment, compute}
+ - render pass store
+ - render pass resolve
+}
+
+Format reinterpretation is not tested here. It is in format_reinterpretation.spec.ts.
+
+TODO: Write helper for this if not already available (see resource_init, buffer_sync_test for related code).
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('format')
+ .desc(
+ `Views of every allowed format.
+
+- x= every texture format
+- x= sampleCount {1, 4} if valid
+- x= every possible view write method (see above)
+`
+ )
+ .unimplemented();
+
+g.test('dimension')
+ .desc(
+ `Views of every allowed dimension.
+
+- x= a representative subset of formats
+- x= {every texture dimension} x {every valid view dimension}
+ (per gpuweb#79 no dimension-count reinterpretations, like 2d-array <-> 3d, are possible)
+- x= sampleCount {1, 4} if valid
+- x= every possible view write method (see above)
+`
+ )
+ .unimplemented();
+
+g.test('aspect')
+ .desc(
+ `Views of every allowed aspect of depth/stencil textures.
+
+- x= every depth/stencil format
+- x= {"all", "stencil-only", "depth-only"} where valid for the format
+- x= sampleCount {1, 4} if valid
+- x= every possible view write method (see above)
+`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/threading/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/threading/README.txt
new file mode 100644
index 0000000000..caccf6f69d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/threading/README.txt
@@ -0,0 +1,11 @@
+Tests for behavior with multiple threads (main thread + workers).
+
+TODO: plan and implement
+- 'postMessage'
+ Try postMessage'ing an object of every type (to same or different thread)
+ - {main -> main, main -> worker, worker -> main, worker1 -> worker1, worker1 -> worker2}
+ - through {global postMessage, MessageChannel}
+ - {in, not in} transferrable object list, when valid
+- 'concurrency'
+ Short tight loop doing many of an action from two threads at the same time
+ - e.g. {create {buffer, texture, shader, pipeline}}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/uncapturederror.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/uncapturederror.spec.ts
new file mode 100644
index 0000000000..c957f55fb3
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/uncapturederror.spec.ts
@@ -0,0 +1,34 @@
+export const description = `
+Tests for GPUDevice.onuncapturederror.
+`;
+
+import { Fixture } from '../../../common/framework/fixture.js';
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+
+export const g = makeTestGroup(Fixture);
+
+g.test('constructor')
+ .desc(
+ `GPUUncapturedErrorEvent constructor options (also tests constructing GPUOutOfMemoryError/GPUValidationError)`
+ )
+ .unimplemented();
+
+g.test('iff_uncaptured')
+ .desc(
+ `{validation, out-of-memory} error should fire uncapturederror iff not captured by a scope.`
+ )
+ .unimplemented();
+
+g.test('only_original_device_is_event_target')
+ .desc(
+ `Original GPUDevice objects are EventTargets and have onuncapturederror, but
+deserialized GPUDevices do not.`
+ )
+ .unimplemented();
+
+g.test('uncapturederror_from_non_originating_thread')
+ .desc(
+ `Uncaptured errors on any thread should always propagate to the original GPUDevice object
+(since deserialized ones don't have EventTarget/onuncapturederror).`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/vertex_state/correctness.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/vertex_state/correctness.spec.ts
new file mode 100644
index 0000000000..61a0afa3f0
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/vertex_state/correctness.spec.ts
@@ -0,0 +1,1180 @@
+export const description = `
+TODO: Test more corner case values for Float16 / Float32 (INF, NaN, ...) and reduce the
+float tolerance.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import {
+ assert,
+ filterUniqueValueTestVariants,
+ makeValueTestVariant,
+ memcpy,
+ unreachable,
+} from '../../../../common/util/util.js';
+import {
+ kPerStageBindingLimits,
+ kVertexFormatInfo,
+ kVertexFormats,
+} from '../../../capability_info.js';
+import { GPUTest } from '../../../gpu_test.js';
+import { float32ToFloat16Bits, normalizedIntegerAsFloat } from '../../../util/conversion.js';
+import { align, clamp } from '../../../util/math.js';
+
+// These types mirror the structure of GPUVertexBufferLayout but allow defining the extra
+// dictionary members at the GPUVertexBufferLayout and GPUVertexAttribute level. They are used
+// like so:
+//
+// VertexState<{arrayStride: number}, {format: VertexFormat}>
+// VertexBuffer<{arrayStride: number}, {format: VertexFormat}>
+// VertexAttrib<{format: VertexFormat}>
+type VertexAttrib<A> = A & { shaderLocation: number };
+type VertexBuffer<V, A> = V & {
+ slot: number;
+ attributes: VertexAttrib<A>[];
+};
+type VertexState<V, A> = VertexBuffer<V, A>[];
+
+type VertexLayoutState<V, A> = VertexState<
+ { stepMode: GPUVertexStepMode; arrayStride: number } & V,
+ { format: GPUVertexFormat; offset: number } & A
+>;
+
+function mapBufferAttribs<V, A1, A2>(
+ buffer: VertexBuffer<V, A1>,
+ f: (v: V, a: VertexAttrib<A1>) => A2
+): VertexBuffer<V, A2> {
+ const newAttributes: VertexAttrib<A2>[] = [];
+ for (const a of buffer.attributes) {
+ newAttributes.push({
+ shaderLocation: a.shaderLocation,
+ ...f(buffer, a),
+ });
+ }
+
+ return { ...buffer, attributes: newAttributes };
+}
+
+function mapStateAttribs<V, A1, A2>(
+ buffers: VertexState<V, A1>,
+ f: (v: V, a: VertexAttrib<A1>) => A2
+): VertexState<V, A2> {
+ return buffers.map(b => mapBufferAttribs(b, f));
+}
+
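+// Packs four unsigned components into a single rgb10a2 value: r in bits 0-9, g in bits 10-19,
+// b in bits 20-29, and a in bits 30-31.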
+function makeRgb10a2(rgba: Array<number>): number {
+ const [r, g, b, a] = rgba;
+ assert((r & 0x3ff) === r);
+ assert((g & 0x3ff) === g);
+ assert((b & 0x3ff) === b);
+ assert((a & 0x3) === a);
+ return r | (g << 10) | (b << 20) | (a << 30);
+}
+
+function normalizeRgb10a2(rgba: number, index: number): number {
+ const normalizationFactor = index % 4 === 3 ? 3 : 1023;
+ return rgba / normalizationFactor;
+}
+
+type TestData = {
+ shaderBaseType: string;
+ floatTolerance?: number;
+ // The number of vertex components in the vertexData (expectedData might contain more because
+ // it is padded to 4 components).
+ testComponentCount: number;
+ // The data that will be in the uniform buffer and used to check the vertex inputs.
+ expectedData: ArrayBuffer;
+ // The data that will be in the vertex buffer.
+ vertexData: ArrayBuffer;
+};
+
+class VertexStateTest extends GPUTest {
+  // Generates a VS + FS pair (entrypoints vsMain / fsMain) that, for each attribute, checks that
+  // its value corresponds to what's expected (as provided by a uniform buffer per attribute) and
+  // then renders each vertex at position (vertexIndex, instanceIndex) with either 1 (success) or
+  // a negative number corresponding to the index of the failing check (in case you need to debug
+  // a failure).
+ makeTestWGSL(
+ buffers: VertexState<
+ { stepMode: GPUVertexStepMode },
+ {
+ format: GPUVertexFormat;
+ shaderBaseType: string;
+ shaderComponentCount?: number;
+ floatTolerance?: number;
+ }
+ >,
+ vertexCount: number,
+ instanceCount: number
+ ): string {
+ // In the base WebGPU spec maxVertexAttributes is larger than maxUniformBufferPerStage. We'll
+ // use a combination of uniform and storage buffers to cover all possible attributes. This
+ // happens to work because maxUniformBuffer + maxStorageBuffer = 12 + 8 = 20 which is larger
+ // than maxVertexAttributes = 16.
+ // However this might not work in the future for implementations that allow even more vertex
+ // attributes so there will need to be larger changes when that happens.
+ const maxUniformBuffers = this.getDefaultLimit(kPerStageBindingLimits['uniformBuf'].maxLimit);
+ assert(
+ maxUniformBuffers + this.getDefaultLimit(kPerStageBindingLimits['storageBuf'].maxLimit) >=
+ this.device.limits.maxVertexAttributes
+ );
+
+ let vsInputs = '';
+ let vsChecks = '';
+ let vsBindings = '';
+
+ for (const b of buffers) {
+ for (const a of b.attributes) {
+ const format = kVertexFormatInfo[a.format];
+ const shaderComponentCount = a.shaderComponentCount ?? format.componentCount;
+ const i = a.shaderLocation;
+
+ // shaderType is either a scalar type like f32 or a vecN<scalarType>
+ let shaderType = a.shaderBaseType;
+ if (shaderComponentCount !== 1) {
+ shaderType = `vec${shaderComponentCount}<${shaderType}>`;
+ }
+
+ let maxCount = `${vertexCount}`;
+ let indexBuiltin = `input.vertexIndex`;
+ if (b.stepMode === 'instance') {
+ maxCount = `${instanceCount}`;
+ indexBuiltin = `input.instanceIndex`;
+ }
+
+ // Start using storage buffers when we run out of uniform buffers.
+ let storageType = 'uniform';
+ if (i >= maxUniformBuffers) {
+ storageType = 'storage, read';
+ }
+
+ vsInputs += ` @location(${i}) attrib${i} : ${shaderType},\n`;
+ vsBindings += `struct S${i} { data : array<vec4<${a.shaderBaseType}>, ${maxCount}> };\n`;
+ vsBindings += `@group(0) @binding(${i}) var<${storageType}> providedData${i} : S${i};\n`;
+
+ // Generate all the checks for this attribute's components.
+ for (let component = 0; component < shaderComponentCount; component++) {
+ // Components are filled with (0, 0, 0, 1) if they aren't provided data from the pipeline.
+ if (component >= format.componentCount) {
+ const expected = component === 3 ? '1' : '0';
+ vsChecks += ` check(input.attrib${i}[${component}] == ${a.shaderBaseType}(${expected}));\n`;
+ continue;
+ }
+
+ // Check each component individually, with special handling of tolerance for floats.
+ const attribComponent =
+ shaderComponentCount === 1 ? `input.attrib${i}` : `input.attrib${i}[${component}]`;
+ const providedData = `providedData${i}.data[${indexBuiltin}][${component}]`;
+ if (format.type === 'uint' || format.type === 'sint') {
+ vsChecks += ` check(${attribComponent} == ${providedData});\n`;
+ } else {
+ vsChecks += ` check(floatsSimilar(${attribComponent}, ${providedData}, f32(${
+ a.floatTolerance ?? 0
+ })));\n`;
+ }
+ }
+ }
+ }
+
+ return `
+struct Inputs {
+${vsInputs}
+ @builtin(vertex_index) vertexIndex: u32,
+ @builtin(instance_index) instanceIndex: u32,
+};
+
+${vsBindings}
+
+var<private> vsResult : i32 = 1;
+var<private> checkIndex : i32 = 0;
+fn check(success : bool) {
+ if (!success) {
+ vsResult = -checkIndex;
+ }
+ checkIndex = checkIndex + 1;
+}
+
+fn floatsSimilar(a : f32, b : f32, tolerance : f32) -> bool {
+ // Note: -0.0 and 0.0 have different bit patterns, but compare as equal.
+ return abs(a - b) < tolerance;
+}
+
+fn doTest(input : Inputs) {
+${vsChecks}
+}
+
+struct VSOutputs {
+ @location(0) @interpolate(flat) result : i32,
+ @builtin(position) position : vec4<f32>,
+};
+
+@vertex fn vsMain(input : Inputs) -> VSOutputs {
+ doTest(input);
+
+ // Place that point at pixel (vertexIndex, instanceIndex) in a framebuffer of size
+ // (vertexCount, instanceCount).
+ var output : VSOutputs;
+ output.position = vec4<f32>(
+ ((f32(input.vertexIndex) + 0.5) / ${vertexCount}.0 * 2.0) - 1.0,
+ ((f32(input.instanceIndex) + 0.5) / ${instanceCount}.0 * 2.0) - 1.0,
+ 0.0, 1.0
+ );
+ output.result = vsResult;
+ return output;
+}
+
+@fragment fn fsMain(@location(0) @interpolate(flat) result : i32)
+ -> @location(0) i32 {
+ return result;
+}
+ `;
+ }
+
+ makeTestPipeline(
+ buffers: VertexState<
+ { stepMode: GPUVertexStepMode; arrayStride: number },
+ {
+ offset: number;
+ format: GPUVertexFormat;
+ shaderBaseType: string;
+ shaderComponentCount?: number;
+ floatTolerance?: number;
+ }
+ >,
+ vertexCount: number,
+ instanceCount: number
+ ): GPURenderPipeline {
+ const module = this.device.createShaderModule({
+ code: this.makeTestWGSL(buffers, vertexCount, instanceCount),
+ });
+
+ const bufferLayouts: GPUVertexBufferLayout[] = [];
+ for (const b of buffers) {
+ bufferLayouts[b.slot] = b;
+ }
+
+ return this.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'vsMain',
+ buffers: bufferLayouts,
+ },
+ primitive: {
+ topology: 'point-list',
+ },
+ fragment: {
+ module,
+ entryPoint: 'fsMain',
+ targets: [
+ {
+ format: 'r32sint',
+ },
+ ],
+ },
+ });
+ }
+
+ // Runs the render pass drawing points in a vertexCount*instanceCount rectangle, then checks that
+ // each of them produced a value of 1, which means that the tests in the shader passed.
+ submitRenderPass(
+ pipeline: GPURenderPipeline,
+ buffers: VertexState<{ buffer: GPUBuffer; vbOffset?: number }, {}>,
+ expectedData: GPUBindGroup,
+ vertexCount: number,
+ instanceCount: number
+ ) {
+ const testTexture = this.device.createTexture({
+ format: 'r32sint',
+ size: [vertexCount, instanceCount],
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ });
+
+ const encoder = this.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: testTexture.createView(),
+ clearValue: [0, 0, 0, 0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, expectedData);
+ for (const buffer of buffers) {
+ pass.setVertexBuffer(buffer.slot, buffer.buffer, buffer.vbOffset ?? 0);
+ }
+ pass.draw(vertexCount, instanceCount);
+ pass.end();
+
+ this.device.queue.submit([encoder.finish()]);
+
+ this.expectSingleColor(testTexture, 'r32sint', {
+ size: [vertexCount, instanceCount, 1],
+ exp: { R: 1 },
+ });
+ }
+
+ // Generate TestData for the format with interesting test values.
+ // MAINTENANCE_TODO cache the result on the fixture?
+ // Note that the test data always starts with an interesting value, so that using the first
+ // test value in a test is still meaningful.
+ generateTestData(format: GPUVertexFormat): TestData {
+ const formatInfo = kVertexFormatInfo[format];
+ const bitSize =
+ formatInfo.bytesPerComponent === 'packed' ? 0 : formatInfo.bytesPerComponent * 8;
+
+ switch (formatInfo.type) {
+ case 'float': {
+ // -0.0 and +0.0 have different bit patterns, but compare as equal.
+ const data = [42.42, 0.0, -0.0, 1.0, -1.0, 1000, -18.7, 25.17];
+ const expectedData = new Float32Array(data).buffer;
+ const vertexData =
+ bitSize === 32
+ ? expectedData
+ : bitSize === 16
+ ? new Uint16Array(data.map(float32ToFloat16Bits)).buffer
+ : unreachable();
+
+ return {
+ shaderBaseType: 'f32',
+ testComponentCount: data.length,
+ expectedData,
+ vertexData,
+ floatTolerance: 0.05,
+ };
+ }
+
+ case 'sint': {
+ /* prettier-ignore */
+ const data = [
+ 42,
+ 0, 1, 2, 3, 4, 5,
+ -1, -2, -3, -4, -5,
+ Math.pow(2, bitSize - 2),
+ Math.pow(2, bitSize - 1) - 1, // max value
+ -Math.pow(2, bitSize - 2),
+ -Math.pow(2, bitSize - 1), // min value
+ ];
+ const expectedData = new Int32Array(data).buffer;
+ const vertexData =
+ bitSize === 32
+ ? expectedData
+ : bitSize === 16
+ ? new Int16Array(data).buffer
+ : new Int8Array(data).buffer;
+
+ return {
+ shaderBaseType: 'i32',
+ testComponentCount: data.length,
+ expectedData,
+ vertexData,
+ };
+ }
+
+ case 'uint': {
+ /* prettier-ignore */
+ const data = [
+ 42,
+ 0, 1, 2, 3, 4, 5,
+ Math.pow(2, bitSize - 1),
+ Math.pow(2, bitSize) - 1, // max value
+ ];
+ const expectedData = new Uint32Array(data).buffer;
+ const vertexData =
+ bitSize === 32
+ ? expectedData
+ : bitSize === 16
+ ? new Uint16Array(data).buffer
+ : new Uint8Array(data).buffer;
+
+ return {
+ shaderBaseType: 'u32',
+ testComponentCount: data.length,
+ expectedData,
+ vertexData,
+ };
+ }
+
+ case 'snorm': {
+ /* prettier-ignore */
+ const data = [
+ 42,
+ 0, 1, 2, 3, 4, 5,
+ -1, -2, -3, -4, -5,
+ Math.pow(2,bitSize - 2),
+ Math.pow(2,bitSize - 1) - 1, // max value
+ -Math.pow(2,bitSize - 2),
+ -Math.pow(2,bitSize - 1), // min value
+ ];
+ const vertexData =
+ bitSize === 16
+ ? new Int16Array(data).buffer
+ : bitSize === 8
+ ? new Int8Array(data).buffer
+ : unreachable();
+
+ return {
+ shaderBaseType: 'f32',
+ testComponentCount: data.length,
+ expectedData: new Float32Array(data.map(v => normalizedIntegerAsFloat(v, bitSize, true)))
+ .buffer,
+ vertexData,
+ floatTolerance: 0.1 * normalizedIntegerAsFloat(1, bitSize, true),
+ };
+ }
+
+ case 'unorm': {
+ if (formatInfo.bytesPerComponent === 'packed') {
+ assert(format === 'unorm10-10-10-2'); // This is the only packed format for now.
+ assert(bitSize === 0);
+
+ /* prettier-ignore */
+ const data = [
+ [ 0, 0, 0, 0],
+ [1023, 1023, 1023, 3],
+ [ 243, 567, 765, 2],
+ ];
+ const vertexData = new Uint32Array(data.map(makeRgb10a2)).buffer;
+ const expectedData = new Float32Array(data.flat().map(normalizeRgb10a2)).buffer;
+
+ return {
+ shaderBaseType: 'f32',
+ testComponentCount: data.flat().length,
+ expectedData,
+ vertexData,
+ floatTolerance: 0.1 / 1023,
+ };
+ }
+
+ /* prettier-ignore */
+ const data = [
+ 42,
+ 0, 1, 2, 3, 4, 5,
+ Math.pow(2, bitSize - 1),
+ Math.pow(2, bitSize) - 1, // max value
+ ];
+ const vertexData =
+ bitSize === 16
+ ? new Uint16Array(data).buffer
+ : bitSize === 8
+ ? new Uint8Array(data).buffer
+ : unreachable();
+
+ return {
+ shaderBaseType: 'f32',
+ testComponentCount: data.length,
+ expectedData: new Float32Array(data.map(v => normalizedIntegerAsFloat(v, bitSize, false)))
+ .buffer,
+ vertexData,
+ floatTolerance: 0.1 * normalizedIntegerAsFloat(1, bitSize, false),
+ };
+ }
+ }
+ }
+
+ // The TestData generated for a format might not contain enough data for all the vertices we are
+ // going to draw, so we expand it by appending additional copies of the vertexData as needed.
+ // expectedData is a bit different because it also needs to be unpacked so that its
+ // `componentCount` components occupy the start of each group of 4 components (because the
+ // shader reads the expected data as vec4).
+ expandTestData(data: TestData, maxCount: number, componentCount: number): TestData {
+ const vertexComponentSize = data.vertexData.byteLength / data.testComponentCount;
+ const expectedComponentSize = data.expectedData.byteLength / data.testComponentCount;
+
+ const expandedVertexData = new Uint8Array(maxCount * componentCount * vertexComponentSize);
+ const expandedExpectedData = new Uint8Array(4 * maxCount * expectedComponentSize);
+
+ for (let index = 0; index < maxCount; index++) {
+ for (let component = 0; component < componentCount; component++) {
+ // If only we had some builtin JS memcpy function between ArrayBuffers...
+ const targetVertexOffset = (index * componentCount + component) * vertexComponentSize;
+ const sourceVertexOffset = targetVertexOffset % data.vertexData.byteLength;
+ memcpy(
+ { src: data.vertexData, start: sourceVertexOffset, length: vertexComponentSize },
+ { dst: expandedVertexData, start: targetVertexOffset }
+ );
+
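+ // The expected data is written with a stride of 4 components per index because the shader
+ // reads it as array<vec4<T>>; components beyond `componentCount` keep the Uint8Array's
+ // zero-initialized value and are never read by the shader checks.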
+ const targetExpectedOffset = (index * 4 + component) * expectedComponentSize;
+ const sourceExpectedOffset =
+ ((index * componentCount + component) * expectedComponentSize) %
+ data.expectedData.byteLength;
+ memcpy(
+ { src: data.expectedData, start: sourceExpectedOffset, length: expectedComponentSize },
+ { dst: expandedExpectedData, start: targetExpectedOffset }
+ );
+ }
+ }
+
+ return {
+ shaderBaseType: data.shaderBaseType,
+ testComponentCount: maxCount * componentCount,
+ floatTolerance: data.floatTolerance,
+ expectedData: expandedExpectedData.buffer,
+ vertexData: expandedVertexData.buffer,
+ };
+ }
+
+ // Copies `size`-byte chunks from `src` into `target`, writing one chunk every `targetStride`
+ // bytes starting at `offset`. (The data in `src` is assumed to be tightly packed.)
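+ // For example, with targetStride = 16, offset = 4 and size = 8, bytes [0, 8) of `src` end up at
+ // [4, 12) of `target`, bytes [8, 16) at [20, 28), and so on.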
+ interleaveVertexDataInto(
+ target: ArrayBuffer,
+ src: ArrayBuffer,
+ { targetStride, offset, size }: { targetStride: number; offset: number; size: number }
+ ) {
+ const dst = new Uint8Array(target);
+ for (
+ let srcStart = 0, dstStart = offset;
+ srcStart < src.byteLength;
+ srcStart += size, dstStart += targetStride
+ ) {
+ memcpy({ src, start: srcStart, length: size }, { dst, start: dstStart });
+ }
+ }
+
+ createTestAndPipelineData<V, A>(
+ state: VertexLayoutState<V, A>,
+ vertexCount: number,
+ instanceCount: number
+ ): VertexLayoutState<V, A & TestData> {
+ // Gather the test data and some additional test state for attribs.
+ return mapStateAttribs(state, (buffer, attrib) => {
+ const maxCount = buffer.stepMode === 'instance' ? instanceCount : vertexCount;
+ const formatInfo = kVertexFormatInfo[attrib.format];
+
+ let testData = this.generateTestData(attrib.format);
+ testData = this.expandTestData(testData, maxCount, formatInfo.componentCount);
+
+ return {
+ ...testData,
+ ...attrib,
+ };
+ });
+ }
+
+ createExpectedBG(state: VertexState<{}, TestData>, pipeline: GPURenderPipeline): GPUBindGroup {
+ // Create the bindgroups from that test data
+ const bgEntries: GPUBindGroupEntry[] = [];
+
+ for (const buffer of state) {
+ for (const attrib of buffer.attributes) {
+ const expectedDataBuffer = this.makeBufferWithContents(
+ new Uint8Array(attrib.expectedData),
+ GPUBufferUsage.UNIFORM | GPUBufferUsage.STORAGE
+ );
+ bgEntries.push({
+ binding: attrib.shaderLocation,
+ resource: { buffer: expectedDataBuffer },
+ });
+ }
+ }
+
+ return this.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: bgEntries,
+ });
+ }
+
+ createVertexBuffers(
+ state: VertexLayoutState<{ vbOffset?: number }, TestData>,
+ vertexCount: number,
+ instanceCount: number
+ ): VertexState<{ buffer: GPUBuffer; vbOffset?: number }, {}> {
+ // Create the vertex buffers
+ const vertexBuffers: VertexState<{ buffer: GPUBuffer; vbOffset?: number }, {}> = [];
+
+ for (const buffer of state) {
+ const maxCount = buffer.stepMode === 'instance' ? instanceCount : vertexCount;
+
+ // Fill the vertex data with garbage so that we don't get `0` (which could be a test value)
+ // if the vertex shader loads the vertex data incorrectly.
+ const vertexData = new ArrayBuffer(
+ align(buffer.arrayStride * maxCount + (buffer.vbOffset ?? 0), 4)
+ );
+ new Uint8Array(vertexData).fill(0xc4);
+
+ for (const attrib of buffer.attributes) {
+ const formatInfo = kVertexFormatInfo[attrib.format];
+ this.interleaveVertexDataInto(vertexData, attrib.vertexData, {
+ targetStride: buffer.arrayStride,
+ offset: (buffer.vbOffset ?? 0) + attrib.offset,
+ size: formatInfo.byteSize,
+ });
+ }
+
+ vertexBuffers.push({
+ slot: buffer.slot,
+ buffer: this.makeBufferWithContents(new Uint8Array(vertexData), GPUBufferUsage.VERTEX),
+ vbOffset: buffer.vbOffset,
+ attributes: [],
+ });
+ }
+
+ return vertexBuffers;
+ }
+
+ runTest(
+ buffers: VertexLayoutState<{ vbOffset?: number }, { shaderComponentCount?: number }>,
+ // Default to using 20 vertices and 20 instances so that we cover each of the test data at least
+ // once (at the time of writing the largest testData has 16 values).
+ vertexCount: number = 20,
+ instanceCount: number = 20
+ ) {
+ const testData = this.createTestAndPipelineData(buffers, vertexCount, instanceCount);
+ const pipeline = this.makeTestPipeline(testData, vertexCount, instanceCount);
+ const expectedDataBG = this.createExpectedBG(testData, pipeline);
+ const vertexBuffers = this.createVertexBuffers(testData, vertexCount, instanceCount);
+ this.submitRenderPass(pipeline, vertexBuffers, expectedDataBG, vertexCount, instanceCount);
+ }
+}
+
+export const g = makeTestGroup(VertexStateTest);
+
+g.test('vertex_format_to_shader_format_conversion')
+ .desc(
+ `Test that the raw data passed in vertex buffers is correctly converted to the input type in the shader. Test for:
+ - all formats
+ - 1 to 4 components in the shader's input type (unused components are filled with 0, except the 4th which is filled with 1)
+ - various locations
+ - various slots`
+ )
+ .params(u =>
+ u //
+ .combine('format', kVertexFormats)
+ .combine('shaderComponentCount', [1, 2, 3, 4])
+ .beginSubcases()
+ .combine('slotVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: -1 },
+ ])
+ .combine('shaderLocationVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: -1 },
+ ])
+ )
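+ // Note: the *Variant parameters are resolved against the device limit, roughly as
+ // mult * limit + add, so { mult: 1, add: -1 } selects the last valid slot / shader location.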
+ .fn(t => {
+ const { format, shaderComponentCount, slotVariant, shaderLocationVariant } = t.params;
+ const slot = t.makeLimitVariant('maxVertexBuffers', slotVariant);
+ const shaderLocation = t.makeLimitVariant('maxVertexAttributes', shaderLocationVariant);
+ t.runTest([
+ {
+ slot,
+ arrayStride: 16,
+ stepMode: 'vertex',
+ attributes: [
+ {
+ shaderLocation,
+ format,
+ offset: 0,
+ shaderComponentCount,
+ },
+ ],
+ },
+ ]);
+ });
+
+g.test('setVertexBuffer_offset_and_attribute_offset')
+ .desc(
+ `Test that the vertex buffer offset and attribute offset in the vertex state are applied correctly. Test for:
+ - all formats
+ - various setVertexBuffer offsets
+ - various attribute offsets in a fixed arrayStride`
+ )
+ .params(u =>
+ u //
+ .combine('format', kVertexFormats)
+ .beginSubcases()
+ .combine('vbOffset', [0, 4, 400, 1004])
+ .combine('arrayStride', [128])
+ .expand('offset', p => {
+ const formatInfo = kVertexFormatInfo[p.format];
+ const formatSize = formatInfo.byteSize;
+ return new Set([
+ 0,
+ 4,
+ 8,
+ formatSize,
+ formatSize * 2,
+ p.arrayStride / 2,
+ p.arrayStride - formatSize - 4,
+ p.arrayStride - formatSize - 8,
+ p.arrayStride - formatSize - formatSize,
+ p.arrayStride - formatSize - formatSize * 2,
+ p.arrayStride - formatSize,
+ ]);
+ })
+ )
+ .fn(t => {
+ const { format, vbOffset, arrayStride, offset } = t.params;
+ t.runTest([
+ {
+ slot: 0,
+ arrayStride,
+ stepMode: 'vertex',
+ vbOffset,
+ attributes: [
+ {
+ shaderLocation: 0,
+ format,
+ offset,
+ },
+ ],
+ },
+ ]);
+ });
+
+g.test('non_zero_array_stride_and_attribute_offset')
+ .desc(
+ `Test that the array stride and attribute offset in the vertex state are applied correctly. Test for:
+ - all formats
+ - various array strides
+ - various attribute offsets within the arrayStride`
+ )
+ .params(u =>
+ u //
+ .combine('format', kVertexFormats)
+ .beginSubcases()
+ .expand('arrayStrideVariant', p => {
+ const formatInfo = kVertexFormatInfo[p.format];
+ const formatSize = formatInfo.byteSize;
+
+ return [
+ { mult: 0, add: align(formatSize, 4) },
+ { mult: 0, add: align(formatSize, 4) + 4 },
+ { mult: 1, add: 0 },
+ ];
+ })
+ .expand('offsetVariant', function* (p) {
+ const formatInfo = kVertexFormatInfo[p.format];
+ const formatSize = formatInfo.byteSize;
+ yield { mult: 0, add: 0 };
+ yield { mult: 0, add: 4 };
+ if (formatSize !== 4) yield { mult: 0, add: formatSize };
+ yield { mult: 0.5, add: 0 };
+ yield { mult: 1, add: -formatSize * 2 };
+ if (formatSize !== 4) yield { mult: 1, add: -formatSize - 4 };
+ yield { mult: 1, add: -formatSize };
+ })
+ )
+ .fn(t => {
+ const { format, arrayStrideVariant, offsetVariant } = t.params;
+ const arrayStride = t.makeLimitVariant('maxVertexBufferArrayStride', arrayStrideVariant);
+ const formatInfo = kVertexFormatInfo[format];
+ const formatSize = formatInfo.byteSize;
+ const offset = clamp(makeValueTestVariant(arrayStride, offsetVariant), {
+ min: 0,
+ max: arrayStride - formatSize,
+ });
+
+ t.runTest([
+ {
+ slot: 0,
+ arrayStride,
+ stepMode: 'vertex',
+ attributes: [
+ {
+ shaderLocation: 0,
+ format,
+ offset,
+ },
+ ],
+ },
+ ]);
+ });
+
+g.test('buffers_with_varying_step_mode')
+ .desc(
+ `Test buffers with varying step modes in the same vertex state.
+ - Various combinations of step modes`
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('stepModes', [
+ ['instance'],
+ ['vertex', 'vertex', 'instance'],
+ ['instance', 'vertex', 'instance'],
+ ['vertex', 'instance', 'vertex', 'vertex'],
+ ])
+ )
+ .fn(t => {
+ const { stepModes } = t.params;
+ const state = (stepModes as GPUVertexStepMode[]).map((stepMode, i) => ({
+ slot: i,
+ arrayStride: 4,
+ stepMode,
+ attributes: [
+ {
+ shaderLocation: i,
+ format: 'float32' as const,
+ offset: 0,
+ },
+ ],
+ }));
+ t.runTest(state);
+ });
+
+g.test('vertex_buffer_used_multiple_times_overlapped')
+ .desc(
+ `Test using the same vertex buffer for multiple "vertex buffers", with data from each buffer overlapping.
+ - For each vertex format.
+ - For various numbers of vertex buffers [2, 3, max]`
+ )
+ .params(u =>
+ u //
+ .combine('format', kVertexFormats)
+ .beginSubcases()
+ .combine('vbCountVariant', [
+ { mult: 0, add: 2 },
+ { mult: 0, add: 3 },
+ { mult: 1, add: 0 },
+ ])
+ .combine('additionalVBOffset', [0, 4, 120])
+ )
+ .fn(t => {
+ const { format, vbCountVariant, additionalVBOffset } = t.params;
+ const vbCount = t.makeLimitVariant('maxVertexBuffers', vbCountVariant);
+ const kVertexCount = 20;
+ const kInstanceCount = 1;
+ const formatInfo = kVertexFormatInfo[format];
+ const formatByteSize = formatInfo.byteSize;
+ // We need to align so that the offsets for non-0 setVertexBuffer calls don't fail validation.
+ const alignedFormatByteSize = align(formatByteSize, 4);
+
+ // In this test we want to test using the same vertex buffer for multiple different attributes.
+ // For example if vbCount is 3, we will create a vertex buffer containing the following data:
+ // a0, a1, a2, a3, ..., a<baseDataVertexCount>
+ // We also create the expected data for fetching vertices from that buffer so we can modify it
+ // below.
+ const baseDataVertexCount = kVertexCount + vbCount - 1;
+ const baseData = t.createTestAndPipelineData(
+ [
+ {
+ slot: 0,
+ arrayStride: alignedFormatByteSize,
+ stepMode: 'vertex',
+ vbOffset: additionalVBOffset,
+ attributes: [{ shaderLocation: 0, format, offset: 0 }],
+ },
+ ],
+ baseDataVertexCount,
+ kInstanceCount
+ );
+ const vertexBuffer = t.createVertexBuffers(baseData, baseDataVertexCount, kInstanceCount)[0]
+ .buffer;
+
+ // We are going to bind the vertex buffer multiple times, each time at a different offset that's
+ // a multiple of the data size. So what should be fetched by the vertex shader is:
+ // - attrib0: a0, a1, ..., a19
+ // - attrib1: a1, a2, ..., a20
+ // - attrib2: a2, a3, ..., a21
+ // etc.
+ // We re-create the test data by:
+ // 1) creating multiple "vertex buffers" that all point at the GPUBuffer above but at
+ // different offsets.
+ //   2) selecting, for each attribute, the parts it will see of the expectedData computed for
+ //      the full vertex buffer.
+ const baseTestData = baseData[0].attributes[0];
+ assert(baseTestData.testComponentCount === formatInfo.componentCount * baseDataVertexCount);
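+ // expectedData is padded to 4 components per vertex (see expandTestData), so the per-vertex
+ // byte size is independent of the format's componentCount.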
+ const expectedDataBytesPerVertex = baseTestData.expectedData.byteLength / baseDataVertexCount;
+
+ const testData: VertexLayoutState<{}, TestData> = [];
+ const vertexBuffers: VertexState<{ buffer: GPUBuffer; vbOffset: number }, {}> = [];
+ for (let i = 0; i < vbCount; i++) {
+ vertexBuffers.push({
+ buffer: vertexBuffer,
+ slot: i,
+ vbOffset: additionalVBOffset + i * alignedFormatByteSize,
+ attributes: [],
+ });
+
+ testData.push({
+ slot: i,
+ arrayStride: alignedFormatByteSize,
+ stepMode: 'vertex',
+ attributes: [
+ {
+ shaderLocation: i,
+ format,
+ offset: 0,
+
+ shaderBaseType: baseTestData.shaderBaseType,
+ floatTolerance: baseTestData.floatTolerance,
+ // Select vertices [i, i + kVertexCount]
+ testComponentCount: kVertexCount * formatInfo.componentCount,
+ expectedData: baseTestData.expectedData.slice(
+ expectedDataBytesPerVertex * i,
+ expectedDataBytesPerVertex * (kVertexCount + i)
+ ),
+ vertexData: new ArrayBuffer(0),
+ },
+ ],
+ });
+ }
+
+ // Run the test with the modified test data.
+ const pipeline = t.makeTestPipeline(testData, kVertexCount, kInstanceCount);
+ const expectedDataBG = t.createExpectedBG(testData, pipeline);
+ t.submitRenderPass(pipeline, vertexBuffers, expectedDataBG, kVertexCount, kInstanceCount);
+ });
+
+g.test('vertex_buffer_used_multiple_times_interleaved')
+ .desc(
+ `Test using the same vertex buffer for multiple "vertex buffers", with data from each buffer interleaved.
+ - For each vertex format.
+ - For various numbers of vertex buffers [2, 3, max]`
+ )
+ .params(u =>
+ u //
+ .combine('format', kVertexFormats)
+ .beginSubcases()
+ .combine('vbCountVariant', [
+ { mult: 0, add: 2 },
+ { mult: 0, add: 3 },
+ { mult: 1, add: 0 },
+ ])
+ .combine('additionalVBOffset', [0, 4, 120])
+ )
+ .fn(t => {
+ const { format, vbCountVariant, additionalVBOffset } = t.params;
+ const vbCount = t.makeLimitVariant('maxVertexBuffers', vbCountVariant);
+ const kVertexCount = 20;
+ const kInstanceCount = 1;
+ const formatInfo = kVertexFormatInfo[format];
+ const formatByteSize = formatInfo.byteSize;
+ // We need to align so that the offsets for non-0 setVertexBuffer calls don't fail validation.
+ const alignedFormatByteSize = align(formatByteSize, 4);
+
+ // Create data for a single vertex buffer with many attributes, that will be split between
+ // many vertex buffers set at different offsets.
+
+ // In this test we want to test using the same vertex buffer for multiple different attributes.
+ // For example if vbCount is 3, we will create a vertex buffer containing the following data:
+ // a0, a0, a0, a1, a1, a1, ...
+ // To do that we create a single vertex buffer with `vbCount` attributes that all have the same
+ // format.
+ const attribs: GPUVertexAttribute[] = [];
+ for (let i = 0; i < vbCount; i++) {
+ attribs.push({ format, offset: i * alignedFormatByteSize, shaderLocation: i });
+ }
+ const baseData = t.createTestAndPipelineData(
+ [
+ {
+ slot: 0,
+ arrayStride: alignedFormatByteSize * vbCount,
+ stepMode: 'vertex',
+ vbOffset: additionalVBOffset,
+ attributes: attribs,
+ },
+ ],
+ // Request one vertex more than what we need so we have an extra full stride. Otherwise the
+ // WebGPU validation that vertices are in bounds will fail for all vertex buffers bound at a
+ // non-0 offset (since their last stride would go beyond the data for vertex kVertexCount - 1).
+ kVertexCount + 1,
+ kInstanceCount
+ );
+ const vertexBuffer = t.createVertexBuffers(baseData, kVertexCount + 1, kInstanceCount)[0]
+ .buffer;
+
+ // Then we recreate test data by:
+ // 1) creating multiple "vertex buffers" that all point at the GPUBuffer above but at
+ // different offsets.
+ //  2) giving each vertex buffer a single attribute that will expect a0, a1, ...
+ const testData: VertexLayoutState<{}, TestData> = [];
+ const vertexBuffers: VertexState<{ buffer: GPUBuffer; vbOffset: number }, {}> = [];
+ for (let i = 0; i < vbCount; i++) {
+ vertexBuffers.push({
+ slot: i,
+ buffer: vertexBuffer,
+ vbOffset: additionalVBOffset + i * alignedFormatByteSize,
+ attributes: [],
+ });
+ testData.push({
+ ...baseData[0],
+ slot: i,
+ attributes: [{ ...baseData[0].attributes[i], offset: 0 }],
+ });
+ }
+
+ // Run the test with the modified test data.
+ const pipeline = t.makeTestPipeline(testData, kVertexCount, kInstanceCount);
+ const expectedDataBG = t.createExpectedBG(testData, pipeline);
+ t.submitRenderPass(pipeline, vertexBuffers, expectedDataBG, kVertexCount, kInstanceCount);
+ });
+
+g.test('max_buffers_and_attribs')
+ .desc(
+ `Test a vertex state that loads as many attributes and buffers as possible.
+ - For each format.
+ `
+ )
+ .params(u => u.combine('format', kVertexFormats))
+ .fn(t => {
+ const { format } = t.params;
+ // In compat mode, @builtin(vertex_index) and @builtin(instance_index) each take an attribute
+ const maxVertexBuffers = t.device.limits.maxVertexBuffers;
+ const deviceMaxVertexAttributes = t.device.limits.maxVertexAttributes;
+ const maxVertexAttributes = deviceMaxVertexAttributes - (t.isCompatibility ? 2 : 0);
+ const attributesPerBuffer = Math.ceil(maxVertexAttributes / maxVertexBuffers);
+ let attributesEmitted = 0;
+
+ const state: VertexLayoutState<{}, {}> = [];
+ for (let i = 0; i < maxVertexBuffers; i++) {
+ const attributes: GPUVertexAttribute[] = [];
+ for (let j = 0; j < attributesPerBuffer && attributesEmitted < maxVertexAttributes; j++) {
+ attributes.push({ format, offset: 0, shaderLocation: attributesEmitted });
+ attributesEmitted++;
+ }
+ state.push({
+ slot: i,
+ stepMode: 'vertex',
+ arrayStride: 32,
+ attributes,
+ });
+ }
+ t.runTest(state);
+ });
+
+g.test('array_stride_zero')
+ .desc(
+ `Test that arrayStride 0 correctly uses the same data for all vertices/instances, while another vertex buffer in the same test with arrayStride != 0 gets different data.
+ - Test for all formats
+ - Test for both step modes`
+ )
+ .params(u =>
+ u //
+ .combine('format', kVertexFormats)
+ .beginSubcases()
+ .combine('stepMode', ['vertex', 'instance'] as const)
+ .expand('offsetVariant', p => {
+ const formatInfo = kVertexFormatInfo[p.format];
+ const formatSize = formatInfo.byteSize;
+ return filterUniqueValueTestVariants([
+ { mult: 0, add: 0 },
+ { mult: 0, add: 4 },
+ { mult: 0, add: 8 },
+ { mult: 0, add: formatSize },
+ { mult: 0, add: formatSize * 2 },
+ { mult: 0.5, add: 0 },
+ { mult: 1, add: -formatSize - 4 },
+ { mult: 1, add: -formatSize - 8 },
+ { mult: 1, add: -formatSize },
+ { mult: 1, add: -formatSize * 2 },
+ ]);
+ })
+ )
+ .fn(t => {
+ const { format, stepMode, offsetVariant } = t.params;
+ const offset = t.makeLimitVariant('maxVertexBufferArrayStride', offsetVariant);
+ const kCount = 10;
+
+ // Create the stride 0 part of the test, first by faking a single vertex being drawn and
+ // then expanding the data to cover kCount vertices / instances.
+ const stride0TestData = t.createTestAndPipelineData(
+ [
+ {
+ slot: 0,
+ arrayStride: 2048,
+ stepMode,
+ vbOffset: offset, // used to push the data further into the vertex buffer
+ attributes: [{ format, offset: 0, shaderLocation: 0 }],
+ },
+ ],
+ 1,
+ 1
+ )[0];
+ const stride0VertexBuffer = t.createVertexBuffers([stride0TestData], kCount, kCount)[0];
+
+ // Expand the stride0 test data to have kCount values for expectedData.
+ const originalData = stride0TestData.attributes[0].expectedData;
+ const expandedData = new ArrayBuffer(kCount * originalData.byteLength);
+ for (let i = 0; i < kCount; i++) {
+ new Uint8Array(expandedData, originalData.byteLength * i).set(new Uint8Array(originalData));
+ }
+
+ // Fixup stride0TestData to use arrayStride 0.
+ stride0TestData.attributes[0].offset = offset;
+ stride0TestData.attributes[0].expectedData = expandedData;
+ stride0TestData.attributes[0].testComponentCount *= kCount;
+ stride0TestData.arrayStride = 0;
+ stride0VertexBuffer.vbOffset = 0;
+
+ // Create the part of the state that will be varying for each vertex / instance
+ const varyingTestData = t.createTestAndPipelineData(
+ [
+ {
+ slot: 1,
+ arrayStride: 32,
+ stepMode,
+ attributes: [{ format, offset: 0, shaderLocation: 1 }],
+ },
+ ],
+ kCount,
+ kCount
+ )[0];
+ const varyingVertexBuffer = t.createVertexBuffers([varyingTestData], kCount, kCount)[0];
+
+ // Run the test with the merged test state.
+ const state = [stride0TestData, varyingTestData];
+ const vertexBuffers = [stride0VertexBuffer, varyingVertexBuffer];
+
+ const pipeline = t.makeTestPipeline(state, kCount, kCount);
+ const expectedDataBG = t.createExpectedBG(state, pipeline);
+ t.submitRenderPass(pipeline, vertexBuffers, expectedDataBG, kCount, kCount);
+ });
+
+g.test('discontiguous_location_and_attribs')
+ .desc('Test that using far away slots / shaderLocations works as expected')
+ .fn(t => {
+ t.runTest([
+ {
+ slot: t.device.limits.maxVertexBuffers - 1,
+ arrayStride: 4,
+ stepMode: 'vertex',
+ attributes: [
+ { format: 'uint8x2', offset: 2, shaderLocation: 0 },
+ { format: 'uint8x2', offset: 0, shaderLocation: 8 },
+ ],
+ },
+ {
+ slot: 1,
+ arrayStride: 16,
+ stepMode: 'instance',
+ vbOffset: 1000,
+ attributes: [
+ {
+ format: 'uint32x4',
+ offset: 0,
+ shaderLocation: t.device.limits.maxVertexAttributes - 1,
+ },
+ ],
+ },
+ ]);
+ });
+
+g.test('overlapping_attributes')
+ .desc(
+ `Test that overlapping attributes in the same vertex buffer works
+ - Test for all formats`
+ )
+ .params(u => u.combine('format', kVertexFormats))
+ .fn(t => {
+ const { format } = t.params;
+
+ // In compat mode, @builtin(vertex_index) and @builtin(instance_index) each take an attribute
+ const maxVertexAttributes = t.device.limits.maxVertexAttributes - (t.isCompatibility ? 2 : 0);
+ const attributes: GPUVertexAttribute[] = [];
+ for (let i = 0; i < maxVertexAttributes; i++) {
+ attributes.push({ format, offset: 0, shaderLocation: i });
+ }
+
+ t.runTest([
+ {
+ slot: 0,
+ stepMode: 'vertex',
+ arrayStride: 32,
+ attributes,
+ },
+ ]);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/vertex_state/index_format.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/vertex_state/index_format.spec.ts
new file mode 100644
index 0000000000..fb589e5543
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/operation/vertex_state/index_format.spec.ts
@@ -0,0 +1,584 @@
+export const description = `
+Test indexing, index format and primitive restart.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+import { getTextureCopyLayout } from '../../../util/texture/layout.js';
+
+const kHeight = 4;
+const kWidth = 8;
+const kTextureFormat = 'r8uint' as const;
+
+/** 8x4 grid of r8uint values (each 0 or 1). */
+type Raster8x4 = readonly [
+ readonly [0 | 1, 0 | 1, 0 | 1, 0 | 1, 0 | 1, 0 | 1, 0 | 1, 0 | 1],
+ readonly [0 | 1, 0 | 1, 0 | 1, 0 | 1, 0 | 1, 0 | 1, 0 | 1, 0 | 1],
+ readonly [0 | 1, 0 | 1, 0 | 1, 0 | 1, 0 | 1, 0 | 1, 0 | 1, 0 | 1],
+ readonly [0 | 1, 0 | 1, 0 | 1, 0 | 1, 0 | 1, 0 | 1, 0 | 1, 0 | 1],
+];
+
+/** Expected 8x4 rasterization of a bottom-left triangle. */
+const kBottomLeftTriangle: Raster8x4 = [
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 1, 0, 0, 0],
+ [0, 0, 0, 0, 1, 1, 0, 0],
+ [0, 0, 0, 0, 1, 1, 1, 0],
+];
+
+/** Expected 8x4 rasterization filling the whole quad. */
+const kSquare: Raster8x4 = [
+ [0, 0, 0, 0, 1, 1, 1, 1],
+ [0, 0, 0, 0, 1, 1, 1, 1],
+ [0, 0, 0, 0, 1, 1, 1, 1],
+ [0, 0, 0, 0, 1, 1, 1, 1],
+];
+
+/** Expected 8x4 rasterization with no pixels. */
+const kNothing: Raster8x4 = [
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+];
+
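+// Copy layout for reading back the whole kWidth x kHeight r8uint texture into a buffer;
+// bytesPerRow is padded to the required copy row alignment, so byteLength can exceed kWidth * kHeight.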
+const { byteLength, bytesPerRow, rowsPerImage } = getTextureCopyLayout(kTextureFormat, '2d', [
+ kWidth,
+ kHeight,
+ 1,
+]);
+
+class IndexFormatTest extends GPUTest {
+ MakeRenderPipeline(
+ topology: GPUPrimitiveTopology,
+ stripIndexFormat?: GPUIndexFormat
+ ): GPURenderPipeline {
+ const vertexModule = this.device.createShaderModule({
+ // NOTE: These positions will create triangles that cut right through pixel centers. If this
+ // results in different rasterization results on different hardware, tweak to avoid this.
+ code: `
+ @vertex
+ fn main(@builtin(vertex_index) VertexIndex : u32)
+ -> @builtin(position) vec4<f32> {
+ var pos = array<vec2<f32>, 4>(
+ vec2<f32>(0.01, 0.98),
+ vec2<f32>(0.99, -0.98),
+ vec2<f32>(0.99, 0.98),
+ vec2<f32>(0.01, -0.98));
+
+ if (VertexIndex == 0xFFFFu || VertexIndex == 0xFFFFFFFFu) {
+ return vec4<f32>(-0.99, -0.98, 0.0, 1.0);
+ }
+ return vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+ }
+ `,
+ });
+
+ const fragmentModule = this.device.createShaderModule({
+ code: `
+ @fragment
+ fn main() -> @location(0) u32 {
+ return 1u;
+ }
+ `,
+ });
+
+ return this.device.createRenderPipeline({
+ layout: this.device.createPipelineLayout({ bindGroupLayouts: [] }),
+ vertex: { module: vertexModule, entryPoint: 'main' },
+ fragment: {
+ module: fragmentModule,
+ entryPoint: 'main',
+ targets: [{ format: kTextureFormat }],
+ },
+ primitive: {
+ topology,
+ stripIndexFormat,
+ },
+ });
+ }
+
+ CreateIndexBuffer(indices: readonly number[], indexFormat: GPUIndexFormat): GPUBuffer {
+ const typedArrayConstructor = { uint16: Uint16Array, uint32: Uint32Array }[indexFormat];
+ return this.makeBufferWithContents(new typedArrayConstructor(indices), GPUBufferUsage.INDEX);
+ }
+
+ run(
+ indexBuffer: GPUBuffer,
+ indexCount: number,
+ indexFormat: GPUIndexFormat,
+ indexOffset: number = 0,
+ primitiveTopology: GPUPrimitiveTopology = 'triangle-list'
+ ): GPUBuffer {
+ let pipeline: GPURenderPipeline;
+ // For primitive restart testing, the indexFormat must be set in the render pipeline descriptor
+ // when it specifies a strip primitive topology.
+ if (primitiveTopology === 'line-strip' || primitiveTopology === 'triangle-strip') {
+ pipeline = this.MakeRenderPipeline(primitiveTopology, indexFormat);
+ } else {
+ pipeline = this.MakeRenderPipeline(primitiveTopology);
+ }
+
+ const colorAttachment = this.device.createTexture({
+ format: kTextureFormat,
+ size: { width: kWidth, height: kHeight, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const result = this.device.createBuffer({
+ size: byteLength,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+
+ const encoder = this.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorAttachment.createView(),
+ clearValue: [0, 0, 0, 0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ pass.setPipeline(pipeline);
+ pass.setIndexBuffer(indexBuffer, indexFormat, indexOffset);
+ pass.drawIndexed(indexCount);
+ pass.end();
+ encoder.copyTextureToBuffer(
+ { texture: colorAttachment },
+ { buffer: result, bytesPerRow, rowsPerImage },
+ [kWidth, kHeight]
+ );
+ this.device.queue.submit([encoder.finish()]);
+
+ return result;
+ }
+
+ CreateExpectedUint8Array(renderShape: Raster8x4): Uint8Array {
+ const arrayBuffer = new Uint8Array(byteLength);
+ for (let row = 0; row < renderShape.length; row++) {
+ for (let col = 0; col < renderShape[row].length; col++) {
+ const texel: 0 | 1 = renderShape[row][col];
+
+ const kBytesPerTexel = 1; // r8uint
+ const byteOffset = row * bytesPerRow + col * kBytesPerTexel;
+ arrayBuffer[byteOffset] = texel;
+ }
+ }
+ return arrayBuffer;
+ }
+}
+
+export const g = makeTestGroup(IndexFormatTest);
+
+g.test('index_format,uint16')
+ .desc('Test rendering result of indexed draw with index format of uint16.')
+ .paramsSubcasesOnly([
+ { indexOffset: 0, _indexCount: 10, _expectedShape: kSquare },
+ { indexOffset: 6, _indexCount: 6, _expectedShape: kBottomLeftTriangle },
+ { indexOffset: 18, _indexCount: 0, _expectedShape: kNothing },
+ ])
+ .fn(t => {
+ const { indexOffset, _indexCount, _expectedShape } = t.params;
+
+ // If this is written as uint16 but interpreted as uint32, indices 1 and 2 will both be 0 and
+ // nothing will be rendered.
+ // Also, the index buffer size minus the offset must not be less than the size required by a
+ // triangle list, otherwise nothing is rendered either.
+ const indices: number[] = [1, 2, 0, 0, 0, 0, 0, 1, 3, 0];
+ const indexBuffer = t.CreateIndexBuffer(indices, 'uint16');
+ const result = t.run(indexBuffer, _indexCount, 'uint16', indexOffset);
+
+ const expectedTextureValues = t.CreateExpectedUint8Array(_expectedShape);
+ t.expectGPUBufferValuesEqual(result, expectedTextureValues);
+ });
+
+g.test('index_format,uint32')
+ .desc('Test rendering result of indexed draw with index format of uint32.')
+ .paramsSubcasesOnly([
+ { indexOffset: 0, _indexCount: 10, _expectedShape: kSquare },
+ { indexOffset: 12, _indexCount: 7, _expectedShape: kBottomLeftTriangle },
+ { indexOffset: 36, _indexCount: 0, _expectedShape: kNothing },
+ ])
+ .fn(t => {
+ const { indexOffset, _indexCount, _expectedShape } = t.params;
+
+ // If this is interpreted as uint16, then it would be 0, 1, 0, ... and would draw nothing.
+ // Also, the index buffer size minus the offset must not be less than the size required by a
+ // triangle list, otherwise nothing is rendered either.
+ const indices: number[] = [1, 2, 0, 0, 0, 0, 0, 1, 3, 0];
+ const indexBuffer = t.CreateIndexBuffer(indices, 'uint32');
+ const result = t.run(indexBuffer, _indexCount, 'uint32', indexOffset);
+
+ const expectedTextureValues = t.CreateExpectedUint8Array(_expectedShape);
+ t.expectGPUBufferValuesEqual(result, expectedTextureValues);
+ });
+
+g.test('index_format,change_pipeline_after_setIndexBuffer')
+ .desc('Test that changing the pipeline after setIndexBuffer works correctly.')
+ .params(u => u.combine('setPipelineBeforeSetIndexBuffer', [false, true]))
+ .fn(t => {
+ const indexOffset = 12;
+ const indexCount = 7;
+ const expectedShape = kBottomLeftTriangle;
+
+ const indexFormat16 = 'uint16';
+ const indexFormat32 = 'uint32';
+
+ const indices: number[] = [1, 2, 0, 0, 0, 0, 0, 1, 3, 0];
+ const indexBuffer = t.CreateIndexBuffer(indices, indexFormat32);
+
+ const kPrimitiveTopology = 'triangle-strip';
+ const pipeline32 = t.MakeRenderPipeline(kPrimitiveTopology, indexFormat32);
+ const pipeline16 = t.MakeRenderPipeline(kPrimitiveTopology, indexFormat16);
+
+ const colorAttachment = t.device.createTexture({
+ format: kTextureFormat,
+ size: { width: kWidth, height: kHeight, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const result = t.device.createBuffer({
+ size: byteLength,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorAttachment.createView(),
+ clearValue: [0, 0, 0, 0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+
+ if (t.params.setPipelineBeforeSetIndexBuffer) {
+ pass.setPipeline(pipeline16);
+ }
+ pass.setIndexBuffer(indexBuffer, indexFormat32, indexOffset);
+ pass.setPipeline(pipeline32); // Set the pipeline for 'indexFormat32' again.
+ pass.drawIndexed(indexCount);
+ pass.end();
+ encoder.copyTextureToBuffer(
+ { texture: colorAttachment },
+ { buffer: result, bytesPerRow, rowsPerImage },
+ [kWidth, kHeight]
+ );
+ t.device.queue.submit([encoder.finish()]);
+
+ const expectedTextureValues = t.CreateExpectedUint8Array(expectedShape);
+ t.expectGPUBufferValuesEqual(result, expectedTextureValues);
+ });
+
+g.test('index_format,setIndexBuffer_before_setPipeline')
+ .desc('Test that setting the index buffer before the pipeline works correctly.')
+ .params(u => u.combine('setIndexBufferBeforeSetPipeline', [false, true]))
+ .fn(t => {
+ const indexOffset = 12;
+ const indexCount = 7;
+ const expectedShape = kBottomLeftTriangle;
+
+ const indexFormat = 'uint32';
+
+ const indices: number[] = [1, 2, 0, 0, 0, 0, 0, 1, 3, 0];
+ const indexBuffer = t.CreateIndexBuffer(indices, indexFormat);
+
+ const kPrimitiveTopology = 'triangle-strip';
+ const pipeline = t.MakeRenderPipeline(kPrimitiveTopology, indexFormat);
+
+ const colorAttachment = t.device.createTexture({
+ format: kTextureFormat,
+ size: { width: kWidth, height: kHeight, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const result = t.device.createBuffer({
+ size: byteLength,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorAttachment.createView(),
+ clearValue: [0, 0, 0, 0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+
+ if (t.params.setIndexBufferBeforeSetPipeline) {
+ pass.setIndexBuffer(indexBuffer, indexFormat, indexOffset);
+ pass.setPipeline(pipeline);
+ } else {
+ pass.setPipeline(pipeline);
+ pass.setIndexBuffer(indexBuffer, indexFormat, indexOffset);
+ }
+
+ pass.drawIndexed(indexCount);
+ pass.end();
+ encoder.copyTextureToBuffer(
+ { texture: colorAttachment },
+ { buffer: result, bytesPerRow, rowsPerImage },
+ [kWidth, kHeight]
+ );
+ t.device.queue.submit([encoder.finish()]);
+
+ const expectedTextureValues = t.CreateExpectedUint8Array(expectedShape);
+ t.expectGPUBufferValuesEqual(result, expectedTextureValues);
+ });
+
+g.test('index_format,setIndexBuffer_different_formats')
+ .desc(
+ `
+ Test that index buffers of multiple formats can be used with a pipeline that doesn't use strip
+ primitive topology.
+ `
+ )
+ .fn(t => {
+ const indices: number[] = [1, 2, 0, 0, 0, 0, 0, 1, 3, 0];
+
+ // Create a pipeline to be used by different index formats.
+ const kPrimitiveTopology = 'triangle-list';
+ const pipeline = t.MakeRenderPipeline(kPrimitiveTopology);
+
+ const expectedTextureValues = t.CreateExpectedUint8Array(kBottomLeftTriangle);
+
+ const colorAttachment = t.device.createTexture({
+ format: kTextureFormat,
+ size: { width: kWidth, height: kHeight, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const result = t.device.createBuffer({
+ size: byteLength,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+
+ let encoder = t.device.createCommandEncoder();
+ {
+ const indexFormat = 'uint32';
+ const indexOffset = 12;
+ const indexCount = 7;
+ const indexBuffer = t.CreateIndexBuffer(indices, indexFormat);
+
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorAttachment.createView(),
+ clearValue: [0, 0, 0, 0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+
+ pass.setIndexBuffer(indexBuffer, indexFormat, indexOffset);
+ pass.setPipeline(pipeline);
+ pass.drawIndexed(indexCount);
+ pass.end();
+ encoder.copyTextureToBuffer(
+ { texture: colorAttachment },
+ { buffer: result, bytesPerRow, rowsPerImage },
+ [kWidth, kHeight]
+ );
+ }
+ t.device.queue.submit([encoder.finish()]);
+ t.expectGPUBufferValuesEqual(result, expectedTextureValues);
+
+ // Call setIndexBuffer with the pipeline and a different index format buffer.
+ encoder = t.device.createCommandEncoder();
+ {
+ const indexFormat = 'uint16';
+ const indexOffset = 6;
+ const indexCount = 6;
+ const indexBuffer = t.CreateIndexBuffer(indices, indexFormat);
+
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorAttachment.createView(),
+ clearValue: [0, 0, 0, 0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+
+ pass.setIndexBuffer(indexBuffer, indexFormat, indexOffset);
+ pass.setPipeline(pipeline);
+ pass.drawIndexed(indexCount);
+ pass.end();
+ encoder.copyTextureToBuffer(
+ { texture: colorAttachment },
+ { buffer: result, bytesPerRow, rowsPerImage },
+ [kWidth, kHeight]
+ );
+ }
+ t.device.queue.submit([encoder.finish()]);
+ t.expectGPUBufferValuesEqual(result, expectedTextureValues);
+ });
+
+g.test('primitive_restart')
+ .desc(
+ `
+Test primitive restart with each primitive topology.
+
+Primitive restart should be always active with strip primitive topologies
+('line-strip' or 'triangle-strip') and never active for other topologies, where
+the primitive restart value isn't special and should be treated as a regular index value.
+
+The value -1 gets uploaded as 0xFFFF or 0xFFFF_FFFF according to the format.
+
+The positions of these points are embedded in the shader above, and look like this:
+ | 0 2|
+ | |
+ -1 3 1|
+
+Below are the indices lists used for each test, and the expected rendering result of each
+(approximately, in the case of incorrect results). This shows the expected result (marked '->')
+is different from what you would get if the topology were incorrect.
+
+- primitiveTopology: triangle-list
+ indices: [0, 1, 3, -1, 2, 1, 0, 0],
+ -> triangle-list: (0, 1, 3), (-1, 2, 1)
+ | # #|
+ | ####|
+ | #####|
+ | #######|
+ triangle-list with restart: (0, 1, 3), (2, 1, 0)
+ triangle-strip: (0, 1, 3), (2, 1, 0), (1, 0, 0)
+ | ####|
+ | ####|
+ | ####|
+ | ####|
+ triangle-strip w/o restart: (0, 1, 3), (1, 3, -1), (3, -1, 2), (-1, 2, 1), (2, 1, 0), (1, 0, 0)
+ | ####|
+ | ####|
+ | #####|
+ | #######|
+
+- primitiveTopology: triangle-strip
+ indices: [3, 1, 0, -1, 2, 2, 1, 3],
+ -> triangle-strip: (3, 1, 0), (2, 2, 1), (2, 1, 3)
+ | # #|
+ | ####|
+ | ####|
+ | ####|
+ triangle-strip w/o restart: (3, 1, 0), (1, 0, -1), (0, -1, 2), (2, 2, 1), (2, 3, 1)
+ | ####|
+ | #####|
+ | ######|
+ | #######|
+ triangle-list: (3, 1, 0), (-1, 2, 2)
+ triangle-list with restart: (3, 1, 0), (2, 2, 1)
+ | |
+ | # |
+ | ## |
+ | ### |
+
+- primitiveTopology: point, line-list, line-strip:
+ indices: [0, 1, -1, 2, -1, 2, 3, 0],
+ -> point-list: (0), (1), (-1), (2), (3), (0)
+ | # #|
+ | |
+ | |
+ |# # #|
+ point-list with restart (0), (1), (2), (3), (0)
+ | # #|
+ | |
+ | |
+ | # #|
+ -> line-list: (0, 1), (-1, 2), (3, 0)
+ | # ##|
+ | ## |
+ | ### # |
+ |## # #|
+ line-list with restart: (0, 1), (2, 3)
+ | # #|
+ | ## |
+ | ## |
+ | # #|
+ -> line-strip: (0, 1), (2, 3), (3, 0)
+ | # #|
+ | ### |
+ | ### |
+ | # #|
+ line-strip w/o restart: (0, 1), (1, -1), (-1, 2), (2, 3), (3, 3)
+ | # ##|
+ | ### |
+ | ## ## |
+ |########|
+`
+ )
+ .params(u =>
+ u //
+ .combine('indexFormat', ['uint16', 'uint32'] as const)
+ .combineWithParams([
+ {
+ primitiveTopology: 'point-list',
+ _indices: [0, 1, -1, 2, 3, 0],
+ _expectedShape: [
+ [0, 0, 0, 0, 1, 0, 0, 1],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 0, 0, 0, 1, 0, 0, 1],
+ ],
+ },
+ {
+ primitiveTopology: 'line-list',
+ _indices: [0, 1, -1, 2, 3, 0],
+ _expectedShape: [
+ [0, 0, 0, 0, 1, 0, 1, 1],
+ [0, 0, 0, 0, 1, 1, 0, 0],
+ [0, 0, 1, 1, 1, 0, 1, 0],
+ [1, 1, 0, 0, 1, 0, 0, 1],
+ ],
+ },
+ {
+ primitiveTopology: 'line-strip',
+ _indices: [0, 1, -1, 2, 3, 0],
+ _expectedShape: [
+ [0, 0, 0, 0, 1, 0, 0, 1],
+ [0, 0, 0, 0, 1, 1, 1, 0],
+ [0, 0, 0, 0, 1, 1, 1, 0],
+ [0, 0, 0, 0, 1, 0, 0, 1],
+ ],
+ },
+ {
+ primitiveTopology: 'triangle-list',
+ _indices: [0, 1, 3, -1, 2, 1, 0, 0],
+ _expectedShape: [
+ [0, 0, 0, 0, 0, 0, 0, 1],
+ [0, 0, 0, 0, 1, 1, 1, 1],
+ [0, 0, 0, 1, 1, 1, 1, 1],
+ [0, 1, 1, 1, 1, 1, 1, 1],
+ ],
+ },
+ {
+ primitiveTopology: 'triangle-strip',
+ _indices: [3, 1, 0, -1, 2, 2, 1, 3],
+ _expectedShape: [
+ [0, 0, 0, 0, 0, 0, 0, 1],
+ [0, 0, 0, 0, 1, 0, 1, 1],
+ [0, 0, 0, 0, 1, 1, 1, 1],
+ [0, 0, 0, 0, 1, 1, 1, 1],
+ ],
+ },
+ ] as const)
+ )
+ .fn(t => {
+ const { indexFormat, primitiveTopology, _indices, _expectedShape } = t.params;
+
+ const indexBuffer = t.CreateIndexBuffer(_indices, indexFormat);
+ const result = t.run(indexBuffer, _indices.length, indexFormat, 0, primitiveTopology);
+
+ const expectedTextureValues = t.CreateExpectedUint8Array(_expectedShape);
+ t.expectGPUBufferValuesEqual(result, expectedTextureValues);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/regression/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/api/regression/README.txt
new file mode 100644
index 0000000000..263b04a372
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/regression/README.txt
@@ -0,0 +1,2 @@
+One-off tests that reproduce API bugs found in implementations to prevent the bugs from
+appearing again.
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/README.txt
new file mode 100644
index 0000000000..8bf08d7b08
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/README.txt
@@ -0,0 +1 @@
+Positive and negative tests for all the validation rules of the API.
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/buffer/create.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/buffer/create.spec.ts
new file mode 100644
index 0000000000..9631c368fe
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/buffer/create.spec.ts
@@ -0,0 +1,113 @@
+export const description = `
+Tests for validation in createBuffer.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { assert } from '../../../../common/util/util.js';
+import {
+ kAllBufferUsageBits,
+ kBufferSizeAlignment,
+ kBufferUsages,
+} from '../../../capability_info.js';
+import { GPUConst } from '../../../constants.js';
+import { kMaxSafeMultipleOf8 } from '../../../util/math.js';
+import { ValidationTest } from '../validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+assert(kBufferSizeAlignment === 4);
+g.test('size')
+ .desc(
+ 'Test buffer size alignment is validated to be a multiple of 4 if mappedAtCreation is true.'
+ )
+ .params(u =>
+ u
+ .combine('mappedAtCreation', [false, true])
+ .beginSubcases()
+ .combine('size', [
+ 0,
+ kBufferSizeAlignment * 0.5,
+ kBufferSizeAlignment,
+ kBufferSizeAlignment * 1.5,
+ kBufferSizeAlignment * 2,
+ ])
+ )
+ .fn(t => {
+ const { mappedAtCreation, size } = t.params;
+ const isValid = !mappedAtCreation || size % kBufferSizeAlignment === 0;
+ const usage = BufferUsage.COPY_SRC;
+ t.expectGPUError(
+ 'validation',
+ () => t.device.createBuffer({ size, usage, mappedAtCreation }),
+ !isValid
+ );
+ });
+
+g.test('limit')
+ .desc('Test buffer size is validated against maxBufferSize.')
+ .params(u => u.beginSubcases().combine('sizeAddition', [-1, 0, +1]))
+ .fn(t => {
+ const { sizeAddition } = t.params;
+ const size = t.makeLimitVariant('maxBufferSize', { mult: 1, add: sizeAddition });
+ const isValid = size <= t.device.limits.maxBufferSize;
+ const usage = BufferUsage.COPY_SRC;
+ t.expectGPUError('validation', () => t.device.createBuffer({ size, usage }), !isValid);
+ });
+
+const kInvalidUsage = 0x8000;
+assert((kInvalidUsage & kAllBufferUsageBits) === 0);
+g.test('usage')
+ .desc('Test that combinations of zero, one, or two usage flags are correctly validated.')
+ .params(u =>
+ u
+ .combine('usage1', [0, ...kBufferUsages, kInvalidUsage])
+ .combine('usage2', [0, ...kBufferUsages, kInvalidUsage])
+ .beginSubcases()
+ .combine('mappedAtCreation', [false, true])
+ )
+ .fn(t => {
+ const { mappedAtCreation, usage1, usage2 } = t.params;
+ const usage = usage1 | usage2;
+
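+ // A usage combination is valid if it is non-zero, uses only known usage bits, and combines
+ // MAP_READ only with COPY_DST and MAP_WRITE only with COPY_SRC.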
+ const isValid =
+ usage !== 0 &&
+ (usage & ~kAllBufferUsageBits) === 0 &&
+ ((usage & GPUBufferUsage.MAP_READ) === 0 ||
+ (usage & ~(GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ)) === 0) &&
+ ((usage & GPUBufferUsage.MAP_WRITE) === 0 ||
+ (usage & ~(GPUBufferUsage.COPY_SRC | GPUBufferUsage.MAP_WRITE)) === 0);
+
+ t.expectGPUError(
+ 'validation',
+ () => t.device.createBuffer({ size: kBufferSizeAlignment * 2, usage, mappedAtCreation }),
+ !isValid
+ );
+ });
+
+const BufferUsage = GPUConst.BufferUsage;
+
+g.test('createBuffer_invalid_and_oom')
+ .desc(
+ `When creating a mappable buffer, it's expected that shmem may be immediately allocated
+(in the content process, before validation occurs in the GPU process). If the buffer is really
+large, though, it could fail shmem allocation before validation fails. Ensure that the OOM error is
+hidden behind the "more severe" validation error.`
+ )
+ .paramsSubcasesOnly(u =>
+ u.combineWithParams([
+ { _valid: true, usage: BufferUsage.UNIFORM, size: 16 },
+ { _valid: true, usage: BufferUsage.STORAGE, size: 16 },
+ // Invalid because UNIFORM is not allowed with map usages.
+ { usage: BufferUsage.MAP_WRITE | BufferUsage.UNIFORM, size: 16 },
+ { usage: BufferUsage.MAP_WRITE | BufferUsage.UNIFORM, size: kMaxSafeMultipleOf8 },
+ { usage: BufferUsage.MAP_WRITE | BufferUsage.UNIFORM, size: 0x20_0000_0000 }, // 128 GiB
+ { usage: BufferUsage.MAP_READ | BufferUsage.UNIFORM, size: 16 },
+ { usage: BufferUsage.MAP_READ | BufferUsage.UNIFORM, size: kMaxSafeMultipleOf8 },
+ { usage: BufferUsage.MAP_READ | BufferUsage.UNIFORM, size: 0x20_0000_0000 }, // 128 GiB
+ ] as const)
+ )
+ .fn(t => {
+ const { _valid, usage, size } = t.params;
+
+ t.expectGPUError('validation', () => t.device.createBuffer({ size, usage }), !_valid);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/buffer/destroy.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/buffer/destroy.spec.ts
new file mode 100644
index 0000000000..102556472b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/buffer/destroy.spec.ts
@@ -0,0 +1,101 @@
+export const description = `
+Validation tests for GPUBuffer.destroy.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { kBufferUsages } from '../../../capability_info.js';
+import { GPUConst } from '../../../constants.js';
+import { ValidationTest } from '../validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('all_usages')
+ .desc('Test destroying buffers of every usage type.')
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('usage', kBufferUsages)
+ )
+ .fn(t => {
+ const { usage } = t.params;
+ const buf = t.device.createBuffer({
+ size: 4,
+ usage,
+ });
+
+ buf.destroy();
+ });
+
+g.test('error_buffer')
+ .desc('Test that error buffers may be destroyed without generating validation errors.')
+ .fn(t => {
+ const buf = t.getErrorBuffer();
+ buf.destroy();
+ });
+
+g.test('twice')
+ .desc(
+ `Test that destroying a buffer more than once is allowed.
+ - Tests buffers which are mapped at creation or not
+ - Tests buffers with various usages`
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('mappedAtCreation', [false, true])
+ .combineWithParams([
+ { size: 4, usage: GPUConst.BufferUsage.COPY_SRC },
+ { size: 4, usage: GPUConst.BufferUsage.MAP_WRITE | GPUConst.BufferUsage.COPY_SRC },
+ { size: 4, usage: GPUConst.BufferUsage.COPY_DST | GPUConst.BufferUsage.MAP_READ },
+ ])
+ )
+ .fn(t => {
+ const buf = t.device.createBuffer(t.params);
+
+ buf.destroy();
+ buf.destroy();
+ });
+
+g.test('while_mapped')
+ .desc(
+ `Test destroying buffers while mapped or after being unmapped.
+ - Tests {mappable, unmappable mappedAtCreation, mappable mappedAtCreation}
+ - Tests while {mapped, mapped at creation, unmapped}`
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('mappedAtCreation', [false, true])
+ .combine('unmapBeforeDestroy', [false, true])
+ .combineWithParams([
+ { usage: GPUConst.BufferUsage.COPY_SRC },
+ { usage: GPUConst.BufferUsage.MAP_WRITE | GPUConst.BufferUsage.COPY_SRC },
+ { usage: GPUConst.BufferUsage.COPY_DST | GPUConst.BufferUsage.MAP_READ },
+ {
+ usage: GPUConst.BufferUsage.MAP_WRITE | GPUConst.BufferUsage.COPY_SRC,
+ mapMode: GPUConst.MapMode.WRITE,
+ },
+ {
+ usage: GPUConst.BufferUsage.COPY_DST | GPUConst.BufferUsage.MAP_READ,
+ mapMode: GPUConst.MapMode.READ,
+ },
+ ])
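+ // A buffer that is neither mapped at creation nor mapped afterwards is out of scope here.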
+ .unless(p => p.mappedAtCreation === false && p.mapMode === undefined)
+ )
+ .fn(async t => {
+ const { usage, mapMode, mappedAtCreation, unmapBeforeDestroy } = t.params;
+ const buf = t.device.createBuffer({
+ size: 4,
+ usage,
+ mappedAtCreation,
+ });
+
+ if (mapMode !== undefined) {
+ if (mappedAtCreation) {
+ buf.unmap();
+ }
+ await buf.mapAsync(mapMode);
+ }
+ if (unmapBeforeDestroy) {
+ buf.unmap();
+ }
+
+ buf.destroy();
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/buffer/mapping.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/buffer/mapping.spec.ts
new file mode 100644
index 0000000000..58d7f2767a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/buffer/mapping.spec.ts
@@ -0,0 +1,1125 @@
+export const description = `
+Validation tests for GPUBuffer.mapAsync, GPUBuffer.unmap and GPUBuffer.getMappedRange.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { attemptGarbageCollection } from '../../../../common/util/collect_garbage.js';
+import { assert, unreachable } from '../../../../common/util/util.js';
+import { kBufferUsages } from '../../../capability_info.js';
+import { GPUConst } from '../../../constants.js';
+import { ValidationTest } from '../validation_test.js';
+
+class F extends ValidationTest {
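+  // Maps `buffer` with the given mode/offset/size and checks the outcome against `expectation`:
+  // either plain success, or a rejection with the given error name, optionally preceded by a
+  // validation error and/or rejected before control returns to the microtask queue.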
+ async testMapAsyncCall(
+ expectation:
+ | 'success'
+ | { validationError: boolean; earlyRejection: boolean; rejectName: string },
+ buffer: GPUBuffer,
+ mode: GPUMapModeFlags,
+ offset?: number,
+ size?: number
+ ) {
+ if (expectation === 'success') {
+ const p = buffer.mapAsync(mode, offset, size);
+ await p;
+ } else {
+ let p: Promise<void>;
+ this.expectValidationError(() => {
+ p = buffer.mapAsync(mode, offset, size);
+ }, expectation.validationError);
+ let caught = false;
+ let rejectedEarly = false;
+ // If mapAsync rejected early, microtask A will run before B.
+ // If not, B will run before A.
+ p!.catch(() => {
+ // Microtask A
+ caught = true;
+ });
+ queueMicrotask(() => {
+ // Microtask B
+ rejectedEarly = caught;
+ });
+ try {
+ // This await will always complete after microtasks A and B are both done.
+ await p!;
+ assert(expectation.rejectName === null, 'mapAsync unexpectedly passed');
+ } catch (ex) {
+ assert(ex instanceof Error, 'mapAsync rejected with non-error');
+ assert(typeof ex.stack === 'string', 'mapAsync rejected without a stack');
+ assert(expectation.rejectName === ex.name, `mapAsync rejected unexpectedly with: ${ex}`);
+ assert(
+ expectation.earlyRejection === rejectedEarly,
+ 'mapAsync rejected at an unexpected timing'
+ );
+ }
+ }
+ }
+
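+  // Calls getMappedRange(offset, size) and checks that it either returns an ArrayBuffer (of the
+  // requested size, when one is given) or throws an OperationError.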
+ testGetMappedRangeCall(success: boolean, buffer: GPUBuffer, offset?: number, size?: number) {
+ if (success) {
+ const data = buffer.getMappedRange(offset, size);
+ this.expect(data instanceof ArrayBuffer);
+ if (size !== undefined) {
+ this.expect(data.byteLength === size);
+ }
+ } else {
+ this.shouldThrow('OperationError', () => {
+ buffer.getMappedRange(offset, size);
+ });
+ }
+ }
+
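+  // Creates a buffer of the given size whose usage allows mapping with the given map mode.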
+ createMappableBuffer(type: GPUMapModeFlags, size: number): GPUBuffer {
+ switch (type) {
+ case GPUMapMode.READ:
+ return this.device.createBuffer({
+ size,
+ usage: GPUBufferUsage.MAP_READ,
+ });
+ case GPUMapMode.WRITE:
+ return this.device.createBuffer({
+ size,
+ usage: GPUBufferUsage.MAP_WRITE,
+ });
+ default:
+ unreachable();
+ }
+ }
+}
+
+export const g = makeTestGroup(F);
+
+const kMapModeOptions = [GPUConst.MapMode.READ, GPUConst.MapMode.WRITE];
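+// Alignment required by mapAsync and getMappedRange per the WebGPU spec: offsets must be
+// multiples of 8 and sizes must be multiples of 4.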
+const kOffsetAlignment = 8;
+const kSizeAlignment = 4;
+
+g.test('mapAsync,usage')
+ .desc(
+ `Test the usage validation for mapAsync.
+
+ For each buffer usage:
+ For GPUMapMode.READ, GPUMapMode.WRITE, and 0:
+    Test that the mapAsync call is valid iff the mapMode is not 0 and the buffer usage
+    contains the flag corresponding to the mapMode.`
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combineWithParams([
+ { mapMode: GPUConst.MapMode.READ, validUsage: GPUConst.BufferUsage.MAP_READ },
+ { mapMode: GPUConst.MapMode.WRITE, validUsage: GPUConst.BufferUsage.MAP_WRITE },
+ // Using mapMode 0 is never valid, so there is no validUsage.
+ { mapMode: 0, validUsage: null },
+ ])
+ .combine('usage', kBufferUsages)
+ )
+ .fn(async t => {
+ const { mapMode, validUsage, usage } = t.params;
+
+ const buffer = t.device.createBuffer({
+ size: 16,
+ usage,
+ });
+
+ const successParam =
+ usage === validUsage
+ ? 'success'
+ : {
+ validationError: true,
+ earlyRejection: false,
+ rejectName: 'OperationError',
+ };
+ await t.testMapAsyncCall(successParam, buffer, mapMode);
+ });
+
+g.test('mapAsync,invalidBuffer')
+ .desc('Test that mapAsync is an error when called on an invalid buffer.')
+ .paramsSubcasesOnly(u => u.combine('mapMode', kMapModeOptions))
+ .fn(async t => {
+ const { mapMode } = t.params;
+ const buffer = t.getErrorBuffer();
+ await t.testMapAsyncCall(
+ { validationError: true, earlyRejection: false, rejectName: 'OperationError' },
+ buffer,
+ mapMode
+ );
+ });
+
+g.test('mapAsync,state,destroyed')
+ .desc('Test that mapAsync is an error when called on a destroyed buffer.')
+ .paramsSubcasesOnly(u => u.combine('mapMode', kMapModeOptions))
+ .fn(async t => {
+ const { mapMode } = t.params;
+ const buffer = t.createMappableBuffer(mapMode, 16);
+
+    // Start mapping the buffer. We are going to destroy it before the mapping resolves, which
+    // will reject the mapping promise with an AbortError.
+ const pending = t.testMapAsyncCall(
+ { validationError: false, earlyRejection: false, rejectName: 'AbortError' },
+ buffer,
+ mapMode
+ );
+
+ buffer.destroy();
+ await t.testMapAsyncCall(
+ { validationError: true, earlyRejection: false, rejectName: 'OperationError' },
+ buffer,
+ mapMode
+ );
+
+ await pending;
+ });
+
+g.test('mapAsync,state,mappedAtCreation')
+ .desc(
+ `Test that mapAsync is an error when called on a buffer mapped at creation,
+ but succeeds after unmapping it.`
+ )
+ .paramsSubcasesOnly([
+ { mapMode: GPUConst.MapMode.READ, validUsage: GPUConst.BufferUsage.MAP_READ },
+ { mapMode: GPUConst.MapMode.WRITE, validUsage: GPUConst.BufferUsage.MAP_WRITE },
+ ])
+ .fn(async t => {
+ const { mapMode, validUsage } = t.params;
+
+ const buffer = t.device.createBuffer({
+ size: 16,
+ usage: validUsage,
+ mappedAtCreation: true,
+ });
+ await t.testMapAsyncCall(
+ { validationError: true, earlyRejection: false, rejectName: 'OperationError' },
+ buffer,
+ mapMode
+ );
+
+ buffer.unmap();
+ await t.testMapAsyncCall('success', buffer, mapMode);
+ });
+
+g.test('mapAsync,state,mapped')
+ .desc(
+ `Test that mapAsync is an error when called on a mapped buffer, but succeeds
+ after unmapping it.`
+ )
+ .paramsSubcasesOnly(u => u.combine('mapMode', kMapModeOptions))
+ .fn(async t => {
+ const { mapMode } = t.params;
+
+ const buffer = t.createMappableBuffer(mapMode, 16);
+ await t.testMapAsyncCall('success', buffer, mapMode);
+ await t.testMapAsyncCall(
+ { validationError: true, earlyRejection: false, rejectName: 'OperationError' },
+ buffer,
+ mapMode
+ );
+
+ buffer.unmap();
+ await t.testMapAsyncCall('success', buffer, mapMode);
+ });
+
+g.test('mapAsync,state,mappingPending')
+ .desc(
+ `Test that mapAsync is rejected when called on a buffer that is being mapped,
+ but succeeds after the previous mapping request is cancelled.`
+ )
+ .paramsSubcasesOnly(u => u.combine('mapMode', kMapModeOptions))
+ .fn(async t => {
+ const { mapMode } = t.params;
+
+ const buffer = t.createMappableBuffer(mapMode, 16);
+
+    // Start mapping the buffer. We are going to unmap it before the mapping resolves, which
+    // will reject the mapping promise with an AbortError.
+ const pending0 = t.testMapAsyncCall(
+ { validationError: false, earlyRejection: false, rejectName: 'AbortError' },
+ buffer,
+ mapMode
+ );
+
+    // Test mapAsync while [[pending_map]] is non-null. This must happen synchronously, so that
+    // we can unmap the previous mapping in the same stack frame; that way this second request is
+    // rejected rather than canceled.
+ const pending1 = t.testMapAsyncCall(
+ { validationError: false, earlyRejection: true, rejectName: 'OperationError' },
+ buffer,
+ mapMode
+ );
+
+    // Unmap the first mapping; this causes the first mapAsync call to reject. It should now be
+    // possible to call mapAsync successfully.
+ buffer.unmap();
+ await t.testMapAsyncCall('success', buffer, mapMode);
+
+ await pending0;
+ await pending1;
+ });
+
+g.test('mapAsync,sizeUnspecifiedOOB')
+ .desc(
+ `Test that mapAsync with size unspecified rejects if offset > buffer.[[size]],
+ with various cases at the limits of the buffer size or with a misaligned offset.
+ Also test for an empty buffer.`
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('mapMode', kMapModeOptions)
+ .combineWithParams([
+ // 0 size buffer.
+ { bufferSize: 0, offset: 0 },
+ { bufferSize: 0, offset: 1 },
+ { bufferSize: 0, offset: kOffsetAlignment },
+
+ // Test with a buffer that's not empty.
+ { bufferSize: 16, offset: 0 },
+ { bufferSize: 16, offset: kOffsetAlignment },
+ { bufferSize: 16, offset: 16 },
+ { bufferSize: 16, offset: 17 },
+ { bufferSize: 16, offset: 16 + kOffsetAlignment },
+ ])
+ )
+ .fn(async t => {
+ const { mapMode, bufferSize, offset } = t.params;
+ const buffer = t.createMappableBuffer(mapMode, bufferSize);
+
+ const successParam =
+ offset <= bufferSize
+ ? 'success'
+ : {
+ validationError: true,
+ earlyRejection: false,
+ rejectName: 'OperationError',
+ };
+ await t.testMapAsyncCall(successParam, buffer, mapMode, offset);
+ });
+
+g.test('mapAsync,offsetAndSizeAlignment')
+ .desc("Test that mapAsync fails if the alignment of offset and size isn't correct.")
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('mapMode', kMapModeOptions)
+ .combine('offset', [0, kOffsetAlignment, kOffsetAlignment / 2])
+ .combine('size', [0, kSizeAlignment, kSizeAlignment / 2])
+ )
+ .fn(async t => {
+ const { mapMode, offset, size } = t.params;
+ const buffer = t.createMappableBuffer(mapMode, 16);
+
+ const successParam =
+ offset % kOffsetAlignment === 0 && size % kSizeAlignment === 0
+ ? 'success'
+ : {
+ validationError: true,
+ earlyRejection: false,
+ rejectName: 'OperationError',
+ };
+ await t.testMapAsyncCall(successParam, buffer, mapMode, offset, size);
+ });
+
+g.test('mapAsync,offsetAndSizeOOB')
+ .desc('Test that mapAsync fails if offset + size is larger than the buffer size.')
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('mapMode', kMapModeOptions)
+ .combineWithParams([
+ // For a 0 size buffer
+ { bufferSize: 0, offset: 0, size: 0 },
+ { bufferSize: 0, offset: 0, size: 4 },
+ { bufferSize: 0, offset: 8, size: 0 },
+
+ // For a small buffer
+ { bufferSize: 16, offset: 0, size: 16 },
+ { bufferSize: 16, offset: kOffsetAlignment, size: 16 },
+
+ { bufferSize: 16, offset: 16, size: 0 },
+ { bufferSize: 16, offset: 16, size: kSizeAlignment },
+
+ { bufferSize: 16, offset: 8, size: 0 },
+ { bufferSize: 16, offset: 8, size: 8 },
+ { bufferSize: 16, offset: 8, size: 8 + kSizeAlignment },
+
+ // For a larger buffer
+ { bufferSize: 1024, offset: 0, size: 1024 },
+ { bufferSize: 1024, offset: kOffsetAlignment, size: 1024 },
+
+ { bufferSize: 1024, offset: 1024, size: 0 },
+ { bufferSize: 1024, offset: 1024, size: kSizeAlignment },
+
+ { bufferSize: 1024, offset: 512, size: 0 },
+ { bufferSize: 1024, offset: 512, size: 512 },
+ { bufferSize: 1024, offset: 512, size: 512 + kSizeAlignment },
+ ])
+ )
+ .fn(async t => {
+ const { mapMode, bufferSize, size, offset } = t.params;
+ const buffer = t.createMappableBuffer(mapMode, bufferSize);
+
+ const successParam =
+ offset + size <= bufferSize
+ ? 'success'
+ : {
+ validationError: true,
+ earlyRejection: false,
+ rejectName: 'OperationError',
+ };
+ await t.testMapAsyncCall(successParam, buffer, mapMode, offset, size);
+ });
+
+g.test('mapAsync,earlyRejection')
+ .desc("Test that mapAsync fails immediately if it's pending map.")
+ .paramsSubcasesOnly(u => u.combine('mapMode', kMapModeOptions).combine('offset2', [0, 8]))
+ .fn(async t => {
+ const { mapMode, offset2 } = t.params;
+
+ const bufferSize = 16;
+ const mapSize = 8;
+ const offset1 = 0;
+
+ const buffer = t.createMappableBuffer(mapMode, bufferSize);
+ const p1 = buffer.mapAsync(mapMode, offset1, mapSize); // succeeds
+ await t.testMapAsyncCall(
+ {
+ validationError: false,
+ earlyRejection: true,
+ rejectName: 'OperationError',
+ },
+ buffer,
+ mapMode,
+ offset2,
+ mapSize
+ );
+ await p1; // ensure the original map still succeeds
+ });
+
+g.test('mapAsync,abort_over_invalid_error')
+ .desc(
+    `Test that the AbortError from unmap takes precedence over a validation error.
+TODO
+ - Add other validation error tests (e.g. offset is not a multiple of 8)
+ `
+ )
+ .paramsSubcasesOnly(u =>
+ u.combine('mapMode', kMapModeOptions).combine('unmapBeforeResolve', [true, false])
+ )
+ .fn(async t => {
+ const { mapMode, unmapBeforeResolve } = t.params;
+ const bufferSize = 8;
+ const buffer = t.createMappableBuffer(mapMode, bufferSize);
+ await buffer.mapAsync(mapMode);
+
+ if (unmapBeforeResolve) {
+      // The AbortError from unmap should take precedence over the validation error.
+ const pending = t.testMapAsyncCall(
+ { validationError: true, earlyRejection: false, rejectName: 'AbortError' },
+ buffer,
+ mapMode
+ );
+ buffer.unmap();
+ await pending;
+ } else {
+      // Mapping an already-mapped buffer should cause a validation error.
+ await t.testMapAsyncCall(
+ { validationError: true, earlyRejection: false, rejectName: 'OperationError' },
+ buffer,
+ mapMode
+ );
+ buffer.unmap();
+ }
+ });
+
+g.test('getMappedRange,state,mapped')
+ .desc('Test that it is valid to call getMappedRange in the mapped state')
+ .paramsSubcasesOnly(u => u.combine('mapMode', kMapModeOptions))
+ .fn(async t => {
+ const { mapMode } = t.params;
+ const bufferSize = 16;
+ const buffer = t.createMappableBuffer(mapMode, bufferSize);
+ await buffer.mapAsync(mapMode);
+
+ const data = buffer.getMappedRange();
+ t.expect(data instanceof ArrayBuffer);
+ t.expect(data.byteLength === bufferSize);
+
+    // mapAsync on an already-mapped buffer should be rejected.
+ const pending = t.testMapAsyncCall(
+ { validationError: true, earlyRejection: false, rejectName: 'OperationError' },
+ buffer,
+ mapMode
+ );
+ t.expect(data.byteLength === bufferSize);
+ await pending;
+
+ buffer.unmap();
+
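+    // unmap() detaches the ArrayBuffer previously returned by getMappedRange, so its byteLength
+    // becomes 0.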
+ t.expect(data.byteLength === 0);
+ });
+
+g.test('getMappedRange,state,mappedAtCreation')
+ .desc(
+ `Test that, in the mapped-at-creation state, it is valid to call getMappedRange, for all buffer usages,
+ and invalid to call mapAsync, for all map modes.`
+ )
+ .paramsSubcasesOnly(u =>
+ u.combine('bufferUsage', kBufferUsages).combine('mapMode', kMapModeOptions)
+ )
+ .fn(async t => {
+ const { bufferUsage, mapMode } = t.params;
+ const bufferSize = 16;
+ const buffer = t.device.createBuffer({
+ usage: bufferUsage,
+ size: bufferSize,
+ mappedAtCreation: true,
+ });
+
+ const data = buffer.getMappedRange();
+ t.expect(data instanceof ArrayBuffer);
+ t.expect(data.byteLength === bufferSize);
+
+    // mapAsync on an already-mapped buffer should be rejected.
+ const pending = t.testMapAsyncCall(
+ { validationError: true, earlyRejection: false, rejectName: 'OperationError' },
+ buffer,
+ mapMode
+ );
+ t.expect(data.byteLength === bufferSize);
+ await pending;
+
+ buffer.unmap();
+
+ t.expect(data.byteLength === 0);
+ });
+
+g.test('getMappedRange,state,invalid_mappedAtCreation')
+ .desc(
+ `mappedAtCreation should return a mapped buffer, even if the buffer is invalid.
+Like VRAM allocation (see map_oom), validation can be performed asynchronously (in the GPU process)
+so the Content process doesn't necessarily know the buffer is invalid.`
+ )
+ .fn(t => {
+ const buffer = t.expectGPUError('validation', () =>
+ t.device.createBuffer({
+ mappedAtCreation: true,
+ size: 16,
+ usage: 0xffff_ffff, // Invalid usage
+ })
+ );
+
+    // getMappedRange should still succeed even though the buffer is invalid.
+ buffer.getMappedRange();
+ });
+
+g.test('getMappedRange,state,mappedAgain')
+ .desc(
+    'Test that it is valid to call getMappedRange in the mapped state, even after a duplicate mapAsync call'
+ )
+ .paramsSubcasesOnly(u => u.combine('mapMode', kMapModeOptions))
+ .fn(async t => {
+ const { mapMode } = t.params;
+ const buffer = t.createMappableBuffer(mapMode, 16);
+ await buffer.mapAsync(mapMode);
+
+    // Calling mapAsync again on an already-mapped buffer should fail.
+ await t.testMapAsyncCall(
+ { validationError: true, earlyRejection: false, rejectName: 'OperationError' },
+ buffer,
+ mapMode
+ );
+
+    // getMappedRange should still succeed.
+ t.testGetMappedRangeCall(true, buffer);
+ });
+
+g.test('getMappedRange,state,unmapped')
+ .desc(
+ `Test that it is invalid to call getMappedRange in the unmapped state.
+Test for various cases of being unmapped: at creation, after a mapAsync call or after being created mapped.`
+ )
+ .fn(async t => {
+ // It is invalid to call getMappedRange when the buffer starts unmapped when created.
+ {
+ const buffer = t.createMappableBuffer(GPUMapMode.READ, 16);
+ t.testGetMappedRangeCall(false, buffer);
+ }
+
+ // It is invalid to call getMappedRange when the buffer is unmapped after mapAsync.
+ {
+ const buffer = t.createMappableBuffer(GPUMapMode.READ, 16);
+ await buffer.mapAsync(GPUMapMode.READ);
+ buffer.unmap();
+ t.testGetMappedRangeCall(false, buffer);
+ }
+
+ // It is invalid to call getMappedRange when the buffer is unmapped after mappedAtCreation.
+ {
+ const buffer = t.device.createBuffer({
+ usage: GPUBufferUsage.MAP_READ,
+ size: 16,
+ mappedAtCreation: true,
+ });
+ buffer.unmap();
+ t.testGetMappedRangeCall(false, buffer);
+ }
+ });
+
+g.test('getMappedRange,subrange,mapped')
+ .desc(
+    `Test that the ArrayBuffer returned by an earlier getMappedRange is detached after unmap, and
+    that the ArrayBuffer returned after re-mapping covers the correct subrange.`
+ )
+ .params(u => u.combine('mapMode', kMapModeOptions))
+ .fn(async t => {
+ const { mapMode } = t.params;
+ const bufferSize = 16;
+ const offset = 8;
+ const subrangeSize = bufferSize - offset;
+ const buffer = t.createMappableBuffer(mapMode, bufferSize);
+ await buffer.mapAsync(mapMode);
+
+ const data0 = buffer.getMappedRange();
+ t.expect(data0 instanceof ArrayBuffer);
+ t.expect(data0.byteLength === bufferSize);
+
+ buffer.unmap();
+ t.expect(data0.byteLength === 0);
+
+ await buffer.mapAsync(mapMode, offset);
+ const data1 = buffer.getMappedRange(8);
+
+ t.expect(data0.byteLength === 0);
+ t.expect(data1.byteLength === subrangeSize);
+ });
+
+g.test('getMappedRange,subrange,mappedAtCreation')
+ .desc(
+    `Test that the ArrayBuffer returned by an earlier getMappedRange is detached after unmap, and
+    that the ArrayBuffer returned after re-mapping covers the correct subrange.`
+ )
+ .fn(async t => {
+ const bufferSize = 16;
+ const offset = 8;
+ const subrangeSize = bufferSize - offset;
+ const buffer = t.device.createBuffer({
+ size: bufferSize,
+ usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ,
+ mappedAtCreation: true,
+ });
+
+ const data0 = buffer.getMappedRange();
+ t.expect(data0 instanceof ArrayBuffer);
+ t.expect(data0.byteLength === bufferSize);
+
+ buffer.unmap();
+ t.expect(data0.byteLength === 0);
+
+ await buffer.mapAsync(GPUMapMode.READ, offset);
+ const data1 = buffer.getMappedRange(8);
+
+ t.expect(data0.byteLength === 0);
+ t.expect(data1.byteLength === subrangeSize);
+ });
+
+g.test('getMappedRange,state,destroyed')
+ .desc(
+ `Test that it is invalid to call getMappedRange in the destroyed state.
+Test for various cases of being destroyed: at creation, after a mapAsync call or after being created mapped.`
+ )
+ .fn(async t => {
+ // It is invalid to call getMappedRange when the buffer is destroyed when unmapped.
+ {
+ const buffer = t.createMappableBuffer(GPUMapMode.READ, 16);
+ buffer.destroy();
+ t.testGetMappedRangeCall(false, buffer);
+ }
+
+ // It is invalid to call getMappedRange when the buffer is destroyed when mapped.
+ {
+ const buffer = t.createMappableBuffer(GPUMapMode.READ, 16);
+ await buffer.mapAsync(GPUMapMode.READ);
+ buffer.destroy();
+ t.testGetMappedRangeCall(false, buffer);
+ }
+
+ // It is invalid to call getMappedRange when the buffer is destroyed when mapped at creation.
+ {
+ const buffer = t.device.createBuffer({
+ usage: GPUBufferUsage.MAP_READ,
+ size: 16,
+ mappedAtCreation: true,
+ });
+ buffer.destroy();
+ t.testGetMappedRangeCall(false, buffer);
+ }
+ });
+
+g.test('getMappedRange,state,mappingPending')
+ .desc(`Test that it is invalid to call getMappedRange in the mappingPending state.`)
+ .paramsSubcasesOnly(u => u.combine('mapMode', kMapModeOptions))
+ .fn(async t => {
+ const { mapMode } = t.params;
+ const buffer = t.createMappableBuffer(mapMode, 16);
+
+ /* noawait */ const mapping0 = buffer.mapAsync(mapMode);
+    // The second mapping should be rejected.
+ const mapping1 = t.testMapAsyncCall(
+ { validationError: false, earlyRejection: true, rejectName: 'OperationError' },
+ buffer,
+ mapMode
+ );
+
+ // invalid in mappingPending state
+ t.testGetMappedRangeCall(false, buffer);
+
+ await mapping0;
+
+ // valid after buffer is mapped
+ t.testGetMappedRangeCall(true, buffer);
+
+ await mapping1;
+ });
+
+g.test('getMappedRange,offsetAndSizeAlignment,mapped')
+ .desc(`Test that getMappedRange fails if the alignment of offset and size isn't correct.`)
+ .params(u =>
+ u
+ .combine('mapMode', kMapModeOptions)
+ .beginSubcases()
+ .combine('mapOffset', [0, kOffsetAlignment])
+ .combine('offset', [0, kOffsetAlignment, kOffsetAlignment / 2])
+ .combine('size', [0, kSizeAlignment, kSizeAlignment / 2])
+ )
+ .fn(async t => {
+ const { mapMode, mapOffset, offset, size } = t.params;
+ const buffer = t.createMappableBuffer(mapMode, 32);
+ await buffer.mapAsync(mapMode, mapOffset);
+
+ const success = offset % kOffsetAlignment === 0 && size % kSizeAlignment === 0;
+ t.testGetMappedRangeCall(success, buffer, offset + mapOffset, size);
+ });
+
+g.test('getMappedRange,offsetAndSizeAlignment,mappedAtCreation')
+ .desc(`Test that getMappedRange fails if the alignment of offset and size isn't correct.`)
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('offset', [0, kOffsetAlignment, kOffsetAlignment / 2])
+ .combine('size', [0, kSizeAlignment, kSizeAlignment / 2])
+ )
+ .fn(t => {
+ const { offset, size } = t.params;
+ const buffer = t.device.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.COPY_DST,
+ mappedAtCreation: true,
+ });
+ const success = offset % kOffsetAlignment === 0 && size % kSizeAlignment === 0;
+ t.testGetMappedRangeCall(success, buffer, offset, size);
+ });
+
+g.test('getMappedRange,sizeAndOffsetOOB,mappedAtCreation')
+ .desc(
+    `Test that getMappedRange offset + size must not exceed the buffer size for a
+    buffer mapped at creation (and offset has no constraints on its own).`
+ )
+ .paramsSubcasesOnly([
+ // Tests for a zero-sized buffer, with and without a size defined.
+ { bufferSize: 0, offset: undefined, size: undefined },
+ { bufferSize: 0, offset: undefined, size: 0 },
+ { bufferSize: 0, offset: undefined, size: kSizeAlignment },
+ { bufferSize: 0, offset: 0, size: undefined },
+ { bufferSize: 0, offset: 0, size: 0 },
+ { bufferSize: 0, offset: kOffsetAlignment, size: undefined },
+ { bufferSize: 0, offset: kOffsetAlignment, size: 0 },
+
+ // Tests for a non-empty buffer, with an undefined offset.
+ { bufferSize: 80, offset: undefined, size: 80 },
+ { bufferSize: 80, offset: undefined, size: 80 + kSizeAlignment },
+
+ // Tests for a non-empty buffer, with an undefined size.
+ { bufferSize: 80, offset: undefined, size: undefined },
+ { bufferSize: 80, offset: 0, size: undefined },
+ { bufferSize: 80, offset: kOffsetAlignment, size: undefined },
+ { bufferSize: 80, offset: 80, size: undefined },
+ { bufferSize: 80, offset: 80 + kOffsetAlignment, size: undefined },
+
+ // Tests for a non-empty buffer with a size defined.
+ { bufferSize: 80, offset: 0, size: 80 },
+ { bufferSize: 80, offset: 0, size: 80 + kSizeAlignment },
+ { bufferSize: 80, offset: kOffsetAlignment, size: 80 },
+
+ { bufferSize: 80, offset: 40, size: 40 },
+ { bufferSize: 80, offset: 40 + kOffsetAlignment, size: 40 },
+ { bufferSize: 80, offset: 40, size: 40 + kSizeAlignment },
+ ])
+ .fn(t => {
+ const { bufferSize, offset, size } = t.params;
+ const buffer = t.device.createBuffer({
+ size: bufferSize,
+ usage: GPUBufferUsage.COPY_DST,
+ mappedAtCreation: true,
+ });
+
+ const actualOffset = offset ?? 0;
+ const actualSize = size ?? bufferSize - actualOffset;
+
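+    // The requested range (with defaults applied) must lie entirely within the buffer.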
+ const success = actualOffset <= bufferSize && actualOffset + actualSize <= bufferSize;
+ t.testGetMappedRangeCall(success, buffer, offset, size);
+ });
+
+g.test('getMappedRange,sizeAndOffsetOOB,mapped')
+  .desc('Test that getMappedRange offset + size must stay within the range mapped by mapAsync.')
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('mapMode', kMapModeOptions)
+ .combineWithParams([
+ // Tests for an empty buffer, and implicit mapAsync size.
+ { bufferSize: 0, mapOffset: 0, mapSize: undefined, offset: undefined, size: undefined },
+ { bufferSize: 0, mapOffset: 0, mapSize: undefined, offset: undefined, size: 0 },
+ {
+ bufferSize: 0,
+ mapOffset: 0,
+ mapSize: undefined,
+ offset: undefined,
+ size: kSizeAlignment,
+ },
+ { bufferSize: 0, mapOffset: 0, mapSize: undefined, offset: 0, size: undefined },
+ { bufferSize: 0, mapOffset: 0, mapSize: undefined, offset: 0, size: 0 },
+ {
+ bufferSize: 0,
+ mapOffset: 0,
+ mapSize: undefined,
+ offset: kOffsetAlignment,
+ size: undefined,
+ },
+ { bufferSize: 0, mapOffset: 0, mapSize: undefined, offset: kOffsetAlignment, size: 0 },
+
+ // Tests for an empty buffer, and explicit mapAsync size.
+ { bufferSize: 0, mapOffset: 0, mapSize: 0, offset: undefined, size: undefined },
+ { bufferSize: 0, mapOffset: 0, mapSize: 0, offset: 0, size: undefined },
+ { bufferSize: 0, mapOffset: 0, mapSize: 0, offset: 0, size: 0 },
+ { bufferSize: 0, mapOffset: 0, mapSize: 0, offset: kOffsetAlignment, size: undefined },
+ { bufferSize: 0, mapOffset: 0, mapSize: 0, offset: kOffsetAlignment, size: 0 },
+
+ // Test for a fully implicit mapAsync call
+ { bufferSize: 80, mapOffset: undefined, mapSize: undefined, offset: 0, size: 80 },
+ {
+ bufferSize: 80,
+ mapOffset: undefined,
+ mapSize: undefined,
+ offset: 0,
+ size: 80 + kSizeAlignment,
+ },
+ {
+ bufferSize: 80,
+ mapOffset: undefined,
+ mapSize: undefined,
+ offset: kOffsetAlignment,
+ size: 80,
+ },
+
+ // Test for a mapAsync call with an implicit size
+ { bufferSize: 80, mapOffset: 24, mapSize: undefined, offset: 24, size: 80 - 24 },
+ {
+ bufferSize: 80,
+ mapOffset: 24,
+ mapSize: undefined,
+ offset: 0,
+ size: 80 - 24 + kSizeAlignment,
+ },
+ {
+ bufferSize: 80,
+ mapOffset: 24,
+ mapSize: undefined,
+ offset: kOffsetAlignment,
+ size: 80 - 24,
+ },
+
+ // Test for a non-empty buffer fully mapped.
+ { bufferSize: 80, mapOffset: 0, mapSize: 80, offset: 0, size: 80 },
+ { bufferSize: 80, mapOffset: 0, mapSize: 80, offset: kOffsetAlignment, size: 80 },
+ { bufferSize: 80, mapOffset: 0, mapSize: 80, offset: 0, size: 80 + kSizeAlignment },
+
+ { bufferSize: 80, mapOffset: 0, mapSize: 80, offset: 40, size: 40 },
+ { bufferSize: 80, mapOffset: 0, mapSize: 80, offset: 40 + kOffsetAlignment, size: 40 },
+ { bufferSize: 80, mapOffset: 0, mapSize: 80, offset: 40, size: 40 + kSizeAlignment },
+
+ // Test for a buffer partially mapped.
+ { bufferSize: 80, mapOffset: 24, mapSize: 40, offset: 24, size: 40 },
+ { bufferSize: 80, mapOffset: 24, mapSize: 40, offset: 24 - kOffsetAlignment, size: 40 },
+ { bufferSize: 80, mapOffset: 24, mapSize: 40, offset: 24 + kOffsetAlignment, size: 40 },
+ { bufferSize: 80, mapOffset: 24, mapSize: 40, offset: 24, size: 40 + kSizeAlignment },
+
+ // Test for a partially mapped buffer with implicit size and offset for getMappedRange.
+ // - Buffer partially mapped in the middle
+ { bufferSize: 80, mapOffset: 24, mapSize: 40, offset: undefined, size: undefined },
+ { bufferSize: 80, mapOffset: 24, mapSize: 40, offset: 0, size: undefined },
+ { bufferSize: 80, mapOffset: 24, mapSize: 40, offset: 24, size: undefined },
+ // - Buffer partially mapped to the end
+ { bufferSize: 80, mapOffset: 24, mapSize: undefined, offset: 24, size: undefined },
+ { bufferSize: 80, mapOffset: 24, mapSize: undefined, offset: 80, size: undefined },
+ // - Buffer partially mapped from the start
+ { bufferSize: 80, mapOffset: 0, mapSize: 64, offset: undefined, size: undefined },
+ { bufferSize: 80, mapOffset: 0, mapSize: 64, offset: undefined, size: 64 },
+ ])
+ )
+ .fn(async t => {
+ const { mapMode, bufferSize, mapOffset, mapSize, offset, size } = t.params;
+ const buffer = t.createMappableBuffer(mapMode, bufferSize);
+ await buffer.mapAsync(mapMode, mapOffset, mapSize);
+
+ const actualMapOffset = mapOffset ?? 0;
+ const actualMapSize = mapSize ?? bufferSize - actualMapOffset;
+
+ const actualOffset = offset ?? 0;
+ const actualSize = size ?? bufferSize - actualOffset;
+
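+    // The requested range must lie entirely within the currently mapped region: it must start at
+    // or after the mapped offset and end at or before the end of the mapped range.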
+ const success =
+ actualOffset >= actualMapOffset &&
+ actualOffset <= bufferSize &&
+ actualOffset + actualSize <= actualMapOffset + actualMapSize;
+ t.testGetMappedRangeCall(success, buffer, offset, size);
+ });
+
+g.test('getMappedRange,disjointRanges')
+  .desc('Test that the ranges requested through getMappedRange must be disjoint.')
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('remapBetweenCalls', [false, true])
+ .combineWithParams([
+ // Disjoint ranges with one that's empty.
+ { offset1: 8, size1: 0, offset2: 8, size2: 8 },
+ { offset1: 16, size1: 0, offset2: 8, size2: 8 },
+
+ { offset1: 8, size1: 8, offset2: 8, size2: 0 },
+ { offset1: 8, size1: 8, offset2: 16, size2: 0 },
+
+ // Disjoint ranges with both non-empty.
+ { offset1: 0, size1: 8, offset2: 8, size2: 8 },
+ { offset1: 16, size1: 8, offset2: 8, size2: 8 },
+
+ { offset1: 8, size1: 8, offset2: 0, size2: 8 },
+ { offset1: 8, size1: 8, offset2: 16, size2: 8 },
+
+ // Empty range contained inside another one.
+ { offset1: 16, size1: 20, offset2: 24, size2: 0 },
+ { offset1: 24, size1: 0, offset2: 16, size2: 20 },
+
+ // Ranges that overlap only partially.
+ { offset1: 16, size1: 20, offset2: 8, size2: 20 },
+ { offset1: 16, size1: 20, offset2: 32, size2: 20 },
+
+ // Ranges that include one another.
+ { offset1: 0, size1: 80, offset2: 16, size2: 20 },
+ { offset1: 16, size1: 20, offset2: 0, size2: 80 },
+ ])
+ )
+ .fn(async t => {
+ const { offset1, size1, offset2, size2, remapBetweenCalls } = t.params;
+ const buffer = t.device.createBuffer({ size: 80, usage: GPUBufferUsage.MAP_READ });
+ await buffer.mapAsync(GPUMapMode.READ);
+
+ t.testGetMappedRangeCall(true, buffer, offset1, size1);
+
+ if (remapBetweenCalls) {
+ buffer.unmap();
+ await buffer.mapAsync(GPUMapMode.READ);
+ }
+
+ const range1StartsAfter2 = offset1 >= offset2 + size2;
+ const range2StartsAfter1 = offset2 >= offset1 + size1;
+ const disjoint = range1StartsAfter2 || range2StartsAfter1;
+ const success = disjoint || remapBetweenCalls;
+
+ t.testGetMappedRangeCall(success, buffer, offset2, size2);
+ });
+
+g.test('getMappedRange,disjoinRanges_many')
+ .desc('Test getting a lot of small ranges, and that the disjoint check checks them all.')
+ .fn(async t => {
+ const kStride = 256;
+ const kNumStrides = 256;
+
+ const buffer = t.device.createBuffer({
+ size: kStride * kNumStrides,
+ usage: GPUBufferUsage.MAP_READ,
+ });
+ await buffer.mapAsync(GPUMapMode.READ);
+
+ // Get a lot of small mapped ranges.
+ for (let stride = 0; stride < kNumStrides; stride++) {
+ t.testGetMappedRangeCall(true, buffer, stride * kStride, 8);
+ }
+
+    // Check that, for each range, getting an overlapping range is invalid, and that getting
+    // ranges covering the rest of the buffer is valid.
+ for (let stride = 0; stride < kNumStrides; stride++) {
+ t.testGetMappedRangeCall(false, buffer, stride * kStride, kStride);
+ t.testGetMappedRangeCall(true, buffer, stride * kStride + 8, kStride - 8);
+ }
+ });
+
+g.test('unmap,state,unmapped')
+ .desc(
+ `Test it is valid to call unmap on a buffer that is unmapped (at creation, or after
+ mappedAtCreation or mapAsync)`
+ )
+ .fn(async t => {
+ // It is valid to call unmap after creation of an unmapped buffer.
+ {
+ const buffer = t.device.createBuffer({ size: 16, usage: GPUBufferUsage.MAP_READ });
+ buffer.unmap();
+ }
+
+ // It is valid to call unmap after unmapping a mapAsynced buffer.
+ {
+ const buffer = t.createMappableBuffer(GPUMapMode.READ, 16);
+ await buffer.mapAsync(GPUMapMode.READ);
+ buffer.unmap();
+ buffer.unmap();
+ }
+
+ // It is valid to call unmap after unmapping a mappedAtCreation buffer.
+ {
+ const buffer = t.device.createBuffer({
+ usage: GPUBufferUsage.MAP_READ,
+ size: 16,
+ mappedAtCreation: true,
+ });
+ buffer.unmap();
+ buffer.unmap();
+ }
+ });
+
+g.test('unmap,state,destroyed')
+ .desc(
+ `Test it is valid to call unmap on a buffer that is destroyed (at creation, or after
+ mappedAtCreation or mapAsync)`
+ )
+ .fn(async t => {
+ // It is valid to call unmap after destruction of an unmapped buffer.
+ {
+ const buffer = t.device.createBuffer({ size: 16, usage: GPUBufferUsage.MAP_READ });
+ buffer.destroy();
+ buffer.unmap();
+ }
+
+ // It is valid to call unmap after destroying a mapAsynced buffer.
+ {
+ const buffer = t.createMappableBuffer(GPUMapMode.READ, 16);
+ await buffer.mapAsync(GPUMapMode.READ);
+ buffer.destroy();
+ buffer.unmap();
+ }
+
+ // It is valid to call unmap after destroying a mappedAtCreation buffer.
+ {
+ const buffer = t.device.createBuffer({
+ usage: GPUBufferUsage.MAP_READ,
+ size: 16,
+ mappedAtCreation: true,
+ });
+ buffer.destroy();
+ buffer.unmap();
+ }
+ });
+
+g.test('unmap,state,mappedAtCreation')
+ .desc('Test it is valid to call unmap on a buffer mapped at creation, for various usages')
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('bufferUsage', kBufferUsages)
+ )
+ .fn(t => {
+ const { bufferUsage } = t.params;
+ const buffer = t.device.createBuffer({ size: 16, usage: bufferUsage, mappedAtCreation: true });
+
+ buffer.unmap();
+ });
+
+g.test('unmap,state,mapped')
+ .desc("Test it is valid to call unmap on a buffer that's mapped")
+ .paramsSubcasesOnly(u => u.combine('mapMode', kMapModeOptions))
+ .fn(async t => {
+ const { mapMode } = t.params;
+ const buffer = t.createMappableBuffer(mapMode, 16);
+
+ await buffer.mapAsync(mapMode);
+ buffer.unmap();
+ });
+
+g.test('unmap,state,mappingPending')
+ .desc("Test it is valid to call unmap on a buffer that's being mapped")
+ .paramsSubcasesOnly(u => u.combine('mapMode', kMapModeOptions))
+ .fn(async t => {
+ const { mapMode } = t.params;
+ const buffer = t.createMappableBuffer(mapMode, 16);
+
+ const pending = t.testMapAsyncCall(
+ { validationError: false, earlyRejection: false, rejectName: 'AbortError' },
+ buffer,
+ mapMode
+ );
+ buffer.unmap();
+ await pending;
+ });
+
+g.test('gc_behavior,mappedAtCreation')
+ .desc(
+ "Test that GCing the buffer while mappings are handed out doesn't invalidate them - mappedAtCreation case"
+ )
+ .fn(async t => {
+ let buffer = null;
+ buffer = t.device.createBuffer({
+ size: 256,
+ usage: GPUBufferUsage.COPY_DST,
+ mappedAtCreation: true,
+ });
+
+ // Write some non-zero data to the buffer.
+ const contents = new Uint32Array(buffer.getMappedRange());
+ for (let i = 0; i < contents.length; i++) {
+ contents[i] = i;
+ }
+
+    // Trigger garbage collection that should collect the buffer (or behave as if it had).
+    // NOTE: This won't fail unless the browser immediately starts reusing the memory, or gives it
+    // back to the OS. One good way for browsers to check their logic is to zero out the memory on
+    // GPUBuffer (or internal gpu::Buffer-like object) destruction.
+ buffer = null;
+ await attemptGarbageCollection();
+
+ // Use the mapping again both for read and write, it should work.
+ for (let i = 0; i < contents.length; i++) {
+ t.expect(contents[i] === i);
+ contents[i] = i + 1;
+ }
+ });
+
+g.test('gc_behavior,mapAsync')
+ .desc(
+ "Test that GCing the buffer while mappings are handed out doesn't invalidate them - mapAsync case"
+ )
+ .paramsSubcasesOnly(u => u.combine('mapMode', kMapModeOptions))
+ .fn(async t => {
+ const { mapMode } = t.params;
+
+ let buffer = null;
+ buffer = t.createMappableBuffer(mapMode, 256);
+ await buffer.mapAsync(mapMode);
+
+ // Write some non-zero data to the buffer.
+ const contents = new Uint32Array(buffer.getMappedRange());
+ for (let i = 0; i < contents.length; i++) {
+ contents[i] = i;
+ }
+
+    // Trigger garbage collection that should collect the buffer (or behave as if it had).
+    // NOTE: This won't fail unless the browser immediately starts reusing the memory, or gives it
+    // back to the OS. One good way for browsers to check their logic is to zero out the memory on
+    // GPUBuffer (or internal gpu::Buffer-like object) destruction.
+ buffer = null;
+ await attemptGarbageCollection();
+
+ // Use the mapping again both for read and write, it should work.
+ for (let i = 0; i < contents.length; i++) {
+ t.expect(contents[i] === i);
+ contents[i] = i + 1;
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/buffer/threading.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/buffer/threading.spec.ts
new file mode 100644
index 0000000000..b449b36d25
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/buffer/threading.spec.ts
@@ -0,0 +1,14 @@
+export const description = `
+TODO:
+- Try to map on one thread while {pending, mapped, mappedAtCreation, mappedAtCreation+unmap+mapped}
+ on another thread.
+- Invalid to postMessage a mapped range's ArrayBuffer or ArrayBufferView
+ {with, without} it being in the transfer array.
+- Copy GPUBuffer to another thread while {pending, mapped, mappedAtCreation} on {same,diff} thread
+ (valid), then try to map on that thread (invalid)
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { ValidationTest } from '../validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/features/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/features/README.txt
new file mode 100644
index 0000000000..608e66d18f
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/features/README.txt
@@ -0,0 +1,10 @@
+Test every method or option that shouldn't be allowed without a feature enabled.
+If the feature is not enabled, any use of an enum value added by a feature must be an
+*exception*, per <https://github.com/gpuweb/gpuweb/blob/main/design/ErrorConventions.md>.
+
+- x= that feature {enabled, disabled}
+
+Generally one file for each feature name, but some may be grouped (e.g. one file for all optional
+query types, one file for all optional texture formats).
+
+TODO: implement
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/features/query_types.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/features/query_types.spec.ts
new file mode 100644
index 0000000000..8016252b1e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/features/query_types.spec.ts
@@ -0,0 +1,76 @@
+export const description = `
+Tests for capability checking for features enabling optional query types.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { ValidationTest } from '../../validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('createQuerySet')
+ .desc(
+ `
+ Tests that creating a query set throws a type error exception if the features don't contain
+ 'timestamp-query'.
+ - createQuerySet
+ - type {occlusion, timestamp}
+ - x= timestamp query {enable, disable}
+ `
+ )
+ .params(u =>
+ u
+ .combine('type', ['occlusion', 'timestamp'] as const)
+ .combine('featureContainsTimestampQuery', [false, true])
+ )
+ .beforeAllSubcases(t => {
+ const { featureContainsTimestampQuery } = t.params;
+
+ const requiredFeatures: GPUFeatureName[] = [];
+ if (featureContainsTimestampQuery) {
+ requiredFeatures.push('timestamp-query');
+ }
+
+ t.selectDeviceOrSkipTestCase({ requiredFeatures });
+ })
+ .fn(t => {
+ const { type, featureContainsTimestampQuery } = t.params;
+
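+    // Only 'timestamp' query sets are gated on a feature; 'occlusion' query sets never require one.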
+ const count = 1;
+ const shouldException = type === 'timestamp' && !featureContainsTimestampQuery;
+
+ t.shouldThrow(shouldException ? 'TypeError' : false, () => {
+ t.device.createQuerySet({ type, count });
+ });
+ });
+
+g.test('writeTimestamp')
+ .desc(
+ `
+ Tests that writing a timestamp throws a type error exception if the features don't contain
+ 'timestamp-query'.
+ `
+ )
+ .params(u => u.combine('featureContainsTimestampQuery', [false, true]))
+ .beforeAllSubcases(t => {
+ const { featureContainsTimestampQuery } = t.params;
+
+ const requiredFeatures: GPUFeatureName[] = [];
+ if (featureContainsTimestampQuery) {
+ requiredFeatures.push('timestamp-query');
+ }
+
+ t.selectDeviceOrSkipTestCase({ requiredFeatures });
+ })
+ .fn(t => {
+ const { featureContainsTimestampQuery } = t.params;
+
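+    // Fall back to an 'occlusion' query set when 'timestamp-query' is unavailable so that
+    // createQuerySet itself doesn't throw; only the writeTimestamp call below is under test.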
+ const querySet = t.device.createQuerySet({
+ type: featureContainsTimestampQuery ? 'timestamp' : 'occlusion',
+ count: 1,
+ });
+ const encoder = t.createEncoder('non-pass');
+
+ t.shouldThrow(featureContainsTimestampQuery ? false : 'TypeError', () => {
+ encoder.encoder.writeTimestamp(querySet, 0);
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/features/texture_formats.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/features/texture_formats.spec.ts
new file mode 100644
index 0000000000..eb7005dd29
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/features/texture_formats.spec.ts
@@ -0,0 +1,463 @@
+export const description = `
+Tests for capability checking for features enabling optional texture formats.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { getGPU } from '../../../../../common/util/navigator_gpu.js';
+import { assert } from '../../../../../common/util/util.js';
+import { kAllTextureFormats, kTextureFormatInfo } from '../../../../format_info.js';
+import { kAllCanvasTypes, createCanvas } from '../../../../util/create_elements.js';
+import { ValidationTest } from '../../validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+const kOptionalTextureFormats = kAllTextureFormats.filter(
+ t => kTextureFormatInfo[t].feature !== undefined
+);
+
+g.test('texture_descriptor')
+ .desc(
+ `
+ Test creating a texture with an optional texture format will fail if the required optional feature
+ is not enabled.
+ `
+ )
+ .params(u =>
+ u.combine('format', kOptionalTextureFormats).combine('enable_required_feature', [true, false])
+ )
+ .beforeAllSubcases(t => {
+ const { format, enable_required_feature } = t.params;
+
+ const formatInfo = kTextureFormatInfo[format];
+ if (enable_required_feature) {
+ t.selectDeviceOrSkipTestCase(formatInfo.feature);
+ }
+ })
+ .fn(t => {
+ const { format, enable_required_feature } = t.params;
+
+ const formatInfo = kTextureFormatInfo[format];
+ t.shouldThrow(enable_required_feature ? false : 'TypeError', () => {
+ t.device.createTexture({
+ format,
+ size: [formatInfo.blockWidth, formatInfo.blockHeight, 1] as const,
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ });
+ });
+ });
+
+g.test('texture_descriptor_view_formats')
+ .desc(
+ `
+ Test creating a texture with view formats that have an optional texture format will fail if the
+ required optional feature is not enabled.
+ `
+ )
+ .params(u =>
+ u.combine('format', kOptionalTextureFormats).combine('enable_required_feature', [true, false])
+ )
+ .beforeAllSubcases(t => {
+ const { format, enable_required_feature } = t.params;
+
+ const formatInfo = kTextureFormatInfo[format];
+ if (enable_required_feature) {
+ t.selectDeviceOrSkipTestCase(formatInfo.feature);
+ }
+ })
+ .fn(t => {
+ const { format, enable_required_feature } = t.params;
+
+ const formatInfo = kTextureFormatInfo[format];
+ t.shouldThrow(enable_required_feature ? false : 'TypeError', () => {
+ t.device.createTexture({
+ format,
+ size: [formatInfo.blockWidth, formatInfo.blockHeight, 1] as const,
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ viewFormats: [format],
+ });
+ });
+ });
+
+g.test('texture_view_descriptor')
+ .desc(
+ `
+ Test creating a texture view with all texture formats will fail if the required optional feature
+ is not enabled.
+ `
+ )
+ .params(u =>
+ u.combine('format', kOptionalTextureFormats).combine('enable_required_feature', [true, false])
+ )
+ .beforeAllSubcases(t => {
+ const { format, enable_required_feature } = t.params;
+
+ const formatInfo = kTextureFormatInfo[format];
+ if (enable_required_feature) {
+ t.selectDeviceOrSkipTestCase(formatInfo.feature);
+ }
+ })
+ .fn(t => {
+ const { format, enable_required_feature } = t.params;
+
+ // If the required feature isn't enabled then the texture will fail to create and we won't be
+    // able to test createView, so pick an alternate guaranteed format instead. This will almost
+ // certainly not be view-compatible with the format being tested, but that doesn't matter since
+ // createView should throw an exception due to the format feature not being enabled before it
+ // has a chance to validate that the view and texture formats aren't compatible.
+ const textureFormat = enable_required_feature ? format : 'rgba8unorm';
+
+ const formatInfo = kTextureFormatInfo[format];
+ const testTexture = t.device.createTexture({
+ format: textureFormat,
+ size: [formatInfo.blockWidth, formatInfo.blockHeight, 1] as const,
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ });
+ const testViewDesc: GPUTextureViewDescriptor = {
+ format,
+ dimension: '2d',
+ aspect: 'all',
+ arrayLayerCount: 1,
+ baseMipLevel: 0,
+ mipLevelCount: 1,
+ baseArrayLayer: 0,
+ };
+ t.shouldThrow(enable_required_feature ? false : 'TypeError', () => {
+ testTexture.createView(testViewDesc);
+ });
+ });
+
+g.test('canvas_configuration')
+ .desc(
+ `
+ Test configuring a canvas with optional texture formats will throw an exception if the required
+ optional feature is not enabled. Otherwise, a validation error should be generated instead of
+ throwing an exception.
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kOptionalTextureFormats)
+ .combine('canvasType', kAllCanvasTypes)
+ .combine('enable_required_feature', [true, false])
+ )
+ .beforeAllSubcases(t => {
+ const { format, enable_required_feature } = t.params;
+
+ const formatInfo = kTextureFormatInfo[format];
+ if (enable_required_feature) {
+ t.selectDeviceOrSkipTestCase(formatInfo.feature);
+ }
+ })
+ .fn(t => {
+ const { format, canvasType, enable_required_feature } = t.params;
+
+ const canvas = createCanvas(t, canvasType, 2, 2);
+ const ctx = canvas.getContext('webgpu');
+ assert(ctx instanceof GPUCanvasContext, 'Failed to get WebGPU context from canvas');
+
+ const canvasConf = {
+ device: t.device,
+ format,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ };
+
+ if (enable_required_feature) {
+ t.expectValidationError(() => {
+ ctx.configure(canvasConf);
+ });
+ } else {
+ t.shouldThrow('TypeError', () => {
+ ctx.configure(canvasConf);
+ });
+ }
+ });
+
+g.test('canvas_configuration_view_formats')
+ .desc(
+ `
+ Test that configuring a canvas with view formats throws an exception if the required optional
+ feature is not enabled. Otherwise, a validation error should be generated instead of throwing an
+ exception.
+ `
+ )
+ .params(u =>
+ u
+ .combine('viewFormats', [
+ ...kOptionalTextureFormats.map(format => [format]),
+ ['bgra8unorm', 'bc1-rgba-unorm'],
+ ['bc1-rgba-unorm', 'bgra8unorm'],
+ ])
+ .combine('canvasType', kAllCanvasTypes)
+ .combine('enable_required_feature', [true, false])
+ )
+ .beforeAllSubcases(t => {
+ const { viewFormats, enable_required_feature } = t.params;
+
+ if (enable_required_feature) {
+ t.selectDeviceForTextureFormatOrSkipTestCase(viewFormats as GPUTextureFormat[]);
+ }
+ })
+ .fn(t => {
+ const { viewFormats, canvasType, enable_required_feature } = t.params;
+
+ const canvas = createCanvas(t, canvasType, 2, 2);
+ const ctx = canvas.getContext('webgpu');
+ assert(ctx instanceof GPUCanvasContext, 'Failed to get WebGPU context from canvas');
+
+ const canvasConf = {
+ device: t.device,
+ format: 'bgra8unorm' as const,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ viewFormats: viewFormats as GPUTextureFormat[],
+ };
+
+ if (enable_required_feature) {
+ t.expectValidationError(() => {
+ ctx.configure(canvasConf);
+ });
+ } else {
+ t.shouldThrow('TypeError', () => {
+ ctx.configure(canvasConf);
+ });
+ }
+ });
+
+g.test('storage_texture_binding_layout')
+ .desc(
+ `
+ Test creating a GPUStorageTextureBindingLayout with an optional texture format will fail if the
+    required optional feature is not enabled.
+
+ Note: This test has no cases if there are no optional texture formats supporting storage.
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kOptionalTextureFormats)
+ .filter(t => !!kTextureFormatInfo[t.format].color?.storage)
+ .combine('enable_required_feature', [true, false])
+ )
+ .beforeAllSubcases(t => {
+ const { format, enable_required_feature } = t.params;
+
+ const formatInfo = kTextureFormatInfo[format];
+ if (enable_required_feature) {
+ t.selectDeviceOrSkipTestCase(formatInfo.feature);
+ }
+ })
+ .fn(t => {
+ const { format, enable_required_feature } = t.params;
+
+ t.shouldThrow(enable_required_feature ? false : 'TypeError', () => {
+ t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE,
+ storageTexture: {
+ format,
+ },
+ },
+ ],
+ });
+ });
+ });
+
+g.test('color_target_state')
+ .desc(
+ `
+ Test creating a render pipeline with an optional texture format set in GPUColorTargetState will
+ fail if the required optional feature is not enabled.
+
+ Note: This test has no cases if there are no optional texture formats supporting color rendering.
+ `
+ )
+ .params(u =>
+ u
+ .combine('isAsync', [false, true])
+ .combine('format', kOptionalTextureFormats)
+ .filter(t => !!kTextureFormatInfo[t.format].colorRender)
+ .combine('enable_required_feature', [true, false])
+ )
+ .beforeAllSubcases(t => {
+ const { format, enable_required_feature } = t.params;
+
+ const formatInfo = kTextureFormatInfo[format];
+ if (enable_required_feature) {
+ t.selectDeviceOrSkipTestCase(formatInfo.feature);
+ }
+ })
+ .fn(t => {
+ const { isAsync, format, enable_required_feature } = t.params;
+
+ t.doCreateRenderPipelineTest(
+ isAsync,
+ enable_required_feature,
+ {
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ @vertex
+ fn main()-> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `
+ @fragment
+ fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format }],
+ },
+ },
+ 'TypeError'
+ );
+ });
+
+g.test('depth_stencil_state')
+ .desc(
+ `
+    Test creating a render pipeline with an optional texture format set in GPUDepthStencilState will
+ fail if the required optional feature is not enabled.
+ `
+ )
+ .params(u =>
+ u
+ .combine('isAsync', [false, true])
+ .combine('format', kOptionalTextureFormats)
+ .filter(t => !!(kTextureFormatInfo[t.format].depth || kTextureFormatInfo[t.format].stencil))
+ .combine('enable_required_feature', [true, false])
+ )
+ .beforeAllSubcases(t => {
+ const { format, enable_required_feature } = t.params;
+
+ const formatInfo = kTextureFormatInfo[format];
+ if (enable_required_feature) {
+ t.selectDeviceOrSkipTestCase(formatInfo.feature);
+ }
+ })
+ .fn(t => {
+ const { isAsync, format, enable_required_feature } = t.params;
+
+ t.doCreateRenderPipelineTest(
+ isAsync,
+ enable_required_feature,
+ {
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ @vertex
+ fn main()-> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ },
+ depthStencil: {
+ format,
+ depthCompare: 'always',
+ depthWriteEnabled: false,
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `
+ @fragment
+ fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ },
+ 'TypeError'
+ );
+ });
+
+g.test('render_bundle_encoder_descriptor_color_format')
+ .desc(
+ `
+ Test creating a render bundle encoder with an optional texture format set as one of the color
+    formats will fail if the required optional feature is not enabled.
+
+ Note: This test has no cases if there are no optional texture formats supporting color rendering.
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kOptionalTextureFormats)
+ .filter(t => !!kTextureFormatInfo[t.format].colorRender)
+ .combine('enable_required_feature', [true, false])
+ )
+ .beforeAllSubcases(t => {
+ const { format, enable_required_feature } = t.params;
+
+ const formatInfo = kTextureFormatInfo[format];
+ if (enable_required_feature) {
+ t.selectDeviceOrSkipTestCase(formatInfo.feature);
+ }
+ })
+ .fn(t => {
+ const { format, enable_required_feature } = t.params;
+
+ t.shouldThrow(enable_required_feature ? false : 'TypeError', () => {
+ t.device.createRenderBundleEncoder({
+ colorFormats: [format],
+ });
+ });
+ });
+
+g.test('render_bundle_encoder_descriptor_depth_stencil_format')
+ .desc(
+ `
+ Test creating a render bundle encoder with an optional texture format set as the depth stencil
+ format will fail if the required optional feature is not enabled.
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kOptionalTextureFormats)
+ .filter(t => !!(kTextureFormatInfo[t.format].depth || kTextureFormatInfo[t.format].stencil))
+ .combine('enable_required_feature', [true, false])
+ )
+ .beforeAllSubcases(t => {
+ const { format, enable_required_feature } = t.params;
+
+ const formatInfo = kTextureFormatInfo[format];
+ if (enable_required_feature) {
+ t.selectDeviceOrSkipTestCase(formatInfo.feature);
+ }
+ })
+ .fn(t => {
+ const { format, enable_required_feature } = t.params;
+
+ t.shouldThrow(enable_required_feature ? false : 'TypeError', () => {
+ t.device.createRenderBundleEncoder({
+ colorFormats: ['rgba8unorm'],
+ depthStencilFormat: format,
+ });
+ });
+ });
+
+g.test('check_capability_guarantees')
+ .desc(
+ `check "texture-compression-bc" is supported or both "texture-compression-etc2" and "texture-compression-astc" are supported.`
+ )
+ .fn(async t => {
+ const adapter = await getGPU(t.rec).requestAdapter();
+ assert(adapter !== null);
+
+ const features = adapter.features;
+ t.expect(
+ features.has('texture-compression-bc') ||
+ (features.has('texture-compression-etc2') && features.has('texture-compression-astc'))
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/README.txt
new file mode 100644
index 0000000000..3f2434d4ed
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/README.txt
@@ -0,0 +1,8 @@
+Test everything that shouldn't be valid without a higher-than-specified limit.
+
+- x= that limit {default, max supported (if different), lower than default (TODO: if allowed)}
+
+One file for each limit name.
+
+TODO: implement
+TODO: Also test that "alignment" limits require a power of 2.
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/limit_utils.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/limit_utils.ts
new file mode 100644
index 0000000000..fee2ea716e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/limit_utils.ts
@@ -0,0 +1,1089 @@
+import { kUnitCaseParamsBuilder } from '../../../../../common/framework/params_builder.js';
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { getGPU } from '../../../../../common/util/navigator_gpu.js';
+import { assert, range, reorder, ReorderOrder } from '../../../../../common/util/util.js';
+import { getDefaultLimitsForAdapter } from '../../../../capability_info.js';
+import { GPUTestBase } from '../../../../gpu_test.js';
+
+type GPUSupportedLimit = keyof GPUSupportedLimits;
+
+export const kCreatePipelineTypes = [
+ 'createRenderPipeline',
+ 'createRenderPipelineWithFragmentStage',
+ 'createComputePipeline',
+] as const;
+export type CreatePipelineType = (typeof kCreatePipelineTypes)[number];
+
+export const kRenderEncoderTypes = ['render', 'renderBundle'] as const;
+export type RenderEncoderType = (typeof kRenderEncoderTypes)[number];
+
+export const kEncoderTypes = ['compute', 'render', 'renderBundle'] as const;
+export type EncoderType = (typeof kEncoderTypes)[number];
+
+export const kBindGroupTests = ['sameGroup', 'differentGroups'] as const;
+export type BindGroupTest = (typeof kBindGroupTests)[number];
+
+export const kBindingCombinations = [
+ 'vertex',
+ 'fragment',
+ 'vertexAndFragmentWithPossibleVertexStageOverflow',
+ 'vertexAndFragmentWithPossibleFragmentStageOverflow',
+ 'compute',
+] as const;
+export type BindingCombination = (typeof kBindingCombinations)[number];
+
+export function getPipelineTypeForBindingCombination(bindingCombination: BindingCombination) {
+ switch (bindingCombination) {
+ case 'vertex':
+ return 'createRenderPipeline';
+ case 'fragment':
+ case 'vertexAndFragmentWithPossibleVertexStageOverflow':
+ case 'vertexAndFragmentWithPossibleFragmentStageOverflow':
+ return 'createRenderPipelineWithFragmentStage';
+ case 'compute':
+ return 'createComputePipeline';
+ }
+}
+
+function getBindGroupIndex(bindGroupTest: BindGroupTest, i: number) {
+ switch (bindGroupTest) {
+ case 'sameGroup':
+ return 0;
+ case 'differentGroups':
+ return i % 3;
+ }
+}
+
+function getWGSLBindings(
+ order: ReorderOrder,
+ bindGroupTest: BindGroupTest,
+ storageDefinitionWGSLSnippetFn: (i: number, j: number) => string,
+ numBindings: number,
+ id: number
+) {
+ return reorder(
+ order,
+ range(
+ numBindings,
+ i =>
+ `@group(${getBindGroupIndex(
+ bindGroupTest,
+ i
+ )}) @binding(${i}) ${storageDefinitionWGSLSnippetFn(i, id)};`
+ )
+ ).join('\n ');
+}
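+// Illustrative example (not part of the test logic): assuming an order that preserves the
+// natural index order, bindGroupTest 'differentGroups', numBindings 3, id 0, and a
+// hypothetical snippet function `(i, id) => `var<uniform> u${id}_${i}: f32``,
+// getWGSLBindings above produces:
+//   @group(0) @binding(0) var<uniform> u0_0: f32;
+//   @group(1) @binding(1) var<uniform> u0_1: f32;
+//   @group(2) @binding(2) var<uniform> u0_2: f32;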
+
+export function getPerStageWGSLForBindingCombinationImpl(
+ bindingCombination: BindingCombination,
+ order: ReorderOrder,
+ bindGroupTest: BindGroupTest,
+ storageDefinitionWGSLSnippetFn: (i: number, j: number) => string,
+ bodyFn: (numBindings: number, set: number) => string,
+ numBindings: number,
+ extraWGSL = ''
+) {
+ switch (bindingCombination) {
+ case 'vertex':
+ return `
+ ${extraWGSL}
+
+ ${getWGSLBindings(order, bindGroupTest, storageDefinitionWGSLSnippetFn, numBindings, 0)}
+
+ @vertex fn mainVS() -> @builtin(position) vec4f {
+ ${bodyFn(numBindings, 0)}
+ return vec4f(0);
+ }
+ `;
+ case 'fragment':
+ return `
+ ${extraWGSL}
+
+ ${getWGSLBindings(order, bindGroupTest, storageDefinitionWGSLSnippetFn, numBindings, 0)}
+
+ @vertex fn mainVS() -> @builtin(position) vec4f {
+ return vec4f(0);
+ }
+
+ @fragment fn mainFS() {
+ ${bodyFn(numBindings, 0)}
+ }
+ `;
+ case 'vertexAndFragmentWithPossibleVertexStageOverflow': {
+ return `
+ ${extraWGSL}
+
+ ${getWGSLBindings(order, bindGroupTest, storageDefinitionWGSLSnippetFn, numBindings, 0)}
+
+ ${getWGSLBindings(order, bindGroupTest, storageDefinitionWGSLSnippetFn, numBindings - 1, 1)}
+
+ @vertex fn mainVS() -> @builtin(position) vec4f {
+ ${bodyFn(numBindings, 0)}
+ return vec4f(0);
+ }
+
+ @fragment fn mainFS() {
+ ${bodyFn(numBindings - 1, 1)}
+ }
+ `;
+ }
+ case 'vertexAndFragmentWithPossibleFragmentStageOverflow': {
+ return `
+ ${extraWGSL}
+
+ ${getWGSLBindings(order, bindGroupTest, storageDefinitionWGSLSnippetFn, numBindings - 1, 0)}
+
+ ${getWGSLBindings(order, bindGroupTest, storageDefinitionWGSLSnippetFn, numBindings, 1)}
+
+ @vertex fn mainVS() -> @builtin(position) vec4f {
+ ${bodyFn(numBindings - 1, 0)}
+ return vec4f(0);
+ }
+
+ @fragment fn mainFS() {
+ ${bodyFn(numBindings, 1)}
+ }
+ `;
+ }
+ case 'compute':
+ return `
+ ${extraWGSL}
+ ${getWGSLBindings(order, bindGroupTest, storageDefinitionWGSLSnippetFn, numBindings, 0)}
+ @group(3) @binding(0) var<storage, read_write> d: f32;
+ @compute @workgroup_size(1) fn main() {
+ ${bodyFn(numBindings, 0)}
+ }
+ `;
+ break;
+ }
+}
+
+export function getPerStageWGSLForBindingCombination(
+ bindingCombination: BindingCombination,
+ order: ReorderOrder,
+ bindGroupTest: BindGroupTest,
+ storageDefinitionWGSLSnippetFn: (i: number, j: number) => string,
+ usageWGSLSnippetFn: (i: number, j: number) => string,
+ numBindings: number,
+ extraWGSL = ''
+) {
+ return getPerStageWGSLForBindingCombinationImpl(
+ bindingCombination,
+ order,
+ bindGroupTest,
+ storageDefinitionWGSLSnippetFn,
+ (numBindings: number, set: number) =>
+ `${range(numBindings, i => usageWGSLSnippetFn(i, set)).join('\n ')}`,
+ numBindings,
+ extraWGSL
+ );
+}
+
+export function getPerStageWGSLForBindingCombinationStorageTextures(
+ bindingCombination: BindingCombination,
+ order: ReorderOrder,
+ bindGroupTest: BindGroupTest,
+ storageDefinitionWGSLSnippetFn: (i: number, j: number) => string,
+ usageWGSLSnippetFn: (i: number, j: number) => string,
+ numBindings: number,
+ extraWGSL = ''
+) {
+ return getPerStageWGSLForBindingCombinationImpl(
+ bindingCombination,
+ order,
+ bindGroupTest,
+ storageDefinitionWGSLSnippetFn,
+ (numBindings: number, set: number) =>
+ `${range(numBindings, i => usageWGSLSnippetFn(i, set)).join('\n ')}`,
+ numBindings,
+ extraWGSL
+ );
+}
+
+export const kLimitModes = ['defaultLimit', 'adapterLimit'] as const;
+export type LimitMode = (typeof kLimitModes)[number];
+export type LimitsRequest = Record<string, LimitMode>;
+
+export const kMaximumTestValues = ['atLimit', 'overLimit'] as const;
+export type MaximumTestValue = (typeof kMaximumTestValues)[number];
+
+export function getMaximumTestValue(limit: number, testValue: MaximumTestValue) {
+ switch (testValue) {
+ case 'atLimit':
+ return limit;
+ case 'overLimit':
+ return limit + 1;
+ }
+}
+
+export const kMinimumTestValues = ['atLimit', 'underLimit'] as const;
+export type MinimumTestValue = (typeof kMinimumTestValues)[number];
+
+export const kMaximumLimitValueTests = [
+ 'atDefault',
+ 'underDefault',
+ 'betweenDefaultAndMaximum',
+ 'atMaximum',
+ 'overMaximum',
+] as const;
+export type MaximumLimitValueTest = (typeof kMaximumLimitValueTests)[number];
+
+export function getLimitValue(
+ defaultLimit: number,
+ maximumLimit: number,
+ limitValueTest: MaximumLimitValueTest
+) {
+ switch (limitValueTest) {
+ case 'atDefault':
+ return defaultLimit;
+ case 'underDefault':
+ return defaultLimit - 1;
+ case 'betweenDefaultAndMaximum':
+ // The result can be larger than maximum i32.
+ return Math.floor((defaultLimit + maximumLimit) / 2);
+ case 'atMaximum':
+ return maximumLimit;
+ case 'overMaximum':
+ return maximumLimit + 1;
+ }
+}
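+// For example (illustrative values only): with defaultLimit = 4 and maximumLimit = 8,
+// getLimitValue returns 4 for 'atDefault', 3 for 'underDefault', 6 for
+// 'betweenDefaultAndMaximum', 8 for 'atMaximum', and 9 for 'overMaximum'.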
+
+export const kMinimumLimitValueTests = [
+ 'atDefault',
+ 'overDefault',
+ 'betweenDefaultAndMinimum',
+ 'atMinimum',
+ 'underMinimum',
+] as const;
+export type MinimumLimitValueTest = (typeof kMinimumLimitValueTests)[number];
+
+export function getDefaultLimitForAdapter(adapter: GPUAdapter, limit: GPUSupportedLimit): number {
+ const limitInfo = getDefaultLimitsForAdapter(adapter);
+ return limitInfo[limit as keyof typeof limitInfo].default;
+}
+
+export type DeviceAndLimits = {
+ device: GPUDevice;
+ defaultLimit: number;
+ adapterLimit: number;
+ requestedLimit: number;
+ actualLimit: number;
+};
+
+export type SpecificLimitTestInputs = DeviceAndLimits & {
+ testValue: number;
+ shouldError: boolean;
+};
+
+export type MaximumLimitTestInputs = SpecificLimitTestInputs & {
+ testValueName: MaximumTestValue;
+};
+
+const kMinimumLimits = new Set<GPUSupportedLimit>([
+ 'minUniformBufferOffsetAlignment',
+ 'minStorageBufferOffsetAlignment',
+]);
+
+/**
+ * Adds the default parameters to a limit test
+ */
+export const kMaximumLimitBaseParams = kUnitCaseParamsBuilder
+ .combine('limitTest', kMaximumLimitValueTests)
+ .combine('testValueName', kMaximumTestValues);
+
+export const kMinimumLimitBaseParams = kUnitCaseParamsBuilder
+ .combine('limitTest', kMinimumLimitValueTests)
+ .combine('testValueName', kMinimumTestValues);
+
+export class LimitTestsImpl extends GPUTestBase {
+ _adapter: GPUAdapter | null = null;
+ _device: GPUDevice | undefined = undefined;
+ limit: GPUSupportedLimit = '' as GPUSupportedLimit;
+ defaultLimit = 0;
+ adapterLimit = 0;
+
+ override async init() {
+ await super.init();
+ const gpu = getGPU(this.rec);
+ this._adapter = await gpu.requestAdapter();
+ const limit = this.limit;
+ this.defaultLimit = getDefaultLimitForAdapter(this.adapter, limit);
+ this.adapterLimit = this.adapter.limits[limit] as number;
+ assert(!Number.isNaN(this.defaultLimit));
+ assert(!Number.isNaN(this.adapterLimit));
+ }
+
+ get adapter(): GPUAdapter {
+    assert(this._adapter !== null);
+ return this._adapter!;
+ }
+
+ override get device(): GPUDevice {
+ assert(this._device !== undefined, 'device is only valid in _testThenDestroyDevice callback');
+ return this._device;
+ }
+
+ async requestDeviceWithLimits(
+ adapter: GPUAdapter,
+ requiredLimits: Record<string, number>,
+ shouldReject: boolean,
+ requiredFeatures?: GPUFeatureName[]
+ ) {
+ if (shouldReject) {
+ this.shouldReject('OperationError', adapter.requestDevice({ requiredLimits }), {
+ allowMissingStack: true,
+ });
+ return undefined;
+ } else {
+ return await adapter.requestDevice({ requiredLimits, requiredFeatures });
+ }
+ }
+
+ getDefaultOrAdapterLimit(limit: GPUSupportedLimit, limitMode: LimitMode) {
+ switch (limitMode) {
+ case 'defaultLimit':
+ return getDefaultLimitForAdapter(this.adapter, limit);
+ case 'adapterLimit':
+ return this.adapter.limits[limit];
+ }
+ }
+
+ /**
+   * Requests a device from the adapter with a specific required limit and checks that
+   * the resulting limit is correct, or that device creation was rejected if the
+   * requested limit is beyond the maximum supported by the adapter.
+ */
+ async _getDeviceWithSpecificLimit(
+ requestedLimit: number,
+ extraLimits?: LimitsRequest,
+ features?: GPUFeatureName[]
+ ): Promise<DeviceAndLimits | undefined> {
+ const { adapter, limit, adapterLimit, defaultLimit } = this;
+
+ const requiredLimits: Record<string, number> = {};
+ requiredLimits[limit] = requestedLimit;
+
+ if (extraLimits) {
+ for (const [extraLimitStr, limitMode] of Object.entries(extraLimits)) {
+ const extraLimit = extraLimitStr as GPUSupportedLimit;
+ requiredLimits[extraLimit] =
+ limitMode === 'defaultLimit'
+ ? getDefaultLimitForAdapter(adapter, extraLimit)
+ : (adapter.limits[extraLimit] as number);
+ }
+ }
+
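+    // For the "minimum" (alignment) limits, a lower value is the stricter request, so the
+    // rejection condition is inverted relative to the maximum limits.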
+ const shouldReject = kMinimumLimits.has(limit)
+ ? requestedLimit < adapterLimit
+ : requestedLimit > adapterLimit;
+
+ const device = await this.requestDeviceWithLimits(
+ adapter,
+ requiredLimits,
+ shouldReject,
+ features
+ );
+ const actualLimit = (device ? device.limits[limit] : 0) as number;
+
+ if (shouldReject) {
+ this.expect(!device, 'expected no device');
+ } else {
+ if (kMinimumLimits.has(limit)) {
+ if (requestedLimit <= defaultLimit) {
+ this.expect(
+ actualLimit === requestedLimit,
+            `expected actualLimit: ${actualLimit} to equal requestedLimit: ${requestedLimit}`
+ );
+ } else {
+ this.expect(
+ actualLimit === defaultLimit,
+            `expected actualLimit: ${actualLimit} to equal defaultLimit: ${defaultLimit}`
+ );
+ }
+ } else {
+ if (requestedLimit <= defaultLimit) {
+ this.expect(
+ actualLimit === defaultLimit,
+            `expected actualLimit: ${actualLimit} to equal defaultLimit: ${defaultLimit}`
+ );
+ } else {
+ this.expect(
+ actualLimit === requestedLimit,
+            `expected actualLimit: ${actualLimit} to equal requestedLimit: ${requestedLimit}`
+ );
+ }
+ }
+ }
+
+ return device ? { device, defaultLimit, adapterLimit, requestedLimit, actualLimit } : undefined;
+ }
+
+ /**
+   * Requests a device with the limit value selected by the MaximumLimitValueTest and
+   * checks that the resulting limit is correct, or that device creation was rejected if
+   * the requested limit is beyond the maximum supported by the adapter.
+ */
+ async _getDeviceWithRequestedMaximumLimit(
+ limitValueTest: MaximumLimitValueTest,
+ extraLimits?: LimitsRequest,
+ features?: GPUFeatureName[]
+ ): Promise<DeviceAndLimits | undefined> {
+ const { defaultLimit, adapterLimit: maximumLimit } = this;
+
+ const requestedLimit = getLimitValue(defaultLimit, maximumLimit, limitValueTest);
+ return this._getDeviceWithSpecificLimit(requestedLimit, extraLimits, features);
+ }
+
+ /**
+ * Call the given function and check no WebGPU errors are leaked.
+ */
+ async _testThenDestroyDevice(
+ deviceAndLimits: DeviceAndLimits,
+ testValue: number,
+ fn: (inputs: SpecificLimitTestInputs) => void | Promise<void>
+ ) {
+ assert(!this._device);
+
+ const { device, actualLimit } = deviceAndLimits;
+ this._device = device;
+
+ const shouldError = kMinimumLimits.has(this.limit)
+ ? testValue < actualLimit
+ : testValue > actualLimit;
+
+ device.pushErrorScope('internal');
+ device.pushErrorScope('out-of-memory');
+ device.pushErrorScope('validation');
+
+ await fn({ ...deviceAndLimits, testValue, shouldError });
+
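+    // Error scopes pop in reverse (LIFO) order of the pushes above.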
+ const validationError = await device.popErrorScope();
+ const outOfMemoryError = await device.popErrorScope();
+ const internalError = await device.popErrorScope();
+
+ this.expect(!validationError, `unexpected validation error: ${validationError?.message || ''}`);
+ this.expect(
+ !outOfMemoryError,
+ `unexpected out-of-memory error: ${outOfMemoryError?.message || ''}`
+ );
+ this.expect(!internalError, `unexpected internal error: ${internalError?.message || ''}`);
+
+ device.destroy();
+ this._device = undefined;
+ }
+
+ /**
+   * Creates a device with a specific limit.
+   * If the limit is over the adapter's maximum we expect device creation to be rejected.
+   * If the device is created then we call the test function, checking
+   * that it does not leak any GPU errors.
+ */
+ async testDeviceWithSpecificLimits(
+ deviceLimitValue: number,
+ testValue: number,
+ fn: (inputs: SpecificLimitTestInputs) => void | Promise<void>,
+ extraLimits?: LimitsRequest,
+ features?: GPUFeatureName[]
+ ) {
+ assert(!this._device);
+
+ const deviceAndLimits = await this._getDeviceWithSpecificLimit(
+ deviceLimitValue,
+ extraLimits,
+ features
+ );
+    // If we requested a limit over the adapter's maximum, requestDevice will have rejected and there is no device.
+ if (!deviceAndLimits) {
+ return;
+ }
+
+ await this._testThenDestroyDevice(deviceAndLimits, testValue, fn);
+ }
+
+ /**
+   * Creates a device with the limit value selected by MaximumLimitValueTest.
+   * If the limit is over the adapter's maximum we expect device creation to be rejected.
+   * If the device is created then we call the test function, checking
+   * that it does not leak any GPU errors.
+ */
+ async testDeviceWithRequestedMaximumLimits(
+ limitTest: MaximumLimitValueTest,
+ testValueName: MaximumTestValue,
+ fn: (inputs: MaximumLimitTestInputs) => void | Promise<void>,
+ extraLimits?: LimitsRequest
+ ) {
+ assert(!this._device);
+
+ const deviceAndLimits = await this._getDeviceWithRequestedMaximumLimit(limitTest, extraLimits);
+    // If we requested a limit over the adapter's maximum, requestDevice will have rejected and there is no device.
+ if (!deviceAndLimits) {
+ return;
+ }
+
+ const { actualLimit } = deviceAndLimits;
+ const testValue = getMaximumTestValue(actualLimit, testValueName);
+
+ await this._testThenDestroyDevice(
+ deviceAndLimits,
+ testValue,
+ async (inputs: SpecificLimitTestInputs) => {
+ await fn({ ...inputs, testValueName });
+ }
+ );
+ }
+
+ /**
+ * Calls a function that expects a GPU error if shouldError is true
+ */
+  // MAINTENANCE_TODO: Remove this duplication with GPUTest if possible
+ async expectGPUErrorAsync<R>(
+ filter: GPUErrorFilter,
+ fn: () => R,
+ shouldError: boolean = true,
+ msg = ''
+ ): Promise<R> {
+ const { device } = this;
+
+ device.pushErrorScope(filter);
+ const returnValue = fn();
+ if (returnValue instanceof Promise) {
+ await returnValue;
+ }
+
+ const error = await device.popErrorScope();
+ this.expect(
+ !!error === shouldError,
+ `${error?.message || 'no error when one was expected'}: ${msg}`
+ );
+
+ return returnValue;
+ }
+
+ /** Expect that the provided promise rejects, with the provided exception name. */
+ async shouldRejectConditionally(
+ expectedName: string,
+ p: Promise<unknown>,
+ shouldReject: boolean,
+ message?: string
+ ): Promise<void> {
+ if (shouldReject) {
+ this.shouldReject(expectedName, p, { message });
+ } else {
+ this.shouldResolve(p, message);
+ }
+
+ // We need to explicitly wait for the promise because the device may be
+ // destroyed immediately after returning from this function.
+ try {
+ await p;
+ } catch (e) {
+ //
+ }
+ }
+
+ /**
+ * Calls a function that expects a validation error if shouldError is true
+ */
+ override async expectValidationError<R>(
+ fn: () => R,
+ shouldError: boolean = true,
+ msg = ''
+ ): Promise<R> {
+ return this.expectGPUErrorAsync('validation', fn, shouldError, msg);
+ }
+
+ /**
+ * Calls a function that expects to not generate a validation error
+ */
+ async expectNoValidationError<R>(fn: () => R, msg = ''): Promise<R> {
+ return this.expectGPUErrorAsync('validation', fn, false, msg);
+ }
+
+ /**
+ * Calls a function that might expect a validation error.
+ * if shouldError is true then expect a validation error,
+ * if shouldError is false then ignore out-of-memory errors.
+ */
+ async testForValidationErrorWithPossibleOutOfMemoryError<R>(
+ fn: () => R,
+ shouldError: boolean = true,
+ msg = ''
+ ): Promise<R> {
+ const { device } = this;
+
+ if (!shouldError) {
+ device.pushErrorScope('out-of-memory');
+ const result = fn();
+ await device.popErrorScope();
+ return result;
+ }
+
+ // Validation should fail before out-of-memory so there is no need to check
+ // for out-of-memory here.
+ device.pushErrorScope('validation');
+ const returnValue = fn();
+ const validationError = await device.popErrorScope();
+
+ this.expect(
+ !!validationError,
+ `${validationError?.message || 'no error when one was expected'}: ${msg}`
+ );
+
+ return returnValue;
+ }
+
+ getGroupIndexWGSLForPipelineType(pipelineType: CreatePipelineType, groupIndex: number) {
+ switch (pipelineType) {
+ case 'createRenderPipeline':
+ return `
+ @group(${groupIndex}) @binding(0) var<uniform> v: f32;
+ @vertex fn mainVS() -> @builtin(position) vec4f {
+ return vec4f(v);
+ }
+ `;
+ case 'createRenderPipelineWithFragmentStage':
+ return `
+ @group(${groupIndex}) @binding(0) var<uniform> v: f32;
+ @vertex fn mainVS() -> @builtin(position) vec4f {
+ return vec4f(v);
+ }
+ @fragment fn mainFS() -> @location(0) vec4f {
+ return vec4f(1);
+ }
+ `;
+ case 'createComputePipeline':
+ return `
+ @group(${groupIndex}) @binding(0) var<uniform> v: f32;
+ @compute @workgroup_size(1) fn main() {
+ _ = v;
+ }
+ `;
+ break;
+ }
+ }
+
+ getBindingIndexWGSLForPipelineType(pipelineType: CreatePipelineType, bindingIndex: number) {
+ switch (pipelineType) {
+ case 'createRenderPipeline':
+ return `
+ @group(0) @binding(${bindingIndex}) var<uniform> v: f32;
+ @vertex fn mainVS() -> @builtin(position) vec4f {
+ return vec4f(v);
+ }
+ `;
+ case 'createRenderPipelineWithFragmentStage':
+ return `
+ @group(0) @binding(${bindingIndex}) var<uniform> v: f32;
+ @vertex fn mainVS() -> @builtin(position) vec4f {
+ return vec4f(v);
+ }
+ @fragment fn mainFS() -> @location(0) vec4f {
+ return vec4f(1);
+ }
+ `;
+ case 'createComputePipeline':
+ return `
+ @group(0) @binding(${bindingIndex}) var<uniform> v: f32;
+ @compute @workgroup_size(1) fn main() {
+ _ = v;
+ }
+ `;
+ break;
+ }
+ }
+
+ _createRenderPipelineDescriptor(module: GPUShaderModule): GPURenderPipelineDescriptor {
+ return {
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'mainVS',
+ },
+ };
+ }
+
+ _createRenderPipelineDescriptorWithFragmentShader(
+ module: GPUShaderModule
+ ): GPURenderPipelineDescriptor {
+ return {
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'mainVS',
+ },
+ fragment: {
+ module,
+ entryPoint: 'mainFS',
+ targets: [],
+ },
+ depthStencil: {
+ format: 'depth24plus-stencil8',
+ depthWriteEnabled: true,
+ depthCompare: 'always',
+ },
+ };
+ }
+
+ _createComputePipelineDescriptor(module: GPUShaderModule): GPUComputePipelineDescriptor {
+ return {
+ layout: 'auto',
+ compute: {
+ module,
+ entryPoint: 'main',
+ },
+ };
+ }
+
+ createPipeline(createPipelineType: CreatePipelineType, module: GPUShaderModule) {
+ const { device } = this;
+
+ switch (createPipelineType) {
+ case 'createRenderPipeline':
+ return device.createRenderPipeline(this._createRenderPipelineDescriptor(module));
+ break;
+ case 'createRenderPipelineWithFragmentStage':
+ return device.createRenderPipeline(
+ this._createRenderPipelineDescriptorWithFragmentShader(module)
+ );
+ break;
+ case 'createComputePipeline':
+ return device.createComputePipeline(this._createComputePipelineDescriptor(module));
+ break;
+ }
+ }
+
+ createPipelineAsync(createPipelineType: CreatePipelineType, module: GPUShaderModule) {
+ const { device } = this;
+
+ switch (createPipelineType) {
+ case 'createRenderPipeline':
+ return device.createRenderPipelineAsync(this._createRenderPipelineDescriptor(module));
+ case 'createRenderPipelineWithFragmentStage':
+ return device.createRenderPipelineAsync(
+ this._createRenderPipelineDescriptorWithFragmentShader(module)
+ );
+ case 'createComputePipeline':
+ return device.createComputePipelineAsync(this._createComputePipelineDescriptor(module));
+ }
+ }
+
+ async testCreatePipeline(
+ createPipelineType: CreatePipelineType,
+ async: boolean,
+ module: GPUShaderModule,
+ shouldError: boolean,
+ msg = ''
+ ) {
+ if (async) {
+ await this.shouldRejectConditionally(
+ 'GPUPipelineError',
+ this.createPipelineAsync(createPipelineType, module),
+ shouldError,
+ msg
+ );
+ } else {
+ await this.expectValidationError(
+ () => {
+ this.createPipeline(createPipelineType, module);
+ },
+ shouldError,
+ msg
+ );
+ }
+ }
+
+ async testCreateRenderPipeline(
+ pipelineDescriptor: GPURenderPipelineDescriptor,
+ async: boolean,
+ shouldError: boolean,
+ msg = ''
+ ) {
+ const { device } = this;
+ if (async) {
+ await this.shouldRejectConditionally(
+ 'GPUPipelineError',
+ device.createRenderPipelineAsync(pipelineDescriptor),
+ shouldError,
+ msg
+ );
+ } else {
+ await this.expectValidationError(
+ () => {
+ device.createRenderPipeline(pipelineDescriptor);
+ },
+ shouldError,
+ msg
+ );
+ }
+ }
+
+ async testMaxComputeWorkgroupSize(
+ limitTest: MaximumLimitValueTest,
+ testValueName: MaximumTestValue,
+ async: boolean,
+ axis: 'X' | 'Y' | 'Z'
+ ) {
+ const kExtraLimits: LimitsRequest = {
+ maxComputeInvocationsPerWorkgroup: 'adapterLimit',
+ };
+
+ await this.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, actualLimit, shouldError }) => {
+ if (testValue > device.limits.maxComputeInvocationsPerWorkgroup) {
+ return;
+ }
+
+ const size = [1, 1, 1];
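+        // Map axis 'X' | 'Y' | 'Z' to index 0 | 1 | 2 and set that dimension to the test value.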
+ size[axis.codePointAt(0)! - 'X'.codePointAt(0)!] = testValue;
+ const { module, code } = this.getModuleForWorkgroupSize(size);
+
+ await this.testCreatePipeline(
+ 'createComputePipeline',
+ async,
+ module,
+ shouldError,
+ `size: ${testValue}, limit: ${actualLimit}\n${code}`
+ );
+ },
+ kExtraLimits
+ );
+ }
+
+ /**
+   * Creates a GPURenderCommandsMixin set up with some initial state.
+ */
+ _getGPURenderCommandsMixin(encoderType: RenderEncoderType) {
+ const { device } = this;
+
+ switch (encoderType) {
+ case 'render': {
+ const buffer = this.trackForCleanup(
+ device.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.UNIFORM,
+ })
+ );
+
+ const texture = this.trackForCleanup(
+ device.createTexture({
+ size: [1, 1],
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ })
+ );
+
+ const layout = device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.VERTEX,
+ buffer: {},
+ },
+ ],
+ });
+
+ const bindGroup = device.createBindGroup({
+ layout,
+ entries: [
+ {
+ binding: 0,
+ resource: { buffer },
+ },
+ ],
+ });
+
+ const encoder = device.createCommandEncoder();
+ const mixin = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: texture.createView(),
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+
+ return {
+ mixin,
+ bindGroup,
+ prep() {
+ mixin.end();
+ },
+ test() {
+ encoder.finish();
+ },
+ };
+ break;
+ }
+
+ case 'renderBundle': {
+ const buffer = this.trackForCleanup(
+ device.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.UNIFORM,
+ })
+ );
+
+ const layout = device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.VERTEX,
+ buffer: {},
+ },
+ ],
+ });
+
+ const bindGroup = device.createBindGroup({
+ layout,
+ entries: [
+ {
+ binding: 0,
+ resource: { buffer },
+ },
+ ],
+ });
+
+ const mixin = device.createRenderBundleEncoder({
+ colorFormats: ['rgba8unorm'],
+ });
+
+ return {
+ mixin,
+ bindGroup,
+ prep() {},
+ test() {
+ mixin.finish();
+ },
+ };
+ break;
+ }
+ }
+ }
+
+ /**
+   * Tests a method on GPURenderCommandsMixin.
+ * The function will be called with the mixin.
+ */
+ async testGPURenderCommandsMixin(
+ encoderType: RenderEncoderType,
+ fn: ({ mixin }: { mixin: GPURenderCommandsMixin }) => void,
+ shouldError: boolean,
+ msg = ''
+ ) {
+ const { mixin, prep, test } = this._getGPURenderCommandsMixin(encoderType);
+ fn({ mixin });
+ prep();
+
+ await this.expectValidationError(test, shouldError, msg);
+ }
+
+ /**
+   * Creates a GPUBindingCommandsMixin set up with some initial state.
+ */
+ _getGPUBindingCommandsMixin(encoderType: EncoderType) {
+ const { device } = this;
+
+ switch (encoderType) {
+ case 'compute': {
+ const buffer = this.trackForCleanup(
+ device.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.UNIFORM,
+ })
+ );
+
+ const layout = device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE,
+ buffer: {},
+ },
+ ],
+ });
+
+ const bindGroup = device.createBindGroup({
+ layout,
+ entries: [
+ {
+ binding: 0,
+ resource: { buffer },
+ },
+ ],
+ });
+
+ const encoder = device.createCommandEncoder();
+ const mixin = encoder.beginComputePass();
+ return {
+ mixin,
+ bindGroup,
+ prep() {
+ mixin.end();
+ },
+ test() {
+ encoder.finish();
+ },
+ };
+ break;
+ }
+ case 'render':
+ return this._getGPURenderCommandsMixin('render');
+ case 'renderBundle':
+ return this._getGPURenderCommandsMixin('renderBundle');
+ }
+ }
+
+ /**
+   * Tests a method on GPUBindingCommandsMixin.
+   * The function passed in will be called with the mixin and a bindGroup.
+ */
+ async testGPUBindingCommandsMixin(
+ encoderType: EncoderType,
+ fn: ({ bindGroup }: { mixin: GPUBindingCommandsMixin; bindGroup: GPUBindGroup }) => void,
+ shouldError: boolean,
+ msg = ''
+ ) {
+ const { mixin, bindGroup, prep, test } = this._getGPUBindingCommandsMixin(encoderType);
+ fn({ mixin, bindGroup });
+ prep();
+
+ await this.expectValidationError(test, shouldError, msg);
+ }
+
+ getModuleForWorkgroupSize(size: number[]) {
+ const { device } = this;
+ const code = `
+ @group(0) @binding(0) var<storage, read_write> d: f32;
+ @compute @workgroup_size(${size.join(',')}) fn main() {
+ d = 0;
+ }
+ `;
+ const module = device.createShaderModule({ code });
+ return { module, code };
+ }
+}
+
+/**
+ * Makes a new LimitTest class so that the tests have access to `limit`
+ */
+function makeLimitTestFixture(limit: GPUSupportedLimit): typeof LimitTestsImpl {
+ class LimitTests extends LimitTestsImpl {
+ override limit = limit;
+ }
+
+ return LimitTests;
+}
+
+/**
+ * This is to avoid repeating yourself (D.R.Y.). I ran into that issue multiple times while
+ * writing these tests: I'd copy a test, need to rename a limit in 3-4 places, forget one
+ * place, and then spend 20-30 minutes wondering why the test was failing.
+ */
+export function makeLimitTestGroup(limit: GPUSupportedLimit) {
+ const description = `API Validation Tests for ${limit}.`;
+ const g = makeTestGroup(makeLimitTestFixture(limit));
+ return { g, description, limit };
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxBindGroups.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxBindGroups.spec.ts
new file mode 100644
index 0000000000..334b49cc90
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxBindGroups.spec.ts
@@ -0,0 +1,95 @@
+import { range } from '../../../../../common/util/util.js';
+
+import {
+ kCreatePipelineTypes,
+ kEncoderTypes,
+ kMaximumLimitBaseParams,
+ makeLimitTestGroup,
+} from './limit_utils.js';
+
+const limit = 'maxBindGroups';
+export const { g, description } = makeLimitTestGroup(limit);
+
+g.test('createPipelineLayout,at_over')
+ .desc(`Test using createPipelineLayout at and over ${limit} limit`)
+ .params(kMaximumLimitBaseParams)
+ .fn(async t => {
+ const { limitTest, testValueName } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ const bindGroupLayouts = range(testValue, _i =>
+ device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.VERTEX,
+ buffer: {},
+ },
+ ],
+ })
+ );
+
+ await t.expectValidationError(() => {
+ device.createPipelineLayout({ bindGroupLayouts });
+ }, shouldError);
+ }
+ );
+ });
+
+g.test('createPipeline,at_over')
+ .desc(
+ `Test using createRenderPipeline(Async) and createComputePipeline(Async) at and over ${limit} limit`
+ )
+ .params(
+ kMaximumLimitBaseParams
+ .combine('createPipelineType', kCreatePipelineTypes)
+ .combine('async', [false, true] as const)
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, createPipelineType, async } = t.params;
+
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ const lastIndex = testValue - 1;
+
+ const code = t.getGroupIndexWGSLForPipelineType(createPipelineType, lastIndex);
+ const module = device.createShaderModule({ code });
+
+ await t.testCreatePipeline(createPipelineType, async, module, shouldError);
+ }
+ );
+ });
+
+g.test('setBindGroup,at_over')
+ .desc(`Test using setBindGroup at and over ${limit} limit`)
+ .params(kMaximumLimitBaseParams.combine('encoderType', kEncoderTypes))
+ .fn(async t => {
+ const { limitTest, testValueName, encoderType } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ testValue, actualLimit, shouldError }) => {
+ const lastIndex = testValue - 1;
+ await t.testGPUBindingCommandsMixin(
+ encoderType,
+ ({ mixin, bindGroup }) => {
+ mixin.setBindGroup(lastIndex, bindGroup);
+ },
+ shouldError,
+ `shouldError: ${shouldError}, actualLimit: ${actualLimit}, testValue: ${lastIndex}`
+ );
+ }
+ );
+ });
+
+g.test('validate,maxBindGroupsPlusVertexBuffers')
+ .desc(`Test that ${limit} <= maxBindGroupsPlusVertexBuffers`)
+ .fn(t => {
+ const { adapter, defaultLimit, adapterLimit } = t;
+ t.expect(defaultLimit <= t.getDefaultLimit('maxBindGroupsPlusVertexBuffers'));
+ t.expect(adapterLimit <= adapter.limits.maxBindGroupsPlusVertexBuffers);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxBindingsPerBindGroup.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxBindingsPerBindGroup.spec.ts
new file mode 100644
index 0000000000..b2d9a156c1
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxBindingsPerBindGroup.spec.ts
@@ -0,0 +1,75 @@
+import {
+ kCreatePipelineTypes,
+ kMaximumLimitBaseParams,
+ makeLimitTestGroup,
+} from './limit_utils.js';
+
+const limit = 'maxBindingsPerBindGroup';
+export const { g, description } = makeLimitTestGroup(limit);
+
+g.test('createBindGroupLayout,at_over')
+ .desc(`Test using createBindGroupLayout at and over ${limit} limit`)
+ .params(kMaximumLimitBaseParams)
+ .fn(async t => {
+ const { limitTest, testValueName } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ await t.expectValidationError(() => {
+ device.createBindGroupLayout({
+ entries: [
+ {
+ binding: testValue - 1,
+ visibility: GPUShaderStage.VERTEX,
+ buffer: {},
+ },
+ ],
+ });
+ }, shouldError);
+ }
+ );
+ });
+
+g.test('createPipeline,at_over')
+ .desc(
+ `Test using createRenderPipeline(Async) and createComputePipeline(Async) at and over ${limit} limit`
+ )
+ .params(
+ kMaximumLimitBaseParams
+ .combine('createPipelineType', kCreatePipelineTypes)
+ .combine('async', [false, true] as const)
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, createPipelineType, async } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ const lastIndex = testValue - 1;
+
+ const code = t.getBindingIndexWGSLForPipelineType(createPipelineType, lastIndex);
+ const module = device.createShaderModule({ code });
+
+ await t.testCreatePipeline(createPipelineType, async, module, shouldError, code);
+ }
+ );
+ });
+
+g.test('validate')
+ .desc(`Test ${limit} matches the spec limits`)
+ .fn(t => {
+ const { adapter, adapterLimit } = t;
+ const maxBindingsPerShaderStage =
+ adapter.limits.maxSampledTexturesPerShaderStage +
+ adapter.limits.maxSamplersPerShaderStage +
+ adapter.limits.maxStorageBuffersPerShaderStage +
+ adapter.limits.maxStorageTexturesPerShaderStage +
+ adapter.limits.maxUniformBuffersPerShaderStage;
+ const maxShaderStagesPerPipeline = 2;
+ const minMaxBindingsPerBindGroup = maxBindingsPerShaderStage * maxShaderStagesPerPipeline;
+ t.expect(
+ adapterLimit >= minMaxBindingsPerBindGroup,
+      `maxBindingsPerBindGroup(${adapterLimit}) >= maxBindingsPerShaderStage(${maxBindingsPerShaderStage}) * maxShaderStagesPerPipeline(${maxShaderStagesPerPipeline}) (= ${minMaxBindingsPerBindGroup})`
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxBufferSize.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxBufferSize.spec.ts
new file mode 100644
index 0000000000..6f6715cd2a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxBufferSize.spec.ts
@@ -0,0 +1,28 @@
+import { kMaximumLimitBaseParams, makeLimitTestGroup } from './limit_utils.js';
+
+const limit = 'maxBufferSize';
+export const { g, description } = makeLimitTestGroup(limit);
+
+g.test('createBuffer,at_over')
+ .desc(`Test using at and over ${limit} limit`)
+ .params(kMaximumLimitBaseParams)
+ .fn(async t => {
+ const { limitTest, testValueName } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, actualLimit, shouldError }) => {
+ await t.testForValidationErrorWithPossibleOutOfMemoryError(
+ () => {
+ const buffer = device.createBuffer({
+ usage: GPUBufferUsage.VERTEX,
+ size: testValue,
+ });
+ buffer.destroy();
+ },
+ shouldError,
+ `size: ${testValue}, limit: ${actualLimit}`
+ );
+ }
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxColorAttachmentBytesPerSample.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxColorAttachmentBytesPerSample.spec.ts
new file mode 100644
index 0000000000..cb7bce0667
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxColorAttachmentBytesPerSample.spec.ts
@@ -0,0 +1,260 @@
+import { assert } from '../../../../../common/util/util.js';
+import { kTextureSampleCounts } from '../../../../capability_info.js';
+import { kTextureFormatInfo } from '../../../../format_info.js';
+import { align } from '../../../../util/math.js';
+
+import {
+ kMaximumLimitBaseParams,
+ LimitsRequest,
+ LimitTestsImpl,
+ makeLimitTestGroup,
+} from './limit_utils.js';
+
+const kFormatsToUseBySize: GPUTextureFormat[] = [
+ 'rgba32float',
+ 'rgba16float',
+ 'rgba8unorm',
+ 'rg8unorm',
+ 'r8unorm',
+];
+
+const kInterleaveFormats: GPUTextureFormat[] = [
+ 'rgba16float',
+ 'rg16float',
+ 'rgba8unorm',
+ 'rg8unorm',
+ 'r8unorm',
+];
+
+function getAttachments(interleaveFormat: GPUTextureFormat, testValue: number) {
+ let bytesPerSample = 0;
+ const targets: GPUColorTargetState[] = [];
+
+ const addTexture = (format: GPUTextureFormat) => {
+ const info = kTextureFormatInfo[format];
+ const newBytesPerSample =
+ align(bytesPerSample, info.colorRender!.alignment) + info.colorRender!.byteCost;
+ if (newBytesPerSample > testValue) {
+ return false;
+ }
+ targets.push({ format, writeMask: 0 });
+ bytesPerSample = newBytesPerSample;
+ return true;
+ };
+
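+  // Add the interleave format (when it still fits), then the largest format from
+  // kFormatsToUseBySize that still fits; repeat until the total bytes per sample lands
+  // exactly on testValue.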
+ while (bytesPerSample < testValue) {
+ addTexture(interleaveFormat);
+ for (const format of kFormatsToUseBySize) {
+ if (addTexture(format)) {
+ break;
+ }
+ }
+ }
+
+ assert(bytesPerSample === testValue);
+ return targets;
+}
+
+function getDescription(
+ testValue: number,
+ actualLimit: number,
+ sampleCount: number,
+ targets: GPUColorTargetState[]
+) {
+ return `
+ // testValue : ${testValue}
+ // actualLimit: ${actualLimit}
+ // sampleCount: ${sampleCount}
+ // targets:
+ ${(() => {
+ let offset = 0;
+ return targets
+ .map(({ format }) => {
+ const info = kTextureFormatInfo[format];
+ offset = align(offset, info.colorRender!.alignment);
+ const s = `// ${format.padEnd(11)} (offset: ${offset.toString().padStart(2)}, align: ${
+ info.colorRender!.alignment
+ }, size: ${info.colorRender!.byteCost})`;
+ offset += info.colorRender!.byteCost;
+ return s;
+ })
+ .join('\n ');
+ })()}
+ `;
+}
+
+function getPipelineDescriptor(
+ device: GPUDevice,
+ actualLimit: number,
+ interleaveFormat: GPUTextureFormat,
+ sampleCount: number,
+ testValue: number
+): { pipelineDescriptor: GPURenderPipelineDescriptor; code: string } | undefined {
+ const targets = getAttachments(interleaveFormat, testValue);
+ if (!targets) {
+ return;
+ }
+
+ const code = `
+ ${getDescription(testValue, actualLimit, sampleCount, targets)}
+ @vertex fn vs() -> @builtin(position) vec4f {
+ return vec4f(0);
+ }
+
+ @fragment fn fs() -> @location(0) vec4f {
+ return vec4f(0);
+ }
+ `;
+ const module = device.createShaderModule({ code });
+ const pipelineDescriptor: GPURenderPipelineDescriptor = {
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'vs',
+ },
+ fragment: {
+ module,
+ entryPoint: 'fs',
+ targets,
+ },
+    // depth should not affect this limit, so it is included to make sure the implementation does not count it
+ depthStencil: {
+ depthWriteEnabled: true,
+ depthCompare: 'less',
+ format: 'depth24plus',
+ },
+ multisample: {
+ count: sampleCount,
+ },
+ };
+ return { pipelineDescriptor, code };
+}
+
+function createTextures(t: LimitTestsImpl, targets: GPUColorTargetState[]) {
+ return targets.map(({ format }) =>
+ t.trackForCleanup(
+ t.device.createTexture({
+ size: [1, 1],
+ format,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ })
+ )
+ );
+}
+
+const kExtraLimits: LimitsRequest = {
+ maxColorAttachments: 'adapterLimit',
+};
+
+const limit = 'maxColorAttachmentBytesPerSample';
+export const { g, description } = makeLimitTestGroup(limit);
+
+g.test('createRenderPipeline,at_over')
+ .desc(`Test using at and over ${limit} limit in createRenderPipeline(Async)`)
+ .params(
+ kMaximumLimitBaseParams
+ .combine('async', [false, true] as const)
+ .combine('sampleCount', kTextureSampleCounts)
+ .combine('interleaveFormat', kInterleaveFormats)
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, async, sampleCount, interleaveFormat } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, actualLimit, shouldError }) => {
+ const result = getPipelineDescriptor(
+ device,
+ actualLimit,
+ interleaveFormat,
+ sampleCount,
+ testValue
+ );
+ if (!result) {
+ return;
+ }
+ const { pipelineDescriptor, code } = result;
+ const numTargets = (pipelineDescriptor.fragment!.targets as GPUColorTargetState[]).length;
+ if (numTargets > device.limits.maxColorAttachments) {
+ return;
+ }
+
+ await t.testCreateRenderPipeline(pipelineDescriptor, async, shouldError, code);
+ },
+ kExtraLimits
+ );
+ });
+
+g.test('beginRenderPass,at_over')
+ .desc(`Test using at and over ${limit} limit in beginRenderPass`)
+ .params(
+ kMaximumLimitBaseParams
+ .combine('sampleCount', kTextureSampleCounts)
+ .combine('interleaveFormat', kInterleaveFormats)
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, sampleCount, interleaveFormat } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, actualLimit, shouldError }) => {
+ const targets = getAttachments(interleaveFormat, testValue);
+ if (targets.length > device.limits.maxColorAttachments) {
+ return;
+ }
+
+ const encoder = device.createCommandEncoder();
+ const textures = createTextures(t, targets);
+
+ const pass = encoder.beginRenderPass({
+ colorAttachments: textures.map(texture => ({
+ view: texture.createView(),
+ loadOp: 'clear',
+ storeOp: 'store',
+ })),
+ });
+ pass.end();
+
+ await t.expectValidationError(
+ () => {
+ encoder.finish();
+ },
+ shouldError,
+ getDescription(testValue, actualLimit, sampleCount, targets)
+ );
+ },
+ kExtraLimits
+ );
+ });
+
+g.test('createRenderBundle,at_over')
+ .desc(`Test using at and over ${limit} limit in createRenderBundle`)
+ .params(
+ kMaximumLimitBaseParams
+ .combine('sampleCount', kTextureSampleCounts)
+ .combine('interleaveFormat', kInterleaveFormats)
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, sampleCount, interleaveFormat } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, actualLimit, shouldError }) => {
+ const targets = getAttachments(interleaveFormat, testValue);
+ if (targets.length > device.limits.maxColorAttachments) {
+ return;
+ }
+
+ await t.expectValidationError(
+ () => {
+ device.createRenderBundleEncoder({
+ colorFormats: targets.map(({ format }) => format),
+ });
+ },
+ shouldError,
+ getDescription(testValue, actualLimit, sampleCount, targets)
+ );
+ },
+ kExtraLimits
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxColorAttachments.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxColorAttachments.spec.ts
new file mode 100644
index 0000000000..53c3b48c6b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxColorAttachments.spec.ts
@@ -0,0 +1,124 @@
+import { range } from '../../../../../common/util/util.js';
+import { kMaxColorAttachmentsToTest } from '../../../../capability_info.js';
+
+import { kMaximumLimitBaseParams, makeLimitTestGroup } from './limit_utils.js';
+
+function getPipelineDescriptor(device: GPUDevice, testValue: number): GPURenderPipelineDescriptor {
+ const code = `
+ @vertex fn vs() -> @builtin(position) vec4f {
+ return vec4f(0);
+ }
+
+ @fragment fn fs() -> @location(0) vec4f {
+ return vec4f(0);
+ }
+ `;
+ const module = device.createShaderModule({ code });
+ return {
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'vs',
+ },
+ fragment: {
+ module,
+ entryPoint: 'fs',
+ targets: new Array(testValue).fill({ format: 'r8unorm', writeMask: 0 }),
+ },
+ };
+}
+
+const limit = 'maxColorAttachments';
+export const { g, description } = makeLimitTestGroup(limit);
+
+g.test('createRenderPipeline,at_over')
+ .desc(`Test using at and over ${limit} limit in createRenderPipeline(Async)`)
+ .params(kMaximumLimitBaseParams.combine('async', [false, true] as const))
+ .fn(async t => {
+ const { limitTest, testValueName, async } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ const pipelineDescriptor = getPipelineDescriptor(device, testValue);
+
+ await t.testCreateRenderPipeline(pipelineDescriptor, async, shouldError);
+ }
+ );
+ });
+
+g.test('beginRenderPass,at_over')
+ .desc(`Test using at and over ${limit} limit in beginRenderPass`)
+ .params(kMaximumLimitBaseParams)
+ .fn(async t => {
+ const { limitTest, testValueName } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ const encoder = device.createCommandEncoder();
+
+ const textures = range(testValue, _ =>
+ t.trackForCleanup(
+ device.createTexture({
+ size: [1, 1],
+ format: 'r8unorm',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ })
+ )
+ );
+
+ const pass = encoder.beginRenderPass({
+ colorAttachments: range(testValue, i => ({
+ view: textures[i].createView(),
+ loadOp: 'clear',
+ storeOp: 'store',
+ })),
+ });
+ pass.end();
+
+ await t.expectValidationError(() => {
+ encoder.finish();
+ }, shouldError);
+ }
+ );
+ });
+
+g.test('createRenderBundle,at_over')
+ .desc(`Test using at and over ${limit} limit in createRenderBundle`)
+ .params(kMaximumLimitBaseParams)
+ .fn(async t => {
+ const { limitTest, testValueName } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ await t.expectValidationError(() => {
+ device.createRenderBundleEncoder({
+ colorFormats: new Array(testValue).fill('r8unorm'),
+ });
+ }, shouldError);
+ }
+ );
+ });
+
+g.test('validate,maxColorAttachmentBytesPerSample')
+ .desc(`Test ${limit} against maxColorAttachmentBytesPerSample`)
+ .fn(t => {
+ const { adapter, defaultLimit, adapterLimit: maximumLimit } = t;
+ const minColorAttachmentBytesPerSample = t.getDefaultLimit('maxColorAttachmentBytesPerSample');
+    // The smallest attachment is 1 byte,
+    // so make sure maxColorAttachments <= maxColorAttachmentBytesPerSample.
+ t.expect(defaultLimit <= minColorAttachmentBytesPerSample);
+ t.expect(maximumLimit <= adapter.limits.maxColorAttachmentBytesPerSample);
+ });
+
+g.test('validate,kMaxColorAttachmentsToTest')
+ .desc(
+ `
+ Tests that kMaxColorAttachmentsToTest is large enough to test the limits of this device
+ `
+ )
+ .fn(t => {
+ t.expect(t.adapter.limits.maxColorAttachments <= kMaxColorAttachmentsToTest);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxComputeInvocationsPerWorkgroup.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxComputeInvocationsPerWorkgroup.spec.ts
new file mode 100644
index 0000000000..a3858a6221
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxComputeInvocationsPerWorkgroup.spec.ts
@@ -0,0 +1,147 @@
+import { GPUTestBase } from '../../../../gpu_test.js';
+
+import {
+ kMaximumLimitBaseParams,
+ MaximumLimitValueTest,
+ MaximumTestValue,
+ makeLimitTestGroup,
+} from './limit_utils.js';
+
+/**
+ * Given a 3-dimensional size and a limit, compute
+ * the smallest volume with more than `limit` units.
+ */
+function getClosestSizeOverLimit(size: number[], limit: number) {
+ let closest = Number.MAX_SAFE_INTEGER;
+ let closestSize: number[] = [];
+ const depthLimit = Math.min(limit, size[2]);
+ for (let depth = 1; depth <= depthLimit; ++depth) {
+ for (let height = 1; height <= size[1]; ++height) {
+ const planeSize = depth * height;
+ if (planeSize <= limit) {
+ const width = Math.min(size[0], Math.ceil(limit / planeSize));
+ const num = width * planeSize;
+ const dist = num - limit;
+ if (dist > 0 && dist < closest) {
+ closest = dist;
+ closestSize = [width, height, depth];
+ }
+ }
+ }
+ }
+ return closestSize;
+}
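+// Illustrative example (values chosen here, not from the spec): with size [4, 4, 4] and
+// limit 8, getClosestSizeOverLimit returns [3, 3, 1]; its volume of 9 is the smallest
+// achievable value over the limit.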
+
+/**
+ * Given a 3-dimensional size and a limit, compute
+ * the largest volume with at most `limit` units.
+ */
+function getClosestSizeUnderOrAtLimit(size: number[], limit: number) {
+ let closest = Number.MAX_SAFE_INTEGER;
+ let closestSize: number[] = [];
+ const depthLimit = Math.min(limit, size[2]);
+ for (let depth = 1; depth <= depthLimit; ++depth) {
+ for (let height = 1; height <= size[1]; ++height) {
+ const planeSize = depth * height;
+ if (planeSize <= limit) {
+ const width = Math.min(size[0], Math.floor(limit / planeSize));
+ const num = width * planeSize;
+ const dist = limit - num;
+ if (dist < closest) {
+ closest = dist;
+ closestSize = [width, height, depth];
+ }
+ }
+ }
+ }
+ return closestSize;
+}
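+// Illustrative example (values chosen here, not from the spec): with size [256, 256, 64]
+// and limit 256, getClosestSizeUnderOrAtLimit returns [256, 1, 1], whose volume is exactly
+// 256.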
+
+function getDeviceLimitToRequest(
+ limitValueTest: MaximumLimitValueTest,
+ defaultLimit: number,
+ maximumLimit: number
+) {
+ switch (limitValueTest) {
+ case 'atDefault':
+ return defaultLimit;
+ case 'underDefault':
+ return defaultLimit - 1;
+ case 'betweenDefaultAndMaximum':
+ return Math.floor((defaultLimit + maximumLimit) / 2);
+ case 'atMaximum':
+ return maximumLimit;
+ case 'overMaximum':
+ return maximumLimit + 1;
+ }
+}
+
+function getTestWorkgroupSize(
+ t: GPUTestBase,
+ testValueName: MaximumTestValue,
+ requestedLimit: number
+) {
+ const maxDimensions = [
+ t.getDefaultLimit('maxComputeWorkgroupSizeX'),
+ t.getDefaultLimit('maxComputeWorkgroupSizeY'),
+ t.getDefaultLimit('maxComputeWorkgroupSizeZ'),
+ ];
+
+ switch (testValueName) {
+ case 'atLimit':
+ return getClosestSizeUnderOrAtLimit(maxDimensions, requestedLimit);
+ case 'overLimit':
+ return getClosestSizeOverLimit(maxDimensions, requestedLimit);
+ }
+}
+
+function getDeviceLimitToRequestAndValueToTest(
+ t: GPUTestBase,
+ limitValueTest: MaximumLimitValueTest,
+ testValueName: MaximumTestValue,
+ defaultLimit: number,
+ maximumLimit: number
+) {
+ const requestedLimit = getDeviceLimitToRequest(limitValueTest, defaultLimit, maximumLimit);
+ const workgroupSize = getTestWorkgroupSize(t, testValueName, requestedLimit);
+ return {
+ requestedLimit,
+ workgroupSize,
+ };
+}
+
+const limit = 'maxComputeInvocationsPerWorkgroup';
+export const { g, description } = makeLimitTestGroup(limit);
+
+g.test('createComputePipeline,at_over')
+ .desc(`Test using createComputePipeline(Async) at and over ${limit} limit`)
+ .params(kMaximumLimitBaseParams.combine('async', [false, true] as const))
+ .fn(async t => {
+ const { limitTest, testValueName, async } = t.params;
+ const { defaultLimit, adapterLimit: maximumLimit } = t;
+
+ const { requestedLimit, workgroupSize } = getDeviceLimitToRequestAndValueToTest(
+ t,
+ limitTest,
+ testValueName,
+ defaultLimit,
+ maximumLimit
+ );
+ const testValue = workgroupSize.reduce((a, b) => a * b, 1);
+
+ await t.testDeviceWithSpecificLimits(
+ requestedLimit,
+ testValue,
+ async ({ testValue, actualLimit, shouldError }) => {
+ const { module, code } = t.getModuleForWorkgroupSize(workgroupSize);
+
+ await t.testCreatePipeline(
+ 'createComputePipeline',
+ async,
+ module,
+ shouldError,
+ `workgroupSize: [${workgroupSize}], size: ${testValue}, limit: ${actualLimit}\n${code}`
+ );
+ }
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxComputeWorkgroupSizeX.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxComputeWorkgroupSizeX.spec.ts
new file mode 100644
index 0000000000..6d394f998c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxComputeWorkgroupSizeX.spec.ts
@@ -0,0 +1,20 @@
+import { kMaximumLimitBaseParams, makeLimitTestGroup } from './limit_utils.js';
+
+const limit = 'maxComputeWorkgroupSizeX';
+export const { g, description } = makeLimitTestGroup(limit);
+
+g.test('createComputePipeline,at_over')
+ .desc(`Test using createComputePipeline(Async) at and over ${limit} limit`)
+ .params(kMaximumLimitBaseParams.combine('async', [false, true] as const))
+ .fn(async t => {
+ const { limitTest, testValueName, async } = t.params;
+ await t.testMaxComputeWorkgroupSize(limitTest, testValueName, async, 'X');
+ });
+
+g.test('validate,maxComputeInvocationsPerWorkgroup')
+ .desc(`Test that ${limit} <= maxComputeInvocationsPerWorkgroup`)
+ .fn(t => {
+ const { adapter, defaultLimit, adapterLimit } = t;
+ t.expect(defaultLimit <= t.getDefaultLimit('maxComputeInvocationsPerWorkgroup'));
+ t.expect(adapterLimit <= adapter.limits.maxComputeInvocationsPerWorkgroup);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxComputeWorkgroupSizeY.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxComputeWorkgroupSizeY.spec.ts
new file mode 100644
index 0000000000..26eeaa9a64
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxComputeWorkgroupSizeY.spec.ts
@@ -0,0 +1,20 @@
+import { kMaximumLimitBaseParams, makeLimitTestGroup } from './limit_utils.js';
+
+const limit = 'maxComputeWorkgroupSizeY';
+export const { g, description } = makeLimitTestGroup(limit);
+
+g.test('createComputePipeline,at_over')
+ .desc(`Test using createComputePipeline(Async) at and over ${limit} limit`)
+ .params(kMaximumLimitBaseParams.combine('async', [false, true] as const))
+ .fn(async t => {
+ const { limitTest, testValueName, async } = t.params;
+ await t.testMaxComputeWorkgroupSize(limitTest, testValueName, async, 'Y');
+ });
+
+g.test('validate,maxComputeInvocationsPerWorkgroup')
+ .desc(`Test that ${limit} <= maxComputeInvocationsPerWorkgroup`)
+ .fn(t => {
+ const { adapter, defaultLimit, adapterLimit } = t;
+ t.expect(defaultLimit <= t.getDefaultLimit('maxComputeInvocationsPerWorkgroup'));
+ t.expect(adapterLimit <= adapter.limits.maxComputeInvocationsPerWorkgroup);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxComputeWorkgroupSizeZ.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxComputeWorkgroupSizeZ.spec.ts
new file mode 100644
index 0000000000..9a199f4cd3
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxComputeWorkgroupSizeZ.spec.ts
@@ -0,0 +1,20 @@
+import { kMaximumLimitBaseParams, makeLimitTestGroup } from './limit_utils.js';
+
+const limit = 'maxComputeWorkgroupSizeZ';
+export const { g, description } = makeLimitTestGroup(limit);
+
+g.test('createComputePipeline,at_over')
+ .desc(`Test using createComputePipeline(Async) at and over ${limit} limit`)
+ .params(kMaximumLimitBaseParams.combine('async', [false, true] as const))
+ .fn(async t => {
+ const { limitTest, testValueName, async } = t.params;
+ await t.testMaxComputeWorkgroupSize(limitTest, testValueName, async, 'Z');
+ });
+
+g.test('validate,maxComputeInvocationsPerWorkgroup')
+ .desc(`Test that ${limit} <= maxComputeInvocationsPerWorkgroup`)
+ .fn(t => {
+ const { adapter, defaultLimit, adapterLimit } = t;
+ t.expect(defaultLimit <= t.getDefaultLimit('maxComputeInvocationsPerWorkgroup'));
+ t.expect(adapterLimit <= adapter.limits.maxComputeInvocationsPerWorkgroup);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxComputeWorkgroupStorageSize.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxComputeWorkgroupStorageSize.spec.ts
new file mode 100644
index 0000000000..cb26e18ebe
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxComputeWorkgroupStorageSize.spec.ts
@@ -0,0 +1,182 @@
+import { keysOf } from '../../../../../common/util/data_tables.js';
+import { assert } from '../../../../../common/util/util.js';
+import { align, roundDown } from '../../../../util/math.js';
+
+import {
+ MaximumLimitValueTest,
+ MaximumTestValue,
+ kMaximumLimitBaseParams,
+ makeLimitTestGroup,
+} from './limit_utils.js';
+
+const limit = 'maxComputeWorkgroupStorageSize';
+export const { g, description } = makeLimitTestGroup(limit);
+
+const kSmallestWorkgroupVarSize = 4;
+
+const wgslF16Types = {
+ f16: { alignOf: 2, sizeOf: 2, requireF16: true },
+ 'vec2<f16>': { alignOf: 4, sizeOf: 4, requireF16: true },
+ 'vec3<f16>': { alignOf: 8, sizeOf: 6, requireF16: true },
+ 'vec4<f16>': { alignOf: 8, sizeOf: 8, requireF16: true },
+ 'mat2x2<f16>': { alignOf: 4, sizeOf: 8, requireF16: true },
+ 'mat3x2<f16>': { alignOf: 4, sizeOf: 12, requireF16: true },
+ 'mat4x2<f16>': { alignOf: 4, sizeOf: 16, requireF16: true },
+ 'mat2x3<f16>': { alignOf: 8, sizeOf: 16, requireF16: true },
+ 'mat3x3<f16>': { alignOf: 8, sizeOf: 24, requireF16: true },
+ 'mat4x3<f16>': { alignOf: 8, sizeOf: 32, requireF16: true },
+ 'mat2x4<f16>': { alignOf: 8, sizeOf: 16, requireF16: true },
+ 'mat3x4<f16>': { alignOf: 8, sizeOf: 24, requireF16: true },
+ 'mat4x4<f16>': { alignOf: 8, sizeOf: 32, requireF16: true },
+};
+
+const wgslBaseTypes = {
+ f32: { alignOf: 4, sizeOf: 4, requireF16: false },
+ i32: { alignOf: 4, sizeOf: 4, requireF16: false },
+ u32: { alignOf: 4, sizeOf: 4, requireF16: false },
+
+ 'vec2<f32>': { alignOf: 8, sizeOf: 8, requireF16: false },
+ 'vec2<i32>': { alignOf: 8, sizeOf: 8, requireF16: false },
+ 'vec2<u32>': { alignOf: 8, sizeOf: 8, requireF16: false },
+
+ 'vec3<f32>': { alignOf: 16, sizeOf: 12, requireF16: false },
+ 'vec3<i32>': { alignOf: 16, sizeOf: 12, requireF16: false },
+ 'vec3<u32>': { alignOf: 16, sizeOf: 12, requireF16: false },
+
+ 'vec4<f32>': { alignOf: 16, sizeOf: 16, requireF16: false },
+ 'vec4<i32>': { alignOf: 16, sizeOf: 16, requireF16: false },
+ 'vec4<u32>': { alignOf: 16, sizeOf: 16, requireF16: false },
+
+ 'mat2x2<f32>': { alignOf: 8, sizeOf: 16, requireF16: false },
+ 'mat3x2<f32>': { alignOf: 8, sizeOf: 24, requireF16: false },
+ 'mat4x2<f32>': { alignOf: 8, sizeOf: 32, requireF16: false },
+ 'mat2x3<f32>': { alignOf: 16, sizeOf: 32, requireF16: false },
+ 'mat3x3<f32>': { alignOf: 16, sizeOf: 48, requireF16: false },
+ 'mat4x3<f32>': { alignOf: 16, sizeOf: 64, requireF16: false },
+ 'mat2x4<f32>': { alignOf: 16, sizeOf: 32, requireF16: false },
+ 'mat3x4<f32>': { alignOf: 16, sizeOf: 48, requireF16: false },
+ 'mat4x4<f32>': { alignOf: 16, sizeOf: 64, requireF16: false },
+
+ S1: { alignOf: 16, sizeOf: 48, requireF16: false },
+  S2: { alignOf: 16, sizeOf: 16 * 7, requireF16: false },
+ S3: { alignOf: 16, sizeOf: 32, requireF16: false },
+};
+
+const wgslTypes = { ...wgslF16Types, ...wgslBaseTypes };
+type WGSLType = keyof typeof wgslTypes;
+const kWGSLTypes = keysOf(wgslTypes);
+
+function getModuleForWorkgroupStorageSize(device: GPUDevice, wgslType: WGSLType, size: number) {
+ assert(size % kSmallestWorkgroupVarSize === 0);
+ const { sizeOf, alignOf, requireF16 } = wgslTypes[wgslType];
+ const unitSize = align(sizeOf, alignOf);
+ const units = Math.floor(size / unitSize);
+ const extra = (size - units * unitSize) / kSmallestWorkgroupVarSize;
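+  // 'units' elements of wgslType account for most of 'size'; any remaining bytes are
+  // covered by 'extra' f32 elements in a second workgroup array.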
+
+ const code =
+ (requireF16 ? 'enable f16;\n' : '') +
+ `
+ struct S1 {
+ a: f32,
+ b: vec4f,
+ c: u32,
+ };
+ struct S2 {
+ a: array<vec3f, 7>,
+ };
+ struct S3 {
+ a: vec3f,
+ b: vec2f,
+ };
+ var<workgroup> d0: array<${wgslType}, ${units}>;
+ ${extra ? `var<workgroup> d1: array<f32, ${extra}>;` : ''}
+ @compute @workgroup_size(1) fn main() {
+ _ = d0;
+ ${extra ? '_ = d1;' : ''}
+ }
+ `;
+ return { module: device.createShaderModule({ code }), code };
+}
+
+function getDeviceLimitToRequest(
+ limitValueTest: MaximumLimitValueTest,
+ defaultLimit: number,
+ maximumLimit: number
+) {
+ switch (limitValueTest) {
+ case 'atDefault':
+ return defaultLimit;
+ case 'underDefault':
+ return defaultLimit - kSmallestWorkgroupVarSize;
+ case 'betweenDefaultAndMaximum':
+ return roundDown(Math.floor((defaultLimit + maximumLimit) / 2), kSmallestWorkgroupVarSize);
+ case 'atMaximum':
+ return maximumLimit;
+ case 'overMaximum':
+ return maximumLimit + kSmallestWorkgroupVarSize;
+ }
+}
+
+function getTestValue(testValueName: MaximumTestValue, requestedLimit: number) {
+ switch (testValueName) {
+ case 'atLimit':
+ return requestedLimit;
+ case 'overLimit':
+ return requestedLimit + kSmallestWorkgroupVarSize;
+ }
+}
+
+function getDeviceLimitToRequestAndValueToTest(
+ limitValueTest: MaximumLimitValueTest,
+ testValueName: MaximumTestValue,
+ defaultLimit: number,
+ maximumLimit: number
+) {
+ const requestedLimit = getDeviceLimitToRequest(limitValueTest, defaultLimit, maximumLimit);
+ const testValue = getTestValue(testValueName, requestedLimit);
+ return {
+ requestedLimit,
+ testValue,
+ };
+}
+
+g.test('createComputePipeline,at_over')
+ .desc(`Test using createComputePipeline(Async) at and over ${limit} limit`)
+ .params(
+ kMaximumLimitBaseParams.combine('async', [false, true] as const).combine('wgslType', kWGSLTypes)
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, async, wgslType } = t.params;
+ const { defaultLimit, adapterLimit: maximumLimit } = t;
+
+ const hasF16 = t.adapter.features.has('shader-f16');
+ if (!hasF16 && wgslType in wgslF16Types) {
+ return;
+ }
+
+ const features: GPUFeatureName[] = hasF16 ? ['shader-f16'] : [];
+
+ const { requestedLimit, testValue } = getDeviceLimitToRequestAndValueToTest(
+ limitTest,
+ testValueName,
+ defaultLimit,
+ maximumLimit
+ );
+ await t.testDeviceWithSpecificLimits(
+ requestedLimit,
+ testValue,
+ async ({ device, testValue, actualLimit, shouldError }) => {
+ const { module, code } = getModuleForWorkgroupStorageSize(device, wgslType, testValue);
+
+ await t.testCreatePipeline(
+ 'createComputePipeline',
+ async,
+ module,
+ shouldError,
+ `size: ${testValue}, limit: ${actualLimit}\n${code}`
+ );
+ },
+ {},
+ features
+ );
+ });
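For reference, the sizing arithmetic above is easiest to trace with a concrete case. A minimal standalone sketch, assuming a hypothetical 256-byte budget and the 'vec3<f32>' entry from the table (sizeOf 12, alignOf 16); the align helper simply mirrors the CTS math utility of the same name.

const align = (n: number, alignment: number) => Math.ceil(n / alignment) * alignment;

const sizeOf = 12;                             // vec3<f32> occupies 12 bytes...
const alignOf = 16;                            // ...but is 16-byte aligned
const unitSize = align(sizeOf, alignOf);       // so each array element takes 16 bytes
const budget = 256;                            // hypothetical workgroup-storage budget
const units = Math.floor(budget / unitSize);   // 16 elements
const extra = (budget - units * unitSize) / 4; // 0, so no trailing array<f32> is emitted

// The generated WGSL therefore declares
//   var<workgroup> d0: array<vec3<f32>, 16>;
// which consumes exactly 256 bytes of workgroup storage.
console.log({ unitSize, units, extra });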
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxComputeWorkgroupsPerDimension.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxComputeWorkgroupsPerDimension.spec.ts
new file mode 100644
index 0000000000..8b3805f47b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxComputeWorkgroupsPerDimension.spec.ts
@@ -0,0 +1,97 @@
+import { kMaximumLimitBaseParams, makeLimitTestGroup } from './limit_utils.js';
+
+const limit = 'maxComputeWorkgroupsPerDimension';
+export const { g, description } = makeLimitTestGroup(limit);
+
+const kCreateComputePipelineTypes = [
+ 'createComputePipeline',
+ 'createComputePipelineAsync',
+] as const;
+type CreateComputePipelineType = (typeof kCreateComputePipelineTypes)[number];
+
+async function createComputePipeline(
+ device: GPUDevice,
+ descriptor: GPUComputePipelineDescriptor,
+ pipelineType: CreateComputePipelineType
+) {
+ switch (pipelineType) {
+ case 'createComputePipeline':
+ return device.createComputePipeline(descriptor);
+ case 'createComputePipelineAsync':
+ return await device.createComputePipelineAsync(descriptor);
+ }
+}
+
+// Note: dispatchWorkgroupsIndirect is not tested because exceeding the limit with an indirect dispatch is not a validation error
+g.test('dispatchWorkgroups,at_over')
+ .desc(`Test using dispatchWorkgroups at and over ${limit} limit`)
+ .params(
+ kMaximumLimitBaseParams
+ .combine('pipelineType', kCreateComputePipelineTypes)
+ .combine('axis', [0, 1, 2])
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, pipelineType, axis } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ const counts = [1, 1, 1];
+ counts[axis] = testValue;
+
+ const buffer = device.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.STORAGE,
+ });
+
+ const module = device.createShaderModule({
+ code: `
+ @compute @workgroup_size(1) fn main() {
+ }
+ `,
+ });
+
+ const pipeline = await createComputePipeline(
+ device,
+ {
+ layout: 'auto',
+ compute: {
+ module,
+ entryPoint: 'main',
+ },
+ },
+ pipelineType
+ );
+
+ const encoder = device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.dispatchWorkgroups(counts[0], counts[1], counts[2]);
+ pass.end();
+
+ await t.expectValidationError(() => {
+ encoder.finish();
+ }, shouldError);
+
+ buffer.destroy();
+ }
+ );
+ });
+
+g.test('validate')
+ .desc(
+ `Test that ${limit} <= maxComputeWorkgroupSizeX x maxComputeWorkgroupSizeY x maxComputeWorkgroupSizeZ`
+ )
+ .fn(t => {
+ const { adapter, defaultLimit, adapterLimit } = t;
+ const defaultMaxComputeWorkgroupSizeProduct =
+ t.getDefaultLimit('maxComputeWorkgroupSizeX') *
+ t.getDefaultLimit('maxComputeWorkgroupSizeY') *
+ t.getDefaultLimit('maxComputeWorkgroupSizeZ');
+ const maxComputeWorkgroupSizeProduct =
+ adapter.limits.maxComputeWorkgroupSizeX *
+ adapter.limits.maxComputeWorkgroupSizeY *
+ adapter.limits.maxComputeWorkgroupSizeZ;
+ t.expect(defaultLimit <= defaultMaxComputeWorkgroupSizeProduct);
+ t.expect(adapterLimit <= maxComputeWorkgroupSizeProduct);
+ });
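The validate test above asserts that the per-dimension dispatch limit does not exceed the product of the per-axis workgroup-size limits. As a hedged illustration, using the current WebGPU default limit values (which are assumed here and not stated in the patch itself):

const maxComputeWorkgroupsPerDimension = 65535; // assumed default limit
const workgroupSizeProduct = 256 * 256 * 64;    // assumed defaults for SizeX * SizeY * SizeZ
console.log(maxComputeWorkgroupsPerDimension <= workgroupSizeProduct); // true (65535 <= 4194304)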
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxDynamicStorageBuffersPerPipelineLayout.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxDynamicStorageBuffersPerPipelineLayout.spec.ts
new file mode 100644
index 0000000000..efd6c87196
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxDynamicStorageBuffersPerPipelineLayout.spec.ts
@@ -0,0 +1,39 @@
+import { range } from '../../../../../common/util/util.js';
+import { GPUConst } from '../../../../constants.js';
+
+import { kMaximumLimitBaseParams, makeLimitTestGroup } from './limit_utils.js';
+
+const limit = 'maxDynamicStorageBuffersPerPipelineLayout';
+export const { g, description } = makeLimitTestGroup(limit);
+
+g.test('createBindGroupLayout,at_over')
+ .desc(`Test using createBindGroupLayout at and over ${limit} limit`)
+ .params(
+ kMaximumLimitBaseParams.combine('visibility', [
+ GPUConst.ShaderStage.FRAGMENT,
+ GPUConst.ShaderStage.COMPUTE,
+ GPUConst.ShaderStage.COMPUTE | GPUConst.ShaderStage.FRAGMENT,
+ ])
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, visibility } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ shouldError ||= testValue > t.device.limits.maxStorageBuffersPerShaderStage;
+ await t.expectValidationError(() => {
+ device.createBindGroupLayout({
+ entries: range(testValue, i => ({
+ binding: i,
+ visibility,
+ buffer: {
+ type: 'storage',
+ hasDynamicOffset: true,
+ },
+ })),
+ });
+ }, shouldError);
+ }
+ );
+ });
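Two details of the test body above are worth spelling out: range builds one bind-group-layout entry per binding, and shouldError is widened because a value that also exceeds the separate maxStorageBuffersPerShaderStage limit is already a validation error on its own. A small sketch under those assumptions, with range reimplemented locally for illustration:

const range = <T>(n: number, fn: (i: number) => T): T[] =>
  Array.from({ length: n }, (_, i) => fn(i));

// For a hypothetical testValue of 3 this produces bindings 0, 1 and 2,
// each a dynamic storage buffer visible to the compute stage:
const entries = range(3, i => ({
  binding: i,
  visibility: 0x4, // GPUShaderStage.COMPUTE
  buffer: { type: 'storage' as const, hasDynamicOffset: true },
}));
console.log(entries.length); // 3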
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxDynamicUniformBuffersPerPipelineLayout.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxDynamicUniformBuffersPerPipelineLayout.spec.ts
new file mode 100644
index 0000000000..0491d07191
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxDynamicUniformBuffersPerPipelineLayout.spec.ts
@@ -0,0 +1,42 @@
+import { range } from '../../../../../common/util/util.js';
+import { GPUConst } from '../../../../constants.js';
+
+import { kMaximumLimitBaseParams, makeLimitTestGroup } from './limit_utils.js';
+
+const limit = 'maxDynamicUniformBuffersPerPipelineLayout';
+export const { g, description } = makeLimitTestGroup(limit);
+
+g.test('createBindGroupLayout,at_over')
+ .desc(`Test using createBindGroupLayout at and over ${limit} limit`)
+ .params(
+ kMaximumLimitBaseParams.combine('visibility', [
+ GPUConst.ShaderStage.VERTEX,
+ GPUConst.ShaderStage.FRAGMENT,
+ GPUConst.ShaderStage.VERTEX | GPUConst.ShaderStage.FRAGMENT,
+ GPUConst.ShaderStage.COMPUTE,
+ GPUConst.ShaderStage.COMPUTE | GPUConst.ShaderStage.VERTEX,
+ GPUConst.ShaderStage.COMPUTE | GPUConst.ShaderStage.FRAGMENT,
+ GPUConst.ShaderStage.COMPUTE | GPUConst.ShaderStage.VERTEX | GPUConst.ShaderStage.FRAGMENT,
+ ])
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, visibility } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ shouldError ||= testValue > t.device.limits.maxUniformBuffersPerShaderStage;
+ await t.expectValidationError(() => {
+ device.createBindGroupLayout({
+ entries: range(testValue, i => ({
+ binding: i,
+ visibility,
+ buffer: {
+ hasDynamicOffset: true,
+ },
+ })),
+ });
+ }, shouldError);
+ }
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxInterStageShaderComponents.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxInterStageShaderComponents.spec.ts
new file mode 100644
index 0000000000..eeb9eb0faf
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxInterStageShaderComponents.spec.ts
@@ -0,0 +1,151 @@
+import { range } from '../../../../../common/util/util.js';
+
+import { kMaximumLimitBaseParams, LimitsRequest, makeLimitTestGroup } from './limit_utils.js';
+
+function getTypeForNumComponents(numComponents: number) {
+ return numComponents > 1 ? `vec${numComponents}f` : 'f32';
+}
+
+function getPipelineDescriptor(
+ device: GPUDevice,
+ testValue: number,
+ pointList: boolean,
+ frontFacing: boolean,
+ sampleIndex: boolean,
+ sampleMaskIn: boolean,
+ sampleMaskOut: boolean
+): { pipelineDescriptor: GPURenderPipelineDescriptor; code: string } {
+ const maxVertexShaderOutputComponents = testValue - (pointList ? 1 : 0);
+ const maxFragmentShaderInputComponents =
+ testValue - (frontFacing ? 1 : 0) - (sampleIndex ? 1 : 0) - (sampleMaskIn ? 1 : 0);
+
+ const maxInterStageVariables = device.limits.maxInterStageShaderVariables;
+ const numComponents = Math.min(maxVertexShaderOutputComponents, maxFragmentShaderInputComponents);
+
+ const num4ComponentVaryings = Math.floor(numComponents / 4);
+ const lastVaryingNumComponents = numComponents % 4;
+
+ const varyings = `
+ ${range(num4ComponentVaryings, i => `@location(${i}) v4_${i}: vec4f,`).join('\n')}
+ ${
+ lastVaryingNumComponents > 0
+ ? `@location(${num4ComponentVaryings}) vx: ${getTypeForNumComponents(
+ lastVaryingNumComponents
+ )},`
+ : ``
+ }
+ `;
+
+ const code = `
+ // test value : ${testValue}
+ // maxInterStageShaderComponents : ${device.limits.maxInterStageShaderComponents}
+ // num components in vertex shader : ${numComponents}${pointList ? ' + point-list' : ''}
+ // num components in fragment shader : ${numComponents}${frontFacing ? ' + front-facing' : ''}${
+ sampleIndex ? ' + sample_index' : ''
+ }${sampleMaskIn ? ' + sample_mask' : ''}
+ // maxVertexShaderOutputComponents : ${maxVertexShaderOutputComponents}
+ // maxFragmentShaderInputComponents : ${maxFragmentShaderInputComponents}
+ // maxInterStageVariables : ${maxInterStageVariables}
+ // num used inter stage variables : ${Math.ceil(numComponents / 4)}
+
+ struct VSOut {
+ @builtin(position) p: vec4f,
+ ${varyings}
+ }
+ struct FSIn {
+ ${frontFacing ? '@builtin(front_facing) frontFacing: bool,' : ''}
+ ${sampleIndex ? '@builtin(sample_index) sampleIndex: u32,' : ''}
+ ${sampleMaskIn ? '@builtin(sample_mask) sampleMask: u32,' : ''}
+ ${varyings}
+ }
+ struct FSOut {
+ @location(0) color: vec4f,
+ ${sampleMaskOut ? '@builtin(sample_mask) sampleMask: u32,' : ''}
+ }
+ @vertex fn vs() -> VSOut {
+ var o: VSOut;
+ o.p = vec4f(0);
+ return o;
+ }
+ @fragment fn fs(i: FSIn) -> FSOut {
+ var o: FSOut;
+ o.color = vec4f(0);
+ return o;
+ }
+ `;
+ const module = device.createShaderModule({ code });
+ const pipelineDescriptor: GPURenderPipelineDescriptor = {
+ layout: 'auto',
+ primitive: {
+ topology: pointList ? 'point-list' : 'triangle-list',
+ },
+ vertex: {
+ module,
+ entryPoint: 'vs',
+ },
+ fragment: {
+ module,
+ entryPoint: 'fs',
+ targets: [
+ {
+ format: 'rgba8unorm',
+ },
+ ],
+ },
+ };
+ return { pipelineDescriptor, code };
+}
+
+const limit = 'maxInterStageShaderComponents';
+export const { g, description } = makeLimitTestGroup(limit);
+
+g.test('createRenderPipeline,at_over')
+ .desc(`Test using at and over ${limit} limit in createRenderPipeline(Async)`)
+ .params(
+ kMaximumLimitBaseParams
+ .combine('async', [false, true])
+ .combine('pointList', [false, true])
+ .combine('frontFacing', [false, true])
+ .combine('sampleIndex', [false, true])
+ .combine('sampleMaskIn', [false, true])
+ .combine('sampleMaskOut', [false, true])
+ )
+ .beforeAllSubcases(t => {
+ if (t.isCompatibility && (t.params.sampleMaskIn || t.params.sampleMaskOut)) {
+ t.skip('sample_mask not supported in compatibility mode');
+ }
+ })
+ .fn(async t => {
+ const {
+ limitTest,
+ testValueName,
+ async,
+ pointList,
+ frontFacing,
+ sampleIndex,
+ sampleMaskIn,
+ sampleMaskOut,
+ } = t.params;
+ // Request the largest value of maxInterStageShaderVariables so the test can use as many
+ // inter-stage shader components as possible without being limited by
+ // maxInterStageShaderVariables.
+ const extraLimits: LimitsRequest = { maxInterStageShaderVariables: 'adapterLimit' };
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ const { pipelineDescriptor, code } = getPipelineDescriptor(
+ device,
+ testValue,
+ pointList,
+ frontFacing,
+ sampleIndex,
+ sampleMaskIn,
+ sampleMaskOut
+ );
+
+ await t.testCreateRenderPipeline(pipelineDescriptor, async, shouldError, code);
+ },
+ extraLimits
+ );
+ });
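The varying-packing logic in getPipelineDescriptor is easiest to see with numbers. A hedged worked example for numComponents = 14 (assuming no builtins are consuming components):

const numComponents = 14;
const num4ComponentVaryings = Math.floor(numComponents / 4); // 3 full vec4f varyings
const lastVaryingNumComponents = numComponents % 4;          // 2, emitted as a vec2f

// Generated varyings:
//   @location(0) v4_0: vec4f,
//   @location(1) v4_1: vec4f,
//   @location(2) v4_2: vec4f,
//   @location(3) vx: vec2f,
// i.e. Math.ceil(14 / 4) = 4 inter-stage variables carrying 14 components.
console.log(num4ComponentVaryings, lastVaryingNumComponents);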
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxInterStageShaderVariables.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxInterStageShaderVariables.spec.ts
new file mode 100644
index 0000000000..e54b7f7df1
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxInterStageShaderVariables.spec.ts
@@ -0,0 +1,44 @@
+import { kMaximumLimitBaseParams, makeLimitTestGroup } from './limit_utils.js';
+
+function getPipelineDescriptor(device: GPUDevice, testValue: number): GPURenderPipelineDescriptor {
+ const code = `
+ struct VSOut {
+ @builtin(position) p: vec4f,
+ @location(${testValue}) v: f32,
+ }
+ @vertex fn vs() -> VSOut {
+ var o: VSOut;
+ o.p = vec4f(0);
+ o.v = 1.0;
+ return o;
+ }
+ `;
+ const module = device.createShaderModule({ code });
+ return {
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'vs',
+ },
+ };
+}
+
+const limit = 'maxInterStageShaderVariables';
+export const { g, description } = makeLimitTestGroup(limit);
+
+g.test('createRenderPipeline,at_over')
+ .desc(`Test using at and over ${limit} limit in createRenderPipeline(Async)`)
+ .params(kMaximumLimitBaseParams.combine('async', [false, true]))
+ .fn(async t => {
+ const { limitTest, testValueName, async } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ const lastIndex = testValue - 1;
+ const pipelineDescriptor = getPipelineDescriptor(device, lastIndex);
+
+ await t.testCreateRenderPipeline(pipelineDescriptor, async, shouldError);
+ }
+ );
+ });
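Since @location indices are zero-based, the largest location a device exposing N inter-stage variables accepts is N - 1; the test leans on this by placing a single varying at testValue - 1. A minimal sketch, assuming a device limit of 16:

const maxInterStageShaderVariables = 16;                  // assumed device limit
const atLimitLocation = maxInterStageShaderVariables - 1; // @location(15): still valid
const overLimitLocation = maxInterStageShaderVariables;   // @location(16): validation error expected
console.log(atLimitLocation, overLimitLocation);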
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxSampledTexturesPerShaderStage.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxSampledTexturesPerShaderStage.spec.ts
new file mode 100644
index 0000000000..cd90d9d907
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxSampledTexturesPerShaderStage.spec.ts
@@ -0,0 +1,144 @@
+import {
+ range,
+ reorder,
+ kReorderOrderKeys,
+ ReorderOrder,
+} from '../../../../../common/util/util.js';
+import { kShaderStageCombinationsWithStage } from '../../../../capability_info.js';
+
+import {
+ kMaximumLimitBaseParams,
+ makeLimitTestGroup,
+ kBindGroupTests,
+ kBindingCombinations,
+ getPipelineTypeForBindingCombination,
+ getPerStageWGSLForBindingCombination,
+} from './limit_utils.js';
+
+const limit = 'maxSampledTexturesPerShaderStage';
+export const { g, description } = makeLimitTestGroup(limit);
+
+function createBindGroupLayout(
+ device: GPUDevice,
+ visibility: number,
+ order: ReorderOrder,
+ numBindings: number
+) {
+ return device.createBindGroupLayout({
+ entries: reorder(
+ order,
+ range(numBindings, i => ({
+ binding: i,
+ visibility,
+ texture: {},
+ }))
+ ),
+ });
+}
+
+g.test('createBindGroupLayout,at_over')
+ .desc(
+ `
+ Test using at and over ${limit} limit in createBindGroupLayout
+
+ Note: We also test order to make sure the implementation isn't just looking
+ at the last entry.
+ `
+ )
+ .params(
+ kMaximumLimitBaseParams
+ .combine('visibility', kShaderStageCombinationsWithStage)
+ .combine('order', kReorderOrderKeys)
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, visibility, order } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ await t.expectValidationError(
+ () => createBindGroupLayout(device, visibility, order, testValue),
+ shouldError
+ );
+ }
+ );
+ });
+
+g.test('createPipelineLayout,at_over')
+ .desc(
+ `
+ Test using at and over ${limit} limit in createPipelineLayout
+
+ Note: We also test order to make sure the implementation isn't just looking
+ at the last entry.
+ `
+ )
+ .params(
+ kMaximumLimitBaseParams
+ .combine('visibility', kShaderStageCombinationsWithStage)
+ .combine('order', kReorderOrderKeys)
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, visibility, order } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ const kNumGroups = 3;
+ const bindGroupLayouts = range(kNumGroups, i => {
+ const minInGroup = Math.floor(testValue / kNumGroups);
+ const numInGroup = i ? minInGroup : testValue - minInGroup * (kNumGroups - 1);
+ return createBindGroupLayout(device, visibility, order, numInGroup);
+ });
+ await t.expectValidationError(
+ () => device.createPipelineLayout({ bindGroupLayouts }),
+ shouldError
+ );
+ }
+ );
+ });
+
+g.test('createPipeline,at_over')
+ .desc(
+ `
+ Test using createRenderPipeline(Async) and createComputePipeline(Async) at and over ${limit} limit
+
+ Note: We also test order to make sure the implementation isn't just looking
+ at the last entry.
+ `
+ )
+ .params(
+ kMaximumLimitBaseParams
+ .combine('async', [false, true] as const)
+ .combine('bindingCombination', kBindingCombinations)
+ .combine('order', kReorderOrderKeys)
+ .combine('bindGroupTest', kBindGroupTests)
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, async, bindingCombination, order, bindGroupTest } = t.params;
+ const pipelineType = getPipelineTypeForBindingCombination(bindingCombination);
+
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, actualLimit, shouldError }) => {
+ const code = getPerStageWGSLForBindingCombination(
+ bindingCombination,
+ order,
+ bindGroupTest,
+ (i, j) => `var u${j}_${i}: texture_2d<f32>`,
+ (i, j) => `_ = textureLoad(u${j}_${i}, vec2u(0), 0);`,
+ testValue
+ );
+ const module = device.createShaderModule({ code });
+
+ await t.testCreatePipeline(
+ pipelineType,
+ async,
+ module,
+ shouldError,
+ `actualLimit: ${actualLimit}, testValue: ${testValue}\n:${code}`
+ );
+ }
+ );
+ });
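The createPipelineLayout test spreads testValue bindings across three bind group layouts so the limit is only reached in aggregate, with the first group absorbing the remainder. A worked instance for a hypothetical testValue of 16:

const testValue = 16;
const kNumGroups = 3;
const minInGroup = Math.floor(testValue / kNumGroups); // 5
const perGroup = Array.from({ length: kNumGroups }, (_, i) =>
  i ? minInGroup : testValue - minInGroup * (kNumGroups - 1)
);
console.log(perGroup, perGroup.reduce((a, b) => a + b, 0)); // [ 6, 5, 5 ] 16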
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxSamplersPerShaderStage.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxSamplersPerShaderStage.spec.ts
new file mode 100644
index 0000000000..3103d423c9
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxSamplersPerShaderStage.spec.ts
@@ -0,0 +1,145 @@
+import {
+ range,
+ reorder,
+ kReorderOrderKeys,
+ ReorderOrder,
+} from '../../../../../common/util/util.js';
+import { kShaderStageCombinationsWithStage } from '../../../../capability_info.js';
+
+import {
+ kMaximumLimitBaseParams,
+ makeLimitTestGroup,
+ kBindGroupTests,
+ kBindingCombinations,
+ getPipelineTypeForBindingCombination,
+ getPerStageWGSLForBindingCombination,
+} from './limit_utils.js';
+
+const limit = 'maxSamplersPerShaderStage';
+export const { g, description } = makeLimitTestGroup(limit);
+
+function createBindGroupLayout(
+ device: GPUDevice,
+ visibility: number,
+ order: ReorderOrder,
+ numBindings: number
+) {
+ return device.createBindGroupLayout({
+ entries: reorder(
+ order,
+ range(numBindings, i => ({
+ binding: i,
+ visibility,
+ sampler: {},
+ }))
+ ),
+ });
+}
+
+g.test('createBindGroupLayout,at_over')
+ .desc(
+ `
+ Test using at and over ${limit} limit in createBindGroupLayout
+
+ Note: We also test order to make sure the implementation isn't just looking
+ at the last entry.
+ `
+ )
+ .params(
+ kMaximumLimitBaseParams
+ .combine('visibility', kShaderStageCombinationsWithStage)
+ .combine('order', kReorderOrderKeys)
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, visibility, order } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ await t.expectValidationError(
+ () => createBindGroupLayout(device, visibility, order, testValue),
+ shouldError
+ );
+ }
+ );
+ });
+
+g.test('createPipelineLayout,at_over')
+ .desc(
+ `
+ Test using at and over ${limit} limit in createPipelineLayout
+
+ Note: We also test order to make sure the implementation isn't just looking
+ at the last entry.
+ `
+ )
+ .params(
+ kMaximumLimitBaseParams
+ .combine('visibility', kShaderStageCombinationsWithStage)
+ .combine('order', kReorderOrderKeys)
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, visibility, order } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ const kNumGroups = 3;
+ const bindGroupLayouts = range(kNumGroups, i => {
+ const minInGroup = Math.floor(testValue / kNumGroups);
+ const numInGroup = i ? minInGroup : testValue - minInGroup * (kNumGroups - 1);
+ return createBindGroupLayout(device, visibility, order, numInGroup);
+ });
+ await t.expectValidationError(
+ () => device.createPipelineLayout({ bindGroupLayouts }),
+ shouldError
+ );
+ }
+ );
+ });
+
+g.test('createPipeline,at_over')
+ .desc(
+ `
+ Test using createRenderPipeline(Async) and createComputePipeline(Async) at and over ${limit} limit
+
+ Note: We also test order to make sure the implementation isn't just looking
+ at the last entry.
+ `
+ )
+ .params(
+ kMaximumLimitBaseParams
+ .combine('async', [false, true] as const)
+ .combine('bindingCombination', kBindingCombinations)
+ .combine('order', kReorderOrderKeys)
+ .combine('bindGroupTest', kBindGroupTests)
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, async, bindingCombination, order, bindGroupTest } = t.params;
+ const pipelineType = getPipelineTypeForBindingCombination(bindingCombination);
+
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, actualLimit, shouldError }) => {
+ const code = getPerStageWGSLForBindingCombination(
+ bindingCombination,
+ order,
+ bindGroupTest,
+ (i, j) => `var u${j}_${i}: sampler`,
+ (i, j) => `_ = textureGather(0, tex, u${j}_${i}, vec2f(0));`,
+ testValue,
+ '@group(3) @binding(1) var tex: texture_2d<f32>;'
+ );
+ const module = device.createShaderModule({ code });
+
+ await t.testCreatePipeline(
+ pipelineType,
+ async,
+ module,
+ shouldError,
+ `actualLimit: ${actualLimit}, testValue: ${testValue}\n:${code}`
+ );
+ }
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxStorageBufferBindingSize.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxStorageBufferBindingSize.spec.ts
new file mode 100644
index 0000000000..9c39f3df61
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxStorageBufferBindingSize.spec.ts
@@ -0,0 +1,161 @@
+import { align, roundDown } from '../../../../util/math.js';
+
+import {
+ kMaximumLimitBaseParams,
+ makeLimitTestGroup,
+ LimitMode,
+ MaximumLimitValueTest,
+ MaximumTestValue,
+} from './limit_utils.js';
+
+const kBufferParts = ['wholeBuffer', 'biggerBufferWithOffset'] as const;
+type BufferPart = (typeof kBufferParts)[number];
+
+function getSizeAndOffsetForBufferPart(device: GPUDevice, bufferPart: BufferPart, size: number) {
+ const align = device.limits.minUniformBufferOffsetAlignment;
+ switch (bufferPart) {
+ case 'wholeBuffer':
+ return { size, offset: 0 };
+ case 'biggerBufferWithOffset':
+ return { size: size + align, offset: align };
+ }
+}
+
+const kStorageBufferRequiredSizeAlignment = 4;
+
+// We also need to update the maxBufferSize limit when testing.
+const kExtraLimits = { maxBufferSize: 'maxLimit' as LimitMode };
+
+function getDeviceLimitToRequest(
+ limitValueTest: MaximumLimitValueTest,
+ defaultLimit: number,
+ maximumLimit: number
+) {
+ switch (limitValueTest) {
+ case 'atDefault':
+ return defaultLimit;
+ case 'underDefault':
+ return defaultLimit - kStorageBufferRequiredSizeAlignment;
+ case 'betweenDefaultAndMaximum':
+ return Math.floor((defaultLimit + maximumLimit) / 2);
+ case 'atMaximum':
+ return maximumLimit;
+ case 'overMaximum':
+ return maximumLimit + kStorageBufferRequiredSizeAlignment;
+ }
+}
+
+function getTestValue(testValueName: MaximumTestValue, requestedLimit: number) {
+ switch (testValueName) {
+ case 'atLimit':
+ return roundDown(requestedLimit, kStorageBufferRequiredSizeAlignment);
+ case 'overLimit':
+ // Note: the requestedLimit might not meet alignment requirements.
+ return align(
+ requestedLimit + kStorageBufferRequiredSizeAlignment,
+ kStorageBufferRequiredSizeAlignment
+ );
+ }
+}
+
+function getDeviceLimitToRequestAndValueToTest(
+ limitValueTest: MaximumLimitValueTest,
+ testValueName: MaximumTestValue,
+ defaultLimit: number,
+ maximumLimit: number
+) {
+ const requestedLimit = getDeviceLimitToRequest(limitValueTest, defaultLimit, maximumLimit);
+ return {
+ requestedLimit,
+ testValue: getTestValue(testValueName, requestedLimit),
+ };
+}
+
+const limit = 'maxStorageBufferBindingSize';
+export const { g, description } = makeLimitTestGroup(limit);
+
+g.test('createBindGroup,at_over')
+ .desc(`Test using createBindGroup at and over ${limit} limit`)
+ .params(kMaximumLimitBaseParams.combine('bufferPart', kBufferParts))
+ .fn(async t => {
+ const { limitTest, testValueName, bufferPart } = t.params;
+ const { defaultLimit, adapterLimit: maximumLimit } = t;
+ const { requestedLimit, testValue } = getDeviceLimitToRequestAndValueToTest(
+ limitTest,
+ testValueName,
+ defaultLimit,
+ maximumLimit
+ );
+
+ await t.testDeviceWithSpecificLimits(
+ requestedLimit,
+ testValue,
+ async ({ device, testValue, shouldError }) => {
+ const bindGroupLayout = device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE,
+ buffer: { type: 'storage' },
+ },
+ ],
+ });
+
+ const { size, offset } = getSizeAndOffsetForBufferPart(device, bufferPart, testValue);
+
+ // If the size of the buffer exceeds the related but separate maxBufferSize limit, we can
+ // skip the validation since the allocation will fail with a validation error.
+ if (size > device.limits.maxBufferSize) {
+ return;
+ }
+
+ device.pushErrorScope('out-of-memory');
+ const storageBuffer = t.trackForCleanup(
+ device.createBuffer({
+ usage: GPUBufferUsage.STORAGE,
+ size,
+ })
+ );
+ const outOfMemoryError = await device.popErrorScope();
+
+ if (!outOfMemoryError) {
+ await t.expectValidationError(
+ () => {
+ device.createBindGroup({
+ layout: bindGroupLayout,
+ entries: [
+ {
+ binding: 0,
+ resource: {
+ buffer: storageBuffer,
+ offset,
+ size: testValue,
+ },
+ },
+ ],
+ });
+ },
+ shouldError,
+ `size: ${size}, offset: ${offset}, testValue: ${testValue}`
+ );
+ }
+ },
+ kExtraLimits
+ );
+ });
+
+g.test('validate')
+ .desc(`Test that ${limit} is a multiple of 4 bytes`)
+ .fn(t => {
+ const { defaultLimit, adapterLimit } = t;
+ t.expect(defaultLimit % 4 === 0);
+ t.expect(adapterLimit % 4 === 0);
+ });
+
+g.test('validate,maxBufferSize')
+ .desc(`Test that ${limit} <= maxBufferSize`)
+ .fn(t => {
+ const { adapter, defaultLimit, adapterLimit } = t;
+ t.expect(defaultLimit <= t.getDefaultLimit('maxBufferSize'));
+ t.expect(adapterLimit <= adapter.limits.maxBufferSize);
+ });
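Because storage-buffer binding sizes must be 4-byte aligned, the at/over values are snapped to 4-byte multiples around whatever limit was requested. A sketch with an intentionally misaligned, hypothetical requested limit; the roundDown and align helpers mirror the CTS math utilities:

const roundDown = (n: number, alignment: number) => Math.floor(n / alignment) * alignment;
const align = (n: number, alignment: number) => Math.ceil(n / alignment) * alignment;

const kStorageBufferRequiredSizeAlignment = 4;
const requestedLimit = 1_000_002; // hypothetical, not a multiple of 4
const atLimit = roundDown(requestedLimit, kStorageBufferRequiredSizeAlignment); // 1_000_000
const overLimit = align(
  requestedLimit + kStorageBufferRequiredSizeAlignment,
  kStorageBufferRequiredSizeAlignment
); // 1_000_008
console.log(atLimit, overLimit);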
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxStorageBuffersPerShaderStage.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxStorageBuffersPerShaderStage.spec.ts
new file mode 100644
index 0000000000..5dfff78907
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxStorageBuffersPerShaderStage.spec.ts
@@ -0,0 +1,174 @@
+import {
+ range,
+ reorder,
+ kReorderOrderKeys,
+ ReorderOrder,
+} from '../../../../../common/util/util.js';
+import { GPUConst } from '../../../../constants.js';
+
+import {
+ kMaximumLimitBaseParams,
+ makeLimitTestGroup,
+ kBindGroupTests,
+ kBindingCombinations,
+ getPipelineTypeForBindingCombination,
+ getPerStageWGSLForBindingCombination,
+} from './limit_utils.js';
+
+const limit = 'maxStorageBuffersPerShaderStage';
+export const { g, description } = makeLimitTestGroup(limit);
+
+function createBindGroupLayout(
+ device: GPUDevice,
+ visibility: number,
+ type: GPUBufferBindingType,
+ order: ReorderOrder,
+ numBindings: number
+) {
+ return device.createBindGroupLayout({
+ entries: reorder(
+ order,
+ range(numBindings, i => ({
+ binding: i,
+ visibility,
+ buffer: { type },
+ }))
+ ),
+ });
+}
+
+g.test('createBindGroupLayout,at_over')
+ .desc(
+ `
+ Test using at and over ${limit} limit in createBindGroupLayout
+
+ Note: We also test order to make sure the implementation isn't just looking
+ at the last entry.
+ `
+ )
+ .params(
+ kMaximumLimitBaseParams
+ .combine('visibility', [
+ GPUConst.ShaderStage.VERTEX,
+ GPUConst.ShaderStage.FRAGMENT,
+ GPUConst.ShaderStage.VERTEX | GPUConst.ShaderStage.FRAGMENT,
+ GPUConst.ShaderStage.COMPUTE,
+ GPUConst.ShaderStage.VERTEX | GPUConst.ShaderStage.COMPUTE,
+ GPUConst.ShaderStage.FRAGMENT | GPUConst.ShaderStage.COMPUTE,
+ GPUConst.ShaderStage.VERTEX | GPUConst.ShaderStage.FRAGMENT | GPUConst.ShaderStage.COMPUTE,
+ ])
+ .combine('type', ['storage', 'read-only-storage'] as GPUBufferBindingType[])
+ .combine('order', kReorderOrderKeys)
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, visibility, order, type } = t.params;
+
+ if (visibility & GPUConst.ShaderStage.VERTEX && type === 'storage') {
+ // the vertex stage does not support writable ('storage') buffers, only 'read-only-storage'
+ return;
+ }
+
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ await t.expectValidationError(() => {
+ createBindGroupLayout(device, visibility, type, order, testValue);
+ }, shouldError);
+ }
+ );
+ });
+
+g.test('createPipelineLayout,at_over')
+ .desc(
+ `
+ Test using at and over ${limit} limit in createPipelineLayout
+
+ Note: We also test order to make sure the implementation isn't just looking
+ at the last entry.
+ `
+ )
+ .params(
+ kMaximumLimitBaseParams
+ .combine('visibility', [
+ GPUConst.ShaderStage.VERTEX,
+ GPUConst.ShaderStage.FRAGMENT,
+ GPUConst.ShaderStage.VERTEX | GPUConst.ShaderStage.FRAGMENT,
+ GPUConst.ShaderStage.COMPUTE,
+ GPUConst.ShaderStage.VERTEX | GPUConst.ShaderStage.COMPUTE,
+ GPUConst.ShaderStage.FRAGMENT | GPUConst.ShaderStage.COMPUTE,
+ GPUConst.ShaderStage.VERTEX | GPUConst.ShaderStage.FRAGMENT | GPUConst.ShaderStage.COMPUTE,
+ ])
+ .combine('type', ['storage', 'read-only-storage'] as GPUBufferBindingType[])
+ .combine('order', kReorderOrderKeys)
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, visibility, order, type } = t.params;
+
+ if (visibility & GPUConst.ShaderStage.VERTEX && type === 'storage') {
+ // the vertex stage does not support writable ('storage') buffers, only 'read-only-storage'
+ return;
+ }
+
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ const kNumGroups = 3;
+ const bindGroupLayouts = range(kNumGroups, i => {
+ const minInGroup = Math.floor(testValue / kNumGroups);
+ const numInGroup = i ? minInGroup : testValue - minInGroup * (kNumGroups - 1);
+ return createBindGroupLayout(device, visibility, type, order, numInGroup);
+ });
+ await t.expectValidationError(
+ () => device.createPipelineLayout({ bindGroupLayouts }),
+ shouldError
+ );
+ }
+ );
+ });
+
+g.test('createPipeline,at_over')
+ .desc(
+ `
+ Test using createRenderPipeline(Async) and createComputePipeline(Async) at and over ${limit} limit
+
+ Note: We also test order to make sure the implementation isn't just looking
+ at the last entry.
+ `
+ )
+ .params(
+ kMaximumLimitBaseParams
+ .combine('async', [false, true] as const)
+ .combine('bindingCombination', kBindingCombinations)
+ .combine('order', kReorderOrderKeys)
+ .combine('bindGroupTest', kBindGroupTests)
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, async, bindingCombination, order, bindGroupTest } = t.params;
+ const pipelineType = getPipelineTypeForBindingCombination(bindingCombination);
+
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, actualLimit, shouldError }) => {
+ const code = getPerStageWGSLForBindingCombination(
+ bindingCombination,
+ order,
+ bindGroupTest,
+ (i, j) => `var<storage> u${j}_${i}: f32`,
+ (i, j) => `_ = u${j}_${i};`,
+ testValue
+ );
+ const module = device.createShaderModule({ code });
+
+ await t.testCreatePipeline(
+ pipelineType,
+ async,
+ module,
+ shouldError,
+ `actualLimit: ${actualLimit}, testValue: ${testValue}\n:${code}`
+ );
+ }
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxStorageTexturesPerShaderStage.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxStorageTexturesPerShaderStage.spec.ts
new file mode 100644
index 0000000000..dee6069b44
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxStorageTexturesPerShaderStage.spec.ts
@@ -0,0 +1,156 @@
+import {
+ range,
+ reorder,
+ ReorderOrder,
+ kReorderOrderKeys,
+} from '../../../../../common/util/util.js';
+import { GPUConst } from '../../../../constants.js';
+
+import {
+ kMaximumLimitBaseParams,
+ makeLimitTestGroup,
+ kBindGroupTests,
+ getPerStageWGSLForBindingCombinationStorageTextures,
+ getPipelineTypeForBindingCombination,
+ BindingCombination,
+} from './limit_utils.js';
+
+const limit = 'maxStorageTexturesPerShaderStage';
+export const { g, description } = makeLimitTestGroup(limit);
+
+function createBindGroupLayout(
+ device: GPUDevice,
+ visibility: number,
+ order: ReorderOrder,
+ numBindings: number
+) {
+ return device.createBindGroupLayout({
+ entries: reorder(
+ order,
+ range(numBindings, i => ({
+ binding: i,
+ visibility,
+ storageTexture: { format: 'rgba8unorm' },
+ }))
+ ),
+ });
+}
+
+g.test('createBindGroupLayout,at_over')
+ .desc(
+ `
+ Test using at and over ${limit} limit in createBindGroupLayout
+
+ Note: We also test order to make sure the implementation isn't just looking
+ at the last entry.
+ `
+ )
+ .params(
+ kMaximumLimitBaseParams
+ .combine('visibility', [
+ GPUConst.ShaderStage.FRAGMENT,
+ GPUConst.ShaderStage.COMPUTE,
+ GPUConst.ShaderStage.FRAGMENT | GPUConst.ShaderStage.COMPUTE,
+ ])
+ .combine('order', kReorderOrderKeys)
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, visibility, order } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ await t.expectValidationError(
+ () => createBindGroupLayout(device, visibility, order, testValue),
+ shouldError
+ );
+ }
+ );
+ });
+
+g.test('createPipelineLayout,at_over')
+ .desc(
+ `
+ Test using at and over ${limit} limit in createPipelineLayout
+
+ Note: We also test order to make sure the implementation isn't just looking
+ at the last entry.
+ `
+ )
+ .params(
+ kMaximumLimitBaseParams
+ .combine('visibility', [
+ GPUConst.ShaderStage.FRAGMENT,
+ GPUConst.ShaderStage.COMPUTE,
+ GPUConst.ShaderStage.FRAGMENT | GPUConst.ShaderStage.COMPUTE,
+ ])
+ .combine('order', kReorderOrderKeys)
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, visibility, order } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ const kNumGroups = 3;
+ const bindGroupLayouts = range(kNumGroups, i => {
+ const minInGroup = Math.floor(testValue / kNumGroups);
+ const numInGroup = i ? minInGroup : testValue - minInGroup * (kNumGroups - 1);
+ return createBindGroupLayout(device, visibility, order, numInGroup);
+ });
+ await t.expectValidationError(
+ () => device.createPipelineLayout({ bindGroupLayouts }),
+ shouldError
+ );
+ }
+ );
+ });
+
+g.test('createPipeline,at_over')
+ .desc(
+ `
+ Test using createRenderPipeline(Async) and createComputePipeline(Async) at and over ${limit} limit
+
+ Note: We also test order to make sure the implementation isn't just looking
+ at the last entry.
+ `
+ )
+ .params(
+ kMaximumLimitBaseParams
+ .combine('async', [false, true] as const)
+ .combine('bindingCombination', ['fragment', 'compute'] as BindingCombination[])
+ .combine('order', kReorderOrderKeys)
+ .combine('bindGroupTest', kBindGroupTests)
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, async, bindingCombination, order, bindGroupTest } = t.params;
+ const pipelineType = getPipelineTypeForBindingCombination(bindingCombination);
+
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, actualLimit, shouldError }) => {
+ if (bindingCombination === 'fragment') {
+ return;
+ }
+
+ const code = getPerStageWGSLForBindingCombinationStorageTextures(
+ bindingCombination,
+ order,
+ bindGroupTest,
+ (i, j) => `var u${j}_${i}: texture_storage_2d<rgba8unorm, write>`,
+ (i, j) => `textureStore(u${j}_${i}, vec2u(0), vec4f(1));`,
+ testValue
+ );
+ const module = device.createShaderModule({ code });
+
+ await t.testCreatePipeline(
+ pipelineType,
+ async,
+ module,
+ shouldError,
+ `actualLimit: ${actualLimit}, testValue: ${testValue}\n:${code}`
+ );
+ }
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxTextureArrayLayers.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxTextureArrayLayers.spec.ts
new file mode 100644
index 0000000000..7dc5fccc97
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxTextureArrayLayers.spec.ts
@@ -0,0 +1,27 @@
+import { kMaximumLimitBaseParams, makeLimitTestGroup } from './limit_utils.js';
+
+const limit = 'maxTextureArrayLayers';
+export const { g, description } = makeLimitTestGroup(limit);
+
+g.test('createTexture,at_over')
+ .desc(`Test using at and over ${limit} limit`)
+ .params(kMaximumLimitBaseParams)
+ .fn(async t => {
+ const { limitTest, testValueName } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ await t.testForValidationErrorWithPossibleOutOfMemoryError(() => {
+ const texture = device.createTexture({
+ size: [1, 1, testValue],
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ });
+ if (!shouldError) {
+ texture.destroy();
+ }
+ }, shouldError);
+ }
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxTextureDimension1D.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxTextureDimension1D.spec.ts
new file mode 100644
index 0000000000..5a1f56ce02
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxTextureDimension1D.spec.ts
@@ -0,0 +1,34 @@
+import { kMaximumLimitBaseParams, makeLimitTestGroup } from './limit_utils.js';
+
+const limit = 'maxTextureDimension1D';
+export const { g, description } = makeLimitTestGroup(limit);
+
+g.test('createTexture,at_over')
+ .desc(`Test using at and over ${limit} limit`)
+ .params(kMaximumLimitBaseParams)
+ .fn(async t => {
+ const { limitTest, testValueName } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ await t.testForValidationErrorWithPossibleOutOfMemoryError(() => {
+ const texture = device.createTexture({
+ size: [testValue, 1, 1],
+ format: 'rgba8unorm',
+ dimension: '1d',
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ });
+
+ // MAINTENANCE_TODO: Remove this 'if' once the Chrome bug that generates an
+ // error when calling destroy() on an invalid texture is fixed; the check
+ // does not otherwise affect the test.
+ if (!shouldError) {
+ texture.destroy();
+ }
+ }, shouldError);
+ }
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxTextureDimension2D.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxTextureDimension2D.spec.ts
new file mode 100644
index 0000000000..150e8c6097
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxTextureDimension2D.spec.ts
@@ -0,0 +1,133 @@
+import { getGPU } from '../../../../../common/util/navigator_gpu.js';
+import { kAllCanvasTypes, createCanvas } from '../../../../util/create_elements.js';
+
+import { kMaximumLimitBaseParams, makeLimitTestGroup } from './limit_utils.js';
+
+const limit = 'maxTextureDimension2D';
+export const { g, description } = makeLimitTestGroup(limit);
+
+g.test('createTexture,at_over')
+ .desc(`Test using at and over ${limit} limit`)
+ .params(kMaximumLimitBaseParams)
+ .fn(async t => {
+ const { limitTest, testValueName } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, shouldError, testValue, actualLimit }) => {
+ for (let dimensionIndex = 0; dimensionIndex < 2; ++dimensionIndex) {
+ const size = [1, 1, 1];
+ size[dimensionIndex] = testValue;
+
+ await t.testForValidationErrorWithPossibleOutOfMemoryError(
+ () => {
+ const texture = device.createTexture({
+ size,
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ });
+
+ // MAINTENANCE_TODO: Remove this 'if' once the Chrome bug that generates an
+ // error when calling destroy() on an invalid texture is fixed; the check
+ // does not otherwise affect the test.
+ if (!shouldError) {
+ texture.destroy();
+ }
+ },
+ shouldError,
+ `size: ${size}, actualLimit: ${actualLimit}`
+ );
+ }
+ }
+ );
+ });
+
+g.test('configure,at_over')
+ .desc(`Test using at and over ${limit} limit`)
+ .params(kMaximumLimitBaseParams.combine('canvasType', kAllCanvasTypes))
+ .fn(async t => {
+ const { limitTest, testValueName, canvasType } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, shouldError, testValue, actualLimit }) => {
+ for (let dimensionIndex = 0; dimensionIndex < 2; ++dimensionIndex) {
+ const size = [1, 1];
+ size[dimensionIndex] = testValue;
+
+ // Creating the canvas should not fail, even if the size is too large, but it
+ // might fail if we're in a worker where HTMLCanvasElement does not exist.
+ const canvas = createCanvas(t, canvasType, size[0], size[1])!;
+ if (canvas) {
+ const context = canvas.getContext('webgpu') as GPUCanvasContext;
+ t.expect(!!context, 'should not fail to create context even if size is too large');
+
+ await t.testForValidationErrorWithPossibleOutOfMemoryError(
+ () => {
+ context.configure({
+ device,
+ format: getGPU(t.rec).getPreferredCanvasFormat(),
+ });
+ },
+ shouldError,
+ `size: ${size}, actualLimit: ${actualLimit}`
+ );
+ }
+ }
+ }
+ );
+ });
+
+g.test('getCurrentTexture,at_over')
+ .desc(`Test using at and over ${limit} limit`)
+ .params(kMaximumLimitBaseParams.combine('canvasType', kAllCanvasTypes))
+ .fn(async t => {
+ const { limitTest, testValueName, canvasType } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, shouldError, testValue, actualLimit }) => {
+ for (let dimensionIndex = 0; dimensionIndex < 2; ++dimensionIndex) {
+ const size = [1, 1];
+ size[dimensionIndex] = testValue;
+
+ // Start with a small size so configure will succeed.
+ // Creating the canvas should not fail, but it might fail if we're in a
+ // worker where HTMLCanvasElement does not exist.
+ const canvas = createCanvas(t, canvasType, 1, 1)!;
+ if (canvas) {
+ const context = canvas.getContext('webgpu') as GPUCanvasContext;
+ t.expect(!!context, 'should not fail to create context even if size is too large');
+
+ context.configure({
+ device,
+ format: getGPU(t.rec).getPreferredCanvasFormat(),
+ });
+
+ if (canvas) {
+ await t.testForValidationErrorWithPossibleOutOfMemoryError(
+ () => {
+ canvas.width = size[0];
+ canvas.height = size[1];
+ const texture = context.getCurrentTexture();
+
+ // MAINTENANCE_TODO: Remove this 'if' once the Chrome bug that generates an
+ // error when calling destroy() on an invalid texture is fixed; the check
+ // does not otherwise affect the test.
+ if (!shouldError) {
+ texture.destroy();
+ }
+ },
+ shouldError,
+ `size: ${size}, actualLimit: ${actualLimit}`
+ );
+ }
+ }
+ }
+ }
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxTextureDimension3D.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxTextureDimension3D.spec.ts
new file mode 100644
index 0000000000..4faacd8fd5
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxTextureDimension3D.spec.ts
@@ -0,0 +1,39 @@
+import { kMaximumLimitBaseParams, makeLimitTestGroup } from './limit_utils.js';
+
+const limit = 'maxTextureDimension3D';
+export const { g, description } = makeLimitTestGroup(limit);
+
+g.test('createTexture,at_over')
+ .desc(`Test using at and over ${limit} limit`)
+ .params(kMaximumLimitBaseParams)
+ .fn(async t => {
+ const { limitTest, testValueName } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, shouldError, testValue }) => {
+ for (let dimensionIndex = 0; dimensionIndex < 3; ++dimensionIndex) {
+ const size = [2, 2, 2];
+ size[dimensionIndex] = testValue;
+
+ await t.testForValidationErrorWithPossibleOutOfMemoryError(() => {
+ const texture = device.createTexture({
+ size,
+ format: 'rgba8unorm',
+ dimension: '3d',
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ });
+
+ // MAINTENANCE_TODO: Remove this 'if' once the Chrome bug that generates an
+ // error when calling destroy() on an invalid texture is fixed; the check
+ // does not otherwise affect the test.
+ if (!shouldError) {
+ texture.destroy();
+ }
+ }, shouldError);
+ }
+ }
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxUniformBufferBindingSize.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxUniformBufferBindingSize.spec.ts
new file mode 100644
index 0000000000..24f5997534
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxUniformBufferBindingSize.spec.ts
@@ -0,0 +1,90 @@
+import { LimitMode, kMaximumLimitBaseParams, makeLimitTestGroup } from './limit_utils.js';
+
+const kBufferParts = ['wholeBuffer', 'biggerBufferWithOffset'] as const;
+type BufferPart = (typeof kBufferParts)[number];
+
+function getSizeAndOffsetForBufferPart(device: GPUDevice, bufferPart: BufferPart, size: number) {
+ const align = device.limits.minUniformBufferOffsetAlignment;
+ switch (bufferPart) {
+ case 'wholeBuffer':
+ return { offset: 0, size };
+ case 'biggerBufferWithOffset':
+ return { size: size + align, offset: align };
+ }
+}
+
+const limit = 'maxUniformBufferBindingSize';
+export const { g, description } = makeLimitTestGroup(limit);
+
+// We also need to update the maxBufferSize limit when testing.
+const kExtraLimits = { maxBufferSize: 'maxLimit' as LimitMode };
+
+g.test('createBindGroup,at_over')
+ .desc(`Test using at and over ${limit} limit`)
+ .params(kMaximumLimitBaseParams.combine('bufferPart', kBufferParts))
+ .fn(async t => {
+ const { limitTest, testValueName, bufferPart } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ const bindGroupLayout = device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.VERTEX,
+ buffer: {},
+ },
+ ],
+ });
+
+ const { size, offset } = getSizeAndOffsetForBufferPart(device, bufferPart, testValue);
+
+ // If the size of the buffer exceeds the related but separate maxBufferSize limit, we can
+ // skip the validation since the allocation will fail with a validation error.
+ if (size > device.limits.maxBufferSize) {
+ return;
+ }
+
+ device.pushErrorScope('out-of-memory');
+ const uniformBuffer = t.trackForCleanup(
+ device.createBuffer({
+ usage: GPUBufferUsage.UNIFORM,
+ size,
+ })
+ );
+ const outOfMemoryError = await device.popErrorScope();
+
+ if (!outOfMemoryError) {
+ await t.expectValidationError(
+ () => {
+ device.createBindGroup({
+ layout: bindGroupLayout,
+ entries: [
+ {
+ binding: 0,
+ resource: {
+ buffer: uniformBuffer,
+ offset,
+ size: testValue,
+ },
+ },
+ ],
+ });
+ },
+ shouldError,
+ `size: ${size}, offset: ${offset}, testValue: ${testValue}`
+ );
+ }
+ },
+ kExtraLimits
+ );
+ });
+
+g.test('validate,maxBufferSize')
+ .desc(`Test that ${limit} <= maxBufferSize`)
+ .fn(t => {
+ const { adapter, defaultLimit, adapterLimit } = t;
+ t.expect(defaultLimit <= t.getDefaultLimit('maxBufferSize'));
+ t.expect(adapterLimit <= adapter.limits.maxBufferSize);
+ });
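For the biggerBufferWithOffset case it is the binding size, not the buffer size, that is held at the value under test; the buffer is simply grown by one offset-alignment unit so the binding can start at a nonzero offset. A hedged example, assuming the common minUniformBufferOffsetAlignment of 256 and a testValue of 65536 (the default maxUniformBufferBindingSize):

const minUniformBufferOffsetAlignment = 256; // assumed device value
const testValue = 65536;                     // binding size under test (assumed)
const size = testValue + minUniformBufferOffsetAlignment; // 65792-byte buffer
const offset = minUniformBufferOffsetAlignment;           // binding starts at byte 256
// createBindGroup then receives { buffer, offset: 256, size: 65536 }, so only
// the binding size is compared against maxUniformBufferBindingSize.
console.log(size, offset);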
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxUniformBuffersPerShaderStage.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxUniformBuffersPerShaderStage.spec.ts
new file mode 100644
index 0000000000..7e55078f16
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxUniformBuffersPerShaderStage.spec.ts
@@ -0,0 +1,144 @@
+import {
+ range,
+ reorder,
+ kReorderOrderKeys,
+ ReorderOrder,
+} from '../../../../../common/util/util.js';
+import { kShaderStageCombinationsWithStage } from '../../../../capability_info.js';
+
+import {
+ kMaximumLimitBaseParams,
+ makeLimitTestGroup,
+ kBindGroupTests,
+ kBindingCombinations,
+ getPipelineTypeForBindingCombination,
+ getPerStageWGSLForBindingCombination,
+} from './limit_utils.js';
+
+const limit = 'maxUniformBuffersPerShaderStage';
+export const { g, description } = makeLimitTestGroup(limit);
+
+function createBindGroupLayout(
+ device: GPUDevice,
+ visibility: number,
+ order: ReorderOrder,
+ numBindings: number
+) {
+ return device.createBindGroupLayout({
+ entries: reorder(
+ order,
+ range(numBindings, i => ({
+ binding: i,
+ visibility,
+ buffer: {},
+ }))
+ ),
+ });
+}
+
+g.test('createBindGroupLayout,at_over')
+ .desc(
+ `
+ Test using at and over ${limit} limit in createBindGroupLayout
+
+ Note: We also test order to make sure the implementation isn't just looking
+ at the last entry.
+ `
+ )
+ .params(
+ kMaximumLimitBaseParams
+ .combine('visibility', kShaderStageCombinationsWithStage)
+ .combine('order', kReorderOrderKeys)
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, visibility, order } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ await t.expectValidationError(
+ () => createBindGroupLayout(device, visibility, order, testValue),
+ shouldError
+ );
+ }
+ );
+ });
+
+g.test('createPipelineLayout,at_over')
+ .desc(
+ `
+ Test using at and over ${limit} limit in createPipelineLayout
+
+ Note: We also test order to make sure the implementation isn't just looking
+ at the last entry.
+ `
+ )
+ .params(
+ kMaximumLimitBaseParams
+ .combine('visibility', kShaderStageCombinationsWithStage)
+ .combine('order', kReorderOrderKeys)
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, visibility, order } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ const kNumGroups = 3;
+ const bindGroupLayouts = range(kNumGroups, i => {
+ const minInGroup = Math.floor(testValue / kNumGroups);
+ const numInGroup = i ? minInGroup : testValue - minInGroup * (kNumGroups - 1);
+ return createBindGroupLayout(device, visibility, order, numInGroup);
+ });
+ await t.expectValidationError(
+ () => device.createPipelineLayout({ bindGroupLayouts }),
+ shouldError
+ );
+ }
+ );
+ });
+
+g.test('createPipeline,at_over')
+ .desc(
+ `
+ Test using createRenderPipeline(Async) and createComputePipeline(Async) at and over ${limit} limit
+
+ Note: We also test order to make sure the implementation isn't just looking
+ at the last entry.
+ `
+ )
+ .params(
+ kMaximumLimitBaseParams
+ .combine('async', [false, true] as const)
+ .combine('bindingCombination', kBindingCombinations)
+ .combine('order', kReorderOrderKeys)
+ .combine('bindGroupTest', kBindGroupTests)
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, async, bindingCombination, order, bindGroupTest } = t.params;
+ const pipelineType = getPipelineTypeForBindingCombination(bindingCombination);
+
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, actualLimit, shouldError }) => {
+ const code = getPerStageWGSLForBindingCombination(
+ bindingCombination,
+ order,
+ bindGroupTest,
+ (i, j) => `var<uniform> u${j}_${i}: f32`,
+ (i, j) => `_ = u${j}_${i};`,
+ testValue
+ );
+ const module = device.createShaderModule({ code });
+
+ await t.testCreatePipeline(
+ pipelineType,
+ async,
+ module,
+ shouldError,
+ `actualLimit: ${actualLimit}, testValue: ${testValue}\n:${code}`
+ );
+ }
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxVertexAttributes.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxVertexAttributes.spec.ts
new file mode 100644
index 0000000000..9e5aaa144b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxVertexAttributes.spec.ts
@@ -0,0 +1,43 @@
+import { kMaximumLimitBaseParams, makeLimitTestGroup } from './limit_utils.js';
+
+function getPipelineDescriptor(device: GPUDevice, lastIndex: number): GPURenderPipelineDescriptor {
+ const code = `
+ @vertex fn vs(@location(${lastIndex}) v: vec4f) -> @builtin(position) vec4f {
+ return v;
+ }
+ `;
+ const module = device.createShaderModule({ code });
+ return {
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'vs',
+ buffers: [
+ {
+ arrayStride: 32,
+ attributes: [{ shaderLocation: lastIndex, offset: 0, format: 'float32x4' }],
+ },
+ ],
+ },
+ };
+}
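+
+/*
+For illustration (example numbers, not normative): with a requested maxVertexAttributes of 16,
+'atLimit' makes testValue 16 and 'overLimit' pushes it past 16, so the descriptor above uses
+shaderLocation 15 (valid) or a location at/above the limit (invalid), which is what
+testCreateRenderPipeline checks against shouldError.
+*/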
+
+const limit = 'maxVertexAttributes';
+export const { g, description } = makeLimitTestGroup(limit);
+
+g.test('createRenderPipeline,at_over')
+ .desc(`Test using createRenderPipeline(Async) at and over ${limit} limit`)
+ .params(kMaximumLimitBaseParams.combine('async', [false, true]))
+ .fn(async t => {
+ const { limitTest, testValueName, async } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ const lastIndex = testValue - 1;
+ const pipelineDescriptor = getPipelineDescriptor(device, lastIndex);
+
+ await t.testCreateRenderPipeline(pipelineDescriptor, async, shouldError);
+ }
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxVertexBufferArrayStride.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxVertexBufferArrayStride.spec.ts
new file mode 100644
index 0000000000..0af5724f2a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxVertexBufferArrayStride.spec.ts
@@ -0,0 +1,121 @@
+import { roundDown } from '../../../../util/math.js';
+
+import {
+ kMaximumLimitBaseParams,
+ makeLimitTestGroup,
+ MaximumLimitValueTest,
+ MaximumTestValue,
+} from './limit_utils.js';
+
+function getPipelineDescriptor(device: GPUDevice, testValue: number): GPURenderPipelineDescriptor {
+ const code = `
+ @vertex fn vs(@location(0) v: f32) -> @builtin(position) vec4f {
+ return vec4f(v);
+ }
+ `;
+ const module = device.createShaderModule({ code });
+ return {
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'vs',
+ buffers: [
+ {
+ arrayStride: testValue,
+ attributes: [
+ {
+ shaderLocation: 0,
+ offset: 0,
+ format: 'float32',
+ },
+ ],
+ },
+ ],
+ },
+ };
+}
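+
+/*
+For illustration: the only varying field above is arrayStride = testValue, so the pipeline
+should be valid exactly when testValue <= maxVertexBufferArrayStride. The helpers below step
+limits and test values by kMinAttributeStride (4) so every candidate stays a multiple of 4 and
+no unrelated "arrayStride must be a multiple of 4" error is triggered.
+*/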
+
+const kMinAttributeStride = 4;
+
+function getDeviceLimitToRequest(
+ limitValueTest: MaximumLimitValueTest,
+ defaultLimit: number,
+ maximumLimit: number
+) {
+ switch (limitValueTest) {
+ case 'atDefault':
+ return defaultLimit;
+ case 'underDefault':
+ return defaultLimit - kMinAttributeStride;
+ case 'betweenDefaultAndMaximum':
+ return Math.min(
+ defaultLimit,
+ roundDown(Math.floor((defaultLimit + maximumLimit) / 2), kMinAttributeStride)
+ );
+ case 'atMaximum':
+ return maximumLimit;
+ case 'overMaximum':
+ return maximumLimit + kMinAttributeStride;
+ }
+}
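+
+/*
+Worked example (hypothetical adapter values): with defaultLimit = 2048 and maximumLimit = 4096,
+the requested limits would be:
+  atDefault                -> 2048
+  underDefault             -> 2044
+  betweenDefaultAndMaximum -> min(2048, roundDown(3072, 4)) = 2048
+  atMaximum                -> 4096
+  overMaximum              -> 4100
+All of these are multiples of kMinAttributeStride.
+*/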
+
+function getTestValue(testValueName: MaximumTestValue, requestedLimit: number) {
+ switch (testValueName) {
+ case 'atLimit':
+ return requestedLimit;
+ case 'overLimit':
+ return requestedLimit + kMinAttributeStride;
+ }
+}
+
+function getDeviceLimitToRequestAndValueToTest(
+ limitValueTest: MaximumLimitValueTest,
+ testValueName: MaximumTestValue,
+ defaultLimit: number,
+ maximumLimit: number
+) {
+ const requestedLimit = getDeviceLimitToRequest(limitValueTest, defaultLimit, maximumLimit);
+ return {
+ requestedLimit,
+ testValue: getTestValue(testValueName, requestedLimit),
+ };
+}
+
+/*
+Note: We need to request +4 (vs the default +1) because otherwise we may trigger the wrong
+validation error: that the arrayStride is not a multiple of 4.
+*/
+const limit = 'maxVertexBufferArrayStride';
+export const { g, description } = makeLimitTestGroup(limit);
+
+g.test('createRenderPipeline,at_over')
+ .desc(`Test using createRenderPipeline(Async) at and over ${limit} limit`)
+ .params(kMaximumLimitBaseParams.combine('async', [false, true]))
+ .fn(async t => {
+ const { limitTest, testValueName, async } = t.params;
+ const { defaultLimit, adapterLimit: maximumLimit } = t;
+ const { requestedLimit, testValue } = getDeviceLimitToRequestAndValueToTest(
+ limitTest,
+ testValueName,
+ defaultLimit,
+ maximumLimit
+ );
+
+ await t.testDeviceWithSpecificLimits(
+ requestedLimit,
+ testValue,
+ async ({ device, testValue, shouldError }) => {
+ const pipelineDescriptor = getPipelineDescriptor(device, testValue);
+
+ await t.testCreateRenderPipeline(pipelineDescriptor, async, shouldError);
+ }
+ );
+ });
+
+g.test('validate')
+ .desc(`Test that ${limit} is a multiple of 4 bytes`)
+ .fn(t => {
+ const { defaultLimit, adapterLimit } = t;
+ t.expect(defaultLimit % 4 === 0);
+ t.expect(adapterLimit % 4 === 0);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxVertexBuffers.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxVertexBuffers.spec.ts
new file mode 100644
index 0000000000..7f760fe9b6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/maxVertexBuffers.spec.ts
@@ -0,0 +1,100 @@
+import { range } from '../../../../../common/util/util.js';
+
+import { kRenderEncoderTypes, kMaximumLimitBaseParams, makeLimitTestGroup } from './limit_utils.js';
+
+const kPipelineTypes = ['withoutLocations', 'withLocations'] as const;
+type PipelineType = (typeof kPipelineTypes)[number];
+
+function getPipelineDescriptor(
+ device: GPUDevice,
+ pipelineType: PipelineType,
+ testValue: number
+): GPURenderPipelineDescriptor {
+ const code =
+ pipelineType === 'withLocations'
+ ? `
+ struct VSInput {
+ ${range(testValue, i => `@location(${i}) p${i}: f32,`).join('\n')}
+ }
+ @vertex fn vs(v: VSInput) -> @builtin(position) vec4f {
+ let x = ${range(testValue, i => `v.p${i}`).join(' + ')};
+ return vec4f(x, 0, 0, 1);
+ }
+ `
+ : `
+ @vertex fn vs() -> @builtin(position) vec4f {
+ return vec4f(0);
+ }
+ `;
+ const module = device.createShaderModule({ code });
+ return {
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'vs',
+ buffers: range(testValue, i => ({
+ arrayStride: 32,
+ attributes: [{ shaderLocation: i, offset: 0, format: 'float32' }],
+ })),
+ },
+ };
+}
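+
+/*
+For illustration, pipelineType 'withLocations' with testValue = 2 generates roughly:
+
+  struct VSInput {
+    @location(0) p0: f32,
+    @location(1) p1: f32,
+  }
+  @vertex fn vs(v: VSInput) -> @builtin(position) vec4f {
+    let x = v.p0 + v.p1;
+    return vec4f(x, 0, 0, 1);
+  }
+
+plus two vertex buffer layouts, so both the shader locations and the buffer count scale with
+testValue; the 'withoutLocations' variant only scales the buffer count.
+*/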
+
+const limit = 'maxVertexBuffers';
+export const { g, description } = makeLimitTestGroup(limit);
+
+g.test('createRenderPipeline,at_over')
+ .desc(`Test using at and over ${limit} limit in createRenderPipeline(Async)`)
+ .params(
+ kMaximumLimitBaseParams.combine('async', [false, true]).combine('pipelineType', kPipelineTypes)
+ )
+ .fn(async t => {
+ const { limitTest, testValueName, async, pipelineType } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError }) => {
+ const pipelineDescriptor = getPipelineDescriptor(device, pipelineType, testValue);
+
+ await t.testCreateRenderPipeline(pipelineDescriptor, async, shouldError);
+ }
+ );
+ });
+
+g.test('setVertexBuffer,at_over')
+ .desc(`Test using at and over ${limit} limit in setVertexBuffer`)
+ .params(kMaximumLimitBaseParams.combine('encoderType', kRenderEncoderTypes))
+ .fn(async t => {
+ const { limitTest, testValueName, encoderType } = t.params;
+ await t.testDeviceWithRequestedMaximumLimits(
+ limitTest,
+ testValueName,
+ async ({ device, testValue, shouldError, actualLimit }) => {
+ const lastIndex = testValue - 1;
+
+ const buffer = device.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.VERTEX,
+ });
+
+ await t.testGPURenderCommandsMixin(
+ encoderType,
+ ({ mixin }) => {
+ mixin.setVertexBuffer(lastIndex, buffer);
+ },
+ shouldError,
+ `lastIndex: ${lastIndex}, actualLimit: ${actualLimit}, shouldError: ${shouldError}`
+ );
+
+ buffer.destroy();
+ }
+ );
+ });
+
+g.test('validate,maxBindGroupsPlusVertexBuffers')
+ .desc(`Test that ${limit} <= maxBindGroupsPlusVertexBuffers`)
+ .fn(t => {
+ const { adapter, defaultLimit, adapterLimit } = t;
+ t.expect(defaultLimit <= t.getDefaultLimit('maxBindGroupsPlusVertexBuffers'));
+ t.expect(adapterLimit <= adapter.limits.maxBindGroupsPlusVertexBuffers);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/minStorageBufferOffsetAlignment.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/minStorageBufferOffsetAlignment.spec.ts
new file mode 100644
index 0000000000..1113499673
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/minStorageBufferOffsetAlignment.spec.ts
@@ -0,0 +1,183 @@
+import { GPUConst } from '../../../../constants.js';
+import { isPowerOfTwo } from '../../../../util/math.js';
+
+import {
+ kMinimumLimitBaseParams,
+ makeLimitTestGroup,
+ MinimumLimitValueTest,
+ MinimumTestValue,
+} from './limit_utils.js';
+
+function getDeviceLimitToRequest(
+ limitValueTest: MinimumLimitValueTest,
+ defaultLimit: number,
+ minimumLimit: number
+) {
+ switch (limitValueTest) {
+ case 'atDefault':
+ return defaultLimit;
+ case 'overDefault':
+ return 2 ** (Math.log2(defaultLimit) + 1);
+ case 'betweenDefaultAndMinimum':
+ return 2 ** (((Math.log2(defaultLimit) + Math.log2(minimumLimit)) / 2) | 0);
+ case 'atMinimum':
+ return minimumLimit;
+ case 'underMinimum':
+ return 2 ** (Math.log2(minimumLimit) - 1);
+ }
+}
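+
+/*
+Worked example (hypothetical adapter values): with defaultLimit = 256 and minimumLimit = 32,
+the requested limits would be:
+  atDefault                -> 256
+  overDefault              -> 2 ** (8 + 1) = 512
+  betweenDefaultAndMinimum -> 2 ** ((8 + 5) / 2 | 0) = 2 ** 6 = 64
+  atMinimum                -> 32
+  underMinimum             -> 16
+All values stay powers of two, matching the 'validate,powerOf2' test below.
+*/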
+
+function getTestValue(testValueName: MinimumTestValue, requestedLimit: number) {
+ switch (testValueName) {
+ case 'atLimit':
+ return requestedLimit;
+ case 'underLimit':
+ return 2 ** (Math.log2(requestedLimit) - 1);
+ }
+}
+
+function getDeviceLimitToRequestAndValueToTest(
+ limitValueTest: MinimumLimitValueTest,
+ testValueName: MinimumTestValue,
+ defaultLimit: number,
+  minimumLimit: number
+) {
+  const requestedLimit = getDeviceLimitToRequest(limitValueTest, defaultLimit, minimumLimit);
+ return {
+ requestedLimit,
+ testValue: getTestValue(testValueName, requestedLimit),
+ };
+}
+
+const limit = 'minStorageBufferOffsetAlignment';
+export const { g, description } = makeLimitTestGroup(limit);
+
+g.test('createBindGroup,at_over')
+ .desc(`Test using createBindGroup at and over ${limit} limit`)
+ .params(kMinimumLimitBaseParams)
+ .fn(async t => {
+ const { limitTest, testValueName } = t.params;
+ // note: LimitTest.maximum is the adapter.limits[limit] value
+ const { defaultLimit, adapterLimit: minimumLimit } = t;
+ const { requestedLimit, testValue } = getDeviceLimitToRequestAndValueToTest(
+ limitTest,
+ testValueName,
+ defaultLimit,
+ minimumLimit
+ );
+
+ await t.testDeviceWithSpecificLimits(
+ requestedLimit,
+ testValue,
+ async ({ device, testValue, shouldError }) => {
+ const storageBuffer = t.trackForCleanup(
+ device.createBuffer({
+ size: testValue * 2,
+ usage: GPUBufferUsage.STORAGE,
+ })
+ );
+
+ const layout = device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE,
+ buffer: { type: 'storage' },
+ },
+ ],
+ });
+
+ await t.expectValidationError(() => {
+ device.createBindGroup({
+ layout,
+ entries: [
+ {
+ binding: 0,
+ resource: {
+ buffer: storageBuffer,
+ offset: testValue,
+ },
+ },
+ ],
+ });
+ }, shouldError);
+ }
+ );
+ });
+
+g.test('setBindGroup,at_over')
+ .desc(`Test using setBindGroup at and over ${limit} limit`)
+ .params(kMinimumLimitBaseParams)
+ .fn(async t => {
+ const { limitTest, testValueName } = t.params;
+ // note: LimitTest.maximum is the adapter.limits[limit] value
+ const { defaultLimit, adapterLimit: minimumLimit } = t;
+ const { requestedLimit, testValue } = getDeviceLimitToRequestAndValueToTest(
+ limitTest,
+ testValueName,
+ defaultLimit,
+ minimumLimit
+ );
+
+ await t.testDeviceWithSpecificLimits(
+ requestedLimit,
+ testValue,
+ async ({ device, testValue, shouldError }) => {
+ const buffer = device.createBuffer({
+ size: testValue * 2,
+ usage: GPUBufferUsage.STORAGE,
+ });
+
+ const layout = device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUConst.ShaderStage.COMPUTE,
+ buffer: {
+ type: 'storage',
+ hasDynamicOffset: true,
+ },
+ },
+ ],
+ });
+
+ const bindGroup = device.createBindGroup({
+ layout,
+ entries: [
+ {
+ binding: 0,
+ resource: {
+ buffer,
+ size: testValue / 2,
+ },
+ },
+ ],
+ });
+
+ const encoder = device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setBindGroup(0, bindGroup, [testValue]);
+ pass.end();
+
+ await t.expectValidationError(() => {
+ encoder.finish();
+ }, shouldError);
+
+ buffer.destroy();
+ }
+ );
+ });
+
+g.test('validate,powerOf2')
+  .desc(`Verify that ${limit} is a power of 2`)
+ .fn(t => {
+ t.expect(isPowerOfTwo(t.defaultLimit));
+ t.expect(isPowerOfTwo(t.adapterLimit));
+ });
+
+g.test('validate,greaterThanOrEqualTo32')
+  .desc(`Verify that ${limit} is >= 32`)
+ .fn(t => {
+ t.expect(t.defaultLimit >= 32);
+ t.expect(t.adapterLimit >= 32);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/minUniformBufferOffsetAlignment.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/minUniformBufferOffsetAlignment.spec.ts
new file mode 100644
index 0000000000..08e29f80ea
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/capability_checks/limits/minUniformBufferOffsetAlignment.spec.ts
@@ -0,0 +1,186 @@
+import { GPUConst } from '../../../../constants.js';
+import { isPowerOfTwo } from '../../../../util/math.js';
+
+import {
+ kMinimumLimitBaseParams,
+ makeLimitTestGroup,
+ MinimumLimitValueTest,
+ MinimumTestValue,
+} from './limit_utils.js';
+
+function getDeviceLimitToRequest(
+ limitValueTest: MinimumLimitValueTest,
+ defaultLimit: number,
+ minimumLimit: number
+) {
+ switch (limitValueTest) {
+ case 'atDefault':
+ return defaultLimit;
+ case 'overDefault':
+ return 2 ** (Math.log2(defaultLimit) + 1);
+ case 'betweenDefaultAndMinimum':
+ return Math.min(
+ minimumLimit,
+ 2 ** (((Math.log2(defaultLimit) + Math.log2(minimumLimit)) / 2) | 0)
+ );
+ case 'atMinimum':
+ return minimumLimit;
+ case 'underMinimum':
+ return 2 ** (Math.log2(minimumLimit) - 1);
+ }
+}
+
+function getTestValue(testValueName: MinimumTestValue, requestedLimit: number) {
+ switch (testValueName) {
+ case 'atLimit':
+ return requestedLimit;
+ case 'underLimit':
+ return 2 ** (Math.log2(requestedLimit) - 1);
+ }
+}
+
+function getDeviceLimitToRequestAndValueToTest(
+ limitValueTest: MinimumLimitValueTest,
+ testValueName: MinimumTestValue,
+ defaultLimit: number,
+  minimumLimit: number
+) {
+  const requestedLimit = getDeviceLimitToRequest(limitValueTest, defaultLimit, minimumLimit);
+ return {
+ requestedLimit,
+ testValue: getTestValue(testValueName, requestedLimit),
+ };
+}
+
+const limit = 'minUniformBufferOffsetAlignment';
+export const { g, description } = makeLimitTestGroup(limit);
+
+g.test('createBindGroup,at_over')
+ .desc(`Test using createBindGroup at and over ${limit} limit`)
+ .params(kMinimumLimitBaseParams)
+ .fn(async t => {
+ const { limitTest, testValueName } = t.params;
+ // note: LimitTest.maximum is the adapter.limits[limit] value
+ const { defaultLimit, adapterLimit: minimumLimit } = t;
+ const { requestedLimit, testValue } = getDeviceLimitToRequestAndValueToTest(
+ limitTest,
+ testValueName,
+ defaultLimit,
+ minimumLimit
+ );
+
+ await t.testDeviceWithSpecificLimits(
+ requestedLimit,
+ testValue,
+ async ({ device, testValue, shouldError }) => {
+ const buffer = t.trackForCleanup(
+ device.createBuffer({
+ size: testValue * 2,
+ usage: GPUBufferUsage.UNIFORM,
+ })
+ );
+
+ const layout = device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE,
+ buffer: {},
+ },
+ ],
+ });
+
+ await t.expectValidationError(() => {
+ device.createBindGroup({
+ layout,
+ entries: [
+ {
+ binding: 0,
+ resource: {
+ buffer,
+ offset: testValue,
+ },
+ },
+ ],
+ });
+ }, shouldError);
+ }
+ );
+ });
+
+g.test('setBindGroup,at_over')
+ .desc(`Test using setBindGroup at and over ${limit} limit`)
+ .params(kMinimumLimitBaseParams)
+ .fn(async t => {
+ const { limitTest, testValueName } = t.params;
+ // note: LimitTest.maximum is the adapter.limits[limit] value
+ const { defaultLimit, adapterLimit: minimumLimit } = t;
+ const { requestedLimit, testValue } = getDeviceLimitToRequestAndValueToTest(
+ limitTest,
+ testValueName,
+ defaultLimit,
+ minimumLimit
+ );
+
+ await t.testDeviceWithSpecificLimits(
+ requestedLimit,
+ testValue,
+ async ({ device, testValue, shouldError }) => {
+ const buffer = device.createBuffer({
+ size: testValue * 2,
+ usage: GPUBufferUsage.UNIFORM,
+ });
+
+ const layout = device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUConst.ShaderStage.COMPUTE,
+ buffer: {
+ type: 'uniform',
+ hasDynamicOffset: true,
+ },
+ },
+ ],
+ });
+
+ const bindGroup = device.createBindGroup({
+ layout,
+ entries: [
+ {
+ binding: 0,
+ resource: {
+ buffer,
+ size: testValue / 2,
+ },
+ },
+ ],
+ });
+
+ const encoder = device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setBindGroup(0, bindGroup, [testValue]);
+ pass.end();
+
+ await t.expectValidationError(() => {
+ encoder.finish();
+ }, shouldError);
+
+ buffer.destroy();
+ }
+ );
+ });
+
+g.test('validate,powerOf2')
+  .desc(`Verify that ${limit} is a power of 2`)
+ .fn(t => {
+ t.expect(isPowerOfTwo(t.defaultLimit));
+ t.expect(isPowerOfTwo(t.adapterLimit));
+ });
+
+g.test('validate,greaterThanOrEqualTo32')
+  .desc(`Verify that ${limit} is >= 32`)
+ .fn(t => {
+ t.expect(t.defaultLimit >= 32);
+ t.expect(t.adapterLimit >= 32);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/compute_pipeline.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/compute_pipeline.spec.ts
new file mode 100644
index 0000000000..3a0a51b363
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/compute_pipeline.spec.ts
@@ -0,0 +1,692 @@
+export const description = `
+createComputePipeline and createComputePipelineAsync validation tests.
+
+Note: entry point matching tests are in shader_module/entry_point.spec.ts
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { kValue } from '../../util/constants.js';
+import { TShaderStage, getShaderWithEntryPoint } from '../../util/shader.js';
+
+import { ValidationTest } from './validation_test.js';
+
+class F extends ValidationTest {
+ getShaderModule(
+ shaderStage: TShaderStage = 'compute',
+ entryPoint: string = 'main'
+ ): GPUShaderModule {
+ return this.device.createShaderModule({
+ code: getShaderWithEntryPoint(shaderStage, entryPoint),
+ });
+ }
+}
+
+export const g = makeTestGroup(F);
+
+g.test('basic')
+ .desc(
+ `
+Control case for createComputePipeline and createComputePipelineAsync.
+Call the API with a valid compute shader and a matching valid entryPoint, making sure that the test function works as expected.
+`
+ )
+ .params(u => u.combine('isAsync', [true, false]))
+ .fn(t => {
+ const { isAsync } = t.params;
+ t.doCreateComputePipelineTest(isAsync, true, {
+ layout: 'auto',
+ compute: { module: t.getShaderModule('compute', 'main'), entryPoint: 'main' },
+ });
+ });
+
+g.test('shader_module,invalid')
+ .desc(
+ `
+Tests calling createComputePipeline(Async) with an invalid compute shader, and checks that the APIs catch this error.
+`
+ )
+ .params(u => u.combine('isAsync', [true, false]))
+ .fn(t => {
+ const { isAsync } = t.params;
+ t.doCreateComputePipelineTest(isAsync, false, {
+ layout: 'auto',
+ compute: {
+ module: t.createInvalidShaderModule(),
+ entryPoint: 'main',
+ },
+ });
+ });
+
+g.test('shader_module,compute')
+ .desc(
+ `
+Tests calling createComputePipeline(Async) with a valid shader of a different stage and a matching entryPoint,
+and checks that the APIs only accept a compute shader.
+`
+ )
+ .params(u =>
+ u //
+ .combine('isAsync', [true, false])
+ .combine('shaderModuleStage', ['compute', 'vertex', 'fragment'] as TShaderStage[])
+ )
+ .fn(t => {
+ const { isAsync, shaderModuleStage } = t.params;
+ const descriptor = {
+ layout: 'auto' as const,
+ compute: {
+ module: t.getShaderModule(shaderModuleStage, 'main'),
+ entryPoint: 'main',
+ },
+ };
+ t.doCreateComputePipelineTest(isAsync, shaderModuleStage === 'compute', descriptor);
+ });
+
+g.test('shader_module,device_mismatch')
+ .desc(
+ 'Tests createComputePipeline(Async) cannot be called with a shader module created from another device'
+ )
+ .paramsSubcasesOnly(u => u.combine('isAsync', [true, false]).combine('mismatched', [true, false]))
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { isAsync, mismatched } = t.params;
+
+ const sourceDevice = mismatched ? t.mismatchedDevice : t.device;
+
+ const module = sourceDevice.createShaderModule({
+ code: '@compute @workgroup_size(1) fn main() {}',
+ });
+
+ const descriptor = {
+ layout: 'auto' as const,
+ compute: {
+ module,
+ entryPoint: 'main',
+ },
+ };
+
+ t.doCreateComputePipelineTest(isAsync, !mismatched, descriptor);
+ });
+
+g.test('pipeline_layout,device_mismatch')
+ .desc(
+ 'Tests createComputePipeline(Async) cannot be called with a pipeline layout created from another device'
+ )
+ .paramsSubcasesOnly(u => u.combine('isAsync', [true, false]).combine('mismatched', [true, false]))
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { isAsync, mismatched } = t.params;
+ const sourceDevice = mismatched ? t.mismatchedDevice : t.device;
+
+ const layout = sourceDevice.createPipelineLayout({ bindGroupLayouts: [] });
+
+ const descriptor = {
+ layout,
+ compute: {
+ module: t.getShaderModule('compute', 'main'),
+ entryPoint: 'main',
+ },
+ };
+
+ t.doCreateComputePipelineTest(isAsync, !mismatched, descriptor);
+ });
+
+g.test('limits,workgroup_storage_size')
+ .desc(
+ `
+Tests calling createComputePipeline(Async) validation for compute shaders using <= device.limits.maxComputeWorkgroupStorageSize bytes of workgroup storage.
+`
+ )
+ .params(u =>
+ u //
+ .combine('isAsync', [true, false])
+ .combineWithParams([
+ { type: 'vec4<f32>', _typeSize: 16 },
+ { type: 'mat4x4<f32>', _typeSize: 64 },
+ ])
+ .beginSubcases()
+ .combine('countDeltaFromLimit', [0, 1])
+ )
+ .fn(t => {
+ const { isAsync, type, _typeSize, countDeltaFromLimit } = t.params;
+ const countAtLimit = Math.floor(t.device.limits.maxComputeWorkgroupStorageSize / _typeSize);
+ const count = countAtLimit + countDeltaFromLimit;
+
+ const descriptor = {
+ layout: 'auto' as const,
+ compute: {
+ module: t.device.createShaderModule({
+ code: `
+ var<workgroup> data: array<${type}, ${count}>;
+ @compute @workgroup_size(64) fn main () {
+ _ = data;
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ };
+ t.doCreateComputePipelineTest(isAsync, count <= countAtLimit, descriptor);
+ });
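+
+/*
+Worked example (assuming the default maxComputeWorkgroupStorageSize of 16384 bytes): for
+type vec4<f32> (_typeSize 16), countAtLimit = 16384 / 16 = 1024, so countDeltaFromLimit = 0
+declares exactly 16384 bytes of workgroup storage (valid) and countDeltaFromLimit = 1 declares
+16400 bytes (should be rejected).
+*/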
+
+g.test('limits,invocations_per_workgroup')
+ .desc(
+ `
+Tests calling createComputePipeline(Async) validation for compute shaders using <= device.limits.maxComputeInvocationsPerWorkgroup invocations per workgroup.
+`
+ )
+ .params(u =>
+ u //
+ .combine('isAsync', [true, false])
+ .combine('size', [
+ // Assume maxComputeWorkgroupSizeX/Y >= 129, maxComputeWorkgroupSizeZ >= 33
+ [128, 1, 2],
+ [129, 1, 2],
+ [2, 128, 1],
+ [2, 129, 1],
+ [1, 8, 32],
+ [1, 8, 33],
+ ])
+ )
+ .fn(t => {
+ const { isAsync, size } = t.params;
+
+ const descriptor = {
+ layout: 'auto' as const,
+ compute: {
+ module: t.device.createShaderModule({
+ code: `
+ @compute @workgroup_size(${size.join(',')}) fn main () {
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ };
+
+ t.doCreateComputePipelineTest(
+ isAsync,
+ size[0] * size[1] * size[2] <= t.device.limits.maxComputeInvocationsPerWorkgroup,
+ descriptor
+ );
+ });
+
+g.test('limits,invocations_per_workgroup,each_component')
+ .desc(
+ `
+Tests calling createComputePipeline(Async) validation that each component of the compute workgroup_size attribute is no more than its corresponding limit.
+`
+ )
+ .params(u =>
+ u //
+ .combine('isAsync', [true, false])
+ .combine('size', [
+ // Assume maxComputeInvocationsPerWorkgroup >= 256
+ [64],
+ [256, 1, 1],
+ [257, 1, 1],
+ [1, 256, 1],
+ [1, 257, 1],
+ [1, 1, 63],
+ [1, 1, 64],
+ [1, 1, 65],
+ ] as const)
+ )
+ .fn(t => {
+ const { isAsync, size } = t.params;
+
+ const descriptor = {
+ layout: 'auto' as const,
+ compute: {
+ module: t.device.createShaderModule({
+ code: `
+ @compute @workgroup_size(${size.join(',')}) fn main () {
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ };
+
+ const workgroupX = size[0];
+ const workgroupY = size[1] ?? 1;
+ const workgroupZ = size[2] ?? 1;
+
+ const _success =
+ workgroupX <= t.device.limits.maxComputeWorkgroupSizeX &&
+ workgroupY <= t.device.limits.maxComputeWorkgroupSizeY &&
+ workgroupZ <= t.device.limits.maxComputeWorkgroupSizeZ;
+ t.doCreateComputePipelineTest(isAsync, _success, descriptor);
+ });
+
+g.test('overrides,identifier')
+ .desc(
+ `
+Tests calling createComputePipeline(Async) validation for overridable constant identifiers.
+`
+ )
+ .params(u =>
+ u //
+ .combine('isAsync', [true, false])
+ .combineWithParams([
+ { constants: {}, _success: true },
+ { constants: { c0: 0 }, _success: true },
+ { constants: { c0: 0, c1: 1 }, _success: true },
+ { constants: { 'c0\0': 0 }, _success: false },
+ { constants: { c9: 0 }, _success: false },
+ { constants: { 1: 0 }, _success: true },
+ { constants: { c3: 0 }, _success: false }, // pipeline constant id is specified for c3
+ { constants: { 2: 0 }, _success: false },
+ { constants: { 1000: 0 }, _success: true },
+ { constants: { 9999: 0 }, _success: false },
+ { constants: { 1000: 0, c2: 0 }, _success: false },
+ { constants: { 数: 0 }, _success: true },
+ { constants: { séquençage: 0 }, _success: false }, // test unicode is not normalized
+ ] as { constants: Record<string, GPUPipelineConstantValue>; _success: boolean }[])
+ )
+ .fn(t => {
+ const { isAsync, constants, _success } = t.params;
+
+ const descriptor = {
+ layout: 'auto' as const,
+ compute: {
+ module: t.device.createShaderModule({
+ code: `
+ override c0: bool = true; // type: bool
+ override c1: u32 = 0u; // default override
+ override 数: u32 = 0u; // non-ASCII
+ override séquençage: u32 = 0u; // normalizable unicode (WGSL does not normalize)
+ @id(1000) override c2: u32 = 10u; // default
+ @id(1) override c3: u32 = 11u; // default
+ @compute @workgroup_size(1) fn main () {
+ // make sure the overridable constants are not optimized out
+ _ = u32(c0);
+ _ = u32(c1);
+ _ = u32(c2 + séquençage);
+ _ = u32(c3 + 数);
+ }`,
+ }),
+ entryPoint: 'main',
+ constants,
+ },
+ };
+
+ t.doCreateComputePipelineTest(isAsync, _success, descriptor);
+ });
+
+g.test('overrides,uninitialized')
+ .desc(
+ `
+Tests calling createComputePipeline(Async) validation for uninitialized overridable constants.
+`
+ )
+ .params(u =>
+ u //
+ .combine('isAsync', [true, false])
+ .combineWithParams([
+ { constants: {}, _success: false },
+ { constants: { c0: 0, c2: 0, c8: 0 }, _success: false }, // c5 is missing
+ { constants: { c0: 0, c2: 0, c5: 0, c8: 0 }, _success: true },
+ { constants: { c0: 0, c2: 0, c5: 0, c8: 0, c1: 0 }, _success: true },
+ ] as { constants: Record<string, GPUPipelineConstantValue>; _success: boolean }[])
+ )
+ .fn(t => {
+ const { isAsync, constants, _success } = t.params;
+
+ const descriptor = {
+ layout: 'auto' as const,
+ compute: {
+ module: t.device.createShaderModule({
+ code: `
+ override c0: bool; // type: bool
+ override c1: bool = false; // default override
+ override c2: f32; // type: float32
+ override c3: f32 = 0.0; // default override
+ override c4: f32 = 4.0; // default
+ override c5: i32; // type: int32
+ override c6: i32 = 0; // default override
+ override c7: i32 = 7; // default
+ override c8: u32; // type: uint32
+ override c9: u32 = 0u; // default override
+ @id(1000) override c10: u32 = 10u; // default
+ @compute @workgroup_size(1) fn main () {
+ // make sure the overridable constants are not optimized out
+ _ = u32(c0);
+ _ = u32(c1);
+ _ = u32(c2);
+ _ = u32(c3);
+ _ = u32(c4);
+ _ = u32(c5);
+ _ = u32(c6);
+ _ = u32(c7);
+ _ = u32(c8);
+ _ = u32(c9);
+ _ = u32(c10);
+ }`,
+ }),
+ entryPoint: 'main',
+ constants,
+ },
+ };
+
+ t.doCreateComputePipelineTest(isAsync, _success, descriptor);
+ });
+
+g.test('overrides,value,type_error')
+ .desc(
+ `
+Tests that createComputePipeline(Async) validation of constant values like Infinity and NaN results in a TypeError.
+`
+ )
+ .params(u =>
+ u //
+ .combine('isAsync', [true, false])
+ .combineWithParams([
+ { constants: { cf: 1 }, _success: true }, // control
+ { constants: { cf: NaN }, _success: false },
+ { constants: { cf: Number.POSITIVE_INFINITY }, _success: false },
+ { constants: { cf: Number.NEGATIVE_INFINITY }, _success: false },
+ ] as const)
+ )
+ .fn(t => {
+ const { isAsync, constants, _success } = t.params;
+
+ const descriptor = {
+ layout: 'auto' as const,
+ compute: {
+ module: t.device.createShaderModule({
+ code: `
+ override cf: f32 = 0.0;
+ @compute @workgroup_size(1) fn main () {
+ _ = cf;
+ }`,
+ }),
+ entryPoint: 'main',
+ constants,
+ },
+ };
+
+ t.doCreateComputePipelineTest(isAsync, _success, descriptor, 'TypeError');
+ });
+
+g.test('overrides,value,validation_error')
+ .desc(
+ `
+Tests calling createComputePipeline(Async) validation for unrepresentable constant values in the compute stage.
+
+TODO(#2060): test with last_castable_pipeline_override.
+`
+ )
+ .params(u =>
+ u //
+ .combine('isAsync', [true, false])
+ .combineWithParams([
+ { constants: { cu: kValue.u32.min }, _success: true },
+ { constants: { cu: kValue.u32.min - 1 }, _success: false },
+ { constants: { cu: kValue.u32.max }, _success: true },
+ { constants: { cu: kValue.u32.max + 1 }, _success: false },
+ { constants: { ci: kValue.i32.negative.min }, _success: true },
+ { constants: { ci: kValue.i32.negative.min - 1 }, _success: false },
+ { constants: { ci: kValue.i32.positive.max }, _success: true },
+ { constants: { ci: kValue.i32.positive.max + 1 }, _success: false },
+ { constants: { cf: kValue.f32.negative.min }, _success: true },
+ {
+ constants: { cf: kValue.f32.negative.first_non_castable_pipeline_override },
+ _success: false,
+ },
+ { constants: { cf: kValue.f32.positive.max }, _success: true },
+ {
+ constants: { cf: kValue.f32.positive.first_non_castable_pipeline_override },
+ _success: false,
+ },
+ // Conversion to boolean can't fail
+ { constants: { cb: Number.MAX_VALUE }, _success: true },
+ { constants: { cb: kValue.i32.negative.min - 1 }, _success: true },
+ ] as { constants: Record<string, GPUPipelineConstantValue>; _success: boolean }[])
+ )
+ .fn(t => {
+ const { isAsync, constants, _success } = t.params;
+ const descriptor = {
+ layout: 'auto' as const,
+ compute: {
+ module: t.device.createShaderModule({
+ code: `
+ override cb: bool = false;
+ override cu: u32 = 0u;
+ override ci: i32 = 0;
+ override cf: f32 = 0.0;
+ @compute @workgroup_size(1) fn main () {
+ _ = cb;
+ _ = cu;
+ _ = ci;
+ _ = cf;
+ }`,
+ }),
+ entryPoint: 'main',
+ constants,
+ },
+ };
+
+ t.doCreateComputePipelineTest(isAsync, _success, descriptor);
+ });
+
+g.test('overrides,value,validation_error,f16')
+ .desc(
+ `
+Tests calling createComputePipeline(Async) validation for unrepresentable f16 constant values in the compute stage.
+
+TODO(#2060): Tighten the cases around the valid/invalid boundary once we have WGSL spec
+clarity on whether values like f16.positive.last_castable_pipeline_override would be valid. See issue.
+`
+ )
+ .params(u =>
+ u //
+ .combine('isAsync', [true, false])
+ .combineWithParams([
+ { constants: { cf16: kValue.f16.negative.min }, _success: true },
+ {
+ constants: { cf16: kValue.f16.negative.first_non_castable_pipeline_override },
+ _success: false,
+ },
+ { constants: { cf16: kValue.f16.positive.max }, _success: true },
+ {
+ constants: { cf16: kValue.f16.positive.first_non_castable_pipeline_override },
+ _success: false,
+ },
+ { constants: { cf16: kValue.f32.negative.min }, _success: false },
+ { constants: { cf16: kValue.f32.positive.max }, _success: false },
+ {
+ constants: { cf16: kValue.f32.negative.first_non_castable_pipeline_override },
+ _success: false,
+ },
+ {
+ constants: { cf16: kValue.f32.positive.first_non_castable_pipeline_override },
+ _success: false,
+ },
+ ] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(t => {
+ const { isAsync, constants, _success } = t.params;
+
+ const descriptor = {
+ layout: 'auto' as const,
+ compute: {
+ module: t.device.createShaderModule({
+ code: `
+ enable f16;
+
+ override cf16: f16 = 0.0h;
+ @compute @workgroup_size(1) fn main () {
+ _ = cf16;
+ }`,
+ }),
+ entryPoint: 'main',
+ constants,
+ },
+ };
+
+ t.doCreateComputePipelineTest(isAsync, _success, descriptor);
+ });
+
+const kOverridesWorkgroupSizeShaders = {
+ u32: `
+override x: u32 = 1u;
+override y: u32 = 1u;
+override z: u32 = 1u;
+@compute @workgroup_size(x, y, z) fn main () {
+ _ = 0u;
+}
+`,
+ i32: `
+override x: i32 = 1;
+override y: i32 = 1;
+override z: i32 = 1;
+@compute @workgroup_size(x, y, z) fn main () {
+ _ = 0u;
+}
+`,
+};
+
+g.test('overrides,workgroup_size')
+ .desc(
+ `
+Tests calling createComputePipeline(Async) validation for overridable constants used for workgroup size.
+`
+ )
+ .params(u =>
+ u //
+ .combine('isAsync', [true, false])
+ .combine('type', ['u32', 'i32'] as const)
+ .combineWithParams([
+ { constants: {}, _success: true },
+ { constants: { x: 0, y: 0, z: 0 }, _success: false },
+ { constants: { x: 1, y: -1, z: 1 }, _success: false },
+ { constants: { x: 1, y: 0, z: 0 }, _success: false },
+ { constants: { x: 16, y: 1, z: 1 }, _success: true },
+ ] as { constants: Record<string, GPUPipelineConstantValue>; _success: boolean }[])
+ )
+ .fn(t => {
+ const { isAsync, type, constants, _success } = t.params;
+
+ const descriptor = {
+ layout: 'auto' as const,
+ compute: {
+ module: t.device.createShaderModule({
+ code: kOverridesWorkgroupSizeShaders[type],
+ }),
+ entryPoint: 'main',
+ constants,
+ },
+ };
+
+ t.doCreateComputePipelineTest(isAsync, _success, descriptor);
+ });
+
+g.test('overrides,workgroup_size,limits')
+ .desc(
+ `
+Tests calling createComputePipeline(Async) validation when overridable constants used for the workgroup size exceed device limits.
+`
+ )
+ .params(u =>
+ u //
+ .combine('isAsync', [true, false])
+ .combine('type', ['u32', 'i32'] as const)
+ )
+ .fn(t => {
+ const { isAsync, type } = t.params;
+
+ const limits = t.device.limits;
+
+ const testFn = (x: number, y: number, z: number, _success: boolean) => {
+ const descriptor = {
+ layout: 'auto' as const,
+ compute: {
+ module: t.device.createShaderModule({
+ code: kOverridesWorkgroupSizeShaders[type],
+ }),
+ entryPoint: 'main',
+ constants: {
+ x,
+ y,
+ z,
+ },
+ },
+ };
+
+ t.doCreateComputePipelineTest(isAsync, _success, descriptor);
+ };
+
+ testFn(limits.maxComputeWorkgroupSizeX, 1, 1, true);
+ testFn(limits.maxComputeWorkgroupSizeX + 1, 1, 1, false);
+ testFn(1, limits.maxComputeWorkgroupSizeY, 1, true);
+ testFn(1, limits.maxComputeWorkgroupSizeY + 1, 1, false);
+ testFn(1, 1, limits.maxComputeWorkgroupSizeZ, true);
+ testFn(1, 1, limits.maxComputeWorkgroupSizeZ + 1, false);
+ testFn(
+ limits.maxComputeWorkgroupSizeX,
+ limits.maxComputeWorkgroupSizeY,
+ limits.maxComputeWorkgroupSizeZ,
+ limits.maxComputeWorkgroupSizeX *
+ limits.maxComputeWorkgroupSizeY *
+ limits.maxComputeWorkgroupSizeZ <=
+ limits.maxComputeInvocationsPerWorkgroup
+ );
+ });
+
+g.test('overrides,workgroup_size,limits,workgroup_storage_size')
+ .desc(
+ `
+Tests calling createComputePipeline(Async) validation when overridable constants cause the workgroup storage size to exceed device limits.
+`
+ )
+ .params(u =>
+ u //
+ .combine('isAsync', [true, false])
+ )
+ .fn(t => {
+ const { isAsync } = t.params;
+
+ const limits = t.device.limits;
+
+ const kVec4Size = 16;
+ const maxVec4Count = limits.maxComputeWorkgroupStorageSize / kVec4Size;
+ const kMat4Size = 64;
+ const maxMat4Count = limits.maxComputeWorkgroupStorageSize / kMat4Size;
+
+ const testFn = (vec4Count: number, mat4Count: number, _success: boolean) => {
+ const descriptor = {
+ layout: 'auto' as const,
+ compute: {
+ module: t.device.createShaderModule({
+ code: `
+ override a: u32;
+ override b: u32;
+ ${vec4Count <= 0 ? '' : 'var<workgroup> vec4_data: array<vec4<f32>, a>;'}
+ ${mat4Count <= 0 ? '' : 'var<workgroup> mat4_data: array<mat4x4<f32>, b>;'}
+ @compute @workgroup_size(1) fn main() {
+ ${vec4Count <= 0 ? '' : '_ = vec4_data[0];'}
+ ${mat4Count <= 0 ? '' : '_ = mat4_data[0];'}
+ }`,
+ }),
+ entryPoint: 'main',
+ constants: {
+ a: vec4Count,
+ b: mat4Count,
+ },
+ },
+ };
+
+ t.doCreateComputePipelineTest(isAsync, _success, descriptor);
+ };
+
+ testFn(1, 1, true);
+ testFn(maxVec4Count + 1, 0, false);
+ testFn(0, maxMat4Count + 1, false);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/createBindGroup.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/createBindGroup.spec.ts
new file mode 100644
index 0000000000..ddd0f8b39f
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/createBindGroup.spec.ts
@@ -0,0 +1,1110 @@
+export const description = `
+ createBindGroup validation tests.
+
+  TODO: Ensure tests cover all createBindGroup validation rules.
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { assert, makeValueTestVariant, unreachable } from '../../../common/util/util.js';
+import {
+ allBindingEntries,
+ bindingTypeInfo,
+ bufferBindingEntries,
+ bufferBindingTypeInfo,
+ kBindableResources,
+ kBufferBindingTypes,
+ kBufferUsages,
+ kCompareFunctions,
+ kSamplerBindingTypes,
+ kTextureUsages,
+ kTextureViewDimensions,
+ sampledAndStorageBindingEntries,
+ texBindingTypeInfo,
+} from '../../capability_info.js';
+import { GPUConst } from '../../constants.js';
+import { kAllTextureFormats, kTextureFormatInfo } from '../../format_info.js';
+import { kResourceStates } from '../../gpu_test.js';
+import { getTextureDimensionFromView } from '../../util/texture/base.js';
+
+import { ValidationTest } from './validation_test.js';
+
+function clone<T extends GPUTextureDescriptor>(descriptor: T): T {
+ return JSON.parse(JSON.stringify(descriptor));
+}
+
+export const g = makeTestGroup(ValidationTest);
+
+const kStorageTextureFormats = kAllTextureFormats.filter(f => kTextureFormatInfo[f].color?.storage);
+
+g.test('binding_count_mismatch')
+ .desc('Test that the number of entries must match the number of entries in the BindGroupLayout.')
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('layoutEntryCount', [1, 2, 3])
+ .combine('bindGroupEntryCount', [1, 2, 3])
+ )
+ .fn(t => {
+ const { layoutEntryCount, bindGroupEntryCount } = t.params;
+
+ const layoutEntries: Array<GPUBindGroupLayoutEntry> = [];
+ for (let i = 0; i < layoutEntryCount; ++i) {
+ layoutEntries.push({
+ binding: i,
+ visibility: GPUShaderStage.COMPUTE,
+ buffer: { type: 'storage' },
+ });
+ }
+ const bindGroupLayout = t.device.createBindGroupLayout({ entries: layoutEntries });
+
+ const entries: Array<GPUBindGroupEntry> = [];
+ for (let i = 0; i < bindGroupEntryCount; ++i) {
+ entries.push({
+ binding: i,
+ resource: { buffer: t.getStorageBuffer() },
+ });
+ }
+
+ const shouldError = layoutEntryCount !== bindGroupEntryCount;
+ t.expectValidationError(() => {
+ t.device.createBindGroup({
+ entries,
+ layout: bindGroupLayout,
+ });
+ }, shouldError);
+ });
+
+g.test('binding_must_be_present_in_layout')
+ .desc(
+ 'Test that the binding slot for each entry matches a binding slot defined in the BindGroupLayout.'
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('layoutBinding', [0, 1, 2])
+ .combine('binding', [0, 1, 2])
+ )
+ .fn(t => {
+ const { layoutBinding, binding } = t.params;
+
+ const bindGroupLayout = t.device.createBindGroupLayout({
+ entries: [
+ { binding: layoutBinding, visibility: GPUShaderStage.COMPUTE, buffer: { type: 'storage' } },
+ ],
+ });
+
+ const descriptor = {
+ entries: [{ binding, resource: { buffer: t.getStorageBuffer() } }],
+ layout: bindGroupLayout,
+ };
+
+ const shouldError = layoutBinding !== binding;
+ t.expectValidationError(() => {
+ t.device.createBindGroup(descriptor);
+ }, shouldError);
+ });
+
+g.test('binding_must_contain_resource_defined_in_layout')
+ .desc(
+ 'Test that only compatible resource types specified in the BindGroupLayout are allowed for each entry.'
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('resourceType', kBindableResources)
+ .combine('entry', allBindingEntries(false))
+ )
+ .fn(t => {
+ const { resourceType, entry } = t.params;
+ const info = bindingTypeInfo(entry);
+
+ const layout = t.device.createBindGroupLayout({
+ entries: [{ binding: 0, visibility: GPUShaderStage.COMPUTE, ...entry }],
+ });
+
+ const resource = t.getBindingResource(resourceType);
+
+ let resourceBindingIsCompatible;
+ switch (info.resource) {
+ // Either type of sampler may be bound to a filtering sampler binding.
+ case 'filtSamp':
+ resourceBindingIsCompatible = resourceType === 'filtSamp' || resourceType === 'nonFiltSamp';
+ break;
+ // But only non-filtering samplers can be used with non-filtering sampler bindings.
+ case 'nonFiltSamp':
+ resourceBindingIsCompatible = resourceType === 'nonFiltSamp';
+ break;
+ default:
+ resourceBindingIsCompatible = info.resource === resourceType;
+ break;
+ }
+ t.expectValidationError(() => {
+ t.device.createBindGroup({ layout, entries: [{ binding: 0, resource }] });
+ }, !resourceBindingIsCompatible);
+ });
+
+g.test('texture_binding_must_have_correct_usage')
+ .desc('Tests that texture bindings must have the correct usage.')
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('entry', sampledAndStorageBindingEntries(false))
+ .combine('usage', kTextureUsages)
+ .unless(({ entry, usage }) => {
+ const info = texBindingTypeInfo(entry);
+ // Can't create the texture for this (usage=STORAGE_BINDING and sampleCount=4), so skip.
+ return usage === GPUConst.TextureUsage.STORAGE_BINDING && info.resource === 'sampledTexMS';
+ })
+ )
+ .fn(t => {
+ const { entry, usage } = t.params;
+ const info = texBindingTypeInfo(entry);
+
+ const bindGroupLayout = t.device.createBindGroupLayout({
+ entries: [{ binding: 0, visibility: GPUShaderStage.FRAGMENT, ...entry }],
+ });
+
+    // The `RENDER_ATTACHMENT` usage must be specified if sampleCount > 1 according to the WebGPU spec.
+ const appliedUsage =
+ info.resource === 'sampledTexMS' ? usage | GPUConst.TextureUsage.RENDER_ATTACHMENT : usage;
+
+ const descriptor = {
+ size: { width: 16, height: 16, depthOrArrayLayers: 1 },
+ format: 'rgba8unorm' as const,
+ usage: appliedUsage,
+ sampleCount: info.resource === 'sampledTexMS' ? 4 : 1,
+ };
+ const resource = t.device.createTexture(descriptor).createView();
+
+ const shouldError = (usage & info.usage) === 0;
+ t.expectValidationError(() => {
+ t.device.createBindGroup({
+ entries: [{ binding: 0, resource }],
+ layout: bindGroupLayout,
+ });
+ }, shouldError);
+ });
+
+g.test('texture_must_have_correct_component_type')
+ .desc(
+ `
+ Tests that texture bindings must have a format that matches the sample type specified in the BindGroupLayout.
+ - Tests a compatible format for every sample type
+ - Tests an incompatible format for every sample type`
+ )
+ .params(u => u.combine('sampleType', ['float', 'sint', 'uint'] as const))
+ .fn(t => {
+ const { sampleType } = t.params;
+
+ const bindGroupLayout = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.FRAGMENT,
+ texture: { sampleType },
+ },
+ ],
+ });
+
+ let format: GPUTextureFormat;
+ if (sampleType === 'float') {
+ format = 'r8unorm';
+ } else if (sampleType === 'sint') {
+ format = 'r8sint';
+ } else if (sampleType === 'uint') {
+ format = 'r8uint';
+ } else {
+ unreachable('Unexpected texture component type');
+ }
+
+ const goodDescriptor = {
+ size: { width: 16, height: 16, depthOrArrayLayers: 1 },
+ format,
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ };
+
+ // Control case
+ t.device.createBindGroup({
+ entries: [
+ {
+ binding: 0,
+ resource: t.device.createTexture(goodDescriptor).createView(),
+ },
+ ],
+ layout: bindGroupLayout,
+ });
+
+ function* mismatchedTextureFormats(): Iterable<GPUTextureFormat> {
+ if (sampleType !== 'float') {
+ yield 'r8unorm';
+ }
+ if (sampleType !== 'sint') {
+ yield 'r8sint';
+ }
+ if (sampleType !== 'uint') {
+ yield 'r8uint';
+ }
+ }
+
+ // Mismatched texture binding formats are not valid.
+ for (const mismatchedTextureFormat of mismatchedTextureFormats()) {
+ const badDescriptor: GPUTextureDescriptor = clone(goodDescriptor);
+ badDescriptor.format = mismatchedTextureFormat;
+
+ t.expectValidationError(() => {
+ t.device.createBindGroup({
+ entries: [{ binding: 0, resource: t.device.createTexture(badDescriptor).createView() }],
+ layout: bindGroupLayout,
+ });
+ });
+ }
+ });
+
+g.test('texture_must_have_correct_dimension')
+ .desc(
+ `
+ Test that bound texture views match the dimensions supplied in the BindGroupLayout
+ - Test for every GPUTextureViewDimension
+ - Test for both TEXTURE_BINDING and STORAGE_BINDING.
+ `
+ )
+ .params(u =>
+ u
+ .combine('usage', [
+ GPUConst.TextureUsage.TEXTURE_BINDING,
+ GPUConst.TextureUsage.STORAGE_BINDING,
+ ])
+ .combine('viewDimension', kTextureViewDimensions)
+ .unless(
+ p =>
+ p.usage === GPUConst.TextureUsage.STORAGE_BINDING &&
+ (p.viewDimension === 'cube' || p.viewDimension === 'cube-array')
+ )
+ .beginSubcases()
+ .combine('dimension', kTextureViewDimensions)
+ )
+ .fn(t => {
+ const { usage, viewDimension, dimension } = t.params;
+
+ const bindGroupLayout = t.device.createBindGroupLayout({
+ entries: [
+ usage === GPUTextureUsage.TEXTURE_BINDING
+ ? {
+ binding: 0,
+ visibility: GPUShaderStage.FRAGMENT,
+ texture: { viewDimension },
+ }
+ : {
+ binding: 0,
+ visibility: GPUShaderStage.FRAGMENT,
+ storageTexture: { access: 'write-only', format: 'rgba8unorm', viewDimension },
+ },
+ ],
+ });
+
+ let height = 16;
+ let depthOrArrayLayers = 6;
+ if (dimension === '1d') {
+ height = 1;
+ depthOrArrayLayers = 1;
+ }
+
+ const texture = t.device.createTexture({
+ size: { width: 16, height, depthOrArrayLayers },
+ format: 'rgba8unorm' as const,
+ usage,
+ dimension: getTextureDimensionFromView(dimension),
+ });
+
+ t.skipIfTextureViewDimensionNotSupported(viewDimension, dimension);
+
+ const shouldError = viewDimension !== dimension;
+ const textureView = texture.createView({ dimension });
+
+ t.expectValidationError(() => {
+ t.device.createBindGroup({
+ entries: [{ binding: 0, resource: textureView }],
+ layout: bindGroupLayout,
+ });
+ }, shouldError);
+ });
+
+g.test('multisampled_validation')
+ .desc(
+ `
+ Test that the sample count of the texture is greater than 1 if the BindGroup entry's
+ multisampled is true. Otherwise, the texture's sampleCount should be 1.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('multisampled', [true, false])
+ .beginSubcases()
+ .combine('sampleCount', [1, 4])
+ )
+ .fn(t => {
+ const { multisampled, sampleCount } = t.params;
+ const bindGroupLayout = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.FRAGMENT,
+ texture: { multisampled, sampleType: multisampled ? 'unfilterable-float' : undefined },
+ },
+ ],
+ });
+
+ const texture = t.device.createTexture({
+ size: { width: 16, height: 16, depthOrArrayLayers: 1 },
+ format: 'rgba8unorm' as const,
+ usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.RENDER_ATTACHMENT,
+ sampleCount,
+ });
+
+ const isValid = (!multisampled && sampleCount === 1) || (multisampled && sampleCount > 1);
+
+ const textureView = texture.createView();
+ t.expectValidationError(() => {
+ t.device.createBindGroup({
+ entries: [{ binding: 0, resource: textureView }],
+ layout: bindGroupLayout,
+ });
+ }, !isValid);
+ });
+
+g.test('buffer_offset_and_size_for_bind_groups_match')
+ .desc(
+ `
+ Test that a buffer binding's [offset, offset + size) must be contained in the BindGroup entry's buffer.
+ - Test for various offsets and sizes`
+ )
+ .paramsSubcasesOnly([
+ { offset: 0, size: 512, _success: true }, // offset 0 is valid
+ { offset: 256, size: 256, _success: true }, // offset 256 (aligned) is valid
+
+ // Touching the end of the buffer
+ { offset: 0, size: 1024, _success: true },
+ { offset: 0, size: undefined, _success: true },
+ { offset: 256 * 3, size: 256, _success: true },
+ { offset: 256 * 3, size: undefined, _success: true },
+
+ // Zero-sized bindings
+ { offset: 0, size: 0, _success: false },
+ { offset: 256, size: 0, _success: false },
+ { offset: 1024, size: 0, _success: false },
+ { offset: 1024, size: undefined, _success: false },
+
+ // Unaligned buffer offset is invalid
+ { offset: 1, size: 256, _success: false },
+ { offset: 1, size: undefined, _success: false },
+ { offset: 128, size: 256, _success: false },
+ { offset: 255, size: 256, _success: false },
+
+ // Out-of-bounds
+ { offset: 256 * 5, size: 0, _success: false }, // offset is OOB
+ { offset: 0, size: 256 * 5, _success: false }, // size is OOB
+ { offset: 1024, size: 1, _success: false }, // offset+size is OOB
+ ])
+ .fn(t => {
+ const { offset, size, _success } = t.params;
+
+ const bindGroupLayout = t.device.createBindGroupLayout({
+ entries: [{ binding: 0, visibility: GPUShaderStage.COMPUTE, buffer: { type: 'storage' } }],
+ });
+
+ const buffer = t.device.createBuffer({
+ size: 1024,
+ usage: GPUBufferUsage.STORAGE,
+ });
+
+ const descriptor = {
+ entries: [
+ {
+ binding: 0,
+ resource: { buffer, offset, size },
+ },
+ ],
+ layout: bindGroupLayout,
+ };
+
+ if (_success) {
+ // Control case
+ t.device.createBindGroup(descriptor);
+ } else {
+ // Buffer offset and/or size don't match in bind groups.
+ t.expectValidationError(() => {
+ t.device.createBindGroup(descriptor);
+ });
+ }
+ });
+
+g.test('minBindingSize')
+ .desc('Tests that minBindingSize is correctly enforced.')
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('minBindingSize', [undefined, 4, 8, 256])
+ .expand('size', ({ minBindingSize }) =>
+ minBindingSize !== undefined
+ ? [minBindingSize - 4, minBindingSize, minBindingSize + 4]
+ : [4, 256]
+ )
+ )
+ .fn(t => {
+ const { size, minBindingSize } = t.params;
+
+ const bindGroupLayout = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.FRAGMENT,
+ buffer: {
+ type: 'storage',
+ minBindingSize,
+ },
+ },
+ ],
+ });
+
+ const storageBuffer = t.device.createBuffer({
+ size,
+ usage: GPUBufferUsage.STORAGE,
+ });
+
+ t.expectValidationError(
+ () => {
+ t.device.createBindGroup({
+ layout: bindGroupLayout,
+ entries: [
+ {
+ binding: 0,
+ resource: { buffer: storageBuffer },
+ },
+ ],
+ });
+ },
+ minBindingSize !== undefined && size < minBindingSize
+ );
+ });
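+
+/*
+For illustration: with minBindingSize = 8 the subcases use buffer sizes 4, 8, and 12, and only
+the size-4 binding should produce a validation error. With minBindingSize undefined, sizes 4 and
+256 are both expected to succeed, since the whole buffer is bound and no minimum is enforced.
+*/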
+
+g.test('buffer,resource_state')
+ .desc('Test bind group creation with various buffer resource states')
+ .paramsSubcasesOnly(u =>
+ u.combine('state', kResourceStates).combine('entry', bufferBindingEntries(true))
+ )
+ .fn(t => {
+ const { state, entry } = t.params;
+
+ assert(entry.buffer !== undefined);
+ const info = bufferBindingTypeInfo(entry.buffer);
+
+ const bgl = t.device.createBindGroupLayout({
+ entries: [
+ {
+ ...entry,
+ binding: 0,
+ visibility: info.validStages,
+ },
+ ],
+ });
+
+ const buffer = t.createBufferWithState(state, {
+ usage: info.usage,
+ size: 4,
+ });
+
+ t.expectValidationError(() => {
+ t.device.createBindGroup({
+ layout: bgl,
+ entries: [
+ {
+ binding: 0,
+ resource: {
+ buffer,
+ },
+ },
+ ],
+ });
+ }, state === 'invalid');
+ });
+
+g.test('texture,resource_state')
+ .desc('Test bind group creation with various texture resource states')
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('state', kResourceStates)
+ .combine('entry', sampledAndStorageBindingEntries(true, 'rgba8unorm'))
+ )
+ .fn(t => {
+ const { state, entry } = t.params;
+ const info = texBindingTypeInfo(entry);
+
+ const bgl = t.device.createBindGroupLayout({
+ entries: [
+ {
+ ...entry,
+ binding: 0,
+ visibility: info.validStages,
+ },
+ ],
+ });
+
+    // The `RENDER_ATTACHMENT` usage must be specified if sampleCount > 1 according to the WebGPU spec.
+ const usage = entry.texture?.multisampled
+ ? info.usage | GPUConst.TextureUsage.RENDER_ATTACHMENT
+ : info.usage;
+ const texture = t.createTextureWithState(state, {
+ usage,
+ size: [1, 1],
+ format: 'rgba8unorm',
+ sampleCount: entry.texture?.multisampled ? 4 : 1,
+ });
+
+ let textureView: GPUTextureView;
+ t.expectValidationError(() => {
+ textureView = texture.createView();
+ }, state === 'invalid');
+
+ t.expectValidationError(() => {
+ t.device.createBindGroup({
+ layout: bgl,
+ entries: [
+ {
+ binding: 0,
+ resource: textureView,
+ },
+ ],
+ });
+ }, state === 'invalid');
+ });
+
+g.test('bind_group_layout,device_mismatch')
+ .desc(
+ 'Tests createBindGroup cannot be called with a bind group layout created from another device'
+ )
+ .paramsSubcasesOnly(u => u.combine('mismatched', [true, false]))
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const mismatched = t.params.mismatched;
+
+ const sourceDevice = mismatched ? t.mismatchedDevice : t.device;
+
+ const bgl = sourceDevice.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUConst.ShaderStage.VERTEX,
+ buffer: {},
+ },
+ ],
+ });
+
+ t.expectValidationError(() => {
+ t.device.createBindGroup({
+ layout: bgl,
+ entries: [
+ {
+ binding: 0,
+ resource: { buffer: t.getUniformBuffer() },
+ },
+ ],
+ });
+ }, mismatched);
+ });
+
+g.test('binding_resources,device_mismatch')
+ .desc(
+ `
+ Tests createBindGroup cannot be called with various resources created from another device
+ Test with two resources to make sure all resources can be validated:
+ - resource0 and resource1 from same device
+ - resource0 and resource1 from different device
+
+ TODO: test GPUExternalTexture as a resource
+ `
+ )
+ .params(u =>
+ u
+ .combine('entry', [
+ { buffer: { type: 'storage' } },
+ { sampler: { type: 'filtering' } },
+ { texture: { multisampled: false } },
+ { storageTexture: { access: 'write-only', format: 'rgba8unorm' } },
+ ] as const)
+ .beginSubcases()
+ .combineWithParams([
+ { resource0Mismatched: false, resource1Mismatched: false }, //control case
+ { resource0Mismatched: true, resource1Mismatched: false },
+ { resource0Mismatched: false, resource1Mismatched: true },
+ ])
+ )
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { entry, resource0Mismatched, resource1Mismatched } = t.params;
+
+ const info = bindingTypeInfo(entry);
+
+ const resource0 = resource0Mismatched
+ ? t.getDeviceMismatchedBindingResource(info.resource)
+ : t.getBindingResource(info.resource);
+ const resource1 = resource1Mismatched
+ ? t.getDeviceMismatchedBindingResource(info.resource)
+ : t.getBindingResource(info.resource);
+
+ const bgl = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: info.validStages,
+ ...entry,
+ },
+ {
+ binding: 1,
+ visibility: info.validStages,
+ ...entry,
+ },
+ ],
+ });
+
+ t.expectValidationError(() => {
+ t.device.createBindGroup({
+ layout: bgl,
+ entries: [
+ {
+ binding: 0,
+ resource: resource0,
+ },
+ {
+ binding: 1,
+ resource: resource1,
+ },
+ ],
+ });
+ }, resource0Mismatched || resource1Mismatched);
+ });
+
+g.test('storage_texture,usage')
+ .desc(
+ `
+ Test that the texture usage contains STORAGE_BINDING if the BindGroup entry defines
+ storageTexture.
+ `
+ )
+ .params(u =>
+ u //
+    // If usage0 and usage1 are the same, the usage being tested is a single usage. Otherwise, it's
+ // a combined usage.
+ .combine('usage0', kTextureUsages)
+ .combine('usage1', kTextureUsages)
+ )
+ .fn(t => {
+ const { usage0, usage1 } = t.params;
+
+ const usage = usage0 | usage1;
+
+ const bindGroupLayout = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.FRAGMENT,
+ storageTexture: { access: 'write-only', format: 'rgba8unorm' },
+ },
+ ],
+ });
+
+ const texture = t.device.createTexture({
+ size: { width: 16, height: 16, depthOrArrayLayers: 1 },
+ format: 'rgba8unorm' as const,
+ usage,
+ });
+
+ const isValid = GPUTextureUsage.STORAGE_BINDING & usage;
+
+ const textureView = texture.createView();
+ t.expectValidationError(() => {
+ t.device.createBindGroup({
+ entries: [{ binding: 0, resource: textureView }],
+ layout: bindGroupLayout,
+ });
+ }, !isValid);
+ });
+
+g.test('storage_texture,mip_level_count')
+ .desc(
+ `
+    Test that the texture view bound to a storageTexture BindGroup entry has a mip level count of 1.
+    If the mip level count is not 1, a validation error should be generated.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('baseMipLevel', [1, 2])
+ .combine('mipLevelCount', [1, 2])
+ )
+ .fn(t => {
+ const { baseMipLevel, mipLevelCount } = t.params;
+
+ const bindGroupLayout = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.FRAGMENT,
+ storageTexture: { access: 'write-only', format: 'rgba8unorm' },
+ },
+ ],
+ });
+
+ const MIP_LEVEL_COUNT = 4;
+ const texture = t.device.createTexture({
+ size: { width: 16, height: 16, depthOrArrayLayers: 1 },
+ format: 'rgba8unorm' as const,
+ usage: GPUTextureUsage.STORAGE_BINDING,
+ mipLevelCount: MIP_LEVEL_COUNT,
+ });
+
+ const textureView = texture.createView({ baseMipLevel, mipLevelCount });
+
+ t.expectValidationError(() => {
+ t.device.createBindGroup({
+ entries: [{ binding: 0, resource: textureView }],
+ layout: bindGroupLayout,
+ });
+ }, mipLevelCount !== 1);
+ });
+
+g.test('storage_texture,format')
+ .desc(
+ `
+  Test that the format of the storage texture entry is equal to the bound resource's format if
+  the BindGroup entry defines storageTexture.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('storageTextureFormat', kStorageTextureFormats)
+ .combine('resourceFormat', kStorageTextureFormats)
+ )
+ .fn(t => {
+ const { storageTextureFormat, resourceFormat } = t.params;
+
+ const bindGroupLayout = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.FRAGMENT,
+ storageTexture: { access: 'write-only', format: storageTextureFormat },
+ },
+ ],
+ });
+
+ const texture = t.device.createTexture({
+ size: { width: 16, height: 16, depthOrArrayLayers: 1 },
+ format: resourceFormat,
+ usage: GPUTextureUsage.STORAGE_BINDING,
+ });
+
+ const isValid = storageTextureFormat === resourceFormat;
+ const textureView = texture.createView({ format: resourceFormat });
+ t.expectValidationError(() => {
+ t.device.createBindGroup({
+ entries: [{ binding: 0, resource: textureView }],
+ layout: bindGroupLayout,
+ });
+ }, !isValid);
+ });
+
+g.test('buffer,usage')
+ .desc(
+ `
+  Test that the buffer usage contains 'UNIFORM' if the BindGroup entry defines buffer and its
+  type is 'uniform', and that the buffer usage contains 'STORAGE' if the BindGroup entry's buffer
+  type is 'storage'|'read-only-storage'.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('type', kBufferBindingTypes)
+      // If usage0 and usage1 are the same, the usage being tested is a single usage. Otherwise,
+      // it's a combined usage.
+ .beginSubcases()
+ .combine('usage0', kBufferUsages)
+ .combine('usage1', kBufferUsages)
+ .unless(
+ ({ usage0, usage1 }) =>
+ ((usage0 | usage1) & (GPUConst.BufferUsage.MAP_READ | GPUConst.BufferUsage.MAP_WRITE)) !==
+ 0
+ )
+ )
+ .fn(t => {
+ const { type, usage0, usage1 } = t.params;
+
+ const usage = usage0 | usage1;
+
+ const bindGroupLayout = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE,
+ buffer: { type },
+ },
+ ],
+ });
+
+ const buffer = t.device.createBuffer({
+ size: 4,
+ usage,
+ });
+
+ let isValid = false;
+ if (type === 'uniform') {
+ isValid = GPUBufferUsage.UNIFORM & usage ? true : false;
+ } else if (type === 'storage' || type === 'read-only-storage') {
+ isValid = GPUBufferUsage.STORAGE & usage ? true : false;
+ }
+
+ t.expectValidationError(() => {
+ t.device.createBindGroup({
+ entries: [{ binding: 0, resource: { buffer } }],
+ layout: bindGroupLayout,
+ });
+ }, !isValid);
+ });
+
+g.test('buffer,resource_offset')
+ .desc(
+ `
+  Test that the resource.offset of the BindGroup entry is a multiple of the corresponding limit,
+  'minUniformBufferOffsetAlignment' or 'minStorageBufferOffsetAlignment', if the BindGroup entry
+  defines buffer and the buffer type is 'uniform|storage|read-only-storage'.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('type', kBufferBindingTypes)
+ .beginSubcases()
+ .combine('offsetAddMult', [
+ { add: 0, mult: 0 },
+ { add: 0, mult: 0.5 },
+ { add: 0, mult: 1.5 },
+ { add: 2, mult: 0 },
+ ])
+ )
+ .fn(t => {
+ const { type, offsetAddMult } = t.params;
+ const minAlignment =
+ t.device.limits[
+ type === 'uniform' ? 'minUniformBufferOffsetAlignment' : 'minStorageBufferOffsetAlignment'
+ ];
+ const offset = makeValueTestVariant(minAlignment, offsetAddMult);
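+    // Illustrative example (assuming makeValueTestVariant(limit, { add, mult }) evaluates to
+    // limit * mult + add, and the spec-default alignment of 256 for both limits): the variants
+    // above yield offsets 0, 128, 384, and 2, of which only 0 is a multiple of 256.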
+
+ const bindGroupLayout = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE,
+ buffer: { type },
+ },
+ ],
+ });
+
+ const usage = type === 'uniform' ? GPUBufferUsage.UNIFORM : GPUBufferUsage.STORAGE;
+ const isValid = offset % minAlignment === 0;
+
+ const buffer = t.device.createBuffer({
+ size: 1024,
+ usage,
+ });
+
+ t.expectValidationError(() => {
+ t.device.createBindGroup({
+ entries: [{ binding: 0, resource: { buffer, offset } }],
+ layout: bindGroupLayout,
+ });
+ }, !isValid);
+ });
+
+g.test('buffer,resource_binding_size')
+ .desc(
+ `
+  Test that the buffer binding size of the BindGroup entry is equal to or less than the
+  corresponding limit, 'maxUniformBufferBindingSize' or 'maxStorageBufferBindingSize', if the
+  BindGroup entry defines buffer and the buffer type is 'uniform|storage|read-only-storage'.
+ `
+ )
+ .params(u =>
+ u
+ .combine('type', kBufferBindingTypes)
+ .beginSubcases()
+ // Test a size of 1 (for uniform buffer) or 4 (for storage and read-only storage buffer)
+ // then values just within and just above the limit.
+ .combine('bindingSize', [
+ { base: 1, limit: 0 },
+ { base: 0, limit: 1 },
+ { base: 1, limit: 1 },
+ ])
+ )
+ .fn(t => {
+ const {
+ type,
+ bindingSize: { base, limit },
+ } = t.params;
+ const mult = type === 'uniform' ? 1 : 4;
+ const maxBindingSize =
+ t.device.limits[
+ type === 'uniform' ? 'maxUniformBufferBindingSize' : 'maxStorageBufferBindingSize'
+ ];
+ const bindingSize = base * mult + maxBindingSize * limit;
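+    // Illustrative example (assuming the device reports the spec-default
+    // maxUniformBufferBindingSize of 65536): for type 'uniform' the variants above yield binding
+    // sizes 1, 65536, and 65537 -- the first two are within the limit, the last one is not.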
+
+ const bindGroupLayout = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE,
+ buffer: { type },
+ },
+ ],
+ });
+
+ const usage = type === 'uniform' ? GPUBufferUsage.UNIFORM : GPUBufferUsage.STORAGE;
+ const isValid = bindingSize <= maxBindingSize;
+
+ // MAINTENANCE_TODO: Allocating the max size seems likely to fail. Refactor test.
+ const buffer = t.device.createBuffer({
+ size: maxBindingSize,
+ usage,
+ });
+
+ t.expectValidationError(() => {
+ t.device.createBindGroup({
+ entries: [{ binding: 0, resource: { buffer, size: bindingSize } }],
+ layout: bindGroupLayout,
+ });
+ }, !isValid);
+ });
+
+g.test('buffer,effective_buffer_binding_size')
+ .desc(
+ `
+ Test that the effective buffer binding size of the BindGroup entry must be a multiple of 4 if the
+ buffer type is 'storage|read-only-storage', while there is no such restriction on uniform buffers.
+`
+ )
+ .params(u =>
+ u
+ .combine('type', kBufferBindingTypes)
+ .beginSubcases()
+ .combine('offsetMult', [0, 1])
+ .combine('bufferSizeAddition', [8, 10])
+ .combine('bindingSize', [undefined, 2, 4, 6])
+ )
+ .fn(t => {
+ const { type, offsetMult, bufferSizeAddition, bindingSize } = t.params;
+ const minAlignment =
+ t.device.limits[
+ type === 'uniform' ? 'minUniformBufferOffsetAlignment' : 'minStorageBufferOffsetAlignment'
+ ];
+ const offset = minAlignment * offsetMult;
+ const bufferSize = minAlignment + bufferSizeAddition;
+
+ const bindGroupLayout = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE,
+ buffer: { type },
+ },
+ ],
+ });
+
+ const effectiveBindingSize = bindingSize ?? bufferSize - offset;
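+    // Illustrative example (assuming the spec-default minStorageBufferOffsetAlignment of 256):
+    // offsetMult = 0 and bufferSizeAddition = 10 give bufferSize = 266, so an undefined
+    // bindingSize yields an effective size of 266; 266 % 4 === 2, which is invalid for 'storage'
+    // and 'read-only-storage' bindings but fine for 'uniform' bindings.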
+ let usage, isValid;
+ if (type === 'uniform') {
+ usage = GPUBufferUsage.UNIFORM;
+ isValid = true;
+ } else {
+ usage = GPUBufferUsage.STORAGE;
+ isValid = effectiveBindingSize % 4 === 0;
+ }
+
+ const buffer = t.device.createBuffer({
+ size: bufferSize,
+ usage,
+ });
+
+ t.expectValidationError(() => {
+ t.device.createBindGroup({
+ entries: [{ binding: 0, resource: { buffer, offset, size: bindingSize } }],
+ layout: bindGroupLayout,
+ });
+ }, !isValid);
+ });
+
+g.test('sampler,device_mismatch')
+ .desc(`Tests createBindGroup cannot be called with a sampler created from another device.`)
+ .paramsSubcasesOnly(u => u.combine('mismatched', [true, false]))
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { mismatched } = t.params;
+
+ const sourceDevice = mismatched ? t.mismatchedDevice : t.device;
+
+ const bindGroupLayout = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.FRAGMENT,
+ sampler: { type: 'filtering' as const },
+ },
+ ],
+ });
+
+ const sampler = sourceDevice.createSampler();
+ t.expectValidationError(() => {
+ t.device.createBindGroup({
+ entries: [{ binding: 0, resource: sampler }],
+ layout: bindGroupLayout,
+ });
+ }, mismatched);
+ });
+
+g.test('sampler,compare_function_with_binding_type')
+ .desc(
+ `
+  Test that the sampler bound in the BindGroup has a 'compare' function if the sampler type of the
+  BindGroupLayout is 'comparison'. For other sampler types, the sampler descriptor should not have
+  a 'compare' field.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('bgType', kSamplerBindingTypes)
+ .beginSubcases()
+ .combine('compareFunction', [undefined, ...kCompareFunctions])
+ )
+ .fn(t => {
+ const { bgType, compareFunction } = t.params;
+
+ const bindGroupLayout = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.FRAGMENT,
+ sampler: { type: bgType },
+ },
+ ],
+ });
+
+ const isValid =
+ bgType === 'comparison' ? compareFunction !== undefined : compareFunction === undefined;
+
+ const sampler = t.device.createSampler({ compare: compareFunction });
+
+ t.expectValidationError(() => {
+ t.device.createBindGroup({
+ entries: [{ binding: 0, resource: sampler }],
+ layout: bindGroupLayout,
+ });
+ }, !isValid);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/createBindGroupLayout.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/createBindGroupLayout.spec.ts
new file mode 100644
index 0000000000..a50247aa13
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/createBindGroupLayout.spec.ts
@@ -0,0 +1,464 @@
+export const description = `
+createBindGroupLayout validation tests.
+
+TODO: make sure tests are complete.
+`;
+
+import { kUnitCaseParamsBuilder } from '../../../common/framework/params_builder.js';
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import {
+ kShaderStages,
+ kShaderStageCombinations,
+ kStorageTextureAccessValues,
+ kTextureSampleTypes,
+ kTextureViewDimensions,
+ allBindingEntries,
+ bindingTypeInfo,
+ bufferBindingTypeInfo,
+ kBufferBindingTypes,
+ BGLEntry,
+} from '../../capability_info.js';
+import { kAllTextureFormats, kTextureFormatInfo } from '../../format_info.js';
+
+import { ValidationTest } from './validation_test.js';
+
+function clone<T extends GPUBindGroupLayoutDescriptor>(descriptor: T): T {
+ return JSON.parse(JSON.stringify(descriptor));
+}
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('duplicate_bindings')
+ .desc('Test that uniqueness of binding numbers across entries is enforced.')
+ .paramsSubcasesOnly([
+ { bindings: [0, 1], _valid: true },
+ { bindings: [0, 0], _valid: false },
+ ])
+ .fn(t => {
+ const { bindings, _valid } = t.params;
+ const entries: Array<GPUBindGroupLayoutEntry> = [];
+
+ for (const binding of bindings) {
+ entries.push({
+ binding,
+ visibility: GPUShaderStage.COMPUTE,
+ buffer: { type: 'storage' as const },
+ });
+ }
+
+ t.expectValidationError(() => {
+ t.device.createBindGroupLayout({
+ entries,
+ });
+ }, !_valid);
+ });
+
+g.test('maximum_binding_limit')
+ .desc(
+ `
+ Test that a validation error is generated if the binding number exceeds the maximum binding limit.
+
+ TODO: Need to also test with higher limits enabled on the device, once we have a way to do that.
+ `
+ )
+ .paramsSubcasesOnly(u =>
+ u.combine('bindingVariant', [1, 4, 8, 256, 'default', 'default-minus-one'] as const)
+ )
+ .fn(t => {
+ const { bindingVariant } = t.params;
+ const entries: Array<GPUBindGroupLayoutEntry> = [];
+
+ const binding =
+ bindingVariant === 'default'
+ ? t.device.limits.maxBindingsPerBindGroup
+ : bindingVariant === 'default-minus-one'
+ ? t.device.limits.maxBindingsPerBindGroup - 1
+ : bindingVariant;
+
+ entries.push({
+ binding,
+ visibility: GPUShaderStage.COMPUTE,
+ buffer: { type: 'storage' as const },
+ });
+
+ const success = binding < t.device.limits.maxBindingsPerBindGroup;
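+    // Illustrative example (assuming the spec-default maxBindingsPerBindGroup of 1000):
+    // 'default-minus-one' uses binding 999, which is allowed, while 'default' uses binding 1000,
+    // which is out of range because binding numbers must be strictly less than the limit.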
+
+ t.expectValidationError(() => {
+ t.device.createBindGroupLayout({
+ entries,
+ });
+ }, !success);
+ });
+
+g.test('visibility')
+ .desc(
+ `
+ Test that only the appropriate combinations of visibilities are allowed for each resource type.
+ - Test each possible combination of shader stage visibilities.
+ - Test each type of bind group resource.`
+ )
+ .params(u =>
+ u
+ .combine('visibility', kShaderStageCombinations)
+ .beginSubcases()
+ .combine('entry', allBindingEntries(false))
+ )
+ .fn(t => {
+ const { visibility, entry } = t.params;
+ const info = bindingTypeInfo(entry);
+
+ const success = (visibility & ~info.validStages) === 0;
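+    // Illustrative example: a { buffer: { type: 'storage' } } entry is not allowed in the vertex
+    // stage, so its validStages is FRAGMENT | COMPUTE (0x6); a visibility of VERTEX | FRAGMENT
+    // (0x3) then leaves the VERTEX bit set in (visibility & ~validStages) and an error is expected.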
+
+ t.expectValidationError(() => {
+ t.device.createBindGroupLayout({
+ entries: [{ binding: 0, visibility, ...entry }],
+ });
+ }, !success);
+ });
+
+g.test('visibility,VERTEX_shader_stage_buffer_type')
+ .desc(
+ `
+ Test that a validation error is generated if the buffer type is 'storage' when the
+ visibility of the entry includes VERTEX.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('shaderStage', kShaderStageCombinations)
+ .beginSubcases()
+ .combine('type', kBufferBindingTypes)
+ )
+ .fn(t => {
+ const { shaderStage, type } = t.params;
+
+ const success = !(type === 'storage' && shaderStage & GPUShaderStage.VERTEX);
+
+ t.expectValidationError(() => {
+ t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: shaderStage,
+ buffer: { type },
+ },
+ ],
+ });
+ }, !success);
+ });
+
+g.test('visibility,VERTEX_shader_stage_storage_texture_access')
+ .desc(
+ `
+ Test that a validation error is generated if the access value is 'write-only' when the
+ visibility of the entry includes VERTEX.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('shaderStage', kShaderStageCombinations)
+ .beginSubcases()
+ .combine('access', [undefined, ...kStorageTextureAccessValues])
+ )
+ .fn(t => {
+ const { shaderStage, access } = t.params;
+
+ const success = !(
+ (access ?? 'write-only') === 'write-only' && shaderStage & GPUShaderStage.VERTEX
+ );
+
+ t.expectValidationError(() => {
+ t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: shaderStage,
+ storageTexture: { access, format: 'rgba8unorm' },
+ },
+ ],
+ });
+ }, !success);
+ });
+
+g.test('multisampled_validation')
+ .desc(
+ `
+  Test that multisampling is only allowed if the view dimension is "2d" and the sampleType is not
+  "float".
+ `
+ )
+ .params(u =>
+ u //
+ .combine('viewDimension', [undefined, ...kTextureViewDimensions])
+ .beginSubcases()
+ .combine('sampleType', [undefined, ...kTextureSampleTypes])
+ )
+ .fn(t => {
+ const { viewDimension, sampleType } = t.params;
+
+ const success =
+ (viewDimension === '2d' || viewDimension === undefined) &&
+ (sampleType ?? 'float') !== 'float';
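+    // Illustrative example: viewDimension undefined (treated as '2d') with sampleType
+    // 'unfilterable-float', 'sint', 'uint', or 'depth' is accepted, while leaving sampleType
+    // undefined defaults it to 'float', which cannot be combined with multisampled: true.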
+
+ t.expectValidationError(() => {
+ t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE,
+ texture: { multisampled: true, viewDimension, sampleType },
+ },
+ ],
+ });
+ }, !success);
+ });
+
+g.test('max_dynamic_buffers')
+ .desc(
+ `
+ Test that limits on the maximum number of dynamic buffers are enforced.
+ - Test creation of a bind group layout using the maximum number of dynamic buffers works.
+ - Test creation of a bind group layout using the maximum number of dynamic buffers + 1 fails.
+ - TODO(#230): Update to enforce per-stage and per-pipeline-layout limits on BGLs as well.`
+ )
+ .params(u =>
+ u
+ .combine('type', kBufferBindingTypes)
+ .beginSubcases()
+ .combine('extraDynamicBuffers', [0, 1])
+ .combine('staticBuffers', [0, 1])
+ )
+ .fn(t => {
+ const { type, extraDynamicBuffers, staticBuffers } = t.params;
+ const info = bufferBindingTypeInfo({ type });
+
+ const limitName = info.perPipelineLimitClass.maxDynamicLimit;
+ const bufferCount = limitName ? t.getDefaultLimit(limitName) : 0;
+ const dynamicBufferCount = bufferCount + extraDynamicBuffers;
+ const perStageLimit = t.getDefaultLimit(info.perStageLimitClass.maxLimit);
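+    // Illustrative example (spec defaults): for 'uniform' buffers the relevant limits default to
+    // maxDynamicUniformBuffersPerPipelineLayout = 8 and maxUniformBuffersPerShaderStage = 12, so
+    // 8 dynamic entries (plus an optional static one) should pass while a 9th dynamic entry fails;
+    // for storage types the corresponding defaults are 4 and 8.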
+
+ const entries = [];
+ for (let i = 0; i < dynamicBufferCount; i++) {
+ entries.push({
+ binding: i,
+ visibility: GPUShaderStage.COMPUTE,
+ buffer: { type, hasDynamicOffset: true },
+ });
+ }
+
+ for (let i = dynamicBufferCount; i < dynamicBufferCount + staticBuffers; i++) {
+ entries.push({
+ binding: i,
+ visibility: GPUShaderStage.COMPUTE,
+ buffer: { type, hasDynamicOffset: false },
+ });
+ }
+
+ const descriptor = {
+ entries,
+ };
+
+ t.expectValidationError(
+ () => {
+ t.device.createBindGroupLayout(descriptor);
+ },
+ extraDynamicBuffers > 0 || entries.length > perStageLimit
+ );
+ });
+
+/**
+ * One bind group layout will be filled with kPerStageBindingLimit[...] bindings of the type in |entry|.
+ * For each item in the array returned here, a case will be generated which tests a pipeline
+ * layout with one extra bind group layout with one extra binding. That extra binding will have:
+ *
+ * - If extraTypeSame, any of the binding types which count toward the same limit as |entry|
+ *   (e.g. 'storage-buffer' <-> 'readonly-storage-buffer').
+ * - Otherwise, an arbitrary other type.
+ */
+function* pickExtraBindingTypesForPerStage(entry: BGLEntry, extraTypeSame: boolean) {
+ if (extraTypeSame) {
+ const info = bindingTypeInfo(entry);
+ for (const extra of allBindingEntries(false)) {
+ const extraInfo = bindingTypeInfo(extra);
+ if (info.perStageLimitClass.class === extraInfo.perStageLimitClass.class) {
+ yield extra;
+ }
+ }
+ } else {
+ yield entry.sampler ? { texture: {} } : { sampler: {} };
+ }
+}
+
+const kMaxResourcesCases = kUnitCaseParamsBuilder
+ .combine('maxedEntry', allBindingEntries(false))
+ .beginSubcases()
+ .combine('maxedVisibility', kShaderStages)
+ .filter(p => (bindingTypeInfo(p.maxedEntry).validStages & p.maxedVisibility) !== 0)
+ .expand('extraEntry', p => [
+ ...pickExtraBindingTypesForPerStage(p.maxedEntry, true),
+ ...pickExtraBindingTypesForPerStage(p.maxedEntry, false),
+ ])
+ .combine('extraVisibility', kShaderStages)
+ .filter(p => (bindingTypeInfo(p.extraEntry).validStages & p.extraVisibility) !== 0);
+
+// Should never fail unless limitInfo.maxBindingsPerBindGroup.default is exceeded, because the validation for
+// resources-of-type-per-stage is in pipeline layout creation.
+g.test('max_resources_per_stage,in_bind_group_layout')
+ .desc(
+ `
+ Test that the maximum number of bindings of a given type per-stage cannot be exceeded in a
+ single bind group layout.
+ - Test each binding type.
+ - Test that creation of a bind group layout using the maximum number of bindings works.
+ - Test that creation of a bind group layout using the maximum number of bindings + 1 fails.
+ - TODO(#230): Update to enforce per-stage and per-pipeline-layout limits on BGLs as well.`
+ )
+ .params(kMaxResourcesCases)
+ .fn(t => {
+ const { maxedEntry, extraEntry, maxedVisibility, extraVisibility } = t.params;
+ const maxedTypeInfo = bindingTypeInfo(maxedEntry);
+ const maxedCount = t.getDefaultLimit(maxedTypeInfo.perStageLimitClass.maxLimit);
+ const extraTypeInfo = bindingTypeInfo(extraEntry);
+
+ const maxResourceBindings: GPUBindGroupLayoutEntry[] = [];
+ for (let i = 0; i < maxedCount; i++) {
+ maxResourceBindings.push({
+ binding: i,
+ visibility: maxedVisibility,
+ ...maxedEntry,
+ });
+ }
+
+ const goodDescriptor = { entries: maxResourceBindings };
+
+ // Control
+ t.device.createBindGroupLayout(goodDescriptor);
+
+ // Add an entry counting towards the same limit. It should produce a validation error.
+ const newDescriptor = clone(goodDescriptor);
+ newDescriptor.entries.push({
+ binding: maxedCount,
+ visibility: extraVisibility,
+ ...extraEntry,
+ });
+
+ const newBindingCountsTowardSamePerStageLimit =
+ (maxedVisibility & extraVisibility) !== 0 &&
+ maxedTypeInfo.perStageLimitClass.class === extraTypeInfo.perStageLimitClass.class;
+
+ t.expectValidationError(() => {
+ t.device.createBindGroupLayout(newDescriptor);
+ }, newBindingCountsTowardSamePerStageLimit);
+ });
+
+// One pipeline layout can have a maximum number of each type of binding *per stage* (which is
+// different for each type). Test that the max works, then add one more binding of same-or-different
+// type and same-or-different visibility.
+g.test('max_resources_per_stage,in_pipeline_layout')
+ .desc(
+ `
+ Test that the maximum number of bindings of a given type per-stage cannot be exceeded across
+ multiple bind group layouts when creating a pipeline layout.
+ - Test each binding type.
+ - Test that creation of a pipeline using the maximum number of bindings works.
+ - Test that creation of a pipeline using the maximum number of bindings + 1 fails.
+ `
+ )
+ .params(kMaxResourcesCases)
+ .fn(t => {
+ const { maxedEntry, extraEntry, maxedVisibility, extraVisibility } = t.params;
+ const maxedTypeInfo = bindingTypeInfo(maxedEntry);
+ const maxedCount = t.getDefaultLimit(maxedTypeInfo.perStageLimitClass.maxLimit);
+ const extraTypeInfo = bindingTypeInfo(extraEntry);
+
+ const maxResourceBindings: GPUBindGroupLayoutEntry[] = [];
+ for (let i = 0; i < maxedCount; i++) {
+ maxResourceBindings.push({
+ binding: i,
+ visibility: maxedVisibility,
+ ...maxedEntry,
+ });
+ }
+
+ const goodLayout = t.device.createBindGroupLayout({ entries: maxResourceBindings });
+
+ // Control
+ t.device.createPipelineLayout({ bindGroupLayouts: [goodLayout] });
+
+ const extraLayout = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: extraVisibility,
+ ...extraEntry,
+ },
+ ],
+ });
+
+ // Some binding types use the same limit, e.g. 'storage-buffer' and 'readonly-storage-buffer'.
+ const newBindingCountsTowardSamePerStageLimit =
+ (maxedVisibility & extraVisibility) !== 0 &&
+ maxedTypeInfo.perStageLimitClass.class === extraTypeInfo.perStageLimitClass.class;
+
+ t.expectValidationError(() => {
+ t.device.createPipelineLayout({ bindGroupLayouts: [goodLayout, extraLayout] });
+ }, newBindingCountsTowardSamePerStageLimit);
+ });
+
+g.test('storage_texture,layout_dimension')
+ .desc(
+ `
+  Test that viewDimension is not 'cube' or 'cube-array' if the entry's storageTexture layout is defined.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('viewDimension', [undefined, ...kTextureViewDimensions])
+ )
+ .fn(t => {
+ const { viewDimension } = t.params;
+
+    const success = viewDimension !== 'cube' && viewDimension !== 'cube-array';
+
+ t.expectValidationError(() => {
+ t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE,
+ storageTexture: { format: 'rgba8unorm', viewDimension },
+ },
+ ],
+ });
+ }, !success);
+ });
+
+g.test('storage_texture,formats')
+ .desc(
+ `
+ Test that a validation error is generated if the format doesn't support the storage usage.
+ `
+ )
+ .params(u => u.combine('format', kAllTextureFormats))
+ .beforeAllSubcases(t => {
+ t.selectDeviceForTextureFormatOrSkipTestCase(t.params.format);
+ })
+ .fn(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ t.expectValidationError(
+ () => {
+ t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE,
+ storageTexture: { format },
+ },
+ ],
+ });
+ },
+ !info.color?.storage
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/createPipelineLayout.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/createPipelineLayout.spec.ts
new file mode 100644
index 0000000000..00e64cac74
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/createPipelineLayout.spec.ts
@@ -0,0 +1,164 @@
+export const description = `
+createPipelineLayout validation tests.
+
+TODO: review existing tests, write descriptions, and make sure tests are complete.
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { bufferBindingTypeInfo, kBufferBindingTypes } from '../../capability_info.js';
+
+import { ValidationTest } from './validation_test.js';
+
+function clone<T extends GPUBindGroupLayoutDescriptor>(descriptor: T): T {
+ return JSON.parse(JSON.stringify(descriptor));
+}
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('number_of_dynamic_buffers_exceeds_the_maximum_value')
+ .desc(
+ `
+ Test that creating a pipeline layout fails with a validation error if the number of dynamic
+ buffers exceeds the maximum value in the pipeline layout.
+  - Test that creation of a pipeline layout using the maximum number of dynamic buffers plus one
+    additional dynamic buffer fails.
+
+ TODO(#230): Update to enforce per-stage and per-pipeline-layout limits on BGLs as well.
+ `
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('visibility', [0, 2, 4, 6])
+ .combine('type', kBufferBindingTypes)
+ )
+ .fn(t => {
+ const { type, visibility } = t.params;
+ const info = bufferBindingTypeInfo({ type });
+ const { maxDynamicLimit } = info.perPipelineLimitClass;
+ const perStageLimit = t.getDefaultLimit(info.perStageLimitClass.maxLimit);
+ const maxDynamic = Math.min(
+ maxDynamicLimit ? t.getDefaultLimit(maxDynamicLimit) : 0,
+ perStageLimit
+ );
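+    // Illustrative example (spec defaults): maxDynamic is min(8, 12) = 8 for 'uniform' and
+    // min(4, 8) = 4 for 'storage' and 'read-only-storage'.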
+
+ const maxDynamicBufferBindings: GPUBindGroupLayoutEntry[] = [];
+ for (let binding = 0; binding < maxDynamic; binding++) {
+ maxDynamicBufferBindings.push({
+ binding,
+ visibility,
+ buffer: { type, hasDynamicOffset: true },
+ });
+ }
+
+ const maxDynamicBufferBindGroupLayout = t.device.createBindGroupLayout({
+ entries: maxDynamicBufferBindings,
+ });
+
+ const goodDescriptor = {
+ entries: [{ binding: 0, visibility, buffer: { type, hasDynamicOffset: false } }],
+ };
+
+ if (perStageLimit > maxDynamic) {
+ const goodPipelineLayoutDescriptor = {
+ bindGroupLayouts: [
+ maxDynamicBufferBindGroupLayout,
+ t.device.createBindGroupLayout(goodDescriptor),
+ ],
+ };
+
+ // Control case
+ t.device.createPipelineLayout(goodPipelineLayoutDescriptor);
+ }
+
+ // Check dynamic buffers exceed maximum in pipeline layout.
+ const badDescriptor = clone(goodDescriptor);
+ badDescriptor.entries[0].buffer.hasDynamicOffset = true;
+
+ const badPipelineLayoutDescriptor = {
+ bindGroupLayouts: [
+ maxDynamicBufferBindGroupLayout,
+ t.device.createBindGroupLayout(badDescriptor),
+ ],
+ };
+
+ t.expectValidationError(() => {
+ t.device.createPipelineLayout(badPipelineLayoutDescriptor);
+ });
+ });
+
+g.test('number_of_bind_group_layouts_exceeds_the_maximum_value')
+ .desc(
+ `
+ Test that creating a pipeline layout fails with a validation error if the number of bind group
+ layouts exceeds the maximum value in the pipeline layout.
+  - Test that creation of a pipeline layout using the maximum number of bind group layouts plus
+    one additional bind group layout fails.
+ `
+ )
+ .fn(t => {
+ const bindGroupLayoutDescriptor: GPUBindGroupLayoutDescriptor = {
+ entries: [],
+ };
+
+    // 4 is the default maxBindGroups limit, i.e. the maximum number of bind group layouts.
+ const maxBindGroupLayouts = [1, 2, 3, 4].map(() =>
+ t.device.createBindGroupLayout(bindGroupLayoutDescriptor)
+ );
+
+ const goodPipelineLayoutDescriptor = {
+ bindGroupLayouts: maxBindGroupLayouts,
+ };
+
+ // Control case
+ t.device.createPipelineLayout(goodPipelineLayoutDescriptor);
+
+ // Check bind group layouts exceed maximum in pipeline layout.
+ const badPipelineLayoutDescriptor = {
+ bindGroupLayouts: [
+ ...maxBindGroupLayouts,
+ t.device.createBindGroupLayout(bindGroupLayoutDescriptor),
+ ],
+ };
+
+ t.expectValidationError(() => {
+ t.device.createPipelineLayout(badPipelineLayoutDescriptor);
+ });
+ });
+
+g.test('bind_group_layouts,device_mismatch')
+ .desc(
+ `
+    Tests that createPipelineLayout cannot be called with bind group layouts created from another
+    device. Test with two layouts to make sure all layouts are validated:
+ - layout0 and layout1 from same device
+ - layout0 and layout1 from different device
+ `
+ )
+ .paramsSubcasesOnly([
+ { layout0Mismatched: false, layout1Mismatched: false }, // control case
+ { layout0Mismatched: true, layout1Mismatched: false },
+ { layout0Mismatched: false, layout1Mismatched: true },
+ ])
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { layout0Mismatched, layout1Mismatched } = t.params;
+
+ const mismatched = layout0Mismatched || layout1Mismatched;
+
+ const bglDescriptor: GPUBindGroupLayoutDescriptor = {
+ entries: [],
+ };
+
+ const layout0 = layout0Mismatched
+ ? t.mismatchedDevice.createBindGroupLayout(bglDescriptor)
+ : t.device.createBindGroupLayout(bglDescriptor);
+ const layout1 = layout1Mismatched
+ ? t.mismatchedDevice.createBindGroupLayout(bglDescriptor)
+ : t.device.createBindGroupLayout(bglDescriptor);
+
+ t.expectValidationError(() => {
+ t.device.createPipelineLayout({ bindGroupLayouts: [layout0, layout1] });
+ }, mismatched);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/createSampler.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/createSampler.spec.ts
new file mode 100644
index 0000000000..7a7f050ba9
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/createSampler.spec.ts
@@ -0,0 +1,68 @@
+export const description = `
+createSampler validation tests.
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+
+import { ValidationTest } from './validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('lodMinAndMaxClamp')
+ .desc('test different combinations of min and max clamp values')
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('lodMinClamp', [-4e-30, -1, 0, 0.5, 1, 10, 4e30])
+ .combine('lodMaxClamp', [-4e-30, -1, 0, 0.5, 1, 10, 4e30])
+ )
+ .fn(t => {
+ const shouldError =
+ t.params.lodMinClamp > t.params.lodMaxClamp ||
+ t.params.lodMinClamp < 0 ||
+ t.params.lodMaxClamp < 0;
+ t.expectValidationError(() => {
+ t.device.createSampler({
+ lodMinClamp: t.params.lodMinClamp,
+ lodMaxClamp: t.params.lodMaxClamp,
+ });
+ }, shouldError);
+ });
+
+g.test('maxAnisotropy')
+ .desc('test different maxAnisotropy values and combinations with min/mag/mipmapFilter')
+ .params(u =>
+ u //
+ .beginSubcases()
+ .combineWithParams([
+ ...u.combine('maxAnisotropy', [-1, undefined, 0, 1, 2, 4, 7, 16, 32, 33, 1024]),
+ { minFilter: 'nearest' as const },
+ { magFilter: 'nearest' as const },
+ { mipmapFilter: 'nearest' as const },
+ ])
+ )
+ .fn(t => {
+ const {
+ maxAnisotropy = 1,
+ minFilter = 'linear',
+ magFilter = 'linear',
+ mipmapFilter = 'linear',
+ } = t.params as {
+ maxAnisotropy?: number;
+ minFilter?: GPUFilterMode;
+ magFilter?: GPUFilterMode;
+ mipmapFilter?: GPUFilterMode;
+ };
+
+ const shouldError =
+ maxAnisotropy < 1 ||
+ (maxAnisotropy > 1 &&
+ !(minFilter === 'linear' && magFilter === 'linear' && mipmapFilter === 'linear'));
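+    // Illustrative example: maxAnisotropy 16 with the default 'linear' filters is valid, while 16
+    // combined with any 'nearest' filter errors, and values below 1 (e.g. 0 or -1) always error;
+    // an undefined maxAnisotropy defaults to 1, which is always valid.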
+ t.expectValidationError(() => {
+ t.device.createSampler({
+ minFilter,
+ magFilter,
+ mipmapFilter,
+ maxAnisotropy,
+ });
+ }, shouldError);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/createTexture.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/createTexture.spec.ts
new file mode 100644
index 0000000000..a9fe352b74
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/createTexture.spec.ts
@@ -0,0 +1,1130 @@
+export const description = `createTexture validation tests.`;
+
+import { SkipTestCase } from '../../../common/framework/fixture.js';
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { assert, makeValueTestVariant } from '../../../common/util/util.js';
+import { kTextureDimensions, kTextureUsages } from '../../capability_info.js';
+import { GPUConst } from '../../constants.js';
+import {
+ kTextureFormats,
+ kTextureFormatInfo,
+ kCompressedTextureFormats,
+ kUncompressedTextureFormats,
+ kRegularTextureFormats,
+ kFeaturesForFormats,
+ filterFormatsByFeature,
+ viewCompatible,
+ textureDimensionAndFormatCompatible,
+} from '../../format_info.js';
+import { maxMipLevelCount } from '../../util/texture/base.js';
+
+import { ValidationTest } from './validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('zero_size_and_usage')
+ .desc(
+    `Test texture creation with a zero or nonzero width, height, depthOrArrayLayers, mipLevelCount,
+    and usage, for every dimension type and a set of representative formats.
+ `
+ )
+ .params(u =>
+ u
+ .combine('dimension', [undefined, ...kTextureDimensions])
+ .combine('format', [
+ 'rgba8unorm',
+ 'rgb10a2unorm',
+ 'bc1-rgba-unorm',
+ 'depth24plus-stencil8',
+ ] as const)
+ .beginSubcases()
+ .combine('zeroArgument', [
+ 'none',
+ 'width',
+ 'height',
+ 'depthOrArrayLayers',
+ 'mipLevelCount',
+ 'usage',
+ ] as const)
+ // Filter out incompatible dimension type and format combinations.
+ .filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format))
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { dimension, zeroArgument, format } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ const size = [info.blockWidth, info.blockHeight, 1];
+ let mipLevelCount = 1;
+ let usage = GPUTextureUsage.TEXTURE_BINDING;
+
+ switch (zeroArgument) {
+ case 'width':
+ size[0] = 0;
+ break;
+ case 'height':
+ size[1] = 0;
+ break;
+ case 'depthOrArrayLayers':
+ size[2] = 0;
+ break;
+ case 'mipLevelCount':
+ mipLevelCount = 0;
+ break;
+ case 'usage':
+ usage = 0;
+ break;
+ default:
+ break;
+ }
+
+ const descriptor = {
+ size,
+ mipLevelCount,
+ dimension,
+ format,
+ usage,
+ };
+
+ const success = zeroArgument === 'none';
+
+ t.expectValidationError(() => {
+ t.device.createTexture(descriptor);
+ }, !success);
+ });
+
+g.test('dimension_type_and_format_compatibility')
+ .desc(
+ `Test every dimension type on every format. Note that compressed formats and depth/stencil formats are not valid for 1D/3D dimension types.`
+ )
+ .params(u =>
+ u.combine('dimension', [undefined, ...kTextureDimensions]).combine('format', kTextureFormats)
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+ t.skipIfTextureFormatNotSupported(format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { dimension, format } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ const descriptor: GPUTextureDescriptor = {
+ size: [info.blockWidth, info.blockHeight, 1],
+ dimension,
+ format,
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ };
+
+ t.expectValidationError(() => {
+ t.device.createTexture(descriptor);
+ }, !textureDimensionAndFormatCompatible(dimension, format));
+ });
+
+g.test('mipLevelCount,format')
+ .desc(
+ `Test texture creation with no mipmap chain, partial mipmap chain, full mipmap chain, out-of-bounds mipmap chain
+ for every format with different texture dimension types.`
+ )
+ .params(u =>
+ u
+ .combine('dimension', [undefined, ...kTextureDimensions])
+ .combine('format', kTextureFormats)
+ .beginSubcases()
+ .combine('mipLevelCount', [1, 2, 3, 6, 7])
+ // Filter out incompatible dimension type and format combinations.
+ .filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format))
+ .combine('largestDimension', [0, 1, 2])
+ .unless(({ dimension, largestDimension }) => dimension === '1d' && largestDimension > 0)
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+ t.skipIfTextureFormatNotSupported(format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { dimension, format, mipLevelCount, largestDimension } = t.params;
+ const info = kTextureFormatInfo[format];
+
+    // Compute dimensions such that the dimensions are in range [17, 32] and aligned with the
+    // format block size so that there will be exactly 5 mip levels.
+ const kTargetMipLevelCount = 5;
+ const kTargetLargeSize = (1 << kTargetMipLevelCount) - 1;
+ const largeSize = [
+ Math.floor(kTargetLargeSize / info.blockWidth) * info.blockWidth,
+ Math.floor(kTargetLargeSize / info.blockHeight) * info.blockHeight,
+ kTargetLargeSize,
+ ];
+ assert(17 <= largeSize[0] && largeSize[0] <= 32);
+ assert(17 <= largeSize[1] && largeSize[1] <= 32);
+
+    // Note that compressed formats are not valid for 1D. They have already been filtered out for 1D
+    // in this test. So there is no dilemma about size.width equals 1 vs
+    // size.width % info.blockWidth equals 0 for 1D compressed formats.
+ const size = [info.blockWidth, info.blockHeight, 1];
+ size[largestDimension] = largeSize[largestDimension];
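+    // Illustrative example: for 'bc1-rgba-unorm' (4x4 blocks) largeSize is [28, 28, 31], so with
+    // largestDimension 0 the size becomes [28, 4, 1]; its largest dimension 28 allows
+    // floor(log2(28)) + 1 = 5 mip levels, hence mipLevelCount 1, 2, 3 succeed and 6, 7 fail.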
+
+ const descriptor = {
+ size,
+ mipLevelCount,
+ dimension,
+ format,
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ };
+
+ const success = mipLevelCount <= maxMipLevelCount(descriptor);
+
+ t.expectValidationError(() => {
+ t.device.createTexture(descriptor);
+ }, !success);
+ });
+
+g.test('mipLevelCount,bound_check')
+ .desc(
+    `Test the mip level count bound check for different texture sizes and texture dimension types.
+    The cases below test: 1) there must be no mip levels after a 1 (1D texture), 1x1 (2D texture), or 1x1x1 (3D texture) level, 2) array layers are not mip-mapped, 3) power-of-two, non-power-of-two, and non-square sizes.`
+ )
+ .params(u =>
+ u //
+ .combine('format', ['rgba8unorm', 'bc1-rgba-unorm'] as const)
+ .beginSubcases()
+ .combineWithParams([
+ { size: [32, 32] }, // Mip level sizes: 32x32, 16x16, 8x8, 4x4, 2x2, 1x1
+ { size: [31, 32] }, // Mip level sizes: 31x32, 15x16, 7x8, 3x4, 1x2, 1x1
+ { size: [28, 32] }, // Mip level sizes: 28x32, 14x16, 7x8, 3x4, 1x2, 1x1
+ { size: [32, 31] }, // Mip level sizes: 32x31, 16x15, 8x7, 4x3, 2x1, 1x1
+ { size: [32, 28] }, // Mip level sizes: 32x28, 16x14, 8x7, 4x3, 2x1, 1x1
+ { size: [31, 31] }, // Mip level sizes: 31x31, 15x15, 7x7, 3x3, 1x1
+ { size: [32], dimension: '1d' as const }, // Mip level sizes: 32, 16, 8, 4, 2, 1
+ { size: [31], dimension: '1d' as const }, // Mip level sizes: 31, 15, 7, 3, 1
+ { size: [32, 32, 32], dimension: '3d' as const }, // Mip level sizes: 32x32x32, 16x16x16, 8x8x8, 4x4x4, 2x2x2, 1x1x1
+ { size: [32, 31, 31], dimension: '3d' as const }, // Mip level sizes: 32x31x31, 16x15x15, 8x7x7, 4x3x3, 2x1x1, 1x1x1
+ { size: [31, 32, 31], dimension: '3d' as const }, // Mip level sizes: 31x32x31, 15x16x15, 7x8x7, 3x4x3, 1x2x1, 1x1x1
+ { size: [31, 31, 32], dimension: '3d' as const }, // Mip level sizes: 31x31x32, 15x15x16, 7x7x8, 3x3x4, 1x1x2, 1x1x1
+ { size: [31, 31, 31], dimension: '3d' as const }, // Mip level sizes: 31x31x31, 15x15x15, 7x7x7, 3x3x3, 1x1x1
+ { size: [32, 8] }, // Mip levels: 32x8, 16x4, 8x2, 4x1, 2x1, 1x1
+ { size: [32, 32, 64] }, // Mip levels: 32x32x64, 16x16x64, 8x8x64, 4x4x64, 2x2x64, 1x1x64
+ { size: [32, 32, 64], dimension: '3d' as const }, // Mip levels: 32x32x64, 16x16x32, 8x8x16, 4x4x8, 2x2x4, 1x1x2, 1x1x1
+ ])
+ .unless(
+ ({ format, size, dimension }) =>
+ format === 'bc1-rgba-unorm' &&
+ (dimension === '1d' ||
+ dimension === '3d' ||
+ size[0] % kTextureFormatInfo[format].blockWidth !== 0 ||
+ size[1] % kTextureFormatInfo[format].blockHeight !== 0)
+ )
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { format, size, dimension } = t.params;
+
+ const descriptor = {
+ size,
+ mipLevelCount: 0,
+ dimension,
+ format,
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ };
+
+ const mipLevelCount = maxMipLevelCount(descriptor);
+ descriptor.mipLevelCount = mipLevelCount;
+ t.device.createTexture(descriptor);
+
+ descriptor.mipLevelCount = mipLevelCount + 1;
+ t.expectValidationError(() => {
+ t.device.createTexture(descriptor);
+ });
+ });
+
+g.test('mipLevelCount,bound_check,bigger_than_integer_bit_width')
+  .desc(`Test the mip level count bound check when mipLevelCount is larger than the integer bit width`)
+ .fn(t => {
+ const descriptor = {
+ size: [32, 32],
+ mipLevelCount: 100,
+ format: 'rgba8unorm' as const,
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ };
+
+ t.expectValidationError(() => {
+ t.device.createTexture(descriptor);
+ });
+ });
+
+g.test('sampleCount,various_sampleCount_with_all_formats')
+ .desc(
+    `Test texture creation with various (valid or invalid) sample counts and all formats. Note that 1D and 3D textures don't support multisampling.`
+ )
+ .params(u =>
+ u
+ .combine('dimension', [undefined, '2d'] as const)
+ .combine('format', kTextureFormats)
+ .beginSubcases()
+ .combine('sampleCount', [0, 1, 2, 4, 8, 16, 32, 256])
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+ t.skipIfTextureFormatNotSupported(format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { dimension, sampleCount, format } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ const usage =
+ sampleCount > 1
+ ? GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.RENDER_ATTACHMENT
+ : GPUTextureUsage.TEXTURE_BINDING;
+ const descriptor = {
+ size: [32 * info.blockWidth, 32 * info.blockHeight, 1],
+ sampleCount,
+ dimension,
+ format,
+ usage,
+ };
+
+ const success = sampleCount === 1 || (sampleCount === 4 && info.multisample && info.renderable);
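+    // Illustrative example: WebGPU v1 only allows sampleCount 1 or 4, and 4 additionally requires
+    // a renderable format that supports multisampling, so 'rgba8unorm' succeeds at 1 and 4 while
+    // 0, 2, 8, 16, 32, and 256 always produce a validation error.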
+
+ t.expectValidationError(() => {
+ t.device.createTexture(descriptor);
+ }, !success);
+ });
+
+g.test('sampleCount,valid_sampleCount_with_other_parameter_varies')
+ .desc(
+    `Test texture creation with a valid sample count while dimensions, arrayLayerCount, mipLevelCount,
+    format, and usage vary. A texture can be single-sample (sampleCount is 1) or multi-sample
+    (sampleCount is 4). A multisample texture requires that
+ 1) its dimension is 2d or undefined,
+ 2) its format supports multisample,
+ 3) its mipLevelCount and arrayLayerCount are 1,
+ 4) its usage doesn't include STORAGE_BINDING,
+ 5) its usage includes RENDER_ATTACHMENT.`
+ )
+ .params(u =>
+ u
+ .combine('dimension', [undefined, ...kTextureDimensions])
+ .combine('format', kTextureFormats)
+ .beginSubcases()
+ .combine('sampleCount', [1, 4])
+ .combine('arrayLayerCount', [1, 2])
+ .unless(
+ ({ dimension, arrayLayerCount }) =>
+ arrayLayerCount === 2 && dimension !== '2d' && dimension !== undefined
+ )
+ .combine('mipLevelCount', [1, 2])
+ .expand('usage', () => {
+ const usageSet = new Set<number>();
+ for (const usage0 of kTextureUsages) {
+ for (const usage1 of kTextureUsages) {
+ usageSet.add(usage0 | usage1);
+ }
+ }
+ return usageSet;
+ })
+ // Filter out incompatible dimension type and format combinations.
+ .filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format))
+ .unless(({ usage, format, mipLevelCount, dimension }) => {
+ const info = kTextureFormatInfo[format];
+ return (
+ ((usage & GPUConst.TextureUsage.RENDER_ATTACHMENT) !== 0 &&
+ (!info.colorRender || dimension !== '2d')) ||
+ ((usage & GPUConst.TextureUsage.STORAGE_BINDING) !== 0 && !info.color?.storage) ||
+ (mipLevelCount !== 1 && dimension === '1d')
+ );
+ })
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+ t.skipIfTextureFormatNotSupported(format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { dimension, sampleCount, format, mipLevelCount, arrayLayerCount, usage } = t.params;
+ const { blockWidth, blockHeight } = kTextureFormatInfo[format];
+
+ const size =
+ dimension === '1d'
+ ? [32 * blockWidth, 1 * blockHeight, 1]
+ : dimension === '2d' || dimension === undefined
+ ? [32 * blockWidth, 32 * blockHeight, arrayLayerCount]
+ : [32 * blockWidth, 32 * blockHeight, 32];
+ const descriptor = {
+ size,
+ mipLevelCount,
+ sampleCount,
+ dimension,
+ format,
+ usage,
+ };
+
+ const success =
+ sampleCount === 1 ||
+ (sampleCount === 4 &&
+ (dimension === '2d' || dimension === undefined) &&
+ kTextureFormatInfo[format].multisample &&
+ mipLevelCount === 1 &&
+ arrayLayerCount === 1 &&
+ (usage & GPUConst.TextureUsage.RENDER_ATTACHMENT) !== 0 &&
+ (usage & GPUConst.TextureUsage.STORAGE_BINDING) === 0);
+
+ t.expectValidationError(() => {
+ t.device.createTexture(descriptor);
+ }, !success);
+ });
+
+g.test('sample_count,1d_2d_array_3d')
+  .desc(`Test that you cannot create 1d, 2d_array, and 3d multisampled textures`)
+ .params(u =>
+ u.combineWithParams([
+ { dimension: '2d', size: [4, 4, 1], shouldError: false },
+ { dimension: '1d', size: [4, 1, 1], shouldError: true },
+ { dimension: '2d', size: [4, 4, 4], shouldError: true },
+ { dimension: '2d', size: [4, 4, 6], shouldError: true },
+ { dimension: '3d', size: [4, 4, 4], shouldError: true },
+ ] as const)
+ )
+ .fn(t => {
+ const { dimension, size, shouldError } = t.params;
+
+ t.expectValidationError(() => {
+ t.device.createTexture({
+ size,
+ dimension,
+ sampleCount: 4,
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ }, shouldError);
+ });
+
+g.test('texture_size,default_value_and_smallest_size,uncompressed_format')
+ .desc(
+ `Test default values for height and depthOrArrayLayers for every dimension type and every uncompressed format.
+ It also tests smallest size (lower bound) for every dimension type and every uncompressed format, while other texture_size tests are testing the upper bound.`
+ )
+ .params(u =>
+ u
+ .combine('dimension', [undefined, ...kTextureDimensions])
+ .combine('format', kUncompressedTextureFormats)
+ .beginSubcases()
+ .combine('size', [[1], [1, 1], [1, 1, 1]])
+ // Filter out incompatible dimension type and format combinations.
+ .filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format))
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+ t.skipIfTextureFormatNotSupported(format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { dimension, format, size } = t.params;
+
+ const descriptor: GPUTextureDescriptor = {
+ size,
+ dimension,
+ format,
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ };
+
+ t.device.createTexture(descriptor);
+ });
+
+g.test('texture_size,default_value_and_smallest_size,compressed_format')
+ .desc(
+ `Test default values for height and depthOrArrayLayers for every dimension type and every compressed format.
+ It also tests smallest size (lower bound) for every dimension type and every compressed format, while other texture_size tests are testing the upper bound.`
+ )
+ .params(u =>
+ u
+ // Compressed formats are invalid for 1D and 3D.
+ .combine('dimension', [undefined, '2d'] as const)
+ .combine('format', kCompressedTextureFormats)
+ .beginSubcases()
+ .expandWithParams(p => {
+ const { blockWidth, blockHeight } = kTextureFormatInfo[p.format];
+ return [
+ { size: [1], _success: false },
+ { size: [blockWidth], _success: false },
+ { size: [1, 1], _success: false },
+ { size: [blockWidth, blockHeight], _success: true },
+ { size: [1, 1, 1], _success: false },
+ { size: [blockWidth, blockHeight, 1], _success: true },
+ ];
+ })
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { dimension, format, size, _success } = t.params;
+
+ const descriptor: GPUTextureDescriptor = {
+ size,
+ dimension,
+ format,
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ };
+
+ t.expectValidationError(() => {
+ t.device.createTexture(descriptor);
+ }, !_success);
+ });
+
+g.test('texture_size,1d_texture')
+ .desc(`Test texture size requirement for 1D texture`)
+ .params(u =>
+ u //
+ // Compressed and depth-stencil textures are invalid for 1D.
+ .combine('format', kRegularTextureFormats)
+ .beginSubcases()
+ .combine('widthVariant', [
+ { mult: 1, add: -1 },
+ { mult: 1, add: 0 },
+ { mult: 1, add: 1 },
+ ])
+ .combine('height', [1, 2])
+ .combine('depthOrArrayLayers', [1, 2])
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+ t.skipIfTextureFormatNotSupported(format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { format, widthVariant, height, depthOrArrayLayers } = t.params;
+ const width = t.makeLimitVariant('maxTextureDimension1D', widthVariant);
+
+ const descriptor: GPUTextureDescriptor = {
+ size: [width, height, depthOrArrayLayers],
+ dimension: '1d' as const,
+ format,
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ };
+
+ const success =
+ width <= t.device.limits.maxTextureDimension1D && height === 1 && depthOrArrayLayers === 1;
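+    // Illustrative example (assuming makeLimitVariant yields limit * mult + add and the
+    // spec-default maxTextureDimension1D of 8192): widths 8191 and 8192 succeed when height and
+    // depthOrArrayLayers are 1, width 8193 fails, and any height or depthOrArrayLayers of 2 fails
+    // because 1D textures must be one texel tall and one layer deep.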
+
+ t.expectValidationError(() => {
+ t.device.createTexture(descriptor);
+ }, !success);
+ });
+
+g.test('texture_size,2d_texture,uncompressed_format')
+ .desc(`Test texture size requirement for 2D texture with uncompressed format.`)
+ .params(u =>
+ u
+ .combine('dimension', [undefined, '2d'] as const)
+ .combine('format', kUncompressedTextureFormats)
+ .combine(
+ 'sizeVariant',
+ /* prettier-ignore */ [
+ // Test the bound of width
+ [{ mult: 1, add: -1 }, { mult: 0, add: 1 }, { mult: 0, add: 1 }],
+ [{ mult: 1, add: 0 }, { mult: 0, add: 1 }, { mult: 0, add: 1 }],
+ [{ mult: 1, add: 1 }, { mult: 0, add: 1 }, { mult: 0, add: 1 }],
+ // Test the bound of height
+ [{ mult: 0, add: 1 }, { mult: 1, add: -1 }, { mult: 0, add: 1 }],
+ [{ mult: 0, add: 1 }, { mult: 1, add: 0 }, { mult: 0, add: 1 }],
+ [{ mult: 0, add: 1 }, { mult: 1, add: 1 }, { mult: 0, add: 1 }],
+ // Test the bound of array layers
+ [{ mult: 0, add: 1 }, { mult: 0, add: 1 }, { mult: 1, add: -1 }],
+ [{ mult: 0, add: 1 }, { mult: 0, add: 1 }, { mult: 1, add: 0 }],
+ [{ mult: 0, add: 1 }, { mult: 0, add: 1 }, { mult: 1, add: 1 }],
+ ]
+ )
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+ t.skipIfTextureFormatNotSupported(format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { dimension, format, sizeVariant } = t.params;
+ const size = [
+ t.device.limits.maxTextureDimension2D,
+ t.device.limits.maxTextureDimension2D,
+ t.device.limits.maxTextureArrayLayers,
+ ].map((limit, ndx) => makeValueTestVariant(limit, sizeVariant[ndx]));
+
+ const descriptor: GPUTextureDescriptor = {
+ size,
+ dimension,
+ format,
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ };
+
+ const success =
+ size[0] <= t.device.limits.maxTextureDimension2D &&
+ size[1] <= t.device.limits.maxTextureDimension2D &&
+ size[2] <= t.device.limits.maxTextureArrayLayers;
+
+ t.expectValidationError(() => {
+ t.device.createTexture(descriptor);
+ }, !success);
+ });
+
+g.test('texture_size,2d_texture,compressed_format')
+ .desc(`Test texture size requirement for 2D texture with compressed format.`)
+ .params(u =>
+ u
+ .combine('dimension', [undefined, '2d'] as const)
+ .combine('format', kCompressedTextureFormats)
+ .expand('sizeVariant', p => {
+ const { blockWidth, blockHeight } = kTextureFormatInfo[p.format];
+ return [
+ // Test the bound of width
+ [
+ { mult: 1, add: -1 },
+ { mult: 0, add: 1 },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 1, add: -blockWidth },
+ { mult: 0, add: 1 },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 1, add: -blockWidth },
+ { mult: 0, add: blockHeight },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 1, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 1, add: 0 },
+ { mult: 0, add: blockHeight },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 1, add: 1 },
+ { mult: 0, add: 1 },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 1, add: blockWidth },
+ { mult: 0, add: 1 },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 1, add: blockWidth },
+ { mult: 0, add: blockHeight },
+ { mult: 0, add: 1 },
+ ],
+ // Test the bound of height
+ [
+ { mult: 0, add: 1 },
+ { mult: 1, add: -1 },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 0, add: 1 },
+ { mult: 1, add: -blockHeight },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 0, add: blockWidth },
+ { mult: 1, add: -blockHeight },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 0, add: 1 },
+ { mult: 1, add: 0 },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 0, add: blockWidth },
+ { mult: 1, add: 0 },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 0, add: 1 },
+ { mult: 1, add: +1 },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 0, add: 1 },
+ { mult: 1, add: +blockWidth },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 0, add: blockWidth },
+ { mult: 1, add: +blockHeight },
+ { mult: 0, add: 1 },
+ ],
+ // Test the bound of array layers
+ [
+ { mult: 0, add: 1 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: -1 },
+ ],
+ [
+ { mult: 0, add: blockWidth },
+ { mult: 0, add: 1 },
+ { mult: 1, add: -1 },
+ ],
+ [
+ { mult: 0, add: 1 },
+ { mult: 0, add: blockHeight },
+ { mult: 1, add: -1 },
+ ],
+ [
+ { mult: 0, add: blockWidth },
+ { mult: 0, add: blockHeight },
+ { mult: 1, add: -1 },
+ ],
+ [
+ { mult: 0, add: 1 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: 0 },
+ ],
+ [
+ { mult: 0, add: blockWidth },
+ { mult: 0, add: 1 },
+ { mult: 1, add: 0 },
+ ],
+ [
+ { mult: 0, add: 1 },
+ { mult: 0, add: blockHeight },
+ { mult: 1, add: 0 },
+ ],
+ [
+ { mult: 0, add: blockWidth },
+ { mult: 0, add: blockHeight },
+ { mult: 1, add: 0 },
+ ],
+ [
+ { mult: 0, add: 1 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: +1 },
+ ],
+ [
+ { mult: 0, add: blockWidth },
+ { mult: 0, add: 1 },
+ { mult: 1, add: +1 },
+ ],
+ [
+ { mult: 0, add: 1 },
+ { mult: 0, add: blockHeight },
+ { mult: 1, add: +1 },
+ ],
+ [
+ { mult: 0, add: blockWidth },
+ { mult: 0, add: blockHeight },
+ { mult: 1, add: +1 },
+ ],
+ ];
+ })
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { dimension, format, sizeVariant } = t.params;
+ const info = kTextureFormatInfo[format];
+ const size = [
+ t.device.limits.maxTextureDimension2D,
+ t.device.limits.maxTextureDimension2D,
+ t.device.limits.maxTextureArrayLayers,
+ ].map((limit, ndx) => makeValueTestVariant(limit, sizeVariant[ndx]));
+
+ const descriptor: GPUTextureDescriptor = {
+ size,
+ dimension,
+ format,
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ };
+
+ const success =
+ size[0] % info.blockWidth === 0 &&
+ size[1] % info.blockHeight === 0 &&
+ size[0] <= t.device.limits.maxTextureDimension2D &&
+ size[1] <= t.device.limits.maxTextureDimension2D &&
+ size[2] <= t.device.limits.maxTextureArrayLayers;
+
+ t.expectValidationError(() => {
+ t.device.createTexture(descriptor);
+ }, !success);
+ });
+
+g.test('texture_size,3d_texture,uncompressed_format')
+ .desc(
+ `Test texture size requirement for 3D texture with uncompressed format. Note that depth/stencil formats are invalid for 3D textures, so we only test regular formats.`
+ )
+ .params(u =>
+ u //
+ .combine('format', kRegularTextureFormats)
+ .beginSubcases()
+ .combine(
+ 'sizeVariant',
+ /* prettier-ignore */ [
+ // Test the bound of width
+ [{ mult: 1, add: -1 }, { mult: 0, add: 1 }, { mult: 0, add: 1 }],
+ [{ mult: 1, add: 0 }, { mult: 0, add: 1 }, { mult: 0, add: 1 }],
+ [{ mult: 1, add: +1 }, { mult: 0, add: 1 }, { mult: 0, add: 1 }],
+ // Test the bound of height
+ [{ mult: 0, add: 1 }, { mult: 1, add: -1 }, { mult: 0, add: 1 }],
+ [{ mult: 0, add: 1 }, { mult: 1, add: 0 }, { mult: 0, add: 1 }],
+ [{ mult: 0, add: 1 }, { mult: 1, add: +1 }, { mult: 0, add: 1 }],
+ // Test the bound of depth
+ [{ mult: 0, add: 1 }, { mult: 0, add: 1 }, { mult: 1, add: -1 }],
+ [{ mult: 0, add: 1 }, { mult: 0, add: 1 }, { mult: 1, add: 0 }],
+ [{ mult: 0, add: 1 }, { mult: 0, add: 1 }, { mult: 1, add: +1 }],
+ ]
+ )
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+ t.skipIfTextureFormatNotSupported(format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { format, sizeVariant } = t.params;
+ const maxTextureDimension3D = t.device.limits.maxTextureDimension3D;
+ const size = sizeVariant.map(variant => t.makeLimitVariant('maxTextureDimension3D', variant));
+
+ const descriptor: GPUTextureDescriptor = {
+ size,
+ dimension: '3d' as const,
+ format,
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ };
+
+ const success =
+ size[0] <= maxTextureDimension3D &&
+ size[1] <= maxTextureDimension3D &&
+ size[2] <= maxTextureDimension3D;
+
+ t.expectValidationError(() => {
+ t.device.createTexture(descriptor);
+ }, !success);
+ });
+
+g.test('texture_size,3d_texture,compressed_format')
+ .desc(`Test texture size requirement for 3D texture with compressed format.`)
+ .params(u =>
+ u //
+ .combine('format', kCompressedTextureFormats)
+ .beginSubcases()
+ .expand('sizeVariant', p => {
+ const { blockWidth, blockHeight } = kTextureFormatInfo[p.format];
+ return [
+ // Test the bound of width
+ [
+ { mult: 1, add: -1 },
+ { mult: 0, add: 1 },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 1, add: -blockWidth },
+ { mult: 0, add: 1 },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 1, add: -blockWidth },
+ { mult: 0, add: blockHeight },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 1, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 1, add: 0 },
+ { mult: 0, add: blockHeight },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 1, add: +1 },
+ { mult: 0, add: 1 },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 1, add: +blockWidth },
+ { mult: 0, add: 1 },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 1, add: +blockWidth },
+ { mult: 0, add: blockHeight },
+ { mult: 0, add: 1 },
+ ],
+ // Test the bound of height
+ [
+ { mult: 0, add: 1 },
+ { mult: 1, add: -1 },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 0, add: 1 },
+ { mult: 1, add: -blockHeight },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 0, add: blockWidth },
+ { mult: 1, add: -blockHeight },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 0, add: 1 },
+ { mult: 1, add: 0 },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 0, add: blockWidth },
+ { mult: 1, add: 0 },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 0, add: 1 },
+ { mult: 1, add: +1 },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 0, add: 1 },
+ { mult: 1, add: +blockWidth },
+ { mult: 0, add: 1 },
+ ],
+ [
+ { mult: 0, add: blockWidth },
+ { mult: 1, add: +blockHeight },
+ { mult: 0, add: 1 },
+ ],
+ // Test the bound of depth
+ [
+ { mult: 0, add: 1 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: -1 },
+ ],
+ [
+ { mult: 0, add: blockWidth },
+ { mult: 0, add: 1 },
+ { mult: 1, add: -1 },
+ ],
+ [
+ { mult: 0, add: 1 },
+ { mult: 0, add: blockHeight },
+ { mult: 1, add: -1 },
+ ],
+ [
+ { mult: 0, add: blockWidth },
+ { mult: 0, add: blockHeight },
+ { mult: 1, add: -1 },
+ ],
+ [
+ { mult: 0, add: 1 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: 0 },
+ ],
+ [
+ { mult: 0, add: blockWidth },
+ { mult: 0, add: 1 },
+ { mult: 1, add: 0 },
+ ],
+ [
+ { mult: 0, add: 1 },
+ { mult: 0, add: blockHeight },
+ { mult: 1, add: 0 },
+ ],
+ [
+ { mult: 0, add: blockWidth },
+ { mult: 0, add: blockHeight },
+ { mult: 1, add: 0 },
+ ],
+ [
+ { mult: 0, add: 1 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: +1 },
+ ],
+ [
+ { mult: 0, add: blockWidth },
+ { mult: 0, add: 1 },
+ { mult: 1, add: +1 },
+ ],
+ [
+ { mult: 0, add: 1 },
+ { mult: 0, add: blockHeight },
+ { mult: 1, add: +1 },
+ ],
+ [
+ { mult: 0, add: blockWidth },
+ { mult: 0, add: blockHeight },
+ { mult: 1, add: +1 },
+ ],
+ ];
+ })
+ )
+ .beforeAllSubcases(t => {
+    // Compressed formats are not supported for 3D textures in WebGPU v1: they add implementation complexity without much practical benefit for now.
+ throw new SkipTestCase('Compressed 3D texture is not supported');
+
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { format, sizeVariant } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ const maxTextureDimension3D = t.device.limits.maxTextureDimension3D;
+ const size = sizeVariant.map(variant => t.makeLimitVariant('maxTextureDimension3D', variant));
+
+ assert(
+ maxTextureDimension3D % info.blockWidth === 0 &&
+ maxTextureDimension3D % info.blockHeight === 0
+ );
+
+ const descriptor: GPUTextureDescriptor = {
+ size,
+ dimension: '3d' as const,
+ format,
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ };
+
+ const success =
+ size[0] % info.blockWidth === 0 &&
+ size[1] % info.blockHeight === 0 &&
+ size[0] <= maxTextureDimension3D &&
+ size[1] <= maxTextureDimension3D &&
+ size[2] <= maxTextureDimension3D;
+
+ t.expectValidationError(() => {
+ t.device.createTexture(descriptor);
+ }, !success);
+ });
+
+g.test('texture_usage')
+ .desc(
+ `Test texture usage (single usage or combined usages) for every texture format and every dimension type`
+ )
+ .params(u =>
+ u
+ .combine('dimension', [undefined, ...kTextureDimensions])
+ .combine('format', kTextureFormats)
+ .beginSubcases()
+      // If usage0 and usage1 are the same, then the usage being tested is a single usage. Otherwise, it is a combined usage.
+ .combine('usage0', kTextureUsages)
+ .combine('usage1', kTextureUsages)
+ // Filter out incompatible dimension type and format combinations.
+ .filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format))
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+ t.skipIfTextureFormatNotSupported(format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { dimension, format, usage0, usage1 } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ const size = [info.blockWidth, info.blockHeight, 1];
+ const usage = usage0 | usage1;
+ const descriptor = {
+ size,
+ dimension,
+ format,
+ usage,
+ };
+
+ let success = true;
+ const appliedDimension = dimension ?? '2d';
+ // Note that we unconditionally test copy usages for all formats. We don't check copySrc/copyDst in kTextureFormatInfo in capability_info.js
+ // if (!info.copySrc && (usage & GPUTextureUsage.COPY_SRC) !== 0) success = false;
+ // if (!info.copyDst && (usage & GPUTextureUsage.COPY_DST) !== 0) success = false;
+ if (!info.color?.storage && (usage & GPUTextureUsage.STORAGE_BINDING) !== 0) success = false;
+ if (
+ (!info.renderable || appliedDimension !== '2d') &&
+ (usage & GPUTextureUsage.RENDER_ATTACHMENT) !== 0
+ )
+ success = false;
+
+ t.expectValidationError(() => {
+ t.device.createTexture(descriptor);
+ }, !success);
+ });
+
+g.test('viewFormats')
+ .desc(
+    `Test creating a texture with a viewFormats list, for all {texture format}x{view format} combinations. Only compatible view formats should be valid.`
+ )
+ .params(u =>
+ u
+ .combine('formatFeature', kFeaturesForFormats)
+ .combine('viewFormatFeature', kFeaturesForFormats)
+ .beginSubcases()
+ .expand('format', ({ formatFeature }) =>
+ filterFormatsByFeature(formatFeature, kTextureFormats)
+ )
+ .expand('viewFormat', ({ viewFormatFeature }) =>
+ filterFormatsByFeature(viewFormatFeature, kTextureFormats)
+ )
+ )
+ .beforeAllSubcases(t => {
+ const { formatFeature, viewFormatFeature } = t.params;
+ t.selectDeviceOrSkipTestCase([formatFeature, viewFormatFeature]);
+ })
+ .fn(t => {
+ const { format, viewFormat } = t.params;
+ const { blockWidth, blockHeight } = kTextureFormatInfo[format];
+
+ t.skipIfTextureFormatNotSupported(format, viewFormat);
+
+ const compatible = viewCompatible(format, viewFormat);
+
+ // Test the viewFormat in the list.
+ t.expectValidationError(() => {
+ t.device.createTexture({
+ format,
+ size: [blockWidth, blockHeight],
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ viewFormats: [viewFormat],
+ });
+ }, !compatible);
+
+ // Test the viewFormat and the texture format in the list.
+ t.expectValidationError(() => {
+ t.device.createTexture({
+ format,
+ size: [blockWidth, blockHeight],
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ viewFormats: [viewFormat, format],
+ });
+ }, !compatible);
+
+ // Test the viewFormat multiple times in the list.
+ t.expectValidationError(() => {
+ t.device.createTexture({
+ format,
+ size: [blockWidth, blockHeight],
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ viewFormats: [viewFormat, viewFormat],
+ });
+ }, !compatible);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/createView.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/createView.spec.ts
new file mode 100644
index 0000000000..e4871c5d80
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/createView.spec.ts
@@ -0,0 +1,340 @@
+export const description = `createView validation tests.`;
+
+import { kUnitCaseParamsBuilder } from '../../../common/framework/params_builder.js';
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { unreachable } from '../../../common/util/util.js';
+import {
+ kTextureAspects,
+ kTextureDimensions,
+ kTextureViewDimensions,
+} from '../../capability_info.js';
+import {
+ kTextureFormatInfo,
+ kTextureFormats,
+ kFeaturesForFormats,
+ filterFormatsByFeature,
+ viewCompatible,
+} from '../../format_info.js';
+import { kResourceStates } from '../../gpu_test.js';
+import {
+ getTextureDimensionFromView,
+ reifyTextureViewDescriptor,
+ viewDimensionsForTextureDimension,
+} from '../../util/texture/base.js';
+import { reifyExtent3D } from '../../util/unions.js';
+
+import { ValidationTest } from './validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+const kLevels = 6;
+
+g.test('format')
+ .desc(
+    `Views must have a view format compatible with the base texture's format, for all {texture format}x{view format} combinations.`
+ )
+ .params(u =>
+ u
+ .combine('textureFormatFeature', kFeaturesForFormats)
+ .combine('viewFormatFeature', kFeaturesForFormats)
+ .beginSubcases()
+ .expand('textureFormat', ({ textureFormatFeature }) =>
+ filterFormatsByFeature(textureFormatFeature, kTextureFormats)
+ )
+ .expand('viewFormat', ({ viewFormatFeature }) =>
+ filterFormatsByFeature(viewFormatFeature, [undefined, ...kTextureFormats])
+ )
+ .combine('useViewFormatList', [false, true])
+ )
+ .beforeAllSubcases(t => {
+ const { textureFormatFeature, viewFormatFeature } = t.params;
+ t.selectDeviceOrSkipTestCase([textureFormatFeature, viewFormatFeature]);
+ })
+ .fn(t => {
+ const { textureFormat, viewFormat, useViewFormatList } = t.params;
+ const { blockWidth, blockHeight } = kTextureFormatInfo[textureFormat];
+
+ t.skipIfTextureFormatNotSupported(textureFormat, viewFormat);
+
+ const compatible = viewFormat === undefined || viewCompatible(textureFormat, viewFormat);
+
+ const texture = t.device.createTexture({
+ format: textureFormat,
+ size: [blockWidth, blockHeight],
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+
+ // This is a test of createView, not createTexture. Don't pass viewFormats here that
+ // are not compatible, as that is tested in createTexture.spec.ts.
+ viewFormats:
+ useViewFormatList && compatible && viewFormat !== undefined ? [viewFormat] : undefined,
+ });
+
+    // Successful if no view format was specified, if no reinterpretation was required (the view
+    // format equals the texture format), or if the formats are compatible and the view format was
+    // listed in viewFormats.
+ const success =
+ viewFormat === undefined || viewFormat === textureFormat || (compatible && useViewFormatList);
+ t.expectValidationError(() => {
+ texture.createView({ format: viewFormat });
+ }, !success);
+ });
+
+g.test('dimension')
+ .desc(
+ `For all {texture dimension}, {view dimension}, test that they must be compatible:
+ - 1d -> 1d
+ - 2d -> 2d, 2d-array, cube, or cube-array
+ - 3d -> 3d`
+ )
+ .params(u =>
+ u
+ .combine('textureDimension', kTextureDimensions)
+ .combine('viewDimension', [...kTextureViewDimensions, undefined])
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureViewDimensionNotSupported(t.params.viewDimension);
+ })
+ .fn(t => {
+ const { textureDimension, viewDimension } = t.params;
+
+ const size = textureDimension === '1d' ? [4] : [4, 4, 6];
+ const textureDescriptor = {
+ format: 'rgba8unorm' as const,
+ dimension: textureDimension,
+ size,
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ };
+ const texture = t.device.createTexture(textureDescriptor);
+
+ const view = { dimension: viewDimension };
+ const reified = reifyTextureViewDescriptor(textureDescriptor, view);
+
+ const success = getTextureDimensionFromView(reified.dimension) === textureDimension;
+ t.expectValidationError(() => {
+ texture.createView(view);
+ }, !success);
+ });
+
+g.test('aspect')
+ .desc(
+ `For every {format}x{aspect}, test that the view aspect must exist in the format:
+ - "all" is allowed for any format
+ - "depth-only" is allowed only for depth and depth-stencil formats
+ - "stencil-only" is allowed only for stencil and depth-stencil formats`
+ )
+ .params(u =>
+ u //
+ .combine('format', kTextureFormats)
+ .combine('aspect', kTextureAspects)
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ t.selectDeviceForTextureFormatOrSkipTestCase(format);
+ })
+ .fn(t => {
+ const { format, aspect } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ const texture = t.device.createTexture({
+ format,
+ size: [info.blockWidth, info.blockHeight, 1],
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ });
+
+ const success =
+ aspect === 'all' ||
+ (aspect === 'depth-only' && info.depth) ||
+ (aspect === 'stencil-only' && info.stencil);
+ t.expectValidationError(() => {
+ texture.createView({ aspect });
+ }, !success);
+ });
+
+const kTextureAndViewDimensions = kUnitCaseParamsBuilder
+ .combine('textureDimension', kTextureDimensions)
+ .expand('viewDimension', p => [
+ undefined,
+ ...viewDimensionsForTextureDimension(p.textureDimension),
+ ]);
+
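+// Reference implementation of createView's layer/level validation: the reified view must select at
+// least one mip level and one array layer, stay within the texture's mip levels and array layers,
+// and satisfy the layer-count rule for its dimension (exactly 1 for 1d/2d/3d, exactly 6 for cube,
+// and a positive multiple of 6 for cube-array).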
+function validateCreateViewLayersLevels(tex: GPUTextureDescriptor, view: GPUTextureViewDescriptor) {
+ const textureLevels = tex.mipLevelCount ?? 1;
+ const textureLayers = tex.dimension === '2d' ? reifyExtent3D(tex.size).depthOrArrayLayers : 1;
+ const reified = reifyTextureViewDescriptor(tex, view);
+
+ let success =
+ reified.mipLevelCount > 0 &&
+ reified.baseMipLevel < textureLevels &&
+ reified.baseMipLevel + reified.mipLevelCount <= textureLevels &&
+ reified.arrayLayerCount > 0 &&
+ reified.baseArrayLayer < textureLayers &&
+ reified.baseArrayLayer + reified.arrayLayerCount <= textureLayers;
+ if (reified.dimension === '1d' || reified.dimension === '2d' || reified.dimension === '3d') {
+ success &&= reified.arrayLayerCount === 1;
+ } else if (reified.dimension === 'cube') {
+ success &&= reified.arrayLayerCount === 6;
+ } else if (reified.dimension === 'cube-array') {
+ success &&= reified.arrayLayerCount % 6 === 0;
+ }
+ return success;
+}
+
+g.test('array_layers')
+ .desc(
+ `For each texture dimension {1d,2d,3d}, for each possible view dimension for that texture
+ dimension (or undefined, which defaults to the texture dimension), test validation of layer
+ counts:
+ - 1d, 2d, and 3d must have exactly 1 layer
+ - 2d-array must have 1 or more layers
+ - cube must have 6 layers
+ - cube-array must have a positive multiple of 6 layers
+ - Defaulting of baseArrayLayer and arrayLayerCount
+ - baseArrayLayer+arrayLayerCount must be within the texture`
+ )
+ .params(
+ kTextureAndViewDimensions
+ .beginSubcases()
+ .expand('textureLayers', ({ textureDimension: d }) => (d === '2d' ? [1, 6, 18] : [1]))
+ .combine('textureLevels', [1, kLevels])
+ .unless(p => p.textureDimension === '1d' && p.textureLevels !== 1)
+ .expand(
+ 'baseArrayLayer',
+ ({ textureLayers: l }) => new Set([undefined, 0, 1, 5, 6, 7, l - 1, l, l + 1])
+ )
+ .expand('arrayLayerCount', function* ({ textureLayers: l, baseArrayLayer = 0 }) {
+ yield undefined;
+ for (const lastArrayLayer of new Set([0, 1, 5, 6, 7, l - 1, l, l + 1])) {
+ if (baseArrayLayer <= lastArrayLayer) yield lastArrayLayer - baseArrayLayer;
+ }
+ })
+ )
+ .fn(t => {
+ const {
+ textureDimension,
+ viewDimension,
+ textureLayers,
+ textureLevels,
+ baseArrayLayer,
+ arrayLayerCount,
+ } = t.params;
+
+ t.skipIfTextureViewDimensionNotSupported(viewDimension);
+
+ const kWidth = 1 << (kLevels - 1); // 32
+ const textureDescriptor: GPUTextureDescriptor = {
+ format: 'rgba8unorm',
+ dimension: textureDimension,
+ size:
+ textureDimension === '1d'
+ ? [kWidth]
+ : textureDimension === '2d'
+ ? [kWidth, kWidth, textureLayers]
+ : textureDimension === '3d'
+ ? [kWidth, kWidth, kWidth]
+ : unreachable(),
+ mipLevelCount: textureLevels,
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ };
+
+ const viewDescriptor = { dimension: viewDimension, baseArrayLayer, arrayLayerCount };
+ const success = validateCreateViewLayersLevels(textureDescriptor, viewDescriptor);
+
+ const texture = t.device.createTexture(textureDescriptor);
+ t.expectValidationError(() => {
+ texture.createView(viewDescriptor);
+ }, !success);
+ });
+
+g.test('mip_levels')
+ .desc(
+    `Views must have at least one mip level, and must be within the mip levels of the base texture.
+
+ - mipLevelCount=0 at various baseMipLevel values
+ - Cases where baseMipLevel+mipLevelCount goes past the end of the texture
+ - Cases with baseMipLevel or mipLevelCount undefined (compares against reference defaulting impl)
+ `
+ )
+ .params(
+ kTextureAndViewDimensions
+ .beginSubcases()
+ .combine('textureLevels', [1, kLevels - 2, kLevels])
+ .unless(p => p.textureDimension === '1d' && p.textureLevels !== 1)
+ .expand(
+ 'baseMipLevel',
+ ({ textureLevels: l }) => new Set([undefined, 0, 1, 5, 6, 7, l - 1, l, l + 1])
+ )
+ .expand('mipLevelCount', function* ({ textureLevels: l, baseMipLevel = 0 }) {
+ yield undefined;
+ for (const lastMipLevel of new Set([0, 1, 5, 6, 7, l - 1, l, l + 1])) {
+ if (baseMipLevel <= lastMipLevel) yield lastMipLevel - baseMipLevel;
+ }
+ })
+ )
+ .fn(t => {
+ const { textureDimension, viewDimension, textureLevels, baseMipLevel, mipLevelCount } =
+ t.params;
+
+ t.skipIfTextureViewDimensionNotSupported(viewDimension);
+
+ const textureDescriptor: GPUTextureDescriptor = {
+ format: 'rgba8unorm',
+ dimension: textureDimension,
+ size:
+ textureDimension === '1d' ? [32] : textureDimension === '3d' ? [32, 32, 32] : [32, 32, 18],
+ mipLevelCount: textureLevels,
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ };
+
+ const viewDescriptor = { dimension: viewDimension, baseMipLevel, mipLevelCount };
+ const success = validateCreateViewLayersLevels(textureDescriptor, viewDescriptor);
+
+ const texture = t.device.createTexture(textureDescriptor);
+ t.debug(`${mipLevelCount} ${success}`);
+ t.expectValidationError(() => {
+ texture.createView(viewDescriptor);
+ }, !success);
+ });
+
+g.test('cube_faces_square')
+ .desc(
+ `Test that the X/Y dimensions of cube and cube array textures must be square.
+ - {2d (control case), cube, cube-array}`
+ )
+ .params(u =>
+ u //
+ .combine('dimension', ['2d', 'cube', 'cube-array'] as const)
+ .combine('size', [
+ [4, 4, 6],
+ [5, 5, 6],
+ [4, 5, 6],
+ [4, 8, 6],
+ [8, 4, 6],
+ ])
+ )
+ .fn(t => {
+ const { dimension, size } = t.params;
+
+ t.skipIfTextureViewDimensionNotSupported(dimension);
+
+ const texture = t.device.createTexture({
+ format: 'rgba8unorm',
+ size,
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ });
+
+ const success = dimension === '2d' || size[0] === size[1];
+ t.expectValidationError(() => {
+ texture.createView({ dimension });
+ }, !success);
+ });
+
+g.test('texture_state')
+ .desc(`createView should fail if the texture is invalid (but succeed if it is destroyed)`)
+ .paramsSubcasesOnly(u => u.combine('state', kResourceStates))
+ .fn(t => {
+ const { state } = t.params;
+ const texture = t.createTextureWithState(state);
+
+ t.expectValidationError(() => {
+ texture.createView();
+ }, state === 'invalid');
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/debugMarker.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/debugMarker.spec.ts
new file mode 100644
index 0000000000..466f903f46
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/debugMarker.spec.ts
@@ -0,0 +1,98 @@
+export const description = `
+Test validation of pushDebugGroup, popDebugGroup, and insertDebugMarker.
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+
+import { ValidationTest } from './validation_test.js';
+
+class F extends ValidationTest {
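+  // Creates a small throwaway color texture and begins a render pass on the given encoder so the
+  // debug-marker calls can be exercised inside a render pass.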
+ beginRenderPass(commandEncoder: GPUCommandEncoder): GPURenderPassEncoder {
+ const attachmentTexture = this.device.createTexture({
+ format: 'rgba8unorm',
+ size: { width: 16, height: 16, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ this.trackForCleanup(attachmentTexture);
+ return commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: attachmentTexture.createView(),
+ clearValue: { r: 1.0, g: 0.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ }
+}
+
+export const g = makeTestGroup(F);
+
+g.test('push_pop_call_count_unbalance,command_encoder')
+ .desc(
+ `
+  Test that a validation error is generated if pushDebugGroup and popDebugGroup call counts are not balanced.
+ `
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('pushCount', [1, 2, 3])
+ .combine('popCount', [1, 2, 3])
+ )
+ .fn(t => {
+ const { pushCount, popCount } = t.params;
+
+ const encoder = t.device.createCommandEncoder();
+
+ for (let i = 0; i < pushCount; ++i) {
+ encoder.pushDebugGroup('EventStart');
+ }
+
+ encoder.insertDebugMarker('Marker');
+
+ for (let i = 0; i < popCount; ++i) {
+ encoder.popDebugGroup();
+ }
+
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, pushCount !== popCount);
+ });
+
+g.test('push_pop_call_count_unbalance,render_compute_pass')
+ .desc(
+ `
+  Test that a validation error is generated if pushDebugGroup and popDebugGroup call counts are
+  not balanced in ComputePassEncoder and RenderPassEncoder.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('passType', ['compute', 'render'])
+ .beginSubcases()
+ .combine('pushCount', [1, 2, 3])
+ .combine('popCount', [1, 2, 3])
+ )
+ .fn(t => {
+ const { passType, pushCount, popCount } = t.params;
+
+ const encoder = t.device.createCommandEncoder();
+
+ const pass = passType === 'compute' ? encoder.beginComputePass() : t.beginRenderPass(encoder);
+
+ for (let i = 0; i < pushCount; ++i) {
+ pass.pushDebugGroup('EventStart');
+ }
+
+ pass.insertDebugMarker('Marker');
+
+ for (let i = 0; i < popCount; ++i) {
+ pass.popDebugGroup();
+ }
+
+ t.expectValidationError(() => {
+ pass.end();
+ encoder.finish();
+ }, pushCount !== popCount);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/beginComputePass.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/beginComputePass.spec.ts
new file mode 100644
index 0000000000..74640fad1f
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/beginComputePass.spec.ts
@@ -0,0 +1,147 @@
+export const description = `
+Tests for validation in beginComputePass and GPUComputePassDescriptor as its optional descriptor.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { kQueryTypes } from '../../../capability_info.js';
+import { ValidationTest } from '../validation_test.js';
+
+class F extends ValidationTest {
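+  // Encodes an empty compute pass using the given descriptor and checks that encoder.finish()
+  // succeeds or fails validation as expected.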
+ tryComputePass(success: boolean, descriptor: GPUComputePassDescriptor): void {
+ const encoder = this.device.createCommandEncoder();
+ const computePass = encoder.beginComputePass(descriptor);
+ computePass.end();
+
+ this.expectValidationError(() => {
+ encoder.finish();
+ }, !success);
+ }
+}
+
+export const g = makeTestGroup(F);
+
+g.test('timestampWrites,query_set_type')
+ .desc(
+ `
+    Test that the query set in timestampWrites must have type 'timestamp'. If the type of the
+    query set in GPUComputePassDescriptor is not 'timestamp', a validation error should be generated.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('queryType', kQueryTypes)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceForQueryTypeOrSkipTestCase(['timestamp', t.params.queryType]);
+ })
+ .fn(t => {
+ const { queryType } = t.params;
+
+ const isValid = queryType === 'timestamp';
+
+ const timestampWrites = {
+ querySet: t.device.createQuerySet({ type: queryType, count: 2 }),
+ beginningOfPassWriteIndex: 0,
+ endOfPassWriteIndex: 1,
+ };
+
+ const descriptor = {
+ timestampWrites,
+ };
+
+ t.tryComputePass(isValid, descriptor);
+ });
+
+g.test('timestampWrites,invalid_query_set')
+  .desc(`Tests that a timestampWrites entry with an invalid query set generates a validation error.`)
+ .params(u => u.combine('querySetState', ['valid', 'invalid'] as const))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase(['timestamp-query']);
+ })
+ .fn(t => {
+ const { querySetState } = t.params;
+
+ const querySet = t.createQuerySetWithState(querySetState, {
+ type: 'timestamp',
+ count: 1,
+ });
+
+ const timestampWrites = {
+ querySet,
+ beginningOfPassWriteIndex: 0,
+ };
+
+ const descriptor = {
+ timestampWrites,
+ };
+
+ t.tryComputePass(querySetState === 'valid', descriptor);
+ });
+
+g.test('timestampWrites,query_index')
+ .desc(
+    `Test that each timestampWrites write index must be less than querySet.count, and that the
+    beginning and end write indexes must be distinct.`
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('beginningOfPassWriteIndex', [undefined, 0, 1, 2, 3] as const)
+ .combine('endOfPassWriteIndex', [undefined, 0, 1, 2, 3] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase(['timestamp-query']);
+ })
+ .fn(t => {
+ const { beginningOfPassWriteIndex, endOfPassWriteIndex } = t.params;
+
+ const querySetCount = 2;
+
+ const timestampWrites = {
+ querySet: t.device.createQuerySet({ type: 'timestamp', count: querySetCount }),
+ beginningOfPassWriteIndex,
+ endOfPassWriteIndex,
+ };
+
+ const isValid =
+ beginningOfPassWriteIndex !== endOfPassWriteIndex &&
+ (beginningOfPassWriteIndex === undefined || beginningOfPassWriteIndex < querySetCount) &&
+ (endOfPassWriteIndex === undefined || endOfPassWriteIndex < querySetCount);
+
+ const descriptor = {
+ timestampWrites,
+ };
+
+ t.tryComputePass(isValid, descriptor);
+ });
+
+g.test('timestamp_query_set,device_mismatch')
+ .desc(
+ `
+ Tests beginComputePass cannot be called with a timestamp query set created from another device.
+ `
+ )
+ .paramsSubcasesOnly(u => u.combine('mismatched', [true, false]))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase(['timestamp-query']);
+ t.selectMismatchedDeviceOrSkipTestCase('timestamp-query');
+ })
+ .fn(t => {
+ const { mismatched } = t.params;
+ const sourceDevice = mismatched ? t.mismatchedDevice : t.device;
+
+ const timestampQuerySet = sourceDevice.createQuerySet({
+ type: 'timestamp',
+ count: 1,
+ });
+
+ const timestampWrites = {
+ querySet: timestampQuerySet,
+ beginningOfPassWriteIndex: 0,
+ };
+
+ const descriptor = {
+ timestampWrites,
+ };
+
+ t.tryComputePass(!mismatched, descriptor);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/beginRenderPass.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/beginRenderPass.spec.ts
new file mode 100644
index 0000000000..8db7b9bb5e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/beginRenderPass.spec.ts
@@ -0,0 +1,215 @@
+export const description = `
+Note: render pass 'occlusionQuerySet' validation is tested in queries/general.spec.ts
+
+TODO: Check that depth-stencil attachment views must encompass all aspects.
+
+TODO: check for duplication (render_pass/, etc.), plan, and implement.
+Note possibly a lot of this should be operation tests instead.
+Notes:
+> - color attachments {zero, one, multiple}
+> - many different formats (some are non-renderable)
+> - is a view on a texture with multiple mip levels or array layers
+> - two attachments use the same view, or views of {intersecting, disjoint} ranges
+> - {without, with} resolve target
+> - resolve format compatibility with multisampled format
+> - {all possible load ops, load color {in range, negative, too large}}
+> - all possible store ops
+> - depth/stencil attachment
+> - {unset, all possible formats}
+> - {all possible {depth, stencil} load ops, load values {in range, negative, too large}}
+> - all possible {depth, stencil} store ops
+> - depthReadOnly {t,f}, stencilReadOnly {t,f}
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { ValidationTest } from '../validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('color_attachments,device_mismatch')
+ .desc(
+ `
+  Tests beginRenderPass cannot be called with color attachments whose texture view or resolve target was created from another device.
+  The 'view' and 'resolveTarget' are:
+    - created from the same device in ColorAttachment0 and ColorAttachment1
+    - created from a different device in ColorAttachment0 and ColorAttachment1
+    - created from the same device in ColorAttachment0, but from a different device in ColorAttachment1
+ `
+ )
+ .paramsSubcasesOnly([
+ {
+ view0Mismatched: false,
+ target0Mismatched: false,
+ view1Mismatched: false,
+ target1Mismatched: false,
+ }, // control case
+ {
+ view0Mismatched: false,
+ target0Mismatched: true,
+ view1Mismatched: false,
+ target1Mismatched: true,
+ },
+ {
+ view0Mismatched: true,
+ target0Mismatched: false,
+ view1Mismatched: true,
+ target1Mismatched: false,
+ },
+ {
+ view0Mismatched: false,
+ target0Mismatched: false,
+ view1Mismatched: false,
+ target1Mismatched: true,
+ },
+ ])
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { view0Mismatched, target0Mismatched, view1Mismatched, target1Mismatched } = t.params;
+ const mismatched = view0Mismatched || target0Mismatched || view1Mismatched || target1Mismatched;
+
+ const view0Texture = view0Mismatched
+ ? t.getDeviceMismatchedRenderTexture(4)
+ : t.getRenderTexture(4);
+ const target0Texture = target0Mismatched
+ ? t.getDeviceMismatchedRenderTexture()
+ : t.getRenderTexture();
+ const view1Texture = view1Mismatched
+ ? t.getDeviceMismatchedRenderTexture(4)
+ : t.getRenderTexture(4);
+ const target1Texture = target1Mismatched
+ ? t.getDeviceMismatchedRenderTexture()
+ : t.getRenderTexture();
+
+ const encoder = t.createEncoder('non-pass');
+ const pass = encoder.encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: view0Texture.createView(),
+ clearValue: { r: 1.0, g: 0.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ resolveTarget: target0Texture.createView(),
+ },
+ {
+ view: view1Texture.createView(),
+ clearValue: { r: 1.0, g: 0.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ resolveTarget: target1Texture.createView(),
+ },
+ ],
+ });
+ pass.end();
+
+ encoder.validateFinish(!mismatched);
+ });
+
+g.test('depth_stencil_attachment,device_mismatch')
+ .desc(
+    'Tests beginRenderPass cannot be called with a depth/stencil attachment whose texture view was created from another device.'
+ )
+ .paramsSubcasesOnly(u => u.combine('mismatched', [true, false]))
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { mismatched } = t.params;
+
+ const descriptor: GPUTextureDescriptor = {
+ size: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ format: 'depth24plus-stencil8',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ };
+
+ const depthStencilTexture = mismatched
+ ? t.getDeviceMismatchedTexture(descriptor)
+ : t.device.createTexture(descriptor);
+
+ const encoder = t.createEncoder('non-pass');
+ const pass = encoder.encoder.beginRenderPass({
+ colorAttachments: [],
+ depthStencilAttachment: {
+ view: depthStencilTexture.createView(),
+ depthClearValue: 0,
+ depthLoadOp: 'clear',
+ depthStoreOp: 'store',
+ stencilClearValue: 0,
+ stencilLoadOp: 'clear',
+ stencilStoreOp: 'store',
+ },
+ });
+ pass.end();
+
+ encoder.validateFinish(!mismatched);
+ });
+
+g.test('occlusion_query_set,device_mismatch')
+ .desc(
+ 'Tests beginRenderPass cannot be called with an occlusion query set created from another device'
+ )
+ .paramsSubcasesOnly(u => u.combine('mismatched', [true, false]))
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { mismatched } = t.params;
+ const sourceDevice = mismatched ? t.mismatchedDevice : t.device;
+
+ const occlusionQuerySet = sourceDevice.createQuerySet({
+ type: 'occlusion',
+ count: 1,
+ });
+ t.trackForCleanup(occlusionQuerySet);
+
+ const encoder = t.createEncoder('render pass', { occlusionQuerySet });
+ encoder.validateFinish(!mismatched);
+ });
+
+g.test('timestamp_query_set,device_mismatch')
+ .desc(
+ `
+ Tests beginRenderPass cannot be called with a timestamp query set created from another device.
+ `
+ )
+ .paramsSubcasesOnly(u => u.combine('mismatched', [true, false]))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase(['timestamp-query']);
+ t.selectMismatchedDeviceOrSkipTestCase('timestamp-query');
+ })
+ .fn(t => {
+ const { mismatched } = t.params;
+ const sourceDevice = mismatched ? t.mismatchedDevice : t.device;
+
+ const timestampQuerySet = sourceDevice.createQuerySet({
+ type: 'timestamp',
+ count: 1,
+ });
+
+ const timestampWrites = {
+ querySet: timestampQuerySet,
+ beginningOfPassWriteIndex: 0,
+ };
+
+ const colorTexture = t.device.createTexture({
+ format: 'rgba8unorm',
+ size: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const encoder = t.createEncoder('non-pass');
+ const pass = encoder.encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorTexture.createView(),
+ loadOp: 'load',
+ storeOp: 'store',
+ },
+ ],
+ timestampWrites,
+ });
+ pass.end();
+
+ encoder.validateFinish(!mismatched);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/clearBuffer.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/clearBuffer.spec.ts
new file mode 100644
index 0000000000..e349ac2f71
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/clearBuffer.spec.ts
@@ -0,0 +1,246 @@
+export const description = `
+API validation tests for clearBuffer.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { kBufferUsages } from '../../../../capability_info.js';
+import { kResourceStates } from '../../../../gpu_test.js';
+import { kMaxSafeMultipleOf8 } from '../../../../util/math.js';
+import { ValidationTest } from '../../validation_test.js';
+
+class F extends ValidationTest {
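+  // Records a single clearBuffer call with the given arguments and checks whether
+  // encoder.finish() validates as expected.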
+ TestClearBuffer(options: {
+ buffer: GPUBuffer;
+ offset: number | undefined;
+ size: number | undefined;
+ isSuccess: boolean;
+ }): void {
+ const { buffer, offset, size, isSuccess } = options;
+
+ const commandEncoder = this.device.createCommandEncoder();
+ commandEncoder.clearBuffer(buffer, offset, size);
+
+ this.expectValidationError(() => {
+ commandEncoder.finish();
+ }, !isSuccess);
+ }
+}
+
+export const g = makeTestGroup(F);
+
+g.test('buffer_state')
+ .desc(`Test that clearing an invalid or destroyed buffer fails.`)
+ .params(u => u.combine('bufferState', kResourceStates))
+ .fn(t => {
+ const { bufferState } = t.params;
+
+ const buffer = t.createBufferWithState(bufferState, {
+ size: 8,
+ usage: GPUBufferUsage.COPY_DST,
+ });
+
+ const commandEncoder = t.device.createCommandEncoder();
+ commandEncoder.clearBuffer(buffer, 0, 8);
+
+ if (bufferState === 'invalid') {
+ t.expectValidationError(() => {
+ commandEncoder.finish();
+ });
+ } else {
+ const cmd = commandEncoder.finish();
+ t.expectValidationError(() => {
+ t.device.queue.submit([cmd]);
+ }, bufferState === 'destroyed');
+ }
+ });
+
+g.test('buffer,device_mismatch')
+  .desc(`Tests clearBuffer cannot be called with a buffer created from another device.`)
+ .paramsSubcasesOnly(u => u.combine('mismatched', [true, false]))
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { mismatched } = t.params;
+ const sourceDevice = mismatched ? t.mismatchedDevice : t.device;
+ const size = 8;
+
+ const buffer = sourceDevice.createBuffer({
+ size,
+ usage: GPUBufferUsage.COPY_DST,
+ });
+ t.trackForCleanup(buffer);
+
+ t.TestClearBuffer({
+ buffer,
+ offset: 0,
+ size,
+ isSuccess: !mismatched,
+ });
+ });
+
+g.test('default_args')
+ .desc(`Test that calling clearBuffer with a default offset and size is valid.`)
+ .paramsSubcasesOnly([
+ { offset: undefined, size: undefined },
+ { offset: 4, size: undefined },
+ { offset: undefined, size: 8 },
+ ] as const)
+ .fn(t => {
+ const { offset, size } = t.params;
+
+ const buffer = t.device.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.COPY_DST,
+ });
+
+ t.TestClearBuffer({
+ buffer,
+ offset,
+ size,
+ isSuccess: true,
+ });
+ });
+
+g.test('buffer_usage')
+  .desc(`Test that only buffers with COPY_DST usage are valid to use with clearBuffer.`)
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('usage', kBufferUsages)
+ )
+ .fn(t => {
+ const { usage } = t.params;
+
+ const buffer = t.device.createBuffer({
+ size: 16,
+ usage,
+ });
+
+ t.TestClearBuffer({
+ buffer,
+ offset: 0,
+ size: 16,
+ isSuccess: usage === GPUBufferUsage.COPY_DST,
+ });
+ });
+
+g.test('size_alignment')
+ .desc(
+ `
+ Test that the clear size must be 4 byte aligned.
+ - Test size is not a multiple of 4.
+ - Test size is 0.
+ - Test size overflows the buffer size.
+ - Test size is omitted.
+ `
+ )
+ .paramsSubcasesOnly([
+ { size: 0, _isSuccess: true },
+ { size: 2, _isSuccess: false },
+ { size: 4, _isSuccess: true },
+ { size: 5, _isSuccess: false },
+ { size: 8, _isSuccess: true },
+ { size: 20, _isSuccess: false },
+ { size: undefined, _isSuccess: true },
+ ] as const)
+ .fn(t => {
+ const { size, _isSuccess: isSuccess } = t.params;
+
+ const buffer = t.device.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.COPY_DST,
+ });
+
+ t.TestClearBuffer({
+ buffer,
+ offset: 0,
+ size,
+ isSuccess,
+ });
+ });
+
+g.test('offset_alignment')
+ .desc(
+ `
+ Test that the clear offsets must be 4 byte aligned.
+ - Test offset is not a multiple of 4.
+ - Test offset is larger than the buffer size.
+ - Test offset is omitted.
+ `
+ )
+ .paramsSubcasesOnly([
+ { offset: 0, _isSuccess: true },
+ { offset: 2, _isSuccess: false },
+ { offset: 4, _isSuccess: true },
+ { offset: 5, _isSuccess: false },
+ { offset: 8, _isSuccess: true },
+ { offset: 20, _isSuccess: false },
+ { offset: undefined, _isSuccess: true },
+ ] as const)
+ .fn(t => {
+ const { offset, _isSuccess: isSuccess } = t.params;
+
+ const buffer = t.device.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.COPY_DST,
+ });
+
+ t.TestClearBuffer({
+ buffer,
+ offset,
+ size: 8,
+ isSuccess,
+ });
+ });
+
+g.test('overflow')
+ .desc(`Test that clears which may cause arithmetic overflows are invalid.`)
+ .paramsSubcasesOnly([
+ { offset: 0, size: kMaxSafeMultipleOf8 },
+ { offset: 16, size: kMaxSafeMultipleOf8 },
+ { offset: kMaxSafeMultipleOf8, size: 16 },
+ { offset: kMaxSafeMultipleOf8, size: kMaxSafeMultipleOf8 },
+ ] as const)
+ .fn(t => {
+ const { offset, size } = t.params;
+
+ const buffer = t.device.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.COPY_DST,
+ });
+
+ t.TestClearBuffer({
+ buffer,
+ offset,
+ size,
+ isSuccess: false,
+ });
+ });
+
+g.test('out_of_bounds')
+ .desc(`Test that clears which exceed the buffer bounds are invalid.`)
+ .paramsSubcasesOnly([
+ { offset: 0, size: 32, _isSuccess: true },
+ { offset: 0, size: 36 },
+ { offset: 32, size: 0, _isSuccess: true },
+ { offset: 32, size: 4 },
+ { offset: 36, size: 4 },
+ { offset: 36, size: 0 },
+ { offset: 20, size: 16 },
+ { offset: 20, size: 12, _isSuccess: true },
+ ] as const)
+ .fn(t => {
+ const { offset, size, _isSuccess = false } = t.params;
+
+ const buffer = t.device.createBuffer({
+ size: 32,
+ usage: GPUBufferUsage.COPY_DST,
+ });
+
+ t.TestClearBuffer({
+ buffer,
+ offset,
+ size,
+ isSuccess: _isSuccess,
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/compute_pass.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/compute_pass.spec.ts
new file mode 100644
index 0000000000..a73e142a5a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/compute_pass.spec.ts
@@ -0,0 +1,259 @@
+export const description = `
+API validation test for compute pass
+
+Does **not** test usage scopes (resource_usages/) or programmable pass state (programmable_pass).
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { makeValueTestVariant } from '../../../../../common/util/util.js';
+import { kBufferUsages } from '../../../../capability_info.js';
+import { GPUConst } from '../../../../constants.js';
+import { kResourceStates, ResourceState } from '../../../../gpu_test.js';
+import { ValidationTest } from '../../validation_test.js';
+
+class F extends ValidationTest {
+ createComputePipeline(state: 'valid' | 'invalid'): GPUComputePipeline {
+ if (state === 'valid') {
+ return this.createNoOpComputePipeline();
+ }
+
+ return this.createErrorComputePipeline();
+ }
+
+ createIndirectBuffer(state: ResourceState, data: Uint32Array): GPUBuffer {
+ const descriptor: GPUBufferDescriptor = {
+ size: data.byteLength,
+ usage: GPUBufferUsage.INDIRECT | GPUBufferUsage.COPY_DST,
+ };
+
+ if (state === 'invalid') {
+ descriptor.usage = 0xffff; // Invalid GPUBufferUsage
+ }
+
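+    // Create the buffer inside a validation error scope so the intentionally invalid descriptor
+    // above does not surface an uncaptured error on the test device.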
+ this.device.pushErrorScope('validation');
+ const buffer = this.device.createBuffer(descriptor);
+ void this.device.popErrorScope();
+
+ if (state === 'valid') {
+ this.queue.writeBuffer(buffer, 0, data);
+ }
+
+ if (state === 'destroyed') {
+ buffer.destroy();
+ }
+
+ return buffer;
+ }
+}
+
+export const g = makeTestGroup(F);
+
+g.test('set_pipeline')
+ .desc(
+ `
+setPipeline should generate an error iff using an 'invalid' pipeline.
+`
+ )
+ .params(u => u.beginSubcases().combine('state', ['valid', 'invalid'] as const))
+ .fn(t => {
+ const { state } = t.params;
+ const pipeline = t.createComputePipeline(state);
+
+ const { encoder, validateFinishAndSubmitGivenState } = t.createEncoder('compute pass');
+ encoder.setPipeline(pipeline);
+ validateFinishAndSubmitGivenState(state);
+ });
+
+g.test('pipeline,device_mismatch')
+ .desc('Tests setPipeline cannot be called with a compute pipeline created from another device')
+ .paramsSubcasesOnly(u => u.combine('mismatched', [true, false]))
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { mismatched } = t.params;
+ const sourceDevice = mismatched ? t.mismatchedDevice : t.device;
+
+ const pipeline = sourceDevice.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: sourceDevice.createShaderModule({
+ code: '@compute @workgroup_size(1) fn main() {}',
+ }),
+ entryPoint: 'main',
+ },
+ });
+
+ const { encoder, validateFinish } = t.createEncoder('compute pass');
+ encoder.setPipeline(pipeline);
+ validateFinish(!mismatched);
+ });
+
+g.test('dispatch_sizes')
+ .desc(
+ `Test 'direct' and 'indirect' dispatch with various sizes.
+
+ Only direct dispatches can produce validation errors.
+ Workgroup sizes:
+ - valid: { zero, one, just under limit }
+ - invalid: { just over limit, way over limit }
+
+ TODO: Verify that the invalid cases don't execute any invocations at all.
+`
+ )
+ .params(u =>
+ u
+ .combine('dispatchType', ['direct', 'indirect'] as const)
+ .combine('largeDimValueVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: 0 },
+ { mult: 1, add: 1 },
+ { mult: 0, add: 0x7fff_ffff },
+ { mult: 0, add: 0xffff_ffff },
+ ])
+ .beginSubcases()
+ .combine('largeDimIndex', [0, 1, 2] as const)
+ .combine('smallDimValue', [0, 1])
+ )
+ .fn(t => {
+ const { dispatchType, largeDimIndex, smallDimValue, largeDimValueVariant } = t.params;
+ const maxDispatch = t.device.limits.maxComputeWorkgroupsPerDimension;
+ const largeDimValue = makeValueTestVariant(maxDispatch, largeDimValueVariant);
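+    // makeValueTestVariant is assumed here to expand { mult, add } as (maxDispatch * mult + add),
+    // giving dispatch sizes of 0, 1, the limit, just past the limit, and far past it.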
+
+ const pipeline = t.createNoOpComputePipeline();
+
+ const workSizes = [smallDimValue, smallDimValue, smallDimValue];
+ workSizes[largeDimIndex] = largeDimValue;
+
+ const { encoder, validateFinishAndSubmit } = t.createEncoder('compute pass');
+ encoder.setPipeline(pipeline);
+ if (dispatchType === 'direct') {
+ const [x, y, z] = workSizes;
+ encoder.dispatchWorkgroups(x, y, z);
+ } else if (dispatchType === 'indirect') {
+ encoder.dispatchWorkgroupsIndirect(
+ t.createIndirectBuffer('valid', new Uint32Array(workSizes)),
+ 0
+ );
+ }
+
+ const shouldError =
+ dispatchType === 'direct' &&
+ (workSizes[0] > maxDispatch || workSizes[1] > maxDispatch || workSizes[2] > maxDispatch);
+
+ validateFinishAndSubmit(!shouldError, true);
+ });
+
+const kBufferData = new Uint32Array(6).fill(1);
+g.test('indirect_dispatch_buffer_state')
+ .desc(
+ `
+Test dispatchWorkgroupsIndirect validation by submitting various dispatches with a no-op pipeline
+and an indirectBuffer with 6 elements.
+- indirectBuffer: {'valid', 'invalid', 'destroyed'}
+- indirectOffset:
+ - valid, within the buffer: {beginning, middle, end} of the buffer
+ - invalid, non-multiple of 4
+ - invalid, the last element is outside the buffer
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('state', kResourceStates)
+ .combine('offset', [
+ // valid (for 'valid' buffers)
+ 0,
+ Uint32Array.BYTES_PER_ELEMENT,
+ kBufferData.byteLength - 3 * Uint32Array.BYTES_PER_ELEMENT,
+ // invalid, non-multiple of 4 offset
+ 1,
+ // invalid, last element outside buffer
+ kBufferData.byteLength - 2 * Uint32Array.BYTES_PER_ELEMENT,
+ ])
+ )
+ .fn(t => {
+ const { state, offset } = t.params;
+ const pipeline = t.createNoOpComputePipeline();
+ const buffer = t.createIndirectBuffer(state, kBufferData);
+
+ const { encoder, validateFinishAndSubmit } = t.createEncoder('compute pass');
+ encoder.setPipeline(pipeline);
+ encoder.dispatchWorkgroupsIndirect(buffer, offset);
+
+ const finishShouldError =
+ state === 'invalid' ||
+ offset % 4 !== 0 ||
+ offset + 3 * Uint32Array.BYTES_PER_ELEMENT > kBufferData.byteLength;
+ validateFinishAndSubmit(!finishShouldError, state !== 'destroyed');
+ });
+
+g.test('indirect_dispatch_buffer,device_mismatch')
+ .desc(
+ `Tests dispatchWorkgroupsIndirect cannot be called with an indirect buffer created from another device`
+ )
+ .paramsSubcasesOnly(u => u.combine('mismatched', [true, false]))
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { mismatched } = t.params;
+
+ const pipeline = t.createNoOpComputePipeline();
+
+ const sourceDevice = mismatched ? t.mismatchedDevice : t.device;
+
+ const buffer = sourceDevice.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.INDIRECT,
+ });
+ t.trackForCleanup(buffer);
+
+ const { encoder, validateFinish } = t.createEncoder('compute pass');
+ encoder.setPipeline(pipeline);
+ encoder.dispatchWorkgroupsIndirect(buffer, 0);
+ validateFinish(!mismatched);
+ });
+
+g.test('indirect_dispatch_buffer,usage')
+ .desc(
+ `
+ Tests dispatchWorkgroupsIndirect generates a validation error if the buffer usage does not
+ contain INDIRECT usage.
+ `
+ )
+ .paramsSubcasesOnly(u =>
+ u
+      // If bufferUsage0 and bufferUsage1 are the same, the usage being tested is a single usage.
+ // Otherwise, it's a combined usage.
+ .combine('bufferUsage0', kBufferUsages)
+ .combine('bufferUsage1', kBufferUsages)
+ .unless(
+ ({ bufferUsage0, bufferUsage1 }) =>
+ ((bufferUsage0 | bufferUsage1) &
+ (GPUConst.BufferUsage.MAP_READ | GPUConst.BufferUsage.MAP_WRITE)) !==
+ 0
+ )
+ )
+ .fn(t => {
+ const { bufferUsage0, bufferUsage1 } = t.params;
+
+ const bufferUsage = bufferUsage0 | bufferUsage1;
+
+ const layout = t.device.createPipelineLayout({ bindGroupLayouts: [] });
+ const pipeline = t.createNoOpComputePipeline(layout);
+
+ const buffer = t.device.createBuffer({
+ size: 16,
+ usage: bufferUsage,
+ });
+ t.trackForCleanup(buffer);
+
+ const success = (GPUBufferUsage.INDIRECT & bufferUsage) !== 0;
+
+ const { encoder, validateFinish } = t.createEncoder('compute pass');
+ encoder.setPipeline(pipeline);
+
+ encoder.dispatchWorkgroupsIndirect(buffer, 0);
+ validateFinish(success);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/copyBufferToBuffer.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/copyBufferToBuffer.spec.ts
new file mode 100644
index 0000000000..343bafab3e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/copyBufferToBuffer.spec.ts
@@ -0,0 +1,326 @@
+export const description = `
+copyBufferToBuffer tests.
+
+Test Plan:
+* Buffer is valid/invalid
+ - the source buffer is invalid
+ - the destination buffer is invalid
+* Buffer usages
+ - the source buffer is created without GPUBufferUsage::COPY_SRC
+  - the destination buffer is created without GPUBufferUsage::COPY_DST
+* CopySize
+ - copySize is not a multiple of 4
+ - copySize is 0
+* Copy offsets
+ - sourceOffset is not a multiple of 4
+ - destinationOffset is not a multiple of 4
+* Arithmetic overflow
+  - (sourceOffset + copySize) overflows
+  - (destinationOffset + copySize) overflows
+* Out of bounds
+ - (sourceOffset + copySize) > size of source buffer
+ - (destinationOffset + copySize) > size of destination buffer
+* Source buffer and destination buffer are the same buffer
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { kBufferUsages } from '../../../../capability_info.js';
+import { kResourceStates } from '../../../../gpu_test.js';
+import { kMaxSafeMultipleOf8 } from '../../../../util/math.js';
+import { ValidationTest } from '../../validation_test.js';
+
+class F extends ValidationTest {
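+  // Records a single copyBufferToBuffer call and checks at which stage validation is expected to
+  // fail: 'FinishError' at encoder.finish(), 'SubmitError' at queue.submit(), and 'Success' for
+  // neither.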
+ TestCopyBufferToBuffer(options: {
+ srcBuffer: GPUBuffer;
+ srcOffset: number;
+ dstBuffer: GPUBuffer;
+ dstOffset: number;
+ copySize: number;
+ expectation: 'Success' | 'FinishError' | 'SubmitError';
+ }): void {
+ const { srcBuffer, srcOffset, dstBuffer, dstOffset, copySize, expectation } = options;
+
+ const commandEncoder = this.device.createCommandEncoder();
+ commandEncoder.copyBufferToBuffer(srcBuffer, srcOffset, dstBuffer, dstOffset, copySize);
+
+ if (expectation === 'FinishError') {
+ this.expectValidationError(() => {
+ commandEncoder.finish();
+ });
+ } else {
+ const cmd = commandEncoder.finish();
+ this.expectValidationError(() => {
+ this.device.queue.submit([cmd]);
+ }, expectation === 'SubmitError');
+ }
+ }
+}
+
+export const g = makeTestGroup(F);
+
+g.test('buffer_state')
+ .params(u =>
+ u //
+ .combine('srcBufferState', kResourceStates)
+ .combine('dstBufferState', kResourceStates)
+ )
+ .fn(t => {
+ const { srcBufferState, dstBufferState } = t.params;
+ const srcBuffer = t.createBufferWithState(srcBufferState, {
+ size: 16,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+ const dstBuffer = t.createBufferWithState(dstBufferState, {
+ size: 16,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+
+ const shouldFinishError = srcBufferState === 'invalid' || dstBufferState === 'invalid';
+ const shouldSubmitSuccess = srcBufferState === 'valid' && dstBufferState === 'valid';
+ const expectation = shouldSubmitSuccess
+ ? 'Success'
+ : shouldFinishError
+ ? 'FinishError'
+ : 'SubmitError';
+
+ t.TestCopyBufferToBuffer({
+ srcBuffer,
+ srcOffset: 0,
+ dstBuffer,
+ dstOffset: 0,
+ copySize: 8,
+ expectation,
+ });
+ });
+
+g.test('buffer,device_mismatch')
+ .desc(
+ 'Tests copyBufferToBuffer cannot be called with src buffer or dst buffer created from another device'
+ )
+ .paramsSubcasesOnly([
+ { srcMismatched: false, dstMismatched: false }, // control case
+ { srcMismatched: true, dstMismatched: false },
+ { srcMismatched: false, dstMismatched: true },
+ ] as const)
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { srcMismatched, dstMismatched } = t.params;
+
+ const srcBufferDevice = srcMismatched ? t.mismatchedDevice : t.device;
+ const srcBuffer = srcBufferDevice.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(srcBuffer);
+
+ const dstBufferDevice = dstMismatched ? t.mismatchedDevice : t.device;
+ const dstBuffer = dstBufferDevice.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.COPY_DST,
+ });
+ t.trackForCleanup(dstBuffer);
+
+ t.TestCopyBufferToBuffer({
+ srcBuffer,
+ srcOffset: 0,
+ dstBuffer,
+ dstOffset: 0,
+ copySize: 8,
+ expectation: srcMismatched || dstMismatched ? 'FinishError' : 'Success',
+ });
+ });
+
+g.test('buffer_usage')
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('srcUsage', kBufferUsages)
+ .combine('dstUsage', kBufferUsages)
+ )
+ .fn(t => {
+ const { srcUsage, dstUsage } = t.params;
+
+ const srcBuffer = t.device.createBuffer({
+ size: 16,
+ usage: srcUsage,
+ });
+ const dstBuffer = t.device.createBuffer({
+ size: 16,
+ usage: dstUsage,
+ });
+
+ const isSuccess = srcUsage === GPUBufferUsage.COPY_SRC && dstUsage === GPUBufferUsage.COPY_DST;
+ const expectation = isSuccess ? 'Success' : 'FinishError';
+
+ t.TestCopyBufferToBuffer({
+ srcBuffer,
+ srcOffset: 0,
+ dstBuffer,
+ dstOffset: 0,
+ copySize: 8,
+ expectation,
+ });
+ });
+
+g.test('copy_size_alignment')
+ .paramsSubcasesOnly([
+ { copySize: 0, _isSuccess: true },
+ { copySize: 2, _isSuccess: false },
+ { copySize: 4, _isSuccess: true },
+ { copySize: 5, _isSuccess: false },
+ { copySize: 8, _isSuccess: true },
+ ] as const)
+ .fn(t => {
+ const { copySize, _isSuccess: isSuccess } = t.params;
+
+ const srcBuffer = t.device.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.COPY_SRC,
+ });
+ const dstBuffer = t.device.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.COPY_DST,
+ });
+
+ t.TestCopyBufferToBuffer({
+ srcBuffer,
+ srcOffset: 0,
+ dstBuffer,
+ dstOffset: 0,
+ copySize,
+ expectation: isSuccess ? 'Success' : 'FinishError',
+ });
+ });
+
+g.test('copy_offset_alignment')
+ .paramsSubcasesOnly([
+ { srcOffset: 0, dstOffset: 0, _isSuccess: true },
+ { srcOffset: 2, dstOffset: 0, _isSuccess: false },
+ { srcOffset: 4, dstOffset: 0, _isSuccess: true },
+ { srcOffset: 5, dstOffset: 0, _isSuccess: false },
+ { srcOffset: 8, dstOffset: 0, _isSuccess: true },
+ { srcOffset: 0, dstOffset: 2, _isSuccess: false },
+ { srcOffset: 0, dstOffset: 4, _isSuccess: true },
+ { srcOffset: 0, dstOffset: 5, _isSuccess: false },
+ { srcOffset: 0, dstOffset: 8, _isSuccess: true },
+ { srcOffset: 4, dstOffset: 4, _isSuccess: true },
+ ] as const)
+ .fn(t => {
+ const { srcOffset, dstOffset, _isSuccess: isSuccess } = t.params;
+
+ const srcBuffer = t.device.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.COPY_SRC,
+ });
+ const dstBuffer = t.device.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.COPY_DST,
+ });
+
+ t.TestCopyBufferToBuffer({
+ srcBuffer,
+ srcOffset,
+ dstBuffer,
+ dstOffset,
+ copySize: 8,
+ expectation: isSuccess ? 'Success' : 'FinishError',
+ });
+ });
+
+g.test('copy_overflow')
+ .paramsSubcasesOnly([
+ { srcOffset: 0, dstOffset: 0, copySize: kMaxSafeMultipleOf8 },
+ { srcOffset: 16, dstOffset: 0, copySize: kMaxSafeMultipleOf8 },
+ { srcOffset: 0, dstOffset: 16, copySize: kMaxSafeMultipleOf8 },
+ { srcOffset: kMaxSafeMultipleOf8, dstOffset: 0, copySize: 16 },
+ { srcOffset: 0, dstOffset: kMaxSafeMultipleOf8, copySize: 16 },
+ { srcOffset: kMaxSafeMultipleOf8, dstOffset: 0, copySize: kMaxSafeMultipleOf8 },
+ { srcOffset: 0, dstOffset: kMaxSafeMultipleOf8, copySize: kMaxSafeMultipleOf8 },
+ {
+ srcOffset: kMaxSafeMultipleOf8,
+ dstOffset: kMaxSafeMultipleOf8,
+ copySize: kMaxSafeMultipleOf8,
+ },
+ ] as const)
+ .fn(t => {
+ const { srcOffset, dstOffset, copySize } = t.params;
+
+ const srcBuffer = t.device.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.COPY_SRC,
+ });
+ const dstBuffer = t.device.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.COPY_DST,
+ });
+
+ t.TestCopyBufferToBuffer({
+ srcBuffer,
+ srcOffset,
+ dstBuffer,
+ dstOffset,
+ copySize,
+ expectation: 'FinishError',
+ });
+ });
+
+g.test('copy_out_of_bounds')
+ .paramsSubcasesOnly([
+ { srcOffset: 0, dstOffset: 0, copySize: 32, _isSuccess: true },
+ { srcOffset: 0, dstOffset: 0, copySize: 36 },
+ { srcOffset: 36, dstOffset: 0, copySize: 4 },
+ { srcOffset: 0, dstOffset: 36, copySize: 4 },
+ { srcOffset: 36, dstOffset: 0, copySize: 0 },
+ { srcOffset: 0, dstOffset: 36, copySize: 0 },
+ { srcOffset: 20, dstOffset: 0, copySize: 16 },
+ { srcOffset: 20, dstOffset: 0, copySize: 12, _isSuccess: true },
+ { srcOffset: 0, dstOffset: 20, copySize: 16 },
+ { srcOffset: 0, dstOffset: 20, copySize: 12, _isSuccess: true },
+ ] as const)
+ .fn(t => {
+ const { srcOffset, dstOffset, copySize, _isSuccess = false } = t.params;
+
+ const srcBuffer = t.device.createBuffer({
+ size: 32,
+ usage: GPUBufferUsage.COPY_SRC,
+ });
+ const dstBuffer = t.device.createBuffer({
+ size: 32,
+ usage: GPUBufferUsage.COPY_DST,
+ });
+
+ t.TestCopyBufferToBuffer({
+ srcBuffer,
+ srcOffset,
+ dstBuffer,
+ dstOffset,
+ copySize,
+ expectation: _isSuccess ? 'Success' : 'FinishError',
+ });
+ });
+
+g.test('copy_within_same_buffer')
+ .paramsSubcasesOnly([
+ { srcOffset: 0, dstOffset: 8, copySize: 4 },
+ { srcOffset: 8, dstOffset: 0, copySize: 4 },
+ { srcOffset: 0, dstOffset: 4, copySize: 8 },
+ { srcOffset: 4, dstOffset: 0, copySize: 8 },
+ ] as const)
+ .fn(t => {
+ const { srcOffset, dstOffset, copySize } = t.params;
+
+ const buffer = t.device.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+
+ t.TestCopyBufferToBuffer({
+ srcBuffer: buffer,
+ srcOffset,
+ dstBuffer: buffer,
+ dstOffset,
+ copySize,
+ expectation: 'FinishError',
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/copyTextureToTexture.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/copyTextureToTexture.spec.ts
new file mode 100644
index 0000000000..f1c3d91e29
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/copyTextureToTexture.spec.ts
@@ -0,0 +1,874 @@
+export const description = `
+copyTextureToTexture tests.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { kTextureUsages, kTextureDimensions } from '../../../../capability_info.js';
+import {
+ kTextureFormatInfo,
+ kTextureFormats,
+ kCompressedTextureFormats,
+ kDepthStencilFormats,
+ kFeaturesForFormats,
+ filterFormatsByFeature,
+ textureDimensionAndFormatCompatible,
+} from '../../../../format_info.js';
+import { kResourceStates } from '../../../../gpu_test.js';
+import { align, lcm } from '../../../../util/math.js';
+import { ValidationTest } from '../../validation_test.js';
+
+class F extends ValidationTest {
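+  // Encodes a single copyTextureToTexture call and validates it at the stage named by
+  // `expectation` (finish, submit, or neither).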
+ TestCopyTextureToTexture(
+ source: GPUImageCopyTexture,
+ destination: GPUImageCopyTexture,
+ copySize: GPUExtent3D,
+ expectation: 'Success' | 'FinishError' | 'SubmitError'
+ ): void {
+ const commandEncoder = this.device.createCommandEncoder();
+ commandEncoder.copyTextureToTexture(source, destination, copySize);
+
+ if (expectation === 'FinishError') {
+ this.expectValidationError(() => {
+ commandEncoder.finish();
+ });
+ } else {
+ const cmd = commandEncoder.finish();
+ this.expectValidationError(() => {
+ this.device.queue.submit([cmd]);
+ }, expectation === 'SubmitError');
+ }
+ }
+
+ GetPhysicalSubresourceSize(
+ dimension: GPUTextureDimension,
+ textureSize: Required<GPUExtent3DDict>,
+ format: GPUTextureFormat,
+ mipLevel: number
+ ): Required<GPUExtent3DDict> {
+ const virtualWidthAtLevel = Math.max(textureSize.width >> mipLevel, 1);
+ const virtualHeightAtLevel = Math.max(textureSize.height >> mipLevel, 1);
+ const physicalWidthAtLevel = align(virtualWidthAtLevel, kTextureFormatInfo[format].blockWidth);
+ const physicalHeightAtLevel = align(
+ virtualHeightAtLevel,
+ kTextureFormatInfo[format].blockHeight
+ );
+
+ switch (dimension) {
+ case '1d':
+ return { width: physicalWidthAtLevel, height: 1, depthOrArrayLayers: 1 };
+ case '2d':
+ return {
+ width: physicalWidthAtLevel,
+ height: physicalHeightAtLevel,
+ depthOrArrayLayers: textureSize.depthOrArrayLayers,
+ };
+ case '3d':
+ return {
+ width: physicalWidthAtLevel,
+ height: physicalHeightAtLevel,
+ depthOrArrayLayers: Math.max(textureSize.depthOrArrayLayers >> mipLevel, 1),
+ };
+ }
+ }
+}
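+
+// Worked example for GetPhysicalSubresourceSize above (illustrative only): for a
+// block-compressed format with a 4x4 block, a 30x30 texture at mipLevel 2 has a virtual size
+// of 7x7 (30 >> 2 === 7), which is rounded up to the 8x8 physical size that the copies below
+// are validated against:
+//   align(Math.max(30 >> 2, 1), 4) === 8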
+
+export const g = makeTestGroup(F);
+
+g.test('copy_with_invalid_or_destroyed_texture')
+  .desc('Test that copyTextureToTexture is an error when one of the textures is invalid or destroyed.')
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('srcState', kResourceStates)
+ .combine('dstState', kResourceStates)
+ )
+ .fn(t => {
+ const { srcState, dstState } = t.params;
+
+ const textureDesc: GPUTextureDescriptor = {
+ size: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ };
+
+ const srcTexture = t.createTextureWithState(srcState, textureDesc);
+ const dstTexture = t.createTextureWithState(dstState, textureDesc);
+
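+    // An 'invalid' texture already makes commandEncoder.finish() fail, while a 'destroyed'
+    // texture is still a valid object at encoding time, so its error only surfaces at submit.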
+ const isSubmitSuccess = srcState === 'valid' && dstState === 'valid';
+ const isFinishSuccess = srcState !== 'invalid' && dstState !== 'invalid';
+ const expectation = isFinishSuccess
+ ? isSubmitSuccess
+ ? 'Success'
+ : 'SubmitError'
+ : 'FinishError';
+
+ t.TestCopyTextureToTexture(
+ { texture: srcTexture },
+ { texture: dstTexture },
+ { width: 1, height: 1, depthOrArrayLayers: 1 },
+ expectation
+ );
+ });
+
+g.test('texture,device_mismatch')
+ .desc(
+    'Tests that copyTextureToTexture cannot be called with a src or dst texture created from another device.'
+ )
+ .paramsSubcasesOnly([
+ { srcMismatched: false, dstMismatched: false }, // control case
+ { srcMismatched: true, dstMismatched: false },
+ { srcMismatched: false, dstMismatched: true },
+ ] as const)
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { srcMismatched, dstMismatched } = t.params;
+
+ const size = { width: 4, height: 4, depthOrArrayLayers: 1 };
+ const format = 'rgba8unorm';
+
+ const srcTextureDevice = srcMismatched ? t.mismatchedDevice : t.device;
+ const srcTexture = srcTextureDevice.createTexture({
+ size,
+ format,
+ usage: GPUTextureUsage.COPY_SRC,
+ });
+ t.trackForCleanup(srcTexture);
+
+ const dstTextureDevice = dstMismatched ? t.mismatchedDevice : t.device;
+ const dstTexture = dstTextureDevice.createTexture({
+ size,
+ format,
+ usage: GPUTextureUsage.COPY_DST,
+ });
+ t.trackForCleanup(dstTexture);
+
+ t.TestCopyTextureToTexture(
+ { texture: srcTexture },
+ { texture: dstTexture },
+ { width: 1, height: 1, depthOrArrayLayers: 1 },
+ srcMismatched || dstMismatched ? 'FinishError' : 'Success'
+ );
+ });
+
+g.test('mipmap_level')
+ .desc(
+ `
+Test that copyTextureToTexture must specify mipLevels that are in range.
+- for various dimensions
+- for various mip level counts in the texture
+- for various copy target mip levels (in range and not in range)
+`
+ )
+ .params(u =>
+ u //
+ .combine('dimension', kTextureDimensions)
+ .beginSubcases()
+ .combineWithParams([
+ { srcLevelCount: 1, dstLevelCount: 1, srcCopyLevel: 0, dstCopyLevel: 0 },
+ { srcLevelCount: 1, dstLevelCount: 1, srcCopyLevel: 1, dstCopyLevel: 0 },
+ { srcLevelCount: 1, dstLevelCount: 1, srcCopyLevel: 0, dstCopyLevel: 1 },
+ { srcLevelCount: 3, dstLevelCount: 3, srcCopyLevel: 0, dstCopyLevel: 0 },
+ { srcLevelCount: 3, dstLevelCount: 3, srcCopyLevel: 2, dstCopyLevel: 0 },
+ { srcLevelCount: 3, dstLevelCount: 3, srcCopyLevel: 3, dstCopyLevel: 0 },
+ { srcLevelCount: 3, dstLevelCount: 3, srcCopyLevel: 0, dstCopyLevel: 2 },
+ { srcLevelCount: 3, dstLevelCount: 3, srcCopyLevel: 0, dstCopyLevel: 3 },
+ ] as const)
+ .unless(p => p.dimension === '1d' && (p.srcLevelCount !== 1 || p.dstLevelCount !== 1))
+ )
+
+ .fn(t => {
+ const { srcLevelCount, dstLevelCount, srcCopyLevel, dstCopyLevel, dimension } = t.params;
+
+ const srcTexture = t.device.createTexture({
+ size: { width: 32, height: 1, depthOrArrayLayers: 1 },
+ dimension,
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_SRC,
+ mipLevelCount: srcLevelCount,
+ });
+ const dstTexture = t.device.createTexture({
+ size: { width: 32, height: 1, depthOrArrayLayers: 1 },
+ dimension,
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_DST,
+ mipLevelCount: dstLevelCount,
+ });
+
+ const isSuccess = srcCopyLevel < srcLevelCount && dstCopyLevel < dstLevelCount;
+ t.TestCopyTextureToTexture(
+ { texture: srcTexture, mipLevel: srcCopyLevel },
+ { texture: dstTexture, mipLevel: dstCopyLevel },
+ { width: 1, height: 1, depthOrArrayLayers: 1 },
+ isSuccess ? 'Success' : 'FinishError'
+ );
+ });
+
+g.test('texture_usage')
+ .desc(
+ `
+Test that copyTextureToTexture source/destination need COPY_SRC/COPY_DST usages.
+- for all possible source texture usages
+- for all possible destination texture usages
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('srcUsage', kTextureUsages)
+ .combine('dstUsage', kTextureUsages)
+ )
+ .fn(t => {
+ const { srcUsage, dstUsage } = t.params;
+
+ const srcTexture = t.device.createTexture({
+ size: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ format: 'rgba8unorm',
+ usage: srcUsage,
+ });
+ const dstTexture = t.device.createTexture({
+ size: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ format: 'rgba8unorm',
+ usage: dstUsage,
+ });
+
+ const isSuccess =
+ srcUsage === GPUTextureUsage.COPY_SRC && dstUsage === GPUTextureUsage.COPY_DST;
+
+ t.TestCopyTextureToTexture(
+ { texture: srcTexture },
+ { texture: dstTexture },
+ { width: 1, height: 1, depthOrArrayLayers: 1 },
+ isSuccess ? 'Success' : 'FinishError'
+ );
+ });
+
+g.test('sample_count')
+ .desc(
+ `
+Test that textures in copyTextureToTexture must have the same sample count.
+- for various source texture sample counts
+- for various destination texture sample counts
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('srcSampleCount', [1, 4])
+ .combine('dstSampleCount', [1, 4])
+ )
+ .fn(t => {
+ const { srcSampleCount, dstSampleCount } = t.params;
+
+ const srcTexture = t.device.createTexture({
+ size: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ sampleCount: srcSampleCount,
+ });
+ const dstTexture = t.device.createTexture({
+ size: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
+ sampleCount: dstSampleCount,
+ });
+
+ const isSuccess = srcSampleCount === dstSampleCount;
+ t.TestCopyTextureToTexture(
+ { texture: srcTexture },
+ { texture: dstTexture },
+ { width: 4, height: 4, depthOrArrayLayers: 1 },
+ isSuccess ? 'Success' : 'FinishError'
+ );
+ });
+
+g.test('multisampled_copy_restrictions')
+ .desc(
+ `
+Test that copyTextureToTexture of multisampled texture must copy a whole subresource to a whole subresource.
+- for various origins for the source and destination of the copies.
+
+Note: this is only tested for 2D textures as it is the only dimension compatible with multisampling.
+TODO: Check the source and destination constraints separately.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('srcCopyOrigin', [
+ { x: 0, y: 0, z: 0 },
+ { x: 1, y: 0, z: 0 },
+ { x: 0, y: 1, z: 0 },
+ { x: 1, y: 1, z: 0 },
+ ])
+ .combine('dstCopyOrigin', [
+ { x: 0, y: 0, z: 0 },
+ { x: 1, y: 0, z: 0 },
+ { x: 0, y: 1, z: 0 },
+ { x: 1, y: 1, z: 0 },
+ ])
+ .expand('copyWidth', p => [32 - Math.max(p.srcCopyOrigin.x, p.dstCopyOrigin.x), 16])
+ .expand('copyHeight', p => [16 - Math.max(p.srcCopyOrigin.y, p.dstCopyOrigin.y), 8])
+ )
+ .fn(t => {
+ const { srcCopyOrigin, dstCopyOrigin, copyWidth, copyHeight } = t.params;
+
+ const kWidth = 32;
+ const kHeight = 16;
+
+    // Multisampled 2D array textures are currently unsupported, and multisampled textures must
+    // have a mip level count of 1.
+ const srcTexture = t.device.createTexture({
+ size: { width: kWidth, height: kHeight, depthOrArrayLayers: 1 },
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ sampleCount: 4,
+ });
+ const dstTexture = t.device.createTexture({
+ size: { width: kWidth, height: kHeight, depthOrArrayLayers: 1 },
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
+ sampleCount: 4,
+ });
+
+ const isSuccess = copyWidth === kWidth && copyHeight === kHeight;
+ t.TestCopyTextureToTexture(
+ { texture: srcTexture, origin: srcCopyOrigin },
+ { texture: dstTexture, origin: dstCopyOrigin },
+ { width: copyWidth, height: copyHeight, depthOrArrayLayers: 1 },
+ isSuccess ? 'Success' : 'FinishError'
+ );
+ });
+
+g.test('texture_format_compatibility')
+ .desc(
+ `
+Test that the formats of textures in copyTextureToTexture must be copy-compatible.
+- for all source texture formats
+- for all destination texture formats
+`
+ )
+ .params(u =>
+ u
+ .combine('srcFormatFeature', kFeaturesForFormats)
+ .combine('dstFormatFeature', kFeaturesForFormats)
+ .beginSubcases()
+ .expand('srcFormat', ({ srcFormatFeature }) =>
+ filterFormatsByFeature(srcFormatFeature, kTextureFormats)
+ )
+ .expand('dstFormat', ({ dstFormatFeature }) =>
+ filterFormatsByFeature(dstFormatFeature, kTextureFormats)
+ )
+ )
+ .beforeAllSubcases(t => {
+ const { srcFormatFeature, dstFormatFeature } = t.params;
+ t.selectDeviceOrSkipTestCase([srcFormatFeature, dstFormatFeature]);
+ })
+ .fn(t => {
+ const { srcFormat, dstFormat } = t.params;
+
+ t.skipIfTextureFormatNotSupported(srcFormat, dstFormat);
+ t.skipIfCopyTextureToTextureNotSupportedForFormat(srcFormat, dstFormat);
+
+ const srcFormatInfo = kTextureFormatInfo[srcFormat];
+ const dstFormatInfo = kTextureFormatInfo[dstFormat];
+
+ const textureSize = {
+ width: lcm(srcFormatInfo.blockWidth, dstFormatInfo.blockWidth),
+ height: lcm(srcFormatInfo.blockHeight, dstFormatInfo.blockHeight),
+ depthOrArrayLayers: 1,
+ };
+
+ const srcTexture = t.device.createTexture({
+ size: textureSize,
+ format: srcFormat,
+ usage: GPUTextureUsage.COPY_SRC,
+ });
+
+ const dstTexture = t.device.createTexture({
+ size: textureSize,
+ format: dstFormat,
+ usage: GPUTextureUsage.COPY_DST,
+ });
+
+ // Allow copy between compatible format textures.
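+    // For example, 'rgba8unorm' and 'rgba8unorm-srgb' share the base format 'rgba8unorm' and
+    // are copy-compatible, while 'rgba8unorm' and 'bgra8unorm' are not.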
+ const srcBaseFormat = kTextureFormatInfo[srcFormat].baseFormat ?? srcFormat;
+ const dstBaseFormat = kTextureFormatInfo[dstFormat].baseFormat ?? dstFormat;
+ const isSuccess = srcBaseFormat === dstBaseFormat;
+
+ t.TestCopyTextureToTexture(
+ { texture: srcTexture },
+ { texture: dstTexture },
+ textureSize,
+ isSuccess ? 'Success' : 'FinishError'
+ );
+ });
+
+g.test('depth_stencil_copy_restrictions')
+ .desc(
+ `
+Test that depth texture subresources must be copied in their entirety in copyTextureToTexture
+- for various depth-stencil formats
+- for various copy origin and size offsets
+- for various source and destination texture sizes
+- for various source and destination mip levels
+
+Note: this is only tested for 2D textures as it is the only dimension compatible with depth-stencil.
+`
+ )
+ .params(u =>
+ u
+ .combine('format', kDepthStencilFormats)
+ .beginSubcases()
+ .combine('copyBoxOffsets', [
+ { x: 0, y: 0, width: 0, height: 0 },
+ { x: 1, y: 0, width: 0, height: 0 },
+ { x: 0, y: 1, width: 0, height: 0 },
+ { x: 0, y: 0, width: -1, height: 0 },
+ { x: 0, y: 0, width: 0, height: -1 },
+ ])
+ .combine('srcTextureSize', [
+ { width: 64, height: 64, depthOrArrayLayers: 1 },
+ { width: 64, height: 32, depthOrArrayLayers: 1 },
+ { width: 32, height: 32, depthOrArrayLayers: 1 },
+ ])
+ .combine('dstTextureSize', [
+ { width: 64, height: 64, depthOrArrayLayers: 1 },
+ { width: 64, height: 32, depthOrArrayLayers: 1 },
+ { width: 32, height: 32, depthOrArrayLayers: 1 },
+ ])
+ .combine('srcCopyLevel', [1, 2])
+ .combine('dstCopyLevel', [0, 1])
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ t.selectDeviceOrSkipTestCase(kTextureFormatInfo[format].feature);
+ })
+ .fn(t => {
+ const { format, copyBoxOffsets, srcTextureSize, dstTextureSize, srcCopyLevel, dstCopyLevel } =
+ t.params;
+ const kMipLevelCount = 3;
+
+ const srcTexture = t.device.createTexture({
+ size: { width: srcTextureSize.width, height: srcTextureSize.height, depthOrArrayLayers: 1 },
+ format,
+ mipLevelCount: kMipLevelCount,
+ usage: GPUTextureUsage.COPY_SRC,
+ });
+ const dstTexture = t.device.createTexture({
+ size: { width: dstTextureSize.width, height: dstTextureSize.height, depthOrArrayLayers: 1 },
+ format,
+ mipLevelCount: kMipLevelCount,
+ usage: GPUTextureUsage.COPY_DST,
+ });
+
+ const srcSizeAtLevel = t.GetPhysicalSubresourceSize('2d', srcTextureSize, format, srcCopyLevel);
+ const dstSizeAtLevel = t.GetPhysicalSubresourceSize('2d', dstTextureSize, format, dstCopyLevel);
+
+ const copyOrigin = { x: copyBoxOffsets.x, y: copyBoxOffsets.y, z: 0 };
+
+ const copyWidth =
+ Math.min(srcSizeAtLevel.width, dstSizeAtLevel.width) + copyBoxOffsets.width - copyOrigin.x;
+ const copyHeight =
+ Math.min(srcSizeAtLevel.height, dstSizeAtLevel.height) + copyBoxOffsets.height - copyOrigin.y;
+
+ // Depth/stencil copies must copy whole subresources.
+ const isSuccess =
+ copyOrigin.x === 0 &&
+ copyOrigin.y === 0 &&
+ copyWidth === srcSizeAtLevel.width &&
+ copyHeight === srcSizeAtLevel.height &&
+ copyWidth === dstSizeAtLevel.width &&
+ copyHeight === dstSizeAtLevel.height;
+ t.TestCopyTextureToTexture(
+ { texture: srcTexture, origin: { x: 0, y: 0, z: 0 }, mipLevel: srcCopyLevel },
+ { texture: dstTexture, origin: copyOrigin, mipLevel: dstCopyLevel },
+ { width: copyWidth, height: copyHeight, depthOrArrayLayers: 1 },
+ isSuccess ? 'Success' : 'FinishError'
+ );
+ t.TestCopyTextureToTexture(
+ { texture: srcTexture, origin: copyOrigin, mipLevel: srcCopyLevel },
+ { texture: dstTexture, origin: { x: 0, y: 0, z: 0 }, mipLevel: dstCopyLevel },
+ { width: copyWidth, height: copyHeight, depthOrArrayLayers: 1 },
+ isSuccess ? 'Success' : 'FinishError'
+ );
+ });
+
+g.test('copy_ranges')
+ .desc(
+ `
+Test that copyTextureToTexture copy boxes must be in range of the subresource.
+- for various dimensions
+- for various offsets to a full copy for the copy origin/size
+- for various copy mip levels
+`
+ )
+ .params(u =>
+ u
+ .combine('dimension', kTextureDimensions)
+ //.beginSubcases()
+ .combine('copyBoxOffsets', [
+ { x: 0, y: 0, z: 0, width: 0, height: 0, depthOrArrayLayers: -2 },
+ { x: 1, y: 0, z: 0, width: 0, height: 0, depthOrArrayLayers: -2 },
+ { x: 1, y: 0, z: 0, width: -1, height: 0, depthOrArrayLayers: -2 },
+ { x: 0, y: 1, z: 0, width: 0, height: 0, depthOrArrayLayers: -2 },
+ { x: 0, y: 1, z: 0, width: 0, height: -1, depthOrArrayLayers: -2 },
+ { x: 0, y: 0, z: 1, width: 0, height: 1, depthOrArrayLayers: -2 },
+ { x: 0, y: 0, z: 2, width: 0, height: 1, depthOrArrayLayers: 0 },
+ { x: 0, y: 0, z: 0, width: 1, height: 0, depthOrArrayLayers: -2 },
+ { x: 0, y: 0, z: 0, width: 0, height: 1, depthOrArrayLayers: -2 },
+ { x: 0, y: 0, z: 0, width: 0, height: 0, depthOrArrayLayers: 1 },
+ { x: 0, y: 0, z: 0, width: 0, height: 0, depthOrArrayLayers: 0 },
+ { x: 0, y: 0, z: 1, width: 0, height: 0, depthOrArrayLayers: -1 },
+ { x: 0, y: 0, z: 2, width: 0, height: 0, depthOrArrayLayers: -1 },
+ ])
+ .unless(
+ p =>
+ p.dimension === '1d' &&
+ (p.copyBoxOffsets.y !== 0 ||
+ p.copyBoxOffsets.z !== 0 ||
+ p.copyBoxOffsets.height !== 0 ||
+ p.copyBoxOffsets.depthOrArrayLayers !== 0)
+ )
+ .combine('srcCopyLevel', [0, 1, 3])
+ .combine('dstCopyLevel', [0, 1, 3])
+ .unless(p => p.dimension === '1d' && (p.srcCopyLevel !== 0 || p.dstCopyLevel !== 0))
+ )
+ .fn(t => {
+ const { dimension, copyBoxOffsets, srcCopyLevel, dstCopyLevel } = t.params;
+
+ const textureSize = { width: 16, height: 8, depthOrArrayLayers: 3 };
+ let mipLevelCount = 4;
+ if (dimension === '1d') {
+ mipLevelCount = 1;
+ textureSize.height = 1;
+ textureSize.depthOrArrayLayers = 1;
+ }
+ const kFormat = 'rgba8unorm';
+
+ const srcTexture = t.device.createTexture({
+ size: textureSize,
+ format: kFormat,
+ dimension,
+ mipLevelCount,
+ usage: GPUTextureUsage.COPY_SRC,
+ });
+ const dstTexture = t.device.createTexture({
+ size: textureSize,
+ format: kFormat,
+ dimension,
+ mipLevelCount,
+ usage: GPUTextureUsage.COPY_DST,
+ });
+
+ const srcSizeAtLevel = t.GetPhysicalSubresourceSize(
+ dimension,
+ textureSize,
+ kFormat,
+ srcCopyLevel
+ );
+ const dstSizeAtLevel = t.GetPhysicalSubresourceSize(
+ dimension,
+ textureSize,
+ kFormat,
+ dstCopyLevel
+ );
+
+ const copyOrigin = { x: copyBoxOffsets.x, y: copyBoxOffsets.y, z: copyBoxOffsets.z };
+
+ const copyWidth = Math.max(
+ Math.min(srcSizeAtLevel.width, dstSizeAtLevel.width) + copyBoxOffsets.width - copyOrigin.x,
+ 0
+ );
+ const copyHeight = Math.max(
+ Math.min(srcSizeAtLevel.height, dstSizeAtLevel.height) + copyBoxOffsets.height - copyOrigin.y,
+ 0
+ );
+ const copyDepth =
+ textureSize.depthOrArrayLayers + copyBoxOffsets.depthOrArrayLayers - copyOrigin.z;
+
+ {
+ let isSuccess =
+ copyWidth <= srcSizeAtLevel.width &&
+ copyHeight <= srcSizeAtLevel.height &&
+ copyOrigin.x + copyWidth <= dstSizeAtLevel.width &&
+ copyOrigin.y + copyHeight <= dstSizeAtLevel.height;
+
+ if (dimension === '3d') {
+ isSuccess =
+ isSuccess &&
+ copyDepth <= srcSizeAtLevel.depthOrArrayLayers &&
+ copyOrigin.z + copyDepth <= dstSizeAtLevel.depthOrArrayLayers;
+ } else {
+ isSuccess =
+ isSuccess &&
+ copyDepth <= textureSize.depthOrArrayLayers &&
+ copyOrigin.z + copyDepth <= textureSize.depthOrArrayLayers;
+ }
+
+ t.TestCopyTextureToTexture(
+ { texture: srcTexture, origin: { x: 0, y: 0, z: 0 }, mipLevel: srcCopyLevel },
+ { texture: dstTexture, origin: copyOrigin, mipLevel: dstCopyLevel },
+ { width: copyWidth, height: copyHeight, depthOrArrayLayers: copyDepth },
+ isSuccess ? 'Success' : 'FinishError'
+ );
+ }
+
+ {
+ let isSuccess =
+ copyOrigin.x + copyWidth <= srcSizeAtLevel.width &&
+ copyOrigin.y + copyHeight <= srcSizeAtLevel.height &&
+ copyWidth <= dstSizeAtLevel.width &&
+ copyHeight <= dstSizeAtLevel.height;
+
+ if (dimension === '3d') {
+ isSuccess =
+ isSuccess &&
+ copyDepth <= dstSizeAtLevel.depthOrArrayLayers &&
+ copyOrigin.z + copyDepth <= srcSizeAtLevel.depthOrArrayLayers;
+ } else {
+ isSuccess =
+ isSuccess &&
+ copyDepth <= textureSize.depthOrArrayLayers &&
+ copyOrigin.z + copyDepth <= textureSize.depthOrArrayLayers;
+ }
+
+ t.TestCopyTextureToTexture(
+ { texture: srcTexture, origin: copyOrigin, mipLevel: srcCopyLevel },
+ { texture: dstTexture, origin: { x: 0, y: 0, z: 0 }, mipLevel: dstCopyLevel },
+ { width: copyWidth, height: copyHeight, depthOrArrayLayers: copyDepth },
+ isSuccess ? 'Success' : 'FinishError'
+ );
+ }
+ });
+
+g.test('copy_within_same_texture')
+ .desc(
+ `
+Test that it is an error to use copyTextureToTexture from one subresource to itself.
+- for various starting source/destination array layers.
+- for various copy sizes in number of array layers
+
+TODO: Extend to check the copy is allowed between different mip levels.
+TODO: Extend to 1D and 3D textures.`
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('srcCopyOriginZ', [0, 2, 4])
+ .combine('dstCopyOriginZ', [0, 2, 4])
+ .combine('copyExtentDepth', [1, 2, 3])
+ )
+ .fn(t => {
+ const { srcCopyOriginZ, dstCopyOriginZ, copyExtentDepth } = t.params;
+
+ const kArrayLayerCount = 7;
+
+ const testTexture = t.device.createTexture({
+ size: { width: 16, height: 16, depthOrArrayLayers: kArrayLayerCount },
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ });
+
+ const isSuccess =
+ Math.min(srcCopyOriginZ, dstCopyOriginZ) + copyExtentDepth <=
+ Math.max(srcCopyOriginZ, dstCopyOriginZ);
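+    // e.g. with srcCopyOriginZ 0, dstCopyOriginZ 2 and copyExtentDepth 2, layers [0, 1] are
+    // copied to layers [2, 3] with no overlap, so the copy is valid; copyExtentDepth 3 would
+    // overlap on layer 2 and must fail.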
+ t.TestCopyTextureToTexture(
+ { texture: testTexture, origin: { x: 0, y: 0, z: srcCopyOriginZ } },
+ { texture: testTexture, origin: { x: 0, y: 0, z: dstCopyOriginZ } },
+ { width: 16, height: 16, depthOrArrayLayers: copyExtentDepth },
+ isSuccess ? 'Success' : 'FinishError'
+ );
+ });
+
+g.test('copy_aspects')
+ .desc(
+ `
+Test the validation rules on the member 'aspect' of GPUImageCopyTexture in copyTextureToTexture().
+- for all the color and depth-stencil formats: the texture copy aspects must be both 'all'.
+- for all the depth-only formats: the texture copy aspects must be either 'all' or 'depth-only'.
+- for all the stencil-only formats: the texture copy aspects must be either 'all' or 'stencil-only'.
+`
+ )
+ .params(u =>
+ u
+ .combine('format', ['rgba8unorm', ...kDepthStencilFormats] as const)
+ .beginSubcases()
+ .combine('sourceAspect', ['all', 'depth-only', 'stencil-only'] as const)
+ .combine('destinationAspect', ['all', 'depth-only', 'stencil-only'] as const)
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ t.selectDeviceOrSkipTestCase(kTextureFormatInfo[format].feature);
+ })
+ .fn(t => {
+ const { format, sourceAspect, destinationAspect } = t.params;
+
+ const kTextureSize = { width: 16, height: 8, depthOrArrayLayers: 1 };
+
+ const srcTexture = t.device.createTexture({
+ size: kTextureSize,
+ format,
+ usage: GPUTextureUsage.COPY_SRC,
+ });
+ const dstTexture = t.device.createTexture({
+ size: kTextureSize,
+ format,
+ usage: GPUTextureUsage.COPY_DST,
+ });
+
+ // MAINTENANCE_TODO: get the valid aspects from capability_info.ts.
+ const kValidAspectsForFormat = {
+ rgba8unorm: ['all'],
+
+ // kUnsizedDepthStencilFormats
+ depth24plus: ['all', 'depth-only'],
+ 'depth24plus-stencil8': ['all'],
+ 'depth32float-stencil8': ['all'],
+
+ // kSizedDepthStencilFormats
+ depth32float: ['all', 'depth-only'],
+ stencil8: ['all', 'stencil-only'],
+ depth16unorm: ['all', 'depth-only'],
+ };
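+    // copyTextureToTexture requires the selected aspect to refer to every aspect of the format,
+    // which is why single-aspect formats also accept their matching '-only' aspect while
+    // combined depth-stencil formats only accept 'all'.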
+
+ const isSourceAspectValid = kValidAspectsForFormat[format].includes(sourceAspect);
+ const isDestinationAspectValid = kValidAspectsForFormat[format].includes(destinationAspect);
+
+ t.TestCopyTextureToTexture(
+ { texture: srcTexture, origin: { x: 0, y: 0, z: 0 }, aspect: sourceAspect },
+ { texture: dstTexture, origin: { x: 0, y: 0, z: 0 }, aspect: destinationAspect },
+ kTextureSize,
+ isSourceAspectValid && isDestinationAspectValid ? 'Success' : 'FinishError'
+ );
+ });
+
+g.test('copy_ranges_with_compressed_texture_formats')
+ .desc(
+ `
+Test that copyTextureToTexture copy boxes must be in range of the subresource and aligned to the block size
+- for various dimensions
+- for various offsets to a full copy for the copy origin/size
+- for various copy mip levels
+
+TODO: Express the offsets in "block size" so as to be able to test non-4x4 compressed formats
+`
+ )
+ .params(u =>
+ u
+ .combine('format', kCompressedTextureFormats)
+ .combine('dimension', kTextureDimensions)
+ .filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format))
+ .beginSubcases()
+ .combine('copyBoxOffsets', [
+ { x: 0, y: 0, z: 0, width: 0, height: 0, depthOrArrayLayers: -2 },
+ { x: 1, y: 0, z: 0, width: 0, height: 0, depthOrArrayLayers: -2 },
+ { x: 4, y: 0, z: 0, width: 0, height: 0, depthOrArrayLayers: -2 },
+ { x: 0, y: 0, z: 0, width: -1, height: 0, depthOrArrayLayers: -2 },
+ { x: 0, y: 0, z: 0, width: -4, height: 0, depthOrArrayLayers: -2 },
+ { x: 0, y: 1, z: 0, width: 0, height: 0, depthOrArrayLayers: -2 },
+ { x: 0, y: 4, z: 0, width: 0, height: 0, depthOrArrayLayers: -2 },
+ { x: 0, y: 0, z: 0, width: 0, height: -1, depthOrArrayLayers: -2 },
+ { x: 0, y: 0, z: 0, width: 0, height: -4, depthOrArrayLayers: -2 },
+ { x: 0, y: 0, z: 0, width: 0, height: 0, depthOrArrayLayers: 0 },
+ { x: 0, y: 0, z: 1, width: 0, height: 0, depthOrArrayLayers: -1 },
+ ])
+ .combine('srcCopyLevel', [0, 1, 2])
+ .combine('dstCopyLevel', [0, 1, 2])
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ t.selectDeviceOrSkipTestCase(kTextureFormatInfo[format].feature);
+ t.skipIfCopyTextureToTextureNotSupportedForFormat(format);
+ })
+ .fn(t => {
+ const { format, dimension, copyBoxOffsets, srcCopyLevel, dstCopyLevel } = t.params;
+ const { blockWidth, blockHeight } = kTextureFormatInfo[format];
+
+ const kTextureSize = {
+ width: 15 * blockWidth,
+ height: 12 * blockHeight,
+ depthOrArrayLayers: 3,
+ };
+ const kMipLevelCount = 4;
+
+ const srcTexture = t.device.createTexture({
+ size: kTextureSize,
+ format,
+ dimension,
+ mipLevelCount: kMipLevelCount,
+ usage: GPUTextureUsage.COPY_SRC,
+ });
+ const dstTexture = t.device.createTexture({
+ size: kTextureSize,
+ format,
+ dimension,
+ mipLevelCount: kMipLevelCount,
+ usage: GPUTextureUsage.COPY_DST,
+ });
+
+ const srcSizeAtLevel = t.GetPhysicalSubresourceSize(
+ dimension,
+ kTextureSize,
+ format,
+ srcCopyLevel
+ );
+ const dstSizeAtLevel = t.GetPhysicalSubresourceSize(
+ dimension,
+ kTextureSize,
+ format,
+ dstCopyLevel
+ );
+
+ const copyOrigin = { x: copyBoxOffsets.x, y: copyBoxOffsets.y, z: copyBoxOffsets.z };
+
+ const copyWidth = Math.max(
+ Math.min(srcSizeAtLevel.width, dstSizeAtLevel.width) + copyBoxOffsets.width - copyOrigin.x,
+ 0
+ );
+ const copyHeight = Math.max(
+ Math.min(srcSizeAtLevel.height, dstSizeAtLevel.height) + copyBoxOffsets.height - copyOrigin.y,
+ 0
+ );
+ const copyDepth =
+ kTextureSize.depthOrArrayLayers + copyBoxOffsets.depthOrArrayLayers - copyOrigin.z;
+
+ const texelBlockWidth = kTextureFormatInfo[format].blockWidth;
+ const texelBlockHeight = kTextureFormatInfo[format].blockHeight;
+
+ const isSuccessForCompressedFormats =
+ copyOrigin.x % texelBlockWidth === 0 &&
+ copyOrigin.y % texelBlockHeight === 0 &&
+ copyWidth % texelBlockWidth === 0 &&
+ copyHeight % texelBlockHeight === 0;
+
+ {
+ const isSuccess =
+ isSuccessForCompressedFormats &&
+ copyWidth <= srcSizeAtLevel.width &&
+ copyHeight <= srcSizeAtLevel.height &&
+ copyOrigin.x + copyWidth <= dstSizeAtLevel.width &&
+ copyOrigin.y + copyHeight <= dstSizeAtLevel.height &&
+ copyOrigin.z + copyDepth <= kTextureSize.depthOrArrayLayers;
+
+ t.TestCopyTextureToTexture(
+ { texture: srcTexture, origin: { x: 0, y: 0, z: 0 }, mipLevel: srcCopyLevel },
+ { texture: dstTexture, origin: copyOrigin, mipLevel: dstCopyLevel },
+ { width: copyWidth, height: copyHeight, depthOrArrayLayers: copyDepth },
+ isSuccess ? 'Success' : 'FinishError'
+ );
+ }
+
+ {
+ const isSuccess =
+ isSuccessForCompressedFormats &&
+ copyOrigin.x + copyWidth <= srcSizeAtLevel.width &&
+ copyOrigin.y + copyHeight <= srcSizeAtLevel.height &&
+ copyWidth <= dstSizeAtLevel.width &&
+ copyHeight <= dstSizeAtLevel.height &&
+ copyOrigin.z + copyDepth <= kTextureSize.depthOrArrayLayers;
+
+ t.TestCopyTextureToTexture(
+ { texture: srcTexture, origin: copyOrigin, mipLevel: srcCopyLevel },
+ { texture: dstTexture, origin: { x: 0, y: 0, z: 0 }, mipLevel: dstCopyLevel },
+ { width: copyWidth, height: copyHeight, depthOrArrayLayers: copyDepth },
+ isSuccess ? 'Success' : 'FinishError'
+ );
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/debug.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/debug.spec.ts
new file mode 100644
index 0000000000..6032364dc2
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/debug.spec.ts
@@ -0,0 +1,66 @@
+export const description = `
+API validation test for debug groups and markers
+
+Test Coverage:
+  - For each encoder type (GPUCommandEncoder, GPUComputePassEncoder, GPURenderPassEncoder,
+ GPURenderBundleEncoder):
+ - Test that all pushDebugGroup must have a corresponding popDebugGroup
+ - Push and pop counts of 0, 1, and 2 will be used.
+      - An error must be generated for non-matching counts.
+ - Test calling pushDebugGroup with empty and non-empty strings.
+ - Test inserting a debug marker with empty and non-empty strings.
+ - Test strings with \0 in them.
+ - Test non-ASCII strings.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { kEncoderTypes } from '../../../../util/command_buffer_maker.js';
+import { ValidationTest } from '../../validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('debug_group_balanced')
+ .params(u =>
+ u
+ .combine('encoderType', kEncoderTypes)
+ .beginSubcases()
+ .combine('pushCount', [0, 1, 2])
+ .combine('popCount', [0, 1, 2])
+ )
+ .fn(t => {
+ const { encoder, validateFinishAndSubmit } = t.createEncoder(t.params.encoderType);
+ for (let i = 0; i < t.params.pushCount; ++i) {
+ encoder.pushDebugGroup(`${i}`);
+ }
+ for (let i = 0; i < t.params.popCount; ++i) {
+ encoder.popDebugGroup();
+ }
+ validateFinishAndSubmit(t.params.pushCount === t.params.popCount, true);
+ });
+
+g.test('debug_group')
+ .params(u =>
+ u //
+ .combine('encoderType', kEncoderTypes)
+ .beginSubcases()
+ .combine('label', ['', 'group', 'null\0in\0group\0label', '\0null at beginning', '🌞👆'])
+ )
+ .fn(t => {
+ const { encoder, validateFinishAndSubmit } = t.createEncoder(t.params.encoderType);
+ encoder.pushDebugGroup(t.params.label);
+ encoder.popDebugGroup();
+ validateFinishAndSubmit(true, true);
+ });
+
+g.test('debug_marker')
+ .params(u =>
+ u //
+ .combine('encoderType', kEncoderTypes)
+ .beginSubcases()
+ .combine('label', ['', 'marker', 'null\0in\0marker', '\0null at beginning', '🌞👆'])
+ )
+ .fn(t => {
+ const { encoder, validateFinishAndSubmit } = t.createEncoder(t.params.encoderType);
+ encoder.insertDebugMarker(t.params.label);
+ validateFinishAndSubmit(true, true);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/index_access.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/index_access.spec.ts
new file mode 100644
index 0000000000..cdd7159d15
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/index_access.spec.ts
@@ -0,0 +1,162 @@
+export const description = `
+Validation tests for indexed draws accessing the index buffer.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { ValidationTest } from '../../validation_test.js';
+
+class F extends ValidationTest {
+ createIndexBuffer(indexData: Iterable<number>): GPUBuffer {
+ return this.makeBufferWithContents(new Uint32Array(indexData), GPUBufferUsage.INDEX);
+ }
+
+ createRenderPipeline(): GPURenderPipeline {
+ return this.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: this.device.createShaderModule({
+ code: `
+ @vertex fn main() -> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: this.device.createShaderModule({
+ code: `
+ @fragment fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ primitive: {
+ topology: 'triangle-strip',
+ stripIndexFormat: 'uint32',
+ },
+ });
+ }
+
+ beginRenderPass(encoder: GPUCommandEncoder) {
+ const colorAttachment = this.device.createTexture({
+ format: 'rgba8unorm',
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ return encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorAttachment.createView(),
+ clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ }
+
+ drawIndexed(
+ indexBuffer: GPUBuffer,
+ indexCount: number,
+ instanceCount: number,
+ firstIndex: number,
+ baseVertex: number,
+ firstInstance: number,
+ isSuccess: boolean
+ ) {
+ const pipeline = this.createRenderPipeline();
+
+ const encoder = this.device.createCommandEncoder();
+ const pass = this.beginRenderPass(encoder);
+ pass.setPipeline(pipeline);
+ pass.setIndexBuffer(indexBuffer, 'uint32');
+ pass.drawIndexed(indexCount, instanceCount, firstIndex, baseVertex, firstInstance);
+ pass.end();
+
+ if (isSuccess) {
+ this.device.queue.submit([encoder.finish()]);
+ } else {
+ this.expectValidationError(() => {
+ encoder.finish();
+ });
+ }
+ }
+}
+
+export const g = makeTestGroup(F);
+
+g.test('out_of_bounds')
+ .desc(
+    `Test drawing with out-of-bounds index access to make sure encoder validation catches the
+    following indexCount and firstIndex OOB conditions
+    - either is within bounds but indexCount + firstIndex is out of bounds
+ - only firstIndex is out of bound
+ - only indexCount is out of bound
+ - firstIndex much larger than indexCount
+ - indexCount much larger than firstIndex
+ - max uint32 value for both to make sure the sum doesn't overflow
+ - max uint32 indexCount and small firstIndex
+ - max uint32 firstIndex and small indexCount
+ Together with normal and large instanceCount`
+ )
+ .params(
+ u =>
+ u
+ .combineWithParams([
+          { indexCount: 6, firstIndex: 0 }, // draw all 6 out of 6 indices
+          { indexCount: 5, firstIndex: 1 }, // draw the last 5 out of 6 indices
+          { indexCount: 1, firstIndex: 5 }, // draw the last 1 out of 6 indices
+          { indexCount: 0, firstIndex: 6 }, // firstIndex points to the one after the last, but (indexCount + firstIndex) * stride <= bufferSize, valid
+ { indexCount: 0, firstIndex: 7 }, // (indexCount + firstIndex) * stride > bufferSize, invalid
+ { indexCount: 7, firstIndex: 0 }, // only indexCount out of bound
+ { indexCount: 6, firstIndex: 1 }, // indexCount + firstIndex out of bound
+ { indexCount: 1, firstIndex: 6 }, // indexCount valid, but (indexCount + firstIndex) out of bound
+ { indexCount: 6, firstIndex: 10000 }, // firstIndex much larger than the bound
+ { indexCount: 10000, firstIndex: 0 }, // indexCount much larger than the bound
+ { indexCount: 0xffffffff, firstIndex: 0xffffffff }, // max uint32 value
+ { indexCount: 0xffffffff, firstIndex: 2 }, // max uint32 indexCount and small firstIndex
+ { indexCount: 2, firstIndex: 0xffffffff }, // small indexCount and max uint32 firstIndex
+ ] as const)
+ .combine('instanceCount', [1, 10000]) // normal and large instanceCount
+ )
+ .fn(t => {
+ const { indexCount, firstIndex, instanceCount } = t.params;
+
+ const indexBuffer = t.createIndexBuffer([0, 1, 2, 3, 1, 2]);
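+    // The index buffer holds 6 uint32 indices (24 bytes), so a draw stays in bounds only
+    // while indexCount + firstIndex <= 6.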
+ const isSuccess: boolean = indexCount + firstIndex <= 6;
+
+ t.drawIndexed(indexBuffer, indexCount, instanceCount, firstIndex, 0, 0, isSuccess);
+ });
+
+g.test('out_of_bounds_zero_sized_index_buffer')
+ .desc(
+    `Test drawing with an empty index buffer to make sure the encoder validation catches the
+ following indexCount and firstIndex conditions
+ - indexCount + firstIndex is out of bound
+ - indexCount is 0 but firstIndex is out of bound
+ - only indexCount is out of bound
+ - both are 0s (not out of bound) but index buffer size is 0
+ Together with normal and large instanceCount`
+ )
+ .params(
+ u =>
+ u
+ .combineWithParams([
+ { indexCount: 3, firstIndex: 1 }, // indexCount + firstIndex out of bound
+ { indexCount: 0, firstIndex: 1 }, // indexCount is 0 but firstIndex out of bound
+ { indexCount: 3, firstIndex: 0 }, // only indexCount out of bound
+ { indexCount: 0, firstIndex: 0 }, // just zeros, valid
+ ] as const)
+ .combine('instanceCount', [1, 10000]) // normal and large instanceCount
+ )
+ .fn(t => {
+ const { indexCount, firstIndex, instanceCount } = t.params;
+
+ const indexBuffer = t.createIndexBuffer([]);
+ const isSuccess: boolean = indexCount + firstIndex <= 0;
+
+ t.drawIndexed(indexBuffer, indexCount, instanceCount, firstIndex, 0, 0, isSuccess);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/draw.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/draw.spec.ts
new file mode 100644
index 0000000000..147c2f3fd0
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/draw.spec.ts
@@ -0,0 +1,877 @@
+export const description = `
+Here we test the validation for draw functions, mainly buffer access validation. All four types
+of draw calls are tested, checking that validation errors do / don't occur for given call types
+and parameters as expected.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { kVertexFormatInfo } from '../../../../../capability_info.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { ValidationTest } from '../../../validation_test.js';
+
+type VertexAttrib<A> = A & { shaderLocation: number };
+type VertexBuffer<V, A> = V & {
+ slot: number;
+ attributes: VertexAttrib<A>[];
+};
+type VertexState<V, A> = VertexBuffer<V, A>[];
+
+type VertexLayoutState<V, A> = VertexState<
+ { stepMode: GPUVertexStepMode; arrayStride: number } & V,
+ { format: GPUVertexFormat; offset: number } & A
+>;
+
+interface DrawIndexedParameter {
+ indexCount: number;
+ instanceCount?: number;
+ firstIndex?: number;
+ baseVertex?: number;
+ firstInstance?: number;
+}
+
+function callDrawIndexed(
+ test: GPUTest,
+ encoder: GPURenderCommandsMixin,
+ drawType: 'drawIndexed' | 'drawIndexedIndirect',
+ param: DrawIndexedParameter
+) {
+ switch (drawType) {
+ case 'drawIndexed': {
+ encoder.drawIndexed(
+ param.indexCount,
+ param.instanceCount ?? 1,
+ param.firstIndex ?? 0,
+ param.baseVertex ?? 0,
+ param.firstInstance ?? 0
+ );
+ break;
+ }
+ case 'drawIndexedIndirect': {
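+      // drawIndexedIndirect consumes five consecutive u32 values from the indirect buffer:
+      // indexCount, instanceCount, firstIndex, baseVertex, firstInstance (20 bytes total).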
+ const indirectArray = new Int32Array([
+ param.indexCount,
+ param.instanceCount ?? 1,
+ param.firstIndex ?? 0,
+ param.baseVertex ?? 0,
+ param.firstInstance ?? 0,
+ ]);
+ const indirectBuffer = test.makeBufferWithContents(indirectArray, GPUBufferUsage.INDIRECT);
+ encoder.drawIndexedIndirect(indirectBuffer, 0);
+ break;
+ }
+ }
+}
+interface DrawParameter {
+ vertexCount: number;
+ instanceCount?: number;
+ firstVertex?: number;
+ firstInstance?: number;
+}
+
+function callDraw(
+ test: GPUTest,
+ encoder: GPURenderCommandsMixin,
+ drawType: 'draw' | 'drawIndirect',
+ param: DrawParameter
+) {
+ switch (drawType) {
+ case 'draw': {
+ encoder.draw(
+ param.vertexCount,
+ param.instanceCount ?? 1,
+ param.firstVertex ?? 0,
+ param.firstInstance ?? 0
+ );
+ break;
+ }
+ case 'drawIndirect': {
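+      // drawIndirect consumes four consecutive u32 values from the indirect buffer:
+      // vertexCount, instanceCount, firstVertex, firstInstance (16 bytes total).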
+ const indirectArray = new Int32Array([
+ param.vertexCount,
+ param.instanceCount ?? 1,
+ param.firstVertex ?? 0,
+ param.firstInstance ?? 0,
+ ]);
+ const indirectBuffer = test.makeBufferWithContents(indirectArray, GPUBufferUsage.INDIRECT);
+ encoder.drawIndirect(indirectBuffer, 0);
+ break;
+ }
+ }
+}
+
+function makeTestPipeline(
+ test: ValidationTest,
+ buffers: VertexState<
+ { stepMode: GPUVertexStepMode; arrayStride: number },
+ {
+ offset: number;
+ format: GPUVertexFormat;
+ }
+ >
+): GPURenderPipeline {
+ const bufferLayouts: GPUVertexBufferLayout[] = [];
+ for (const b of buffers) {
+ bufferLayouts[b.slot] = b;
+ }
+
+ return test.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: test.device.createShaderModule({
+ code: test.getNoOpShaderCode('VERTEX'),
+ }),
+ entryPoint: 'main',
+ buffers: bufferLayouts,
+ },
+ fragment: {
+ module: test.device.createShaderModule({
+ code: test.getNoOpShaderCode('FRAGMENT'),
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm', writeMask: 0 }],
+ },
+ primitive: { topology: 'triangle-list' },
+ });
+}
+
+function makeTestPipelineWithVertexAndInstanceBuffer(
+ test: ValidationTest,
+ arrayStride: number,
+ attributeFormat: GPUVertexFormat,
+ attributeOffset: number = 0
+): GPURenderPipeline {
+ const vertexBufferLayouts: VertexLayoutState<{}, {}> = [
+ {
+ slot: 1,
+ stepMode: 'vertex',
+ arrayStride,
+ attributes: [
+ {
+ shaderLocation: 2,
+ format: attributeFormat,
+ offset: attributeOffset,
+ },
+ ],
+ },
+ {
+ slot: 7,
+ stepMode: 'instance',
+ arrayStride,
+ attributes: [
+ {
+ shaderLocation: 6,
+ format: attributeFormat,
+ offset: attributeOffset,
+ },
+ ],
+ },
+ ];
+
+ return makeTestPipeline(test, vertexBufferLayouts);
+}
+
+// Default parameters for all kinds of draw calls, arbitrary non-zero values that are not very large.
+const kDefaultParameterForDraw = {
+ instanceCount: 100,
+ firstInstance: 100,
+};
+
+// Default parameters for non-indexed draws, arbitrary non-zero values that are not very large.
+const kDefaultParameterForNonIndexedDraw = {
+ vertexCount: 100,
+ firstVertex: 100,
+};
+
+// Default parameters for indexed draw calls and the required index buffer, arbitrary non-zero
+// values that are not very large.
+const kDefaultParameterForIndexedDraw = {
+ indexCount: 100,
+ firstIndex: 100,
+ baseVertex: 100,
+ indexFormat: 'uint16' as GPUIndexFormat,
+ indexBufferSize: 2 * 200, // exact required bound size for index buffer
+};
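+// With 'uint16' indices, firstIndex (100) + indexCount (100) = 200 index entries of 2 bytes
+// each, hence the exact 2 * 200 bound size above.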
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test(`unused_buffer_bound`)
+ .desc(
+ `
+In this test we check that a small buffer bound to an unused buffer slot won't cause a validation error.
+- All draw commands,
+  - An unused {index, vertex} buffer with a uselessly small range is bound (immediately before the draw
+ call)
+`
+ )
+ .params(u =>
+ u //
+ .combine('smallIndexBuffer', [false, true])
+ .combine('smallVertexBuffer', [false, true])
+ .combine('smallInstanceBuffer', [false, true])
+ .beginSubcases()
+ .combine('drawType', ['draw', 'drawIndexed', 'drawIndirect', 'drawIndexedIndirect'] as const)
+ .unless(
+        // Always provide an index buffer of sufficient size if it is used by an indexed draw
+ p =>
+ p.smallIndexBuffer &&
+ (p.drawType === 'drawIndexed' || p.drawType === 'drawIndexedIndirect')
+ )
+ .combine('bufferOffset', [0, 4])
+ .combine('boundSize', [0, 1])
+ )
+ .fn(t => {
+ const {
+ smallIndexBuffer,
+ smallVertexBuffer,
+ smallInstanceBuffer,
+ drawType,
+ bufferOffset,
+ boundSize,
+ } = t.params;
+ const renderPipeline = t.createNoOpRenderPipeline();
+ const bufferSize = bufferOffset + boundSize;
+ const smallBuffer = t.createBufferWithState('valid', {
+ size: bufferSize,
+ usage: GPUBufferUsage.INDEX | GPUBufferUsage.VERTEX,
+ });
+
+    // An index buffer of sufficient size, used when smallIndexBuffer === false
+ const { indexFormat, indexBufferSize } = kDefaultParameterForIndexedDraw;
+ const indexBuffer = t.createBufferWithState('valid', {
+ size: indexBufferSize,
+ usage: GPUBufferUsage.INDEX,
+ });
+
+ for (const encoderType of ['render bundle', 'render pass'] as const) {
+ for (const setPipelineBeforeBuffer of [false, true]) {
+ const commandBufferMaker = t.createEncoder(encoderType);
+ const renderEncoder = commandBufferMaker.encoder;
+
+ if (setPipelineBeforeBuffer) {
+ renderEncoder.setPipeline(renderPipeline);
+ }
+
+ if (drawType === 'drawIndexed' || drawType === 'drawIndexedIndirect') {
+ // Always use large enough index buffer for indexed draw. Index buffer OOB validation is
+ // tested in index_buffer_OOB.
+ renderEncoder.setIndexBuffer(indexBuffer, indexFormat, 0, indexBufferSize);
+ } else if (smallIndexBuffer) {
+ renderEncoder.setIndexBuffer(smallBuffer, indexFormat, bufferOffset, boundSize);
+ }
+ if (smallVertexBuffer) {
+ renderEncoder.setVertexBuffer(1, smallBuffer, bufferOffset, boundSize);
+ }
+ if (smallInstanceBuffer) {
+ renderEncoder.setVertexBuffer(7, smallBuffer, bufferOffset, boundSize);
+ }
+
+ if (!setPipelineBeforeBuffer) {
+ renderEncoder.setPipeline(renderPipeline);
+ }
+
+ if (drawType === 'draw' || drawType === 'drawIndirect') {
+ const drawParam: DrawParameter = {
+ ...kDefaultParameterForDraw,
+ ...kDefaultParameterForNonIndexedDraw,
+ };
+ callDraw(t, renderEncoder, drawType, drawParam);
+ } else {
+ const drawParam: DrawIndexedParameter = {
+ ...kDefaultParameterForDraw,
+ ...kDefaultParameterForIndexedDraw,
+ };
+ callDrawIndexed(t, renderEncoder, drawType, drawParam);
+ }
+
+        // Binding an unused small index/vertex buffer never causes a validation error.
+ commandBufferMaker.validateFinishAndSubmit(true, true);
+ }
+ }
+ });
+
+g.test(`index_buffer_OOB`)
+ .desc(
+ `
+In this test we check that an index buffer OOB access is caught as a validation error in drawIndexed,
+but not in drawIndexedIndirect, as the latter is GPU-validated.
+- Issue an indexed draw call, with the following index buffer states, for {all index formats}:
+ - range and GPUBuffer are exactly the required size for the draw call
+ - range is too small but GPUBuffer is still large enough
+ - range and GPUBuffer are both too small
+`
+ )
+ .params(u =>
+ u
+ .combine('bufferSizeInElements', [10, 100])
+      // Binding size never exceeds the buffer size, so that setIndexBuffer always succeeds
+ .combine('bindingSizeInElements', [10])
+ .combine('drawIndexCount', [10, 11])
+ .combine('drawType', ['drawIndexed', 'drawIndexedIndirect'] as const)
+ .beginSubcases()
+ .combine('indexFormat', ['uint16', 'uint32'] as GPUIndexFormat[])
+ )
+ .fn(t => {
+ const { indexFormat, bindingSizeInElements, bufferSizeInElements, drawIndexCount, drawType } =
+ t.params;
+
+ const indexElementSize = indexFormat === 'uint16' ? 2 : 4;
+ const bindingSize = bindingSizeInElements * indexElementSize;
+ const bufferSize = bufferSizeInElements * indexElementSize;
+
+ const desc: GPUBufferDescriptor = {
+ size: bufferSize,
+ usage: GPUBufferUsage.INDEX | GPUBufferUsage.COPY_DST,
+ };
+ const indexBuffer = t.createBufferWithState('valid', desc);
+
+ const drawCallParam: DrawIndexedParameter = {
+ indexCount: drawIndexCount,
+ };
+
+    // Encoder finish() succeeds if there is no OOB index buffer access when calling drawIndexed,
+    // and always succeeds when calling drawIndexedIndirect.
+ const isFinishSuccess =
+ drawIndexCount <= bindingSizeInElements || drawType === 'drawIndexedIndirect';
+
+ const renderPipeline = t.createNoOpRenderPipeline();
+
+ for (const encoderType of ['render bundle', 'render pass'] as const) {
+ for (const setPipelineBeforeBuffer of [false, true]) {
+ const commandBufferMaker = t.createEncoder(encoderType);
+ const renderEncoder = commandBufferMaker.encoder;
+
+ if (setPipelineBeforeBuffer) {
+ renderEncoder.setPipeline(renderPipeline);
+ }
+ renderEncoder.setIndexBuffer(indexBuffer, indexFormat, 0, bindingSize);
+ if (!setPipelineBeforeBuffer) {
+ renderEncoder.setPipeline(renderPipeline);
+ }
+
+ callDrawIndexed(t, renderEncoder, drawType, drawCallParam);
+
+ commandBufferMaker.validateFinishAndSubmit(isFinishSuccess, true);
+ }
+ }
+ });
+
+g.test(`vertex_buffer_OOB`)
+ .desc(
+ `
+In this test we test the vertex buffer OOB validation in draw calls. Specifically, only vertex step
+mode buffer OOB in draw and instance step mode buffer OOB in draw and drawIndexed are CPU-validated.
+Other cases are handled by robust access and no validation error occurs.
+- Test that:
+ - Draw call needs to read {=, >} any bound vertex buffer range, with GPUBuffer that is {large
+ enough, exactly the size of bound range}
+ - Binding size = 0 (ensure it's not treated as a special case)
+ - x= weird buffer offset values
+ - x= weird attribute offset values
+ - x= weird arrayStride values
+ - x= {render pass, render bundle}
+- For vertex step mode vertex buffer,
+ - Test that:
+ - vertexCount largeish
+ - firstVertex {=, >} 0
+ - arrayStride is 0 and bound buffer size too small
+ - (vertexCount + firstVertex) is zero
+ - Validation error occurs in:
+ - draw
+ - drawIndexed with a zero array stride vertex step mode buffer OOB
+  - Otherwise no validation error in drawIndexed, drawIndirect and drawIndexedIndirect
+- For instance step mode vertex buffer,
+ - Test with draw and drawIndexed:
+ - instanceCount largeish
+ - firstInstance {=, >} 0
+ - arrayStride is 0 and bound buffer size too small
+ - (instanceCount + firstInstance) is zero
+ - Validation error occurs in draw and drawIndexed
+ - No validation error in drawIndirect and drawIndexedIndirect
+
+In this test, we use a render pipeline requiring one vertex step mode buffer, with different vertex
+buffer layouts (attribute offset, array stride, vertex format). Then for a given drawing parameter set
+(e.g., vertexCount, instanceCount, firstVertex, indexCount), we calculate the exact required size for
+the vertex step mode vertex buffer. Then, we generate buffer parameters (i.e. GPU buffer size,
+binding offset and binding size) for all buffers, covering (bound size == required size),
+(bound size == required size - 1), and (bound size == 0), and test that draw and drawIndexed
+succeed/error as expected. The set of buffer parameters should include cases like weird offset values.
+`
+ )
+ .params(u =>
+ u
+ // type of draw call
+ .combine('type', ['draw', 'drawIndexed', 'drawIndirect', 'drawIndexedIndirect'] as const)
+ // VBSize: the state of vertex step mode vertex buffer bound size
+ // IBSize: the state of instance step mode vertex buffer bound size
+ .combineWithParams([
+ { VBSize: 'exact', IBSize: 'exact' },
+ { VBSize: 'zero', IBSize: 'exact' },
+ { VBSize: 'oneTooSmall', IBSize: 'exact' },
+ { VBSize: 'exact', IBSize: 'zero' },
+ { VBSize: 'exact', IBSize: 'oneTooSmall' },
+ ] as const)
+ // the state of array stride
+ .combine('AStride', ['zero', 'exact', 'oversize'] as const)
+ .beginSubcases()
+ // should the vertex stride count be zero
+ .combine('VStride0', [false, true] as const)
+ // should the instance stride count be zero
+ .combine('IStride0', [false, true] as const)
+ // the factor for offset of attributes in vertex layout
+ .combine('offset', [0, 1, 2, 7]) // the offset of attribute will be factor * MIN(4, sizeof(vertexFormat))
+ .combine('setBufferOffset', [200]) // must be a multiple of 4
+ .combine('attributeFormat', ['snorm8x2', 'float32', 'float16x4'] as GPUVertexFormat[])
+ .expandWithParams(p =>
+ p.VStride0
+ ? [{ firstVertex: 0, vertexCount: 0 }]
+ : [
+ { firstVertex: 0, vertexCount: 1 },
+ { firstVertex: 0, vertexCount: 10000 },
+ { firstVertex: 10000, vertexCount: 0 },
+ { firstVertex: 10000, vertexCount: 10000 },
+ ]
+ )
+ .expandWithParams(p =>
+ p.IStride0
+ ? [{ firstInstance: 0, instanceCount: 0 }]
+ : [
+ { firstInstance: 0, instanceCount: 1 },
+ { firstInstance: 0, instanceCount: 10000 },
+ { firstInstance: 10000, instanceCount: 0 },
+ { firstInstance: 10000, instanceCount: 10000 },
+ ]
+ )
+ .unless(p => p.vertexCount === 10000 && p.instanceCount === 10000)
+ )
+ .fn(t => {
+ const {
+ type: drawType,
+ VBSize: boundVertexBufferSizeState,
+ IBSize: boundInstanceBufferSizeState,
+ VStride0: zeroVertexStrideCount,
+ IStride0: zeroInstanceStrideCount,
+ AStride: arrayStrideState,
+ offset: attributeOffsetFactor,
+ setBufferOffset,
+ attributeFormat,
+ vertexCount,
+ instanceCount,
+ firstVertex,
+ firstInstance,
+ } = t.params;
+
+ const attributeFormatInfo = kVertexFormatInfo[attributeFormat];
+ const formatSize = attributeFormatInfo.byteSize;
+ const attributeOffset = attributeOffsetFactor * Math.min(4, formatSize);
+ const lastStride = attributeOffset + formatSize;
+ let arrayStride = 0;
+ if (arrayStrideState !== 'zero') {
+ arrayStride = lastStride;
+ if (arrayStrideState === 'oversize') {
+ // Add an arbitrary number to array stride to make it larger than required by attributes
+ arrayStride = arrayStride + 20;
+ }
+ arrayStride = arrayStride + (-arrayStride & 3); // Make sure arrayStride is a multiple of 4
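+      // (-x & 3) adds the 0-3 bytes needed to reach the next multiple of 4, e.g. 10 -> 12
+      // and 12 -> 12.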
+ }
+
+ const calcSetBufferSize = (
+ boundBufferSizeState: 'zero' | 'oneTooSmall' | 'exact',
+ strideCount: number
+ ): number => {
+ let requiredBufferSize: number;
+ if (strideCount > 0) {
+ requiredBufferSize = arrayStride * (strideCount - 1) + lastStride;
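+        // e.g. arrayStride 16, lastStride 8, strideCount 3: two full strides plus the final
+        // attribute read give 16 * 2 + 8 = 40 bytes.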
+ } else {
+        // The spec does not validate the bound buffer size if strideCount == 0.
+ requiredBufferSize = lastStride;
+ }
+ let setBufferSize: number;
+ switch (boundBufferSizeState) {
+ case 'zero': {
+ setBufferSize = 0;
+ break;
+ }
+ case 'oneTooSmall': {
+ setBufferSize = requiredBufferSize - 1;
+ break;
+ }
+ case 'exact': {
+ setBufferSize = requiredBufferSize;
+ break;
+ }
+ }
+ return setBufferSize;
+ };
+
+ const strideCountForVertexBuffer = firstVertex + vertexCount;
+ const setVertexBufferSize = calcSetBufferSize(
+ boundVertexBufferSizeState,
+ strideCountForVertexBuffer
+ );
+ const vertexBufferSize = setBufferOffset + setVertexBufferSize;
+ const strideCountForInstanceBuffer = firstInstance + instanceCount;
+ const setInstanceBufferSize = calcSetBufferSize(
+ boundInstanceBufferSizeState,
+ strideCountForInstanceBuffer
+ );
+ const instanceBufferSize = setBufferOffset + setInstanceBufferSize;
+
+ const vertexBuffer = t.createBufferWithState('valid', {
+ size: vertexBufferSize,
+ usage: GPUBufferUsage.VERTEX,
+ });
+ const instanceBuffer = t.createBufferWithState('valid', {
+ size: instanceBufferSize,
+ usage: GPUBufferUsage.VERTEX,
+ });
+
+ const renderPipeline = makeTestPipelineWithVertexAndInstanceBuffer(
+ t,
+ arrayStride,
+ attributeFormat,
+ attributeOffset
+ );
+
+ for (const encoderType of ['render bundle', 'render pass'] as const) {
+ for (const setPipelineBeforeBuffer of [false, true]) {
+ const commandBufferMaker = t.createEncoder(encoderType);
+ const renderEncoder = commandBufferMaker.encoder;
+
+ if (setPipelineBeforeBuffer) {
+ renderEncoder.setPipeline(renderPipeline);
+ }
+ renderEncoder.setVertexBuffer(1, vertexBuffer, setBufferOffset, setVertexBufferSize);
+ renderEncoder.setVertexBuffer(7, instanceBuffer, setBufferOffset, setInstanceBufferSize);
+ if (!setPipelineBeforeBuffer) {
+ renderEncoder.setPipeline(renderPipeline);
+ }
+
+ if (drawType === 'draw' || drawType === 'drawIndirect') {
+ const drawParam: DrawParameter = {
+ vertexCount,
+ instanceCount,
+ firstVertex,
+ firstInstance,
+ };
+
+ callDraw(t, renderEncoder, drawType, drawParam);
+ } else {
+ const { indexFormat, indexCount, firstIndex, indexBufferSize } =
+ kDefaultParameterForIndexedDraw;
+
+ const desc: GPUBufferDescriptor = {
+ size: indexBufferSize,
+ usage: GPUBufferUsage.INDEX | GPUBufferUsage.COPY_DST,
+ };
+ const indexBuffer = t.createBufferWithState('valid', desc);
+
+ const drawParam: DrawIndexedParameter = {
+ indexCount,
+ instanceCount,
+ firstIndex,
+ baseVertex: firstVertex,
+ firstInstance,
+ };
+
+ renderEncoder.setIndexBuffer(indexBuffer, indexFormat, 0, indexBufferSize);
+ callDrawIndexed(t, renderEncoder, drawType, drawParam);
+ }
+
+ const isVertexBufferOOB =
+ boundVertexBufferSizeState !== 'exact' &&
+ drawType === 'draw' && // drawIndirect, drawIndexed, and drawIndexedIndirect do not validate vertex step mode buffer
+ !zeroVertexStrideCount; // vertex step mode buffer never OOB if stride count = 0
+ const isInstanceBufferOOB =
+ boundInstanceBufferSizeState !== 'exact' &&
+ (drawType === 'draw' || drawType === 'drawIndexed') && // drawIndirect and drawIndexedIndirect do not validate instance step mode buffer
+ !zeroInstanceStrideCount; // vertex step mode buffer never OOB if stride count = 0
+ const isFinishSuccess = !isVertexBufferOOB && !isInstanceBufferOOB;
+
+ commandBufferMaker.validateFinishAndSubmit(isFinishSuccess, true);
+ }
+ }
+ });
+
+g.test(`buffer_binding_overlap`)
+ .desc(
+ `
+In this test we check that binding one GPU buffer to multiple vertex buffer slots, or to both a vertex
+buffer slot and the index buffer, causes no validation error, with complete/partial overlap.
+ - x= all draw types
+
+ TODO: The "Factor" parameters don't necessarily guarantee that we test all configurations
+ of buffers overlapping or not. This test should be refactored to test specific overlap cases,
+ and have fewer total parameterizations.
+ See https://github.com/gpuweb/cts/pull/3122#discussion_r1378623214
+`
+ )
+ .params(u =>
+ u //
+ .combine('drawType', ['draw', 'drawIndexed', 'drawIndirect', 'drawIndexedIndirect'] as const)
+ .beginSubcases()
+ .combine('vertexBoundOffestFactor', [0, 0.5, 1, 1.5, 2])
+ .combine('instanceBoundOffestFactor', [0, 0.5, 1, 1.5, 2])
+ .combine('indexBoundOffestFactor', [0, 0.5, 1, 1.5, 2])
+ .combine('arrayStrideState', ['zero', 'exact', 'oversize'] as const)
+ )
+ .fn(t => {
+ const {
+ drawType,
+ vertexBoundOffestFactor,
+ instanceBoundOffestFactor,
+ indexBoundOffestFactor,
+ arrayStrideState,
+ } = t.params;
+
+ // Compute the array stride for vertex step mode and instance step mode attribute
+ const attributeFormat = 'float32x4';
+ const attributeFormatInfo = kVertexFormatInfo[attributeFormat];
+ const formatSize = attributeFormatInfo.byteSize;
+ const attributeOffset = 0;
+ const lastStride = attributeOffset + formatSize;
+ let arrayStride = 0;
+ if (arrayStrideState !== 'zero') {
+ arrayStride = lastStride;
+ if (arrayStrideState === 'oversize') {
+ // Add an arbitrary number to array stride
+ arrayStride = arrayStride + 20;
+ }
+ arrayStride = arrayStride + (-arrayStride & 3); // Make sure arrayStride is a multiple of 4
+ }
+
+ const calcAttributeBufferSize = (strideCount: number): number => {
+ let requiredBufferSize: number;
+ if (strideCount > 0) {
+ requiredBufferSize = arrayStride * (strideCount - 1) + lastStride;
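+        // e.g. arrayStride = 16, strideCount = 4 -> 16 * 3 + 16 = 64 bytes; the last element only
+        // needs the attribute bytes, not a full stride.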
+ } else {
+        // The spec does not validate the bound buffer size when strideCount == 0.
+ requiredBufferSize = lastStride;
+ }
+ return requiredBufferSize;
+ };
+
+ const calcSetBufferOffset = (requiredSetBufferSize: number, offsetFactor: number): number => {
+ const offset = Math.ceil(requiredSetBufferSize * offsetFactor);
+ const alignedOffset = offset + (-offset & 3); // Make sure offset is a multiple of 4
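+      // (-offset & 3) is the distance from offset up to the next multiple of 4, e.g. offset = 5 -> 3.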
+ return alignedOffset;
+ };
+
+    // Compute the required bound range for every vertex and index buffer to ensure the shared GPU
+    // buffer is large enough.
+ const { vertexCount, firstVertex } = kDefaultParameterForNonIndexedDraw;
+ const strideCountForVertexBuffer = firstVertex + vertexCount;
+ const setVertexBufferSize = calcAttributeBufferSize(strideCountForVertexBuffer);
+ const setVertexBufferOffset = calcSetBufferOffset(setVertexBufferSize, vertexBoundOffestFactor);
+ let requiredBufferSize = setVertexBufferOffset + setVertexBufferSize;
+
+ const { instanceCount, firstInstance } = kDefaultParameterForDraw;
+ const strideCountForInstanceBuffer = firstInstance + instanceCount;
+ const setInstanceBufferSize = calcAttributeBufferSize(strideCountForInstanceBuffer);
+ const setInstanceBufferOffset = calcSetBufferOffset(
+ setInstanceBufferSize,
+ instanceBoundOffestFactor
+ );
+ requiredBufferSize = Math.max(
+ requiredBufferSize,
+ setInstanceBufferOffset + setInstanceBufferSize
+ );
+
+ const { indexBufferSize: setIndexBufferSize, indexFormat } = kDefaultParameterForIndexedDraw;
+ const setIndexBufferOffset = calcSetBufferOffset(setIndexBufferSize, indexBoundOffestFactor);
+ requiredBufferSize = Math.max(requiredBufferSize, setIndexBufferOffset + setIndexBufferSize);
+
+    // Create the shared GPU buffer with both vertex and index usage.
+ const sharedBuffer = t.createBufferWithState('valid', {
+ size: requiredBufferSize,
+ usage: GPUBufferUsage.VERTEX | GPUBufferUsage.INDEX,
+ });
+
+ const renderPipeline = makeTestPipelineWithVertexAndInstanceBuffer(
+ t,
+ arrayStride,
+ attributeFormat
+ );
+
+ for (const encoderType of ['render bundle', 'render pass'] as const) {
+ for (const setPipelineBeforeBuffer of [false, true]) {
+ const commandBufferMaker = t.createEncoder(encoderType);
+ const renderEncoder = commandBufferMaker.encoder;
+
+ if (setPipelineBeforeBuffer) {
+ renderEncoder.setPipeline(renderPipeline);
+ }
+ renderEncoder.setVertexBuffer(1, sharedBuffer, setVertexBufferOffset, setVertexBufferSize);
+ renderEncoder.setVertexBuffer(
+ 7,
+ sharedBuffer,
+ setInstanceBufferOffset,
+ setInstanceBufferSize
+ );
+ renderEncoder.setIndexBuffer(
+ sharedBuffer,
+ indexFormat,
+ setIndexBufferOffset,
+ setIndexBufferSize
+ );
+ if (!setPipelineBeforeBuffer) {
+ renderEncoder.setPipeline(renderPipeline);
+ }
+
+ if (drawType === 'draw' || drawType === 'drawIndirect') {
+ const drawParam: DrawParameter = {
+ ...kDefaultParameterForDraw,
+ ...kDefaultParameterForNonIndexedDraw,
+ };
+ callDraw(t, renderEncoder, drawType, drawParam);
+ } else {
+ const drawParam: DrawIndexedParameter = {
+ ...kDefaultParameterForDraw,
+ ...kDefaultParameterForIndexedDraw,
+ };
+ callDrawIndexed(t, renderEncoder, drawType, drawParam);
+ }
+
+        // Since all bound buffers are large enough, the draw call should always succeed.
+ commandBufferMaker.validateFinishAndSubmit(true, true);
+ }
+ }
+ });
+
+g.test(`last_buffer_setting_take_account`)
+ .desc(
+ `
+In this test we test that only the last setting for a buffer slot takes effect.
+- All (non/indexed, in/direct) draw commands
+ - setPl, setVB, setIB, draw, {setPl,setVB,setIB,nothing (control)}, then a larger draw that
+ wouldn't have been valid before that
+`
+ )
+ .unimplemented();
+
+g.test(`max_draw_count`)
+ .desc(
+ `
+In this test we test that a draw count exceeding
+GPURenderPassDescriptor.maxDrawCount causes a validation error on
+GPUCommandEncoder.finish(). The test sets the specified maxDrawCount,
+issues the specified draw call the specified number of times, with or without bundles,
+and checks whether GPUCommandEncoder.finish() generates a validation error.
+ - x= whether to use a bundle for the first half of the draw calls
+ - x= whether to use a bundle for the second half of the draw calls
+ - x= several different draw counts
+ - x= several different maxDrawCounts
+`
+ )
+ .params(u =>
+ u
+ .combine('bundleFirstHalf', [false, true])
+ .combine('bundleSecondHalf', [false, true])
+ .combine('maxDrawCount', [0, 1, 4, 16])
+ .beginSubcases()
+ .expand('drawCount', p => new Set([0, p.maxDrawCount, p.maxDrawCount + 1]))
+ )
+ .fn(t => {
+ const { bundleFirstHalf, bundleSecondHalf, maxDrawCount, drawCount } = t.params;
+
+ const colorFormat = 'rgba8unorm';
+ const colorTexture = t.device.createTexture({
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ format: colorFormat,
+ mipLevelCount: 1,
+ sampleCount: 1,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ @vertex fn main() -> @builtin(position) vec4<f32> {
+ return vec4<f32>();
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `@fragment fn main() {}`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: colorFormat, writeMask: 0 }],
+ },
+ });
+
+ const indexBuffer = t.makeBufferWithContents(new Uint16Array([0, 0, 0]), GPUBufferUsage.INDEX);
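+    // drawIndirect arguments: [vertexCount = 3, instanceCount = 1, firstVertex = 0, firstInstance = 0].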
+ const indirectBuffer = t.makeBufferWithContents(
+ new Uint32Array([3, 1, 0, 0]),
+ GPUBufferUsage.INDIRECT
+ );
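+    // drawIndexedIndirect arguments: [indexCount, instanceCount, firstIndex, baseVertex, firstInstance].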
+ const indexedIndirectBuffer = t.makeBufferWithContents(
+ new Uint32Array([3, 1, 0, 0, 0]),
+ GPUBufferUsage.INDIRECT
+ );
+
+ const commandEncoder = t.device.createCommandEncoder();
+ const renderPassEncoder = commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorTexture.createView(),
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ maxDrawCount,
+ });
+
+ const firstHalfEncoder = bundleFirstHalf
+ ? t.device.createRenderBundleEncoder({
+ colorFormats: [colorFormat],
+ })
+ : renderPassEncoder;
+
+ const secondHalfEncoder = bundleSecondHalf
+ ? t.device.createRenderBundleEncoder({
+ colorFormats: [colorFormat],
+ })
+ : renderPassEncoder;
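+    // When a half is not bundled, its draws are recorded directly into the render pass encoder;
+    // either way, all draws count against the same pass's maxDrawCount.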
+
+ firstHalfEncoder.setPipeline(pipeline);
+ firstHalfEncoder.setIndexBuffer(indexBuffer, 'uint16');
+ secondHalfEncoder.setPipeline(pipeline);
+ secondHalfEncoder.setIndexBuffer(indexBuffer, 'uint16');
+
+ const halfDrawCount = Math.floor(drawCount / 2);
+ for (let i = 0; i < drawCount; i++) {
+ const encoder = i < halfDrawCount ? firstHalfEncoder : secondHalfEncoder;
+ if (i % 4 === 0) {
+ encoder.draw(3);
+ }
+ if (i % 4 === 1) {
+ encoder.drawIndexed(3);
+ }
+ if (i % 4 === 2) {
+ encoder.drawIndirect(indirectBuffer, 0);
+ }
+ if (i % 4 === 3) {
+ encoder.drawIndexedIndirect(indexedIndirectBuffer, 0);
+ }
+ }
+
+ const bundles = [];
+ if (bundleFirstHalf) {
+ bundles.push((firstHalfEncoder as GPURenderBundleEncoder).finish());
+ }
+ if (bundleSecondHalf) {
+ bundles.push((secondHalfEncoder as GPURenderBundleEncoder).finish());
+ }
+
+ if (bundles.length > 0) {
+ renderPassEncoder.executeBundles(bundles);
+ }
+
+ renderPassEncoder.end();
+
+ t.expectValidationError(() => {
+ commandEncoder.finish();
+ }, drawCount > maxDrawCount);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/dynamic_state.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/dynamic_state.spec.ts
new file mode 100644
index 0000000000..d7bdec6ba5
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/dynamic_state.spec.ts
@@ -0,0 +1,319 @@
+export const description = `
+API validation tests for dynamic state commands (setViewport/setScissorRect/setBlendConstant/setStencilReference).
+
+TODO: ensure existing tests cover these notes. Note many of these may be operation tests instead.
+> - setViewport
+> - {x, y} = {0, invalid values if any}
+> - {width, height, minDepth, maxDepth} = {
+> - least possible value that's valid
+> - greatest possible negative value that's invalid
+> - greatest possible positive value that's valid
+> - least possible positive value that's invalid if any
+> - }
+> - minDepth {<, =, >} maxDepth
+> - setScissorRect
+> - {width, height} = 0
+> - {x+width, y+height} = attachment size + 1
+> - setBlendConstant
+> - color {slightly, very} out of range
+> - used with a simple pipeline that {does, doesn't} use it
+> - setStencilReference
+> - {0, max}
+> - used with a simple pipeline that {does, doesn't} use it
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { ValidationTest } from '../../../validation_test.js';
+
+interface ViewportCall {
+ x: number;
+ y: number;
+ w: number;
+ h: number;
+ minDepth: number;
+ maxDepth: number;
+}
+
+interface ScissorCall {
+ x: number;
+ y: number;
+ w: number;
+ h: number;
+}
+
+class F extends ValidationTest {
+ testViewportCall(
+ success: boolean,
+ v: ViewportCall,
+ attachmentSize: GPUExtent3D = { width: 1, height: 1, depthOrArrayLayers: 1 }
+ ) {
+ const attachment = this.device.createTexture({
+ format: 'rgba8unorm',
+ size: attachmentSize,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const encoder = this.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: attachment.createView(),
+ loadOp: 'load',
+ storeOp: 'store',
+ },
+ ],
+ });
+ pass.setViewport(v.x, v.y, v.w, v.h, v.minDepth, v.maxDepth);
+ pass.end();
+
+ this.expectValidationError(() => {
+ encoder.finish();
+ }, !success);
+ }
+
+ testScissorCall(
+ success: boolean | 'type-error',
+ s: ScissorCall,
+ attachmentSize: GPUExtent3D = { width: 1, height: 1, depthOrArrayLayers: 1 }
+ ) {
+ const attachment = this.device.createTexture({
+ format: 'rgba8unorm',
+ size: attachmentSize,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const encoder = this.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: attachment.createView(),
+ loadOp: 'load',
+ storeOp: 'store',
+ },
+ ],
+ });
+ if (success === 'type-error') {
+ this.shouldThrow('TypeError', () => {
+ pass.setScissorRect(s.x, s.y, s.w, s.h);
+ });
+ } else {
+ pass.setScissorRect(s.x, s.y, s.w, s.h);
+ pass.end();
+
+ this.expectValidationError(() => {
+ encoder.finish();
+ }, !success);
+ }
+ }
+
+ createDummyRenderPassEncoder(): { encoder: GPUCommandEncoder; pass: GPURenderPassEncoder } {
+ const attachment = this.device.createTexture({
+ format: 'rgba8unorm',
+ size: [1, 1, 1],
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const encoder = this.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: attachment.createView(),
+ loadOp: 'load',
+ storeOp: 'store',
+ },
+ ],
+ });
+
+ return { encoder, pass };
+ }
+}
+
+export const g = makeTestGroup(F);
+
+g.test('setViewport,x_y_width_height_nonnegative')
+ .desc(
+    `Test that the parameters of setViewport that define the box must be non-negative.
+
+TODO Test -0 (it should be valid) but can't be tested because the harness complains about duplicate parameters.
+TODO Test the first value smaller than -0`
+ )
+ .paramsSubcasesOnly([
+ // Control case: everything to 0 is ok, covers the empty viewport case.
+ { x: 0, y: 0, w: 0, h: 0 },
+
+ // Test -1
+ { x: -1, y: 0, w: 0, h: 0 },
+ { x: 0, y: -1, w: 0, h: 0 },
+ { x: 0, y: 0, w: -1, h: 0 },
+ { x: 0, y: 0, w: 0, h: -1 },
+ ])
+ .fn(t => {
+ const { x, y, w, h } = t.params;
+ const success = x >= 0 && y >= 0 && w >= 0 && h >= 0;
+ t.testViewportCall(success, { x, y, w, h, minDepth: 0, maxDepth: 1 });
+ });
+
+g.test('setViewport,xy_rect_contained_in_attachment')
+ .desc(
+    'Test that the rectangle defined by x, y, width, height must be contained in the attachment'
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combineWithParams([
+ { attachmentWidth: 3, attachmentHeight: 5 },
+ { attachmentWidth: 5, attachmentHeight: 3 },
+ { attachmentWidth: 1024, attachmentHeight: 1 },
+ { attachmentWidth: 1, attachmentHeight: 1024 },
+ ])
+ .combineWithParams([
+ // Control case: a full viewport is valid.
+ { dx: 0, dy: 0, dw: 0, dh: 0 },
+
+ // Other valid cases with a partial viewport.
+ { dx: 1, dy: 0, dw: -1, dh: 0 },
+ { dx: 0, dy: 1, dw: 0, dh: -1 },
+ { dx: 0, dy: 0, dw: -1, dh: 0 },
+ { dx: 0, dy: 0, dw: 0, dh: -1 },
+
+ // Test with a small value that causes the viewport to go outside the attachment.
+ { dx: 1, dy: 0, dw: 0, dh: 0 },
+ { dx: 0, dy: 1, dw: 0, dh: 0 },
+ { dx: 0, dy: 0, dw: 1, dh: 0 },
+ { dx: 0, dy: 0, dw: 0, dh: 1 },
+ ])
+ )
+ .fn(t => {
+ const { attachmentWidth, attachmentHeight, dx, dy, dw, dh } = t.params;
+ const x = dx;
+ const y = dy;
+ const w = attachmentWidth + dw;
+    const h = attachmentHeight + dh;
+
+ const success = x + w <= attachmentWidth && y + h <= attachmentHeight;
+ t.testViewportCall(
+ success,
+ { x, y, w, h, minDepth: 0, maxDepth: 1 },
+ { width: attachmentWidth, height: attachmentHeight, depthOrArrayLayers: 1 }
+ );
+ });
+
+g.test('setViewport,depth_rangeAndOrder')
+ .desc('Test that 0 <= minDepth <= maxDepth <= 1')
+ .paramsSubcasesOnly([
+ // Success cases
+ { minDepth: 0, maxDepth: 1 },
+ { minDepth: -0, maxDepth: -0 },
+ { minDepth: 1, maxDepth: 1 },
+ { minDepth: 0.3, maxDepth: 0.7 },
+ { minDepth: 0.7, maxDepth: 0.7 },
+ { minDepth: 0.3, maxDepth: 0.3 },
+
+ // Invalid cases
+ { minDepth: -0.1, maxDepth: 1 },
+ { minDepth: 0, maxDepth: 1.1 },
+ { minDepth: 0.5, maxDepth: 0.49999 },
+ ])
+ .fn(t => {
+ const { minDepth, maxDepth } = t.params;
+ const success =
+ 0 <= minDepth && minDepth <= 1 && 0 <= maxDepth && maxDepth <= 1 && minDepth <= maxDepth;
+ t.testViewportCall(success, { x: 0, y: 0, w: 1, h: 1, minDepth, maxDepth });
+ });
+
+g.test('setScissorRect,x_y_width_height_nonnegative')
+ .desc(
+    `Test that the parameters of setScissorRect that define the box must be non-negative, otherwise a TypeError is thrown.
+
+TODO Test -0 (it should be valid) but can't be tested because the harness complains about duplicate parameters.
+TODO Test the first value smaller than -0`
+ )
+ .paramsSubcasesOnly([
+ // Control case: everything to 0 is ok, covers the empty scissor case.
+ { x: 0, y: 0, w: 0, h: 0 },
+
+ // Test -1
+ { x: -1, y: 0, w: 0, h: 0 },
+ { x: 0, y: -1, w: 0, h: 0 },
+ { x: 0, y: 0, w: -1, h: 0 },
+ { x: 0, y: 0, w: 0, h: -1 },
+ ])
+ .fn(t => {
+ const { x, y, w, h } = t.params;
+ const success = x >= 0 && y >= 0 && w >= 0 && h >= 0;
+ t.testScissorCall(success ? true : 'type-error', { x, y, w, h });
+ });
+
+g.test('setScissorRect,xy_rect_contained_in_attachment')
+ .desc(
+    'Test that the rectangle defined by x, y, width, height must be contained in the attachment'
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combineWithParams([
+ { attachmentWidth: 3, attachmentHeight: 5 },
+ { attachmentWidth: 5, attachmentHeight: 3 },
+ { attachmentWidth: 1024, attachmentHeight: 1 },
+ { attachmentWidth: 1, attachmentHeight: 1024 },
+ ])
+ .combineWithParams([
+ // Control case: a full scissor is valid.
+ { dx: 0, dy: 0, dw: 0, dh: 0 },
+
+ // Other valid cases with a partial scissor.
+ { dx: 1, dy: 0, dw: -1, dh: 0 },
+ { dx: 0, dy: 1, dw: 0, dh: -1 },
+ { dx: 0, dy: 0, dw: -1, dh: 0 },
+ { dx: 0, dy: 0, dw: 0, dh: -1 },
+
+ // Test with a small value that causes the scissor to go outside the attachment.
+ { dx: 1, dy: 0, dw: 0, dh: 0 },
+ { dx: 0, dy: 1, dw: 0, dh: 0 },
+ { dx: 0, dy: 0, dw: 1, dh: 0 },
+ { dx: 0, dy: 0, dw: 0, dh: 1 },
+ ])
+ )
+ .fn(t => {
+ const { attachmentWidth, attachmentHeight, dx, dy, dw, dh } = t.params;
+ const x = dx;
+ const y = dy;
+ const w = attachmentWidth + dw;
+    const h = attachmentHeight + dh;
+
+ const success = x + w <= attachmentWidth && y + h <= attachmentHeight;
+ t.testScissorCall(
+ success,
+ { x, y, w, h },
+ { width: attachmentWidth, height: attachmentHeight, depthOrArrayLayers: 1 }
+ );
+ });
+
+g.test('setBlendConstant')
+ .desc('Test that almost any color value is valid for setBlendConstant')
+ .paramsSubcasesOnly([
+ { r: 1.0, g: 1.0, b: 1.0, a: 1.0 },
+ { r: -1.0, g: -1.0, b: -1.0, a: -1.0 },
+ { r: Number.MAX_SAFE_INTEGER, g: Number.MIN_SAFE_INTEGER, b: -0, a: 100000 },
+ ])
+ .fn(t => {
+ const { r, g, b, a } = t.params;
+ const encoders = t.createDummyRenderPassEncoder();
+ encoders.pass.setBlendConstant({ r, g, b, a });
+ encoders.pass.end();
+ encoders.encoder.finish();
+ });
+
+g.test('setStencilReference')
+ .desc('Test that almost any stencil reference value is valid for setStencilReference')
+ .paramsSubcasesOnly([
+ { value: 1 }, //
+ { value: 0 },
+ { value: 1000 },
+ { value: 0xffffffff },
+ ])
+ .fn(t => {
+ const { value } = t.params;
+ const encoders = t.createDummyRenderPassEncoder();
+ encoders.pass.setStencilReference(value);
+ encoders.pass.end();
+ encoders.encoder.finish();
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/indirect_draw.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/indirect_draw.spec.ts
new file mode 100644
index 0000000000..94bf686ca4
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/indirect_draw.spec.ts
@@ -0,0 +1,202 @@
+export const description = `
+Validation tests for drawIndirect/drawIndexedIndirect on render pass and render bundle.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUConst } from '../../../../../constants.js';
+import { kResourceStates } from '../../../../../gpu_test.js';
+import { ValidationTest } from '../../../validation_test.js';
+
+import { kRenderEncodeTypeParams } from './render.js';
+
+const kIndirectDrawTestParams = kRenderEncodeTypeParams.combine('indexed', [true, false] as const);
+
+class F extends ValidationTest {
+ makeIndexBuffer(): GPUBuffer {
+ return this.device.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.INDEX,
+ });
+ }
+}
+
+export const g = makeTestGroup(F);
+
+g.test('indirect_buffer_state')
+ .desc(
+ `
+Tests indirect buffer must be valid.
+ `
+ )
+ .paramsSubcasesOnly(kIndirectDrawTestParams.combine('state', kResourceStates))
+ .fn(t => {
+ const { encoderType, indexed, state } = t.params;
+ const pipeline = t.createNoOpRenderPipeline();
+ const indirectBuffer = t.createBufferWithState(state, {
+ size: 256,
+ usage: GPUBufferUsage.INDIRECT,
+ });
+
+ const { encoder, validateFinishAndSubmitGivenState } = t.createEncoder(encoderType);
+ encoder.setPipeline(pipeline);
+ if (indexed) {
+ const indexBuffer = t.makeIndexBuffer();
+ encoder.setIndexBuffer(indexBuffer, 'uint32');
+ encoder.drawIndexedIndirect(indirectBuffer, 0);
+ } else {
+ encoder.drawIndirect(indirectBuffer, 0);
+ }
+
+ validateFinishAndSubmitGivenState(state);
+ });
+
+g.test('indirect_buffer,device_mismatch')
+ .desc(
+ 'Tests draw(Indexed)Indirect cannot be called with an indirect buffer created from another device'
+ )
+ .paramsSubcasesOnly(kIndirectDrawTestParams.combine('mismatched', [true, false]))
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { encoderType, indexed, mismatched } = t.params;
+
+ const sourceDevice = mismatched ? t.mismatchedDevice : t.device;
+
+ const indirectBuffer = sourceDevice.createBuffer({
+ size: 256,
+ usage: GPUBufferUsage.INDIRECT,
+ });
+ t.trackForCleanup(indirectBuffer);
+
+ const { encoder, validateFinish } = t.createEncoder(encoderType);
+ encoder.setPipeline(t.createNoOpRenderPipeline());
+
+ if (indexed) {
+ encoder.setIndexBuffer(t.makeIndexBuffer(), 'uint32');
+ encoder.drawIndexedIndirect(indirectBuffer, 0);
+ } else {
+ encoder.drawIndirect(indirectBuffer, 0);
+ }
+ validateFinish(!mismatched);
+ });
+
+g.test('indirect_buffer_usage')
+ .desc(
+ `
+Tests indirect buffer must have 'Indirect' usage.
+ `
+ )
+ .paramsSubcasesOnly(
+ kIndirectDrawTestParams.combine('usage', [
+ GPUConst.BufferUsage.INDIRECT, // control case
+ GPUConst.BufferUsage.COPY_DST,
+ GPUConst.BufferUsage.COPY_DST | GPUConst.BufferUsage.INDIRECT,
+ ] as const)
+ )
+ .fn(t => {
+ const { encoderType, indexed, usage } = t.params;
+ const indirectBuffer = t.device.createBuffer({
+ size: 256,
+ usage,
+ });
+
+ const { encoder, validateFinish } = t.createEncoder(encoderType);
+ encoder.setPipeline(t.createNoOpRenderPipeline());
+ if (indexed) {
+ const indexBuffer = t.makeIndexBuffer();
+ encoder.setIndexBuffer(indexBuffer, 'uint32');
+ encoder.drawIndexedIndirect(indirectBuffer, 0);
+ } else {
+ encoder.drawIndirect(indirectBuffer, 0);
+ }
+ validateFinish((usage & GPUBufferUsage.INDIRECT) !== 0);
+ });
+
+g.test('indirect_offset_alignment')
+ .desc(
+ `
+Tests indirect offset must be a multiple of 4.
+ `
+ )
+ .paramsSubcasesOnly(kIndirectDrawTestParams.combine('indirectOffset', [0, 2, 4] as const))
+ .fn(t => {
+ const { encoderType, indexed, indirectOffset } = t.params;
+ const pipeline = t.createNoOpRenderPipeline();
+ const indirectBuffer = t.device.createBuffer({
+ size: 256,
+ usage: GPUBufferUsage.INDIRECT,
+ });
+
+ const { encoder, validateFinish } = t.createEncoder(encoderType);
+ encoder.setPipeline(pipeline);
+ if (indexed) {
+ const indexBuffer = t.makeIndexBuffer();
+ encoder.setIndexBuffer(indexBuffer, 'uint32');
+ encoder.drawIndexedIndirect(indirectBuffer, indirectOffset);
+ } else {
+ encoder.drawIndirect(indirectBuffer, indirectOffset);
+ }
+
+ validateFinish(indirectOffset % 4 === 0);
+ });
+
+g.test('indirect_offset_oob')
+ .desc(
+ `
+Tests indirect draw calls with various indirect offsets and buffer sizes.
+- (offset, b.size) is
+ - (0, 0)
+ - (0, min size) (control case)
+ - (0, min size + 1) (control case)
+ - (0, min size - 1)
+ - (0, min size - min alignment)
+ - (min alignment, min size + min alignment)
+ - (min alignment, min size + min alignment - 1)
+ - (min alignment / 2, min size + min alignment)
+ - (min alignment +/- 1, min size + min alignment)
+ - (min size, min size)
+ - (min size + min alignment, min size)
+ - min size = indirect draw parameters size
+  - x= (drawIndirect, drawIndexedIndirect)
+ `
+ )
+ .paramsSubcasesOnly(
+ kIndirectDrawTestParams.expandWithParams(p => {
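+      // drawIndirect takes 4 u32 arguments (16 bytes); drawIndexedIndirect takes 5 (20 bytes).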
+ const indirectParamsSize = p.indexed ? 20 : 16;
+ return [
+ { indirectOffset: 0, bufferSize: 0, _valid: false },
+ { indirectOffset: 0, bufferSize: indirectParamsSize, _valid: true },
+ { indirectOffset: 0, bufferSize: indirectParamsSize + 1, _valid: true },
+ { indirectOffset: 0, bufferSize: indirectParamsSize - 1, _valid: false },
+ { indirectOffset: 0, bufferSize: indirectParamsSize - 4, _valid: false },
+ { indirectOffset: 4, bufferSize: indirectParamsSize + 4, _valid: true },
+ { indirectOffset: 4, bufferSize: indirectParamsSize + 3, _valid: false },
+ { indirectOffset: 2, bufferSize: indirectParamsSize + 4, _valid: false },
+ { indirectOffset: 3, bufferSize: indirectParamsSize + 4, _valid: false },
+ { indirectOffset: 5, bufferSize: indirectParamsSize + 4, _valid: false },
+ { indirectOffset: indirectParamsSize, bufferSize: indirectParamsSize, _valid: false },
+ { indirectOffset: indirectParamsSize + 4, bufferSize: indirectParamsSize, _valid: false },
+ ] as const;
+ })
+ )
+ .fn(t => {
+ const { encoderType, indexed, indirectOffset, bufferSize, _valid } = t.params;
+ const pipeline = t.createNoOpRenderPipeline();
+ const indirectBuffer = t.device.createBuffer({
+ size: bufferSize,
+ usage: GPUBufferUsage.INDIRECT,
+ });
+
+ const { encoder, validateFinish } = t.createEncoder(encoderType);
+ encoder.setPipeline(pipeline);
+ if (indexed) {
+ const indexBuffer = t.makeIndexBuffer();
+ encoder.setIndexBuffer(indexBuffer, 'uint32');
+ encoder.drawIndexedIndirect(indirectBuffer, indirectOffset);
+ } else {
+ encoder.drawIndirect(indirectBuffer, indirectOffset);
+ }
+
+ validateFinish(_valid);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/render.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/render.ts
new file mode 100644
index 0000000000..0df9ec6365
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/render.ts
@@ -0,0 +1,29 @@
+import { kUnitCaseParamsBuilder } from '../../../../../../common/framework/params_builder.js';
+import { kRenderEncodeTypes } from '../../../../../util/command_buffer_maker.js';
+
+export const kRenderEncodeTypeParams = kUnitCaseParamsBuilder.combine(
+ 'encoderType',
+ kRenderEncodeTypes
+);
+
+export function buildBufferOffsetAndSizeOOBTestParams(minAlignment: number, bufferSize: number) {
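+  // Cases are valid iff offset + size <= bufferSize, where an undefined size means
+  // "the rest of the buffer" (bufferSize - offset). Offsets here are multiples of minAlignment
+  // (assuming bufferSize is too), so only the out-of-bounds condition varies.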
+ return kRenderEncodeTypeParams.combineWithParams([
+ // Explicit size
+ { offset: 0, size: 0, _valid: true },
+ { offset: 0, size: 1, _valid: true },
+ { offset: 0, size: 4, _valid: true },
+ { offset: 0, size: 5, _valid: true },
+ { offset: 0, size: bufferSize, _valid: true },
+ { offset: 0, size: bufferSize + 4, _valid: false },
+ { offset: minAlignment, size: bufferSize, _valid: false },
+ { offset: minAlignment, size: bufferSize - minAlignment, _valid: true },
+ { offset: bufferSize - minAlignment, size: minAlignment, _valid: true },
+ { offset: bufferSize, size: 1, _valid: false },
+ // Implicit size: buffer.size - offset
+ { offset: 0, size: undefined, _valid: true },
+ { offset: minAlignment, size: undefined, _valid: true },
+ { offset: bufferSize - minAlignment, size: undefined, _valid: true },
+ { offset: bufferSize, size: undefined, _valid: true },
+ { offset: bufferSize + minAlignment, size: undefined, _valid: false },
+ ]);
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/setIndexBuffer.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/setIndexBuffer.spec.ts
new file mode 100644
index 0000000000..344fc25ff2
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/setIndexBuffer.spec.ts
@@ -0,0 +1,124 @@
+export const description = `
+Validation tests for setIndexBuffer on render pass and render bundle.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUConst } from '../../../../../constants.js';
+import { kResourceStates } from '../../../../../gpu_test.js';
+import { ValidationTest } from '../../../validation_test.js';
+
+import { kRenderEncodeTypeParams, buildBufferOffsetAndSizeOOBTestParams } from './render.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('index_buffer_state')
+ .desc(
+ `
+Tests index buffer must be valid.
+ `
+ )
+ .paramsSubcasesOnly(kRenderEncodeTypeParams.combine('state', kResourceStates))
+ .fn(t => {
+ const { encoderType, state } = t.params;
+ const indexBuffer = t.createBufferWithState(state, {
+ size: 16,
+ usage: GPUBufferUsage.INDEX,
+ });
+
+ const { encoder, validateFinishAndSubmitGivenState } = t.createEncoder(encoderType);
+ encoder.setIndexBuffer(indexBuffer, 'uint32');
+ validateFinishAndSubmitGivenState(state);
+ });
+
+g.test('index_buffer,device_mismatch')
+ .desc('Tests setIndexBuffer cannot be called with an index buffer created from another device')
+ .paramsSubcasesOnly(kRenderEncodeTypeParams.combine('mismatched', [true, false]))
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { encoderType, mismatched } = t.params;
+ const sourceDevice = mismatched ? t.mismatchedDevice : t.device;
+
+ const indexBuffer = sourceDevice.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.INDEX,
+ });
+ t.trackForCleanup(indexBuffer);
+
+ const { encoder, validateFinish } = t.createEncoder(encoderType);
+ encoder.setIndexBuffer(indexBuffer, 'uint32');
+ validateFinish(!mismatched);
+ });
+
+g.test('index_buffer_usage')
+ .desc(
+ `
+Tests index buffer must have 'Index' usage.
+ `
+ )
+ .paramsSubcasesOnly(
+ kRenderEncodeTypeParams.combine('usage', [
+ GPUConst.BufferUsage.INDEX, // control case
+ GPUConst.BufferUsage.COPY_DST,
+ GPUConst.BufferUsage.COPY_DST | GPUConst.BufferUsage.INDEX,
+ ] as const)
+ )
+ .fn(t => {
+ const { encoderType, usage } = t.params;
+ const indexBuffer = t.device.createBuffer({
+ size: 16,
+ usage,
+ });
+
+ const { encoder, validateFinish } = t.createEncoder(encoderType);
+ encoder.setIndexBuffer(indexBuffer, 'uint32');
+ validateFinish((usage & GPUBufferUsage.INDEX) !== 0);
+ });
+
+g.test('offset_alignment')
+ .desc(
+ `
+Tests offset must be a multiple of index format’s byte size.
+ `
+ )
+ .paramsSubcasesOnly(
+ kRenderEncodeTypeParams
+ .combine('indexFormat', ['uint16', 'uint32'] as const)
+ .expand('offset', p => {
+ return p.indexFormat === 'uint16' ? ([0, 1, 2] as const) : ([0, 2, 4] as const);
+ })
+ )
+ .fn(t => {
+ const { encoderType, indexFormat, offset } = t.params;
+ const indexBuffer = t.device.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.INDEX,
+ });
+
+ const { encoder, validateFinish } = t.createEncoder(encoderType);
+ encoder.setIndexBuffer(indexBuffer, indexFormat, offset);
+
+ const alignment =
+ indexFormat === 'uint16' ? Uint16Array.BYTES_PER_ELEMENT : Uint32Array.BYTES_PER_ELEMENT;
+ validateFinish(offset % alignment === 0);
+ });
+
+g.test('offset_and_size_oob')
+ .desc(
+ `
+Tests that offset + size cannot extend past the end of the index buffer.
+ `
+ )
+ .paramsSubcasesOnly(buildBufferOffsetAndSizeOOBTestParams(4, 256))
+ .fn(t => {
+ const { encoderType, offset, size, _valid } = t.params;
+ const indexBuffer = t.device.createBuffer({
+ size: 256,
+ usage: GPUBufferUsage.INDEX,
+ });
+
+ const { encoder, validateFinish } = t.createEncoder(encoderType);
+ encoder.setIndexBuffer(indexBuffer, 'uint32', offset, size);
+ validateFinish(_valid);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/setPipeline.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/setPipeline.spec.ts
new file mode 100644
index 0000000000..00624c0690
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/setPipeline.spec.ts
@@ -0,0 +1,62 @@
+export const description = `
+Validation tests for setPipeline on render pass and render bundle.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { kRenderEncodeTypes } from '../../../../../util/command_buffer_maker.js';
+import { ValidationTest } from '../../../validation_test.js';
+
+import { kRenderEncodeTypeParams } from './render.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('invalid_pipeline')
+ .desc(
+ `
+Tests setPipeline should generate an error iff using an 'invalid' pipeline.
+ `
+ )
+ .paramsSubcasesOnly(u =>
+ u.combine('encoderType', kRenderEncodeTypes).combine('state', ['valid', 'invalid'] as const)
+ )
+ .fn(t => {
+ const { encoderType, state } = t.params;
+ const pipeline = t.createRenderPipelineWithState(state);
+
+ const { encoder, validateFinish } = t.createEncoder(encoderType);
+ encoder.setPipeline(pipeline);
+ validateFinish(state !== 'invalid');
+ });
+
+g.test('pipeline,device_mismatch')
+ .desc('Tests setPipeline cannot be called with a render pipeline created from another device')
+ .paramsSubcasesOnly(kRenderEncodeTypeParams.combine('mismatched', [true, false]))
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { encoderType, mismatched } = t.params;
+ const sourceDevice = mismatched ? t.mismatchedDevice : t.device;
+
+ const pipeline = sourceDevice.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: sourceDevice.createShaderModule({
+ code: `@vertex fn main() -> @builtin(position) vec4<f32> { return vec4<f32>(); }`,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: sourceDevice.createShaderModule({
+ code: '@fragment fn main() {}',
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm', writeMask: 0 }],
+ },
+ primitive: { topology: 'triangle-list' },
+ });
+
+ const { encoder, validateFinish } = t.createEncoder(encoderType);
+ encoder.setPipeline(pipeline);
+ validateFinish(!mismatched);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/setVertexBuffer.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/setVertexBuffer.spec.ts
new file mode 100644
index 0000000000..2faa3b58b9
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/setVertexBuffer.spec.ts
@@ -0,0 +1,144 @@
+export const description = `
+Validation tests for setVertexBuffer on render pass and render bundle.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { makeValueTestVariant } from '../../../../../../common/util/util.js';
+import { GPUConst } from '../../../../../constants.js';
+import { kResourceStates } from '../../../../../gpu_test.js';
+import { ValidationTest } from '../../../validation_test.js';
+
+import { kRenderEncodeTypeParams, buildBufferOffsetAndSizeOOBTestParams } from './render.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('slot')
+ .desc(
+ `
+Tests slot must be less than the maxVertexBuffers in device limits.
+ `
+ )
+ .paramsSubcasesOnly(
+ kRenderEncodeTypeParams.combine('slotVariant', [
+ { mult: 0, add: 0 },
+ { mult: 1, add: -1 },
+ { mult: 1, add: 0 },
+ ] as const)
+ )
+ .fn(t => {
+ const { encoderType, slotVariant } = t.params;
+ const maxVertexBuffers = t.device.limits.maxVertexBuffers;
+ const slot = makeValueTestVariant(maxVertexBuffers, slotVariant);
+
+ const vertexBuffer = t.createBufferWithState('valid', {
+ size: 16,
+ usage: GPUBufferUsage.VERTEX,
+ });
+
+ const { encoder, validateFinish } = t.createEncoder(encoderType);
+ encoder.setVertexBuffer(slot, vertexBuffer);
+ validateFinish(slot < maxVertexBuffers);
+ });
+
+g.test('vertex_buffer_state')
+ .desc(
+ `
+Tests vertex buffer must be valid.
+ `
+ )
+ .paramsSubcasesOnly(kRenderEncodeTypeParams.combine('state', kResourceStates))
+ .fn(t => {
+ const { encoderType, state } = t.params;
+ const vertexBuffer = t.createBufferWithState(state, {
+ size: 16,
+ usage: GPUBufferUsage.VERTEX,
+ });
+
+ const { encoder, validateFinishAndSubmitGivenState } = t.createEncoder(encoderType);
+ encoder.setVertexBuffer(0, vertexBuffer);
+ validateFinishAndSubmitGivenState(state);
+ });
+
+g.test('vertex_buffer,device_mismatch')
+ .desc('Tests setVertexBuffer cannot be called with a vertex buffer created from another device')
+ .paramsSubcasesOnly(kRenderEncodeTypeParams.combine('mismatched', [true, false]))
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { encoderType, mismatched } = t.params;
+ const sourceDevice = mismatched ? t.mismatchedDevice : t.device;
+
+ const vertexBuffer = sourceDevice.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.VERTEX,
+ });
+ t.trackForCleanup(vertexBuffer);
+
+ const { encoder, validateFinish } = t.createEncoder(encoderType);
+ encoder.setVertexBuffer(0, vertexBuffer);
+ validateFinish(!mismatched);
+ });
+
+g.test('vertex_buffer_usage')
+ .desc(
+ `
+Tests vertex buffer must have 'Vertex' usage.
+ `
+ )
+ .paramsSubcasesOnly(
+ kRenderEncodeTypeParams.combine('usage', [
+ GPUConst.BufferUsage.VERTEX, // control case
+ GPUConst.BufferUsage.COPY_DST,
+ GPUConst.BufferUsage.COPY_DST | GPUConst.BufferUsage.VERTEX,
+ ] as const)
+ )
+ .fn(t => {
+ const { encoderType, usage } = t.params;
+ const vertexBuffer = t.device.createBuffer({
+ size: 16,
+ usage,
+ });
+
+ const { encoder, validateFinish } = t.createEncoder(encoderType);
+ encoder.setVertexBuffer(0, vertexBuffer);
+ validateFinish((usage & GPUBufferUsage.VERTEX) !== 0);
+ });
+
+g.test('offset_alignment')
+ .desc(
+ `
+Tests offset must be a multiple of 4.
+ `
+ )
+ .paramsSubcasesOnly(kRenderEncodeTypeParams.combine('offset', [0, 2, 4] as const))
+ .fn(t => {
+ const { encoderType, offset } = t.params;
+ const vertexBuffer = t.device.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.VERTEX,
+ });
+
+ const { encoder, validateFinish: finish } = t.createEncoder(encoderType);
+ encoder.setVertexBuffer(0, vertexBuffer, offset);
+ finish(offset % 4 === 0);
+ });
+
+g.test('offset_and_size_oob')
+ .desc(
+ `
+Tests that offset + size cannot extend past the end of the vertex buffer.
+ `
+ )
+ .paramsSubcasesOnly(buildBufferOffsetAndSizeOOBTestParams(4, 256))
+ .fn(t => {
+ const { encoderType, offset, size, _valid } = t.params;
+ const vertexBuffer = t.device.createBuffer({
+ size: 256,
+ usage: GPUBufferUsage.VERTEX,
+ });
+
+ const { encoder, validateFinish } = t.createEncoder(encoderType);
+ encoder.setVertexBuffer(0, vertexBuffer, offset, size);
+ validateFinish(_valid);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/state_tracking.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/state_tracking.spec.ts
new file mode 100644
index 0000000000..b004293579
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render/state_tracking.spec.ts
@@ -0,0 +1,184 @@
+export const description = `
+Tests for setVertexBuffer/setIndexBuffer state tracking (not parameter validation). See also operation tests.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { range } from '../../../../../../common/util/util.js';
+import { ValidationTest } from '../../../validation_test.js';
+
+class F extends ValidationTest {
+ getVertexBuffer(): GPUBuffer {
+ return this.device.createBuffer({
+ size: 256,
+ usage: GPUBufferUsage.VERTEX,
+ });
+ }
+
+ createRenderPipeline(bufferCount: number): GPURenderPipeline {
+ return this.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: this.device.createShaderModule({
+ code: `
+ struct Inputs {
+ ${range(bufferCount, i => `\n@location(${i}) a_position${i} : vec3<f32>,`).join('')}
+ };
+ @vertex fn main(input : Inputs
+ ) -> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ buffers: [
+ {
+ arrayStride: 3 * 4,
+ attributes: range(bufferCount, i => ({
+ format: 'float32x3',
+ offset: 0,
+ shaderLocation: i,
+ })),
+ },
+ ],
+ },
+ fragment: {
+ module: this.device.createShaderModule({
+ code: `
+ @fragment fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ primitive: { topology: 'triangle-list' },
+ });
+ }
+
+ beginRenderPass(commandEncoder: GPUCommandEncoder): GPURenderPassEncoder {
+ const attachmentTexture = this.device.createTexture({
+ format: 'rgba8unorm',
+ size: { width: 16, height: 16, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ return commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: attachmentTexture.createView(),
+ clearValue: { r: 1.0, g: 0.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ }
+}
+
+export const g = makeTestGroup(F);
+
+g.test(`all_needed_vertex_buffer_should_be_bound`)
+ .desc(
+ `
+In this test we test that any missing vertex buffer for a used slot will cause validation errors when drawing.
+- All (non/indexed, in/direct) draw commands
+ - A needed vertex buffer is not bound
+ - Was bound in another render pass but not the current one
+`
+ )
+ .unimplemented();
+
+g.test(`all_needed_index_buffer_should_be_bound`)
+ .desc(
+ `
+In this test we test that a missing index buffer will cause validation errors when drawing with an indexed draw call.
+- All indexed in/direct draw commands
+ - No index buffer is bound
+`
+ )
+ .unimplemented();
+
+g.test('vertex_buffers_inherit_from_previous_pipeline').fn(t => {
+ const pipeline1 = t.createRenderPipeline(1);
+ const pipeline2 = t.createRenderPipeline(2);
+
+ const vertexBuffer1 = t.getVertexBuffer();
+ const vertexBuffer2 = t.getVertexBuffer();
+
+ {
+ // Check failure when vertex buffer is not set
+ const commandEncoder = t.device.createCommandEncoder();
+ const renderPass = t.beginRenderPass(commandEncoder);
+ renderPass.setPipeline(pipeline1);
+ renderPass.draw(3);
+ renderPass.end();
+
+ t.expectValidationError(() => {
+ commandEncoder.finish();
+ });
+ }
+ {
+ // Check success when vertex buffer is inherited from previous pipeline
+ const commandEncoder = t.device.createCommandEncoder();
+ const renderPass = t.beginRenderPass(commandEncoder);
+ renderPass.setPipeline(pipeline2);
+ renderPass.setVertexBuffer(0, vertexBuffer1);
+ renderPass.setVertexBuffer(1, vertexBuffer2);
+ renderPass.draw(3);
+ renderPass.setPipeline(pipeline1);
+ renderPass.draw(3);
+ renderPass.end();
+
+ commandEncoder.finish();
+ }
+});
+
+g.test('vertex_buffers_do_not_inherit_between_render_passes').fn(t => {
+ const pipeline1 = t.createRenderPipeline(1);
+ const pipeline2 = t.createRenderPipeline(2);
+
+ const vertexBuffer1 = t.getVertexBuffer();
+ const vertexBuffer2 = t.getVertexBuffer();
+
+ {
+ // Check success when vertex buffer is set for each render pass
+ const commandEncoder = t.device.createCommandEncoder();
+ {
+ const renderPass = t.beginRenderPass(commandEncoder);
+ renderPass.setPipeline(pipeline2);
+ renderPass.setVertexBuffer(0, vertexBuffer1);
+ renderPass.setVertexBuffer(1, vertexBuffer2);
+ renderPass.draw(3);
+ renderPass.end();
+ }
+ {
+ const renderPass = t.beginRenderPass(commandEncoder);
+ renderPass.setPipeline(pipeline1);
+ renderPass.setVertexBuffer(0, vertexBuffer1);
+ renderPass.draw(3);
+ renderPass.end();
+ }
+ commandEncoder.finish();
+ }
+ {
+    // Check failure because vertex buffers are not inherited by the second render pass
+ const commandEncoder = t.device.createCommandEncoder();
+ {
+ const renderPass = t.beginRenderPass(commandEncoder);
+ renderPass.setPipeline(pipeline2);
+ renderPass.setVertexBuffer(0, vertexBuffer1);
+ renderPass.setVertexBuffer(1, vertexBuffer2);
+ renderPass.draw(3);
+ renderPass.end();
+ }
+ {
+ const renderPass = t.beginRenderPass(commandEncoder);
+ renderPass.setPipeline(pipeline1);
+ renderPass.draw(3);
+ renderPass.end();
+ }
+
+ t.expectValidationError(() => {
+ commandEncoder.finish();
+ });
+ }
+});
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render_pass.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render_pass.spec.ts
new file mode 100644
index 0000000000..e3e881e01d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/render_pass.spec.ts
@@ -0,0 +1,14 @@
+export const description = `
+Validation tests for render pass encoding.
+Does **not** test usage scopes (resource_usages/), GPUProgrammablePassEncoder (programmable_pass),
+dynamic state (dynamic_render_state.spec.ts), or GPURenderEncoderBase (render.spec.ts).
+
+TODO:
+- executeBundles:
+ - with {zero, one, multiple} bundles where {zero, one} of them are invalid objects
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { ValidationTest } from '../../validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/setBindGroup.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/setBindGroup.spec.ts
new file mode 100644
index 0000000000..ebc015bd54
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/cmds/setBindGroup.spec.ts
@@ -0,0 +1,435 @@
+export const description = `
+setBindGroup validation tests.
+
+TODO: merge these notes and implement.
+> (Note: If there are errors with using certain binding types in certain passes, test those in the file for that pass type, not here.)
+>
+> - state tracking (probably separate file)
+> - x= {compute pass, render pass}
+> - {null, compatible, incompatible} current pipeline (should have no effect without draw/dispatch)
+> - setBindGroup in different orders (e.g. 0,1,2 vs 2,0,1)
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { makeValueTestVariant, range, unreachable } from '../../../../../common/util/util.js';
+import {
+ kBufferBindingTypes,
+ kMinDynamicBufferOffsetAlignment,
+} from '../../../../capability_info.js';
+import { kResourceStates, ResourceState } from '../../../../gpu_test.js';
+import {
+ kProgrammableEncoderTypes,
+ ProgrammableEncoderType,
+} from '../../../../util/command_buffer_maker.js';
+import { ValidationTest } from '../../validation_test.js';
+
+class F extends ValidationTest {
+ encoderTypeToStageFlag(encoderType: ProgrammableEncoderType): GPUShaderStageFlags {
+ switch (encoderType) {
+ case 'compute pass':
+ return GPUShaderStage.COMPUTE;
+ case 'render pass':
+ case 'render bundle':
+ return GPUShaderStage.FRAGMENT;
+ default:
+ unreachable('Unknown encoder type');
+ }
+ }
+
+ createBindingResourceWithState(
+ resourceType: 'texture' | 'buffer',
+ state: 'valid' | 'destroyed'
+ ): GPUBindingResource {
+ switch (resourceType) {
+ case 'texture': {
+ const texture = this.createTextureWithState('valid');
+ const view = texture.createView();
+ if (state === 'destroyed') {
+ texture.destroy();
+ }
+ return view;
+ }
+ case 'buffer':
+ return {
+ buffer: this.createBufferWithState(state, {
+ size: 4,
+ usage: GPUBufferUsage.STORAGE,
+ }),
+ };
+ default:
+ unreachable('unknown resource type');
+ }
+ }
+
+ /**
+ * If state is 'invalid', creates an invalid bind group with valid resources.
+ * If state is 'destroyed', creates a valid bind group with destroyed resources.
+ */
+ createBindGroup(
+ state: ResourceState,
+ resourceType: 'buffer' | 'texture',
+ encoderType: ProgrammableEncoderType,
+ indices: number[]
+ ) {
+ if (state === 'invalid') {
+ this.device.pushErrorScope('validation');
+ indices = new Array<number>(indices.length + 1).fill(0);
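+      // Duplicate binding indices (all zero) make the layout, and therefore the bind group, invalid.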
+ }
+
+ const layout = this.device.createBindGroupLayout({
+ entries: indices.map(binding => ({
+ binding,
+ visibility: this.encoderTypeToStageFlag(encoderType),
+ ...(resourceType === 'buffer' ? { buffer: { type: 'storage' } } : { texture: {} }),
+ })),
+ });
+ const bindGroup = this.device.createBindGroup({
+ layout,
+ entries: indices.map(binding => ({
+ binding,
+ resource: this.createBindingResourceWithState(
+ resourceType,
+ state === 'destroyed' ? state : 'valid'
+ ),
+ })),
+ });
+
+ if (state === 'invalid') {
+ void this.device.popErrorScope();
+ }
+ return bindGroup;
+ }
+}
+
+export const g = makeTestGroup(F);
+
+g.test('state_and_binding_index')
+ .desc('Tests that setBindGroup correctly handles {valid, invalid, destroyed} bindGroups.')
+ .params(u =>
+ u
+ .combine('encoderType', kProgrammableEncoderTypes)
+ .combine('state', kResourceStates)
+ .combine('resourceType', ['buffer', 'texture'] as const)
+ )
+ .fn(t => {
+ const { encoderType, state, resourceType } = t.params;
+ const maxBindGroups = t.device.limits.maxBindGroups;
+
+ function runTest(index: number) {
+ const { encoder, validateFinishAndSubmit } = t.createEncoder(encoderType);
+ encoder.setBindGroup(index, t.createBindGroup(state, resourceType, encoderType, [index]));
+
+ validateFinishAndSubmit(state !== 'invalid' && index < maxBindGroups, state !== 'destroyed');
+ }
+
+ // MAINTENANCE_TODO: move to subcases() once we can query the device limits
+ for (const index of [1, maxBindGroups - 1, maxBindGroups]) {
+ t.debug(`test bind group index ${index}`);
+ runTest(index);
+ }
+ });
+
+g.test('bind_group,device_mismatch')
+ .desc(
+ `
+ Tests setBindGroup cannot be called with a bind group created from another device
+ - x= setBindGroup {sequence overload, Uint32Array overload}
+ `
+ )
+ .params(u =>
+ u
+ .combine('encoderType', kProgrammableEncoderTypes)
+ .beginSubcases()
+ .combine('useU32Array', [true, false])
+ .combine('mismatched', [true, false])
+ )
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { encoderType, useU32Array, mismatched } = t.params;
+ const sourceDevice = mismatched ? t.mismatchedDevice : t.device;
+
+ const buffer = sourceDevice.createBuffer({
+ size: 4,
+ usage: GPUBufferUsage.STORAGE,
+ });
+
+ const layout = sourceDevice.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: t.encoderTypeToStageFlag(encoderType),
+ buffer: { type: 'storage', hasDynamicOffset: useU32Array },
+ },
+ ],
+ });
+
+ const bindGroup = sourceDevice.createBindGroup({
+ layout,
+ entries: [
+ {
+ binding: 0,
+ resource: { buffer },
+ },
+ ],
+ });
+
+ const { encoder, validateFinish } = t.createEncoder(encoderType);
+ if (useU32Array) {
+ encoder.setBindGroup(0, bindGroup, new Uint32Array([0]), 0, 1);
+ } else {
+ encoder.setBindGroup(0, bindGroup);
+ }
+ validateFinish(!mismatched);
+ });
+
+g.test('dynamic_offsets_passed_but_not_expected')
+ .desc('Tests that setBindGroup correctly errors on unexpected dynamicOffsets.')
+ .params(u => u.combine('encoderType', kProgrammableEncoderTypes))
+ .fn(t => {
+ const { encoderType } = t.params;
+ const bindGroup = t.createBindGroup('valid', 'buffer', encoderType, []);
+ const dynamicOffsets = [0];
+
+ const { encoder, validateFinish } = t.createEncoder(encoderType);
+ encoder.setBindGroup(0, bindGroup, dynamicOffsets);
+ validateFinish(false);
+ });
+
+g.test('dynamic_offsets_match_expectations_in_pass_encoder')
+ .desc('Tests that given dynamicOffsets match the specified bindGroup.')
+ .params(u =>
+ u
+ .combine('encoderType', kProgrammableEncoderTypes)
+ .combineWithParams([
+ { dynamicOffsets: [256, 0], _success: true }, // Dynamic offsets aligned
+ { dynamicOffsets: [1, 2], _success: false }, // Dynamic offsets not aligned
+
+ // Wrong number of dynamic offsets
+ { dynamicOffsets: [256, 0, 0], _success: false },
+ { dynamicOffsets: [256], _success: false },
+ { dynamicOffsets: [], _success: false },
+
+ // Dynamic uniform buffer out of bounds because of binding size
+ { dynamicOffsets: [512, 0], _success: false },
+ { dynamicOffsets: [1024, 0], _success: false },
+ { dynamicOffsets: [0xffffffff, 0], _success: false },
+
+ // Dynamic storage buffer out of bounds because of binding size
+ { dynamicOffsets: [0, 512], _success: false },
+ { dynamicOffsets: [0, 1024], _success: false },
+ { dynamicOffsets: [0, 0xffffffff], _success: false },
+ ])
+ .combine('useU32array', [false, true])
+ )
+ .fn(t => {
+ const kBindingSize = 12;
+
+ const bindGroupLayout = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE | GPUShaderStage.FRAGMENT,
+ buffer: {
+ type: 'uniform',
+ hasDynamicOffset: true,
+ },
+ },
+ {
+ binding: 1,
+ visibility: GPUShaderStage.COMPUTE | GPUShaderStage.FRAGMENT,
+ buffer: {
+ type: 'storage',
+ hasDynamicOffset: true,
+ },
+ },
+ ],
+ });
+
+ const uniformBuffer = t.device.createBuffer({
+ size: 2 * kMinDynamicBufferOffsetAlignment + 8,
+ usage: GPUBufferUsage.UNIFORM,
+ });
+
+ const storageBuffer = t.device.createBuffer({
+ size: 2 * kMinDynamicBufferOffsetAlignment + 8,
+ usage: GPUBufferUsage.STORAGE,
+ });
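+    // With kMinDynamicBufferOffsetAlignment = 256, both buffers are 2 * 256 + 8 = 520 bytes: a
+    // dynamic offset of 256 plus the 12-byte binding fits, but 512 + 12 does not.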
+
+ const bindGroup = t.device.createBindGroup({
+ layout: bindGroupLayout,
+ entries: [
+ {
+ binding: 0,
+ resource: {
+ buffer: uniformBuffer,
+ size: kBindingSize,
+ },
+ },
+ {
+ binding: 1,
+ resource: {
+ buffer: storageBuffer,
+ size: kBindingSize,
+ },
+ },
+ ],
+ });
+
+ const { encoderType, dynamicOffsets, useU32array, _success } = t.params;
+
+ const { encoder, validateFinish } = t.createEncoder(encoderType);
+ if (useU32array) {
+ encoder.setBindGroup(0, bindGroup, new Uint32Array(dynamicOffsets), 0, dynamicOffsets.length);
+ } else {
+ encoder.setBindGroup(0, bindGroup, dynamicOffsets);
+ }
+ validateFinish(_success);
+ });
+
+g.test('u32array_start_and_length')
+ .desc('Tests that dynamicOffsetsData(Start|Length) apply to the given Uint32Array.')
+ .paramsSubcasesOnly([
+ // dynamicOffsetsDataLength > offsets.length
+ {
+ offsets: [0] as const,
+ dynamicOffsetsDataStart: 0,
+ dynamicOffsetsDataLength: 2,
+ _success: false,
+ },
+ // dynamicOffsetsDataStart + dynamicOffsetsDataLength > offsets.length
+ {
+ offsets: [0] as const,
+ dynamicOffsetsDataStart: 1,
+ dynamicOffsetsDataLength: 1,
+ _success: false,
+ },
+ {
+ offsets: [0, 0] as const,
+ dynamicOffsetsDataStart: 1,
+ dynamicOffsetsDataLength: 1,
+ _success: true,
+ },
+ {
+ offsets: [0, 0, 0] as const,
+ dynamicOffsetsDataStart: 1,
+ dynamicOffsetsDataLength: 1,
+ _success: true,
+ },
+ {
+ offsets: [0, 0] as const,
+ dynamicOffsetsDataStart: 0,
+ dynamicOffsetsDataLength: 2,
+ _success: true,
+ },
+ ])
+ .fn(t => {
+ const { offsets, dynamicOffsetsDataStart, dynamicOffsetsDataLength, _success } = t.params;
+ const kBindingSize = 8;
+
+ const bindGroupLayout = t.device.createBindGroupLayout({
+ entries: range(dynamicOffsetsDataLength, i => ({
+ binding: i,
+ visibility: GPUShaderStage.FRAGMENT,
+ buffer: {
+ type: 'storage',
+ hasDynamicOffset: true,
+ },
+ })),
+ });
+
+ const bindGroup = t.device.createBindGroup({
+ layout: bindGroupLayout,
+ entries: range(dynamicOffsetsDataLength, i => ({
+ binding: i,
+ resource: {
+ buffer: t.createBufferWithState('valid', {
+ size: kBindingSize,
+ usage: GPUBufferUsage.STORAGE,
+ }),
+ size: kBindingSize,
+ },
+ })),
+ });
+
+ const { encoder, validateFinish } = t.createEncoder('render pass');
+
+ const doSetBindGroup = () => {
+ encoder.setBindGroup(
+ 0,
+ bindGroup,
+ new Uint32Array(offsets),
+ dynamicOffsetsDataStart,
+ dynamicOffsetsDataLength
+ );
+ };
+
+ if (_success) {
+ doSetBindGroup();
+ } else {
+ t.shouldThrow('RangeError', doSetBindGroup);
+ }
+
+ // RangeError in setBindGroup does not cause the encoder to become invalid.
+ validateFinish(true);
+ });
+
+g.test('buffer_dynamic_offsets')
+ .desc(
+ `
+  Test that the dynamic offsets of the BufferLayout are a multiple of
+  'minUniformBufferOffsetAlignment|minStorageBufferOffsetAlignment' if the BindGroup entry defines
+  a buffer and the buffer type is 'uniform|storage|read-only-storage'.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('type', kBufferBindingTypes)
+ .combine('encoderType', kProgrammableEncoderTypes)
+ .beginSubcases()
+ .combine('dynamicOffsetVariant', [
+ { mult: 1, add: 0 },
+ { mult: 0.5, add: 0 },
+ { mult: 1.5, add: 0 },
+ { mult: 2, add: 0 },
+ { mult: 1, add: 2 },
+ ])
+ )
+ .fn(t => {
+ const { type, dynamicOffsetVariant, encoderType } = t.params;
+ const kBindingSize = 12;
+
+ const minAlignment =
+ t.device.limits[
+ type === 'uniform' ? 'minUniformBufferOffsetAlignment' : 'minStorageBufferOffsetAlignment'
+ ];
+ const dynamicOffset = makeValueTestVariant(minAlignment, dynamicOffsetVariant);
+
+ const bindGroupLayout = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE,
+ buffer: { type, hasDynamicOffset: true },
+ },
+ ],
+ });
+
+ const usage = type === 'uniform' ? GPUBufferUsage.UNIFORM : GPUBufferUsage.STORAGE;
+ const isValid = dynamicOffset % minAlignment === 0;
+
+ const buffer = t.device.createBuffer({
+ size: 3 * kMinDynamicBufferOffsetAlignment,
+ usage,
+ });
+
+ const bindGroup = t.device.createBindGroup({
+ entries: [{ binding: 0, resource: { buffer, size: kBindingSize } }],
+ layout: bindGroupLayout,
+ });
+
+ const { encoder, validateFinish } = t.createEncoder(encoderType);
+ encoder.setBindGroup(0, bindGroup, [dynamicOffset]);
+ validateFinish(isValid);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/createRenderBundleEncoder.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/createRenderBundleEncoder.spec.ts
new file mode 100644
index 0000000000..2eaa9b43fd
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/createRenderBundleEncoder.spec.ts
@@ -0,0 +1,259 @@
+export const description = `
+createRenderBundleEncoder validation tests.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { range } from '../../../../common/util/util.js';
+import { kMaxColorAttachmentsToTest } from '../../../capability_info.js';
+import {
+ computeBytesPerSampleFromFormats,
+ kAllTextureFormats,
+ kDepthStencilFormats,
+ kTextureFormatInfo,
+ kRenderableColorTextureFormats,
+} from '../../../format_info.js';
+import { ValidationTest } from '../validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('attachment_state,limits,maxColorAttachments')
+  .desc(`Tests that the number of color formats must be <= device.limits.maxColorAttachments.`)
+ .params(u =>
+ u.beginSubcases().combine(
+ 'colorFormatCount',
+ range(kMaxColorAttachmentsToTest, i => i + 1)
+ )
+ )
+ .fn(t => {
+ const { colorFormatCount } = t.params;
+ const maxColorAttachments = t.device.limits.maxColorAttachments;
+ t.skipIf(
+ colorFormatCount > maxColorAttachments,
+ `${colorFormatCount} > maxColorAttachments: ${maxColorAttachments}`
+ );
+ t.expectValidationError(() => {
+ t.device.createRenderBundleEncoder({
+ colorFormats: Array(colorFormatCount).fill('r8unorm'),
+ });
+ }, colorFormatCount > t.device.limits.maxColorAttachments);
+ });
+
+g.test('attachment_state,limits,maxColorAttachmentBytesPerSample,aligned')
+ .desc(
+ `
+  Tests that the total color attachment bytes per sample must be <=
+  device.limits.maxColorAttachmentBytesPerSample when using the same format (aligned) for multiple
+  attachments.
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kRenderableColorTextureFormats)
+ .beginSubcases()
+ .combine(
+ 'colorFormatCount',
+ range(kMaxColorAttachmentsToTest, i => i + 1)
+ )
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ })
+ .fn(t => {
+ const { format, colorFormatCount } = t.params;
+ const maxColorAttachments = t.device.limits.maxColorAttachments;
+ t.skipIf(
+ colorFormatCount > maxColorAttachments,
+ `${colorFormatCount} > maxColorAttachments: ${maxColorAttachments}`
+ );
+ const info = kTextureFormatInfo[format];
+ const shouldError =
+ !info.colorRender ||
+ info.colorRender.byteCost * colorFormatCount >
+ t.device.limits.maxColorAttachmentBytesPerSample;
+
+ t.expectValidationError(() => {
+ t.device.createRenderBundleEncoder({
+ colorFormats: Array(colorFormatCount).fill(format),
+ });
+ }, shouldError);
+ });
+
+g.test('attachment_state,limits,maxColorAttachmentBytesPerSample,unaligned')
+ .desc(
+ `
+  Tests that the total color attachment bytes per sample must be <=
+  device.limits.maxColorAttachmentBytesPerSample when using various sets of (potentially)
+  unaligned formats.
+ `
+ )
+ .params(u =>
+ u.combineWithParams([
+ // Alignment causes the first 1 byte R8Unorm to become 4 bytes. So even though
+ // 1+4+8+16+1 < 32, the 4 byte alignment requirement of R32Float makes the first R8Unorm
+      // become 4 and 4+4+8+16+1 > 32. Re-ordering this so the R8Unorms are at the end, however,
+      // is allowed: 4+8+16+1+1 < 32.
+ {
+ formats: [
+ 'r8unorm',
+ 'r32float',
+ 'rgba8unorm',
+ 'rgba32float',
+ 'r8unorm',
+ ] as GPUTextureFormat[],
+ },
+ {
+ formats: [
+ 'r32float',
+ 'rgba8unorm',
+ 'rgba32float',
+ 'r8unorm',
+ 'r8unorm',
+ ] as GPUTextureFormat[],
+ },
+ ])
+ )
+ .fn(t => {
+ const { formats } = t.params;
+
+ t.skipIf(
+ formats.length > t.device.limits.maxColorAttachments,
+ `numColorAttachments: ${formats.length} > maxColorAttachments: ${t.device.limits.maxColorAttachments}`
+ );
+
+ const shouldError =
+ computeBytesPerSampleFromFormats(formats) > t.device.limits.maxColorAttachmentBytesPerSample;
+
+ t.expectValidationError(() => {
+ t.device.createRenderBundleEncoder({
+ colorFormats: formats,
+ });
+ }, shouldError);
+ });
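The accounting described in the comment above can be sketched directly. The byte costs and alignments used below (r8unorm 1/1, r32float 4/4, rgba8unorm 8/1, rgba32float 16/4) mirror the arithmetic in that comment and are listed here only for illustration:

    // Running total: align up to the format's requirement, then add its byte cost.
    function bytesPerSample(infos: Array<{ byteCost: number; alignment: number }>): number {
      let total = 0;
      for (const { byteCost, alignment } of infos) {
        total = Math.ceil(total / alignment) * alignment + byteCost;
      }
      return total;
    }
    // First ordering above: running totals 1, 8, 16, 32, 33 -- exceeds a limit of 32.
    // Reordered:            running totals 4, 12, 28, 29, 30 -- fits.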
+
+g.test('attachment_state,empty_color_formats')
+ .desc(`Tests that if no colorFormats are given, a depthStencilFormat must be specified.`)
+ .params(u =>
+ u.beginSubcases().combine('depthStencilFormat', [undefined, 'depth24plus-stencil8'] as const)
+ )
+ .fn(t => {
+ const { depthStencilFormat } = t.params;
+ t.expectValidationError(() => {
+ t.device.createRenderBundleEncoder({
+ colorFormats: [],
+ depthStencilFormat,
+ });
+ }, depthStencilFormat === undefined);
+ });
+
+g.test('valid_texture_formats')
+ .desc(
+ `
+ Tests that createRenderBundleEncoder only accepts valid formats for its attachments.
+ - colorFormats
+ - depthStencilFormat
+ `
+ )
+ .params(u =>
+ u //
+ .combine('format', kAllTextureFormats)
+ .beginSubcases()
+ .combine('attachment', ['color', 'depthStencil'])
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ t.selectDeviceForTextureFormatOrSkipTestCase(format);
+ })
+ .fn(t => {
+ const { format, attachment } = t.params;
+
+ const colorRenderable = kTextureFormatInfo[format].colorRender;
+
+ const depthStencil = kTextureFormatInfo[format].depth || kTextureFormatInfo[format].stencil;
+
+ switch (attachment) {
+ case 'color': {
+ t.expectValidationError(() => {
+ t.device.createRenderBundleEncoder({
+ colorFormats: [format],
+ });
+ }, !colorRenderable);
+
+ break;
+ }
+ case 'depthStencil': {
+ t.expectValidationError(() => {
+ t.device.createRenderBundleEncoder({
+ colorFormats: [],
+ depthStencilFormat: format,
+ });
+ }, !depthStencil);
+
+ break;
+ }
+ }
+ });
+
+g.test('depth_stencil_readonly')
+ .desc(
+ `
+  Tests createRenderBundleEncoder validation of depthReadOnly and stencilReadOnly:
+ - With depth-only formats
+ - With stencil-only formats
+ - With depth-stencil-combined formats
+ `
+ )
+ .params(u =>
+ u //
+ .combine('depthStencilFormat', kDepthStencilFormats)
+ .beginSubcases()
+ .combine('depthReadOnly', [false, true])
+ .combine('stencilReadOnly', [false, true])
+ )
+ .beforeAllSubcases(t => {
+ const { depthStencilFormat } = t.params;
+ t.selectDeviceForTextureFormatOrSkipTestCase(depthStencilFormat);
+ })
+ .fn(t => {
+ const { depthStencilFormat, depthReadOnly, stencilReadOnly } = t.params;
+
+ let shouldError = false;
+ if (
+ kTextureFormatInfo[depthStencilFormat].depth &&
+ kTextureFormatInfo[depthStencilFormat].stencil &&
+ depthReadOnly !== stencilReadOnly
+ ) {
+ shouldError = true;
+ }
+
+ t.expectValidationError(() => {
+ t.device.createRenderBundleEncoder({
+ colorFormats: [],
+ depthStencilFormat,
+ depthReadOnly,
+ stencilReadOnly,
+ });
+ }, shouldError);
+ });
+
+g.test('depth_stencil_readonly_with_undefined_depth')
+ .desc(
+ `
+ Tests that createRenderBundleEncoder validation of depthReadOnly and stencilReadOnly is ignored
+ if there is no depthStencilFormat set.
+ `
+ )
+ .params(u =>
+ u //
+ .beginSubcases()
+ .combine('depthReadOnly', [false, true])
+ .combine('stencilReadOnly', [false, true])
+ )
+ .fn(t => {
+ const { depthReadOnly, stencilReadOnly } = t.params;
+
+ t.device.createRenderBundleEncoder({
+ colorFormats: ['bgra8unorm'],
+ depthReadOnly,
+ stencilReadOnly,
+ });
+ });
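For context on what these attachment-state checks protect, a render bundle encoder created with a given attachment state is finished into a bundle and replayed inside render passes whose attachments match. A rough sketch, assuming a `device` and an open `renderPass` with a bgra8unorm color attachment:

    const bundleEncoder = device.createRenderBundleEncoder({
      colorFormats: ['bgra8unorm'], // must match the formats of the pass that replays it
    });
    // ... setPipeline / setBindGroup / draw calls recorded into the bundle ...
    const bundle = bundleEncoder.finish();
    renderPass.executeBundles([bundle]);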
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/encoder_open_state.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/encoder_open_state.spec.ts
new file mode 100644
index 0000000000..0d56222eed
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/encoder_open_state.spec.ts
@@ -0,0 +1,587 @@
+export const description = `
+Validation tests for all commands of GPUCommandEncoder, GPUComputePassEncoder, and
+GPURenderPassEncoder, covering both the open (control) and already-finished encoder states.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { unreachable } from '../../../../common/util/util.js';
+import { ValidationTest } from '../validation_test.js';
+
+import { beginRenderPassWithQuerySet } from './queries/common.js';
+
+class F extends ValidationTest {
+ createRenderPipelineForTest(): GPURenderPipeline {
+ return this.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: this.device.createShaderModule({
+ code: `
+ @vertex fn main() -> @builtin(position) vec4<f32> {
+ return vec4<f32>();
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: this.device.createShaderModule({
+ code: `@fragment fn main() {}`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm', writeMask: 0 }],
+ },
+ });
+ }
+
+ createBindGroupForTest(): GPUBindGroup {
+ return this.device.createBindGroup({
+ entries: [
+ {
+ binding: 0,
+ resource: this.device.createSampler(),
+ },
+ ],
+ layout: this.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.FRAGMENT,
+ sampler: { type: 'filtering' },
+ },
+ ],
+ }),
+ });
+ }
+}
+
+export const g = makeTestGroup(F);
+
+type EncoderCommands = keyof Omit<GPUCommandEncoder, '__brand' | 'label' | 'finish'>;
+const kEncoderCommandInfo: {
+ readonly [k in EncoderCommands]: {};
+} = {
+ beginComputePass: {},
+ beginRenderPass: {},
+ clearBuffer: {},
+ copyBufferToBuffer: {},
+ copyBufferToTexture: {},
+ copyTextureToBuffer: {},
+ copyTextureToTexture: {},
+ insertDebugMarker: {},
+ popDebugGroup: {},
+ pushDebugGroup: {},
+ writeTimestamp: {},
+ resolveQuerySet: {},
+};
+const kEncoderCommands = keysOf(kEncoderCommandInfo);
+
+type RenderPassEncoderCommands = keyof Omit<GPURenderPassEncoder, '__brand' | 'label' | 'end'>;
+const kRenderPassEncoderCommandInfo: {
+ readonly [k in RenderPassEncoderCommands]: {};
+} = {
+ draw: {},
+ drawIndexed: {},
+ drawIndexedIndirect: {},
+ drawIndirect: {},
+ setIndexBuffer: {},
+ setBindGroup: {},
+ setVertexBuffer: {},
+ setPipeline: {},
+ setViewport: {},
+ setScissorRect: {},
+ setBlendConstant: {},
+ setStencilReference: {},
+ beginOcclusionQuery: {},
+ endOcclusionQuery: {},
+ executeBundles: {},
+ pushDebugGroup: {},
+ popDebugGroup: {},
+ insertDebugMarker: {},
+};
+const kRenderPassEncoderCommands = keysOf(kRenderPassEncoderCommandInfo);
+
+type RenderBundleEncoderCommands = keyof Omit<
+ GPURenderBundleEncoder,
+ '__brand' | 'label' | 'finish'
+>;
+const kRenderBundleEncoderCommandInfo: {
+ readonly [k in RenderBundleEncoderCommands]: {};
+} = {
+ draw: {},
+ drawIndexed: {},
+ drawIndexedIndirect: {},
+ drawIndirect: {},
+ setPipeline: {},
+ setBindGroup: {},
+ setIndexBuffer: {},
+ setVertexBuffer: {},
+ pushDebugGroup: {},
+ popDebugGroup: {},
+ insertDebugMarker: {},
+};
+const kRenderBundleEncoderCommands = keysOf(kRenderBundleEncoderCommandInfo);
+
+// MAINTENANCE_TODO: remove the deprecated 'dispatch' and 'dispatchIndirect' here once they're
+// removed from `@webgpu/types`.
+type ComputePassEncoderCommands = keyof Omit<
+ GPUComputePassEncoder,
+ '__brand' | 'label' | 'end' | 'dispatch' | 'dispatchIndirect'
+>;
+const kComputePassEncoderCommandInfo: {
+ readonly [k in ComputePassEncoderCommands]: {};
+} = {
+ setBindGroup: {},
+ setPipeline: {},
+ dispatchWorkgroups: {},
+ dispatchWorkgroupsIndirect: {},
+ pushDebugGroup: {},
+ popDebugGroup: {},
+ insertDebugMarker: {},
+};
+const kComputePassEncoderCommands = keysOf(kComputePassEncoderCommandInfo);
+
+g.test('non_pass_commands')
+ .desc(
+ `
+ Test that functions of GPUCommandEncoder generate a validation error if the encoder is already
+ finished.
+ `
+ )
+ .params(u =>
+ u
+ .combine('command', kEncoderCommands)
+ .beginSubcases()
+ .combine('finishBeforeCommand', [false, true])
+ )
+ .beforeAllSubcases(t => {
+ switch (t.params.command) {
+ case 'writeTimestamp':
+ t.selectDeviceOrSkipTestCase('timestamp-query');
+ break;
+ }
+ })
+ .fn(t => {
+ const { command, finishBeforeCommand } = t.params;
+
+ const srcBuffer = t.device.createBuffer({
+ size: 16,
+      usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+ const dstBuffer = t.device.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.QUERY_RESOLVE,
+ });
+
+ const textureSize = { width: 1, height: 1 };
+ const textureFormat = 'rgba8unorm';
+ const srcTexture = t.device.createTexture({
+ size: textureSize,
+ format: textureFormat,
+ usage: GPUTextureUsage.COPY_SRC,
+ });
+ const dstTexture = t.device.createTexture({
+ size: textureSize,
+ format: textureFormat,
+ usage: GPUTextureUsage.COPY_DST,
+ });
+
+ const querySet = t.device.createQuerySet({
+ type: command === 'writeTimestamp' ? 'timestamp' : 'occlusion',
+ count: 1,
+ });
+
+ const encoder = t.device.createCommandEncoder();
+
+ if (finishBeforeCommand) encoder.finish();
+
+ t.expectValidationError(() => {
+ switch (command) {
+ case 'beginComputePass':
+ {
+ encoder.beginComputePass();
+ }
+ break;
+ case 'beginRenderPass':
+ {
+ encoder.beginRenderPass({ colorAttachments: [] });
+ }
+ break;
+ case 'clearBuffer':
+ {
+ encoder.clearBuffer(dstBuffer, 0, 16);
+ }
+ break;
+ case 'copyBufferToBuffer':
+ {
+ encoder.copyBufferToBuffer(srcBuffer, 0, dstBuffer, 0, 0);
+ }
+ break;
+ case 'copyBufferToTexture':
+ {
+ encoder.copyBufferToTexture(
+ { buffer: srcBuffer },
+ { texture: dstTexture },
+ textureSize
+ );
+ }
+ break;
+ case 'copyTextureToBuffer':
+ {
+ encoder.copyTextureToBuffer(
+ { texture: srcTexture },
+ { buffer: dstBuffer },
+ textureSize
+ );
+ }
+ break;
+ case 'copyTextureToTexture':
+ {
+ encoder.copyTextureToTexture(
+ { texture: srcTexture },
+ { texture: dstTexture },
+ textureSize
+ );
+ }
+ break;
+ case 'insertDebugMarker':
+ {
+ encoder.insertDebugMarker('marker');
+ }
+ break;
+ case 'pushDebugGroup':
+ {
+ encoder.pushDebugGroup('group');
+ }
+ break;
+ case 'popDebugGroup':
+ {
+ encoder.popDebugGroup();
+ }
+ break;
+ case 'writeTimestamp':
+ {
+ encoder.writeTimestamp(querySet, 0);
+ }
+ break;
+ case 'resolveQuerySet':
+ {
+ encoder.resolveQuerySet(querySet, 0, 1, dstBuffer, 0);
+ }
+ break;
+ default:
+ unreachable();
+ }
+ }, finishBeforeCommand);
+ });
+
+g.test('render_pass_commands')
+ .desc(
+ `
+ Test that functions of GPURenderPassEncoder generate a validation error if the encoder or the
+ pass is already finished.
+
+ - TODO: Consider testing: nothing before command, end before command, end+finish before command.
+ `
+ )
+ .params(u =>
+ u
+ .combine('command', kRenderPassEncoderCommands)
+ .beginSubcases()
+ .combine('finishBeforeCommand', [false, true])
+ )
+ .fn(t => {
+ const { command, finishBeforeCommand } = t.params;
+
+ const querySet = t.device.createQuerySet({ type: 'occlusion', count: 1 });
+ const encoder = t.device.createCommandEncoder();
+ const renderPass = beginRenderPassWithQuerySet(t, encoder, querySet);
+
+ const buffer = t.device.createBuffer({
+ size: 12,
+ usage: GPUBufferUsage.INDIRECT | GPUBufferUsage.VERTEX,
+ });
+
+ const pipeline = t.createRenderPipelineForTest();
+
+ const bindGroup = t.createBindGroupForTest();
+
+ if (finishBeforeCommand) {
+ renderPass.end();
+ encoder.finish();
+ }
+
+ t.expectValidationError(() => {
+ switch (command) {
+ case 'draw':
+ {
+ renderPass.draw(1);
+ }
+ break;
+ case 'drawIndexed':
+ {
+ renderPass.drawIndexed(1);
+ }
+ break;
+ case 'drawIndirect':
+ {
+ renderPass.drawIndirect(buffer, 1);
+ }
+ break;
+ case 'setIndexBuffer':
+ {
+ renderPass.setIndexBuffer(buffer, 'uint32');
+ }
+ break;
+ case 'drawIndexedIndirect':
+ {
+ renderPass.drawIndexedIndirect(buffer, 0);
+ }
+ break;
+ case 'setBindGroup':
+ {
+ renderPass.setBindGroup(0, bindGroup);
+ }
+ break;
+ case 'setVertexBuffer':
+ {
+ renderPass.setVertexBuffer(1, buffer);
+ }
+ break;
+ case 'setPipeline':
+ {
+ renderPass.setPipeline(pipeline);
+ }
+ break;
+ case 'setViewport':
+ {
+ const kNumTestPoints = 8;
+ const kViewportMinDepth = 0;
+ const kViewportMaxDepth = 1;
+ renderPass.setViewport(0, 0, kNumTestPoints, 0, kViewportMinDepth, kViewportMaxDepth);
+ }
+ break;
+ case 'setScissorRect':
+ {
+ renderPass.setScissorRect(0, 0, 0, 0);
+ }
+ break;
+ case 'setBlendConstant':
+ {
+ renderPass.setBlendConstant({ r: 1.0, g: 1.0, b: 1.0, a: 1.0 });
+ }
+ break;
+ case 'setStencilReference':
+ {
+ renderPass.setStencilReference(0);
+ }
+ break;
+ case 'beginOcclusionQuery':
+ {
+ renderPass.beginOcclusionQuery(0);
+ }
+ break;
+ case 'endOcclusionQuery':
+ {
+ renderPass.endOcclusionQuery();
+ }
+ break;
+ case 'executeBundles':
+ {
+ renderPass.executeBundles([]);
+ }
+ break;
+ case 'pushDebugGroup':
+ {
+ encoder.pushDebugGroup('group');
+ }
+ break;
+ case 'popDebugGroup':
+ {
+ encoder.popDebugGroup();
+ }
+ break;
+ case 'insertDebugMarker':
+ {
+ encoder.insertDebugMarker('marker');
+ }
+ break;
+ default:
+ unreachable();
+ }
+ }, finishBeforeCommand);
+ });
+
+g.test('render_bundle_commands')
+ .desc(
+ `
+ Test that functions of GPURenderBundleEncoder generate a validation error if the encoder or the
+ pass is already finished.
+ `
+ )
+ .params(u =>
+ u
+ .combine('command', kRenderBundleEncoderCommands)
+ .beginSubcases()
+ .combine('finishBeforeCommand', [false, true])
+ )
+ .fn(t => {
+ const { command, finishBeforeCommand } = t.params;
+
+ const buffer = t.device.createBuffer({
+ size: 12,
+ usage: GPUBufferUsage.INDIRECT | GPUBufferUsage.VERTEX,
+ });
+
+ const pipeline = t.createRenderPipelineForTest();
+
+ const bindGroup = t.createBindGroupForTest();
+
+ const bundleEncoder = t.device.createRenderBundleEncoder({
+ colorFormats: ['rgba8unorm'],
+ });
+
+ if (finishBeforeCommand) {
+ bundleEncoder.finish();
+ }
+
+ t.expectValidationError(() => {
+ switch (command) {
+ case 'draw':
+ {
+ bundleEncoder.draw(1);
+ }
+ break;
+ case 'drawIndexed':
+ {
+ bundleEncoder.drawIndexed(1);
+ }
+ break;
+ case 'drawIndexedIndirect':
+ {
+ bundleEncoder.drawIndexedIndirect(buffer, 0);
+ }
+ break;
+ case 'drawIndirect':
+ {
+ bundleEncoder.drawIndirect(buffer, 1);
+ }
+ break;
+ case 'setPipeline':
+ {
+ bundleEncoder.setPipeline(pipeline);
+ }
+ break;
+ case 'setBindGroup':
+ {
+ bundleEncoder.setBindGroup(0, bindGroup);
+ }
+ break;
+ case 'setIndexBuffer':
+ {
+ bundleEncoder.setIndexBuffer(buffer, 'uint32');
+ }
+ break;
+ case 'setVertexBuffer':
+ {
+ bundleEncoder.setVertexBuffer(1, buffer);
+ }
+ break;
+ case 'pushDebugGroup':
+ {
+ bundleEncoder.pushDebugGroup('group');
+ }
+ break;
+ case 'popDebugGroup':
+ {
+ bundleEncoder.popDebugGroup();
+ }
+ break;
+ case 'insertDebugMarker':
+ {
+ bundleEncoder.insertDebugMarker('marker');
+ }
+ break;
+ default:
+ unreachable();
+ }
+ }, finishBeforeCommand);
+ });
+
+g.test('compute_pass_commands')
+ .desc(
+ `
+ Test that functions of GPUComputePassEncoder generate a validation error if the encoder or the
+ pass is already finished.
+
+ - TODO: Consider testing: nothing before command, end before command, end+finish before command.
+ `
+ )
+ .params(u =>
+ u
+ .combine('command', kComputePassEncoderCommands)
+ .beginSubcases()
+ .combine('finishBeforeCommand', [false, true])
+ )
+ .fn(t => {
+ const { command, finishBeforeCommand } = t.params;
+
+ const encoder = t.device.createCommandEncoder();
+ const computePass = encoder.beginComputePass();
+
+ const indirectBuffer = t.device.createBuffer({
+ size: 12,
+ usage: GPUBufferUsage.INDIRECT,
+ });
+
+ const computePipeline = t.createNoOpComputePipeline();
+
+ const bindGroup = t.createBindGroupForTest();
+
+ if (finishBeforeCommand) {
+ computePass.end();
+ encoder.finish();
+ }
+
+ t.expectValidationError(() => {
+ switch (command) {
+ case 'setBindGroup':
+ {
+ computePass.setBindGroup(0, bindGroup);
+ }
+ break;
+ case 'setPipeline':
+ {
+ computePass.setPipeline(computePipeline);
+ }
+ break;
+ case 'dispatchWorkgroups':
+ {
+ computePass.dispatchWorkgroups(0);
+ }
+ break;
+ case 'dispatchWorkgroupsIndirect':
+ {
+ computePass.dispatchWorkgroupsIndirect(indirectBuffer, 0);
+ }
+ break;
+ case 'pushDebugGroup':
+ {
+ computePass.pushDebugGroup('group');
+ }
+ break;
+ case 'popDebugGroup':
+ {
+ computePass.popDebugGroup();
+ }
+ break;
+ case 'insertDebugMarker':
+ {
+ computePass.insertDebugMarker('marker');
+ }
+ break;
+ default:
+ unreachable();
+ }
+ }, finishBeforeCommand);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/encoder_state.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/encoder_state.spec.ts
new file mode 100644
index 0000000000..98f1a98bb0
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/encoder_state.spec.ts
@@ -0,0 +1,250 @@
+export const description = `
+TODO:
+- createCommandEncoder
+- non-pass command, or beginPass, during {render, compute} pass
+- {before (control case), after} finish()
+ - x= {finish(), ... all non-pass commands}
+- {before (control case), after} end()
+ - x= {render, compute} pass
+ - x= {finish(), ... all relevant pass commands}
+ - x= {
+ - before endPass (control case)
+ - after endPass (no pass open)
+ - after endPass+beginPass (a new pass of the same type is open)
+ - }
+ - should make whole encoder invalid
+- ?
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { objectEquals } from '../../../../common/util/util.js';
+import { ValidationTest } from '../validation_test.js';
+
+class F extends ValidationTest {
+ beginRenderPass(commandEncoder: GPUCommandEncoder, view: GPUTextureView): GPURenderPassEncoder {
+ return commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view,
+ clearValue: { r: 1.0, g: 0.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ }
+
+ createAttachmentTextureView(): GPUTextureView {
+ const texture = this.device.createTexture({
+ format: 'rgba8unorm',
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ this.trackForCleanup(texture);
+ return texture.createView();
+ }
+}
+
+export const g = makeTestGroup(F);
+
+g.test('pass_end_invalid_order')
+ .desc(
+ `
+ Test that beginning a {compute,render} pass before ending the previous {compute,render} pass
+ causes an error.
+ `
+ )
+ .params(u =>
+ u
+ .combine('pass0Type', ['compute', 'render'])
+ .combine('pass1Type', ['compute', 'render'])
+ .beginSubcases()
+ .combine('firstPassEnd', [true, false])
+ .combine('endPasses', [[], [0], [1], [0, 1], [1, 0]])
+ // Don't end the first pass multiple times (that generates a validation error but doesn't invalidate the encoder)
+ .unless(p => p.firstPassEnd && p.endPasses.includes(0))
+ )
+ .fn(t => {
+ const { pass0Type, pass1Type, firstPassEnd, endPasses } = t.params;
+
+ const view = t.createAttachmentTextureView();
+ const encoder = t.device.createCommandEncoder();
+
+ const firstPass =
+ pass0Type === 'compute' ? encoder.beginComputePass() : t.beginRenderPass(encoder, view);
+
+ if (firstPassEnd) firstPass.end();
+
+ // Begin a second pass before ending the previous pass.
+ const secondPass =
+ pass1Type === 'compute' ? encoder.beginComputePass() : t.beginRenderPass(encoder, view);
+
+ const passes = [firstPass, secondPass];
+ for (const index of endPasses) {
+ passes[index].end();
+ }
+
+ // If {endPasses} is '[1]' and {firstPass} ends, it's a control case.
+ const valid = firstPassEnd && objectEquals(endPasses, [1]);
+
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, !valid);
+ });
+
+g.test('call_after_successful_finish')
+ .desc(`Test that encoding command after a successful finish generates a validation error.`)
+ .params(u =>
+ u
+ .combine('callCmd', ['beginComputePass', 'beginRenderPass', 'insertDebugMarker'])
+ .beginSubcases()
+ .combine('prePassType', ['compute', 'render', 'no-op'])
+ .combine('IsEncoderFinished', [false, true])
+ )
+ .fn(t => {
+ const { prePassType, IsEncoderFinished, callCmd } = t.params;
+
+ const view = t.createAttachmentTextureView();
+ const encoder = t.device.createCommandEncoder();
+
+ if (prePassType !== 'no-op') {
+ const pass =
+ prePassType === 'compute' ? encoder.beginComputePass() : t.beginRenderPass(encoder, view);
+ pass.end();
+ }
+
+ if (IsEncoderFinished) {
+ encoder.finish();
+ }
+
+ switch (callCmd) {
+ case 'beginComputePass':
+ {
+ let pass: GPUComputePassEncoder;
+ t.expectValidationError(() => {
+ pass = encoder.beginComputePass();
+ }, IsEncoderFinished);
+ t.expectValidationError(() => {
+ pass.end();
+ }, IsEncoderFinished);
+ }
+ break;
+ case 'beginRenderPass':
+ {
+ let pass: GPURenderPassEncoder;
+ t.expectValidationError(() => {
+ pass = t.beginRenderPass(encoder, view);
+ }, IsEncoderFinished);
+ t.expectValidationError(() => {
+ pass.end();
+ }, IsEncoderFinished);
+ }
+ break;
+ case 'insertDebugMarker':
+ t.expectValidationError(() => {
+ encoder.insertDebugMarker('');
+ }, IsEncoderFinished);
+ break;
+ }
+
+ if (!IsEncoderFinished) {
+ encoder.finish();
+ }
+ });
+
+g.test('pass_end_none')
+ .desc(
+ `
+  Test that finishing a command encoder without ending a begun {compute,render} pass generates a validation error.
+ `
+ )
+ .paramsSubcasesOnly(u => u.combine('passType', ['compute', 'render']).combine('endCount', [0, 1]))
+ .fn(t => {
+ const { passType, endCount } = t.params;
+
+ const view = t.createAttachmentTextureView();
+ const encoder = t.device.createCommandEncoder();
+
+ const pass =
+ passType === 'compute' ? encoder.beginComputePass() : t.beginRenderPass(encoder, view);
+
+ for (let i = 0; i < endCount; ++i) {
+ pass.end();
+ }
+
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, endCount === 0);
+ });
+
+g.test('pass_end_twice,basic')
+ .desc(
+ 'Test that ending a {compute,render} pass twice generates a validation error. The parent encoder (command encoder) can be either locked or open.'
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('passType', ['compute', 'render'])
+ // Simply end twice, the parent encoder is open at that time. If the second pass end is in the middle of another pass, the parent encoder is locked. It should generate a validation error in either situation.
+ .combine('endTwice', [false, true])
+ .combine('secondEndInAnotherPass', [false, 'compute', 'render'])
+ .filter(p => p.endTwice || !p.secondEndInAnotherPass)
+ )
+ .fn(t => {
+ const { passType, endTwice, secondEndInAnotherPass } = t.params;
+
+ const view = t.createAttachmentTextureView();
+ const encoder = t.device.createCommandEncoder();
+
+ const pass =
+ passType === 'compute' ? encoder.beginComputePass() : t.beginRenderPass(encoder, view);
+
+ pass.end();
+
+ if (secondEndInAnotherPass) {
+ const pass1 =
+ secondEndInAnotherPass === 'compute'
+ ? encoder.beginComputePass()
+ : t.beginRenderPass(encoder, view);
+
+ t.expectValidationError(() => {
+ pass.end();
+ });
+
+ pass1.end();
+ } else {
+ if (endTwice) {
+ t.expectValidationError(() => {
+ pass.end();
+ });
+ }
+ }
+
+ encoder.finish();
+ });
+
+g.test('pass_end_twice,render_pass_invalid')
+ .desc(
+ 'Test that ending a render pass twice generates a validation error even if the pass is invalid.'
+ )
+ .paramsSubcasesOnly(u => u.combine('endTwice', [false, true]))
+ .fn(t => {
+ const { endTwice } = t.params;
+
+ const encoder = t.device.createCommandEncoder();
+ // Pass encoder creation will fail because both color and depth/stencil attachments are empty.
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [],
+ });
+
+ pass.end();
+
+ if (endTwice) {
+ t.expectValidationError(() => {
+ pass.end();
+ });
+ }
+
+ t.expectValidationError(() => {
+ encoder.finish();
+ });
+ });
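The well-ordered lifecycle that these state tests validate against looks like the following sketch (assuming a `device` and a valid `renderPassDescriptor`); beginning a new pass or calling finish() while a pass is still open is exactly what the cases above flag:

    const encoder = device.createCommandEncoder();
    const computePass = encoder.beginComputePass();
    computePass.end();                                        // end the first pass
    const renderPass = encoder.beginRenderPass(renderPassDescriptor);
    renderPass.end();                                         // end the second pass too
    device.queue.submit([encoder.finish()]);                  // finish() only after all passes ended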
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/programmable/pipeline_bind_group_compat.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/programmable/pipeline_bind_group_compat.spec.ts
new file mode 100644
index 0000000000..163c20c311
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/programmable/pipeline_bind_group_compat.spec.ts
@@ -0,0 +1,777 @@
+export const description = `
+TODO:
+- test compatibility between bind groups and pipelines
+ - the binding resource in bindGroups[i].layout is "group-equivalent" (value-equal) to pipelineLayout.bgls[i].
+ - in the test fn, test once without the dispatch/draw (should always be valid) and once with
+ the dispatch/draw, to make sure the validation happens in dispatch/draw.
+ - x= {dispatch, all draws} (dispatch/draw should be size 0 to make sure validation still happens if no-op)
+ - x= all relevant stages
+
+TODO: subsume existing test, rewrite fixture as needed.
+TODO: Add externalTexture to kResourceTypes [1]
+`;
+
+import { kUnitCaseParamsBuilder } from '../../../../../common/framework/params_builder.js';
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { memcpy, unreachable } from '../../../../../common/util/util.js';
+import {
+ kSamplerBindingTypes,
+ kShaderStageCombinations,
+ kBufferBindingTypes,
+ ValidBindableResource,
+} from '../../../../capability_info.js';
+import { GPUConst } from '../../../../constants.js';
+import {
+ ProgrammableEncoderType,
+ kProgrammableEncoderTypes,
+} from '../../../../util/command_buffer_maker.js';
+import { ValidationTest } from '../../validation_test.js';
+
+const kComputeCmds = ['dispatch', 'dispatchIndirect'] as const;
+type ComputeCmd = (typeof kComputeCmds)[number];
+const kRenderCmds = ['draw', 'drawIndexed', 'drawIndirect', 'drawIndexedIndirect'] as const;
+type RenderCmd = (typeof kRenderCmds)[number];
+
+// Test resource type compatibility in pipeline and bind group
+// [1]: Need to add externalTexture
+const kResourceTypes: ValidBindableResource[] = [
+ 'uniformBuf',
+ 'filtSamp',
+ 'sampledTex',
+ 'storageTex',
+];
+
+function getTestCmds(
+ encoderType: ProgrammableEncoderType
+): readonly ComputeCmd[] | readonly RenderCmd[] {
+ return encoderType === 'compute pass' ? kComputeCmds : kRenderCmds;
+}
+
+const kCompatTestParams = kUnitCaseParamsBuilder
+ .combine('encoderType', kProgrammableEncoderTypes)
+ .expand('call', p => getTestCmds(p.encoderType))
+ .combine('callWithZero', [true, false]);
+
+class F extends ValidationTest {
+ getIndexBuffer(): GPUBuffer {
+ return this.device.createBuffer({
+ size: 8 * Uint32Array.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.INDEX,
+ });
+ }
+
+ getIndirectBuffer(indirectParams: Array<number>): GPUBuffer {
+ const buffer = this.device.createBuffer({
+ mappedAtCreation: true,
+ size: indirectParams.length * Uint32Array.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.INDIRECT | GPUBufferUsage.COPY_DST,
+ });
+ memcpy({ src: new Uint32Array(indirectParams) }, { dst: buffer.getMappedRange() });
+ buffer.unmap();
+ return buffer;
+ }
+
+ getBindingResourceType(entry: GPUBindGroupLayoutEntry): ValidBindableResource {
+ if (entry.buffer !== undefined) return 'uniformBuf';
+ if (entry.sampler !== undefined) return 'filtSamp';
+ if (entry.texture !== undefined) return 'sampledTex';
+ if (entry.storageTexture !== undefined) return 'storageTex';
+ unreachable();
+ }
+
+ createRenderPipelineWithLayout(
+ bindGroups: Array<Array<GPUBindGroupLayoutEntry>>
+ ): GPURenderPipeline {
+ const shader = `
+ @vertex fn vs_main() -> @builtin(position) vec4<f32> {
+ return vec4<f32>(1.0, 1.0, 0.0, 1.0);
+ }
+
+ @fragment fn fs_main() -> @location(0) vec4<f32> {
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }
+ `;
+ const module = this.device.createShaderModule({ code: shader });
+ const pipeline = this.device.createRenderPipeline({
+ layout: this.device.createPipelineLayout({
+ bindGroupLayouts: bindGroups.map(entries => this.device.createBindGroupLayout({ entries })),
+ }),
+ vertex: {
+ module,
+ entryPoint: 'vs_main',
+ },
+ fragment: {
+ module,
+ entryPoint: 'fs_main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ primitive: { topology: 'triangle-list' },
+ });
+ return pipeline;
+ }
+
+ createComputePipelineWithLayout(
+ bindGroups: Array<Array<GPUBindGroupLayoutEntry>>
+ ): GPUComputePipeline {
+ const shader = `
+ @compute @workgroup_size(1)
+ fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>) {
+ }
+ `;
+
+ const module = this.device.createShaderModule({ code: shader });
+ const pipeline = this.device.createComputePipeline({
+ layout: this.device.createPipelineLayout({
+ bindGroupLayouts: bindGroups.map(entries => this.device.createBindGroupLayout({ entries })),
+ }),
+ compute: {
+ module,
+ entryPoint: 'main',
+ },
+ });
+ return pipeline;
+ }
+
+ createBindGroupWithLayout(bglEntries: Array<GPUBindGroupLayoutEntry>): GPUBindGroup {
+ const bgEntries: Array<GPUBindGroupEntry> = [];
+ for (const entry of bglEntries) {
+ const resource = this.getBindingResource(this.getBindingResourceType(entry));
+ bgEntries.push({
+ binding: entry.binding,
+ resource,
+ });
+ }
+
+ return this.device.createBindGroup({
+ entries: bgEntries,
+ layout: this.device.createBindGroupLayout({ entries: bglEntries }),
+ });
+ }
+
+ doCompute(pass: GPUComputePassEncoder, call: ComputeCmd | undefined, callWithZero: boolean) {
+ const x = callWithZero ? 0 : 1;
+ switch (call) {
+ case 'dispatch':
+ pass.dispatchWorkgroups(x, 1, 1);
+ break;
+ case 'dispatchIndirect':
+ pass.dispatchWorkgroupsIndirect(this.getIndirectBuffer([x, 1, 1]), 0);
+ break;
+ default:
+ break;
+ }
+ }
+
+ doRender(
+ pass: GPURenderPassEncoder | GPURenderBundleEncoder,
+ call: RenderCmd | undefined,
+ callWithZero: boolean
+ ) {
+ const vertexCount = callWithZero ? 0 : 3;
+ switch (call) {
+ case 'draw':
+ pass.draw(vertexCount, 1, 0, 0);
+ break;
+ case 'drawIndexed':
+ pass.setIndexBuffer(this.getIndexBuffer(), 'uint32');
+ pass.drawIndexed(vertexCount, 1, 0, 0, 0);
+ break;
+ case 'drawIndirect':
+ pass.drawIndirect(this.getIndirectBuffer([vertexCount, 1, 0, 0, 0]), 0);
+ break;
+ case 'drawIndexedIndirect':
+ pass.setIndexBuffer(this.getIndexBuffer(), 'uint32');
+ pass.drawIndexedIndirect(this.getIndirectBuffer([vertexCount, 1, 0, 0, 0]), 0);
+ break;
+ default:
+ break;
+ }
+ }
+
+ createBindGroupLayoutEntry(
+ encoderType: ProgrammableEncoderType,
+ resourceType: ValidBindableResource,
+ useU32Array: boolean
+ ): GPUBindGroupLayoutEntry {
+ const entry: GPUBindGroupLayoutEntry = {
+ binding: 0,
+ visibility: encoderType === 'compute pass' ? GPUShaderStage.COMPUTE : GPUShaderStage.FRAGMENT,
+ };
+
+ switch (resourceType) {
+ case 'uniformBuf':
+ entry.buffer = { hasDynamicOffset: useU32Array }; // default type: uniform
+ break;
+ case 'filtSamp':
+ entry.sampler = {}; // default type: filtering
+ break;
+ case 'sampledTex':
+ entry.texture = {}; // default sampleType: float
+ break;
+ case 'storageTex':
+ entry.storageTexture = { access: 'write-only', format: 'rgba8unorm' };
+ break;
+ }
+
+ return entry;
+ }
+
+ runTest(
+ encoderType: ProgrammableEncoderType,
+ pipeline: GPUComputePipeline | GPURenderPipeline,
+ bindGroups: Array<GPUBindGroup | undefined>,
+ dynamicOffsets: Array<number> | undefined,
+ call: ComputeCmd | RenderCmd | undefined,
+ callWithZero: boolean,
+ success: boolean
+ ) {
+ const { encoder, validateFinish } = this.createEncoder(encoderType);
+
+ if (encoder instanceof GPUComputePassEncoder) {
+ encoder.setPipeline(pipeline as GPUComputePipeline);
+ } else {
+ encoder.setPipeline(pipeline as GPURenderPipeline);
+ }
+
+ for (let i = 0; i < bindGroups.length; i++) {
+ const bindGroup = bindGroups[i];
+ if (!bindGroup) {
+ break;
+ }
+ if (dynamicOffsets) {
+ encoder.setBindGroup(
+ i,
+ bindGroup,
+ new Uint32Array(dynamicOffsets),
+ 0,
+ dynamicOffsets.length
+ );
+ } else {
+ encoder.setBindGroup(i, bindGroup);
+ }
+ }
+
+ if (encoder instanceof GPUComputePassEncoder) {
+ this.doCompute(encoder, call as ComputeCmd, callWithZero);
+ } else {
+ this.doRender(encoder, call as RenderCmd, callWithZero);
+ }
+
+ validateFinish(success);
+ }
+}
+
+export const g = makeTestGroup(F);
+
+g.test('bind_groups_and_pipeline_layout_mismatch')
+ .desc(
+ `
+  Tests that the bind groups must match the requirements of the pipeline layout.
+  - bind groups required by the pipeline layout must be set.
+ - bind groups unused by the pipeline layout can be set or not.
+ `
+ )
+ .params(
+ kCompatTestParams
+ .beginSubcases()
+ .combineWithParams([
+ { setBindGroup0: true, setBindGroup1: true, setUnusedBindGroup2: true, _success: true },
+ { setBindGroup0: true, setBindGroup1: true, setUnusedBindGroup2: false, _success: true },
+ { setBindGroup0: true, setBindGroup1: false, setUnusedBindGroup2: true, _success: false },
+ { setBindGroup0: false, setBindGroup1: true, setUnusedBindGroup2: true, _success: false },
+ { setBindGroup0: false, setBindGroup1: false, setUnusedBindGroup2: false, _success: false },
+ ])
+ .combine('useU32Array', [false, true])
+ )
+ .fn(t => {
+ const {
+ encoderType,
+ call,
+ callWithZero,
+ setBindGroup0,
+ setBindGroup1,
+ setUnusedBindGroup2,
+ _success,
+ useU32Array,
+ } = t.params;
+ const visibility =
+ encoderType === 'compute pass' ? GPUShaderStage.COMPUTE : GPUShaderStage.VERTEX;
+
+ const bindGroupLayouts: Array<Array<GPUBindGroupLayoutEntry>> = [
+ // bind group layout 0
+ [
+ {
+ binding: 0,
+ visibility,
+ buffer: { hasDynamicOffset: useU32Array }, // default type: uniform
+ },
+ ],
+ // bind group layout 1
+ [
+ {
+ binding: 0,
+ visibility,
+ buffer: { hasDynamicOffset: useU32Array }, // default type: uniform
+ },
+ ],
+ ];
+
+ // Create required bind groups
+ const bindGroup0 = setBindGroup0 ? t.createBindGroupWithLayout(bindGroupLayouts[0]) : undefined;
+ const bindGroup1 = setBindGroup1 ? t.createBindGroupWithLayout(bindGroupLayouts[1]) : undefined;
+ const unusedBindGroup2 = setUnusedBindGroup2
+ ? t.createBindGroupWithLayout(bindGroupLayouts[1])
+ : undefined;
+
+ // Create fixed pipeline
+ const pipeline =
+ encoderType === 'compute pass'
+ ? t.createComputePipelineWithLayout(bindGroupLayouts)
+ : t.createRenderPipelineWithLayout(bindGroupLayouts);
+
+ const dynamicOffsets = useU32Array ? [0] : undefined;
+
+ // Test without the dispatch/draw (should always be valid)
+ t.runTest(
+ encoderType,
+ pipeline,
+ [bindGroup0, bindGroup1, unusedBindGroup2],
+ dynamicOffsets,
+ undefined,
+ false,
+ true
+ );
+
+ // Test with the dispatch/draw, to make sure the validation happens in dispatch/draw.
+ t.runTest(
+ encoderType,
+ pipeline,
+ [bindGroup0, bindGroup1, unusedBindGroup2],
+ dynamicOffsets,
+ call,
+ callWithZero,
+ _success
+ );
+ });
+
+g.test('buffer_binding,render_pipeline')
+ .desc(
+ `
+  The GPUBufferBindingLayout binding configuration must be exactly the
+  same in the PipelineLayout and the bind group.
+ - TODO: test more draw functions, e.g. indirect
+ - TODO: test more visibilities, e.g. vertex
+ - TODO: bind group should be created with different layout
+ `
+ )
+ .params(u => u.combine('type', kBufferBindingTypes))
+ .fn(t => {
+ const { type } = t.params;
+
+ // Create fixed bindGroup
+ const uniformBuffer = t.getUniformBuffer();
+
+ const bindGroup = t.device.createBindGroup({
+ entries: [
+ {
+ binding: 0,
+ resource: {
+ buffer: uniformBuffer,
+ },
+ },
+ ],
+ layout: t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.FRAGMENT,
+ buffer: {}, // default type: uniform
+ },
+ ],
+ }),
+ });
+
+ // Create pipeline with different layouts
+ const pipeline = t.createRenderPipelineWithLayout([
+ [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.FRAGMENT,
+ buffer: {
+ type,
+ },
+ },
+ ],
+ ]);
+
+ const { encoder, validateFinish } = t.createEncoder('render pass');
+ encoder.setPipeline(pipeline);
+ encoder.setBindGroup(0, bindGroup);
+ encoder.draw(3);
+
+ validateFinish(type === undefined || type === 'uniform');
+ });
+
+g.test('sampler_binding,render_pipeline')
+ .desc(
+ `
+  The GPUSamplerBindingLayout binding configuration must be exactly the
+  same in the PipelineLayout and the bind group.
+ - TODO: test more draw functions, e.g. indirect
+ - TODO: test more visibilities, e.g. vertex
+ `
+ )
+ .params(u =>
+ u //
+ .combine('bglType', kSamplerBindingTypes)
+ .combine('bgType', kSamplerBindingTypes)
+ )
+ .fn(t => {
+ const { bglType, bgType } = t.params;
+ const bindGroup = t.device.createBindGroup({
+ entries: [
+ {
+ binding: 0,
+ resource:
+ bgType === 'comparison'
+ ? t.device.createSampler({ compare: 'always' })
+ : t.device.createSampler(),
+ },
+ ],
+ layout: t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.FRAGMENT,
+ sampler: { type: bgType },
+ },
+ ],
+ }),
+ });
+
+ // Create pipeline with different layouts
+ const pipeline = t.createRenderPipelineWithLayout([
+ [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.FRAGMENT,
+ sampler: {
+ type: bglType,
+ },
+ },
+ ],
+ ]);
+
+ const { encoder, validateFinish } = t.createEncoder('render pass');
+ encoder.setPipeline(pipeline);
+ encoder.setBindGroup(0, bindGroup);
+ encoder.draw(3);
+
+ validateFinish(bglType === bgType);
+ });
+
+g.test('bgl_binding_mismatch')
+ .desc(
+    'Tests that each binding number must exist in both bindGroups[i].layout and pipelineLayout.bgls[i], or in neither'
+ )
+ .params(
+ kCompatTestParams
+ .beginSubcases()
+ .combineWithParams([
+ { bgBindings: [0, 1, 2], plBindings: [0, 1, 2], _success: true },
+ { bgBindings: [0, 1, 2], plBindings: [0, 1, 3], _success: false },
+ { bgBindings: [0, 2], plBindings: [0, 2], _success: true },
+ { bgBindings: [0, 2], plBindings: [2, 0], _success: true },
+ { bgBindings: [0, 1, 2], plBindings: [0, 1], _success: false },
+ { bgBindings: [0, 1], plBindings: [0, 1, 2], _success: false },
+ ])
+ .combine('useU32Array', [false, true])
+ )
+ .fn(t => {
+ const { encoderType, call, callWithZero, bgBindings, plBindings, _success, useU32Array } =
+ t.params;
+ const visibility =
+ encoderType === 'compute pass' ? GPUShaderStage.COMPUTE : GPUShaderStage.VERTEX;
+
+ const bglEntries: Array<GPUBindGroupLayoutEntry> = [];
+ for (const binding of bgBindings) {
+ bglEntries.push({
+ binding,
+ visibility,
+ buffer: { hasDynamicOffset: useU32Array }, // default type: uniform
+ });
+ }
+ const bindGroup = t.createBindGroupWithLayout(bglEntries);
+
+ const plEntries: Array<Array<GPUBindGroupLayoutEntry>> = [[]];
+ for (const binding of plBindings) {
+ plEntries[0].push({
+ binding,
+ visibility,
+ buffer: { hasDynamicOffset: useU32Array }, // default type: uniform
+ });
+ }
+ const pipeline =
+ encoderType === 'compute pass'
+ ? t.createComputePipelineWithLayout(plEntries)
+ : t.createRenderPipelineWithLayout(plEntries);
+
+ const dynamicOffsets = useU32Array ? new Array(bgBindings.length).fill(0) : undefined;
+
+ // Test without the dispatch/draw (should always be valid)
+ t.runTest(encoderType, pipeline, [bindGroup], dynamicOffsets, undefined, false, true);
+
+ // Test with the dispatch/draw, to make sure the validation happens in dispatch/draw.
+ t.runTest(encoderType, pipeline, [bindGroup], dynamicOffsets, call, callWithZero, _success);
+ });
+
+g.test('bgl_visibility_mismatch')
+  .desc('Tests that the visibility in bindGroups[i].layout and pipelineLayout.bgls[i] must match')
+ .params(
+ kCompatTestParams
+ .beginSubcases()
+ .combine('bgVisibility', kShaderStageCombinations)
+ .expand('plVisibility', p =>
+ p.encoderType === 'compute pass'
+ ? ([GPUConst.ShaderStage.COMPUTE] as const)
+ : ([
+ GPUConst.ShaderStage.VERTEX,
+ GPUConst.ShaderStage.FRAGMENT,
+ GPUConst.ShaderStage.VERTEX | GPUConst.ShaderStage.FRAGMENT,
+ ] as const)
+ )
+ .combine('useU32Array', [false, true])
+ )
+ .fn(t => {
+ const { encoderType, call, callWithZero, bgVisibility, plVisibility, useU32Array } = t.params;
+
+ const bglEntries: Array<GPUBindGroupLayoutEntry> = [
+ {
+ binding: 0,
+ visibility: bgVisibility,
+ buffer: { hasDynamicOffset: useU32Array }, // default type: uniform
+ },
+ ];
+ const bindGroup = t.createBindGroupWithLayout(bglEntries);
+
+ const plEntries: Array<Array<GPUBindGroupLayoutEntry>> = [
+ [
+ {
+ binding: 0,
+ visibility: plVisibility,
+ buffer: { hasDynamicOffset: useU32Array }, // default type: uniform
+ },
+ ],
+ ];
+ const pipeline =
+ encoderType === 'compute pass'
+ ? t.createComputePipelineWithLayout(plEntries)
+ : t.createRenderPipelineWithLayout(plEntries);
+
+ const dynamicOffsets = useU32Array ? [0] : undefined;
+
+ // Test without the dispatch/draw (should always be valid)
+ t.runTest(encoderType, pipeline, [bindGroup], dynamicOffsets, undefined, false, true);
+
+ // Test with the dispatch/draw, to make sure the validation happens in dispatch/draw.
+ t.runTest(
+ encoderType,
+ pipeline,
+ [bindGroup],
+ dynamicOffsets,
+ call,
+ callWithZero,
+ bgVisibility === plVisibility
+ );
+ });
+
+g.test('bgl_resource_type_mismatch')
+ .desc(
+ `
+  Tests that the binding resource type in bindGroups[i].layout and pipelineLayout.bgls[i] must match
+ - TODO: Test externalTexture
+ `
+ )
+ .params(
+ kCompatTestParams
+ .beginSubcases()
+ .combine('bgResourceType', kResourceTypes)
+ .combine('plResourceType', kResourceTypes)
+ .expand('useU32Array', p => (p.bgResourceType === 'uniformBuf' ? [true, false] : [false]))
+ )
+ .fn(t => {
+ const { encoderType, call, callWithZero, bgResourceType, plResourceType, useU32Array } =
+ t.params;
+
+ const bglEntries: Array<GPUBindGroupLayoutEntry> = [
+ t.createBindGroupLayoutEntry(encoderType, bgResourceType, useU32Array),
+ ];
+ const bindGroup = t.createBindGroupWithLayout(bglEntries);
+
+ const plEntries: Array<Array<GPUBindGroupLayoutEntry>> = [
+ [t.createBindGroupLayoutEntry(encoderType, plResourceType, useU32Array)],
+ ];
+ const pipeline =
+ encoderType === 'compute pass'
+ ? t.createComputePipelineWithLayout(plEntries)
+ : t.createRenderPipelineWithLayout(plEntries);
+
+ const dynamicOffsets = useU32Array ? [0] : undefined;
+
+ // Test without the dispatch/draw (should always be valid)
+ t.runTest(encoderType, pipeline, [bindGroup], dynamicOffsets, undefined, false, true);
+
+ // Test with the dispatch/draw, to make sure the validation happens in dispatch/draw.
+ t.runTest(
+ encoderType,
+ pipeline,
+ [bindGroup],
+ dynamicOffsets,
+ call,
+ callWithZero,
+ bgResourceType === plResourceType
+ );
+ });
+
+g.test('empty_bind_group_layouts_requires_empty_bind_groups,compute_pass')
+ .desc(
+ `
+  Test that a compute pipeline with empty bind group layouts requires empty bind groups to be set.
+ `
+ )
+ .params(u =>
+ u
+ .combine('bindGroupLayoutEntryCount', [3, 4])
+ .combine('computeCommand', ['dispatchIndirect', 'dispatch'] as const)
+ )
+ .fn(t => {
+ const { bindGroupLayoutEntryCount, computeCommand } = t.params;
+
+ const emptyBGLCount = 4;
+ const emptyBGL = t.device.createBindGroupLayout({ entries: [] });
+ const emptyBGLs = [];
+ for (let i = 0; i < emptyBGLCount; i++) {
+ emptyBGLs.push(emptyBGL);
+ }
+
+ const pipelineLayout = t.device.createPipelineLayout({
+ bindGroupLayouts: emptyBGLs,
+ });
+
+ const pipeline = t.device.createComputePipeline({
+ layout: pipelineLayout,
+ compute: {
+ module: t.device.createShaderModule({
+ code: '@compute @workgroup_size(1) fn main() {}',
+ }),
+ entryPoint: 'main',
+ },
+ });
+
+ const emptyBindGroup = t.device.createBindGroup({
+ layout: emptyBGL,
+ entries: [],
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const computePass = encoder.beginComputePass();
+ computePass.setPipeline(pipeline);
+ for (let i = 0; i < bindGroupLayoutEntryCount; i++) {
+ computePass.setBindGroup(i, emptyBindGroup);
+ }
+
+ t.doCompute(computePass, computeCommand, true);
+ computePass.end();
+
+ const success = bindGroupLayoutEntryCount === emptyBGLCount;
+
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, !success);
+ });
+
+g.test('empty_bind_group_layouts_requires_empty_bind_groups,render_pass')
+ .desc(
+ `
+  Test that a render pipeline with empty bind group layouts requires empty bind groups to be set.
+ `
+ )
+ .params(u =>
+ u
+ .combine('bindGroupLayoutEntryCount', [3, 4])
+ .combine('renderCommand', [
+ 'draw',
+ 'drawIndexed',
+ 'drawIndirect',
+ 'drawIndexedIndirect',
+ ] as const)
+ )
+ .fn(t => {
+ const { bindGroupLayoutEntryCount, renderCommand } = t.params;
+
+ const emptyBGLCount = 4;
+ const emptyBGL = t.device.createBindGroupLayout({ entries: [] });
+ const emptyBGLs = [];
+ for (let i = 0; i < emptyBGLCount; i++) {
+ emptyBGLs.push(emptyBGL);
+ }
+
+ const pipelineLayout = t.device.createPipelineLayout({
+ bindGroupLayouts: emptyBGLs,
+ });
+
+ const colorFormat = 'rgba8unorm';
+ const pipeline = t.device.createRenderPipeline({
+ layout: pipelineLayout,
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `@vertex fn main() -> @builtin(position) vec4<f32> { return vec4<f32>(); }`,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `@fragment fn main() {}`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: colorFormat, writeMask: 0 }],
+ },
+ });
+
+ const emptyBindGroup = t.device.createBindGroup({
+ layout: emptyBGL,
+ entries: [],
+ });
+
+ const encoder = t.device.createCommandEncoder();
+
+ const attachmentTexture = t.device.createTexture({
+ format: 'rgba8unorm',
+ size: { width: 16, height: 16, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const renderPass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: attachmentTexture.createView(),
+ clearValue: { r: 1.0, g: 0.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+
+ renderPass.setPipeline(pipeline);
+ for (let i = 0; i < bindGroupLayoutEntryCount; i++) {
+ renderPass.setBindGroup(i, emptyBindGroup);
+ }
+ t.doRender(renderPass, renderCommand, true);
+ renderPass.end();
+
+ const success = bindGroupLayoutEntryCount === emptyBGLCount;
+
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, !success);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/queries/begin_end.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/queries/begin_end.spec.ts
new file mode 100644
index 0000000000..e1329023d7
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/queries/begin_end.spec.ts
@@ -0,0 +1,117 @@
+export const description = `
+Validation for encoding begin/endable queries.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { ValidationTest } from '../../validation_test.js';
+
+import { beginRenderPassWithQuerySet, createQuerySetWithType } from './common.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('occlusion_query,begin_end_balance')
+ .desc(
+ `
+Tests mismatched begin/end occlusion queries on a render pass:
+- begin n queries, then end m queries, for various n and m.
+ `
+ )
+ .paramsSubcasesOnly([
+ { begin: 0, end: 1 },
+ { begin: 1, end: 0 },
+ { begin: 1, end: 1 }, // control case
+ { begin: 1, end: 2 },
+ { begin: 2, end: 1 },
+ ] as const)
+ .fn(t => {
+ const { begin, end } = t.params;
+
+ const occlusionQuerySet = createQuerySetWithType(t, 'occlusion', 2);
+
+ const encoder = t.createEncoder('render pass', { occlusionQuerySet });
+ for (let i = 0; i < begin; i++) {
+ encoder.encoder.beginOcclusionQuery(i);
+ }
+ for (let j = 0; j < end; j++) {
+ encoder.encoder.endOcclusionQuery();
+ }
+ encoder.validateFinishAndSubmit(begin === end, true);
+ });
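A balanced sequence, for comparison (a sketch; `pass` is assumed to be a GPURenderPassEncoder whose render pass was begun with an occlusionQuerySet of at least two entries):

    pass.beginOcclusionQuery(0);
    // ... draws counted by query 0 ...
    pass.endOcclusionQuery();
    pass.beginOcclusionQuery(1); // a new query may begin only after the previous one has ended
    pass.endOcclusionQuery();
    pass.end();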
+
+g.test('occlusion_query,begin_end_invalid_nesting')
+ .desc(
+ `
+Tests the invalid nesting of begin/end occlusion queries:
+- begin index 0, end, begin index 1, end (control case)
+- begin index 0, begin index 0, end, end
+- begin index 0, begin index 1, end, end
+ `
+ )
+ .paramsSubcasesOnly([
+ { calls: [0, 'end', 1, 'end'], _valid: true }, // control case
+ { calls: [0, 0, 'end', 'end'], _valid: false },
+ { calls: [0, 1, 'end', 'end'], _valid: false },
+ ] as const)
+ .fn(t => {
+ const { calls, _valid } = t.params;
+
+ const occlusionQuerySet = createQuerySetWithType(t, 'occlusion', 2);
+
+ const encoder = t.createEncoder('render pass', { occlusionQuerySet });
+ for (const i of calls) {
+ if (i !== 'end') {
+ encoder.encoder.beginOcclusionQuery(i);
+ } else {
+ encoder.encoder.endOcclusionQuery();
+ }
+ }
+ encoder.validateFinishAndSubmit(_valid, true);
+ });
+
+g.test('occlusion_query,disjoint_queries_with_same_query_index')
+ .desc(
+ `
+Tests that two disjoint occlusion queries cannot be begun with the same query index on the same render pass:
+- begin index 0, end, begin index 0, end
+- call on {same (invalid), different (control case)} render pass
+ `
+ )
+ .paramsSubcasesOnly(u => u.combine('isOnSameRenderPass', [false, true]))
+ .fn(t => {
+ const querySet = createQuerySetWithType(t, 'occlusion', 1);
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = beginRenderPassWithQuerySet(t, encoder, querySet);
+ pass.beginOcclusionQuery(0);
+ pass.endOcclusionQuery();
+
+ if (t.params.isOnSameRenderPass) {
+ pass.beginOcclusionQuery(0);
+ pass.endOcclusionQuery();
+ pass.end();
+ } else {
+ pass.end();
+ const otherPass = beginRenderPassWithQuerySet(t, encoder, querySet);
+ otherPass.beginOcclusionQuery(0);
+ otherPass.endOcclusionQuery();
+ otherPass.end();
+ }
+
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, t.params.isOnSameRenderPass);
+ });
+
+g.test('nesting')
+ .desc(
+ `
+Tests whether it's allowed to nest various types of queries:
+- call {occlusion, timestamp} query in same type or other type.
+ `
+ )
+ .paramsSubcasesOnly([
+ { begin: 'occlusion', nest: 'timestamp', end: 'occlusion', _valid: true },
+ { begin: 'occlusion', nest: 'occlusion', end: 'occlusion', _valid: false },
+ { begin: 'timestamp', nest: 'occlusion', end: 'occlusion', _valid: true },
+ ] as const)
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/queries/common.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/queries/common.ts
new file mode 100644
index 0000000000..66e8e78b13
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/queries/common.ts
@@ -0,0 +1,37 @@
+import { GPUTest } from '../../../../gpu_test.js';
+
+export function createQuerySetWithType(
+ t: GPUTest,
+ type: GPUQueryType,
+ count: GPUSize32
+): GPUQuerySet {
+ return t.device.createQuerySet({
+ type,
+ count,
+ });
+}
+
+export function beginRenderPassWithQuerySet(
+ t: GPUTest,
+ encoder: GPUCommandEncoder,
+ querySet?: GPUQuerySet
+): GPURenderPassEncoder {
+ const view = t.device
+ .createTexture({
+ format: 'rgba8unorm' as const,
+ size: { width: 16, height: 16, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ })
+ .createView();
+ return encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view,
+ clearValue: { r: 1.0, g: 0.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ occlusionQuerySet: querySet,
+ });
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/queries/general.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/queries/general.spec.ts
new file mode 100644
index 0000000000..0ed2352bfd
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/queries/general.spec.ts
@@ -0,0 +1,152 @@
+export const description = `
+Validation for encoding queries.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { kQueryTypes } from '../../../../capability_info.js';
+import { ValidationTest } from '../../validation_test.js';
+
+import { createQuerySetWithType } from './common.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('occlusion_query,query_type')
+ .desc(
+ `
+Tests setting the occlusion query set with all query types in the render pass descriptor:
+- type {occlusion (control case), timestamp}
+- {undefined} for occlusion query set in render pass descriptor
+ `
+ )
+ .params(u => u.combine('type', [undefined, ...kQueryTypes]))
+ .beforeAllSubcases(t => {
+ const { type } = t.params;
+ if (type) {
+ t.selectDeviceForQueryTypeOrSkipTestCase(type);
+ }
+ })
+ .fn(t => {
+ const type = t.params.type;
+ const querySet = type === undefined ? undefined : createQuerySetWithType(t, type, 1);
+
+ const encoder = t.createEncoder('render pass', { occlusionQuerySet: querySet });
+ encoder.encoder.beginOcclusionQuery(0);
+ encoder.encoder.endOcclusionQuery();
+ encoder.validateFinish(type === 'occlusion');
+ });
+
+g.test('occlusion_query,invalid_query_set')
+ .desc(
+ `
+Tests beginning an occlusion query with an invalid query set that failed during creation.
+ `
+ )
+ .paramsSubcasesOnly(u => u.combine('querySetState', ['valid', 'invalid'] as const))
+ .fn(t => {
+ const occlusionQuerySet = t.createQuerySetWithState(t.params.querySetState);
+
+ const encoder = t.createEncoder('render pass', { occlusionQuerySet });
+ encoder.encoder.beginOcclusionQuery(0);
+ encoder.encoder.endOcclusionQuery();
+ encoder.validateFinishAndSubmitGivenState(t.params.querySetState);
+ });
+
+g.test('occlusion_query,query_index')
+ .desc(
+ `
+Tests beginning an occlusion query with a query index:
+- queryIndex {in, out of} range for GPUQuerySet
+ `
+ )
+ .paramsSubcasesOnly(u => u.combine('queryIndex', [0, 2]))
+ .fn(t => {
+ const occlusionQuerySet = createQuerySetWithType(t, 'occlusion', 2);
+
+ const encoder = t.createEncoder('render pass', { occlusionQuerySet });
+ encoder.encoder.beginOcclusionQuery(t.params.queryIndex);
+ encoder.encoder.endOcclusionQuery();
+ encoder.validateFinish(t.params.queryIndex < 2);
+ });
+
+g.test('timestamp_query,query_type_and_index')
+ .desc(
+ `
+Tests writing a timestamp to all types of query set on all possible encoders:
+- type {occlusion, timestamp}
+- queryIndex {in, out of} range for GPUQuerySet
+- x= {non-pass} encoder
+ `
+ )
+ .params(u =>
+ u
+ .combine('type', kQueryTypes)
+ .beginSubcases()
+ .expand('queryIndex', p => (p.type === 'timestamp' ? [0, 2] : [0]))
+ )
+ .beforeAllSubcases(t => {
+ const { type } = t.params;
+
+ // writeTimestamp is only available for devices that enable the 'timestamp-query' feature.
+ const queryTypes: GPUQueryType[] = ['timestamp'];
+ if (type !== 'timestamp') {
+ queryTypes.push(type);
+ }
+
+ t.selectDeviceForQueryTypeOrSkipTestCase(queryTypes);
+ })
+ .fn(t => {
+ const { type, queryIndex } = t.params;
+
+ const count = 2;
+ const querySet = createQuerySetWithType(t, type, count);
+
+ const encoder = t.createEncoder('non-pass');
+ encoder.encoder.writeTimestamp(querySet, queryIndex);
+ encoder.validateFinish(type === 'timestamp' && queryIndex < count);
+ });
+
+g.test('timestamp_query,invalid_query_set')
+ .desc(
+ `
+Tests writing a timestamp to an invalid query set that failed during creation:
+- x= {non-pass} encoder
+ `
+ )
+ .paramsSubcasesOnly(u => u.combine('querySetState', ['valid', 'invalid'] as const))
+ .beforeAllSubcases(t => {
+ t.selectDeviceForQueryTypeOrSkipTestCase('timestamp');
+ })
+ .fn(t => {
+ const { querySetState } = t.params;
+
+ const querySet = t.createQuerySetWithState(querySetState, {
+ type: 'timestamp',
+ count: 2,
+ });
+
+ const encoder = t.createEncoder('non-pass');
+ encoder.encoder.writeTimestamp(querySet, 0);
+ encoder.validateFinish(querySetState !== 'invalid');
+ });
+
+g.test('timestamp_query,device_mismatch')
+ .desc('Tests writeTimestamp cannot be called with a query set created from another device')
+ .paramsSubcasesOnly(u => u.combine('mismatched', [true, false]))
+ .beforeAllSubcases(t => {
+ t.selectDeviceForQueryTypeOrSkipTestCase('timestamp');
+ t.selectMismatchedDeviceOrSkipTestCase('timestamp-query');
+ })
+ .fn(t => {
+ const { mismatched } = t.params;
+ const sourceDevice = mismatched ? t.mismatchedDevice : t.device;
+
+ const querySet = sourceDevice.createQuerySet({
+ type: 'timestamp',
+ count: 2,
+ });
+ t.trackForCleanup(querySet);
+
+ const encoder = t.createEncoder('non-pass');
+ encoder.encoder.writeTimestamp(querySet, 0);
+ encoder.validateFinish(!mismatched);
+ });
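For reference, the valid encoding pattern the occlusion-query cases above check against looks roughly like the following sketch, assuming a `device` and a `colorView` render attachment already exist (illustrative only, not from the suite):

// Sketch: a well-formed occlusion query encoding.
const querySet = device.createQuerySet({ type: 'occlusion', count: 2 });
const commandEncoder = device.createCommandEncoder();
const pass = commandEncoder.beginRenderPass({
  colorAttachments: [{ view: colorView, loadOp: 'clear', storeOp: 'store' }],
  occlusionQuerySet: querySet, // must have type 'occlusion'
});
pass.beginOcclusionQuery(0); // queryIndex must be < querySet.count
pass.endOcclusionQuery();
pass.end();
device.queue.submit([commandEncoder.finish()]);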
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/queries/resolveQuerySet.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/queries/resolveQuerySet.spec.ts
new file mode 100644
index 0000000000..757648097a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/queries/resolveQuerySet.spec.ts
@@ -0,0 +1,181 @@
+export const description = `
+Validation tests for resolveQuerySet.
+`;
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUConst } from '../../../../constants.js';
+import { kResourceStates } from '../../../../gpu_test.js';
+import { ValidationTest } from '../../validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+export const kQueryCount = 2;
+
+g.test('queryset_and_destination_buffer_state')
+ .desc(
+ `
+Tests that resolveQuerySet requires a valid query set and destination buffer.
+- {invalid, destroyed} GPUQuerySet results in validation error.
+- {invalid, destroyed} destination buffer results in validation error.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('querySetState', kResourceStates)
+ .combine('destinationState', kResourceStates)
+ )
+ .fn(t => {
+ const { querySetState, destinationState } = t.params;
+
+ const shouldBeValid = querySetState !== 'invalid' && destinationState !== 'invalid';
+ const shouldSubmitSuccess = querySetState === 'valid' && destinationState === 'valid';
+
+ const querySet = t.createQuerySetWithState(querySetState);
+
+ const destination = t.createBufferWithState(destinationState, {
+ size: kQueryCount * 8,
+ usage: GPUBufferUsage.QUERY_RESOLVE,
+ });
+
+ const encoder = t.createEncoder('non-pass');
+ encoder.encoder.resolveQuerySet(querySet, 0, 1, destination, 0);
+ encoder.validateFinishAndSubmit(shouldBeValid, shouldSubmitSuccess);
+ });
+
+g.test('first_query_and_query_count')
+ .desc(
+ `
+Tests resolveQuerySet with invalid firstQuery and queryCount:
+- firstQuery and/or queryCount out of range
+ `
+ )
+ .paramsSubcasesOnly([
+ { firstQuery: 0, queryCount: kQueryCount }, // control case
+ { firstQuery: 0, queryCount: kQueryCount + 1 },
+ { firstQuery: 1, queryCount: kQueryCount },
+ { firstQuery: kQueryCount, queryCount: 1 },
+ ])
+ .fn(t => {
+ const { firstQuery, queryCount } = t.params;
+
+ const querySet = t.device.createQuerySet({ type: 'occlusion', count: kQueryCount });
+ const destination = t.device.createBuffer({
+ size: kQueryCount * 8,
+ usage: GPUBufferUsage.QUERY_RESOLVE,
+ });
+
+ const encoder = t.createEncoder('non-pass');
+ encoder.encoder.resolveQuerySet(querySet, firstQuery, queryCount, destination, 0);
+ encoder.validateFinish(firstQuery + queryCount <= kQueryCount);
+ });
+
+g.test('destination_buffer_usage')
+ .desc(
+ `
+Tests resolveQuerySet with an invalid destination buffer:
+- Buffer usage {with, without} QUERY_RESOLVE
+ `
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('bufferUsage', [
+ GPUConst.BufferUsage.STORAGE,
+ GPUConst.BufferUsage.QUERY_RESOLVE, // control case
+ ] as const)
+ )
+ .fn(t => {
+ const querySet = t.device.createQuerySet({ type: 'occlusion', count: kQueryCount });
+ const destination = t.device.createBuffer({
+ size: kQueryCount * 8,
+ usage: t.params.bufferUsage,
+ });
+
+ const encoder = t.createEncoder('non-pass');
+ encoder.encoder.resolveQuerySet(querySet, 0, kQueryCount, destination, 0);
+ encoder.validateFinish(t.params.bufferUsage === GPUConst.BufferUsage.QUERY_RESOLVE);
+ });
+
+g.test('destination_offset_alignment')
+ .desc(
+ `
+Tests resolveQuerySet with an invalid destinationOffset:
+- destinationOffset is not a multiple of 256
+ `
+ )
+ .paramsSubcasesOnly(u => u.combine('destinationOffset', [0, 128, 256, 384]))
+ .fn(t => {
+ const { destinationOffset } = t.params;
+ const querySet = t.device.createQuerySet({ type: 'occlusion', count: kQueryCount });
+ const destination = t.device.createBuffer({
+ size: 512,
+ usage: GPUBufferUsage.QUERY_RESOLVE,
+ });
+
+ const encoder = t.createEncoder('non-pass');
+ encoder.encoder.resolveQuerySet(querySet, 0, kQueryCount, destination, destinationOffset);
+ encoder.validateFinish(destinationOffset % 256 === 0);
+ });
+
+g.test('resolve_buffer_oob')
+ .desc(
+ `
+Tests resolveQuerySet with an out-of-bounds size:
+- destinationBuffer.size - destinationOffset < queryCount * 8
+ `
+ )
+ .paramsSubcasesOnly(u =>
+ u.combineWithParams([
+ { queryCount: 2, bufferSize: 16, destinationOffset: 0, _success: true },
+ { queryCount: 3, bufferSize: 16, destinationOffset: 0, _success: false },
+ { queryCount: 2, bufferSize: 16, destinationOffset: 256, _success: false },
+ { queryCount: 2, bufferSize: 272, destinationOffset: 256, _success: true },
+ { queryCount: 2, bufferSize: 264, destinationOffset: 256, _success: false },
+ ])
+ )
+ .fn(t => {
+ const { queryCount, bufferSize, destinationOffset, _success } = t.params;
+ const querySet = t.device.createQuerySet({ type: 'occlusion', count: queryCount });
+ const destination = t.device.createBuffer({
+ size: bufferSize,
+ usage: GPUBufferUsage.QUERY_RESOLVE,
+ });
+
+ const encoder = t.createEncoder('non-pass');
+ encoder.encoder.resolveQuerySet(querySet, 0, queryCount, destination, destinationOffset);
+ encoder.validateFinish(_success);
+ });
+
+g.test('query_set_buffer,device_mismatch')
+ .desc(
+ 'Tests resolveQuerySet cannot be called with a query set or destination buffer created from another device'
+ )
+ .paramsSubcasesOnly([
+ { querySetMismatched: false, bufferMismatched: false }, // control case
+ { querySetMismatched: true, bufferMismatched: false },
+ { querySetMismatched: false, bufferMismatched: true },
+ ] as const)
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { querySetMismatched, bufferMismatched } = t.params;
+
+ const kQueryCount = 1;
+
+ const querySetDevice = querySetMismatched ? t.mismatchedDevice : t.device;
+ const querySet = querySetDevice.createQuerySet({
+ type: 'occlusion',
+ count: kQueryCount,
+ });
+ t.trackForCleanup(querySet);
+
+ const bufferDevice = bufferMismatched ? t.mismatchedDevice : t.device;
+ const buffer = bufferDevice.createBuffer({
+ size: kQueryCount * 8,
+ usage: GPUBufferUsage.QUERY_RESOLVE,
+ });
+ t.trackForCleanup(buffer);
+
+ const encoder = t.createEncoder('non-pass');
+ encoder.encoder.resolveQuerySet(querySet, 0, kQueryCount, buffer, 0);
+ encoder.validateFinish(!(querySetMismatched || bufferMismatched));
+ });
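The numeric constraints exercised above (query range in bounds, 256-byte offset alignment, 8 bytes of result space per query) reduce to a small predicate; a minimal sketch of a hypothetical helper that mirrors only those rules, leaving out resource validity and the QUERY_RESOLVE usage check:

// Sketch: the numeric resolveQuerySet validation rules exercised above (hypothetical helper).
function resolveQuerySetWouldValidate(
  querySetCount: number,
  firstQuery: number,
  queryCount: number,
  bufferSize: number,
  destinationOffset: number
): boolean {
  return (
    firstQuery + queryCount <= querySetCount && // query range must fit in the set
    destinationOffset % 256 === 0 && // offset must be 256-byte aligned
    destinationOffset + queryCount * 8 <= bufferSize // 8 bytes of result space per query
  );
}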
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/render_bundle.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/render_bundle.spec.ts
new file mode 100644
index 0000000000..883b634446
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/encoding/render_bundle.spec.ts
@@ -0,0 +1,258 @@
+export const description = `
+Tests execution of render bundles.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { kDepthStencilFormats, kTextureFormatInfo } from '../../../format_info.js';
+import { ValidationTest } from '../validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('empty_bundle_list')
+ .desc(
+ `
+ Test that it is valid to execute an empty list of render bundles
+ `
+ )
+ .fn(t => {
+ const encoder = t.createEncoder('render pass');
+ encoder.encoder.executeBundles([]);
+ encoder.validateFinish(true);
+ });
+
+g.test('device_mismatch')
+ .desc(
+ `
+ Tests executeBundles cannot be called with render bundles created from another device
+ Test with two bundles to make sure all bundles can be validated:
+ - bundle0 and bundle1 from the same device
+ - bundle0 and bundle1 from different devices
+ `
+ )
+ .paramsSubcasesOnly([
+ { bundle0Mismatched: false, bundle1Mismatched: false }, // control case
+ { bundle0Mismatched: true, bundle1Mismatched: false },
+ { bundle0Mismatched: false, bundle1Mismatched: true },
+ ])
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { bundle0Mismatched, bundle1Mismatched } = t.params;
+
+ const descriptor: GPURenderBundleEncoderDescriptor = {
+ colorFormats: ['rgba8unorm'],
+ };
+
+ const bundle0Device = bundle0Mismatched ? t.mismatchedDevice : t.device;
+ const bundle0 = bundle0Device.createRenderBundleEncoder(descriptor).finish();
+
+ const bundle1Device = bundle1Mismatched ? t.mismatchedDevice : t.device;
+ const bundle1 = bundle1Device.createRenderBundleEncoder(descriptor).finish();
+
+ const encoder = t.createEncoder('render pass');
+ encoder.encoder.executeBundles([bundle0, bundle1]);
+
+ encoder.validateFinish(!(bundle0Mismatched || bundle1Mismatched));
+ });
+
+g.test('color_formats_mismatch')
+ .desc(
+ `
+ Tests executeBundles cannot be called with render bundles that do not match the colorFormats of the
+ render pass. This includes:
+ - formats don't match
+ - formats match but are in a different order
+ - formats match but there is a different count
+ `
+ )
+ .params(u =>
+ u.combineWithParams([
+ {
+ bundleFormats: ['bgra8unorm', 'rg8unorm'] as const,
+ passFormats: ['bgra8unorm', 'rg8unorm'] as const,
+ _compatible: true,
+ }, // control case
+ {
+ bundleFormats: ['bgra8unorm', 'rg8unorm'] as const,
+ passFormats: ['bgra8unorm', 'bgra8unorm'] as const,
+ _compatible: false,
+ },
+ {
+ bundleFormats: ['bgra8unorm', 'rg8unorm'] as const,
+ passFormats: ['rg8unorm', 'bgra8unorm'] as const,
+ _compatible: false,
+ },
+ {
+ bundleFormats: ['bgra8unorm', 'rg8unorm', 'rgba8unorm'] as const,
+ passFormats: ['rg8unorm', 'bgra8unorm'] as const,
+ _compatible: false,
+ },
+ {
+ bundleFormats: ['bgra8unorm', 'rg8unorm'] as const,
+ passFormats: ['rg8unorm', 'bgra8unorm', 'rgba8unorm'] as const,
+ _compatible: false,
+ },
+ ])
+ )
+ .fn(t => {
+ const { bundleFormats, passFormats, _compatible } = t.params;
+
+ const bundleEncoder = t.device.createRenderBundleEncoder({
+ colorFormats: bundleFormats,
+ });
+ const bundle = bundleEncoder.finish();
+
+ const encoder = t.createEncoder('render pass', {
+ attachmentInfo: {
+ colorFormats: passFormats,
+ },
+ });
+ encoder.encoder.executeBundles([bundle]);
+
+ encoder.validateFinish(_compatible);
+ });
+
+g.test('depth_stencil_formats_mismatch')
+ .desc(
+ `
+ Tests executeBundles cannot be called with render bundles that do not match the depth/stencil format of the
+ render pass. This includes:
+ - formats don't match
+ - formats have matching depth or stencil aspects, but other aspects are missing
+ `
+ )
+ .params(u =>
+ u.combineWithParams([
+ { bundleFormat: 'depth24plus', passFormat: 'depth24plus' }, // control case
+ { bundleFormat: 'depth24plus', passFormat: 'depth16unorm' },
+ { bundleFormat: 'depth24plus', passFormat: 'depth24plus-stencil8' },
+ { bundleFormat: 'stencil8', passFormat: 'depth24plus-stencil8' },
+ ] as const)
+ )
+ .beforeAllSubcases(t => {
+ const { bundleFormat, passFormat } = t.params;
+ t.selectDeviceForTextureFormatOrSkipTestCase([bundleFormat, passFormat]);
+ })
+ .fn(t => {
+ const { bundleFormat, passFormat } = t.params;
+ const compatible = bundleFormat === passFormat;
+
+ const bundleEncoder = t.device.createRenderBundleEncoder({
+ colorFormats: [],
+ depthStencilFormat: bundleFormat,
+ });
+ const bundle = bundleEncoder.finish();
+
+ const encoder = t.createEncoder('render pass', {
+ attachmentInfo: {
+ colorFormats: [],
+ depthStencilFormat: passFormat,
+ },
+ });
+ encoder.encoder.executeBundles([bundle]);
+
+ encoder.validateFinish(compatible);
+ });
+
+g.test('depth_stencil_readonly_mismatch')
+ .desc(
+ `
+ Tests executeBundles cannot be called with render bundles that do not match the depth/stencil
+ read-only state of the render pass.
+ `
+ )
+ .params(u =>
+ u
+ .combine('depthStencilFormat', kDepthStencilFormats)
+ .beginSubcases()
+ .combine('bundleDepthReadOnly', [false, true])
+ .combine('bundleStencilReadOnly', [false, true])
+ .combine('passDepthReadOnly', [false, true])
+ .combine('passStencilReadOnly', [false, true])
+ .filter(p => {
+ // For combined depth/stencil formats the depth and stencil read only state must match
+ // in order to create a valid render bundle or render pass.
+ const depthStencilInfo = kTextureFormatInfo[p.depthStencilFormat];
+ if (depthStencilInfo.depth && depthStencilInfo.stencil) {
+ return (
+ p.passDepthReadOnly === p.passStencilReadOnly &&
+ p.bundleDepthReadOnly === p.bundleStencilReadOnly
+ );
+ }
+ return true;
+ })
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceForTextureFormatOrSkipTestCase(t.params.depthStencilFormat);
+ })
+ .fn(t => {
+ const {
+ depthStencilFormat,
+ bundleDepthReadOnly,
+ bundleStencilReadOnly,
+ passDepthReadOnly,
+ passStencilReadOnly,
+ } = t.params;
+
+ const compatible =
+ (!passDepthReadOnly || bundleDepthReadOnly === passDepthReadOnly) &&
+ (!passStencilReadOnly || bundleStencilReadOnly === passStencilReadOnly);
+
+ const bundleEncoder = t.device.createRenderBundleEncoder({
+ colorFormats: [],
+ depthStencilFormat,
+ depthReadOnly: bundleDepthReadOnly,
+ stencilReadOnly: bundleStencilReadOnly,
+ });
+ const bundle = bundleEncoder.finish();
+
+ const encoder = t.createEncoder('render pass', {
+ attachmentInfo: {
+ colorFormats: [],
+ depthStencilFormat,
+ depthReadOnly: passDepthReadOnly,
+ stencilReadOnly: passStencilReadOnly,
+ },
+ });
+ encoder.encoder.executeBundles([bundle]);
+
+ encoder.validateFinish(compatible);
+ });
+
+g.test('sample_count_mismatch')
+ .desc(
+ `
+ Tests executeBundles cannot be called with render bundles that do not match the sampleCount of the
+ render pass.
+ `
+ )
+ .params(u =>
+ u.combineWithParams([
+ { bundleSamples: 1, passSamples: 1 }, // control case
+ { bundleSamples: 4, passSamples: 4 }, // control case
+ { bundleSamples: 4, passSamples: 1 },
+ { bundleSamples: 1, passSamples: 4 },
+ ])
+ )
+ .fn(t => {
+ const { bundleSamples, passSamples } = t.params;
+
+ const compatible = bundleSamples === passSamples;
+
+ const bundleEncoder = t.device.createRenderBundleEncoder({
+ colorFormats: ['bgra8unorm'],
+ sampleCount: bundleSamples,
+ });
+ const bundle = bundleEncoder.finish();
+
+ const encoder = t.createEncoder('render pass', {
+ attachmentInfo: {
+ colorFormats: ['bgra8unorm'],
+ sampleCount: passSamples,
+ },
+ });
+ encoder.encoder.executeBundles([bundle]);
+
+ encoder.validateFinish(compatible);
+ });
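In short, a bundle is only executable in a pass whose color formats, depth/stencil format, read-only flags, and sample count match what it was encoded with. A minimal compatible pairing, assuming `device` and a single-sample 'bgra8unorm' `view` exist (illustrative sketch only):

// Sketch: bundle and render pass agree on colorFormats and sampleCount, so executeBundles validates.
const bundleEncoder = device.createRenderBundleEncoder({
  colorFormats: ['bgra8unorm'],
  sampleCount: 1,
});
const bundle = bundleEncoder.finish();

const commandEncoder = device.createCommandEncoder();
const pass = commandEncoder.beginRenderPass({
  colorAttachments: [{ view, loadOp: 'clear', storeOp: 'store' }],
});
pass.executeBundles([bundle]);
pass.end();
device.queue.submit([commandEncoder.finish()]);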
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/error_scope.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/error_scope.spec.ts
new file mode 100644
index 0000000000..cb5581fed6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/error_scope.spec.ts
@@ -0,0 +1,291 @@
+export const description = `
+Error scope validation tests.
+
+Note these must create their own device, not use GPUTest (that one already has error scopes on it).
+
+TODO: (POSTV1) Test error scopes of different threads and make sure they go to the right place.
+TODO: (POSTV1) Test that unhandled errors go to the right device, and nowhere if the device was dropped.
+`;
+
+import { Fixture } from '../../../common/framework/fixture.js';
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { getGPU } from '../../../common/util/navigator_gpu.js';
+import { assert, raceWithRejectOnTimeout } from '../../../common/util/util.js';
+import { kErrorScopeFilters, kGeneratableErrorScopeFilters } from '../../capability_info.js';
+
+class ErrorScopeTests extends Fixture {
+ _device: GPUDevice | undefined = undefined;
+
+ get device(): GPUDevice {
+ assert(this._device !== undefined);
+ return this._device;
+ }
+
+ override async init(): Promise<void> {
+ await super.init();
+ const gpu = getGPU(this.rec);
+ const adapter = await gpu.requestAdapter();
+ assert(adapter !== null);
+ const device = await adapter.requestDevice();
+ assert(device !== null);
+ this._device = device;
+ }
+
+ // Generates an error of the given filter type. For now, the errors are generated by calling a
+ // known code-path to cause the error. This can be updated in the future should there be a more
+ // direct way to inject errors.
+ generateError(filter: GPUErrorFilter): void {
+ switch (filter) {
+ case 'out-of-memory':
+ this.trackForCleanup(
+ this.device.createTexture({
+ // One of the largest formats. With the base limits, the texture will be 256 GiB.
+ format: 'rgba32float',
+ usage: GPUTextureUsage.COPY_DST,
+ size: [
+ this.device.limits.maxTextureDimension2D,
+ this.device.limits.maxTextureDimension2D,
+ this.device.limits.maxTextureArrayLayers,
+ ],
+ })
+ );
+ break;
+ case 'validation':
+ // Generating a validation error by passing in an invalid usage when creating a buffer.
+ this.trackForCleanup(
+ this.device.createBuffer({
+ size: 1024,
+ usage: 0xffff, // Invalid GPUBufferUsage
+ })
+ );
+ break;
+ }
+ // MAINTENANCE_TODO: This is a workaround for Chromium not flushing. Remove when not needed.
+ this.device.queue.submit([]);
+ }
+
+ // Checks whether the error is of the type expected given the filter.
+ isInstanceOfError(filter: GPUErrorFilter, error: GPUError | null): boolean {
+ switch (filter) {
+ case 'out-of-memory':
+ return error instanceof GPUOutOfMemoryError;
+ case 'validation':
+ return error instanceof GPUValidationError;
+ case 'internal':
+ return error instanceof GPUInternalError;
+ }
+ }
+
+ // Expect an uncapturederror event to occur. Note: this MUST be awaited, because
+ // otherwise it could erroneously pass by capturing an error from later in the test.
+ async expectUncapturedError(fn: Function): Promise<GPUUncapturedErrorEvent> {
+ return this.immediateAsyncExpectation(() => {
+ // MAINTENANCE_TODO: Make arbitrary timeout value a test runner variable
+ const TIMEOUT_IN_MS = 1000;
+
+ const promise: Promise<GPUUncapturedErrorEvent> = new Promise(resolve => {
+ const eventListener = ((event: GPUUncapturedErrorEvent) => {
+ this.debug(`Got uncaptured error event with ${event.error}`);
+ resolve(event);
+ }) as EventListener;
+
+ this.device.addEventListener('uncapturederror', eventListener, { once: true });
+ });
+
+ fn();
+
+ return raceWithRejectOnTimeout(
+ promise,
+ TIMEOUT_IN_MS,
+ 'Timeout occurred waiting for uncaptured error'
+ );
+ });
+ }
+}
+
+export const g = makeTestGroup(ErrorScopeTests);
+
+g.test('simple')
+ .desc(
+ `
+Tests that error scopes catch their expected errors, firing an uncaptured error event otherwise.
+
+- Same error and error filter (popErrorScope should return the error)
+- Different error from filter (uncaptured error should result)
+ `
+ )
+ .params(u =>
+ u.combine('errorType', kGeneratableErrorScopeFilters).combine('errorFilter', kErrorScopeFilters)
+ )
+ .fn(async t => {
+ const { errorType, errorFilter } = t.params;
+ t.device.pushErrorScope(errorFilter);
+
+ if (errorType !== errorFilter) {
+ // Different error case
+ const uncapturedErrorEvent = await t.expectUncapturedError(() => {
+ t.generateError(errorType);
+ });
+ t.expect(t.isInstanceOfError(errorType, uncapturedErrorEvent.error));
+
+ const error = await t.device.popErrorScope();
+ t.expect(error === null);
+ } else {
+ // Same error as filter
+ t.generateError(errorType);
+ const error = await t.device.popErrorScope();
+ t.expect(t.isInstanceOfError(errorType, error));
+ }
+ });
+
+g.test('empty')
+ .desc(
+ `
+Tests that popping an empty error scope stack should reject.
+ `
+ )
+ .fn(t => {
+ const promise = t.device.popErrorScope();
+ t.shouldReject('OperationError', promise);
+ });
+
+g.test('parent_scope')
+ .desc(
+ `
+Tests that an error bubbles to the correct parent scope.
+
+- Different error types as the parent scope
+- Different depths of non-capturing filters for the generated error
+ `
+ )
+ .params(u =>
+ u
+ .combine('errorFilter', kGeneratableErrorScopeFilters)
+ .combine('stackDepth', [1, 10, 100, 1000])
+ )
+ .fn(async t => {
+ const { errorFilter, stackDepth } = t.params;
+ t.device.pushErrorScope(errorFilter);
+
+ // Push a bunch of error filters onto the stack (none that match errorFilter)
+ const unmatchedFilters = kErrorScopeFilters.filter(filter => {
+ return filter !== errorFilter;
+ });
+ for (let i = 0; i < stackDepth; i++) {
+ t.device.pushErrorScope(unmatchedFilters[i % unmatchedFilters.length]);
+ }
+
+ // Cause the error and then pop all the unrelated filters.
+ t.generateError(errorFilter);
+ const promises = [];
+ for (let i = 0; i < stackDepth; i++) {
+ promises.push(t.device.popErrorScope());
+ }
+ const errors = await Promise.all(promises);
+ t.expect(errors.every(e => e === null));
+
+ // Finally the actual error should have been caught by the parent scope.
+ const error = await t.device.popErrorScope();
+ t.expect(t.isInstanceOfError(errorFilter, error));
+ });
+
+g.test('current_scope')
+ .desc(
+ `
+Tests that an error does not bubble to parent scopes when the local scope matches.
+
+- Different error types as the current scope
+- Different depths of non-capturing filters for the generated error
+ `
+ )
+ .params(u =>
+ u
+ .combine('errorFilter', kGeneratableErrorScopeFilters)
+ .combine('stackDepth', [1, 10, 100, 1000, 100000])
+ )
+ .fn(async t => {
+ const { errorFilter, stackDepth } = t.params;
+
+ // Push a bunch of error filters onto the stack
+ for (let i = 0; i < stackDepth; i++) {
+ t.device.pushErrorScope(kErrorScopeFilters[i % kErrorScopeFilters.length]);
+ }
+
+ // Current scope should catch the error immediately.
+ t.device.pushErrorScope(errorFilter);
+ t.generateError(errorFilter);
+ const error = await t.device.popErrorScope();
+ t.expect(t.isInstanceOfError(errorFilter, error));
+
+ // Remaining scopes shouldn't catch anything.
+ const promises = [];
+ for (let i = 0; i < stackDepth; i++) {
+ promises.push(t.device.popErrorScope());
+ }
+ const errors = await Promise.all(promises);
+ t.expect(errors.every(e => e === null));
+ });
+
+g.test('balanced_siblings')
+ .desc(
+ `
+Tests that sibling error scopes need to be balanced.
+
+- Different error types as the current scope
+- Different number of sibling errors
+ `
+ )
+ .params(u =>
+ u.combine('errorFilter', kErrorScopeFilters).combine('numErrors', [1, 10, 100, 1000])
+ )
+ .fn(async t => {
+ const { errorFilter, numErrors } = t.params;
+
+ const promises = [];
+ for (let i = 0; i < numErrors; i++) {
+ t.device.pushErrorScope(errorFilter);
+ promises.push(t.device.popErrorScope());
+ }
+
+ {
+ // Trying to pop an additional non-existing scope should reject.
+ const promise = t.device.popErrorScope();
+ t.shouldReject('OperationError', promise);
+ }
+
+ const errors = await Promise.all(promises);
+ t.expect(errors.every(e => e === null));
+ });
+
+g.test('balanced_nesting')
+ .desc(
+ `
+Tests that nested error scopes need to be balanced.
+
+- Different error types as the current scope
+- Different number of nested errors
+ `
+ )
+ .params(u =>
+ u.combine('errorFilter', kErrorScopeFilters).combine('numErrors', [1, 10, 100, 1000])
+ )
+ .fn(async t => {
+ const { errorFilter, numErrors } = t.params;
+
+ for (let i = 0; i < numErrors; i++) {
+ t.device.pushErrorScope(errorFilter);
+ }
+
+ const promises = [];
+ for (let i = 0; i < numErrors; i++) {
+ promises.push(t.device.popErrorScope());
+ }
+ const errors = await Promise.all(promises);
+ t.expect(errors.every(e => e === null));
+
+ {
+ // Trying to pop an additional non-existing scope should reject.
+ const promise = t.device.popErrorScope();
+ t.shouldReject('OperationError', promise);
+ }
+ });
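For context, the push/pop pairing these tests stress is the usual WebGPU error-capture pattern. A minimal sketch, assuming a `device` is in scope (illustrative only):

// Sketch: capture an expected validation error instead of letting it reach 'uncapturederror'.
async function captureValidationError(device: GPUDevice): Promise<void> {
  device.pushErrorScope('validation');
  device.createBuffer({ size: 1024, usage: 0xffff }); // invalid usage triggers a validation error
  const error = await device.popErrorScope(); // resolves with the captured error, or null
  if (error instanceof GPUValidationError) {
    console.log('captured:', error.message);
  }
}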
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/getBindGroupLayout.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/getBindGroupLayout.spec.ts
new file mode 100644
index 0000000000..295f2a91f8
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/getBindGroupLayout.spec.ts
@@ -0,0 +1,201 @@
+export const description = `
+ getBindGroupLayout validation tests.
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { assert } from '../../../common/util/util.js';
+
+import { ValidationTest } from './validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('index_range,explicit_layout')
+ .desc(
+ `
+ Test that a validation error is generated if the index exceeds the size of the bind group layouts
+ using a pipeline with an explicit layout.
+ `
+ )
+ .params(u => u.combine('index', [0, 1, 2, 3, 4, 5]))
+ .fn(t => {
+ const { index } = t.params;
+
+ const pipelineBindGroupLayouts = t.device.createBindGroupLayout({
+ entries: [],
+ });
+
+ const kBindGroupLayoutsSizeInPipelineLayout = 1;
+ const pipelineLayout = t.device.createPipelineLayout({
+ bindGroupLayouts: [pipelineBindGroupLayouts],
+ });
+
+ const pipeline = t.device.createRenderPipeline({
+ layout: pipelineLayout,
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ @vertex
+ fn main()-> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `
+ @fragment
+ fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ });
+
+ const shouldError = index >= kBindGroupLayoutsSizeInPipelineLayout;
+
+ t.expectValidationError(() => {
+ pipeline.getBindGroupLayout(index);
+ }, shouldError);
+ });
+
+g.test('index_range,auto_layout')
+ .desc(
+ `
+ Test that a validation error is generated if the index exceeds the size of the bind group layouts
+ using a pipeline with an auto layout.
+ `
+ )
+ .params(u => u.combine('index', [0, 1, 2, 3, 4, 5]))
+ .fn(t => {
+ const { index } = t.params;
+
+ const kBindGroupLayoutsSizeInPipelineLayout = 1;
+
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ @vertex
+ fn main()-> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `
+ @group(0) @binding(0) var<uniform> binding: f32;
+ @fragment
+ fn main() -> @location(0) vec4<f32> {
+ _ = binding;
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ });
+
+ const shouldError = index >= kBindGroupLayoutsSizeInPipelineLayout;
+
+ t.expectValidationError(() => {
+ pipeline.getBindGroupLayout(index);
+ }, shouldError);
+ });
+
+g.test('unique_js_object,auto_layout')
+ .desc(
+ `
+ Test that getBindGroupLayout returns a new JavaScript object for each call.
+ `
+ )
+ .fn(t => {
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ @vertex
+ fn main()-> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `
+ @group(0) @binding(0) var<uniform> binding: f32;
+ @fragment
+ fn main() -> @location(0) vec4<f32> {
+ _ = binding;
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ });
+
+ const kIndex = 0;
+ const bgl1 = pipeline.getBindGroupLayout(kIndex) as unknown as Record<string, number>;
+ bgl1.extra = 42;
+ const bgl2 = pipeline.getBindGroupLayout(kIndex) as unknown as Record<string, number>;
+
+ assert(bgl1 !== bgl2, 'objects are not the same object');
+ assert(bgl2.extra === undefined, 'objects do not retain expando properties');
+ });
+
+g.test('unique_js_object,explicit_layout')
+ .desc(
+ `
+ Test that getBindGroupLayout returns a new JavaScript object for each call.
+ `
+ )
+ .fn(t => {
+ const pipelineBindGroupLayouts = t.device.createBindGroupLayout({
+ entries: [],
+ });
+
+ const pipelineLayout = t.device.createPipelineLayout({
+ bindGroupLayouts: [pipelineBindGroupLayouts],
+ });
+
+ const pipeline = t.device.createRenderPipeline({
+ layout: pipelineLayout,
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ @vertex
+ fn main()-> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `
+ @fragment
+ fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ });
+
+ const kIndex = 0;
+ const bgl1 = pipeline.getBindGroupLayout(kIndex) as unknown as Record<string, number>;
+ bgl1.extra = 42;
+ const bgl2 = pipeline.getBindGroupLayout(kIndex) as unknown as Record<string, number>;
+
+ assert(bgl1 !== bgl2, 'objects are not the same object');
+ assert(bgl2.extra === undefined, 'objects do not retain expando properties');
+ });
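The practical consequence pinned down by the two unique_js_object tests is that callers must not rely on wrapper identity. A small sketch, assuming `pipeline` is any render or compute pipeline:

// Sketch: getBindGroupLayout returns a new JS object on every call.
const a = pipeline.getBindGroupLayout(0);
const b = pipeline.getBindGroupLayout(0);
console.log(a === b); // false - do not key caches or comparisons on object identity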
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/gpu_external_texture_expiration.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/gpu_external_texture_expiration.spec.ts
new file mode 100644
index 0000000000..7d77329920
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/gpu_external_texture_expiration.spec.ts
@@ -0,0 +1,332 @@
+export const description = `
+GPUExternalTexture expiration mechanism validation tests.
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { assert } from '../../../common/util/util.js';
+import {
+ getVideoElement,
+ startPlayingAndWaitForVideo,
+ getVideoFrameFromVideoElement,
+ waitForNextFrame,
+ waitForNextTask,
+} from '../../web_platform/util.js';
+
+import { ValidationTest } from './validation_test.js';
+
+class GPUExternalTextureExpireTest extends ValidationTest {
+ submitCommandBuffer(bindGroup: GPUBindGroup, success: boolean): void {
+ const kHeight = 16;
+ const kWidth = 16;
+ const kFormat = 'rgba8unorm';
+
+ const colorAttachment = this.device.createTexture({
+ format: kFormat,
+ size: { width: kWidth, height: kHeight, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ const passDescriptor = {
+ colorAttachments: [
+ {
+ view: colorAttachment.createView(),
+ clearValue: [0, 0, 0, 1],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ } as const;
+
+ const commandEncoder = this.device.createCommandEncoder();
+ const passEncoder = commandEncoder.beginRenderPass(passDescriptor);
+ passEncoder.setBindGroup(0, bindGroup);
+ passEncoder.end();
+ const commandBuffer = commandEncoder.finish();
+ this.expectValidationError(() => this.device.queue.submit([commandBuffer]), !success);
+ }
+
+ getDefaultVideoElementAndCheck(): HTMLVideoElement {
+ const videoElement = getVideoElement(this, 'four-colors-vp9-bt601.webm');
+
+ if (!('requestVideoFrameCallback' in videoElement)) {
+ this.skip('HTMLVideoElement.requestVideoFrameCallback is not supported');
+ }
+
+ return videoElement;
+ }
+
+ getDefaultBindGroupLayout(): GPUBindGroupLayout {
+ return this.device.createBindGroupLayout({
+ entries: [{ binding: 0, visibility: GPUShaderStage.FRAGMENT, externalTexture: {} }],
+ });
+ }
+}
+
+export const g = makeTestGroup(GPUExternalTextureExpireTest);
+
+g.test('import_multiple_times_in_same_task_scope')
+ .desc(
+ `
+ Tests that a GPUExternalTexture is valid after being imported in the current task.
+ Tests that, within the same task scope, importing twice from the same video source may return
+ the same GPUExternalTexture, in which case the bind group doesn't need to be updated.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('sourceType', ['VideoElement', 'VideoFrame'] as const)
+ )
+ .fn(async t => {
+ const sourceType = t.params.sourceType;
+ const videoElement = t.getDefaultVideoElementAndCheck();
+
+ let bindGroup: GPUBindGroup;
+ let externalTexture: GPUExternalTexture;
+ await startPlayingAndWaitForVideo(videoElement, async () => {
+ const source =
+ sourceType === 'VideoFrame'
+ ? await getVideoFrameFromVideoElement(t, videoElement)
+ : videoElement;
+ externalTexture = t.device.importExternalTexture({
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ source: source as any,
+ });
+
+ bindGroup = t.device.createBindGroup({
+ layout: t.getDefaultBindGroupLayout(),
+ entries: [{ binding: 0, resource: externalTexture }],
+ });
+
+ t.submitCommandBuffer(bindGroup, true);
+
+ // Importing again in the same task scope should return the same object.
+ const mayBeTheSameExternalTexture = t.device.importExternalTexture({
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ source: source as any,
+ });
+
+ if (externalTexture === mayBeTheSameExternalTexture) {
+ t.submitCommandBuffer(bindGroup, true);
+ } else {
+ bindGroup = t.device.createBindGroup({
+ layout: t.getDefaultBindGroupLayout(),
+ entries: [{ binding: 0, resource: externalTexture }],
+ });
+
+ t.submitCommandBuffer(bindGroup, true);
+ }
+ });
+ });
+
+g.test('import_and_use_in_different_microtask')
+ .desc(
+ `
+ Tests that, within the same task scope, an imported GPUExternalTexture remains valid
+ across different microtasks.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('sourceType', ['VideoElement', 'VideoFrame'] as const)
+ )
+ .fn(async t => {
+ const sourceType = t.params.sourceType;
+ const videoElement = t.getDefaultVideoElementAndCheck();
+
+ let bindGroup: GPUBindGroup;
+ let externalTexture: GPUExternalTexture;
+ await startPlayingAndWaitForVideo(videoElement, async () => {
+ const source =
+ sourceType === 'VideoFrame'
+ ? await getVideoFrameFromVideoElement(t, videoElement)
+ : videoElement;
+
+ // Import GPUExternalTexture
+ queueMicrotask(() => {
+ externalTexture = t.device.importExternalTexture({
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ source: source as any,
+ });
+ });
+
+ // Submit GPUExternalTexture
+ queueMicrotask(() => {
+ bindGroup = t.device.createBindGroup({
+ layout: t.getDefaultBindGroupLayout(),
+ entries: [{ binding: 0, resource: externalTexture }],
+ });
+ t.submitCommandBuffer(bindGroup, true);
+ });
+ });
+ });
+
+g.test('import_and_use_in_different_task')
+ .desc(
+ `
+ Tests that, in a different task scope, a previously imported GPUExternalTexture
+ should be expired if it was imported from an HTMLVideoElement. A GPUExternalTexture
+ imported from a WebCodecs VideoFrame is not expired.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('sourceType', ['VideoElement', 'VideoFrame'] as const)
+ )
+ .fn(async t => {
+ const sourceType = t.params.sourceType;
+ const videoElement = t.getDefaultVideoElementAndCheck();
+
+ let bindGroup: GPUBindGroup;
+ let externalTexture: GPUExternalTexture;
+ await startPlayingAndWaitForVideo(videoElement, async () => {
+ const source =
+ sourceType === 'VideoFrame'
+ ? await getVideoFrameFromVideoElement(t, videoElement)
+ : videoElement;
+ externalTexture = t.device.importExternalTexture({
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ source: source as any,
+ });
+
+ bindGroup = t.device.createBindGroup({
+ layout: t.getDefaultBindGroupLayout(),
+ entries: [{ binding: 0, resource: externalTexture }],
+ });
+
+ t.submitCommandBuffer(bindGroup, true);
+ });
+
+ await waitForNextTask(() => {
+ // Enter another task scope. For a GPUExternalTexture imported from WebCodecs,
+ // it shouldn't be expired because the VideoFrame is not 'closed'.
+ // For a GPUExternalTexture imported from HTMLVideoElement, it should be expired.
+ t.submitCommandBuffer(bindGroup, sourceType === 'VideoFrame' ? true : false);
+ });
+ });
+
+g.test('use_import_to_refresh')
+ .desc(
+ `
+ Tests that, in a different task scope, importing again from the same
+ HTMLVideoElement should return an active GPUExternalTexture.
+ `
+ )
+ .fn(async t => {
+ const videoElement = t.getDefaultVideoElementAndCheck();
+
+ let bindGroup: GPUBindGroup;
+ let externalTexture: GPUExternalTexture;
+ let source: HTMLVideoElement | VideoFrame;
+ await startPlayingAndWaitForVideo(videoElement, () => {
+ source = videoElement;
+ externalTexture = t.device.importExternalTexture({
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ source: source as any,
+ });
+
+ bindGroup = t.device.createBindGroup({
+ layout: t.getDefaultBindGroupLayout(),
+ entries: [{ binding: 0, resource: externalTexture }],
+ });
+
+ t.submitCommandBuffer(bindGroup, true);
+ });
+
+ await waitForNextTask(() => {
+ const mayBeTheSameExternalTexture = t.device.importExternalTexture({
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ source: source as any,
+ });
+
+ if (externalTexture === mayBeTheSameExternalTexture) {
+ // ImportExternalTexture should refresh expired GPUExternalTexture.
+ t.submitCommandBuffer(bindGroup, true);
+ } else {
+ bindGroup = t.device.createBindGroup({
+ layout: t.getDefaultBindGroupLayout(),
+ entries: [{ binding: 0, resource: externalTexture }],
+ });
+ t.submitCommandBuffer(bindGroup, true);
+ }
+ });
+ });
+
+g.test('webcodec_video_frame_close_expire_immediately')
+ .desc(
+ `
+ Tests that, within the same task scope, an imported GPUExternalTexture should expire
+ immediately when WebCodecs VideoFrame.close() is called.
+ `
+ )
+ .fn(async t => {
+ const videoElement = t.getDefaultVideoElementAndCheck();
+
+ let bindGroup: GPUBindGroup;
+ let externalTexture: GPUExternalTexture;
+ await startPlayingAndWaitForVideo(videoElement, async () => {
+ const source = await getVideoFrameFromVideoElement(t, videoElement);
+ externalTexture = t.device.importExternalTexture({
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ source: source as any,
+ });
+
+ bindGroup = t.device.createBindGroup({
+ layout: t.getDefaultBindGroupLayout(),
+ entries: [{ binding: 0, resource: externalTexture }],
+ });
+
+ t.submitCommandBuffer(bindGroup, true);
+
+ source.close();
+
+ t.submitCommandBuffer(bindGroup, false);
+ });
+ });
+
+g.test('import_from_different_video_frame')
+ .desc(
+ `
+ Tests that GPUExternalTextures imported from different video frames should be
+ different objects.
+ If the frames come from the same HTMLVideoElement source, the GPUExternalTexture
+ for the old frame should be expired and not refreshed again.
+ `
+ )
+ .fn(async t => {
+ const videoElement = t.getDefaultVideoElementAndCheck();
+
+ let bindGroup: GPUBindGroup;
+ let externalTexture: GPUExternalTexture;
+ await startPlayingAndWaitForVideo(videoElement, () => {
+ externalTexture = t.device.importExternalTexture({
+ source: videoElement,
+ });
+
+ bindGroup = t.device.createBindGroup({
+ layout: t.getDefaultBindGroupLayout(),
+ entries: [{ binding: 0, resource: externalTexture }],
+ });
+
+ t.submitCommandBuffer(bindGroup, true);
+ });
+
+ // Advance to a new video frame.
+ await waitForNextFrame(videoElement, () => {
+ // Import again for the new video frame.
+ const newValidExternalTexture = t.device.importExternalTexture({
+ source: videoElement,
+ });
+ assert(externalTexture !== newValidExternalTexture);
+
+ // VideoFrame is updated. GPUExternalTexture imported from old frame should be expired and
+ // cannot be refreshed again.
+ // Using the GPUExternalTexture should result in an error.
+ t.submitCommandBuffer(bindGroup, false);
+
+ // Update bindGroup with updated GPUExternalTexture should work.
+ bindGroup = t.device.createBindGroup({
+ layout: t.getDefaultBindGroupLayout(),
+ entries: [{ binding: 0, resource: newValidExternalTexture }],
+ });
+ t.submitCommandBuffer(bindGroup, true);
+ });
+ });
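Because an HTMLVideoElement-backed GPUExternalTexture is only guaranteed valid within the task that imported it, rendering code typically re-imports (and rebuilds the bind group) every frame. A sketch of that pattern, assuming `device`, `video`, and a bind group `layout` exist (illustrative only):

// Sketch: re-import the external texture on every animation frame.
function frame(): void {
  const externalTexture = device.importExternalTexture({ source: video });
  const bindGroup = device.createBindGroup({
    layout,
    entries: [{ binding: 0, resource: externalTexture }],
  });
  // ... encode a render pass that uses bindGroup, then submit ...
  requestAnimationFrame(frame);
}
requestAnimationFrame(frame);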
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/image_copy/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/image_copy/README.txt
new file mode 100644
index 0000000000..1c6c02accd
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/image_copy/README.txt
@@ -0,0 +1,32 @@
+writeTexture + copyBufferToTexture + copyTextureToBuffer validation tests.
+
+Test coverage:
+* resource usages:
+ - texture_usage_must_be_valid: for GPUTextureUsage::COPY_SRC, GPUTextureUsage::COPY_DST flags.
+ - buffer_usage_must_be_valid: for GPUBufferUsage::COPY_SRC, GPUBufferUsage::COPY_DST flags.
+
+* textureCopyView:
+ - texture_must_be_valid: for valid, destroyed, error textures.
+ - sample_count_must_be_1: for sample count 1 and 4.
+ - mip_level_must_be_in_range: for various combinations of mipLevel and mipLevelCount.
+ - format: for all formats with full and non-full copies on width, height, and depth.
+ - texel_block_alignment_on_origin: for all formats and coordinates.
+
+* bufferCopyView:
+ - buffer_must_be_valid: for valid, destroyed, error buffers.
+ - bytes_per_row_alignment: for bytesPerRow to be 256-byte aligned or not, and bytesPerRow is required or not.
+
+* linear texture data:
+ - bound_on_rows_per_image: for various combinations of copyDepth (1, >1), copyHeight, rowsPerImage.
+ - offset_plus_required_bytes_in_copy_overflow
+ - required_bytes_in_copy: testing minimal data size and data size too small for various combinations of bytesPerRow, rowsPerImage, copyExtent and offset. For the copy method, bytesPerRow is computed as bytesInACompleteRow aligned to a multiple of 256, plus bytesPerRowPadding * 256.
+ - texel_block_alignment_on_rows_per_image: for all formats.
+ - offset_alignment: for all formats.
+ - bound_on_offset: for various combinations of offset and dataSize.
+
+* texture copy range:
+ - 1d_texture: copyExtent.height isn't 1, copyExtent.depthOrArrayLayers isn't 1.
+ - texel_block_alignment_on_size: for all formats and coordinates.
+ - texture_range_conditons: for all coordinates and various combinations of origin, copyExtent, textureSize and mipLevel.
+
+TODO: more test coverage for 1D and 3D textures.
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/image_copy/buffer_related.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/image_copy/buffer_related.spec.ts
new file mode 100644
index 0000000000..6952e37347
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/image_copy/buffer_related.spec.ts
@@ -0,0 +1,226 @@
+export const description = `Validation tests for buffer related parameters for buffer <-> texture copies`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { kTextureDimensions } from '../../../capability_info.js';
+import { GPUConst } from '../../../constants.js';
+import {
+ kSizedTextureFormats,
+ kTextureFormatInfo,
+ textureDimensionAndFormatCompatible,
+} from '../../../format_info.js';
+import { kResourceStates } from '../../../gpu_test.js';
+import { kImageCopyTypes } from '../../../util/texture/layout.js';
+
+import { ImageCopyTest, formatCopyableWithMethod } from './image_copy.js';
+
+export const g = makeTestGroup(ImageCopyTest);
+
+g.test('buffer_state')
+ .desc(
+ `
+Test that the buffer must be valid and not destroyed.
+- for all buffer <-> texture copy methods
+- for various buffer states
+`
+ )
+ .params(u =>
+ u //
+ // B2B copy validations are at api,validation,encoding,cmds,copyBufferToBuffer.spec.ts
+ .combine('method', ['CopyB2T', 'CopyT2B'] as const)
+ .combine('state', kResourceStates)
+ )
+ .fn(t => {
+ const { method, state } = t.params;
+
+ // A buffer in the state under test.
+ const buffer = t.createBufferWithState(state, {
+ size: 16,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+
+ // Invalid buffer will fail finish, and destroyed buffer will fail submit
+ const submit = state !== 'invalid';
+ const success = state === 'valid';
+
+ const texture = t.device.createTexture({
+ size: { width: 2, height: 2, depthOrArrayLayers: 1 },
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ });
+
+ t.testBuffer(
+ buffer,
+ texture,
+ { bytesPerRow: 0 },
+ { width: 0, height: 0, depthOrArrayLayers: 0 },
+ { dataSize: 16, method, success, submit }
+ );
+ });
+
+g.test('buffer,device_mismatch')
+ .desc('Tests the image copies cannot be called with a buffer created from another device')
+ .paramsSubcasesOnly(u =>
+ u.combine('method', ['CopyB2T', 'CopyT2B'] as const).combine('mismatched', [true, false])
+ )
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { method, mismatched } = t.params;
+ const sourceDevice = mismatched ? t.mismatchedDevice : t.device;
+
+ const buffer = sourceDevice.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+ t.trackForCleanup(buffer);
+
+ const texture = t.device.createTexture({
+ size: { width: 2, height: 2, depthOrArrayLayers: 1 },
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ });
+
+ const success = !mismatched;
+
+ // Expect success in both finish and submit, or validation error in finish
+ t.testBuffer(
+ buffer,
+ texture,
+ { bytesPerRow: 0 },
+ { width: 0, height: 0, depthOrArrayLayers: 0 },
+ { dataSize: 16, method, success, submit: success }
+ );
+ });
+
+g.test('usage')
+ .desc(
+ `
+Test that the buffer must have the appropriate COPY_SRC/COPY_DST usage.
+TODO update such that it tests
+- for all buffer source usages
+- for all buffer destination usages
+`
+ )
+ .params(u =>
+ u
+ // B2B copy validations are at api,validation,encoding,cmds,copyBufferToBuffer.spec.ts
+ .combine('method', ['CopyB2T', 'CopyT2B'] as const)
+ .beginSubcases()
+ .combine('usage', [
+ GPUConst.BufferUsage.COPY_SRC | GPUConst.BufferUsage.UNIFORM,
+ GPUConst.BufferUsage.COPY_DST | GPUConst.BufferUsage.UNIFORM,
+ GPUConst.BufferUsage.COPY_SRC | GPUConst.BufferUsage.COPY_DST,
+ ])
+ )
+ .fn(t => {
+ const { method, usage } = t.params;
+
+ const buffer = t.device.createBuffer({
+ size: 16,
+ usage,
+ });
+
+ const success =
+ method === 'CopyB2T'
+ ? (usage & GPUBufferUsage.COPY_SRC) !== 0
+ : (usage & GPUBufferUsage.COPY_DST) !== 0;
+
+ const texture = t.device.createTexture({
+ size: { width: 2, height: 2, depthOrArrayLayers: 1 },
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ });
+
+ // Expect success in both finish and submit, or validation error in finish
+ t.testBuffer(
+ buffer,
+ texture,
+ { bytesPerRow: 0 },
+ { width: 0, height: 0, depthOrArrayLayers: 0 },
+ { dataSize: 16, method, success, submit: success }
+ );
+ });
+
+g.test('bytes_per_row_alignment')
+ .desc(
+ `
+Test that bytesPerRow must be a multiple of 256 for CopyB2T and CopyT2B if it is required.
+- for all copy methods between linear data and textures
+- for all texture dimensions
+- for all sized formats.
+- for various bytesPerRow aligned to 256 or not
+- for various number of blocks rows copied
+`
+ )
+ .params(u =>
+ u //
+ .combine('method', kImageCopyTypes)
+ .combine('format', kSizedTextureFormats)
+ .filter(formatCopyableWithMethod)
+ .combine('dimension', kTextureDimensions)
+ .filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format))
+ .beginSubcases()
+ .combine('bytesPerRow', [undefined, 0, 1, 255, 256, 257, 512])
+ .combine('copyHeightInBlocks', [0, 1, 2, 3])
+ .expand('_textureHeightInBlocks', p => [
+ p.copyHeightInBlocks === 0 ? 1 : p.copyHeightInBlocks,
+ ])
+ .unless(p => p.dimension === '1d' && p.copyHeightInBlocks > 1)
+ // Depth/stencil format copies must copy the whole subresource.
+ .unless(p => {
+ const info = kTextureFormatInfo[p.format];
+ return (
+ (!!info.depth || !!info.stencil) && p.copyHeightInBlocks !== p._textureHeightInBlocks
+ );
+ })
+ // bytesPerRow must be specified and it must be equal or greater than the bytes size of each row if we are copying multiple rows.
+ // Note that we are copying one single block on each row in this test.
+ .filter(
+ ({ format, bytesPerRow, copyHeightInBlocks }) =>
+ (bytesPerRow === undefined && copyHeightInBlocks <= 1) ||
+ (bytesPerRow !== undefined && bytesPerRow >= kTextureFormatInfo[format].bytesPerBlock)
+ )
+ )
+ .beforeAllSubcases(t => {
+ const info = kTextureFormatInfo[t.params.format];
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { method, dimension, format, bytesPerRow, copyHeightInBlocks, _textureHeightInBlocks } =
+ t.params;
+
+ const info = kTextureFormatInfo[format];
+
+ const buffer = t.device.createBuffer({
+ size: 512 * 8 * 16,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+
+ let success = false;
+ // writeTexture doesn't require bytesPerRow to be 256-byte aligned.
+ if (method === 'WriteTexture') success = true;
+ // If the copy height <= 1, bytesPerRow is not required.
+ if (copyHeightInBlocks <= 1 && bytesPerRow === undefined) success = true;
+ // If bytesPerRow > 0 and it is a multiple of 256, it will succeed if other parameters are valid.
+ if (bytesPerRow !== undefined && bytesPerRow > 0 && bytesPerRow % 256 === 0) success = true;
+
+ const size = [info.blockWidth, _textureHeightInBlocks * info.blockHeight, 1];
+ const texture = t.device.createTexture({
+ size,
+ dimension,
+ format,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ });
+
+ const copySize = [info.blockWidth, copyHeightInBlocks * info.blockHeight, 1];
+
+ // Expect success in both finish and submit, or validation error in finish
+ t.testBuffer(buffer, texture, { bytesPerRow }, copySize, {
+ dataSize: 512 * 8 * 16,
+ method,
+ success,
+ submit: success,
+ });
+ });
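The rule exercised here is that bytesPerRow for CopyB2T/CopyT2B must be a multiple of 256 (writeTexture is exempt) and may be omitted only when at most one block row is copied. A sketch of the usual alignment helper (hypothetical, not part of the suite):

// Sketch: compute a 256-byte-aligned bytesPerRow for buffer<->texture copies.
const kBytesPerRowAlignment = 256;
function alignedBytesPerRow(widthInBlocks: number, bytesPerBlock: number): number {
  const unaligned = widthInBlocks * bytesPerBlock;
  return Math.ceil(unaligned / kBytesPerRowAlignment) * kBytesPerRowAlignment;
}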
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/image_copy/buffer_texture_copies.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/image_copy/buffer_texture_copies.spec.ts
new file mode 100644
index 0000000000..847555298d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/image_copy/buffer_texture_copies.spec.ts
@@ -0,0 +1,453 @@
+export const description = `
+copyTextureToBuffer and copyBufferToTexture validation tests not covered by
+the general image_copy tests, or by destroyed,*.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { assert, unreachable } from '../../../../common/util/util.js';
+import { kBufferUsages, kTextureUsages } from '../../../capability_info.js';
+import { GPUConst } from '../../../constants.js';
+import {
+ kDepthStencilFormats,
+ depthStencilBufferTextureCopySupported,
+ depthStencilFormatAspectSize,
+} from '../../../format_info.js';
+import { align } from '../../../util/math.js';
+import { kBufferCopyAlignment, kBytesPerRowAlignment } from '../../../util/texture/layout.js';
+import { ValidationTest } from '../validation_test.js';
+
+class ImageCopyTest extends ValidationTest {
+ testCopyBufferToTexture(
+ source: GPUImageCopyBuffer,
+ destination: GPUImageCopyTexture,
+ copySize: GPUExtent3DStrict,
+ isSuccess: boolean
+ ): void {
+ const { encoder, validateFinishAndSubmit } = this.createEncoder('non-pass');
+ encoder.copyBufferToTexture(source, destination, copySize);
+ validateFinishAndSubmit(isSuccess, true);
+ }
+
+ testCopyTextureToBuffer(
+ source: GPUImageCopyTexture,
+ destination: GPUImageCopyBuffer,
+ copySize: GPUExtent3DStrict,
+ isSuccess: boolean
+ ): void {
+ const { encoder, validateFinishAndSubmit } = this.createEncoder('non-pass');
+ encoder.copyTextureToBuffer(source, destination, copySize);
+ validateFinishAndSubmit(isSuccess, true);
+ }
+
+ testWriteTexture(
+ destination: GPUImageCopyTexture,
+ uploadData: Uint8Array,
+ dataLayout: GPUImageDataLayout,
+ copySize: GPUExtent3DStrict,
+ isSuccess: boolean
+ ): void {
+ this.expectGPUError(
+ 'validation',
+ () => this.queue.writeTexture(destination, uploadData, dataLayout, copySize),
+ !isSuccess
+ );
+ }
+}
+
+export const g = makeTestGroup(ImageCopyTest);
+
+g.test('depth_stencil_format,copy_usage_and_aspect')
+ .desc(
+ `
+ Validate the combination of usage and aspect of each depth stencil format in copyBufferToTexture,
+ copyTextureToBuffer and writeTexture. See https://gpuweb.github.io/gpuweb/#depth-formats for more
+ details.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('format', kDepthStencilFormats)
+ .beginSubcases()
+ .combine('aspect', ['all', 'depth-only', 'stencil-only'] as const)
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ t.selectDeviceForTextureFormatOrSkipTestCase(format);
+ })
+ .fn(t => {
+ const { format, aspect } = t.params;
+
+ const textureSize = { width: 1, height: 1, depthOrArrayLayers: 1 };
+ const texture = t.device.createTexture({
+ size: textureSize,
+ format,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ });
+
+ const uploadBufferSize = 32;
+ const buffer = t.device.createBuffer({
+ size: uploadBufferSize,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+
+ {
+ const success = depthStencilBufferTextureCopySupported('CopyB2T', format, aspect);
+ t.testCopyBufferToTexture({ buffer }, { texture, aspect }, textureSize, success);
+ }
+
+ {
+ const success = depthStencilBufferTextureCopySupported('CopyT2B', format, aspect);
+ t.testCopyTextureToBuffer({ texture, aspect }, { buffer }, textureSize, success);
+ }
+
+ {
+ const success = depthStencilBufferTextureCopySupported('WriteTexture', format, aspect);
+ const uploadData = new Uint8Array(uploadBufferSize);
+ t.testWriteTexture({ texture, aspect }, uploadData, {}, textureSize, success);
+ }
+ });
+
+g.test('depth_stencil_format,copy_buffer_size')
+ .desc(
+ `
+ Validate the minimum buffer size for each depth stencil format in copyBufferToTexture,
+ copyTextureToBuffer and writeTexture.
+
+ Given a depth stencil format, a copy aspect ('depth-only' or 'stencil-only'), the copy method
+ (buffer-to-texture, texture-to-buffer, or writeTexture) and the copy size, validate
+ - if the copy can be successfully executed with the minimum required buffer size.
+ - if the copy fails with a validation error when the buffer size is less than the minimum
+ required buffer size.
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kDepthStencilFormats)
+ .combine('aspect', ['depth-only', 'stencil-only'] as const)
+ .combine('copyType', ['CopyB2T', 'CopyT2B', 'WriteTexture'] as const)
+ .filter(param =>
+ depthStencilBufferTextureCopySupported(param.copyType, param.format, param.aspect)
+ )
+ .beginSubcases()
+ .combine('copySize', [
+ { width: 8, height: 1, depthOrArrayLayers: 1 },
+ { width: 4, height: 4, depthOrArrayLayers: 1 },
+ { width: 4, height: 4, depthOrArrayLayers: 3 },
+ ])
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ t.selectDeviceForTextureFormatOrSkipTestCase(format);
+ })
+ .fn(t => {
+ const { format, aspect, copyType, copySize } = t.params;
+
+ const texture = t.device.createTexture({
+ size: copySize,
+ format,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ });
+
+ const texelAspectSize = depthStencilFormatAspectSize(format, aspect);
+ assert(texelAspectSize > 0);
+
+ const bytesPerRowAlignment = copyType === 'WriteTexture' ? 1 : kBytesPerRowAlignment;
+ const bytesPerRow = align(texelAspectSize * copySize.width, bytesPerRowAlignment);
+ const rowsPerImage = copySize.height;
+ const minimumBufferSize =
+ bytesPerRow * (rowsPerImage * copySize.depthOrArrayLayers - 1) +
+ align(texelAspectSize * copySize.width, kBufferCopyAlignment);
+ assert(minimumBufferSize > kBufferCopyAlignment);
+
+ const bigEnoughBuffer = t.device.createBuffer({
+ size: minimumBufferSize,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+ const smallerBuffer = t.device.createBuffer({
+ size: minimumBufferSize - kBufferCopyAlignment,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+
+ if (copyType === 'CopyB2T') {
+ t.testCopyBufferToTexture(
+ { buffer: bigEnoughBuffer, bytesPerRow, rowsPerImage },
+ { texture, aspect },
+ copySize,
+ true
+ );
+ t.testCopyBufferToTexture(
+ { buffer: smallerBuffer, bytesPerRow, rowsPerImage },
+ { texture, aspect },
+ copySize,
+ false
+ );
+ } else if (copyType === 'CopyT2B') {
+ t.testCopyTextureToBuffer(
+ { texture, aspect },
+ { buffer: bigEnoughBuffer, bytesPerRow, rowsPerImage },
+ copySize,
+ true
+ );
+ t.testCopyTextureToBuffer(
+ { texture, aspect },
+ { buffer: smallerBuffer, bytesPerRow, rowsPerImage },
+ copySize,
+ false
+ );
+ } else if (copyType === 'WriteTexture') {
+ const enoughUploadData = new Uint8Array(minimumBufferSize);
+ const smallerUploadData = new Uint8Array(minimumBufferSize - kBufferCopyAlignment);
+ t.testWriteTexture(
+ { texture, aspect },
+ enoughUploadData,
+ {
+ bytesPerRow,
+ rowsPerImage,
+ },
+ copySize,
+ true
+ );
+
+ t.testWriteTexture(
+ { texture, aspect },
+ smallerUploadData,
+ {
+ bytesPerRow,
+ rowsPerImage,
+ },
+ copySize,
+ false
+ );
+ } else {
+ unreachable();
+ }
+ });
+
+g.test('depth_stencil_format,copy_buffer_offset')
+ .desc(
+ `
+  Validate that, for every depth stencil format, the buffer offset must be a multiple of 4 in
+  copyBufferToTexture() and copyTextureToBuffer(), while the offset in writeTexture() does not
+  need to be a multiple of 4.
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kDepthStencilFormats)
+ .combine('aspect', ['depth-only', 'stencil-only'] as const)
+ .combine('copyType', ['CopyB2T', 'CopyT2B', 'WriteTexture'] as const)
+ .filter(param =>
+ depthStencilBufferTextureCopySupported(param.copyType, param.format, param.aspect)
+ )
+ .beginSubcases()
+ .combine('offset', [1, 2, 4, 6, 8])
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ t.selectDeviceForTextureFormatOrSkipTestCase(format);
+ })
+ .fn(t => {
+ const { format, aspect, copyType, offset } = t.params;
+
+ const textureSize = { width: 4, height: 4, depthOrArrayLayers: 1 };
+
+ const texture = t.device.createTexture({
+ size: textureSize,
+ format,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ });
+
+ const texelAspectSize = depthStencilFormatAspectSize(format, aspect);
+ assert(texelAspectSize > 0);
+
+ const bytesPerRowAlignment = copyType === 'WriteTexture' ? 1 : kBytesPerRowAlignment;
+ const bytesPerRow = align(texelAspectSize * textureSize.width, bytesPerRowAlignment);
+ const rowsPerImage = textureSize.height;
+ const minimumBufferSize =
+ bytesPerRow * (rowsPerImage * textureSize.depthOrArrayLayers - 1) +
+ align(texelAspectSize * textureSize.width, kBufferCopyAlignment);
+ assert(minimumBufferSize > kBufferCopyAlignment);
+
+ const buffer = t.device.createBuffer({
+ size: align(minimumBufferSize + offset, kBufferCopyAlignment),
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+
+ const isSuccess = copyType === 'WriteTexture' ? true : offset % 4 === 0;
+
+ if (copyType === 'CopyB2T') {
+ t.testCopyBufferToTexture(
+ { buffer, offset, bytesPerRow, rowsPerImage },
+ { texture, aspect },
+ textureSize,
+ isSuccess
+ );
+ } else if (copyType === 'CopyT2B') {
+ t.testCopyTextureToBuffer(
+ { texture, aspect },
+ { buffer, offset, bytesPerRow, rowsPerImage },
+ textureSize,
+ isSuccess
+ );
+ } else if (copyType === 'WriteTexture') {
+ const uploadData = new Uint8Array(minimumBufferSize + offset);
+ t.testWriteTexture(
+ { texture, aspect },
+ uploadData,
+ {
+ offset,
+ bytesPerRow,
+ rowsPerImage,
+ },
+ textureSize,
+ isSuccess
+ );
+ } else {
+ unreachable();
+ }
+ });
+
+g.test('sample_count')
+ .desc(
+ `
+  Test the texture sample count. Check that a validation error is generated if the sample count is
+  not 1.
+ `
+ )
+ .params(u =>
+ u //
+ // writeTexture is handled by writeTexture.spec.ts.
+ .combine('copyType', ['CopyB2T', 'CopyT2B'] as const)
+ .beginSubcases()
+ .combine('sampleCount', [1, 4])
+ )
+ .fn(t => {
+ const { sampleCount, copyType } = t.params;
+
+ let usage = GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST;
+    // The WebGPU spec requires that multisampled textures have RENDER_ATTACHMENT usage.
+ if (sampleCount > 1) {
+ usage |= GPUTextureUsage.RENDER_ATTACHMENT;
+ }
+ const texture = t.device.createTexture({
+ size: { width: 16, height: 16 },
+ sampleCount,
+ format: 'bgra8unorm',
+ usage,
+ });
+
+ const uploadBufferSize = 32;
+ const buffer = t.device.createBuffer({
+ size: uploadBufferSize,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+
+ const textureSize = { width: 1, height: 1, depthOrArrayLayers: 1 };
+
+ const isSuccess = sampleCount === 1;
+
+ if (copyType === 'CopyB2T') {
+ t.testCopyBufferToTexture({ buffer }, { texture }, textureSize, isSuccess);
+ } else if (copyType === 'CopyT2B') {
+ t.testCopyTextureToBuffer({ texture }, { buffer }, textureSize, isSuccess);
+ }
+ });
+
+const kRequiredTextureUsage = {
+ CopyT2B: GPUConst.TextureUsage.COPY_SRC,
+ CopyB2T: GPUConst.TextureUsage.COPY_DST,
+};
+const kRequiredBufferUsage = {
+ CopyB2T: GPUConst.BufferUsage.COPY_SRC,
+ CopyT2B: GPUConst.BufferUsage.COPY_DST,
+};
+
+g.test('texture_buffer_usages')
+ .desc(
+ `
+  Tests calling copyTextureToBuffer or copyBufferToTexture with a texture or a buffer that is
+  missing the required COPY_SRC or COPY_DST usage.
+ - texture and buffer {with, without} COPY_SRC and COPY_DST usage.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('copyType', ['CopyB2T', 'CopyT2B'] as const)
+ .beginSubcases()
+ .combine('textureUsage', kTextureUsages)
+ .expand('_textureUsageValid', p => [p.textureUsage === kRequiredTextureUsage[p.copyType]])
+ .combine('bufferUsage', kBufferUsages)
+ .expand('_bufferUsageValid', p => [p.bufferUsage === kRequiredBufferUsage[p.copyType]])
+ .filter(p => p._textureUsageValid || p._bufferUsageValid)
+ )
+ .fn(t => {
+ const { copyType, textureUsage, _textureUsageValid, bufferUsage, _bufferUsageValid } = t.params;
+
+ const texture = t.device.createTexture({
+ size: { width: 16, height: 16 },
+ format: 'rgba8unorm',
+ usage: textureUsage,
+ });
+
+ const uploadBufferSize = 32;
+ const buffer = t.device.createBuffer({
+ size: uploadBufferSize,
+ usage: bufferUsage,
+ });
+
+ const textureSize = { width: 1, height: 1, depthOrArrayLayers: 1 };
+
+ const isSuccess = _textureUsageValid && _bufferUsageValid;
+ if (copyType === 'CopyB2T') {
+ t.testCopyBufferToTexture({ buffer }, { texture }, textureSize, isSuccess);
+ } else if (copyType === 'CopyT2B') {
+ t.testCopyTextureToBuffer({ texture }, { buffer }, textureSize, isSuccess);
+ }
+ });
+
+g.test('device_mismatch')
+ .desc(
+ `
+ Tests copyBufferToTexture and copyTextureToBuffer cannot be called with a buffer or a texture
+ created from another device.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('copyType', ['CopyB2T', 'CopyT2B'] as const)
+ .beginSubcases()
+ .combineWithParams([
+ { bufMismatched: false, texMismatched: false }, // control case
+ { bufMismatched: true, texMismatched: false },
+ { bufMismatched: false, texMismatched: true },
+ ] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { copyType, bufMismatched, texMismatched } = t.params;
+
+ const uploadBufferSize = 32;
+ const buffer = (bufMismatched ? t.mismatchedDevice : t.device).createBuffer({
+ size: uploadBufferSize,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+ t.trackForCleanup(buffer);
+
+ const textureSize = { width: 1, height: 1, depthOrArrayLayers: 1 };
+ const texture = (texMismatched ? t.mismatchedDevice : t.device).createTexture({
+ size: textureSize,
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ });
+ t.trackForCleanup(texture);
+
+ const isValid = !bufMismatched && !texMismatched;
+
+ if (copyType === 'CopyB2T') {
+ t.testCopyBufferToTexture({ buffer }, { texture }, textureSize, isValid);
+ } else if (copyType === 'CopyT2B') {
+ t.testCopyTextureToBuffer({ texture }, { buffer }, textureSize, isValid);
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/image_copy/image_copy.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/image_copy/image_copy.ts
new file mode 100644
index 0000000000..686a5ee1cf
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/image_copy/image_copy.ts
@@ -0,0 +1,278 @@
+import {
+ depthStencilFormatCopyableAspects,
+ DepthStencilFormat,
+ SizedTextureFormat,
+ kTextureFormatInfo,
+ isCompressedTextureFormat,
+} from '../../../format_info.js';
+import { align } from '../../../util/math.js';
+import { ImageCopyType } from '../../../util/texture/layout.js';
+import { ValidationTest } from '../validation_test.js';
+
+export class ImageCopyTest extends ValidationTest {
+ testRun(
+ textureCopyView: GPUImageCopyTexture,
+ textureDataLayout: GPUImageDataLayout,
+ size: GPUExtent3D,
+ {
+ method,
+ dataSize,
+ success,
+ submit = false,
+ }: {
+ method: ImageCopyType;
+ dataSize: number;
+ success: boolean;
+ /** If submit is true, the validation error is expected to come from the submit and encoding
+ * should succeed. */
+ submit?: boolean;
+ }
+ ): void {
+ switch (method) {
+ case 'WriteTexture': {
+ const data = new Uint8Array(dataSize);
+
+ this.expectValidationError(() => {
+ this.device.queue.writeTexture(textureCopyView, data, textureDataLayout, size);
+ }, !success);
+
+ break;
+ }
+ case 'CopyB2T': {
+ const buffer = this.device.createBuffer({
+ size: dataSize,
+ usage: GPUBufferUsage.COPY_SRC,
+ });
+ this.trackForCleanup(buffer);
+
+ const encoder = this.device.createCommandEncoder();
+ encoder.copyBufferToTexture({ buffer, ...textureDataLayout }, textureCopyView, size);
+
+ if (submit) {
+ const cmd = encoder.finish();
+ this.expectValidationError(() => {
+ this.device.queue.submit([cmd]);
+ }, !success);
+ } else {
+ this.expectValidationError(() => {
+ encoder.finish();
+ }, !success);
+ }
+
+ break;
+ }
+ case 'CopyT2B': {
+ if (this.isCompatibility && isCompressedTextureFormat(textureCopyView.texture.format)) {
+ this.skip(
+ 'copyTextureToBuffer is not supported for compressed texture formats in compatibility mode.'
+ );
+ }
+ const buffer = this.device.createBuffer({
+ size: dataSize,
+ usage: GPUBufferUsage.COPY_DST,
+ });
+ this.trackForCleanup(buffer);
+
+ const encoder = this.device.createCommandEncoder();
+ encoder.copyTextureToBuffer(textureCopyView, { buffer, ...textureDataLayout }, size);
+
+ if (submit) {
+ const cmd = encoder.finish();
+ this.expectValidationError(() => {
+ this.device.queue.submit([cmd]);
+ }, !success);
+ } else {
+ this.expectValidationError(() => {
+ encoder.finish();
+ }, !success);
+ }
+
+ break;
+ }
+ }
+ }
+
+ /**
+   * Creates a texture whose size is aligned to the format's block size, given the desired copy
+   * dimensions/origin. The resulting texture guarantees that a copy with the same size and origin
+   * should be possible.
+ */
+ createAlignedTexture(
+ format: SizedTextureFormat,
+ size: Required<GPUExtent3DDict> = {
+ width: 1,
+ height: 1,
+ depthOrArrayLayers: 1,
+ },
+ origin: Required<GPUOrigin3DDict> = { x: 0, y: 0, z: 0 },
+ dimension: Required<GPUTextureDimension> = '2d'
+ ): GPUTexture {
+ const info = kTextureFormatInfo[format];
+ const alignedSize = {
+ width: align(Math.max(1, size.width + origin.x), info.blockWidth),
+ height: align(Math.max(1, size.height + origin.y), info.blockHeight),
+ depthOrArrayLayers: Math.max(1, size.depthOrArrayLayers + origin.z),
+ };
+ return this.device.createTexture({
+ size: alignedSize,
+ dimension,
+ format,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ });
+ }
+
+ testBuffer(
+ buffer: GPUBuffer,
+ texture: GPUTexture,
+ textureDataLayout: GPUImageDataLayout,
+ size: GPUExtent3D,
+ {
+ method,
+ dataSize,
+ success,
+ submit = true,
+ }: {
+ method: ImageCopyType;
+ dataSize: number;
+ success: boolean;
+ /** If submit is true, the validation error is expected to come from the submit and encoding
+ * should succeed. */
+ submit?: boolean;
+ }
+ ): void {
+ switch (method) {
+ case 'WriteTexture': {
+ const data = new Uint8Array(dataSize);
+
+ this.expectValidationError(() => {
+ this.device.queue.writeTexture({ texture }, data, textureDataLayout, size);
+ }, !success);
+
+ break;
+ }
+ case 'CopyB2T': {
+ const { encoder, validateFinish, validateFinishAndSubmit } = this.createEncoder('non-pass');
+ encoder.copyBufferToTexture({ buffer, ...textureDataLayout }, { texture }, size);
+
+ if (submit) {
+ // validation error is expected to come from the submit and encoding should succeed
+ validateFinishAndSubmit(true, success);
+ } else {
+ // validation error is expected to come from the encoding
+ validateFinish(success);
+ }
+
+ break;
+ }
+ case 'CopyT2B': {
+ if (this.isCompatibility && isCompressedTextureFormat(texture.format)) {
+ this.skip(
+ 'copyTextureToBuffer is not supported for compressed texture formats in compatibility mode.'
+ );
+ }
+ const { encoder, validateFinish, validateFinishAndSubmit } = this.createEncoder('non-pass');
+ encoder.copyTextureToBuffer({ texture }, { buffer, ...textureDataLayout }, size);
+
+ if (submit) {
+ // validation error is expected to come from the submit and encoding should succeed
+ validateFinishAndSubmit(true, success);
+ } else {
+ // validation error is expected to come from the encoding
+ validateFinish(success);
+ }
+
+ break;
+ }
+ }
+ }
+}
+
+// For testing divisibility by a number, we test all the values returned by this function:
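+// For example, valuesToTestDivisibilityBy(4) yields 0 through 8 plus 12, covering values below,
+// at, and above the alignment boundary, both divisible and not divisible by 4.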
+function valuesToTestDivisibilityBy(number: number): Iterable<number> {
+ const values = [];
+ for (let i = 0; i <= 2 * number; ++i) {
+ values.push(i);
+ }
+ values.push(3 * number);
+ return values;
+}
+
+interface WithFormat {
+ format: SizedTextureFormat;
+}
+
+interface WithFormatAndCoordinate extends WithFormat {
+ coordinateToTest: keyof GPUOrigin3DDict | keyof GPUExtent3DDict;
+}
+
+interface WithFormatAndMethod extends WithFormat {
+ method: ImageCopyType;
+}
+
+// This is a helper function used for expanding test parameters for offset alignment tests, per the spec.
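+// For example, for 'rgba8unorm' (4 bytes per block) it yields offsets 0 through 8 plus 12, while for
+// depth/stencil formats the spec-required offset alignment is 4 bytes regardless of the aspect size.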
+export function texelBlockAlignmentTestExpanderForOffset({ format }: WithFormat) {
+ const info = kTextureFormatInfo[format];
+ if (info.depth || info.stencil) {
+ return valuesToTestDivisibilityBy(4);
+ }
+
+ return valuesToTestDivisibilityBy(kTextureFormatInfo[format].bytesPerBlock);
+}
+
+// This is a helper function used for expanding test parameters for texel block alignment tests on rowsPerImage
+export function texelBlockAlignmentTestExpanderForRowsPerImage({ format }: WithFormat) {
+ return valuesToTestDivisibilityBy(kTextureFormatInfo[format].blockHeight);
+}
+
+// This is a helper function used for expanding test parameters for texel block alignment tests on origin and size
+export function texelBlockAlignmentTestExpanderForValueToCoordinate({
+ format,
+ coordinateToTest,
+}: WithFormatAndCoordinate) {
+ switch (coordinateToTest) {
+ case 'x':
+ case 'width':
+ return valuesToTestDivisibilityBy(kTextureFormatInfo[format].blockWidth);
+
+ case 'y':
+ case 'height':
+ return valuesToTestDivisibilityBy(kTextureFormatInfo[format].blockHeight);
+
+ case 'z':
+ case 'depthOrArrayLayers':
+ return valuesToTestDivisibilityBy(1);
+ }
+}
+
+// This is a helper function used for filtering test parameters
+export function formatCopyableWithMethod({ format, method }: WithFormatAndMethod): boolean {
+ const info = kTextureFormatInfo[format];
+ if (info.depth || info.stencil) {
+ const supportedAspects: readonly GPUTextureAspect[] = depthStencilFormatCopyableAspects(
+ method,
+ format as DepthStencilFormat
+ );
+ return supportedAspects.length > 0;
+ }
+ if (method === 'CopyT2B') {
+ return info.copySrc;
+ } else {
+ return info.copyDst;
+ }
+}
+
+// This is a helper function used for filtering test parameters
+export function getACopyableAspectWithMethod({
+ format,
+ method,
+}: WithFormatAndMethod): GPUTextureAspect {
+ const info = kTextureFormatInfo[format];
+ if (info.depth || info.stencil) {
+ const supportedAspects: readonly GPUTextureAspect[] = depthStencilFormatCopyableAspects(
+ method,
+ format as DepthStencilFormat
+ );
+ return supportedAspects[0];
+ }
+ return 'all' as GPUTextureAspect;
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/image_copy/layout_related.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/image_copy/layout_related.spec.ts
new file mode 100644
index 0000000000..f8e03890e1
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/image_copy/layout_related.spec.ts
@@ -0,0 +1,483 @@
+export const description = `Validation tests for the linear data layout of linear data <-> texture copies
+
+TODO check if the tests need to be updated to support aspects of depth-stencil textures`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { assert } from '../../../../common/util/util.js';
+import { kTextureDimensions } from '../../../capability_info.js';
+import {
+ kTextureFormatInfo,
+ kSizedTextureFormats,
+ textureDimensionAndFormatCompatible,
+} from '../../../format_info.js';
+import { align } from '../../../util/math.js';
+import {
+ bytesInACompleteRow,
+ dataBytesForCopyOrOverestimate,
+ dataBytesForCopyOrFail,
+ kImageCopyTypes,
+} from '../../../util/texture/layout.js';
+
+import {
+ ImageCopyTest,
+ texelBlockAlignmentTestExpanderForOffset,
+ texelBlockAlignmentTestExpanderForRowsPerImage,
+ formatCopyableWithMethod,
+} from './image_copy.js';
+
+export const g = makeTestGroup(ImageCopyTest);
+
+g.test('bound_on_rows_per_image')
+ .desc(
+ `
+Test that rowsPerImage must be at least the copy height (if defined).
+- for various copy methods
+- for all texture dimensions
+- for various values of rowsPerImage including undefined
+- for various copy heights
+- for various copy depths
+`
+ )
+ .params(u =>
+ u
+ .combine('method', kImageCopyTypes)
+ .combineWithParams([
+ { dimension: '1d', size: [4, 1, 1] },
+ { dimension: '2d', size: [4, 4, 1] },
+ { dimension: '2d', size: [4, 4, 3] },
+ { dimension: '3d', size: [4, 4, 3] },
+ ] as const)
+ .beginSubcases()
+ .combine('rowsPerImage', [undefined, 0, 1, 2, 1024])
+ .combine('copyHeightInBlocks', [0, 1, 2])
+ .combine('copyDepth', [1, 3])
+ .unless(p => p.dimension === '1d' && p.copyHeightInBlocks !== 1)
+ .unless(p => p.copyDepth > p.size[2])
+ )
+ .fn(t => {
+ const { rowsPerImage, copyHeightInBlocks, copyDepth, dimension, size, method } = t.params;
+
+ const format = 'rgba8unorm';
+ const copyHeight = copyHeightInBlocks * kTextureFormatInfo[format].blockHeight;
+
+ const texture = t.device.createTexture({
+ size,
+ dimension,
+ format,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ });
+
+ const layout = { bytesPerRow: 1024, rowsPerImage };
+ const copySize = { width: 0, height: copyHeight, depthOrArrayLayers: copyDepth };
+ const { minDataSizeOrOverestimate, copyValid } = dataBytesForCopyOrOverestimate({
+ layout,
+ format,
+ copySize,
+ method,
+ });
+
+ t.testRun({ texture }, layout, copySize, {
+ dataSize: minDataSizeOrOverestimate,
+ method,
+ success: copyValid,
+ });
+ });
+
+g.test('copy_end_overflows_u64')
+ .desc(
+ `
+Test that an error is produced when offset+requiredBytesInCopy overflows GPUSize64.
+- for various copy methods
+`
+ )
+ .params(u =>
+ u
+ .combine('method', kImageCopyTypes)
+ .beginSubcases()
+ .combineWithParams([
+ { bytesPerRow: 2 ** 31, rowsPerImage: 2 ** 31, depthOrArrayLayers: 1, _success: true }, // success case
+ { bytesPerRow: 2 ** 31, rowsPerImage: 2 ** 31, depthOrArrayLayers: 16, _success: false }, // bytesPerRow * rowsPerImage * (depthOrArrayLayers - 1) overflows.
+ ])
+ )
+ .fn(t => {
+ const { method, bytesPerRow, rowsPerImage, depthOrArrayLayers, _success } = t.params;
+
+ const texture = t.device.createTexture({
+ size: [1, 1, depthOrArrayLayers],
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ });
+
+ t.testRun(
+ { texture },
+ { bytesPerRow, rowsPerImage },
+ { width: 1, height: 1, depthOrArrayLayers },
+ {
+ dataSize: 10000,
+ method,
+ success: _success,
+ }
+ );
+ });
+
+g.test('required_bytes_in_copy')
+ .desc(
+ `
+Test the computation of requiredBytesInCopy by computing the minimum data size for the copy and checking success/error at the boundary.
+- for various copy methods
+- for all formats
+- for all dimensions
+- for various extra bytesPerRow/rowsPerImage
+- for various copy sizes
+- for various offsets in the linear data
+`
+ )
+ .params(u =>
+ u
+ .combine('method', kImageCopyTypes)
+ .combine('format', kSizedTextureFormats)
+ .filter(formatCopyableWithMethod)
+ .combine('dimension', kTextureDimensions)
+ .filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format))
+ .beginSubcases()
+ .combineWithParams([
+ { bytesPerRowPadding: 0, rowsPerImagePaddingInBlocks: 0 }, // no padding
+ { bytesPerRowPadding: 0, rowsPerImagePaddingInBlocks: 6 }, // rowsPerImage padding
+ { bytesPerRowPadding: 6, rowsPerImagePaddingInBlocks: 0 }, // bytesPerRow padding
+ { bytesPerRowPadding: 15, rowsPerImagePaddingInBlocks: 17 }, // both paddings
+ ])
+ .combineWithParams([
+ { copyWidthInBlocks: 3, copyHeightInBlocks: 4, copyDepth: 5, _offsetMultiplier: 0 }, // standard copy
+ { copyWidthInBlocks: 5, copyHeightInBlocks: 4, copyDepth: 3, _offsetMultiplier: 11 }, // standard copy, offset > 0
+ { copyWidthInBlocks: 256, copyHeightInBlocks: 3, copyDepth: 2, _offsetMultiplier: 0 }, // copyWidth is 256-aligned
+ { copyWidthInBlocks: 0, copyHeightInBlocks: 4, copyDepth: 5, _offsetMultiplier: 0 }, // empty copy because of width
+ { copyWidthInBlocks: 3, copyHeightInBlocks: 0, copyDepth: 5, _offsetMultiplier: 0 }, // empty copy because of height
+ { copyWidthInBlocks: 3, copyHeightInBlocks: 4, copyDepth: 0, _offsetMultiplier: 13 }, // empty copy because of depth, offset > 0
+ { copyWidthInBlocks: 1, copyHeightInBlocks: 4, copyDepth: 5, _offsetMultiplier: 0 }, // copyWidth = 1
+ { copyWidthInBlocks: 3, copyHeightInBlocks: 1, copyDepth: 5, _offsetMultiplier: 15 }, // copyHeight = 1, offset > 0
+ { copyWidthInBlocks: 5, copyHeightInBlocks: 4, copyDepth: 1, _offsetMultiplier: 0 }, // copyDepth = 1
+ { copyWidthInBlocks: 7, copyHeightInBlocks: 1, copyDepth: 1, _offsetMultiplier: 0 }, // copyHeight = 1 and copyDepth = 1
+ ])
+ // The test texture size will be rounded up from the copy size to the next valid texture size.
+      // If the format is a depth/stencil format, its copy size must be equal to the subresource's size.
+ // So filter out depth/stencil cases where the rounded-up texture size would be different from the copy size.
+ .filter(({ format, copyWidthInBlocks, copyHeightInBlocks, copyDepth }) => {
+ const info = kTextureFormatInfo[format];
+ return (
+ (!info.depth && !info.stencil) ||
+ (copyWidthInBlocks > 0 && copyHeightInBlocks > 0 && copyDepth > 0)
+ );
+ })
+ .unless(p => p.dimension === '1d' && (p.copyHeightInBlocks > 1 || p.copyDepth > 1))
+ .expand('offset', p => {
+ const info = kTextureFormatInfo[p.format];
+ if (info.depth || info.stencil) {
+ return [p._offsetMultiplier * 4];
+ }
+ return [p._offsetMultiplier * info.color.bytes];
+ })
+ )
+ .beforeAllSubcases(t => {
+ const info = kTextureFormatInfo[t.params.format];
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const {
+ offset,
+ bytesPerRowPadding,
+ rowsPerImagePaddingInBlocks,
+ copyWidthInBlocks,
+ copyHeightInBlocks,
+ copyDepth,
+ format,
+ dimension,
+ method,
+ } = t.params;
+ const info = kTextureFormatInfo[format];
+
+    // In the CopyB2T and CopyT2B cases bytesPerRow must be 256-aligned; to make this happen we
+    // align the bytesInACompleteRow value and multiply bytesPerRowPadding by 256.
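+    // For example, for 'rgba8unorm' with copyWidthInBlocks = 3 a complete row is 12 bytes, so
+    // CopyB2T/CopyT2B use bytesPerRow = align(12, 256) + bytesPerRowPadding * 256, whereas
+    // WriteTexture can use bytesPerRow = 12 + bytesPerRowPadding.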
+ const bytesPerRowAlignment = method === 'WriteTexture' ? 1 : 256;
+ const copyWidth = copyWidthInBlocks * info.blockWidth;
+ const copyHeight = copyHeightInBlocks * info.blockHeight;
+ const rowsPerImage = copyHeight + rowsPerImagePaddingInBlocks * info.blockHeight;
+ const bytesPerRow =
+ align(bytesInACompleteRow(copyWidth, format), bytesPerRowAlignment) +
+ bytesPerRowPadding * bytesPerRowAlignment;
+ const copySize = { width: copyWidth, height: copyHeight, depthOrArrayLayers: copyDepth };
+
+ const layout = { offset, bytesPerRow, rowsPerImage };
+ const minDataSize = dataBytesForCopyOrFail({ layout, format, copySize, method });
+
+ const texture = t.createAlignedTexture(format, copySize, undefined, dimension);
+
+ t.testRun({ texture }, layout, copySize, {
+ dataSize: minDataSize,
+ method,
+ success: true,
+ });
+
+ if (minDataSize > 0) {
+ t.testRun({ texture }, layout, copySize, {
+ dataSize: minDataSize - 1,
+ method,
+ success: false,
+ });
+ }
+ });
+
+g.test('rows_per_image_alignment')
+ .desc(
+ `
+Test that rowsPerImage has no alignment constraints.
+- for various copy methods
+- for all sized formats
+- for all dimensions
+- for various rowsPerImage
+`
+ )
+ .params(u =>
+ u
+ .combine('method', kImageCopyTypes)
+ .combine('format', kSizedTextureFormats)
+ .filter(formatCopyableWithMethod)
+ .combine('dimension', kTextureDimensions)
+ .filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format))
+ .beginSubcases()
+ .expand('rowsPerImage', texelBlockAlignmentTestExpanderForRowsPerImage)
+      // Copy height is info.blockHeight, so rowsPerImage must be equal to or greater than it.
+ .filter(({ rowsPerImage, format }) => rowsPerImage >= kTextureFormatInfo[format].blockHeight)
+ )
+ .beforeAllSubcases(t => {
+ const info = kTextureFormatInfo[t.params.format];
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { rowsPerImage, format, method } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ const size = { width: info.blockWidth, height: info.blockHeight, depthOrArrayLayers: 1 };
+ const texture = t.device.createTexture({
+ size,
+ format,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ });
+
+ t.testRun({ texture }, { bytesPerRow: 256, rowsPerImage }, size, {
+ dataSize: info.bytesPerBlock,
+ method,
+ success: true,
+ });
+ });
+
+g.test('offset_alignment')
+ .desc(
+ `
+Test the alignment requirement on the linear data offset (block size, or 4 for depth-stencil).
+- for various copy methods
+- for all sized formats
+- for all dimensions
+- for various linear data offsets
+`
+ )
+ .params(u =>
+ u
+ .combine('method', kImageCopyTypes)
+ .combine('format', kSizedTextureFormats)
+ .filter(formatCopyableWithMethod)
+ .combine('dimension', kTextureDimensions)
+ .filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format))
+ .beginSubcases()
+ .expand('offset', texelBlockAlignmentTestExpanderForOffset)
+ )
+ .beforeAllSubcases(t => {
+ const info = kTextureFormatInfo[t.params.format];
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { format, offset, method } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ const size = { width: info.blockWidth, height: info.blockHeight, depthOrArrayLayers: 1 };
+ const texture = t.device.createTexture({
+ size,
+ format,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ });
+
+ let success = false;
+ if (method === 'WriteTexture') success = true;
+ if (info.depth || info.stencil) {
+ if (offset % 4 === 0) success = true;
+ } else {
+ if (offset % info.color.bytes === 0) success = true;
+ }
+
+ t.testRun({ texture }, { offset, bytesPerRow: 256 }, size, {
+ dataSize: offset + info.bytesPerBlock,
+ method,
+ success,
+ });
+ });
+
+g.test('bound_on_bytes_per_row')
+ .desc(
+ `
+Test that bytesPerRow, if specified, must be big enough for a full copy row.
+- for various copy methods
+- for all sized formats
+- for all dimensions
+- for various copy heights
+- for various copy depths
+- for various combinations of bytesPerRow and copy width.
+`
+ )
+ .params(u =>
+ u
+ .combine('method', kImageCopyTypes)
+ .combine('format', kSizedTextureFormats)
+ .filter(formatCopyableWithMethod)
+ .combine('dimension', kTextureDimensions)
+ .filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format))
+ .beginSubcases()
+ .combine('copyHeightInBlocks', [1, 2])
+ .combine('copyDepth', [1, 2])
+ .unless(p => p.dimension === '1d' && (p.copyHeightInBlocks > 1 || p.copyDepth > 1))
+ .expandWithParams(p => {
+ const info = kTextureFormatInfo[p.format];
+ // We currently have a built-in assumption that for all formats, 128 % bytesPerBlock === 0.
+        // This assumption ensures that all the divisions below result in integers.
+ assert(128 % info.bytesPerBlock === 0);
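+        // For example, 'rgba8unorm' has 4 bytes per block, so a 256-byte row holds exactly 64 blocks,
+        // and 'rgba32float' has 16 bytes per block, so it holds exactly 16 blocks.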
+ return [
+ // Copying exact fit with aligned bytesPerRow should work.
+ {
+ bytesPerRow: 256,
+ widthInBlocks: 256 / info.bytesPerBlock,
+ copyWidthInBlocks: 256 / info.bytesPerBlock,
+ _success: true,
+ },
+        // Copying into a smaller texture when the padding in bytesPerRow is enough should work,
+        // unless the format is a depth/stencil format.
+ {
+ bytesPerRow: 256,
+ widthInBlocks: 256 / info.bytesPerBlock,
+ copyWidthInBlocks: 256 / info.bytesPerBlock - 1,
+ _success: !(info.stencil || info.depth),
+ },
+ // Unaligned bytesPerRow should not work unless the method is 'WriteTexture'.
+ {
+ bytesPerRow: 128,
+ widthInBlocks: 128 / info.bytesPerBlock,
+ copyWidthInBlocks: 128 / info.bytesPerBlock,
+ _success: p.method === 'WriteTexture',
+ },
+ {
+ bytesPerRow: 384,
+ widthInBlocks: 384 / info.bytesPerBlock,
+ copyWidthInBlocks: 384 / info.bytesPerBlock,
+ _success: p.method === 'WriteTexture',
+ },
+ // When bytesPerRow is smaller than bytesInLastRow copying should fail.
+ {
+ bytesPerRow: 256,
+ widthInBlocks: (2 * 256) / info.bytesPerBlock,
+ copyWidthInBlocks: (2 * 256) / info.bytesPerBlock,
+ _success: false,
+ },
+        // When copyHeightInBlocks > 1 or copyDepth > 1, bytesPerRow must be specified.
+ {
+ bytesPerRow: undefined,
+ widthInBlocks: 256 / info.bytesPerBlock,
+ copyWidthInBlocks: 256 / info.bytesPerBlock,
+ _success: !(p.copyHeightInBlocks > 1 || p.copyDepth > 1),
+ },
+ ];
+ })
+ )
+ .beforeAllSubcases(t => {
+ const info = kTextureFormatInfo[t.params.format];
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const {
+ method,
+ format,
+ bytesPerRow,
+ widthInBlocks,
+ copyWidthInBlocks,
+ copyHeightInBlocks,
+ copyDepth,
+ _success,
+ } = t.params;
+ const info = kTextureFormatInfo[format];
+
+    // We create an aligned texture using widthInBlocks, which may differ from copyWidthInBlocks.
+    // This allows us to test scenarios where the texture width and the copy width are different.
+ const texture = t.createAlignedTexture(format, {
+ width: widthInBlocks * info.blockWidth,
+ height: copyHeightInBlocks * info.blockHeight,
+ depthOrArrayLayers: copyDepth,
+ });
+
+ const layout = { bytesPerRow, rowsPerImage: copyHeightInBlocks };
+ const copySize = {
+ width: copyWidthInBlocks * info.blockWidth,
+ height: copyHeightInBlocks * info.blockHeight,
+ depthOrArrayLayers: copyDepth,
+ };
+ const { minDataSizeOrOverestimate } = dataBytesForCopyOrOverestimate({
+ layout,
+ format,
+ copySize,
+ method,
+ });
+
+ t.testRun({ texture }, layout, copySize, {
+ dataSize: minDataSizeOrOverestimate,
+ method,
+ success: _success,
+ });
+ });
+
+g.test('bound_on_offset')
+ .desc(
+ `
+Test that the offset cannot be larger than the linear data size (even for an empty copy).
+- for various offsets and data sizes
+`
+ )
+ .params(u =>
+ u
+ .combine('method', kImageCopyTypes)
+ .beginSubcases()
+ .combine('offsetInBlocks', [0, 1, 2])
+ .combine('dataSizeInBlocks', [0, 1, 2])
+ )
+ .fn(t => {
+ const { offsetInBlocks, dataSizeInBlocks, method } = t.params;
+
+ const format = 'rgba8unorm';
+ const info = kTextureFormatInfo[format];
+ const offset = offsetInBlocks * info.color.bytes;
+ const dataSize = dataSizeInBlocks * info.color.bytes;
+
+ const texture = t.device.createTexture({
+ size: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ format,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ });
+
+ const success = offset <= dataSize;
+
+ t.testRun(
+ { texture },
+ { offset, bytesPerRow: 0 },
+ { width: 0, height: 0, depthOrArrayLayers: 0 },
+ { dataSize, method, success }
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/image_copy/texture_related.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/image_copy/texture_related.spec.ts
new file mode 100644
index 0000000000..a0fe38e8e3
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/image_copy/texture_related.spec.ts
@@ -0,0 +1,534 @@
+export const description = `Texture-related validation tests for B2T copy, T2B copy and writeTexture.`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { assert } from '../../../../common/util/util.js';
+import { kTextureDimensions, kTextureUsages } from '../../../capability_info.js';
+import { GPUConst } from '../../../constants.js';
+import {
+ kColorTextureFormats,
+ kSizedTextureFormats,
+ kTextureFormatInfo,
+ textureDimensionAndFormatCompatible,
+} from '../../../format_info.js';
+import { kResourceStates } from '../../../gpu_test.js';
+import { align } from '../../../util/math.js';
+import { virtualMipSize } from '../../../util/texture/base.js';
+import { kImageCopyTypes } from '../../../util/texture/layout.js';
+
+import {
+ ImageCopyTest,
+ texelBlockAlignmentTestExpanderForValueToCoordinate,
+ formatCopyableWithMethod,
+ getACopyableAspectWithMethod,
+} from './image_copy.js';
+
+export const g = makeTestGroup(ImageCopyTest);
+
+g.test('valid')
+ .desc(
+ `
+Test that the texture must be valid and not destroyed.
+- for all copy methods
+- for all texture states
+- for various dimensions
+`
+ )
+ .params(u =>
+ u //
+ .combine('method', kImageCopyTypes)
+ .combine('textureState', kResourceStates)
+ .combineWithParams([
+ { dimension: '1d', size: [4, 1, 1] },
+ { dimension: '2d', size: [4, 4, 1] },
+ { dimension: '2d', size: [4, 4, 3] },
+ { dimension: '3d', size: [4, 4, 3] },
+ ] as const)
+ )
+ .fn(t => {
+ const { method, textureState, size, dimension } = t.params;
+
+ const texture = t.createTextureWithState(textureState, {
+ size,
+ dimension,
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ });
+
+ const success = textureState === 'valid';
+ const submit = textureState !== 'invalid';
+
+ t.testRun(
+ { texture },
+ { bytesPerRow: 0 },
+ { width: 0, height: 0, depthOrArrayLayers: 0 },
+ { dataSize: 1, method, success, submit }
+ );
+ });
+
+g.test('texture,device_mismatch')
+  .desc('Tests that image copies cannot be called with a texture created from another device')
+ .paramsSubcasesOnly(u =>
+ u.combine('method', kImageCopyTypes).combine('mismatched', [true, false])
+ )
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { method, mismatched } = t.params;
+ const sourceDevice = mismatched ? t.mismatchedDevice : t.device;
+
+ const texture = sourceDevice.createTexture({
+ size: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ });
+
+ t.testRun(
+ { texture },
+ { bytesPerRow: 0 },
+ { width: 0, height: 0, depthOrArrayLayers: 0 },
+ { dataSize: 1, method, success: !mismatched }
+ );
+ });
+
+g.test('usage')
+ .desc(
+ `
+The texture must have the appropriate COPY_SRC/COPY_DST usage.
+- for various copy methods
+- for various dimensions
+- for various usages
+`
+ )
+ .params(u =>
+ u
+ .combine('method', kImageCopyTypes)
+ .combineWithParams([
+ { dimension: '1d', size: [4, 1, 1] },
+ { dimension: '2d', size: [4, 4, 1] },
+ { dimension: '2d', size: [4, 4, 3] },
+ { dimension: '3d', size: [4, 4, 3] },
+ ] as const)
+ .beginSubcases()
+      // If usage0 and usage1 are the same, the usage being tested is a single usage. Otherwise, it's
+ // a combined usage.
+ .combine('usage0', kTextureUsages)
+ .combine('usage1', kTextureUsages)
+ // RENDER_ATTACHMENT is not valid with 1d and 3d textures.
+ .unless(
+ ({ usage0, usage1, dimension }) =>
+ ((usage0 | usage1) & GPUConst.TextureUsage.RENDER_ATTACHMENT) !== 0 &&
+ (dimension === '1d' || dimension === '3d')
+ )
+ )
+ .fn(t => {
+ const { usage0, usage1, method, size, dimension } = t.params;
+
+ const usage = usage0 | usage1;
+ const texture = t.device.createTexture({
+ size,
+ dimension,
+ format: 'rgba8unorm',
+ usage,
+ });
+
+ const success =
+ method === 'CopyT2B'
+ ? (usage & GPUTextureUsage.COPY_SRC) !== 0
+ : (usage & GPUTextureUsage.COPY_DST) !== 0;
+
+ t.testRun(
+ { texture },
+ { bytesPerRow: 0 },
+ { width: 0, height: 0, depthOrArrayLayers: 0 },
+ { dataSize: 1, method, success }
+ );
+ });
+
+g.test('sample_count')
+ .desc(
+ `
+Test that multisampled textures cannot be copied.
+- for various copy methods
+- multisampled or not
+
+Note: we don't test 1D, 2D array and 3D textures because multisampling is not supported for them.
+`
+ )
+ .params(u =>
+ u //
+ .combine('method', kImageCopyTypes)
+ .beginSubcases()
+ .combine('sampleCount', [1, 4])
+ )
+ .fn(t => {
+ const { sampleCount, method } = t.params;
+
+ const texture = t.device.createTexture({
+ size: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ sampleCount,
+ format: 'rgba8unorm',
+ usage:
+ GPUTextureUsage.COPY_SRC |
+ GPUTextureUsage.COPY_DST |
+ GPUTextureUsage.TEXTURE_BINDING |
+ GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const success = sampleCount === 1;
+
+ t.testRun(
+ { texture },
+ { bytesPerRow: 0 },
+ { width: 0, height: 0, depthOrArrayLayers: 0 },
+ { dataSize: 1, method, success }
+ );
+ });
+
+g.test('mip_level')
+ .desc(
+ `
+Test that the mipLevel of the copy must be in range of the texture.
+- for various copy methods
+- for various dimensions
+- for several mipLevelCounts
+- for several target/source mipLevels`
+ )
+ .params(u =>
+ u
+ .combine('method', kImageCopyTypes)
+ .combineWithParams([
+ { dimension: '1d', size: [32, 1, 1] },
+ { dimension: '2d', size: [32, 32, 1] },
+ { dimension: '2d', size: [32, 32, 3] },
+ { dimension: '3d', size: [32, 32, 3] },
+ ] as const)
+ .beginSubcases()
+ .combine('mipLevelCount', [1, 3, 5])
+ .unless(p => p.dimension === '1d' && p.mipLevelCount !== 1)
+ .combine('mipLevel', [0, 1, 3, 4])
+ )
+ .fn(t => {
+ const { mipLevelCount, mipLevel, method, size, dimension } = t.params;
+
+ const texture = t.device.createTexture({
+ size,
+ dimension,
+ mipLevelCount,
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ });
+
+ const success = mipLevel < mipLevelCount;
+
+ t.testRun(
+ { texture, mipLevel },
+ { bytesPerRow: 0 },
+ { width: 0, height: 0, depthOrArrayLayers: 0 },
+ { dataSize: 1, method, success }
+ );
+ });
+
+g.test('format')
+ .desc(
+ `
+Test that the copy must be a full subresource if the texture's format is a depth/stencil format.
+- for various copy methods
+- for various dimensions
+- for all sized formats
+- for a couple target/source mipLevels
+- for some modifier (or not) for the full copy size
+`
+ )
+ .params(u =>
+ u //
+ .combine('method', kImageCopyTypes)
+ .combineWithParams([
+ { depthOrArrayLayers: 1, dimension: '1d' },
+ { depthOrArrayLayers: 1, dimension: '2d' },
+ { depthOrArrayLayers: 3, dimension: '2d' },
+ { depthOrArrayLayers: 32, dimension: '3d' },
+ ] as const)
+ .combine('format', kSizedTextureFormats)
+ .filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format))
+ .filter(formatCopyableWithMethod)
+ .beginSubcases()
+ .combine('mipLevel', [0, 2])
+ .unless(p => p.dimension === '1d' && p.mipLevel !== 0)
+ .combine('copyWidthModifier', [0, -1])
+ .combine('copyHeightModifier', [0, -1])
+      // If the texture has multiple depth/array slices and it is not a 3D texture (i.e. it is an array
+      // texture), no depth modifier is needed for the third dimension: different layers are different
+      // subresources in an array texture, and whether a copy is full or non-full doesn't make sense
+      // across different subresources. However, different depth slices on the same mip level are within
+      // the same subresource for a 3D texture, so we need to examine the depth dimension via
+      // copyDepthModifier to determine whether the copy is a full copy for a 3D texture.
+ .expand('copyDepthModifier', ({ dimension: d }) => (d === '3d' ? [0, -1] : [0]))
+ )
+ .beforeAllSubcases(t => {
+ const info = kTextureFormatInfo[t.params.format];
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const {
+ method,
+ depthOrArrayLayers,
+ dimension,
+ format,
+ mipLevel,
+ copyWidthModifier,
+ copyHeightModifier,
+ copyDepthModifier,
+ } = t.params;
+
+ const info = kTextureFormatInfo[format];
+ const size = { width: 32 * info.blockWidth, height: 32 * info.blockHeight, depthOrArrayLayers };
+ if (dimension === '1d') {
+ size.height = 1;
+ }
+
+ const texture = t.device.createTexture({
+ size,
+ dimension,
+ format,
+ mipLevelCount: dimension === '1d' ? 1 : 5,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ });
+
+ let success = true;
+ if (
+ (info.depth || info.stencil) &&
+ (copyWidthModifier !== 0 || copyHeightModifier !== 0 || copyDepthModifier !== 0)
+ ) {
+ success = false;
+ }
+
+ const levelSize = virtualMipSize(
+ dimension,
+ [size.width, size.height, size.depthOrArrayLayers],
+ mipLevel
+ );
+ const copySize = [
+ levelSize[0] + copyWidthModifier * info.blockWidth,
+ levelSize[1] + copyHeightModifier * info.blockHeight,
+      // Note that compressed formats are not supported for 3D textures yet, so there is no info.blockDepth.
+ levelSize[2] + copyDepthModifier,
+ ];
+
+ t.testRun(
+ { texture, mipLevel, aspect: getACopyableAspectWithMethod({ format, method }) },
+ { bytesPerRow: 512, rowsPerImage: 32 },
+ copySize,
+ {
+ dataSize: 512 * 32 * 32,
+ method,
+ success,
+ }
+ );
+ });
+
+g.test('origin_alignment')
+ .desc(
+ `
+Test that the texture copy origin must be aligned to the format's block size.
+- for various copy methods
+- for all color formats (depth stencil formats require a full copy)
+- for X, Y and Z coordinates
+- for various values for that coordinate depending on the block size
+`
+ )
+ .params(u =>
+ u
+ .combine('method', kImageCopyTypes)
+      // No need to test depth/stencil formats because their copy origin must be [0, 0, 0], which is already aligned with the block size.
+ .combine('format', kColorTextureFormats)
+ .filter(formatCopyableWithMethod)
+ .combineWithParams([
+ { depthOrArrayLayers: 1, dimension: '1d' },
+ { depthOrArrayLayers: 1, dimension: '2d' },
+ { depthOrArrayLayers: 3, dimension: '2d' },
+ { depthOrArrayLayers: 3, dimension: '3d' },
+ ] as const)
+ .filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format))
+ .beginSubcases()
+ .combine('coordinateToTest', ['x', 'y', 'z'] as const)
+ .unless(p => p.dimension === '1d' && p.coordinateToTest !== 'x')
+ .expand('valueToCoordinate', texelBlockAlignmentTestExpanderForValueToCoordinate)
+ )
+ .beforeAllSubcases(t => {
+ const info = kTextureFormatInfo[t.params.format];
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { valueToCoordinate, coordinateToTest, format, method, depthOrArrayLayers, dimension } =
+ t.params;
+ const info = kTextureFormatInfo[format];
+ const size = { width: 0, height: 0, depthOrArrayLayers };
+ const origin = { x: 0, y: 0, z: 0 };
+ let success = true;
+
+ origin[coordinateToTest] = valueToCoordinate;
+ switch (coordinateToTest) {
+ case 'x': {
+ success = origin.x % info.blockWidth === 0;
+ break;
+ }
+ case 'y': {
+ success = origin.y % info.blockHeight === 0;
+ break;
+ }
+ }
+
+ const texture = t.createAlignedTexture(format, size, origin, dimension);
+
+ t.testRun({ texture, origin }, { bytesPerRow: 0, rowsPerImage: 0 }, size, {
+ dataSize: 1,
+ method,
+ success,
+ });
+ });
+
+g.test('size_alignment')
+ .desc(
+ `
+Test that the copy size must be aligned to the texture's format's block size.
+- for various copy methods
+- for all formats (depth-stencil formats require a full copy)
+- for all texture dimensions
+- for the size's parameters to test (width / height / depth)
+- for various values for that copy size parameters, depending on the block size
+`
+ )
+ .params(u =>
+ u
+ .combine('method', kImageCopyTypes)
+      // No need to test depth/stencil formats because their copy size must equal the subresource's size, which is already aligned with the block size.
+ .combine('format', kColorTextureFormats)
+ .filter(formatCopyableWithMethod)
+ .combine('dimension', kTextureDimensions)
+ .filter(({ dimension, format }) => textureDimensionAndFormatCompatible(dimension, format))
+ .beginSubcases()
+ .combine('coordinateToTest', ['width', 'height', 'depthOrArrayLayers'] as const)
+ .unless(p => p.dimension === '1d' && p.coordinateToTest !== 'width')
+ .expand('valueToCoordinate', texelBlockAlignmentTestExpanderForValueToCoordinate)
+ )
+ .beforeAllSubcases(t => {
+ const info = kTextureFormatInfo[t.params.format];
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { valueToCoordinate, coordinateToTest, dimension, format, method } = t.params;
+ const info = kTextureFormatInfo[format];
+ const size = { width: 0, height: 0, depthOrArrayLayers: 0 };
+ const origin = { x: 0, y: 0, z: 0 };
+ let success = true;
+
+ size[coordinateToTest] = valueToCoordinate;
+ switch (coordinateToTest) {
+ case 'width': {
+ success = size.width % info.blockWidth === 0;
+ break;
+ }
+ case 'height': {
+ success = size.height % info.blockHeight === 0;
+ break;
+ }
+ }
+
+ const texture = t.createAlignedTexture(format, size, origin, dimension);
+
+ const bytesPerRow = align(
+ Math.max(1, Math.ceil(size.width / info.blockWidth)) * info.bytesPerBlock,
+ 256
+ );
+ const rowsPerImage = Math.ceil(size.height / info.blockHeight);
+ t.testRun({ texture, origin }, { bytesPerRow, rowsPerImage }, size, {
+ dataSize: 1,
+ method,
+ success,
+ });
+ });
+
+g.test('copy_rectangle')
+ .desc(
+ `
+Test that the max corner of the copy rectangle (origin+copySize) must be inside the texture.
+- for various copy methods
+- for all dimensions
+- for the X, Y and Z dimensions
+- for various origin and copy size values (and texture sizes)
+- for various mip levels
+`
+ )
+ .params(u =>
+ u
+ .combine('method', kImageCopyTypes)
+ .combine('dimension', kTextureDimensions)
+ .beginSubcases()
+ .combine('originValue', [7, 8])
+ .combine('copySizeValue', [7, 8])
+ .combine('textureSizeValue', [14, 15])
+ .combine('mipLevel', [0, 2])
+ .combine('coordinateToTest', [0, 1, 2] as const)
+ .unless(p => p.dimension === '1d' && (p.coordinateToTest !== 0 || p.mipLevel !== 0))
+ )
+ .fn(t => {
+ const {
+ originValue,
+ copySizeValue,
+ textureSizeValue,
+ mipLevel,
+ coordinateToTest,
+ method,
+ dimension,
+ } = t.params;
+ const format = 'rgba8unorm';
+ const info = kTextureFormatInfo[format];
+
+ const origin = [0, 0, 0];
+ const copySize = [0, 0, 0];
+ const textureSize = { width: 16 << mipLevel, height: 16 << mipLevel, depthOrArrayLayers: 16 };
+ if (dimension === '1d') {
+ textureSize.height = 1;
+ textureSize.depthOrArrayLayers = 1;
+ }
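+    // Along the tested coordinate the copied mip level has extent textureSizeValue (the base texture
+    // size is scaled by << mipLevel below, except for the array-layer count of non-3D textures), so
+    // the copy fits exactly when originValue + copySizeValue <= textureSizeValue, e.g. 7 + 8 = 15
+    // fits into 15 but not into 14.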
+ const success = originValue + copySizeValue <= textureSizeValue;
+
+ origin[coordinateToTest] = originValue;
+ copySize[coordinateToTest] = copySizeValue;
+ switch (coordinateToTest) {
+ case 0: {
+ textureSize.width = textureSizeValue << mipLevel;
+ break;
+ }
+ case 1: {
+ textureSize.height = textureSizeValue << mipLevel;
+ break;
+ }
+ case 2: {
+ textureSize.depthOrArrayLayers =
+ dimension === '3d' ? textureSizeValue << mipLevel : textureSizeValue;
+ break;
+ }
+ }
+
+ const texture = t.device.createTexture({
+ size: textureSize,
+ dimension,
+ mipLevelCount: dimension === '1d' ? 1 : 3,
+ format,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ });
+
+ assert(copySize[0] % info.blockWidth === 0);
+ const bytesPerRow = align(copySize[0] / info.blockWidth, 256);
+ assert(copySize[1] % info.blockHeight === 0);
+ const rowsPerImage = copySize[1] / info.blockHeight;
+ t.testRun({ texture, origin, mipLevel }, { bytesPerRow, rowsPerImage }, copySize, {
+ dataSize: 1,
+ method,
+ success,
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/layout_shader_compat.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/layout_shader_compat.spec.ts
new file mode 100644
index 0000000000..986fc42296
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/layout_shader_compat.spec.ts
@@ -0,0 +1,14 @@
+export const description = `
+TODO:
+- interface matching between pipeline layout and shader
+ - x= {compute, vertex, fragment, vertex+fragment}, visibilities
+ - x= bind group index values, binding index values, multiple bindings
+ - x= types of bindings
+ - x= {equal, superset, subset}
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+
+import { ValidationTest } from './validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/query_set/create.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/query_set/create.spec.ts
new file mode 100644
index 0000000000..d6ca908155
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/query_set/create.spec.ts
@@ -0,0 +1,34 @@
+export const description = `
+Tests for validation in createQuerySet.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { kQueryTypes, kMaxQueryCount } from '../../../capability_info.js';
+import { ValidationTest } from '../validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('count')
+ .desc(
+ `
+Tests createQuerySet with various counts for all query types:
+- count {<, =, >} kMaxQueryCount
+- x= {occlusion, timestamp} query
+ `
+ )
+ .params(u =>
+ u
+ .combine('type', kQueryTypes)
+ .beginSubcases()
+ .combine('count', [0, kMaxQueryCount, kMaxQueryCount + 1])
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceForQueryTypeOrSkipTestCase(t.params.type);
+ })
+ .fn(t => {
+ const { type, count } = t.params;
+
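+    // kMaxQueryCount is 4096 (from capability_info.js), so only the kMaxQueryCount + 1 subcase is
+    // expected to produce a validation error.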
+ t.expectValidationError(() => {
+ t.device.createQuerySet({ type, count });
+ }, count > kMaxQueryCount);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/query_set/destroy.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/query_set/destroy.spec.ts
new file mode 100644
index 0000000000..0a3c4fe241
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/query_set/destroy.spec.ts
@@ -0,0 +1,33 @@
+export const description = `
+Destroying a query set more than once is allowed.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { ValidationTest } from '../validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('twice').fn(t => {
+ const qset = t.device.createQuerySet({ type: 'occlusion', count: 1 });
+
+ qset.destroy();
+ qset.destroy();
+});
+
+g.test('invalid_queryset')
+ .desc('Test that invalid querysets may be destroyed without generating validation errors.')
+ .fn(async t => {
+ t.device.pushErrorScope('validation');
+
+ const invalidQuerySet = t.device.createQuerySet({
+ type: 'occlusion',
+ count: 4097, // 4096 is the limit
+ });
+
+ // Expect error because it's invalid.
+ const error = await t.device.popErrorScope();
+ t.expect(!!error);
+
+ // This line should not generate an error
+ invalidQuerySet.destroy();
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/README.txt
new file mode 100644
index 0000000000..a46a0e3d1c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/README.txt
@@ -0,0 +1,13 @@
+Tests for validation that occurs inside queued operations
+(submit, writeBuffer, writeTexture, copyExternalImageToTexture).
+
+BufferMapStatesToTest = {
+ mapped -> unmapped,
+ mapped at creation -> unmapped,
+ mapping pending -> unmapped,
+ pending -> mapped (await map),
+ unmapped -> pending (noawait map),
+ created mapped-at-creation,
+}
+
+Note writeTexture is tested in image_copy.
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/buffer_mapped.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/buffer_mapped.spec.ts
new file mode 100644
index 0000000000..9254ee31a0
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/buffer_mapped.spec.ts
@@ -0,0 +1,280 @@
+export const description = `
+Validation tests for the map-state of mappable buffers used in submitted command buffers.
+
+Tests every operation that has a dependency on a buffer
+ - writeBuffer
+ - copyB2B {src,dst}
+ - copyB2T
+ - copyT2B
+
+Test those operations against buffers in the following states:
+ - Unmapped
+ - In the process of mapping
+ - mapped
+ - mapped with a mapped range queried
+ - unmapped after mapping
+ - mapped at creation
+
+Also tests every ordering of mapping operations and command recording operations, to ensure the
+mapping state is only considered when a command buffer is submitted.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { ValidationTest } from '../validation_test.js';
+
+class F extends ValidationTest {
+ async runBufferDependencyTest(usage: number, callback: Function): Promise<void> {
+ const bufferDesc = {
+ size: 8,
+ usage,
+ mappedAtCreation: false,
+ };
+
+ const mapMode = usage & GPUBufferUsage.MAP_READ ? GPUMapMode.READ : GPUMapMode.WRITE;
+
+ // Create a mappable buffer, and one that will remain unmapped for comparison.
+ const mappableBuffer = this.device.createBuffer(bufferDesc);
+ const unmappedBuffer = this.device.createBuffer(bufferDesc);
+
+ // Run the given operation before the buffer is mapped. Should succeed.
+ callback(mappableBuffer);
+
+ // Map the buffer
+ const mapPromise = mappableBuffer.mapAsync(mapMode);
+
+ // Run the given operation while the buffer is in the process of mapping. Should fail.
+ this.expectValidationError(() => {
+ callback(mappableBuffer);
+ });
+
+ // Run on a different, unmapped buffer. Should succeed.
+ callback(unmappedBuffer);
+
+ await mapPromise;
+
+ // Run the given operation when the buffer is finished mapping with no getMappedRange. Should fail.
+ this.expectValidationError(() => {
+ callback(mappableBuffer);
+ });
+
+ // Run on a different, unmapped buffer. Should succeed.
+ callback(unmappedBuffer);
+
+ // Run the given operation when the buffer is mapped with getMappedRange. Should fail.
+ mappableBuffer.getMappedRange();
+ this.expectValidationError(() => {
+ callback(mappableBuffer);
+ });
+
+ // Unmap the buffer and run the operation. Should succeed.
+ mappableBuffer.unmap();
+ callback(mappableBuffer);
+
+ // Create a buffer that's mappedAtCreation.
+ bufferDesc.mappedAtCreation = true;
+ const mappedBuffer = this.device.createBuffer(bufferDesc);
+
+ // Run the operation with the mappedAtCreation buffer. Should fail.
+ this.expectValidationError(() => {
+ callback(mappedBuffer);
+ });
+
+ // Run on a different, unmapped buffer. Should succeed.
+ callback(unmappedBuffer);
+
+ // Unmap the mappedAtCreation buffer and run the operation. Should succeed.
+ mappedBuffer.unmap();
+ callback(mappedBuffer);
+ }
+}
+
+export const g = makeTestGroup(F);
+
+g.test('writeBuffer')
+ .desc(`Test that an outstanding mapping will prevent writeBuffer calls.`)
+ .fn(async t => {
+ const data = new Uint32Array([42]);
+
+ await t.runBufferDependencyTest(
+ GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
+ (buffer: GPUBuffer) => {
+ t.queue.writeBuffer(buffer, 0, data);
+ }
+ );
+ });
+
+g.test('copyBufferToBuffer')
+ .desc(
+ `
+    Test that an outstanding mapping will prevent copyBufferToBuffer commands from submitting,
+    both when the mapped buffer is used as the source and as the destination.`
+ )
+ .fn(async t => {
+ const sourceBuffer = t.device.createBuffer({
+ size: 8,
+ usage: GPUBufferUsage.COPY_SRC,
+ });
+
+ const destBuffer = t.device.createBuffer({
+ size: 8,
+ usage: GPUBufferUsage.COPY_DST,
+ });
+
+ await t.runBufferDependencyTest(
+ GPUBufferUsage.MAP_WRITE | GPUBufferUsage.COPY_SRC,
+ (buffer: GPUBuffer) => {
+ const commandEncoder = t.device.createCommandEncoder();
+ commandEncoder.copyBufferToBuffer(buffer, 0, destBuffer, 0, 4);
+ t.queue.submit([commandEncoder.finish()]);
+ }
+ );
+
+ await t.runBufferDependencyTest(
+ GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
+ (buffer: GPUBuffer) => {
+ const commandEncoder = t.device.createCommandEncoder();
+ commandEncoder.copyBufferToBuffer(sourceBuffer, 0, buffer, 0, 4);
+ t.queue.submit([commandEncoder.finish()]);
+ }
+ );
+ });
+
+g.test('copyBufferToTexture')
+ .desc(
+ `Test that an outstanding mapping will prevent copyBufferToTexture commands from submitting.`
+ )
+ .fn(async t => {
+ const size = { width: 1, height: 1 };
+
+ const texture = t.device.createTexture({
+ size,
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_DST,
+ });
+
+ await t.runBufferDependencyTest(
+ GPUBufferUsage.MAP_WRITE | GPUBufferUsage.COPY_SRC,
+ (buffer: GPUBuffer) => {
+ const commandEncoder = t.device.createCommandEncoder();
+ commandEncoder.copyBufferToTexture({ buffer }, { texture }, size);
+ t.queue.submit([commandEncoder.finish()]);
+ }
+ );
+ });
+
+g.test('copyTextureToBuffer')
+ .desc(
+ `Test that an outstanding mapping will prevent copyTextureToBuffer commands from submitting.`
+ )
+ .fn(async t => {
+ const size = { width: 1, height: 1 };
+
+ const texture = t.device.createTexture({
+ size,
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_SRC,
+ });
+
+ await t.runBufferDependencyTest(
+ GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
+ (buffer: GPUBuffer) => {
+ const commandEncoder = t.device.createCommandEncoder();
+ commandEncoder.copyTextureToBuffer({ texture }, { buffer }, size);
+ t.queue.submit([commandEncoder.finish()]);
+ }
+ );
+ });
+
+g.test('map_command_recording_order')
+ .desc(
+ `
+Test that the order of mapping a buffer relative to when commands are recorded that use it
+ does not matter, as long as the buffer is unmapped when the commands are submitted.
+ `
+ )
+ .paramsSubcasesOnly([
+ {
+ order: ['record', 'map', 'unmap', 'finish', 'submit'],
+ mappedAtCreation: false,
+ _shouldError: false,
+ },
+ {
+ order: ['record', 'map', 'finish', 'unmap', 'submit'],
+ mappedAtCreation: false,
+ _shouldError: false,
+ },
+ {
+ order: ['record', 'finish', 'map', 'unmap', 'submit'],
+ mappedAtCreation: false,
+ _shouldError: false,
+ },
+ {
+ order: ['map', 'record', 'unmap', 'finish', 'submit'],
+ mappedAtCreation: false,
+ _shouldError: false,
+ },
+ {
+ order: ['map', 'record', 'finish', 'unmap', 'submit'],
+ mappedAtCreation: false,
+ _shouldError: false,
+ },
+ {
+ order: ['map', 'record', 'finish', 'submit', 'unmap'],
+ mappedAtCreation: false,
+ _shouldError: true,
+ },
+ {
+ order: ['record', 'map', 'finish', 'submit', 'unmap'],
+ mappedAtCreation: false,
+ _shouldError: true,
+ },
+ {
+ order: ['record', 'finish', 'map', 'submit', 'unmap'],
+ mappedAtCreation: false,
+ _shouldError: true,
+ },
+ { order: ['record', 'unmap', 'finish', 'submit'], mappedAtCreation: true, _shouldError: false },
+ { order: ['record', 'finish', 'unmap', 'submit'], mappedAtCreation: true, _shouldError: false },
+ { order: ['record', 'finish', 'submit', 'unmap'], mappedAtCreation: true, _shouldError: true },
+ ] as const)
+ .fn(async t => {
+ const { order, mappedAtCreation, _shouldError: shouldError } = t.params;
+
+ const buffer = t.device.createBuffer({
+ size: 4,
+ usage: GPUBufferUsage.MAP_WRITE | GPUBufferUsage.COPY_SRC,
+ mappedAtCreation,
+ });
+
+ const targetBuffer = t.device.createBuffer({
+ size: 4,
+ usage: GPUBufferUsage.COPY_DST,
+ });
+
+ const commandEncoder = t.device.createCommandEncoder();
+ let commandBuffer: GPUCommandBuffer;
+
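+ // Each named step performs one action; the test runs them in the parameterized order. Only the
+ // 'submit' step can produce a validation error, and it is expected to do so exactly when the
+ // buffer is still mapped at submit time (_shouldError).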
+ const steps = {
+ record: () => {
+ commandEncoder.copyBufferToBuffer(buffer, 0, targetBuffer, 0, 4);
+ },
+ map: async () => {
+ await buffer.mapAsync(GPUMapMode.WRITE);
+ },
+ unmap: () => {
+ buffer.unmap();
+ },
+ finish: () => {
+ commandBuffer = commandEncoder.finish();
+ },
+ submit: () => {
+ t.expectValidationError(() => {
+ t.queue.submit([commandBuffer]);
+ }, shouldError);
+ },
+ };
+
+ for (const op of order) {
+ await steps[op]();
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/copyToTexture/CopyExternalImageToTexture.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/copyToTexture/CopyExternalImageToTexture.spec.ts
new file mode 100644
index 0000000000..4ac240d66e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/copyToTexture/CopyExternalImageToTexture.spec.ts
@@ -0,0 +1,816 @@
+export const description = `
+copyExternalImageToTexture Validation Tests in Queue.
+Note that we don't need to add tests for the destination texture dimension: we currently require
+the destination texture to have RENDER_ATTACHMENT usage, which is only allowed on 2D textures.
+`;
+
+import {
+ getResourcePath,
+ getCrossOriginResourcePath,
+} from '../../../../../common/framework/resources.js';
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { raceWithRejectOnTimeout, unreachable, assert } from '../../../../../common/util/util.js';
+import { kTextureUsages } from '../../../../capability_info.js';
+import {
+ kTextureFormatInfo,
+ kTextureFormats,
+ kValidTextureFormatsForCopyE2T,
+} from '../../../../format_info.js';
+import { kResourceStates } from '../../../../gpu_test.js';
+import {
+ CanvasType,
+ createCanvas,
+ createOnscreenCanvas,
+ createOffscreenCanvas,
+} from '../../../../util/create_elements.js';
+import { ValidationTest } from '../../validation_test.js';
+
+const kDefaultBytesPerPixel = 4; // using 'bgra8unorm' or 'rgba8unorm'
+const kDefaultWidth = 32;
+const kDefaultHeight = 32;
+const kDefaultDepth = 1;
+const kDefaultMipLevelCount = 6;
+
+function computeMipMapSize(width: number, height: number, mipLevel: number) {
+ return {
+ mipWidth: Math.max(width >> mipLevel, 1),
+ mipHeight: Math.max(height >> mipLevel, 1),
+ };
+}
+
+interface WithMipLevel {
+ mipLevel: number;
+}
+
+interface WithDstOriginMipLevel extends WithMipLevel {
+ dstOrigin: Required<GPUOrigin3DDict>;
+}
+
+// Helper function to generate copySize for src OOB test
+function generateCopySizeForSrcOOB({ srcOrigin }: { srcOrigin: Required<GPUOrigin2DDict> }) {
+ // OOB origin fails even with no-op copy.
+ if (srcOrigin.x > kDefaultWidth || srcOrigin.y > kDefaultHeight) {
+ return [{ width: 0, height: 0, depthOrArrayLayers: 0 }];
+ }
+
+ const justFitCopySize = {
+ width: kDefaultWidth - srcOrigin.x,
+ height: kDefaultHeight - srcOrigin.y,
+ depthOrArrayLayers: 1,
+ };
+
+ return [
+ justFitCopySize, // correct size, maybe no-op copy.
+ {
+ width: justFitCopySize.width + 1,
+ height: justFitCopySize.height,
+ depthOrArrayLayers: justFitCopySize.depthOrArrayLayers,
+ }, // OOB in width
+ {
+ width: justFitCopySize.width,
+ height: justFitCopySize.height + 1,
+ depthOrArrayLayers: justFitCopySize.depthOrArrayLayers,
+ }, // OOB in height
+ {
+ width: justFitCopySize.width,
+ height: justFitCopySize.height,
+ depthOrArrayLayers: justFitCopySize.depthOrArrayLayers + 1,
+ }, // OOB in depthOrArrayLayers
+ ];
+}
+
+// Helper function to generate dst origin value based on mipLevel.
+function generateDstOriginValue({ mipLevel }: WithMipLevel) {
+ const origin = computeMipMapSize(kDefaultWidth, kDefaultHeight, mipLevel);
+
+ return [
+ { x: 0, y: 0, z: 0 },
+ { x: origin.mipWidth - 1, y: 0, z: 0 },
+ { x: 0, y: origin.mipHeight - 1, z: 0 },
+ { x: origin.mipWidth, y: 0, z: 0 },
+ { x: 0, y: origin.mipHeight, z: 0 },
+ { x: 0, y: 0, z: kDefaultDepth },
+ { x: origin.mipWidth + 1, y: 0, z: 0 },
+ { x: 0, y: origin.mipHeight + 1, z: 0 },
+ { x: 0, y: 0, z: kDefaultDepth + 1 },
+ ];
+}
+
+// Helper function to generate copySize for dst OOB test
+function generateCopySizeForDstOOB({ mipLevel, dstOrigin }: WithDstOriginMipLevel) {
+ const dstMipMapSize = computeMipMapSize(kDefaultWidth, kDefaultHeight, mipLevel);
+
+ // OOB origin fails even with no-op copy.
+ if (
+ dstOrigin.x > dstMipMapSize.mipWidth ||
+ dstOrigin.y > dstMipMapSize.mipHeight ||
+ dstOrigin.z > kDefaultDepth
+ ) {
+ return [{ width: 0, height: 0, depthOrArrayLayers: 0 }];
+ }
+
+ const justFitCopySize = {
+ width: dstMipMapSize.mipWidth - dstOrigin.x,
+ height: dstMipMapSize.mipHeight - dstOrigin.y,
+ depthOrArrayLayers: kDefaultDepth - dstOrigin.z,
+ };
+
+ return [
+ justFitCopySize,
+ {
+ width: justFitCopySize.width + 1,
+ height: justFitCopySize.height,
+ depthOrArrayLayers: justFitCopySize.depthOrArrayLayers,
+ }, // OOB in width
+ {
+ width: justFitCopySize.width,
+ height: justFitCopySize.height + 1,
+ depthOrArrayLayers: justFitCopySize.depthOrArrayLayers,
+ }, // OOB in height
+ {
+ width: justFitCopySize.width,
+ height: justFitCopySize.height,
+ depthOrArrayLayers: justFitCopySize.depthOrArrayLayers + 1,
+ }, // OOB in depthOrArrayLayers
+ ];
+}
+
+class CopyExternalImageToTextureTest extends ValidationTest {
+ onlineCrossOriginUrl = 'https://raw.githubusercontent.com/gpuweb/gpuweb/main/logo/webgpu.png';
+
+ getImageData(width: number, height: number): ImageData {
+ if (typeof ImageData === 'undefined') {
+ this.skip('ImageData is not supported.');
+ }
+
+ const pixelSize = kDefaultBytesPerPixel * width * height;
+ const imagePixels = new Uint8ClampedArray(pixelSize);
+ return new ImageData(imagePixels, width, height);
+ }
+
+ getCanvasWithContent(
+ canvasType: CanvasType,
+ width: number,
+ height: number,
+ content: HTMLImageElement | HTMLCanvasElement | OffscreenCanvas | ImageBitmap
+ ): HTMLCanvasElement | OffscreenCanvas {
+ const canvas = createCanvas(this, canvasType, width, height);
+ const ctx = canvas.getContext('2d');
+ switch (canvasType) {
+ case 'onscreen':
+ assert(ctx instanceof CanvasRenderingContext2D);
+ break;
+ case 'offscreen':
+ assert(ctx instanceof OffscreenCanvasRenderingContext2D);
+ break;
+ }
+ ctx.drawImage(content, 0, 0);
+
+ return canvas;
+ }
+
+ createImageBitmap(image: ImageBitmapSource | OffscreenCanvas): Promise<ImageBitmap> {
+ if (typeof createImageBitmap === 'undefined') {
+ this.skip('Creating ImageBitmaps is not supported.');
+ }
+ return createImageBitmap(image);
+ }
+
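+ // Performs copyExternalImageToTexture and checks the outcome. If `exceptionName` is non-empty,
+ // the call is expected to throw that exception synchronously; otherwise the call is wrapped in
+ // expectValidationError, which expects an asynchronous validation error unless
+ // `validationScopeSuccess` is true.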
+ runTest(
+ imageBitmapCopyView: GPUImageCopyExternalImage,
+ textureCopyView: GPUImageCopyTextureTagged,
+ copySize: GPUExtent3D,
+ validationScopeSuccess: boolean,
+ exceptionName?: string
+ ): void {
+ // copyExternalImageToTexture can generate two kinds of errors: synchronous exceptions and
+ // asynchronous errors reported through validation error scopes.
+ if (exceptionName) {
+ this.shouldThrow(exceptionName, () => {
+ this.device.queue.copyExternalImageToTexture(
+ imageBitmapCopyView,
+ textureCopyView,
+ copySize
+ );
+ });
+ } else {
+ this.expectValidationError(() => {
+ this.device.queue.copyExternalImageToTexture(
+ imageBitmapCopyView,
+ textureCopyView,
+ copySize
+ );
+ }, !validationScopeSuccess);
+ }
+ }
+}
+
+export const g = makeTestGroup(CopyExternalImageToTextureTest);
+
+g.test('source_image,crossOrigin')
+ .desc(
+ `
+ Test that the contents of the source image are [origin-clean, cross-origin].
+
+ Load a cross-origin image or a same-origin image and use it to initialize the
+ source image.
+
+ Check whether a 'SecurityError' is generated when the source image is not origin-clean.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('sourceImage', ['canvas', 'offscreenCanvas', 'imageBitmap'])
+ .combine('isOriginClean', [true, false])
+ .beginSubcases()
+ .combine('contentFrom', ['image', 'imageBitmap', 'canvas', 'offscreenCanvas'] as const)
+ .combine('copySize', [
+ { width: 0, height: 0, depthOrArrayLayers: 0 },
+ { width: 1, height: 1, depthOrArrayLayers: 1 },
+ ])
+ )
+ .fn(async t => {
+ const { sourceImage, isOriginClean, contentFrom, copySize } = t.params;
+ if (typeof document === 'undefined') {
+ t.skip('DOM is not available to create an image element.');
+ }
+
+ const crossOriginUrl = getCrossOriginResourcePath('webgpu.png', t.onlineCrossOriginUrl);
+ const originCleanUrl = getResourcePath('webgpu.png');
+ const img = document.createElement('img');
+ img.src = isOriginClean ? originCleanUrl : crossOriginUrl;
+
+ // Load image
+ const timeout_ms = 5000;
+ try {
+ await raceWithRejectOnTimeout(img.decode(), timeout_ms, 'load image timeout');
+ } catch (e) {
+ if (isOriginClean) {
+ throw e;
+ } else {
+ t.skip('Cannot load cross origin image in time');
+ return;
+ }
+ }
+
+ // The externalImage contents can be updated from:
+ // - a decoded image element,
+ // - a canvas/offscreenCanvas with the image drawn on it,
+ // - an imageBitmap created from the image.
+ // The test covers all of these cases to ensure the origin-clean checks work.
+ let source: HTMLImageElement | HTMLCanvasElement | OffscreenCanvas | ImageBitmap;
+ switch (contentFrom) {
+ case 'image': {
+ source = img;
+ break;
+ }
+ case 'imageBitmap': {
+ source = await t.createImageBitmap(img);
+ break;
+ }
+ case 'canvas':
+ case 'offscreenCanvas': {
+ const canvasType = contentFrom === 'offscreenCanvas' ? 'offscreen' : 'onscreen';
+ source = t.getCanvasWithContent(canvasType, 1, 1, img);
+ break;
+ }
+ default:
+ unreachable();
+ }
+
+ // Update the externalImage content with source.
+ let externalImage: HTMLCanvasElement | OffscreenCanvas | ImageBitmap;
+ switch (sourceImage) {
+ case 'imageBitmap': {
+ externalImage = await t.createImageBitmap(source);
+ break;
+ }
+ case 'canvas':
+ case 'offscreenCanvas': {
+ const canvasType = sourceImage === 'offscreenCanvas' ? 'offscreen' : 'onscreen';
+ externalImage = t.getCanvasWithContent(canvasType, 1, 1, source);
+ break;
+ }
+ default:
+ unreachable();
+ }
+
+ const dstTexture = t.device.createTexture({
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ format: 'bgra8unorm',
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ t.runTest(
+ { source: externalImage },
+ { texture: dstTexture },
+ copySize,
+ true, // No validation errors.
+ isOriginClean ? '' : 'SecurityError'
+ );
+ });
+
+g.test('source_imageBitmap,state')
+ .desc(
+ `
+ Test ImageBitmap as source image in state [valid, closed].
+
+ Call imageBitmap.close() to transition the imageBitmap into the
+ 'closed' state.
+
+ Check whether 'InvalidStateError' is generated when ImageBitmap is
+ closed.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('closed', [false, true])
+ .beginSubcases()
+ .combine('copySize', [
+ { width: 0, height: 0, depthOrArrayLayers: 0 },
+ { width: 1, height: 1, depthOrArrayLayers: 1 },
+ ])
+ )
+ .fn(async t => {
+ const { closed, copySize } = t.params;
+ const imageBitmap = await t.createImageBitmap(t.getImageData(1, 1));
+ const dstTexture = t.device.createTexture({
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ format: 'bgra8unorm',
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ if (closed) imageBitmap.close();
+
+ t.runTest(
+ { source: imageBitmap },
+ { texture: dstTexture },
+ copySize,
+ true, // No validation errors.
+ closed ? 'InvalidStateError' : ''
+ );
+ });
+
+g.test('source_canvas,state')
+ .desc(
+ `
+ Test HTMLCanvasElement as the source image in state
+ ['nocontext', 'placeholder-nocontext', 'placeholder-hascontext', 'valid'].
+
+ 'nocontext' means a canvas without any context is used as the copy source.
+
+ Calling 'transferControlToOffscreen' on an HTMLCanvasElement transfers control of the
+ canvas, which puts it into the 'placeholder' state. Whether getContext is later called
+ on the newly created OffscreenCanvas does not affect the original canvas's state.
+
+ Check whether an 'OperationError' is generated when the HTMLCanvasElement has no
+ context.
+
+ Check whether an 'InvalidStateError' is generated when the HTMLCanvasElement is
+ in the 'placeholder' state.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('state', ['nocontext', 'placeholder-nocontext', 'placeholder-hascontext', 'valid'])
+ .beginSubcases()
+ .combine('copySize', [
+ { width: 0, height: 0, depthOrArrayLayers: 0 },
+ { width: 1, height: 1, depthOrArrayLayers: 1 },
+ ])
+ )
+ .fn(t => {
+ const { state, copySize } = t.params;
+ const canvas = createOnscreenCanvas(t, 1, 1);
+ if (typeof canvas.transferControlToOffscreen === 'undefined') {
+ t.skip("Browser doesn't support HTMLCanvasElement.transferControlToOffscreen");
+ return;
+ }
+
+ const dstTexture = t.device.createTexture({
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ format: 'bgra8unorm',
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ let exceptionName: string = '';
+
+ switch (state) {
+ case 'nocontext': {
+ exceptionName = 'OperationError';
+ break;
+ }
+ case 'placeholder-nocontext': {
+ canvas.transferControlToOffscreen();
+ exceptionName = 'InvalidStateError';
+ break;
+ }
+ case 'placeholder-hascontext': {
+ const offscreenCanvas = canvas.transferControlToOffscreen();
+ t.tryTrackForCleanup(offscreenCanvas.getContext('webgl'));
+ exceptionName = 'InvalidStateError';
+ break;
+ }
+ case 'valid': {
+ assert(canvas.getContext('2d') !== null);
+ break;
+ }
+ default:
+ unreachable();
+ }
+
+ t.runTest(
+ { source: canvas },
+ { texture: dstTexture },
+ copySize,
+ true, // No validation errors.
+ exceptionName
+ );
+ });
+
+g.test('source_offscreenCanvas,state')
+ .desc(
+ `
+ Test OffscreenCanvas as the source image in state
+ ['nocontext', 'detached-nocontext', 'detached-hascontext', 'valid'].
+
+ 'nocontext' means an OffscreenCanvas without any context is used as the copy source.
+
+ Transferring an OffscreenCanvas through a MessageChannel detaches it.
+
+ Check whether an 'OperationError' is generated when the OffscreenCanvas has no
+ context.
+
+ Check whether an 'InvalidStateError' is generated when the OffscreenCanvas is
+ detached.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('state', ['nocontext', 'detached-nocontext', 'detached-hascontext', 'valid'])
+ .beginSubcases()
+ .combine('getContextInOffscreenCanvas', [false, true])
+ .combine('copySize', [
+ { width: 0, height: 0, depthOrArrayLayers: 0 },
+ { width: 1, height: 1, depthOrArrayLayers: 1 },
+ ])
+ )
+ .fn(async t => {
+ const { state, copySize } = t.params;
+ const offscreenCanvas = createOffscreenCanvas(t, 1, 1);
+ const dstTexture = t.device.createTexture({
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ format: 'bgra8unorm',
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ let exceptionName: string = '';
+ switch (state) {
+ case 'nocontext': {
+ exceptionName = 'OperationError';
+ break;
+ }
+ case 'detached-nocontext': {
+ const messageChannel = new MessageChannel();
+ messageChannel.port1.postMessage(offscreenCanvas, [offscreenCanvas]);
+
+ exceptionName = 'InvalidStateError';
+ break;
+ }
+ case 'detached-hascontext': {
+ const messageChannel = new MessageChannel();
+ const port2FirstMessage = new Promise(resolve => {
+ messageChannel.port2.onmessage = m => resolve(m);
+ });
+
+ messageChannel.port1.postMessage(offscreenCanvas, [offscreenCanvas]);
+
+ const receivedOffscreenCanvas = (await port2FirstMessage) as MessageEvent;
+ t.tryTrackForCleanup(receivedOffscreenCanvas.data.getContext('webgl'));
+
+ exceptionName = 'InvalidStateError';
+ break;
+ }
+ case 'valid': {
+ offscreenCanvas.getContext('webgl');
+ break;
+ }
+ default:
+ unreachable();
+ }
+
+ t.runTest(
+ { source: offscreenCanvas },
+ { texture: dstTexture },
+ copySize,
+ true, // No validation errors.
+ exceptionName
+ );
+ });
+
+g.test('destination_texture,state')
+ .desc(
+ `
+ Test dst texture is [valid, invalid, destroyed].
+
+ Check that an error is generated when texture is an error texture.
+ Check that an error is generated when texture is in destroyed state.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('state', kResourceStates)
+ .beginSubcases()
+ .combine('copySize', [
+ { width: 0, height: 0, depthOrArrayLayers: 0 },
+ { width: 1, height: 1, depthOrArrayLayers: 1 },
+ ])
+ )
+ .fn(async t => {
+ const { state, copySize } = t.params;
+ const imageBitmap = await t.createImageBitmap(t.getImageData(1, 1));
+ const dstTexture = t.createTextureWithState(state);
+
+ t.runTest({ source: imageBitmap }, { texture: dstTexture }, copySize, state === 'valid');
+ });
+
+g.test('destination_texture,device_mismatch')
+ .desc(
+ 'Tests copyExternalImageToTexture cannot be called with a destination texture created from another device'
+ )
+ .paramsSubcasesOnly(u => u.combine('mismatched', [true, false]))
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(async t => {
+ const { mismatched } = t.params;
+ const sourceDevice = mismatched ? t.mismatchedDevice : t.device;
+ const copySize = { width: 1, height: 1, depthOrArrayLayers: 1 };
+
+ const texture = sourceDevice.createTexture({
+ size: copySize,
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const imageBitmap = await t.createImageBitmap(t.getImageData(1, 1));
+
+ t.runTest({ source: imageBitmap }, { texture }, copySize, !mismatched);
+ });
+
+g.test('destination_texture,usage')
+ .desc(
+ `
+ Test dst texture usages
+
+ Check that an error is generated when texture is created without usage COPY_DST | RENDER_ATTACHMENT.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('usage', kTextureUsages)
+ .beginSubcases()
+ .combine('copySize', [
+ { width: 0, height: 0, depthOrArrayLayers: 0 },
+ { width: 1, height: 1, depthOrArrayLayers: 1 },
+ ])
+ )
+ .fn(async t => {
+ const { usage, copySize } = t.params;
+ const imageBitmap = await t.createImageBitmap(t.getImageData(1, 1));
+ const dstTexture = t.device.createTexture({
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ format: 'rgba8unorm',
+ usage,
+ });
+
+ t.runTest(
+ { source: imageBitmap },
+ { texture: dstTexture },
+ copySize,
+ !!(usage & GPUTextureUsage.COPY_DST && usage & GPUTextureUsage.RENDER_ATTACHMENT)
+ );
+ });
+
+g.test('destination_texture,sample_count')
+ .desc(
+ `
+ Test dst texture sample count.
+
+ Check that an error is generated when the sample count is not 1.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('sampleCount', [1, 4])
+ .beginSubcases()
+ .combine('copySize', [
+ { width: 0, height: 0, depthOrArrayLayers: 0 },
+ { width: 1, height: 1, depthOrArrayLayers: 1 },
+ ])
+ )
+ .fn(async t => {
+ const { sampleCount, copySize } = t.params;
+ const imageBitmap = await t.createImageBitmap(t.getImageData(1, 1));
+ const dstTexture = t.device.createTexture({
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ sampleCount,
+ format: 'bgra8unorm',
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ t.runTest({ source: imageBitmap }, { texture: dstTexture }, copySize, sampleCount === 1);
+ });
+
+g.test('destination_texture,mipLevel')
+ .desc(
+ `
+ Test dst mipLevel.
+
+ Check that an error is generated when mipLevel is too large.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('mipLevel', [0, kDefaultMipLevelCount - 1, kDefaultMipLevelCount])
+ .beginSubcases()
+ .combine('copySize', [
+ { width: 0, height: 0, depthOrArrayLayers: 0 },
+ { width: 1, height: 1, depthOrArrayLayers: 1 },
+ ])
+ )
+ .fn(async t => {
+ const { mipLevel, copySize } = t.params;
+ const imageBitmap = await t.createImageBitmap(t.getImageData(1, 1));
+ const dstTexture = t.device.createTexture({
+ size: { width: kDefaultWidth, height: kDefaultHeight, depthOrArrayLayers: kDefaultDepth },
+ mipLevelCount: kDefaultMipLevelCount,
+ format: 'bgra8unorm',
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ t.runTest(
+ { source: imageBitmap },
+ { texture: dstTexture, mipLevel },
+ copySize,
+ mipLevel < kDefaultMipLevelCount
+ );
+ });
+
+g.test('destination_texture,format')
+ .desc(
+ `
+ Test dst texture format.
+
+ Check that an error is generated when texture format is not valid.
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kTextureFormats)
+ .beginSubcases()
+ .combine('copySize', [
+ { width: 0, height: 0, depthOrArrayLayers: 0 },
+ { width: 1, height: 1, depthOrArrayLayers: 1 },
+ ])
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ t.skipIfTextureFormatNotSupported(format);
+ t.selectDeviceOrSkipTestCase(kTextureFormatInfo[format].feature);
+ })
+ .fn(async t => {
+ const { format, copySize } = t.params;
+
+ const imageBitmap = await t.createImageBitmap(t.getImageData(1, 1));
+
+ // createTexture with all possible texture format may have validation error when using
+ // compressed texture format.
+ t.device.pushErrorScope('validation');
+ const dstTexture = t.device.createTexture({
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ format,
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ void t.device.popErrorScope();
+
+ const success = (kValidTextureFormatsForCopyE2T as readonly string[]).includes(format);
+
+ t.runTest({ source: imageBitmap }, { texture: dstTexture }, copySize, success);
+ });
+
+g.test('OOB,source')
+ .desc(
+ `
+ Test source image origin and copy size
+
+ Check that an error is generated when source.externalImage.origin + copySize is too large.
+ `
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('srcOrigin', [
+ { x: 0, y: 0 }, // origin is on top-left
+ { x: kDefaultWidth - 1, y: 0 }, // x near the border
+ { x: 0, y: kDefaultHeight - 1 }, // y is near the border
+ { x: kDefaultWidth, y: kDefaultHeight }, // origin is on bottom-right
+ { x: kDefaultWidth + 1, y: 0 }, // x is too large
+ { x: 0, y: kDefaultHeight + 1 }, // y is too large
+ ])
+ .expand('copySize', generateCopySizeForSrcOOB)
+ )
+ .fn(async t => {
+ const { srcOrigin, copySize } = t.params;
+ const imageBitmap = await t.createImageBitmap(t.getImageData(kDefaultWidth, kDefaultHeight));
+ const dstTexture = t.device.createTexture({
+ size: {
+ width: kDefaultWidth + 1,
+ height: kDefaultHeight + 1,
+ depthOrArrayLayers: kDefaultDepth,
+ },
+ mipLevelCount: kDefaultMipLevelCount,
+ format: 'bgra8unorm',
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ let success = true;
+
+ if (
+ srcOrigin.x + copySize.width > kDefaultWidth ||
+ srcOrigin.y + copySize.height > kDefaultHeight ||
+ copySize.depthOrArrayLayers > 1
+ ) {
+ success = false;
+ }
+
+ t.runTest(
+ { source: imageBitmap, origin: srcOrigin },
+ { texture: dstTexture },
+ copySize,
+ true,
+ success ? '' : 'OperationError'
+ );
+ });
+
+g.test('OOB,destination')
+ .desc(
+ `
+ Test dst texture copy origin and copy size
+
+ Check that an error is generated when destination.texture.origin + copySize is too large.
+ Check that an 'OperationError' is generated when copySize.depthOrArrayLayers is larger than 1.
+ `
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('mipLevel', [0, 1, kDefaultMipLevelCount - 2])
+ .expand('dstOrigin', generateDstOriginValue)
+ .expand('copySize', generateCopySizeForDstOOB)
+ )
+ .fn(async t => {
+ const { mipLevel, dstOrigin, copySize } = t.params;
+
+ const imageBitmap = await t.createImageBitmap(
+ t.getImageData(kDefaultWidth + 1, kDefaultHeight + 1)
+ );
+ const dstTexture = t.device.createTexture({
+ size: {
+ width: kDefaultWidth,
+ height: kDefaultHeight,
+ depthOrArrayLayers: kDefaultDepth,
+ },
+ format: 'bgra8unorm',
+ mipLevelCount: kDefaultMipLevelCount,
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ let success = true;
+ let hasOperationError = false;
+ const dstMipMapSize = computeMipMapSize(kDefaultWidth, kDefaultHeight, mipLevel);
+
+ if (
+ copySize.depthOrArrayLayers > 1 ||
+ dstOrigin.x + copySize.width > dstMipMapSize.mipWidth ||
+ dstOrigin.y + copySize.height > dstMipMapSize.mipHeight ||
+ dstOrigin.z + copySize.depthOrArrayLayers > kDefaultDepth
+ ) {
+ success = false;
+ }
+ if (copySize.depthOrArrayLayers > 1) {
+ hasOperationError = true;
+ }
+
+ t.runTest(
+ { source: imageBitmap },
+ {
+ texture: dstTexture,
+ mipLevel,
+ origin: dstOrigin,
+ },
+ copySize,
+ success,
+ hasOperationError ? 'OperationError' : ''
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/destroyed/buffer.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/destroyed/buffer.spec.ts
new file mode 100644
index 0000000000..39fcfe4e0d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/destroyed/buffer.spec.ts
@@ -0,0 +1,296 @@
+export const description = `
+Tests using a destroyed buffer on a queue.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { ValidationTest } from '../../validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('writeBuffer')
+ .desc(
+ `
+Tests that using a destroyed buffer in writeBuffer fails.
+- x= {destroyed, not destroyed (control case)}
+ `
+ )
+ .paramsSubcasesOnly(u => u.combine('destroyed', [false, true] as const))
+ .fn(t => {
+ const { destroyed } = t.params;
+ const buffer = t.trackForCleanup(
+ t.device.createBuffer({
+ size: 4,
+ usage: GPUBufferUsage.COPY_DST,
+ })
+ );
+
+ if (destroyed) {
+ buffer.destroy();
+ }
+
+ t.expectValidationError(() => t.queue.writeBuffer(buffer, 0, new Uint8Array(4)), destroyed);
+ });
+
+g.test('copyBufferToBuffer')
+ .desc(
+ `
+Tests that using a destroyed buffer in copyBufferToBuffer fails.
+- x= {not destroyed (control case), src destroyed, dst destroyed}
+ `
+ )
+ .paramsSubcasesOnly(u => u.combine('destroyed', ['none', 'src', 'dst', 'both'] as const))
+ .fn(t => {
+ const src = t.trackForCleanup(
+ t.device.createBuffer({ size: 4, usage: GPUBufferUsage.COPY_SRC })
+ );
+ const dst = t.trackForCleanup(
+ t.device.createBuffer({ size: 4, usage: GPUBufferUsage.COPY_DST })
+ );
+
+ const encoder = t.device.createCommandEncoder();
+ encoder.copyBufferToBuffer(src, 0, dst, 0, dst.size);
+ const commandBuffer = encoder.finish();
+
+ let shouldError = true;
+ switch (t.params.destroyed) {
+ case 'none':
+ shouldError = false;
+ break;
+ case 'src':
+ src.destroy();
+ break;
+ case 'dst':
+ dst.destroy();
+ break;
+ case 'both':
+ src.destroy();
+ dst.destroy();
+ break;
+ }
+
+ t.expectValidationError(() => {
+ t.queue.submit([commandBuffer]);
+ }, shouldError);
+ });
+
+g.test('copyBufferToTexture')
+ .desc(
+ `
+Tests that using a destroyed buffer in copyBufferToTexture fails.
+- x= {not destroyed (control case), src destroyed}
+ `
+ )
+ .paramsSubcasesOnly(u => u.combine('destroyed', [false, true] as const))
+ .fn(t => {
+ const { destroyed } = t.params;
+ const buffer = t.trackForCleanup(
+ t.device.createBuffer({ size: 4, usage: GPUBufferUsage.COPY_SRC })
+ );
+ const texture = t.trackForCleanup(
+ t.device.createTexture({
+ size: [1, 1, 1],
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_DST,
+ })
+ );
+
+ const encoder = t.device.createCommandEncoder();
+ encoder.copyBufferToTexture({ buffer }, { texture }, [1, 1, 1]);
+ const commandBuffer = encoder.finish();
+
+ if (destroyed) {
+ buffer.destroy();
+ }
+
+ t.expectValidationError(() => {
+ t.queue.submit([commandBuffer]);
+ }, destroyed);
+ });
+
+g.test('copyTextureToBuffer')
+ .desc(
+ `
+Tests that using a destroyed buffer in copyTextureToBuffer fails.
+- x= {not destroyed (control case), dst destroyed}
+ `
+ )
+ .paramsSubcasesOnly(u => u.combine('destroyed', [false, true] as const))
+ .fn(t => {
+ const { destroyed } = t.params;
+ const texture = t.trackForCleanup(
+ t.device.createTexture({
+ size: [1, 1, 1],
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_SRC,
+ })
+ );
+ const buffer = t.trackForCleanup(
+ t.device.createBuffer({ size: 4, usage: GPUBufferUsage.COPY_DST })
+ );
+
+ const encoder = t.device.createCommandEncoder();
+ encoder.copyTextureToBuffer({ texture }, { buffer }, [1, 1, 1]);
+ const commandBuffer = encoder.finish();
+
+ if (destroyed) {
+ buffer.destroy();
+ }
+
+ t.expectValidationError(() => {
+ t.queue.submit([commandBuffer]);
+ }, destroyed);
+ });
+
+g.test('setBindGroup')
+ .desc(
+ `
+Tests that using a destroyed buffer referenced by a bindGroup set with setBindGroup fails
+- x= {not destroyed (control case), destroyed}
+ `
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('destroyed', [false, true] as const)
+ .combine('encoderType', ['compute pass', 'render pass', 'render bundle'] as const)
+ )
+ .fn(t => {
+ const { destroyed, encoderType } = t.params;
+ const { device } = t;
+ const buffer = t.trackForCleanup(
+ t.device.createBuffer({
+ size: 4,
+ usage: GPUBufferUsage.UNIFORM,
+ })
+ );
+
+ const layout = device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE | GPUShaderStage.VERTEX,
+ buffer: {},
+ },
+ ],
+ });
+
+ const bindGroup = device.createBindGroup({
+ layout,
+ entries: [{ binding: 0, resource: { buffer } }],
+ });
+
+ const { encoder, finish } = t.createEncoder(encoderType);
+ encoder.setBindGroup(0, bindGroup);
+ const commandBuffer = finish();
+
+ if (destroyed) {
+ buffer.destroy();
+ }
+
+ t.expectValidationError(() => {
+ t.queue.submit([commandBuffer]);
+ }, destroyed);
+ });
+
+g.test('setVertexBuffer')
+ .desc(
+ `
+Tests that using a destroyed buffer referenced in a render pass fails
+- x= {not destroyed (control case), destroyed}
+ `
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('destroyed', [false, true] as const)
+ .combine('encoderType', ['render pass', 'render bundle'] as const)
+ )
+ .fn(t => {
+ const { destroyed, encoderType } = t.params;
+ const vertexBuffer = t.trackForCleanup(
+ t.device.createBuffer({
+ size: 4,
+ usage: GPUBufferUsage.VERTEX,
+ })
+ );
+
+ const { encoder, finish } = t.createEncoder(encoderType);
+ encoder.setVertexBuffer(0, vertexBuffer);
+ const commandBuffer = finish();
+
+ if (destroyed) {
+ vertexBuffer.destroy();
+ }
+
+ t.expectValidationError(() => {
+ t.queue.submit([commandBuffer]);
+ }, destroyed);
+ });
+
+g.test('setIndexBuffer')
+ .desc(
+ `
+Tests that using a destroyed buffer referenced in a render pass fails
+- x= {not destroyed (control case), destroyed}
+ `
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('destroyed', [false, true] as const)
+ .combine('encoderType', ['render pass', 'render bundle'] as const)
+ )
+ .fn(t => {
+ const { destroyed, encoderType } = t.params;
+ const indexBuffer = t.trackForCleanup(
+ t.device.createBuffer({
+ size: 4,
+ usage: GPUBufferUsage.INDEX,
+ })
+ );
+
+ const { encoder, finish } = t.createEncoder(encoderType);
+ encoder.setIndexBuffer(indexBuffer, 'uint16');
+ const commandBuffer = finish();
+
+ if (destroyed) {
+ indexBuffer.destroy();
+ }
+
+ t.expectValidationError(() => {
+ t.queue.submit([commandBuffer]);
+ }, destroyed);
+ });
+
+g.test('resolveQuerySet')
+ .desc(
+ `
+Tests that using a destroyed buffer referenced via resolveQuerySet fails
+- x= {not destroyed (control case), destroyed}
+ `
+ )
+ .paramsSubcasesOnly(u => u.combine('destroyed', [false, true] as const))
+ .fn(t => {
+ const { destroyed } = t.params;
+ const querySet = t.trackForCleanup(
+ t.device.createQuerySet({
+ type: 'occlusion',
+ count: 1,
+ })
+ );
+ const querySetBuffer = t.trackForCleanup(
+ t.device.createBuffer({
+ size: 8,
+ usage: GPUBufferUsage.QUERY_RESOLVE,
+ })
+ );
+
+ const encoder = t.device.createCommandEncoder();
+ encoder.resolveQuerySet(querySet, 0, 1, querySetBuffer, 0);
+ const commandBuffer = encoder.finish();
+
+ if (destroyed) {
+ querySetBuffer.destroy();
+ }
+
+ t.expectValidationError(() => {
+ t.queue.submit([commandBuffer]);
+ }, destroyed);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/destroyed/query_set.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/destroyed/query_set.spec.ts
new file mode 100644
index 0000000000..1d8adab7e8
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/destroyed/query_set.spec.ts
@@ -0,0 +1,63 @@
+export const description = `
+Tests using a destroyed query set on a queue.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { ValidationTest } from '../../validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('beginOcclusionQuery')
+ .desc(
+ `
+Tests that using a destroyed query set for an occlusion query on a render pass encoder fails.
+- x= {destroyed, not destroyed (control case)}
+ `
+ )
+ .paramsSubcasesOnly(u => u.combine('querySetState', ['valid', 'destroyed'] as const))
+ .fn(t => {
+ const occlusionQuerySet = t.createQuerySetWithState(t.params.querySetState);
+
+ const encoder = t.createEncoder('render pass', { occlusionQuerySet });
+ encoder.encoder.beginOcclusionQuery(0);
+ encoder.encoder.endOcclusionQuery();
+ encoder.validateFinishAndSubmitGivenState(t.params.querySetState);
+ });
+
+g.test('writeTimestamp')
+ .desc(
+ `
+Tests that using a destroyed query set in writeTimestamp on a {non-pass, compute, render} encoder fails.
+- x= {destroyed, not destroyed (control case)}
+ `
+ )
+ .params(u => u.beginSubcases().combine('querySetState', ['valid', 'destroyed'] as const))
+ .beforeAllSubcases(t => t.selectDeviceOrSkipTestCase('timestamp-query'))
+ .fn(t => {
+ const querySet = t.createQuerySetWithState(t.params.querySetState, {
+ type: 'timestamp',
+ count: 2,
+ });
+
+ const encoder = t.createEncoder('non-pass');
+ encoder.encoder.writeTimestamp(querySet, 0);
+ encoder.validateFinishAndSubmitGivenState(t.params.querySetState);
+ });
+
+g.test('resolveQuerySet')
+ .desc(
+ `
+Tests that using a destroyed query set in resolveQuerySet fails.
+- x= {destroyed, not destroyed (control case)}
+ `
+ )
+ .paramsSubcasesOnly(u => u.combine('querySetState', ['valid', 'destroyed'] as const))
+ .fn(t => {
+ const querySet = t.createQuerySetWithState(t.params.querySetState);
+
+ const buffer = t.device.createBuffer({ size: 8, usage: GPUBufferUsage.QUERY_RESOLVE });
+
+ const encoder = t.createEncoder('non-pass');
+ encoder.encoder.resolveQuerySet(querySet, 0, 1, buffer, 0);
+ encoder.validateFinishAndSubmitGivenState(t.params.querySetState);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/destroyed/texture.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/destroyed/texture.spec.ts
new file mode 100644
index 0000000000..42036bd881
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/destroyed/texture.spec.ts
@@ -0,0 +1,294 @@
+export const description = `
+Tests using a destroyed texture on a queue.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { unreachable } from '../../../../../common/util/util.js';
+import { ValidationTest } from '../../validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('writeTexture')
+ .desc(
+ `
+Tests that using a destroyed texture in writeTexture fails.
+- x= {destroyed, not destroyed (control case)}
+ `
+ )
+ .paramsSubcasesOnly(u => u.combine('destroyed', [false, true] as const))
+ .fn(t => {
+ const { destroyed } = t.params;
+ const texture = t.trackForCleanup(
+ t.device.createTexture({
+ size: [1, 1, 1],
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_DST,
+ })
+ );
+
+ if (destroyed) {
+ texture.destroy();
+ }
+
+ t.expectValidationError(
+ () => t.queue.writeTexture({ texture }, new Uint8Array(4), { bytesPerRow: 4 }, [1, 1, 1]),
+ destroyed
+ );
+ });
+
+g.test('copyTextureToTexture')
+ .desc(
+ `
+Tests that using a destroyed texture in copyTextureToTexture fails.
+- x= {not destroyed (control case), src destroyed, dst destroyed}
+ `
+ )
+ .paramsSubcasesOnly(u => u.combine('destroyed', ['none', 'src', 'dst', 'both'] as const))
+ .fn(t => {
+ const src = t.trackForCleanup(
+ t.device.createTexture({
+ size: [1, 1, 1],
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_SRC,
+ })
+ );
+ const dst = t.trackForCleanup(
+ t.device.createTexture({
+ size: [1, 1, 1],
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_DST,
+ })
+ );
+
+ const encoder = t.device.createCommandEncoder();
+ encoder.copyTextureToTexture({ texture: src }, { texture: dst }, [1, 1, 1]);
+ const commandBuffer = encoder.finish();
+
+ let shouldError = true;
+ switch (t.params.destroyed) {
+ case 'none':
+ shouldError = false;
+ break;
+ case 'src':
+ src.destroy();
+ break;
+ case 'dst':
+ dst.destroy();
+ break;
+ case 'both':
+ src.destroy();
+ dst.destroy();
+ break;
+ }
+
+ t.expectValidationError(() => {
+ t.queue.submit([commandBuffer]);
+ }, shouldError);
+ });
+
+g.test('copyBufferToTexture')
+ .desc(
+ `
+Tests that using a destroyed texture in copyBufferToTexture fails.
+- x= {not destroyed (control case), dst destroyed}
+ `
+ )
+ .paramsSubcasesOnly(u => u.combine('destroyed', [false, true] as const))
+ .fn(t => {
+ const { destroyed } = t.params;
+ const buffer = t.trackForCleanup(
+ t.device.createBuffer({ size: 4, usage: GPUBufferUsage.COPY_SRC })
+ );
+ const texture = t.trackForCleanup(
+ t.device.createTexture({
+ size: [1, 1, 1],
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_DST,
+ })
+ );
+
+ const encoder = t.device.createCommandEncoder();
+ encoder.copyBufferToTexture({ buffer }, { texture }, [1, 1, 1]);
+ const commandBuffer = encoder.finish();
+
+ if (destroyed) {
+ texture.destroy();
+ }
+
+ t.expectValidationError(() => {
+ t.queue.submit([commandBuffer]);
+ }, destroyed);
+ });
+
+g.test('copyTextureToBuffer')
+ .desc(
+ `
+Tests that using a destroyed texture in copyTextureToBuffer fails.
+- x= {not destroyed (control case), src destroyed}
+ `
+ )
+ .paramsSubcasesOnly(u => u.combine('destroyed', [false, true] as const))
+ .fn(t => {
+ const { destroyed } = t.params;
+ const texture = t.trackForCleanup(
+ t.device.createTexture({
+ size: [1, 1, 1],
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_SRC,
+ })
+ );
+ const buffer = t.trackForCleanup(
+ t.device.createBuffer({ size: 4, usage: GPUBufferUsage.COPY_DST })
+ );
+
+ const encoder = t.device.createCommandEncoder();
+ encoder.copyTextureToBuffer({ texture }, { buffer }, [1, 1, 1]);
+ const commandBuffer = encoder.finish();
+
+ if (destroyed) {
+ texture.destroy();
+ }
+
+ t.expectValidationError(() => {
+ t.queue.submit([commandBuffer]);
+ }, destroyed);
+ });
+
+g.test('setBindGroup')
+ .desc(
+ `
+Tests that using a destroyed texture referenced by a bindGroup set with setBindGroup fails
+- x= {not destroyed (control case), destroyed}
+ `
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('destroyed', [false, true] as const)
+ .combine('encoderType', ['compute pass', 'render pass', 'render bundle'] as const)
+ )
+ .fn(t => {
+ const { destroyed, encoderType } = t.params;
+ const { device } = t;
+ const texture = t.trackForCleanup(
+ t.device.createTexture({
+ size: [1, 1, 1],
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ })
+ );
+
+ const layout = device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE,
+ texture: {},
+ },
+ ],
+ });
+
+ const bindGroup = device.createBindGroup({
+ layout,
+ entries: [{ binding: 0, resource: texture.createView() }],
+ });
+
+ const { encoder, finish } = t.createEncoder(encoderType);
+ encoder.setBindGroup(0, bindGroup);
+ const commandBuffer = finish();
+
+ if (destroyed) {
+ texture.destroy();
+ }
+
+ t.expectValidationError(() => {
+ t.queue.submit([commandBuffer]);
+ }, destroyed);
+ });
+
+g.test('beginRenderPass')
+ .desc(
+ `
+Tests that using a destroyed texture referenced by a render pass fails
+- x= {not destroyed (control case), colorAttachment destroyed, depthAttachment destroyed, resolveTarget destroyed}
+ `
+ )
+ .paramsSubcasesOnly(u =>
+ u.combine('textureToDestroy', [
+ 'none',
+ 'colorAttachment',
+ 'resolveAttachment',
+ 'depthStencilAttachment',
+ ])
+ )
+ .fn(t => {
+ const { textureToDestroy } = t.params;
+ const { device } = t;
+
+ const colorAttachment = t.trackForCleanup(
+ t.device.createTexture({
+ size: [1, 1, 1],
+ format: 'rgba8unorm',
+ sampleCount: 4,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ })
+ );
+
+ const resolveAttachment = t.trackForCleanup(
+ t.device.createTexture({
+ size: [1, 1, 1],
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ })
+ );
+
+ const depthStencilAttachment = t.trackForCleanup(
+ t.device.createTexture({
+ size: [1, 1, 1],
+ format: 'depth32float',
+ sampleCount: 4,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ })
+ );
+
+ const encoder = device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorAttachment.createView(),
+ resolveTarget: resolveAttachment.createView(),
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ depthStencilAttachment: {
+ view: depthStencilAttachment.createView(),
+ depthClearValue: 0,
+ depthLoadOp: 'clear',
+ depthStoreOp: 'store',
+ },
+ });
+ pass.end();
+ const commandBuffer = encoder.finish();
+
+ switch (textureToDestroy) {
+ case 'none':
+ break;
+ case 'colorAttachment':
+ colorAttachment.destroy();
+ break;
+ case 'resolveAttachment':
+ resolveAttachment.destroy();
+ break;
+ case 'depthStencilAttachment':
+ depthStencilAttachment.destroy();
+ break;
+ default:
+ unreachable();
+ }
+
+ const shouldError = textureToDestroy !== 'none';
+
+ t.expectValidationError(() => {
+ t.queue.submit([commandBuffer]);
+ }, shouldError);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/submit.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/submit.spec.ts
new file mode 100644
index 0000000000..599e2d557b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/submit.spec.ts
@@ -0,0 +1,47 @@
+export const description = `
+Tests submit validation.
+
+Note: destroyed buffer/texture/querySet are tested in destroyed/. (unless it gets moved here)
+Note: buffer map state is tested in ./buffer_mapped.spec.ts.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { ValidationTest } from '../validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('command_buffer,device_mismatch')
+ .desc(
+ `
+ Tests submit cannot be called with command buffers created from another device
+ Test with two command buffers to make sure all command buffers can be validated:
+ - cb0 and cb1 from the same device
+ - one of cb0 and cb1 from a mismatched device
+ `
+ )
+ .paramsSubcasesOnly([
+ { cb0Mismatched: false, cb1Mismatched: false }, // control case
+ { cb0Mismatched: true, cb1Mismatched: false },
+ { cb0Mismatched: false, cb1Mismatched: true },
+ ])
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { cb0Mismatched, cb1Mismatched } = t.params;
+ const mismatched = cb0Mismatched || cb1Mismatched;
+
+ const encoder0 = cb0Mismatched
+ ? t.mismatchedDevice.createCommandEncoder()
+ : t.device.createCommandEncoder();
+ const cb0 = encoder0.finish();
+
+ const encoder1 = cb1Mismatched
+ ? t.mismatchedDevice.createCommandEncoder()
+ : t.device.createCommandEncoder();
+ const cb1 = encoder1.finish();
+
+ t.expectValidationError(() => {
+ t.device.queue.submit([cb0, cb1]);
+ }, mismatched);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/writeBuffer.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/writeBuffer.spec.ts
new file mode 100644
index 0000000000..2871b4bad9
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/writeBuffer.spec.ts
@@ -0,0 +1,200 @@
+export const description = `
+Tests writeBuffer validation.
+
+Note: buffer map state is tested in ./buffer_mapped.spec.ts.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import {
+ kTypedArrayBufferViewConstructors,
+ TypedArrayBufferView,
+ TypedArrayBufferViewConstructor,
+} from '../../../../common/util/util.js';
+import { Float16Array } from '../../../../external/petamoriken/float16/float16.js';
+import { GPUConst } from '../../../constants.js';
+import { kResourceStates } from '../../../gpu_test.js';
+import { ValidationTest } from '../validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('buffer_state')
+ .desc(
+ `
+ Test that the buffer used for GPUQueue.writeBuffer() must be valid. Tests calling writeBuffer
+ with {valid, invalid, destroyed} buffer.
+ `
+ )
+ .params(u => u.combine('bufferState', kResourceStates))
+ .fn(t => {
+ const { bufferState } = t.params;
+ const buffer = t.createBufferWithState(bufferState, {
+ size: 16,
+ usage: GPUBufferUsage.COPY_DST,
+ });
+ const data = new Uint8Array(16);
+ const _valid = bufferState === 'valid';
+
+ t.expectValidationError(() => {
+ t.device.queue.writeBuffer(buffer, 0, data, 0, data.length);
+ }, !_valid);
+ });
+
+g.test('ranges')
+ .desc(
+ `
+ Tests that the data ranges given to GPUQueue.writeBuffer() are properly validated. Tests calling
+ writeBuffer with both TypedArrays and ArrayBuffers and checks that the data offset and size are
+ interpreted correctly for both.
+ - When passing a TypedArray the data offset and size is given in elements.
+ - When passing an ArrayBuffer the data offset and size is given in bytes.
+
+ Also verifies that the specified data range:
+ - Describes a valid range of the destination buffer and source buffer.
+ - Fits fully within the destination buffer.
+ - Has a byte size which is a multiple of 4.
+ `
+ )
+ .fn(t => {
+ const queue = t.device.queue;
+
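+ // When `testBuffer` is true, the raw ArrayBuffers behind the typed arrays are passed to
+ // writeBuffer, so dataOffset/size are interpreted in bytes; otherwise the typed-array views are
+ // passed and dataOffset/size are interpreted in elements of `arrayType`.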
+ function runTest(arrayType: TypedArrayBufferViewConstructor, testBuffer: boolean) {
+ const elementSize = arrayType.BYTES_PER_ELEMENT;
+ const bufferSize = 16 * elementSize;
+ const buffer = t.device.createBuffer({
+ size: bufferSize,
+ usage: GPUBufferUsage.COPY_DST,
+ });
+ const arraySm: TypedArrayBufferView | ArrayBuffer = testBuffer
+ ? new arrayType(8).buffer
+ : new arrayType(8);
+ const arrayMd: TypedArrayBufferView | ArrayBuffer = testBuffer
+ ? new arrayType(16).buffer
+ : new arrayType(16);
+ const arrayLg: TypedArrayBufferView | ArrayBuffer = testBuffer
+ ? new arrayType(32).buffer
+ : new arrayType(32);
+
+ if (elementSize < 4) {
+ const array15: TypedArrayBufferView | ArrayBuffer = testBuffer
+ ? new arrayType(15).buffer
+ : new arrayType(15);
+
+ // Writing the full buffer that isn't 4-byte aligned.
+ t.shouldThrow('OperationError', () => queue.writeBuffer(buffer, 0, array15));
+
+ // Writing from an offset that causes source to be 4-byte aligned.
+ queue.writeBuffer(buffer, 0, array15, 3);
+
+ // Writing from an offset that causes the source to not be 4-byte aligned.
+ t.shouldThrow('OperationError', () => queue.writeBuffer(buffer, 0, arrayMd, 3));
+
+ // Writing with a size that is not 4-byte aligned.
+ t.shouldThrow('OperationError', () => queue.writeBuffer(buffer, 0, arraySm, 0, 7));
+ }
+
+ // Writing the full buffer without offsets.
+ queue.writeBuffer(buffer, 0, arraySm);
+ queue.writeBuffer(buffer, 0, arrayMd);
+ t.expectValidationError(() => queue.writeBuffer(buffer, 0, arrayLg));
+
+ // Writing the full buffer with a 4-byte aligned offset.
+ queue.writeBuffer(buffer, 8, arraySm);
+ t.expectValidationError(() => queue.writeBuffer(buffer, 8, arrayMd));
+
+ // Writing the full buffer with an unaligned offset.
+ t.expectValidationError(() => queue.writeBuffer(buffer, 3, arraySm));
+
+ // Writing remainder of buffer from offset.
+ queue.writeBuffer(buffer, 0, arraySm, 4);
+ queue.writeBuffer(buffer, 0, arrayMd, 4);
+ t.expectValidationError(() => queue.writeBuffer(buffer, 0, arrayLg, 4));
+
+ // Writing a larger buffer from an offset that allows it to fit in the destination.
+ queue.writeBuffer(buffer, 0, arrayLg, 16);
+
+ // Writing with both an offset and size.
+ queue.writeBuffer(buffer, 0, arraySm, 4, 4);
+
+ // Writing with a size that extends past the source buffer length.
+ t.shouldThrow('OperationError', () => queue.writeBuffer(buffer, 0, arraySm, 0, 16));
+ t.shouldThrow('OperationError', () => queue.writeBuffer(buffer, 0, arraySm, 4, 8));
+
+ // Writing with a size that is 4-byte aligned but an offset that is not.
+ queue.writeBuffer(buffer, 0, arraySm, 3, 4);
+
+ // Writing zero bytes at the end of the buffer.
+ queue.writeBuffer(buffer, bufferSize, arraySm, 0, 0);
+
+ // Writing with a buffer offset that is out of range of buffer size.
+ t.expectValidationError(() => queue.writeBuffer(buffer, bufferSize + 4, arraySm, 0, 0));
+
+ // Writing zero bytes from the end of the data.
+ queue.writeBuffer(buffer, 0, arraySm, 8, 0);
+
+ // Writing with a data offset that is out of range of data size.
+ t.shouldThrow('OperationError', () => queue.writeBuffer(buffer, 0, arraySm, 9, 0));
+
+ // Writing with a data offset that is out of range of data size with implicit copy size.
+ t.shouldThrow('OperationError', () => queue.writeBuffer(buffer, 0, arraySm, 9, undefined));
+
+ // A data offset of undefined should be treated as 0.
+ queue.writeBuffer(buffer, 0, arraySm, undefined, 8);
+ t.shouldThrow('OperationError', () => queue.writeBuffer(buffer, 0, arraySm, undefined, 12));
+ }
+
+ runTest(Uint8Array, true);
+
+ for (const arrayType of kTypedArrayBufferViewConstructors) {
+ if (arrayType === Float16Array) {
+ // Skip Float16Array since it is supplied by an external module, so there isn't an overload for it.
+ continue;
+ }
+ runTest(arrayType, false);
+ }
+ });
+
+g.test('usages')
+ .desc(
+ `
+ Tests calling writeBuffer with a buffer missing the COPY_DST usage.
+ - buffer {with, without} COPY_DST usage
+ `
+ )
+ .paramsSubcasesOnly([
+ { usage: GPUConst.BufferUsage.COPY_DST, _valid: true }, // control case
+ { usage: GPUConst.BufferUsage.STORAGE, _valid: false }, // without COPY_DST usage
+ { usage: GPUConst.BufferUsage.STORAGE | GPUConst.BufferUsage.COPY_SRC, _valid: false }, // with other usage
+ { usage: GPUConst.BufferUsage.STORAGE | GPUConst.BufferUsage.COPY_DST, _valid: true }, // with COPY_DST usage
+ ])
+ .fn(t => {
+ const { usage, _valid } = t.params;
+ const buffer = t.device.createBuffer({ size: 16, usage });
+ const data = new Uint8Array(16);
+
+ t.expectValidationError(() => {
+ t.device.queue.writeBuffer(buffer, 0, data, 0, data.length);
+ }, !_valid);
+ });
+
+g.test('buffer,device_mismatch')
+ .desc('Tests writeBuffer cannot be called with a buffer created from another device.')
+ .paramsSubcasesOnly(u => u.combine('mismatched', [true, false]))
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { mismatched } = t.params;
+ const sourceDevice = mismatched ? t.mismatchedDevice : t.device;
+
+ const buffer = sourceDevice.createBuffer({
+ size: 16,
+ usage: GPUBufferUsage.COPY_DST,
+ });
+ t.trackForCleanup(buffer);
+
+ const data = new Uint8Array(16);
+
+ t.expectValidationError(() => {
+ t.device.queue.writeBuffer(buffer, 0, data, 0, data.length);
+ }, mismatched);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/writeTexture.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/writeTexture.spec.ts
new file mode 100644
index 0000000000..6cabfadebd
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/queue/writeTexture.spec.ts
@@ -0,0 +1,110 @@
+export const description = `Tests writeTexture validation.`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUConst } from '../../../constants.js';
+import { kResourceStates } from '../../../gpu_test.js';
+import { ValidationTest } from '../validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('texture_state')
+ .desc(
+ `
+ Test that the texture used for GPUQueue.writeTexture() must be valid. Tests calling writeTexture
+ with {valid, invalid, destroyed} texture.
+ `
+ )
+ .params(u => u.combine('textureState', kResourceStates))
+ .fn(t => {
+ const { textureState } = t.params;
+ const texture = t.createTextureWithState(textureState);
+ const data = new Uint8Array(16);
+ const size = [1, 1];
+
+ const isValid = textureState === 'valid';
+
+ t.expectValidationError(() => {
+ t.device.queue.writeTexture({ texture }, data, {}, size);
+ }, !isValid);
+ });
+
+g.test('usages')
+ .desc(
+ `
+ Tests calling writeTexture with a texture missing the COPY_DST usage.
+ - texture {with, without} COPY_DST usage
+ `
+ )
+ .paramsSubcasesOnly([
+ { usage: GPUConst.TextureUsage.COPY_DST }, // control case
+ { usage: GPUConst.TextureUsage.STORAGE_BINDING },
+ { usage: GPUConst.TextureUsage.STORAGE_BINDING | GPUConst.TextureUsage.COPY_SRC },
+ { usage: GPUConst.TextureUsage.STORAGE_BINDING | GPUConst.TextureUsage.COPY_DST },
+ ])
+ .fn(t => {
+ const { usage } = t.params;
+ const texture = t.device.createTexture({
+ size: { width: 16, height: 16 },
+ usage,
+ format: 'rgba8unorm' as const,
+ });
+ const data = new Uint8Array(16);
+ const size = [1, 1];
+
+ const isValid = usage & GPUConst.TextureUsage.COPY_DST ? true : false;
+ t.expectValidationError(() => {
+ t.device.queue.writeTexture({ texture }, data, {}, size);
+ }, !isValid);
+ });
+
+g.test('sample_count')
+ .desc(
+ `
+ Test the texture sample count. Check that a validation error is generated if the sample count is
+ not 1.
+ `
+ )
+ .params(u => u.combine('sampleCount', [1, 4]))
+ .fn(t => {
+ const { sampleCount } = t.params;
+ const texture = t.device.createTexture({
+ size: { width: 16, height: 16 },
+ sampleCount,
+ format: 'bgra8unorm',
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const data = new Uint8Array(16);
+ const size = [1, 1];
+
+ const isValid = sampleCount === 1;
+
+ t.expectValidationError(() => {
+ t.device.queue.writeTexture({ texture }, data, {}, size);
+ }, !isValid);
+ });
+
+g.test('texture,device_mismatch')
+ .desc('Tests writeTexture cannot be called with a texture created from another device.')
+ .paramsSubcasesOnly(u => u.combine('mismatched', [true, false]))
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { mismatched } = t.params;
+ const sourceDevice = mismatched ? t.mismatchedDevice : t.device;
+
+ const texture = sourceDevice.createTexture({
+ size: { width: 16, height: 16 },
+ format: 'bgra8unorm',
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ t.trackForCleanup(texture);
+
+ const data = new Uint8Array(16);
+ const size = [1, 1];
+
+ t.expectValidationError(() => {
+ t.device.queue.writeTexture({ texture }, data, {}, size);
+ }, mismatched);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pass/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pass/README.txt
new file mode 100644
index 0000000000..a5797c2b63
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pass/README.txt
@@ -0,0 +1 @@
+Render pass stuff other than commands (which are in encoding/cmds/).
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pass/attachment_compatibility.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pass/attachment_compatibility.spec.ts
new file mode 100644
index 0000000000..c0ab23b91c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pass/attachment_compatibility.spec.ts
@@ -0,0 +1,690 @@
+export const description = `
+Validation for attachment compatibility between render passes, bundles, and pipelines
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { range } from '../../../../common/util/util.js';
+import { kMaxColorAttachmentsToTest, kTextureSampleCounts } from '../../../capability_info.js';
+import {
+ kRegularTextureFormats,
+ kSizedDepthStencilFormats,
+ kUnsizedDepthStencilFormats,
+ kTextureFormatInfo,
+ filterFormatsByFeature,
+ getFeaturesForFormats,
+} from '../../../format_info.js';
+import { ValidationTest } from '../validation_test.js';
+
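+// Color attachment counts to test: 1..kMaxColorAttachmentsToTest (range() is zero-based, hence i + 1).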
+const kColorAttachmentCounts = range(kMaxColorAttachmentsToTest, i => i + 1);
+const kColorAttachments = kColorAttachmentCounts
+ .map(count => {
+    // generate cases with 0..1 null attachments at different locations
+ // e.g. count == 2
+ // [
+ // [1, 1],
+ // [0, 1],
+ // [1, 0],
+ // ]
+    // 0 (false) means a null attachment and 1 (true) means a non-null attachment at that slot
+
+    // Special cases: we need at least one color attachment when there is no depth stencil attachment
+ if (count === 1) {
+ return [[1]];
+ }
+ if (count === 2) {
+ return [
+ [1, 1],
+ [0, 1],
+ [1, 0],
+ ];
+ }
+
+    // [1, 1, ..., 1]: all color attachments are used
+ let result = [new Array<boolean>(count).fill(true)];
+
+ // [1, 0, 1, ..., 1]: generate cases with one null attachment at different locations
+ result = result.concat(
+ range(count, i => {
+ const r = new Array<boolean>(count).fill(true);
+ r[i] = false;
+ return r;
+ })
+ );
+
+ // [1, 0, 1, ..., 0, 1]: generate cases with two null attachments at different locations
+ // To reduce test run time, limit the attachment count to <= 4
+ if (count <= 4) {
+ result = result.concat(
+ range(count - 1, i => {
+ const cases = [] as boolean[][];
+ for (let j = i + 1; j < count; j++) {
+ const r = new Array<boolean>(count).fill(true);
+ r[i] = false;
+ r[j] = false;
+ cases.push(r);
+ }
+ return cases;
+ }).flat()
+ );
+ }
+
+ return result;
+ })
+ .flat() as boolean[][];
+
+const kDepthStencilAttachmentFormats = [
+ undefined,
+ ...kSizedDepthStencilFormats,
+ ...kUnsizedDepthStencilFormats,
+] as const;
+
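+// Optional features required by the depth/stencil formats above; tests use these to select a
+// compatible device (or skip the case).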
+const kFeaturesForDepthStencilAttachmentFormats = getFeaturesForFormats([
+ ...kSizedDepthStencilFormats,
+ ...kUnsizedDepthStencilFormats,
+]);
+
+class F extends ValidationTest {
+ createAttachmentTextureView(format: GPUTextureFormat, sampleCount?: number) {
+ return this.device
+ .createTexture({
+ // Size matching the "arbitrary" size used by ValidationTest helpers.
+ size: [16, 16, 1],
+ format,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ sampleCount,
+ })
+ .createView();
+ }
+
+ createColorAttachment(
+ format: GPUTextureFormat | null,
+ sampleCount?: number
+ ): GPURenderPassColorAttachment | null {
+ return format === null
+ ? null
+ : {
+ view: this.createAttachmentTextureView(format, sampleCount),
+ clearValue: [0, 0, 0, 0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ };
+ }
+
+ createDepthAttachment(
+ format: GPUTextureFormat,
+ sampleCount?: number
+ ): GPURenderPassDepthStencilAttachment {
+ const attachment: GPURenderPassDepthStencilAttachment = {
+ view: this.createAttachmentTextureView(format, sampleCount),
+ };
+ if (kTextureFormatInfo[format].depth) {
+ attachment.depthClearValue = 0;
+ attachment.depthLoadOp = 'clear';
+ attachment.depthStoreOp = 'discard';
+ }
+ if (kTextureFormatInfo[format].stencil) {
+ attachment.stencilClearValue = 1;
+ attachment.stencilLoadOp = 'clear';
+ attachment.stencilStoreOp = 'discard';
+ }
+ return attachment;
+ }
+
+ createRenderPipeline(
+ targets: Iterable<GPUColorTargetState | null>,
+ depthStencil?: GPUDepthStencilState,
+ sampleCount?: number,
+ cullMode?: GPUCullMode
+ ) {
+ return this.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: this.device.createShaderModule({
+ code: `
+ @vertex fn main() -> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, 0.0, 0.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: this.device.createShaderModule({
+ code: '@fragment fn main() {}',
+ }),
+ entryPoint: 'main',
+ targets,
+ },
+ primitive: { topology: 'triangle-list', cullMode },
+ depthStencil,
+ multisample: { count: sampleCount },
+ });
+ }
+}
+
+export const g = makeTestGroup(F);
+
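+// Regular formats that are renderable as color attachments.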
+const kColorAttachmentFormats = kRegularTextureFormats.filter(
+ format => !!kTextureFormatInfo[format].colorRender
+);
+
+g.test('render_pass_and_bundle,color_format')
+ .desc('Test that color attachment formats in render passes and bundles must match.')
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('passFormat', kColorAttachmentFormats)
+ .combine('bundleFormat', kColorAttachmentFormats)
+ )
+ .fn(t => {
+ const { passFormat, bundleFormat } = t.params;
+
+ t.skipIfTextureFormatNotSupported(passFormat, bundleFormat);
+
+ const bundleEncoder = t.device.createRenderBundleEncoder({
+ colorFormats: [bundleFormat],
+ });
+ const bundle = bundleEncoder.finish();
+
+ const { encoder, validateFinishAndSubmit } = t.createEncoder('non-pass');
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [t.createColorAttachment(passFormat)],
+ });
+ pass.executeBundles([bundle]);
+ pass.end();
+ validateFinishAndSubmit(passFormat === bundleFormat, true);
+ });
+
+g.test('render_pass_and_bundle,color_count')
+ .desc(
+ `
+ Test that the number of color attachments in render passes and bundles must match.
+ `
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('passCount', kColorAttachmentCounts)
+ .combine('bundleCount', kColorAttachmentCounts)
+ )
+ .fn(t => {
+ const { passCount, bundleCount } = t.params;
+
+ const { maxColorAttachments } = t.device.limits;
+ t.skipIf(
+ passCount > maxColorAttachments,
+ `passCount: ${passCount} > maxColorAttachments for device: ${maxColorAttachments}`
+ );
+ t.skipIf(
+ bundleCount > maxColorAttachments,
+ `bundleCount: ${bundleCount} > maxColorAttachments for device: ${maxColorAttachments}`
+ );
+
+ const bundleEncoder = t.device.createRenderBundleEncoder({
+ colorFormats: range(bundleCount, () => 'rgba8uint'),
+ });
+ const bundle = bundleEncoder.finish();
+
+ const { encoder, validateFinishAndSubmit } = t.createEncoder('non-pass');
+ const pass = encoder.beginRenderPass({
+ colorAttachments: range(passCount, () => t.createColorAttachment('rgba8uint')),
+ });
+ pass.executeBundles([bundle]);
+ pass.end();
+ validateFinishAndSubmit(passCount === bundleCount, true);
+ });
+
+g.test('render_pass_and_bundle,color_sparse')
+ .desc(
+ `
+  Test that each color attachment in render passes and bundles must match.
+ `
+ )
+ .params(u =>
+ u //
+ // introduce attachmentCount to make it easier to split the test
+ .combine('attachmentCount', kColorAttachmentCounts)
+ .beginSubcases()
+ .combine('passAttachments', kColorAttachments)
+ .combine('bundleAttachments', kColorAttachments)
+ .filter(
+ p =>
+ p.attachmentCount === p.passAttachments.length &&
+ p.attachmentCount === p.bundleAttachments.length
+ )
+ )
+ .fn(t => {
+ const { passAttachments, bundleAttachments } = t.params;
+
+ const { maxColorAttachments } = t.device.limits;
+ t.skipIf(
+ passAttachments.length > maxColorAttachments,
+ `num passAttachments: ${passAttachments.length} > maxColorAttachments for device: ${maxColorAttachments}`
+ );
+ t.skipIf(
+ bundleAttachments.length > maxColorAttachments,
+ `num bundleAttachments: ${bundleAttachments.length} > maxColorAttachments for device: ${maxColorAttachments}`
+ );
+
+ const colorFormats = bundleAttachments.map(i => (i ? 'rgba8uint' : null));
+ const bundleEncoder = t.device.createRenderBundleEncoder({
+ colorFormats,
+ });
+ const bundle = bundleEncoder.finish();
+
+ const { encoder, validateFinishAndSubmit } = t.createEncoder('non-pass');
+ const colorAttachments = passAttachments.map(i =>
+ t.createColorAttachment(i ? 'rgba8uint' : null)
+ );
+ const pass = encoder.beginRenderPass({
+ colorAttachments,
+ });
+ pass.executeBundles([bundle]);
+ pass.end();
+ validateFinishAndSubmit(
+ passAttachments.every((v, i) => v === bundleAttachments[i]),
+ true
+ );
+ });
+
+g.test('render_pass_and_bundle,depth_format')
+ .desc('Test that the depth attachment format in render passes and bundles must match.')
+ .params(u =>
+ u //
+ .combine('passFeature', kFeaturesForDepthStencilAttachmentFormats)
+ .combine('bundleFeature', kFeaturesForDepthStencilAttachmentFormats)
+ .beginSubcases()
+ .expand('passFormat', ({ passFeature }) =>
+ filterFormatsByFeature(passFeature, kDepthStencilAttachmentFormats)
+ )
+ .expand('bundleFormat', ({ bundleFeature }) =>
+ filterFormatsByFeature(bundleFeature, kDepthStencilAttachmentFormats)
+ )
+ )
+ .beforeAllSubcases(t => {
+ const { passFeature, bundleFeature } = t.params;
+ t.selectDeviceOrSkipTestCase([passFeature, bundleFeature]);
+ })
+ .fn(t => {
+ const { passFormat, bundleFormat } = t.params;
+
+ const bundleEncoder = t.device.createRenderBundleEncoder({
+ colorFormats: ['rgba8unorm'],
+ depthStencilFormat: bundleFormat,
+ });
+ const bundle = bundleEncoder.finish();
+
+ const { encoder, validateFinishAndSubmit } = t.createEncoder('non-pass');
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [t.createColorAttachment('rgba8unorm')],
+ depthStencilAttachment:
+ passFormat !== undefined ? t.createDepthAttachment(passFormat) : undefined,
+ });
+ pass.executeBundles([bundle]);
+ pass.end();
+ validateFinishAndSubmit(passFormat === bundleFormat, true);
+ });
+
+g.test('render_pass_and_bundle,sample_count')
+ .desc('Test that the sample count in render passes and bundles must match.')
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('renderSampleCount', kTextureSampleCounts)
+ .combine('bundleSampleCount', kTextureSampleCounts)
+ )
+ .fn(t => {
+ const { renderSampleCount, bundleSampleCount } = t.params;
+ const bundleEncoder = t.device.createRenderBundleEncoder({
+ colorFormats: ['rgba8unorm'],
+ sampleCount: bundleSampleCount,
+ });
+ const bundle = bundleEncoder.finish();
+ const { encoder, validateFinishAndSubmit } = t.createEncoder('non-pass');
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [t.createColorAttachment('rgba8unorm', renderSampleCount)],
+ });
+ pass.executeBundles([bundle]);
+ pass.end();
+ validateFinishAndSubmit(renderSampleCount === bundleSampleCount, true);
+ });
+
+g.test('render_pass_and_bundle,device_mismatch')
+  .desc('Test that a render pass cannot execute bundles created from another device.')
+ .paramsSubcasesOnly(u => u.combine('mismatched', [true, false]))
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { mismatched } = t.params;
+ const sourceDevice = mismatched ? t.mismatchedDevice : t.device;
+
+ const format = 'r16float';
+ const bundleEncoder = sourceDevice.createRenderBundleEncoder({
+ colorFormats: [format],
+ });
+ const bundle = bundleEncoder.finish();
+
+ const { encoder, validateFinishAndSubmit } = t.createEncoder('non-pass');
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [t.createColorAttachment(format)],
+ });
+ pass.executeBundles([bundle]);
+ pass.end();
+ validateFinishAndSubmit(!mismatched, true);
+ });
+
+g.test('render_pass_or_bundle_and_pipeline,color_format')
+ .desc(
+ `
+Test that color attachment formats in render passes or bundles match the pipeline color format.
+`
+ )
+ .params(u =>
+ u
+ .combine('encoderType', ['render pass', 'render bundle'] as const)
+ .beginSubcases()
+ .combine('encoderFormat', kColorAttachmentFormats)
+ .combine('pipelineFormat', kColorAttachmentFormats)
+ )
+ .fn(t => {
+ const { encoderType, encoderFormat, pipelineFormat } = t.params;
+
+ t.skipIfTextureFormatNotSupported(encoderFormat, pipelineFormat);
+
+ const pipeline = t.createRenderPipeline([{ format: pipelineFormat, writeMask: 0 }]);
+
+ const { encoder, validateFinishAndSubmit } = t.createEncoder(encoderType, {
+ attachmentInfo: { colorFormats: [encoderFormat] },
+ });
+ encoder.setPipeline(pipeline);
+ validateFinishAndSubmit(encoderFormat === pipelineFormat, true);
+ });
+
+g.test('render_pass_or_bundle_and_pipeline,color_count')
+ .desc(
+ `
+Test that the number of color attachments in render passes or bundles matches the pipeline color
+target count.
+`
+ )
+ .params(u =>
+ u
+ .combine('encoderType', ['render pass', 'render bundle'] as const)
+ .beginSubcases()
+ .combine('encoderCount', kColorAttachmentCounts)
+ .combine('pipelineCount', kColorAttachmentCounts)
+ )
+ .fn(t => {
+ const { encoderType, encoderCount, pipelineCount } = t.params;
+
+ const { maxColorAttachments } = t.device.limits;
+ t.skipIf(
+ pipelineCount > maxColorAttachments,
+ `pipelineCount: ${pipelineCount} > maxColorAttachments for device: ${maxColorAttachments}`
+ );
+ t.skipIf(
+ encoderCount > maxColorAttachments,
+ `encoderCount: ${encoderCount} > maxColorAttachments for device: ${maxColorAttachments}`
+ );
+
+ const pipeline = t.createRenderPipeline(
+ range(pipelineCount, () => ({ format: 'rgba8uint', writeMask: 0 }))
+ );
+
+ const { encoder, validateFinishAndSubmit } = t.createEncoder(encoderType, {
+ attachmentInfo: { colorFormats: range(encoderCount, () => 'rgba8uint') },
+ });
+ encoder.setPipeline(pipeline);
+ validateFinishAndSubmit(encoderCount === pipelineCount, true);
+ });
+
+g.test('render_pass_or_bundle_and_pipeline,color_sparse')
+ .desc(
+ `
+Test that each color attachment in render passes or bundles matches that of the pipeline.
+`
+ )
+ .params(u =>
+ u
+ .combine('encoderType', ['render pass', 'render bundle'] as const)
+ // introduce attachmentCount to make it easier to split the test
+ .combine('attachmentCount', kColorAttachmentCounts)
+ .beginSubcases()
+ .combine('encoderAttachments', kColorAttachments)
+ .combine('pipelineAttachments', kColorAttachments)
+ .filter(
+ p =>
+ p.attachmentCount === p.encoderAttachments.length &&
+ p.attachmentCount === p.pipelineAttachments.length
+ )
+ )
+ .fn(t => {
+ const { encoderType, encoderAttachments, pipelineAttachments } = t.params;
+ const { maxColorAttachments } = t.device.limits;
+ t.skipIf(
+ encoderAttachments.length > maxColorAttachments,
+ `num encoderAttachments: ${encoderAttachments.length} > maxColorAttachments for device: ${maxColorAttachments}`
+ );
+ t.skipIf(
+ pipelineAttachments.length > maxColorAttachments,
+ `num pipelineAttachments: ${pipelineAttachments.length} > maxColorAttachments for device: ${maxColorAttachments}`
+ );
+
+ const colorTargets = pipelineAttachments.map(i =>
+ i ? ({ format: 'rgba8uint', writeMask: 0 } as GPUColorTargetState) : null
+ );
+ const pipeline = t.createRenderPipeline(colorTargets);
+
+ const colorFormats = encoderAttachments.map(i => (i ? 'rgba8uint' : null));
+ const { encoder, validateFinishAndSubmit } = t.createEncoder(encoderType, {
+ attachmentInfo: { colorFormats },
+ });
+ encoder.setPipeline(pipeline);
+ validateFinishAndSubmit(
+ encoderAttachments.every((v, i) => v === pipelineAttachments[i]),
+ true
+ );
+ });
+
+g.test('render_pass_or_bundle_and_pipeline,depth_format')
+ .desc(
+ `
+Test that the depth attachment format in render passes or bundles matches the pipeline depth format.
+`
+ )
+ .params(u =>
+ u
+ .combine('encoderType', ['render pass', 'render bundle'] as const)
+ .combine('encoderFormatFeature', kFeaturesForDepthStencilAttachmentFormats)
+ .combine('pipelineFormatFeature', kFeaturesForDepthStencilAttachmentFormats)
+ .beginSubcases()
+ .expand('encoderFormat', ({ encoderFormatFeature }) =>
+ filterFormatsByFeature(encoderFormatFeature, kDepthStencilAttachmentFormats)
+ )
+ .expand('pipelineFormat', ({ pipelineFormatFeature }) =>
+ filterFormatsByFeature(pipelineFormatFeature, kDepthStencilAttachmentFormats)
+ )
+ )
+ .beforeAllSubcases(t => {
+ const { encoderFormatFeature, pipelineFormatFeature } = t.params;
+ t.selectDeviceOrSkipTestCase([encoderFormatFeature, pipelineFormatFeature]);
+ })
+ .fn(t => {
+ const { encoderType, encoderFormat, pipelineFormat } = t.params;
+
+ const pipeline = t.createRenderPipeline(
+ [{ format: 'rgba8unorm', writeMask: 0 }],
+ pipelineFormat !== undefined
+ ? { format: pipelineFormat, depthCompare: 'always', depthWriteEnabled: false }
+ : undefined
+ );
+
+ const { encoder, validateFinishAndSubmit } = t.createEncoder(encoderType, {
+ attachmentInfo: { colorFormats: ['rgba8unorm'], depthStencilFormat: encoderFormat },
+ });
+ encoder.setPipeline(pipeline);
+ validateFinishAndSubmit(encoderFormat === pipelineFormat, true);
+ });
+
+const kStencilFaceStates = [
+ { failOp: 'keep', depthFailOp: 'keep', passOp: 'keep' },
+ { failOp: 'zero', depthFailOp: 'zero', passOp: 'zero' },
+] as GPUStencilFaceState[];
+
+g.test('render_pass_or_bundle_and_pipeline,depth_stencil_read_only_write_state')
+ .desc(
+ `
+Test that the depth stencil read only state in render passes or bundles is compatible with the depth stencil write state of the pipeline.
+`
+ )
+ .params(u =>
+ u
+ .combine('encoderType', ['render pass', 'render bundle'] as const)
+ .combine('format', kDepthStencilAttachmentFormats)
+ .beginSubcases()
+ // pass/bundle state
+ .combine('depthReadOnly', [false, true])
+ .combine('stencilReadOnly', [false, true])
+ .combine('stencilFront', kStencilFaceStates)
+ .combine('stencilBack', kStencilFaceStates)
+ // pipeline state
+ .combine('depthWriteEnabled', [false, true])
+ .combine('stencilWriteMask', [0, 0xffffffff])
+ .combine('cullMode', ['none', 'front', 'back'] as const)
+ .filter(p => {
+ if (p.format) {
+ const depthStencilInfo = kTextureFormatInfo[p.format];
+ // For combined depth/stencil formats the depth and stencil read only state must match
+ // in order to create a valid render bundle or render pass.
+ if (depthStencilInfo.depth && depthStencilInfo.stencil) {
+ if (p.depthReadOnly !== p.stencilReadOnly) {
+ return false;
+ }
+ }
+        // If the format has no depth aspect, depthWriteEnabled of the pipeline must not be true
+        // in order to create a valid render pipeline.
+ if (!depthStencilInfo.depth && p.depthWriteEnabled) {
+ return false;
+ }
+ // If the format has no stencil aspect, the stencil state operation must be 'keep'
+ // in order to create a valid render pipeline.
+ if (
+ !depthStencilInfo.stencil &&
+ (p.stencilFront.failOp !== 'keep' || p.stencilBack.failOp !== 'keep')
+ ) {
+ return false;
+ }
+ }
+ // No depthStencil attachment
+ return true;
+ })
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceForTextureFormatOrSkipTestCase(t.params.format);
+ })
+ .fn(t => {
+ const {
+ encoderType,
+ format,
+ depthReadOnly,
+ stencilReadOnly,
+ depthWriteEnabled,
+ stencilWriteMask,
+ cullMode,
+ stencilFront,
+ stencilBack,
+ } = t.params;
+
+ const pipeline = t.createRenderPipeline(
+ [{ format: 'rgba8unorm', writeMask: 0 }],
+ format === undefined
+ ? undefined
+ : {
+ format,
+ depthWriteEnabled,
+ depthCompare: 'always',
+ stencilWriteMask,
+ stencilFront,
+ stencilBack,
+ },
+ 1,
+ cullMode
+ );
+
+ const { encoder, validateFinishAndSubmit } = t.createEncoder(encoderType, {
+ attachmentInfo: {
+ colorFormats: ['rgba8unorm'],
+ depthStencilFormat: format,
+ depthReadOnly,
+ stencilReadOnly,
+ },
+ });
+ encoder.setPipeline(pipeline);
+
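+    // Determine whether the pipeline writes depth and/or stencil. Stencil is written only when the
+    // stencil write mask is non-zero and a non-culled face has an op other than 'keep'.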
+ let writesDepth = false;
+ let writesStencil = false;
+ if (format) {
+ writesDepth = depthWriteEnabled;
+ if (stencilWriteMask !== 0) {
+ if (
+ cullMode !== 'front' &&
+ (stencilFront.passOp !== 'keep' ||
+ stencilFront.depthFailOp !== 'keep' ||
+ stencilFront.failOp !== 'keep')
+ ) {
+ writesStencil = true;
+ }
+ if (
+ cullMode !== 'back' &&
+ (stencilBack.passOp !== 'keep' ||
+ stencilBack.depthFailOp !== 'keep' ||
+ stencilBack.failOp !== 'keep')
+ ) {
+ writesStencil = true;
+ }
+ }
+ }
+
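+    // The pass/bundle is valid only if the pipeline does not write an aspect marked read-only.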
+ let isValid = true;
+ if (writesDepth) {
+ isValid &&= !depthReadOnly;
+ }
+ if (writesStencil) {
+ isValid &&= !stencilReadOnly;
+ }
+
+ validateFinishAndSubmit(isValid, true);
+ });
+
+g.test('render_pass_or_bundle_and_pipeline,sample_count')
+ .desc(
+ `
+Test that the sample count in render passes or bundles matches the pipeline sample count, for both color and depth-stencil textures.
+`
+ )
+ .params(u =>
+ u
+ .combine('encoderType', ['render pass', 'render bundle'] as const)
+ .combine('attachmentType', ['color', 'depthstencil'] as const)
+ .beginSubcases()
+ .combine('encoderSampleCount', kTextureSampleCounts)
+ .combine('pipelineSampleCount', kTextureSampleCounts)
+ )
+ .fn(t => {
+ const { encoderType, attachmentType, encoderSampleCount, pipelineSampleCount } = t.params;
+
+ const colorFormats = attachmentType === 'color' ? ['rgba8unorm' as const] : [];
+ const depthStencilFormat =
+ attachmentType === 'depthstencil' ? ('depth24plus-stencil8' as const) : undefined;
+
+ const pipeline = t.createRenderPipeline(
+ colorFormats.map(format => ({ format, writeMask: 0 })),
+ depthStencilFormat
+ ? { format: depthStencilFormat, depthWriteEnabled: false, depthCompare: 'always' }
+ : undefined,
+ pipelineSampleCount
+ );
+
+ const { encoder, validateFinishAndSubmit } = t.createEncoder(encoderType, {
+ attachmentInfo: { colorFormats, depthStencilFormat, sampleCount: encoderSampleCount },
+ });
+ encoder.setPipeline(pipeline);
+ validateFinishAndSubmit(encoderSampleCount === pipelineSampleCount, true);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pass/render_pass_descriptor.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pass/render_pass_descriptor.spec.ts
new file mode 100644
index 0000000000..9713beea52
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pass/render_pass_descriptor.spec.ts
@@ -0,0 +1,1097 @@
+export const description = `
+Render pass descriptor validation tests.
+
+TODO: review for completeness
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { range } from '../../../../common/util/util.js';
+import { kMaxColorAttachmentsToTest, kQueryTypes } from '../../../capability_info.js';
+import { GPUConst } from '../../../constants.js';
+import {
+ computeBytesPerSampleFromFormats,
+ kDepthStencilFormats,
+ kRenderableColorTextureFormats,
+ kTextureFormatInfo,
+} from '../../../format_info.js';
+import { ValidationTest } from '../validation_test.js';
+
+class F extends ValidationTest {
+ createTexture(
+ options: {
+ format?: GPUTextureFormat;
+ width?: number;
+ height?: number;
+ arrayLayerCount?: number;
+ mipLevelCount?: number;
+ sampleCount?: number;
+ usage?: GPUTextureUsageFlags;
+ } = {}
+ ): GPUTexture {
+ const {
+ format = 'rgba8unorm',
+ width = 16,
+ height = 16,
+ arrayLayerCount = 1,
+ mipLevelCount = 1,
+ sampleCount = 1,
+ usage = GPUTextureUsage.RENDER_ATTACHMENT,
+ } = options;
+
+ return this.device.createTexture({
+ size: { width, height, depthOrArrayLayers: arrayLayerCount },
+ format,
+ mipLevelCount,
+ sampleCount,
+ usage,
+ });
+ }
+
+ getColorAttachment(
+ texture: GPUTexture,
+ textureViewDescriptor?: GPUTextureViewDescriptor
+ ): GPURenderPassColorAttachment {
+ const view = texture.createView(textureViewDescriptor);
+
+ return {
+ view,
+ clearValue: { r: 1.0, g: 0.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ };
+ }
+
+ getDepthStencilAttachment(
+ texture: GPUTexture,
+ textureViewDescriptor?: GPUTextureViewDescriptor
+ ): GPURenderPassDepthStencilAttachment {
+ const view = texture.createView(textureViewDescriptor);
+
+ return {
+ view,
+ depthClearValue: 1.0,
+ depthLoadOp: 'clear',
+ depthStoreOp: 'store',
+ stencilClearValue: 0,
+ stencilLoadOp: 'clear',
+ stencilStoreOp: 'store',
+ };
+ }
+
+ tryRenderPass(success: boolean, descriptor: GPURenderPassDescriptor): void {
+ const commandEncoder = this.device.createCommandEncoder();
+ const renderPass = commandEncoder.beginRenderPass(descriptor);
+ renderPass.end();
+
+ this.expectValidationError(() => {
+ commandEncoder.finish();
+ }, !success);
+ }
+}
+
+export const g = makeTestGroup(F);
+
+g.test('attachments,one_color_attachment')
+ .desc(`Test that a render pass works with only one color attachment.`)
+ .fn(t => {
+ const colorTexture = t.createTexture({ format: 'rgba8unorm' });
+ const descriptor = {
+ colorAttachments: [t.getColorAttachment(colorTexture)],
+ };
+
+ t.tryRenderPass(true, descriptor);
+ });
+
+g.test('attachments,one_depth_stencil_attachment')
+ .desc(`Test that a render pass works with only one depthStencil attachment.`)
+ .fn(t => {
+ const depthStencilTexture = t.createTexture({ format: 'depth24plus-stencil8' });
+ const descriptor = {
+ colorAttachments: [],
+ depthStencilAttachment: t.getDepthStencilAttachment(depthStencilTexture),
+ };
+
+ t.tryRenderPass(true, descriptor);
+ });
+
+g.test('color_attachments,empty')
+ .desc(
+ `
+  Test that when every colorAttachments entry is 'undefined', or the sequence is empty, the
+  depthStencilAttachment must not be 'undefined'.
+ `
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('unclampedColorAttachments', [
+ [],
+ [undefined],
+ [undefined, undefined],
+ new Array(8).fill(undefined),
+ [{ format: 'rgba8unorm' }],
+ ])
+ .combine('hasDepthStencilAttachment', [false, true])
+ )
+ .fn(t => {
+ const { unclampedColorAttachments, hasDepthStencilAttachment } = t.params;
+ const colorAttachments = unclampedColorAttachments.slice(
+ 0,
+ t.device.limits.maxColorAttachments
+ );
+
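+    // Replace each defined entry with a real color attachment; the color targets count as empty
+    // only if every entry is undefined (or the sequence is empty).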
+ let isEmptyColorTargets = true;
+ for (let i = 0; i < colorAttachments.length; i++) {
+ if (colorAttachments[i] !== undefined) {
+ isEmptyColorTargets = false;
+ const colorTexture = t.createTexture();
+ colorAttachments[i] = t.getColorAttachment(colorTexture);
+ }
+ }
+
+ const _success = !isEmptyColorTargets || hasDepthStencilAttachment;
+ t.tryRenderPass(_success, {
+ colorAttachments,
+ depthStencilAttachment: hasDepthStencilAttachment
+ ? t.getDepthStencilAttachment(t.createTexture({ format: 'depth24plus-stencil8' }))
+ : undefined,
+ });
+ });
+
+g.test('color_attachments,limits,maxColorAttachments')
+ .desc(
+ `
+  Test that out-of-bounds color attachment counts are handled.
+  - a validation error is generated when the number of color attachments exceeds maxColorAttachments.
+ `
+ )
+ .paramsSimple([
+ { colorAttachmentsCountVariant: { mult: 1, add: 0 }, _success: true }, // Control case
+ { colorAttachmentsCountVariant: { mult: 1, add: 1 }, _success: false }, // Out of bounds
+ ])
+ .fn(t => {
+ const { colorAttachmentsCountVariant, _success } = t.params;
+ const colorAttachmentsCount = t.makeLimitVariant(
+ 'maxColorAttachments',
+ colorAttachmentsCountVariant
+ );
+
+ const colorAttachments = [];
+ for (let i = 0; i < colorAttachmentsCount; i++) {
+ const colorTexture = t.createTexture({ format: 'r8unorm' });
+ colorAttachments.push(t.getColorAttachment(colorTexture));
+ }
+
+ t.tryRenderPass(_success, { colorAttachments });
+ });
+
+g.test('color_attachments,limits,maxColorAttachmentBytesPerSample,aligned')
+ .desc(
+ `
+ Test that the total bytes per sample of the formats of the color attachments must be no greater
+ than maxColorAttachmentBytesPerSample when the components are aligned (same format).
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kRenderableColorTextureFormats)
+ .beginSubcases()
+ .combine(
+ 'attachmentCount',
+ range(kMaxColorAttachmentsToTest, i => i + 1)
+ )
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ })
+ .fn(t => {
+ const { format, attachmentCount } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ t.skipIf(
+ attachmentCount > t.device.limits.maxColorAttachments,
+ `attachmentCount: ${attachmentCount} > maxColorAttachments: ${t.device.limits.maxColorAttachments}`
+ );
+
+ const colorAttachments = [];
+ for (let i = 0; i < attachmentCount; i++) {
+ const colorTexture = t.createTexture({ format });
+ colorAttachments.push(t.getColorAttachment(colorTexture));
+ }
+ const shouldError =
+ info.colorRender === undefined ||
+ computeBytesPerSampleFromFormats(range(attachmentCount, () => format)) >
+ t.device.limits.maxColorAttachmentBytesPerSample;
+
+ t.tryRenderPass(!shouldError, { colorAttachments });
+ });
+
+g.test('color_attachments,limits,maxColorAttachmentBytesPerSample,unaligned')
+ .desc(
+ `
+ Test that the total bytes per sample of the formats of the color attachments must be no greater
+ than maxColorAttachmentBytesPerSample when the components are (potentially) unaligned.
+ `
+ )
+ .params(u =>
+ u.combineWithParams([
+      // Alignment causes the first 1-byte r8unorm to take 4 bytes. So even though
+      // 1+4+8+16+1 < 32, the 4-byte alignment requirement of r32float makes the first r8unorm
+      // take 4 bytes and 4+4+8+16+1 > 32. Re-ordering so the r8unorm formats come last, however,
+      // is allowed: 4+8+16+1+1 < 32.
+ {
+ formats: [
+ 'r8unorm',
+ 'r32float',
+ 'rgba8unorm',
+ 'rgba32float',
+ 'r8unorm',
+ ] as GPUTextureFormat[],
+ },
+ {
+ formats: [
+ 'r32float',
+ 'rgba8unorm',
+ 'rgba32float',
+ 'r8unorm',
+ 'r8unorm',
+ ] as GPUTextureFormat[],
+ },
+ ])
+ )
+ .fn(t => {
+ const { formats } = t.params;
+
+ t.skipIf(
+ formats.length > t.device.limits.maxColorAttachments,
+ `numColorAttachments: ${formats.length} > maxColorAttachments: ${t.device.limits.maxColorAttachments}`
+ );
+
+ const colorAttachments = [];
+ for (const format of formats) {
+ const colorTexture = t.createTexture({ format });
+ colorAttachments.push(t.getColorAttachment(colorTexture));
+ }
+
+ const success =
+ computeBytesPerSampleFromFormats(formats) <= t.device.limits.maxColorAttachmentBytesPerSample;
+
+ t.tryRenderPass(success, { colorAttachments });
+ });
+
+g.test('attachments,same_size')
+ .desc(
+ `
+ Test that attachments have the same size. Otherwise, a validation error should be generated.
+ - Succeed if all attachments have the same size.
+ - Fail if one of the color attachments has a different size.
+ - Fail if the depth stencil attachment has a different size.
+ `
+ )
+ .fn(t => {
+ const colorTexture1x1A = t.createTexture({ width: 1, height: 1, format: 'rgba8unorm' });
+ const colorTexture1x1B = t.createTexture({ width: 1, height: 1, format: 'rgba8unorm' });
+ const colorTexture2x2 = t.createTexture({ width: 2, height: 2, format: 'rgba8unorm' });
+ const depthStencilTexture1x1 = t.createTexture({
+ width: 1,
+ height: 1,
+ format: 'depth24plus-stencil8',
+ });
+ const depthStencilTexture2x2 = t.createTexture({
+ width: 2,
+ height: 2,
+ format: 'depth24plus-stencil8',
+ });
+
+ {
+ // Control case: all the same size (1x1)
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [
+ t.getColorAttachment(colorTexture1x1A),
+ t.getColorAttachment(colorTexture1x1B),
+ ],
+ depthStencilAttachment: t.getDepthStencilAttachment(depthStencilTexture1x1),
+ };
+
+ t.tryRenderPass(true, descriptor);
+ }
+ {
+ // One of the color attachments has a different size
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [
+ t.getColorAttachment(colorTexture1x1A),
+ t.getColorAttachment(colorTexture2x2),
+ ],
+ };
+
+ t.tryRenderPass(false, descriptor);
+ }
+ {
+ // The depth stencil attachment has a different size
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [
+ t.getColorAttachment(colorTexture1x1A),
+ t.getColorAttachment(colorTexture1x1B),
+ ],
+ depthStencilAttachment: t.getDepthStencilAttachment(depthStencilTexture2x2),
+ };
+
+ t.tryRenderPass(false, descriptor);
+ }
+ });
+
+g.test('attachments,color_depth_mismatch')
+  .desc(`Test that an attachment's format must match its use as a color or depth stencil attachment.`)
+ .fn(t => {
+ const colorTexture = t.createTexture({ format: 'rgba8unorm' });
+ const depthStencilTexture = t.createTexture({ format: 'depth24plus-stencil8' });
+
+ {
+ // Using depth-stencil for color
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [t.getColorAttachment(depthStencilTexture)],
+ };
+
+ t.tryRenderPass(false, descriptor);
+ }
+ {
+ // Using color for depth-stencil
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [],
+ depthStencilAttachment: t.getDepthStencilAttachment(colorTexture),
+ };
+
+ t.tryRenderPass(false, descriptor);
+ }
+ });
+
+g.test('attachments,layer_count')
+ .desc(
+ `
+  Test the layer counts for color or depth stencil attachments.
+  - Fail if using a 2D array texture view with arrayLayerCount > 1.
+  - Succeed if using a 2D array texture view that covers the first layer of the texture.
+  - Succeed if using a 2D array texture view that covers the last layer for depth stencil.
+ `
+ )
+ .paramsSimple([
+ { arrayLayerCount: 5, baseArrayLayer: 0, _success: false },
+ { arrayLayerCount: 1, baseArrayLayer: 0, _success: true },
+ { arrayLayerCount: 1, baseArrayLayer: 9, _success: true },
+ ])
+ .fn(t => {
+ const { arrayLayerCount, baseArrayLayer, _success } = t.params;
+
+ const ARRAY_LAYER_COUNT = 10;
+ const MIP_LEVEL_COUNT = 1;
+ const COLOR_FORMAT = 'rgba8unorm';
+ const DEPTH_STENCIL_FORMAT = 'depth24plus-stencil8';
+
+ const colorTexture = t.createTexture({
+ format: COLOR_FORMAT,
+ width: 32,
+ height: 32,
+ mipLevelCount: MIP_LEVEL_COUNT,
+ arrayLayerCount: ARRAY_LAYER_COUNT,
+ });
+ const depthStencilTexture = t.createTexture({
+ format: DEPTH_STENCIL_FORMAT,
+ width: 32,
+ height: 32,
+ mipLevelCount: MIP_LEVEL_COUNT,
+ arrayLayerCount: ARRAY_LAYER_COUNT,
+ });
+
+ const baseTextureViewDescriptor: GPUTextureViewDescriptor = {
+ dimension: '2d-array',
+ baseArrayLayer,
+ arrayLayerCount,
+ baseMipLevel: 0,
+ mipLevelCount: MIP_LEVEL_COUNT,
+ };
+
+ {
+ // Check 2D array texture view for color
+ const textureViewDescriptor: GPUTextureViewDescriptor = {
+ ...baseTextureViewDescriptor,
+ format: COLOR_FORMAT,
+ };
+
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [t.getColorAttachment(colorTexture, textureViewDescriptor)],
+ };
+
+ t.tryRenderPass(_success, descriptor);
+ }
+ {
+ // Check 2D array texture view for depth stencil
+ const textureViewDescriptor: GPUTextureViewDescriptor = {
+ ...baseTextureViewDescriptor,
+ format: DEPTH_STENCIL_FORMAT,
+ };
+
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [],
+ depthStencilAttachment: t.getDepthStencilAttachment(
+ depthStencilTexture,
+ textureViewDescriptor
+ ),
+ };
+
+ t.tryRenderPass(_success, descriptor);
+ }
+ });
+
+g.test('attachments,mip_level_count')
+ .desc(
+ `
+  Test the mip level count for color or depth stencil attachments.
+  - Fail if using a 2D texture view with mipLevelCount > 1.
+  - Succeed if using a 2D texture view that covers the first level of the texture.
+  - Succeed if using a 2D texture view that covers the last level of the texture.
+ `
+ )
+ .paramsSimple([
+ { mipLevelCount: 2, baseMipLevel: 0, _success: false },
+ { mipLevelCount: 1, baseMipLevel: 0, _success: true },
+ { mipLevelCount: 1, baseMipLevel: 3, _success: true },
+ ])
+ .fn(t => {
+ const { mipLevelCount, baseMipLevel, _success } = t.params;
+
+ const ARRAY_LAYER_COUNT = 1;
+ const MIP_LEVEL_COUNT = 4;
+ const COLOR_FORMAT = 'rgba8unorm';
+ const DEPTH_STENCIL_FORMAT = 'depth24plus-stencil8';
+
+ const colorTexture = t.createTexture({
+ format: COLOR_FORMAT,
+ width: 32,
+ height: 32,
+ mipLevelCount: MIP_LEVEL_COUNT,
+ arrayLayerCount: ARRAY_LAYER_COUNT,
+ });
+ const depthStencilTexture = t.createTexture({
+ format: DEPTH_STENCIL_FORMAT,
+ width: 32,
+ height: 32,
+ mipLevelCount: MIP_LEVEL_COUNT,
+ arrayLayerCount: ARRAY_LAYER_COUNT,
+ });
+
+ const baseTextureViewDescriptor: GPUTextureViewDescriptor = {
+ dimension: '2d',
+ baseArrayLayer: 0,
+ arrayLayerCount: ARRAY_LAYER_COUNT,
+ baseMipLevel,
+ mipLevelCount,
+ };
+
+ {
+ // Check 2D texture view for color
+ const textureViewDescriptor: GPUTextureViewDescriptor = {
+ ...baseTextureViewDescriptor,
+ format: COLOR_FORMAT,
+ };
+
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [t.getColorAttachment(colorTexture, textureViewDescriptor)],
+ };
+
+ t.tryRenderPass(_success, descriptor);
+ }
+ {
+ // Check 2D texture view for depth stencil
+ const textureViewDescriptor: GPUTextureViewDescriptor = {
+ ...baseTextureViewDescriptor,
+ format: DEPTH_STENCIL_FORMAT,
+ };
+
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [],
+ depthStencilAttachment: t.getDepthStencilAttachment(
+ depthStencilTexture,
+ textureViewDescriptor
+ ),
+ };
+
+ t.tryRenderPass(_success, descriptor);
+ }
+ });
+
+g.test('color_attachments,non_multisampled')
+ .desc(
+ `
+  Test that setting a resolve target is invalid if the color attachment is not multisampled.
+ `
+ )
+ .fn(t => {
+ const colorTexture = t.createTexture({ sampleCount: 1 });
+ const resolveTargetTexture = t.createTexture({ sampleCount: 1 });
+
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [
+ {
+ view: colorTexture.createView(),
+ resolveTarget: resolveTargetTexture.createView(),
+ clearValue: { r: 1.0, g: 0.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ };
+
+ t.tryRenderPass(false, descriptor);
+ });
+
+g.test('color_attachments,sample_count')
+ .desc(
+ `
+ Test the usages of multisampled textures for color attachments.
+ - Succeed if using a multisampled color attachment without setting a resolve target.
+ - Fail if using multiple color attachments with different sample counts.
+ `
+ )
+ .fn(t => {
+ const colorTexture = t.createTexture({ sampleCount: 1 });
+ const multisampledColorTexture = t.createTexture({ sampleCount: 4 });
+
+ {
+ // It is allowed to use a multisampled color attachment without setting resolve target
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [t.getColorAttachment(multisampledColorTexture)],
+ };
+ t.tryRenderPass(true, descriptor);
+ }
+ {
+ // It is not allowed to use multiple color attachments with different sample counts
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [
+ t.getColorAttachment(colorTexture),
+ t.getColorAttachment(multisampledColorTexture),
+ ],
+ };
+
+ t.tryRenderPass(false, descriptor);
+ }
+ });
+
+g.test('resolveTarget,sample_count')
+ .desc(
+ `
+  Test that using a multisampled resolve target is invalid for color attachments.
+ `
+ )
+ .fn(t => {
+ const multisampledColorTexture = t.createTexture({ sampleCount: 4 });
+ const multisampledResolveTargetTexture = t.createTexture({ sampleCount: 4 });
+
+ const colorAttachment = t.getColorAttachment(multisampledColorTexture);
+ colorAttachment.resolveTarget = multisampledResolveTargetTexture.createView();
+
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [colorAttachment],
+ };
+
+ t.tryRenderPass(false, descriptor);
+ });
+
+g.test('resolveTarget,array_layer_count')
+ .desc(
+ `
+  Test that using a resolve target whose array layer count is greater than 1 is invalid for color
+  attachments.
+ `
+ )
+ .fn(t => {
+ const multisampledColorTexture = t.createTexture({ sampleCount: 4 });
+ const resolveTargetTexture = t.createTexture({ arrayLayerCount: 2 });
+
+ const colorAttachment = t.getColorAttachment(multisampledColorTexture);
+ colorAttachment.resolveTarget = resolveTargetTexture.createView({ dimension: '2d-array' });
+
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [colorAttachment],
+ };
+
+ t.tryRenderPass(false, descriptor);
+ });
+
+g.test('resolveTarget,mipmap_level_count')
+ .desc(
+ `
+  Test that using a resolve target whose mip level count is greater than 1 is invalid for
+  color attachments.
+ `
+ )
+ .fn(t => {
+ const multisampledColorTexture = t.createTexture({ sampleCount: 4 });
+ const resolveTargetTexture = t.createTexture({ mipLevelCount: 2 });
+
+ const colorAttachment = t.getColorAttachment(multisampledColorTexture);
+ colorAttachment.resolveTarget = resolveTargetTexture.createView();
+
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [colorAttachment],
+ };
+
+ t.tryRenderPass(false, descriptor);
+ });
+
+g.test('resolveTarget,usage')
+ .desc(
+ `
+ Test that using a resolve target whose usage is not RENDER_ATTACHMENT is invalid for color
+ attachments.
+ `
+ )
+ .paramsSimple([
+ { usage: GPUConst.TextureUsage.COPY_SRC | GPUConst.TextureUsage.COPY_DST },
+ { usage: GPUConst.TextureUsage.STORAGE_BINDING | GPUConst.TextureUsage.TEXTURE_BINDING },
+ { usage: GPUConst.TextureUsage.STORAGE_BINDING | GPUConst.TextureUsage.STORAGE },
+ { usage: GPUConst.TextureUsage.RENDER_ATTACHMENT | GPUConst.TextureUsage.TEXTURE_BINDING },
+ ])
+ .fn(t => {
+ const { usage } = t.params;
+
+ const multisampledColorTexture = t.createTexture({ sampleCount: 4 });
+ const resolveTargetTexture = t.createTexture({ usage });
+
+ const colorAttachment = t.getColorAttachment(multisampledColorTexture);
+ colorAttachment.resolveTarget = resolveTargetTexture.createView();
+
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [colorAttachment],
+ };
+
+ const isValid = usage & GPUConst.TextureUsage.RENDER_ATTACHMENT ? true : false;
+ t.tryRenderPass(isValid, descriptor);
+ });
+
+g.test('resolveTarget,error_state')
+  .desc(`Test that a resolve target that has an error is invalid for color attachments.`)
+ .fn(t => {
+ const ARRAY_LAYER_COUNT = 1;
+
+ const multisampledColorTexture = t.createTexture({ sampleCount: 4 });
+ const resolveTargetTexture = t.createTexture({ arrayLayerCount: ARRAY_LAYER_COUNT });
+
+ const colorAttachment = t.getColorAttachment(multisampledColorTexture);
+ t.expectValidationError(() => {
+ colorAttachment.resolveTarget = resolveTargetTexture.createView({
+ dimension: '2d',
+ format: 'rgba8unorm',
+ baseArrayLayer: ARRAY_LAYER_COUNT + 1,
+ });
+ });
+
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [colorAttachment],
+ };
+
+ t.tryRenderPass(false, descriptor);
+ });
+
+g.test('resolveTarget,single_sample_count')
+ .desc(
+ `
+  Test that a multisampled color attachment resolving to a single-sampled resolve target is
+  valid.
+ `
+ )
+ .fn(t => {
+ const multisampledColorTexture = t.createTexture({ sampleCount: 4 });
+ const resolveTargetTexture = t.createTexture({ sampleCount: 1 });
+
+ const colorAttachment = t.getColorAttachment(multisampledColorTexture);
+ colorAttachment.resolveTarget = resolveTargetTexture.createView();
+
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [colorAttachment],
+ };
+
+ t.tryRenderPass(true, descriptor);
+ });
+
+g.test('resolveTarget,different_format')
+ .desc(`Test that a resolve target that has a different format is invalid.`)
+ .fn(t => {
+ const multisampledColorTexture = t.createTexture({ sampleCount: 4 });
+ const resolveTargetTexture = t.createTexture({ format: 'bgra8unorm' });
+
+ const colorAttachment = t.getColorAttachment(multisampledColorTexture);
+ colorAttachment.resolveTarget = resolveTargetTexture.createView();
+
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [colorAttachment],
+ };
+
+ t.tryRenderPass(false, descriptor);
+ });
+
+g.test('resolveTarget,different_size')
+ .desc(
+ `
+  Test that a resolve target with a different size than the color attachment is invalid.
+ `
+ )
+ .fn(t => {
+ const size = 16;
+ const multisampledColorTexture = t.createTexture({ width: size, height: size, sampleCount: 4 });
+ const resolveTargetTexture = t.createTexture({
+ width: size * 2,
+ height: size * 2,
+ mipLevelCount: 2,
+ });
+
+ {
+ const resolveTargetTextureView = resolveTargetTexture.createView({
+ baseMipLevel: 0,
+ mipLevelCount: 1,
+ });
+
+ const colorAttachment = t.getColorAttachment(multisampledColorTexture);
+ colorAttachment.resolveTarget = resolveTargetTextureView;
+
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [colorAttachment],
+ };
+
+ t.tryRenderPass(false, descriptor);
+ }
+ {
+ const resolveTargetTextureView = resolveTargetTexture.createView({ baseMipLevel: 1 });
+
+ const colorAttachment = t.getColorAttachment(multisampledColorTexture);
+ colorAttachment.resolveTarget = resolveTargetTextureView;
+
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [colorAttachment],
+ };
+
+ t.tryRenderPass(true, descriptor);
+ }
+ });
+
+g.test('depth_stencil_attachment,sample_counts_mismatch')
+ .desc(
+ `
+  Test that a depth stencil attachment whose sample count differs from that of the color
+  attachment is invalid.
+ `
+ )
+ .fn(t => {
+ const multisampledDepthStencilTexture = t.createTexture({
+ sampleCount: 4,
+ format: 'depth24plus-stencil8',
+ });
+
+ {
+ // It is not allowed to use a depth stencil attachment whose sample count is different from
+ // the one of the color attachment.
+ const depthStencilTexture = t.createTexture({
+ sampleCount: 1,
+ format: 'depth24plus-stencil8',
+ });
+ const multisampledColorTexture = t.createTexture({ sampleCount: 4 });
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [t.getColorAttachment(multisampledColorTexture)],
+ depthStencilAttachment: t.getDepthStencilAttachment(depthStencilTexture),
+ };
+
+ t.tryRenderPass(false, descriptor);
+ }
+ {
+ const colorTexture = t.createTexture({ sampleCount: 1 });
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [t.getColorAttachment(colorTexture)],
+ depthStencilAttachment: t.getDepthStencilAttachment(multisampledDepthStencilTexture),
+ };
+
+ t.tryRenderPass(false, descriptor);
+ }
+ {
+ // It is allowed to use a multisampled depth stencil attachment whose sample count is equal to
+ // the one of the color attachment.
+ const multisampledColorTexture = t.createTexture({ sampleCount: 4 });
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [t.getColorAttachment(multisampledColorTexture)],
+ depthStencilAttachment: t.getDepthStencilAttachment(multisampledDepthStencilTexture),
+ };
+
+ t.tryRenderPass(true, descriptor);
+ }
+ {
+ // It is allowed to use a multisampled depth stencil attachment with no color attachment.
+ const descriptor: GPURenderPassDescriptor = {
+ colorAttachments: [],
+ depthStencilAttachment: t.getDepthStencilAttachment(multisampledDepthStencilTexture),
+ };
+
+ t.tryRenderPass(true, descriptor);
+ }
+ });
+
+g.test('depth_stencil_attachment,loadOp_storeOp_match_depthReadOnly_stencilReadOnly')
+ .desc(
+ `
+ Test GPURenderPassDepthStencilAttachment Usage:
+ - if the format has a depth aspect:
+ - if depthReadOnly is true
+ - depthLoadOp and depthStoreOp must not be provided
+ - else:
+ - depthLoadOp and depthStoreOp must be provided
+ - if the format has a stencil aspect:
+ - if stencilReadOnly is true
+ - stencilLoadOp and stencilStoreOp must not be provided
+ - else:
+ - stencilLoadOp and stencilStoreOp must be provided
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kDepthStencilFormats)
+ .beginSubcases() // Note: It's easier to debug if you comment this line out as you can then run an individual case.
+ .combine('depthReadOnly', [undefined, true, false])
+ .combine('depthLoadOp', [undefined, 'clear', 'load'] as GPULoadOp[])
+ .combine('depthStoreOp', [undefined, 'discard', 'store'] as GPUStoreOp[])
+ .combine('stencilReadOnly', [undefined, true, false])
+ .combine('stencilLoadOp', [undefined, 'clear', 'load'] as GPULoadOp[])
+ .combine('stencilStoreOp', [undefined, 'discard', 'store'] as GPUStoreOp[])
+ )
+ .beforeAllSubcases(t => {
+ const info = kTextureFormatInfo[t.params.format as GPUTextureFormat];
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const {
+ format,
+ depthReadOnly,
+ depthLoadOp,
+ depthStoreOp,
+ stencilReadOnly,
+ stencilLoadOp,
+ stencilStoreOp,
+ } = t.params;
+
+ const depthAttachment = t.trackForCleanup(
+ t.device.createTexture({
+ format,
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ })
+ );
+ const depthAttachmentView = depthAttachment.createView();
+
+ const encoder = t.device.createCommandEncoder();
+
+ // If depthLoadOp is "clear", depthClearValue must be provided and must be between 0.0 and 1.0,
+ // and it will be ignored if depthLoadOp is not "clear".
+ const depthClearValue = depthLoadOp === 'clear' ? 0 : undefined;
+ const renderPassDescriptor: GPURenderPassDescriptor = {
+ colorAttachments: [],
+ depthStencilAttachment: {
+ view: depthAttachmentView,
+ depthLoadOp,
+ depthStoreOp,
+ depthReadOnly,
+ stencilLoadOp,
+ stencilStoreOp,
+ stencilReadOnly,
+ depthClearValue,
+ },
+ };
+ const pass = encoder.beginRenderPass(renderPassDescriptor);
+ pass.end();
+
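+    // Expected validity: for each aspect present in the format, both ops must be provided when the
+    // aspect is writable and neither op when it is read-only or absent; for combined formats, the
+    // depth and stencil read-only states must agree.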
+ const info = kTextureFormatInfo[format];
+ const hasDepthSettings = !!depthLoadOp && !!depthStoreOp && !depthReadOnly;
+ const hasStencilSettings = !!stencilLoadOp && !!stencilStoreOp && !stencilReadOnly;
+ const hasDepth = info.depth;
+ const hasStencil = info.stencil;
+
+ const goodAspectCombo =
+ (hasDepth && hasStencil ? !depthReadOnly === !stencilReadOnly : true) &&
+ (hasDepthSettings ? hasDepth : true) &&
+ (hasStencilSettings ? hasStencil : true);
+
+ const hasBothDepthOps = !!depthLoadOp && !!depthStoreOp;
+ const hasBothStencilOps = !!stencilLoadOp && !!stencilStoreOp;
+ const hasNeitherDepthOps = !depthLoadOp && !depthStoreOp;
+ const hasNeitherStencilOps = !stencilLoadOp && !stencilStoreOp;
+
+ const goodDepthCombo = hasDepth && !depthReadOnly ? hasBothDepthOps : hasNeitherDepthOps;
+ const goodStencilCombo =
+ hasStencil && !stencilReadOnly ? hasBothStencilOps : hasNeitherStencilOps;
+
+ const shouldError = !goodAspectCombo || !goodDepthCombo || !goodStencilCombo;
+
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, shouldError);
+ });
+
+g.test('depth_stencil_attachment,depth_clear_value')
+ .desc(
+ `
+  Test that depthClearValue is invalid if the value is outside the range [0.0, 1.0], but only when
+  depthLoadOp is 'clear'.
+ `
+ )
+ .params(u =>
+ u
+ .combine('depthLoadOp', ['load', 'clear', undefined] as const)
+ .combine('depthClearValue', [undefined, -1.0, 0.0, 0.5, 1.0, 1.5] as const)
+ )
+ .fn(t => {
+ const { depthLoadOp, depthClearValue } = t.params;
+
+ const depthStencilTexture = t.createTexture({
+ format: depthLoadOp === undefined ? 'stencil8' : 'depth24plus-stencil8',
+ });
+ const depthStencilAttachment = t.getDepthStencilAttachment(depthStencilTexture);
+ depthStencilAttachment.depthClearValue = depthClearValue;
+ depthStencilAttachment.depthLoadOp = depthLoadOp;
+ if (depthLoadOp === undefined) {
+ depthStencilAttachment.depthStoreOp = undefined;
+ }
+
+ const descriptor = {
+ colorAttachments: [t.getColorAttachment(t.createTexture())],
+ depthStencilAttachment,
+ };
+
+    // We cannot simply check for out-of-range because NaN is not out of range.
+    // So (v < 0.0 || v > 1.0) would return false when depthClearValue is undefined (NaN).
+ const isDepthValueInRange = depthClearValue! >= 0.0 && depthClearValue! <= 1.0;
+ const isInvalid = depthLoadOp === 'clear' && !isDepthValueInRange;
+
+ t.tryRenderPass(!isInvalid, descriptor);
+ });
+
+g.test('resolveTarget,format_supports_resolve')
+ .desc(
+ `
+ For all formats that support 'multisample', test that they can be used as a resolveTarget
+ if and only if they support 'resolve'.
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kRenderableColorTextureFormats)
+ .filter(t => kTextureFormatInfo[t.format].multisample)
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ })
+ .fn(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ const multisampledColorTexture = t.createTexture({ format, sampleCount: 4 });
+ const resolveTarget = t.createTexture({ format });
+
+ const colorAttachment = t.getColorAttachment(multisampledColorTexture);
+ colorAttachment.resolveTarget = resolveTarget.createView();
+
+ t.tryRenderPass(!!info.colorRender?.resolve, {
+ colorAttachments: [colorAttachment],
+ });
+ });
+
+g.test('timestampWrites,query_set_type')
+ .desc(
+ `
+  Test that all entries of the timestampWrites must have type 'timestamp'. If the query type is
+  not 'timestamp', a validation error should be generated.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('queryType', kQueryTypes)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase(['timestamp-query']);
+ })
+ .fn(t => {
+ const { queryType } = t.params;
+
+ const timestampWrites = {
+ querySet: t.device.createQuerySet({ type: queryType, count: 2 }),
+ beginningOfPassWriteIndex: 0,
+ endOfPassWriteIndex: 1,
+ };
+
+ const isValid = queryType === 'timestamp';
+
+ const colorTexture = t.createTexture();
+ const descriptor = {
+ colorAttachments: [t.getColorAttachment(colorTexture)],
+ timestampWrites,
+ };
+
+ t.tryRenderPass(isValid, descriptor);
+ });
+
+g.test('timestampWrite,query_index')
+ .desc(
+    `Test that querySet.count must be greater than each timestampWrites query index, and that the
+    query indexes are unique.`
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('beginningOfPassWriteIndex', [undefined, 0, 1, 2, 3] as const)
+ .combine('endOfPassWriteIndex', [undefined, 0, 1, 2, 3] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase(['timestamp-query']);
+ })
+ .fn(t => {
+ const { beginningOfPassWriteIndex, endOfPassWriteIndex } = t.params;
+
+ const querySetCount = 2;
+
+ const timestampWrites = {
+ querySet: t.device.createQuerySet({ type: 'timestamp', count: querySetCount }),
+ beginningOfPassWriteIndex,
+ endOfPassWriteIndex,
+ };
+
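+    // Valid only if the two write indexes differ and each provided index is within the query set.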
+ const isValid =
+ beginningOfPassWriteIndex !== endOfPassWriteIndex &&
+ (beginningOfPassWriteIndex === undefined || beginningOfPassWriteIndex < querySetCount) &&
+ (endOfPassWriteIndex === undefined || endOfPassWriteIndex < querySetCount);
+
+ const colorTexture = t.createTexture();
+ const descriptor = {
+ colorAttachments: [t.getColorAttachment(colorTexture)],
+ timestampWrites,
+ };
+
+ t.tryRenderPass(isValid, descriptor);
+ });
+
+g.test('occlusionQuerySet,query_set_type')
+ .desc(`Test that occlusionQuerySet must have type 'occlusion'.`)
+ .params(u => u.combine('queryType', kQueryTypes))
+ .beforeAllSubcases(t => {
+ if (t.params.queryType === 'timestamp') {
+ t.selectDeviceOrSkipTestCase(['timestamp-query']);
+ }
+ })
+ .fn(t => {
+ const { queryType } = t.params;
+
+ const querySet = t.device.createQuerySet({
+ type: queryType,
+ count: 1,
+ });
+
+ const colorTexture = t.createTexture();
+ const descriptor = {
+ colorAttachments: [t.getColorAttachment(colorTexture)],
+ occlusionQuerySet: querySet,
+ };
+
+ const isValid = queryType === 'occlusion';
+ t.tryRenderPass(isValid, descriptor);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pass/resolve.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pass/resolve.spec.ts
new file mode 100644
index 0000000000..975cc8f23a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pass/resolve.spec.ts
@@ -0,0 +1,192 @@
+export const description = `
+Validation tests for render pass resolve.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUConst } from '../../../constants.js';
+import { ValidationTest } from '../validation_test.js';
+
+const kNumColorAttachments = 4;
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('resolve_attachment')
+ .desc(
+ `
+Test various validation behaviors when a resolveTarget is provided.
+
+- base case (valid).
+- resolve source is not multisampled.
+- resolve target is not single sampled.
+- resolve target missing RENDER_ATTACHMENT usage.
+- resolve target must have exactly one subresource:
+ - base mip level {0, >0}, mip level count {1, >1}.
+ - base array layer {0, >0}, array layer count {1, >1}.
+- resolve target GPUTextureView is invalid
+- resolve source and target have different formats.
+ - rgba8unorm -> {bgra8unorm, rgba8unorm-srgb}
+ - {bgra8unorm, rgba8unorm-srgb} -> rgba8unorm
+ - test with other color attachments having a different format
+- resolve source and target have different sizes.
+`
+ )
+ .paramsSimple([
+ // control case should be valid
+ { _valid: true },
+ // a single sampled resolve source should cause a validation error.
+ { colorAttachmentSamples: 1, _valid: false },
+ // a multisampled resolve target should cause a validation error.
+ { resolveTargetSamples: 4, _valid: false },
+ // resolveTargetUsage without RENDER_ATTACHMENT usage should cause a validation error.
+ { resolveTargetUsage: GPUConst.TextureUsage.COPY_SRC, _valid: false },
+ // non-zero resolve target base mip level should be valid.
+ {
+ resolveTargetViewBaseMipLevel: 1,
+ resolveTargetHeight: 4,
+ resolveTargetWidth: 4,
+ _valid: true,
+ },
+ // a validation error should be created when resolveTarget is invalid.
+ { resolveTargetInvalid: true, _valid: false },
+ // a validation error should be created when mip count > 1
+ { resolveTargetViewMipCount: 2, _valid: false },
+ {
+ resolveTargetViewBaseMipLevel: 1,
+ resolveTargetViewMipCount: 2,
+ resolveTargetHeight: 4,
+ resolveTargetWidth: 4,
+ _valid: false,
+ },
+ // non-zero resolve target base array layer should be valid.
+ { resolveTargetViewBaseArrayLayer: 1, _valid: true },
+ // a validation error should be created when array layer count > 1
+ { resolveTargetViewArrayLayerCount: 2, _valid: false },
+ { resolveTargetViewBaseArrayLayer: 1, resolveTargetViewArrayLayerCount: 2, _valid: false },
+ // other color attachments resolving with a different format should be valid.
+ { otherAttachmentFormat: 'bgra8unorm', _valid: true },
+ // mismatched colorAttachment and resolveTarget formats should cause a validation error.
+ { colorAttachmentFormat: 'bgra8unorm', _valid: false },
+ { colorAttachmentFormat: 'rgba8unorm-srgb', _valid: false },
+ { resolveTargetFormat: 'bgra8unorm', _valid: false },
+ { resolveTargetFormat: 'rgba8unorm-srgb', _valid: false },
+ // mismatched colorAttachment and resolveTarget sizes should cause a validation error.
+ { colorAttachmentHeight: 4, _valid: false },
+ { colorAttachmentWidth: 4, _valid: false },
+ { resolveTargetHeight: 4, _valid: false },
+ { resolveTargetWidth: 4, _valid: false },
+ ] as const)
+ .fn(t => {
+ const {
+ colorAttachmentFormat = 'rgba8unorm',
+ resolveTargetFormat = 'rgba8unorm',
+ otherAttachmentFormat = 'rgba8unorm',
+ colorAttachmentSamples = 4,
+ resolveTargetSamples = 1,
+ resolveTargetUsage = GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ resolveTargetInvalid = false,
+ resolveTargetViewMipCount = 1,
+ resolveTargetViewBaseMipLevel = 0,
+ resolveTargetViewArrayLayerCount = 1,
+ resolveTargetViewBaseArrayLayer = 0,
+ colorAttachmentHeight = 2,
+ colorAttachmentWidth = 2,
+ resolveTargetHeight = 2,
+ resolveTargetWidth = 2,
+ _valid,
+ } = t.params;
+
+ // Run the test in a nested loop such that the configured color attachment with resolve target
+ // is tested while occupying each individual colorAttachment slot.
+ for (let resolveSlot = 0; resolveSlot < kNumColorAttachments; resolveSlot++) {
+ const renderPassColorAttachmentDescriptors: GPURenderPassColorAttachment[] = [];
+ for (
+ let colorAttachmentSlot = 0;
+ colorAttachmentSlot < kNumColorAttachments;
+ colorAttachmentSlot++
+ ) {
+ // resolveSlot === colorAttachmentSlot denotes the color attachment slot that contains the
+ // color attachment with resolve target.
+ if (resolveSlot === colorAttachmentSlot) {
+ // Create the color attachment with resolve target with the configurable parameters.
+ const resolveSourceColorAttachment = t.device.createTexture({
+ format: colorAttachmentFormat,
+ size: {
+ width: colorAttachmentWidth,
+ height: colorAttachmentHeight,
+ depthOrArrayLayers: 1,
+ },
+ sampleCount: colorAttachmentSamples,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const resolveTarget = t.device.createTexture({
+ format: resolveTargetFormat,
+ size: {
+ width: resolveTargetWidth,
+ height: resolveTargetHeight,
+ depthOrArrayLayers:
+ resolveTargetViewBaseArrayLayer + resolveTargetViewArrayLayerCount,
+ },
+ sampleCount: resolveTargetSamples,
+ mipLevelCount: resolveTargetViewBaseMipLevel + resolveTargetViewMipCount,
+ usage: resolveTargetUsage,
+ });
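+          // The texture above is sized so that the requested base mip level / array layer
+          // exists; whether the attachment is valid then depends on the view created below
+          // selecting exactly one subresource, per the cases listed in the test description.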
+
+ renderPassColorAttachmentDescriptors.push({
+ view: resolveSourceColorAttachment.createView(),
+ loadOp: 'load',
+ storeOp: 'discard',
+ resolveTarget: resolveTargetInvalid
+ ? t.getErrorTextureView()
+ : resolveTarget.createView({
+ dimension: resolveTargetViewArrayLayerCount === 1 ? '2d' : '2d-array',
+ mipLevelCount: resolveTargetViewMipCount,
+ arrayLayerCount: resolveTargetViewArrayLayerCount,
+ baseMipLevel: resolveTargetViewBaseMipLevel,
+ baseArrayLayer: resolveTargetViewBaseArrayLayer,
+ }),
+ });
+ } else {
+ // Create a basic texture to fill other color attachment slots. This texture's dimensions
+ // and sample count must match the resolve source color attachment to be valid.
+ const colorAttachment = t.device.createTexture({
+ format: otherAttachmentFormat,
+ size: {
+ width: colorAttachmentWidth,
+ height: colorAttachmentHeight,
+ depthOrArrayLayers: 1,
+ },
+ sampleCount: colorAttachmentSamples,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const resolveTarget = t.device.createTexture({
+ format: otherAttachmentFormat,
+ size: {
+ width: colorAttachmentWidth,
+ height: colorAttachmentHeight,
+ depthOrArrayLayers: 1,
+ },
+ sampleCount: 1,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ renderPassColorAttachmentDescriptors.push({
+ view: colorAttachment.createView(),
+ loadOp: 'load',
+ storeOp: 'discard',
+ resolveTarget: resolveTarget.createView(),
+ });
+ }
+ }
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: renderPassColorAttachmentDescriptors,
+ });
+ pass.end();
+
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, !_valid);
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/common.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/common.ts
new file mode 100644
index 0000000000..93b0932042
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/common.ts
@@ -0,0 +1,68 @@
+import { kTextureFormatInfo } from '../../../format_info.js';
+import {
+ getFragmentShaderCodeWithOutput,
+ getPlainTypeInfo,
+ kDefaultVertexShaderCode,
+} from '../../../util/shader.js';
+import { ValidationTest } from '../validation_test.js';
+
+const values = [0, 1, 0, 1];
+export class CreateRenderPipelineValidationTest extends ValidationTest {
+ getDescriptor(
+ options: {
+ primitive?: GPUPrimitiveState;
+ targets?: GPUColorTargetState[];
+ multisample?: GPUMultisampleState;
+ depthStencil?: GPUDepthStencilState;
+ fragmentShaderCode?: string;
+ noFragment?: boolean;
+ fragmentConstants?: Record<string, GPUPipelineConstantValue>;
+ } = {}
+ ): GPURenderPipelineDescriptor {
+ const defaultTargets: GPUColorTargetState[] = [{ format: 'rgba8unorm' }];
+ const {
+ primitive = {},
+ targets = defaultTargets,
+ multisample = {},
+ depthStencil,
+ fragmentShaderCode = getFragmentShaderCodeWithOutput([
+ {
+ values,
+ plainType: getPlainTypeInfo(
+ kTextureFormatInfo[targets[0] ? targets[0].format : 'rgba8unorm'].sampleType
+ ),
+ componentCount: 4,
+ },
+ ]),
+ noFragment = false,
+ fragmentConstants = {},
+ } = options;
+
+ return {
+ vertex: {
+ module: this.device.createShaderModule({
+ code: kDefaultVertexShaderCode,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: noFragment
+ ? undefined
+ : {
+ module: this.device.createShaderModule({
+ code: fragmentShaderCode,
+ }),
+ entryPoint: 'main',
+ targets,
+ constants: fragmentConstants,
+ },
+ layout: this.getPipelineLayout(),
+ primitive,
+ multisample,
+ depthStencil,
+ };
+ }
+
+ getPipelineLayout(): GPUPipelineLayout {
+ return this.device.createPipelineLayout({ bindGroupLayouts: [] });
+ }
+}
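+
+// A typical spec file uses this helper roughly as follows (see the sibling *.spec.ts files);
+// `expectedValid` is a placeholder for whatever validity the test computes:
+//
+//   const descriptor = t.getDescriptor({
+//     depthStencil: { format, depthWriteEnabled: false, depthCompare: 'always' },
+//   });
+//   t.doCreateRenderPipelineTest(isAsync, expectedValid, descriptor);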
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/depth_stencil_state.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/depth_stencil_state.spec.ts
new file mode 100644
index 0000000000..eaaf78af66
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/depth_stencil_state.spec.ts
@@ -0,0 +1,304 @@
+export const description = `
+This test specifically validates the GPUDepthStencilState of createRenderPipeline.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { unreachable } from '../../../../common/util/util.js';
+import { kCompareFunctions, kStencilOperations } from '../../../capability_info.js';
+import { kTextureFormats, kTextureFormatInfo, kDepthStencilFormats } from '../../../format_info.js';
+import { getFragmentShaderCodeWithOutput } from '../../../util/shader.js';
+
+import { CreateRenderPipelineValidationTest } from './common.js';
+
+export const g = makeTestGroup(CreateRenderPipelineValidationTest);
+
+g.test('format')
+ .desc(`The texture format in depthStencilState must be a depth/stencil format.`)
+ .params(u => u.combine('isAsync', [false, true]).combine('format', kTextureFormats))
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+ t.skipIfTextureFormatNotSupported(format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { isAsync, format } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ const descriptor = t.getDescriptor({
+ depthStencil: { format, depthWriteEnabled: false, depthCompare: 'always' },
+ });
+
+ t.doCreateRenderPipelineTest(isAsync, !!info.depth || !!info.stencil, descriptor);
+ });
+
+g.test('depthCompare_optional')
+ .desc(
+    `The depthCompare in depthStencilState is optional for stencil-only formats but required for
+    formats with a depth aspect unless depthCompare is not used for anything (depth writes are
+    disabled and both stencil depthFailOps are 'keep').`
+ )
+ .params(u =>
+ u
+ .combine('isAsync', [false, true])
+ .combine('format', kDepthStencilFormats)
+ .beginSubcases()
+ .combine('depthCompare', ['always', undefined] as const)
+ .combine('depthWriteEnabled', [false, true, undefined] as const)
+ .combine('stencilFrontDepthFailOp', ['keep', 'zero'] as const)
+ .combine('stencilBackDepthFailOp', ['keep', 'zero'] as const)
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+ t.skipIfTextureFormatNotSupported(format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const {
+ isAsync,
+ format,
+ depthCompare,
+ depthWriteEnabled,
+ stencilFrontDepthFailOp,
+ stencilBackDepthFailOp,
+ } = t.params;
+ const info = kTextureFormatInfo[format];
+ const descriptor = t.getDescriptor({
+ depthStencil: {
+ format,
+ depthCompare,
+ depthWriteEnabled,
+ stencilFront: { depthFailOp: stencilFrontDepthFailOp },
+ stencilBack: { depthFailOp: stencilBackDepthFailOp },
+ },
+ });
+
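+    // A sketch of the rule encoded below: depthCompare is only required when it is actually
+    // consulted (depth writes enabled, or a stencil depthFailOp other than 'keep'), while
+    // depthWriteEnabled must always be provided for formats that have a depth aspect.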
+ const depthFailOpsAreKeep =
+ stencilFrontDepthFailOp === 'keep' && stencilBackDepthFailOp === 'keep';
+ const stencilStateIsDefault = depthFailOpsAreKeep;
+ let success = true;
+ if (depthWriteEnabled || (depthCompare && depthCompare !== 'always')) {
+ if (!info.depth) success = false;
+ }
+ if (!stencilStateIsDefault) {
+ if (!info.stencil) success = false;
+ }
+ if (info.depth) {
+ if (depthWriteEnabled === undefined) success = false;
+ if (depthWriteEnabled || !depthFailOpsAreKeep) {
+ if (depthCompare === undefined) success = false;
+ }
+ }
+
+ t.doCreateRenderPipelineTest(isAsync, success, descriptor);
+ });
+
+g.test('depthWriteEnabled_optional')
+ .desc(
+    `The depthWriteEnabled in depthStencilState is optional for stencil-only formats but required for formats with a depth aspect.`
+ )
+ .params(u => u.combine('isAsync', [false, true]).combine('format', kDepthStencilFormats))
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+ t.skipIfTextureFormatNotSupported(format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { isAsync, format } = t.params;
+ const info = kTextureFormatInfo[format];
+ const descriptor = t.getDescriptor({
+ depthStencil: { format, depthCompare: 'always', depthWriteEnabled: undefined },
+ });
+
+ t.doCreateRenderPipelineTest(isAsync, !info.depth, descriptor);
+ });
+
+g.test('depth_test')
+ .desc(
+ `Depth aspect must be contained in the format if depth test is enabled in depthStencilState.`
+ )
+ .params(u =>
+ u
+ .combine('isAsync', [false, true])
+ .combine('format', kDepthStencilFormats)
+ .combine('depthCompare', kCompareFunctions)
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { isAsync, format, depthCompare } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ const descriptor = t.getDescriptor({
+ depthStencil: { format, depthCompare, depthWriteEnabled: false },
+ });
+
+ const depthTestEnabled = depthCompare !== undefined && depthCompare !== 'always';
+ t.doCreateRenderPipelineTest(isAsync, !depthTestEnabled || !!info.depth, descriptor);
+ });
+
+g.test('depth_write')
+ .desc(
+ `Depth aspect must be contained in the format if depth write is enabled in depthStencilState.`
+ )
+ .params(u =>
+ u
+ .combine('isAsync', [false, true])
+ .combine('format', kDepthStencilFormats)
+ .combine('depthWriteEnabled', [false, true])
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { isAsync, format, depthWriteEnabled } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ const descriptor = t.getDescriptor({
+ depthStencil: { format, depthWriteEnabled, depthCompare: 'always' },
+ });
+ t.doCreateRenderPipelineTest(isAsync, !depthWriteEnabled || !!info.depth, descriptor);
+ });
+
+g.test('depth_write,frag_depth')
+  .desc(`Depth aspect must be contained in the format if frag_depth is written in the fragment stage.`)
+ .params(u =>
+ u.combine('isAsync', [false, true]).combine('format', [undefined, ...kDepthStencilFormats])
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ if (format !== undefined) {
+ const info = kTextureFormatInfo[format];
+ t.selectDeviceOrSkipTestCase(info.feature);
+ }
+ })
+ .fn(t => {
+ const { isAsync, format } = t.params;
+
+ const descriptor = t.getDescriptor({
+ // Keep one color target so that the pipeline is still valid with no depth stencil target.
+ targets: [{ format: 'rgba8unorm' }],
+ depthStencil: format
+ ? { format, depthWriteEnabled: true, depthCompare: 'always' }
+ : undefined,
+ fragmentShaderCode: getFragmentShaderCodeWithOutput(
+ [{ values: [1, 1, 1, 1], plainType: 'f32', componentCount: 4 }],
+ { value: 0.5 }
+ ),
+ });
+
+ const hasDepth = format ? !!kTextureFormatInfo[format].depth : false;
+ t.doCreateRenderPipelineTest(isAsync, hasDepth, descriptor);
+ });
+
+g.test('stencil_test')
+ .desc(
+ `Stencil aspect must be contained in the format if stencil test is enabled in depthStencilState.`
+ )
+ .params(u =>
+ u
+ .combine('isAsync', [false, true])
+ .combine('format', kDepthStencilFormats)
+ .combine('face', ['front', 'back'] as const)
+ .combine('compare', [undefined, ...kCompareFunctions])
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { isAsync, format, face, compare } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ let descriptor: GPURenderPipelineDescriptor;
+ if (face === 'front') {
+ descriptor = t.getDescriptor({
+ depthStencil: {
+ format,
+ depthWriteEnabled: false,
+ depthCompare: 'always',
+ stencilFront: { compare },
+ },
+ });
+ } else {
+ descriptor = t.getDescriptor({
+ depthStencil: {
+ format,
+ depthWriteEnabled: false,
+ depthCompare: 'always',
+ stencilBack: { compare },
+ },
+ });
+ }
+
+ const stencilTestEnabled = compare !== undefined && compare !== 'always';
+ t.doCreateRenderPipelineTest(isAsync, !stencilTestEnabled || !!info.stencil, descriptor);
+ });
+
+g.test('stencil_write')
+ .desc(
+ `Stencil aspect must be contained in the format if stencil write is enabled in depthStencilState.`
+ )
+ .params(u =>
+ u
+ .combine('isAsync', [false, true])
+ .combine('format', kDepthStencilFormats)
+ .combine('faceAndOpType', [
+ 'frontFailOp',
+ 'frontDepthFailOp',
+ 'frontPassOp',
+ 'backFailOp',
+ 'backDepthFailOp',
+ 'backPassOp',
+ ] as const)
+ .combine('op', [undefined, ...kStencilOperations])
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { isAsync, format, faceAndOpType, op } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ const common = {
+ format,
+ depthWriteEnabled: false,
+ depthCompare: 'always' as GPUCompareFunction,
+ };
+ let depthStencil: GPUDepthStencilState;
+ switch (faceAndOpType) {
+ case 'frontFailOp':
+ depthStencil = { ...common, stencilFront: { failOp: op } };
+ break;
+ case 'frontDepthFailOp':
+ depthStencil = { ...common, stencilFront: { depthFailOp: op } };
+ break;
+ case 'frontPassOp':
+ depthStencil = { ...common, stencilFront: { passOp: op } };
+ break;
+ case 'backFailOp':
+ depthStencil = { ...common, stencilBack: { failOp: op } };
+ break;
+ case 'backDepthFailOp':
+ depthStencil = { ...common, stencilBack: { depthFailOp: op } };
+ break;
+ case 'backPassOp':
+ depthStencil = { ...common, stencilBack: { passOp: op } };
+ break;
+ default:
+ unreachable();
+ }
+ const descriptor = t.getDescriptor({ depthStencil });
+
+ const stencilWriteEnabled = op !== undefined && op !== 'keep';
+ t.doCreateRenderPipelineTest(isAsync, !stencilWriteEnabled || !!info.stencil, descriptor);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/fragment_state.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/fragment_state.spec.ts
new file mode 100644
index 0000000000..0206431eee
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/fragment_state.spec.ts
@@ -0,0 +1,427 @@
+export const description = `
+This test specifically validates the GPUFragmentState of createRenderPipeline.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { range } from '../../../../common/util/util.js';
+import {
+ kBlendFactors,
+ kBlendOperations,
+ kMaxColorAttachmentsToTest,
+} from '../../../capability_info.js';
+import {
+ kTextureFormats,
+ kRenderableColorTextureFormats,
+ kTextureFormatInfo,
+ computeBytesPerSampleFromFormats,
+} from '../../../format_info.js';
+import {
+ getFragmentShaderCodeWithOutput,
+ getPlainTypeInfo,
+ kDefaultFragmentShaderCode,
+} from '../../../util/shader.js';
+import { kTexelRepresentationInfo } from '../../../util/texture/texel_data.js';
+
+import { CreateRenderPipelineValidationTest } from './common.js';
+
+export const g = makeTestGroup(CreateRenderPipelineValidationTest);
+
+const values = [0, 1, 0, 1];
+
+g.test('color_target_exists')
+ .desc(`Tests creating a complete render pipeline requires at least one color target state.`)
+ .params(u => u.combine('isAsync', [false, true]))
+ .fn(t => {
+ const { isAsync } = t.params;
+
+ const goodDescriptor = t.getDescriptor({
+ targets: [{ format: 'rgba8unorm' }],
+ });
+
+ // Control case
+ t.doCreateRenderPipelineTest(isAsync, true, goodDescriptor);
+
+ // Fail because lack of color states
+ const badDescriptor = t.getDescriptor({
+ targets: [],
+ });
+
+ t.doCreateRenderPipelineTest(isAsync, false, badDescriptor);
+ });
+
+g.test('targets_format_renderable')
+ .desc(`Tests that color target state format must have RENDER_ATTACHMENT capability.`)
+ .params(u => u.combine('isAsync', [false, true]).combine('format', kTextureFormats))
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { isAsync, format } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ const descriptor = t.getDescriptor({ targets: [{ format }] });
+
+ t.doCreateRenderPipelineTest(isAsync, !!info.colorRender, descriptor);
+ });
+
+g.test('limits,maxColorAttachments')
+ .desc(
+ `Tests that color state targets length must not be larger than device.limits.maxColorAttachments.`
+ )
+ .params(u =>
+ u.combine('isAsync', [false, true]).combine('targetsLengthVariant', [
+ { mult: 1, add: 0 },
+ { mult: 1, add: 1 },
+ ])
+ )
+ .fn(t => {
+ const { isAsync, targetsLengthVariant } = t.params;
+ const targetsLength = t.makeLimitVariant('maxColorAttachments', targetsLengthVariant);
+
+ const descriptor = t.getDescriptor({
+ targets: range(targetsLength, _i => {
+ return { format: 'rg8unorm', writeMask: 0 };
+ }),
+ fragmentShaderCode: kDefaultFragmentShaderCode,
+ // add a depth stencil so that we can set writeMask to 0 for all color attachments
+ depthStencil: {
+ format: 'depth24plus',
+ depthWriteEnabled: true,
+ depthCompare: 'always',
+ },
+ });
+
+ t.doCreateRenderPipelineTest(
+ isAsync,
+ targetsLength <= t.device.limits.maxColorAttachments,
+ descriptor
+ );
+ });
+
+g.test('limits,maxColorAttachmentBytesPerSample,aligned')
+ .desc(
+ `
+ Tests that the total color attachment bytes per sample must not be larger than
+ maxColorAttachmentBytesPerSample when using the same format for multiple attachments.
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kRenderableColorTextureFormats)
+ .beginSubcases()
+ .combine(
+ 'attachmentCount',
+ range(kMaxColorAttachmentsToTest, i => i + 1)
+ )
+ .combine('isAsync', [false, true])
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ })
+ .fn(t => {
+ const { format, attachmentCount, isAsync } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ t.skipIf(
+ attachmentCount > t.device.limits.maxColorAttachments,
+ `attachmentCount: ${attachmentCount} > maxColorAttachments: ${t.device.limits.maxColorAttachments}`
+ );
+
+ const descriptor = t.getDescriptor({
+ targets: range(attachmentCount, () => {
+ return { format, writeMask: 0 };
+ }),
+ });
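+    // The pipeline should be rejected when the format is not color-renderable at all, or when
+    // byteCost * attachmentCount exceeds maxColorAttachmentBytesPerSample.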
+ const shouldError =
+ info.colorRender === undefined ||
+ info.colorRender.byteCost * attachmentCount >
+ t.device.limits.maxColorAttachmentBytesPerSample;
+
+ t.doCreateRenderPipelineTest(isAsync, !shouldError, descriptor);
+ });
+
+g.test('limits,maxColorAttachmentBytesPerSample,unaligned')
+ .desc(
+ `
+ Tests that the total color attachment bytes per sample must not be larger than
+ maxColorAttachmentBytesPerSample when using various sets of (potentially) unaligned formats.
+ `
+ )
+ .params(u =>
+ u
+ .combineWithParams([
+        // Alignment causes the first 1-byte R8Unorm to count as 4 bytes. So even though
+        // 1+4+8+16+1 < 32, the 4-byte alignment requirement of R32Float makes the first R8Unorm
+        // count as 4 bytes, and 4+4+8+16+1 > 32. Re-ordering so the R8Unorm formats come last,
+        // however, is allowed: 4+8+16+1+1 < 32.
+ {
+ formats: [
+ 'r8unorm',
+ 'r32float',
+ 'rgba8unorm',
+ 'rgba32float',
+ 'r8unorm',
+ ] as GPUTextureFormat[],
+ },
+ {
+ formats: [
+ 'r32float',
+ 'rgba8unorm',
+ 'rgba32float',
+ 'r8unorm',
+ 'r8unorm',
+ ] as GPUTextureFormat[],
+ },
+ ])
+ .beginSubcases()
+ .combine('isAsync', [false, true])
+ )
+ .fn(t => {
+ const { formats, isAsync } = t.params;
+
+ t.skipIf(
+ formats.length > t.device.limits.maxColorAttachments,
+ `numColorAttachments: ${formats.length} > maxColorAttachments: ${t.device.limits.maxColorAttachments}`
+ );
+
+ const success =
+ computeBytesPerSampleFromFormats(formats) <= t.device.limits.maxColorAttachmentBytesPerSample;
+
+ const descriptor = t.getDescriptor({
+ targets: formats.map(f => {
+ return { format: f, writeMask: 0 };
+ }),
+ });
+
+ t.doCreateRenderPipelineTest(isAsync, success, descriptor);
+ });
+
+g.test('targets_format_filterable')
+ .desc(
+ `
+ Tests that color target state format must be filterable if blend is not undefined.
+
+ TODO: info.colorRender.blend now directly says whether the format is blendable. Use that.`
+ )
+ .params(u =>
+ u
+ .combine('isAsync', [false, true])
+ .combine('format', kRenderableColorTextureFormats)
+ .beginSubcases()
+ .combine('hasBlend', [false, true])
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+ t.skipIfTextureFormatNotSupported(format);
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const { isAsync, format, hasBlend } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ const descriptor = t.getDescriptor({
+ targets: [
+ {
+ format,
+ blend: hasBlend ? { color: {}, alpha: {} } : undefined,
+ },
+ ],
+ });
+
+ t.doCreateRenderPipelineTest(isAsync, !hasBlend || info.color.type === 'float', descriptor);
+ });
+
+g.test('targets_blend')
+ .desc(
+ `
+ For the blend components on either GPUBlendState.color or GPUBlendState.alpha:
+ - Tests if the combination of 'srcFactor', 'dstFactor' and 'operation' is valid (if the blend
+ operation is "min" or "max", srcFactor and dstFactor must be "one").
+ `
+ )
+ .params(u =>
+ u
+ .combine('isAsync', [false, true])
+ .combine('component', ['color', 'alpha'] as const)
+ .beginSubcases()
+ .combine('srcFactor', kBlendFactors)
+ .combine('dstFactor', kBlendFactors)
+ .combine('operation', kBlendOperations)
+ )
+ .fn(t => {
+ const { isAsync, component, srcFactor, dstFactor, operation } = t.params;
+
+ const defaultBlendComponent: GPUBlendComponent = {
+ srcFactor: 'src-alpha',
+ dstFactor: 'dst-alpha',
+ operation: 'add',
+ };
+ const blendComponentToTest: GPUBlendComponent = {
+ srcFactor,
+ dstFactor,
+ operation,
+ };
+ const format = 'rgba8unorm';
+
+ const descriptor = t.getDescriptor({
+ targets: [
+ {
+ format,
+ blend: {
+ color: component === 'color' ? blendComponentToTest : defaultBlendComponent,
+ alpha: component === 'alpha' ? blendComponentToTest : defaultBlendComponent,
+ },
+ },
+ ],
+ });
+
+ if (operation === 'min' || operation === 'max') {
+ const _success = srcFactor === 'one' && dstFactor === 'one';
+ t.doCreateRenderPipelineTest(isAsync, _success, descriptor);
+ } else {
+ t.doCreateRenderPipelineTest(isAsync, true, descriptor);
+ }
+ });
+
+g.test('targets_write_mask')
+ .desc(`Tests that color target state write mask must be < 16.`)
+ .params(u => u.combine('isAsync', [false, true]).combine('writeMask', [0, 0xf, 0x10, 0x80000001]))
+ .fn(t => {
+ const { isAsync, writeMask } = t.params;
+
+ const descriptor = t.getDescriptor({
+ targets: [
+ {
+ format: 'rgba8unorm',
+ writeMask,
+ },
+ ],
+ });
+
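+    // Only the four GPUColorWrite bits (RED | GREEN | BLUE | ALPHA = 0xf) are defined, so any
+    // mask with a higher bit set (0x10, 0x80000001) is expected to be rejected.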
+ t.doCreateRenderPipelineTest(isAsync, writeMask < 16, descriptor);
+ });
+
+g.test('pipeline_output_targets')
+ .desc(
+    `Pipeline fragment output types must be compatible with the target color state format:
+  - The scalar type (f32, i32, or u32) must match the sample type of the format.
+  - The fragment output (f32, vec2, vec3, or vec4) must not have fewer components than the color
+    attachment texture format; extra components are allowed and are discarded.
+
+ Otherwise, color state write mask must be 0.`
+ )
+ .params(u =>
+ u
+ .combine('isAsync', [false, true])
+ .combine('format', [undefined, ...kRenderableColorTextureFormats] as const)
+ .beginSubcases()
+ .combine('shaderOutput', [
+ undefined,
+ ...u.combine('scalar', ['f32', 'u32', 'i32'] as const).combine('count', [1, 2, 3, 4]),
+ ])
+ // We only care about testing writeMask if there is an attachment but no shader output.
+ .expand('writeMask', p =>
+ p.format !== undefined && p.shaderOutput !== undefined ? [0, 0x1, 0x2, 0x4, 0x8] : [0xf]
+ )
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceForTextureFormatOrSkipTestCase(t.params.format);
+ })
+ .fn(t => {
+ const { isAsync, format, writeMask, shaderOutput } = t.params;
+
+ const descriptor = t.getDescriptor({
+ targets: format ? [{ format, writeMask }] : [],
+      // Provide a placeholder depthStencil attachment so the pipeline has at least one
+      // attachment; having no attachment at all is invalid.
+ depthStencil: { format: 'depth24plus', depthWriteEnabled: false, depthCompare: 'always' },
+ fragmentShaderCode: getFragmentShaderCodeWithOutput(
+ shaderOutput
+ ? [{ values, plainType: shaderOutput.scalar, componentCount: shaderOutput.count }]
+ : []
+ ),
+ });
+
+ let success = true;
+ if (format) {
+ // There is a color target
+ if (shaderOutput) {
+ // The shader outputs to the color target
+ const info = kTextureFormatInfo[format];
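+        // For example (assuming the usual format tables): an 'rg8unorm' target has a float
+        // sample type and 2 components, so an f32-based output with at least 2 components
+        // (vec2<f32> or vec4<f32>) passes, while f32 or any u32/i32 output fails.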
+ success =
+ shaderOutput.scalar === getPlainTypeInfo(info.color.type) &&
+ shaderOutput.count >= kTexelRepresentationInfo[format].componentOrder.length;
+ } else {
+ // The shader does not output to the color target
+ success = writeMask === 0;
+ }
+ }
+
+ t.doCreateRenderPipelineTest(isAsync, success, descriptor);
+ });
+
+g.test('pipeline_output_targets,blend')
+ .desc(
+    `On top of the requirements from pipeline_output_targets, when blending is enabled and the alpha channel is read (as indicated by any blend factor), an extra requirement is added:
+ - fragment output must be vec4.
+ `
+ )
+ .params(u =>
+ u
+ .combine('isAsync', [false, true])
+ .combine('format', ['r8unorm', 'rg8unorm', 'rgba8unorm', 'bgra8unorm'] as const)
+ .combine('componentCount', [1, 2, 3, 4])
+ .beginSubcases()
+ // The default srcFactor and dstFactor are 'one' and 'zero'. Override just one at a time.
+ .combineWithParams([
+ ...u.combine('colorSrcFactor', kBlendFactors),
+ ...u.combine('colorDstFactor', kBlendFactors),
+ ...u.combine('alphaSrcFactor', kBlendFactors),
+ ...u.combine('alphaDstFactor', kBlendFactors),
+ ] as const)
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ const info = kTextureFormatInfo[format];
+ t.selectDeviceOrSkipTestCase(info.feature);
+ })
+ .fn(t => {
+ const sampleType = 'float';
+ const {
+ isAsync,
+ format,
+ componentCount,
+ colorSrcFactor,
+ colorDstFactor,
+ alphaSrcFactor,
+ alphaDstFactor,
+ } = t.params;
+ const info = kTextureFormatInfo[format];
+
+ const descriptor = t.getDescriptor({
+ targets: [
+ {
+ format,
+ blend: {
+ color: { srcFactor: colorSrcFactor, dstFactor: colorDstFactor },
+ alpha: { srcFactor: alphaSrcFactor, dstFactor: alphaDstFactor },
+ },
+ },
+ ],
+ fragmentShaderCode: getFragmentShaderCodeWithOutput([
+ { values, plainType: getPlainTypeInfo(sampleType), componentCount },
+ ]),
+ });
+
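+    // A color-blend factor that reads src-alpha requires the fragment output to actually carry
+    // an alpha channel, i.e. to have 4 components; that extra rule is folded into _success below.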
+ const colorBlendReadsSrcAlpha =
+ colorSrcFactor?.includes('src-alpha') || colorDstFactor?.includes('src-alpha');
+ const meetsExtraBlendingRequirement = !colorBlendReadsSrcAlpha || componentCount === 4;
+ const _success =
+ info.color.type === sampleType &&
+ componentCount >= kTexelRepresentationInfo[format].componentOrder.length &&
+ meetsExtraBlendingRequirement;
+ t.doCreateRenderPipelineTest(isAsync, _success, descriptor);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/inter_stage.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/inter_stage.spec.ts
new file mode 100644
index 0000000000..91aabb0ab8
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/inter_stage.spec.ts
@@ -0,0 +1,324 @@
+export const description = `
+Interface matching between vertex and fragment shader validation for createRenderPipeline.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { assert, range } from '../../../../common/util/util.js';
+
+import { CreateRenderPipelineValidationTest } from './common.js';
+
+function getVarName(i: number) {
+ return `v${i}`;
+}
+
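+// Each entry in the `outputs`/`inputs` arrays used by the helpers below is a WGSL declaration
+// template; its '__' placeholder is substituted with a generated name (v0, v1, ...) before being
+// spliced into the shader struct, e.g. '@location(0) __: f32' becomes '@location(0) v0: f32'.
+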
+class InterStageMatchingValidationTest extends CreateRenderPipelineValidationTest {
+ getVertexStateWithOutputs(outputs: readonly string[]): GPUVertexState {
+ return {
+ module: this.device.createShaderModule({
+ code: `
+ struct A {
+ ${outputs.map((v, i) => v.replace('__', getVarName(i))).join(',\n')},
+ @builtin(position) pos: vec4<f32>,
+ }
+ @vertex fn main() -> A {
+ var vertexOut: A;
+ vertexOut.pos = vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ return vertexOut;
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ };
+ }
+
+ getFragmentStateWithInputs(
+ inputs: readonly string[],
+ hasBuiltinPosition: boolean = false
+ ): GPUFragmentState {
+ return {
+ targets: [{ format: 'rgba8unorm' }],
+ module: this.device.createShaderModule({
+ code: `
+ struct B {
+ ${inputs.map((v, i) => v.replace('__', getVarName(i))).join(',\n')},
+ ${hasBuiltinPosition ? '@builtin(position) pos: vec4<f32>' : ''}
+ }
+ @fragment fn main(fragmentIn: B) -> @location(0) vec4<f32> {
+ return vec4<f32>(1.0, 1.0, 1.0, 1.0);
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ };
+ }
+
+ getDescriptorWithStates(
+ vertex: GPUVertexState,
+ fragment: GPUFragmentState
+ ): GPURenderPipelineDescriptor {
+ return {
+ layout: 'auto',
+ vertex,
+ fragment,
+ };
+ }
+}
+
+export const g = makeTestGroup(InterStageMatchingValidationTest);
+
+g.test('location,mismatch')
+  .desc(`Tests that a missing declaration at the same location should fail validation.`)
+ .params(u =>
+ u.combine('isAsync', [false, true]).combineWithParams([
+ { outputs: ['@location(0) __: f32'], inputs: ['@location(0) __: f32'], _success: true },
+ { outputs: ['@location(0) __: f32'], inputs: ['@location(1) __: f32'], _success: false },
+ { outputs: ['@location(1) __: f32'], inputs: ['@location(0) __: f32'], _success: false },
+ {
+ outputs: ['@location(0) __: f32', '@location(1) __: f32'],
+ inputs: ['@location(1) __: f32', '@location(0) __: f32'],
+ _success: true,
+ },
+ {
+ outputs: ['@location(1) __: f32', '@location(0) __: f32'],
+ inputs: ['@location(0) __: f32', '@location(1) __: f32'],
+ _success: true,
+ },
+ ])
+ )
+ .fn(t => {
+ const { isAsync, outputs, inputs, _success } = t.params;
+
+ const descriptor = t.getDescriptorWithStates(
+ t.getVertexStateWithOutputs(outputs),
+ t.getFragmentStateWithInputs(inputs)
+ );
+
+ t.doCreateRenderPipelineTest(isAsync, _success, descriptor);
+ });
+
+g.test('location,superset')
+ .desc(`TODO: implement after spec is settled: https://github.com/gpuweb/gpuweb/issues/2038`)
+ .unimplemented();
+
+g.test('location,subset')
+ .desc(`Tests that validation should fail when vertex output is a subset of fragment input.`)
+ .params(u => u.combine('isAsync', [false, true]))
+ .fn(t => {
+ const { isAsync } = t.params;
+
+ const descriptor = t.getDescriptorWithStates(
+ t.getVertexStateWithOutputs(['@location(0) vout0: f32']),
+ t.getFragmentStateWithInputs(['@location(0) fin0: f32', '@location(1) fin1: f32'])
+ );
+
+ t.doCreateRenderPipelineTest(isAsync, false, descriptor);
+ });
+
+g.test('type')
+ .desc(
+    `Tests that validation should fail when the types of the vertex output and fragment input at the same location don't match.`
+ )
+ .params(u =>
+ u.combine('isAsync', [false, true]).combineWithParams([
+ { output: 'f32', input: 'f32' },
+ { output: 'i32', input: 'f32' },
+ { output: 'u32', input: 'f32' },
+ { output: 'u32', input: 'i32' },
+ { output: 'i32', input: 'u32' },
+ { output: 'vec2<f32>', input: 'vec2<f32>' },
+ { output: 'vec3<f32>', input: 'vec2<f32>' },
+ { output: 'vec2<f32>', input: 'vec3<f32>' },
+ { output: 'vec2<f32>', input: 'f32' },
+ { output: 'f32', input: 'vec2<f32>' },
+ ])
+ )
+ .fn(t => {
+ const { isAsync, output, input } = t.params;
+
+ const descriptor = t.getDescriptorWithStates(
+ t.getVertexStateWithOutputs([`@location(0) @interpolate(flat) vout0: ${output}`]),
+ t.getFragmentStateWithInputs([`@location(0) @interpolate(flat) fin0: ${input}`])
+ );
+
+ t.doCreateRenderPipelineTest(isAsync, output === input, descriptor);
+ });
+
+g.test('interpolation_type')
+ .desc(
+    `Tests that validation should fail when the interpolation type of the vertex output and fragment input at the same location doesn't match.`
+ )
+ .params(u =>
+ u.combine('isAsync', [false, true]).combineWithParams([
+ // default is @interpolate(perspective, center)
+ { output: '', input: '' },
+ { output: '', input: '@interpolate(perspective)', _success: true },
+ { output: '', input: '@interpolate(perspective, center)', _success: true },
+ { output: '@interpolate(perspective)', input: '', _success: true },
+ { output: '', input: '@interpolate(linear)' },
+ { output: '@interpolate(perspective)', input: '@interpolate(perspective)' },
+ { output: '@interpolate(linear)', input: '@interpolate(perspective)' },
+ { output: '@interpolate(flat)', input: '@interpolate(perspective)' },
+ { output: '@interpolate(linear)', input: '@interpolate(flat)' },
+ { output: '@interpolate(linear, center)', input: '@interpolate(linear, center)' },
+ ])
+ )
+ .fn(t => {
+ const { isAsync, output, input, _success } = t.params;
+
+ const descriptor = t.getDescriptorWithStates(
+ t.getVertexStateWithOutputs([`@location(0) ${output} vout0: f32`]),
+ t.getFragmentStateWithInputs([`@location(0) ${input} fin0: f32`])
+ );
+
+ t.doCreateRenderPipelineTest(isAsync, _success ?? output === input, descriptor);
+ });
+
+g.test('interpolation_sampling')
+ .desc(
+    `Tests that validation should fail when the interpolation sampling of the vertex output and fragment input at the same location doesn't match.`
+ )
+ .params(u =>
+ u.combine('isAsync', [false, true]).combineWithParams([
+ // default is @interpolate(perspective, center)
+ { output: '@interpolate(perspective)', input: '@interpolate(perspective)' },
+ {
+ output: '@interpolate(perspective)',
+ input: '@interpolate(perspective, center)',
+ _success: true,
+ },
+ { output: '@interpolate(linear, center)', input: '@interpolate(linear)', _success: true },
+ { output: '@interpolate(flat)', input: '@interpolate(flat)' },
+ { output: '@interpolate(perspective)', input: '@interpolate(perspective, sample)' },
+ { output: '@interpolate(perspective, center)', input: '@interpolate(perspective, sample)' },
+ {
+ output: '@interpolate(perspective, center)',
+ input: '@interpolate(perspective, centroid)',
+ },
+ { output: '@interpolate(perspective, centroid)', input: '@interpolate(perspective)' },
+ ])
+ )
+ .fn(t => {
+ const { isAsync, output, input, _success } = t.params;
+
+ const descriptor = t.getDescriptorWithStates(
+ t.getVertexStateWithOutputs([`@location(0) ${output} vout0: f32`]),
+ t.getFragmentStateWithInputs([`@location(0) ${input} fin0: f32`])
+ );
+
+ t.doCreateRenderPipelineTest(isAsync, _success ?? output === input, descriptor);
+ });
+
+g.test('max_shader_variable_location')
+ .desc(
+    `Tests that validation should fail when the location of a user-defined output/input variable is >= device.limits.maxInterStageShaderVariables`
+ )
+ .params(u =>
+ u
+ .combine('isAsync', [false, true])
+ // User defined variable location = maxInterStageShaderVariables + locationDelta
+ .combine('locationDelta', [0, -1, -2])
+ )
+ .fn(t => {
+ const { isAsync, locationDelta } = t.params;
+ const maxInterStageShaderVariables = t.device.limits.maxInterStageShaderVariables;
+ const location = maxInterStageShaderVariables + locationDelta;
+
+ const descriptor = t.getDescriptorWithStates(
+ t.getVertexStateWithOutputs([`@location(${location}) vout0: f32`]),
+ t.getFragmentStateWithInputs([`@location(${location}) fin0: f32`])
+ );
+
+ t.doCreateRenderPipelineTest(isAsync, location < maxInterStageShaderVariables, descriptor);
+ });
+
+g.test('max_components_count,output')
+ .desc(
+    `Tests that validation should fail when the total number of scalar components across all user-defined outputs exceeds device.limits.maxInterStageShaderComponents.`
+ )
+ .params(u =>
+ u.combine('isAsync', [false, true]).combineWithParams([
+ // Number of user-defined output scalar components in test shader = device.limits.maxInterStageShaderComponents + numScalarDelta.
+ { numScalarDelta: 0, topology: 'triangle-list', _success: true },
+ { numScalarDelta: 1, topology: 'triangle-list', _success: false },
+ { numScalarDelta: 0, topology: 'point-list', _success: false },
+ { numScalarDelta: -1, topology: 'point-list', _success: true },
+ ] as const)
+ )
+ .fn(t => {
+ const { isAsync, numScalarDelta, topology, _success } = t.params;
+
+ const numScalarComponents = t.device.limits.maxInterStageShaderComponents + numScalarDelta;
+
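+    // Pack the requested number of scalar components into as few inter-stage variables as
+    // possible: full vec4s first, plus one trailing f32/vec2/vec3 variable for any remainder.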
+ const numVec4 = Math.floor(numScalarComponents / 4);
+ const numTrailingScalars = numScalarComponents % 4;
+ const numUserDefinedInterStageVariables = numTrailingScalars > 0 ? numVec4 + 1 : numVec4;
+
+ assert(numUserDefinedInterStageVariables <= t.device.limits.maxInterStageShaderVariables);
+
+ const outputs = range(numVec4, i => `@location(${i}) vout${i}: vec4<f32>`);
+ const inputs = range(numVec4, i => `@location(${i}) fin${i}: vec4<f32>`);
+
+ if (numTrailingScalars > 0) {
+ const typeString = numTrailingScalars === 1 ? 'f32' : `vec${numTrailingScalars}<f32>`;
+ outputs.push(`@location(${numVec4}) vout${numVec4}: ${typeString}`);
+ inputs.push(`@location(${numVec4}) fin${numVec4}: ${typeString}`);
+ }
+
+ const descriptor = t.getDescriptorWithStates(
+ t.getVertexStateWithOutputs(outputs),
+ t.getFragmentStateWithInputs(inputs)
+ );
+ descriptor.primitive = { topology };
+
+ t.doCreateRenderPipelineTest(isAsync, _success, descriptor);
+ });
+
+g.test('max_components_count,input')
+ .desc(
+    `Tests that validation should fail when the total number of scalar components across all user-defined inputs exceeds device.limits.maxInterStageShaderComponents.`
+ )
+ .params(u =>
+ u.combine('isAsync', [false, true]).combineWithParams([
+ // Number of user-defined input scalar components in test shader = device.limits.maxInterStageShaderComponents + numScalarDelta.
+ { numScalarDelta: 0, useExtraBuiltinInputs: false, _success: true },
+ { numScalarDelta: 1, useExtraBuiltinInputs: false, _success: false },
+ { numScalarDelta: 0, useExtraBuiltinInputs: true, _success: false },
+ { numScalarDelta: -3, useExtraBuiltinInputs: true, _success: true },
+ { numScalarDelta: -2, useExtraBuiltinInputs: true, _success: false },
+ ] as const)
+ )
+ .fn(t => {
+ const { isAsync, numScalarDelta, useExtraBuiltinInputs, _success } = t.params;
+
+ const numScalarComponents = t.device.limits.maxInterStageShaderComponents + numScalarDelta;
+
+ const numVec4 = Math.floor(numScalarComponents / 4);
+ const numTrailingScalars = numScalarComponents % 4;
+ const numUserDefinedInterStageVariables = numTrailingScalars > 0 ? numVec4 + 1 : numVec4;
+
+ assert(numUserDefinedInterStageVariables <= t.device.limits.maxInterStageShaderVariables);
+
+ const outputs = range(numVec4, i => `@location(${i}) vout${i}: vec4<f32>`);
+ const inputs = range(numVec4, i => `@location(${i}) fin${i}: vec4<f32>`);
+
+ if (numTrailingScalars > 0) {
+ const typeString = numTrailingScalars === 1 ? 'f32' : `vec${numTrailingScalars}<f32>`;
+ outputs.push(`@location(${numVec4}) vout${numVec4}: ${typeString}`);
+ inputs.push(`@location(${numVec4}) fin${numVec4}: ${typeString}`);
+ }
+
+ if (useExtraBuiltinInputs) {
+ inputs.push(
+ '@builtin(front_facing) front_facing_in: bool',
+ '@builtin(sample_index) sample_index_in: u32',
+ '@builtin(sample_mask) sample_mask_in: u32'
+ );
+ }
+
+ const descriptor = t.getDescriptorWithStates(
+ t.getVertexStateWithOutputs(outputs),
+ t.getFragmentStateWithInputs(inputs, true)
+ );
+
+ t.doCreateRenderPipelineTest(isAsync, _success, descriptor);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/misc.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/misc.spec.ts
new file mode 100644
index 0000000000..1e3ccf5637
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/misc.spec.ts
@@ -0,0 +1,98 @@
+export const description = `
+misc createRenderPipeline and createRenderPipelineAsync validation tests.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { kDefaultVertexShaderCode, kDefaultFragmentShaderCode } from '../../../util/shader.js';
+
+import { CreateRenderPipelineValidationTest } from './common.js';
+
+export const g = makeTestGroup(CreateRenderPipelineValidationTest);
+
+g.test('basic')
+ .desc(`Test basic usage of createRenderPipeline.`)
+ .params(u => u.combine('isAsync', [false, true]))
+ .fn(t => {
+ const { isAsync } = t.params;
+ const descriptor = t.getDescriptor();
+
+ t.doCreateRenderPipelineTest(isAsync, true, descriptor);
+ });
+
+g.test('vertex_state_only')
+ .desc(
+    `Tests creating a vertex-state-only render pipeline. A vertex-only render pipeline has no fragment
+state (and thus has no color state), and can be created with or without a depth stencil state.`
+ )
+ .params(u =>
+ u
+ .combine('isAsync', [false, true])
+ .beginSubcases()
+ .combine('depthStencilFormat', [
+ 'depth24plus',
+ 'depth24plus-stencil8',
+ 'depth32float',
+ '',
+ ] as const)
+ .combine('hasColor', [false, true])
+ )
+ .fn(t => {
+ const { isAsync, depthStencilFormat, hasColor } = t.params;
+
+ let depthStencilState: GPUDepthStencilState | undefined;
+ if (depthStencilFormat === '') {
+ depthStencilState = undefined;
+ } else {
+ depthStencilState = {
+ format: depthStencilFormat,
+ depthWriteEnabled: false,
+ depthCompare: 'always',
+ };
+ }
+
+    // Having targets or not should have no effect on the result, since they will not appear in
+    // the descriptor of a vertex-only render pipeline.
+ const descriptor = t.getDescriptor({
+ noFragment: true,
+ depthStencil: depthStencilState,
+ targets: hasColor ? [{ format: 'rgba8unorm' }] : [],
+ });
+
+ t.doCreateRenderPipelineTest(isAsync, true, descriptor);
+ });
+
+g.test('pipeline_layout,device_mismatch')
+ .desc(
+    'Tests that createRenderPipeline(Async) cannot be called with a pipeline layout created from another device'
+ )
+ .paramsSubcasesOnly(u => u.combine('isAsync', [true, false]).combine('mismatched', [true, false]))
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { isAsync, mismatched } = t.params;
+
+ const sourceDevice = mismatched ? t.mismatchedDevice : t.device;
+
+ const layout = sourceDevice.createPipelineLayout({ bindGroupLayouts: [] });
+
+ const format = 'rgba8unorm';
+ const descriptor = {
+ layout,
+ vertex: {
+ module: t.device.createShaderModule({
+ code: kDefaultVertexShaderCode,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: kDefaultFragmentShaderCode,
+ }),
+ entryPoint: 'main',
+ targets: [{ format }] as const,
+ },
+ };
+
+ t.doCreateRenderPipelineTest(isAsync, !mismatched, descriptor);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/multisample_state.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/multisample_state.spec.ts
new file mode 100644
index 0000000000..53d135eeec
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/multisample_state.spec.ts
@@ -0,0 +1,87 @@
+export const description = `
+This test specifically validates the GPUMultisampleState of createRenderPipeline.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { kDefaultFragmentShaderCode } from '../../../util/shader.js';
+
+import { CreateRenderPipelineValidationTest } from './common.js';
+
+export const g = makeTestGroup(CreateRenderPipelineValidationTest);
+
+g.test('count')
+  .desc(`Tests that multisample.count must be either 1 or 4.`)
+ .params(u =>
+ u
+ .combine('isAsync', [false, true])
+ .beginSubcases()
+ .combine('count', [0, 1, 2, 3, 4, 8, 16, 1024])
+ )
+ .fn(t => {
+ const { isAsync, count } = t.params;
+
+ const descriptor = t.getDescriptor({ multisample: { count, alphaToCoverageEnabled: false } });
+
+ const _success = count === 1 || count === 4;
+ t.doCreateRenderPipelineTest(isAsync, _success, descriptor);
+ });
+
+g.test('alpha_to_coverage,count')
+ .desc(
+    `If multisample.alphaToCoverageEnabled is true, multisample.count must be greater than 1, i.e. it can only be 4.`
+ )
+ .params(u =>
+ u
+ .combine('isAsync', [false, true])
+ .combine('alphaToCoverageEnabled', [false, true])
+ .beginSubcases()
+ .combine('count', [1, 4])
+ )
+ .fn(t => {
+ const { isAsync, alphaToCoverageEnabled, count } = t.params;
+
+ const descriptor = t.getDescriptor({ multisample: { count, alphaToCoverageEnabled } });
+
+ const _success = alphaToCoverageEnabled ? count === 4 : count === 1 || count === 4;
+ t.doCreateRenderPipelineTest(isAsync, _success, descriptor);
+ });
+
+g.test('alpha_to_coverage,sample_mask')
+ .desc(
+    `If the sample_mask builtin is a pipeline output of the fragment stage, multisample.alphaToCoverageEnabled must be false.`
+ )
+ .params(u =>
+ u
+ .combine('isAsync', [false, true])
+ .combine('alphaToCoverageEnabled', [false, true])
+ .beginSubcases()
+ .combine('hasSampleMaskOutput', [false, true])
+ )
+ .fn(t => {
+ const { isAsync, alphaToCoverageEnabled, hasSampleMaskOutput } = t.params;
+
+ if (t.isCompatibility && hasSampleMaskOutput) {
+ t.skip('WGSL sample_mask is not supported in compatibility mode');
+ }
+
+ const descriptor = t.getDescriptor({
+ multisample: { alphaToCoverageEnabled, count: 4 },
+ fragmentShaderCode: hasSampleMaskOutput
+ ? `
+ struct Output {
+ @builtin(sample_mask) mask_out: u32,
+ @location(0) color : vec4<f32>,
+ }
+ @fragment fn main() -> Output {
+ var o: Output;
+        // We need to make sure this sample_mask isn't optimized out even if its value equals "no op".
+ o.mask_out = 0xFFFFFFFFu;
+ o.color = vec4<f32>(1.0, 1.0, 1.0, 1.0);
+ return o;
+ }`
+ : kDefaultFragmentShaderCode,
+ });
+
+ const _success = !hasSampleMaskOutput || !alphaToCoverageEnabled;
+ t.doCreateRenderPipelineTest(isAsync, _success, descriptor);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/overrides.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/overrides.spec.ts
new file mode 100644
index 0000000000..2cb8893a26
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/overrides.spec.ts
@@ -0,0 +1,535 @@
+export const description = `
+This test specifically validates the pipeline-overridable constants of createRenderPipeline.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { kValue } from '../../../util/constants.js';
+
+import { CreateRenderPipelineValidationTest } from './common.js';
+
+export const g = makeTestGroup(CreateRenderPipelineValidationTest);
+
+g.test('identifier,vertex')
+ .desc(
+ `
+Tests calling createRenderPipeline(Async) validation for overridable constant identifiers in vertex state.
+`
+ )
+ .params(u =>
+ u //
+ .combine('isAsync', [true, false])
+ .combineWithParams([
+ { vertexConstants: {}, _success: true },
+ { vertexConstants: { x: 1, y: 1 }, _success: true },
+ { vertexConstants: { x: 1, y: 1, 1: 1, 1000: 1 }, _success: true },
+ { vertexConstants: { 'x\0': 1, y: 1 }, _success: false },
+ { vertexConstants: { xxx: 1 }, _success: false },
+ { vertexConstants: { 1: 1 }, _success: true },
+ { vertexConstants: { 2: 1 }, _success: false },
+ { vertexConstants: { z: 1 }, _success: false }, // pipeline constant id is specified for z
+ { vertexConstants: { w: 1 }, _success: false }, // pipeline constant id is specified for w
+ { vertexConstants: { 1: 1, z: 1 }, _success: false }, // pipeline constant id is specified for z
+ { vertexConstants: { 数: 1 }, _success: true }, // test non-ASCII
+ { vertexConstants: { séquençage: 0 }, _success: false }, // test unicode normalization
+ ] as { vertexConstants: Record<string, GPUPipelineConstantValue>; _success: boolean }[])
+ )
+ .fn(t => {
+ const { isAsync, vertexConstants, _success } = t.params;
+
+ t.doCreateRenderPipelineTest(isAsync, _success, {
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ override x: f32 = 0.0;
+ override y: f32 = 0.0;
+ override 数: f32 = 0.0;
+ override séquençage: f32 = 0.0;
+ @id(1) override z: f32 = 0.0;
+ @id(1000) override w: f32 = 1.0;
+ @vertex fn main() -> @builtin(position) vec4<f32> {
+ return vec4<f32>(x, y, z, w + 数 + séquençage);
+ }`,
+ }),
+ entryPoint: 'main',
+ constants: vertexConstants,
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `@fragment fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ });
+ });
+
+g.test('identifier,fragment')
+ .desc(
+ `
+Tests calling createRenderPipeline(Async) validation for overridable constant identifiers in fragment state.
+`
+ )
+ .params(u =>
+ u //
+ .combine('isAsync', [true, false])
+ .combineWithParams([
+ { fragmentConstants: {}, _success: true },
+ { fragmentConstants: { r: 1, g: 1 }, _success: true },
+ { fragmentConstants: { r: 1, g: 1, 1: 1, 1000: 1 }, _success: true },
+ { fragmentConstants: { 'r\0': 1 }, _success: false },
+ { fragmentConstants: { xxx: 1 }, _success: false },
+ { fragmentConstants: { 1: 1 }, _success: true },
+ { fragmentConstants: { 2: 1 }, _success: false },
+ { fragmentConstants: { b: 1 }, _success: false }, // pipeline constant id is specified for b
+ { fragmentConstants: { a: 1 }, _success: false }, // pipeline constant id is specified for a
+ { fragmentConstants: { 1: 1, b: 1 }, _success: false }, // pipeline constant id is specified for b
+ { fragmentConstants: { 数: 1 }, _success: true }, // test non-ASCII
+ { fragmentConstants: { séquençage: 0 }, _success: false }, // test unicode is not normalized
+ ] as { fragmentConstants: Record<string, GPUPipelineConstantValue>; _success: boolean }[])
+ )
+ .fn(t => {
+ const { isAsync, fragmentConstants, _success } = t.params;
+
+ const descriptor = t.getDescriptor({
+ fragmentShaderCode: `
+ override r: f32 = 0.0;
+ override g: f32 = 0.0;
+ override 数: f32 = 0.0;
+ override sequencage: f32 = 0.0;
+ @id(1) override b: f32 = 0.0;
+ @id(1000) override a: f32 = 0.0;
+ @fragment fn main()
+ -> @location(0) vec4<f32> {
+ return vec4<f32>(r, g, b, a + 数 + sequencage);
+ }`,
+ fragmentConstants,
+ });
+
+ t.doCreateRenderPipelineTest(isAsync, _success, descriptor);
+ });
+
+g.test('uninitialized,vertex')
+ .desc(
+ `
+Tests calling createRenderPipeline(Async) validation for uninitialized overridable constants in vertex state.
+`
+ )
+ .params(u =>
+ u //
+ .combine('isAsync', [true, false])
+ .combineWithParams([
+ { vertexConstants: {}, _success: false },
+ { vertexConstants: { x: 1, y: 1 }, _success: false }, // z is missing
+ { vertexConstants: { x: 1, z: 1 }, _success: true },
+ { vertexConstants: { x: 1, y: 1, z: 1, w: 1 }, _success: true },
+ ] as { vertexConstants: Record<string, GPUPipelineConstantValue>; _success: boolean }[])
+ )
+ .fn(t => {
+ const { isAsync, vertexConstants, _success } = t.params;
+
+ t.doCreateRenderPipelineTest(isAsync, _success, {
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ override x: f32;
+ override y: f32 = 0.0;
+ override z: f32;
+ override w: f32 = 1.0;
+ @vertex fn main() -> @builtin(position) vec4<f32> {
+ return vec4<f32>(x, y, z, w);
+ }`,
+ }),
+ entryPoint: 'main',
+ constants: vertexConstants,
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `@fragment fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ });
+ });
+
+g.test('uninitialized,fragment')
+ .desc(
+ `
+Tests calling createRenderPipeline(Async) validation for uninitialized overridable constants in fragment state.
+`
+ )
+ .params(u =>
+ u //
+ .combine('isAsync', [true, false])
+ .combineWithParams([
+ { fragmentConstants: {}, _success: false },
+ { fragmentConstants: { r: 1, g: 1 }, _success: false }, // b is missing
+ { fragmentConstants: { r: 1, b: 1 }, _success: true },
+ { fragmentConstants: { r: 1, g: 1, b: 1, a: 1 }, _success: true },
+ ] as { fragmentConstants: Record<string, GPUPipelineConstantValue>; _success: boolean }[])
+ )
+ .fn(t => {
+ const { isAsync, fragmentConstants, _success } = t.params;
+
+ const descriptor = t.getDescriptor({
+ fragmentShaderCode: `
+ override r: f32;
+ override g: f32 = 0.0;
+ override b: f32;
+ override a: f32 = 0.0;
+ @fragment fn main()
+ -> @location(0) vec4<f32> {
+ return vec4<f32>(r, g, b, a);
+ }
+ `,
+ fragmentConstants,
+ });
+
+ t.doCreateRenderPipelineTest(isAsync, _success, descriptor);
+ });
+
+g.test('value,type_error,vertex')
+ .desc(
+ `
+Tests that calling createRenderPipeline(Async) with invalid constant values like Infinity and NaN results in a TypeError.
+`
+ )
+ .params(u =>
+ u //
+ .combine('isAsync', [true, false])
+ .combineWithParams([
+ { vertexConstants: { cf: 1 }, _success: true }, // control
+ { vertexConstants: { cf: NaN }, _success: false },
+ { vertexConstants: { cf: Number.POSITIVE_INFINITY }, _success: false },
+ { vertexConstants: { cf: Number.NEGATIVE_INFINITY }, _success: false },
+ ] as { vertexConstants: Record<string, GPUPipelineConstantValue>; _success: boolean }[])
+ )
+ .fn(t => {
+ const { isAsync, vertexConstants, _success } = t.params;
+
+ t.doCreateRenderPipelineTest(
+ isAsync,
+ _success,
+ {
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ override cf: f32 = 0.0;
+ @vertex fn main() -> @builtin(position) vec4<f32> {
+ _ = cf;
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ constants: vertexConstants,
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `@fragment fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ },
+ 'TypeError'
+ );
+ });
+
+g.test('value,type_error,fragment')
+ .desc(
+ `
+Tests that createRenderPipeline(Async) validation of invalid constant values like inf and NaN results in a TypeError.
+`
+ )
+ .params(u =>
+ u //
+ .combine('isAsync', [true, false])
+ .combineWithParams([
+ { fragmentConstants: { cf: 1 }, _success: true }, // control
+ { fragmentConstants: { cf: NaN }, _success: false },
+ { fragmentConstants: { cf: Number.POSITIVE_INFINITY }, _success: false },
+ { fragmentConstants: { cf: Number.NEGATIVE_INFINITY }, _success: false },
+ ] as const)
+ )
+ .fn(t => {
+ const { isAsync, fragmentConstants, _success } = t.params;
+
+ const descriptor = t.getDescriptor({
+ fragmentShaderCode: `
+ override cf: f32 = 0.0;
+ @fragment fn main()
+ -> @location(0) vec4<f32> {
+ _ = cf;
+ return vec4<f32>(1.0, 1.0, 1.0, 1.0);
+ }
+ `,
+ fragmentConstants,
+ });
+
+ t.doCreateRenderPipelineTest(isAsync, _success, descriptor, 'TypeError');
+ });
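+
+// The TypeError in the two tests above comes from the WebIDL binding layer, not from WebGPU
+// validation proper: GPUPipelineConstantValue is a WebIDL double (not unrestricted double), so
+// non-finite values are rejected while the descriptor is being converted. A rough sketch,
+// assuming an otherwise-valid descriptor whose shader declares `override cf: f32`:
+//   constants: { cf: 1.5 }                      // converts cleanly, normal validation applies
+//   constants: { cf: Number.POSITIVE_INFINITY } // throws TypeError during IDL conversion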
+
+g.test('value,validation_error,vertex')
+ .desc(
+ `
+Tests createRenderPipeline(Async) validation of unrepresentable constant values in the vertex stage.
+
+TODO(#2060): test with last_f64_castable.
+`
+ )
+ .params(u =>
+ u //
+ .combine('isAsync', [true, false])
+ .combineWithParams([
+ { vertexConstants: { cu: kValue.u32.min }, _success: true },
+ { vertexConstants: { cu: kValue.u32.min - 1 }, _success: false },
+ { vertexConstants: { cu: kValue.u32.max }, _success: true },
+ { vertexConstants: { cu: kValue.u32.max + 1 }, _success: false },
+ { vertexConstants: { ci: kValue.i32.negative.min }, _success: true },
+ { vertexConstants: { ci: kValue.i32.negative.min - 1 }, _success: false },
+ { vertexConstants: { ci: kValue.i32.positive.max }, _success: true },
+ { vertexConstants: { ci: kValue.i32.positive.max + 1 }, _success: false },
+ { vertexConstants: { cf: kValue.f32.negative.min }, _success: true },
+ {
+ vertexConstants: { cf: kValue.f32.negative.first_non_castable_pipeline_override },
+ _success: false,
+ },
+ { vertexConstants: { cf: kValue.f32.positive.max }, _success: true },
+ {
+ vertexConstants: { cf: kValue.f32.positive.first_non_castable_pipeline_override },
+ _success: false,
+ },
+ // Conversion to boolean can't fail
+ { vertexConstants: { cb: Number.MAX_VALUE }, _success: true },
+ { vertexConstants: { cb: kValue.i32.negative.min - 1 }, _success: true },
+ ] as { vertexConstants: Record<string, GPUPipelineConstantValue>; _success: boolean }[])
+ )
+ .fn(t => {
+ const { isAsync, vertexConstants, _success } = t.params;
+
+ t.doCreateRenderPipelineTest(isAsync, _success, {
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ override cb: bool = false;
+ override cu: u32 = 0u;
+ override ci: i32 = 0;
+ override cf: f32 = 0.0;
+ @vertex fn main() -> @builtin(position) vec4<f32> {
+ _ = cb;
+ _ = cu;
+ _ = ci;
+ _ = cf;
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ constants: vertexConstants,
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `@fragment fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ });
+ });
+
+g.test('value,validation_error,fragment')
+ .desc(
+ `
+Tests createRenderPipeline(Async) validation of unrepresentable constant values in the fragment stage.
+
+TODO(#2060): test with last_f64_castable.
+`
+ )
+ .params(u =>
+ u //
+ .combine('isAsync', [true, false])
+ .combineWithParams([
+ { fragmentConstants: { cu: kValue.u32.min }, _success: true },
+ { fragmentConstants: { cu: kValue.u32.min - 1 }, _success: false },
+ { fragmentConstants: { cu: kValue.u32.max }, _success: true },
+ { fragmentConstants: { cu: kValue.u32.max + 1 }, _success: false },
+ { fragmentConstants: { ci: kValue.i32.negative.min }, _success: true },
+ { fragmentConstants: { ci: kValue.i32.negative.min - 1 }, _success: false },
+ { fragmentConstants: { ci: kValue.i32.positive.max }, _success: true },
+ { fragmentConstants: { ci: kValue.i32.positive.max + 1 }, _success: false },
+ { fragmentConstants: { cf: kValue.f32.negative.min }, _success: true },
+ {
+ fragmentConstants: { cf: kValue.f32.negative.first_non_castable_pipeline_override },
+ _success: false,
+ },
+ { fragmentConstants: { cf: kValue.f32.positive.max }, _success: true },
+ {
+ fragmentConstants: { cf: kValue.f32.positive.first_non_castable_pipeline_override },
+ _success: false,
+ },
+ // Conversion to boolean can't fail
+ { fragmentConstants: { cb: Number.MAX_VALUE }, _success: true },
+ { fragmentConstants: { cb: kValue.i32.negative.min - 1 }, _success: true },
+ ] as { fragmentConstants: Record<string, GPUPipelineConstantValue>; _success: boolean }[])
+ )
+ .fn(t => {
+ const { isAsync, fragmentConstants, _success } = t.params;
+
+ const descriptor = t.getDescriptor({
+ fragmentShaderCode: `
+ override cb: bool = false;
+ override cu: u32 = 0u;
+ override ci: i32 = 0;
+ override cf: f32 = 0.0;
+ @fragment fn main()
+ -> @location(0) vec4<f32> {
+ _ = cb;
+ _ = cu;
+ _ = ci;
+ _ = cf;
+ return vec4<f32>(1.0, 1.0, 1.0, 1.0);
+ }
+ `,
+ fragmentConstants,
+ });
+
+ t.doCreateRenderPipelineTest(isAsync, _success, descriptor);
+ });
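+
+// Unlike the NaN/Infinity cases, these out-of-range values are finite doubles, so they pass IDL
+// conversion and instead fail WebGPU validation when they cannot be converted to the override's
+// WGSL type. A rough numeric sketch of the boundaries exercised above:
+//   cu: 4294967295 (u32 max)   -> valid          cu: 4294967296   -> validation error
+//   ci: -2147483648 (i32 min)  -> valid          ci: -2147483649  -> validation error
+//   cb: any finite double      -> valid, since conversion to bool cannot fail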
+
+g.test('value,validation_error,f16,vertex')
+ .desc(
+ `
+Tests createRenderPipeline(Async) validation of unrepresentable f16 constant values in the vertex stage.
+
+TODO(#2060): Tighten the cases around the valid/invalid boundary once we have WGSL spec
+clarity on whether values like f16.positive.last_f64_castable would be valid. See issue.
+`
+ )
+ .params(u =>
+ u //
+ .combine('isAsync', [true, false])
+ .combineWithParams([
+ { vertexConstants: { cf16: kValue.f16.negative.min }, _success: true },
+ {
+ vertexConstants: { cf16: kValue.f16.negative.first_non_castable_pipeline_override },
+ _success: false,
+ },
+ { vertexConstants: { cf16: kValue.f16.positive.max }, _success: true },
+ {
+ vertexConstants: { cf16: kValue.f16.positive.first_non_castable_pipeline_override },
+ _success: false,
+ },
+ { vertexConstants: { cf16: kValue.f32.negative.min }, _success: false },
+ { vertexConstants: { cf16: kValue.f32.positive.max }, _success: false },
+ {
+ vertexConstants: { cf16: kValue.f32.negative.first_non_castable_pipeline_override },
+ _success: false,
+ },
+ {
+ vertexConstants: { cf16: kValue.f32.positive.first_non_castable_pipeline_override },
+ _success: false,
+ },
+ ] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(t => {
+ const { isAsync, vertexConstants, _success } = t.params;
+
+ t.doCreateRenderPipelineTest(isAsync, _success, {
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ enable f16;
+
+ override cf16: f16 = 0.0h;
+ @vertex fn main() -> @builtin(position) vec4<f32> {
+ _ = cf16;
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ constants: vertexConstants,
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `@fragment fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ });
+ });
+
+g.test('value,validation_error,f16,fragment')
+ .desc(
+ `
+Tests createRenderPipeline(Async) validation of unrepresentable f16 constant values in the fragment stage.
+
+TODO(#2060): Tighten the cases around the valid/invalid boundary once we have WGSL spec
+clarity on whether values like f16.positive.last_f64_castable would be valid. See issue.
+`
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .params(u =>
+ u //
+ .combine('isAsync', [true, false])
+ .combineWithParams([
+ { fragmentConstants: { cf16: kValue.f16.negative.min }, _success: true },
+ {
+ fragmentConstants: { cf16: kValue.f16.negative.first_non_castable_pipeline_override },
+ _success: false,
+ },
+ { fragmentConstants: { cf16: kValue.f16.positive.max }, _success: true },
+ {
+ fragmentConstants: { cf16: kValue.f16.positive.first_non_castable_pipeline_override },
+ _success: false,
+ },
+ { fragmentConstants: { cf16: kValue.f32.negative.min }, _success: false },
+ { fragmentConstants: { cf16: kValue.f32.positive.max }, _success: false },
+ {
+ fragmentConstants: { cf16: kValue.f32.negative.first_non_castable_pipeline_override },
+ _success: false,
+ },
+ {
+ fragmentConstants: { cf16: kValue.f32.positive.first_non_castable_pipeline_override },
+ _success: false,
+ },
+ ] as const)
+ )
+ .fn(t => {
+ const { isAsync, fragmentConstants, _success } = t.params;
+
+ const descriptor = t.getDescriptor({
+ fragmentShaderCode: `
+ enable f16;
+
+ override cf16: f16 = 0.0h;
+ @fragment fn main()
+ -> @location(0) vec4<f32> {
+ _ = cf16;
+ return vec4<f32>(1.0, 1.0, 1.0, 1.0);
+ }
+ `,
+ fragmentConstants,
+ });
+
+ t.doCreateRenderPipelineTest(isAsync, _success, descriptor);
+ });
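+
+// Rough boundary summary for the f16 cases above (the largest finite f16 magnitude is 65504):
+//   cf16: 65504    -> representable in f16, pipeline creation succeeds
+//   cf16: 3.4e+38  -> finite in f32/f64 but not castable to f16, validation error
+// This is a sketch of the intent; the exact boundary values come from kValue in the CTS utils.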
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/primitive_state.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/primitive_state.spec.ts
new file mode 100644
index 0000000000..6f42e8314a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/primitive_state.spec.ts
@@ -0,0 +1,42 @@
+export const description = `
+This test specifically covers validation of the GPUPrimitiveState member of createRenderPipeline.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { kPrimitiveTopology, kIndexFormat } from '../../../capability_info.js';
+
+import { CreateRenderPipelineValidationTest } from './common.js';
+
+export const g = makeTestGroup(CreateRenderPipelineValidationTest);
+
+g.test('strip_index_format')
+ .desc(
+ `If primitive.topology is not "line-strip" or "triangle-strip", primitive.stripIndexFormat must be undefined.`
+ )
+ .params(u =>
+ u
+ .combine('isAsync', [false, true])
+ .combine('topology', [undefined, ...kPrimitiveTopology] as const)
+ .combine('stripIndexFormat', [undefined, ...kIndexFormat] as const)
+ )
+ .fn(t => {
+ const { isAsync, topology, stripIndexFormat } = t.params;
+
+ const descriptor = t.getDescriptor({ primitive: { topology, stripIndexFormat } });
+
+ const _success =
+ topology === 'line-strip' || topology === 'triangle-strip' || stripIndexFormat === undefined;
+ t.doCreateRenderPipelineTest(isAsync, _success, descriptor);
+ });
+
+g.test('unclipped_depth')
+ .desc(`If primitive.unclippedDepth is true, features must contain "depth-clip-control".`)
+ .params(u => u.combine('isAsync', [false, true]).combine('unclippedDepth', [false, true]))
+ .fn(t => {
+ const { isAsync, unclippedDepth } = t.params;
+
+ const descriptor = t.getDescriptor({ primitive: { unclippedDepth } });
+
+ const _success = !unclippedDepth || t.device.features.has('depth-clip-control');
+ t.doCreateRenderPipelineTest(isAsync, _success, descriptor);
+ });
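+
+// Rough illustration of the two rules exercised above, assuming an otherwise-valid descriptor:
+//   primitive: { topology: 'triangle-list',  stripIndexFormat: 'uint16' } -> validation error
+//   primitive: { topology: 'triangle-strip', stripIndexFormat: 'uint16' } -> valid
+//   primitive: { unclippedDepth: true } -> valid only if the device has 'depth-clip-control'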
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/shader_module.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/shader_module.spec.ts
new file mode 100644
index 0000000000..5d9c4b4612
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/shader_module.spec.ts
@@ -0,0 +1,112 @@
+export const description = `
+This test specifically covers createRenderPipeline validation issues related to the shader modules.
+
+Note: entry point matching tests are in ../shader_module/entry_point.spec.ts
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import {
+ getFragmentShaderCodeWithOutput,
+ kDefaultVertexShaderCode,
+ kDefaultFragmentShaderCode,
+} from '../../../util/shader.js';
+
+import { CreateRenderPipelineValidationTest } from './common.js';
+
+export const g = makeTestGroup(CreateRenderPipelineValidationTest);
+
+const values = [0, 1, 0, 1];
+
+g.test('device_mismatch')
+ .desc(
+    'Tests that createRenderPipeline(Async) cannot be called with a shader module created from another device'
+ )
+ .paramsSubcasesOnly(u =>
+ u.combine('isAsync', [true, false]).combineWithParams([
+ { vertex_mismatched: false, fragment_mismatched: false, _success: true },
+ { vertex_mismatched: true, fragment_mismatched: false, _success: false },
+ { vertex_mismatched: false, fragment_mismatched: true, _success: false },
+ ])
+ )
+ .beforeAllSubcases(t => {
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { isAsync, vertex_mismatched, fragment_mismatched, _success } = t.params;
+
+ const code = `
+ @vertex fn main() -> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ }
+ `;
+
+ const descriptor = {
+ vertex: {
+ module: vertex_mismatched
+ ? t.mismatchedDevice.createShaderModule({ code })
+ : t.device.createShaderModule({ code }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: fragment_mismatched
+ ? t.mismatchedDevice.createShaderModule({
+ code: getFragmentShaderCodeWithOutput([
+ { values, plainType: 'f32', componentCount: 4 },
+ ]),
+ })
+ : t.device.createShaderModule({
+ code: getFragmentShaderCodeWithOutput([
+ { values, plainType: 'f32', componentCount: 4 },
+ ]),
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }] as const,
+ },
+ layout: t.getPipelineLayout(),
+ };
+
+ t.doCreateRenderPipelineTest(isAsync, _success, descriptor);
+ });
+
+g.test('invalid,vertex')
+ .desc(`Tests shader module must be valid.`)
+ .params(u => u.combine('isAsync', [true, false]).combine('isVertexShaderValid', [true, false]))
+ .fn(t => {
+ const { isAsync, isVertexShaderValid } = t.params;
+ t.doCreateRenderPipelineTest(isAsync, isVertexShaderValid, {
+ layout: 'auto',
+ vertex: {
+ module: isVertexShaderValid
+ ? t.device.createShaderModule({
+ code: kDefaultVertexShaderCode,
+ })
+ : t.createInvalidShaderModule(),
+ entryPoint: 'main',
+ },
+ });
+ });
+
+g.test('invalid,fragment')
+ .desc(`Tests shader module must be valid.`)
+ .params(u => u.combine('isAsync', [true, false]).combine('isFragmentShaderValid', [true, false]))
+ .fn(t => {
+ const { isAsync, isFragmentShaderValid } = t.params;
+ t.doCreateRenderPipelineTest(isAsync, isFragmentShaderValid, {
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: kDefaultVertexShaderCode,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: isFragmentShaderValid
+ ? t.device.createShaderModule({
+ code: kDefaultFragmentShaderCode,
+ })
+ : t.createInvalidShaderModule(),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/vertex_state.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/vertex_state.spec.ts
new file mode 100644
index 0000000000..0a64494c89
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/render_pipeline/vertex_state.spec.ts
@@ -0,0 +1,765 @@
+export const description = `
+This test specifically covers validation of the GPUVertexState member of createRenderPipeline.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import {
+ filterUniqueValueTestVariants,
+ makeValueTestVariant,
+} from '../../../../common/util/util.js';
+import { kVertexFormats, kVertexFormatInfo } from '../../../capability_info.js';
+import { ValidationTest } from '../validation_test.js';
+
+const VERTEX_SHADER_CODE_WITH_NO_INPUT = `
+ @vertex fn main() -> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, 0.0, 0.0);
+ }
+`;
+
+function addTestAttributes(
+ attributes: GPUVertexAttribute[],
+ {
+ testAttribute,
+ testAttributeAtStart = true,
+ extraAttributeCount = 0,
+ extraAttributeSkippedLocations = [],
+ }: {
+ testAttribute?: GPUVertexAttribute;
+ testAttributeAtStart?: boolean;
+    extraAttributeCount?: number;
+    extraAttributeSkippedLocations?: number[];
+ }
+) {
+ // Add a bunch of dummy attributes each with a different location such that none of the locations
+ // are in extraAttributeSkippedLocations
+ let currentLocation = 0;
+ let extraAttribsAdded = 0;
+ while (extraAttribsAdded !== extraAttributeCount) {
+ if (extraAttributeSkippedLocations.includes(currentLocation)) {
+ currentLocation++;
+ continue;
+ }
+
+ attributes.push({ format: 'float32', shaderLocation: currentLocation, offset: 0 });
+ currentLocation++;
+ extraAttribsAdded++;
+ }
+
+ // Add the test attribute at the start or the end of the attributes.
+ if (testAttribute) {
+ if (testAttributeAtStart) {
+ attributes.unshift(testAttribute);
+ } else {
+ attributes.push(testAttribute);
+ }
+ }
+}
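+
+// Sketch of how addTestAttributes fills a layout, with made-up values:
+//   const attrs: GPUVertexAttribute[] = [];
+//   addTestAttributes(attrs, {
+//     testAttribute: { format: 'float32', offset: 0, shaderLocation: 7 },
+//     extraAttributeCount: 2,
+//     extraAttributeSkippedLocations: [1],
+//   });
+//   // attrs now covers shaderLocations [7, 0, 2]: the test attribute is unshifted to the front
+//   // (testAttributeAtStart defaults to true) and the extras skip location 1.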
+
+class F extends ValidationTest {
+ getDescriptor(
+ buffers: Iterable<GPUVertexBufferLayout>,
+ vertexShaderCode: string
+ ): GPURenderPipelineDescriptor {
+ const descriptor: GPURenderPipelineDescriptor = {
+ layout: 'auto',
+ vertex: {
+ module: this.device.createShaderModule({ code: vertexShaderCode }),
+ entryPoint: 'main',
+ buffers,
+ },
+ fragment: {
+ module: this.device.createShaderModule({
+ code: `
+ @fragment fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ primitive: { topology: 'triangle-list' },
+ };
+ return descriptor;
+ }
+
+ testVertexState(
+ success: boolean,
+ buffers: Iterable<GPUVertexBufferLayout>,
+ vertexShader: string = VERTEX_SHADER_CODE_WITH_NO_INPUT
+ ) {
+ const vsModule = this.device.createShaderModule({ code: vertexShader });
+ const fsModule = this.device.createShaderModule({
+ code: `
+ @fragment fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }`,
+ });
+
+ this.expectValidationError(() => {
+ this.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: vsModule,
+ entryPoint: 'main',
+ buffers,
+ },
+ fragment: {
+ module: fsModule,
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ primitive: { topology: 'triangle-list' },
+ });
+ }, !success);
+ }
+
+ generateTestVertexShader(inputs: { type: string; location: number }[]): string {
+ let interfaces = '';
+ let body = '';
+
+ let count = 0;
+ for (const input of inputs) {
+ interfaces += `@location(${input.location}) input${count} : ${input.type},\n`;
+ body += `var i${count} : ${input.type} = input.input${count};\n`;
+ count++;
+ }
+
+ return `
+ struct Inputs {
+ ${interfaces}
+ };
+ @vertex fn main(input : Inputs) -> @builtin(position) vec4<f32> {
+ ${body}
+ return vec4<f32>(0.0, 0.0, 0.0, 0.0);
+ }
+ `;
+ }
+}
+
+export const g = makeTestGroup(F);
+
+g.test('max_vertex_buffer_limit')
+ .desc(
+ `Test that only up to <maxVertexBuffers> vertex buffers are allowed.
+   - Tests with 0, 1, limit, limit + 1 vertex buffers.
+ - Tests with the last buffer having an attribute or not.
+ This also happens to test that vertex buffers with no attributes are allowed and that a vertex state with no buffers is allowed.`
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('countVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: 0 },
+ { mult: 1, add: 1 },
+ ])
+ .combine('lastEmpty', [false, true])
+ )
+ .fn(t => {
+ const { countVariant, lastEmpty } = t.params;
+ const count = t.makeLimitVariant('maxVertexBuffers', countVariant);
+ const vertexBuffers = [];
+ for (let i = 0; i < count; i++) {
+ if (lastEmpty || i !== count - 1) {
+ vertexBuffers.push({ attributes: [], arrayStride: 0 });
+ } else {
+ vertexBuffers.push({
+ attributes: [{ format: 'float32', offset: 0, shaderLocation: 0 }],
+ arrayStride: 0,
+ } as const);
+ }
+ }
+
+ const success = count <= t.device.limits.maxVertexBuffers;
+ t.testVertexState(success, vertexBuffers);
+ });
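+
+// The { mult, add } variants above are expanded against a device limit: makeLimitVariant yields
+// roughly `limit * mult + add`. For example, assuming a device where maxVertexBuffers is 8:
+//   { mult: 0, add: 1 } -> 1            { mult: 1, add: 0 } -> 8 (exactly the limit)
+//   { mult: 1, add: 1 } -> 9, one past the limit, which the test expects to fail validation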
+
+g.test('max_vertex_attribute_limit')
+ .desc(
+ `Test that only up to <maxVertexAttributes> vertex attributes are allowed.
+  - Tests with 0, 1, limit, limit + 1 vertex attributes.
+ - Tests with 0, 1, 4 attributes per buffer (with remaining attributes in the last buffer).`
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('attribCountVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: 0 },
+ { mult: 1, add: 1 },
+ ])
+ .combine('attribsPerBuffer', [0, 1, 4])
+ )
+ .fn(t => {
+ const { attribCountVariant, attribsPerBuffer } = t.params;
+ const attribCount = t.makeLimitVariant('maxVertexAttributes', attribCountVariant);
+
+ const vertexBuffers = [];
+
+ let attribsAdded = 0;
+ while (attribsAdded !== attribCount) {
+ // Choose how many attributes to add for this buffer. The last buffer gets all remaining attributes.
+ let targetCount = Math.min(attribCount, attribsAdded + attribsPerBuffer);
+ if (vertexBuffers.length === t.device.limits.maxVertexBuffers - 1) {
+ targetCount = attribCount;
+ }
+
+ const attributes = [];
+ while (attribsAdded !== targetCount) {
+ attributes.push({ format: 'float32', offset: 0, shaderLocation: attribsAdded } as const);
+ attribsAdded++;
+ }
+
+ vertexBuffers.push({ arrayStride: 0, attributes });
+ }
+
+ const success = attribCount <= t.device.limits.maxVertexAttributes;
+ t.testVertexState(success, vertexBuffers);
+ });
+
+g.test('max_vertex_buffer_array_stride_limit')
+ .desc(
+ `Test that the vertex buffer arrayStride must be at most <maxVertexBufferArrayStride>.
+ - Test for various vertex buffer indices
+ - Test for array strides 0, 4, 256, limit - 4, limit, limit + 4`
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('vertexBufferIndexVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: -1 },
+ ])
+ .combine('arrayStrideVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 4 },
+ { mult: 0, add: 256 },
+ { mult: 1, add: -4 },
+ { mult: 1, add: 0 },
+ { mult: 1, add: +4 },
+ ])
+ )
+ .fn(t => {
+ const { vertexBufferIndexVariant, arrayStrideVariant } = t.params;
+ const vertexBufferIndex = t.makeLimitVariant('maxVertexBuffers', vertexBufferIndexVariant);
+ const arrayStride = t.makeLimitVariant('maxVertexBufferArrayStride', arrayStrideVariant);
+ const vertexBuffers = [];
+ vertexBuffers[vertexBufferIndex] = { arrayStride, attributes: [] };
+
+ const success = arrayStride <= t.device.limits.maxVertexBufferArrayStride;
+ t.testVertexState(success, vertexBuffers);
+ });
+
+g.test('vertex_buffer_array_stride_limit_alignment')
+ .desc(
+ `Test that the vertex buffer arrayStride must be a multiple of 4 (including 0).
+ - Test for various vertex buffer indices
+ - Test for array strides 0, 1, 2, 4, limit - 4, limit - 2, limit`
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('vertexBufferIndexVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: -1 },
+ ])
+ .combine('arrayStrideVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 0, add: 2 },
+ { mult: 0, add: 4 },
+ { mult: 1, add: -4 },
+ { mult: 1, add: -2 },
+ { mult: 1, add: 0 },
+ ])
+ )
+ .fn(t => {
+ const { vertexBufferIndexVariant, arrayStrideVariant } = t.params;
+ const vertexBufferIndex = t.makeLimitVariant('maxVertexBuffers', vertexBufferIndexVariant);
+ const arrayStride = t.makeLimitVariant('maxVertexBufferArrayStride', arrayStrideVariant);
+
+ const vertexBuffers = [];
+ vertexBuffers[vertexBufferIndex] = { arrayStride, attributes: [] };
+
+ const success = arrayStride % 4 === 0;
+ t.testVertexState(success, vertexBuffers);
+ });
+
+g.test('vertex_attribute_shaderLocation_limit')
+ .desc(
+    `Test that shaderLocation must be less than maxVertexAttributes.
+ - Test for various vertex buffer indices
+ - Test for various amounts of attributes in that vertex buffer
+ - Test for shaderLocation 0, 1, limit - 1, limit`
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('vertexBufferIndexVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: -1 },
+ ])
+ .combine('extraAttributeCountVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: -1 },
+ ])
+ .combine('testAttributeAtStart', [false, true])
+ .combine('testShaderLocationVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: -1 },
+ { mult: 1, add: 0 },
+ ])
+ )
+ .fn(t => {
+ const {
+ vertexBufferIndexVariant,
+ extraAttributeCountVariant,
+ testShaderLocationVariant,
+ testAttributeAtStart,
+ } = t.params;
+ const vertexBufferIndex = t.makeLimitVariant('maxVertexBuffers', vertexBufferIndexVariant);
+ const extraAttributeCount = t.makeLimitVariant(
+ 'maxVertexAttributes',
+ extraAttributeCountVariant
+ );
+ const testShaderLocation = t.makeLimitVariant('maxVertexAttributes', testShaderLocationVariant);
+
+ const attributes: GPUVertexAttribute[] = [];
+ addTestAttributes(attributes, {
+ testAttribute: { format: 'float32', offset: 0, shaderLocation: testShaderLocation },
+ testAttributeAtStart,
+ extraAttributeCount,
+ extraAttributeSkippedLocations: [testShaderLocation],
+ });
+
+ const vertexBuffers = [];
+ vertexBuffers[vertexBufferIndex] = { arrayStride: 256, attributes };
+
+ const success = testShaderLocation < t.device.limits.maxVertexAttributes;
+ t.testVertexState(success, vertexBuffers);
+ });
+
+g.test('vertex_attribute_shaderLocation_unique')
+ .desc(
+ `Test that shaderLocation must be unique in the vertex state.
+ - Test for various pairs of buffers that contain the potentially conflicting attributes
+ - Test for the potentially conflicting attributes in various places in the buffers (with dummy attributes)
+ - Test for various shaderLocations that conflict or not`
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('vertexBufferIndexAVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: -1 },
+ ])
+ .combine('vertexBufferIndexBVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: -1 },
+ ])
+ .combine('testAttributeAtStartA', [false, true])
+ .combine('testAttributeAtStartB', [false, true])
+ .combine('shaderLocationAVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 0, add: 7 },
+ { mult: 1, add: -1 },
+ ])
+ .combine('shaderLocationBVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 0, add: 7 },
+ { mult: 1, add: -1 },
+ ])
+ .combine('extraAttributeCount', [0, 4])
+ )
+ .fn(t => {
+ const {
+ vertexBufferIndexAVariant,
+ vertexBufferIndexBVariant,
+ testAttributeAtStartA,
+ testAttributeAtStartB,
+ shaderLocationAVariant,
+ shaderLocationBVariant,
+ extraAttributeCount,
+ } = t.params;
+ const vertexBufferIndexA = t.makeLimitVariant('maxVertexBuffers', vertexBufferIndexAVariant);
+ const vertexBufferIndexB = t.makeLimitVariant('maxVertexBuffers', vertexBufferIndexBVariant);
+ const shaderLocationA = t.makeLimitVariant('maxVertexAttributes', shaderLocationAVariant);
+ const shaderLocationB = t.makeLimitVariant('maxVertexAttributes', shaderLocationBVariant);
+
+ // Depending on the params, the vertexBuffer for A and B can be the same or different. To support
+ // both cases without code changes we treat `vertexBufferAttributes` as a map from indices to
+ // vertex buffer descriptors, with A and B potentially reusing the same JS object if they have the
+ // same index.
+ const vertexBufferAttributes = [];
+ vertexBufferAttributes[vertexBufferIndexA] = [];
+ vertexBufferAttributes[vertexBufferIndexB] = [];
+
+ // Add the dummy attributes for attribute A
+ const attributesA = vertexBufferAttributes[vertexBufferIndexA];
+ addTestAttributes(attributesA, {
+ testAttribute: { format: 'float32', offset: 0, shaderLocation: shaderLocationA },
+ testAttributeAtStart: testAttributeAtStartA,
+ extraAttributeCount,
+ extraAttributeSkippedLocations: [shaderLocationA, shaderLocationB],
+ });
+
+    // Add attribute B. Note that attributesB can be the same object as attributesA, in which
+    // case they end up in the same vertex buffer.
+ const attributesB = vertexBufferAttributes[vertexBufferIndexB];
+ addTestAttributes(attributesB, {
+ testAttribute: { format: 'float32', offset: 0, shaderLocation: shaderLocationB },
+ testAttributeAtStart: testAttributeAtStartB,
+ });
+
+ // Use the attributes to make the list of vertex buffers. Note that we might be setting the same vertex
+ // buffer twice, but that only happens when it is the only vertex buffer.
+ const vertexBuffers = [];
+ vertexBuffers[vertexBufferIndexA] = { arrayStride: 256, attributes: attributesA };
+ vertexBuffers[vertexBufferIndexB] = { arrayStride: 256, attributes: attributesB };
+
+    // Note that an empty vertex shader will be used, so errors only happen because of the
+    // conflict in the vertex state.
+ const success = shaderLocationA !== shaderLocationB;
+ t.testVertexState(success, vertexBuffers);
+ });
+
+g.test('vertex_shader_input_location_limit')
+ .desc(
+    `Test that a vertex shader input's location decoration must be less than maxVertexAttributes.
+ - Test for shaderLocation 0, 1, limit - 1, limit, MAX_I32 (the WGSL spec requires a non-negative i32)`
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('testLocationVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: -1 },
+ { mult: 1, add: 0 },
+ { mult: 0, add: 2 ** 31 - 1 },
+ ])
+ )
+ .fn(t => {
+ const { testLocationVariant } = t.params;
+ const testLocation = t.makeLimitVariant('maxVertexAttributes', testLocationVariant);
+
+ const shader = t.generateTestVertexShader([
+ {
+ type: 'vec4<f32>',
+ location: testLocation,
+ },
+ ]);
+
+ const vertexBuffers = [
+ {
+ arrayStride: 512,
+ attributes: [
+ {
+ format: 'float32',
+ offset: 0,
+ shaderLocation: testLocation,
+ } as const,
+ ],
+ },
+ ];
+
+ const success = testLocation < t.device.limits.maxVertexAttributes;
+ t.testVertexState(success, vertexBuffers, shader);
+ });
+
+g.test('vertex_shader_input_location_in_vertex_state')
+ .desc(
+    `Test that every vertex shader input defined in the shader must have a corresponding attribute in the vertex state.
+ - Test for various input locations.
+ - Test for the attribute in various places in the list of vertex buffer and various places inside the vertex buffer descriptor`
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('vertexBufferIndexVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: -1 },
+ ])
+ .combine('extraAttributeCountVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: -1 },
+ ])
+ .combine('testAttributeAtStart', [false, true])
+ .combine('testShaderLocationVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 0, add: 4 },
+ { mult: 0, add: 5 },
+ { mult: 1, add: -1 },
+ ])
+ )
+ .fn(t => {
+ const {
+ vertexBufferIndexVariant,
+ extraAttributeCountVariant,
+ testAttributeAtStart,
+ testShaderLocationVariant,
+ } = t.params;
+ const vertexBufferIndex = t.makeLimitVariant('maxVertexBuffers', vertexBufferIndexVariant);
+ const extraAttributeCount = t.makeLimitVariant(
+ 'maxVertexAttributes',
+ extraAttributeCountVariant
+ );
+ const testShaderLocation = t.makeLimitVariant('maxVertexAttributes', testShaderLocationVariant);
+ // We have a shader using `testShaderLocation`.
+ const shader = t.generateTestVertexShader([
+ {
+ type: 'vec4<f32>',
+ location: testShaderLocation,
+ },
+ ]);
+
+ const attributes: GPUVertexAttribute[] = [];
+ const vertexBuffers = [];
+ vertexBuffers[vertexBufferIndex] = { arrayStride: 256, attributes };
+
+ // Fill attributes with a bunch of attributes for other locations.
+ // Using that vertex state is invalid because the vertex state doesn't contain the test location
+ addTestAttributes(attributes, {
+ extraAttributeCount,
+ extraAttributeSkippedLocations: [testShaderLocation],
+ });
+ t.testVertexState(false, vertexBuffers, shader);
+
+ // Add an attribute for the test location and try again.
+ addTestAttributes(attributes, {
+ testAttribute: { format: 'float32', shaderLocation: testShaderLocation, offset: 0 },
+ testAttributeAtStart,
+ });
+ t.testVertexState(true, vertexBuffers, shader);
+ });
+
+g.test('vertex_shader_type_matches_attribute_format')
+ .desc(
+ `
+ Test that the vertex shader declaration must have a type compatible with the vertex format.
+ - Test for all formats.
+ - Test for all combinations of u/i/f32 with and without vectors.`
+ )
+ .params(u =>
+ u
+ .combine('format', kVertexFormats)
+ .beginSubcases()
+ .combine('shaderBaseType', ['u32', 'i32', 'f32'])
+ .expand('shaderType', p => [
+ p.shaderBaseType,
+ `vec2<${p.shaderBaseType}>`,
+ `vec3<${p.shaderBaseType}>`,
+ `vec4<${p.shaderBaseType}>`,
+ ])
+ )
+ .fn(t => {
+ const { format, shaderBaseType, shaderType } = t.params;
+ const shader = t.generateTestVertexShader([
+ {
+ type: shaderType,
+ location: 0,
+ },
+ ]);
+
+ const requiredBaseType = {
+ sint: 'i32',
+ uint: 'u32',
+ snorm: 'f32',
+ unorm: 'f32',
+ float: 'f32',
+ }[kVertexFormatInfo[format].type];
+
+ const success = requiredBaseType === shaderBaseType;
+ t.testVertexState(
+ success,
+ [
+ {
+ arrayStride: 0,
+ attributes: [{ offset: 0, shaderLocation: 0, format }],
+ },
+ ],
+ shader
+ );
+ });
+
+g.test('vertex_attribute_offset_alignment')
+ .desc(
+ `
+    Test that vertex attribute offsets must be aligned to the minimum of 4 and the format's byte size.
+ - Test for all formats.
+ - Test for various arrayStrides and offsets within that stride
+ - Test for various vertex buffer indices
+ - Test for various amounts of attributes in that vertex buffer`
+ )
+ .params(u =>
+ u
+ .combine('format', kVertexFormats)
+ .combine('arrayStrideVariant', [
+ { mult: 0, add: 256 },
+ { mult: 1, add: 0 },
+ ])
+ .expand('offsetVariant', p => {
+ const formatSize = kVertexFormatInfo[p.format].byteSize;
+ return filterUniqueValueTestVariants([
+ { mult: 0, add: 0 },
+ { mult: 0, add: Math.floor(formatSize / 2) },
+ { mult: 0, add: formatSize },
+ { mult: 0, add: 2 },
+ { mult: 0, add: 4 },
+ { mult: 1, add: -formatSize },
+ { mult: 1, add: -formatSize - Math.floor(formatSize / 2) },
+ { mult: 1, add: -formatSize - 4 },
+ { mult: 1, add: -formatSize - 2 },
+ ]);
+ })
+ .beginSubcases()
+ .combine('vertexBufferIndexVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: -1 },
+ ])
+ .combine('extraAttributeCountVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: -1 },
+ ])
+ .combine('testAttributeAtStart', [false, true])
+ )
+ .fn(t => {
+ const {
+ format,
+ arrayStrideVariant,
+ offsetVariant,
+ vertexBufferIndexVariant,
+ extraAttributeCountVariant,
+ testAttributeAtStart,
+ } = t.params;
+ const arrayStride = t.makeLimitVariant('maxVertexBufferArrayStride', arrayStrideVariant);
+ const vertexBufferIndex = t.makeLimitVariant('maxVertexBuffers', vertexBufferIndexVariant);
+ const extraAttributeCount = t.makeLimitVariant(
+ 'maxVertexAttributes',
+ extraAttributeCountVariant
+ );
+ const offset = makeValueTestVariant(arrayStride, offsetVariant);
+
+ const attributes: GPUVertexAttribute[] = [];
+ addTestAttributes(attributes, {
+ testAttribute: { format, offset, shaderLocation: 0 },
+ testAttributeAtStart,
+ extraAttributeCount,
+ extraAttributeSkippedLocations: [0],
+ });
+
+ const vertexBuffers = [];
+ vertexBuffers[vertexBufferIndex] = { arrayStride, attributes };
+
+ const formatInfo = kVertexFormatInfo[format];
+ const formatSize = formatInfo.byteSize;
+ const success = offset % Math.min(4, formatSize) === 0;
+
+ t.testVertexState(success, vertexBuffers);
+ });
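+
+// The success condition above mirrors the spec rule that an attribute offset must be a multiple
+// of min(4, format byte size). Two concrete cases, as a sketch:
+//   'float32x3' (12 bytes): offsets 0, 4, 8 are valid; 2 and 6 are not (min(4, 12) = 4)
+//   'unorm8x2'  (2 bytes):  offsets 0, 2, 4 are valid; 1 and 3 are not (min(4, 2) = 2)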
+
+g.test('vertex_attribute_contained_in_stride')
+ .desc(
+ `
+ Test that vertex attribute [offset, offset + formatSize) must be contained in the arrayStride if arrayStride is not 0:
+ - Test for all formats.
+ - Test for various arrayStrides and offsets within that stride
+ - Test for various vertex buffer indices
+ - Test for various amounts of attributes in that vertex buffer`
+ )
+ .params(u =>
+ u
+ .combine('format', kVertexFormats)
+ .beginSubcases()
+ .combine('arrayStrideVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 256 },
+ { mult: 1, add: -4 },
+ { mult: 1, add: 0 },
+ ])
+ .expand('offsetVariant', function* (p) {
+ // Compute a bunch of test offsets to test.
+ const formatSize = kVertexFormatInfo[p.format].byteSize;
+ yield { mult: 0, add: 0 };
+ yield { mult: 0, add: 4 };
+ yield { mult: 1, add: -formatSize };
+ yield { mult: 1, add: -formatSize + 4 };
+
+ // Avoid adding duplicate cases when formatSize == 4 (it is already tested above)
+ if (formatSize !== 4) {
+ yield { mult: 0, add: formatSize };
+ yield { mult: 1, add: 0 };
+ }
+ })
+ .combine('vertexBufferIndexVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: -1 },
+ ])
+ .combine('extraAttributeCountVariant', [
+ { mult: 0, add: 0 },
+ { mult: 0, add: 1 },
+ { mult: 1, add: -1 },
+ ])
+ .combine('testAttributeAtStart', [false, true])
+ )
+ .fn(t => {
+ const {
+ format,
+ arrayStrideVariant,
+ offsetVariant,
+ vertexBufferIndexVariant,
+ extraAttributeCountVariant,
+ testAttributeAtStart,
+ } = t.params;
+ const arrayStride = t.makeLimitVariant('maxVertexBufferArrayStride', arrayStrideVariant);
+ const vertexBufferIndex = t.makeLimitVariant('maxVertexBuffers', vertexBufferIndexVariant);
+ const extraAttributeCount = t.makeLimitVariant(
+ 'maxVertexAttributes',
+ extraAttributeCountVariant
+ );
+    // arrayStride = 0 is a special case because, for offset validation, it acts the same as
+    // arrayStride = device.limits.maxVertexBufferArrayStride. We special-case it here to avoid
+    // adding negative offsets that would cause an IDL exception to be thrown instead of a
+    // validation error.
+ const stride = arrayStride !== 0 ? arrayStride : t.device.limits.maxVertexBufferArrayStride;
+ const offset = makeValueTestVariant(stride, offsetVariant);
+
+ const attributes: GPUVertexAttribute[] = [];
+ addTestAttributes(attributes, {
+ testAttribute: { format, offset, shaderLocation: 0 },
+ testAttributeAtStart,
+ extraAttributeCount,
+ extraAttributeSkippedLocations: [0],
+ });
+
+ const vertexBuffers = [];
+ vertexBuffers[vertexBufferIndex] = { arrayStride, attributes };
+
+ const formatSize = kVertexFormatInfo[format].byteSize;
+ const limit = arrayStride === 0 ? t.device.limits.maxVertexBufferArrayStride : arrayStride;
+
+ const success = offset + formatSize <= limit;
+ t.testVertexState(success, vertexBuffers);
+ });
+
+g.test('many_attributes_overlapping')
+ .desc(`Test that it is valid to have many vertex attributes overlap`)
+ .fn(t => {
+    // Create many attributes, each of which overlaps with at least 3 others.
+ const attributes = [];
+ const formats = ['float32x4', 'uint32x4', 'sint32x4'] as const;
+ for (let i = 0; i < t.device.limits.maxVertexAttributes; i++) {
+ attributes.push({ format: formats[i % 3], offset: i * 4, shaderLocation: i } as const);
+ }
+
+ t.testVertexState(true, [{ arrayStride: 0, attributes }]);
+ });
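+
+// Overlapping byte ranges are intentionally allowed: with arrayStride 0 each 16-byte attribute
+// above starts only 4 bytes after the previous one, so every attribute overlaps several
+// neighbours, yet the vertex state is valid. Roughly speaking, only shaderLocation uniqueness,
+// offset alignment, and the contained-in-stride rule constrain attribute placement.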
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/resource_usages/buffer/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/resource_usages/buffer/README.txt
new file mode 100644
index 0000000000..e3aa0bb9e7
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/resource_usages/buffer/README.txt
@@ -0,0 +1 @@
+TODO: look at texture,*
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/resource_usages/buffer/in_pass_encoder.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/resource_usages/buffer/in_pass_encoder.spec.ts
new file mode 100644
index 0000000000..1b36538376
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/resource_usages/buffer/in_pass_encoder.spec.ts
@@ -0,0 +1,928 @@
+export const description = `
+Buffer Usages Validation Tests in Render Pass and Compute Pass.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { assert, unreachable } from '../../../../../common/util/util.js';
+import { ValidationTest } from '../../validation_test.js';
+
+const kBoundBufferSize = 256;
+
+export type BufferUsage =
+ | 'uniform'
+ | 'storage'
+ | 'read-only-storage'
+ | 'vertex'
+ | 'index'
+ | 'indirect'
+ | 'indexedIndirect';
+
+export const kAllBufferUsages: BufferUsage[] = [
+ 'uniform',
+ 'storage',
+ 'read-only-storage',
+ 'vertex',
+ 'index',
+ 'indirect',
+ 'indexedIndirect',
+];
+
+export class BufferResourceUsageTest extends ValidationTest {
+ createBindGroupLayoutForTest(
+ type: 'uniform' | 'storage' | 'read-only-storage',
+ resourceVisibility: 'compute' | 'fragment'
+ ): GPUBindGroupLayout {
+ const bindGroupLayoutEntry: GPUBindGroupLayoutEntry = {
+ binding: 0,
+ visibility:
+ resourceVisibility === 'compute' ? GPUShaderStage.COMPUTE : GPUShaderStage.FRAGMENT,
+ buffer: {
+ type,
+ },
+ };
+ return this.device.createBindGroupLayout({
+ entries: [bindGroupLayoutEntry],
+ });
+ }
+
+ createBindGroupForTest(
+ buffer: GPUBuffer,
+ offset: number,
+ type: 'uniform' | 'storage' | 'read-only-storage',
+ resourceVisibility: 'compute' | 'fragment'
+ ): GPUBindGroup {
+ return this.device.createBindGroup({
+ layout: this.createBindGroupLayoutForTest(type, resourceVisibility),
+ entries: [
+ {
+ binding: 0,
+ resource: { buffer, offset, size: kBoundBufferSize },
+ },
+ ],
+ });
+ }
+
+ beginSimpleRenderPass(encoder: GPUCommandEncoder) {
+ const colorTexture = this.device.createTexture({
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ size: [16, 16, 1],
+ });
+ return encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorTexture.createView(),
+ loadOp: 'load',
+ storeOp: 'store',
+ },
+ ],
+ });
+ }
+
+ createRenderPipelineForTest(
+ pipelineLayout: GPUPipelineLayout | GPUAutoLayoutMode,
+ vertexBufferCount: number
+ ): GPURenderPipeline {
+ const vertexBuffers: GPUVertexBufferLayout[] = [];
+ for (let i = 0; i < vertexBufferCount; ++i) {
+ vertexBuffers.push({
+ arrayStride: 4,
+ attributes: [
+ {
+ format: 'float32',
+ shaderLocation: i,
+ offset: 0,
+ },
+ ],
+ });
+ }
+
+ return this.device.createRenderPipeline({
+ layout: pipelineLayout,
+ vertex: {
+ module: this.device.createShaderModule({
+ code: this.getNoOpShaderCode('VERTEX'),
+ }),
+ entryPoint: 'main',
+ buffers: vertexBuffers,
+ },
+ fragment: {
+ module: this.device.createShaderModule({
+ code: `
+ @fragment fn main()
+ -> @location(0) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ primitive: { topology: 'point-list' },
+ });
+ }
+}
+
+function IsBufferUsageInBindGroup(bufferUsage: BufferUsage): boolean {
+ switch (bufferUsage) {
+ case 'uniform':
+ case 'storage':
+ case 'read-only-storage':
+ return true;
+ case 'vertex':
+ case 'index':
+ case 'indirect':
+ case 'indexedIndirect':
+ return false;
+ default:
+ unreachable();
+ }
+}
+
+export const g = makeTestGroup(BufferResourceUsageTest);
+
+g.test('subresources,buffer_usage_in_one_compute_pass_with_no_dispatch')
+ .desc(
+ `
+Test that it is always allowed to set multiple bind groups with the same buffer in a compute pass
+encoder without any dispatch calls, as state-setting compute pass commands like setBindGroup(index,
+bindGroup, dynamicOffsets) do not contribute directly to a usage scope.`
+ )
+ .params(u =>
+ u
+ .combine('usage0', ['uniform', 'storage', 'read-only-storage'] as const)
+ .combine('usage1', ['uniform', 'storage', 'read-only-storage'] as const)
+ .beginSubcases()
+ .combine('visibility0', ['compute', 'fragment'] as const)
+ .combine('visibility1', ['compute', 'fragment'] as const)
+ .combine('hasOverlap', [true, false])
+ )
+ .fn(t => {
+ const { usage0, usage1, visibility0, visibility1, hasOverlap } = t.params;
+
+ const buffer = t.createBufferWithState('valid', {
+ size: kBoundBufferSize * 2,
+ usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.STORAGE,
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const computePassEncoder = encoder.beginComputePass();
+
+ const offset0 = 0;
+ const bindGroup0 = t.createBindGroupForTest(buffer, offset0, usage0, visibility0);
+ computePassEncoder.setBindGroup(0, bindGroup0);
+
+ const offset1 = hasOverlap ? offset0 : kBoundBufferSize;
+ const bindGroup1 = t.createBindGroupForTest(buffer, offset1, usage1, visibility1);
+ computePassEncoder.setBindGroup(1, bindGroup1);
+
+ computePassEncoder.end();
+
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, false);
+ });
+
+g.test('subresources,buffer_usage_in_one_compute_pass_with_one_dispatch')
+ .desc(
+ `
+Test that when one buffer is used in one compute pass encoder, its list of internal usages within
+one usage scope can only be a compatible usage list. According to the WebGPU spec, within one
+dispatch, for each bind group slot that is used by the current GPUComputePipeline's layout, every
+subresource referenced by that bind group is "used" in the usage scope.
+
+When both usages are 'storage' there is writable buffer binding aliasing, so we skip that case
+here; it will be covered separately (https://github.com/gpuweb/cts/issues/2232).
+`
+ )
+ .params(u =>
+ u
+ .combine('usage0AccessibleInDispatch', [true, false])
+ .combine('usage1AccessibleInDispatch', [true, false])
+ .combine('dispatchBeforeUsage1', [true, false])
+ .beginSubcases()
+ .combine('usage0', ['uniform', 'storage', 'read-only-storage', 'indirect'] as const)
+ .combine('visibility0', ['compute', 'fragment'] as const)
+ .filter(t => {
+ // The buffer with `indirect` usage is always accessible in the dispatch call.
+ if (
+ t.usage0 === 'indirect' &&
+ (!t.usage0AccessibleInDispatch || t.visibility0 !== 'compute' || !t.dispatchBeforeUsage1)
+ ) {
+ return false;
+ }
+ if (t.usage0AccessibleInDispatch && t.visibility0 !== 'compute') {
+ return false;
+ }
+ if (t.dispatchBeforeUsage1 && t.usage1AccessibleInDispatch) {
+ return false;
+ }
+ return true;
+ })
+ .combine('usage1', ['uniform', 'storage', 'read-only-storage', 'indirect'] as const)
+ .combine('visibility1', ['compute', 'fragment'] as const)
+ .filter(t => {
+ if (
+ t.usage1 === 'indirect' &&
+ (!t.usage1AccessibleInDispatch || t.visibility1 !== 'compute' || t.dispatchBeforeUsage1)
+ ) {
+ return false;
+ }
+ // When the first buffer usage is `indirect`, there has already been one dispatch call, so
+ // in this test we always make the second usage inaccessible in the dispatch call.
+ if (
+ t.usage1AccessibleInDispatch &&
+ (t.visibility1 !== 'compute' || t.usage0 === 'indirect')
+ ) {
+ return false;
+ }
+
+ // Avoid writable storage buffer bindings aliasing.
+ if (t.usage0 === 'storage' && t.usage1 === 'storage') {
+ return false;
+ }
+ return true;
+ })
+ .combine('hasOverlap', [true, false])
+ )
+ .fn(t => {
+ const {
+ usage0AccessibleInDispatch,
+ usage1AccessibleInDispatch,
+ dispatchBeforeUsage1,
+ usage0,
+ visibility0,
+ usage1,
+ visibility1,
+ hasOverlap,
+ } = t.params;
+
+ const buffer = t.createBufferWithState('valid', {
+ size: kBoundBufferSize * 2,
+ usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.STORAGE | GPUBufferUsage.INDIRECT,
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const computePassEncoder = encoder.beginComputePass();
+
+ const offset0 = 0;
+ switch (usage0) {
+ case 'uniform':
+ case 'storage':
+ case 'read-only-storage': {
+ const bindGroup0 = t.createBindGroupForTest(buffer, offset0, usage0, visibility0);
+ computePassEncoder.setBindGroup(0, bindGroup0);
+
+ /*
+ * setBindGroup(bindGroup0);
+ * dispatchWorkgroups();
+ * setBindGroup(bindGroup1);
+ */
+ if (dispatchBeforeUsage1) {
+ let pipelineLayout: GPUPipelineLayout | undefined = undefined;
+ if (usage0AccessibleInDispatch) {
+ const bindGroupLayout0 = t.createBindGroupLayoutForTest(usage0, visibility0);
+ pipelineLayout = t.device.createPipelineLayout({
+ bindGroupLayouts: [bindGroupLayout0],
+ });
+ }
+ const computePipeline = t.createNoOpComputePipeline(pipelineLayout);
+ computePassEncoder.setPipeline(computePipeline);
+ computePassEncoder.dispatchWorkgroups(1);
+ }
+ break;
+ }
+ case 'indirect': {
+ /*
+ * dispatchWorkgroupsIndirect(buffer);
+ * setBindGroup(bindGroup1);
+ */
+ assert(dispatchBeforeUsage1);
+ const computePipeline = t.createNoOpComputePipeline();
+ computePassEncoder.setPipeline(computePipeline);
+ computePassEncoder.dispatchWorkgroupsIndirect(buffer, offset0);
+ break;
+ }
+ }
+
+ const offset1 = hasOverlap ? offset0 : kBoundBufferSize;
+ switch (usage1) {
+ case 'uniform':
+ case 'storage':
+ case 'read-only-storage': {
+ const bindGroup1 = t.createBindGroupForTest(buffer, offset1, usage1, visibility1);
+ const bindGroupIndex = usage0AccessibleInDispatch ? 1 : 0;
+ computePassEncoder.setBindGroup(bindGroupIndex, bindGroup1);
+
+ /*
+ * setBindGroup(bindGroup0);
+ * setBindGroup(bindGroup1);
+ * dispatchWorkgroups();
+ */
+ if (!dispatchBeforeUsage1) {
+ const bindGroupLayouts: GPUBindGroupLayout[] = [];
+ if (usage0AccessibleInDispatch && usage0 !== 'indirect') {
+ const bindGroupLayout0 = t.createBindGroupLayoutForTest(usage0, visibility0);
+ bindGroupLayouts.push(bindGroupLayout0);
+ }
+ if (usage1AccessibleInDispatch) {
+ const bindGroupLayout1 = t.createBindGroupLayoutForTest(usage1, visibility1);
+ bindGroupLayouts.push(bindGroupLayout1);
+ }
+          // An empty array is truthy, so check the length: only create an explicit pipeline
+          // layout when at least one bind group layout is used in the dispatch.
+          const pipelineLayout: GPUPipelineLayout | undefined =
+            bindGroupLayouts.length > 0
+              ? t.device.createPipelineLayout({
+                  bindGroupLayouts,
+                })
+              : undefined;
+ const computePipeline = t.createNoOpComputePipeline(pipelineLayout);
+ computePassEncoder.setPipeline(computePipeline);
+ computePassEncoder.dispatchWorkgroups(1);
+ }
+ break;
+ }
+ case 'indirect': {
+ /*
+ * setBindGroup(bindGroup0);
+ * dispatchWorkgroupsIndirect(buffer);
+ */
+ assert(!dispatchBeforeUsage1);
+ let pipelineLayout: GPUPipelineLayout | undefined = undefined;
+ if (usage0AccessibleInDispatch) {
+ assert(usage0 !== 'indirect');
+ pipelineLayout = t.device.createPipelineLayout({
+ bindGroupLayouts: [t.createBindGroupLayoutForTest(usage0, visibility0)],
+ });
+ }
+ const computePipeline = t.createNoOpComputePipeline(pipelineLayout);
+ computePassEncoder.setPipeline(computePipeline);
+ computePassEncoder.dispatchWorkgroupsIndirect(buffer, offset1);
+ break;
+ }
+ }
+ computePassEncoder.end();
+
+ const usageHasConflict =
+ (usage0 === 'storage' && usage1 !== 'storage') ||
+ (usage0 !== 'storage' && usage1 === 'storage');
+ const fail =
+ usageHasConflict &&
+ visibility0 === 'compute' &&
+ visibility1 === 'compute' &&
+ usage0AccessibleInDispatch &&
+ usage1AccessibleInDispatch;
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, fail);
+ });
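+
+// The `fail` expression above captures the usage-scope rule for compute passes: within the usage
+// scope of a single dispatch, one buffer must not be used both as writable 'storage' and as any
+// other usage. A rough sketch of a failing sequence, assuming bind groups built over one buffer
+// and a pipeline whose layout uses both of them:
+//   pass.setBindGroup(0, storageBindGroup);   // compute-visible 'storage' binding
+//   pass.setBindGroup(1, uniformBindGroup);   // compute-visible 'uniform' binding
+//   pass.setPipeline(pipelineUsingBothGroups);
+//   pass.dispatchWorkgroups(1);               // both bindings join one usage scope
+//   // ... encoder.finish() then reports a validation error.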
+
+g.test('subresources,buffer_usage_in_compute_pass_with_two_dispatches')
+ .desc(
+ `
+Test that it is always allowed to use one buffer in different dispatch calls. Per the WebGPU spec,
+within one dispatch, for each bind group slot that is used by the current GPUComputePipeline's
+layout, every subresource referenced by that bind group is "used" in the usage scope, and different
+dispatch calls refer to different usage scopes.
+ )
+ .params(u =>
+ u
+ .combine('usage0', ['uniform', 'storage', 'read-only-storage', 'indirect'] as const)
+ .combine('usage1', ['uniform', 'storage', 'read-only-storage', 'indirect'] as const)
+ .beginSubcases()
+ .combine('inSamePass', [true, false])
+ .combine('hasOverlap', [true, false])
+ )
+ .fn(t => {
+ const { usage0, usage1, inSamePass, hasOverlap } = t.params;
+
+ const UseBufferOnComputePassEncoder = (
+ computePassEncoder: GPUComputePassEncoder,
+ buffer: GPUBuffer,
+ usage: 'uniform' | 'storage' | 'read-only-storage' | 'indirect',
+ offset: number
+ ) => {
+ switch (usage) {
+ case 'uniform':
+ case 'storage':
+ case 'read-only-storage': {
+ const bindGroup = t.createBindGroupForTest(buffer, offset, usage, 'compute');
+ computePassEncoder.setBindGroup(0, bindGroup);
+
+ const bindGroupLayout = t.createBindGroupLayoutForTest(usage, 'compute');
+ const pipelineLayout = t.device.createPipelineLayout({
+ bindGroupLayouts: [bindGroupLayout],
+ });
+ const computePipeline = t.createNoOpComputePipeline(pipelineLayout);
+ computePassEncoder.setPipeline(computePipeline);
+ computePassEncoder.dispatchWorkgroups(1);
+ break;
+ }
+ case 'indirect': {
+ const computePipeline = t.createNoOpComputePipeline();
+ computePassEncoder.setPipeline(computePipeline);
+ computePassEncoder.dispatchWorkgroupsIndirect(buffer, offset);
+ break;
+ }
+ default:
+ unreachable();
+ break;
+ }
+ };
+
+ const buffer = t.createBufferWithState('valid', {
+ size: kBoundBufferSize * 2,
+ usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.STORAGE | GPUBufferUsage.INDIRECT,
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const computePassEncoder = encoder.beginComputePass();
+
+ const offset0 = 0;
+ const offset1 = hasOverlap ? offset0 : kBoundBufferSize;
+ UseBufferOnComputePassEncoder(computePassEncoder, buffer, usage0, offset0);
+
+ if (inSamePass) {
+ UseBufferOnComputePassEncoder(computePassEncoder, buffer, usage1, offset1);
+ computePassEncoder.end();
+ } else {
+ computePassEncoder.end();
+ const anotherComputePassEncoder = encoder.beginComputePass();
+ UseBufferOnComputePassEncoder(anotherComputePassEncoder, buffer, usage1, offset1);
+ anotherComputePassEncoder.end();
+ }
+
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, false);
+ });
+
+g.test('subresources,buffer_usage_in_one_render_pass_with_no_draw')
+ .desc(
+ `
+Test that when one buffer is used in one render pass encoder, its list of internal usages within one
+usage scope (all the commands in the whole render pass) can only be a compatible usage list even if
+there is no draw call in the render pass.
+ `
+ )
+ .params(u =>
+ u
+ .combine('usage0', ['uniform', 'storage', 'read-only-storage', 'vertex', 'index'] as const)
+ .combine('usage1', ['uniform', 'storage', 'read-only-storage', 'vertex', 'index'] as const)
+ .beginSubcases()
+ .combine('hasOverlap', [true, false])
+ .combine('visibility0', ['compute', 'fragment'] as const)
+ .unless(t => t.visibility0 === 'compute' && !IsBufferUsageInBindGroup(t.usage0))
+ .combine('visibility1', ['compute', 'fragment'] as const)
+ .unless(t => t.visibility1 === 'compute' && !IsBufferUsageInBindGroup(t.usage1))
+ )
+ .fn(t => {
+ const { usage0, usage1, hasOverlap, visibility0, visibility1 } = t.params;
+
+ const UseBufferOnRenderPassEncoder = (
+ buffer: GPUBuffer,
+ offset: number,
+ type: BufferUsage,
+ bindGroupVisibility: 'compute' | 'fragment',
+ renderPassEncoder: GPURenderPassEncoder
+ ) => {
+ switch (type) {
+ case 'uniform':
+ case 'storage':
+ case 'read-only-storage': {
+ const bindGroup = t.createBindGroupForTest(buffer, offset, type, bindGroupVisibility);
+ renderPassEncoder.setBindGroup(0, bindGroup);
+ break;
+ }
+ case 'vertex': {
+ renderPassEncoder.setVertexBuffer(0, buffer, offset, kBoundBufferSize);
+ break;
+ }
+ case 'index': {
+ renderPassEncoder.setIndexBuffer(buffer, 'uint16', offset, kBoundBufferSize);
+ break;
+ }
+ case 'indirect':
+ case 'indexedIndirect':
+ unreachable();
+ break;
+ }
+ };
+
+ const buffer = t.createBufferWithState('valid', {
+ size: kBoundBufferSize * 2,
+ usage:
+ GPUBufferUsage.UNIFORM |
+ GPUBufferUsage.STORAGE |
+ GPUBufferUsage.VERTEX |
+ GPUBufferUsage.INDEX,
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const renderPassEncoder = t.beginSimpleRenderPass(encoder);
+ const offset0 = 0;
+ UseBufferOnRenderPassEncoder(buffer, offset0, usage0, visibility0, renderPassEncoder);
+ const offset1 = hasOverlap ? offset0 : kBoundBufferSize;
+ UseBufferOnRenderPassEncoder(buffer, offset1, usage1, visibility1, renderPassEncoder);
+ renderPassEncoder.end();
+
+ const fail = (usage0 === 'storage') !== (usage1 === 'storage');
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, fail);
+ });
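+
+// Unlike a compute pass, where each dispatch opens its own usage scope, the whole render pass is
+// a single usage scope. Mixing a writable 'storage' use of the buffer with any other use in the
+// pass therefore fails at encoder.finish() even though no draw call was recorded, which is what
+// the `fail` expression above encodes.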
+
+g.test('subresources,buffer_usage_in_one_render_pass_with_one_draw')
+ .desc(
+ `
+Test that when one buffer is used in one render pass encoder where there is one draw call, its list
+of internal usages within one usage scope (all the commands in the whole render pass) can only be a
+compatible usage list. The usage scope rules are not related to the buffer offset or the bind group
+layout visibilities.
+
+When both usages are 'storage' there is writable buffer binding aliasing, so we skip that case
+here; it will be covered separately (https://github.com/gpuweb/cts/issues/2232).
+`
+ )
+ .params(u =>
+ u
+ .combine('usage0', kAllBufferUsages)
+ .combine('usage1', kAllBufferUsages)
+ .beginSubcases()
+ .combine('usage0AccessibleInDraw', [true, false])
+ .combine('usage1AccessibleInDraw', [true, false])
+ .combine('drawBeforeUsage1', [true, false])
+ .combine('visibility0', ['compute', 'fragment'] as const)
+ .filter(t => {
+ // The buffer with `indirect` or `indexedIndirect` usage is always accessible in the draw
+ // call.
+ if (
+ (t.usage0 === 'indirect' || t.usage0 === 'indexedIndirect') &&
+ (!t.usage0AccessibleInDraw || t.visibility0 !== 'fragment' || !t.drawBeforeUsage1)
+ ) {
+ return false;
+ }
+        // The buffer usages `vertex` and `index` have nothing to do with shader visibilities.
+ if ((t.usage0 === 'vertex' || t.usage0 === 'index') && t.visibility0 !== 'fragment') {
+ return false;
+ }
+
+        // When usage0 is accessible in the draw call, visibility0 can only be 'fragment'.
+ if (t.usage0AccessibleInDraw && t.visibility0 !== 'fragment') {
+ return false;
+ }
+        // When usage1 is accessible in the draw call, the draw call cannot come before usage1.
+ if (t.drawBeforeUsage1 && t.usage1AccessibleInDraw) {
+ return false;
+ }
+
+ // Avoid writable storage buffer bindings aliasing.
+ if (t.usage0 === 'storage' && t.usage1 === 'storage') {
+ return false;
+ }
+ return true;
+ })
+ .combine('visibility1', ['compute', 'fragment'] as const)
+ .filter(t => {
+ if (
+ (t.usage1 === 'indirect' || t.usage1 === 'indexedIndirect') &&
+ (!t.usage1AccessibleInDraw || t.visibility1 !== 'fragment' || t.drawBeforeUsage1)
+ ) {
+ return false;
+ }
+ if ((t.usage1 === 'vertex' || t.usage1 === 'index') && t.visibility1 !== 'fragment') {
+ return false;
+ }
+ // When the first buffer usage is `indirect` or `indexedIndirect`, there has already been
+ // one draw call, so in this test we always make the second usage inaccessible in the draw
+ // call.
+ if (
+ t.usage1AccessibleInDraw &&
+ (t.visibility1 !== 'fragment' ||
+ t.usage0 === 'indirect' ||
+ t.usage0 === 'indexedIndirect')
+ ) {
+ return false;
+ }
+ // When the first buffer usage is `index` and is accessible in the draw call, the second
+ // usage cannot be `indirect` (it should be `indexedIndirect` for the tests on indirect draw
+ // calls)
+ if (t.usage0 === 'index' && t.usage0AccessibleInDraw && t.usage1 === 'indirect') {
+ return false;
+ }
+ return true;
+ })
+ .combine('hasOverlap', [true, false])
+ )
+ .fn(t => {
+ const {
+ // Buffer with usage0 will be "used" in the draw call if this value is true.
+ usage0AccessibleInDraw,
+ // Buffer with usage1 will be "used" in the draw call if this value is true.
+ usage1AccessibleInDraw,
+ // Whether we will have the draw call before setting the buffer usage as "usage1" or not.
+ // If it is true: set-usage0 -> draw -> set-usage1 or indirect-draw -> set-usage1
+ // Otherwise: set-usage0 -> set-usage1 -> draw or set-usage0 -> indirect-draw
+ drawBeforeUsage1,
+ usage0,
+ visibility0,
+ usage1,
+ visibility1,
+ hasOverlap,
+ } = t.params;
+ const buffer = t.createBufferWithState('valid', {
+ size: kBoundBufferSize * 2,
+ usage:
+ GPUBufferUsage.UNIFORM |
+ GPUBufferUsage.STORAGE |
+ GPUBufferUsage.VERTEX |
+ GPUBufferUsage.INDEX |
+ GPUBufferUsage.INDIRECT,
+ });
+
+ const UseBufferOnRenderPassEncoder = (
+ bufferAccessibleInDraw: boolean,
+ bufferIndex: number,
+ offset: number,
+ usage: BufferUsage,
+ bindGroupVisibility: 'compute' | 'fragment',
+ renderPassEncoder: GPURenderPassEncoder,
+ usedBindGroupLayouts: GPUBindGroupLayout[]
+ ) => {
+ switch (usage) {
+ case 'uniform':
+ case 'storage':
+ case 'read-only-storage': {
+ const bindGroup = t.createBindGroupForTest(buffer, offset, usage, bindGroupVisibility);
+ renderPassEncoder.setBindGroup(bufferIndex, bindGroup);
+ // To "use" the bind group we will set the corresponding bind group layout in the
+ // pipeline layout when creating the render pipeline.
+ if (bufferAccessibleInDraw && bindGroupVisibility === 'fragment') {
+ usedBindGroupLayouts.push(t.createBindGroupLayoutForTest(usage, bindGroupVisibility));
+ }
+ break;
+ }
+ case 'vertex': {
+ renderPassEncoder.setVertexBuffer(bufferIndex, buffer, offset);
+ break;
+ }
+ case 'index': {
+ renderPassEncoder.setIndexBuffer(buffer, 'uint16', offset);
+ break;
+ }
+ case 'indirect':
+ case 'indexedIndirect': {
+ // We will handle the indirect draw calls later.
+ break;
+ }
+ }
+ };
+
+ const MakeDrawCallWithOneUsage = (
+ usage: BufferUsage,
+ offset: number,
+ renderPassEncoder: GPURenderPassEncoder
+ ) => {
+ switch (usage) {
+ case 'uniform':
+ case 'read-only-storage':
+ case 'storage':
+ case 'vertex':
+ renderPassEncoder.draw(1);
+ break;
+ case 'index':
+ renderPassEncoder.drawIndexed(1);
+ break;
+ case 'indirect':
+ renderPassEncoder.drawIndirect(buffer, offset);
+ break;
+ case 'indexedIndirect': {
+ const indexBuffer = t.device.createBuffer({
+ size: 4,
+ usage: GPUBufferUsage.INDEX,
+ });
+ renderPassEncoder.setIndexBuffer(indexBuffer, 'uint16');
+ renderPassEncoder.drawIndexedIndirect(buffer, offset);
+ break;
+ }
+ }
+ };
+
+ const encoder = t.device.createCommandEncoder();
+ const renderPassEncoder = t.beginSimpleRenderPass(encoder);
+
+ // Set buffer with usage0
+ const offset0 = 0;
+    // Invisible bind groups or vertex buffers are all bound to slot 1.
+ const bufferIndex0 = visibility0 === 'fragment' ? 0 : 1;
+ const usedBindGroupLayouts: GPUBindGroupLayout[] = [];
+
+ UseBufferOnRenderPassEncoder(
+ usage0AccessibleInDraw,
+ bufferIndex0,
+ offset0,
+ usage0,
+ visibility0,
+ renderPassEncoder,
+ usedBindGroupLayouts
+ );
+
+ let vertexBufferCount = 0;
+
+ // Set pipeline and do draw call if drawBeforeUsage1 === true
+ if (drawBeforeUsage1) {
+ const pipelineLayout = t.device.createPipelineLayout({
+ bindGroupLayouts: usedBindGroupLayouts,
+ });
+ // To "use" the vertex buffer we need to set the corresponding vertex buffer layout when
+ // creating the render pipeline.
+ if (usage0 === 'vertex' && usage0AccessibleInDraw) {
+ ++vertexBufferCount;
+ }
+ const pipeline = t.createRenderPipelineForTest(pipelineLayout, vertexBufferCount);
+ renderPassEncoder.setPipeline(pipeline);
+ if (!usage0AccessibleInDraw) {
+ renderPassEncoder.draw(1);
+ } else {
+ MakeDrawCallWithOneUsage(usage0, offset0, renderPassEncoder);
+ }
+ }
+
+ // Set buffer with usage1.
+ const offset1 = hasOverlap ? offset0 : kBoundBufferSize;
+ let bufferIndex1 = 0;
+ if (visibility1 !== 'fragment') {
+      // Invisible bind groups or vertex buffers are all bound to slot 1.
+ bufferIndex1 = 1;
+ } else if (visibility0 === 'fragment' && usage0AccessibleInDraw) {
+      // When the buffer is bound to different bind groups or bound as a vertex buffer twice in one
+      // render pass encoder, the second binding should consume slot 1.
+ if (IsBufferUsageInBindGroup(usage0) && IsBufferUsageInBindGroup(usage1)) {
+ bufferIndex1 = 1;
+ } else if (usage0 === 'vertex' && usage1 === 'vertex') {
+ bufferIndex1 = 1;
+ }
+ }
+
+ UseBufferOnRenderPassEncoder(
+ usage1AccessibleInDraw,
+ bufferIndex1,
+ offset1,
+ usage1,
+ visibility1,
+ renderPassEncoder,
+ usedBindGroupLayouts
+ );
+
+ // Set pipeline and do draw call if drawBeforeUsage1 === false
+ if (!drawBeforeUsage1) {
+ const pipelineLayout = t.device.createPipelineLayout({
+ bindGroupLayouts: usedBindGroupLayouts,
+ });
+ if (usage1 === 'vertex' && usage1AccessibleInDraw) {
+ // To "use" the vertex buffer we need to set the corresponding vertex buffer layout when
+ // creating the render pipeline.
+ ++vertexBufferCount;
+ }
+ const pipeline = t.createRenderPipelineForTest(pipelineLayout, vertexBufferCount);
+ renderPassEncoder.setPipeline(pipeline);
+
+ assert(usage0 !== 'indirect');
+ if (!usage0AccessibleInDraw && !usage1AccessibleInDraw) {
+ renderPassEncoder.draw(1);
+ } else if (usage0AccessibleInDraw && !usage1AccessibleInDraw) {
+ MakeDrawCallWithOneUsage(usage0, offset0, renderPassEncoder);
+ } else if (!usage0AccessibleInDraw && usage1AccessibleInDraw) {
+ MakeDrawCallWithOneUsage(usage1, offset1, renderPassEncoder);
+ } else {
+ if (usage1 === 'indexedIndirect') {
+ // If the index buffer has already been set (as usage0), we won't need to set another
+ // index buffer.
+ if (usage0 !== 'index') {
+ const indexBuffer = t.createBufferWithState('valid', {
+ size: 4,
+ usage: GPUBufferUsage.INDEX,
+ });
+ renderPassEncoder.setIndexBuffer(indexBuffer, 'uint16');
+ }
+ renderPassEncoder.drawIndexedIndirect(buffer, offset1);
+ } else if (usage1 === 'indirect') {
+ assert(usage0 !== 'index');
+ renderPassEncoder.drawIndirect(buffer, offset1);
+ } else if (usage0 === 'index' || usage1 === 'index') {
+ // We need to call drawIndexed to "use" the index buffer (as usage0 or usage1).
+ renderPassEncoder.drawIndexed(1);
+ } else {
+ renderPassEncoder.draw(1);
+ }
+ }
+ }
+ renderPassEncoder.end();
+
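+    // As in the test above, failure is expected only when exactly one usage is writable 'storage';
+    // the storage-with-storage case is filtered out earlier to avoid writable binding aliasing.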
+ const fail = (usage0 === 'storage') !== (usage1 === 'storage');
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, fail);
+ });
+
+g.test('subresources,buffer_usage_in_one_render_pass_with_two_draws')
+ .desc(
+ `
+Test that when one buffer is used in different draw calls in one render pass, its list of internal
+usages within one usage scope (all the commands in the whole render pass) can only be a compatible
+usage list, and that the usage scope rules are not related to the buffer offset. Draw calls in
+different render pass encoders belong to different usage scopes.`
+ )
+ .params(u =>
+ u
+ .combine('usage0', kAllBufferUsages)
+ .combine('usage1', kAllBufferUsages)
+ .beginSubcases()
+ .combine('inSamePass', [true, false])
+ .combine('hasOverlap', [true, false])
+ )
+ .fn(t => {
+ const { usage0, usage1, inSamePass, hasOverlap } = t.params;
+ const buffer = t.createBufferWithState('valid', {
+ size: kBoundBufferSize * 2,
+ usage:
+ GPUBufferUsage.UNIFORM |
+ GPUBufferUsage.STORAGE |
+ GPUBufferUsage.VERTEX |
+ GPUBufferUsage.INDEX |
+ GPUBufferUsage.INDIRECT,
+ });
+ const UseBufferOnRenderPassEncoderInDrawCall = (
+ offset: number,
+ usage: BufferUsage,
+ renderPassEncoder: GPURenderPassEncoder
+ ) => {
+ switch (usage) {
+ case 'uniform':
+ case 'storage':
+ case 'read-only-storage': {
+ const bindGroupLayout = t.createBindGroupLayoutForTest(usage, 'fragment');
+ const pipelineLayout = t.device.createPipelineLayout({
+ bindGroupLayouts: [bindGroupLayout],
+ });
+ const pipeline = t.createRenderPipelineForTest(pipelineLayout, 0);
+ renderPassEncoder.setPipeline(pipeline);
+ const bindGroup = t.createBindGroupForTest(buffer, offset, usage, 'fragment');
+ renderPassEncoder.setBindGroup(0, bindGroup);
+ renderPassEncoder.draw(1);
+ break;
+ }
+ case 'vertex': {
+ const kVertexBufferCount = 1;
+ const pipeline = t.createRenderPipelineForTest('auto', kVertexBufferCount);
+ renderPassEncoder.setPipeline(pipeline);
+ renderPassEncoder.setVertexBuffer(0, buffer, offset);
+ renderPassEncoder.draw(1);
+ break;
+ }
+ case 'index': {
+ const pipeline = t.createRenderPipelineForTest('auto', 0);
+ renderPassEncoder.setPipeline(pipeline);
+ renderPassEncoder.setIndexBuffer(buffer, 'uint16', offset);
+ renderPassEncoder.drawIndexed(1);
+ break;
+ }
+ case 'indirect': {
+ const pipeline = t.createRenderPipelineForTest('auto', 0);
+ renderPassEncoder.setPipeline(pipeline);
+ renderPassEncoder.drawIndirect(buffer, offset);
+ break;
+ }
+ case 'indexedIndirect': {
+ const pipeline = t.createRenderPipelineForTest('auto', 0);
+ renderPassEncoder.setPipeline(pipeline);
+ const indexBuffer = t.createBufferWithState('valid', {
+ size: 4,
+ usage: GPUBufferUsage.INDEX,
+ });
+ renderPassEncoder.setIndexBuffer(indexBuffer, 'uint16');
+ renderPassEncoder.drawIndexedIndirect(buffer, offset);
+ break;
+ }
+ }
+ };
+
+ const encoder = t.device.createCommandEncoder();
+ const renderPassEncoder = t.beginSimpleRenderPass(encoder);
+
+ const offset0 = 0;
+ UseBufferOnRenderPassEncoderInDrawCall(offset0, usage0, renderPassEncoder);
+
+ const offset1 = hasOverlap ? offset0 : kBoundBufferSize;
+ if (inSamePass) {
+ UseBufferOnRenderPassEncoderInDrawCall(offset1, usage1, renderPassEncoder);
+ renderPassEncoder.end();
+ } else {
+ renderPassEncoder.end();
+ const anotherRenderPassEncoder = t.beginSimpleRenderPass(encoder);
+ UseBufferOnRenderPassEncoderInDrawCall(offset1, usage1, anotherRenderPassEncoder);
+ anotherRenderPassEncoder.end();
+ }
+
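+    // Draw calls in different render passes belong to different usage scopes, so a validation
+    // failure is only expected when both usages are in the same pass and exactly one of them is
+    // writable 'storage'.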
+ const fail = inSamePass && (usage0 === 'storage') !== (usage1 === 'storage');
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, fail);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/resource_usages/buffer/in_pass_misc.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/resource_usages/buffer/in_pass_misc.spec.ts
new file mode 100644
index 0000000000..9aa1a37537
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/resource_usages/buffer/in_pass_misc.spec.ts
@@ -0,0 +1,409 @@
+export const description = `
+Test other buffer usage validation rules that are not tested in ./in_pass_encoder.spec.js.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { unreachable } from '../../../../../common/util/util.js';
+
+import { BufferUsage, BufferResourceUsageTest, kAllBufferUsages } from './in_pass_encoder.spec.js';
+
+export const g = makeTestGroup(BufferResourceUsageTest);
+
+const kBufferSize = 256;
+
+g.test('subresources,reset_buffer_usage_before_dispatch')
+ .desc(
+ `
+Test that buffer usages which are reset by other state-setting commands before a dispatch call
+do not contribute directly to any usage scope in a compute pass.`
+ )
+ .params(u =>
+ u
+ .combine('usage0', ['uniform', 'storage', 'read-only-storage'] as const)
+ .combine('usage1', ['uniform', 'storage', 'read-only-storage', 'indirect'] as const)
+ )
+ .fn(t => {
+ const { usage0, usage1 } = t.params;
+
+ const kUsages = GPUBufferUsage.UNIFORM | GPUBufferUsage.STORAGE | GPUBufferUsage.INDIRECT;
+ const buffer = t.createBufferWithState('valid', {
+ size: kBufferSize,
+ usage: kUsages,
+ });
+ const anotherBuffer = t.createBufferWithState('valid', {
+ size: kBufferSize,
+ usage: kUsages,
+ });
+
+ const bindGroupLayouts: GPUBindGroupLayout[] = [
+ t.createBindGroupLayoutForTest(usage0, 'compute'),
+ ];
+ if (usage1 !== 'indirect') {
+ bindGroupLayouts.push(t.createBindGroupLayoutForTest(usage1, 'compute'));
+ }
+ const pipelineLayout = t.device.createPipelineLayout({ bindGroupLayouts });
+ const computePipeline = t.createNoOpComputePipeline(pipelineLayout);
+
+ const encoder = t.device.createCommandEncoder();
+ const computePassEncoder = encoder.beginComputePass();
+ computePassEncoder.setPipeline(computePipeline);
+
+ // Set usage0 for buffer at bind group index 0
+ const bindGroup0 = t.createBindGroupForTest(buffer, 0, usage0, 'compute');
+ computePassEncoder.setBindGroup(0, bindGroup0);
+
+ // Reset bind group index 0 with another bind group that uses anotherBuffer
+ const anotherBindGroup = t.createBindGroupForTest(anotherBuffer, 0, usage0, 'compute');
+ computePassEncoder.setBindGroup(0, anotherBindGroup);
+
+ // Set usage1 for buffer
+ switch (usage1) {
+ case 'uniform':
+ case 'storage':
+ case 'read-only-storage': {
+ const bindGroup1 = t.createBindGroupForTest(buffer, 0, usage1, 'compute');
+ computePassEncoder.setBindGroup(1, bindGroup1);
+ computePassEncoder.dispatchWorkgroups(1);
+ break;
+ }
+ case 'indirect': {
+ computePassEncoder.dispatchWorkgroupsIndirect(buffer, 0);
+ break;
+ }
+ }
+ computePassEncoder.end();
+
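+    // The usage0 binding on `buffer` was replaced before the dispatch call, so it does not
+    // contribute to the dispatch's usage scope and no validation error is expected.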
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, false);
+ });
+
+g.test('subresources,reset_buffer_usage_before_draw')
+ .desc(
+ `
+Test that buffer usages which are reset by other state-setting commands before a draw call
+still contribute directly to the usage scope of the draw call.`
+ )
+ .params(u =>
+ u
+ .combine('usage0', ['uniform', 'storage', 'read-only-storage', 'vertex', 'index'] as const)
+ .combine('usage1', kAllBufferUsages)
+ .unless(t => {
+ return t.usage0 === 'index' && t.usage1 === 'indirect';
+ })
+ )
+ .fn(t => {
+ const { usage0, usage1 } = t.params;
+
+ const kUsages =
+ GPUBufferUsage.UNIFORM |
+ GPUBufferUsage.STORAGE |
+ GPUBufferUsage.INDIRECT |
+ GPUBufferUsage.VERTEX |
+ GPUBufferUsage.INDEX;
+ const buffer = t.createBufferWithState('valid', {
+ size: kBufferSize,
+ usage: kUsages,
+ });
+ const anotherBuffer = t.createBufferWithState('valid', {
+ size: kBufferSize,
+ usage: kUsages,
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const renderPassEncoder = t.beginSimpleRenderPass(encoder);
+
+ const bindGroupLayouts: GPUBindGroupLayout[] = [];
+ let vertexBufferCount = 0;
+
+ // Set buffer as usage0 and reset buffer with anotherBuffer as usage0
+ switch (usage0) {
+ case 'uniform':
+ case 'storage':
+ case 'read-only-storage': {
+ const bindGroup0 = t.createBindGroupForTest(buffer, 0, usage0, 'fragment');
+ renderPassEncoder.setBindGroup(bindGroupLayouts.length, bindGroup0);
+
+ const anotherBindGroup = t.createBindGroupForTest(anotherBuffer, 0, usage0, 'fragment');
+ renderPassEncoder.setBindGroup(bindGroupLayouts.length, anotherBindGroup);
+
+ bindGroupLayouts.push(t.createBindGroupLayoutForTest(usage0, 'fragment'));
+ break;
+ }
+ case 'vertex': {
+ renderPassEncoder.setVertexBuffer(vertexBufferCount, buffer);
+ renderPassEncoder.setVertexBuffer(vertexBufferCount, anotherBuffer);
+
+ ++vertexBufferCount;
+ break;
+ }
+ case 'index': {
+ renderPassEncoder.setIndexBuffer(buffer, 'uint16');
+ renderPassEncoder.setIndexBuffer(anotherBuffer, 'uint16');
+ break;
+ }
+ }
+
+ // Set buffer as usage1
+ switch (usage1) {
+ case 'uniform':
+ case 'storage':
+ case 'read-only-storage': {
+ const bindGroup1 = t.createBindGroupForTest(buffer, 0, usage1, 'fragment');
+ renderPassEncoder.setBindGroup(bindGroupLayouts.length, bindGroup1);
+
+ bindGroupLayouts.push(t.createBindGroupLayoutForTest(usage1, 'fragment'));
+ break;
+ }
+ case 'vertex': {
+ renderPassEncoder.setVertexBuffer(vertexBufferCount, buffer);
+ ++vertexBufferCount;
+ break;
+ }
+ case 'index': {
+ renderPassEncoder.setIndexBuffer(buffer, 'uint16');
+ break;
+ }
+ case 'indirect':
+ case 'indexedIndirect':
+ break;
+ }
+
+ // Add draw call
+ const pipelineLayout = t.device.createPipelineLayout({
+ bindGroupLayouts,
+ });
+ const renderPipeline = t.createRenderPipelineForTest(pipelineLayout, vertexBufferCount);
+ renderPassEncoder.setPipeline(renderPipeline);
+ switch (usage1) {
+ case 'indexedIndirect': {
+ if (usage0 !== 'index') {
+ const indexBuffer = t.createBufferWithState('valid', {
+ size: 4,
+ usage: GPUBufferUsage.INDEX,
+ });
+ renderPassEncoder.setIndexBuffer(indexBuffer, 'uint16');
+ }
+ renderPassEncoder.drawIndexedIndirect(buffer, 0);
+ break;
+ }
+ case 'indirect': {
+ renderPassEncoder.drawIndirect(buffer, 0);
+ break;
+ }
+ case 'index': {
+ renderPassEncoder.drawIndexed(1);
+ break;
+ }
+ case 'vertex':
+ case 'uniform':
+ case 'storage':
+ case 'read-only-storage': {
+ if (usage0 === 'index') {
+ renderPassEncoder.drawIndexed(1);
+ } else {
+ renderPassEncoder.draw(1);
+ }
+ break;
+ }
+ }
+
+ renderPassEncoder.end();
+
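+    // Although the usage0 binding on `buffer` was replaced before the draw call, it still
+    // contributes to the render pass's usage scope, so combining writable 'storage' with any
+    // other usage is still expected to fail.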
+ const fail = (usage0 === 'storage') !== (usage1 === 'storage');
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, fail);
+ });
+
+g.test('subresources,buffer_usages_in_copy_and_pass')
+ .desc(
+ `
+  Test that using one buffer both in a copy command and in a render or compute pass encoder is
+  always allowed, as the WebGPU spec (chapter 3.4.5) defines that outside of any pass encoder, each
+  command belongs to its own separate usage scope.`
+ )
+ .params(u =>
+ u
+ .combine('usage0', [
+ 'copy-src',
+ 'copy-dst',
+ 'uniform',
+ 'storage',
+ 'read-only-storage',
+ 'vertex',
+ 'index',
+ 'indirect',
+ 'indexedIndirect',
+ ] as const)
+ .combine('usage1', [
+ 'copy-src',
+ 'copy-dst',
+ 'uniform',
+ 'storage',
+ 'read-only-storage',
+ 'vertex',
+ 'index',
+ 'indirect',
+ 'indexedIndirect',
+ ] as const)
+ .combine('pass', ['render', 'compute'])
+ .unless(({ usage0, usage1, pass }) => {
+ const IsCopy = (usage: BufferUsage | 'copy-src' | 'copy-dst') => {
+ return usage === 'copy-src' || usage === 'copy-dst';
+ };
+ // We intend to test copy usages in this test.
+ if (!IsCopy(usage0) && !IsCopy(usage1)) {
+ return true;
+ }
+        // When both usage0 and usage1 are copy usages, 'pass' is meaningless, so in that situation
+        // we only keep one value of 'pass'.
+ if (IsCopy(usage0) && IsCopy(usage1)) {
+ return pass === 'compute';
+ }
+
+ const IsValidComputeUsage = (usage: BufferUsage | 'copy-src' | 'copy-dst') => {
+ switch (usage) {
+ case 'vertex':
+ case 'index':
+ case 'indexedIndirect':
+ return false;
+ default:
+ return true;
+ }
+ };
+ if (pass === 'compute') {
+ return !IsValidComputeUsage(usage0) || !IsValidComputeUsage(usage1);
+ }
+
+ return false;
+ })
+ )
+ .fn(t => {
+ const { usage0, usage1, pass } = t.params;
+
+ const kUsages =
+ GPUBufferUsage.COPY_SRC |
+ GPUBufferUsage.COPY_DST |
+ GPUBufferUsage.UNIFORM |
+ GPUBufferUsage.STORAGE |
+ GPUBufferUsage.INDIRECT |
+ GPUBufferUsage.VERTEX |
+ GPUBufferUsage.INDEX;
+ const buffer = t.createBufferWithState('valid', {
+ size: kBufferSize,
+ usage: kUsages,
+ });
+
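+    // Use `buffer` once on the command encoder: either in a buffer-to-buffer copy, or inside a
+    // render/compute pass that is begun and ended just for that single use.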
+ const UseBufferOnCommandEncoder = (
+ usage:
+ | 'copy-src'
+ | 'copy-dst'
+ | 'uniform'
+ | 'storage'
+ | 'read-only-storage'
+ | 'vertex'
+ | 'index'
+ | 'indirect'
+ | 'indexedIndirect',
+ encoder: GPUCommandEncoder
+ ) => {
+ switch (usage) {
+ case 'copy-src': {
+ const destinationBuffer = t.createBufferWithState('valid', {
+ size: 4,
+ usage: GPUBufferUsage.COPY_DST,
+ });
+ encoder.copyBufferToBuffer(buffer, 0, destinationBuffer, 0, 4);
+ break;
+ }
+ case 'copy-dst': {
+ const sourceBuffer = t.createBufferWithState('valid', {
+ size: 4,
+ usage: GPUBufferUsage.COPY_SRC,
+ });
+ encoder.copyBufferToBuffer(sourceBuffer, 0, buffer, 0, 4);
+ break;
+ }
+ case 'uniform':
+ case 'storage':
+ case 'read-only-storage': {
+ const bindGroup = t.createBindGroupForTest(buffer, 0, usage, 'fragment');
+ switch (pass) {
+ case 'render': {
+ const renderPassEncoder = t.beginSimpleRenderPass(encoder);
+ renderPassEncoder.setBindGroup(0, bindGroup);
+ renderPassEncoder.end();
+ break;
+ }
+ case 'compute': {
+ const computePassEncoder = encoder.beginComputePass();
+ computePassEncoder.setBindGroup(0, bindGroup);
+ computePassEncoder.end();
+ break;
+ }
+ default:
+ unreachable();
+ }
+ break;
+ }
+ case 'vertex': {
+ const renderPassEncoder = t.beginSimpleRenderPass(encoder);
+ renderPassEncoder.setVertexBuffer(0, buffer);
+ renderPassEncoder.end();
+ break;
+ }
+ case 'index': {
+ const renderPassEncoder = t.beginSimpleRenderPass(encoder);
+ renderPassEncoder.setIndexBuffer(buffer, 'uint16');
+ renderPassEncoder.end();
+ break;
+ }
+ case 'indirect': {
+ switch (pass) {
+ case 'render': {
+ const renderPassEncoder = t.beginSimpleRenderPass(encoder);
+ const renderPipeline = t.createNoOpRenderPipeline();
+ renderPassEncoder.setPipeline(renderPipeline);
+ renderPassEncoder.drawIndirect(buffer, 0);
+ renderPassEncoder.end();
+ break;
+ }
+ case 'compute': {
+ const computePassEncoder = encoder.beginComputePass();
+ const computePipeline = t.createNoOpComputePipeline();
+ computePassEncoder.setPipeline(computePipeline);
+ computePassEncoder.dispatchWorkgroupsIndirect(buffer, 0);
+ computePassEncoder.end();
+ break;
+ }
+ default:
+ unreachable();
+ }
+ break;
+ }
+ case 'indexedIndirect': {
+ const renderPassEncoder = t.beginSimpleRenderPass(encoder);
+ const renderPipeline = t.createNoOpRenderPipeline();
+ renderPassEncoder.setPipeline(renderPipeline);
+ const indexBuffer = t.createBufferWithState('valid', {
+ size: 4,
+ usage: GPUBufferUsage.INDEX,
+ });
+ renderPassEncoder.setIndexBuffer(indexBuffer, 'uint16');
+ renderPassEncoder.drawIndexedIndirect(buffer, 0);
+ renderPassEncoder.end();
+ break;
+ }
+ default:
+ unreachable();
+ }
+ };
+
+ const encoder = t.device.createCommandEncoder();
+ UseBufferOnCommandEncoder(usage0, encoder);
+ UseBufferOnCommandEncoder(usage1, encoder);
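+    // Copy commands and each pass encoder form separate usage scopes, so none of the tested
+    // combinations is expected to produce a validation error.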
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, false);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/resource_usages/texture/in_pass_encoder.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/resource_usages/texture/in_pass_encoder.spec.ts
new file mode 100644
index 0000000000..d316f26c06
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/resource_usages/texture/in_pass_encoder.spec.ts
@@ -0,0 +1,1395 @@
+export const description = `
+Texture Usages Validation Tests in Render Pass and Compute Pass.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { pp } from '../../../../../common/util/preprocessor.js';
+import { assert } from '../../../../../common/util/util.js';
+import { GPUConst } from '../../../../constants.js';
+import {
+ kDepthStencilFormats,
+ kDepthStencilFormatResolvedAspect,
+ kTextureFormatInfo,
+} from '../../../../format_info.js';
+import { ValidationTest } from '../../validation_test.js';
+
+type TextureBindingType = 'sampled-texture' | 'multisampled-texture' | 'writeonly-storage-texture';
+const kTextureBindingTypes = [
+ 'sampled-texture',
+ 'multisampled-texture',
+ 'writeonly-storage-texture',
+] as const;
+
+const SIZE = 32;
+class TextureUsageTracking extends ValidationTest {
+ createTexture(
+ options: {
+ width?: number;
+ height?: number;
+ arrayLayerCount?: number;
+ mipLevelCount?: number;
+ sampleCount?: number;
+ format?: GPUTextureFormat;
+ usage?: GPUTextureUsageFlags;
+ } = {}
+ ): GPUTexture {
+ const {
+ width = SIZE,
+ height = SIZE,
+ arrayLayerCount = 1,
+ mipLevelCount = 1,
+ sampleCount = 1,
+ format = 'rgba8unorm',
+ usage = GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,
+ } = options;
+
+ return this.device.createTexture({
+ size: { width, height, depthOrArrayLayers: arrayLayerCount },
+ mipLevelCount,
+ sampleCount,
+ dimension: '2d',
+ format,
+ usage,
+ });
+ }
+
+ createBindGroupLayout(
+ binding: number,
+ bindingType: TextureBindingType,
+ viewDimension: GPUTextureViewDimension,
+ options: {
+ format?: GPUTextureFormat;
+ sampleType?: GPUTextureSampleType;
+ } = {}
+ ): GPUBindGroupLayout {
+ const { sampleType, format } = options;
+ let entry: Omit<GPUBindGroupLayoutEntry, 'binding' | 'visibility'>;
+ switch (bindingType) {
+ case 'sampled-texture':
+ entry = { texture: { viewDimension, sampleType } };
+ break;
+ case 'multisampled-texture':
+ entry = { texture: { viewDimension, multisampled: true, sampleType } };
+ break;
+ case 'writeonly-storage-texture':
+ assert(format !== undefined);
+ entry = { storageTexture: { access: 'write-only', format, viewDimension } };
+ break;
+ }
+
+ return this.device.createBindGroupLayout({
+ entries: [
+ { binding, visibility: GPUShaderStage.COMPUTE | GPUShaderStage.FRAGMENT, ...entry },
+ ],
+ });
+ }
+
+ createBindGroup(
+ binding: number,
+ resource: GPUTextureView,
+ bindingType: TextureBindingType,
+ viewDimension: GPUTextureViewDimension,
+ options: {
+ format?: GPUTextureFormat;
+ sampleType?: GPUTextureSampleType;
+ } = {}
+ ): GPUBindGroup {
+ return this.device.createBindGroup({
+ entries: [{ binding, resource }],
+ layout: this.createBindGroupLayout(binding, bindingType, viewDimension, options),
+ });
+ }
+
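+  /**
+   * Record a render bundle that sets `bindGroup` at index `binding`, then execute it on `pass`.
+   */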
+ createAndExecuteBundle(
+ binding: number,
+ bindGroup: GPUBindGroup,
+ pass: GPURenderPassEncoder,
+ depthStencilFormat?: GPUTextureFormat
+ ) {
+ const bundleEncoder = this.device.createRenderBundleEncoder({
+ colorFormats: ['rgba8unorm'],
+ depthStencilFormat,
+ });
+ bundleEncoder.setBindGroup(binding, bindGroup);
+ const bundle = bundleEncoder.finish();
+ pass.executeBundles([bundle]);
+ }
+
+ beginSimpleRenderPass(encoder: GPUCommandEncoder, view: GPUTextureView): GPURenderPassEncoder {
+ return encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view,
+ clearValue: { r: 0.0, g: 1.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ }
+
+ /**
+   * Create two bind groups whose resource usages conflict with each other, while the resource
+   * usages inside each bind group do not conflict.
+ */
+ makeConflictingBindGroups() {
+ const view = this.createTexture({
+ usage: GPUTextureUsage.STORAGE_BINDING | GPUTextureUsage.TEXTURE_BINDING,
+ }).createView();
+ const bindGroupLayouts = [
+ this.createBindGroupLayout(0, 'sampled-texture', '2d'),
+ this.createBindGroupLayout(0, 'writeonly-storage-texture', '2d', { format: 'rgba8unorm' }),
+ ];
+ return {
+ bindGroupLayouts,
+ bindGroups: [
+ this.device.createBindGroup({
+ layout: bindGroupLayouts[0],
+ entries: [{ binding: 0, resource: view }],
+ }),
+ this.device.createBindGroup({
+ layout: bindGroupLayouts[1],
+ entries: [{ binding: 0, resource: view }],
+ }),
+ ],
+ };
+ }
+
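+  /**
+   * Begin a render or compute pass together with two conflicting bind groups (a sampled view and
+   * a write-only storage view of the same texture) and a no-op pipeline that uses both layouts.
+   */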
+ testValidationScope(compute: boolean): {
+ bindGroup0: GPUBindGroup;
+ bindGroup1: GPUBindGroup;
+ encoder: GPUCommandEncoder;
+ pass: GPURenderPassEncoder | GPUComputePassEncoder;
+ pipeline: GPURenderPipeline | GPUComputePipeline;
+ } {
+ const { bindGroupLayouts, bindGroups } = this.makeConflictingBindGroups();
+
+ const encoder = this.device.createCommandEncoder();
+ const pass = compute
+ ? encoder.beginComputePass()
+ : this.beginSimpleRenderPass(encoder, this.createTexture().createView());
+
+    // Create the pipeline. Note that bindings unused by the pipeline should be validated too.
+ const pipelineLayout = this.device.createPipelineLayout({
+ bindGroupLayouts,
+ });
+ const pipeline = compute
+ ? this.createNoOpComputePipeline(pipelineLayout)
+ : this.createNoOpRenderPipeline(pipelineLayout);
+ return {
+ bindGroup0: bindGroups[0],
+ bindGroup1: bindGroups[1],
+ encoder,
+ pass,
+ pipeline,
+ };
+ }
+
+ setPipeline(
+ pass: GPURenderPassEncoder | GPUComputePassEncoder,
+ pipeline: GPURenderPipeline | GPUComputePipeline
+ ) {
+ if (pass instanceof GPUComputePassEncoder) {
+ pass.setPipeline(pipeline as GPUComputePipeline);
+ } else {
+ pass.setPipeline(pipeline as GPURenderPipeline);
+ }
+ }
+
+ issueDrawOrDispatch(pass: GPURenderPassEncoder | GPUComputePassEncoder) {
+ if (pass instanceof GPUComputePassEncoder) {
+ pass.dispatchWorkgroups(1);
+ } else {
+ pass.draw(3, 1, 0, 0);
+ }
+ }
+
+ setComputePipelineAndCallDispatch(pass: GPUComputePassEncoder, layout?: GPUPipelineLayout) {
+ const pipeline = this.createNoOpComputePipeline(layout);
+ pass.setPipeline(pipeline);
+ pass.dispatchWorkgroups(1);
+ }
+}
+
+export const g = makeTestGroup(TextureUsageTracking);
+
+const BASE_LEVEL = 1;
+const TOTAL_LEVELS = 6;
+const BASE_LAYER = 1;
+const TOTAL_LAYERS = 6;
+const SLICE_COUNT = 2;
+
+g.test('subresources_and_binding_types_combination_for_color')
+ .desc(
+ `
+ Test the resource usage rules by using two views of the same GPUTexture in a usage scope. Tests
+ various combinations of {sampled, storage, render target} usages, mip-level ranges, and
+ array-layer ranges, in {compute pass, render pass, render pass via bundle}.
+ - Error if a subresource (level/layer) is used as read+write or write+write in the scope,
+ except when both usages are writeonly-storage-texture which is allowed.
+ `
+ )
+ .params(u =>
+ u
+ .combine('compute', [false, true])
+ .expandWithParams(
+ p =>
+ [
+ { _usageOK: true, type0: 'sampled-texture', type1: 'sampled-texture' },
+ { _usageOK: false, type0: 'sampled-texture', type1: 'writeonly-storage-texture' },
+ { _usageOK: false, type0: 'sampled-texture', type1: 'render-target' },
+          // Race conditions between multiple writable storage texture usages are valid.
+          // For p.compute === true, this fails at the dispatch call because aliasing exists.
+ {
+ _usageOK: !p.compute,
+ type0: 'writeonly-storage-texture',
+ type1: 'writeonly-storage-texture',
+ },
+ { _usageOK: false, type0: 'writeonly-storage-texture', type1: 'render-target' },
+ { _usageOK: false, type0: 'render-target', type1: 'render-target' },
+ ] as const
+ )
+ .beginSubcases()
+ .combine('binding0InBundle', [false, true])
+ .combine('binding1InBundle', [false, true])
+ .unless(
+ p =>
+ // We can't set 'render-target' in bundle, so we need to exclude it from bundle.
+ (p.binding0InBundle && p.type0 === 'render-target') ||
+ (p.binding1InBundle && p.type1 === 'render-target') ||
+ // We can't set 'render-target' or bundle in compute.
+ (p.compute &&
+ (p.binding0InBundle ||
+ p.binding1InBundle ||
+ p.type0 === 'render-target' ||
+ p.type1 === 'render-target'))
+ )
+ .combineWithParams([
+ // Two texture usages are binding to the same texture subresource.
+ {
+ levelCount0: 1,
+ layerCount0: 1,
+ baseLevel1: BASE_LEVEL,
+ levelCount1: 1,
+ baseLayer1: BASE_LAYER,
+ layerCount1: 1,
+ _resourceSuccess: false,
+ },
+
+ // Two texture usages are binding to different mip levels of the same texture.
+ {
+ levelCount0: 1,
+ layerCount0: 1,
+ baseLevel1: BASE_LEVEL + 1,
+ levelCount1: 1,
+ baseLayer1: BASE_LAYER,
+ layerCount1: 1,
+ _resourceSuccess: true,
+ },
+
+ // Two texture usages are binding to different array layers of the same texture.
+ {
+ levelCount0: 1,
+ layerCount0: 1,
+ baseLevel1: BASE_LEVEL,
+ levelCount1: 1,
+ baseLayer1: BASE_LAYER + 1,
+ layerCount1: 1,
+ _resourceSuccess: true,
+ },
+
+        // The second texture usage contains the whole mip chain that the first texture usage is
+        // using.
+ {
+ levelCount0: 1,
+ layerCount0: 1,
+ baseLevel1: 0,
+ levelCount1: TOTAL_LEVELS,
+ baseLayer1: BASE_LAYER,
+ layerCount1: 1,
+ _resourceSuccess: false,
+ },
+
+        // The second texture usage contains all layers that the first texture usage is using.
+ {
+ levelCount0: 1,
+ layerCount0: 1,
+ baseLevel1: BASE_LEVEL,
+ levelCount1: 1,
+ baseLayer1: 0,
+ layerCount1: TOTAL_LAYERS,
+ _resourceSuccess: false,
+ },
+
+        // The second texture usage contains all subresources that the first texture usage is
+        // using.
+ {
+ levelCount0: 1,
+ layerCount0: 1,
+ baseLevel1: 0,
+ levelCount1: TOTAL_LEVELS,
+ baseLayer1: 0,
+ layerCount1: TOTAL_LAYERS,
+ _resourceSuccess: false,
+ },
+
+ // Both of the two usages access a few mip levels on the same layer but they don't overlap.
+ {
+ levelCount0: SLICE_COUNT,
+ layerCount0: 1,
+ baseLevel1: BASE_LEVEL + SLICE_COUNT,
+ levelCount1: 3,
+ baseLayer1: BASE_LAYER,
+ layerCount1: 1,
+ _resourceSuccess: true,
+ },
+
+ // Both of the two usages access a few mip levels on the same layer and they overlap.
+ {
+ levelCount0: SLICE_COUNT,
+ layerCount0: 1,
+ baseLevel1: BASE_LEVEL + SLICE_COUNT - 1,
+ levelCount1: 3,
+ baseLayer1: BASE_LAYER,
+ layerCount1: 1,
+ _resourceSuccess: false,
+ },
+
+ // Both of the two usages access a few array layers on the same level but they don't
+ // overlap.
+ {
+ levelCount0: 1,
+ layerCount0: SLICE_COUNT,
+ baseLevel1: BASE_LEVEL,
+ levelCount1: 1,
+ baseLayer1: BASE_LAYER + SLICE_COUNT,
+ layerCount1: 3,
+ _resourceSuccess: true,
+ },
+
+ // Both of the two usages access a few array layers on the same level and they overlap.
+ {
+ levelCount0: 1,
+ layerCount0: SLICE_COUNT,
+ baseLevel1: BASE_LEVEL,
+ levelCount1: 1,
+ baseLayer1: BASE_LAYER + SLICE_COUNT - 1,
+ layerCount1: 3,
+ _resourceSuccess: false,
+ },
+
+ // Both of the two usages access a few array layers and mip levels but they don't overlap.
+ {
+ levelCount0: SLICE_COUNT,
+ layerCount0: SLICE_COUNT,
+ baseLevel1: BASE_LEVEL + SLICE_COUNT,
+ levelCount1: 3,
+ baseLayer1: BASE_LAYER + SLICE_COUNT,
+ layerCount1: 3,
+ _resourceSuccess: true,
+ },
+
+ // Both of the two usages access a few array layers and mip levels and they overlap.
+ {
+ levelCount0: SLICE_COUNT,
+ layerCount0: SLICE_COUNT,
+ baseLevel1: BASE_LEVEL + SLICE_COUNT - 1,
+ levelCount1: 3,
+ baseLayer1: BASE_LAYER + SLICE_COUNT - 1,
+ layerCount1: 3,
+ _resourceSuccess: false,
+ },
+ ])
+ .unless(
+ p =>
+          // Every color attachment or storage texture can use only a single subresource.
+ (p.type0 !== 'sampled-texture' && (p.levelCount0 !== 1 || p.layerCount0 !== 1)) ||
+ (p.type1 !== 'sampled-texture' && (p.levelCount1 !== 1 || p.layerCount1 !== 1)) ||
+          // All color attachments should have the same size.
+ (p.type0 === 'render-target' &&
+ p.type1 === 'render-target' &&
+ p.baseLevel1 !== BASE_LEVEL)
+ )
+ )
+ .fn(t => {
+ const {
+ compute,
+ binding0InBundle,
+ binding1InBundle,
+ levelCount0,
+ layerCount0,
+ baseLevel1,
+ baseLayer1,
+ levelCount1,
+ layerCount1,
+ type0,
+ type1,
+ _usageOK,
+ _resourceSuccess,
+ } = t.params;
+
+ const texture = t.createTexture({
+ arrayLayerCount: TOTAL_LAYERS,
+ mipLevelCount: TOTAL_LEVELS,
+ usage:
+ GPUTextureUsage.TEXTURE_BINDING |
+ GPUTextureUsage.STORAGE_BINDING |
+ GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const dimension0 = layerCount0 !== 1 ? '2d-array' : '2d';
+ const view0 = texture.createView({
+ dimension: dimension0,
+ baseMipLevel: BASE_LEVEL,
+ mipLevelCount: levelCount0,
+ baseArrayLayer: BASE_LAYER,
+ arrayLayerCount: layerCount0,
+ });
+
+ const dimension1 = layerCount1 !== 1 ? '2d-array' : '2d';
+ const view1 = texture.createView({
+ dimension: dimension1,
+ baseMipLevel: baseLevel1,
+ mipLevelCount: levelCount1,
+ baseArrayLayer: baseLayer1,
+ arrayLayerCount: layerCount1,
+ });
+
+ const viewsAreSame =
+ dimension0 === dimension1 &&
+ layerCount0 === layerCount1 &&
+ BASE_LEVEL === baseLevel1 &&
+ levelCount0 === levelCount1 &&
+ BASE_LAYER === baseLayer1 &&
+ layerCount0 === layerCount1;
+ if (!viewsAreSame && t.isCompatibility) {
+ t.skip('different views of same texture are not supported in compatibility mode');
+ }
+
+ const encoder = t.device.createCommandEncoder();
+ if (type0 === 'render-target') {
+ // Note that type1 is 'render-target' too. So we don't need to create bindings.
+ assert(type1 === 'render-target');
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: view0,
+ clearValue: { r: 0.0, g: 1.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ {
+ view: view1,
+ clearValue: { r: 0.0, g: 1.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ pass.end();
+ } else {
+ const pass = compute
+ ? encoder.beginComputePass()
+ : t.beginSimpleRenderPass(
+ encoder,
+ type1 === 'render-target' ? view1 : t.createTexture().createView()
+ );
+
+ const bgls: GPUBindGroupLayout[] = [];
+ // Create bind groups. Set bind groups in pass directly or set bind groups in bundle.
+ const storageTextureFormat0 = type0 === 'sampled-texture' ? undefined : 'rgba8unorm';
+
+ const bgl0 = t.createBindGroupLayout(0, type0, dimension0, { format: storageTextureFormat0 });
+ const bindGroup0 = t.device.createBindGroup({
+ layout: bgl0,
+ entries: [{ binding: 0, resource: view0 }],
+ });
+ bgls.push(bgl0);
+
+ if (binding0InBundle) {
+ assert(pass instanceof GPURenderPassEncoder);
+ t.createAndExecuteBundle(0, bindGroup0, pass);
+ } else {
+ pass.setBindGroup(0, bindGroup0);
+ }
+ if (type1 !== 'render-target') {
+ const storageTextureFormat1 = type1 === 'sampled-texture' ? undefined : 'rgba8unorm';
+
+ const bgl1 = t.createBindGroupLayout(1, type1, dimension1, {
+ format: storageTextureFormat1,
+ });
+ const bindGroup1 = t.device.createBindGroup({
+ layout: bgl1,
+ entries: [{ binding: 1, resource: view1 }],
+ });
+ bgls.push(bgl1);
+
+ if (binding1InBundle) {
+ assert(pass instanceof GPURenderPassEncoder);
+ t.createAndExecuteBundle(1, bindGroup1, pass);
+ } else {
+ pass.setBindGroup(1, bindGroup1);
+ }
+ }
+ if (compute) {
+ t.setComputePipelineAndCallDispatch(
+ pass as GPUComputePassEncoder,
+ t.device.createPipelineLayout({ bindGroupLayouts: bgls })
+ );
+ }
+ pass.end();
+ }
+
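+    // The encoder should succeed when either the two views cover disjoint subresources or the two
+    // binding types are compatible with each other.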
+ const success = _resourceSuccess || _usageOK;
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, !success);
+ });
+
+g.test('subresources_and_binding_types_combination_for_aspect')
+ .desc(
+ `
+ Test the resource usage rules by using two views of the same GPUTexture in a usage scope. Tests
+ various combinations of {sampled, render target} usages, {all, depth-only, stencil-only} aspects
+ that overlap a given subresources in {compute pass, render pass, render pass via bundle}.
+ - Error if a subresource (level/layer/aspect) is used as read+write or write+write in the
+ scope.
+ `
+ )
+ .params(u =>
+ u
+ .combine('compute', [false, true])
+ .combine('binding0InBundle', [false, true])
+ .combine('binding1InBundle', [false, true])
+ .combine('format', kDepthStencilFormats)
+ .beginSubcases()
+ .combineWithParams([
+ {
+ baseLevel: BASE_LEVEL,
+ baseLayer: BASE_LAYER,
+ _resourceSuccess: false,
+ },
+ {
+ baseLevel: BASE_LEVEL + 1,
+ baseLayer: BASE_LAYER,
+ _resourceSuccess: true,
+ },
+ {
+ baseLevel: BASE_LEVEL,
+ baseLayer: BASE_LAYER + 1,
+ _resourceSuccess: true,
+ },
+ ])
+ .combine('aspect0', ['all', 'depth-only', 'stencil-only'] as const)
+ .combine('aspect1', ['all', 'depth-only', 'stencil-only'] as const)
+ .unless(
+ p =>
+ (p.aspect0 === 'stencil-only' && !kTextureFormatInfo[p.format].stencil) ||
+ (p.aspect1 === 'stencil-only' && !kTextureFormatInfo[p.format].stencil)
+ )
+ .unless(
+ p =>
+ (p.aspect0 === 'depth-only' && !kTextureFormatInfo[p.format].depth) ||
+ (p.aspect1 === 'depth-only' && !kTextureFormatInfo[p.format].depth)
+ )
+ .combineWithParams([
+ {
+ type0: 'sampled-texture',
+ type1: 'sampled-texture',
+ _usageSuccess: true,
+ },
+ {
+ type0: 'sampled-texture',
+ type1: 'render-target',
+ _usageSuccess: false,
+ },
+ ] as const)
+ .unless(
+ // Can't sample a multiplanar texture without selecting an aspect.
+ p =>
+ !!kTextureFormatInfo[p.format].depth &&
+ !!kTextureFormatInfo[p.format].stencil &&
+ ((p.aspect0 === 'all' && p.type0 === 'sampled-texture') ||
+ (p.aspect1 === 'all' && p.type1 === 'sampled-texture'))
+ )
+ .unless(
+ p =>
+ // We can't set 'render-target' in bundle, so we need to exclude it from bundle.
+ p.binding1InBundle && p.type1 === 'render-target'
+ )
+ .unless(
+ p =>
+ // We can't set 'render-target' or bundle in compute. Note that type0 is definitely not
+ // 'render-target'
+ p.compute && (p.binding0InBundle || p.binding1InBundle || p.type1 === 'render-target')
+ )
+ .unless(
+ p =>
+ // Depth-stencil attachment views must encompass all aspects of the texture. Invalid
+ // cases are for depth-stencil textures when the aspect is not 'all'.
+ p.type1 === 'render-target' &&
+ !!kTextureFormatInfo[p.format].depth &&
+ !!kTextureFormatInfo[p.format].stencil &&
+ p.aspect1 !== 'all'
+ )
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ t.selectDeviceOrSkipTestCase(kTextureFormatInfo[format].feature);
+ })
+ .fn(t => {
+ const {
+ compute,
+ binding0InBundle,
+ binding1InBundle,
+ format,
+ baseLevel,
+ baseLayer,
+ aspect0,
+ aspect1,
+ type0,
+ type1,
+ _resourceSuccess,
+ _usageSuccess,
+ } = t.params;
+
+ const texture = t.createTexture({
+ arrayLayerCount: TOTAL_LAYERS,
+ mipLevelCount: TOTAL_LEVELS,
+ format,
+ });
+
+ const view0 = texture.createView({
+ dimension: '2d',
+ baseMipLevel: BASE_LEVEL,
+ mipLevelCount: 1,
+ baseArrayLayer: BASE_LAYER,
+ arrayLayerCount: 1,
+ aspect: aspect0,
+ });
+
+ const view1 = texture.createView({
+ dimension: '2d',
+ baseMipLevel: baseLevel,
+ mipLevelCount: 1,
+ baseArrayLayer: baseLayer,
+ arrayLayerCount: 1,
+ aspect: aspect1,
+ });
+ const view1ResolvedFormat = kDepthStencilFormatResolvedAspect[format][aspect1]!;
+ const view1HasDepth = kTextureFormatInfo[view1ResolvedFormat].depth;
+ const view1HasStencil = kTextureFormatInfo[view1ResolvedFormat].stencil;
+
+ const encoder = t.device.createCommandEncoder();
+    // The color attachment's size should match the depth/stencil attachment's size. Note that if
+    // type1 !== 'render-target' there is no depthStencilAttachment to match anyway.
+ const depthStencilFormat = type1 === 'render-target' ? view1ResolvedFormat : undefined;
+
+ const size = SIZE >> baseLevel;
+ const pass = compute
+ ? encoder.beginComputePass()
+ : encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: t.createTexture({ width: size, height: size }).createView(),
+ clearValue: { r: 0.0, g: 1.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ depthStencilAttachment: depthStencilFormat
+ ? {
+ view: view1,
+ depthStoreOp: view1HasDepth ? 'discard' : undefined,
+ depthLoadOp: view1HasDepth ? 'load' : undefined,
+ stencilStoreOp: view1HasStencil ? 'discard' : undefined,
+ stencilLoadOp: view1HasStencil ? 'load' : undefined,
+ }
+ : undefined,
+ });
+
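+    // Choose a sample type matching the aspect actually bound: depth aspects sample as 'depth',
+    // stencil aspects sample as 'uint'.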
+ const aspectSampleType = (format: GPUTextureFormat, aspect: typeof aspect0) => {
+ switch (aspect) {
+ case 'depth-only':
+ return 'depth';
+ case 'stencil-only':
+ return 'uint';
+ case 'all':
+ assert(kTextureFormatInfo[format].depth !== kTextureFormatInfo[format].stencil);
+ if (kTextureFormatInfo[format].stencil) {
+ return 'uint';
+ }
+ return 'depth';
+ }
+ };
+
+ // Create bind groups. Set bind groups in pass directly or set bind groups in bundle.
+ const bindGroup0 = t.createBindGroup(0, view0, type0, '2d', {
+ sampleType: type0 === 'sampled-texture' ? aspectSampleType(format, aspect0) : undefined,
+ });
+ if (binding0InBundle) {
+ assert(pass instanceof GPURenderPassEncoder);
+ t.createAndExecuteBundle(0, bindGroup0, pass, depthStencilFormat);
+ } else {
+ pass.setBindGroup(0, bindGroup0);
+ }
+ if (type1 !== 'render-target') {
+ const bindGroup1 = t.createBindGroup(1, view1, type1, '2d', {
+ sampleType: type1 === 'sampled-texture' ? aspectSampleType(format, aspect1) : undefined,
+ });
+ if (binding1InBundle) {
+ assert(pass instanceof GPURenderPassEncoder);
+ t.createAndExecuteBundle(1, bindGroup1, pass, depthStencilFormat);
+ } else {
+ pass.setBindGroup(1, bindGroup1);
+ }
+ }
+ if (compute) t.setComputePipelineAndCallDispatch(pass as GPUComputePassEncoder);
+ pass.end();
+
+ const disjointAspects =
+ (aspect0 === 'depth-only' && aspect1 === 'stencil-only') ||
+ (aspect0 === 'stencil-only' && aspect1 === 'depth-only');
+
+    // If the subresources' mip/array slices have no overlap, or their binding types don't conflict,
+    // validation will definitely succeed no matter which aspects they are bound to.
+ const success = disjointAspects || _resourceSuccess || _usageSuccess;
+
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, !success);
+ });
+
+g.test('shader_stages_and_visibility,storage_write')
+ .desc(
+ `
+ Test that stage visibility doesn't affect resource usage validation.
+ - Use a texture as sampled, with 'readVisibility' {0,VERTEX,FRAGMENT,COMPUTE}
+ - Use a {same,different} texture as storage, with 'writeVisibility' {0,FRAGMENT,COMPUTE}
+
+ There should be a validation error IFF the same texture was used.
+ `
+ )
+ .params(u =>
+ u
+ .combine('compute', [false, true])
+ .beginSubcases()
+ .combine('secondUseConflicts', [false, true])
+ .combine('readVisibility', [
+ 0,
+ GPUConst.ShaderStage.VERTEX,
+ GPUConst.ShaderStage.FRAGMENT,
+ GPUConst.ShaderStage.COMPUTE,
+ ])
+ .combine('writeVisibility', [0, GPUConst.ShaderStage.FRAGMENT, GPUConst.ShaderStage.COMPUTE])
+ )
+ .fn(t => {
+ const { compute, readVisibility, writeVisibility, secondUseConflicts } = t.params;
+
+ const usage = GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.STORAGE_BINDING;
+ const view = t.createTexture({ usage }).createView();
+ const view2 = secondUseConflicts ? view : t.createTexture({ usage }).createView();
+
+ const bgl = t.device.createBindGroupLayout({
+ entries: [
+ { binding: 0, visibility: readVisibility, texture: {} },
+ {
+ binding: 1,
+ visibility: writeVisibility,
+ storageTexture: { access: 'write-only', format: 'rgba8unorm' },
+ },
+ ],
+ });
+ const bindGroup = t.device.createBindGroup({
+ layout: bgl,
+ entries: [
+ { binding: 0, resource: view },
+ { binding: 1, resource: view2 },
+ ],
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ if (compute) {
+ const pass = encoder.beginComputePass();
+ pass.setBindGroup(0, bindGroup);
+
+ t.setComputePipelineAndCallDispatch(
+ pass,
+ t.device.createPipelineLayout({
+ bindGroupLayouts: [bgl],
+ })
+ );
+ pass.end();
+ } else {
+ const pass = t.beginSimpleRenderPass(encoder, t.createTexture().createView());
+ pass.setBindGroup(0, bindGroup);
+ pass.end();
+ }
+
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, secondUseConflicts);
+ });
+
+g.test('shader_stages_and_visibility,attachment_write')
+ .desc(
+ `
+ Test that stage visibility doesn't affect resource usage validation.
+ - Use a texture as sampled, with 'readVisibility' {0,VERTEX,FRAGMENT,COMPUTE}
+ - Use a {same,different} texture as a render pass attachment
+
+ There should be a validation error IFF the same texture was used.
+ `
+ )
+ .params(u =>
+ u
+ .beginSubcases()
+ .combine('secondUseConflicts', [false, true])
+ .combine('readVisibility', [
+ 0,
+ GPUConst.ShaderStage.VERTEX,
+ GPUConst.ShaderStage.FRAGMENT,
+ GPUConst.ShaderStage.COMPUTE,
+ ])
+ )
+ .fn(t => {
+ const { readVisibility, secondUseConflicts } = t.params;
+
+    // The writeonly-storage-texture binding type is not supported in the vertex stage, so this
+    // test uses a writeonly-storage-texture binding as the writable binding on the same
+    // subresource when the vertex stage is not included. Otherwise, it uses an output attachment
+    // instead.
+ const usage = GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.RENDER_ATTACHMENT;
+
+ const view = t.createTexture({ usage }).createView();
+ const view2 = secondUseConflicts ? view : t.createTexture({ usage }).createView();
+ const bgl = t.device.createBindGroupLayout({
+ entries: [{ binding: 0, visibility: readVisibility, texture: {} }],
+ });
+ const bindGroup = t.device.createBindGroup({
+ layout: bgl,
+ entries: [{ binding: 0, resource: view }],
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = t.beginSimpleRenderPass(encoder, view2);
+ pass.setBindGroup(0, bindGroup);
+ pass.end();
+
+    // Texture usages in bindings with invisible shader stages should still be validated. Invisible
+    // shader stages include stages with visibility 0 (none), the compute shader stage in a render
+    // pass, and the vertex/fragment shader stages in a compute pass.
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, secondUseConflicts);
+ });
+
+g.test('replaced_binding')
+ .desc(
+ `
+ Test whether a binding that's been replaced by another setBindGroup call can still
+ cause validation to fail (with a write/write conflict).
+ - In render pass, all setBindGroup calls contribute to the validation even if they're
+ shadowed.
+ - In compute pass, only the bindings visible at dispatchWorkgroups() contribute to validation.
+ `
+ )
+ .params(u =>
+ u
+ .combine('compute', [false, true])
+ .combine('callDrawOrDispatch', [false, true])
+ .combine('entry', [
+ { texture: {} },
+ { storageTexture: { access: 'write-only', format: 'rgba8unorm' } },
+ ] as const)
+ )
+ .fn(t => {
+ const { compute, callDrawOrDispatch, entry } = t.params;
+
+ const sampledView = t.createTexture().createView();
+ const sampledStorageView = t
+ .createTexture({ usage: GPUTextureUsage.STORAGE_BINDING | GPUTextureUsage.TEXTURE_BINDING })
+ .createView();
+
+ // Create bindGroup0. It has two bindings. These two bindings use different views/subresources.
+ const bglEntries0: GPUBindGroupLayoutEntry[] = [
+ { binding: 0, visibility: GPUShaderStage.FRAGMENT, texture: {} },
+ {
+ binding: 1,
+ visibility: GPUShaderStage.FRAGMENT,
+ ...entry,
+ },
+ ];
+ const bgEntries0: GPUBindGroupEntry[] = [
+ { binding: 0, resource: sampledView },
+ { binding: 1, resource: sampledStorageView },
+ ];
+ const bindGroup0 = t.device.createBindGroup({
+ entries: bgEntries0,
+ layout: t.device.createBindGroupLayout({ entries: bglEntries0 }),
+ });
+
+    // Create bindGroup1. It has one binding, which uses the same view/subresource as a binding in
+    // bindGroup0, so it may or may not conflict with that binding in bindGroup0.
+ const bindGroup1 = t.createBindGroup(0, sampledStorageView, 'sampled-texture', '2d', undefined);
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = compute
+ ? encoder.beginComputePass()
+ : t.beginSimpleRenderPass(encoder, t.createTexture().createView());
+
+ // Set bindGroup0 and bindGroup1. bindGroup0 is replaced by bindGroup1 in the current pass.
+ // But bindings in bindGroup0 should be validated too.
+ pass.setBindGroup(0, bindGroup0);
+ if (callDrawOrDispatch) {
+ const pipeline = compute ? t.createNoOpComputePipeline() : t.createNoOpRenderPipeline();
+ t.setPipeline(pass, pipeline);
+ t.issueDrawOrDispatch(pass);
+ }
+ pass.setBindGroup(0, bindGroup1);
+ pass.end();
+
+ // MAINTENANCE_TODO: If the Compatible Usage List
+ // (https://gpuweb.github.io/gpuweb/#compatible-usage-list) gets programmatically defined in
+ // capability_info, use it here, instead of this logic, for clarity.
+ let success = entry.storageTexture?.access !== 'write-only';
+ // Replaced bindings should not be validated in compute pass, because validation only occurs
+ // inside dispatchWorkgroups() which only looks at the current resource usages.
+ success ||= compute;
+
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, !success);
+ });
+
+g.test('bindings_in_bundle')
+ .desc(
+ `
+ Test the texture usages in bundles by using two bindings of the same texture with various
+ combination of {sampled, storage, render target} usages.
+ `
+ )
+ .params(u =>
+ u
+ .combine('type0', ['render-target', ...kTextureBindingTypes] as const)
+ .combine('type1', ['render-target', ...kTextureBindingTypes] as const)
+ .beginSubcases()
+ .combine('binding0InBundle', [false, true])
+ .combine('binding1InBundle', [false, true])
+ .expandWithParams(function* ({ type0, type1 }) {
+ const usageForType = (type: typeof type0 | typeof type1) => {
+ switch (type) {
+ case 'multisampled-texture':
+ case 'sampled-texture':
+ return 'TEXTURE_BINDING' as const;
+ case 'writeonly-storage-texture':
+ return 'STORAGE_BINDING' as const;
+ case 'render-target':
+ return 'RENDER_ATTACHMENT' as const;
+ }
+ };
+
+ yield {
+ _usage0: usageForType(type0),
+ _usage1: usageForType(type1),
+ _sampleCount:
+ type0 === 'multisampled-texture' || type1 === 'multisampled-texture'
+ ? (4 as const)
+ : undefined,
+ };
+ })
+ .unless(
+ p =>
+ // We can't set 'render-target' in bundle, so we need to exclude it from bundle.
+ // In addition, if both bindings are non-bundle, there is no need to test it because
+ // we have far more comprehensive test cases for that situation in this file.
+ (p.binding0InBundle && p.type0 === 'render-target') ||
+ (p.binding1InBundle && p.type1 === 'render-target') ||
+ (!p.binding0InBundle && !p.binding1InBundle) ||
+ // Storage textures can't be multisampled.
+ (p._sampleCount !== undefined &&
+ p._sampleCount > 1 &&
+ (p._usage0 === 'STORAGE_BINDING' || p._usage1 === 'STORAGE_BINDING')) ||
+ // If both are sampled, we create two views of the same texture, so both must be
+ // multisampled.
+ (p.type0 === 'multisampled-texture' && p.type1 === 'sampled-texture') ||
+ (p.type0 === 'sampled-texture' && p.type1 === 'multisampled-texture')
+ )
+ )
+ .fn(t => {
+ const { binding0InBundle, binding1InBundle, type0, type1, _usage0, _usage1, _sampleCount } =
+ t.params;
+
+ // Two bindings are attached to the same texture view.
+ const usage =
+ _sampleCount === 4
+ ? GPUTextureUsage[_usage0] | GPUTextureUsage[_usage1] | GPUTextureUsage.RENDER_ATTACHMENT
+ : GPUTextureUsage[_usage0] | GPUTextureUsage[_usage1];
+ const view = t
+ .createTexture({
+ usage,
+ sampleCount: _sampleCount,
+ })
+ .createView();
+
+ const bindGroups: GPUBindGroup[] = [];
+ if (type0 !== 'render-target') {
+ const binding0TexFormat = type0 === 'sampled-texture' ? undefined : 'rgba8unorm';
+ bindGroups[0] = t.createBindGroup(0, view, type0, '2d', {
+ format: binding0TexFormat,
+ sampleType: _sampleCount && 'unfilterable-float',
+ });
+ }
+ if (type1 !== 'render-target') {
+ const binding1TexFormat = type1 === 'sampled-texture' ? undefined : 'rgba8unorm';
+ bindGroups[1] = t.createBindGroup(1, view, type1, '2d', {
+ format: binding1TexFormat,
+ sampleType: _sampleCount && 'unfilterable-float',
+ });
+ }
+
+ const encoder = t.device.createCommandEncoder();
+    // At least one binding is in a bundle, which means that its type is not 'render-target'.
+    // As a result, at most one binding's type is 'render-target'.
+ const pass = t.beginSimpleRenderPass(
+ encoder,
+ type0 === 'render-target' || type1 === 'render-target' ? view : t.createTexture().createView()
+ );
+
+ const bindingsInBundle: boolean[] = [binding0InBundle, binding1InBundle];
+ for (let i = 0; i < 2; i++) {
+      // Create a bundle for each bind group if its binding is required to be in a bundle.
+      // Otherwise, call setBindGroup directly in the pass if needed (when its binding type is not
+      // 'render-target').
+ if (bindingsInBundle[i]) {
+ const bundleEncoder = t.device.createRenderBundleEncoder({
+ colorFormats: ['rgba8unorm'],
+ });
+ bundleEncoder.setBindGroup(i, bindGroups[i]);
+ const bundleInPass = bundleEncoder.finish();
+ pass.executeBundles([bundleInPass]);
+ } else if (bindGroups[i] !== undefined) {
+ pass.setBindGroup(i, bindGroups[i]);
+ }
+ }
+
+ pass.end();
+
+ const isReadOnly = (t: typeof type0 | typeof type1) => {
+ switch (t) {
+ case 'sampled-texture':
+ case 'multisampled-texture':
+ return true;
+ default:
+ return false;
+ }
+ };
+
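+    // Two read-only bindings, or two write-only storage bindings, of the same view are expected
+    // to pass; any mix of read and write (or write and attachment) usages is expected to fail.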
+ let success = false;
+ if (isReadOnly(type0) && isReadOnly(type1)) {
+ success = true;
+ }
+
+ if (type0 === 'writeonly-storage-texture' && type1 === 'writeonly-storage-texture') {
+ success = true;
+ }
+
+ // Resource usages in bundle should be validated.
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, !success);
+ });
+
+g.test('unused_bindings_in_pipeline')
+ .desc(
+ `
+ Test that for compute pipelines with 'auto' layout, only bindings used by the pipeline count
+ toward the usage scope. For render passes, test the pipeline doesn't matter because only the
+ calls to setBindGroup count toward the usage scope.
+ `
+ )
+ .params(u =>
+ u
+ .combine('compute', [false, true])
+ .combine('useBindGroup0', [false, true])
+ .combine('useBindGroup1', [false, true])
+ .combine('setBindGroupsOrder', ['common', 'reversed'] as const)
+ .combine('setPipeline', ['before', 'middle', 'after', 'none'] as const)
+ .combine('callDrawOrDispatch', [false, true])
+ )
+ .fn(t => {
+ const {
+ compute,
+ useBindGroup0,
+ useBindGroup1,
+ setBindGroupsOrder,
+ setPipeline,
+ callDrawOrDispatch,
+ } = t.params;
+ const view = t
+ .createTexture({ usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.STORAGE_BINDING })
+ .createView();
+ const bindGroup0 = t.createBindGroup(0, view, 'sampled-texture', '2d', {
+ format: 'rgba8unorm',
+ });
+ const bindGroup1 = t.createBindGroup(0, view, 'writeonly-storage-texture', '2d', {
+ format: 'rgba8unorm',
+ });
+
+ const wgslVertex = `@vertex fn main() -> @builtin(position) vec4<f32> {
+ return vec4<f32>();
+}`;
+ const wgslFragment = pp`
+ ${pp._if(useBindGroup0)}
+ @group(0) @binding(0) var image0 : texture_storage_2d<rgba8unorm, write>;
+ ${pp._endif}
+ ${pp._if(useBindGroup1)}
+ @group(1) @binding(0) var image1 : texture_storage_2d<rgba8unorm, write>;
+ ${pp._endif}
+ @fragment fn main() {}
+ `;
+
+ const wgslCompute = pp`
+ ${pp._if(useBindGroup0)}
+ @group(0) @binding(0) var image0 : texture_storage_2d<rgba8unorm, write>;
+ ${pp._endif}
+ ${pp._if(useBindGroup1)}
+ @group(1) @binding(0) var image1 : texture_storage_2d<rgba8unorm, write>;
+ ${pp._endif}
+ @compute @workgroup_size(1) fn main() {}
+ `;
+
+ const pipeline = compute
+ ? t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({
+ code: wgslCompute,
+ }),
+ entryPoint: 'main',
+ },
+ })
+ : t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: wgslVertex,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: wgslFragment,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm', writeMask: 0 }],
+ },
+ primitive: { topology: 'triangle-list' },
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = compute
+ ? encoder.beginComputePass()
+ : encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: t.createTexture().createView(),
+ clearValue: { r: 0.0, g: 1.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ const index0 = setBindGroupsOrder === 'common' ? 0 : 1;
+ const index1 = setBindGroupsOrder === 'common' ? 1 : 0;
+ if (setPipeline === 'before') t.setPipeline(pass, pipeline);
+ pass.setBindGroup(index0, bindGroup0);
+ if (setPipeline === 'middle') t.setPipeline(pass, pipeline);
+ pass.setBindGroup(index1, bindGroup1);
+ if (setPipeline === 'after') t.setPipeline(pass, pipeline);
+ if (callDrawOrDispatch) t.issueDrawOrDispatch(pass);
+ pass.end();
+
+    // The resource usage validation scope is the whole render pass, or each dispatch call.
+    // In a compute pass we always succeed, whether or not dispatch is called, because the
+    // shaders in this test declare the bindings but never reference them, so the 'auto'
+    // pipeline layout omits them and they never enter a dispatch's usage scope.
+    // In a render pass we always fail, because every binding set in the pass counts toward
+    // the usage scope regardless of whether the pipeline uses it.
+ let success = compute;
+
+ // Also fails if we try to draw/dispatch without a pipeline.
+ if (callDrawOrDispatch && setPipeline === 'none') {
+ success = false;
+ }
+
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, !success);
+ });
+
+g.test('scope,dispatch')
+ .desc(
+ `
+ Tests that in a compute pass, no usage validation occurs without a dispatch call.
+ {Sets,skips} each of two conflicting bind groups in a pass {with,without} a dispatch call.
+ If both are set, AND there is a dispatch call, validation should fail.
+ `
+ )
+ .params(u =>
+ u
+ .combine('dispatch', ['none', 'direct', 'indirect'])
+ .beginSubcases()
+      .expand('setBindGroup0', p => (p.dispatch !== 'none' ? [true] : [false, true]))
+      .expand('setBindGroup1', p => (p.dispatch !== 'none' ? [true] : [false, true]))
+ )
+ .fn(t => {
+ const { dispatch, setBindGroup0, setBindGroup1 } = t.params;
+
+ const { bindGroup0, bindGroup1, encoder, pass, pipeline } = t.testValidationScope(true);
+ assert(pass instanceof GPUComputePassEncoder);
+ t.setPipeline(pass, pipeline);
+
+ if (setBindGroup0) pass.setBindGroup(0, bindGroup0);
+ if (setBindGroup1) pass.setBindGroup(1, bindGroup1);
+
+ switch (dispatch) {
+ case 'direct':
+ pass.dispatchWorkgroups(1);
+ break;
+ case 'indirect':
+ {
+          // An indirect dispatch reads three u32 values, so the buffer must be at least 12 bytes.
+          const indirectBuffer = t.device.createBuffer({
+            size: 12,
+            usage: GPUBufferUsage.INDIRECT,
+          });
+ pass.dispatchWorkgroupsIndirect(indirectBuffer, 0);
+ }
+ break;
+ }
+
+ pass.end();
+
+ t.expectValidationError(
+ () => {
+ encoder.finish();
+ },
+ dispatch !== 'none' && setBindGroup0 && setBindGroup1
+ );
+ });
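+
+// Illustrative sketch, not part of the CTS test above: a compute pass that sets two bind groups
+// with conflicting usages of the same texture view but never dispatches. Because a usage scope in
+// a compute pass is only created by a dispatch call, finish() is expected to succeed. Assumes an
+// already-acquired GPUDevice `device` and two conflicting bind groups built elsewhere.
+function sketchComputeScopeWithoutDispatch(
+  device: GPUDevice,
+  sampledBindGroup: GPUBindGroup,
+  storageBindGroup: GPUBindGroup
+): GPUCommandBuffer {
+  const encoder = device.createCommandEncoder();
+  const pass = encoder.beginComputePass();
+  pass.setBindGroup(0, sampledBindGroup); // same subresource bound as TEXTURE_BINDING
+  pass.setBindGroup(1, storageBindGroup); // same subresource bound as STORAGE_BINDING
+  pass.end();
+  // No dispatchWorkgroups() call was recorded, so no usage scope was formed.
+  return encoder.finish();
+}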
+
+g.test('scope,basic,render')
+ .desc(
+ `
+ Tests that in a render pass, validation occurs even without a pipeline or draw call.
+ {Set,skip} each of two conflicting bind groups. If both are set, validation should fail.
+ `
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('setBindGroup0', [false, true])
+ .combine('setBindGroup1', [false, true])
+ )
+ .fn(t => {
+ const { setBindGroup0, setBindGroup1 } = t.params;
+
+ const { bindGroup0, bindGroup1, encoder, pass } = t.testValidationScope(false);
+ assert(pass instanceof GPURenderPassEncoder);
+
+ if (setBindGroup0) pass.setBindGroup(0, bindGroup0);
+ if (setBindGroup1) pass.setBindGroup(1, bindGroup1);
+
+ pass.end();
+
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, setBindGroup0 && setBindGroup1);
+ });
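+
+// Illustrative sketch, not part of the CTS test above: in a render pass the whole pass is one
+// usage scope, so merely setting two conflicting bind groups should surface a validation error at
+// finish(), with no pipeline or draw call involved. Assumes `device`, an `attachmentView`, and
+// two bind groups that reference the same texture subresource with conflicting usages.
+function sketchRenderScopeWithoutDraw(
+  device: GPUDevice,
+  attachmentView: GPUTextureView,
+  sampledBindGroup: GPUBindGroup,
+  storageBindGroup: GPUBindGroup
+): Promise<GPUError | null> {
+  device.pushErrorScope('validation');
+  const encoder = device.createCommandEncoder();
+  const pass = encoder.beginRenderPass({
+    colorAttachments: [{ view: attachmentView, loadOp: 'clear', storeOp: 'store' }],
+  });
+  pass.setBindGroup(0, sampledBindGroup);
+  pass.setBindGroup(1, storageBindGroup);
+  pass.end();
+  encoder.finish();
+  return device.popErrorScope(); // expected to resolve to a GPUValidationError
+}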
+
+g.test('scope,pass_boundary,compute')
+ .desc(
+ `
+ Test using two conflicting bind groups in separate dispatch calls, {with,without} a pass
+ boundary in between. This should always be valid.
+ `
+ )
+ .paramsSubcasesOnly(u => u.combine('splitPass', [false, true]))
+ .fn(t => {
+ const { splitPass } = t.params;
+
+ const { bindGroupLayouts, bindGroups } = t.makeConflictingBindGroups();
+
+ const encoder = t.device.createCommandEncoder();
+
+ const pipelineUsingBG0 = t.createNoOpComputePipeline(
+ t.device.createPipelineLayout({
+ bindGroupLayouts: [bindGroupLayouts[0]],
+ })
+ );
+ const pipelineUsingBG1 = t.createNoOpComputePipeline(
+ t.device.createPipelineLayout({
+ bindGroupLayouts: [bindGroupLayouts[1]],
+ })
+ );
+
+ let pass = encoder.beginComputePass();
+ pass.setPipeline(pipelineUsingBG0);
+ pass.setBindGroup(0, bindGroups[0]);
+ pass.dispatchWorkgroups(1);
+ if (splitPass) {
+ pass.end();
+ pass = encoder.beginComputePass();
+ }
+ pass.setPipeline(pipelineUsingBG1);
+ pass.setBindGroup(0, bindGroups[1]);
+ pass.dispatchWorkgroups(1);
+ pass.end();
+
+ // Always valid
+ encoder.finish();
+ });
+
+g.test('scope,pass_boundary,render')
+ .desc(
+ `
+ Test using two conflicting bind groups in separate draw calls, {with,without} a pass
+ boundary in between. This should be valid only if there is a pass boundary.
+ `
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combine('splitPass', [false, true])
+ .combine('draw', [false, true])
+ )
+ .fn(t => {
+ const { splitPass, draw } = t.params;
+
+ const { bindGroupLayouts, bindGroups } = t.makeConflictingBindGroups();
+
+ const encoder = t.device.createCommandEncoder();
+
+ const pipelineUsingBG0 = t.createNoOpRenderPipeline(
+ t.device.createPipelineLayout({
+ bindGroupLayouts: [bindGroupLayouts[0]],
+ })
+ );
+ const pipelineUsingBG1 = t.createNoOpRenderPipeline(
+ t.device.createPipelineLayout({
+ bindGroupLayouts: [bindGroupLayouts[1]],
+ })
+ );
+
+ const attachment = t.createTexture().createView();
+
+ let pass = t.beginSimpleRenderPass(encoder, attachment);
+ pass.setPipeline(pipelineUsingBG0);
+ pass.setBindGroup(0, bindGroups[0]);
+ if (draw) pass.draw(3);
+ if (splitPass) {
+ pass.end();
+ pass = t.beginSimpleRenderPass(encoder, attachment);
+ }
+ pass.setPipeline(pipelineUsingBG1);
+ pass.setBindGroup(0, bindGroups[1]);
+ if (draw) pass.draw(3);
+ pass.end();
+
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, !splitPass);
+ });
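+
+// Illustrative sketch, not part of the CTS test above: ending one render pass and beginning
+// another on the same command encoder starts a new usage scope, so two bind groups that would
+// conflict inside a single pass no longer collide. Assumes `device`, an `attachmentView`, two
+// conflicting bind groups, and two render pipelines whose layouts match them.
+function sketchSplitRenderPasses(
+  device: GPUDevice,
+  attachmentView: GPUTextureView,
+  pipeline0: GPURenderPipeline,
+  pipeline1: GPURenderPipeline,
+  bindGroup0: GPUBindGroup,
+  bindGroup1: GPUBindGroup
+): GPUCommandBuffer {
+  const colorAttachments: GPURenderPassColorAttachment[] = [
+    { view: attachmentView, loadOp: 'clear', storeOp: 'store' },
+  ];
+  const encoder = device.createCommandEncoder();
+  let pass = encoder.beginRenderPass({ colorAttachments });
+  pass.setPipeline(pipeline0);
+  pass.setBindGroup(0, bindGroup0);
+  pass.draw(3);
+  pass.end(); // the first usage scope closes at the pass boundary
+  pass = encoder.beginRenderPass({ colorAttachments });
+  pass.setPipeline(pipeline1);
+  pass.setBindGroup(0, bindGroup1);
+  pass.draw(3);
+  pass.end();
+  return encoder.finish(); // valid: the conflicting usages sit in separate scopes
+}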
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/resource_usages/texture/in_render_common.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/resource_usages/texture/in_render_common.spec.ts
new file mode 100644
index 0000000000..0c41098556
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/resource_usages/texture/in_render_common.spec.ts
@@ -0,0 +1,566 @@
+export const description = `
+Texture Usages Validation Tests in Same or Different Render Pass Encoders.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { assert, unreachable } from '../../../../../common/util/util.js';
+import { ValidationTest } from '../../validation_test.js';
+
+class F extends ValidationTest {
+ getColorAttachment(
+ texture: GPUTexture,
+ textureViewDescriptor?: GPUTextureViewDescriptor
+ ): GPURenderPassColorAttachment {
+ const view = texture.createView(textureViewDescriptor);
+
+ return {
+ view,
+ clearValue: { r: 1.0, g: 0.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ };
+ }
+
+ createBindGroupForTest(
+ textureView: GPUTextureView,
+ textureUsage: 'texture' | 'storage',
+ sampleType: 'float' | 'depth' | 'uint'
+ ) {
+ const bindGroupLayoutEntry: GPUBindGroupLayoutEntry = {
+ binding: 0,
+ visibility: GPUShaderStage.FRAGMENT,
+ };
+ switch (textureUsage) {
+ case 'texture':
+ bindGroupLayoutEntry.texture = { viewDimension: '2d-array', sampleType };
+ break;
+ case 'storage':
+ bindGroupLayoutEntry.storageTexture = {
+ access: 'write-only',
+ format: 'rgba8unorm',
+ viewDimension: '2d-array',
+ };
+ break;
+ default:
+ unreachable();
+ break;
+ }
+ const layout = this.device.createBindGroupLayout({
+ entries: [bindGroupLayoutEntry],
+ });
+ return this.device.createBindGroup({
+ layout,
+ entries: [{ binding: 0, resource: textureView }],
+ });
+ }
+
+ isRangeNotOverlapped(start0: number, end0: number, start1: number, end1: number): boolean {
+ assert(start0 <= end0 && start1 <= end1);
+ // There are only two possibilities for two non-overlapped ranges:
+ // [start0, end0] [start1, end1] or
+ // [start1, end1] [start0, end0]
+ return end0 < start1 || end1 < start0;
+ }
+}
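+
+// Illustrative sketch, not part of the class above: two views of one texture share subresources
+// only when both their mip-level ranges and their array-layer ranges overlap, which is how the
+// tests below combine the isRangeNotOverlapped() predicate. All parameter names are hypothetical.
+function viewsShareSubresources(
+  f: F,
+  mips0: { base: number; count: number },
+  layers0: { base: number; count: number },
+  mips1: { base: number; count: number },
+  layers1: { base: number; count: number }
+): boolean {
+  const mipsOverlap = !f.isRangeNotOverlapped(
+    mips0.base,
+    mips0.base + mips0.count - 1,
+    mips1.base,
+    mips1.base + mips1.count - 1
+  );
+  const layersOverlap = !f.isRangeNotOverlapped(
+    layers0.base,
+    layers0.base + layers0.count - 1,
+    layers1.base,
+    layers1.base + layers1.count - 1
+  );
+  return mipsOverlap && layersOverlap;
+}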
+
+export const g = makeTestGroup(F);
+
+const kTextureSize = 16;
+const kTextureLevels = 3;
+const kTextureLayers = 3;
+
+g.test('subresources,color_attachments')
+ .desc(
+ `
+  Test that different subresources of the same texture are allowed to be used as color
+  attachments in the same or in different render pass encoders, while the same subresource may
+  only be reused as a color attachment across different render pass encoders, not twice within
+  the same one.`
+ )
+ .params(u =>
+ u
+ .combine('layer0', [0, 1])
+ .combine('level0', [0, 1])
+ .combine('layer1', [0, 1])
+ .combine('level1', [0, 1])
+ .combine('inSamePass', [true, false])
+ .unless(t => t.inSamePass && t.level0 !== t.level1)
+ )
+ .fn(t => {
+ const { layer0, level0, layer1, level1, inSamePass } = t.params;
+
+ const texture = t.device.createTexture({
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ size: [kTextureSize, kTextureSize, kTextureLayers],
+ mipLevelCount: kTextureLevels,
+ });
+
+ const colorAttachment1 = t.getColorAttachment(texture, {
+ dimension: '2d',
+ baseArrayLayer: layer0,
+ arrayLayerCount: 1,
+ baseMipLevel: level0,
+ mipLevelCount: 1,
+ });
+ const colorAttachment2 = t.getColorAttachment(texture, {
+ dimension: '2d',
+ baseArrayLayer: layer1,
+ baseMipLevel: level1,
+ mipLevelCount: 1,
+ });
+ const encoder = t.device.createCommandEncoder();
+ if (inSamePass) {
+ const renderPass = encoder.beginRenderPass({
+ colorAttachments: [colorAttachment1, colorAttachment2],
+ });
+ renderPass.end();
+ } else {
+ const renderPass1 = encoder.beginRenderPass({
+ colorAttachments: [colorAttachment1],
+ });
+ renderPass1.end();
+ const renderPass2 = encoder.beginRenderPass({
+ colorAttachments: [colorAttachment2],
+ });
+ renderPass2.end();
+ }
+
+ const success = inSamePass ? layer0 !== layer1 : true;
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, !success);
+ });
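+
+// Illustrative sketch, not part of the CTS test above (assumes an acquired GPUDevice `device`):
+// two different array layers of the same mip level can be color attachments of a single render
+// pass, because they are distinct subresources; using the same layer for both attachments would
+// instead fail validation, as the test above checks.
+function sketchTwoLayerAttachments(device: GPUDevice): GPUCommandBuffer {
+  const texture = device.createTexture({
+    format: 'rgba8unorm',
+    usage: GPUTextureUsage.RENDER_ATTACHMENT,
+    size: [16, 16, 2], // two array layers
+  });
+  const attachmentForLayer = (layer: number): GPURenderPassColorAttachment => ({
+    view: texture.createView({ dimension: '2d', baseArrayLayer: layer, arrayLayerCount: 1 }),
+    loadOp: 'clear',
+    storeOp: 'store',
+  });
+  const encoder = device.createCommandEncoder();
+  const pass = encoder.beginRenderPass({
+    colorAttachments: [attachmentForLayer(0), attachmentForLayer(1)],
+  });
+  pass.end();
+  return encoder.finish();
+}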
+
+g.test('subresources,color_attachment_and_bind_group')
+ .desc(
+ `
+ Test that when one subresource of a texture is used as a color attachment, it cannot be used in a
+ bind group simultaneously in the same render pass encoder. It is allowed when the bind group is
+ used in another render pass encoder instead of the same one.`
+ )
+ .params(u =>
+ u
+ .combine('colorAttachmentLevel', [0, 1])
+ .combine('colorAttachmentLayer', [0, 1])
+ .combineWithParams([
+ { bgLevel: 0, bgLevelCount: 1 },
+ { bgLevel: 1, bgLevelCount: 1 },
+ { bgLevel: 1, bgLevelCount: 2 },
+ ])
+ .combineWithParams([
+ { bgLayer: 0, bgLayerCount: 1 },
+ { bgLayer: 1, bgLayerCount: 1 },
+ { bgLayer: 1, bgLayerCount: 2 },
+ ])
+ .combine('bgUsage', ['texture', 'storage'] as const)
+ .unless(t => t.bgUsage === 'storage' && t.bgLevelCount > 1)
+ .combine('inSamePass', [true, false])
+ )
+ .fn(t => {
+ const {
+ colorAttachmentLevel,
+ colorAttachmentLayer,
+ bgLevel,
+ bgLevelCount,
+ bgLayer,
+ bgLayerCount,
+ bgUsage,
+ inSamePass,
+ } = t.params;
+
+ const texture = t.device.createTexture({
+ format: 'rgba8unorm',
+ usage:
+ GPUTextureUsage.RENDER_ATTACHMENT |
+ GPUTextureUsage.TEXTURE_BINDING |
+ GPUTextureUsage.STORAGE_BINDING,
+ size: [kTextureSize, kTextureSize, kTextureLayers],
+ mipLevelCount: kTextureLevels,
+ });
+ const bindGroupView = texture.createView({
+ dimension: '2d-array',
+ baseArrayLayer: bgLayer,
+ arrayLayerCount: bgLayerCount,
+ baseMipLevel: bgLevel,
+ mipLevelCount: bgLevelCount,
+ });
+ const bindGroup = t.createBindGroupForTest(bindGroupView, bgUsage, 'float');
+
+ const colorAttachment = t.getColorAttachment(texture, {
+ dimension: '2d',
+ baseArrayLayer: colorAttachmentLayer,
+ arrayLayerCount: 1,
+ baseMipLevel: colorAttachmentLevel,
+ mipLevelCount: 1,
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const renderPass = encoder.beginRenderPass({
+ colorAttachments: [colorAttachment],
+ });
+ if (inSamePass) {
+ renderPass.setBindGroup(0, bindGroup);
+ renderPass.end();
+ } else {
+ renderPass.end();
+
+ const texture2 = t.device.createTexture({
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ size: [kTextureSize, kTextureSize, 1],
+ mipLevelCount: 1,
+ });
+ const colorAttachment2 = t.getColorAttachment(texture2);
+ const renderPass2 = encoder.beginRenderPass({
+ colorAttachments: [colorAttachment2],
+ });
+ renderPass2.setBindGroup(0, bindGroup);
+ renderPass2.end();
+ }
+
+ const isMipLevelNotOverlapped = t.isRangeNotOverlapped(
+ colorAttachmentLevel,
+ colorAttachmentLevel,
+ bgLevel,
+ bgLevel + bgLevelCount - 1
+ );
+ const isArrayLayerNotOverlapped = t.isRangeNotOverlapped(
+ colorAttachmentLayer,
+ colorAttachmentLayer,
+ bgLayer,
+ bgLayer + bgLayerCount - 1
+ );
+ const isNotOverlapped = isMipLevelNotOverlapped || isArrayLayerNotOverlapped;
+
+ const success = inSamePass ? isNotOverlapped : true;
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, !success);
+ });
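+
+// Illustrative sketch, not part of the CTS test above: rendering to mip level 0 of a texture
+// while a bind group samples only mip level 1 of the same texture is valid within one pass,
+// because the two usages touch disjoint subresources; binding mip level 0 itself would make
+// finish() fail. Assumes `device`, a 2D `texture` created with at least two mip levels, a single
+// array layer, and RENDER_ATTACHMENT | TEXTURE_BINDING usage, and a bind group `bindGroupOnMip1`
+// whose view covers only mip level 1.
+function sketchAttachmentPlusDisjointBinding(
+  device: GPUDevice,
+  texture: GPUTexture,
+  bindGroupOnMip1: GPUBindGroup
+): GPUCommandBuffer {
+  const encoder = device.createCommandEncoder();
+  const pass = encoder.beginRenderPass({
+    colorAttachments: [
+      {
+        view: texture.createView({ baseMipLevel: 0, mipLevelCount: 1 }),
+        loadOp: 'clear',
+        storeOp: 'store',
+      },
+    ],
+  });
+  pass.setBindGroup(0, bindGroupOnMip1); // reads only mip level 1: no overlap with the attachment
+  pass.end();
+  return encoder.finish();
+}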
+
+g.test('subresources,depth_stencil_attachment_and_bind_group')
+ .desc(
+ `
+ Test that when one subresource of a texture is used as a depth stencil attachment, it cannot be
+ used in a bind group simultaneously in the same render pass encoder. It is allowed when the bind
+ group is used in another render pass encoder instead of the same one, or the subresource is used
+ as a read-only depth stencil attachment.`
+ )
+ .params(u =>
+ u
+ .combine('dsLevel', [0, 1])
+ .combine('dsLayer', [0, 1])
+ .combineWithParams([
+ { bgLevel: 0, bgLevelCount: 1 },
+ { bgLevel: 1, bgLevelCount: 1 },
+ { bgLevel: 1, bgLevelCount: 2 },
+ ])
+ .combineWithParams([
+ { bgLayer: 0, bgLayerCount: 1 },
+ { bgLayer: 1, bgLayerCount: 1 },
+ { bgLayer: 1, bgLayerCount: 2 },
+ ])
+ .beginSubcases()
+ .combine('dsReadOnly', [true, false])
+ .combine('bgAspect', ['depth-only', 'stencil-only'] as const)
+ .combine('inSamePass', [true, false])
+ )
+ .fn(t => {
+ const {
+ dsLevel,
+ dsLayer,
+ bgLevel,
+ bgLevelCount,
+ bgLayer,
+ bgLayerCount,
+ dsReadOnly,
+ bgAspect,
+ inSamePass,
+ } = t.params;
+
+ const texture = t.device.createTexture({
+ format: 'depth24plus-stencil8',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.TEXTURE_BINDING,
+ size: [kTextureSize, kTextureSize, kTextureLayers],
+ mipLevelCount: kTextureLevels,
+ });
+ const bindGroupView = texture.createView({
+ dimension: '2d-array',
+ baseArrayLayer: bgLayer,
+ arrayLayerCount: bgLayerCount,
+ baseMipLevel: bgLevel,
+ mipLevelCount: bgLevelCount,
+ aspect: bgAspect,
+ });
+ const sampleType = bgAspect === 'depth-only' ? 'depth' : 'uint';
+ const bindGroup = t.createBindGroupForTest(bindGroupView, 'texture', sampleType);
+
+ const attachmentView = texture.createView({
+ dimension: '2d',
+ baseArrayLayer: dsLayer,
+ arrayLayerCount: 1,
+ baseMipLevel: dsLevel,
+ mipLevelCount: 1,
+ });
+ const depthStencilAttachment: GPURenderPassDepthStencilAttachment = {
+ view: attachmentView,
+ depthReadOnly: dsReadOnly,
+ depthLoadOp: dsReadOnly ? undefined : 'load',
+ depthStoreOp: dsReadOnly ? undefined : 'store',
+ stencilReadOnly: dsReadOnly,
+ stencilLoadOp: dsReadOnly ? undefined : 'load',
+ stencilStoreOp: dsReadOnly ? undefined : 'store',
+ };
+
+ const encoder = t.device.createCommandEncoder();
+ const renderPass = encoder.beginRenderPass({
+ colorAttachments: [],
+ depthStencilAttachment,
+ });
+ if (inSamePass) {
+ renderPass.setBindGroup(0, bindGroup);
+ renderPass.end();
+ } else {
+ renderPass.end();
+
+ const texture2 = t.device.createTexture({
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ size: [kTextureSize, kTextureSize, 1],
+ mipLevelCount: 1,
+ });
+ const colorAttachment2 = t.getColorAttachment(texture2);
+ const renderPass2 = encoder.beginRenderPass({
+ colorAttachments: [colorAttachment2],
+ });
+ renderPass2.setBindGroup(0, bindGroup);
+ renderPass2.end();
+ }
+
+ const isMipLevelNotOverlapped = t.isRangeNotOverlapped(
+ dsLevel,
+ dsLevel,
+ bgLevel,
+ bgLevel + bgLevelCount - 1
+ );
+ const isArrayLayerNotOverlapped = t.isRangeNotOverlapped(
+ dsLayer,
+ dsLayer,
+ bgLayer,
+ bgLayer + bgLayerCount - 1
+ );
+ const isNotOverlapped = isMipLevelNotOverlapped || isArrayLayerNotOverlapped;
+
+ const success = !inSamePass || isNotOverlapped || dsReadOnly;
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, !success);
+ });
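+
+// Illustrative sketch, not part of the CTS test above: marking the depth aspect read-only removes
+// the attachment's write usage, so the same subresource may also be sampled from a bind group in
+// the same pass. Assumes `device`, a single-subresource 'depth24plus' texture `depthTexture`
+// created with RENDER_ATTACHMENT | TEXTURE_BINDING usage, and a bind group `depthBindGroup` that
+// samples it with sampleType 'depth'.
+function sketchReadOnlyDepthWithBinding(
+  device: GPUDevice,
+  depthTexture: GPUTexture,
+  depthBindGroup: GPUBindGroup
+): GPUCommandBuffer {
+  const encoder = device.createCommandEncoder();
+  const pass = encoder.beginRenderPass({
+    colorAttachments: [],
+    depthStencilAttachment: {
+      view: depthTexture.createView(),
+      depthReadOnly: true, // no depthLoadOp/depthStoreOp is given for a read-only aspect
+    },
+  });
+  pass.setBindGroup(0, depthBindGroup);
+  pass.end();
+  return encoder.finish();
+}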
+
+g.test('subresources,multiple_bind_groups')
+ .desc(
+ `
+  Test that when one color texture subresource is bound to different bind groups, its list of
+  internal usages within one usage scope can only be a compatible usage list. For texture
+  subresources in bind groups, the compatible usage lists are {TEXTURE_BINDING} and
+  {STORAGE_BINDING}, which means the subresource can be bound as both TEXTURE_BINDING and
+  STORAGE_BINDING only in different render pass encoders; otherwise a validation error occurs.`
+ )
+ .params(u =>
+ u
+ .combine('bg0Levels', [
+ { base: 0, count: 1 },
+ { base: 1, count: 1 },
+ { base: 1, count: 2 },
+ ])
+ .combine('bg0Layers', [
+ { base: 0, count: 1 },
+ { base: 1, count: 1 },
+ { base: 1, count: 2 },
+ ])
+ .combine('bg1Levels', [
+ { base: 0, count: 1 },
+ { base: 1, count: 1 },
+ { base: 1, count: 2 },
+ ])
+ .combine('bg1Layers', [
+ { base: 0, count: 1 },
+ { base: 1, count: 1 },
+ { base: 1, count: 2 },
+ ])
+ .combine('bgUsage0', ['texture', 'storage'] as const)
+ .combine('bgUsage1', ['texture', 'storage'] as const)
+ .unless(
+ t =>
+ (t.bgUsage0 === 'storage' && t.bg0Levels.count > 1) ||
+ (t.bgUsage1 === 'storage' && t.bg1Levels.count > 1)
+ )
+ .combine('inSamePass', [true, false])
+ )
+ .fn(t => {
+ const { bg0Levels, bg0Layers, bg1Levels, bg1Layers, bgUsage0, bgUsage1, inSamePass } = t.params;
+
+ const texture = t.device.createTexture({
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.STORAGE_BINDING | GPUTextureUsage.TEXTURE_BINDING,
+ size: [kTextureSize, kTextureSize, kTextureLayers],
+ mipLevelCount: kTextureLevels,
+ });
+ const bg0 = texture.createView({
+ dimension: '2d-array',
+ baseArrayLayer: bg0Layers.base,
+ arrayLayerCount: bg0Layers.count,
+ baseMipLevel: bg0Levels.base,
+ mipLevelCount: bg0Levels.count,
+ });
+ const bg1 = texture.createView({
+ dimension: '2d-array',
+ baseArrayLayer: bg1Layers.base,
+ arrayLayerCount: bg1Layers.count,
+ baseMipLevel: bg1Levels.base,
+ mipLevelCount: bg1Levels.count,
+ });
+ const bindGroup0 = t.createBindGroupForTest(bg0, bgUsage0, 'float');
+ const bindGroup1 = t.createBindGroupForTest(bg1, bgUsage1, 'float');
+
+ const colorTexture = t.device.createTexture({
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ size: [kTextureSize, kTextureSize, 1],
+ mipLevelCount: 1,
+ });
+ const colorAttachment = t.getColorAttachment(colorTexture);
+ const encoder = t.device.createCommandEncoder();
+ const renderPass = encoder.beginRenderPass({
+ colorAttachments: [colorAttachment],
+ });
+ if (inSamePass) {
+ renderPass.setBindGroup(0, bindGroup0);
+ renderPass.setBindGroup(1, bindGroup1);
+ renderPass.end();
+ } else {
+ renderPass.setBindGroup(0, bindGroup0);
+ renderPass.end();
+
+ const renderPass2 = encoder.beginRenderPass({
+ colorAttachments: [colorAttachment],
+ });
+ renderPass2.setBindGroup(1, bindGroup1);
+ renderPass2.end();
+ }
+
+ const isMipLevelNotOverlapped = t.isRangeNotOverlapped(
+ bg0Levels.base,
+ bg0Levels.base + bg0Levels.count - 1,
+ bg1Levels.base,
+ bg1Levels.base + bg1Levels.count - 1
+ );
+ const isArrayLayerNotOverlapped = t.isRangeNotOverlapped(
+ bg0Layers.base,
+ bg0Layers.base + bg0Layers.count - 1,
+ bg1Layers.base,
+ bg1Layers.base + bg1Layers.count - 1
+ );
+ const isNotOverlapped = isMipLevelNotOverlapped || isArrayLayerNotOverlapped;
+
+ const success = !inSamePass || isNotOverlapped || bgUsage0 === bgUsage1;
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, !success);
+ });
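+
+// Illustrative sketch, not part of the CTS test above: identical usages of overlapping views form
+// a compatible usage list, so the encoder finishes cleanly; replacing one bind group with a
+// STORAGE_BINDING view of the same subresource would fail instead. Assumes `device`, a render
+// target view `attachmentView`, and two bind groups that both bind overlapping views of one
+// texture as TEXTURE_BINDING.
+function sketchCompatibleUsageList(
+  device: GPUDevice,
+  attachmentView: GPUTextureView,
+  sampledBindGroup0: GPUBindGroup,
+  sampledBindGroup1: GPUBindGroup
+): GPUCommandBuffer {
+  const encoder = device.createCommandEncoder();
+  const pass = encoder.beginRenderPass({
+    colorAttachments: [{ view: attachmentView, loadOp: 'clear', storeOp: 'store' }],
+  });
+  pass.setBindGroup(0, sampledBindGroup0);
+  pass.setBindGroup(1, sampledBindGroup1); // same usage on the same subresource: allowed
+  pass.end();
+  return encoder.finish();
+}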
+
+g.test('subresources,depth_stencil_texture_in_bind_groups')
+ .desc(
+ `
+  Test that when one depth stencil texture subresource is bound to different bind groups, the two
+  bind groups can always be used in either the same or different render pass encoders, because a
+  depth stencil texture can only be bound as TEXTURE_BINDING in a bind group.`
+ )
+ .params(u =>
+ u
+ .combine('view0Levels', [
+ { base: 0, count: 1 },
+ { base: 1, count: 1 },
+ { base: 1, count: 2 },
+ ])
+ .combine('view0Layers', [
+ { base: 0, count: 1 },
+ { base: 1, count: 1 },
+ { base: 1, count: 2 },
+ ])
+ .combine('view1Levels', [
+ { base: 0, count: 1 },
+ { base: 1, count: 1 },
+ { base: 1, count: 2 },
+ ])
+ .combine('view1Layers', [
+ { base: 0, count: 1 },
+ { base: 1, count: 1 },
+ { base: 1, count: 2 },
+ ])
+ .combine('aspect0', ['depth-only', 'stencil-only'] as const)
+ .combine('aspect1', ['depth-only', 'stencil-only'] as const)
+ .combine('inSamePass', [true, false])
+ )
+ .fn(t => {
+ const { view0Levels, view0Layers, view1Levels, view1Layers, aspect0, aspect1, inSamePass } =
+ t.params;
+
+ const texture = t.device.createTexture({
+ format: 'depth24plus-stencil8',
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ size: [kTextureSize, kTextureSize, kTextureLayers],
+ mipLevelCount: kTextureLevels,
+ });
+ const bindGroupView0 = texture.createView({
+ dimension: '2d-array',
+ baseArrayLayer: view0Layers.base,
+ arrayLayerCount: view0Layers.count,
+ baseMipLevel: view0Levels.base,
+ mipLevelCount: view0Levels.count,
+ aspect: aspect0,
+ });
+ const bindGroupView1 = texture.createView({
+ dimension: '2d-array',
+ baseArrayLayer: view1Layers.base,
+ arrayLayerCount: view1Layers.count,
+ baseMipLevel: view1Levels.base,
+ mipLevelCount: view1Levels.count,
+ aspect: aspect1,
+ });
+
+ const sampleType0 = aspect0 === 'depth-only' ? 'depth' : 'uint';
+ const sampleType1 = aspect1 === 'depth-only' ? 'depth' : 'uint';
+ const bindGroup0 = t.createBindGroupForTest(bindGroupView0, 'texture', sampleType0);
+ const bindGroup1 = t.createBindGroupForTest(bindGroupView1, 'texture', sampleType1);
+
+ const colorTexture = t.device.createTexture({
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ size: [kTextureSize, kTextureSize, 1],
+ mipLevelCount: 1,
+ });
+ const colorAttachment = t.getColorAttachment(colorTexture);
+ const encoder = t.device.createCommandEncoder();
+ const renderPass = encoder.beginRenderPass({
+ colorAttachments: [colorAttachment],
+ });
+ if (inSamePass) {
+ renderPass.setBindGroup(0, bindGroup0);
+ renderPass.setBindGroup(1, bindGroup1);
+ renderPass.end();
+ } else {
+ renderPass.setBindGroup(0, bindGroup0);
+ renderPass.end();
+
+ const renderPass2 = encoder.beginRenderPass({
+ colorAttachments: [colorAttachment],
+ });
+ renderPass2.setBindGroup(1, bindGroup1);
+ renderPass2.end();
+ }
+
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, false);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/resource_usages/texture/in_render_misc.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/resource_usages/texture/in_render_misc.spec.ts
new file mode 100644
index 0000000000..1b80a2f73e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/resource_usages/texture/in_render_misc.spec.ts
@@ -0,0 +1,420 @@
+export const description = `
+Texture Usages Validation Tests on All Kinds of WebGPU Subresource Usage Scopes.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { unreachable } from '../../../../../common/util/util.js';
+import { ValidationTest } from '../../validation_test.js';
+
+class F extends ValidationTest {
+ createBindGroupLayoutForTest(
+ textureUsage: 'texture' | 'storage',
+ sampleType: 'float' | 'depth' | 'uint',
+ visibility: GPUShaderStage['FRAGMENT'] | GPUShaderStage['COMPUTE'] = GPUShaderStage['FRAGMENT']
+ ): GPUBindGroupLayout {
+ const bindGroupLayoutEntry: GPUBindGroupLayoutEntry = {
+ binding: 0,
+ visibility,
+ };
+
+ switch (textureUsage) {
+ case 'texture':
+ bindGroupLayoutEntry.texture = { viewDimension: '2d-array', sampleType };
+ break;
+ case 'storage':
+ bindGroupLayoutEntry.storageTexture = {
+ access: 'write-only',
+ format: 'rgba8unorm',
+ viewDimension: '2d-array',
+ };
+ break;
+ default:
+ unreachable();
+ break;
+ }
+ return this.device.createBindGroupLayout({
+ entries: [bindGroupLayoutEntry],
+ });
+ }
+
+ createBindGroupForTest(
+ textureView: GPUTextureView,
+ textureUsage: 'texture' | 'storage',
+ sampleType: 'float' | 'depth' | 'uint',
+ visibility: GPUShaderStage['FRAGMENT'] | GPUShaderStage['COMPUTE'] = GPUShaderStage['FRAGMENT']
+ ) {
+ return this.device.createBindGroup({
+ layout: this.createBindGroupLayoutForTest(textureUsage, sampleType, visibility),
+ entries: [{ binding: 0, resource: textureView }],
+ });
+ }
+}
+
+export const g = makeTestGroup(F);
+
+const kTextureSize = 16;
+const kTextureLayers = 3;
+
+g.test('subresources,set_bind_group_on_same_index_color_texture')
+ .desc(
+ `
+  Test that when one color texture subresource is bound to different bind groups, whether or not
+  those bind groups are later replaced by other compatible ones, its list of internal usages within
+  one usage scope can only be a compatible usage list.`
+ )
+ .params(u =>
+ u
+ .combineWithParams([
+ { useDifferentTextureAsTexture2: true, baseLayer2: 0, view2Binding: 'texture' },
+ { useDifferentTextureAsTexture2: false, baseLayer2: 0, view2Binding: 'texture' },
+ { useDifferentTextureAsTexture2: false, baseLayer2: 1, view2Binding: 'texture' },
+ { useDifferentTextureAsTexture2: false, baseLayer2: 0, view2Binding: 'storage' },
+ { useDifferentTextureAsTexture2: false, baseLayer2: 1, view2Binding: 'storage' },
+ ] as const)
+ .combine('hasConflict', [true, false])
+ )
+ .fn(t => {
+ const { useDifferentTextureAsTexture2, baseLayer2, view2Binding, hasConflict } = t.params;
+
+ const texture0 = t.device.createTexture({
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.STORAGE_BINDING,
+ size: [kTextureSize, kTextureSize, kTextureLayers],
+ });
+ // We always bind the first layer of the texture to bindGroup0.
+ const textureView0 = texture0.createView({
+ dimension: '2d-array',
+ baseArrayLayer: 0,
+ arrayLayerCount: 1,
+ });
+ const bindGroup0 = t.createBindGroupForTest(textureView0, view2Binding, 'float');
+
+    // When hasConflict is true, setting both bindGroup0 and bindGroup1 in one renderPassEncoder
+    // is an error.
+ const view1Binding = hasConflict
+ ? view2Binding === 'texture'
+ ? 'storage'
+ : 'texture'
+ : view2Binding;
+ const bindGroup1 = t.createBindGroupForTest(textureView0, view1Binding, 'float');
+
+ const texture2 = useDifferentTextureAsTexture2
+ ? t.device.createTexture({
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.STORAGE_BINDING,
+ size: [kTextureSize, kTextureSize, kTextureLayers],
+ })
+ : texture0;
+ const textureView2 = texture2.createView({
+ dimension: '2d-array',
+ baseArrayLayer: baseLayer2,
+ arrayLayerCount: kTextureLayers - baseLayer2,
+ });
+ // There should be no conflict between bindGroup0 and validBindGroup2.
+ const validBindGroup2 = t.createBindGroupForTest(textureView2, view2Binding, 'float');
+
+ const colorTexture = t.device.createTexture({
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ size: [kTextureSize, kTextureSize, 1],
+ });
+ const encoder = t.device.createCommandEncoder();
+ const renderPassEncoder = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorTexture.createView(),
+ loadOp: 'load',
+ storeOp: 'store',
+ },
+ ],
+ });
+ renderPassEncoder.setBindGroup(0, bindGroup0);
+ renderPassEncoder.setBindGroup(1, bindGroup1);
+ renderPassEncoder.setBindGroup(1, validBindGroup2);
+ renderPassEncoder.end();
+
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, hasConflict);
+ });
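+
+// Illustrative sketch, not part of the CTS test above: in a render pass, every bind group that was
+// ever set counts toward the pass's usage scope, so overwriting a slot does not remove the earlier
+// group's (conflicting) usage. Assumes `device`, an `attachmentView`, a conflicting pair
+// `sampledBindGroup` / `storageBindGroup` on one subresource, and an unrelated `otherBindGroup`.
+function sketchOverwrittenBindGroupStillCounts(
+  device: GPUDevice,
+  attachmentView: GPUTextureView,
+  sampledBindGroup: GPUBindGroup,
+  storageBindGroup: GPUBindGroup,
+  otherBindGroup: GPUBindGroup
+): void {
+  const encoder = device.createCommandEncoder();
+  const pass = encoder.beginRenderPass({
+    colorAttachments: [{ view: attachmentView, loadOp: 'clear', storeOp: 'store' }],
+  });
+  pass.setBindGroup(0, sampledBindGroup);
+  pass.setBindGroup(1, storageBindGroup); // conflicts with slot 0
+  pass.setBindGroup(1, otherBindGroup); // overwriting the slot does not undo the earlier usage
+  pass.end();
+  encoder.finish(); // expected to generate a validation error
+}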
+
+g.test('subresources,set_bind_group_on_same_index_depth_stencil_texture')
+ .desc(
+ `
+  Test that when one depth stencil texture subresource is bound to different bind groups, whether
+  or not those bind groups are later replaced by other compatible ones, its list of internal usages
+  within one usage scope can only be a compatible usage list.`
+ )
+ .params(u =>
+ u
+ .combine('bindAspect', ['depth-only', 'stencil-only'] as const)
+ .combine('depthStencilReadOnly', [true, false])
+ )
+ .fn(t => {
+ const { bindAspect, depthStencilReadOnly } = t.params;
+ const depthStencilTexture = t.device.createTexture({
+ format: 'depth24plus-stencil8',
+ usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.RENDER_ATTACHMENT,
+ size: [kTextureSize, kTextureSize, 1],
+ });
+
+ const conflictedToNonReadOnlyAttachmentBindGroup = t.createBindGroupForTest(
+ depthStencilTexture.createView({
+ dimension: '2d-array',
+ aspect: bindAspect,
+ }),
+ 'texture',
+ bindAspect === 'depth-only' ? 'depth' : 'uint'
+ );
+
+ const colorTexture = t.device.createTexture({
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.STORAGE_BINDING,
+ size: [kTextureSize, kTextureSize, 1],
+ });
+ const validBindGroup = t.createBindGroupForTest(
+ colorTexture.createView({
+ dimension: '2d-array',
+ }),
+ 'texture',
+ 'float'
+ );
+
+ const encoder = t.device.createCommandEncoder();
+ const renderPassEncoder = encoder.beginRenderPass({
+ colorAttachments: [],
+      depthStencilAttachment: {
+        view: depthStencilTexture.createView(),
+        depthReadOnly: depthStencilReadOnly,
+        // Load/store ops are required (and only allowed) when the aspect is not read-only.
+        depthLoadOp: depthStencilReadOnly ? undefined : 'load',
+        depthStoreOp: depthStencilReadOnly ? undefined : 'store',
+        stencilReadOnly: depthStencilReadOnly,
+        stencilLoadOp: depthStencilReadOnly ? undefined : 'load',
+        stencilStoreOp: depthStencilReadOnly ? undefined : 'store',
+      },
+ });
+ renderPassEncoder.setBindGroup(0, conflictedToNonReadOnlyAttachmentBindGroup);
+ renderPassEncoder.setBindGroup(0, validBindGroup);
+ renderPassEncoder.end();
+
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, !depthStencilReadOnly);
+ });
+
+g.test('subresources,set_unused_bind_group')
+ .desc(
+ `
+ Test that when one texture subresource is bound to different bind groups and the bind groups are
+ used in the same render or compute pass encoder, its list of internal usages within one usage
+ scope can only be a compatible usage list.`
+ )
+ .params(u => u.combine('inRenderPass', [true, false]).combine('hasConflict', [true, false]))
+ .fn(t => {
+ const { inRenderPass, hasConflict } = t.params;
+
+ const texture0 = t.device.createTexture({
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.STORAGE_BINDING,
+ size: [kTextureSize, kTextureSize, kTextureLayers],
+ });
+ // We always bind the first layer of the texture to bindGroup0.
+ const textureView0 = texture0.createView({
+ dimension: '2d-array',
+ baseArrayLayer: 0,
+ arrayLayerCount: 1,
+ });
+ const visibility = inRenderPass ? GPUShaderStage.FRAGMENT : GPUShaderStage.COMPUTE;
+ // bindGroup0 is used by the pipelines, and bindGroup1 is not used by the pipelines.
+ const textureUsage0 = inRenderPass ? 'texture' : 'storage';
+ const textureUsage1 = hasConflict ? (inRenderPass ? 'storage' : 'texture') : textureUsage0;
+ const bindGroup0 = t.createBindGroupForTest(textureView0, textureUsage0, 'float', visibility);
+ const bindGroup1 = t.createBindGroupForTest(textureView0, textureUsage1, 'float', visibility);
+
+ const encoder = t.device.createCommandEncoder();
+ const colorTexture = t.device.createTexture({
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ size: [kTextureSize, kTextureSize, 1],
+ });
+ const pipelineLayout = t.device.createPipelineLayout({
+ bindGroupLayouts: [t.createBindGroupLayoutForTest(textureUsage0, 'float', visibility)],
+ });
+ if (inRenderPass) {
+ const renderPipeline = t.device.createRenderPipeline({
+ layout: pipelineLayout,
+ vertex: {
+ module: t.device.createShaderModule({
+ code: t.getNoOpShaderCode('VERTEX'),
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `
+ @group(0) @binding(0) var texture0 : texture_2d_array<f32>;
+ @fragment fn main()
+ -> @location(0) vec4<f32> {
+ return textureLoad(texture0, vec2<i32>(), 0, 0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ });
+
+ const renderPassEncoder = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorTexture.createView(),
+ loadOp: 'load',
+ storeOp: 'store',
+ },
+ ],
+ });
+ renderPassEncoder.setBindGroup(0, bindGroup0);
+ renderPassEncoder.setBindGroup(1, bindGroup1);
+ renderPassEncoder.setPipeline(renderPipeline);
+ renderPassEncoder.draw(1);
+ renderPassEncoder.end();
+ } else {
+ const computePipeline = t.device.createComputePipeline({
+ layout: pipelineLayout,
+ compute: {
+ module: t.device.createShaderModule({
+ code: `
+ @group(0) @binding(0) var texture0 : texture_storage_2d_array<rgba8unorm, write>;
+ @compute @workgroup_size(1)
+ fn main() {
+ textureStore(texture0, vec2<i32>(), 0, vec4<f32>());
+ }`,
+ }),
+ entryPoint: 'main',
+ },
+ });
+ const computePassEncoder = encoder.beginComputePass();
+ computePassEncoder.setBindGroup(0, bindGroup0);
+ computePassEncoder.setBindGroup(1, bindGroup1);
+ computePassEncoder.setPipeline(computePipeline);
+ computePassEncoder.dispatchWorkgroups(1);
+ computePassEncoder.end();
+ }
+
+ // In WebGPU SPEC (Chapter 3.4.5, Synchronization):
+ // This specification defines the following usage scopes:
+ // - In a compute pass, each dispatch command (dispatchWorkgroups() or
+ // dispatchWorkgroupsIndirect()) is one usage scope. A subresource is "used" in the usage
+ // scope if it is potentially accessible by the command. State-setting compute pass commands,
+ // like setBindGroup(index, bindGroup, dynamicOffsets), do not contribute directly to a usage
+ // scope.
+ // - One render pass is one usage scope. A subresource is "used" in the usage scope if it’s
+ // referenced by any (state-setting or non-state-setting) command. For example, in
+ // setBindGroup(index, bindGroup, dynamicOffsets), every subresource in bindGroup is "used" in
+ // the render pass’s usage scope.
+ const success = !inRenderPass || !hasConflict;
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, !success);
+ });
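+
+// Illustrative sketch of the compute-side rule quoted above, not part of the CTS test: a
+// dispatch's usage scope only contains subresources reachable through the current pipeline, so a
+// conflicting bind group set on a slot the pipeline layout does not reference is ignored. Assumes
+// `device`, a compute `pipeline` whose layout only uses group 0, a matching `usedBindGroup`, and
+// an `unusedBindGroup` that would otherwise conflict with it.
+function sketchUnusedGroupInComputeDispatch(
+  device: GPUDevice,
+  pipeline: GPUComputePipeline,
+  usedBindGroup: GPUBindGroup,
+  unusedBindGroup: GPUBindGroup
+): GPUCommandBuffer {
+  const encoder = device.createCommandEncoder();
+  const pass = encoder.beginComputePass();
+  pass.setPipeline(pipeline);
+  pass.setBindGroup(0, usedBindGroup);
+  pass.setBindGroup(1, unusedBindGroup); // not visible to the pipeline layout, so not "used"
+  pass.dispatchWorkgroups(1);
+  pass.end();
+  return encoder.finish(); // expected to be valid
+}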
+
+g.test('subresources,texture_usages_in_copy_and_render_pass')
+ .desc(
+ `
+ Test that using one texture subresource in a render pass encoder and a copy command is always
+ allowed as WebGPU SPEC (chapter 3.4.5) defines that out of any pass encoder, each command always
+ belongs to one usage scope.`
+ )
+ .params(u =>
+ u
+ .combine('usage0', [
+ 'copy-src',
+ 'copy-dst',
+ 'texture',
+ 'storage',
+ 'color-attachment',
+ ] as const)
+ .combine('usage1', [
+ 'copy-src',
+ 'copy-dst',
+ 'texture',
+ 'storage',
+ 'color-attachment',
+ ] as const)
+ .filter(
+ ({ usage0, usage1 }) =>
+ usage0 === 'copy-src' ||
+ usage0 === 'copy-dst' ||
+ usage1 === 'copy-src' ||
+ usage1 === 'copy-dst'
+ )
+ )
+ .fn(t => {
+ const { usage0, usage1 } = t.params;
+
+ const texture = t.device.createTexture({
+ format: 'rgba8unorm',
+ usage:
+ GPUTextureUsage.COPY_SRC |
+ GPUTextureUsage.COPY_DST |
+ GPUTextureUsage.TEXTURE_BINDING |
+ GPUTextureUsage.STORAGE_BINDING |
+ GPUTextureUsage.RENDER_ATTACHMENT,
+ size: [kTextureSize, kTextureSize, 1],
+ });
+
+ const UseTextureOnCommandEncoder = (
+ texture: GPUTexture,
+ usage: 'copy-src' | 'copy-dst' | 'texture' | 'storage' | 'color-attachment',
+ encoder: GPUCommandEncoder
+ ) => {
+ switch (usage) {
+ case 'copy-src': {
+ const buffer = t.createBufferWithState('valid', {
+ size: 4,
+ usage: GPUBufferUsage.COPY_DST,
+ });
+ encoder.copyTextureToBuffer({ texture }, { buffer }, [1, 1, 1]);
+ break;
+ }
+ case 'copy-dst': {
+ const buffer = t.createBufferWithState('valid', {
+ size: 4,
+ usage: GPUBufferUsage.COPY_SRC,
+ });
+ encoder.copyBufferToTexture({ buffer }, { texture }, [1, 1, 1]);
+ break;
+ }
+ case 'color-attachment': {
+ const renderPassEncoder = encoder.beginRenderPass({
+ colorAttachments: [{ view: texture.createView(), loadOp: 'load', storeOp: 'store' }],
+ });
+ renderPassEncoder.end();
+ break;
+ }
+ case 'texture':
+ case 'storage': {
+ const colorTexture = t.device.createTexture({
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ size: [kTextureSize, kTextureSize, 1],
+ });
+ const renderPassEncoder = encoder.beginRenderPass({
+ colorAttachments: [
+ { view: colorTexture.createView(), loadOp: 'load', storeOp: 'store' },
+ ],
+ });
+ const bindGroup = t.createBindGroupForTest(
+ texture.createView({
+ dimension: '2d-array',
+ }),
+ usage,
+ 'float'
+ );
+ renderPassEncoder.setBindGroup(0, bindGroup);
+ renderPassEncoder.end();
+ break;
+ }
+ }
+ };
+ const encoder = t.device.createCommandEncoder();
+ UseTextureOnCommandEncoder(texture, usage0, encoder);
+ UseTextureOnCommandEncoder(texture, usage1, encoder);
+ t.expectValidationError(() => {
+ encoder.finish();
+ }, false);
+ });
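+
+// Illustrative sketch, not part of the CTS test above: a copy command recorded outside any pass is
+// its own usage scope, so it never conflicts with how the same subresource is used inside a later
+// render pass on the same encoder. Assumes `device`, a single-layer 2D `texture` created with
+// COPY_SRC | RENDER_ATTACHMENT usage, and a buffer `destination` with COPY_DST usage and at least
+// 4 bytes of space.
+function sketchCopyThenRender(
+  device: GPUDevice,
+  texture: GPUTexture,
+  destination: GPUBuffer
+): GPUCommandBuffer {
+  const encoder = device.createCommandEncoder();
+  encoder.copyTextureToBuffer({ texture }, { buffer: destination }, [1, 1, 1]);
+  const pass = encoder.beginRenderPass({
+    colorAttachments: [{ view: texture.createView(), loadOp: 'clear', storeOp: 'store' }],
+  });
+  pass.end();
+  return encoder.finish(); // valid: the copy and the render pass are separate usage scopes
+}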
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/shader_module/entry_point.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/shader_module/entry_point.spec.ts
new file mode 100644
index 0000000000..1a8da470a4
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/shader_module/entry_point.spec.ts
@@ -0,0 +1,117 @@
+export const description = `
+This tests entry point validation of compute/render pipelines and their shader modules.
+
+The entry points in the shader modules include the standard "main" and others.
+The entryPoint values assigned in the descriptors include:
+- Matching case (control case)
+- Empty string
+- Mistyping
+- Containing invalid characters, including spaces and control codes (the null character)
+- Unicode entrypoints and their ASCIIfied version
+
+TODO:
+- Test unicode normalization (gpuweb/gpuweb#1160)
+- Fine-tune test cases to reduce number by removing trivially similar cases
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { kDefaultVertexShaderCode, getShaderWithEntryPoint } from '../../../util/shader.js';
+import { ValidationTest } from '../validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+const kEntryPointTestCases = [
+ { shaderModuleEntryPoint: 'main', stageEntryPoint: 'main' },
+ { shaderModuleEntryPoint: 'main', stageEntryPoint: '' },
+ { shaderModuleEntryPoint: 'main', stageEntryPoint: 'main\0' },
+ { shaderModuleEntryPoint: 'main', stageEntryPoint: 'main\0a' },
+ { shaderModuleEntryPoint: 'main', stageEntryPoint: 'mian' },
+ { shaderModuleEntryPoint: 'main', stageEntryPoint: 'main ' },
+ { shaderModuleEntryPoint: 'main', stageEntryPoint: 'ma in' },
+ { shaderModuleEntryPoint: 'main', stageEntryPoint: 'main\n' },
+ { shaderModuleEntryPoint: 'mian', stageEntryPoint: 'mian' },
+ { shaderModuleEntryPoint: 'mian', stageEntryPoint: 'main' },
+ { shaderModuleEntryPoint: 'mainmain', stageEntryPoint: 'mainmain' },
+ { shaderModuleEntryPoint: 'mainmain', stageEntryPoint: 'foo' },
+ { shaderModuleEntryPoint: 'main_t12V3', stageEntryPoint: 'main_t12V3' },
+ { shaderModuleEntryPoint: 'main_t12V3', stageEntryPoint: 'main_t12V5' },
+ { shaderModuleEntryPoint: 'main_t12V3', stageEntryPoint: '_main_t12V3' },
+ { shaderModuleEntryPoint: 'séquençage', stageEntryPoint: 'séquençage' },
+  { shaderModuleEntryPoint: 'séquençage', stageEntryPoint: 'sequencage' },
+];
+
+g.test('compute')
+ .desc(
+ `
+Tests calling createComputePipeline(Async) with a valid compute stage shader and different entryPoints,
+and checks that the APIs only accept a matching entryPoint.
+`
+ )
+ .params(u => u.combine('isAsync', [true, false]).combineWithParams(kEntryPointTestCases))
+ .fn(t => {
+ const { isAsync, shaderModuleEntryPoint, stageEntryPoint } = t.params;
+ const descriptor: GPUComputePipelineDescriptor = {
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({
+ code: getShaderWithEntryPoint('compute', shaderModuleEntryPoint),
+ }),
+ entryPoint: stageEntryPoint,
+ },
+ };
+ const _success = shaderModuleEntryPoint === stageEntryPoint;
+ t.doCreateComputePipelineTest(isAsync, _success, descriptor);
+ });
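+
+// Illustrative sketch, not part of the CTS test above (assumes an acquired GPUDevice `device`):
+// the descriptor's entryPoint must match an entry point declared in the module exactly; values
+// such as 'mian' or 'main ' (trailing space) would make createComputePipeline produce a
+// validation error.
+function sketchMatchingEntryPoint(device: GPUDevice): GPUComputePipeline {
+  const module = device.createShaderModule({
+    code: '@compute @workgroup_size(1) fn main() {}',
+  });
+  return device.createComputePipeline({
+    layout: 'auto',
+    compute: { module, entryPoint: 'main' }, // exact match with the WGSL entry point name
+  });
+}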
+
+g.test('vertex')
+ .desc(
+ `
+Tests calling createRenderPipeline(Async) with a valid vertex stage shader and different entryPoints,
+and checks that the APIs only accept a matching entryPoint.
+`
+ )
+ .params(u => u.combine('isAsync', [true, false]).combineWithParams(kEntryPointTestCases))
+ .fn(t => {
+ const { isAsync, shaderModuleEntryPoint, stageEntryPoint } = t.params;
+ const descriptor: GPURenderPipelineDescriptor = {
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: getShaderWithEntryPoint('vertex', shaderModuleEntryPoint),
+ }),
+ entryPoint: stageEntryPoint,
+ },
+ };
+ const _success = shaderModuleEntryPoint === stageEntryPoint;
+ t.doCreateRenderPipelineTest(isAsync, _success, descriptor);
+ });
+
+g.test('fragment')
+ .desc(
+ `
+Tests calling createRenderPipeline(Async) with a valid fragment stage shader and different entryPoints,
+and checks that the APIs only accept a matching entryPoint.
+`
+ )
+ .params(u => u.combine('isAsync', [true, false]).combineWithParams(kEntryPointTestCases))
+ .fn(t => {
+ const { isAsync, shaderModuleEntryPoint, stageEntryPoint } = t.params;
+ const descriptor: GPURenderPipelineDescriptor = {
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: kDefaultVertexShaderCode,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: getShaderWithEntryPoint('fragment', shaderModuleEntryPoint),
+ }),
+ entryPoint: stageEntryPoint,
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ };
+ const _success = shaderModuleEntryPoint === stageEntryPoint;
+ t.doCreateRenderPipelineTest(isAsync, _success, descriptor);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/shader_module/overrides.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/shader_module/overrides.spec.ts
new file mode 100644
index 0000000000..a7a060fe26
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/shader_module/overrides.spec.ts
@@ -0,0 +1,96 @@
+export const description = `
+This tests that overrides' numeric identifiers and names must not conflict.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { ValidationTest } from '../validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('id_conflict')
+ .desc(
+ `
+Tests that overrides' explicit numeric identifiers must not conflict.
+`
+ )
+ .fn(t => {
+ t.expectValidationError(() => {
+ t.device.createShaderModule({
+ code: `
+@id(1234) override c0: u32;
+@id(4321) override c1: u32;
+
+@compute @workgroup_size(1) fn main() {
+ // make sure the overridable constants are not optimized out
+ _ = c0;
+ _ = c1;
+}
+ `,
+ });
+ }, false);
+
+ t.expectValidationError(() => {
+ t.device.createShaderModule({
+ code: `
+@id(1234) override c0: u32;
+@id(1234) override c1: u32;
+
+@compute @workgroup_size(1) fn main() {
+ // make sure the overridable constants are not optimized out
+ _ = c0;
+ _ = c1;
+}
+ `,
+ });
+ }, true);
+ });
+
+g.test('name_conflict')
+ .desc(
+ `
+Tests that overrides' variable names must not conflict, regardless of their numeric identifiers.
+`
+ )
+ .fn(t => {
+ t.expectValidationError(() => {
+ t.device.createShaderModule({
+ code: `
+override c0: u32;
+override c0: u32;
+
+@compute @workgroup_size(1) fn main() {
+ // make sure the overridable constants are not optimized out
+ _ = c0;
+}
+ `,
+ });
+ }, true);
+
+ t.expectValidationError(() => {
+ t.device.createShaderModule({
+ code: `
+@id(1) override c0: u32;
+override c0: u32;
+
+@compute @workgroup_size(1) fn main() {
+ // make sure the overridable constants are not optimized out
+ _ = c0;
+}
+ `,
+ });
+ }, true);
+
+ t.expectValidationError(() => {
+ t.device.createShaderModule({
+ code: `
+@id(1) override c0: u32;
+@id(2) override c0: u32;
+
+@compute @workgroup_size(1) fn main() {
+ // make sure the overridable constants are not optimized out
+ _ = c0;
+}
+ `,
+ });
+ }, true);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/state/device_lost/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/state/device_lost/README.txt
new file mode 100644
index 0000000000..319cc76e5c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/state/device_lost/README.txt
@@ -0,0 +1,5 @@
+Tests of behavior while the device is lost.
+
+- x= every method in the API.
+
+TODO: implement
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/state/device_lost/destroy.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/state/device_lost/destroy.spec.ts
new file mode 100644
index 0000000000..df03427a0a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/state/device_lost/destroy.spec.ts
@@ -0,0 +1,1170 @@
+export const description = `
+Tests for device lost induced via destroy.
+ - Tests that prior to device destruction, valid APIs do not generate errors (control case).
+ - After device destruction, runs the same APIs. There are no expected observable results, so a test crash or future failures are the only current failure indicators.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { assert } from '../../../../../common/util/util.js';
+import {
+ allBindingEntries,
+ bindingTypeInfo,
+ kBindableResources,
+ kBufferUsageKeys,
+ kBufferUsageInfo,
+ kBufferUsageCopy,
+ kBufferUsageCopyInfo,
+ kQueryTypes,
+ kTextureUsageType,
+ kTextureUsageTypeInfo,
+ kTextureUsageCopy,
+ kTextureUsageCopyInfo,
+ kShaderStageKeys,
+} from '../../../../capability_info.js';
+import {
+ kCompressedTextureFormats,
+ kRegularTextureFormats,
+ kRenderableColorTextureFormats,
+ kTextureFormatInfo,
+} from '../../../../format_info.js';
+import { CommandBufferMaker, EncoderType } from '../../../../util/command_buffer_maker.js';
+import {
+ createCanvas,
+ kAllCanvasTypes,
+ kValidCanvasContextIds,
+} from '../../../../util/create_elements.js';
+import {
+ startPlayingAndWaitForVideo,
+ getVideoElement,
+ getVideoFrameFromVideoElement,
+} from '../../../../web_platform/util.js';
+import { ValidationTest } from '../../validation_test.js';
+
+const kCommandValidationStages = ['finish', 'submit'] as const;
+type CommandValidationStage = (typeof kCommandValidationStages)[number];
+
+class DeviceDestroyTests extends ValidationTest {
+ /**
+ * Expects that `fn` does not produce any errors before the device is destroyed, and then calls
+ * `fn` after the device is destroyed without any specific expectation. If `awaitLost` is true, we
+ * also wait for device.lost to resolve before executing `fn` in the destroy case.
+ */
+ async executeAfterDestroy(fn: () => void, awaitLost: boolean): Promise<void> {
+ this.expectDeviceLost('destroyed');
+
+ this.expectValidationError(fn, false);
+ this.device.destroy();
+ if (awaitLost) {
+ const lostInfo = await this.device.lost;
+ this.expect(lostInfo.reason === 'destroyed');
+ }
+ fn();
+ }
+
+ /**
+ * Expects that encoders can finish and submit the resulting commands before the device is
+ * destroyed, then repeats the same process after the device is destroyed without any specific
+ * expectations.
+ * There are two valid stages: 'finish' and 'submit'.
+   * 'finish': Tests [encode, finish] and [encode, destroy, finish]
+   * 'submit': Tests [encode, finish, submit] and [encode, finish, destroy, submit]
+ */
+ async executeCommandsAfterDestroy<T extends EncoderType>(
+ stage: CommandValidationStage,
+ awaitLost: boolean,
+ encoderType: T,
+ fn: (maker: CommandBufferMaker<T>) => CommandBufferMaker<T>
+ ): Promise<void> {
+ this.expectDeviceLost('destroyed');
+
+ switch (stage) {
+ case 'finish': {
+ // Control case
+ fn(this.createEncoder(encoderType)).validateFinish(true);
+ // Validation case
+ const encoder = fn(this.createEncoder(encoderType));
+ await this.executeAfterDestroy(() => {
+ encoder.finish();
+ }, awaitLost);
+ break;
+ }
+ case 'submit': {
+ // Control case
+ fn(this.createEncoder(encoderType)).validateFinishAndSubmit(true, true);
+ // Validation case
+ const commands = fn(this.createEncoder(encoderType)).validateFinish(true);
+ await this.executeAfterDestroy(() => {
+ this.queue.submit([commands]);
+ }, awaitLost);
+ break;
+ }
+ }
+ }
+}
+
+export const g = makeTestGroup(DeviceDestroyTests);
+
+g.test('createBuffer')
+ .desc(
+ `
+Tests creating buffers on destroyed device. Tests valid combinations of:
+ - Various usages
+ - Mapped at creation or not
+ `
+ )
+ .params(u =>
+ u
+ .combine('usageType', kBufferUsageKeys)
+
+ .combine('usageCopy', kBufferUsageCopy)
+ .combine('awaitLost', [true, false])
+ .filter(({ usageType, usageCopy }) => {
+ if (usageType === 'COPY_SRC' || usageType === 'COPY_DST') {
+ return false;
+ }
+ if (usageType === 'MAP_READ') {
+ return usageCopy === 'COPY_NONE' || usageCopy === 'COPY_DST';
+ }
+ if (usageType === 'MAP_WRITE') {
+ return usageCopy === 'COPY_NONE' || usageCopy === 'COPY_SRC';
+ }
+ return true;
+ })
+ .combine('mappedAtCreation', [true, false])
+ )
+ .fn(async t => {
+ const { awaitLost, usageType, usageCopy, mappedAtCreation } = t.params;
+ await t.executeAfterDestroy(() => {
+ t.device.createBuffer({
+ size: 16,
+ usage: kBufferUsageInfo[usageType] | kBufferUsageCopyInfo[usageCopy],
+ mappedAtCreation,
+ });
+ }, awaitLost);
+ });
+
+g.test('createTexture,2d,uncompressed_format')
+ .desc(
+ `
+Tests creating 2d uncompressed textures on destroyed device. Tests valid combinations of:
+ - Various uncompressed texture formats
+ - Various usages
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kRegularTextureFormats)
+
+ .combine('usageType', kTextureUsageType)
+ .combine('usageCopy', kTextureUsageCopy)
+ .combine('awaitLost', [true, false])
+ .filter(({ format, usageType }) => {
+ const info = kTextureFormatInfo[format];
+ return !(
+ (!info.colorRender && usageType === 'render') ||
+ (!info.color.storage && usageType === 'storage')
+ );
+ })
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ })
+ .fn(async t => {
+ const { awaitLost, format, usageType, usageCopy } = t.params;
+ const { blockWidth, blockHeight } = kTextureFormatInfo[format];
+ await t.executeAfterDestroy(() => {
+ t.device.createTexture({
+ size: { width: blockWidth, height: blockHeight },
+ usage: kTextureUsageTypeInfo[usageType] | kTextureUsageCopyInfo[usageCopy],
+ format,
+ });
+ }, awaitLost);
+ });
+
+g.test('createTexture,2d,compressed_format')
+ .desc(
+ `
+Tests creating 2d compressed textures on destroyed device. Tests valid combinations of:
+ - Various compressed texture formats
+ - Various usages
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kCompressedTextureFormats)
+
+ .combine('usageType', kTextureUsageType)
+ .combine('usageCopy', kTextureUsageCopy)
+ .combine('awaitLost', [true, false])
+ .filter(({ format, usageType }) => {
+ const info = kTextureFormatInfo[format];
+ return !(
+ (!info.colorRender && usageType === 'render') ||
+ (!info.color.storage && usageType === 'storage')
+ );
+ })
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ t.selectDeviceOrSkipTestCase(kTextureFormatInfo[format].feature);
+ })
+ .fn(async t => {
+ const { awaitLost, format, usageType, usageCopy } = t.params;
+ const { blockWidth, blockHeight } = kTextureFormatInfo[format];
+ await t.executeAfterDestroy(() => {
+ t.device.createTexture({
+ size: { width: blockWidth, height: blockHeight },
+ usage: kTextureUsageTypeInfo[usageType] | kTextureUsageCopyInfo[usageCopy],
+ format,
+ });
+ }, awaitLost);
+ });
+
+g.test('createView,2d,uncompressed_format')
+ .desc(
+ `
+Tests creating texture views on 2d uncompressed textures from destroyed device. Tests valid combinations of:
+ - Various uncompressed texture formats
+ - Various usages
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kRegularTextureFormats)
+
+ .combine('usageType', kTextureUsageType)
+ .combine('usageCopy', kTextureUsageCopy)
+ .combine('awaitLost', [true, false])
+ .filter(({ format, usageType }) => {
+ const info = kTextureFormatInfo[format];
+ return !(
+ (!info.colorRender && usageType === 'render') ||
+ (!info.color.storage && usageType === 'storage')
+ );
+ })
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ })
+ .fn(async t => {
+ const { awaitLost, format, usageType, usageCopy } = t.params;
+ const { blockWidth, blockHeight } = kTextureFormatInfo[format];
+ const texture = t.device.createTexture({
+ size: { width: blockWidth, height: blockHeight },
+ usage: kTextureUsageTypeInfo[usageType] | kTextureUsageCopyInfo[usageCopy],
+ format,
+ });
+ await t.executeAfterDestroy(() => {
+ texture.createView({ format });
+ }, awaitLost);
+ });
+
+g.test('createView,2d,compressed_format')
+ .desc(
+ `
+Tests creating texture views on 2d compressed textures from destroyed device. Tests valid combinations of:
+ - Various compressed texture formats
+ - Various usages
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kCompressedTextureFormats)
+
+ .combine('usageType', kTextureUsageType)
+ .combine('usageCopy', kTextureUsageCopy)
+ .combine('awaitLost', [true, false])
+ .filter(({ format, usageType }) => {
+ const info = kTextureFormatInfo[format];
+ return !(
+ (!info.colorRender && usageType === 'render') ||
+ (!info.color.storage && usageType === 'storage')
+ );
+ })
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ t.selectDeviceOrSkipTestCase(kTextureFormatInfo[format].feature);
+ })
+ .fn(async t => {
+ const { awaitLost, format, usageType, usageCopy } = t.params;
+ const { blockWidth, blockHeight } = kTextureFormatInfo[format];
+ const texture = t.device.createTexture({
+ size: { width: blockWidth, height: blockHeight },
+ usage: kTextureUsageTypeInfo[usageType] | kTextureUsageCopyInfo[usageCopy],
+ format,
+ });
+ await t.executeAfterDestroy(() => {
+ texture.createView({ format });
+ }, awaitLost);
+ });
+
+g.test('createSampler')
+ .desc(
+ `
+Tests creating samplers on destroyed device.
+ `
+ )
+ .params(u => u.combine('awaitLost', [true, false]))
+ .fn(async t => {
+ const { awaitLost } = t.params;
+ await t.executeAfterDestroy(() => {
+ t.device.createSampler();
+ }, awaitLost);
+ });
+
+g.test('createBindGroupLayout')
+ .desc(
+ `
+Tests creating bind group layouts on destroyed device. Tests valid combinations of:
+ - Various valid binding entries
+ - Maximum set of visibility for each binding entry
+ `
+ )
+ .params(u => u.combine('entry', allBindingEntries(false)).combine('awaitLost', [true, false]))
+ .fn(async t => {
+ const { awaitLost, entry } = t.params;
+ const visibility = bindingTypeInfo(entry).validStages;
+ await t.executeAfterDestroy(() => {
+ t.device.createBindGroupLayout({
+ entries: [{ binding: 0, visibility, ...entry }],
+ });
+ }, awaitLost);
+ });
+
+g.test('createBindGroup')
+ .desc(
+ `
+Tests creating bind group on destroyed device. Tests valid combinations of:
+ - Various bound resource types
+ - Various valid binding entries
+ - Maximum set of visibility for each binding entry
+ `
+ )
+ .params(u =>
+ u
+ .combine('resourceType', kBindableResources)
+ .combine('entry', allBindingEntries(false))
+ .filter(({ resourceType, entry }) => {
+ const info = bindingTypeInfo(entry);
+ switch (info.resource) {
+ // Either type of sampler may be bound to a filtering sampler binding.
+ case 'filtSamp':
+ return resourceType === 'filtSamp' || resourceType === 'nonFiltSamp';
+ // But only non-filtering samplers can be used with non-filtering sampler bindings.
+ case 'nonFiltSamp':
+ return resourceType === 'nonFiltSamp';
+ default:
+ return info.resource === resourceType;
+ }
+ })
+
+ .combine('awaitLost', [true, false])
+ )
+ .fn(async t => {
+ const { awaitLost, resourceType, entry } = t.params;
+ const visibility = bindingTypeInfo(entry).validStages;
+ const layout = t.device.createBindGroupLayout({
+ entries: [{ binding: 0, visibility, ...entry }],
+ });
+ const resource = t.getBindingResource(resourceType);
+ await t.executeAfterDestroy(() => {
+ t.device.createBindGroup({ layout, entries: [{ binding: 0, resource }] });
+ }, awaitLost);
+ });
+
+g.test('createPipelineLayout')
+ .desc(
+ `
+Tests creating pipeline layouts on destroyed device. Tests valid combinations of:
+ - Various bind groups with valid binding entries
+ - Maximum set of visibility for each binding entry
+ `
+ )
+ .params(u => u.combine('entry', allBindingEntries(false)).combine('awaitLost', [true, false]))
+ .fn(async t => {
+ const { awaitLost, entry } = t.params;
+ const visibility = bindingTypeInfo(entry).validStages;
+ const bindGroupLayout = t.device.createBindGroupLayout({
+ entries: [{ binding: 0, visibility, ...entry }],
+ });
+ await t.executeAfterDestroy(() => {
+ t.device.createPipelineLayout({
+ bindGroupLayouts: [bindGroupLayout],
+ });
+ }, awaitLost);
+ });
+
+g.test('createShaderModule')
+ .desc(
+ `
+Tests creating shader modules on destroyed device.
+ - Tests all shader stages: vertex, fragment, compute
+ `
+ )
+ .params(u => u.combine('stage', kShaderStageKeys).combine('awaitLost', [true, false]))
+ .fn(async t => {
+ const { awaitLost, stage } = t.params;
+ await t.executeAfterDestroy(() => {
+ t.device.createShaderModule({ code: t.getNoOpShaderCode(stage) });
+ }, awaitLost);
+ });
+
+g.test('createComputePipeline')
+ .desc(
+ `
+Tests creating compute pipeline on destroyed device.
+ - Tests with a valid no-op compute shader
+ `
+ )
+ .params(u => u.combine('awaitLost', [true, false]))
+ .fn(async t => {
+ const { awaitLost } = t.params;
+ const cShader = t.device.createShaderModule({ code: t.getNoOpShaderCode('COMPUTE') });
+ await t.executeAfterDestroy(() => {
+ t.device.createComputePipeline({
+ layout: 'auto',
+ compute: { module: cShader, entryPoint: 'main' },
+ });
+ }, awaitLost);
+ });
+
+g.test('createRenderPipeline')
+ .desc(
+ `
+Tests creating render pipeline on destroyed device.
+ - Tests with valid no-op vertex and fragment shaders
+ `
+ )
+ .params(u => u.combine('awaitLost', [true, false]))
+ .fn(async t => {
+ const { awaitLost } = t.params;
+ const vShader = t.device.createShaderModule({ code: t.getNoOpShaderCode('VERTEX') });
+ const fShader = t.device.createShaderModule({ code: t.getNoOpShaderCode('FRAGMENT') });
+ await t.executeAfterDestroy(() => {
+ t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: { module: vShader, entryPoint: 'main' },
+ fragment: {
+ module: fShader,
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm', writeMask: 0 }],
+ },
+ });
+ }, awaitLost);
+ });
+
+g.test('createComputePipelineAsync')
+ .desc(
+ `
+Tests creating a pipeline asynchronously while destroying the device and on a destroyed device
+- valid={true, false}, use an invalid or valid pipeline descriptor
+- awaitLost={true, false}, check results before/after waiting for the device lost promise
+ `
+ )
+ .params(u => u.combine('valid', [true, false]).combine('awaitLost', [true, false]))
+ .fn(async t => {
+ const { valid, awaitLost } = t.params;
+ const cShader = t.device.createShaderModule({ code: t.getNoOpShaderCode('COMPUTE') });
+ const fn = () =>
+ t.device.createComputePipelineAsync({
+ layout: 'auto',
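+        // 'does_not_exist' is not an entry point in the module, making the descriptor invalid.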
+ compute: { module: cShader, entryPoint: valid ? 'main' : 'does_not_exist' },
+ });
+
+ // Kick off async creation
+ const p = fn();
+
+ // Track whether or not the device is lost.
+ let isLost = false;
+ void t.device.lost.then(() => {
+ isLost = true;
+ });
+
+ if (valid) {
+ // The async creation should resolve successfully.
+ t.shouldResolve(
+ (async () => {
+ const pipeline = await p;
+ assert(pipeline instanceof GPUComputePipeline, 'Pipeline was not a GPUComputePipeline');
+ })()
+ );
+ } else {
+ // The async creation should resolve successfully if the device is lost.
+ // If the device is not lost, it should see a validation error.
+ // Note: this could be a race!
+ t.shouldResolve(
+ p.then(
+ pipeline => {
+ assert(
+ isLost,
+ 'Invalid async creation should "succeed" if the device is already lost.'
+ );
+ assert(pipeline instanceof GPUComputePipeline, 'Pipeline was not a GPUComputePipeline');
+ },
+ err => {
+ assert(
+ !isLost,
+ 'Invalid async creation should only fail if the device is not yet lost.'
+ );
+ assert(err instanceof GPUPipelineError, 'Error was not a GPUPipelineError');
+ assert(err.reason === 'validation', 'Expected validation error');
+ }
+ )
+ );
+ }
+
+ // Destroy the device, and expect it to be lost.
+ t.expectDeviceLost('destroyed');
+ t.device.destroy();
+ if (awaitLost) {
+ const lostInfo = await t.device.lost;
+ t.expect(lostInfo.reason === 'destroyed');
+ }
+
+ // After device destroy, creation should still resolve successfully.
+ t.shouldResolve(
+ (async () => {
+ const pipeline = await fn();
+ assert(pipeline instanceof GPUComputePipeline, 'Pipeline was not a GPUComputePipeline');
+ })()
+ );
+ });
+
+g.test('createRenderPipelineAsync')
+ .desc(
+ `
+Tests creating a pipeline asynchronously while destroying the device and on a destroyed device
+- valid={true, false}, use an invalid or valid pipeline descriptor
+- awaitLost={true, false}, check results before/after waiting for the device lost promise
+ `
+ )
+ .params(u => u.combine('valid', [true, false]).combine('awaitLost', [true, false]))
+ .fn(async t => {
+ const { valid, awaitLost } = t.params;
+ const vShader = t.device.createShaderModule({ code: t.getNoOpShaderCode('VERTEX') });
+ const fShader = t.device.createShaderModule({ code: t.getNoOpShaderCode('FRAGMENT') });
+ const fn = () =>
+ t.device.createRenderPipelineAsync({
+ layout: 'auto',
+ vertex: { module: vShader, entryPoint: 'main' },
+ fragment: {
+ module: fShader,
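+          // 'does_not_exist' is not an entry point in the module, making the descriptor invalid.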
+ entryPoint: valid ? 'main' : 'does_not_exist',
+ targets: [{ format: 'rgba8unorm', writeMask: 0 }],
+ },
+ });
+
+ // Kick off async creation
+ const p = fn();
+
+ // Track whether or not the device is lost.
+ let isLost = false;
+ void t.device.lost.then(() => {
+ isLost = true;
+ });
+
+ if (valid) {
+ // The async creation should resolve successfully.
+ t.shouldResolve(
+ (async () => {
+ const pipeline = await p;
+ assert(pipeline instanceof GPURenderPipeline, 'Pipeline was not a GPURenderPipeline');
+ })()
+ );
+ } else {
+ // The async creation should resolve successfully if the device is lost.
+ // If the device is not lost, it should see a validation error.
+ // Note: this could be a race!
+ t.shouldResolve(
+ p.then(
+ pipeline => {
+ assert(
+ isLost,
+ 'Invalid async creation should "succeed" if the device is already lost.'
+ );
+ assert(pipeline instanceof GPURenderPipeline, 'Pipeline was not a GPURenderPipeline');
+ },
+ err => {
+ assert(
+ !isLost,
+ 'Invalid async creation should only fail if the device is not yet lost.'
+ );
+ assert(err instanceof GPUPipelineError, 'Error was not a GPUPipelineError');
+ assert(err.reason === 'validation', 'Expected validation error');
+ }
+ )
+ );
+ }
+
+ // Destroy the device, and expect it to be lost.
+ t.expectDeviceLost('destroyed');
+ t.device.destroy();
+ if (awaitLost) {
+ const lostInfo = await t.device.lost;
+ t.expect(lostInfo.reason === 'destroyed');
+ }
+
+ // After device destroy, creation should still resolve successfully.
+ t.shouldResolve(
+ (async () => {
+ const pipeline = await fn();
+ assert(pipeline instanceof GPURenderPipeline, 'Pipeline was not a GPURenderPipeline');
+ })()
+ );
+ });
+
+g.test('createCommandEncoder')
+ .desc(
+ `
+Tests creating command encoders on destroyed device.
+ `
+ )
+ .params(u => u.combine('awaitLost', [true, false]))
+ .fn(async t => {
+ const { awaitLost } = t.params;
+ await t.executeAfterDestroy(() => {
+ t.device.createCommandEncoder();
+ }, awaitLost);
+ });
+
+g.test('createRenderBundleEncoder')
+ .desc(
+ `
+Tests creating render bundle encoders on destroyed device.
+ - Tests various renderable texture color formats
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kRenderableColorTextureFormats)
+
+ .combine('awaitLost', [true, false])
+ )
+ .fn(async t => {
+ const { awaitLost, format } = t.params;
+ await t.executeAfterDestroy(() => {
+ t.device.createRenderBundleEncoder({ colorFormats: [format] });
+ }, awaitLost);
+ });
+
+g.test('createQuerySet')
+ .desc(
+ `
+Tests creating query sets on destroyed device.
+ - Tests various query set types
+ `
+ )
+ .params(u => u.combine('type', kQueryTypes).combine('awaitLost', [true, false]))
+ .beforeAllSubcases(t => {
+ const { type } = t.params;
+ t.selectDeviceForQueryTypeOrSkipTestCase(type);
+ })
+ .fn(async t => {
+ const { awaitLost, type } = t.params;
+ await t.executeAfterDestroy(() => {
+ t.device.createQuerySet({ type, count: 4 });
+ }, awaitLost);
+ });
+
+g.test('importExternalTexture')
+ .desc(
+ `
+Tests importExternalTexture on a destroyed device. Tests valid combinations of:
+  - Various valid source types
+ `
+ )
+ .params(u =>
+ u
+ .combine('sourceType', ['VideoElement', 'VideoFrame'] as const)
+
+ .combine('awaitLost', [true, false])
+ )
+ .fn(async t => {
+ const { awaitLost, sourceType } = t.params;
+
+ const videoElement = getVideoElement(t, 'four-colors-vp9-bt601.webm');
+ if (!('requestVideoFrameCallback' in videoElement)) {
+ t.skip('HTMLVideoElement.requestVideoFrameCallback is not supported');
+ }
+
+ let source: HTMLVideoElement | VideoFrame;
+ await startPlayingAndWaitForVideo(videoElement, async () => {
+ source =
+ sourceType === 'VideoFrame'
+ ? await getVideoFrameFromVideoElement(t, videoElement)
+ : videoElement;
+
+ await t.executeAfterDestroy(() => {
+ t.device.createBindGroup({
+ layout: t.device.createBindGroupLayout({
+ entries: [{ binding: 0, visibility: GPUShaderStage.FRAGMENT, externalTexture: {} }],
+ }),
+ entries: [
+ {
+ binding: 0,
+ resource: t.device.importExternalTexture({
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ source: source as any,
+ }),
+ },
+ ],
+ });
+ }, awaitLost);
+ });
+ });
+
+g.test('command,copyBufferToBuffer')
+ .desc(
+ `
+Tests copyBufferToBuffer command on destroyed device.
+  - Tests finishing encoding on destroyed device
+  - Tests submitting command on destroyed device
+ `
+ )
+ .params(u => u.combine('stage', kCommandValidationStages).combine('awaitLost', [true, false]))
+ .fn(async t => {
+ const { stage, awaitLost } = t.params;
+ const kBufferSize = 16;
+ const src = t.device.createBuffer({
+ size: kBufferSize,
+ usage: GPUBufferUsage.COPY_SRC,
+ });
+ const dst = t.device.createBuffer({
+ size: kBufferSize,
+ usage: GPUBufferUsage.COPY_DST,
+ });
+ await t.executeCommandsAfterDestroy(stage, awaitLost, 'non-pass', maker => {
+ maker.encoder.copyBufferToBuffer(src, 0, dst, 0, kBufferSize);
+ return maker;
+ });
+ });
+
+g.test('command,copyBufferToTexture')
+ .desc(
+ `
+Tests copyBufferToTexture command on destroyed device.
+ - Tests finishing encoding on destroyed device
+ - Tests submitting command on destroyed device
+ `
+ )
+ .params(u => u.combine('stage', kCommandValidationStages).combine('awaitLost', [true, false]))
+ .fn(async t => {
+ const { stage, awaitLost } = t.params;
+ const format = 'rgba32uint';
+ const {
+ color: { bytes: bytesPerBlock },
+ blockWidth,
+ blockHeight,
+ } = kTextureFormatInfo[format];
+ const src = {
+ buffer: t.device.createBuffer({
+ size: bytesPerBlock,
+ usage: GPUBufferUsage.COPY_SRC,
+ }),
+ };
+ const dst = {
+ texture: t.device.createTexture({
+ size: { width: blockWidth, height: blockHeight },
+ usage: GPUTextureUsage.COPY_DST,
+ format,
+ }),
+ };
+ const copySize = { width: blockWidth, height: blockHeight };
+ await t.executeCommandsAfterDestroy(stage, awaitLost, 'non-pass', maker => {
+ maker.encoder.copyBufferToTexture(src, dst, copySize);
+ return maker;
+ });
+ });
+
+g.test('command,copyTextureToBuffer')
+ .desc(
+ `
+Tests copyTextureToBuffer command on destroyed device.
+ - Tests finishing encoding on destroyed device
+ - Tests submitting command on destroyed device
+ `
+ )
+ .params(u => u.combine('stage', kCommandValidationStages).combine('awaitLost', [true, false]))
+ .fn(async t => {
+ const { stage, awaitLost } = t.params;
+ const format = 'rgba32uint';
+ const {
+ color: { bytes: bytesPerBlock },
+ blockWidth,
+ blockHeight,
+ } = kTextureFormatInfo[format];
+ const src = {
+ texture: t.device.createTexture({
+ size: { width: blockWidth, height: blockHeight },
+ usage: GPUTextureUsage.COPY_SRC,
+ format,
+ }),
+ };
+ const dst = {
+ buffer: t.device.createBuffer({
+ size: bytesPerBlock,
+ usage: GPUBufferUsage.COPY_DST,
+ }),
+ };
+ const copySize = { width: blockWidth, height: blockHeight };
+ await t.executeCommandsAfterDestroy(stage, awaitLost, 'non-pass', maker => {
+ maker.encoder.copyTextureToBuffer(src, dst, copySize);
+ return maker;
+ });
+ });
+
+g.test('command,copyTextureToTexture')
+ .desc(
+ `
+Tests copyTextureToTexture command on destroyed device.
+ - Tests finishing encoding on destroyed device
+ - Tests submitting command on destroyed device
+ `
+ )
+ .params(u => u.combine('stage', kCommandValidationStages).combine('awaitLost', [true, false]))
+ .fn(async t => {
+ const { stage, awaitLost } = t.params;
+ const format = 'rgba32uint';
+ const { blockWidth, blockHeight } = kTextureFormatInfo[format];
+ const src = {
+ texture: t.device.createTexture({
+ size: { width: blockWidth, height: blockHeight },
+ usage: GPUTextureUsage.COPY_SRC,
+ format,
+ }),
+ };
+ const dst = {
+ texture: t.device.createTexture({
+ size: { width: blockWidth, height: blockHeight },
+      usage: GPUTextureUsage.COPY_DST,
+ format,
+ }),
+ };
+ const copySize = { width: blockWidth, height: blockHeight };
+ await t.executeCommandsAfterDestroy(stage, awaitLost, 'non-pass', maker => {
+ maker.encoder.copyTextureToTexture(src, dst, copySize);
+ return maker;
+ });
+ });
+
+g.test('command,clearBuffer')
+ .desc(
+ `
+Tests encoding and finishing a clearBuffer command on destroyed device.
+ - Tests finishing encoding on destroyed device
+ - Tests submitting command on destroyed device
+ `
+ )
+ .params(u => u.combine('stage', kCommandValidationStages).combine('awaitLost', [true, false]))
+ .fn(async t => {
+ const { stage, awaitLost } = t.params;
+ const kBufferSize = 16;
+ const buffer = t.device.createBuffer({
+ size: kBufferSize,
+ usage: GPUBufferUsage.COPY_SRC,
+ });
+ await t.executeCommandsAfterDestroy(stage, awaitLost, 'non-pass', maker => {
+ maker.encoder.clearBuffer(buffer, 0, kBufferSize);
+ return maker;
+ });
+ });
+
+g.test('command,writeTimestamp')
+ .desc(
+ `
+Tests encoding and finishing a writeTimestamp command on destroyed device.
+ - Tests finishing encoding on destroyed device
+ - Tests submitting command on destroyed device
+ `
+ )
+ .params(u =>
+ u
+ .combine('type', kQueryTypes)
+
+ .combine('stage', kCommandValidationStages)
+ .combine('awaitLost', [true, false])
+ )
+ .beforeAllSubcases(t => {
+ const { type } = t.params;
+
+ // writeTimestamp is only available for devices that enable the 'timestamp-query' feature.
+ const queryTypes: GPUQueryType[] = ['timestamp'];
+ if (type !== 'timestamp') {
+ queryTypes.push(type);
+ }
+
+ t.selectDeviceForQueryTypeOrSkipTestCase(queryTypes);
+ })
+ .fn(async t => {
+ const { type, stage, awaitLost } = t.params;
+ const querySet = t.device.createQuerySet({ type, count: 2 });
+ await t.executeCommandsAfterDestroy(stage, awaitLost, 'non-pass', maker => {
+ maker.encoder.writeTimestamp(querySet, 0);
+ return maker;
+ });
+ });
+
+g.test('command,resolveQuerySet')
+ .desc(
+ `
+Tests encoding and finishing a resolveQuerySet command on destroyed device.
+ - Tests finishing encoding on destroyed device
+ - Tests submitting command on destroyed device
+ `
+ )
+ .params(u => u.combine('stage', kCommandValidationStages).combine('awaitLost', [true, false]))
+ .fn(async t => {
+ const { stage, awaitLost } = t.params;
+ const kQueryCount = 2;
+ const querySet = t.createQuerySetWithState('valid');
+ const destination = t.createBufferWithState('valid', {
+ size: kQueryCount * 8,
+ usage: GPUBufferUsage.QUERY_RESOLVE,
+ });
+ await t.executeCommandsAfterDestroy(stage, awaitLost, 'non-pass', maker => {
+ maker.encoder.resolveQuerySet(querySet, 0, 1, destination, 0);
+ return maker;
+ });
+ });
+
+g.test('command,computePass,dispatch')
+ .desc(
+ `
+Tests encoding and dispatching a simple valid compute pass on destroyed device.
+  - Binds a valid pipeline, then dispatches
+ - Tests finishing encoding on destroyed device
+ - Tests submitting command on destroyed device
+ `
+ )
+ .params(u => u.combine('stage', kCommandValidationStages).combine('awaitLost', [true, false]))
+ .fn(async t => {
+ const { stage, awaitLost } = t.params;
+ const cShader = t.device.createShaderModule({ code: t.getNoOpShaderCode('COMPUTE') });
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: { module: cShader, entryPoint: 'main' },
+ });
+ await t.executeCommandsAfterDestroy(stage, awaitLost, 'compute pass', maker => {
+ maker.encoder.setPipeline(pipeline);
+ maker.encoder.dispatchWorkgroups(1);
+ return maker;
+ });
+ });
+
+g.test('command,renderPass,draw')
+ .desc(
+ `
+Tests encoding and finishing a simple valid render pass on destroyed device.
+  - Binds a valid pipeline, then draws
+ - Tests finishing encoding on destroyed device
+ - Tests submitting command on destroyed device
+ `
+ )
+ .params(u => u.combine('stage', kCommandValidationStages).combine('awaitLost', [true, false]))
+ .fn(async t => {
+ const { stage, awaitLost } = t.params;
+ const vShader = t.device.createShaderModule({ code: t.getNoOpShaderCode('VERTEX') });
+ const fShader = t.device.createShaderModule({ code: t.getNoOpShaderCode('FRAGMENT') });
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: { module: vShader, entryPoint: 'main' },
+ fragment: {
+ module: fShader,
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm', writeMask: 0 }],
+ },
+ });
+ await t.executeCommandsAfterDestroy(stage, awaitLost, 'render pass', maker => {
+ maker.encoder.setPipeline(pipeline);
+ maker.encoder.draw(0);
+ return maker;
+ });
+ });
+
+g.test('command,renderPass,renderBundle')
+ .desc(
+ `
+Tests encoding and drawing a render pass including a render bundle on destroyed device.
+  - Binds a valid pipeline and records a draw into the render bundle
+ - Tests finishing encoding on destroyed device
+ - Tests submitting command on destroyed device
+ `
+ )
+ .params(u => u.combine('stage', kCommandValidationStages).combine('awaitLost', [true, false]))
+ .fn(async t => {
+ const { stage, awaitLost } = t.params;
+ const vShader = t.device.createShaderModule({ code: t.getNoOpShaderCode('VERTEX') });
+ const fShader = t.device.createShaderModule({ code: t.getNoOpShaderCode('FRAGMENT') });
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: { module: vShader, entryPoint: 'main' },
+ fragment: {
+ module: fShader,
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm', writeMask: 0 }],
+ },
+ });
+ await t.executeCommandsAfterDestroy(stage, awaitLost, 'render bundle', maker => {
+ maker.encoder.setPipeline(pipeline);
+ maker.encoder.draw(0);
+ return maker;
+ });
+ });
+
+g.test('queue,writeBuffer')
+ .desc(
+ `
+Tests queue.writeBuffer on a destroyed device.
+ `
+ )
+ .params(u => u.combine('numElements', [4, 8, 16]).combine('awaitLost', [true, false]))
+ .fn(async t => {
+ const { numElements, awaitLost } = t.params;
+ const buffer = t.device.createBuffer({
+ size: numElements,
+ usage: GPUBufferUsage.COPY_DST,
+ });
+ const data = new Uint8Array(numElements);
+ await t.executeAfterDestroy(() => {
+ t.device.queue.writeBuffer(buffer, 0, data);
+ }, awaitLost);
+ });
+
+g.test('queue,writeTexture,2d,uncompressed_format')
+ .desc(
+ `
+Tests queue.writeTexture on a destroyed device with uncompressed formats.
+ `
+ )
+ .params(u => u.combine('format', kRegularTextureFormats).combine('awaitLost', [true, false]))
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ })
+ .fn(async t => {
+ const { format, awaitLost } = t.params;
+ const {
+ blockWidth,
+ blockHeight,
+ color: { bytes: bytesPerBlock },
+ } = kTextureFormatInfo[format];
+ const data = new Uint8Array(bytesPerBlock);
+ const texture = t.device.createTexture({
+ size: { width: blockWidth, height: blockHeight },
+ usage: GPUTextureUsage.COPY_DST,
+ format,
+ });
+ await t.executeAfterDestroy(() => {
+ t.device.queue.writeTexture(
+ { texture },
+ data,
+ {},
+ { width: blockWidth, height: blockHeight }
+ );
+ }, awaitLost);
+ });
+
+g.test('queue,writeTexture,2d,compressed_format')
+ .desc(
+ `
+Tests queue.writeTexture on a destroyed device with compressed formats.
+ `
+ )
+ .params(u =>
+ u
+ .combine('format', kCompressedTextureFormats)
+
+ .combine('awaitLost', [true, false])
+ )
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ t.selectDeviceOrSkipTestCase(kTextureFormatInfo[format].feature);
+ })
+ .fn(async t => {
+ const { format, awaitLost } = t.params;
+ const {
+ blockWidth,
+ blockHeight,
+ color: { bytes: bytesPerBlock },
+ } = kTextureFormatInfo[format];
+ const data = new Uint8Array(bytesPerBlock);
+ const texture = t.device.createTexture({
+ size: { width: blockWidth, height: blockHeight },
+ usage: GPUTextureUsage.COPY_DST,
+ format,
+ });
+ await t.executeAfterDestroy(() => {
+ t.device.queue.writeTexture(
+ { texture },
+ data,
+ {},
+ { width: blockWidth, height: blockHeight }
+ );
+ }, awaitLost);
+ });
+
+g.test('queue,copyExternalImageToTexture,canvas')
+ .desc(
+ `
+Tests queue.copyExternalImageToTexture from a canvas on a destroyed device.
+ `
+ )
+ .params(u =>
+ u
+ .combine('canvasType', kAllCanvasTypes)
+ .combine('contextType', kValidCanvasContextIds)
+
+ .combine('awaitLost', [true, false])
+ )
+ .fn(async t => {
+ const { canvasType, contextType, awaitLost } = t.params;
+ const canvas = createCanvas(t, canvasType, 1, 1);
+ const texture = t.device.createTexture({
+ size: { width: 1, height: 1 },
+ format: 'bgra8unorm',
+ usage: GPUTextureUsage.COPY_DST,
+ });
+
+ const ctx = (canvas as unknown as HTMLCanvasElement).getContext(contextType);
+ if (ctx === null) {
+ t.skip('Failed to get context for canvas element');
+ return;
+ }
+ t.tryTrackForCleanup(ctx);
+
+ await t.executeAfterDestroy(() => {
+ t.device.queue.copyExternalImageToTexture(
+ { source: canvas },
+ { texture },
+ { width: 1, height: 1 }
+ );
+ }, awaitLost);
+ });
+
+g.test('queue,copyExternalImageToTexture,imageBitmap')
+ .desc(
+ `
+Tests queue.copyExternalImageToTexture from an ImageBitmap on a destroyed device.
+ `
+ )
+ .params(u => u.combine('awaitLost', [true, false]))
+ .fn(async t => {
+ const { awaitLost } = t.params;
+ if (typeof createImageBitmap === 'undefined') {
+ t.skip('Creating ImageBitmaps is not supported.');
+ }
+ const imageBitmap = await createImageBitmap(new ImageData(new Uint8ClampedArray(4), 1, 1));
+
+ const texture = t.device.createTexture({
+ size: { width: 1, height: 1 },
+ format: 'bgra8unorm',
+ usage: GPUTextureUsage.COPY_DST,
+ });
+
+ await t.executeAfterDestroy(() => {
+ t.device.queue.copyExternalImageToTexture(
+ { source: imageBitmap },
+ { texture },
+ { width: 1, height: 1 }
+ );
+ }, awaitLost);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/texture/bgra8unorm_storage.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/texture/bgra8unorm_storage.spec.ts
new file mode 100644
index 0000000000..80872fd5d3
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/texture/bgra8unorm_storage.spec.ts
@@ -0,0 +1,205 @@
+export const description = `
+Tests for capabilities added by bgra8unorm-storage flag.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { assert } from '../../../../common/util/util.js';
+import { kTextureUsages } from '../../../capability_info.js';
+import { GPUConst } from '../../../constants.js';
+import { kAllCanvasTypes, createCanvas } from '../../../util/create_elements.js';
+import { ValidationTest } from '../validation_test.js';
+
+class BGRA8UnormStorageValidationTests extends ValidationTest {
+ testCreateShaderModuleWithBGRA8UnormStorage(
+ shaderType: 'fragment' | 'compute',
+ success: boolean
+ ): void {
+ let shaderDeclaration = '';
+ switch (shaderType) {
+ case 'fragment':
+ shaderDeclaration = '@fragment';
+ break;
+ case 'compute':
+ shaderDeclaration = '@compute @workgroup_size(1)';
+ break;
+ }
+ this.expectValidationError(() => {
+ this.device.createShaderModule({
+ code: `
+ @group(0) @binding(1) var outputTex: texture_storage_2d<bgra8unorm, write>;
+ ${shaderDeclaration} fn main() {
+ textureStore(outputTex, vec2<i32>(), vec4<f32>());
+ }
+ `,
+ });
+ }, !success);
+ }
+}
+
+export const g = makeTestGroup(BGRA8UnormStorageValidationTests);
+
+g.test('create_texture')
+ .desc(
+ `
+Test that it is valid to create a bgra8unorm texture with STORAGE_BINDING usage iff the feature
+bgra8unorm-storage is enabled. Note, the createTexture test suite covers the validation cases where
+this feature is not enabled, which are skipped here.
+`
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('bgra8unorm-storage');
+ })
+ .fn(t => {
+ const descriptor = {
+ size: [1, 1, 1],
+ format: 'bgra8unorm' as const,
+      usage: GPUConst.TextureUsage.STORAGE_BINDING,
+ };
+ t.device.createTexture(descriptor);
+ });
+
+g.test('create_bind_group_layout')
+ .desc(
+ `
+Test that it is valid to create GPUBindGroupLayout that uses bgra8unorm as storage texture format
+iff the feature bgra8unorm-storage is enabled. Note, the createBindGroupLayout test suite covers the
+validation cases where this feature is not enabled, which are skipped here.
+`
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('bgra8unorm-storage');
+ })
+ .fn(t => {
+ t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE,
+ storageTexture: { format: 'bgra8unorm' },
+ },
+ ],
+ });
+ });
+
+g.test('create_shader_module_with_bgra8unorm_storage')
+ .desc(
+ `
+Test that it is valid to declare the format of a storage texture as bgra8unorm in a shader module if
+the feature bgra8unorm-storage is enabled.
+`
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('bgra8unorm-storage');
+ })
+ .params(u => u.combine('shaderType', ['fragment', 'compute'] as const))
+ .fn(t => {
+ const { shaderType } = t.params;
+
+ t.testCreateShaderModuleWithBGRA8UnormStorage(shaderType, true);
+ });
+
+g.test('create_shader_module_without_bgra8unorm_storage')
+ .desc(
+ `
+Test that it is invalid to declare the format of a storage texture as bgra8unorm in a shader module
+if the feature bgra8unorm-storage is not enabled.
+`
+ )
+ .params(u => u.combine('shaderType', ['fragment', 'compute'] as const))
+ .fn(t => {
+ const { shaderType } = t.params;
+
+ t.testCreateShaderModuleWithBGRA8UnormStorage(shaderType, false);
+ });
+
+g.test('configure_storage_usage_on_canvas_context_without_bgra8unorm_storage')
+ .desc(
+ `
+Test that it is invalid to configure a GPUCanvasContext with STORAGE_BINDING usage and
+'bgra8unorm' format on a GPUDevice with 'bgra8unorm-storage' disabled.
+`
+ )
+ .params(u =>
+ u
+ .combine('canvasType', kAllCanvasTypes)
+ .beginSubcases()
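+      // Enumerate all single usages and all pairwise ORs of usages, so STORAGE_BINDING appears
+      // both alone and combined with other usages.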
+ .expand('usage', () => {
+ const usageSet = new Set<number>();
+ for (const usage0 of kTextureUsages) {
+ for (const usage1 of kTextureUsages) {
+ usageSet.add(usage0 | usage1);
+ }
+ }
+ return usageSet;
+ })
+ )
+ .fn(t => {
+ const { canvasType, usage } = t.params;
+ const canvas = createCanvas(t, canvasType, 1, 1);
+ const ctx = canvas.getContext('webgpu');
+ assert(ctx instanceof GPUCanvasContext, 'Failed to get WebGPU context from canvas');
+
+ const requiredStorageBinding = !!(usage & GPUTextureUsage.STORAGE_BINDING);
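+    // Without bgra8unorm-storage, configuring the context must fail iff STORAGE_BINDING is requested.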
+ t.expectValidationError(() => {
+ ctx.configure({
+ device: t.device,
+ format: 'bgra8unorm',
+ usage,
+ });
+ }, requiredStorageBinding);
+ });
+
+g.test('configure_storage_usage_on_canvas_context_with_bgra8unorm_storage')
+ .desc(
+ `
+Test that it is valid to configure a GPUCanvasContext with STORAGE_BINDING usage and a GPUDevice
+with 'bgra8unorm-storage' enabled.
+`
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('bgra8unorm-storage');
+ })
+ .params(u =>
+ u
+ .combine('canvasType', kAllCanvasTypes)
+ .beginSubcases()
+ .expand('usage', () => {
+ const usageSet = new Set<number>();
+ for (const usage of kTextureUsages) {
+ usageSet.add(usage | GPUConst.TextureUsage.STORAGE_BINDING);
+ }
+ return usageSet;
+ })
+ )
+ .fn(t => {
+ const { canvasType, usage } = t.params;
+ const canvas = createCanvas(t, canvasType, 1, 1);
+ const ctx = canvas.getContext('webgpu');
+ assert(ctx instanceof GPUCanvasContext, 'Failed to get WebGPU context from canvas');
+
+ ctx.configure({
+ device: t.device,
+ format: 'bgra8unorm',
+ usage,
+ });
+
+ const currentTexture = ctx.getCurrentTexture();
+ const bindGroupLayout = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.FRAGMENT,
+ storageTexture: { access: 'write-only', format: currentTexture.format },
+ },
+ ],
+ });
+ t.device.createBindGroup({
+ layout: bindGroupLayout,
+ entries: [
+ {
+ binding: 0,
+ resource: currentTexture.createView(),
+ },
+ ],
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/texture/destroy.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/texture/destroy.spec.ts
new file mode 100644
index 0000000000..10e6e89448
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/texture/destroy.spec.ts
@@ -0,0 +1,139 @@
+export const description = `
+Destroying a texture more than once is allowed.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { kTextureAspects } from '../../../capability_info.js';
+import { kTextureFormatInfo } from '../../../format_info.js';
+import { ValidationTest } from '../validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('base')
+ .desc(`Test that it is valid to destroy a texture.`)
+ .fn(t => {
+ const texture = t.getSampledTexture();
+ texture.destroy();
+ });
+
+g.test('twice')
+ .desc(`Test that it is valid to destroy a destroyed texture.`)
+ .fn(t => {
+ const texture = t.getSampledTexture();
+ texture.destroy();
+ texture.destroy();
+ });
+
+g.test('invalid_texture')
+ .desc('Test that invalid textures may be destroyed without generating validation errors.')
+ .fn(async t => {
+ t.device.pushErrorScope('validation');
+
+ const invalidTexture = t.device.createTexture({
+ size: [t.device.limits.maxTextureDimension2D + 1, 1, 1],
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ });
+
+ // Expect error because it's invalid.
+ const error = await t.device.popErrorScope();
+ t.expect(!!error);
+
+ // This line should not generate an error
+ invalidTexture.destroy();
+ });
+
+g.test('submit_a_destroyed_texture_as_attachment')
+ .desc(
+ `
+Test that it is invalid to submit with a texture as {color, depth, stencil, depth-stencil} attachment
+that was destroyed {before, after} encoding finishes.
+`
+ )
+ .params(u =>
+ u //
+ .combine('depthStencilTextureAspect', kTextureAspects)
+ .combine('colorTextureState', [
+ 'valid',
+ 'destroyedBeforeEncode',
+ 'destroyedAfterEncode',
+ ] as const)
+ .combine('depthStencilTextureState', [
+ 'valid',
+ 'destroyedBeforeEncode',
+ 'destroyedAfterEncode',
+ ] as const)
+ )
+ .fn(t => {
+ const { colorTextureState, depthStencilTextureAspect, depthStencilTextureState } = t.params;
+
+ const isSubmitSuccess = colorTextureState === 'valid' && depthStencilTextureState === 'valid';
+
+ const colorTextureFormat: GPUTextureFormat = 'rgba32float';
+ const depthStencilTextureFormat: GPUTextureFormat =
+ depthStencilTextureAspect === 'all'
+ ? 'depth24plus-stencil8'
+ : depthStencilTextureAspect === 'depth-only'
+ ? 'depth32float'
+ : 'stencil8';
+
+ const colorTextureDesc: GPUTextureDescriptor = {
+ size: { width: 16, height: 16, depthOrArrayLayers: 1 },
+ format: colorTextureFormat,
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
+ };
+
+ const depthStencilTextureDesc: GPUTextureDescriptor = {
+ size: { width: 16, height: 16, depthOrArrayLayers: 1 },
+ format: depthStencilTextureFormat,
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
+ };
+
+ const colorTexture = t.device.createTexture(colorTextureDesc);
+ const depthStencilTexture = t.device.createTexture(depthStencilTextureDesc);
+
+ if (colorTextureState === 'destroyedBeforeEncode') {
+ colorTexture.destroy();
+ }
+ if (depthStencilTextureState === 'destroyedBeforeEncode') {
+ depthStencilTexture.destroy();
+ }
+
+ const commandEncoder = t.device.createCommandEncoder();
+ const depthStencilAttachment: GPURenderPassDepthStencilAttachment = {
+ view: depthStencilTexture.createView({ aspect: depthStencilTextureAspect }),
+ };
+ if (kTextureFormatInfo[depthStencilTextureFormat].depth) {
+ depthStencilAttachment.depthClearValue = 0;
+ depthStencilAttachment.depthLoadOp = 'clear';
+ depthStencilAttachment.depthStoreOp = 'discard';
+ }
+ if (kTextureFormatInfo[depthStencilTextureFormat].stencil) {
+ depthStencilAttachment.stencilClearValue = 0;
+ depthStencilAttachment.stencilLoadOp = 'clear';
+ depthStencilAttachment.stencilStoreOp = 'discard';
+ }
+ const renderPass = commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorTexture.createView(),
+ clearValue: [0, 0, 0, 0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ depthStencilAttachment,
+ });
+ renderPass.end();
+
+ const cmd = commandEncoder.finish();
+
+ if (colorTextureState === 'destroyedAfterEncode') {
+ colorTexture.destroy();
+ }
+ if (depthStencilTextureState === 'destroyedAfterEncode') {
+ depthStencilTexture.destroy();
+ }
+
+ t.expectValidationError(() => t.queue.submit([cmd]), !isSubmitSuccess);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/texture/float32_filterable.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/texture/float32_filterable.spec.ts
new file mode 100644
index 0000000000..4c2803c8a0
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/texture/float32_filterable.spec.ts
@@ -0,0 +1,58 @@
+export const description = `
+Tests for capabilities added by float32-filterable flag.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { kTextureSampleTypes } from '../../../capability_info.js';
+import { ValidationTest } from '../validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+const kFloat32Formats: GPUTextureFormat[] = ['r32float', 'rg32float', 'rgba32float'];
+
+g.test('create_bind_group')
+ .desc(
+ `
+Test that it is valid to bind a float32 texture format to a 'float' sampled texture iff
+float32-filterable is enabled.
+`
+ )
+ .params(u =>
+ u
+ .combine('enabled', [true, false] as const)
+ .beginSubcases()
+ .combine('format', kFloat32Formats)
+ .combine('sampleType', kTextureSampleTypes)
+ )
+ .beforeAllSubcases(t => {
+ if (t.params.enabled) {
+ t.selectDeviceOrSkipTestCase('float32-filterable');
+ }
+ })
+ .fn(t => {
+ const { enabled, format, sampleType } = t.params;
+ const layout = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.FRAGMENT,
+ texture: { sampleType },
+ },
+ ],
+ });
+ const textureDesc = {
+ size: { width: 4, height: 4 },
+ format,
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ };
+ const shouldError = !(
+ (enabled && sampleType === 'float') ||
+ sampleType === 'unfilterable-float'
+ );
+ t.expectValidationError(() => {
+ t.device.createBindGroup({
+ entries: [{ binding: 0, resource: t.device.createTexture(textureDesc).createView() }],
+ layout,
+ });
+ }, shouldError);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/texture/rg11b10ufloat_renderable.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/texture/rg11b10ufloat_renderable.spec.ts
new file mode 100644
index 0000000000..e0dd38e507
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/texture/rg11b10ufloat_renderable.spec.ts
@@ -0,0 +1,149 @@
+export const description = `
+Tests for capabilities added by rg11b10ufloat-renderable flag.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUConst } from '../../../constants.js';
+import { ValidationTest } from '../validation_test.js';
+
+export const g = makeTestGroup(ValidationTest);
+
+g.test('create_texture')
+ .desc(
+ `
+Test that it is valid to create rg11b10ufloat texture with RENDER_ATTACHMENT usage and/or
+sampleCount > 1, iff rg11b10ufloat-renderable feature is enabled.
+Note, the createTexture tests cover these validation cases where this feature is not enabled.
+`
+ )
+ .params(u => u.combine('sampleCount', [1, 4]))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('rg11b10ufloat-renderable');
+ })
+ .fn(t => {
+ const { sampleCount } = t.params;
+ const descriptor = {
+ size: [1, 1, 1],
+ format: 'rg11b10ufloat' as const,
+ sampleCount,
+ usage: GPUConst.TextureUsage.RENDER_ATTACHMENT,
+ };
+ t.device.createTexture(descriptor);
+ });
+
+g.test('begin_render_pass_single_sampled')
+ .desc(
+ `
+Test that it is valid to begin render pass with rg11b10ufloat texture format
+iff rg11b10ufloat-renderable feature is enabled. Single sampled case.
+`
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('rg11b10ufloat-renderable');
+ })
+ .fn(t => {
+ const texture = t.device.createTexture({
+ size: [1, 1, 1],
+ format: 'rg11b10ufloat',
+ sampleCount: 1,
+ usage: GPUConst.TextureUsage.RENDER_ATTACHMENT,
+ });
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: texture.createView(),
+ clearValue: { r: 1.0, g: 0.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ pass.end();
+ encoder.finish();
+ });
+
+g.test('begin_render_pass_msaa_and_resolve')
+ .desc(
+ `
+Test that it is valid to begin render pass with rg11b10ufloat texture format
+iff rg11b10ufloat-renderable feature is enabled. MSAA and resolve case.
+`
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('rg11b10ufloat-renderable');
+ })
+ .fn(t => {
+ const renderTexture = t.device.createTexture({
+ size: [1, 1, 1],
+ format: 'rg11b10ufloat',
+ sampleCount: 4,
+ usage: GPUConst.TextureUsage.RENDER_ATTACHMENT,
+ });
+ const resolveTexture = t.device.createTexture({
+ size: [1, 1, 1],
+ format: 'rg11b10ufloat',
+ sampleCount: 1,
+ usage: GPUConst.TextureUsage.RENDER_ATTACHMENT,
+ });
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTexture.createView(),
+ resolveTarget: resolveTexture.createView(),
+ clearValue: { r: 1.0, g: 0.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ pass.end();
+ encoder.finish();
+ });
+
+g.test('begin_render_bundle_encoder')
+ .desc(
+ `
+Test that it is valid to begin render bundle encoder with rg11b10ufloat texture
+format iff rg11b10ufloat-renderable feature is enabled.
+`
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('rg11b10ufloat-renderable');
+ })
+ .fn(t => {
+ t.device.createRenderBundleEncoder({
+ colorFormats: ['rg11b10ufloat'],
+ });
+ });
+
+g.test('create_render_pipeline')
+ .desc(
+ `
+Test that it is valid to create render pipeline with rg11b10ufloat texture format
+in descriptor.fragment.targets iff rg11b10ufloat-renderable feature is enabled.
+`
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('rg11b10ufloat-renderable');
+ })
+ .fn(t => {
+ t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: t.getNoOpShaderCode('VERTEX'),
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: t.getNoOpShaderCode('FRAGMENT'),
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rg11b10ufloat', writeMask: 0 }],
+ },
+ primitive: { topology: 'triangle-list' },
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/validation_test.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/validation_test.ts
new file mode 100644
index 0000000000..7ee5b9f7c1
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/api/validation/validation_test.ts
@@ -0,0 +1,448 @@
+import {
+ ValidBindableResource,
+ BindableResource,
+ kMaxQueryCount,
+ ShaderStageKey,
+} from '../../capability_info.js';
+import { GPUTest, ResourceState } from '../../gpu_test.js';
+
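+// Illustrative usage sketch (hypothetical test, not part of this file), showing how a spec file
+// typically drives these helpers:
+//
+//   export const g = makeTestGroup(ValidationTest);
+//   g.test('example').fn(t => {
+//     const layout = t.device.createBindGroupLayout({
+//       entries: [{ binding: 0, visibility: GPUShaderStage.FRAGMENT, texture: {} }],
+//     });
+//     // Binding an error texture view is expected to generate a validation error.
+//     t.expectValidationError(() => {
+//       t.device.createBindGroup({ layout, entries: [{ binding: 0, resource: t.getErrorTextureView() }] });
+//     }, true);
+//   });
+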
+/**
+ * Base fixture for WebGPU validation tests.
+ */
+export class ValidationTest extends GPUTest {
+ /**
+ * Create a GPUTexture in the specified state.
+ * A `descriptor` may optionally be passed, which is used when `state` is not `'invalid'`.
+ */
+ createTextureWithState(
+ state: ResourceState,
+ descriptor?: Readonly<GPUTextureDescriptor>
+ ): GPUTexture {
+ descriptor = descriptor ?? {
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ format: 'rgba8unorm',
+ usage:
+ GPUTextureUsage.COPY_SRC |
+ GPUTextureUsage.COPY_DST |
+ GPUTextureUsage.TEXTURE_BINDING |
+ GPUTextureUsage.STORAGE_BINDING |
+ GPUTextureUsage.RENDER_ATTACHMENT,
+ };
+
+ switch (state) {
+ case 'valid':
+ return this.trackForCleanup(this.device.createTexture(descriptor));
+ case 'invalid':
+ return this.getErrorTexture();
+ case 'destroyed': {
+ const texture = this.device.createTexture(descriptor);
+ texture.destroy();
+ return texture;
+ }
+ }
+ }
+
+ /**
+   * Create a GPUBuffer in the specified state. A `descriptor` may optionally be passed;
+ * if `state` is `'invalid'`, it will be modified to add an invalid combination of usages.
+ */
+ createBufferWithState(
+ state: ResourceState,
+ descriptor?: Readonly<GPUBufferDescriptor>
+ ): GPUBuffer {
+ descriptor = descriptor ?? {
+ size: 4,
+ usage: GPUBufferUsage.VERTEX,
+ };
+
+ switch (state) {
+ case 'valid':
+ return this.trackForCleanup(this.device.createBuffer(descriptor));
+
+ case 'invalid': {
+ // Make the buffer invalid because of an invalid combination of usages but keep the
+ // descriptor passed as much as possible (for mappedAtCreation and friends).
+ this.device.pushErrorScope('validation');
+ const buffer = this.device.createBuffer({
+ ...descriptor,
+ usage: descriptor.usage | GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_SRC,
+ });
+ void this.device.popErrorScope();
+ return buffer;
+ }
+ case 'destroyed': {
+ const buffer = this.device.createBuffer(descriptor);
+ buffer.destroy();
+ return buffer;
+ }
+ }
+ }
+
+ /**
+ * Create a GPUQuerySet in the specified state.
+ * A `descriptor` may optionally be passed, which is used when `state` is not `'invalid'`.
+ */
+ createQuerySetWithState(
+ state: ResourceState,
+ desc?: Readonly<GPUQuerySetDescriptor>
+ ): GPUQuerySet {
+ const descriptor = { type: 'occlusion' as const, count: 2, ...desc };
+
+ switch (state) {
+ case 'valid':
+ return this.trackForCleanup(this.device.createQuerySet(descriptor));
+ case 'invalid': {
+ // Make the queryset invalid because of the count out of bounds.
+ descriptor.count = kMaxQueryCount + 1;
+ return this.expectGPUError('validation', () => this.device.createQuerySet(descriptor));
+ }
+ case 'destroyed': {
+ const queryset = this.device.createQuerySet(descriptor);
+ queryset.destroy();
+ return queryset;
+ }
+ }
+ }
+
+ /** Create an arbitrarily-sized GPUBuffer with the STORAGE usage. */
+ getStorageBuffer(): GPUBuffer {
+ return this.trackForCleanup(
+ this.device.createBuffer({ size: 1024, usage: GPUBufferUsage.STORAGE })
+ );
+ }
+
+ /** Create an arbitrarily-sized GPUBuffer with the UNIFORM usage. */
+ getUniformBuffer(): GPUBuffer {
+ return this.trackForCleanup(
+ this.device.createBuffer({ size: 1024, usage: GPUBufferUsage.UNIFORM })
+ );
+ }
+
+ /** Return an invalid GPUBuffer. */
+ getErrorBuffer(): GPUBuffer {
+ return this.createBufferWithState('invalid');
+ }
+
+ /** Return an invalid GPUSampler. */
+ getErrorSampler(): GPUSampler {
+ this.device.pushErrorScope('validation');
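+    // lodMinClamp must be non-negative, so this sampler creation generates a validation error.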
+ const sampler = this.device.createSampler({ lodMinClamp: -1 });
+ void this.device.popErrorScope();
+ return sampler;
+ }
+
+ /**
+ * Return an arbitrarily-configured GPUTexture with the `TEXTURE_BINDING` usage and specified
+   * sampleCount. The `RENDER_ATTACHMENT` usage will also be specified if sampleCount > 1, as
+   * required by the WebGPU spec.
+ */
+ getSampledTexture(sampleCount: number = 1): GPUTexture {
+ const usage =
+ sampleCount > 1
+ ? GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.RENDER_ATTACHMENT
+ : GPUTextureUsage.TEXTURE_BINDING;
+ return this.trackForCleanup(
+ this.device.createTexture({
+ size: { width: 16, height: 16, depthOrArrayLayers: 1 },
+ format: 'rgba8unorm',
+ usage,
+ sampleCount,
+ })
+ );
+ }
+
+ /** Return an arbitrarily-configured GPUTexture with the `STORAGE_BINDING` usage. */
+ getStorageTexture(): GPUTexture {
+ return this.trackForCleanup(
+ this.device.createTexture({
+ size: { width: 16, height: 16, depthOrArrayLayers: 1 },
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.STORAGE_BINDING,
+ })
+ );
+ }
+
+ /** Return an arbitrarily-configured GPUTexture with the `RENDER_ATTACHMENT` usage. */
+ getRenderTexture(sampleCount: number = 1): GPUTexture {
+ return this.trackForCleanup(
+ this.device.createTexture({
+ size: { width: 16, height: 16, depthOrArrayLayers: 1 },
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ sampleCount,
+ })
+ );
+ }
+
+ /** Return an invalid GPUTexture. */
+ getErrorTexture(): GPUTexture {
+ this.device.pushErrorScope('validation');
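+    // A zero-sized texture descriptor is invalid, producing an error GPUTexture.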
+ const texture = this.device.createTexture({
+ size: { width: 0, height: 0, depthOrArrayLayers: 0 },
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ });
+ void this.device.popErrorScope();
+ return texture;
+ }
+
+ /** Return an invalid GPUTextureView (created from an invalid GPUTexture). */
+ getErrorTextureView(): GPUTextureView {
+ this.device.pushErrorScope('validation');
+ const view = this.getErrorTexture().createView();
+ void this.device.popErrorScope();
+ return view;
+ }
+
+ /**
+ * Return an arbitrary object of the specified {@link webgpu/capability_info!BindableResource} type
+   * (e.g. `'errorBuf'`, `'nonFiltSamp'`, `'sampledTexMS'`, etc.)
+ */
+ getBindingResource(bindingType: BindableResource): GPUBindingResource {
+ switch (bindingType) {
+ case 'errorBuf':
+ return { buffer: this.getErrorBuffer() };
+ case 'errorSamp':
+ return this.getErrorSampler();
+ case 'errorTex':
+ return this.getErrorTextureView();
+ case 'uniformBuf':
+ return { buffer: this.getUniformBuffer() };
+ case 'storageBuf':
+ return { buffer: this.getStorageBuffer() };
+ case 'filtSamp':
+ return this.device.createSampler({ minFilter: 'linear' });
+ case 'nonFiltSamp':
+ return this.device.createSampler();
+ case 'compareSamp':
+ return this.device.createSampler({ compare: 'never' });
+ case 'sampledTex':
+ return this.getSampledTexture(1).createView();
+ case 'sampledTexMS':
+ return this.getSampledTexture(4).createView();
+ case 'storageTex':
+ return this.getStorageTexture().createView();
+ }
+ }
+
+ /** Create an arbitrarily-sized GPUBuffer with the STORAGE usage from mismatched device. */
+ getDeviceMismatchedStorageBuffer(): GPUBuffer {
+ return this.trackForCleanup(
+ this.mismatchedDevice.createBuffer({ size: 4, usage: GPUBufferUsage.STORAGE })
+ );
+ }
+
+ /** Create an arbitrarily-sized GPUBuffer with the UNIFORM usage from mismatched device. */
+ getDeviceMismatchedUniformBuffer(): GPUBuffer {
+ return this.trackForCleanup(
+ this.mismatchedDevice.createBuffer({ size: 4, usage: GPUBufferUsage.UNIFORM })
+ );
+ }
+
+ /** Return a GPUTexture with descriptor from mismatched device. */
+ getDeviceMismatchedTexture(descriptor: GPUTextureDescriptor): GPUTexture {
+ return this.trackForCleanup(this.mismatchedDevice.createTexture(descriptor));
+ }
+
+ /** Return an arbitrarily-configured GPUTexture with the `SAMPLED` usage from mismatched device. */
+ getDeviceMismatchedSampledTexture(sampleCount: number = 1): GPUTexture {
+ return this.getDeviceMismatchedTexture({
+ size: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ sampleCount,
+ });
+ }
+
+ /** Return an arbitrarily-configured GPUTexture with the `STORAGE` usage from mismatched device. */
+ getDeviceMismatchedStorageTexture(): GPUTexture {
+ return this.getDeviceMismatchedTexture({
+ size: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.STORAGE_BINDING,
+ });
+ }
+
+ /** Return an arbitrarily-configured GPUTexture with the `RENDER_ATTACHMENT` usage from mismatched device. */
+ getDeviceMismatchedRenderTexture(sampleCount: number = 1): GPUTexture {
+ return this.getDeviceMismatchedTexture({
+ size: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ sampleCount,
+ });
+ }
+
+ getDeviceMismatchedBindingResource(bindingType: ValidBindableResource): GPUBindingResource {
+ switch (bindingType) {
+      case 'uniformBuf':
+        return { buffer: this.getDeviceMismatchedUniformBuffer() };
+      case 'storageBuf':
+        return { buffer: this.getDeviceMismatchedStorageBuffer() };
+ case 'filtSamp':
+ return this.mismatchedDevice.createSampler({ minFilter: 'linear' });
+ case 'nonFiltSamp':
+ return this.mismatchedDevice.createSampler();
+ case 'compareSamp':
+ return this.mismatchedDevice.createSampler({ compare: 'never' });
+ case 'sampledTex':
+ return this.getDeviceMismatchedSampledTexture(1).createView();
+ case 'sampledTexMS':
+ return this.getDeviceMismatchedSampledTexture(4).createView();
+ case 'storageTex':
+ return this.getDeviceMismatchedStorageTexture().createView();
+ }
+ }
+
+ /** Return a no-op shader code snippet for the specified shader stage. */
+ getNoOpShaderCode(stage: ShaderStageKey): string {
+ switch (stage) {
+ case 'VERTEX':
+ return `
+ @vertex fn main() -> @builtin(position) vec4<f32> {
+ return vec4<f32>();
+ }
+ `;
+ case 'FRAGMENT':
+ return `@fragment fn main() {}`;
+ case 'COMPUTE':
+ return `@compute @workgroup_size(1) fn main() {}`;
+ }
+ }
+
+ /** Create a GPURenderPipeline in the specified state. */
+ createRenderPipelineWithState(state: 'valid' | 'invalid'): GPURenderPipeline {
+ return state === 'valid' ? this.createNoOpRenderPipeline() : this.createErrorRenderPipeline();
+ }
+
+ /** Return a GPURenderPipeline with default options and no-op vertex and fragment shaders. */
+ createNoOpRenderPipeline(
+ layout: GPUPipelineLayout | GPUAutoLayoutMode = 'auto'
+ ): GPURenderPipeline {
+ return this.device.createRenderPipeline({
+ layout,
+ vertex: {
+ module: this.device.createShaderModule({
+ code: this.getNoOpShaderCode('VERTEX'),
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: this.device.createShaderModule({
+ code: this.getNoOpShaderCode('FRAGMENT'),
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm', writeMask: 0 }],
+ },
+ primitive: { topology: 'triangle-list' },
+ });
+ }
+
+ /** Return an invalid GPURenderPipeline. */
+ createErrorRenderPipeline(): GPURenderPipeline {
+ this.device.pushErrorScope('validation');
+ const pipeline = this.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: this.device.createShaderModule({
+ code: '',
+ }),
+ entryPoint: '',
+ },
+ });
+ void this.device.popErrorScope();
+ return pipeline;
+ }
+
+ /** Return a GPUComputePipeline with a no-op shader. */
+ createNoOpComputePipeline(
+ layout: GPUPipelineLayout | GPUAutoLayoutMode = 'auto'
+ ): GPUComputePipeline {
+ return this.device.createComputePipeline({
+ layout,
+ compute: {
+ module: this.device.createShaderModule({
+ code: this.getNoOpShaderCode('COMPUTE'),
+ }),
+ entryPoint: 'main',
+ },
+ });
+ }
+
+ /** Return an invalid GPUComputePipeline. */
+ createErrorComputePipeline(): GPUComputePipeline {
+ this.device.pushErrorScope('validation');
+ const pipeline = this.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: this.device.createShaderModule({
+ code: '',
+ }),
+ entryPoint: '',
+ },
+ });
+ void this.device.popErrorScope();
+ return pipeline;
+ }
+
+ /** Return an invalid GPUShaderModule. */
+ createInvalidShaderModule(): GPUShaderModule {
+ this.device.pushErrorScope('validation');
+    const code = 'deadbeaf'; // Deliberately nonsensical WGSL, so the module is invalid.
+ const shaderModule = this.device.createShaderModule({ code });
+ void this.device.popErrorScope();
+ return shaderModule;
+ }
+
+ /** Helper for testing createRenderPipeline(Async) validation */
+ doCreateRenderPipelineTest(
+ isAsync: boolean,
+ _success: boolean,
+ descriptor: GPURenderPipelineDescriptor,
+ errorTypeName: 'GPUPipelineError' | 'TypeError' = 'GPUPipelineError'
+ ) {
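+    // Async creation is expected to reject with the given error type; sync creation either
+    // generates a validation error (GPUPipelineError) or throws synchronously (TypeError).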
+ if (isAsync) {
+ if (_success) {
+ this.shouldResolve(this.device.createRenderPipelineAsync(descriptor));
+ } else {
+ this.shouldReject(errorTypeName, this.device.createRenderPipelineAsync(descriptor));
+ }
+ } else {
+ if (errorTypeName === 'GPUPipelineError') {
+ this.expectValidationError(() => {
+ this.device.createRenderPipeline(descriptor);
+ }, !_success);
+ } else {
+ this.shouldThrow(_success ? false : errorTypeName, () => {
+ this.device.createRenderPipeline(descriptor);
+ });
+ }
+ }
+ }
+
+ /** Helper for testing createComputePipeline(Async) validation */
+ doCreateComputePipelineTest(
+ isAsync: boolean,
+ _success: boolean,
+ descriptor: GPUComputePipelineDescriptor,
+ errorTypeName: 'GPUPipelineError' | 'TypeError' = 'GPUPipelineError'
+ ) {
+ if (isAsync) {
+ if (_success) {
+ this.shouldResolve(this.device.createComputePipelineAsync(descriptor));
+ } else {
+ this.shouldReject(errorTypeName, this.device.createComputePipelineAsync(descriptor));
+ }
+ } else {
+ if (errorTypeName === 'GPUPipelineError') {
+ this.expectValidationError(() => {
+ this.device.createComputePipeline(descriptor);
+ }, !_success);
+ } else {
+ this.shouldThrow(_success ? false : errorTypeName, () => {
+ this.device.createComputePipeline(descriptor);
+ });
+ }
+ }
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/capability_info.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/capability_info.ts
new file mode 100644
index 0000000000..d65313c006
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/capability_info.ts
@@ -0,0 +1,792 @@
+// MAINTENANCE_TODO: The generated Typedoc for this file is hard to navigate because it's
+// alphabetized. Consider using namespaces or renames to fix this?
+
+/* eslint-disable no-sparse-arrays */
+
+import {
+ keysOf,
+ makeTable,
+ makeTableRenameAndFilter,
+ numericKeysOf,
+ valueof,
+} from '../common/util/data_tables.js';
+import { assertTypeTrue, TypeEqual } from '../common/util/types.js';
+import { unreachable } from '../common/util/util.js';
+
+import { GPUConst, kMaxUnsignedLongValue, kMaxUnsignedLongLongValue } from './constants.js';
+
+// Base device limits can be found in constants.ts.
+
+// Queries
+
+/** Maximum number of queries in GPUQuerySet, by spec. */
+export const kMaxQueryCount = 4096;
+/** Per-GPUQueryType info. */
+export type QueryTypeInfo = {
+ /** Optional feature required to use this GPUQueryType. */
+ readonly feature: GPUFeatureName | undefined;
+ // Add fields as needed
+};
+export const kQueryTypeInfo: {
+ readonly [k in GPUQueryType]: QueryTypeInfo;
+} =
+ /* prettier-ignore */ {
+ 'occlusion': { feature: undefined },
+ 'timestamp': { feature: 'timestamp-query' },
+};
+/** List of all GPUQueryType values. */
+export const kQueryTypes = keysOf(kQueryTypeInfo);
+
+// Buffers
+
+/** Required alignment of a GPUBuffer size, by spec. */
+export const kBufferSizeAlignment = 4;
+
+/** Per-GPUBufferUsage copy info. */
+export const kBufferUsageCopyInfo: {
+ readonly [name: string]: GPUBufferUsageFlags;
+} =
+ /* prettier-ignore */ {
+ 'COPY_NONE': 0,
+ 'COPY_SRC': GPUConst.BufferUsage.COPY_SRC,
+ 'COPY_DST': GPUConst.BufferUsage.COPY_DST,
+ 'COPY_SRC_DST': GPUConst.BufferUsage.COPY_SRC | GPUConst.BufferUsage.COPY_DST,
+};
+/** List of all GPUBufferUsage copy values. */
+export const kBufferUsageCopy = keysOf(kBufferUsageCopyInfo);
+
+/** Per-GPUBufferUsage keys and info. */
+type BufferUsageKey = keyof typeof GPUConst.BufferUsage;
+export const kBufferUsageKeys = keysOf(GPUConst.BufferUsage);
+export const kBufferUsageInfo: {
+ readonly [k in BufferUsageKey]: GPUBufferUsageFlags;
+} = {
+ ...GPUConst.BufferUsage,
+};
+
+/** List of all GPUBufferUsage values. */
+export const kBufferUsages = Object.values(GPUConst.BufferUsage);
+export const kAllBufferUsageBits = kBufferUsages.reduce(
+ (previousSet, currentUsage) => previousSet | currentUsage,
+ 0
+);
+
+// Errors
+
+/** Per-GPUErrorFilter info. */
+export const kErrorScopeFilterInfo: {
+ readonly [k in GPUErrorFilter]: {
+ generatable: boolean;
+ };
+} =
+ /* prettier-ignore */ {
+ 'internal': { generatable: false },
+ 'out-of-memory': { generatable: true },
+ 'validation': { generatable: true },
+};
+/** List of all GPUErrorFilter values. */
+export const kErrorScopeFilters = keysOf(kErrorScopeFilterInfo);
+export const kGeneratableErrorScopeFilters = kErrorScopeFilters.filter(
+ e => kErrorScopeFilterInfo[e].generatable
+);
+
+// Canvases
+
+// The GPUTextureFormat values allowed for canvas contexts.
+export const kCanvasTextureFormats = ['bgra8unorm', 'rgba8unorm', 'rgba16float'] as const;
+
+// The alpha modes for canvas contexts.
+export const kCanvasAlphaModesInfo: {
+ readonly [k in GPUCanvasAlphaMode]: {};
+} = /* prettier-ignore */ {
+ 'opaque': {},
+ 'premultiplied': {},
+};
+export const kCanvasAlphaModes = keysOf(kCanvasAlphaModesInfo);
+
+// The color spaces for canvas contexts.
+export const kCanvasColorSpacesInfo: {
+ readonly [k in PredefinedColorSpace]: {};
+} = /* prettier-ignore */ {
+ 'srgb': {},
+ 'display-p3': {},
+};
+export const kCanvasColorSpaces = keysOf(kCanvasColorSpacesInfo);
+
+// Textures (except for texture format info)
+
+/** Per-GPUTextureDimension info. */
+export const kTextureDimensionInfo: {
+ readonly [k in GPUTextureDimension]: {};
+} = /* prettier-ignore */ {
+ '1d': {},
+ '2d': {},
+ '3d': {},
+};
+/** List of all GPUTextureDimension values. */
+export const kTextureDimensions = keysOf(kTextureDimensionInfo);
+
+/** Per-GPUTextureAspect info. */
+export const kTextureAspectInfo: {
+ readonly [k in GPUTextureAspect]: {};
+} = /* prettier-ignore */ {
+ 'all': {},
+ 'depth-only': {},
+ 'stencil-only': {},
+};
+/** List of all GPUTextureAspect values. */
+export const kTextureAspects = keysOf(kTextureAspectInfo);
+
+// Misc
+
+/** Per-GPUCompareFunction info. */
+export const kCompareFunctionInfo: {
+ readonly [k in GPUCompareFunction]: {};
+} =
+ /* prettier-ignore */ {
+ 'never': {},
+ 'less': {},
+ 'equal': {},
+ 'less-equal': {},
+ 'greater': {},
+ 'not-equal': {},
+ 'greater-equal': {},
+ 'always': {},
+};
+/** List of all GPUCompareFunction values. */
+export const kCompareFunctions = keysOf(kCompareFunctionInfo);
+
+/** Per-GPUStencilOperation info. */
+export const kStencilOperationInfo: {
+ readonly [k in GPUStencilOperation]: {};
+} =
+ /* prettier-ignore */ {
+ 'keep': {},
+ 'zero': {},
+ 'replace': {},
+ 'invert': {},
+ 'increment-clamp': {},
+ 'decrement-clamp': {},
+ 'increment-wrap': {},
+ 'decrement-wrap': {},
+};
+/** List of all GPUStencilOperation values. */
+export const kStencilOperations = keysOf(kStencilOperationInfo);
+
+// More textures (except for texture format info)
+
+/** Per-GPUTextureUsage type info. */
+export const kTextureUsageTypeInfo: {
+ readonly [name: string]: number;
+} =
+ /* prettier-ignore */ {
+ 'texture': Number(GPUConst.TextureUsage.TEXTURE_BINDING),
+ 'storage': Number(GPUConst.TextureUsage.STORAGE_BINDING),
+ 'render': Number(GPUConst.TextureUsage.RENDER_ATTACHMENT),
+};
+/** List of all GPUTextureUsage type values. */
+export const kTextureUsageType = keysOf(kTextureUsageTypeInfo);
+
+/** Per-GPUTextureUsage copy info. */
+export const kTextureUsageCopyInfo: {
+ readonly [name: string]: number;
+} =
+ /* prettier-ignore */ {
+ 'none': 0,
+ 'src': Number(GPUConst.TextureUsage.COPY_SRC),
+ 'dst': Number(GPUConst.TextureUsage.COPY_DST),
+ 'src-dest': Number(GPUConst.TextureUsage.COPY_SRC) | Number(GPUConst.TextureUsage.COPY_DST),
+};
+/** List of all GPUTextureUsage copy values. */
+export const kTextureUsageCopy = keysOf(kTextureUsageCopyInfo);
+
+/** Per-GPUTextureUsage info. */
+export const kTextureUsageInfo: {
+ readonly [k in valueof<typeof GPUConst.TextureUsage>]: {};
+} = {
+ [GPUConst.TextureUsage.COPY_SRC]: {},
+ [GPUConst.TextureUsage.COPY_DST]: {},
+ [GPUConst.TextureUsage.TEXTURE_BINDING]: {},
+ [GPUConst.TextureUsage.STORAGE_BINDING]: {},
+ [GPUConst.TextureUsage.RENDER_ATTACHMENT]: {},
+};
+/** List of all GPUTextureUsage values. */
+export const kTextureUsages = numericKeysOf<GPUTextureUsageFlags>(kTextureUsageInfo);
+
+// Texture View
+
+/** Per-GPUTextureViewDimension info. */
+export type TextureViewDimensionInfo = {
+ /** Whether a storage texture view can have this view dimension. */
+ readonly storage: boolean;
+ // Add fields as needed
+};
+/** Per-GPUTextureViewDimension info. */
+export const kTextureViewDimensionInfo: {
+ readonly [k in GPUTextureViewDimension]: TextureViewDimensionInfo;
+} =
+ /* prettier-ignore */ {
+ '1d': { storage: true },
+ '2d': { storage: true },
+ '2d-array': { storage: true },
+ 'cube': { storage: false },
+ 'cube-array': { storage: false },
+ '3d': { storage: true },
+};
+/** List of all GPUTextureViewDimension values. */
+export const kTextureViewDimensions = keysOf(kTextureViewDimensionInfo);
+
+// Vertex formats
+
+/** Per-GPUVertexFormat info. */
+// Exists just for documentation. Otherwise could be inferred by `makeTable`.
+export type VertexFormatInfo = {
+ /** Number of bytes in each component. */
+ readonly bytesPerComponent: 1 | 2 | 4 | 'packed';
+ /** The data encoding (float, normalized, or integer) for each component. */
+ readonly type: 'float' | 'unorm' | 'snorm' | 'uint' | 'sint';
+ /** Number of components. */
+ readonly componentCount: 1 | 2 | 3 | 4;
+ /** Size in bytes. */
+ readonly byteSize: 2 | 4 | 8 | 12 | 16;
+  /** The WGSL type that exactly matches the vertex format. */
+ readonly wgslType:
+ | 'f32'
+ | 'vec2<f32>'
+ | 'vec3<f32>'
+ | 'vec4<f32>'
+ | 'u32'
+ | 'vec2<u32>'
+ | 'vec3<u32>'
+ | 'vec4<u32>'
+ | 'i32'
+ | 'vec2<i32>'
+ | 'vec3<i32>'
+ | 'vec4<i32>';
+ // Add fields as needed
+};
+/** Per-GPUVertexFormat info. */
+export const kVertexFormatInfo: {
+ readonly [k in GPUVertexFormat]: VertexFormatInfo;
+} =
+ /* prettier-ignore */ makeTable(
+ ['bytesPerComponent', 'type', 'componentCount', 'byteSize', 'wgslType'] as const,
+ [ , , , , ] as const, {
+ // 8 bit components
+ 'uint8x2': [ 1, 'uint', 2, 2, 'vec2<u32>'],
+ 'uint8x4': [ 1, 'uint', 4, 4, 'vec4<u32>'],
+ 'sint8x2': [ 1, 'sint', 2, 2, 'vec2<i32>'],
+ 'sint8x4': [ 1, 'sint', 4, 4, 'vec4<i32>'],
+ 'unorm8x2': [ 1, 'unorm', 2, 2, 'vec2<f32>'],
+ 'unorm8x4': [ 1, 'unorm', 4, 4, 'vec4<f32>'],
+ 'snorm8x2': [ 1, 'snorm', 2, 2, 'vec2<f32>'],
+ 'snorm8x4': [ 1, 'snorm', 4, 4, 'vec4<f32>'],
+ // 16 bit components
+ 'uint16x2': [ 2, 'uint', 2, 4, 'vec2<u32>'],
+ 'uint16x4': [ 2, 'uint', 4, 8, 'vec4<u32>'],
+ 'sint16x2': [ 2, 'sint', 2, 4, 'vec2<i32>'],
+ 'sint16x4': [ 2, 'sint', 4, 8, 'vec4<i32>'],
+ 'unorm16x2': [ 2, 'unorm', 2, 4, 'vec2<f32>'],
+ 'unorm16x4': [ 2, 'unorm', 4, 8, 'vec4<f32>'],
+ 'snorm16x2': [ 2, 'snorm', 2, 4, 'vec2<f32>'],
+ 'snorm16x4': [ 2, 'snorm', 4, 8, 'vec4<f32>'],
+ 'float16x2': [ 2, 'float', 2, 4, 'vec2<f32>'],
+ 'float16x4': [ 2, 'float', 4, 8, 'vec4<f32>'],
+ // 32 bit components
+ 'float32': [ 4, 'float', 1, 4, 'f32'],
+ 'float32x2': [ 4, 'float', 2, 8, 'vec2<f32>'],
+ 'float32x3': [ 4, 'float', 3, 12, 'vec3<f32>'],
+ 'float32x4': [ 4, 'float', 4, 16, 'vec4<f32>'],
+ 'uint32': [ 4, 'uint', 1, 4, 'u32'],
+ 'uint32x2': [ 4, 'uint', 2, 8, 'vec2<u32>'],
+ 'uint32x3': [ 4, 'uint', 3, 12, 'vec3<u32>'],
+ 'uint32x4': [ 4, 'uint', 4, 16, 'vec4<u32>'],
+ 'sint32': [ 4, 'sint', 1, 4, 'i32'],
+ 'sint32x2': [ 4, 'sint', 2, 8, 'vec2<i32>'],
+ 'sint32x3': [ 4, 'sint', 3, 12, 'vec3<i32>'],
+ 'sint32x4': [ 4, 'sint', 4, 16, 'vec4<i32>'],
+ // 32 bit packed
+ 'unorm10-10-10-2': [ 'packed', 'unorm', 4, 4, 'vec4<f32>']
+} as const);
+/** List of all GPUVertexFormat values. */
+export const kVertexFormats = keysOf(kVertexFormatInfo);
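+// For instance (illustrative), a test can read expected layouts straight from the table:
+//   kVertexFormatInfo['float32x2']  // => { bytesPerComponent: 4, type: 'float', componentCount: 2,
+//                                   //      byteSize: 8, wgslType: 'vec2<f32>' }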
+
+// Typedefs for bindings
+
+/**
+ * Classes of `PerShaderStage` binding limits. Two bindings with the same class
+ * count toward the same `PerShaderStage` limit(s) in the spec (if any).
+ */
+export type PerStageBindingLimitClass =
+ | 'uniformBuf'
+ | 'storageBuf'
+ | 'sampler'
+ | 'sampledTex'
+ | 'storageTex';
+/**
+ * Classes of `PerPipelineLayout` binding limits. Two bindings with the same class
+ * count toward the same `PerPipelineLayout` limit(s) in the spec (if any).
+ */
+export type PerPipelineBindingLimitClass = PerStageBindingLimitClass;
+
+export type ValidBindableResource =
+ | 'uniformBuf'
+ | 'storageBuf'
+ | 'filtSamp'
+ | 'nonFiltSamp'
+ | 'compareSamp'
+ | 'sampledTex'
+ | 'sampledTexMS'
+ | 'storageTex';
+type ErrorBindableResource = 'errorBuf' | 'errorSamp' | 'errorTex';
+
+/**
+ * Types of resource binding which have distinct binding rules, by spec
+ * (e.g. filtering vs non-filtering sampler, multisample vs non-multisample texture).
+ */
+export type BindableResource = ValidBindableResource | ErrorBindableResource;
+export const kBindableResources = [
+ 'uniformBuf',
+ 'storageBuf',
+ 'filtSamp',
+ 'nonFiltSamp',
+ 'compareSamp',
+ 'sampledTex',
+ 'sampledTexMS',
+ 'storageTex',
+ 'errorBuf',
+ 'errorSamp',
+ 'errorTex',
+] as const;
+assertTypeTrue<TypeEqual<BindableResource, (typeof kBindableResources)[number]>>();
+
+// Bindings
+
+/** Dynamic buffer offsets are required to be divisible by 256, by spec. */
+export const kMinDynamicBufferOffsetAlignment = 256;
+
+/** Default `PerShaderStage` binding limits, by spec. */
+export const kPerStageBindingLimits: {
+ readonly [k in PerStageBindingLimitClass]: {
+ /** Which `PerShaderStage` binding limit class. */
+ readonly class: k;
+ /** Maximum number of allowed bindings in that class. */
+ readonly maxLimit: (typeof kLimits)[number];
+ // Add fields as needed
+ };
+} =
+ /* prettier-ignore */ {
+ 'uniformBuf': { class: 'uniformBuf', maxLimit: 'maxUniformBuffersPerShaderStage', },
+ 'storageBuf': { class: 'storageBuf', maxLimit: 'maxStorageBuffersPerShaderStage', },
+ 'sampler': { class: 'sampler', maxLimit: 'maxSamplersPerShaderStage', },
+ 'sampledTex': { class: 'sampledTex', maxLimit: 'maxSampledTexturesPerShaderStage', },
+ 'storageTex': { class: 'storageTex', maxLimit: 'maxStorageTexturesPerShaderStage', },
+};
+
+/**
+ * Default `PerPipelineLayout` binding limits, by spec.
+ */
+export const kPerPipelineBindingLimits: {
+ readonly [k in PerPipelineBindingLimitClass]: {
+ /** Which `PerPipelineLayout` binding limit class. */
+ readonly class: k;
+ /**
+ * The name of the limit for the maximum number of allowed bindings with `hasDynamicOffset: true` in that class.
+ */
+ readonly maxDynamicLimit: (typeof kLimits)[number] | '';
+ // Add fields as needed
+ };
+} =
+ /* prettier-ignore */ {
+ 'uniformBuf': { class: 'uniformBuf', maxDynamicLimit: 'maxDynamicUniformBuffersPerPipelineLayout', },
+ 'storageBuf': { class: 'storageBuf', maxDynamicLimit: 'maxDynamicStorageBuffersPerPipelineLayout', },
+ 'sampler': { class: 'sampler', maxDynamicLimit: '', },
+ 'sampledTex': { class: 'sampledTex', maxDynamicLimit: '', },
+ 'storageTex': { class: 'storageTex', maxDynamicLimit: '', },
+};
+
+interface BindingKindInfo {
+ readonly resource: ValidBindableResource;
+ readonly perStageLimitClass: (typeof kPerStageBindingLimits)[PerStageBindingLimitClass];
+ readonly perPipelineLimitClass: (typeof kPerPipelineBindingLimits)[PerPipelineBindingLimitClass];
+ // Add fields as needed
+}
+
+const kBindingKind: {
+ readonly [k in ValidBindableResource]: BindingKindInfo;
+} =
+ /* prettier-ignore */ {
+ uniformBuf: { resource: 'uniformBuf', perStageLimitClass: kPerStageBindingLimits.uniformBuf, perPipelineLimitClass: kPerPipelineBindingLimits.uniformBuf, },
+ storageBuf: { resource: 'storageBuf', perStageLimitClass: kPerStageBindingLimits.storageBuf, perPipelineLimitClass: kPerPipelineBindingLimits.storageBuf, },
+ filtSamp: { resource: 'filtSamp', perStageLimitClass: kPerStageBindingLimits.sampler, perPipelineLimitClass: kPerPipelineBindingLimits.sampler, },
+ nonFiltSamp: { resource: 'nonFiltSamp', perStageLimitClass: kPerStageBindingLimits.sampler, perPipelineLimitClass: kPerPipelineBindingLimits.sampler, },
+ compareSamp: { resource: 'compareSamp', perStageLimitClass: kPerStageBindingLimits.sampler, perPipelineLimitClass: kPerPipelineBindingLimits.sampler, },
+ sampledTex: { resource: 'sampledTex', perStageLimitClass: kPerStageBindingLimits.sampledTex, perPipelineLimitClass: kPerPipelineBindingLimits.sampledTex, },
+ sampledTexMS: { resource: 'sampledTexMS', perStageLimitClass: kPerStageBindingLimits.sampledTex, perPipelineLimitClass: kPerPipelineBindingLimits.sampledTex, },
+ storageTex: { resource: 'storageTex', perStageLimitClass: kPerStageBindingLimits.storageTex, perPipelineLimitClass: kPerPipelineBindingLimits.storageTex, },
+};
+
+// Binding type info
+
+const kValidStagesAll = {
+ validStages:
+ GPUConst.ShaderStage.VERTEX | GPUConst.ShaderStage.FRAGMENT | GPUConst.ShaderStage.COMPUTE,
+} as const;
+const kValidStagesStorageWrite = {
+ validStages: GPUConst.ShaderStage.FRAGMENT | GPUConst.ShaderStage.COMPUTE,
+} as const;
+
+/** Binding type info (including class limits) for the specified GPUBufferBindingLayout. */
+export function bufferBindingTypeInfo(d: GPUBufferBindingLayout) {
+ /* prettier-ignore */
+ switch (d.type ?? 'uniform') {
+ case 'uniform': return { usage: GPUConst.BufferUsage.UNIFORM, ...kBindingKind.uniformBuf, ...kValidStagesAll, };
+ case 'storage': return { usage: GPUConst.BufferUsage.STORAGE, ...kBindingKind.storageBuf, ...kValidStagesStorageWrite, };
+ case 'read-only-storage': return { usage: GPUConst.BufferUsage.STORAGE, ...kBindingKind.storageBuf, ...kValidStagesAll, };
+ }
+}
+/** List of all GPUBufferBindingType values. */
+export const kBufferBindingTypes = ['uniform', 'storage', 'read-only-storage'] as const;
+assertTypeTrue<TypeEqual<GPUBufferBindingType, (typeof kBufferBindingTypes)[number]>>();
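+// e.g. bufferBindingTypeInfo({ type: 'storage' }) yields usage GPUConst.BufferUsage.STORAGE,
+// perStageLimitClass.maxLimit 'maxStorageBuffersPerShaderStage', and fragment/compute-only validStages.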
+
+/** Binding type info (including class limits) for the specified GPUSamplerBindingLayout. */
+export function samplerBindingTypeInfo(d: GPUSamplerBindingLayout) {
+ /* prettier-ignore */
+ switch (d.type ?? 'filtering') {
+ case 'filtering': return { ...kBindingKind.filtSamp, ...kValidStagesAll, };
+ case 'non-filtering': return { ...kBindingKind.nonFiltSamp, ...kValidStagesAll, };
+ case 'comparison': return { ...kBindingKind.compareSamp, ...kValidStagesAll, };
+ }
+}
+/** List of all GPUSamplerBindingType values. */
+export const kSamplerBindingTypes = ['filtering', 'non-filtering', 'comparison'] as const;
+assertTypeTrue<TypeEqual<GPUSamplerBindingType, (typeof kSamplerBindingTypes)[number]>>();
+
+/** Binding type info (including class limits) for the specified GPUTextureBindingLayout. */
+export function sampledTextureBindingTypeInfo(d: GPUTextureBindingLayout) {
+ /* prettier-ignore */
+ if (d.multisampled) {
+ return { usage: GPUConst.TextureUsage.TEXTURE_BINDING, ...kBindingKind.sampledTexMS, ...kValidStagesAll, };
+ } else {
+ return { usage: GPUConst.TextureUsage.TEXTURE_BINDING, ...kBindingKind.sampledTex, ...kValidStagesAll, };
+ }
+}
+/** List of all GPUTextureSampleType values. */
+export const kTextureSampleTypes = [
+ 'float',
+ 'unfilterable-float',
+ 'depth',
+ 'sint',
+ 'uint',
+] as const;
+assertTypeTrue<TypeEqual<GPUTextureSampleType, (typeof kTextureSampleTypes)[number]>>();
+
+/** Binding type info (including class limits) for the specified GPUStorageTextureBindingLayout. */
+export function storageTextureBindingTypeInfo(d: GPUStorageTextureBindingLayout) {
+ return {
+ usage: GPUConst.TextureUsage.STORAGE_BINDING,
+ ...kBindingKind.storageTex,
+ ...kValidStagesStorageWrite,
+ };
+}
+/** List of all GPUStorageTextureAccess values. */
+export const kStorageTextureAccessValues = ['write-only'] as const;
+assertTypeTrue<TypeEqual<GPUStorageTextureAccess, (typeof kStorageTextureAccessValues)[number]>>();
+
+/** GPUBindGroupLayoutEntry, but only the "union" fields, not the common fields. */
+export type BGLEntry = Omit<GPUBindGroupLayoutEntry, 'binding' | 'visibility'>;
+/** Binding type info (including class limits) for the specified BGLEntry. */
+export function texBindingTypeInfo(e: BGLEntry) {
+ if (e.texture !== undefined) return sampledTextureBindingTypeInfo(e.texture);
+ if (e.storageTexture !== undefined) return storageTextureBindingTypeInfo(e.storageTexture);
+ unreachable();
+}
+/** BindingTypeInfo (including class limits) for the specified BGLEntry. */
+export function bindingTypeInfo(e: BGLEntry) {
+ if (e.buffer !== undefined) return bufferBindingTypeInfo(e.buffer);
+ if (e.texture !== undefined) return sampledTextureBindingTypeInfo(e.texture);
+ if (e.sampler !== undefined) return samplerBindingTypeInfo(e.sampler);
+ if (e.storageTexture !== undefined) return storageTextureBindingTypeInfo(e.storageTexture);
+ unreachable('GPUBindGroupLayoutEntry has no BindingLayout');
+}
+
+/**
+ * Generate a list of possible buffer-typed BGLEntry values.
+ *
+ * Note: Generates different `type` options, but not `hasDynamicOffset` options.
+ */
+export function bufferBindingEntries(includeUndefined: boolean): readonly BGLEntry[] {
+ return [
+ ...(includeUndefined ? [{ buffer: { type: undefined } }] : []),
+ { buffer: { type: 'uniform' } },
+ { buffer: { type: 'storage' } },
+ { buffer: { type: 'read-only-storage' } },
+ ] as const;
+}
+/** Generate a list of possible sampler-typed BGLEntry values. */
+export function samplerBindingEntries(includeUndefined: boolean): readonly BGLEntry[] {
+ return [
+ ...(includeUndefined ? [{ sampler: { type: undefined } }] : []),
+ { sampler: { type: 'comparison' } },
+ { sampler: { type: 'filtering' } },
+ { sampler: { type: 'non-filtering' } },
+ ] as const;
+}
+/**
+ * Generate a list of possible texture-typed BGLEntry values.
+ *
+ * Note: Generates different `multisampled` options, but not `sampleType` or `viewDimension` options.
+ */
+export function textureBindingEntries(includeUndefined: boolean): readonly BGLEntry[] {
+ return [
+ ...(includeUndefined ? [{ texture: { multisampled: undefined } }] : []),
+ { texture: { multisampled: false } },
+ { texture: { multisampled: true, sampleType: 'unfilterable-float' } },
+ ] as const;
+}
+/**
+ * Generate a list of possible storageTexture-typed BGLEntry values.
+ *
+ * Note: Generates different `access` options, but not `format` or `viewDimension` options.
+ */
+export function storageTextureBindingEntries(format: GPUTextureFormat): readonly BGLEntry[] {
+ return [{ storageTexture: { access: 'write-only', format } }] as const;
+}
+/** Generate a list of possible texture-or-storageTexture-typed BGLEntry values. */
+export function sampledAndStorageBindingEntries(
+ includeUndefined: boolean,
+ storageTextureFormat: GPUTextureFormat = 'rgba8unorm'
+): readonly BGLEntry[] {
+ return [
+ ...textureBindingEntries(includeUndefined),
+ ...storageTextureBindingEntries(storageTextureFormat),
+ ] as const;
+}
+/**
+ * Generate a list of possible BGLEntry values of every type, but not variants with different:
+ * - buffer.hasDynamicOffset
+ * - texture.sampleType
+ * - texture.viewDimension
+ * - storageTexture.viewDimension
+ */
+export function allBindingEntries(
+ includeUndefined: boolean,
+ storageTextureFormat: GPUTextureFormat = 'rgba8unorm'
+): readonly BGLEntry[] {
+ return [
+ ...bufferBindingEntries(includeUndefined),
+ ...samplerBindingEntries(includeUndefined),
+ ...sampledAndStorageBindingEntries(includeUndefined, storageTextureFormat),
+ ] as const;
+}
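+
+// Illustrative sketch (the `visibility` choice below is an assumption, not prescribed here):
+// expanding these BGLEntry values into full GPUBindGroupLayoutEntry objects.
+//   const entries: GPUBindGroupLayoutEntry[] = allBindingEntries(false).map((e, binding) => ({
+//     binding,
+//     visibility: GPUConst.ShaderStage.FRAGMENT,
+//     ...e,
+//   }));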
+
+// Shader stages
+
+/** List of all GPUShaderStage values. */
+export type ShaderStageKey = keyof typeof GPUConst.ShaderStage;
+export const kShaderStageKeys = Object.keys(GPUConst.ShaderStage) as ShaderStageKey[];
+export const kShaderStages: readonly GPUShaderStageFlags[] = [
+ GPUConst.ShaderStage.VERTEX,
+ GPUConst.ShaderStage.FRAGMENT,
+ GPUConst.ShaderStage.COMPUTE,
+];
+/** List of all possible combinations of GPUShaderStage values. */
+export const kShaderStageCombinations: readonly GPUShaderStageFlags[] = [0, 1, 2, 3, 4, 5, 6, 7];
+export const kShaderStageCombinationsWithStage: readonly GPUShaderStageFlags[] = [
+ 1, 2, 3, 4, 5, 6, 7,
+];
+
+/**
+ * List of all possible texture sampleCount values.
+ *
+ * MAINTENANCE_TODO: Switch existing tests to use kTextureSampleCounts
+ */
+export const kTextureSampleCounts = [1, 4] as const;
+
+// Sampler info
+
+/** List of all mipmap filter modes. */
+export const kMipmapFilterModes: readonly GPUMipmapFilterMode[] = ['nearest', 'linear'];
+assertTypeTrue<TypeEqual<GPUMipmapFilterMode, (typeof kMipmapFilterModes)[number]>>();
+
+/** List of address modes. */
+export const kAddressModes: readonly GPUAddressMode[] = [
+ 'clamp-to-edge',
+ 'repeat',
+ 'mirror-repeat',
+];
+assertTypeTrue<TypeEqual<GPUAddressMode, (typeof kAddressModes)[number]>>();
+
+// Blend factors and Blend components
+
+/** List of all GPUBlendFactor values. */
+export const kBlendFactors: readonly GPUBlendFactor[] = [
+ 'zero',
+ 'one',
+ 'src',
+ 'one-minus-src',
+ 'src-alpha',
+ 'one-minus-src-alpha',
+ 'dst',
+ 'one-minus-dst',
+ 'dst-alpha',
+ 'one-minus-dst-alpha',
+ 'src-alpha-saturated',
+ 'constant',
+ 'one-minus-constant',
+];
+
+/** List of all GPUBlendOperation values. */
+export const kBlendOperations: readonly GPUBlendOperation[] = [
+ 'add', //
+ 'subtract',
+ 'reverse-subtract',
+ 'min',
+ 'max',
+];
+
+// Primitive topologies
+export const kPrimitiveTopology: readonly GPUPrimitiveTopology[] = [
+ 'point-list',
+ 'line-list',
+ 'line-strip',
+ 'triangle-list',
+ 'triangle-strip',
+];
+assertTypeTrue<TypeEqual<GPUPrimitiveTopology, (typeof kPrimitiveTopology)[number]>>();
+
+export const kIndexFormat: readonly GPUIndexFormat[] = ['uint16', 'uint32'];
+assertTypeTrue<TypeEqual<GPUIndexFormat, (typeof kIndexFormat)[number]>>();
+
+/** Info for each entry of GPUSupportedLimits */
+const [kLimitInfoKeys, kLimitInfoDefaults, kLimitInfoData] =
+ /* prettier-ignore */ [
+ [ 'class', 'core', 'compatibility', 'maximumValue'] as const,
+ [ 'maximum', , , kMaxUnsignedLongValue] as const, {
+ 'maxTextureDimension1D': [ , 8192, 4096, ],
+ 'maxTextureDimension2D': [ , 8192, 4096, ],
+ 'maxTextureDimension3D': [ , 2048, 1024, ],
+ 'maxTextureArrayLayers': [ , 256, 256, ],
+
+ 'maxBindGroups': [ , 4, 4, ],
+ 'maxBindGroupsPlusVertexBuffers': [ , 24, 24, ],
+ 'maxBindingsPerBindGroup': [ , 1000, 1000, ],
+ 'maxDynamicUniformBuffersPerPipelineLayout': [ , 8, 8, ],
+ 'maxDynamicStorageBuffersPerPipelineLayout': [ , 4, 4, ],
+ 'maxSampledTexturesPerShaderStage': [ , 16, 16, ],
+ 'maxSamplersPerShaderStage': [ , 16, 16, ],
+ 'maxStorageBuffersPerShaderStage': [ , 8, 4, ],
+ 'maxStorageTexturesPerShaderStage': [ , 4, 4, ],
+ 'maxUniformBuffersPerShaderStage': [ , 12, 12, ],
+
+ 'maxUniformBufferBindingSize': [ , 65536, 16384, kMaxUnsignedLongLongValue],
+ 'maxStorageBufferBindingSize': [ , 134217728, 134217728, kMaxUnsignedLongLongValue],
+ 'minUniformBufferOffsetAlignment': ['alignment', 256, 256, ],
+ 'minStorageBufferOffsetAlignment': ['alignment', 256, 256, ],
+
+ 'maxVertexBuffers': [ , 8, 8, ],
+ 'maxBufferSize': [ , 268435456, 268435456, kMaxUnsignedLongLongValue],
+ 'maxVertexAttributes': [ , 16, 16, ],
+ 'maxVertexBufferArrayStride': [ , 2048, 2048, ],
+ 'maxInterStageShaderComponents': [ , 60, 60, ],
+ 'maxInterStageShaderVariables': [ , 16, 16, ],
+
+ 'maxColorAttachments': [ , 8, 4, ],
+ 'maxColorAttachmentBytesPerSample': [ , 32, 32, ],
+
+ 'maxComputeWorkgroupStorageSize': [ , 16384, 16384, ],
+ 'maxComputeInvocationsPerWorkgroup': [ , 256, 128, ],
+ 'maxComputeWorkgroupSizeX': [ , 256, 128, ],
+ 'maxComputeWorkgroupSizeY': [ , 256, 128, ],
+ 'maxComputeWorkgroupSizeZ': [ , 64, 64, ],
+ 'maxComputeWorkgroupsPerDimension': [ , 65535, 65535, ],
+} as const];
+
+/**
+ * Feature levels corresponding to core WebGPU and WebGPU
+ * in compatibility mode. They can be passed to
+ * getDefaultLimits, though if you have access to an adapter
+ * it's preferable to use getDefaultLimitsForAdapter.
+ */
+export const kFeatureLevels = ['core', 'compatibility'] as const;
+export type FeatureLevel = (typeof kFeatureLevels)[number];
+
+const kLimitKeys = ['class', 'default', 'maximumValue'] as const;
+
+const kLimitInfoCore = makeTableRenameAndFilter(
+ { default: 'core' },
+ kLimitKeys,
+ kLimitInfoKeys,
+ kLimitInfoDefaults,
+ kLimitInfoData
+);
+
+const kLimitInfoCompatibility = makeTableRenameAndFilter(
+ { default: 'compatibility' },
+ kLimitKeys,
+ kLimitInfoKeys,
+ kLimitInfoDefaults,
+ kLimitInfoData
+);
+
+const kLimitInfos = {
+ core: kLimitInfoCore,
+ compatibility: kLimitInfoCompatibility,
+} as const;
+
+export const kLimitClasses = Object.fromEntries(
+ Object.entries(kLimitInfoCore).map(([k, { class: c }]) => [k, c])
+);
+
+export function getDefaultLimits(featureLevel: FeatureLevel) {
+ return kLimitInfos[featureLevel];
+}
+
+export function getDefaultLimitsForAdapter(adapter: GPUAdapter) {
+ // MAINTENANCE_TODO: Remove casts when GPUAdapter IDL has isCompatibilityMode.
+ return getDefaultLimits(
+ (adapter as unknown as { isCompatibilityMode: boolean }).isCompatibilityMode
+ ? 'compatibility'
+ : 'core'
+ );
+}
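+
+// For example (illustrative), given the table above:
+//   getDefaultLimits('core').maxStorageBuffersPerShaderStage.default          // 8
+//   getDefaultLimits('compatibility').maxStorageBuffersPerShaderStage.default // 4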
+
+/** List of all entries of GPUSupportedLimits. */
+export const kLimits = keysOf(kLimitInfoCore);
+
+/**
+ * The number of color attachments to test.
+ * The CTS needs to generate a consistent list of tests.
+ * We can't use any default limits since they differ between core and compat mode.
+ * So, tests should use this value and filter out any values that are out of
+ * range for the current device.
+ *
+ * The test in maxColorAttachments.spec.ts tests that kMaxColorAttachmentsToTest
+ * is large enough to cover all devices tested.
+ */
+export const kMaxColorAttachmentsToTest = 32;
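+// e.g. (illustrative) a test can iterate counts 1..kMaxColorAttachmentsToTest and skip, at run
+// time, any count above t.device.limits.maxColorAttachments.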
+
+/** The size of indirect draw parameters in the indirectBuffer of drawIndirect */
+export const kDrawIndirectParametersSize = 4;
+/** The size of indirect drawIndexed parameters in the indirectBuffer of drawIndexedIndirect */
+export const kDrawIndexedIndirectParametersSize = 5;
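+// i.e. drawIndirect reads [vertexCount, instanceCount, firstVertex, firstInstance] and
+// drawIndexedIndirect reads [indexCount, instanceCount, firstIndex, baseVertex, firstInstance].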
+
+/** Per-GPUFeatureName info. */
+export const kFeatureNameInfo: {
+ readonly [k in GPUFeatureName]: {};
+} =
+ /* prettier-ignore */ {
+ 'bgra8unorm-storage': {},
+ 'depth-clip-control': {},
+ 'depth32float-stencil8': {},
+ 'texture-compression-bc': {},
+ 'texture-compression-etc2': {},
+ 'texture-compression-astc': {},
+ 'timestamp-query': {},
+ 'indirect-first-instance': {},
+ 'shader-f16': {},
+ 'rg11b10ufloat-renderable': {},
+ 'float32-filterable': {},
+};
+/** List of all GPUFeatureName values. */
+export const kFeatureNames = keysOf(kFeatureNameInfo);
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/compat/README.md b/dom/webgpu/tests/cts/checkout/src/webgpu/compat/README.md
new file mode 100644
index 0000000000..3f7139ce39
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/compat/README.md
@@ -0,0 +1,3 @@
+# Compatibility mode tests
+
+To run these tests in compatibility mode, use `q=webgpu:compat,*&compatibility=1`.
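+
+For example, with the standalone runner the full URL might look like
+`http://localhost:XXXX/standalone/?q=webgpu:compat,*&compatibility=1` (the port depends on your local setup).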
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/encoding/cmds/copyTextureToBuffer.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/encoding/cmds/copyTextureToBuffer.spec.ts
new file mode 100644
index 0000000000..a9af7795b3
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/encoding/cmds/copyTextureToBuffer.spec.ts
@@ -0,0 +1,44 @@
+export const description = `
+Tests limitations of copyTextureToBuffer in compat mode.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { kCompressedTextureFormats, kTextureFormatInfo } from '../../../../../format_info.js';
+import { align } from '../../../../../util/math.js';
+import { CompatibilityTest } from '../../../../compatibility_test.js';
+
+export const g = makeTestGroup(CompatibilityTest);
+
+g.test('compressed')
+ .desc(`Tests that you can not call copyTextureToBuffer with compressed textures in compat mode.`)
+ .params(u => u.combine('format', kCompressedTextureFormats))
+ .beforeAllSubcases(t => {
+ const { format } = t.params;
+ t.selectDeviceOrSkipTestCase([kTextureFormatInfo[format].feature]);
+ })
+ .fn(t => {
+ const { format } = t.params;
+
+ const { blockWidth, blockHeight, bytesPerBlock } = kTextureFormatInfo[format];
+
+ const texture = t.device.createTexture({
+ size: [blockWidth, blockHeight, 1],
+ format,
+ usage: GPUTextureUsage.COPY_SRC,
+ });
+ t.trackForCleanup(texture);
+
+ const bytesPerRow = align(bytesPerBlock, 256);
+
+ const buffer = t.device.createBuffer({
+ size: bytesPerRow,
+ usage: GPUBufferUsage.COPY_DST,
+ });
+ t.trackForCleanup(buffer);
+
+ const encoder = t.device.createCommandEncoder();
+ encoder.copyTextureToBuffer({ texture }, { buffer, bytesPerRow }, [blockWidth, blockHeight, 1]);
+ t.expectGPUError('validation', () => {
+ encoder.finish();
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/encoding/programmable/pipeline_bind_group_compat.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/encoding/programmable/pipeline_bind_group_compat.spec.ts
new file mode 100644
index 0000000000..996e8b28e7
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/encoding/programmable/pipeline_bind_group_compat.spec.ts
@@ -0,0 +1,423 @@
+export const description = `
+Tests limitations of bind group usage in a pipeline in compat mode.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../../../common/util/data_tables.js';
+import { kRenderEncodeTypes } from '../../../../../util/command_buffer_maker.js';
+import { CompatibilityTest } from '../../../../compatibility_test.js';
+
+const kTextureTypes = ['regular', 'storage'];
+type TextureType = (typeof kTextureTypes)[number];
+
+function getTextureTypeWGSL(textureType: TextureType) {
+ return textureType === 'storage' ? 'texture_storage_2d<rgba8unorm, write>' : 'texture_2d<f32>';
+}
+
+type kBindConfigs = ['one bindgroup', 'two bindgroups'];
+type BindConfig = kBindConfigs[number];
+
+/**
+ * Gets the WGSL needed for testing a render pipeline using texture_2d or texture_storage_2d
+ * and either one or two bindgroups.
+ */
+function getRenderShaderModule(
+ device: GPUDevice,
+ textureType: TextureType,
+ bindConfig: BindConfig
+) {
+ const textureTypeWGSL = getTextureTypeWGSL(textureType);
+ const secondGroup = bindConfig === 'one bindgroup' ? 0 : 1;
+ const secondBinding = secondGroup === 0 ? 1 : 0;
+ return device.createShaderModule({
+ code: `
+ @vertex
+ fn vs(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4f {
+ var pos = array(
+ vec4f(-1, 3, 0, 1),
+ vec4f( 3, -1, 0, 1),
+ vec4f(-1, -1, 0, 1));
+ return pos[VertexIndex];
+ }
+
+ @group(0) @binding(0) var tex0 : ${textureTypeWGSL};
+ @group(${secondGroup}) @binding(${secondBinding}) var tex1 : ${textureTypeWGSL};
+
+ @fragment
+ fn fs(@builtin(position) pos: vec4f) -> @location(0) vec4f {
+ _ = tex0;
+ _ = tex1;
+ return vec4f(0);
+ }
+ `,
+ });
+}
+
+/**
+ * Gets the WGSL needed for testing a compute pipeline using texture_2d or texture_storage_2d
+ * and either one or two bindgroups.
+ */
+function getComputeShaderModule(
+ device: GPUDevice,
+ textureType: TextureType,
+ bindConfig: BindConfig
+) {
+ const textureTypeWGSL = getTextureTypeWGSL(textureType);
+ const secondGroup = bindConfig === 'one bindgroup' ? 0 : 1;
+ const secondBinding = secondGroup === 0 ? 1 : 0;
+ return device.createShaderModule({
+ code: `
+ @group(0) @binding(0) var tex0 : ${textureTypeWGSL};
+ @group(${secondGroup}) @binding(${secondBinding}) var tex1 : ${textureTypeWGSL};
+
+ @compute @workgroup_size(1)
+ fn cs() {
+ _ = tex0;
+ _ = tex1;
+ }
+ `,
+ });
+}
+
+type GPUEncoderType = GPURenderPassEncoder | GPUComputePassEncoder | GPURenderBundleEncoder;
+
+const kBindCases: {
+ [key: string]: {
+ bindConfig: BindConfig;
+ fn: (
+ device: GPUDevice,
+ pipeline: GPUPipelineBase,
+ encoder: GPUEncoderType,
+ texture: GPUTexture
+ ) => {
+ shouldSucceed: boolean;
+ };
+ };
+} = {
+ 'incompatible views in the same bindGroup': {
+ bindConfig: 'one bindgroup',
+ fn(device: GPUDevice, pipeline: GPUPipelineBase, encoder: GPUEncoderType, texture: GPUTexture) {
+ const bindGroup = device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: texture.createView({ baseMipLevel: 0, mipLevelCount: 1 }) },
+ { binding: 1, resource: texture.createView({ baseMipLevel: 1, mipLevelCount: 1 }) },
+ ],
+ });
+ encoder.setBindGroup(0, bindGroup);
+ return { shouldSucceed: false };
+ },
+ },
+ 'incompatible views in different bindGroups': {
+ bindConfig: 'two bindgroups',
+ fn(device: GPUDevice, pipeline: GPUPipelineBase, encoder: GPUEncoderType, texture: GPUTexture) {
+ const bindGroup0 = device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: texture.createView({ baseMipLevel: 0, mipLevelCount: 1 }) },
+ ],
+ });
+ const bindGroup1 = device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(1),
+ entries: [
+ { binding: 0, resource: texture.createView({ baseMipLevel: 1, mipLevelCount: 1 }) },
+ ],
+ });
+ encoder.setBindGroup(0, bindGroup0);
+ encoder.setBindGroup(1, bindGroup1);
+ return { shouldSucceed: false };
+ },
+ },
+ 'can bind same view in different bindGroups': {
+ bindConfig: 'two bindgroups',
+ fn(device: GPUDevice, pipeline: GPUPipelineBase, encoder: GPUEncoderType, texture: GPUTexture) {
+ const bindGroup0 = device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: texture.createView({ baseMipLevel: 1, mipLevelCount: 1 }) },
+ ],
+ });
+ const bindGroup1 = device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(1),
+ entries: [
+ { binding: 0, resource: texture.createView({ baseMipLevel: 1, mipLevelCount: 1 }) },
+ ],
+ });
+ encoder.setBindGroup(0, bindGroup0);
+ encoder.setBindGroup(1, bindGroup1);
+ return { shouldSucceed: true };
+ },
+ },
+ 'binding incompatible bindGroups then fix': {
+ bindConfig: 'one bindgroup',
+ fn(device: GPUDevice, pipeline: GPUPipelineBase, encoder: GPUEncoderType, texture: GPUTexture) {
+ const badBindGroup = device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: texture.createView({ baseMipLevel: 0, mipLevelCount: 1 }) },
+ { binding: 1, resource: texture.createView({ baseMipLevel: 1, mipLevelCount: 1 }) },
+ ],
+ });
+ const goodBindGroup = device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: texture.createView({ baseMipLevel: 1, mipLevelCount: 1 }) },
+ { binding: 1, resource: texture.createView({ baseMipLevel: 1, mipLevelCount: 1 }) },
+ ],
+ });
+ encoder.setBindGroup(0, badBindGroup);
+ encoder.setBindGroup(0, goodBindGroup);
+ return { shouldSucceed: true };
+ },
+ },
+};
+
+function createAndBindTwoBindGroupsWithDifferentViewsOfSameTexture(
+ device: GPUDevice,
+ pipeline: GPUPipelineBase,
+ encoder: GPUEncoderType,
+ texture: GPUTexture
+) {
+ const bindGroup0 = device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: texture.createView({ baseMipLevel: 0, mipLevelCount: 1 }) }],
+ });
+ const bindGroup1 = device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(1),
+ entries: [{ binding: 0, resource: texture.createView({ baseMipLevel: 1, mipLevelCount: 1 }) }],
+ });
+ encoder.setBindGroup(0, bindGroup0);
+ encoder.setBindGroup(1, bindGroup1);
+}
+
+const kBindCaseNames = keysOf(kBindCases);
+
+const kDrawUseCases: {
+ [key: string]: (t: CompatibilityTest, encoder: GPURenderCommandsMixin) => void;
+} = {
+ draw: (_t: CompatibilityTest, encoder: GPURenderCommandsMixin) => {
+ encoder.draw(3);
+ },
+ drawIndexed: (t: CompatibilityTest, encoder: GPURenderCommandsMixin) => {
+ const indexBuffer = t.makeBufferWithContents(new Uint16Array([0, 1, 2]), GPUBufferUsage.INDEX);
+ encoder.setIndexBuffer(indexBuffer, 'uint16');
+ encoder.drawIndexed(3);
+ },
+ drawIndirect(t: CompatibilityTest, encoder: GPURenderCommandsMixin) {
+ const indirectBuffer = t.makeBufferWithContents(
+ new Uint32Array([3, 1, 0, 0]),
+ GPUBufferUsage.INDIRECT
+ );
+ encoder.drawIndirect(indirectBuffer, 0);
+ },
+ drawIndexedIndirect(t: CompatibilityTest, encoder: GPURenderCommandsMixin) {
+ const indexBuffer = t.makeBufferWithContents(new Uint16Array([0, 1, 2]), GPUBufferUsage.INDEX);
+ encoder.setIndexBuffer(indexBuffer, 'uint16');
+ const indirectBuffer = t.makeBufferWithContents(
+ new Uint32Array([3, 1, 0, 0, 0]),
+ GPUBufferUsage.INDIRECT
+ );
+ encoder.drawIndexedIndirect(indirectBuffer, 0);
+ },
+};
+const kDrawCaseNames = keysOf(kDrawUseCases);
+
+const kDispatchUseCases: {
+ [key: string]: (t: CompatibilityTest, encoder: GPUComputePassEncoder) => void;
+} = {
+ dispatchWorkgroups(_t: CompatibilityTest, encoder: GPUComputePassEncoder) {
+ encoder.dispatchWorkgroups(1);
+ },
+ dispatchWorkgroupsIndirect(t: CompatibilityTest, encoder: GPUComputePassEncoder) {
+ const indirectBuffer = t.makeBufferWithContents(
+ new Uint32Array([1, 1, 1]),
+ GPUBufferUsage.INDIRECT
+ );
+ encoder.dispatchWorkgroupsIndirect(indirectBuffer, 0);
+ },
+};
+const kDispatchCaseNames = keysOf(kDispatchUseCases);
+
+function createResourcesForRenderPassTest(
+ t: CompatibilityTest,
+ textureType: TextureType,
+ bindConfig: BindConfig
+) {
+ const texture = t.device.createTexture({
+ size: [2, 1, 1],
+ mipLevelCount: 2,
+ format: 'rgba8unorm',
+ usage:
+ textureType === 'storage' ? GPUTextureUsage.STORAGE_BINDING : GPUTextureUsage.TEXTURE_BINDING,
+ });
+ t.trackForCleanup(texture);
+
+ const module = getRenderShaderModule(t.device, textureType, bindConfig);
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'vs',
+ },
+ fragment: {
+ module,
+ entryPoint: 'fs',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ });
+
+ return { texture, pipeline };
+}
+
+function createResourcesForComputePassTest(
+ t: CompatibilityTest,
+ textureType: TextureType,
+ bindConfig: BindConfig
+) {
+ const texture = t.device.createTexture({
+ size: [2, 1, 1],
+ mipLevelCount: 2,
+ format: 'rgba8unorm',
+ usage:
+ textureType === 'storage' ? GPUTextureUsage.STORAGE_BINDING : GPUTextureUsage.TEXTURE_BINDING,
+ });
+ t.trackForCleanup(texture);
+
+ const module = getComputeShaderModule(t.device, textureType, bindConfig);
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module,
+ entryPoint: 'cs',
+ },
+ });
+
+ return { texture, pipeline };
+}
+
+export const g = makeTestGroup(CompatibilityTest);
+
+g.test('twoDifferentTextureViews,render_pass,used')
+ .desc(
+ `
+Tests that you can not use 2 different views of the same texture in a render pass in compat mode.
+
+- Test you can not use incompatible views in the same bindGroup
+- Test you can not use incompatible views in different bindGroups
+- Test you can bind the same view in different bindGroups
+- Test binding incompatible bindGroups is ok as long as they are fixed before draw/dispatch
+
+ The last test is to check validation happens at the correct time (draw/dispatch) and not
+ at setBindGroup.
+ `
+ )
+ .params(u =>
+ u
+ .combine('encoderType', kRenderEncodeTypes)
+ .combine('bindCase', kBindCaseNames)
+ .combine('useCase', kDrawCaseNames)
+ .combine('textureType', kTextureTypes)
+ .filter(
+        // storage textures can't have 2 bind groups pointing to the same
+        // view even in non-compat. They can have different views in
+        // non-compat but not in compat.
+ p =>
+ !(
+ p.textureType === 'storage' &&
+ (p.bindCase === 'can bind same view in different bindGroups' ||
+ p.bindCase === 'binding incompatible bindGroups then fix')
+ )
+ )
+ )
+ .fn(t => {
+ const { encoderType, bindCase, useCase, textureType } = t.params;
+ const { bindConfig, fn } = kBindCases[bindCase];
+ const { texture, pipeline } = createResourcesForRenderPassTest(t, textureType, bindConfig);
+ const { encoder, validateFinish } = t.createEncoder(encoderType);
+ encoder.setPipeline(pipeline);
+ const { shouldSucceed } = fn(t.device, pipeline, encoder, texture);
+ kDrawUseCases[useCase](t, encoder as GPURenderCommandsMixin);
+ validateFinish(shouldSucceed);
+ });
+
+g.test('twoDifferentTextureViews,render_pass,unused')
+ .desc(
+ `
+Tests that binding 2 different views of the same texture but not using them does not generate a validation error.
+ `
+ )
+ .params(u => u.combine('encoderType', kRenderEncodeTypes).combine('textureType', kTextureTypes))
+ .fn(t => {
+ const { encoderType, textureType } = t.params;
+ const { texture, pipeline } = createResourcesForRenderPassTest(
+ t,
+ textureType,
+ 'two bindgroups'
+ );
+ const { encoder, validateFinish } = t.createEncoder(encoderType);
+ encoder.setPipeline(pipeline);
+ createAndBindTwoBindGroupsWithDifferentViewsOfSameTexture(t.device, pipeline, encoder, texture);
+ validateFinish(true);
+ });
+
+g.test('twoDifferentTextureViews,compute_pass,used')
+ .desc(
+ `
+Tests that you can not use 2 different views of the same texture in a compute pass in compat mode.
+
+- Test you can not use incompatible views in the same bindGroup
+- Test you can not use incompatible views in different bindGroups
+- Test you can bind the same view in different bindGroups
+- Test that binding incompatible bindGroups is ok as long as they are fixed before draw/dispatch
+
+ The last test is to check validation happens at the correct time (draw/dispatch) and not
+ at setBindGroup.
+ `
+ )
+ .params(u =>
+ u
+ .combine('bindCase', kBindCaseNames)
+ .combine('useCase', kDispatchCaseNames)
+ .combine('textureType', kTextureTypes)
+ .filter(
+        // storage textures can't have 2 bind groups pointing to the same
+        // view even in non-compat. They can have different views in
+        // non-compat but not in compat.
+ p =>
+ !(
+ p.textureType === 'storage' &&
+ (p.bindCase === 'can bind same view in different bindGroups' ||
+ p.bindCase === 'binding incompatible bindGroups then fix')
+ )
+ )
+ )
+ .fn(t => {
+ const { bindCase, useCase, textureType } = t.params;
+ const { bindConfig, fn } = kBindCases[bindCase];
+ const { texture, pipeline } = createResourcesForComputePassTest(t, textureType, bindConfig);
+ const { encoder, validateFinish } = t.createEncoder('compute pass');
+ encoder.setPipeline(pipeline);
+ const { shouldSucceed } = fn(t.device, pipeline, encoder, texture);
+ kDispatchUseCases[useCase](t, encoder);
+ validateFinish(shouldSucceed);
+ });
+
+g.test('twoDifferentTextureViews,compute_pass,unused')
+ .desc(
+ `
+Tests that binding 2 different views of the same texture but not using them does not generate a validation error.
+ `
+ )
+ .params(u => u.combine('textureType', kTextureTypes))
+ .fn(t => {
+ const { textureType } = t.params;
+ const { texture, pipeline } = createResourcesForComputePassTest(
+ t,
+ textureType,
+ 'two bindgroups'
+ );
+ const { encoder, validateFinish } = t.createEncoder('compute pass');
+ encoder.setPipeline(pipeline);
+ createAndBindTwoBindGroupsWithDifferentViewsOfSameTexture(t.device, pipeline, encoder, texture);
+ validateFinish(true);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/render_pipeline/fragment_state.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/render_pipeline/fragment_state.spec.ts
new file mode 100644
index 0000000000..6536ec9d01
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/render_pipeline/fragment_state.spec.ts
@@ -0,0 +1,128 @@
+export const description = `
+Tests that you can not create a render pipeline with different per target blend state or write mask in compat mode.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../../common/util/data_tables.js';
+import { CompatibilityTest } from '../../../compatibility_test.js';
+
+export const g = makeTestGroup(CompatibilityTest);
+
+type ThreeColorTargets = [GPUColorTargetState, GPUColorTargetState | null, GPUColorTargetState];
+
+const cases = {
+ default(_targets: ThreeColorTargets) {
+ return true;
+ },
+ noBlendTarget0(targets: ThreeColorTargets) {
+ delete targets[0].blend;
+ return false;
+ },
+ noBlendTarget1(targets: ThreeColorTargets) {
+ delete targets[2].blend;
+ return false;
+ },
+ colorOperation(targets: ThreeColorTargets) {
+ targets[2].blend!.color.operation = 'subtract';
+ return false;
+ },
+ colorSrcFactor(targets: ThreeColorTargets) {
+ targets[2].blend!.color.srcFactor = 'src-alpha';
+ return false;
+ },
+ colorDstFactor(targets: ThreeColorTargets) {
+ targets[2].blend!.color.dstFactor = 'dst-alpha';
+ return false;
+ },
+ alphaOperation(targets: ThreeColorTargets) {
+ targets[2].blend!.alpha.operation = 'subtract';
+ return false;
+ },
+ alphaSrcFactor(targets: ThreeColorTargets) {
+ targets[2].blend!.alpha.srcFactor = 'src-alpha';
+ return false;
+ },
+ alphaDstFactor(targets: ThreeColorTargets) {
+ targets[2].blend!.alpha.dstFactor = 'dst-alpha';
+ return false;
+ },
+ writeMask(targets: ThreeColorTargets) {
+ targets[2].writeMask = GPUColorWrite.GREEN;
+ return false;
+ },
+};
+const caseNames = keysOf(cases);
+
+g.test('colorState')
+ .desc(
+ `
+Tests that you can not create a render pipeline with different per target blend state or write mask in compat mode.
+
+- Test no blend state vs some blend state
+- Test different operation, srcFactor, dstFactor for color and alpha
+- Test different writeMask
+ `
+ )
+ .params(u => u.combine('caseName', caseNames))
+ .fn(t => {
+ const { caseName } = t.params;
+
+ const module = t.device.createShaderModule({
+ code: `
+ @vertex fn vs() -> @builtin(position) vec4f {
+ return vec4f(0);
+ }
+
+ struct FragmentOut {
+ @location(0) fragColor0 : vec4f,
+ @location(1) fragColor1 : vec4f,
+ @location(2) fragColor2 : vec4f,
+ }
+
+ @fragment fn fs() -> FragmentOut {
+ var output : FragmentOut;
+ output.fragColor0 = vec4f(0);
+ output.fragColor1 = vec4f(0);
+ output.fragColor2 = vec4f(0);
+ return output;
+ }
+ `,
+ });
+
+ const targets: ThreeColorTargets = [
+ {
+ format: 'rgba8unorm',
+ blend: {
+ color: {},
+ alpha: {},
+ },
+ },
+ null,
+ {
+ format: 'rgba8unorm',
+ blend: {
+ color: {},
+ alpha: {},
+ },
+ },
+ ];
+
+ const pipelineDescriptor: GPURenderPipelineDescriptor = {
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'vs',
+ },
+ fragment: {
+ module,
+ entryPoint: 'fs',
+ targets,
+ },
+ };
+ const isValid = cases[caseName](targets);
+ t.expectGPUError(
+ 'validation',
+ () => t.device.createRenderPipeline(pipelineDescriptor),
+ !isValid
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/render_pipeline/shader_module.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/render_pipeline/shader_module.spec.ts
new file mode 100644
index 0000000000..abe2b063e7
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/render_pipeline/shader_module.spec.ts
@@ -0,0 +1,74 @@
+export const description = `
+Tests limitations of createRenderPipeline related to shader modules in compat mode.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { CompatibilityTest } from '../../../compatibility_test.js';
+
+export const g = makeTestGroup(CompatibilityTest);
+
+g.test('sample_mask')
+ .desc(
+ `
+Tests that you can not create a render pipeline with a shader module that uses sample_mask in compat mode.
+
+- Test that a pipeline with a shader that uses sample_mask fails.
+- Test that a pipeline succeeds when it references a module containing a shader that uses
+  sample_mask, as long as the pipeline itself does not reference that shader.
+ `
+ )
+ .params(u =>
+ u.combine('entryPoint', ['fsWithoutSampleMaskUsage', 'fsWithSampleMaskUsage'] as const)
+ )
+ .fn(t => {
+ const { entryPoint } = t.params;
+
+ const module = t.device.createShaderModule({
+ code: `
+ @vertex fn vs() -> @builtin(position) vec4f {
+ return vec4f(1);
+ }
+ struct Output {
+ @builtin(sample_mask) mask_out: u32,
+ @location(0) color : vec4f,
+ }
+ @fragment fn fsWithoutSampleMaskUsage() -> @location(0) vec4f {
+ return vec4f(1.0, 1.0, 1.0, 1.0);
+ }
+ @fragment fn fsWithSampleMaskUsage() -> Output {
+ var o: Output;
+ // We need to make sure this sample_mask isn't optimized out even if its value equals "no op".
+ o.mask_out = 0xFFFFFFFFu;
+ o.color = vec4f(1.0, 1.0, 1.0, 1.0);
+ return o;
+ }
+ `,
+ });
+
+ const pipelineDescriptor: GPURenderPipelineDescriptor = {
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'vs',
+ },
+ fragment: {
+ module,
+ entryPoint,
+ targets: [
+ {
+ format: 'rgba8unorm',
+ },
+ ],
+ },
+ multisample: {
+ count: 4,
+ },
+ };
+
+ const isValid = entryPoint === 'fsWithoutSampleMaskUsage';
+ t.expectGPUError(
+ 'validation',
+ () => t.device.createRenderPipeline(pipelineDescriptor),
+ !isValid
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/render_pipeline/vertex_state.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/render_pipeline/vertex_state.spec.ts
new file mode 100644
index 0000000000..ef72c50ce9
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/render_pipeline/vertex_state.spec.ts
@@ -0,0 +1,91 @@
+export const description = `
+Tests limitations of createRenderPipeline related to vertex state in compat mode.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { range } from '../../../../../common/util/util.js';
+import { CompatibilityTest } from '../../../compatibility_test.js';
+
+export const g = makeTestGroup(CompatibilityTest);
+
+g.test('maxVertexAttributesVertexIndexInstanceIndex')
+ .desc(
+ `
+Tests that @builtin(vertex_index) and @builtin(instance_index) each count as an attribute.
+
+- Test that you can use maxVertexAttributes
+- Test that you can not use maxVertexAttributes and @builtin(vertex_index)
+- Test that you can not use maxVertexAttributes and @builtin(instance_index)
+- Test that you can use maxVertexAttributes - 1 and @builtin(vertex_index)
+- Test that you can use maxVertexAttributes - 1 and @builtin(instance_index)
+- Test that you can not use maxVertexAttributes - 1 and both @builtin(vertex_index) and @builtin(instance_index)
+- Test that you can use maxVertexAttributes - 2 and both @builtin(vertex_index) and @builtin(instance_index)
+ `
+ )
+ .params(u =>
+ u
+ .combine('useVertexIndex', [false, true] as const)
+ .combine('useInstanceIndex', [false, true] as const)
+ .combine('numAttribsToReserve', [0, 1, 2] as const)
+ .combine('isAsync', [false, true] as const)
+ )
+ .fn(t => {
+ const { useVertexIndex, useInstanceIndex, numAttribsToReserve, isAsync } = t.params;
+ const numAttribs = t.device.limits.maxVertexAttributes - numAttribsToReserve;
+
+ const numBuiltinsUsed = (useVertexIndex ? 1 : 0) + (useInstanceIndex ? 1 : 0);
+ const isValid = numAttribs + numBuiltinsUsed <= t.device.limits.maxVertexAttributes;
+
+ const inputs = range(numAttribs, i => `@location(${i}) v${i}: vec4f`);
+ const outputs = range(numAttribs, i => `v${i}`);
+
+ if (useVertexIndex) {
+ inputs.push('@builtin(vertex_index) vNdx: u32');
+ outputs.push('vec4f(f32(vNdx))');
+ }
+
+ if (useInstanceIndex) {
+ inputs.push('@builtin(instance_index) iNdx: u32');
+ outputs.push('vec4f(f32(iNdx))');
+ }
+
+ const module = t.device.createShaderModule({
+ code: `
+ @fragment fn fs() -> @location(0) vec4f {
+ return vec4f(1);
+ }
+ @vertex fn vs(${inputs.join(', ')}) -> @builtin(position) vec4f {
+ return ${outputs.join(' + ')};
+ }
+ `,
+ });
+
+ const pipelineDescriptor: GPURenderPipelineDescriptor = {
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'vs',
+ buffers: [
+ {
+ arrayStride: 16,
+ attributes: range(numAttribs, i => ({
+ shaderLocation: i,
+ format: 'float32x4',
+ offset: 0,
+ })),
+ },
+ ],
+ },
+ fragment: {
+ module,
+ entryPoint: 'fs',
+ targets: [
+ {
+ format: 'rgba8unorm',
+ },
+ ],
+ },
+ };
+
+ t.doCreateRenderPipelineTest(isAsync, isValid, pipelineDescriptor);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/texture/createTexture.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/texture/createTexture.spec.ts
new file mode 100644
index 0000000000..9f0d353268
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/texture/createTexture.spec.ts
@@ -0,0 +1,41 @@
+export const description = `
+Tests that you can not use bgra8unorm-srgb in compat mode.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { CompatibilityTest } from '../../../compatibility_test.js';
+
+export const g = makeTestGroup(CompatibilityTest);
+
+g.test('unsupportedTextureFormats')
+ .desc(`Tests that you can not create a bgra8unorm-srgb texture in compat mode.`)
+ .fn(t => {
+ t.expectGPUError(
+ 'validation',
+ () =>
+ t.device.createTexture({
+ size: [1, 1, 1],
+ format: 'bgra8unorm-srgb',
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ }),
+ true
+ );
+ });
+
+g.test('unsupportedTextureViewFormats')
+ .desc(
+ `Tests that you can not create a bgra8unorm texture with a bgra8unorm-srgb viewFormat in compat mode.`
+ )
+ .fn(t => {
+ t.expectGPUError(
+ 'validation',
+ () =>
+ t.device.createTexture({
+ size: [1, 1, 1],
+ format: 'bgra8unorm',
+ viewFormats: ['bgra8unorm-srgb'],
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ }),
+ true
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/texture/cubeArray.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/texture/cubeArray.spec.ts
new file mode 100644
index 0000000000..1449dece13
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/compat/api/validation/texture/cubeArray.spec.ts
@@ -0,0 +1,26 @@
+export const description = `
+Tests that you can not create cube array views in compat mode.
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { CompatibilityTest } from '../../../compatibility_test.js';
+
+export const g = makeTestGroup(CompatibilityTest);
+g.test('cube_array')
+ .desc('Test you cannot create a cube array texture view.')
+ .params(u => u.combine('dimension', ['cube', 'cube-array'] as const))
+ .fn(t => {
+ const { dimension } = t.params;
+ const texture = t.device.createTexture({
+ size: [1, 1, 6],
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ });
+
+ const isValid = dimension === 'cube';
+ t.expectGPUError(
+ 'validation',
+ () => texture.createView({ dimension, format: 'rgba8unorm' }),
+ !isValid
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/compat/compatibility_test.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/compat/compatibility_test.ts
new file mode 100644
index 0000000000..bdd44b6374
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/compat/compatibility_test.ts
@@ -0,0 +1,10 @@
+import { ValidationTest } from '../api/validation/validation_test.js';
+
+export class CompatibilityTest extends ValidationTest {
+ override async init() {
+ await super.init();
+ if (!this.isCompatibility) {
+ this.skip('compatibility tests do not work on non-compatibility mode');
+ }
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/constants.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/constants.ts
new file mode 100644
index 0000000000..c7a28cb837
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/constants.ts
@@ -0,0 +1,62 @@
+// Note: Types ensure every field is specified.
+
+/* eslint-disable-next-line @typescript-eslint/no-unused-vars */
+function checkType<T>(_: T) {}
+
+const BufferUsage = {
+ MAP_READ: 0x0001,
+ MAP_WRITE: 0x0002,
+ COPY_SRC: 0x0004,
+ COPY_DST: 0x0008,
+ INDEX: 0x0010,
+ VERTEX: 0x0020,
+ UNIFORM: 0x0040,
+ STORAGE: 0x0080,
+ INDIRECT: 0x0100,
+ QUERY_RESOLVE: 0x0200,
+} as const;
+checkType<Omit<GPUBufferUsage, '__brand'>>(BufferUsage);
+
+const TextureUsage = {
+ COPY_SRC: 0x01,
+ COPY_DST: 0x02,
+ TEXTURE_BINDING: 0x04,
+ SAMPLED: 0x04,
+ STORAGE_BINDING: 0x08,
+ STORAGE: 0x08,
+ RENDER_ATTACHMENT: 0x10,
+} as const;
+checkType<Omit<GPUTextureUsage, '__brand'>>(TextureUsage);
+
+const ColorWrite = {
+ RED: 0x1,
+ GREEN: 0x2,
+ BLUE: 0x4,
+ ALPHA: 0x8,
+ ALL: 0xf,
+} as const;
+checkType<Omit<GPUColorWrite, '__brand'>>(ColorWrite);
+
+const ShaderStage = {
+ VERTEX: 0x1,
+ FRAGMENT: 0x2,
+ COMPUTE: 0x4,
+} as const;
+checkType<Omit<GPUShaderStage, '__brand'>>(ShaderStage);
+
+const MapMode = {
+ READ: 0x1,
+ WRITE: 0x2,
+} as const;
+checkType<Omit<GPUMapMode, '__brand'>>(MapMode);
+
+export const GPUConst = {
+ BufferUsage,
+ TextureUsage,
+ ColorWrite,
+ ShaderStage,
+ MapMode,
+} as const;
+
+export const kMaxUnsignedLongValue = 4294967295;
+export const kMaxUnsignedLongLongValue = Number.MAX_SAFE_INTEGER;
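A minimal usage sketch, not part of the patch above, assuming GPUConst is imported from this constants module. The numeric values mirror the WebIDL constants, so they can be used to build descriptors or parameter lists without relying on the GPUBufferUsage/GPUTextureUsage globals.

// Import path assumes a consumer in src/webgpu/; the list below is hypothetical.
import { GPUConst } from './constants.js';

export const kExampleBufferUsages: readonly GPUBufferUsageFlags[] = [
  GPUConst.BufferUsage.MAP_READ | GPUConst.BufferUsage.COPY_DST,
  GPUConst.BufferUsage.MAP_WRITE | GPUConst.BufferUsage.COPY_SRC,
  GPUConst.BufferUsage.UNIFORM | GPUConst.BufferUsage.COPY_DST,
];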
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/examples.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/examples.spec.ts
new file mode 100644
index 0000000000..29b26caa01
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/examples.spec.ts
@@ -0,0 +1,275 @@
+export const description = `
+Examples of writing CTS tests with various features.
+
+Start here when looking for examples of basic framework usage.
+`;
+
+import { makeTestGroup } from '../common/framework/test_group.js';
+
+import { GPUTest } from './gpu_test.js';
+
+// To run these tests in the standalone runner, run `npm start` then open:
+// - http://localhost:XXXX/standalone/?runnow=1&q=webgpu:examples:*
+// To run in WPT, copy/symlink the out-wpt/ directory as the webgpu/ directory in WPT, then open:
+// - (wpt server url)/webgpu/cts.https.html?q=webgpu:examples:
+//
+// Tests here can be run individually or in groups:
+// - ?q=webgpu:examples:basic,async:
+// - ?q=webgpu:examples:basic,async:*
+// - ?q=webgpu:examples:basic,*
+// - ?q=webgpu:examples:*
+
+export const g = makeTestGroup(GPUTest);
+
+// Note: spaces aren't allowed in test names; use underscores.
+g.test('test_name').fn(_t => {});
+
+g.test('not_implemented_yet,without_plan').unimplemented();
+g.test('not_implemented_yet,with_plan')
+ .desc(
+ `
+Plan for this test. What it tests. Summary of how it tests that functionality.
+- Description of cases, by describing parameters {a, b, c}
+- x= more parameters {x, y, z}
+`
+ )
+ .unimplemented();
+
+g.test('basic').fn(t => {
+ t.expect(true);
+ t.expect(true, 'true should be true');
+
+ t.shouldThrow(
+ // The expected '.name' of the thrown error.
+ 'TypeError',
+ // This function is run inline inside shouldThrow, and is expected to throw.
+ () => {
+ throw new TypeError();
+ },
+ // Log message.
+ { message: 'function should throw Error' }
+ );
+});
+
+g.test('basic,async').fn(t => {
+ // shouldReject must be awaited to ensure it can wait for the promise before the test ends.
+ t.shouldReject(
+ // The expected '.name' of the thrown error.
+ 'TypeError',
+ // Promise expected to reject.
+ Promise.reject(new TypeError()),
+ // Log message.
+ { message: 'Promise.reject should reject' }
+ );
+
+ // Promise can also be an IIFE (immediately-invoked function expression).
+ t.shouldReject(
+ 'TypeError',
+ // eslint-disable-next-line @typescript-eslint/require-await
+ (async () => {
+ throw new TypeError();
+ })(),
+ { message: 'Promise.reject should reject' }
+ );
+});
+
+g.test('basic,plain_cases')
+ .desc(
+ `
+A test can be parameterized with a simple array of objects using .paramsSimple([ ... ]).
+Each such instance of the test is a "case".
+
+In this example, the following cases are generated (identified by their "query string"),
+each with just one subcase:
+ - webgpu:examples:basic,cases:x=2;y=2 runs 1 subcase, with t.params set to:
+ - { x: 2, y: 2 }
+ - webgpu:examples:basic,cases:x=-10;y=-10 runs 1 subcase, with t.params set to:
+ - { x: -10, y: -10 }
+ `
+ )
+ .paramsSimple([
+ { x: 2, y: 2 }, //
+ { x: -10, y: -10 },
+ ])
+ .fn(t => {
+ t.expect(t.params.x === t.params.y);
+ });
+
+g.test('basic,plain_cases_private')
+ .desc(
+ `
+Parameters can be public ("x", "y") which means they're part of the case name.
+They can also be private by starting with an underscore ("_result"), which passes
+them into the test but does not make them part of the case name:
+
+In this example, the following cases are generated, each with just one subcase:
+ - webgpu:examples:basic,cases:x=2;y=4 runs 1 subcase, with t.params set to:
+ - { x: 2, y: 4, _result: 6 }
+ - webgpu:examples:basic,cases:x=-10;y=18 runs 1 subcase, with t.params set to:
+ - { x: -10, y: 18, _result: 8 }
+ `
+ )
+ .paramsSimple([
+ { x: 2, y: 4, _result: 6 }, //
+ { x: -10, y: 18, _result: 8 },
+ ])
+ .fn(t => {
+ t.expect(t.params.x + t.params.y === t.params._result);
+ });
+// (note the blank comment above to enforce newlines on autoformat)
+
+g.test('basic,builder_cases')
+ .desc(
+ `
+A "CaseParamsBuilder" or "SubcaseParamsBuilder" can be passed to .params() instead.
+The params builder provides facilities for generating tests combinatorially (by cartesian
+product). For convenience, the "unit" CaseParamsBuilder is passed as an argument ("u" below).
+
+In this example, the following cases are generated, each with just one subcase:
+ - webgpu:examples:basic,cases:x=1,y=1 runs 1 subcase, with t.params set to:
+ - { x: 1, y: 1 }
+ - webgpu:examples:basic,cases:x=1,y=2 runs 1 subcase, with t.params set to:
+ - { x: 1, y: 2 }
+ - webgpu:examples:basic,cases:x=2,y=1 runs 1 subcase, with t.params set to:
+ - { x: 2, y: 1 }
+ - webgpu:examples:basic,cases:x=2,y=2 runs 1 subcase, with t.params set to:
+ - { x: 2, y: 2 }
+ `
+ )
+ .params(u =>
+ u //
+ .combineWithParams([{ x: 1 }, { x: 2 }])
+ .combineWithParams([{ y: 1 }, { y: 2 }])
+ )
+ .fn(() => {});
+
+g.test('basic,builder_cases_subcases')
+ .desc(
+ `
+Each case can be sub-parameterized using .beginSubcases().
+Each such instance of the test is a "subcase", which cannot be run independently of other
+subcases. It is somewhat like wrapping the entire fn body in a for-loop.
+
+In this example, the following cases are generated:
+ - webgpu:examples:basic,cases:x=1 runs 2 subcases, with t.params set to:
+ - { x: 1, y: 1 }
+ - { x: 1, y: 2 }
+ - webgpu:examples:basic,cases:x=2 runs 2 subcases, with t.params set to:
+ - { x: 2, y: 1 }
+ - { x: 2, y: 2 }
+ `
+ )
+ .params(u =>
+ u //
+ .combineWithParams([{ x: 1 }, { x: 2 }])
+ .beginSubcases()
+ .combineWithParams([{ y: 1 }, { y: 2 }])
+ )
+ .fn(() => {});
+
+g.test('basic,builder_subcases')
+ .desc(
+ `
+In this example, the following single case is generated:
+ - webgpu:examples:basic,cases: runs 4 subcases, with t.params set to:
+ - { x: 1, y: 1 }
+ - { x: 1, y: 2 }
+ - { x: 2, y: 1 }
+ - { x: 2, y: 2 }
+ `
+ )
+ .params(u =>
+ u //
+ .beginSubcases()
+ .combineWithParams([{ x: 1 }, { x: 2 }])
+ .combineWithParams([{ y: 1 }, { y: 2 }])
+ )
+ .fn(() => {});
+
+g.test('basic,builder_subcases_short')
+ .desc(
+ `
+As a shorthand, .paramsSubcasesOnly() can be used.
+
+In this example, the following single case is generated:
+ - webgpu:examples:basic,cases: runs 4 subcases, with t.params set to:
+ - { x: 1, y: 1 }
+ - { x: 1, y: 2 }
+ - { x: 2, y: 1 }
+ - { x: 2, y: 2 }
+ `
+ )
+ .paramsSubcasesOnly(u =>
+ u //
+ .combineWithParams([{ x: 1 }, { x: 2 }])
+ .combineWithParams([{ y: 1 }, { y: 2 }])
+ )
+ .fn(() => {});
+
+g.test('gpu,async').fn(async t => {
+ const x = await t.queue.onSubmittedWorkDone();
+ t.expect(x === undefined);
+});
+
+g.test('gpu,buffers').fn(t => {
+ const data = new Uint32Array([0, 1234, 0]);
+ const src = t.makeBufferWithContents(data, GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST);
+
+ // Use the expectGPUBufferValuesEqual helper to check the actual contents of a GPUBuffer.
+ // This makes a copy and then asynchronously checks the contents. The test fixture will
+ // wait on that result before reporting whether the test passed or failed.
+ t.expectGPUBufferValuesEqual(src, data);
+});
+
+// One of the following two tests should be skipped on most platforms.
+
+g.test('gpu,with_texture_compression,bc')
+ .desc(
+ `Example of a test using a device descriptor.
+Tests that a BC format passes validation iff the feature is enabled.`
+ )
+ .params(u => u.combine('textureCompressionBC', [false, true]))
+ .beforeAllSubcases(t => {
+ const { textureCompressionBC } = t.params;
+
+ if (textureCompressionBC) {
+ t.selectDeviceOrSkipTestCase('texture-compression-bc');
+ }
+ })
+ .fn(t => {
+ const { textureCompressionBC } = t.params;
+ const shouldError = !textureCompressionBC;
+ t.shouldThrow(shouldError ? 'TypeError' : false, () => {
+ t.device.createTexture({
+ format: 'bc1-rgba-unorm',
+ size: [4, 4, 1],
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ });
+ });
+ });
+
+g.test('gpu,with_texture_compression,etc2')
+ .desc(
+ `Example of a test using a device descriptor.
+Tests that an ETC2 format passes validation iff the feature is enabled.`
+ )
+ .params(u => u.combine('textureCompressionETC2', [false, true]))
+ .beforeAllSubcases(t => {
+ const { textureCompressionETC2 } = t.params;
+
+ if (textureCompressionETC2) {
+ t.selectDeviceOrSkipTestCase('texture-compression-etc2' as GPUFeatureName);
+ }
+ })
+ .fn(t => {
+ const { textureCompressionETC2 } = t.params;
+
+ const shouldError = !textureCompressionETC2;
+ t.shouldThrow(shouldError ? 'TypeError' : false, () => {
+ t.device.createTexture({
+ format: 'etc2-rgb8unorm',
+ size: [4, 4, 1],
+ usage: GPUTextureUsage.TEXTURE_BINDING,
+ });
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/format_info.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/format_info.ts
new file mode 100644
index 0000000000..566027714f
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/format_info.ts
@@ -0,0 +1,1273 @@
+import { keysOf } from '../common/util/data_tables.js';
+import { assert } from '../common/util/util.js';
+
+import { align } from './util/math.js';
+import { ImageCopyType } from './util/texture/layout.js';
+
+//
+// Texture format tables
+//
+
+/**
+ * Defaults applied to all texture format tables automatically. Used only inside
+ * `formatTableWithDefaults`. This ensures keys are never missing, always explicitly `undefined`.
+ *
+ * All top-level keys must be defined here, or they won't be exposed at all.
+ */
+const kFormatUniversalDefaults = {
+ blockWidth: undefined,
+ blockHeight: undefined,
+ color: undefined,
+ depth: undefined,
+ stencil: undefined,
+ colorRender: undefined,
+ multisample: undefined,
+ feature: undefined,
+ baseFormat: undefined,
+
+ sampleType: undefined,
+ copySrc: undefined,
+ copyDst: undefined,
+ bytesPerBlock: undefined,
+ renderable: false,
+ renderTargetPixelByteCost: undefined,
+ renderTargetComponentAlignment: undefined,
+
+ // IMPORTANT:
+ // Add new top-level keys both here and in TextureFormatInfo_TypeCheck.
+} as const;
+/**
+ * Takes `table` and applies `defaults` to every row, i.e. for each row,
+ * `{ ...kFormatUniversalDefaults, ...defaults, ...row }`.
+ * This only operates at the first level; it doesn't support defaults in nested objects.
+ */
+function formatTableWithDefaults<Defaults extends {}, Table extends { readonly [K: string]: {} }>({
+ defaults,
+ table,
+}: {
+ defaults: Defaults;
+ table: Table;
+}): {
+ readonly [F in keyof Table]: {
+ readonly [K in keyof typeof kFormatUniversalDefaults]: K extends keyof Table[F]
+ ? Table[F][K]
+ : K extends keyof Defaults
+ ? Defaults[K]
+ : (typeof kFormatUniversalDefaults)[K];
+ };
+} {
+ return Object.fromEntries(
+ Object.entries(table).map(([k, row]) => [
+ k,
+ { ...kFormatUniversalDefaults, ...defaults, ...row },
+ ])
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ ) as any;
+}
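A small worked example, not part of the patch above, of the merge order this helper applies (universal defaults, then the per-table defaults, then each row); the 'fake-format' key and its values are hypothetical.

const exampleTable = formatTableWithDefaults({
  defaults: { blockWidth: 1, blockHeight: 1 },
  table: {
    'fake-format': { blockWidth: 4 },
  },
} as const);
// exampleTable['fake-format'].blockWidth  === 4         (row wins)
// exampleTable['fake-format'].blockHeight === 1         (per-table default)
// exampleTable['fake-format'].renderable  === false     (universal default)
// exampleTable['fake-format'].color       === undefined (universal default)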
+
+/** "plain color formats", plus rgb9e5ufloat. */
+const kRegularTextureFormatInfo = formatTableWithDefaults({
+ defaults: { blockWidth: 1, blockHeight: 1, copySrc: true, copyDst: true },
+ table: {
+ // plain, 8 bits per component
+
+ r8unorm: {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 1 },
+ colorRender: { blend: true, resolve: true, byteCost: 1, alignment: 1 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: true,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ r8snorm: {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 1 },
+ multisample: false,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ r8uint: {
+ color: { type: 'uint', copySrc: true, copyDst: true, storage: false, bytes: 1 },
+ colorRender: { blend: false, resolve: false, byteCost: 1, alignment: 1 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: true,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ r8sint: {
+ color: { type: 'sint', copySrc: true, copyDst: true, storage: false, bytes: 1 },
+ colorRender: { blend: false, resolve: false, byteCost: 1, alignment: 1 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: true,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ rg8unorm: {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 2 },
+ colorRender: { blend: true, resolve: true, byteCost: 2, alignment: 1 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: true,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ rg8snorm: {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 2 },
+ multisample: false,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ rg8uint: {
+ color: { type: 'uint', copySrc: true, copyDst: true, storage: false, bytes: 2 },
+ colorRender: { blend: false, resolve: false, byteCost: 2, alignment: 1 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: true,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ rg8sint: {
+ color: { type: 'sint', copySrc: true, copyDst: true, storage: false, bytes: 2 },
+ colorRender: { blend: false, resolve: false, byteCost: 2, alignment: 1 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: true,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ rgba8unorm: {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: true, bytes: 4 },
+ colorRender: { blend: true, resolve: true, byteCost: 8, alignment: 1 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: true,
+ baseFormat: 'rgba8unorm',
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'rgba8unorm-srgb': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 4 },
+ colorRender: { blend: true, resolve: true, byteCost: 8, alignment: 1 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: true,
+ baseFormat: 'rgba8unorm',
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ rgba8snorm: {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: true, bytes: 4 },
+ multisample: false,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ rgba8uint: {
+ color: { type: 'uint', copySrc: true, copyDst: true, storage: true, bytes: 4 },
+ colorRender: { blend: false, resolve: false, byteCost: 4, alignment: 1 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: true,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ rgba8sint: {
+ color: { type: 'sint', copySrc: true, copyDst: true, storage: true, bytes: 4 },
+ colorRender: { blend: false, resolve: false, byteCost: 4, alignment: 1 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: true,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ bgra8unorm: {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 4 },
+ colorRender: { blend: true, resolve: true, byteCost: 8, alignment: 1 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: true,
+ baseFormat: 'bgra8unorm',
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'bgra8unorm-srgb': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 4 },
+ colorRender: { blend: true, resolve: true, byteCost: 8, alignment: 1 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: true,
+ baseFormat: 'bgra8unorm',
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ // plain, 16 bits per component
+
+ r16uint: {
+ color: { type: 'uint', copySrc: true, copyDst: true, storage: false, bytes: 2 },
+ colorRender: { blend: false, resolve: false, byteCost: 2, alignment: 2 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: true,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ r16sint: {
+ color: { type: 'sint', copySrc: true, copyDst: true, storage: false, bytes: 2 },
+ colorRender: { blend: false, resolve: false, byteCost: 2, alignment: 2 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: true,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ r16float: {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 2 },
+ colorRender: { blend: true, resolve: true, byteCost: 2, alignment: 2 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: true,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ rg16uint: {
+ color: { type: 'uint', copySrc: true, copyDst: true, storage: false, bytes: 4 },
+ colorRender: { blend: false, resolve: false, byteCost: 4, alignment: 2 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: true,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ rg16sint: {
+ color: { type: 'sint', copySrc: true, copyDst: true, storage: false, bytes: 4 },
+ colorRender: { blend: false, resolve: false, byteCost: 4, alignment: 2 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: true,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ rg16float: {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 4 },
+ colorRender: { blend: true, resolve: true, byteCost: 4, alignment: 2 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: true,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ rgba16uint: {
+ color: { type: 'uint', copySrc: true, copyDst: true, storage: true, bytes: 8 },
+ colorRender: { blend: false, resolve: false, byteCost: 8, alignment: 2 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: true,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ rgba16sint: {
+ color: { type: 'sint', copySrc: true, copyDst: true, storage: true, bytes: 8 },
+ colorRender: { blend: false, resolve: false, byteCost: 8, alignment: 2 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: true,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ rgba16float: {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: true, bytes: 8 },
+ colorRender: { blend: true, resolve: true, byteCost: 8, alignment: 2 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: true,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ // plain, 32 bits per component
+
+ r32uint: {
+ color: { type: 'uint', copySrc: true, copyDst: true, storage: true, bytes: 4 },
+ colorRender: { blend: false, resolve: false, byteCost: 4, alignment: 4 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: false,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ r32sint: {
+ color: { type: 'sint', copySrc: true, copyDst: true, storage: true, bytes: 4 },
+ colorRender: { blend: false, resolve: false, byteCost: 4, alignment: 4 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: false,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ r32float: {
+ color: { type: 'unfilterable-float', copySrc: true, copyDst: true, storage: true, bytes: 4 },
+ colorRender: { blend: false, resolve: false, byteCost: 4, alignment: 4 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: true,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ rg32uint: {
+ color: { type: 'uint', copySrc: true, copyDst: true, storage: true, bytes: 8 },
+ colorRender: { blend: false, resolve: false, byteCost: 8, alignment: 4 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: false,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ rg32sint: {
+ color: { type: 'sint', copySrc: true, copyDst: true, storage: true, bytes: 8 },
+ colorRender: { blend: false, resolve: false, byteCost: 8, alignment: 4 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: false,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ rg32float: {
+ color: { type: 'unfilterable-float', copySrc: true, copyDst: true, storage: true, bytes: 8 },
+ colorRender: { blend: false, resolve: false, byteCost: 8, alignment: 4 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: false,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ rgba32uint: {
+ color: { type: 'uint', copySrc: true, copyDst: true, storage: true, bytes: 16 },
+ colorRender: { blend: false, resolve: false, byteCost: 16, alignment: 4 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: false,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ rgba32sint: {
+ color: { type: 'sint', copySrc: true, copyDst: true, storage: true, bytes: 16 },
+ colorRender: { blend: false, resolve: false, byteCost: 16, alignment: 4 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: false,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ rgba32float: {
+ color: { type: 'unfilterable-float', copySrc: true, copyDst: true, storage: true, bytes: 16 },
+ colorRender: { blend: false, resolve: false, byteCost: 16, alignment: 4 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: false,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ // plain, mixed component width, 32 bits per texel
+
+ rgb10a2uint: {
+ color: { type: 'uint', copySrc: true, copyDst: true, storage: false, bytes: 4 },
+ colorRender: { blend: false, resolve: false, byteCost: 8, alignment: 4 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: true,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ rgb10a2unorm: {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 4 },
+ colorRender: { blend: true, resolve: true, byteCost: 8, alignment: 4 },
+ renderable: true,
+ /*prettier-ignore*/ get renderTargetComponentAlignment() { return this.colorRender.alignment; },
+ /*prettier-ignore*/ get renderTargetPixelByteCost() { return this.colorRender.byteCost; },
+ multisample: true,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ rg11b10ufloat: {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 4 },
+ multisample: false,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ renderTargetPixelByteCost: 8,
+ renderTargetComponentAlignment: 4,
+ },
+
+ // packed
+
+ rgb9e5ufloat: {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 4 },
+ multisample: false,
+ /*prettier-ignore*/ get sampleType() { return this.color.type; },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ },
+} as const);
+
+// MAINTENANCE_TODO: Distinguishing "sized" and "unsized" depth stencil formats doesn't make sense
+// because one aspect can be sized and one can be unsized. This should be cleaned up, but is kept
+// this way during a migration phase.
+const kSizedDepthStencilFormatInfo = formatTableWithDefaults({
+ defaults: { blockWidth: 1, blockHeight: 1, multisample: true, copySrc: true, renderable: true },
+ table: {
+ stencil8: {
+ stencil: { type: 'uint', copySrc: true, copyDst: true, storage: false, bytes: 1 },
+ sampleType: 'uint',
+ copyDst: true,
+ bytesPerBlock: 1,
+ },
+ depth16unorm: {
+ depth: { type: 'depth', copySrc: true, copyDst: true, storage: false, bytes: 2 },
+ sampleType: 'depth',
+ copyDst: true,
+ bytesPerBlock: 2,
+ },
+ depth32float: {
+ depth: { type: 'depth', copySrc: true, copyDst: false, storage: false, bytes: 4 },
+ sampleType: 'depth',
+ copyDst: false,
+ bytesPerBlock: 4,
+ },
+ },
+} as const);
+const kUnsizedDepthStencilFormatInfo = formatTableWithDefaults({
+ defaults: { blockWidth: 1, blockHeight: 1, multisample: true },
+ table: {
+ depth24plus: {
+ depth: { type: 'depth', copySrc: false, copyDst: false, storage: false, bytes: undefined },
+ copySrc: false,
+ copyDst: false,
+ sampleType: 'depth',
+ renderable: true,
+ },
+ 'depth24plus-stencil8': {
+ depth: { type: 'depth', copySrc: false, copyDst: false, storage: false, bytes: undefined },
+ stencil: { type: 'uint', copySrc: true, copyDst: true, storage: false, bytes: 1 },
+ copySrc: false,
+ copyDst: false,
+ sampleType: 'depth',
+ renderable: true,
+ },
+ 'depth32float-stencil8': {
+ depth: { type: 'depth', copySrc: true, copyDst: false, storage: false, bytes: 4 },
+ stencil: { type: 'uint', copySrc: true, copyDst: true, storage: false, bytes: 1 },
+ feature: 'depth32float-stencil8',
+ copySrc: false,
+ copyDst: false,
+ sampleType: 'depth',
+ renderable: true,
+ },
+ },
+} as const);
+
+const kBCTextureFormatInfo = formatTableWithDefaults({
+ defaults: {
+ blockWidth: 4,
+ blockHeight: 4,
+ multisample: false,
+ feature: 'texture-compression-bc',
+ sampleType: 'float',
+ copySrc: true,
+ copyDst: true,
+ },
+ table: {
+ 'bc1-rgba-unorm': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 8 },
+ baseFormat: 'bc1-rgba-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'bc1-rgba-unorm-srgb': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 8 },
+ baseFormat: 'bc1-rgba-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ 'bc2-rgba-unorm': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'bc2-rgba-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'bc2-rgba-unorm-srgb': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'bc2-rgba-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ 'bc3-rgba-unorm': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'bc3-rgba-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'bc3-rgba-unorm-srgb': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'bc3-rgba-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ 'bc4-r-unorm': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 8 },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'bc4-r-snorm': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 8 },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ 'bc5-rg-unorm': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'bc5-rg-snorm': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ 'bc6h-rgb-ufloat': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'bc6h-rgb-float': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ 'bc7-rgba-unorm': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'bc7-rgba-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'bc7-rgba-unorm-srgb': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'bc7-rgba-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ },
+} as const);
+
+const kETC2TextureFormatInfo = formatTableWithDefaults({
+ defaults: {
+ blockWidth: 4,
+ blockHeight: 4,
+ multisample: false,
+ feature: 'texture-compression-etc2',
+ sampleType: 'float',
+ copySrc: true,
+ copyDst: true,
+ },
+ table: {
+ 'etc2-rgb8unorm': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 8 },
+ baseFormat: 'etc2-rgb8unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'etc2-rgb8unorm-srgb': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 8 },
+ baseFormat: 'etc2-rgb8unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ 'etc2-rgb8a1unorm': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 8 },
+ baseFormat: 'etc2-rgb8a1unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'etc2-rgb8a1unorm-srgb': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 8 },
+ baseFormat: 'etc2-rgb8a1unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ 'etc2-rgba8unorm': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'etc2-rgba8unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'etc2-rgba8unorm-srgb': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'etc2-rgba8unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ 'eac-r11unorm': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 8 },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'eac-r11snorm': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 8 },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ 'eac-rg11unorm': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'eac-rg11snorm': {
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ },
+} as const);
+
+const kASTCTextureFormatInfo = formatTableWithDefaults({
+ defaults: {
+ multisample: false,
+ feature: 'texture-compression-astc',
+ sampleType: 'float',
+ copySrc: true,
+ copyDst: true,
+ },
+ table: {
+ 'astc-4x4-unorm': {
+ blockWidth: 4,
+ blockHeight: 4,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-4x4-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'astc-4x4-unorm-srgb': {
+ blockWidth: 4,
+ blockHeight: 4,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-4x4-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ 'astc-5x4-unorm': {
+ blockWidth: 5,
+ blockHeight: 4,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-5x4-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'astc-5x4-unorm-srgb': {
+ blockWidth: 5,
+ blockHeight: 4,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-5x4-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ 'astc-5x5-unorm': {
+ blockWidth: 5,
+ blockHeight: 5,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-5x5-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'astc-5x5-unorm-srgb': {
+ blockWidth: 5,
+ blockHeight: 5,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-5x5-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ 'astc-6x5-unorm': {
+ blockWidth: 6,
+ blockHeight: 5,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-6x5-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'astc-6x5-unorm-srgb': {
+ blockWidth: 6,
+ blockHeight: 5,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-6x5-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ 'astc-6x6-unorm': {
+ blockWidth: 6,
+ blockHeight: 6,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-6x6-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'astc-6x6-unorm-srgb': {
+ blockWidth: 6,
+ blockHeight: 6,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-6x6-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ 'astc-8x5-unorm': {
+ blockWidth: 8,
+ blockHeight: 5,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-8x5-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'astc-8x5-unorm-srgb': {
+ blockWidth: 8,
+ blockHeight: 5,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-8x5-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ 'astc-8x6-unorm': {
+ blockWidth: 8,
+ blockHeight: 6,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-8x6-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'astc-8x6-unorm-srgb': {
+ blockWidth: 8,
+ blockHeight: 6,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-8x6-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ 'astc-8x8-unorm': {
+ blockWidth: 8,
+ blockHeight: 8,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-8x8-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'astc-8x8-unorm-srgb': {
+ blockWidth: 8,
+ blockHeight: 8,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-8x8-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ 'astc-10x5-unorm': {
+ blockWidth: 10,
+ blockHeight: 5,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-10x5-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'astc-10x5-unorm-srgb': {
+ blockWidth: 10,
+ blockHeight: 5,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-10x5-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ 'astc-10x6-unorm': {
+ blockWidth: 10,
+ blockHeight: 6,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-10x6-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'astc-10x6-unorm-srgb': {
+ blockWidth: 10,
+ blockHeight: 6,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-10x6-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ 'astc-10x8-unorm': {
+ blockWidth: 10,
+ blockHeight: 8,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-10x8-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'astc-10x8-unorm-srgb': {
+ blockWidth: 10,
+ blockHeight: 8,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-10x8-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ 'astc-10x10-unorm': {
+ blockWidth: 10,
+ blockHeight: 10,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-10x10-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'astc-10x10-unorm-srgb': {
+ blockWidth: 10,
+ blockHeight: 10,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-10x10-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ 'astc-12x10-unorm': {
+ blockWidth: 12,
+ blockHeight: 10,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-12x10-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'astc-12x10-unorm-srgb': {
+ blockWidth: 12,
+ blockHeight: 10,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-12x10-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+
+ 'astc-12x12-unorm': {
+ blockWidth: 12,
+ blockHeight: 12,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-12x12-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ 'astc-12x12-unorm-srgb': {
+ blockWidth: 12,
+ blockHeight: 12,
+ color: { type: 'float', copySrc: true, copyDst: true, storage: false, bytes: 16 },
+ baseFormat: 'astc-12x12-unorm',
+ /*prettier-ignore*/ get bytesPerBlock() { return this.color.bytes; },
+ },
+ },
+} as const);
+
+// Definitions for use locally. To access the table entries, use `kTextureFormatInfo`.
+
+// MAINTENANCE_TODO: Consider generating the exports below programmatically by filtering the big list, instead
+// of using these local constants? Requires some type magic though.
+/* prettier-ignore */ const kCompressedTextureFormatInfo = { ...kBCTextureFormatInfo, ...kETC2TextureFormatInfo, ...kASTCTextureFormatInfo } as const;
+/* prettier-ignore */ const kColorTextureFormatInfo = { ...kRegularTextureFormatInfo, ...kCompressedTextureFormatInfo } as const;
+/* prettier-ignore */ const kEncodableTextureFormatInfo = { ...kRegularTextureFormatInfo, ...kSizedDepthStencilFormatInfo } as const;
+/* prettier-ignore */ const kSizedTextureFormatInfo = { ...kRegularTextureFormatInfo, ...kSizedDepthStencilFormatInfo, ...kCompressedTextureFormatInfo } as const;
+/* prettier-ignore */ const kDepthStencilFormatInfo = { ...kSizedDepthStencilFormatInfo, ...kUnsizedDepthStencilFormatInfo } as const;
+/* prettier-ignore */ const kUncompressedTextureFormatInfo = { ...kRegularTextureFormatInfo, ...kSizedDepthStencilFormatInfo, ...kUnsizedDepthStencilFormatInfo } as const;
+/* prettier-ignore */ const kAllTextureFormatInfo = { ...kUncompressedTextureFormatInfo, ...kCompressedTextureFormatInfo } as const;
+
+/** A "regular" texture format (uncompressed, sized, single-plane color formats). */
+/* prettier-ignore */ export type RegularTextureFormat = keyof typeof kRegularTextureFormatInfo;
+/** A sized depth/stencil texture format. */
+/* prettier-ignore */ export type SizedDepthStencilFormat = keyof typeof kSizedDepthStencilFormatInfo;
+/** An unsized depth/stencil texture format. */
+/* prettier-ignore */ export type UnsizedDepthStencilFormat = keyof typeof kUnsizedDepthStencilFormatInfo;
+/** A compressed (block) texture format. */
+/* prettier-ignore */ export type CompressedTextureFormat = keyof typeof kCompressedTextureFormatInfo;
+
+/** A color texture format (regular | compressed). */
+/* prettier-ignore */ export type ColorTextureFormat = keyof typeof kColorTextureFormatInfo;
+/** An encodable texture format (regular | sized depth/stencil). */
+/* prettier-ignore */ export type EncodableTextureFormat = keyof typeof kEncodableTextureFormatInfo;
+/** A sized texture format (regular | sized depth/stencil | compressed). */
+/* prettier-ignore */ export type SizedTextureFormat = keyof typeof kSizedTextureFormatInfo;
+/** A depth/stencil format (sized | unsized). */
+/* prettier-ignore */ export type DepthStencilFormat = keyof typeof kDepthStencilFormatInfo;
+/** An uncompressed (block size 1x1) format (regular | depth/stencil). */
+/* prettier-ignore */ export type UncompressedTextureFormat = keyof typeof kUncompressedTextureFormatInfo;
+
+/* prettier-ignore */ export const kRegularTextureFormats: readonly RegularTextureFormat[] = keysOf( kRegularTextureFormatInfo);
+/* prettier-ignore */ export const kSizedDepthStencilFormats: readonly SizedDepthStencilFormat[] = keysOf( kSizedDepthStencilFormatInfo);
+/* prettier-ignore */ export const kUnsizedDepthStencilFormats: readonly UnsizedDepthStencilFormat[] = keysOf(kUnsizedDepthStencilFormatInfo);
+/* prettier-ignore */ export const kCompressedTextureFormats: readonly CompressedTextureFormat[] = keysOf( kCompressedTextureFormatInfo);
+
+/* prettier-ignore */ export const kColorTextureFormats: readonly ColorTextureFormat[] = keysOf( kColorTextureFormatInfo);
+/* prettier-ignore */ export const kEncodableTextureFormats: readonly EncodableTextureFormat[] = keysOf( kEncodableTextureFormatInfo);
+/* prettier-ignore */ export const kSizedTextureFormats: readonly SizedTextureFormat[] = keysOf( kSizedTextureFormatInfo);
+/* prettier-ignore */ export const kDepthStencilFormats: readonly DepthStencilFormat[] = keysOf( kDepthStencilFormatInfo);
+/* prettier-ignore */ export const kUncompressedTextureFormats: readonly UncompressedTextureFormat[] = keysOf(kUncompressedTextureFormatInfo);
+/* prettier-ignore */ export const kAllTextureFormats: readonly GPUTextureFormat[] = keysOf( kAllTextureFormatInfo);
+
+// Compressed texture formats are not renderable, so filtering RegularTextureFormats is enough for color targets.
+export const kRenderableColorTextureFormats = kRegularTextureFormats.filter(
+ v => kColorTextureFormatInfo[v].colorRender
+);
+assert(
+ kRenderableColorTextureFormats.every(
+ f =>
+ kAllTextureFormatInfo[f].renderTargetComponentAlignment !== undefined &&
+ kAllTextureFormatInfo[f].renderTargetPixelByteCost !== undefined
+ )
+);
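An illustrative sketch, not part of the patch above, of how a spec file might parameterize over this filtered list. The test name is hypothetical; the imports mirror those used by examples.spec.ts elsewhere in this diff and assume a spec at the src/webgpu/ level.

import { makeTestGroup } from '../common/framework/test_group.js';
import { GPUTest } from './gpu_test.js';
import { kRenderableColorTextureFormats, kTextureFormatInfo } from './format_info.js';

export const g = makeTestGroup(GPUTest);

g.test('renderable_color_formats_example')
  .params(u => u.combine('format', kRenderableColorTextureFormats))
  .fn(t => {
    const { format } = t.params;
    // Per the assert above, every renderable color format carries render-target metadata.
    t.expect(kTextureFormatInfo[format].renderTargetPixelByteCost !== undefined);
  });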
+
+/** Per-GPUTextureFormat-per-aspect info. */
+interface TextureFormatAspectInfo {
+ /** Whether the aspect can be used as `COPY_SRC`. */
+ copySrc: boolean;
+ /** Whether the aspect can be used as `COPY_DST`. */
+ copyDst: boolean;
+ /** Whether the aspect can be used as `STORAGE`. */
+ storage: boolean;
+ /** The "texel block copy footprint" of one texel block; `undefined` if the aspect is unsized. */
+ bytes: number | undefined;
+}
+/** Per GPUTextureFormat-per-aspect info for color aspects. */
+interface TextureFormatColorAspectInfo extends TextureFormatAspectInfo {
+ bytes: number;
+ /** "Best" sample type of the format. "float" also implies "unfilterable-float". */
+ type: 'float' | 'uint' | 'sint' | 'unfilterable-float';
+}
+/** Per GPUTextureFormat-per-aspect info for depth aspects. */
+interface TextureFormatDepthAspectInfo extends TextureFormatAspectInfo {
+ /** "depth" also implies "unfilterable-float". */
+ type: 'depth';
+}
+/** Per GPUTextureFormat-per-aspect info for stencil aspects. */
+interface TextureFormatStencilAspectInfo extends TextureFormatAspectInfo {
+ bytes: 1;
+ type: 'uint';
+}
+
+/**
+ * Per-GPUTextureFormat info.
+ * This is not actually the type of values in kTextureFormatInfo; that type is fully const
+ * so that it can be narrowed very precisely at usage sites by the compiler.
+ * This type exists only as a type check on the inferred type of kTextureFormatInfo.
+ * Documentation is also written here, but not actually visible to the IDE.
+ */
+type TextureFormatInfo_TypeCheck = {
+ /** Texel block width. */
+ blockWidth: number;
+ /** Texel block height. */
+ blockHeight: number;
+ /** Whether the format can be used in a multisample texture. */
+ multisample: boolean;
+ /** The base format for srgb formats. Specified on both srgb and equivalent non-srgb formats. */
+ baseFormat: GPUTextureFormat | undefined;
+ /** Optional feature required to use this format, or `undefined` if none. */
+ feature: GPUFeatureName | undefined;
+
+ /** @deprecated */
+ sampleType: GPUTextureSampleType;
+ /** @deprecated */
+ copySrc: boolean;
+ /** @deprecated */
+ copyDst: boolean;
+ /** @deprecated */
+ bytesPerBlock: number | undefined;
+ /** @deprecated */
+ renderable: boolean;
+ /** @deprecated */
+ renderTargetPixelByteCost: number | undefined;
+ /** @deprecated */
+ renderTargetComponentAlignment: number | undefined;
+
+ // IMPORTANT:
+  // Add new top-level keys both here and in kFormatUniversalDefaults.
+} & (
+ | {
+ /** Color aspect info. */
+ color: TextureFormatColorAspectInfo;
+ /** Defined if the format is a color format that can be used as `RENDER_ATTACHMENT`. */
+ colorRender:
+ | undefined
+ | {
+ /** Whether the format is blendable. */
+ blend: boolean;
+ /** Whether the format can be a multisample resolve target. */
+ resolve: boolean;
+ /** The "render target pixel byte cost" of the format. */
+ byteCost: number;
+ /** The "render target component alignment" of the format. */
+ alignment: number;
+ };
+ }
+ | (
+ | {
+ /** Depth aspect info. */
+ depth: TextureFormatDepthAspectInfo;
+ /** Stencil aspect info. */
+ stencil: undefined | TextureFormatStencilAspectInfo;
+ multisample: true;
+ }
+ | {
+ /** Stencil aspect info. */
+ stencil: TextureFormatStencilAspectInfo;
+ multisample: true;
+ }
+ )
+);
+
+/** Per-GPUTextureFormat info. */
+export const kTextureFormatInfo = {
+ ...kRegularTextureFormatInfo,
+ ...kSizedDepthStencilFormatInfo,
+ ...kUnsizedDepthStencilFormatInfo,
+ ...kBCTextureFormatInfo,
+ ...kETC2TextureFormatInfo,
+ ...kASTCTextureFormatInfo,
+} as const;
+
+/** Defining this variable verifies the type of kTextureFormatInfo. It is not used. */
+// eslint-disable-next-line @typescript-eslint/no-unused-vars
+const kTextureFormatInfo_TypeCheck: {
+ readonly [F in GPUTextureFormat]: TextureFormatInfo_TypeCheck;
+} = kTextureFormatInfo;
+
+/** List of all GPUTextureFormat values. */
+// MAINTENANCE_TODO: dedup with kAllTextureFormats
+export const kTextureFormats: readonly GPUTextureFormat[] = keysOf(kAllTextureFormatInfo);
+
+/** Valid GPUTextureFormats for `copyExternalImageToTexture`, by spec. */
+export const kValidTextureFormatsForCopyE2T = [
+ 'r8unorm',
+ 'r16float',
+ 'r32float',
+ 'rg8unorm',
+ 'rg16float',
+ 'rg32float',
+ 'rgba8unorm',
+ 'rgba8unorm-srgb',
+ 'bgra8unorm',
+ 'bgra8unorm-srgb',
+ 'rgb10a2unorm',
+ 'rgba16float',
+ 'rgba32float',
+] as const;
+
+//
+// Other related stuff
+//
+
+const kDepthStencilFormatCapabilityInBufferTextureCopy = {
+ // kUnsizedDepthStencilFormats
+ depth24plus: {
+ CopyB2T: [],
+ CopyT2B: [],
+ texelAspectSize: { 'depth-only': -1, 'stencil-only': -1 },
+ },
+ 'depth24plus-stencil8': {
+ CopyB2T: ['stencil-only'],
+ CopyT2B: ['stencil-only'],
+ texelAspectSize: { 'depth-only': -1, 'stencil-only': 1 },
+ },
+
+ // kSizedDepthStencilFormats
+ depth16unorm: {
+ CopyB2T: ['all', 'depth-only'],
+ CopyT2B: ['all', 'depth-only'],
+ texelAspectSize: { 'depth-only': 2, 'stencil-only': -1 },
+ },
+ depth32float: {
+ CopyB2T: [],
+ CopyT2B: ['all', 'depth-only'],
+ texelAspectSize: { 'depth-only': 4, 'stencil-only': -1 },
+ },
+ 'depth32float-stencil8': {
+ CopyB2T: ['stencil-only'],
+ CopyT2B: ['depth-only', 'stencil-only'],
+ texelAspectSize: { 'depth-only': 4, 'stencil-only': 1 },
+ },
+ stencil8: {
+ CopyB2T: ['all', 'stencil-only'],
+ CopyT2B: ['all', 'stencil-only'],
+ texelAspectSize: { 'depth-only': -1, 'stencil-only': 1 },
+ },
+} as const;
+
+/** `kDepthStencilFormatResolvedAspect[format][aspect]` returns the aspect-specific format for a
+ * depth-stencil format, or `undefined` if the format doesn't have the aspect.
+ */
+export const kDepthStencilFormatResolvedAspect: {
+ readonly [k in DepthStencilFormat]: {
+ readonly [a in GPUTextureAspect]: DepthStencilFormat | undefined;
+ };
+} = {
+ // kUnsizedDepthStencilFormats
+ depth24plus: {
+ all: 'depth24plus',
+ 'depth-only': 'depth24plus',
+ 'stencil-only': undefined,
+ },
+ 'depth24plus-stencil8': {
+ all: 'depth24plus-stencil8',
+ 'depth-only': 'depth24plus',
+ 'stencil-only': 'stencil8',
+ },
+
+ // kSizedDepthStencilFormats
+ depth16unorm: {
+ all: 'depth16unorm',
+ 'depth-only': 'depth16unorm',
+ 'stencil-only': undefined,
+ },
+ depth32float: {
+ all: 'depth32float',
+ 'depth-only': 'depth32float',
+ 'stencil-only': undefined,
+ },
+ 'depth32float-stencil8': {
+ all: 'depth32float-stencil8',
+ 'depth-only': 'depth32float',
+ 'stencil-only': 'stencil8',
+ },
+ stencil8: {
+ all: 'stencil8',
+ 'depth-only': undefined,
+ 'stencil-only': 'stencil8',
+ },
+} as const;
+
+/**
+ * @returns the GPUTextureFormat corresponding to the @param aspect of @param format.
+ * This allows choosing the correct format for depth-stencil aspects when creating pipelines that
+ * will have to match the resolved format of views, or to get per-aspect information like the
+ * `blockByteSize`.
+ *
+ * Many helpers use an `undefined` `aspect` to mean `'all'`, so that is also the default for this
+ * function.
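+ *
+ * @example
+ * For example, per the resolution table above:
+ * ```
+ * resolvePerAspectFormat('depth24plus-stencil8', 'stencil-only')  // 'stencil8'
+ * resolvePerAspectFormat('depth32float-stencil8', 'depth-only')   // 'depth32float'
+ * resolvePerAspectFormat('rgba8unorm')  // 'rgba8unorm' (aspect defaults to 'all')
+ * ```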
+ */
+export function resolvePerAspectFormat(
+ format: GPUTextureFormat,
+ aspect?: GPUTextureAspect
+): GPUTextureFormat {
+ if (aspect === 'all' || aspect === undefined) {
+ return format;
+ }
+ assert(!!kTextureFormatInfo[format].depth || !!kTextureFormatInfo[format].stencil);
+ const resolved = kDepthStencilFormatResolvedAspect[format as DepthStencilFormat][aspect ?? 'all'];
+ assert(resolved !== undefined);
+ return resolved;
+}
+
+/**
+ * Gets all copyable aspects for copies between texture and buffer for specified depth/stencil format and copy type, by spec.
+ */
+export function depthStencilFormatCopyableAspects(
+ type: ImageCopyType,
+ format: DepthStencilFormat
+): readonly GPUTextureAspect[] {
+ const appliedType = type === 'WriteTexture' ? 'CopyB2T' : type;
+ return kDepthStencilFormatCapabilityInBufferTextureCopy[format][appliedType];
+}
+
+/**
+ * Computes whether a copy between a depth/stencil texture aspect and a buffer is supported, by spec.
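+ *
+ * @example
+ * For example, per `kDepthStencilFormatCapabilityInBufferTextureCopy` above:
+ * ```
+ * depthStencilBufferTextureCopySupported('CopyT2B', 'depth32float', 'depth-only')  // true
+ * depthStencilBufferTextureCopySupported('CopyB2T', 'depth32float', 'depth-only')  // false
+ * ```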
+ */
+export function depthStencilBufferTextureCopySupported(
+ type: ImageCopyType,
+ format: DepthStencilFormat,
+ aspect: GPUTextureAspect
+): boolean {
+ const supportedAspects: readonly GPUTextureAspect[] = depthStencilFormatCopyableAspects(
+ type,
+ format
+ );
+ return supportedAspects.includes(aspect);
+}
+
+/**
+ * Returns the byte size of the depth or stencil aspect of the specified depth/stencil format.
+ * Asserts that the format actually has the requested aspect.
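+ *
+ * @example
+ * For example, per the capability table above:
+ * ```
+ * depthStencilFormatAspectSize('depth16unorm', 'depth-only')  // 2
+ * depthStencilFormatAspectSize('depth32float-stencil8', 'stencil-only')  // 1
+ * ```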
+ */
+export function depthStencilFormatAspectSize(
+ format: DepthStencilFormat,
+ aspect: 'depth-only' | 'stencil-only'
+) {
+ const texelAspectSize =
+ kDepthStencilFormatCapabilityInBufferTextureCopy[format].texelAspectSize[aspect];
+ assert(texelAspectSize > 0);
+ return texelAspectSize;
+}
+
+/**
+ * Returns true iff a texture can be created with the provided GPUTextureDimension
+ * (defaulting to 2d) and GPUTextureFormat, by spec.
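+ *
+ * @example
+ * For example:
+ * ```
+ * textureDimensionAndFormatCompatible('2d', 'rgba8unorm')     // true
+ * textureDimensionAndFormatCompatible('3d', 'depth24plus')    // false (depth/stencil formats require '2d')
+ * textureDimensionAndFormatCompatible('1d', 'bc1-rgba-unorm') // false (block-compressed formats require '2d')
+ * ```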
+ */
+export function textureDimensionAndFormatCompatible(
+ dimension: undefined | GPUTextureDimension,
+ format: GPUTextureFormat
+): boolean {
+ const info = kAllTextureFormatInfo[format];
+ return !(
+ (dimension === '1d' || dimension === '3d') &&
+ (info.blockWidth > 1 || info.depth || info.stencil)
+ );
+}
+
+/**
+ * Check if two formats are view format compatible.
+ *
+ * This function may need to be generalized to use `baseFormat` from `kTextureFormatInfo`.
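+ *
+ * @example
+ * For example:
+ * ```
+ * viewCompatible('rgba8unorm', 'rgba8unorm-srgb')  // true
+ * viewCompatible('rgba8unorm', 'bgra8unorm')       // false
+ * ```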
+ */
+export function viewCompatible(a: GPUTextureFormat, b: GPUTextureFormat): boolean {
+ return a === b || a + '-srgb' === b || b + '-srgb' === a;
+}
+
+export function getFeaturesForFormats<T>(
+ formats: readonly (T & (GPUTextureFormat | undefined))[]
+): readonly (GPUFeatureName | undefined)[] {
+ return Array.from(new Set(formats.map(f => (f ? kTextureFormatInfo[f].feature : undefined))));
+}
+
+export function filterFormatsByFeature<T>(
+ feature: GPUFeatureName | undefined,
+ formats: readonly (T & (GPUTextureFormat | undefined))[]
+): readonly (T & (GPUTextureFormat | undefined))[] {
+ return formats.filter(f => f === undefined || kTextureFormatInfo[f].feature === feature);
+}
+
+export function isCompressedTextureFormat(format: GPUTextureFormat) {
+ return format in kCompressedTextureFormatInfo;
+}
+
+export const kFeaturesForFormats = getFeaturesForFormats(kTextureFormats);
+
+/**
+ * Given an array of texture formats, returns the number of bytes per sample.
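+ *
+ * For example (illustrative, assumed values only): if one format's colorRender were
+ * `{ byteCost: 8, alignment: 1 }` and a second format's were `{ byteCost: 4, alignment: 4 }`,
+ * the result would be align(0, 1) + 8 = 8, then align(8, 4) + 4 = 12 bytes per sample.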
+ */
+export function computeBytesPerSampleFromFormats(formats: readonly GPUTextureFormat[]) {
+ let bytesPerSample = 0;
+ for (const format of formats) {
+ const info = kTextureFormatInfo[format];
+ const alignedBytesPerSample = align(bytesPerSample, info.colorRender!.alignment);
+ bytesPerSample = alignedBytesPerSample + info.colorRender!.byteCost;
+ }
+ return bytesPerSample;
+}
+
+/**
+ * Given an array of GPUColorTargetState, returns the number of bytes per sample.
+ */
+export function computeBytesPerSample(targets: GPUColorTargetState[]) {
+ return computeBytesPerSampleFromFormats(targets.map(({ format }) => format));
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/gpu_test.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/gpu_test.ts
new file mode 100644
index 0000000000..f9ef1f2f06
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/gpu_test.ts
@@ -0,0 +1,1681 @@
+import {
+ Fixture,
+ FixtureClass,
+ FixtureClassInterface,
+ FixtureClassWithMixin,
+ SkipTestCase,
+ SubcaseBatchState,
+ TestCaseRecorder,
+ TestParams,
+} from '../common/framework/fixture.js';
+import { globalTestConfig } from '../common/framework/test_config.js';
+import {
+ assert,
+ makeValueTestVariant,
+ memcpy,
+ range,
+ ValueTestVariant,
+ TypedArrayBufferView,
+ TypedArrayBufferViewConstructor,
+ unreachable,
+} from '../common/util/util.js';
+
+import { getDefaultLimits, kLimits, kQueryTypeInfo } from './capability_info.js';
+import {
+ kTextureFormatInfo,
+ kEncodableTextureFormats,
+ resolvePerAspectFormat,
+ SizedTextureFormat,
+ EncodableTextureFormat,
+ isCompressedTextureFormat,
+ ColorTextureFormat,
+} from './format_info.js';
+import { makeBufferWithContents } from './util/buffer.js';
+import { checkElementsEqual, checkElementsBetween } from './util/check_contents.js';
+import { CommandBufferMaker, EncoderType } from './util/command_buffer_maker.js';
+import { ScalarType } from './util/conversion.js';
+import { DevicePool, DeviceProvider, UncanonicalizedDeviceDescriptor } from './util/device_pool.js';
+import { align, roundDown } from './util/math.js';
+import { physicalMipSizeFromTexture, virtualMipSize } from './util/texture/base.js';
+import {
+ bytesInACompleteRow,
+ getTextureCopyLayout,
+ getTextureSubCopyLayout,
+ LayoutOptions as TextureLayoutOptions,
+} from './util/texture/layout.js';
+import { PerTexelComponent, kTexelRepresentationInfo } from './util/texture/texel_data.js';
+import { TexelView } from './util/texture/texel_view.js';
+import {
+ PerPixelComparison,
+ PixelExpectation,
+ TexelCompareOptions,
+ textureContentIsOKByT2B,
+} from './util/texture/texture_ok.js';
+import { createTextureFromTexelView, createTextureFromTexelViews } from './util/texture.js';
+import { reifyOrigin3D } from './util/unions.js';
+
+const devicePool = new DevicePool();
+
+// MAINTENANCE_TODO: When DevicePool becomes able to provide multiple devices at once, use the
+// usual one instead of a new one.
+const mismatchedDevicePool = new DevicePool();
+
+const kResourceStateValues = ['valid', 'invalid', 'destroyed'] as const;
+export type ResourceState = (typeof kResourceStateValues)[number];
+export const kResourceStates: readonly ResourceState[] = kResourceStateValues;
+
+/** Various "convenient" shorthands for GPUDeviceDescriptors for selectDevice functions. */
+type DeviceSelectionDescriptor =
+ | UncanonicalizedDeviceDescriptor
+ | GPUFeatureName
+ | undefined
+ | Array<GPUFeatureName | undefined>;
+
+export function initUncanonicalizedDeviceDescriptor(
+ descriptor: DeviceSelectionDescriptor
+): UncanonicalizedDeviceDescriptor | undefined {
+ if (typeof descriptor === 'string') {
+ return { requiredFeatures: [descriptor] };
+ } else if (descriptor instanceof Array) {
+ return {
+ requiredFeatures: descriptor.filter(f => f !== undefined) as GPUFeatureName[],
+ };
+ } else {
+ return descriptor;
+ }
+}
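+// For example, initUncanonicalizedDeviceDescriptor('depth-clip-control') returns
+// { requiredFeatures: ['depth-clip-control'] }; an array of feature names has its
+// `undefined` entries filtered out; and an UncanonicalizedDeviceDescriptor (or
+// `undefined`) is passed through unchanged.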
+
+export class GPUTestSubcaseBatchState extends SubcaseBatchState {
+ /** Provider for default device. */
+ private provider: Promise<DeviceProvider> | undefined;
+ /** Provider for mismatched device. */
+ private mismatchedProvider: Promise<DeviceProvider> | undefined;
+
+ override async postInit(): Promise<void> {
+ // Skip all subcases if there's no device.
+ await this.acquireProvider();
+ }
+
+ override async finalize(): Promise<void> {
+ await super.finalize();
+
+ // Ensure devicePool.release is called for both providers even if one rejects.
+ await Promise.all([
+ this.provider?.then(x => devicePool.release(x)),
+ this.mismatchedProvider?.then(x => devicePool.release(x)),
+ ]);
+ }
+
+ /** @internal MAINTENANCE_TODO: Make this not visible to test code? */
+ acquireProvider(): Promise<DeviceProvider> {
+ if (this.provider === undefined) {
+ this.selectDeviceOrSkipTestCase(undefined);
+ }
+ assert(this.provider !== undefined);
+ return this.provider;
+ }
+
+ get isCompatibility() {
+ return globalTestConfig.compatibility;
+ }
+
+ getDefaultLimits() {
+ return getDefaultLimits(this.isCompatibility ? 'compatibility' : 'core');
+ }
+
+ /**
+ * Some tests or cases need particular feature flags or limits to be enabled.
+ * Call this function with a descriptor or feature name (or `undefined`) to select a
+ * GPUDevice with matching capabilities. If this isn't called, a default device is provided.
+ *
+ * If the request isn't supported, throws a SkipTestCase exception to skip the entire test case.
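+ *
+ * @example
+ * Typically called from `beforeAllSubcases` (the feature name here is just an example):
+ * ```
+ * t.selectDeviceOrSkipTestCase('texture-compression-bc');
+ * ```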
+ */
+ selectDeviceOrSkipTestCase(descriptor: DeviceSelectionDescriptor): void {
+ assert(this.provider === undefined, "Can't selectDeviceOrSkipTestCase() multiple times");
+ this.provider = devicePool.acquire(
+ this.recorder,
+ initUncanonicalizedDeviceDescriptor(descriptor)
+ );
+ // Suppress uncaught promise rejection (we'll catch it later).
+ this.provider.catch(() => {});
+ }
+
+ /**
+ * Convenience function for {@link selectDeviceOrSkipTestCase}.
+ * Select a device with the features required by these texture format(s).
+ * If the device creation fails, then skip the test case.
+ */
+ selectDeviceForTextureFormatOrSkipTestCase(
+ formats: GPUTextureFormat | undefined | (GPUTextureFormat | undefined)[]
+ ): void {
+ if (!Array.isArray(formats)) {
+ formats = [formats];
+ }
+ const features = new Set<GPUFeatureName | undefined>();
+ for (const format of formats) {
+ if (format !== undefined) {
+ this.skipIfTextureFormatNotSupported(format);
+ features.add(kTextureFormatInfo[format].feature);
+ }
+ }
+
+ this.selectDeviceOrSkipTestCase(Array.from(features));
+ }
+
+ /**
+ * Convenience function for {@link selectDeviceOrSkipTestCase}.
+ * Select a device with the features required by these query type(s).
+ * If the device creation fails, then skip the test case.
+ */
+ selectDeviceForQueryTypeOrSkipTestCase(types: GPUQueryType | GPUQueryType[]): void {
+ if (!Array.isArray(types)) {
+ types = [types];
+ }
+ const features = types.map(t => kQueryTypeInfo[t].feature);
+ this.selectDeviceOrSkipTestCase(features);
+ }
+
+ /** @internal MAINTENANCE_TODO: Make this not visible to test code? */
+ acquireMismatchedProvider(): Promise<DeviceProvider> | undefined {
+ return this.mismatchedProvider;
+ }
+
+ /**
+ * Some tests need a second device which is different from the first.
+ * This requests a second device so it will be available during the test. If it is not called,
+ * no second device will be available.
+ *
+ * If the request isn't supported, throws a SkipTestCase exception to skip the entire test case.
+ */
+ selectMismatchedDeviceOrSkipTestCase(descriptor: DeviceSelectionDescriptor): void {
+ assert(
+ this.mismatchedProvider === undefined,
+ "Can't selectMismatchedDeviceOrSkipTestCase() multiple times"
+ );
+
+ this.mismatchedProvider = mismatchedDevicePool.acquire(
+ this.recorder,
+ initUncanonicalizedDeviceDescriptor(descriptor)
+ );
+ // Suppress uncaught promise rejection (we'll catch it later).
+ this.mismatchedProvider.catch(() => {});
+ }
+
+ /** Throws an exception marking the subcase as skipped. */
+ skip(msg: string): never {
+ throw new SkipTestCase(msg);
+ }
+
+ /** Throws an exception marking the subcase as skipped if the condition is true. */
+ skipIf(cond: boolean, msg: string | (() => string) = '') {
+ if (cond) {
+ this.skip(typeof msg === 'function' ? msg() : msg);
+ }
+ }
+
+ /**
+ * Skips test if any format is not supported.
+ */
+ skipIfTextureFormatNotSupported(...formats: (GPUTextureFormat | undefined)[]) {
+ if (this.isCompatibility) {
+ for (const format of formats) {
+ if (format === 'bgra8unorm-srgb') {
+ this.skip(`texture format '${format}' is not supported`);
+ }
+ }
+ }
+ }
+
+ skipIfCopyTextureToTextureNotSupportedForFormat(...formats: (GPUTextureFormat | undefined)[]) {
+ if (this.isCompatibility) {
+ for (const format of formats) {
+ if (format && isCompressedTextureFormat(format)) {
+ this.skip(`copyTextureToTexture with ${format} is not supported`);
+ }
+ }
+ }
+ }
+
+ skipIfTextureViewDimensionNotSupported(...dimensions: (GPUTextureViewDimension | undefined)[]) {
+ if (this.isCompatibility) {
+ for (const dimension of dimensions) {
+ if (dimension === 'cube-array') {
+ this.skip(`texture view dimension '${dimension}' is not supported`);
+ }
+ }
+ }
+ }
+}
+
+/**
+ * Base fixture for WebGPU tests.
+ *
+ * This class is a Fixture + a getter that returns a GPUDevice
+ * as well as helpers that use that device.
+ */
+export class GPUTestBase extends Fixture<GPUTestSubcaseBatchState> {
+ public static override MakeSharedState(
+ recorder: TestCaseRecorder,
+ params: TestParams
+ ): GPUTestSubcaseBatchState {
+ return new GPUTestSubcaseBatchState(recorder, params);
+ }
+
+ // This must be overridden in derived classes
+ get device(): GPUDevice {
+ unreachable();
+ return null as unknown as GPUDevice;
+ }
+
+ /** GPUQueue for the test to use. (Same as `t.device.queue`.) */
+ get queue(): GPUQueue {
+ return this.device.queue;
+ }
+
+ get isCompatibility() {
+ return globalTestConfig.compatibility;
+ }
+
+ getDefaultLimits() {
+ return getDefaultLimits(this.isCompatibility ? 'compatibility' : 'core');
+ }
+
+ getDefaultLimit(limit: (typeof kLimits)[number]) {
+ return this.getDefaultLimits()[limit].default;
+ }
+
+ makeLimitVariant(limit: (typeof kLimits)[number], variant: ValueTestVariant) {
+ return makeValueTestVariant(this.device.limits[limit], variant);
+ }
+
+ canCallCopyTextureToBufferWithTextureFormat(format: GPUTextureFormat) {
+ return !this.isCompatibility || !isCompressedTextureFormat(format);
+ }
+
+ /** Snapshot a GPUBuffer's contents, returning a new GPUBuffer with the `MAP_READ` usage. */
+ private createCopyForMapRead(src: GPUBuffer, srcOffset: number, size: number): GPUBuffer {
+ assert(srcOffset % 4 === 0);
+ assert(size % 4 === 0);
+
+ const dst = this.device.createBuffer({
+ size,
+ usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
+ });
+ this.trackForCleanup(dst);
+
+ const c = this.device.createCommandEncoder();
+ c.copyBufferToBuffer(src, srcOffset, dst, 0, size);
+ this.queue.submit([c.finish()]);
+
+ return dst;
+ }
+
+ /**
+ * Offset and size passed to createCopyForMapRead must be divisible by 4. For that
+ * we might need to copy more bytes from the buffer than we want to map.
+ * The returned `subarrayByteStart` is the offset within the copied buffer at which the
+ * contents we initially wanted to map begin.
+ * The copy will not cause an OOB error because the buffer size must be 4-aligned.
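+ *
+ * For example, with offset=6 and size=10: alignedOffset=4, subarrayByteStart=2, and
+ * alignedSize=align(10+2, 4)=12, so bytes [4, 16) of `src` are copied and the requested
+ * contents start at byte 2 of the mappable copy.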
+ */
+ private createAlignedCopyForMapRead(
+ src: GPUBuffer,
+ size: number,
+ offset: number
+ ): { mappable: GPUBuffer; subarrayByteStart: number } {
+ const alignedOffset = roundDown(offset, 4);
+ const subarrayByteStart = offset - alignedOffset;
+ const alignedSize = align(size + subarrayByteStart, 4);
+ const mappable = this.createCopyForMapRead(src, alignedOffset, alignedSize);
+ return { mappable, subarrayByteStart };
+ }
+
+ /**
+ * Snapshot the current contents of a range of a GPUBuffer, and return them as a TypedArray.
+ * Also provides a cleanup() function to unmap and destroy the staging buffer.
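+ *
+ * @example
+ * A typical usage sketch (the buffer and length here are illustrative):
+ * ```
+ * const readback = await t.readGPUBufferRangeTyped(buffer, {
+ *   type: Uint32Array,
+ *   typedLength: 16,
+ * });
+ * // ... inspect readback.data ...
+ * readback.cleanup();
+ * ```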
+ */
+ async readGPUBufferRangeTyped<T extends TypedArrayBufferView>(
+ src: GPUBuffer,
+ {
+ srcByteOffset = 0,
+ method = 'copy',
+ type,
+ typedLength,
+ }: {
+ srcByteOffset?: number;
+ method?: 'copy' | 'map';
+ type: TypedArrayBufferViewConstructor<T>;
+ typedLength: number;
+ }
+ ): Promise<{ data: T; cleanup(): void }> {
+ assert(
+ srcByteOffset % type.BYTES_PER_ELEMENT === 0,
+ 'srcByteOffset must be a multiple of BYTES_PER_ELEMENT'
+ );
+
+ const byteLength = typedLength * type.BYTES_PER_ELEMENT;
+ let mappable: GPUBuffer;
+ let mapOffset: number | undefined, mapSize: number | undefined, subarrayByteStart: number;
+ if (method === 'copy') {
+ ({ mappable, subarrayByteStart } = this.createAlignedCopyForMapRead(
+ src,
+ byteLength,
+ srcByteOffset
+ ));
+ } else if (method === 'map') {
+ mappable = src;
+ mapOffset = roundDown(srcByteOffset, 8);
+ mapSize = align(byteLength, 4);
+ subarrayByteStart = srcByteOffset - mapOffset;
+ } else {
+ unreachable();
+ }
+
+ assert(subarrayByteStart % type.BYTES_PER_ELEMENT === 0);
+ const subarrayStart = subarrayByteStart / type.BYTES_PER_ELEMENT;
+
+ // Map the staging buffer and create the TypedArray from it.
+ await mappable.mapAsync(GPUMapMode.READ, mapOffset, mapSize);
+ const mapped = new type(mappable.getMappedRange(mapOffset, mapSize));
+ const data = mapped.subarray(subarrayStart, subarrayStart + typedLength) as T;
+
+ return {
+ data,
+ cleanup() {
+ mappable.unmap();
+ mappable.destroy();
+ },
+ };
+ }
+
+ /**
+ * Skips test if any format is not supported.
+ */
+ skipIfTextureFormatNotSupported(...formats: (GPUTextureFormat | undefined)[]) {
+ if (this.isCompatibility) {
+ for (const format of formats) {
+ if (format === 'bgra8unorm-srgb') {
+ this.skip(`texture format '${format}' is not supported`);
+ }
+ }
+ }
+ }
+
+ skipIfTextureViewDimensionNotSupported(...dimensions: (GPUTextureViewDimension | undefined)[]) {
+ if (this.isCompatibility) {
+ for (const dimension of dimensions) {
+ if (dimension === 'cube-array') {
+ this.skip(`texture view dimension '${dimension}' is not supported`);
+ }
+ }
+ }
+ }
+
+ skipIfCopyTextureToTextureNotSupportedForFormat(...formats: (GPUTextureFormat | undefined)[]) {
+ if (this.isCompatibility) {
+ for (const format of formats) {
+ if (format && isCompressedTextureFormat(format)) {
+ this.skip(`copyTextureToTexture with ${format} is not supported`);
+ }
+ }
+ }
+ }
+
+ /**
+ * Expect a GPUBuffer's contents to pass the provided check.
+ *
+ * A library of checks can be found in {@link webgpu/util/check_contents}.
+ */
+ expectGPUBufferValuesPassCheck<T extends TypedArrayBufferView>(
+ src: GPUBuffer,
+ check: (actual: T) => Error | undefined,
+ {
+ srcByteOffset = 0,
+ type,
+ typedLength,
+ method = 'copy',
+ mode = 'fail',
+ }: {
+ srcByteOffset?: number;
+ type: TypedArrayBufferViewConstructor<T>;
+ typedLength: number;
+ method?: 'copy' | 'map';
+ mode?: 'fail' | 'warn';
+ }
+ ) {
+ const readbackPromise = this.readGPUBufferRangeTyped(src, {
+ srcByteOffset,
+ type,
+ typedLength,
+ method,
+ });
+ this.eventualAsyncExpectation(async niceStack => {
+ const readback = await readbackPromise;
+ this.expectOK(check(readback.data), { mode, niceStack });
+ readback.cleanup();
+ });
+ }
+
+ /**
+ * Expect a GPUBuffer's contents to equal the values in the provided TypedArray.
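+ *
+ * @example
+ * For example (with an illustrative buffer):
+ * ```
+ * t.expectGPUBufferValuesEqual(buffer, new Uint32Array([1, 2, 3, 4]));
+ * ```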
+ */
+ expectGPUBufferValuesEqual(
+ src: GPUBuffer,
+ expected: TypedArrayBufferView,
+ srcByteOffset: number = 0,
+ { method = 'copy', mode = 'fail' }: { method?: 'copy' | 'map'; mode?: 'fail' | 'warn' } = {}
+ ): void {
+ this.expectGPUBufferValuesPassCheck(src, a => checkElementsEqual(a, expected), {
+ srcByteOffset,
+ type: expected.constructor as TypedArrayBufferViewConstructor,
+ typedLength: expected.length,
+ method,
+ mode,
+ });
+ }
+
+ /**
+ * Expect a buffer to consist exclusively of rows of some repeated expected value. The size of
+ * `expectedValue` must be 1, 2, or any multiple of 4 bytes. Rows in the buffer are expected to be
+ * zero-padded out to `bytesPerRow`. `minBytesPerRow` is the number of bytes per row that contain
+ * actual (non-padding) data and must be an exact multiple of the byte-length of `expectedValue`.
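+ *
+ * @example
+ * For example (values here are illustrative):
+ * ```
+ * t.expectGPUBufferRepeatsSingleValue(buffer, {
+ *   expectedValue: new Uint32Array([0x01020304]).buffer,
+ *   numRows: 4,
+ *   minBytesPerRow: 64,
+ *   bytesPerRow: 256,
+ * });
+ * ```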
+ */
+ expectGPUBufferRepeatsSingleValue(
+ buffer: GPUBuffer,
+ {
+ expectedValue,
+ numRows,
+ minBytesPerRow,
+ bytesPerRow,
+ }: {
+ expectedValue: ArrayBuffer;
+ numRows: number;
+ minBytesPerRow: number;
+ bytesPerRow: number;
+ }
+ ) {
+ const valueSize = expectedValue.byteLength;
+ assert(valueSize === 1 || valueSize === 2 || valueSize % 4 === 0);
+ assert(minBytesPerRow % valueSize === 0);
+ assert(bytesPerRow % 4 === 0);
+
+ // If the buffer is small enough, just generate the full expected buffer contents and check
+ // against them on the CPU.
+ const kMaxBufferSizeToCheckOnCpu = 256 * 1024;
+ const bufferSize = bytesPerRow * (numRows - 1) + minBytesPerRow;
+ if (bufferSize <= kMaxBufferSizeToCheckOnCpu) {
+ const valueBytes = Array.from(new Uint8Array(expectedValue));
+ const rowValues = new Array(minBytesPerRow / valueSize).fill(valueBytes);
+ const rowBytes = new Uint8Array([].concat(...rowValues));
+ const expectedContents = new Uint8Array(bufferSize);
+ range(numRows, row => expectedContents.set(rowBytes, row * bytesPerRow));
+ this.expectGPUBufferValuesEqual(buffer, expectedContents);
+ return;
+ }
+
+ // Copy into a buffer suitable for STORAGE usage.
+ const storageBuffer = this.device.createBuffer({
+ size: bufferSize,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
+ });
+ this.trackForCleanup(storageBuffer);
+
+ // This buffer conveys the data we expect to see for a single value read. Since we read 32 bits at
+ // a time, for values smaller than 32 bits we pad this expectation with repeated value data, or
+ // with zeroes if the width of a row in the buffer is less than 4 bytes. For value sizes larger
+ // than 32 bits, we assume they're a multiple of 32 bits and expect to read exact matches of
+ // `expectedValue` as-is.
+ const expectedDataSize = Math.max(4, valueSize);
+ const expectedDataBuffer = this.device.createBuffer({
+ size: expectedDataSize,
+ usage: GPUBufferUsage.STORAGE,
+ mappedAtCreation: true,
+ });
+ this.trackForCleanup(expectedDataBuffer);
+ const expectedData = new Uint32Array(expectedDataBuffer.getMappedRange());
+ if (valueSize === 1) {
+ const value = new Uint8Array(expectedValue)[0];
+ const values = new Array(Math.min(4, minBytesPerRow)).fill(value);
+ const padding = new Array(Math.max(0, 4 - values.length)).fill(0);
+ const expectedBytes = new Uint8Array(expectedData.buffer);
+ expectedBytes.set([...values, ...padding]);
+ } else if (valueSize === 2) {
+ const value = new Uint16Array(expectedValue)[0];
+ const expectedWords = new Uint16Array(expectedData.buffer);
+ expectedWords.set([value, minBytesPerRow > 2 ? value : 0]);
+ } else {
+ expectedData.set(new Uint32Array(expectedValue));
+ }
+ expectedDataBuffer.unmap();
+
+ // The output buffer has one 32-bit entry per buffer row. An entry's value will be 1 if every
+ // read from the corresponding row matches the expected data derived above, or 0 otherwise.
+ const resultBuffer = this.device.createBuffer({
+ size: numRows * 4,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+ this.trackForCleanup(resultBuffer);
+
+ const readsPerRow = Math.ceil(minBytesPerRow / expectedDataSize);
+ const reducer = `
+ struct Buffer { data: array<u32>, };
+ @group(0) @binding(0) var<storage, read> expected: Buffer;
+ @group(0) @binding(1) var<storage, read> in: Buffer;
+ @group(0) @binding(2) var<storage, read_write> out: Buffer;
+ @compute @workgroup_size(1) fn reduce(
+ @builtin(global_invocation_id) id: vec3<u32>) {
+ let rowBaseIndex = id.x * ${bytesPerRow / 4}u;
+ let readSize = ${expectedDataSize / 4}u;
+ out.data[id.x] = 1u;
+ for (var i: u32 = 0u; i < ${readsPerRow}u; i = i + 1u) {
+ let elementBaseIndex = rowBaseIndex + i * readSize;
+ for (var j: u32 = 0u; j < readSize; j = j + 1u) {
+ if (in.data[elementBaseIndex + j] != expected.data[j]) {
+ out.data[id.x] = 0u;
+ return;
+ }
+ }
+ }
+ }
+ `;
+
+ const pipeline = this.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: this.device.createShaderModule({ code: reducer }),
+ entryPoint: 'reduce',
+ },
+ });
+
+ const bindGroup = this.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: { buffer: expectedDataBuffer } },
+ { binding: 1, resource: { buffer: storageBuffer } },
+ { binding: 2, resource: { buffer: resultBuffer } },
+ ],
+ });
+
+ const commandEncoder = this.device.createCommandEncoder();
+ commandEncoder.copyBufferToBuffer(buffer, 0, storageBuffer, 0, bufferSize);
+ const pass = commandEncoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(numRows);
+ pass.end();
+ this.device.queue.submit([commandEncoder.finish()]);
+
+ const expectedResults = new Array(numRows).fill(1);
+ this.expectGPUBufferValuesEqual(resultBuffer, new Uint32Array(expectedResults));
+ }
+
+ // MAINTENANCE_TODO: add an expectContents for textures, which logs data: uris on failure
+
+ /**
+ * Expect an entire GPUTexture to have a single color at the given mip level (defaults to 0).
+ * MAINTENANCE_TODO: Remove this and/or replace it with a helper in TextureTestMixin.
+ */
+ expectSingleColor(
+ src: GPUTexture,
+ format: GPUTextureFormat,
+ {
+ size,
+ exp,
+ dimension = '2d',
+ slice = 0,
+ layout,
+ }: {
+ size: [number, number, number];
+ exp: PerTexelComponent<number>;
+ dimension?: GPUTextureDimension;
+ slice?: number;
+ layout?: TextureLayoutOptions;
+ }
+ ): void {
+ assert(
+ slice === 0 || dimension === '2d',
+ 'texture slices are only implemented for 2d textures'
+ );
+
+ format = resolvePerAspectFormat(format, layout?.aspect);
+ const { byteLength, minBytesPerRow, bytesPerRow, rowsPerImage, mipSize } = getTextureCopyLayout(
+ format,
+ dimension,
+ size,
+ layout
+ );
+ // MAINTENANCE_TODO: getTextureCopyLayout does not return the proper size for array textures,
+ // i.e. it will leave the z/depth value as is instead of making it 1 when dealing with 2d
+ // texture arrays. Since we are passing in the dimension, we should update it to return the
+ // corrected size.
+ const copySize = [
+ mipSize[0],
+ dimension !== '1d' ? mipSize[1] : 1,
+ dimension === '3d' ? mipSize[2] : 1,
+ ];
+
+ const rep = kTexelRepresentationInfo[format as EncodableTextureFormat];
+ const expectedTexelData = rep.pack(rep.encode(exp));
+
+ const buffer = this.device.createBuffer({
+ size: byteLength,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+ this.trackForCleanup(buffer);
+
+ const commandEncoder = this.device.createCommandEncoder();
+ commandEncoder.copyTextureToBuffer(
+ {
+ texture: src,
+ mipLevel: layout?.mipLevel,
+ origin: { x: 0, y: 0, z: slice },
+ aspect: layout?.aspect,
+ },
+ { buffer, bytesPerRow, rowsPerImage },
+ copySize
+ );
+ this.queue.submit([commandEncoder.finish()]);
+
+ this.expectGPUBufferRepeatsSingleValue(buffer, {
+ expectedValue: expectedTexelData,
+ numRows: rowsPerImage * copySize[2],
+ minBytesPerRow,
+ bytesPerRow,
+ });
+ }
+
+ /**
+ * Return a GPUBuffer that the pixel data will be written into.
+ * MAINTENANCE_TODO: Remove this once expectSinglePixelBetweenTwoValuesIn2DTexture is removed.
+ */
+ private readSinglePixelFrom2DTexture(
+ src: GPUTexture,
+ format: SizedTextureFormat,
+ { x, y }: { x: number; y: number },
+ { slice = 0, layout }: { slice?: number; layout?: TextureLayoutOptions }
+ ): GPUBuffer {
+ const { byteLength, bytesPerRow, rowsPerImage } = getTextureSubCopyLayout(
+ format,
+ [1, 1],
+ layout
+ );
+ const buffer = this.device.createBuffer({
+ size: byteLength,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+ this.trackForCleanup(buffer);
+
+ const commandEncoder = this.device.createCommandEncoder();
+ commandEncoder.copyTextureToBuffer(
+ { texture: src, mipLevel: layout?.mipLevel, origin: { x, y, z: slice } },
+ { buffer, bytesPerRow, rowsPerImage },
+ [1, 1]
+ );
+ this.queue.submit([commandEncoder.finish()]);
+
+ return buffer;
+ }
+
+ /**
+ * Take a single pixel of a 2D texture, interpret it using a TypedArray of the `expected` type,
+ * and expect each value in that array to be between the corresponding "expected" values
+ * (either `a[i] <= actual[i] <= b[i]` or `a[i] >= actual[i] >= b[i]`).
+ * MAINTENANCE_TODO: Remove this once there is a way to deal with undefined lerp-ed values.
+ */
+ expectSinglePixelBetweenTwoValuesIn2DTexture(
+ src: GPUTexture,
+ format: SizedTextureFormat,
+ { x, y }: { x: number; y: number },
+ {
+ exp,
+ slice = 0,
+ layout,
+ generateWarningOnly = false,
+ checkElementsBetweenFn = (act, [a, b]) => checkElementsBetween(act, [i => a[i], i => b[i]]),
+ }: {
+ exp: [TypedArrayBufferView, TypedArrayBufferView];
+ slice?: number;
+ layout?: TextureLayoutOptions;
+ generateWarningOnly?: boolean;
+ checkElementsBetweenFn?: (
+ actual: TypedArrayBufferView,
+ expected: readonly [TypedArrayBufferView, TypedArrayBufferView]
+ ) => Error | undefined;
+ }
+ ): void {
+ assert(exp[0].constructor === exp[1].constructor);
+ const constructor = exp[0].constructor as TypedArrayBufferViewConstructor;
+ assert(exp[0].length === exp[1].length);
+ const typedLength = exp[0].length;
+
+ const buffer = this.readSinglePixelFrom2DTexture(src, format, { x, y }, { slice, layout });
+ this.expectGPUBufferValuesPassCheck(buffer, a => checkElementsBetweenFn(a, exp), {
+ type: constructor,
+ typedLength,
+ mode: generateWarningOnly ? 'warn' : 'fail',
+ });
+ }
+
+ /**
+ * Emulate a texture-to-buffer copy by using a compute shader
+ * to load the texture value of a single pixel and write it to a storage buffer.
+ * For sample count == 1, the buffer contains a single value for that pixel.
+ * For sample count > 1, the buffer contains (N = sampleCount) values sorted
+ * in the order of their sample index [0, sampleCount - 1].
+ *
+ * This can be useful when a texture-to-buffer copy is not available for the texture format
+ * (e.g. depth24plus), or when the texture is multisampled.
+ *
+ * MAINTENANCE_TODO: extend to read multiple pixels with given origin and size.
+ *
+ * @returns storage buffer containing the copied value from the texture.
+ */
+ copySinglePixelTextureToBufferUsingComputePass(
+ type: ScalarType,
+ componentCount: number,
+ textureView: GPUTextureView,
+ sampleCount: number
+ ): GPUBuffer {
+ const textureSrcCode =
+ sampleCount === 1
+ ? `@group(0) @binding(0) var src: texture_2d<${type}>;`
+ : `@group(0) @binding(0) var src: texture_multisampled_2d<${type}>;`;
+ const code = `
+ struct Buffer {
+ data: array<${type}>,
+ };
+
+ ${textureSrcCode}
+ @group(0) @binding(1) var<storage, read_write> dst : Buffer;
+
+ @compute @workgroup_size(1) fn main() {
+ var coord = vec2<i32>(0, 0);
+ for (var sampleIndex = 0; sampleIndex < ${sampleCount};
+ sampleIndex = sampleIndex + 1) {
+ let o = sampleIndex * ${componentCount};
+ let v = textureLoad(src, coord, sampleIndex);
+ for (var component = 0; component < ${componentCount}; component = component + 1) {
+ dst.data[o + component] = v[component];
+ }
+ }
+ }
+ `;
+ const computePipeline = this.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: this.device.createShaderModule({
+ code,
+ }),
+ entryPoint: 'main',
+ },
+ });
+
+ const storageBuffer = this.device.createBuffer({
+ size: sampleCount * type.size * componentCount,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST | GPUBufferUsage.COPY_SRC,
+ });
+ this.trackForCleanup(storageBuffer);
+
+ const uniformBindGroup = this.device.createBindGroup({
+ layout: computePipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: textureView,
+ },
+ {
+ binding: 1,
+ resource: {
+ buffer: storageBuffer,
+ },
+ },
+ ],
+ });
+
+ const encoder = this.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(computePipeline);
+ pass.setBindGroup(0, uniformBindGroup);
+ pass.dispatchWorkgroups(1);
+ pass.end();
+ this.device.queue.submit([encoder.finish()]);
+
+ return storageBuffer;
+ }
+
+ /**
+ * Expect the specified WebGPU error to be generated when running the provided function.
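+ *
+ * @example
+ * For example (descriptor values are illustrative):
+ * ```
+ * const buffer = t.expectGPUError('validation', () =>
+ *   t.device.createBuffer({
+ *     size: 16,
+ *     usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.MAP_WRITE, // invalid usage combination
+ *   })
+ * );
+ * ```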
+ */
+ expectGPUError<R>(filter: GPUErrorFilter, fn: () => R, shouldError: boolean = true): R {
+ // If no error is expected, we let the scope surrounding the test catch it.
+ if (!shouldError) {
+ return fn();
+ }
+
+ this.device.pushErrorScope(filter);
+ const returnValue = fn();
+ const promise = this.device.popErrorScope();
+
+ this.eventualAsyncExpectation(async niceStack => {
+ const error = await promise;
+
+ let failed = false;
+ switch (filter) {
+ case 'out-of-memory':
+ failed = !(error instanceof GPUOutOfMemoryError);
+ break;
+ case 'validation':
+ failed = !(error instanceof GPUValidationError);
+ break;
+ }
+
+ if (failed) {
+ niceStack.message = `Expected ${filter} error`;
+ this.rec.expectationFailed(niceStack);
+ } else {
+ niceStack.message = `Captured ${filter} error`;
+ if (error instanceof GPUValidationError) {
+ niceStack.message += ` - ${error.message}`;
+ }
+ this.rec.debug(niceStack);
+ }
+ });
+
+ return returnValue;
+ }
+
+ /**
+ * Expect a validation error inside the callback.
+ *
+ * Tests should always do just one WebGPU call in the callback, to make sure that's what's tested.
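+ *
+ * @example
+ * For example (the descriptor is illustrative; a sampleCount of 3 is invalid by spec):
+ * ```
+ * t.expectValidationError(() => {
+ *   t.device.createTexture({
+ *     size: [4, 4],
+ *     format: 'rgba8unorm',
+ *     usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ *     sampleCount: 3,
+ *   });
+ * });
+ * ```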
+ */
+ expectValidationError(fn: () => void, shouldError: boolean = true): void {
+ // If no error is expected, we let the scope surrounding the test catch it.
+ if (shouldError) {
+ this.device.pushErrorScope('validation');
+ }
+
+ // Note: A return value is not allowed for the callback function. This is to avoid confusion
+ // about what the actual behavior would be; either of the following could be reasonable:
+ // - Make expectValidationError async, and have it await on fn(). This causes an async split
+ // between pushErrorScope and popErrorScope, so if the caller doesn't `await` on
+ // expectValidationError (either accidentally or because it doesn't care to do so), then
+ // other test code will be (nondeterministically) caught by the error scope.
+ // - Make expectValidationError NOT await fn(), but just execute its first block (until the
+ // first await) and return the return value (a Promise). This would be confusing because it
+ // would look like the error scope includes the whole async function, but doesn't.
+ // If we do decide we need to return a value, we should use the latter semantic.
+ const returnValue = fn() as unknown;
+ assert(
+ returnValue === undefined,
+ 'expectValidationError callback should not return a value (or be async)'
+ );
+
+ if (shouldError) {
+ const promise = this.device.popErrorScope();
+
+ this.eventualAsyncExpectation(async niceStack => {
+ const gpuValidationError = await promise;
+ if (!gpuValidationError) {
+ niceStack.message = 'Validation succeeded unexpectedly.';
+ this.rec.validationFailed(niceStack);
+ } else if (gpuValidationError instanceof GPUValidationError) {
+ niceStack.message = `Validation failed, as expected - ${gpuValidationError.message}`;
+ this.rec.debug(niceStack);
+ }
+ });
+ }
+ }
+
+ /**
+ * Create a GPUBuffer with the specified contents and usage.
+ *
+ * MAINTENANCE_TODO: Several call sites would be simplified if this took ArrayBuffer as well.
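+ *
+ * @example
+ * For example:
+ * ```
+ * const src = t.makeBufferWithContents(
+ *   new Uint32Array([1, 2, 3, 4]),
+ *   GPUBufferUsage.COPY_SRC
+ * );
+ * ```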
+ */
+ makeBufferWithContents(dataArray: TypedArrayBufferView, usage: GPUBufferUsageFlags): GPUBuffer {
+ return this.trackForCleanup(makeBufferWithContents(this.device, dataArray, usage));
+ }
+
+ /**
+ * Returns a GPUCommandEncoder, GPUComputePassEncoder, GPURenderPassEncoder, or
+ * GPURenderBundleEncoder, and a `finish` method returning a GPUCommandBuffer.
+ * Allows testing methods which have the same signature across multiple encoder interfaces.
+ *
+ * @example
+ * ```
+ * g.test('popDebugGroup')
+ * .params(u => u.combine('encoderType', kEncoderTypes))
+ * .fn(t => {
+ * const { encoder, finish } = t.createEncoder(t.params.encoderType);
+ * encoder.popDebugGroup();
+ * });
+ *
+ * g.test('writeTimestamp')
+ * .params(u => u.combine('encoderType', ['non-pass', 'compute pass', 'render pass'] as const))
+ * .fn(t => {
+ * const { encoder, finish } = t.createEncoder(t.params.encoderType);
+ * // Encoder type is inferred, so `writeTimestamp` can be used even though it doesn't exist
+ * // on GPURenderBundleEncoder.
+ * encoder.writeTimestamp(args);
+ * });
+ * ```
+ */
+ createEncoder<T extends EncoderType>(
+ encoderType: T,
+ {
+ attachmentInfo,
+ occlusionQuerySet,
+ }: {
+ attachmentInfo?: GPURenderBundleEncoderDescriptor;
+ occlusionQuerySet?: GPUQuerySet;
+ } = {}
+ ): CommandBufferMaker<T> {
+ const fullAttachmentInfo = {
+ // Defaults if not overridden:
+ colorFormats: ['rgba8unorm'],
+ sampleCount: 1,
+ // Passed values take precedence.
+ ...attachmentInfo,
+ } as const;
+
+ switch (encoderType) {
+ case 'non-pass': {
+ const encoder = this.device.createCommandEncoder();
+
+ return new CommandBufferMaker(this, encoder, () => {
+ return encoder.finish();
+ });
+ }
+ case 'render bundle': {
+ const device = this.device;
+ const rbEncoder = device.createRenderBundleEncoder(fullAttachmentInfo);
+ const pass = this.createEncoder('render pass', { attachmentInfo });
+
+ return new CommandBufferMaker(this, rbEncoder, () => {
+ pass.encoder.executeBundles([rbEncoder.finish()]);
+ return pass.finish();
+ });
+ }
+ case 'compute pass': {
+ const commandEncoder = this.device.createCommandEncoder();
+ const encoder = commandEncoder.beginComputePass();
+
+ return new CommandBufferMaker(this, encoder, () => {
+ encoder.end();
+ return commandEncoder.finish();
+ });
+ }
+ case 'render pass': {
+ const makeAttachmentView = (format: GPUTextureFormat) =>
+ this.trackForCleanup(
+ this.device.createTexture({
+ size: [16, 16, 1],
+ format,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ sampleCount: fullAttachmentInfo.sampleCount,
+ })
+ ).createView();
+
+ let depthStencilAttachment: GPURenderPassDepthStencilAttachment | undefined = undefined;
+ if (fullAttachmentInfo.depthStencilFormat !== undefined) {
+ depthStencilAttachment = {
+ view: makeAttachmentView(fullAttachmentInfo.depthStencilFormat),
+ depthReadOnly: fullAttachmentInfo.depthReadOnly,
+ stencilReadOnly: fullAttachmentInfo.stencilReadOnly,
+ };
+ if (
+ kTextureFormatInfo[fullAttachmentInfo.depthStencilFormat].depth &&
+ !fullAttachmentInfo.depthReadOnly
+ ) {
+ depthStencilAttachment.depthClearValue = 0;
+ depthStencilAttachment.depthLoadOp = 'clear';
+ depthStencilAttachment.depthStoreOp = 'discard';
+ }
+ if (
+ kTextureFormatInfo[fullAttachmentInfo.depthStencilFormat].stencil &&
+ !fullAttachmentInfo.stencilReadOnly
+ ) {
+ depthStencilAttachment.stencilClearValue = 1;
+ depthStencilAttachment.stencilLoadOp = 'clear';
+ depthStencilAttachment.stencilStoreOp = 'discard';
+ }
+ }
+ const passDesc: GPURenderPassDescriptor = {
+ colorAttachments: Array.from(fullAttachmentInfo.colorFormats, format =>
+ format
+ ? {
+ view: makeAttachmentView(format),
+ clearValue: [0, 0, 0, 0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ }
+ : null
+ ),
+ depthStencilAttachment,
+ occlusionQuerySet,
+ };
+
+ const commandEncoder = this.device.createCommandEncoder();
+ const encoder = commandEncoder.beginRenderPass(passDesc);
+ return new CommandBufferMaker(this, encoder, () => {
+ encoder.end();
+ return commandEncoder.finish();
+ });
+ }
+ }
+ unreachable();
+ }
+}
+
+/**
+ * Fixture for WebGPU tests that uses a DeviceProvider
+ */
+export class GPUTest extends GPUTestBase {
+ // Should never be undefined in a test. If it is, init() must not have run/finished.
+ private provider: DeviceProvider | undefined;
+ private mismatchedProvider: DeviceProvider | undefined;
+
+ override async init() {
+ await super.init();
+
+ this.provider = await this.sharedState.acquireProvider();
+ this.mismatchedProvider = await this.sharedState.acquireMismatchedProvider();
+ }
+
+ /**
+ * GPUDevice for the test to use.
+ */
+ override get device(): GPUDevice {
+ assert(this.provider !== undefined, 'internal error: GPUDevice missing?');
+ return this.provider.device;
+ }
+
+ /**
+ * GPUDevice for tests requiring a second device different from the default one,
+ * e.g. for creating objects used by device_mismatch validation tests.
+ */
+ get mismatchedDevice(): GPUDevice {
+ assert(
+ this.mismatchedProvider !== undefined,
+ 'selectMismatchedDeviceOrSkipTestCase was not called in beforeAllSubcases'
+ );
+ return this.mismatchedProvider.device;
+ }
+
+ /**
+ * Expects the device to be lost for a particular reason at the teardown of the test.
+ */
+ expectDeviceLost(reason: GPUDeviceLostReason): void {
+ assert(this.provider !== undefined, 'internal error: GPUDevice missing?');
+ this.provider.expectDeviceLost(reason);
+ }
+}
+
+/**
+ * Texture expectation mixin can be applied on top of GPUTest to add texture
+ * related expectation helpers.
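+ *
+ * @example
+ * A typical way to apply the mixin when defining a test group:
+ * ```
+ * export const g = makeTestGroup(TextureTestMixin(GPUTest));
+ * ```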
+ */
+export interface TextureTestMixinType {
+ /**
+ * Creates a 1 mip level texture with the contents of a TexelView and tracks
+ * it for destruction for the test case.
+ */
+ createTextureFromTexelView(
+ texelView: TexelView,
+ desc: Omit<GPUTextureDescriptor, 'format'>
+ ): GPUTexture;
+
+ /**
+ * Creates a mipmapped texture where each mipmap level's (`i`) content is
+ * from `texelViews[i]` and tracks it for destruction for the test case.
+ */
+ createTextureFromTexelViewsMultipleMipmaps(
+ texelViews: TexelView[],
+ desc: Omit<GPUTextureDescriptor, 'format'>
+ ): GPUTexture;
+
+ /**
+ * Expects that comparing the subrect (defined via `size`) of a GPUTexture
+ * to the expected TexelView passes without error.
+ */
+ expectTexelViewComparisonIsOkInTexture(
+ src: GPUImageCopyTexture,
+ exp: TexelView,
+ size: GPUExtent3D,
+ comparisonOptions?: TexelCompareOptions
+ ): void;
+
+ /**
+ * Expects that a sparse set of pixels in the GPUTexture passes comparison against
+ * their expected colors without error.
+ */
+ expectSinglePixelComparisonsAreOkInTexture<E extends PixelExpectation>(
+ src: GPUImageCopyTexture,
+ exp: PerPixelComparison<E>[],
+ comparisonOptions?: TexelCompareOptions
+ ): void;
+
+ /**
+ * Renders the 2 given textures to an rgba8unorm texture at the size of the
+ * specified mipLevel, each time reading the contents of the result.
+ * Expects contents of both renders to match. Also expects contents described
+ * by origin and size to not be a constant value so as to make sure something
+ * interesting was actually compared.
+ *
+ * The point of this function is to compare compressed texture contents in
+ * compatibility mode. `copyTextureToBuffer` does not work for compressed
+ * textures in compatibility mode, so instead we pass 2 compressed textures
+ * to this function. Each one is rendered to an `rgba8unorm` texture,
+ * the contents of that `rgba8unorm` texture are read back via `copyTextureToBuffer`,
+ * and then the results are compared. This indirectly lets us compare the contents
+ * of the 2 compressed textures.
+ *
+ * Code calling this function generates the textures such that
+ * `actualTexture` is produced by calling `writeTexture`, `copyBufferToTexture`,
+ * or `copyTextureToTexture`, and `expectedTexture`'s data is generated entirely
+ * on the CPU in such a way that its content should match whatever process
+ * was used to generate `actualTexture`. Often this involves calling
+ * `updateLinearTextureDataSubBox`.
+ */
+ expectTexturesToMatchByRendering(
+ actualTexture: GPUTexture,
+ expectedTexture: GPUTexture,
+ mipLevel: number,
+ origin: Required<GPUOrigin3DDict>,
+ size: Required<GPUExtent3DDict>
+ ): void;
+
+ /**
+ * Copies an entire texture's mipLevel to a buffer
+ */
+ copyWholeTextureToNewBufferSimple(texture: GPUTexture, mipLevel: number): GPUBuffer;
+
+ /**
+ * Copies a texture's mipLevel to a buffer.
+ * The size of the buffer is specified by `byteLength`.
+ */
+ copyWholeTextureToNewBuffer(
+ { texture, mipLevel }: { texture: GPUTexture; mipLevel: number | undefined },
+ resultDataLayout: {
+ bytesPerBlock: number;
+ byteLength: number;
+ bytesPerRow: number;
+ rowsPerImage: number;
+ mipSize: [number, number, number];
+ }
+ ): GPUBuffer;
+
+ /**
+ * Updates a Uint8Array with a cubic portion of data from another Uint8Array.
+ * Effectively it's a Uint8Array to Uint8Array copy that
+ * does the same thing as `writeTexture`, but because the
+ * destination is a buffer, you have to provide the parameters
+ * of the destination buffer similarly to how you'd provide them
+ * to `copyTextureToBuffer`.
+ */
+ updateLinearTextureDataSubBox(
+ format: ColorTextureFormat,
+ copySize: Required<GPUExtent3DDict>,
+ copyParams: {
+ dest: LinearCopyParameters;
+ src: LinearCopyParameters;
+ }
+ ): void;
+
+ /**
+ * Gets a byte offset to a texel
+ */
+ getTexelOffsetInBytes(
+ textureDataLayout: Required<GPUImageDataLayout>,
+ format: ColorTextureFormat,
+ texel: Required<GPUOrigin3DDict>,
+ origin?: Required<GPUOrigin3DDict>
+ ): number;
+
+ iterateBlockRows(
+ size: Required<GPUExtent3DDict>,
+ format: ColorTextureFormat
+ ): Generator<Required<GPUOrigin3DDict>>;
+}
+
+type ImageCopyTestResources = {
+ pipeline: GPURenderPipeline;
+};
+
+const s_deviceToResourcesMap = new WeakMap<GPUDevice, ImageCopyTestResources>();
+
+/**
+ * Gets a (cached) pipeline to render a texture to an rgba8unorm texture
+ */
+function getPipelineToRenderTextureToRGB8UnormTexture(device: GPUDevice) {
+ if (!s_deviceToResourcesMap.has(device)) {
+ const module = device.createShaderModule({
+ code: `
+ struct VSOutput {
+ @builtin(position) position: vec4f,
+ @location(0) texcoord: vec2f,
+ };
+
+ @vertex fn vs(
+ @builtin(vertex_index) vertexIndex : u32
+ ) -> VSOutput {
+ let pos = array(
+ vec2f(-1, -1),
+ vec2f(-1, 3),
+ vec2f( 3, -1),
+ );
+
+ var vsOutput: VSOutput;
+
+ let xy = pos[vertexIndex];
+
+ vsOutput.position = vec4f(xy, 0.0, 1.0);
+ vsOutput.texcoord = xy * vec2f(0.5, -0.5) + vec2f(0.5);
+
+ return vsOutput;
+ }
+
+ @group(0) @binding(0) var ourSampler: sampler;
+ @group(0) @binding(1) var ourTexture: texture_2d<f32>;
+
+ @fragment fn fs(fsInput: VSOutput) -> @location(0) vec4f {
+ return textureSample(ourTexture, ourSampler, fsInput.texcoord);
+ }
+ `,
+ });
+ const pipeline = device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'vs',
+ },
+ fragment: {
+ module,
+ entryPoint: 'fs',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ });
+ s_deviceToResourcesMap.set(device, { pipeline });
+ }
+ const { pipeline } = s_deviceToResourcesMap.get(device)!;
+ return pipeline;
+}
+
+type LinearCopyParameters = {
+ dataLayout: Required<GPUImageDataLayout>;
+ origin: Required<GPUOrigin3DDict>;
+ data: Uint8Array;
+};
+
+export function TextureTestMixin<F extends FixtureClass<GPUTest>>(
+ Base: F
+): FixtureClassWithMixin<F, TextureTestMixinType> {
+ class TextureExpectations
+ extends (Base as FixtureClassInterface<GPUTest>)
+ implements TextureTestMixinType
+ {
+ createTextureFromTexelView(
+ texelView: TexelView,
+ desc: Omit<GPUTextureDescriptor, 'format'>
+ ): GPUTexture {
+ return this.trackForCleanup(createTextureFromTexelView(this.device, texelView, desc));
+ }
+
+ createTextureFromTexelViewsMultipleMipmaps(
+ texelViews: TexelView[],
+ desc: Omit<GPUTextureDescriptor, 'format'>
+ ): GPUTexture {
+ return this.trackForCleanup(createTextureFromTexelViews(this.device, texelViews, desc));
+ }
+
+ expectTexelViewComparisonIsOkInTexture(
+ src: GPUImageCopyTexture,
+ exp: TexelView,
+ size: GPUExtent3D,
+ comparisonOptions = {
+ maxIntDiff: 0,
+ maxDiffULPsForNormFormat: 1,
+ maxDiffULPsForFloatFormat: 1,
+ }
+ ): void {
+ this.eventualExpectOK(
+ textureContentIsOKByT2B(this, src, size, { expTexelView: exp }, comparisonOptions)
+ );
+ }
+
+ expectSinglePixelComparisonsAreOkInTexture<E extends PixelExpectation>(
+ src: GPUImageCopyTexture,
+ exp: PerPixelComparison<E>[],
+ comparisonOptions = {
+ maxIntDiff: 0,
+ maxDiffULPsForNormFormat: 1,
+ maxDiffULPsForFloatFormat: 1,
+ }
+ ): void {
+ assert(exp.length > 0, 'must specify at least one pixel comparison');
+ assert(
+ (kEncodableTextureFormats as GPUTextureFormat[]).includes(src.texture.format),
+ () => `${src.texture.format} is not an encodable format`
+ );
+ const lowerCorner = [src.texture.width, src.texture.height, src.texture.depthOrArrayLayers];
+ const upperCorner = [0, 0, 0];
+ const expMap = new Map<string, E>();
+ const coords: Required<GPUOrigin3DDict>[] = [];
+ for (const e of exp) {
+ const coord = reifyOrigin3D(e.coord);
+ const coordKey = JSON.stringify(coord);
+ coords.push(coord);
+
+ // Compute the minimum sub-rect that encompasses all the pixel comparisons. The
+ // `lowerCorner` will become the origin, and the `upperCorner` will be used to compute the
+ // size.
+ lowerCorner[0] = Math.min(lowerCorner[0], coord.x);
+ lowerCorner[1] = Math.min(lowerCorner[1], coord.y);
+ lowerCorner[2] = Math.min(lowerCorner[2], coord.z);
+ upperCorner[0] = Math.max(upperCorner[0], coord.x);
+ upperCorner[1] = Math.max(upperCorner[1], coord.y);
+ upperCorner[2] = Math.max(upperCorner[2], coord.z);
+
+ // Build a sparse map of the coordinates to the expected colors for the texel view.
+ assert(
+ !expMap.has(coordKey),
+ () => `duplicate pixel expectation at coordinate (${coord.x},${coord.y},${coord.z})`
+ );
+ expMap.set(coordKey, e.exp);
+ }
+ const size: GPUExtent3D = [
+ upperCorner[0] - lowerCorner[0] + 1,
+ upperCorner[1] - lowerCorner[1] + 1,
+ upperCorner[2] - lowerCorner[2] + 1,
+ ];
+ let expTexelView: TexelView;
+ if (Symbol.iterator in exp[0].exp) {
+ expTexelView = TexelView.fromTexelsAsBytes(
+ src.texture.format as EncodableTextureFormat,
+ coord => {
+ const res = expMap.get(JSON.stringify(coord));
+ assert(
+ res !== undefined,
+ () => `invalid coordinate (${coord.x},${coord.y},${coord.z}) in sparse texel view`
+ );
+ return res as Uint8Array;
+ }
+ );
+ } else {
+ expTexelView = TexelView.fromTexelsAsColors(
+ src.texture.format as EncodableTextureFormat,
+ coord => {
+ const res = expMap.get(JSON.stringify(coord));
+ assert(
+ res !== undefined,
+ () => `invalid coordinate (${coord.x},${coord.y},${coord.z}) in sparse texel view`
+ );
+ return res as PerTexelComponent<number>;
+ }
+ );
+ }
+ const coordsF = (function* () {
+ for (const coord of coords) {
+ yield coord;
+ }
+ })();
+
+ this.eventualExpectOK(
+ textureContentIsOKByT2B(
+ this,
+ { ...src, origin: reifyOrigin3D(lowerCorner) },
+ size,
+ { expTexelView },
+ comparisonOptions,
+ coordsF
+ )
+ );
+ }
+
+ expectTexturesToMatchByRendering(
+ actualTexture: GPUTexture,
+ expectedTexture: GPUTexture,
+ mipLevel: number,
+ origin: Required<GPUOrigin3DDict>,
+ size: Required<GPUExtent3DDict>
+ ): void {
+ // Render every layer of both textures at mipLevel to an rgba8unorm texture
+ // that matches the size of the mipLevel. After each render, copy the
+ // result to a buffer and expect the results from both textures to match.
+ const pipeline = getPipelineToRenderTextureToRGB8UnormTexture(this.device);
+ const readbackPromisesPerTexturePerLayer = [actualTexture, expectedTexture].map(
+ (texture, ndx) => {
+ const attachmentSize = virtualMipSize('2d', [texture.width, texture.height, 1], mipLevel);
+ const attachment = this.device.createTexture({
+ label: `readback${ndx}`,
+ size: attachmentSize,
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ this.trackForCleanup(attachment);
+
+ const sampler = this.device.createSampler();
+
+ const numLayers = texture.depthOrArrayLayers;
+ const readbackPromisesPerLayer = [];
+ for (let layer = 0; layer < numLayers; ++layer) {
+ const bindGroup = this.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: sampler },
+ {
+ binding: 1,
+ resource: texture.createView({
+ baseMipLevel: mipLevel,
+ mipLevelCount: 1,
+ baseArrayLayer: layer,
+ arrayLayerCount: 1,
+ dimension: '2d',
+ }),
+ },
+ ],
+ });
+
+ const encoder = this.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: attachment.createView(),
+ clearValue: [0.5, 0.5, 0.5, 0.5],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.draw(3);
+ pass.end();
+ this.queue.submit([encoder.finish()]);
+
+ const buffer = this.copyWholeTextureToNewBufferSimple(attachment, 0);
+
+ readbackPromisesPerLayer.push(
+ this.readGPUBufferRangeTyped(buffer, {
+ type: Uint8Array,
+ typedLength: buffer.size,
+ })
+ );
+ }
+ return readbackPromisesPerLayer;
+ }
+ );
+
+ this.eventualAsyncExpectation(async niceStack => {
+ const readbacksPerTexturePerLayer = [];
+
+ // Wait for all buffers to be ready
+ for (const readbackPromises of readbackPromisesPerTexturePerLayer) {
+ readbacksPerTexturePerLayer.push(await Promise.all(readbackPromises));
+ }
+
+ function arrayNotAllTheSameValue(arr: TypedArrayBufferView | number[], msg?: string) {
+ const first = arr[0];
+ return arr.length <= 1 || arr.findIndex(v => v !== first) >= 0
+ ? undefined
+ : Error(`array is entirely ${first} so likely nothing was tested: ${msg || ''}`);
+ }
+
+ // Compare each layer of each texture as read from buffer.
+ const [actualReadbacksPerLayer, expectedReadbacksPerLayer] = readbacksPerTexturePerLayer;
+ for (let layer = 0; layer < actualReadbacksPerLayer.length; ++layer) {
+ const actualReadback = actualReadbacksPerLayer[layer];
+ const expectedReadback = expectedReadbacksPerLayer[layer];
+ const sameOk =
+ size.width === 0 ||
+ size.height === 0 ||
+ layer < origin.z ||
+ layer >= origin.z + size.depthOrArrayLayers;
+ this.expectOK(
+ sameOk ? undefined : arrayNotAllTheSameValue(actualReadback.data, 'actualTexture')
+ );
+ this.expectOK(
+ sameOk ? undefined : arrayNotAllTheSameValue(expectedReadback.data, 'expectedTexture')
+ );
+ this.expectOK(checkElementsEqual(actualReadback.data, expectedReadback.data), {
+ mode: 'fail',
+ niceStack,
+ });
+ actualReadback.cleanup();
+ expectedReadback.cleanup();
+ }
+ });
+ }
+
+ copyWholeTextureToNewBufferSimple(texture: GPUTexture, mipLevel: number) {
+ const { blockWidth, blockHeight, bytesPerBlock } = kTextureFormatInfo[texture.format];
+ const mipSize = physicalMipSizeFromTexture(texture, mipLevel);
+ assert(bytesPerBlock !== undefined);
+
+ const blocksPerRow = mipSize[0] / blockWidth;
+ const blocksPerColumn = mipSize[1] / blockHeight;
+
+ assert(blocksPerRow % 1 === 0);
+ assert(blocksPerColumn % 1 === 0);
+
+ const bytesPerRow = align(blocksPerRow * bytesPerBlock, 256);
+ const byteLength = bytesPerRow * blocksPerColumn * mipSize[2];
+
+ return this.copyWholeTextureToNewBuffer(
+ { texture, mipLevel },
+ {
+ bytesPerBlock,
+ bytesPerRow,
+ rowsPerImage: blocksPerColumn,
+ byteLength,
+ }
+ );
+ }
+
+ copyWholeTextureToNewBuffer(
+ { texture, mipLevel }: { texture: GPUTexture; mipLevel: number | undefined },
+ resultDataLayout: {
+ bytesPerBlock: number;
+ byteLength: number;
+ bytesPerRow: number;
+ rowsPerImage: number;
+ }
+ ): GPUBuffer {
+ const { byteLength, bytesPerRow, rowsPerImage } = resultDataLayout;
+ const buffer = this.device.createBuffer({
+ size: align(byteLength, 4), // this is necessary because we need to copy and map data from this buffer
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST,
+ });
+ this.trackForCleanup(buffer);
+
+ const mipSize = physicalMipSizeFromTexture(texture, mipLevel || 0);
+ const encoder = this.device.createCommandEncoder();
+ encoder.copyTextureToBuffer(
+ { texture, mipLevel },
+ { buffer, bytesPerRow, rowsPerImage },
+ mipSize
+ );
+ this.device.queue.submit([encoder.finish()]);
+
+ return buffer;
+ }
+
+ updateLinearTextureDataSubBox(
+ format: ColorTextureFormat,
+ copySize: Required<GPUExtent3DDict>,
+ copyParams: {
+ dest: LinearCopyParameters;
+ src: LinearCopyParameters;
+ }
+ ): void {
+ const { src, dest } = copyParams;
+ const rowLength = bytesInACompleteRow(copySize.width, format);
+ for (const texel of this.iterateBlockRows(copySize, format)) {
+ const srcOffsetElements = this.getTexelOffsetInBytes(
+ src.dataLayout,
+ format,
+ texel,
+ src.origin
+ );
+ const dstOffsetElements = this.getTexelOffsetInBytes(
+ dest.dataLayout,
+ format,
+ texel,
+ dest.origin
+ );
+ memcpy(
+ { src: src.data, start: srcOffsetElements, length: rowLength },
+ { dst: dest.data, start: dstOffsetElements }
+ );
+ }
+ }
+
+ /** Offset for a particular texel in the linear texture data */
+ getTexelOffsetInBytes(
+ textureDataLayout: Required<GPUImageDataLayout>,
+ format: ColorTextureFormat,
+ texel: Required<GPUOrigin3DDict>,
+ origin: Required<GPUOrigin3DDict> = { x: 0, y: 0, z: 0 }
+ ): number {
+ const { offset, bytesPerRow, rowsPerImage } = textureDataLayout;
+ const info = kTextureFormatInfo[format];
+
+ assert(texel.x % info.blockWidth === 0);
+ assert(texel.y % info.blockHeight === 0);
+ assert(origin.x % info.blockWidth === 0);
+ assert(origin.y % info.blockHeight === 0);
+
+ const bytesPerImage = rowsPerImage * bytesPerRow;
+
+ return (
+ offset +
+ (texel.z + origin.z) * bytesPerImage +
+ ((texel.y + origin.y) / info.blockHeight) * bytesPerRow +
+ ((texel.x + origin.x) / info.blockWidth) * info.color.bytes
+ );
+ }
+
+ *iterateBlockRows(
+ size: Required<GPUExtent3DDict>,
+ format: ColorTextureFormat
+ ): Generator<Required<GPUOrigin3DDict>> {
+ if (size.width === 0 || size.height === 0 || size.depthOrArrayLayers === 0) {
+ // do not iterate anything for an empty region
+ return;
+ }
+ const info = kTextureFormatInfo[format];
+ assert(size.height % info.blockHeight === 0);
+ // Note: it's important that the order is in increasing memory address order.
+ for (let z = 0; z < size.depthOrArrayLayers; ++z) {
+ for (let y = 0; y < size.height; y += info.blockHeight) {
+ yield {
+ x: 0,
+ y,
+ z,
+ };
+ }
+ }
+ }
+ }
+
+ return TextureExpectations as unknown as FixtureClassWithMixin<F, TextureTestMixinType>;
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/idl/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/idl/README.txt
new file mode 100644
index 0000000000..aa7a983b04
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/idl/README.txt
@@ -0,0 +1,7 @@
+Tests to check that the WebGPU IDL is correctly implemented, for example that objects expose
+exactly the correct members, and that methods throw when passed incomplete dictionaries.
+
+See https://github.com/gpuweb/cts/issues/332
+
+TODO: exposed.html.ts: Test all WebGPU interfaces instead of just some of them.
+TODO: Check prototype chains. (Add a helper in IDLTest for this.)
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/idl/constants/flags.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/idl/constants/flags.spec.ts
new file mode 100644
index 0000000000..ca78892fb4
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/idl/constants/flags.spec.ts
@@ -0,0 +1,79 @@
+export const description = `
+Test the values of flags interfaces (e.g. GPUTextureUsage).
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { IDLTest } from '../idl_test.js';
+
+export const g = makeTestGroup(IDLTest);
+
+const kBufferUsageExp = {
+ MAP_READ: 0x0001,
+ MAP_WRITE: 0x0002,
+ COPY_SRC: 0x0004,
+ COPY_DST: 0x0008,
+ INDEX: 0x0010,
+ VERTEX: 0x0020,
+ UNIFORM: 0x0040,
+ STORAGE: 0x0080,
+ INDIRECT: 0x0100,
+ QUERY_RESOLVE: 0x0200,
+};
+g.test('BufferUsage,count').fn(t => {
+ t.assertMemberCount(GPUBufferUsage, kBufferUsageExp);
+});
+g.test('BufferUsage,values')
+ .params(u => u.combine('key', Object.keys(kBufferUsageExp)))
+ .fn(t => {
+ const { key } = t.params;
+ t.assertMember(GPUBufferUsage, kBufferUsageExp, key);
+ });
+
+const kTextureUsageExp = {
+ COPY_SRC: 0x01,
+ COPY_DST: 0x02,
+ TEXTURE_BINDING: 0x04,
+ STORAGE_BINDING: 0x08,
+ RENDER_ATTACHMENT: 0x10,
+};
+g.test('TextureUsage,count').fn(t => {
+ t.assertMemberCount(GPUTextureUsage, kTextureUsageExp);
+});
+g.test('TextureUsage,values')
+ .params(u => u.combine('key', Object.keys(kTextureUsageExp)))
+ .fn(t => {
+ const { key } = t.params;
+ t.assertMember(GPUTextureUsage, kTextureUsageExp, key);
+ });
+
+const kColorWriteExp = {
+ RED: 0x1,
+ GREEN: 0x2,
+ BLUE: 0x4,
+ ALPHA: 0x8,
+ ALL: 0xf,
+};
+g.test('ColorWrite,count').fn(t => {
+ t.assertMemberCount(GPUColorWrite, kColorWriteExp);
+});
+g.test('ColorWrite,values')
+ .params(u => u.combine('key', Object.keys(kColorWriteExp)))
+ .fn(t => {
+ const { key } = t.params;
+ t.assertMember(GPUColorWrite, kColorWriteExp, key);
+ });
+
+const kShaderStageExp = {
+ VERTEX: 0x1,
+ FRAGMENT: 0x2,
+ COMPUTE: 0x4,
+};
+g.test('ShaderStage,count').fn(t => {
+ t.assertMemberCount(GPUShaderStage, kShaderStageExp);
+});
+g.test('ShaderStage,values')
+ .params(u => u.combine('key', Object.keys(kShaderStageExp)))
+ .fn(t => {
+ const { key } = t.params;
+ t.assertMember(GPUShaderStage, kShaderStageExp, key);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/idl/exposed.html.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/idl/exposed.html.ts
new file mode 100644
index 0000000000..7aee998a9f
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/idl/exposed.html.ts
@@ -0,0 +1,52 @@
+// WPT-specific test checking that WebGPU is available iff isSecureContext.
+
+import { assert } from '../../common/util/util.js';
+
+const items = [
+ globalThis.navigator.gpu,
+ globalThis.GPU,
+ globalThis.GPUAdapter,
+ globalThis.GPUAdapterInfo,
+ globalThis.GPUBindGroup,
+ globalThis.GPUBindGroupLayout,
+ globalThis.GPUBuffer,
+ globalThis.GPUBufferUsage,
+ globalThis.GPUCanvasContext,
+ globalThis.GPUColorWrite,
+ globalThis.GPUCommandBuffer,
+ globalThis.GPUCommandEncoder,
+ globalThis.GPUCompilationInfo,
+ globalThis.GPUCompilationMessage,
+ globalThis.GPUComputePassEncoder,
+ globalThis.GPUComputePipeline,
+ globalThis.GPUDevice,
+ globalThis.GPUDeviceLostInfo,
+ globalThis.GPUError,
+ globalThis.GPUExternalTexture,
+ globalThis.GPUMapMode,
+ globalThis.GPUOutOfMemoryError,
+ globalThis.GPUPipelineLayout,
+ globalThis.GPUQuerySet,
+ globalThis.GPUQueue,
+ globalThis.GPURenderBundle,
+ globalThis.GPURenderBundleEncoder,
+ globalThis.GPURenderPassEncoder,
+ globalThis.GPURenderPipeline,
+ globalThis.GPUSampler,
+ globalThis.GPUShaderModule,
+ globalThis.GPUShaderStage,
+ globalThis.GPUSupportedLimits,
+ globalThis.GPUTexture,
+ globalThis.GPUTextureUsage,
+ globalThis.GPUTextureView,
+ globalThis.GPUUncapturedErrorEvent,
+ globalThis.GPUValidationError,
+];
+
+for (const item of items) {
+ if (globalThis.isSecureContext) {
+ assert(item !== undefined, 'Item/interface should be exposed on secure context');
+ } else {
+ assert(item === undefined, 'Item/interface should not be exposed on insecure context');
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/idl/exposed.http.html b/dom/webgpu/tests/cts/checkout/src/webgpu/idl/exposed.http.html
new file mode 100644
index 0000000000..94a814d005
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/idl/exposed.http.html
@@ -0,0 +1,13 @@
+<!doctype html>
+<html>
+ <head>
+ <meta charset=utf-8>
+ <title>WebGPU exposed items (non-HTTPS)</title>
+ <meta name=assert content="WebGPU should not be exposed on a non-[SecureContext]">
+ <link rel=help href='https://gpuweb.github.io/gpuweb/'>
+ <script src=/resources/testharness.js></script>
+ <script src=/resources/testharnessreport.js></script>
+ <script type=module src=exposed.html.js></script>
+ </head>
+ <body></body>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/idl/exposed.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/idl/exposed.https.html
new file mode 100644
index 0000000000..8d421b7020
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/idl/exposed.https.html
@@ -0,0 +1,13 @@
+<!doctype html>
+<html>
+ <head>
+ <meta charset=utf-8>
+ <title>WebGPU exposed items (HTTPS)</title>
+ <meta name=assert content="All specified WebGPU items/interfaces should be exposed, on a [SecureContext]">
+ <link rel=help href='https://gpuweb.github.io/gpuweb/'>
+ <script src=/resources/testharness.js></script>
+ <script src=/resources/testharnessreport.js></script>
+ <script type=module src=exposed.html.js></script>
+ </head>
+ <body></body>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/idl/idl_test.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/idl/idl_test.ts
new file mode 100644
index 0000000000..5077ac5623
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/idl/idl_test.ts
@@ -0,0 +1,41 @@
+import { Fixture } from '../../common/framework/fixture.js';
+import { getGPU } from '../../common/util/navigator_gpu.js';
+import { assert } from '../../common/util/util.js';
+
+interface UnknownObject {
+ [k: string]: unknown;
+}
+
+/**
+ * Base fixture for testing the exposed interface is correct (without actually using WebGPU).
+ */
+export class IDLTest extends Fixture {
+ override init(): Promise<void> {
+ // Ensure the GPU provider is initialized
+ getGPU(this.rec);
+ return Promise.resolve();
+ }
+
+ /**
+ * Asserts that a member of an IDL interface has the expected value.
+ */
+ assertMember(act: UnknownObject, exp: UnknownObject, key: string) {
+ assert(key in act, () => `Expected key ${key} missing`);
+ assert(act[key] === exp[key], () => `Value of [${key}] was ${act[key]}, expected ${exp[key]}`);
+ }
+
+ /**
+ * Asserts that an IDL interface has the same number of keys as the expected object.
+ *
+ * MAINTENANCE_TODO: add a way to check for the types of keys with unknown values, like methods and attributes
+ * MAINTENANCE_TODO: handle extensions
+ */
+ assertMemberCount(act: UnknownObject, exp: UnknownObject) {
+ const expKeys = Object.keys(exp);
+ const actKeys = Object.keys(act);
+ assert(
+ actKeys.length === expKeys.length,
+ () => `Had ${actKeys.length} keys, expected ${expKeys.length}`
+ );
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/listing.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/listing.ts
new file mode 100644
index 0000000000..823639c692
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/listing.ts
@@ -0,0 +1,5 @@
+/* eslint-disable import/no-restricted-paths */
+import { TestSuiteListing } from '../common/internal/test_suite_listing.js';
+import { makeListing } from '../common/tools/crawl.js';
+
+export const listing: Promise<TestSuiteListing> = makeListing(__filename);
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/listing_meta.json b/dom/webgpu/tests/cts/checkout/src/webgpu/listing_meta.json
new file mode 100644
index 0000000000..f9caeefc6e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/listing_meta.json
@@ -0,0 +1,2002 @@
+{
+ "_comment": "SEMI AUTO-GENERATED: Please read docs/adding_timing_metadata.md.",
+ "webgpu:api,operation,adapter,requestAdapter:requestAdapter:*": { "subcaseMS": 152.083 },
+ "webgpu:api,operation,adapter,requestAdapter:requestAdapter_no_parameters:*": { "subcaseMS": 384.601 },
+ "webgpu:api,operation,adapter,requestAdapterInfo:adapter_info:*": { "subcaseMS": 136.601 },
+ "webgpu:api,operation,adapter,requestAdapterInfo:adapter_info_with_hints:*": { "subcaseMS": 0.101 },
+ "webgpu:api,operation,adapter,requestDevice:default:*": { "subcaseMS": 19.450 },
+ "webgpu:api,operation,adapter,requestDevice:features,known:*": { "subcaseMS": 9.637 },
+ "webgpu:api,operation,adapter,requestDevice:features,unknown:*": { "subcaseMS": 13.600 },
+ "webgpu:api,operation,adapter,requestDevice:invalid:*": { "subcaseMS": 27.801 },
+ "webgpu:api,operation,adapter,requestDevice:limit,better_than_supported:*": { "subcaseMS": 3.614 },
+ "webgpu:api,operation,adapter,requestDevice:limit,worse_than_default:*": { "subcaseMS": 6.711 },
+ "webgpu:api,operation,adapter,requestDevice:limits,supported:*": { "subcaseMS": 4.579 },
+ "webgpu:api,operation,adapter,requestDevice:limits,unknown:*": { "subcaseMS": 0.601 },
+ "webgpu:api,operation,adapter,requestDevice:stale:*": { "subcaseMS": 3.590 },
+ "webgpu:api,operation,buffers,map:mapAsync,mapState:*": { "subcaseMS": 6.178 },
+ "webgpu:api,operation,buffers,map:mapAsync,read,typedArrayAccess:*": { "subcaseMS": 10.759 },
+ "webgpu:api,operation,buffers,map:mapAsync,read:*": { "subcaseMS": 8.996 },
+ "webgpu:api,operation,buffers,map:mapAsync,write,unchanged_ranges_preserved:*": { "subcaseMS": 13.050 },
+ "webgpu:api,operation,buffers,map:mapAsync,write:*": { "subcaseMS": 3.944 },
+ "webgpu:api,operation,buffers,map:mappedAtCreation,mapState:*": { "subcaseMS": 4.626 },
+ "webgpu:api,operation,buffers,map:mappedAtCreation:*": { "subcaseMS": 1.039 },
+ "webgpu:api,operation,buffers,map:remapped_for_write:*": { "subcaseMS": 0.930 },
+ "webgpu:api,operation,buffers,map_ArrayBuffer:postMessage:*": { "subcaseMS": 64.775 },
+ "webgpu:api,operation,buffers,map_detach:while_mapped:*": { "subcaseMS": 1.386 },
+ "webgpu:api,operation,buffers,map_oom:mappedAtCreation:*": { "subcaseMS": 0.827 },
+ "webgpu:api,operation,buffers,threading:destroyed:*": { "subcaseMS": 0.700 },
+ "webgpu:api,operation,buffers,threading:serialize:*": { "subcaseMS": 0.900 },
+ "webgpu:api,operation,command_buffer,basic:b2t2b:*": { "subcaseMS": 16.801 },
+ "webgpu:api,operation,command_buffer,basic:b2t2t2b:*": { "subcaseMS": 16.101 },
+ "webgpu:api,operation,command_buffer,basic:empty:*": { "subcaseMS": 14.000 },
+ "webgpu:api,operation,command_buffer,clearBuffer:clear:*": { "subcaseMS": 0.538 },
+ "webgpu:api,operation,command_buffer,copyBufferToBuffer:copy_order:*": { "subcaseMS": 13.401 },
+ "webgpu:api,operation,command_buffer,copyBufferToBuffer:single:*": { "subcaseMS": 0.195 },
+ "webgpu:api,operation,command_buffer,copyBufferToBuffer:state_transitions:*": { "subcaseMS": 19.600 },
+ "webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,array:*": { "subcaseMS": 0.382 },
+ "webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,compressed,non_array:*": { "subcaseMS": 0.281 },
+ "webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,array:*": { "subcaseMS": 1.607 },
+ "webgpu:api,operation,command_buffer,copyTextureToTexture:color_textures,non_compressed,non_array:*": { "subcaseMS": 0.477 },
+ "webgpu:api,operation,command_buffer,copyTextureToTexture:copy_depth_stencil:*": { "subcaseMS": 0.983 },
+ "webgpu:api,operation,command_buffer,copyTextureToTexture:copy_multisampled_color:*": { "subcaseMS": 21.700 },
+ "webgpu:api,operation,command_buffer,copyTextureToTexture:copy_multisampled_depth:*": { "subcaseMS": 5.901 },
+ "webgpu:api,operation,command_buffer,copyTextureToTexture:zero_sized:*": { "subcaseMS": 0.741 },
+ "webgpu:api,operation,command_buffer,image_copy:mip_levels:*": { "subcaseMS": 1.244 },
+ "webgpu:api,operation,command_buffer,image_copy:offsets_and_sizes:*": { "subcaseMS": 0.960 },
+ "webgpu:api,operation,command_buffer,image_copy:offsets_and_sizes_copy_depth_stencil:*": { "subcaseMS": 1.502 },
+ "webgpu:api,operation,command_buffer,image_copy:origins_and_extents:*": { "subcaseMS": 0.618 },
+ "webgpu:api,operation,command_buffer,image_copy:rowsPerImage_and_bytesPerRow:*": { "subcaseMS": 1.001 },
+ "webgpu:api,operation,command_buffer,image_copy:rowsPerImage_and_bytesPerRow_depth_stencil:*": { "subcaseMS": 1.863 },
+ "webgpu:api,operation,command_buffer,image_copy:undefined_params:*": { "subcaseMS": 3.144 },
+ "webgpu:api,operation,command_buffer,programmable,state_tracking:bind_group_before_pipeline:*": { "subcaseMS": 3.375 },
+ "webgpu:api,operation,command_buffer,programmable,state_tracking:bind_group_indices:*": { "subcaseMS": 2.872 },
+ "webgpu:api,operation,command_buffer,programmable,state_tracking:bind_group_multiple_sets:*": { "subcaseMS": 12.300 },
+ "webgpu:api,operation,command_buffer,programmable,state_tracking:bind_group_order:*": { "subcaseMS": 4.428 },
+ "webgpu:api,operation,command_buffer,programmable,state_tracking:compatible_pipelines:*": { "subcaseMS": 12.334 },
+ "webgpu:api,operation,command_buffer,programmable,state_tracking:one_bind_group_multiple_slots:*": { "subcaseMS": 9.734 },
+ "webgpu:api,operation,command_buffer,queries,occlusionQuery:occlusion_query,alpha_to_coverage:*": { "subcaseMS": 12.125 },
+ "webgpu:api,operation,command_buffer,queries,occlusionQuery:occlusion_query,basic:*": { "subcaseMS": 13.125 },
+ "webgpu:api,operation,command_buffer,queries,occlusionQuery:occlusion_query,depth:*": { "subcaseMS": 14.407 },
+ "webgpu:api,operation,command_buffer,queries,occlusionQuery:occlusion_query,empty:*": { "subcaseMS": 16.801 },
+ "webgpu:api,operation,command_buffer,queries,occlusionQuery:occlusion_query,initial:*": { "subcaseMS": 40.000 },
+ "webgpu:api,operation,command_buffer,queries,occlusionQuery:occlusion_query,multi_resolve:*": { "subcaseMS": 15.900 },
+ "webgpu:api,operation,command_buffer,queries,occlusionQuery:occlusion_query,sample_mask:*": { "subcaseMS": 13.352 },
+ "webgpu:api,operation,command_buffer,queries,occlusionQuery:occlusion_query,scissor:*": { "subcaseMS": 13.138 },
+ "webgpu:api,operation,command_buffer,queries,occlusionQuery:occlusion_query,stencil:*": { "subcaseMS": 10.300 },
+ "webgpu:api,operation,command_buffer,render,state_tracking:change_pipeline_before_and_after_vertex_buffer:*": { "subcaseMS": 14.900 },
+ "webgpu:api,operation,command_buffer,render,state_tracking:set_index_buffer_before_non_indexed_draw:*": { "subcaseMS": 16.301 },
+ "webgpu:api,operation,command_buffer,render,state_tracking:set_index_buffer_without_changing_buffer:*": { "subcaseMS": 16.601 },
+ "webgpu:api,operation,command_buffer,render,state_tracking:set_vertex_buffer_but_not_used_in_draw:*": { "subcaseMS": 17.300 },
+ "webgpu:api,operation,command_buffer,render,state_tracking:set_vertex_buffer_without_changing_buffer:*": { "subcaseMS": 16.400 },
+ "webgpu:api,operation,compute,basic:large_dispatch:*": { "subcaseMS": 9.237 },
+ "webgpu:api,operation,compute,basic:memcpy:*": { "subcaseMS": 16.901 },
+ "webgpu:api,operation,compute_pipeline,overrides:basic:*": { "subcaseMS": 15.100 },
+ "webgpu:api,operation,compute_pipeline,overrides:multi_entry_points:*": { "subcaseMS": 15.900 },
+ "webgpu:api,operation,compute_pipeline,overrides:numeric_id:*": { "subcaseMS": 14.300 },
+ "webgpu:api,operation,compute_pipeline,overrides:precision:*": { "subcaseMS": 16.151 },
+ "webgpu:api,operation,compute_pipeline,overrides:shared_shader_module:*": { "subcaseMS": 14.951 },
+ "webgpu:api,operation,compute_pipeline,overrides:workgroup_size:*": { "subcaseMS": 13.184 },
+ "webgpu:api,operation,device,lost:lost_on_destroy:*": { "subcaseMS": 37.500 },
+ "webgpu:api,operation,device,lost:not_lost_on_gc:*": { "subcaseMS": 2066.500 },
+ "webgpu:api,operation,device,lost:same_object:*": { "subcaseMS": 16.601 },
+ "webgpu:api,operation,labels:object_has_descriptor_label:*": { "subcaseMS": 1.942 },
+ "webgpu:api,operation,labels:wrappers_do_not_share_labels:*": { "subcaseMS": 13.701 },
+ "webgpu:api,operation,memory_sync,buffer,multiple_buffers:multiple_pairs_of_dispatches_in_one_compute_pass:*": { "subcaseMS": 28.701 },
+ "webgpu:api,operation,memory_sync,buffer,multiple_buffers:multiple_pairs_of_draws_in_one_render_bundle:*": { "subcaseMS": 30.200 },
+ "webgpu:api,operation,memory_sync,buffer,multiple_buffers:multiple_pairs_of_draws_in_one_render_pass:*": { "subcaseMS": 11.900 },
+ "webgpu:api,operation,memory_sync,buffer,multiple_buffers:rw:*": { "subcaseMS": 30.427 },
+ "webgpu:api,operation,memory_sync,buffer,multiple_buffers:wr:*": { "subcaseMS": 30.007 },
+ "webgpu:api,operation,memory_sync,buffer,multiple_buffers:ww:*": { "subcaseMS": 25.575 },
+ "webgpu:api,operation,memory_sync,buffer,single_buffer:rw:*": { "subcaseMS": 18.337 },
+ "webgpu:api,operation,memory_sync,buffer,single_buffer:two_dispatches_in_the_same_compute_pass:*": { "subcaseMS": 17.500 },
+ "webgpu:api,operation,memory_sync,buffer,single_buffer:two_draws_in_the_same_render_bundle:*": { "subcaseMS": 18.100 },
+ "webgpu:api,operation,memory_sync,buffer,single_buffer:two_draws_in_the_same_render_pass:*": { "subcaseMS": 4.925 },
+ "webgpu:api,operation,memory_sync,buffer,single_buffer:wr:*": { "subcaseMS": 18.296 },
+ "webgpu:api,operation,memory_sync,buffer,single_buffer:ww:*": { "subcaseMS": 18.802 },
+ "webgpu:api,operation,memory_sync,texture,same_subresource:rw,single_pass,load_resolve:*": { "subcaseMS": 1.200 },
+ "webgpu:api,operation,memory_sync,texture,same_subresource:rw,single_pass,load_store:*": { "subcaseMS": 14.200 },
+ "webgpu:api,operation,memory_sync,texture,same_subresource:rw:*": { "subcaseMS": 10.908 },
+ "webgpu:api,operation,memory_sync,texture,same_subresource:wr:*": { "subcaseMS": 10.684 },
+ "webgpu:api,operation,memory_sync,texture,same_subresource:ww:*": { "subcaseMS": 11.198 },
+ "webgpu:api,operation,onSubmittedWorkDone:many,parallel:*": { "subcaseMS": 111.601 },
+ "webgpu:api,operation,onSubmittedWorkDone:many,parallel_order:*": { "subcaseMS": 33.000 },
+ "webgpu:api,operation,onSubmittedWorkDone:many,serial:*": { "subcaseMS": 254.400 },
+ "webgpu:api,operation,onSubmittedWorkDone:with_work:*": { "subcaseMS": 12.400 },
+ "webgpu:api,operation,onSubmittedWorkDone:without_work:*": { "subcaseMS": 10.901 },
+ "webgpu:api,operation,pipeline,default_layout:getBindGroupLayout_js_object:*": { "subcaseMS": 1.300 },
+ "webgpu:api,operation,pipeline,default_layout:incompatible_with_explicit:*": { "subcaseMS": 1.101 },
+ "webgpu:api,operation,pipeline,default_layout:layout:*": { "subcaseMS": 11.500 },
+ "webgpu:api,operation,queue,writeBuffer:array_types:*": { "subcaseMS": 12.032 },
+ "webgpu:api,operation,queue,writeBuffer:multiple_writes_at_different_offsets_and_sizes:*": { "subcaseMS": 2.087 },
+ "webgpu:api,operation,reflection:buffer_reflection_attributes:*": { "subcaseMS": 0.800 },
+ "webgpu:api,operation,reflection:query_set_reflection_attributes:*": { "subcaseMS": 0.634 },
+ "webgpu:api,operation,reflection:texture_reflection_attributes:*": { "subcaseMS": 1.829 },
+ "webgpu:api,operation,render_pass,clear_value:layout:*": { "subcaseMS": 1.401 },
+ "webgpu:api,operation,render_pass,clear_value:loaded:*": { "subcaseMS": 14.300 },
+ "webgpu:api,operation,render_pass,clear_value:srgb:*": { "subcaseMS": 5.601 },
+ "webgpu:api,operation,render_pass,clear_value:stencil_clear_value:*": { "subcaseMS": 12.660 },
+ "webgpu:api,operation,render_pass,clear_value:stored:*": { "subcaseMS": 12.100 },
+ "webgpu:api,operation,render_pass,resolve:render_pass_resolve:*": { "subcaseMS": 1.029 },
+ "webgpu:api,operation,render_pass,storeOp:render_pass_store_op,color_attachment_only:*": { "subcaseMS": 3.607 },
+ "webgpu:api,operation,render_pass,storeOp:render_pass_store_op,color_attachment_with_depth_stencil_attachment:*": { "subcaseMS": 10.125 },
+ "webgpu:api,operation,render_pass,storeOp:render_pass_store_op,depth_stencil_attachment_only:*": { "subcaseMS": 3.754 },
+ "webgpu:api,operation,render_pass,storeOp:render_pass_store_op,multiple_color_attachments:*": { "subcaseMS": 4.263 },
+ "webgpu:api,operation,render_pass,storeop2:storeOp_controls_whether_1x1_drawn_quad_is_stored:*": { "subcaseMS": 17.500 },
+ "webgpu:api,operation,render_pipeline,culling_tests:culling:*": { "subcaseMS": 2.346 },
+ "webgpu:api,operation,render_pipeline,overrides:basic:*": { "subcaseMS": 3.075 },
+ "webgpu:api,operation,render_pipeline,overrides:multi_entry_points:*": { "subcaseMS": 5.400 },
+ "webgpu:api,operation,render_pipeline,overrides:precision:*": { "subcaseMS": 7.675 },
+ "webgpu:api,operation,render_pipeline,overrides:shared_shader_module:*": { "subcaseMS": 5.683 },
+ "webgpu:api,operation,render_pipeline,pipeline_output_targets:color,attachments:*": { "subcaseMS": 1.984 },
+ "webgpu:api,operation,render_pipeline,pipeline_output_targets:color,component_count,blend:*": { "subcaseMS": 1.731 },
+ "webgpu:api,operation,render_pipeline,pipeline_output_targets:color,component_count:*": { "subcaseMS": 6.284 },
+ "webgpu:api,operation,render_pipeline,primitive_topology:basic:*": { "subcaseMS": 11.822 },
+ "webgpu:api,operation,render_pipeline,primitive_topology:unaligned_vertex_count:*": { "subcaseMS": 10.851 },
+ "webgpu:api,operation,render_pipeline,sample_mask:alpha_to_coverage_mask:*": { "subcaseMS": 68.512 },
+ "webgpu:api,operation,render_pipeline,sample_mask:fragment_output_mask:*": { "subcaseMS": 6.154 },
+ "webgpu:api,operation,render_pipeline,vertex_only_render_pipeline:draw_depth_and_stencil_with_vertex_only_pipeline:*": { "subcaseMS": 14.100 },
+ "webgpu:api,operation,rendering,basic:clear:*": { "subcaseMS": 3.700 },
+ "webgpu:api,operation,rendering,basic:fullscreen_quad:*": { "subcaseMS": 16.601 },
+ "webgpu:api,operation,rendering,basic:large_draw:*": { "subcaseMS": 2335.425 },
+ "webgpu:api,operation,rendering,color_target_state:blend_constant,initial:*": { "subcaseMS": 33.901 },
+ "webgpu:api,operation,rendering,color_target_state:blend_constant,not_inherited:*": { "subcaseMS": 41.601 },
+ "webgpu:api,operation,rendering,color_target_state:blend_constant,setting:*": { "subcaseMS": 12.434 },
+ "webgpu:api,operation,rendering,color_target_state:blending,GPUBlendComponent:*": { "subcaseMS": 6.454 },
+ "webgpu:api,operation,rendering,color_target_state:blending,clamping:*": { "subcaseMS": 22.669 },
+ "webgpu:api,operation,rendering,color_target_state:blending,formats:*": { "subcaseMS": 10.350 },
+ "webgpu:api,operation,rendering,color_target_state:color_write_mask,blending_disabled:*": { "subcaseMS": 11.450 },
+ "webgpu:api,operation,rendering,color_target_state:color_write_mask,channel_work:*": { "subcaseMS": 24.850 },
+ "webgpu:api,operation,rendering,depth:depth_compare_func:*": { "subcaseMS": 10.123 },
+ "webgpu:api,operation,rendering,depth:depth_disabled:*": { "subcaseMS": 19.801 },
+ "webgpu:api,operation,rendering,depth:depth_test_fail:*": { "subcaseMS": 13.434 },
+ "webgpu:api,operation,rendering,depth:depth_write_disabled:*": { "subcaseMS": 13.050 },
+ "webgpu:api,operation,rendering,depth:reverse_depth:*": { "subcaseMS": 14.100 },
+ "webgpu:api,operation,rendering,depth_bias:depth_bias:*": { "subcaseMS": 12.386 },
+ "webgpu:api,operation,rendering,depth_bias:depth_bias_24bit_format:*": { "subcaseMS": 9.934 },
+ "webgpu:api,operation,rendering,depth_clip_clamp:depth_clamp_and_clip:*": { "subcaseMS": 13.807 },
+ "webgpu:api,operation,rendering,depth_clip_clamp:depth_test_input_clamped:*": { "subcaseMS": 13.005 },
+ "webgpu:api,operation,rendering,draw:arguments:*": { "subcaseMS": 11.174 },
+ "webgpu:api,operation,rendering,draw:default_arguments:*": { "subcaseMS": 4.446 },
+ "webgpu:api,operation,rendering,draw:largeish_buffer:*": { "subcaseMS": 0.601 },
+ "webgpu:api,operation,rendering,draw:vertex_attributes,basic:*": { "subcaseMS": 21.049 },
+ "webgpu:api,operation,rendering,draw:vertex_attributes,formats:*": { "subcaseMS": 0.901 },
+ "webgpu:api,operation,rendering,indirect_draw:basics:*": { "subcaseMS": 2.138 },
+ "webgpu:api,operation,rendering,stencil:stencil_compare_func:*": { "subcaseMS": 10.328 },
+ "webgpu:api,operation,rendering,stencil:stencil_depthFailOp_operation:*": { "subcaseMS": 10.323 },
+ "webgpu:api,operation,rendering,stencil:stencil_failOp_operation:*": { "subcaseMS": 11.108 },
+ "webgpu:api,operation,rendering,stencil:stencil_passOp_operation:*": { "subcaseMS": 11.123 },
+ "webgpu:api,operation,rendering,stencil:stencil_read_write_mask:*": { "subcaseMS": 11.492 },
+ "webgpu:api,operation,rendering,stencil:stencil_reference_initialized:*": { "subcaseMS": 13.234 },
+ "webgpu:api,operation,resource_init,buffer:copy_buffer_to_buffer_copy_source:*": { "subcaseMS": 15.500 },
+ "webgpu:api,operation,resource_init,buffer:copy_buffer_to_texture:*": { "subcaseMS": 8.350 },
+ "webgpu:api,operation,resource_init,buffer:copy_texture_to_partial_buffer:*": { "subcaseMS": 0.960 },
+ "webgpu:api,operation,resource_init,buffer:index_buffer:*": { "subcaseMS": 7.950 },
+ "webgpu:api,operation,resource_init,buffer:indirect_buffer_for_dispatch_indirect:*": { "subcaseMS": 8.850 },
+ "webgpu:api,operation,resource_init,buffer:indirect_buffer_for_draw_indirect:*": { "subcaseMS": 7.050 },
+ "webgpu:api,operation,resource_init,buffer:map_partial_buffer:*": { "subcaseMS": 5.250 },
+ "webgpu:api,operation,resource_init,buffer:map_whole_buffer:*": { "subcaseMS": 15.550 },
+ "webgpu:api,operation,resource_init,buffer:mapped_at_creation_partial_buffer:*": { "subcaseMS": 3.300 },
+ "webgpu:api,operation,resource_init,buffer:mapped_at_creation_whole_buffer:*": { "subcaseMS": 6.467 },
+ "webgpu:api,operation,resource_init,buffer:partial_write_buffer:*": { "subcaseMS": 5.167 },
+ "webgpu:api,operation,resource_init,buffer:readonly_storage_buffer:*": { "subcaseMS": 8.100 },
+ "webgpu:api,operation,resource_init,buffer:resolve_query_set_to_partial_buffer:*": { "subcaseMS": 6.401 },
+ "webgpu:api,operation,resource_init,buffer:storage_buffer:*": { "subcaseMS": 8.750 },
+ "webgpu:api,operation,resource_init,buffer:uniform_buffer:*": { "subcaseMS": 7.250 },
+ "webgpu:api,operation,resource_init,buffer:vertex_buffer:*": { "subcaseMS": 17.100 },
+ "webgpu:api,operation,resource_init,texture_zero:uninitialized_texture_is_zero:*": { "subcaseMS": 3.578 },
+ "webgpu:api,operation,sampling,anisotropy:anisotropic_filter_checkerboard:*": { "subcaseMS": 24.900 },
+ "webgpu:api,operation,sampling,anisotropy:anisotropic_filter_mipmap_color:*": { "subcaseMS": 11.550 },
+ "webgpu:api,operation,sampling,filter_mode:magFilter,linear:*": { "subcaseMS": 1.138 },
+ "webgpu:api,operation,sampling,filter_mode:magFilter,nearest:*": { "subcaseMS": 1.283 },
+ "webgpu:api,operation,sampling,filter_mode:minFilter,linear:*": { "subcaseMS": 1.146 },
+ "webgpu:api,operation,sampling,filter_mode:minFilter,nearest:*": { "subcaseMS": 1.057 },
+ "webgpu:api,operation,sampling,filter_mode:mipmapFilter:*": { "subcaseMS": 3.445 },
+ "webgpu:api,operation,shader_module,compilation_info:getCompilationInfo_returns:*": { "subcaseMS": 0.284 },
+ "webgpu:api,operation,shader_module,compilation_info:line_number_and_position:*": { "subcaseMS": 1.867 },
+ "webgpu:api,operation,shader_module,compilation_info:offset_and_length:*": { "subcaseMS": 1.648 },
+ "webgpu:api,operation,texture_view,format_reinterpretation:render_and_resolve_attachment:*": { "subcaseMS": 14.488 },
+ "webgpu:api,operation,texture_view,format_reinterpretation:texture_binding:*": { "subcaseMS": 17.225 },
+ "webgpu:api,operation,texture_view,read:aspect:*": { "subcaseMS": 0.601 },
+ "webgpu:api,operation,texture_view,read:dimension:*": { "subcaseMS": 0.701 },
+ "webgpu:api,operation,texture_view,read:format:*": { "subcaseMS": 1.100 },
+ "webgpu:api,operation,texture_view,write:aspect:*": { "subcaseMS": 0.700 },
+ "webgpu:api,operation,texture_view,write:dimension:*": { "subcaseMS": 0.601 },
+ "webgpu:api,operation,texture_view,write:format:*": { "subcaseMS": 0.600 },
+ "webgpu:api,operation,uncapturederror:constructor:*": { "subcaseMS": 0.200 },
+ "webgpu:api,operation,uncapturederror:iff_uncaptured:*": { "subcaseMS": 0.101 },
+ "webgpu:api,operation,uncapturederror:only_original_device_is_event_target:*": { "subcaseMS": 0.101 },
+ "webgpu:api,operation,uncapturederror:uncapturederror_from_non_originating_thread:*": { "subcaseMS": 0.201 },
+ "webgpu:api,operation,vertex_state,correctness:array_stride_zero:*": { "subcaseMS": 4.246 },
+ "webgpu:api,operation,vertex_state,correctness:buffers_with_varying_step_mode:*": { "subcaseMS": 6.100 },
+ "webgpu:api,operation,vertex_state,correctness:discontiguous_location_and_attribs:*": { "subcaseMS": 15.100 },
+ "webgpu:api,operation,vertex_state,correctness:max_buffers_and_attribs:*": { "subcaseMS": 18.577 },
+ "webgpu:api,operation,vertex_state,correctness:non_zero_array_stride_and_attribute_offset:*": { "subcaseMS": 3.816 },
+ "webgpu:api,operation,vertex_state,correctness:overlapping_attributes:*": { "subcaseMS": 17.470 },
+ "webgpu:api,operation,vertex_state,correctness:setVertexBuffer_offset_and_attribute_offset:*": { "subcaseMS": 2.848 },
+ "webgpu:api,operation,vertex_state,correctness:vertex_buffer_used_multiple_times_interleaved:*": { "subcaseMS": 5.398 },
+ "webgpu:api,operation,vertex_state,correctness:vertex_buffer_used_multiple_times_overlapped:*": { "subcaseMS": 5.388 },
+ "webgpu:api,operation,vertex_state,correctness:vertex_format_to_shader_format_conversion:*": { "subcaseMS": 3.697 },
+ "webgpu:api,operation,vertex_state,index_format:index_format,change_pipeline_after_setIndexBuffer:*": { "subcaseMS": 12.550 },
+ "webgpu:api,operation,vertex_state,index_format:index_format,setIndexBuffer_before_setPipeline:*": { "subcaseMS": 13.300 },
+ "webgpu:api,operation,vertex_state,index_format:index_format,setIndexBuffer_different_formats:*": { "subcaseMS": 12.601 },
+ "webgpu:api,operation,vertex_state,index_format:index_format,uint16:*": { "subcaseMS": 5.300 },
+ "webgpu:api,operation,vertex_state,index_format:index_format,uint32:*": { "subcaseMS": 5.900 },
+ "webgpu:api,operation,vertex_state,index_format:primitive_restart:*": { "subcaseMS": 12.080 },
+ "webgpu:api,validation,buffer,create:createBuffer_invalid_and_oom:*": { "subcaseMS": 1.500 },
+ "webgpu:api,validation,buffer,create:limit:*": { "subcaseMS": 31.433 },
+ "webgpu:api,validation,buffer,create:size:*": { "subcaseMS": 5.570 },
+ "webgpu:api,validation,buffer,create:usage:*": { "subcaseMS": 3.971 },
+ "webgpu:api,validation,buffer,destroy:all_usages:*": { "subcaseMS": 3.250 },
+ "webgpu:api,validation,buffer,destroy:error_buffer:*": { "subcaseMS": 29.700 },
+ "webgpu:api,validation,buffer,destroy:twice:*": { "subcaseMS": 5.367 },
+ "webgpu:api,validation,buffer,destroy:while_mapped:*": { "subcaseMS": 1.150 },
+ "webgpu:api,validation,buffer,mapping:gc_behavior,mapAsync:*": { "subcaseMS": 32.200 },
+ "webgpu:api,validation,buffer,mapping:gc_behavior,mappedAtCreation:*": { "subcaseMS": 76.200 },
+ "webgpu:api,validation,buffer,mapping:getMappedRange,disjoinRanges_many:*": { "subcaseMS": 73.700 },
+ "webgpu:api,validation,buffer,mapping:getMappedRange,disjointRanges:*": { "subcaseMS": 2.257 },
+ "webgpu:api,validation,buffer,mapping:getMappedRange,offsetAndSizeAlignment,mapped:*": { "subcaseMS": 3.119 },
+ "webgpu:api,validation,buffer,mapping:getMappedRange,offsetAndSizeAlignment,mappedAtCreation:*": { "subcaseMS": 5.611 },
+ "webgpu:api,validation,buffer,mapping:getMappedRange,sizeAndOffsetOOB,mapped:*": { "subcaseMS": 0.886 },
+ "webgpu:api,validation,buffer,mapping:getMappedRange,sizeAndOffsetOOB,mappedAtCreation:*": { "subcaseMS": 4.415 },
+ "webgpu:api,validation,buffer,mapping:getMappedRange,state,destroyed:*": { "subcaseMS": 61.301 },
+ "webgpu:api,validation,buffer,mapping:getMappedRange,state,invalid_mappedAtCreation:*": { "subcaseMS": 12.401 },
+ "webgpu:api,validation,buffer,mapping:getMappedRange,state,mapped:*": { "subcaseMS": 8.200 },
+ "webgpu:api,validation,buffer,mapping:getMappedRange,state,mappedAgain:*": { "subcaseMS": 8.150 },
+ "webgpu:api,validation,buffer,mapping:getMappedRange,state,mappedAtCreation:*": { "subcaseMS": 2.960 },
+ "webgpu:api,validation,buffer,mapping:getMappedRange,state,mappingPending:*": { "subcaseMS": 28.600 },
+ "webgpu:api,validation,buffer,mapping:getMappedRange,state,unmapped:*": { "subcaseMS": 16.000 },
+ "webgpu:api,validation,buffer,mapping:getMappedRange,subrange,mapped:*": { "subcaseMS": 63.150 },
+ "webgpu:api,validation,buffer,mapping:getMappedRange,subrange,mappedAtCreation:*": { "subcaseMS": 1.500 },
+ "webgpu:api,validation,buffer,mapping:mapAsync,abort_over_invalid_error:*": { "subcaseMS": 3.725 },
+ "webgpu:api,validation,buffer,mapping:mapAsync,earlyRejection:*": { "subcaseMS": 12.900 },
+ "webgpu:api,validation,buffer,mapping:mapAsync,invalidBuffer:*": { "subcaseMS": 18.000 },
+ "webgpu:api,validation,buffer,mapping:mapAsync,offsetAndSizeAlignment:*": { "subcaseMS": 1.794 },
+ "webgpu:api,validation,buffer,mapping:mapAsync,offsetAndSizeOOB:*": { "subcaseMS": 0.953 },
+ "webgpu:api,validation,buffer,mapping:mapAsync,sizeUnspecifiedOOB:*": { "subcaseMS": 2.212 },
+ "webgpu:api,validation,buffer,mapping:mapAsync,state,destroyed:*": { "subcaseMS": 15.450 },
+ "webgpu:api,validation,buffer,mapping:mapAsync,state,mapped:*": { "subcaseMS": 16.050 },
+ "webgpu:api,validation,buffer,mapping:mapAsync,state,mappedAtCreation:*": { "subcaseMS": 15.900 },
+ "webgpu:api,validation,buffer,mapping:mapAsync,state,mappingPending:*": { "subcaseMS": 16.700 },
+ "webgpu:api,validation,buffer,mapping:mapAsync,usage:*": { "subcaseMS": 1.203 },
+ "webgpu:api,validation,buffer,mapping:unmap,state,destroyed:*": { "subcaseMS": 12.701 },
+ "webgpu:api,validation,buffer,mapping:unmap,state,mapped:*": { "subcaseMS": 9.600 },
+ "webgpu:api,validation,buffer,mapping:unmap,state,mappedAtCreation:*": { "subcaseMS": 8.950 },
+ "webgpu:api,validation,buffer,mapping:unmap,state,mappingPending:*": { "subcaseMS": 22.951 },
+ "webgpu:api,validation,buffer,mapping:unmap,state,unmapped:*": { "subcaseMS": 74.200 },
+ "webgpu:api,validation,capability_checks,features,query_types:createQuerySet:*": { "subcaseMS": 10.451 },
+ "webgpu:api,validation,capability_checks,features,query_types:writeTimestamp:*": { "subcaseMS": 1.200 },
+ "webgpu:api,validation,capability_checks,features,texture_formats:canvas_configuration:*": { "subcaseMS": 4.339 },
+ "webgpu:api,validation,capability_checks,features,texture_formats:canvas_configuration_view_formats:*": { "subcaseMS": 4.522 },
+ "webgpu:api,validation,capability_checks,features,texture_formats:check_capability_guarantees:*": { "subcaseMS": 55.901 },
+ "webgpu:api,validation,capability_checks,features,texture_formats:depth_stencil_state:*": { "subcaseMS": 15.701 },
+ "webgpu:api,validation,capability_checks,features,texture_formats:render_bundle_encoder_descriptor_depth_stencil_format:*": { "subcaseMS": 0.800 },
+ "webgpu:api,validation,capability_checks,features,texture_formats:texture_descriptor:*": { "subcaseMS": 3.830 },
+ "webgpu:api,validation,capability_checks,features,texture_formats:texture_descriptor_view_formats:*": { "subcaseMS": 5.734 },
+ "webgpu:api,validation,capability_checks,features,texture_formats:texture_view_descriptor:*": { "subcaseMS": 4.113 },
+ "webgpu:api,validation,capability_checks,limits,maxBindGroups:createPipeline,at_over:*": { "subcaseMS": 10.990 },
+ "webgpu:api,validation,capability_checks,limits,maxBindGroups:createPipelineLayout,at_over:*": { "subcaseMS": 9.310 },
+ "webgpu:api,validation,capability_checks,limits,maxBindGroups:setBindGroup,at_over:*": { "subcaseMS": 9.984 },
+ "webgpu:api,validation,capability_checks,limits,maxBindGroups:validate,maxBindGroupsPlusVertexBuffers:*": { "subcaseMS": 11.200 },
+ "webgpu:api,validation,capability_checks,limits,maxBindingsPerBindGroup:createBindGroupLayout,at_over:*": { "subcaseMS": 12.441 },
+ "webgpu:api,validation,capability_checks,limits,maxBindingsPerBindGroup:createPipeline,at_over:*": { "subcaseMS": 11.179 },
+ "webgpu:api,validation,capability_checks,limits,maxBindingsPerBindGroup:validate:*": { "subcaseMS": 12.401 },
+ "webgpu:api,validation,capability_checks,limits,maxBufferSize:createBuffer,at_over:*": { "subcaseMS": 146.130 },
+ "webgpu:api,validation,capability_checks,limits,maxColorAttachmentBytesPerSample:beginRenderPass,at_over:*": { "subcaseMS": 9.396 },
+ "webgpu:api,validation,capability_checks,limits,maxColorAttachmentBytesPerSample:createRenderBundle,at_over:*": { "subcaseMS": 12.093 },
+ "webgpu:api,validation,capability_checks,limits,maxColorAttachmentBytesPerSample:createRenderPipeline,at_over:*": { "subcaseMS": 11.818 },
+ "webgpu:api,validation,capability_checks,limits,maxColorAttachments:beginRenderPass,at_over:*": { "subcaseMS": 10.320 },
+ "webgpu:api,validation,capability_checks,limits,maxColorAttachments:createRenderBundle,at_over:*": { "subcaseMS": 12.681 },
+ "webgpu:api,validation,capability_checks,limits,maxColorAttachments:createRenderPipeline,at_over:*": { "subcaseMS": 10.450 },
+ "webgpu:api,validation,capability_checks,limits,maxColorAttachments:validate,kMaxColorAttachmentsToTest:*": { "subcaseMS": 1.101 },
+ "webgpu:api,validation,capability_checks,limits,maxColorAttachments:validate,maxColorAttachmentBytesPerSample:*": { "subcaseMS": 1.101 },
+ "webgpu:api,validation,capability_checks,limits,maxComputeInvocationsPerWorkgroup:createComputePipeline,at_over:*": { "subcaseMS": 13.735 },
+ "webgpu:api,validation,capability_checks,limits,maxComputeWorkgroupSizeX:createComputePipeline,at_over:*": { "subcaseMS": 14.465 },
+ "webgpu:api,validation,capability_checks,limits,maxComputeWorkgroupSizeX:validate,maxComputeInvocationsPerWorkgroup:*": { "subcaseMS": 2.701 },
+ "webgpu:api,validation,capability_checks,limits,maxComputeWorkgroupSizeY:createComputePipeline,at_over:*": { "subcaseMS": 14.131 },
+ "webgpu:api,validation,capability_checks,limits,maxComputeWorkgroupSizeY:validate,maxComputeInvocationsPerWorkgroup:*": { "subcaseMS": 2.700 },
+ "webgpu:api,validation,capability_checks,limits,maxComputeWorkgroupSizeZ:createComputePipeline,at_over:*": { "subcaseMS": 14.920 },
+ "webgpu:api,validation,capability_checks,limits,maxComputeWorkgroupSizeZ:validate,maxComputeInvocationsPerWorkgroup:*": { "subcaseMS": 2.601 },
+ "webgpu:api,validation,capability_checks,limits,maxComputeWorkgroupStorageSize:createComputePipeline,at_over:*": { "subcaseMS": 12.009 },
+ "webgpu:api,validation,capability_checks,limits,maxComputeWorkgroupsPerDimension:dispatchWorkgroups,at_over:*": { "subcaseMS": 13.310 },
+ "webgpu:api,validation,capability_checks,limits,maxComputeWorkgroupsPerDimension:validate:*": { "subcaseMS": 138.900 },
+ "webgpu:api,validation,capability_checks,limits,maxDynamicStorageBuffersPerPipelineLayout:createBindGroupLayout,at_over:*": { "subcaseMS": 15.680 },
+ "webgpu:api,validation,capability_checks,limits,maxDynamicUniformBuffersPerPipelineLayout:createBindGroupLayout,at_over:*": { "subcaseMS": 10.268 },
+ "webgpu:api,validation,capability_checks,limits,maxInterStageShaderComponents:createRenderPipeline,at_over:*": { "subcaseMS": 12.916 },
+ "webgpu:api,validation,capability_checks,limits,maxInterStageShaderVariables:createRenderPipeline,at_over:*": { "subcaseMS": 13.700 },
+ "webgpu:api,validation,capability_checks,limits,maxSampledTexturesPerShaderStage:createBindGroupLayout,at_over:*": { "subcaseMS": 47.857 },
+ "webgpu:api,validation,capability_checks,limits,maxSampledTexturesPerShaderStage:createPipeline,at_over:*": { "subcaseMS": 45.611 },
+ "webgpu:api,validation,capability_checks,limits,maxSampledTexturesPerShaderStage:createPipelineLayout,at_over:*": { "subcaseMS": 26.153 },
+ "webgpu:api,validation,capability_checks,limits,maxSamplersPerShaderStage:createBindGroupLayout,at_over:*": { "subcaseMS": 9.645 },
+ "webgpu:api,validation,capability_checks,limits,maxSamplersPerShaderStage:createPipeline,at_over:*": { "subcaseMS": 11.959 },
+ "webgpu:api,validation,capability_checks,limits,maxSamplersPerShaderStage:createPipelineLayout,at_over:*": { "subcaseMS": 10.427 },
+ "webgpu:api,validation,capability_checks,limits,maxStorageBufferBindingSize:createBindGroup,at_over:*": { "subcaseMS": 51.810 },
+ "webgpu:api,validation,capability_checks,limits,maxStorageBufferBindingSize:validate,maxBufferSize:*": { "subcaseMS": 0.900 },
+ "webgpu:api,validation,capability_checks,limits,maxStorageBufferBindingSize:validate:*": { "subcaseMS": 132.400 },
+ "webgpu:api,validation,capability_checks,limits,maxStorageBuffersPerShaderStage:createBindGroupLayout,at_over:*": { "subcaseMS": 4.565 },
+ "webgpu:api,validation,capability_checks,limits,maxStorageBuffersPerShaderStage:createPipeline,at_over:*": { "subcaseMS": 7.884 },
+ "webgpu:api,validation,capability_checks,limits,maxStorageBuffersPerShaderStage:createPipelineLayout,at_over:*": { "subcaseMS": 5.007 },
+ "webgpu:api,validation,capability_checks,limits,maxStorageTexturesPerShaderStage:createBindGroupLayout,at_over:*": { "subcaseMS": 5.147 },
+ "webgpu:api,validation,capability_checks,limits,maxStorageTexturesPerShaderStage:createPipeline,at_over:*": { "subcaseMS": 6.804 },
+ "webgpu:api,validation,capability_checks,limits,maxStorageTexturesPerShaderStage:createPipelineLayout,at_over:*": { "subcaseMS": 5.457 },
+ "webgpu:api,validation,capability_checks,limits,maxTextureArrayLayers:createTexture,at_over:*": { "subcaseMS": 13.651 },
+ "webgpu:api,validation,capability_checks,limits,maxTextureDimension1D:createTexture,at_over:*": { "subcaseMS": 23.431 },
+ "webgpu:api,validation,capability_checks,limits,maxTextureDimension2D:configure,at_over:*": { "subcaseMS": 8.280 },
+ "webgpu:api,validation,capability_checks,limits,maxTextureDimension2D:createTexture,at_over:*": { "subcaseMS": 8.981 },
+ "webgpu:api,validation,capability_checks,limits,maxTextureDimension2D:getCurrentTexture,at_over:*": { "subcaseMS": 21.886 },
+ "webgpu:api,validation,capability_checks,limits,maxTextureDimension3D:createTexture,at_over:*": { "subcaseMS": 9.410 },
+ "webgpu:api,validation,capability_checks,limits,maxUniformBufferBindingSize:createBindGroup,at_over:*": { "subcaseMS": 6.785 },
+ "webgpu:api,validation,capability_checks,limits,maxUniformBufferBindingSize:validate,maxBufferSize:*": { "subcaseMS": 1.700 },
+ "webgpu:api,validation,capability_checks,limits,maxUniformBuffersPerShaderStage:createBindGroupLayout,at_over:*": { "subcaseMS": 5.858 },
+ "webgpu:api,validation,capability_checks,limits,maxUniformBuffersPerShaderStage:createPipeline,at_over:*": { "subcaseMS": 9.105 },
+ "webgpu:api,validation,capability_checks,limits,maxUniformBuffersPerShaderStage:createPipelineLayout,at_over:*": { "subcaseMS": 6.109 },
+ "webgpu:api,validation,capability_checks,limits,maxVertexAttributes:createRenderPipeline,at_over:*": { "subcaseMS": 9.090 },
+ "webgpu:api,validation,capability_checks,limits,maxVertexBufferArrayStride:createRenderPipeline,at_over:*": { "subcaseMS": 10.060 },
+ "webgpu:api,validation,capability_checks,limits,maxVertexBufferArrayStride:validate:*": { "subcaseMS": 139.500 },
+ "webgpu:api,validation,capability_checks,limits,maxVertexBuffers:createRenderPipeline,at_over:*": { "subcaseMS": 8.903 },
+ "webgpu:api,validation,capability_checks,limits,maxVertexBuffers:setVertexBuffer,at_over:*": { "subcaseMS": 7.695 },
+ "webgpu:api,validation,capability_checks,limits,maxVertexBuffers:validate,maxBindGroupsPlusVertexBuffers:*": { "subcaseMS": 153.900 },
+ "webgpu:api,validation,capability_checks,limits,minStorageBufferOffsetAlignment:createBindGroup,at_over:*": { "subcaseMS": 9.650 },
+ "webgpu:api,validation,capability_checks,limits,minStorageBufferOffsetAlignment:setBindGroup,at_over:*": { "subcaseMS": 8.931 },
+ "webgpu:api,validation,capability_checks,limits,minStorageBufferOffsetAlignment:validate,greaterThanOrEqualTo32:*": { "subcaseMS": 31.801 },
+ "webgpu:api,validation,capability_checks,limits,minStorageBufferOffsetAlignment:validate,powerOf2:*": { "subcaseMS": 2.400 },
+ "webgpu:api,validation,capability_checks,limits,minUniformBufferOffsetAlignment:createBindGroup,at_over:*": { "subcaseMS": 9.301 },
+ "webgpu:api,validation,capability_checks,limits,minUniformBufferOffsetAlignment:setBindGroup,at_over:*": { "subcaseMS": 10.341 },
+ "webgpu:api,validation,capability_checks,limits,minUniformBufferOffsetAlignment:validate,greaterThanOrEqualTo32:*": { "subcaseMS": 2.400 },
+ "webgpu:api,validation,capability_checks,limits,minUniformBufferOffsetAlignment:validate,powerOf2:*": { "subcaseMS": 2.301 },
+ "webgpu:api,validation,compute_pipeline:basic:*": { "subcaseMS": 28.050 },
+ "webgpu:api,validation,compute_pipeline:limits,invocations_per_workgroup,each_component:*": { "subcaseMS": 6.582 },
+ "webgpu:api,validation,compute_pipeline:limits,invocations_per_workgroup:*": { "subcaseMS": 8.092 },
+ "webgpu:api,validation,compute_pipeline:limits,workgroup_storage_size:*": { "subcaseMS": 4.025 },
+ "webgpu:api,validation,compute_pipeline:overrides,identifier:*": { "subcaseMS": 5.312 },
+ "webgpu:api,validation,compute_pipeline:overrides,uninitialized:*": { "subcaseMS": 7.801 },
+ "webgpu:api,validation,compute_pipeline:overrides,value,type_error:*": { "subcaseMS": 9.675 },
+ "webgpu:api,validation,compute_pipeline:overrides,value,validation_error,f16:*": { "subcaseMS": 5.908 },
+ "webgpu:api,validation,compute_pipeline:overrides,value,validation_error:*": { "subcaseMS": 13.918 },
+ "webgpu:api,validation,compute_pipeline:overrides,workgroup_size,limits,workgroup_storage_size:*": { "subcaseMS": 10.800 },
+ "webgpu:api,validation,compute_pipeline:overrides,workgroup_size,limits:*": { "subcaseMS": 14.751 },
+ "webgpu:api,validation,compute_pipeline:overrides,workgroup_size:*": { "subcaseMS": 6.376 },
+ "webgpu:api,validation,compute_pipeline:pipeline_layout,device_mismatch:*": { "subcaseMS": 1.175 },
+ "webgpu:api,validation,compute_pipeline:shader_module,compute:*": { "subcaseMS": 6.867 },
+ "webgpu:api,validation,compute_pipeline:shader_module,device_mismatch:*": { "subcaseMS": 15.350 },
+ "webgpu:api,validation,compute_pipeline:shader_module,invalid:*": { "subcaseMS": 2.500 },
+ "webgpu:api,validation,createBindGroup:bind_group_layout,device_mismatch:*": { "subcaseMS": 15.800 },
+ "webgpu:api,validation,createBindGroup:binding_count_mismatch:*": { "subcaseMS": 1.822 },
+ "webgpu:api,validation,createBindGroup:binding_must_be_present_in_layout:*": { "subcaseMS": 3.311 },
+ "webgpu:api,validation,createBindGroup:binding_must_contain_resource_defined_in_layout:*": { "subcaseMS": 0.340 },
+ "webgpu:api,validation,createBindGroup:binding_resources,device_mismatch:*": { "subcaseMS": 4.850 },
+ "webgpu:api,validation,createBindGroup:buffer,effective_buffer_binding_size:*": { "subcaseMS": 0.263 },
+ "webgpu:api,validation,createBindGroup:buffer,resource_binding_size:*": { "subcaseMS": 0.845 },
+ "webgpu:api,validation,createBindGroup:buffer,resource_offset:*": { "subcaseMS": 4.558 },
+ "webgpu:api,validation,createBindGroup:buffer,resource_state:*": { "subcaseMS": 0.301 },
+ "webgpu:api,validation,createBindGroup:buffer,usage:*": { "subcaseMS": 0.525 },
+ "webgpu:api,validation,createBindGroup:buffer_offset_and_size_for_bind_groups_match:*": { "subcaseMS": 1.871 },
+ "webgpu:api,validation,createBindGroup:minBindingSize:*": { "subcaseMS": 3.391 },
+ "webgpu:api,validation,createBindGroup:multisampled_validation:*": { "subcaseMS": 13.325 },
+ "webgpu:api,validation,createBindGroup:sampler,compare_function_with_binding_type:*": { "subcaseMS": 0.702 },
+ "webgpu:api,validation,createBindGroup:sampler,device_mismatch:*": { "subcaseMS": 1.750 },
+ "webgpu:api,validation,createBindGroup:storage_texture,format:*": { "subcaseMS": 5.045 },
+ "webgpu:api,validation,createBindGroup:storage_texture,mip_level_count:*": { "subcaseMS": 8.426 },
+ "webgpu:api,validation,createBindGroup:storage_texture,usage:*": { "subcaseMS": 3.817 },
+ "webgpu:api,validation,createBindGroup:texture,resource_state:*": { "subcaseMS": 2.542 },
+ "webgpu:api,validation,createBindGroup:texture_binding_must_have_correct_usage:*": { "subcaseMS": 1.150 },
+ "webgpu:api,validation,createBindGroup:texture_must_have_correct_component_type:*": { "subcaseMS": 10.767 },
+ "webgpu:api,validation,createBindGroup:texture_must_have_correct_dimension:*": { "subcaseMS": 3.288 },
+ "webgpu:api,validation,createBindGroupLayout:duplicate_bindings:*": { "subcaseMS": 1.200 },
+ "webgpu:api,validation,createBindGroupLayout:max_dynamic_buffers:*": { "subcaseMS": 2.800 },
+ "webgpu:api,validation,createBindGroupLayout:max_resources_per_stage,in_bind_group_layout:*": { "subcaseMS": 0.915 },
+ "webgpu:api,validation,createBindGroupLayout:max_resources_per_stage,in_pipeline_layout:*": { "subcaseMS": 0.682 },
+ "webgpu:api,validation,createBindGroupLayout:maximum_binding_limit:*": { "subcaseMS": 0.400 },
+ "webgpu:api,validation,createBindGroupLayout:multisampled_validation:*": { "subcaseMS": 0.452 },
+ "webgpu:api,validation,createBindGroupLayout:storage_texture,formats:*": { "subcaseMS": 4.996 },
+ "webgpu:api,validation,createBindGroupLayout:storage_texture,layout_dimension:*": { "subcaseMS": 3.829 },
+ "webgpu:api,validation,createBindGroupLayout:visibility,VERTEX_shader_stage_buffer_type:*": { "subcaseMS": 1.342 },
+ "webgpu:api,validation,createBindGroupLayout:visibility,VERTEX_shader_stage_storage_texture_access:*": { "subcaseMS": 4.394 },
+ "webgpu:api,validation,createBindGroupLayout:visibility:*": { "subcaseMS": 1.926 },
+ "webgpu:api,validation,createPipelineLayout:bind_group_layouts,device_mismatch:*": { "subcaseMS": 1.200 },
+ "webgpu:api,validation,createPipelineLayout:number_of_bind_group_layouts_exceeds_the_maximum_value:*": { "subcaseMS": 3.500 },
+ "webgpu:api,validation,createPipelineLayout:number_of_dynamic_buffers_exceeds_the_maximum_value:*": { "subcaseMS": 2.658 },
+ "webgpu:api,validation,createSampler:lodMinAndMaxClamp:*": { "subcaseMS": 0.610 },
+ "webgpu:api,validation,createSampler:maxAnisotropy:*": { "subcaseMS": 0.979 },
+ "webgpu:api,validation,createTexture:dimension_type_and_format_compatibility:*": { "subcaseMS": 4.062 },
+ "webgpu:api,validation,createTexture:mipLevelCount,bound_check,bigger_than_integer_bit_width:*": { "subcaseMS": 2.301 },
+ "webgpu:api,validation,createTexture:mipLevelCount,bound_check:*": { "subcaseMS": 0.801 },
+ "webgpu:api,validation,createTexture:mipLevelCount,format:*": { "subcaseMS": 1.258 },
+ "webgpu:api,validation,createTexture:sampleCount,valid_sampleCount_with_other_parameter_varies:*": { "subcaseMS": 0.525 },
+ "webgpu:api,validation,createTexture:sampleCount,various_sampleCount_with_all_formats:*": { "subcaseMS": 2.336 },
+ "webgpu:api,validation,createTexture:sample_count,1d_2d_array_3d:*": { "subcaseMS": 2.480 },
+ "webgpu:api,validation,createTexture:texture_size,1d_texture:*": { "subcaseMS": 1.372 },
+ "webgpu:api,validation,createTexture:texture_size,2d_texture,compressed_format:*": { "subcaseMS": 4.108 },
+ "webgpu:api,validation,createTexture:texture_size,2d_texture,uncompressed_format:*": { "subcaseMS": 4.729 },
+ "webgpu:api,validation,createTexture:texture_size,3d_texture,compressed_format:*": { "subcaseMS": 4.322 },
+ "webgpu:api,validation,createTexture:texture_size,3d_texture,uncompressed_format:*": { "subcaseMS": 2.039 },
+ "webgpu:api,validation,createTexture:texture_size,default_value_and_smallest_size,compressed_format:*": { "subcaseMS": 1.863 },
+ "webgpu:api,validation,createTexture:texture_size,default_value_and_smallest_size,uncompressed_format:*": { "subcaseMS": 1.694 },
+ "webgpu:api,validation,createTexture:texture_usage:*": { "subcaseMS": 0.870 },
+ "webgpu:api,validation,createTexture:viewFormats:*": { "subcaseMS": 0.632 },
+ "webgpu:api,validation,createTexture:zero_size_and_usage:*": { "subcaseMS": 3.250 },
+ "webgpu:api,validation,createView:array_layers:*": { "subcaseMS": 0.491 },
+ "webgpu:api,validation,createView:aspect:*": { "subcaseMS": 5.556 },
+ "webgpu:api,validation,createView:cube_faces_square:*": { "subcaseMS": 19.340 },
+ "webgpu:api,validation,createView:dimension:*": { "subcaseMS": 9.291 },
+ "webgpu:api,validation,createView:format:*": { "subcaseMS": 0.742 },
+ "webgpu:api,validation,createView:mip_levels:*": { "subcaseMS": 0.436 },
+ "webgpu:api,validation,createView:texture_state:*": { "subcaseMS": 0.400 },
+ "webgpu:api,validation,debugMarker:push_pop_call_count_unbalance,command_encoder:*": { "subcaseMS": 1.522 },
+ "webgpu:api,validation,debugMarker:push_pop_call_count_unbalance,render_compute_pass:*": { "subcaseMS": 0.601 },
+ "webgpu:api,validation,encoding,beginComputePass:timestampWrites,invalid_query_set:*": { "subcaseMS": 0.201 },
+ "webgpu:api,validation,encoding,beginComputePass:timestampWrites,query_index:*": { "subcaseMS": 0.201 },
+ "webgpu:api,validation,encoding,beginComputePass:timestampWrites,query_set_type:*": { "subcaseMS": 0.401 },
+ "webgpu:api,validation,encoding,beginComputePass:timestamp_query_set,device_mismatch:*": { "subcaseMS": 0.301 },
+ "webgpu:api,validation,encoding,beginRenderPass:color_attachments,device_mismatch:*": { "subcaseMS": 10.750 },
+ "webgpu:api,validation,encoding,beginRenderPass:depth_stencil_attachment,device_mismatch:*": { "subcaseMS": 26.100 },
+ "webgpu:api,validation,encoding,beginRenderPass:occlusion_query_set,device_mismatch:*": { "subcaseMS": 0.850 },
+ "webgpu:api,validation,encoding,beginRenderPass:timestamp_query_set,device_mismatch:*": { "subcaseMS": 0.301 },
+ "webgpu:api,validation,encoding,cmds,clearBuffer:buffer,device_mismatch:*": { "subcaseMS": 7.350 },
+ "webgpu:api,validation,encoding,cmds,clearBuffer:buffer_state:*": { "subcaseMS": 44.500 },
+ "webgpu:api,validation,encoding,cmds,clearBuffer:buffer_usage:*": { "subcaseMS": 4.000 },
+ "webgpu:api,validation,encoding,cmds,clearBuffer:default_args:*": { "subcaseMS": 0.233 },
+ "webgpu:api,validation,encoding,cmds,clearBuffer:offset_alignment:*": { "subcaseMS": 2.086 },
+ "webgpu:api,validation,encoding,cmds,clearBuffer:out_of_bounds:*": { "subcaseMS": 0.213 },
+ "webgpu:api,validation,encoding,cmds,clearBuffer:overflow:*": { "subcaseMS": 0.350 },
+ "webgpu:api,validation,encoding,cmds,clearBuffer:size_alignment:*": { "subcaseMS": 0.300 },
+ "webgpu:api,validation,encoding,cmds,compute_pass:dispatch_sizes:*": { "subcaseMS": 4.062 },
+ "webgpu:api,validation,encoding,cmds,compute_pass:indirect_dispatch_buffer,device_mismatch:*": { "subcaseMS": 21.050 },
+ "webgpu:api,validation,encoding,cmds,compute_pass:indirect_dispatch_buffer,usage:*": { "subcaseMS": 0.534 },
+ "webgpu:api,validation,encoding,cmds,compute_pass:indirect_dispatch_buffer_state:*": { "subcaseMS": 2.093 },
+ "webgpu:api,validation,encoding,cmds,compute_pass:pipeline,device_mismatch:*": { "subcaseMS": 7.600 },
+ "webgpu:api,validation,encoding,cmds,compute_pass:set_pipeline:*": { "subcaseMS": 1.000 },
+ "webgpu:api,validation,encoding,cmds,copyBufferToBuffer:buffer,device_mismatch:*": { "subcaseMS": 0.500 },
+ "webgpu:api,validation,encoding,cmds,copyBufferToBuffer:buffer_state:*": { "subcaseMS": 3.178 },
+ "webgpu:api,validation,encoding,cmds,copyBufferToBuffer:buffer_usage:*": { "subcaseMS": 0.591 },
+ "webgpu:api,validation,encoding,cmds,copyBufferToBuffer:copy_offset_alignment:*": { "subcaseMS": 0.400 },
+ "webgpu:api,validation,encoding,cmds,copyBufferToBuffer:copy_out_of_bounds:*": { "subcaseMS": 0.200 },
+ "webgpu:api,validation,encoding,cmds,copyBufferToBuffer:copy_overflow:*": { "subcaseMS": 0.301 },
+ "webgpu:api,validation,encoding,cmds,copyBufferToBuffer:copy_size_alignment:*": { "subcaseMS": 0.680 },
+ "webgpu:api,validation,encoding,cmds,copyBufferToBuffer:copy_within_same_buffer:*": { "subcaseMS": 0.401 },
+ "webgpu:api,validation,encoding,cmds,copyTextureToTexture:copy_aspects:*": { "subcaseMS": 2.182 },
+ "webgpu:api,validation,encoding,cmds,copyTextureToTexture:copy_ranges:*": { "subcaseMS": 11.442 },
+ "webgpu:api,validation,encoding,cmds,copyTextureToTexture:copy_ranges_with_compressed_texture_formats:*": { "subcaseMS": 0.334 },
+ "webgpu:api,validation,encoding,cmds,copyTextureToTexture:copy_with_invalid_or_destroyed_texture:*": { "subcaseMS": 4.844 },
+ "webgpu:api,validation,encoding,cmds,copyTextureToTexture:copy_within_same_texture:*": { "subcaseMS": 0.301 },
+ "webgpu:api,validation,encoding,cmds,copyTextureToTexture:depth_stencil_copy_restrictions:*": { "subcaseMS": 0.480 },
+ "webgpu:api,validation,encoding,cmds,copyTextureToTexture:mipmap_level:*": { "subcaseMS": 0.879 },
+ "webgpu:api,validation,encoding,cmds,copyTextureToTexture:multisampled_copy_restrictions:*": { "subcaseMS": 0.301 },
+ "webgpu:api,validation,encoding,cmds,copyTextureToTexture:sample_count:*": { "subcaseMS": 4.125 },
+ "webgpu:api,validation,encoding,cmds,copyTextureToTexture:texture,device_mismatch:*": { "subcaseMS": 0.567 },
+ "webgpu:api,validation,encoding,cmds,copyTextureToTexture:texture_format_compatibility:*": { "subcaseMS": 0.341 },
+ "webgpu:api,validation,encoding,cmds,copyTextureToTexture:texture_usage:*": { "subcaseMS": 2.308 },
+ "webgpu:api,validation,encoding,cmds,debug:debug_group:*": { "subcaseMS": 3.640 },
+ "webgpu:api,validation,encoding,cmds,debug:debug_group_balanced:*": { "subcaseMS": 1.978 },
+ "webgpu:api,validation,encoding,cmds,debug:debug_marker:*": { "subcaseMS": 0.960 },
+ "webgpu:api,validation,encoding,cmds,index_access:out_of_bounds:*": { "subcaseMS": 7.139 },
+ "webgpu:api,validation,encoding,cmds,index_access:out_of_bounds_zero_sized_index_buffer:*": { "subcaseMS": 12.400 },
+ "webgpu:api,validation,encoding,cmds,render,draw:buffer_binding_overlap:*": { "subcaseMS": 0.446 },
+ "webgpu:api,validation,encoding,cmds,render,draw:index_buffer_OOB:*": { "subcaseMS": 5.825 },
+ "webgpu:api,validation,encoding,cmds,render,draw:last_buffer_setting_take_account:*": { "subcaseMS": 30.801 },
+ "webgpu:api,validation,encoding,cmds,render,draw:max_draw_count:*": { "subcaseMS": 3.521 },
+ "webgpu:api,validation,encoding,cmds,render,draw:unused_buffer_bound:*": { "subcaseMS": 1.413 },
+ "webgpu:api,validation,encoding,cmds,render,draw:vertex_buffer_OOB:*": { "subcaseMS": 0.767 },
+ "webgpu:api,validation,encoding,cmds,render,dynamic_state:setBlendConstant:*": { "subcaseMS": 0.367 },
+ "webgpu:api,validation,encoding,cmds,render,dynamic_state:setScissorRect,x_y_width_height_nonnegative:*": { "subcaseMS": 2.900 },
+ "webgpu:api,validation,encoding,cmds,render,dynamic_state:setScissorRect,xy_rect_contained_in_attachment:*": { "subcaseMS": 1.325 },
+ "webgpu:api,validation,encoding,cmds,render,dynamic_state:setStencilReference:*": { "subcaseMS": 3.450 },
+ "webgpu:api,validation,encoding,cmds,render,dynamic_state:setViewport,depth_rangeAndOrder:*": { "subcaseMS": 1.667 },
+ "webgpu:api,validation,encoding,cmds,render,dynamic_state:setViewport,x_y_width_height_nonnegative:*": { "subcaseMS": 0.400 },
+ "webgpu:api,validation,encoding,cmds,render,dynamic_state:setViewport,xy_rect_contained_in_attachment:*": { "subcaseMS": 0.200 },
+ "webgpu:api,validation,encoding,cmds,render,indirect_draw:indirect_buffer,device_mismatch:*": { "subcaseMS": 2.000 },
+ "webgpu:api,validation,encoding,cmds,render,indirect_draw:indirect_buffer_state:*": { "subcaseMS": 2.708 },
+ "webgpu:api,validation,encoding,cmds,render,indirect_draw:indirect_buffer_usage:*": { "subcaseMS": 2.733 },
+ "webgpu:api,validation,encoding,cmds,render,indirect_draw:indirect_offset_alignment:*": { "subcaseMS": 2.758 },
+ "webgpu:api,validation,encoding,cmds,render,indirect_draw:indirect_offset_oob:*": { "subcaseMS": 0.725 },
+ "webgpu:api,validation,encoding,cmds,render,setIndexBuffer:index_buffer,device_mismatch:*": { "subcaseMS": 7.800 },
+ "webgpu:api,validation,encoding,cmds,render,setIndexBuffer:index_buffer_state:*": { "subcaseMS": 5.200 },
+ "webgpu:api,validation,encoding,cmds,render,setIndexBuffer:index_buffer_usage:*": { "subcaseMS": 2.467 },
+ "webgpu:api,validation,encoding,cmds,render,setIndexBuffer:offset_alignment:*": { "subcaseMS": 2.642 },
+ "webgpu:api,validation,encoding,cmds,render,setIndexBuffer:offset_and_size_oob:*": { "subcaseMS": 1.067 },
+ "webgpu:api,validation,encoding,cmds,render,setPipeline:invalid_pipeline:*": { "subcaseMS": 0.525 },
+ "webgpu:api,validation,encoding,cmds,render,setPipeline:pipeline,device_mismatch:*": { "subcaseMS": 8.500 },
+ "webgpu:api,validation,encoding,cmds,render,setVertexBuffer:offset_alignment:*": { "subcaseMS": 2.550 },
+ "webgpu:api,validation,encoding,cmds,render,setVertexBuffer:offset_and_size_oob:*": { "subcaseMS": 0.200 },
+ "webgpu:api,validation,encoding,cmds,render,setVertexBuffer:slot:*": { "subcaseMS": 5.300 },
+ "webgpu:api,validation,encoding,cmds,render,setVertexBuffer:vertex_buffer,device_mismatch:*": { "subcaseMS": 7.850 },
+ "webgpu:api,validation,encoding,cmds,render,setVertexBuffer:vertex_buffer_state:*": { "subcaseMS": 5.200 },
+ "webgpu:api,validation,encoding,cmds,render,setVertexBuffer:vertex_buffer_usage:*": { "subcaseMS": 0.301 },
+ "webgpu:api,validation,encoding,cmds,render,state_tracking:all_needed_index_buffer_should_be_bound:*": { "subcaseMS": 14.101 },
+ "webgpu:api,validation,encoding,cmds,render,state_tracking:all_needed_vertex_buffer_should_be_bound:*": { "subcaseMS": 31.900 },
+ "webgpu:api,validation,encoding,cmds,render,state_tracking:vertex_buffers_do_not_inherit_between_render_passes:*": { "subcaseMS": 3.400 },
+ "webgpu:api,validation,encoding,cmds,render,state_tracking:vertex_buffers_inherit_from_previous_pipeline:*": { "subcaseMS": 31.701 },
+ "webgpu:api,validation,encoding,cmds,setBindGroup:bind_group,device_mismatch:*": { "subcaseMS": 6.975 },
+ "webgpu:api,validation,encoding,cmds,setBindGroup:buffer_dynamic_offsets:*": { "subcaseMS": 1.990 },
+ "webgpu:api,validation,encoding,cmds,setBindGroup:dynamic_offsets_match_expectations_in_pass_encoder:*": { "subcaseMS": 3.949 },
+ "webgpu:api,validation,encoding,cmds,setBindGroup:dynamic_offsets_passed_but_not_expected:*": { "subcaseMS": 0.900 },
+ "webgpu:api,validation,encoding,cmds,setBindGroup:state_and_binding_index:*": { "subcaseMS": 5.417 },
+ "webgpu:api,validation,encoding,cmds,setBindGroup:u32array_start_and_length:*": { "subcaseMS": 3.020 },
+ "webgpu:api,validation,encoding,createRenderBundleEncoder:attachment_state,empty_color_formats:*": { "subcaseMS": 0.450 },
+ "webgpu:api,validation,encoding,createRenderBundleEncoder:attachment_state,limits,maxColorAttachmentBytesPerSample,aligned:*": { "subcaseMS": 2.641 },
+ "webgpu:api,validation,encoding,createRenderBundleEncoder:attachment_state,limits,maxColorAttachmentBytesPerSample,unaligned:*": { "subcaseMS": 0.750 },
+ "webgpu:api,validation,encoding,createRenderBundleEncoder:attachment_state,limits,maxColorAttachments:*": { "subcaseMS": 0.145 },
+ "webgpu:api,validation,encoding,createRenderBundleEncoder:depth_stencil_readonly:*": { "subcaseMS": 1.804 },
+ "webgpu:api,validation,encoding,createRenderBundleEncoder:depth_stencil_readonly_with_undefined_depth:*": { "subcaseMS": 14.825 },
+ "webgpu:api,validation,encoding,createRenderBundleEncoder:valid_texture_formats:*": { "subcaseMS": 2.130 },
+ "webgpu:api,validation,encoding,encoder_open_state:compute_pass_commands:*": { "subcaseMS": 4.208 },
+ "webgpu:api,validation,encoding,encoder_open_state:non_pass_commands:*": { "subcaseMS": 26.191 },
+ "webgpu:api,validation,encoding,encoder_open_state:render_bundle_commands:*": { "subcaseMS": 2.850 },
+ "webgpu:api,validation,encoding,encoder_open_state:render_pass_commands:*": { "subcaseMS": 3.620 },
+ "webgpu:api,validation,encoding,encoder_state:call_after_successful_finish:*": { "subcaseMS": 0.800 },
+ "webgpu:api,validation,encoding,encoder_state:pass_end_invalid_order:*": { "subcaseMS": 1.303 },
+ "webgpu:api,validation,encoding,encoder_state:pass_end_none:*": { "subcaseMS": 8.150 },
+ "webgpu:api,validation,encoding,encoder_state:pass_end_twice,basic:*": { "subcaseMS": 0.300 },
+ "webgpu:api,validation,encoding,encoder_state:pass_end_twice,render_pass_invalid:*": { "subcaseMS": 15.850 },
+ "webgpu:api,validation,encoding,programmable,pipeline_bind_group_compat:bgl_binding_mismatch:*": { "subcaseMS": 1.301 },
+ "webgpu:api,validation,encoding,programmable,pipeline_bind_group_compat:bgl_resource_type_mismatch:*": { "subcaseMS": 0.977 },
+ "webgpu:api,validation,encoding,programmable,pipeline_bind_group_compat:bgl_visibility_mismatch:*": { "subcaseMS": 0.608 },
+ "webgpu:api,validation,encoding,programmable,pipeline_bind_group_compat:bind_groups_and_pipeline_layout_mismatch:*": { "subcaseMS": 1.535 },
+ "webgpu:api,validation,encoding,programmable,pipeline_bind_group_compat:buffer_binding,render_pipeline:*": { "subcaseMS": 1.734 },
+ "webgpu:api,validation,encoding,programmable,pipeline_bind_group_compat:empty_bind_group_layouts_requires_empty_bind_groups,compute_pass:*": { "subcaseMS": 2.325 },
+ "webgpu:api,validation,encoding,programmable,pipeline_bind_group_compat:empty_bind_group_layouts_requires_empty_bind_groups,render_pass:*": { "subcaseMS": 10.838 },
+ "webgpu:api,validation,encoding,programmable,pipeline_bind_group_compat:sampler_binding,render_pipeline:*": { "subcaseMS": 10.523 },
+ "webgpu:api,validation,encoding,queries,begin_end:nesting:*": { "subcaseMS": 1.101 },
+ "webgpu:api,validation,encoding,queries,begin_end:occlusion_query,begin_end_balance:*": { "subcaseMS": 0.820 },
+ "webgpu:api,validation,encoding,queries,begin_end:occlusion_query,begin_end_invalid_nesting:*": { "subcaseMS": 1.000 },
+ "webgpu:api,validation,encoding,queries,begin_end:occlusion_query,disjoint_queries_with_same_query_index:*": { "subcaseMS": 0.550 },
+ "webgpu:api,validation,encoding,queries,general:occlusion_query,invalid_query_set:*": { "subcaseMS": 1.651 },
+ "webgpu:api,validation,encoding,queries,general:occlusion_query,query_index:*": { "subcaseMS": 0.500 },
+ "webgpu:api,validation,encoding,queries,general:occlusion_query,query_type:*": { "subcaseMS": 4.702 },
+ "webgpu:api,validation,encoding,queries,general:timestamp_query,device_mismatch:*": { "subcaseMS": 0.101 },
+ "webgpu:api,validation,encoding,queries,general:timestamp_query,invalid_query_set:*": { "subcaseMS": 0.101 },
+ "webgpu:api,validation,encoding,queries,general:timestamp_query,query_type_and_index:*": { "subcaseMS": 0.301 },
+ "webgpu:api,validation,encoding,queries,resolveQuerySet:destination_buffer_usage:*": { "subcaseMS": 16.050 },
+ "webgpu:api,validation,encoding,queries,resolveQuerySet:destination_offset_alignment:*": { "subcaseMS": 0.325 },
+ "webgpu:api,validation,encoding,queries,resolveQuerySet:first_query_and_query_count:*": { "subcaseMS": 0.250 },
+ "webgpu:api,validation,encoding,queries,resolveQuerySet:query_set_buffer,device_mismatch:*": { "subcaseMS": 1.000 },
+ "webgpu:api,validation,encoding,queries,resolveQuerySet:queryset_and_destination_buffer_state:*": { "subcaseMS": 9.078 },
+ "webgpu:api,validation,encoding,queries,resolveQuerySet:resolve_buffer_oob:*": { "subcaseMS": 6.300 },
+ "webgpu:api,validation,encoding,render_bundle:color_formats_mismatch:*": { "subcaseMS": 10.940 },
+ "webgpu:api,validation,encoding,render_bundle:depth_stencil_formats_mismatch:*": { "subcaseMS": 4.050 },
+ "webgpu:api,validation,encoding,render_bundle:depth_stencil_readonly_mismatch:*": { "subcaseMS": 4.488 },
+ "webgpu:api,validation,encoding,render_bundle:device_mismatch:*": { "subcaseMS": 0.633 },
+ "webgpu:api,validation,encoding,render_bundle:empty_bundle_list:*": { "subcaseMS": 30.301 },
+ "webgpu:api,validation,encoding,render_bundle:sample_count_mismatch:*": { "subcaseMS": 8.325 },
+ "webgpu:api,validation,error_scope:balanced_nesting:*": { "subcaseMS": 56.817 },
+ "webgpu:api,validation,error_scope:balanced_siblings:*": { "subcaseMS": 95.950 },
+ "webgpu:api,validation,error_scope:current_scope:*": { "subcaseMS": 1177.650 },
+ "webgpu:api,validation,error_scope:empty:*": { "subcaseMS": 0.801 },
+ "webgpu:api,validation,error_scope:parent_scope:*": { "subcaseMS": 11.601 },
+ "webgpu:api,validation,error_scope:simple:*": { "subcaseMS": 10.317 },
+ "webgpu:api,validation,getBindGroupLayout:index_range,auto_layout:*": { "subcaseMS": 6.300 },
+ "webgpu:api,validation,getBindGroupLayout:index_range,explicit_layout:*": { "subcaseMS": 30.334 },
+ "webgpu:api,validation,getBindGroupLayout:unique_js_object,auto_layout:*": { "subcaseMS": 1.601 },
+ "webgpu:api,validation,getBindGroupLayout:unique_js_object,explicit_layout:*": { "subcaseMS": 0.900 },
+ "webgpu:api,validation,gpu_external_texture_expiration:import_and_use_in_different_microtask:*": { "subcaseMS": 40.700 },
+ "webgpu:api,validation,gpu_external_texture_expiration:import_and_use_in_different_task:*": { "subcaseMS": 41.901 },
+ "webgpu:api,validation,gpu_external_texture_expiration:import_from_different_video_frame:*": { "subcaseMS": 82.101 },
+ "webgpu:api,validation,gpu_external_texture_expiration:import_multiple_times_in_same_task_scope:*": { "subcaseMS": 130.150 },
+ "webgpu:api,validation,gpu_external_texture_expiration:use_import_to_refresh:*": { "subcaseMS": 48.700 },
+ "webgpu:api,validation,gpu_external_texture_expiration:webcodec_video_frame_close_expire_immediately:*": { "subcaseMS": 48.801 },
+ "webgpu:api,validation,image_copy,buffer_related:buffer,device_mismatch:*": { "subcaseMS": 0.400 },
+ "webgpu:api,validation,image_copy,buffer_related:buffer_state:*": { "subcaseMS": 1.034 },
+ "webgpu:api,validation,image_copy,buffer_related:bytes_per_row_alignment:*": { "subcaseMS": 2.635 },
+ "webgpu:api,validation,image_copy,buffer_related:usage:*": { "subcaseMS": 0.384 },
+ "webgpu:api,validation,image_copy,buffer_texture_copies:depth_stencil_format,copy_buffer_offset:*": { "subcaseMS": 4.996 },
+ "webgpu:api,validation,image_copy,buffer_texture_copies:depth_stencil_format,copy_buffer_size:*": { "subcaseMS": 1.728 },
+ "webgpu:api,validation,image_copy,buffer_texture_copies:depth_stencil_format,copy_usage_and_aspect:*": { "subcaseMS": 6.467 },
+ "webgpu:api,validation,image_copy,buffer_texture_copies:device_mismatch:*": { "subcaseMS": 2.767 },
+ "webgpu:api,validation,image_copy,buffer_texture_copies:sample_count:*": { "subcaseMS": 14.575 },
+ "webgpu:api,validation,image_copy,buffer_texture_copies:texture_buffer_usages:*": { "subcaseMS": 1.001 },
+ "webgpu:api,validation,image_copy,layout_related:bound_on_bytes_per_row:*": { "subcaseMS": 1.133 },
+ "webgpu:api,validation,image_copy,layout_related:bound_on_offset:*": { "subcaseMS": 0.833 },
+ "webgpu:api,validation,image_copy,layout_related:bound_on_rows_per_image:*": { "subcaseMS": 2.666 },
+ "webgpu:api,validation,image_copy,layout_related:copy_end_overflows_u64:*": { "subcaseMS": 0.567 },
+ "webgpu:api,validation,image_copy,layout_related:offset_alignment:*": { "subcaseMS": 1.107 },
+ "webgpu:api,validation,image_copy,layout_related:required_bytes_in_copy:*": { "subcaseMS": 1.051 },
+ "webgpu:api,validation,image_copy,layout_related:rows_per_image_alignment:*": { "subcaseMS": 2.239 },
+ "webgpu:api,validation,image_copy,texture_related:copy_rectangle:*": { "subcaseMS": 0.599 },
+ "webgpu:api,validation,image_copy,texture_related:format:*": { "subcaseMS": 4.790 },
+ "webgpu:api,validation,image_copy,texture_related:mip_level:*": { "subcaseMS": 2.632 },
+ "webgpu:api,validation,image_copy,texture_related:origin_alignment:*": { "subcaseMS": 1.252 },
+ "webgpu:api,validation,image_copy,texture_related:sample_count:*": { "subcaseMS": 5.717 },
+ "webgpu:api,validation,image_copy,texture_related:size_alignment:*": { "subcaseMS": 1.076 },
+ "webgpu:api,validation,image_copy,texture_related:texture,device_mismatch:*": { "subcaseMS": 5.417 },
+ "webgpu:api,validation,image_copy,texture_related:usage:*": { "subcaseMS": 1.224 },
+ "webgpu:api,validation,image_copy,texture_related:valid:*": { "subcaseMS": 3.678 },
+ "webgpu:api,validation,query_set,create:count:*": { "subcaseMS": 0.967 },
+ "webgpu:api,validation,query_set,destroy:invalid_queryset:*": { "subcaseMS": 0.801 },
+ "webgpu:api,validation,query_set,destroy:twice:*": { "subcaseMS": 0.700 },
+ "webgpu:api,validation,queue,buffer_mapped:copyBufferToBuffer:*": { "subcaseMS": 36.601 },
+ "webgpu:api,validation,queue,buffer_mapped:copyBufferToTexture:*": { "subcaseMS": 33.000 },
+ "webgpu:api,validation,queue,buffer_mapped:copyTextureToBuffer:*": { "subcaseMS": 32.301 },
+ "webgpu:api,validation,queue,buffer_mapped:map_command_recording_order:*": { "subcaseMS": 3.091 },
+ "webgpu:api,validation,queue,buffer_mapped:writeBuffer:*": { "subcaseMS": 34.901 },
+ "webgpu:api,validation,queue,copyToTexture,CopyExternalImageToTexture:OOB,destination:*": { "subcaseMS": 0.512 },
+ "webgpu:api,validation,queue,copyToTexture,CopyExternalImageToTexture:OOB,source:*": { "subcaseMS": 0.389 },
+ "webgpu:api,validation,queue,copyToTexture,CopyExternalImageToTexture:destination_texture,device_mismatch:*": { "subcaseMS": 35.550 },
+ "webgpu:api,validation,queue,copyToTexture,CopyExternalImageToTexture:destination_texture,format:*": { "subcaseMS": 2.180 },
+ "webgpu:api,validation,queue,copyToTexture,CopyExternalImageToTexture:destination_texture,mipLevel:*": { "subcaseMS": 5.834 },
+ "webgpu:api,validation,queue,copyToTexture,CopyExternalImageToTexture:destination_texture,sample_count:*": { "subcaseMS": 35.500 },
+ "webgpu:api,validation,queue,copyToTexture,CopyExternalImageToTexture:destination_texture,state:*": { "subcaseMS": 26.667 },
+ "webgpu:api,validation,queue,copyToTexture,CopyExternalImageToTexture:destination_texture,usage:*": { "subcaseMS": 22.760 },
+ "webgpu:api,validation,queue,copyToTexture,CopyExternalImageToTexture:source_canvas,state:*": { "subcaseMS": 10.250 },
+ "webgpu:api,validation,queue,copyToTexture,CopyExternalImageToTexture:source_image,crossOrigin:*": { "subcaseMS": 15.435 },
+ "webgpu:api,validation,queue,copyToTexture,CopyExternalImageToTexture:source_imageBitmap,state:*": { "subcaseMS": 9.100 },
+ "webgpu:api,validation,queue,copyToTexture,CopyExternalImageToTexture:source_offscreenCanvas,state:*": { "subcaseMS": 11.334 },
+ "webgpu:api,validation,queue,destroyed,buffer:copyBufferToBuffer:*": { "subcaseMS": 0.800 },
+ "webgpu:api,validation,queue,destroyed,buffer:copyBufferToTexture:*": { "subcaseMS": 1.401 },
+ "webgpu:api,validation,queue,destroyed,buffer:copyTextureToBuffer:*": { "subcaseMS": 1.500 },
+ "webgpu:api,validation,queue,destroyed,buffer:resolveQuerySet:*": { "subcaseMS": 16.550 },
+ "webgpu:api,validation,queue,destroyed,buffer:setBindGroup:*": { "subcaseMS": 2.983 },
+ "webgpu:api,validation,queue,destroyed,buffer:setIndexBuffer:*": { "subcaseMS": 8.150 },
+ "webgpu:api,validation,queue,destroyed,buffer:setVertexBuffer:*": { "subcaseMS": 8.550 },
+ "webgpu:api,validation,queue,destroyed,buffer:writeBuffer:*": { "subcaseMS": 2.151 },
+ "webgpu:api,validation,queue,destroyed,query_set:beginOcclusionQuery:*": { "subcaseMS": 17.401 },
+ "webgpu:api,validation,queue,destroyed,query_set:resolveQuerySet:*": { "subcaseMS": 16.401 },
+ "webgpu:api,validation,queue,destroyed,query_set:writeTimestamp:*": { "subcaseMS": 0.901 },
+ "webgpu:api,validation,queue,destroyed,texture:beginRenderPass:*": { "subcaseMS": 0.350 },
+ "webgpu:api,validation,queue,destroyed,texture:copyBufferToTexture:*": { "subcaseMS": 16.550 },
+ "webgpu:api,validation,queue,destroyed,texture:copyTextureToBuffer:*": { "subcaseMS": 15.900 },
+ "webgpu:api,validation,queue,destroyed,texture:copyTextureToTexture:*": { "subcaseMS": 8.500 },
+ "webgpu:api,validation,queue,destroyed,texture:setBindGroup:*": { "subcaseMS": 5.783 },
+ "webgpu:api,validation,queue,destroyed,texture:writeTexture:*": { "subcaseMS": 16.601 },
+ "webgpu:api,validation,queue,submit:command_buffer,device_mismatch:*": { "subcaseMS": 0.467 },
+ "webgpu:api,validation,queue,writeBuffer:buffer,device_mismatch:*": { "subcaseMS": 16.000 },
+ "webgpu:api,validation,queue,writeBuffer:buffer_state:*": { "subcaseMS": 6.201 },
+ "webgpu:api,validation,queue,writeBuffer:ranges:*": { "subcaseMS": 17.600 },
+ "webgpu:api,validation,queue,writeBuffer:usages:*": { "subcaseMS": 8.525 },
+ "webgpu:api,validation,queue,writeTexture:sample_count:*": { "subcaseMS": 2.050 },
+ "webgpu:api,validation,queue,writeTexture:texture,device_mismatch:*": { "subcaseMS": 7.850 },
+ "webgpu:api,validation,queue,writeTexture:texture_state:*": { "subcaseMS": 18.567 },
+ "webgpu:api,validation,queue,writeTexture:usages:*": { "subcaseMS": 0.700 },
+ "webgpu:api,validation,render_pass,attachment_compatibility:render_pass_and_bundle,color_count:*": { "subcaseMS": 0.627 },
+ "webgpu:api,validation,render_pass,attachment_compatibility:render_pass_and_bundle,color_format:*": { "subcaseMS": 0.200 },
+ "webgpu:api,validation,render_pass,attachment_compatibility:render_pass_and_bundle,color_sparse:*": { "subcaseMS": 0.784 },
+ "webgpu:api,validation,render_pass,attachment_compatibility:render_pass_and_bundle,depth_format:*": { "subcaseMS": 1.000 },
+ "webgpu:api,validation,render_pass,attachment_compatibility:render_pass_and_bundle,device_mismatch:*": { "subcaseMS": 0.650 },
+ "webgpu:api,validation,render_pass,attachment_compatibility:render_pass_and_bundle,sample_count:*": { "subcaseMS": 0.775 },
+ "webgpu:api,validation,render_pass,attachment_compatibility:render_pass_or_bundle_and_pipeline,color_count:*": { "subcaseMS": 0.543 },
+ "webgpu:api,validation,render_pass,attachment_compatibility:render_pass_or_bundle_and_pipeline,color_format:*": { "subcaseMS": 0.400 },
+ "webgpu:api,validation,render_pass,attachment_compatibility:render_pass_or_bundle_and_pipeline,color_sparse:*": { "subcaseMS": 0.511 },
+ "webgpu:api,validation,render_pass,attachment_compatibility:render_pass_or_bundle_and_pipeline,depth_format:*": { "subcaseMS": 0.840 },
+ "webgpu:api,validation,render_pass,attachment_compatibility:render_pass_or_bundle_and_pipeline,depth_stencil_read_only_write_state:*": { "subcaseMS": 0.361 },
+ "webgpu:api,validation,render_pass,attachment_compatibility:render_pass_or_bundle_and_pipeline,sample_count:*": { "subcaseMS": 0.456 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:attachments,color_depth_mismatch:*": { "subcaseMS": 33.000 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:attachments,layer_count:*": { "subcaseMS": 18.667 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:attachments,mip_level_count:*": { "subcaseMS": 5.468 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:attachments,one_color_attachment:*": { "subcaseMS": 33.401 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:attachments,one_depth_stencil_attachment:*": { "subcaseMS": 15.301 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:attachments,same_size:*": { "subcaseMS": 33.400 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:color_attachments,empty:*": { "subcaseMS": 0.400 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:color_attachments,limits,maxColorAttachmentBytesPerSample,aligned:*": { "subcaseMS": 1.825 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:color_attachments,limits,maxColorAttachmentBytesPerSample,unaligned:*": { "subcaseMS": 17.151 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:color_attachments,limits,maxColorAttachments:*": { "subcaseMS": 0.950 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:color_attachments,non_multisampled:*": { "subcaseMS": 32.601 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:color_attachments,sample_count:*": { "subcaseMS": 33.600 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:depth_stencil_attachment,depth_clear_value:*": { "subcaseMS": 39.956 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:depth_stencil_attachment,loadOp_storeOp_match_depthReadOnly_stencilReadOnly:*": { "subcaseMS": 1.701 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:depth_stencil_attachment,sample_counts_mismatch:*": { "subcaseMS": 15.801 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:occlusionQuerySet,query_set_type:*": { "subcaseMS": 32.400 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:resolveTarget,array_layer_count:*": { "subcaseMS": 32.200 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:resolveTarget,different_format:*": { "subcaseMS": 1.500 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:resolveTarget,different_size:*": { "subcaseMS": 0.901 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:resolveTarget,error_state:*": { "subcaseMS": 1.101 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:resolveTarget,format_supports_resolve:*": { "subcaseMS": 3.370 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:resolveTarget,mipmap_level_count:*": { "subcaseMS": 33.201 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:resolveTarget,sample_count:*": { "subcaseMS": 32.500 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:resolveTarget,single_sample_count:*": { "subcaseMS": 0.601 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:resolveTarget,usage:*": { "subcaseMS": 15.125 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:timestampWrite,query_index:*": { "subcaseMS": 0.200 },
+ "webgpu:api,validation,render_pass,render_pass_descriptor:timestampWrites,query_set_type:*": { "subcaseMS": 0.501 },
+ "webgpu:api,validation,render_pass,resolve:resolve_attachment:*": { "subcaseMS": 6.205 },
+ "webgpu:api,validation,render_pipeline,depth_stencil_state:depthCompare_optional:*": { "subcaseMS": 21.401 },
+ "webgpu:api,validation,render_pipeline,depth_stencil_state:depthWriteEnabled_optional:*": { "subcaseMS": 16.950 },
+ "webgpu:api,validation,render_pipeline,depth_stencil_state:depth_test:*": { "subcaseMS": 3.407 },
+ "webgpu:api,validation,render_pipeline,depth_stencil_state:depth_write,frag_depth:*": { "subcaseMS": 6.465 },
+ "webgpu:api,validation,render_pipeline,depth_stencil_state:depth_write:*": { "subcaseMS": 4.113 },
+ "webgpu:api,validation,render_pipeline,depth_stencil_state:format:*": { "subcaseMS": 3.521 },
+ "webgpu:api,validation,render_pipeline,depth_stencil_state:stencil_test:*": { "subcaseMS": 3.124 },
+ "webgpu:api,validation,render_pipeline,depth_stencil_state:stencil_write:*": { "subcaseMS": 3.183 },
+ "webgpu:api,validation,render_pipeline,fragment_state:color_target_exists:*": { "subcaseMS": 29.150 },
+ "webgpu:api,validation,render_pipeline,fragment_state:limits,maxColorAttachmentBytesPerSample,aligned:*": { "subcaseMS": 0.991 },
+ "webgpu:api,validation,render_pipeline,fragment_state:limits,maxColorAttachmentBytesPerSample,unaligned:*": { "subcaseMS": 14.750 },
+ "webgpu:api,validation,render_pipeline,fragment_state:limits,maxColorAttachments:*": { "subcaseMS": 9.351 },
+ "webgpu:api,validation,render_pipeline,fragment_state:pipeline_output_targets,blend:*": { "subcaseMS": 0.551 },
+ "webgpu:api,validation,render_pipeline,fragment_state:pipeline_output_targets:*": { "subcaseMS": 0.497 },
+ "webgpu:api,validation,render_pipeline,fragment_state:targets_blend:*": { "subcaseMS": 1.203 },
+ "webgpu:api,validation,render_pipeline,fragment_state:targets_format_filterable:*": { "subcaseMS": 2.143 },
+ "webgpu:api,validation,render_pipeline,fragment_state:targets_format_renderable:*": { "subcaseMS": 3.339 },
+ "webgpu:api,validation,render_pipeline,fragment_state:targets_write_mask:*": { "subcaseMS": 12.272 },
+ "webgpu:api,validation,render_pipeline,inter_stage:interpolation_sampling:*": { "subcaseMS": 3.126 },
+ "webgpu:api,validation,render_pipeline,inter_stage:interpolation_type:*": { "subcaseMS": 4.071 },
+ "webgpu:api,validation,render_pipeline,inter_stage:location,mismatch:*": { "subcaseMS": 7.280 },
+ "webgpu:api,validation,render_pipeline,inter_stage:location,subset:*": { "subcaseMS": 1.250 },
+ "webgpu:api,validation,render_pipeline,inter_stage:location,superset:*": { "subcaseMS": 0.901 },
+ "webgpu:api,validation,render_pipeline,inter_stage:max_components_count,input:*": { "subcaseMS": 6.560 },
+ "webgpu:api,validation,render_pipeline,inter_stage:max_components_count,output:*": { "subcaseMS": 8.426 },
+ "webgpu:api,validation,render_pipeline,inter_stage:max_shader_variable_location:*": { "subcaseMS": 11.050 },
+ "webgpu:api,validation,render_pipeline,inter_stage:type:*": { "subcaseMS": 6.170 },
+ "webgpu:api,validation,render_pipeline,misc:basic:*": { "subcaseMS": 0.901 },
+ "webgpu:api,validation,render_pipeline,misc:pipeline_layout,device_mismatch:*": { "subcaseMS": 8.700 },
+ "webgpu:api,validation,render_pipeline,misc:vertex_state_only:*": { "subcaseMS": 1.125 },
+ "webgpu:api,validation,render_pipeline,multisample_state:alpha_to_coverage,count:*": { "subcaseMS": 3.200 },
+ "webgpu:api,validation,render_pipeline,multisample_state:alpha_to_coverage,sample_mask:*": { "subcaseMS": 0.725 },
+ "webgpu:api,validation,render_pipeline,multisample_state:count:*": { "subcaseMS": 2.325 },
+ "webgpu:api,validation,render_pipeline,overrides:identifier,fragment:*": { "subcaseMS": 6.330 },
+ "webgpu:api,validation,render_pipeline,overrides:identifier,vertex:*": { "subcaseMS": 4.784 },
+ "webgpu:api,validation,render_pipeline,overrides:uninitialized,fragment:*": { "subcaseMS": 11.525 },
+ "webgpu:api,validation,render_pipeline,overrides:uninitialized,vertex:*": { "subcaseMS": 5.513 },
+ "webgpu:api,validation,render_pipeline,overrides:value,type_error,fragment:*": { "subcaseMS": 7.700 },
+ "webgpu:api,validation,render_pipeline,overrides:value,type_error,vertex:*": { "subcaseMS": 5.200 },
+ "webgpu:api,validation,render_pipeline,overrides:value,validation_error,f16,fragment:*": { "subcaseMS": 4.708 },
+ "webgpu:api,validation,render_pipeline,overrides:value,validation_error,f16,vertex:*": { "subcaseMS": 5.610 },
+ "webgpu:api,validation,render_pipeline,overrides:value,validation_error,fragment:*": { "subcaseMS": 6.840 },
+ "webgpu:api,validation,render_pipeline,overrides:value,validation_error,vertex:*": { "subcaseMS": 6.022 },
+ "webgpu:api,validation,render_pipeline,primitive_state:strip_index_format:*": { "subcaseMS": 5.267 },
+ "webgpu:api,validation,render_pipeline,primitive_state:unclipped_depth:*": { "subcaseMS": 1.025 },
+ "webgpu:api,validation,render_pipeline,shader_module:device_mismatch:*": { "subcaseMS": 0.700 },
+ "webgpu:api,validation,render_pipeline,shader_module:invalid,fragment:*": { "subcaseMS": 5.800 },
+ "webgpu:api,validation,render_pipeline,shader_module:invalid,vertex:*": { "subcaseMS": 15.151 },
+ "webgpu:api,validation,render_pipeline,vertex_state:many_attributes_overlapping:*": { "subcaseMS": 2.000 },
+ "webgpu:api,validation,render_pipeline,vertex_state:max_vertex_attribute_limit:*": { "subcaseMS": 2.817 },
+ "webgpu:api,validation,render_pipeline,vertex_state:max_vertex_buffer_array_stride_limit:*": { "subcaseMS": 1.972 },
+ "webgpu:api,validation,render_pipeline,vertex_state:max_vertex_buffer_limit:*": { "subcaseMS": 4.550 },
+ "webgpu:api,validation,render_pipeline,vertex_state:vertex_attribute_contained_in_stride:*": { "subcaseMS": 0.244 },
+ "webgpu:api,validation,render_pipeline,vertex_state:vertex_attribute_offset_alignment:*": { "subcaseMS": 1.213 },
+ "webgpu:api,validation,render_pipeline,vertex_state:vertex_attribute_shaderLocation_limit:*": { "subcaseMS": 0.649 },
+ "webgpu:api,validation,render_pipeline,vertex_state:vertex_attribute_shaderLocation_unique:*": { "subcaseMS": 0.200 },
+ "webgpu:api,validation,render_pipeline,vertex_state:vertex_buffer_array_stride_limit_alignment:*": { "subcaseMS": 0.300 },
+ "webgpu:api,validation,render_pipeline,vertex_state:vertex_shader_input_location_in_vertex_state:*": { "subcaseMS": 0.819 },
+ "webgpu:api,validation,render_pipeline,vertex_state:vertex_shader_input_location_limit:*": { "subcaseMS": 7.000 },
+ "webgpu:api,validation,render_pipeline,vertex_state:vertex_shader_type_matches_attribute_format:*": { "subcaseMS": 1.647 },
+ "webgpu:api,validation,resource_usages,buffer,in_pass_encoder:subresources,buffer_usage_in_compute_pass_with_two_dispatches:*": { "subcaseMS": 2.950 },
+ "webgpu:api,validation,resource_usages,buffer,in_pass_encoder:subresources,buffer_usage_in_one_compute_pass_with_no_dispatch:*": { "subcaseMS": 1.913 },
+ "webgpu:api,validation,resource_usages,buffer,in_pass_encoder:subresources,buffer_usage_in_one_compute_pass_with_one_dispatch:*": { "subcaseMS": 0.834 },
+ "webgpu:api,validation,resource_usages,buffer,in_pass_encoder:subresources,buffer_usage_in_one_render_pass_with_no_draw:*": { "subcaseMS": 1.458 },
+ "webgpu:api,validation,resource_usages,buffer,in_pass_encoder:subresources,buffer_usage_in_one_render_pass_with_one_draw:*": { "subcaseMS": 0.987 },
+ "webgpu:api,validation,resource_usages,buffer,in_pass_encoder:subresources,buffer_usage_in_one_render_pass_with_two_draws:*": { "subcaseMS": 2.027 },
+ "webgpu:api,validation,resource_usages,buffer,in_pass_misc:subresources,buffer_usages_in_copy_and_pass:*": { "subcaseMS": 7.673 },
+ "webgpu:api,validation,resource_usages,buffer,in_pass_misc:subresources,reset_buffer_usage_before_dispatch:*": { "subcaseMS": 8.242 },
+ "webgpu:api,validation,resource_usages,buffer,in_pass_misc:subresources,reset_buffer_usage_before_draw:*": { "subcaseMS": 4.953 },
+ "webgpu:api,validation,resource_usages,texture,in_pass_encoder:bindings_in_bundle:*": { "subcaseMS": 3.281 },
+ "webgpu:api,validation,resource_usages,texture,in_pass_encoder:replaced_binding:*": { "subcaseMS": 0.888 },
+ "webgpu:api,validation,resource_usages,texture,in_pass_encoder:scope,basic,render:*": { "subcaseMS": 8.500 },
+ "webgpu:api,validation,resource_usages,texture,in_pass_encoder:scope,dispatch:*": { "subcaseMS": 12.034 },
+ "webgpu:api,validation,resource_usages,texture,in_pass_encoder:scope,pass_boundary,compute:*": { "subcaseMS": 16.550 },
+ "webgpu:api,validation,resource_usages,texture,in_pass_encoder:scope,pass_boundary,render:*": { "subcaseMS": 8.700 },
+ "webgpu:api,validation,resource_usages,texture,in_pass_encoder:shader_stages_and_visibility,attachment_write:*": { "subcaseMS": 4.425 },
+ "webgpu:api,validation,resource_usages,texture,in_pass_encoder:shader_stages_and_visibility,storage_write:*": { "subcaseMS": 1.415 },
+ "webgpu:api,validation,resource_usages,texture,in_pass_encoder:subresources_and_binding_types_combination_for_aspect:*": { "subcaseMS": 1.152 },
+ "webgpu:api,validation,resource_usages,texture,in_pass_encoder:subresources_and_binding_types_combination_for_color:*": { "subcaseMS": 2.052 },
+ "webgpu:api,validation,resource_usages,texture,in_pass_encoder:unused_bindings_in_pipeline:*": { "subcaseMS": 4.219 },
+ "webgpu:api,validation,resource_usages,texture,in_render_common:subresources,color_attachment_and_bind_group:*": { "subcaseMS": 3.042 },
+ "webgpu:api,validation,resource_usages,texture,in_render_common:subresources,color_attachments:*": { "subcaseMS": 3.175 },
+ "webgpu:api,validation,resource_usages,texture,in_render_common:subresources,depth_stencil_attachment_and_bind_group:*": { "subcaseMS": 1.667 },
+ "webgpu:api,validation,resource_usages,texture,in_render_common:subresources,depth_stencil_texture_in_bind_groups:*": { "subcaseMS": 3.050 },
+ "webgpu:api,validation,resource_usages,texture,in_render_common:subresources,multiple_bind_groups:*": { "subcaseMS": 3.045 },
+ "webgpu:api,validation,resource_usages,texture,in_render_misc:subresources,set_bind_group_on_same_index_color_texture:*": { "subcaseMS": 4.541 },
+ "webgpu:api,validation,resource_usages,texture,in_render_misc:subresources,set_bind_group_on_same_index_depth_stencil_texture:*": { "subcaseMS": 0.925 },
+ "webgpu:api,validation,resource_usages,texture,in_render_misc:subresources,set_unused_bind_group:*": { "subcaseMS": 6.200 },
+ "webgpu:api,validation,resource_usages,texture,in_render_misc:subresources,texture_usages_in_copy_and_render_pass:*": { "subcaseMS": 4.763 },
+ "webgpu:api,validation,shader_module,entry_point:compute:*": { "subcaseMS": 4.439 },
+ "webgpu:api,validation,shader_module,entry_point:fragment:*": { "subcaseMS": 5.865 },
+ "webgpu:api,validation,shader_module,entry_point:vertex:*": { "subcaseMS": 5.803 },
+ "webgpu:api,validation,shader_module,overrides:id_conflict:*": { "subcaseMS": 36.700 },
+ "webgpu:api,validation,shader_module,overrides:name_conflict:*": { "subcaseMS": 1.500 },
+ "webgpu:api,validation,state,device_lost,destroy:command,clearBuffer:*": { "subcaseMS": 11.826 },
+ "webgpu:api,validation,state,device_lost,destroy:command,computePass,dispatch:*": { "subcaseMS": 75.850 },
+ "webgpu:api,validation,state,device_lost,destroy:command,copyBufferToBuffer:*": { "subcaseMS": 32.100 },
+ "webgpu:api,validation,state,device_lost,destroy:command,copyBufferToTexture:*": { "subcaseMS": 1.450 },
+ "webgpu:api,validation,state,device_lost,destroy:command,copyTextureToBuffer:*": { "subcaseMS": 32.400 },
+ "webgpu:api,validation,state,device_lost,destroy:command,copyTextureToTexture:*": { "subcaseMS": 9.650 },
+ "webgpu:api,validation,state,device_lost,destroy:command,renderPass,draw:*": { "subcaseMS": 26.526 },
+ "webgpu:api,validation,state,device_lost,destroy:command,renderPass,renderBundle:*": { "subcaseMS": 21.125 },
+ "webgpu:api,validation,state,device_lost,destroy:command,resolveQuerySet:*": { "subcaseMS": 32.725 },
+ "webgpu:api,validation,state,device_lost,destroy:command,writeTimestamp:*": { "subcaseMS": 0.704 },
+ "webgpu:api,validation,state,device_lost,destroy:createBindGroup:*": { "subcaseMS": 91.575 },
+ "webgpu:api,validation,state,device_lost,destroy:createBindGroupLayout:*": { "subcaseMS": 22.984 },
+ "webgpu:api,validation,state,device_lost,destroy:createBuffer:*": { "subcaseMS": 5.030 },
+ "webgpu:api,validation,state,device_lost,destroy:createCommandEncoder:*": { "subcaseMS": 35.100 },
+ "webgpu:api,validation,state,device_lost,destroy:createComputePipeline:*": { "subcaseMS": 39.750 },
+ "webgpu:api,validation,state,device_lost,destroy:createComputePipelineAsync:*": { "subcaseMS": 11.476 },
+ "webgpu:api,validation,state,device_lost,destroy:createPipelineLayout:*": { "subcaseMS": 22.145 },
+ "webgpu:api,validation,state,device_lost,destroy:createQuerySet:*": { "subcaseMS": 30.001 },
+ "webgpu:api,validation,state,device_lost,destroy:createRenderBundleEncoder:*": { "subcaseMS": 13.350 },
+ "webgpu:api,validation,state,device_lost,destroy:createRenderPipeline:*": { "subcaseMS": 39.450 },
+ "webgpu:api,validation,state,device_lost,destroy:createRenderPipelineAsync:*": { "subcaseMS": 19.025 },
+ "webgpu:api,validation,state,device_lost,destroy:createSampler:*": { "subcaseMS": 31.401 },
+ "webgpu:api,validation,state,device_lost,destroy:createShaderModule:*": { "subcaseMS": 19.750 },
+ "webgpu:api,validation,state,device_lost,destroy:createTexture,2d,compressed_format:*": { "subcaseMS": 14.241 },
+ "webgpu:api,validation,state,device_lost,destroy:createTexture,2d,uncompressed_format:*": { "subcaseMS": 7.622 },
+ "webgpu:api,validation,state,device_lost,destroy:createView,2d,compressed_format:*": { "subcaseMS": 19.612 },
+ "webgpu:api,validation,state,device_lost,destroy:createView,2d,uncompressed_format:*": { "subcaseMS": 19.895 },
+ "webgpu:api,validation,state,device_lost,destroy:importExternalTexture:*": { "subcaseMS": 92.051 },
+ "webgpu:api,validation,state,device_lost,destroy:queue,copyExternalImageToTexture,canvas:*": { "subcaseMS": 28.596 },
+ "webgpu:api,validation,state,device_lost,destroy:queue,copyExternalImageToTexture,imageBitmap:*": { "subcaseMS": 31.950 },
+ "webgpu:api,validation,state,device_lost,destroy:queue,writeBuffer:*": { "subcaseMS": 18.851 },
+ "webgpu:api,validation,state,device_lost,destroy:queue,writeTexture,2d,compressed_format:*": { "subcaseMS": 18.115 },
+ "webgpu:api,validation,state,device_lost,destroy:queue,writeTexture,2d,uncompressed_format:*": { "subcaseMS": 17.620 },
+ "webgpu:api,validation,texture,bgra8unorm_storage:configure_storage_usage_on_canvas_context_with_bgra8unorm_storage:*": { "subcaseMS": 3.230 },
+ "webgpu:api,validation,texture,bgra8unorm_storage:configure_storage_usage_on_canvas_context_without_bgra8unorm_storage:*": { "subcaseMS": 1.767 },
+ "webgpu:api,validation,texture,bgra8unorm_storage:create_bind_group_layout:*": { "subcaseMS": 21.500 },
+ "webgpu:api,validation,texture,bgra8unorm_storage:create_shader_module_with_bgra8unorm_storage:*": { "subcaseMS": 11.201 },
+ "webgpu:api,validation,texture,bgra8unorm_storage:create_shader_module_without_bgra8unorm_storage:*": { "subcaseMS": 1.601 },
+ "webgpu:api,validation,texture,bgra8unorm_storage:create_texture:*": { "subcaseMS": 22.900 },
+ "webgpu:api,validation,texture,destroy:base:*": { "subcaseMS": 4.000 },
+ "webgpu:api,validation,texture,destroy:invalid_texture:*": { "subcaseMS": 27.200 },
+ "webgpu:api,validation,texture,destroy:submit_a_destroyed_texture_as_attachment:*": { "subcaseMS": 11.812 },
+ "webgpu:api,validation,texture,destroy:twice:*": { "subcaseMS": 1.400 },
+ "webgpu:api,validation,texture,float32_filterable:create_bind_group:*": { "subcaseMS": 0.901 },
+ "webgpu:api,validation,texture,rg11b10ufloat_renderable:begin_render_bundle_encoder:*": { "subcaseMS": 1.101 },
+ "webgpu:api,validation,texture,rg11b10ufloat_renderable:begin_render_pass_msaa_and_resolve:*": { "subcaseMS": 0.900 },
+ "webgpu:api,validation,texture,rg11b10ufloat_renderable:begin_render_pass_single_sampled:*": { "subcaseMS": 1.200 },
+ "webgpu:api,validation,texture,rg11b10ufloat_renderable:create_render_pipeline:*": { "subcaseMS": 2.400 },
+ "webgpu:api,validation,texture,rg11b10ufloat_renderable:create_texture:*": { "subcaseMS": 12.700 },
+ "webgpu:compat,api,validation,encoding,cmds,copyTextureToBuffer:compressed:*": { "subcaseMS": 202.929 },
+ "webgpu:compat,api,validation,encoding,programmable,pipeline_bind_group_compat:twoDifferentTextureViews,compute_pass,unused:*": { "subcaseMS": 1.501 },
+ "webgpu:compat,api,validation,encoding,programmable,pipeline_bind_group_compat:twoDifferentTextureViews,compute_pass,used:*": { "subcaseMS": 49.405 },
+ "webgpu:compat,api,validation,encoding,programmable,pipeline_bind_group_compat:twoDifferentTextureViews,render_pass,unused:*": { "subcaseMS": 16.002 },
+ "webgpu:compat,api,validation,encoding,programmable,pipeline_bind_group_compat:twoDifferentTextureViews,render_pass,used:*": { "subcaseMS": 0.000 },
+ "webgpu:compat,api,validation,render_pipeline,fragment_state:colorState:*": { "subcaseMS": 32.604 },
+ "webgpu:compat,api,validation,render_pipeline,shader_module:sample_mask:*": { "subcaseMS": 14.801 },
+ "webgpu:compat,api,validation,render_pipeline,vertex_state:maxVertexAttributesVertexIndexInstanceIndex:*": { "subcaseMS": 3.700 },
+ "webgpu:compat,api,validation,texture,createTexture:unsupportedTextureFormats:*": { "subcaseMS": 0.700 },
+ "webgpu:compat,api,validation,texture,createTexture:unsupportedTextureViewFormats:*": { "subcaseMS": 0.601 },
+ "webgpu:compat,api,validation,texture,cubeArray:cube_array:*": { "subcaseMS": 13.701 },
+ "webgpu:examples:basic,async:*": { "subcaseMS": 16.401 },
+ "webgpu:examples:basic,builder_cases:*": { "subcaseMS": 7.275 },
+ "webgpu:examples:basic,builder_cases_subcases:*": { "subcaseMS": 0.425 },
+ "webgpu:examples:basic,builder_subcases:*": { "subcaseMS": 0.175 },
+ "webgpu:examples:basic,builder_subcases_short:*": { "subcaseMS": 3.300 },
+ "webgpu:examples:basic,plain_cases:*": { "subcaseMS": 8.450 },
+ "webgpu:examples:basic,plain_cases_private:*": { "subcaseMS": 9.850 },
+ "webgpu:examples:basic:*": { "subcaseMS": 0.901 },
+ "webgpu:examples:gpu,async:*": { "subcaseMS": 1.600 },
+ "webgpu:examples:gpu,buffers:*": { "subcaseMS": 17.301 },
+ "webgpu:examples:gpu,with_texture_compression,bc:*": { "subcaseMS": 7.500 },
+ "webgpu:examples:gpu,with_texture_compression,etc2:*": { "subcaseMS": 0.750 },
+ "webgpu:examples:not_implemented_yet,with_plan:*": { "subcaseMS": 0.500 },
+ "webgpu:examples:not_implemented_yet,without_plan:*": { "subcaseMS": 0.701 },
+ "webgpu:examples:test_name:*": { "subcaseMS": 14.601 },
+ "webgpu:idl,constants,flags:BufferUsage,count:*": { "subcaseMS": 0.301 },
+ "webgpu:idl,constants,flags:BufferUsage,values:*": { "subcaseMS": 0.120 },
+ "webgpu:idl,constants,flags:ColorWrite,count:*": { "subcaseMS": 0.101 },
+ "webgpu:idl,constants,flags:ColorWrite,values:*": { "subcaseMS": 0.101 },
+ "webgpu:idl,constants,flags:ShaderStage,count:*": { "subcaseMS": 0.101 },
+ "webgpu:idl,constants,flags:ShaderStage,values:*": { "subcaseMS": 0.034 },
+ "webgpu:idl,constants,flags:TextureUsage,count:*": { "subcaseMS": 0.101 },
+ "webgpu:idl,constants,flags:TextureUsage,values:*": { "subcaseMS": 0.040 },
+ "webgpu:shader,execution,expression,binary,af_addition:scalar:*": { "subcaseMS": 815.300 },
+ "webgpu:shader,execution,expression,binary,af_addition:scalar_vector:*": { "subcaseMS": 1803.434 },
+ "webgpu:shader,execution,expression,binary,af_addition:vector:*": { "subcaseMS": 719.600 },
+ "webgpu:shader,execution,expression,binary,af_addition:vector_scalar:*": { "subcaseMS": 1770.734 },
+ "webgpu:shader,execution,expression,binary,af_comparison:equals:*": { "subcaseMS": 23.000 },
+ "webgpu:shader,execution,expression,binary,af_comparison:greater_equals:*": { "subcaseMS": 20.651 },
+ "webgpu:shader,execution,expression,binary,af_comparison:greater_than:*": { "subcaseMS": 19.901 },
+ "webgpu:shader,execution,expression,binary,af_comparison:less_equals:*": { "subcaseMS": 19.651 },
+ "webgpu:shader,execution,expression,binary,af_comparison:less_than:*": { "subcaseMS": 19.975 },
+ "webgpu:shader,execution,expression,binary,af_comparison:not_equals:*": { "subcaseMS": 19.651 },
+ "webgpu:shader,execution,expression,binary,af_division:scalar:*": { "subcaseMS": 563.200 },
+ "webgpu:shader,execution,expression,binary,af_division:scalar_vector:*": { "subcaseMS": 567.101 },
+ "webgpu:shader,execution,expression,binary,af_division:vector:*": { "subcaseMS": 237.134 },
+ "webgpu:shader,execution,expression,binary,af_division:vector_scalar:*": { "subcaseMS": 580.000 },
+ "webgpu:shader,execution,expression,binary,af_matrix_addition:matrix:*": { "subcaseMS": 11169.534 },
+ "webgpu:shader,execution,expression,binary,af_matrix_subtraction:matrix:*": { "subcaseMS": 14060.956 },
+ "webgpu:shader,execution,expression,binary,af_multiplication:scalar:*": { "subcaseMS": 777.901 },
+ "webgpu:shader,execution,expression,binary,af_multiplication:scalar_vector:*": { "subcaseMS": 2025.534 },
+ "webgpu:shader,execution,expression,binary,af_multiplication:vector:*": { "subcaseMS": 710.667 },
+ "webgpu:shader,execution,expression,binary,af_multiplication:vector_scalar:*": { "subcaseMS": 2085.300 },
+ "webgpu:shader,execution,expression,binary,af_remainder:scalar:*": { "subcaseMS": 1103.701 },
+ "webgpu:shader,execution,expression,binary,af_remainder:scalar_vector:*": { "subcaseMS": 756.800 },
+ "webgpu:shader,execution,expression,binary,af_remainder:vector:*": { "subcaseMS": 299.701 },
+ "webgpu:shader,execution,expression,binary,af_remainder:vector_scalar:*": { "subcaseMS": 777.701 },
+ "webgpu:shader,execution,expression,binary,af_subtraction:scalar:*": { "subcaseMS": 854.100 },
+ "webgpu:shader,execution,expression,binary,af_subtraction:scalar_vector:*": { "subcaseMS": 2336.534 },
+ "webgpu:shader,execution,expression,binary,af_subtraction:vector:*": { "subcaseMS": 764.201 },
+ "webgpu:shader,execution,expression,binary,af_subtraction:vector_scalar:*": { "subcaseMS": 2437.701 },
+ "webgpu:shader,execution,expression,binary,bitwise:bitwise_and:*": { "subcaseMS": 20.982 },
+ "webgpu:shader,execution,expression,binary,bitwise:bitwise_and_compound:*": { "subcaseMS": 22.513 },
+ "webgpu:shader,execution,expression,binary,bitwise:bitwise_exclusive_or:*": { "subcaseMS": 21.294 },
+ "webgpu:shader,execution,expression,binary,bitwise:bitwise_exclusive_or_compound:*": { "subcaseMS": 21.326 },
+ "webgpu:shader,execution,expression,binary,bitwise:bitwise_or:*": { "subcaseMS": 23.782 },
+ "webgpu:shader,execution,expression,binary,bitwise:bitwise_or_compound:*": { "subcaseMS": 27.088 },
+ "webgpu:shader,execution,expression,binary,bitwise_shift:shift_left_concrete:*": { "subcaseMS": 10.466 },
+ "webgpu:shader,execution,expression,binary,bitwise_shift:shift_left_concrete_compound:*": { "subcaseMS": 9.657 },
+ "webgpu:shader,execution,expression,binary,bitwise_shift:shift_right_concrete:*": { "subcaseMS": 11.744 },
+ "webgpu:shader,execution,expression,binary,bitwise_shift:shift_right_concrete_compound:*": { "subcaseMS": 11.097 },
+ "webgpu:shader,execution,expression,binary,bool_logical:and:*": { "subcaseMS": 7.325 },
+ "webgpu:shader,execution,expression,binary,bool_logical:and_compound:*": { "subcaseMS": 8.044 },
+ "webgpu:shader,execution,expression,binary,bool_logical:and_short_circuit:*": { "subcaseMS": 8.950 },
+ "webgpu:shader,execution,expression,binary,bool_logical:equals:*": { "subcaseMS": 7.075 },
+ "webgpu:shader,execution,expression,binary,bool_logical:not_equals:*": { "subcaseMS": 8.800 },
+ "webgpu:shader,execution,expression,binary,bool_logical:or:*": { "subcaseMS": 6.663 },
+ "webgpu:shader,execution,expression,binary,bool_logical:or_compound:*": { "subcaseMS": 7.407 },
+ "webgpu:shader,execution,expression,binary,bool_logical:or_short_circuit:*": { "subcaseMS": 10.050 },
+ "webgpu:shader,execution,expression,binary,f16_addition:scalar:*": { "subcaseMS": 106.501 },
+ "webgpu:shader,execution,expression,binary,f16_addition:scalar_compound:*": { "subcaseMS": 5.912 },
+ "webgpu:shader,execution,expression,binary,f16_addition:scalar_vector:*": { "subcaseMS": 4.408 },
+ "webgpu:shader,execution,expression,binary,f16_addition:vector:*": { "subcaseMS": 8.204 },
+ "webgpu:shader,execution,expression,binary,f16_addition:vector_scalar:*": { "subcaseMS": 4.308 },
+ "webgpu:shader,execution,expression,binary,f16_addition:vector_scalar_compound:*": { "subcaseMS": 4.406 },
+ "webgpu:shader,execution,expression,binary,f16_comparison:equals:*": { "subcaseMS": 3.907 },
+ "webgpu:shader,execution,expression,binary,f16_comparison:greater_equals:*": { "subcaseMS": 3.507 },
+ "webgpu:shader,execution,expression,binary,f16_comparison:greater_than:*": { "subcaseMS": 3.908 },
+ "webgpu:shader,execution,expression,binary,f16_comparison:less_equals:*": { "subcaseMS": 3.108 },
+ "webgpu:shader,execution,expression,binary,f16_comparison:less_than:*": { "subcaseMS": 3.508 },
+ "webgpu:shader,execution,expression,binary,f16_comparison:not_equals:*": { "subcaseMS": 3.405 },
+ "webgpu:shader,execution,expression,binary,f16_division:scalar:*": { "subcaseMS": 125.300 },
+ "webgpu:shader,execution,expression,binary,f16_division:scalar_compound:*": { "subcaseMS": 5.909 },
+ "webgpu:shader,execution,expression,binary,f16_division:scalar_vector:*": { "subcaseMS": 3.509 },
+ "webgpu:shader,execution,expression,binary,f16_division:vector:*": { "subcaseMS": 5.505 },
+ "webgpu:shader,execution,expression,binary,f16_division:vector_scalar:*": { "subcaseMS": 3.908 },
+ "webgpu:shader,execution,expression,binary,f16_division:vector_scalar_compound:*": { "subcaseMS": 97.109 },
+ "webgpu:shader,execution,expression,binary,f16_matrix_addition:matrix:*": { "subcaseMS": 775.164 },
+ "webgpu:shader,execution,expression,binary,f16_matrix_addition:matrix_compound:*": { "subcaseMS": 1251.350 },
+ "webgpu:shader,execution,expression,binary,f16_matrix_matrix_multiplication:matrix_matrix:*": { "subcaseMS": 2049.029 },
+ "webgpu:shader,execution,expression,binary,f16_matrix_matrix_multiplication:matrix_matrix_compound:*": { "subcaseMS": 916.581 },
+ "webgpu:shader,execution,expression,binary,f16_matrix_scalar_multiplication:matrix_scalar:*": { "subcaseMS": 760.517 },
+ "webgpu:shader,execution,expression,binary,f16_matrix_scalar_multiplication:matrix_scalar_compound:*": { "subcaseMS": 663.923 },
+ "webgpu:shader,execution,expression,binary,f16_matrix_scalar_multiplication:scalar_matrix:*": { "subcaseMS": 644.509 },
+ "webgpu:shader,execution,expression,binary,f16_matrix_subtraction:matrix:*": { "subcaseMS": 725.975 },
+ "webgpu:shader,execution,expression,binary,f16_matrix_subtraction:matrix_compound:*": { "subcaseMS": 1106.414 },
+ "webgpu:shader,execution,expression,binary,f16_matrix_vector_multiplication:matrix_vector:*": { "subcaseMS": 1173.548 },
+ "webgpu:shader,execution,expression,binary,f16_matrix_vector_multiplication:vector_matrix:*": { "subcaseMS": 85.495 },
+ "webgpu:shader,execution,expression,binary,f16_matrix_vector_multiplication:vector_matrix_compound:*": { "subcaseMS": 833.634 },
+ "webgpu:shader,execution,expression,binary,f16_multiplication:scalar:*": { "subcaseMS": 105.202 },
+ "webgpu:shader,execution,expression,binary,f16_multiplication:scalar_compound:*": { "subcaseMS": 8.111 },
+ "webgpu:shader,execution,expression,binary,f16_multiplication:scalar_vector:*": { "subcaseMS": 3.907 },
+ "webgpu:shader,execution,expression,binary,f16_multiplication:vector:*": { "subcaseMS": 6.104 },
+ "webgpu:shader,execution,expression,binary,f16_multiplication:vector_scalar:*": { "subcaseMS": 3.908 },
+ "webgpu:shader,execution,expression,binary,f16_multiplication:vector_scalar_compound:*": { "subcaseMS": 4.205 },
+ "webgpu:shader,execution,expression,binary,f16_remainder:scalar:*": { "subcaseMS": 101.202 },
+ "webgpu:shader,execution,expression,binary,f16_remainder:scalar_compound:*": { "subcaseMS": 6.409 },
+ "webgpu:shader,execution,expression,binary,f16_remainder:scalar_vector:*": { "subcaseMS": 3.910 },
+ "webgpu:shader,execution,expression,binary,f16_remainder:vector:*": { "subcaseMS": 4.410 },
+ "webgpu:shader,execution,expression,binary,f16_remainder:vector_scalar:*": { "subcaseMS": 4.409 },
+ "webgpu:shader,execution,expression,binary,f16_remainder:vector_scalar_compound:*": { "subcaseMS": 4.510 },
+ "webgpu:shader,execution,expression,binary,f16_subtraction:scalar:*": { "subcaseMS": 101.600 },
+ "webgpu:shader,execution,expression,binary,f16_subtraction:scalar_compound:*": { "subcaseMS": 5.611 },
+ "webgpu:shader,execution,expression,binary,f16_subtraction:scalar_vector:*": { "subcaseMS": 4.308 },
+ "webgpu:shader,execution,expression,binary,f16_subtraction:vector:*": { "subcaseMS": 7.105 },
+ "webgpu:shader,execution,expression,binary,f16_subtraction:vector_scalar:*": { "subcaseMS": 4.107 },
+ "webgpu:shader,execution,expression,binary,f16_subtraction:vector_scalar_compound:*": { "subcaseMS": 4.606 },
+ "webgpu:shader,execution,expression,binary,f32_addition:scalar:*": { "subcaseMS": 352.326 },
+ "webgpu:shader,execution,expression,binary,f32_addition:scalar_compound:*": { "subcaseMS": 146.513 },
+ "webgpu:shader,execution,expression,binary,f32_addition:scalar_vector:*": { "subcaseMS": 148.117 },
+ "webgpu:shader,execution,expression,binary,f32_addition:vector:*": { "subcaseMS": 117.209 },
+ "webgpu:shader,execution,expression,binary,f32_addition:vector_scalar:*": { "subcaseMS": 150.450 },
+ "webgpu:shader,execution,expression,binary,f32_addition:vector_scalar_compound:*": { "subcaseMS": 152.842 },
+ "webgpu:shader,execution,expression,binary,f32_comparison:equals:*": { "subcaseMS": 9.638 },
+ "webgpu:shader,execution,expression,binary,f32_comparison:greater_equals:*": { "subcaseMS": 7.882 },
+ "webgpu:shader,execution,expression,binary,f32_comparison:greater_than:*": { "subcaseMS": 7.388 },
+ "webgpu:shader,execution,expression,binary,f32_comparison:less_equals:*": { "subcaseMS": 6.632 },
+ "webgpu:shader,execution,expression,binary,f32_comparison:less_than:*": { "subcaseMS": 6.969 },
+ "webgpu:shader,execution,expression,binary,f32_comparison:not_equals:*": { "subcaseMS": 6.819 },
+ "webgpu:shader,execution,expression,binary,f32_division:scalar:*": { "subcaseMS": 372.550 },
+ "webgpu:shader,execution,expression,binary,f32_division:scalar_compound:*": { "subcaseMS": 140.819 },
+ "webgpu:shader,execution,expression,binary,f32_division:scalar_vector:*": { "subcaseMS": 82.709 },
+ "webgpu:shader,execution,expression,binary,f32_division:vector:*": { "subcaseMS": 119.475 },
+ "webgpu:shader,execution,expression,binary,f32_division:vector_scalar:*": { "subcaseMS": 75.375 },
+ "webgpu:shader,execution,expression,binary,f32_division:vector_scalar_compound:*": { "subcaseMS": 76.017 },
+ "webgpu:shader,execution,expression,binary,f32_matrix_addition:matrix:*": { "subcaseMS": 35.020 },
+ "webgpu:shader,execution,expression,binary,f32_matrix_addition:matrix_compound:*": { "subcaseMS": 27.534 },
+ "webgpu:shader,execution,expression,binary,f32_matrix_matrix_multiplication:matrix_matrix:*": { "subcaseMS": 134.680 },
+ "webgpu:shader,execution,expression,binary,f32_matrix_matrix_multiplication:matrix_matrix_compound:*": { "subcaseMS": 24.848 },
+ "webgpu:shader,execution,expression,binary,f32_matrix_scalar_multiplication:matrix_scalar:*": { "subcaseMS": 96.756 },
+ "webgpu:shader,execution,expression,binary,f32_matrix_scalar_multiplication:matrix_scalar_compound:*": { "subcaseMS": 21.181 },
+ "webgpu:shader,execution,expression,binary,f32_matrix_scalar_multiplication:scalar_matrix:*": { "subcaseMS": 21.600 },
+ "webgpu:shader,execution,expression,binary,f32_matrix_subtraction:matrix:*": { "subcaseMS": 34.489 },
+ "webgpu:shader,execution,expression,binary,f32_matrix_subtraction:matrix_compound:*": { "subcaseMS": 27.645 },
+ "webgpu:shader,execution,expression,binary,f32_matrix_vector_multiplication:matrix_vector:*": { "subcaseMS": 105.139 },
+ "webgpu:shader,execution,expression,binary,f32_matrix_vector_multiplication:vector_matrix:*": { "subcaseMS": 22.501 },
+ "webgpu:shader,execution,expression,binary,f32_matrix_vector_multiplication:vector_matrix_compound:*": { "subcaseMS": 16.217 },
+ "webgpu:shader,execution,expression,binary,f32_multiplication:scalar:*": { "subcaseMS": 360.475 },
+ "webgpu:shader,execution,expression,binary,f32_multiplication:scalar_compound:*": { "subcaseMS": 155.044 },
+ "webgpu:shader,execution,expression,binary,f32_multiplication:scalar_vector:*": { "subcaseMS": 153.642 },
+ "webgpu:shader,execution,expression,binary,f32_multiplication:vector:*": { "subcaseMS": 121.692 },
+ "webgpu:shader,execution,expression,binary,f32_multiplication:vector_scalar:*": { "subcaseMS": 156.909 },
+ "webgpu:shader,execution,expression,binary,f32_multiplication:vector_scalar_compound:*": { "subcaseMS": 157.576 },
+ "webgpu:shader,execution,expression,binary,f32_remainder:scalar:*": { "subcaseMS": 313.175 },
+ "webgpu:shader,execution,expression,binary,f32_remainder:scalar_compound:*": { "subcaseMS": 66.207 },
+ "webgpu:shader,execution,expression,binary,f32_remainder:scalar_vector:*": { "subcaseMS": 64.125 },
+ "webgpu:shader,execution,expression,binary,f32_remainder:vector:*": { "subcaseMS": 60.517 },
+ "webgpu:shader,execution,expression,binary,f32_remainder:vector_scalar:*": { "subcaseMS": 56.025 },
+ "webgpu:shader,execution,expression,binary,f32_remainder:vector_scalar_compound:*": { "subcaseMS": 57.101 },
+ "webgpu:shader,execution,expression,binary,f32_subtraction:scalar:*": { "subcaseMS": 335.951 },
+ "webgpu:shader,execution,expression,binary,f32_subtraction:scalar_compound:*": { "subcaseMS": 149.525 },
+ "webgpu:shader,execution,expression,binary,f32_subtraction:scalar_vector:*": { "subcaseMS": 159.659 },
+ "webgpu:shader,execution,expression,binary,f32_subtraction:vector:*": { "subcaseMS": 117.142 },
+ "webgpu:shader,execution,expression,binary,f32_subtraction:vector_scalar:*": { "subcaseMS": 152.067 },
+ "webgpu:shader,execution,expression,binary,f32_subtraction:vector_scalar_compound:*": { "subcaseMS": 159.417 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:addition:*": { "subcaseMS": 23.975 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:addition_compound:*": { "subcaseMS": 9.219 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:addition_scalar_vector:*": { "subcaseMS": 33.059 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:addition_vector_scalar:*": { "subcaseMS": 32.475 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:addition_vector_scalar_compound:*": { "subcaseMS": 30.875 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:division:*": { "subcaseMS": 8.444 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:division_compound:*": { "subcaseMS": 8.407 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:division_scalar_vector:*": { "subcaseMS": 27.809 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:division_vector_scalar:*": { "subcaseMS": 28.550 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:division_vector_scalar_compound:*": { "subcaseMS": 28.950 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:multiplication:*": { "subcaseMS": 8.976 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:multiplication_compound:*": { "subcaseMS": 9.601 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:multiplication_scalar_vector:*": { "subcaseMS": 33.742 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:multiplication_vector_scalar:*": { "subcaseMS": 33.042 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:multiplication_vector_scalar_compound:*": { "subcaseMS": 31.425 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:remainder:*": { "subcaseMS": 8.600 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:remainder_compound:*": { "subcaseMS": 9.119 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:remainder_scalar_vector:*": { "subcaseMS": 27.192 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:remainder_vector_scalar:*": { "subcaseMS": 27.284 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:remainder_vector_scalar_compound:*": { "subcaseMS": 29.875 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:subtraction:*": { "subcaseMS": 9.513 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:subtraction_compound:*": { "subcaseMS": 7.994 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:subtraction_scalar_vector:*": { "subcaseMS": 34.034 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:subtraction_vector_scalar:*": { "subcaseMS": 32.642 },
+ "webgpu:shader,execution,expression,binary,i32_arithmetic:subtraction_vector_scalar_compound:*": { "subcaseMS": 30.400 },
+ "webgpu:shader,execution,expression,binary,i32_comparison:equals:*": { "subcaseMS": 9.544 },
+ "webgpu:shader,execution,expression,binary,i32_comparison:greater_equals:*": { "subcaseMS": 7.657 },
+ "webgpu:shader,execution,expression,binary,i32_comparison:greater_than:*": { "subcaseMS": 7.169 },
+ "webgpu:shader,execution,expression,binary,i32_comparison:less_equals:*": { "subcaseMS": 8.063 },
+ "webgpu:shader,execution,expression,binary,i32_comparison:less_than:*": { "subcaseMS": 7.894 },
+ "webgpu:shader,execution,expression,binary,i32_comparison:not_equals:*": { "subcaseMS": 7.588 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:addition:*": { "subcaseMS": 9.806 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:addition_compound:*": { "subcaseMS": 8.494 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:addition_scalar_vector:*": { "subcaseMS": 10.409 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:addition_vector_scalar:*": { "subcaseMS": 9.676 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:addition_vector_scalar_compound:*": { "subcaseMS": 9.925 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:division:*": { "subcaseMS": 7.138 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:division_compound:*": { "subcaseMS": 7.544 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:division_scalar_vector:*": { "subcaseMS": 9.959 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:division_vector_scalar:*": { "subcaseMS": 9.767 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:division_vector_scalar_compound:*": { "subcaseMS": 10.167 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:multiplication:*": { "subcaseMS": 7.544 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:multiplication_compound:*": { "subcaseMS": 7.332 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:multiplication_scalar_vector:*": { "subcaseMS": 9.867 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:multiplication_vector_scalar:*": { "subcaseMS": 9.159 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:multiplication_vector_scalar_compound:*": { "subcaseMS": 9.667 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:remainder:*": { "subcaseMS": 8.188 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:remainder_compound:*": { "subcaseMS": 7.994 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:remainder_scalar_vector:*": { "subcaseMS": 9.842 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:remainder_vector_scalar:*": { "subcaseMS": 10.292 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:remainder_vector_scalar_compound:*": { "subcaseMS": 9.617 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:subtraction:*": { "subcaseMS": 16.119 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:subtraction_compound:*": { "subcaseMS": 7.982 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:subtraction_scalar_vector:*": { "subcaseMS": 9.842 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:subtraction_vector_scalar:*": { "subcaseMS": 9.667 },
+ "webgpu:shader,execution,expression,binary,u32_arithmetic:subtraction_vector_scalar_compound:*": { "subcaseMS": 10.859 },
+ "webgpu:shader,execution,expression,binary,u32_comparison:equals:*": { "subcaseMS": 8.938 },
+ "webgpu:shader,execution,expression,binary,u32_comparison:greater_equals:*": { "subcaseMS": 6.732 },
+ "webgpu:shader,execution,expression,binary,u32_comparison:greater_than:*": { "subcaseMS": 7.232 },
+ "webgpu:shader,execution,expression,binary,u32_comparison:less_equals:*": { "subcaseMS": 7.844 },
+ "webgpu:shader,execution,expression,binary,u32_comparison:less_than:*": { "subcaseMS": 6.700 },
+ "webgpu:shader,execution,expression,binary,u32_comparison:not_equals:*": { "subcaseMS": 6.850 },
+ "webgpu:shader,execution,expression,call,builtin,abs:abstract_float:*": { "subcaseMS": 464.126 },
+ "webgpu:shader,execution,expression,call,builtin,abs:abstract_int:*": { "subcaseMS": 16.810 },
+ "webgpu:shader,execution,expression,call,builtin,abs:f16:*": { "subcaseMS": 22.910 },
+ "webgpu:shader,execution,expression,call,builtin,abs:f32:*": { "subcaseMS": 9.844 },
+ "webgpu:shader,execution,expression,call,builtin,abs:i32:*": { "subcaseMS": 7.088 },
+ "webgpu:shader,execution,expression,call,builtin,abs:u32:*": { "subcaseMS": 7.513 },
+ "webgpu:shader,execution,expression,call,builtin,acos:abstract_float:*": { "subcaseMS": 15.505 },
+ "webgpu:shader,execution,expression,call,builtin,acos:f16:*": { "subcaseMS": 26.005 },
+ "webgpu:shader,execution,expression,call,builtin,acos:f32:*": { "subcaseMS": 33.063 },
+ "webgpu:shader,execution,expression,call,builtin,acosh:abstract_float:*": { "subcaseMS": 17.210 },
+ "webgpu:shader,execution,expression,call,builtin,acosh:f16:*": { "subcaseMS": 140.494 },
+ "webgpu:shader,execution,expression,call,builtin,acosh:f32:*": { "subcaseMS": 12.588 },
+ "webgpu:shader,execution,expression,call,builtin,all:bool:*": { "subcaseMS": 6.938 },
+ "webgpu:shader,execution,expression,call,builtin,any:bool:*": { "subcaseMS": 6.475 },
+ "webgpu:shader,execution,expression,call,builtin,arrayLength:binding_subregion:*": { "subcaseMS": 19.900 },
+ "webgpu:shader,execution,expression,call,builtin,arrayLength:multiple_elements:*": { "subcaseMS": 6.261 },
+ "webgpu:shader,execution,expression,call,builtin,arrayLength:read_only:*": { "subcaseMS": 4.500 },
+ "webgpu:shader,execution,expression,call,builtin,arrayLength:single_element:*": { "subcaseMS": 6.569 },
+ "webgpu:shader,execution,expression,call,builtin,arrayLength:struct_member:*": { "subcaseMS": 6.819 },
+ "webgpu:shader,execution,expression,call,builtin,asin:abstract_float:*": { "subcaseMS": 16.606 },
+ "webgpu:shader,execution,expression,call,builtin,asin:f16:*": { "subcaseMS": 6.708 },
+ "webgpu:shader,execution,expression,call,builtin,asin:f32:*": { "subcaseMS": 33.969 },
+ "webgpu:shader,execution,expression,call,builtin,asinh:abstract_float:*": { "subcaseMS": 23.305 },
+ "webgpu:shader,execution,expression,call,builtin,asinh:f16:*": { "subcaseMS": 59.538 },
+ "webgpu:shader,execution,expression,call,builtin,asinh:f32:*": { "subcaseMS": 9.731 },
+ "webgpu:shader,execution,expression,call,builtin,atan2:abstract_float:*": { "subcaseMS": 24.705 },
+ "webgpu:shader,execution,expression,call,builtin,atan2:f16:*": { "subcaseMS": 32.506 },
+ "webgpu:shader,execution,expression,call,builtin,atan2:f32:*": { "subcaseMS": 25.938 },
+ "webgpu:shader,execution,expression,call,builtin,atan:abstract_float:*": { "subcaseMS": 32.408 },
+ "webgpu:shader,execution,expression,call,builtin,atan:f16:*": { "subcaseMS": 21.106 },
+ "webgpu:shader,execution,expression,call,builtin,atan:f32:*": { "subcaseMS": 10.251 },
+ "webgpu:shader,execution,expression,call,builtin,atanh:abstract_float:*": { "subcaseMS": 16.807 },
+ "webgpu:shader,execution,expression,call,builtin,atanh:f16:*": { "subcaseMS": 81.619 },
+ "webgpu:shader,execution,expression,call,builtin,atanh:f32:*": { "subcaseMS": 12.332 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicAdd:add_storage:*": { "subcaseMS": 6.482 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicAdd:add_workgroup:*": { "subcaseMS": 7.222 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicAnd:and_storage:*": { "subcaseMS": 6.711 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicAnd:and_workgroup:*": { "subcaseMS": 8.028 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicCompareExchangeWeak:compare_exchange_weak_storage_advanced:*": { "subcaseMS": 10.090 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicCompareExchangeWeak:compare_exchange_weak_storage_basic:*": { "subcaseMS": 9.529 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicCompareExchangeWeak:compare_exchange_weak_workgroup_advanced:*": { "subcaseMS": 10.012 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicCompareExchangeWeak:compare_exchange_weak_workgroup_basic:*": { "subcaseMS": 10.368 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicExchange:exchange_storage_advanced:*": { "subcaseMS": 8.755 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicExchange:exchange_storage_basic:*": { "subcaseMS": 5.725 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicExchange:exchange_workgroup_advanced:*": { "subcaseMS": 9.885 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicExchange:exchange_workgroup_basic:*": { "subcaseMS": 6.966 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicLoad:load_storage:*": { "subcaseMS": 5.354 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicLoad:load_workgroup:*": { "subcaseMS": 6.269 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicMax:max_storage:*": { "subcaseMS": 6.116 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicMax:max_workgroup:*": { "subcaseMS": 7.010 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicMin:min_storage:*": { "subcaseMS": 6.235 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicMin:min_workgroup:*": { "subcaseMS": 7.307 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicOr:or_storage:*": { "subcaseMS": 6.791 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicOr:or_workgroup:*": { "subcaseMS": 7.814 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicStore:store_storage_advanced:*": { "subcaseMS": 5.707 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicStore:store_storage_basic:*": { "subcaseMS": 5.524 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicStore:store_workgroup_advanced:*": { "subcaseMS": 6.029 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicStore:store_workgroup_basic:*": { "subcaseMS": 6.632 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicSub:sub_storage:*": { "subcaseMS": 5.757 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicSub:sub_workgroup:*": { "subcaseMS": 7.238 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicXor:xor_storage:*": { "subcaseMS": 6.807 },
+ "webgpu:shader,execution,expression,call,builtin,atomics,atomicXor:xor_workgroup:*": { "subcaseMS": 7.821 },
+ "webgpu:shader,execution,expression,call,builtin,bitcast:f16_to_f16:*": { "subcaseMS": 21.112 },
+ "webgpu:shader,execution,expression,call,builtin,bitcast:f32_to_f32:*": { "subcaseMS": 8.625 },
+ "webgpu:shader,execution,expression,call,builtin,bitcast:f32_to_i32:*": { "subcaseMS": 8.175 },
+ "webgpu:shader,execution,expression,call,builtin,bitcast:f32_to_u32:*": { "subcaseMS": 8.016 },
+ "webgpu:shader,execution,expression,call,builtin,bitcast:f32_to_vec2h:*": { "subcaseMS": 22.212 },
+ "webgpu:shader,execution,expression,call,builtin,bitcast:i32_to_f32:*": { "subcaseMS": 31.814 },
+ "webgpu:shader,execution,expression,call,builtin,bitcast:i32_to_i32:*": { "subcaseMS": 23.863 },
+ "webgpu:shader,execution,expression,call,builtin,bitcast:i32_to_u32:*": { "subcaseMS": 7.263 },
+ "webgpu:shader,execution,expression,call,builtin,bitcast:i32_to_vec2h:*": { "subcaseMS": 28.214 },
+ "webgpu:shader,execution,expression,call,builtin,bitcast:u32_to_f32:*": { "subcaseMS": 20.716 },
+ "webgpu:shader,execution,expression,call,builtin,bitcast:u32_to_i32:*": { "subcaseMS": 6.982 },
+ "webgpu:shader,execution,expression,call,builtin,bitcast:u32_to_u32:*": { "subcaseMS": 6.907 },
+ "webgpu:shader,execution,expression,call,builtin,bitcast:u32_to_vec2h:*": { "subcaseMS": 22.210 },
+ "webgpu:shader,execution,expression,call,builtin,bitcast:vec2f_to_vec4h:*": { "subcaseMS": 24.015 },
+ "webgpu:shader,execution,expression,call,builtin,bitcast:vec2h_to_f32:*": { "subcaseMS": 21.412 },
+ "webgpu:shader,execution,expression,call,builtin,bitcast:vec2h_to_i32:*": { "subcaseMS": 38.312 },
+ "webgpu:shader,execution,expression,call,builtin,bitcast:vec2h_to_u32:*": { "subcaseMS": 23.711 },
+ "webgpu:shader,execution,expression,call,builtin,bitcast:vec2i_to_vec4h:*": { "subcaseMS": 23.211 },
+ "webgpu:shader,execution,expression,call,builtin,bitcast:vec2u_to_vec4h:*": { "subcaseMS": 23.010 },
+ "webgpu:shader,execution,expression,call,builtin,bitcast:vec4h_to_vec2f:*": { "subcaseMS": 22.812 },
+ "webgpu:shader,execution,expression,call,builtin,bitcast:vec4h_to_vec2i:*": { "subcaseMS": 20.915 },
+ "webgpu:shader,execution,expression,call,builtin,bitcast:vec4h_to_vec2u:*": { "subcaseMS": 29.514 },
+ "webgpu:shader,execution,expression,call,builtin,ceil:abstract_float:*": { "subcaseMS": 23.611 },
+ "webgpu:shader,execution,expression,call,builtin,ceil:f16:*": { "subcaseMS": 29.209 },
+ "webgpu:shader,execution,expression,call,builtin,ceil:f32:*": { "subcaseMS": 11.132 },
+ "webgpu:shader,execution,expression,call,builtin,clamp:abstract_float:*": { "subcaseMS": 11800.350 },
+ "webgpu:shader,execution,expression,call,builtin,clamp:abstract_int:*": { "subcaseMS": 18.104 },
+ "webgpu:shader,execution,expression,call,builtin,clamp:f16:*": { "subcaseMS": 32.809 },
+ "webgpu:shader,execution,expression,call,builtin,clamp:f32:*": { "subcaseMS": 159.926 },
+ "webgpu:shader,execution,expression,call,builtin,clamp:i32:*": { "subcaseMS": 54.200 },
+ "webgpu:shader,execution,expression,call,builtin,clamp:u32:*": { "subcaseMS": 272.419 },
+ "webgpu:shader,execution,expression,call,builtin,cos:abstract_float:*": { "subcaseMS": 16.706 },
+ "webgpu:shader,execution,expression,call,builtin,cos:f16:*": { "subcaseMS": 23.905 },
+ "webgpu:shader,execution,expression,call,builtin,cos:f32:*": { "subcaseMS": 25.275 },
+ "webgpu:shader,execution,expression,call,builtin,cosh:abstract_float:*": { "subcaseMS": 22.909 },
+ "webgpu:shader,execution,expression,call,builtin,cosh:f16:*": { "subcaseMS": 58.475 },
+ "webgpu:shader,execution,expression,call,builtin,cosh:f32:*": { "subcaseMS": 9.694 },
+ "webgpu:shader,execution,expression,call,builtin,countLeadingZeros:i32:*": { "subcaseMS": 7.494 },
+ "webgpu:shader,execution,expression,call,builtin,countLeadingZeros:u32:*": { "subcaseMS": 8.088 },
+ "webgpu:shader,execution,expression,call,builtin,countOneBits:i32:*": { "subcaseMS": 7.400 },
+ "webgpu:shader,execution,expression,call,builtin,countOneBits:u32:*": { "subcaseMS": 8.644 },
+ "webgpu:shader,execution,expression,call,builtin,countTrailingZeros:i32:*": { "subcaseMS": 7.844 },
+ "webgpu:shader,execution,expression,call,builtin,countTrailingZeros:u32:*": { "subcaseMS": 7.851 },
+ "webgpu:shader,execution,expression,call,builtin,cross:abstract_float:*": { "subcaseMS": 3.002 },
+ "webgpu:shader,execution,expression,call,builtin,cross:f16:*": { "subcaseMS": 115.503 },
+ "webgpu:shader,execution,expression,call,builtin,cross:f32:*": { "subcaseMS": 664.926 },
+ "webgpu:shader,execution,expression,call,builtin,degrees:abstract_float:*": { "subcaseMS": 533.052 },
+ "webgpu:shader,execution,expression,call,builtin,degrees:f16:*": { "subcaseMS": 29.308 },
+ "webgpu:shader,execution,expression,call,builtin,degrees:f32:*": { "subcaseMS": 79.525 },
+ "webgpu:shader,execution,expression,call,builtin,determinant:abstract_float:*": { "subcaseMS": 15.306 },
+ "webgpu:shader,execution,expression,call,builtin,determinant:f16:*": { "subcaseMS": 37.192 },
+ "webgpu:shader,execution,expression,call,builtin,determinant:f32:*": { "subcaseMS": 10.742 },
+ "webgpu:shader,execution,expression,call,builtin,distance:abstract_float:*": { "subcaseMS": 14.503 },
+ "webgpu:shader,execution,expression,call,builtin,distance:f16:*": { "subcaseMS": 6675.626 },
+ "webgpu:shader,execution,expression,call,builtin,distance:f16_vec2:*": { "subcaseMS": 78.300 },
+ "webgpu:shader,execution,expression,call,builtin,distance:f16_vec3:*": { "subcaseMS": 47.925 },
+ "webgpu:shader,execution,expression,call,builtin,distance:f16_vec4:*": { "subcaseMS": 57.825 },
+ "webgpu:shader,execution,expression,call,builtin,distance:f32:*": { "subcaseMS": 875.325 },
+ "webgpu:shader,execution,expression,call,builtin,distance:f32_vec2:*": { "subcaseMS": 9.826 },
+ "webgpu:shader,execution,expression,call,builtin,distance:f32_vec3:*": { "subcaseMS": 10.901 },
+ "webgpu:shader,execution,expression,call,builtin,distance:f32_vec4:*": { "subcaseMS": 12.700 },
+ "webgpu:shader,execution,expression,call,builtin,dot:abstract_float:*": { "subcaseMS": 8.902 },
+ "webgpu:shader,execution,expression,call,builtin,dot:abstract_int:*": { "subcaseMS": 2.902 },
+ "webgpu:shader,execution,expression,call,builtin,dot:f16_vec2:*": { "subcaseMS": 981.225 },
+ "webgpu:shader,execution,expression,call,builtin,dot:f16_vec3:*": { "subcaseMS": 50.350 },
+ "webgpu:shader,execution,expression,call,builtin,dot:f16_vec4:*": { "subcaseMS": 52.250 },
+ "webgpu:shader,execution,expression,call,builtin,dot:f32_vec2:*": { "subcaseMS": 210.350 },
+ "webgpu:shader,execution,expression,call,builtin,dot:f32_vec3:*": { "subcaseMS": 11.176 },
+ "webgpu:shader,execution,expression,call,builtin,dot:f32_vec4:*": { "subcaseMS": 11.876 },
+ "webgpu:shader,execution,expression,call,builtin,dot:i32:*": { "subcaseMS": 3.103 },
+ "webgpu:shader,execution,expression,call,builtin,dot:u32:*": { "subcaseMS": 3.101 },
+ "webgpu:shader,execution,expression,call,builtin,dpdx:f32:*": { "subcaseMS": 22.804 },
+ "webgpu:shader,execution,expression,call,builtin,dpdxCoarse:f32:*": { "subcaseMS": 22.404 },
+ "webgpu:shader,execution,expression,call,builtin,dpdxFine:f32:*": { "subcaseMS": 17.708 },
+ "webgpu:shader,execution,expression,call,builtin,dpdy:f32:*": { "subcaseMS": 17.006 },
+ "webgpu:shader,execution,expression,call,builtin,dpdyCoarse:f32:*": { "subcaseMS": 17.909 },
+ "webgpu:shader,execution,expression,call,builtin,dpdyFine:f32:*": { "subcaseMS": 16.806 },
+ "webgpu:shader,execution,expression,call,builtin,exp2:abstract_float:*": { "subcaseMS": 22.705 },
+ "webgpu:shader,execution,expression,call,builtin,exp2:f16:*": { "subcaseMS": 79.501 },
+ "webgpu:shader,execution,expression,call,builtin,exp2:f32:*": { "subcaseMS": 12.169 },
+ "webgpu:shader,execution,expression,call,builtin,exp:abstract_float:*": { "subcaseMS": 17.210 },
+ "webgpu:shader,execution,expression,call,builtin,exp:f16:*": { "subcaseMS": 135.363 },
+ "webgpu:shader,execution,expression,call,builtin,exp:f32:*": { "subcaseMS": 12.557 },
+ "webgpu:shader,execution,expression,call,builtin,extractBits:i32:*": { "subcaseMS": 8.125 },
+ "webgpu:shader,execution,expression,call,builtin,extractBits:u32:*": { "subcaseMS": 7.838 },
+ "webgpu:shader,execution,expression,call,builtin,faceForward:abstract_float:*": { "subcaseMS": 120.702 },
+ "webgpu:shader,execution,expression,call,builtin,faceForward:f16_vec2:*": { "subcaseMS": 485.775 },
+ "webgpu:shader,execution,expression,call,builtin,faceForward:f16_vec3:*": { "subcaseMS": 560.225 },
+ "webgpu:shader,execution,expression,call,builtin,faceForward:f16_vec4:*": { "subcaseMS": 670.325 },
+ "webgpu:shader,execution,expression,call,builtin,faceForward:f32_vec2:*": { "subcaseMS": 12009.850 },
+ "webgpu:shader,execution,expression,call,builtin,faceForward:f32_vec3:*": { "subcaseMS": 440.001 },
+ "webgpu:shader,execution,expression,call,builtin,faceForward:f32_vec4:*": { "subcaseMS": 500.675 },
+ "webgpu:shader,execution,expression,call,builtin,firstLeadingBit:i32:*": { "subcaseMS": 20.613 },
+ "webgpu:shader,execution,expression,call,builtin,firstLeadingBit:u32:*": { "subcaseMS": 9.363 },
+ "webgpu:shader,execution,expression,call,builtin,firstTrailingBit:i32:*": { "subcaseMS": 8.132 },
+ "webgpu:shader,execution,expression,call,builtin,firstTrailingBit:u32:*": { "subcaseMS": 9.047 },
+ "webgpu:shader,execution,expression,call,builtin,floor:abstract_float:*": { "subcaseMS": 34.108 },
+ "webgpu:shader,execution,expression,call,builtin,floor:f16:*": { "subcaseMS": 30.708 },
+ "webgpu:shader,execution,expression,call,builtin,floor:f32:*": { "subcaseMS": 10.119 },
+ "webgpu:shader,execution,expression,call,builtin,fma:abstract_float:*": { "subcaseMS": 18.208 },
+ "webgpu:shader,execution,expression,call,builtin,fma:f16:*": { "subcaseMS": 485.857 },
+ "webgpu:shader,execution,expression,call,builtin,fma:f32:*": { "subcaseMS": 80.388 },
+ "webgpu:shader,execution,expression,call,builtin,fract:abstract_float:*": { "subcaseMS": 17.408 },
+ "webgpu:shader,execution,expression,call,builtin,fract:f16:*": { "subcaseMS": 46.500 },
+ "webgpu:shader,execution,expression,call,builtin,fract:f32:*": { "subcaseMS": 12.269 },
+ "webgpu:shader,execution,expression,call,builtin,frexp:f16_exp:*": { "subcaseMS": 8.503 },
+ "webgpu:shader,execution,expression,call,builtin,frexp:f16_fract:*": { "subcaseMS": 17.900 },
+ "webgpu:shader,execution,expression,call,builtin,frexp:f16_vec2_exp:*": { "subcaseMS": 1.801 },
+ "webgpu:shader,execution,expression,call,builtin,frexp:f16_vec2_fract:*": { "subcaseMS": 2.802 },
+ "webgpu:shader,execution,expression,call,builtin,frexp:f16_vec3_exp:*": { "subcaseMS": 1.701 },
+ "webgpu:shader,execution,expression,call,builtin,frexp:f16_vec3_fract:*": { "subcaseMS": 1.702 },
+ "webgpu:shader,execution,expression,call,builtin,frexp:f16_vec4_exp:*": { "subcaseMS": 1.603 },
+ "webgpu:shader,execution,expression,call,builtin,frexp:f16_vec4_fract:*": { "subcaseMS": 1.503 },
+ "webgpu:shader,execution,expression,call,builtin,frexp:f32_exp:*": { "subcaseMS": 8.501 },
+ "webgpu:shader,execution,expression,call,builtin,frexp:f32_fract:*": { "subcaseMS": 27.475 },
+ "webgpu:shader,execution,expression,call,builtin,frexp:f32_vec2_exp:*": { "subcaseMS": 8.300 },
+ "webgpu:shader,execution,expression,call,builtin,frexp:f32_vec2_fract:*": { "subcaseMS": 8.876 },
+ "webgpu:shader,execution,expression,call,builtin,frexp:f32_vec3_exp:*": { "subcaseMS": 8.975 },
+ "webgpu:shader,execution,expression,call,builtin,frexp:f32_vec3_fract:*": { "subcaseMS": 9.700 },
+ "webgpu:shader,execution,expression,call,builtin,frexp:f32_vec4_exp:*": { "subcaseMS": 10.250 },
+ "webgpu:shader,execution,expression,call,builtin,frexp:f32_vec4_fract:*": { "subcaseMS": 11.800 },
+ "webgpu:shader,execution,expression,call,builtin,fwidth:f32:*": { "subcaseMS": 29.807 },
+ "webgpu:shader,execution,expression,call,builtin,fwidthCoarse:f32:*": { "subcaseMS": 17.110 },
+ "webgpu:shader,execution,expression,call,builtin,fwidthFine:f32:*": { "subcaseMS": 16.906 },
+ "webgpu:shader,execution,expression,call,builtin,insertBits:integer:*": { "subcaseMS": 9.569 },
+ "webgpu:shader,execution,expression,call,builtin,inversesqrt:abstract_float:*": { "subcaseMS": 24.310 },
+ "webgpu:shader,execution,expression,call,builtin,inversesqrt:f16:*": { "subcaseMS": 21.411 },
+ "webgpu:shader,execution,expression,call,builtin,inversesqrt:f32:*": { "subcaseMS": 50.125 },
+ "webgpu:shader,execution,expression,call,builtin,ldexp:abstract_float:*": { "subcaseMS": 142.805 },
+ "webgpu:shader,execution,expression,call,builtin,ldexp:f16:*": { "subcaseMS": 271.038 },
+ "webgpu:shader,execution,expression,call,builtin,ldexp:f32:*": { "subcaseMS": 161.250 },
+ "webgpu:shader,execution,expression,call,builtin,length:abstract_float:*": { "subcaseMS": 31.303 },
+ "webgpu:shader,execution,expression,call,builtin,length:f16:*": { "subcaseMS": 490.450 },
+ "webgpu:shader,execution,expression,call,builtin,length:f16_vec2:*": { "subcaseMS": 33.551 },
+ "webgpu:shader,execution,expression,call,builtin,length:f16_vec3:*": { "subcaseMS": 79.301 },
+ "webgpu:shader,execution,expression,call,builtin,length:f16_vec4:*": { "subcaseMS": 156.826 },
+ "webgpu:shader,execution,expression,call,builtin,length:f32:*": { "subcaseMS": 107.275 },
+ "webgpu:shader,execution,expression,call,builtin,length:f32_vec2:*": { "subcaseMS": 9.751 },
+ "webgpu:shader,execution,expression,call,builtin,length:f32_vec3:*": { "subcaseMS": 10.825 },
+ "webgpu:shader,execution,expression,call,builtin,length:f32_vec4:*": { "subcaseMS": 9.476 },
+ "webgpu:shader,execution,expression,call,builtin,log2:abstract_float:*": { "subcaseMS": 23.607 },
+ "webgpu:shader,execution,expression,call,builtin,log2:f16:*": { "subcaseMS": 9.404 },
+ "webgpu:shader,execution,expression,call,builtin,log2:f32:*": { "subcaseMS": 27.838 },
+ "webgpu:shader,execution,expression,call,builtin,log:abstract_float:*": { "subcaseMS": 17.911 },
+ "webgpu:shader,execution,expression,call,builtin,log:f16:*": { "subcaseMS": 8.603 },
+ "webgpu:shader,execution,expression,call,builtin,log:f32:*": { "subcaseMS": 26.725 },
+ "webgpu:shader,execution,expression,call,builtin,max:abstract_float:*": { "subcaseMS": 2810.001 },
+ "webgpu:shader,execution,expression,call,builtin,max:abstract_int:*": { "subcaseMS": 33.508 },
+ "webgpu:shader,execution,expression,call,builtin,max:f16:*": { "subcaseMS": 37.404 },
+ "webgpu:shader,execution,expression,call,builtin,max:f32:*": { "subcaseMS": 300.619 },
+ "webgpu:shader,execution,expression,call,builtin,max:i32:*": { "subcaseMS": 7.350 },
+ "webgpu:shader,execution,expression,call,builtin,max:u32:*": { "subcaseMS": 6.700 },
+ "webgpu:shader,execution,expression,call,builtin,min:abstract_float:*": { "subcaseMS": 3054.101 },
+ "webgpu:shader,execution,expression,call,builtin,min:abstract_int:*": { "subcaseMS": 19.806 },
+ "webgpu:shader,execution,expression,call,builtin,min:f16:*": { "subcaseMS": 8.006 },
+ "webgpu:shader,execution,expression,call,builtin,min:f32:*": { "subcaseMS": 298.463 },
+ "webgpu:shader,execution,expression,call,builtin,min:i32:*": { "subcaseMS": 7.825 },
+ "webgpu:shader,execution,expression,call,builtin,min:u32:*": { "subcaseMS": 6.932 },
+ "webgpu:shader,execution,expression,call,builtin,mix:abstract_float_matching:*": { "subcaseMS": 215.206 },
+ "webgpu:shader,execution,expression,call,builtin,mix:abstract_float_nonmatching_vec2:*": { "subcaseMS": 14.601 },
+ "webgpu:shader,execution,expression,call,builtin,mix:abstract_float_nonmatching_vec3:*": { "subcaseMS": 18.302 },
+ "webgpu:shader,execution,expression,call,builtin,mix:abstract_float_nonmatching_vec4:*": { "subcaseMS": 12.602 },
+ "webgpu:shader,execution,expression,call,builtin,mix:f16_matching:*": { "subcaseMS": 321.700 },
+ "webgpu:shader,execution,expression,call,builtin,mix:f16_nonmatching_vec2:*": { "subcaseMS": 653.851 },
+ "webgpu:shader,execution,expression,call,builtin,mix:f16_nonmatching_vec3:*": { "subcaseMS": 832.076 },
+ "webgpu:shader,execution,expression,call,builtin,mix:f16_nonmatching_vec4:*": { "subcaseMS": 1021.126 },
+ "webgpu:shader,execution,expression,call,builtin,mix:f32_matching:*": { "subcaseMS": 4306.763 },
+ "webgpu:shader,execution,expression,call,builtin,mix:f32_nonmatching_vec2:*": { "subcaseMS": 503.551 },
+ "webgpu:shader,execution,expression,call,builtin,mix:f32_nonmatching_vec3:*": { "subcaseMS": 590.326 },
+ "webgpu:shader,execution,expression,call,builtin,mix:f32_nonmatching_vec4:*": { "subcaseMS": 679.901 },
+ "webgpu:shader,execution,expression,call,builtin,modf:abstract_fract:*": { "subcaseMS": 327.601 },
+ "webgpu:shader,execution,expression,call,builtin,modf:abstract_vec2_fract:*": { "subcaseMS": 435.400 },
+ "webgpu:shader,execution,expression,call,builtin,modf:abstract_vec2_whole:*": { "subcaseMS": 434.300 },
+ "webgpu:shader,execution,expression,call,builtin,modf:abstract_vec3_fract:*": { "subcaseMS": 942.701 },
+ "webgpu:shader,execution,expression,call,builtin,modf:abstract_vec3_whole:*": { "subcaseMS": 932.000 },
+ "webgpu:shader,execution,expression,call,builtin,modf:abstract_vec4_fract:*": { "subcaseMS": 1645.901 },
+ "webgpu:shader,execution,expression,call,builtin,modf:abstract_vec4_whole:*": { "subcaseMS": 1812.500 },
+ "webgpu:shader,execution,expression,call,builtin,modf:abstract_whole:*": { "subcaseMS": 296.601 },
+ "webgpu:shader,execution,expression,call,builtin,modf:f16_fract:*": { "subcaseMS": 11.801 },
+ "webgpu:shader,execution,expression,call,builtin,modf:f16_vec2_fract:*": { "subcaseMS": 9.200 },
+ "webgpu:shader,execution,expression,call,builtin,modf:f16_vec2_whole:*": { "subcaseMS": 8.404 },
+ "webgpu:shader,execution,expression,call,builtin,modf:f16_vec3_fract:*": { "subcaseMS": 3.102 },
+ "webgpu:shader,execution,expression,call,builtin,modf:f16_vec3_whole:*": { "subcaseMS": 7.202 },
+ "webgpu:shader,execution,expression,call,builtin,modf:f16_vec4_fract:*": { "subcaseMS": 8.503 },
+ "webgpu:shader,execution,expression,call,builtin,modf:f16_vec4_whole:*": { "subcaseMS": 3.001 },
+ "webgpu:shader,execution,expression,call,builtin,modf:f16_whole:*": { "subcaseMS": 17.103 },
+ "webgpu:shader,execution,expression,call,builtin,modf:f32_fract:*": { "subcaseMS": 319.500 },
+ "webgpu:shader,execution,expression,call,builtin,modf:f32_vec2_fract:*": { "subcaseMS": 59.401 },
+ "webgpu:shader,execution,expression,call,builtin,modf:f32_vec2_whole:*": { "subcaseMS": 86.501 },
+ "webgpu:shader,execution,expression,call,builtin,modf:f32_vec3_fract:*": { "subcaseMS": 96.550 },
+ "webgpu:shader,execution,expression,call,builtin,modf:f32_vec3_whole:*": { "subcaseMS": 94.475 },
+ "webgpu:shader,execution,expression,call,builtin,modf:f32_vec4_fract:*": { "subcaseMS": 147.876 },
+ "webgpu:shader,execution,expression,call,builtin,modf:f32_vec4_whole:*": { "subcaseMS": 134.576 },
+ "webgpu:shader,execution,expression,call,builtin,modf:f32_whole:*": { "subcaseMS": 94.025 },
+ "webgpu:shader,execution,expression,call,builtin,normalize:abstract_float:*": { "subcaseMS": 28.508 },
+ "webgpu:shader,execution,expression,call,builtin,normalize:f16_vec2:*": { "subcaseMS": 635.100 },
+ "webgpu:shader,execution,expression,call,builtin,normalize:f16_vec3:*": { "subcaseMS": 112.501 },
+ "webgpu:shader,execution,expression,call,builtin,normalize:f16_vec4:*": { "subcaseMS": 210.526 },
+ "webgpu:shader,execution,expression,call,builtin,normalize:f32_vec2:*": { "subcaseMS": 65.975 },
+ "webgpu:shader,execution,expression,call,builtin,normalize:f32_vec3:*": { "subcaseMS": 12.825 },
+ "webgpu:shader,execution,expression,call,builtin,normalize:f32_vec4:*": { "subcaseMS": 14.500 },
+ "webgpu:shader,execution,expression,call,builtin,pack2x16float:pack:*": { "subcaseMS": 284.150 },
+ "webgpu:shader,execution,expression,call,builtin,pack2x16snorm:pack:*": { "subcaseMS": 9.925 },
+ "webgpu:shader,execution,expression,call,builtin,pack2x16unorm:pack:*": { "subcaseMS": 9.525 },
+ "webgpu:shader,execution,expression,call,builtin,pack4x8snorm:pack:*": { "subcaseMS": 14.751 },
+ "webgpu:shader,execution,expression,call,builtin,pack4x8unorm:pack:*": { "subcaseMS": 14.575 },
+ "webgpu:shader,execution,expression,call,builtin,pow:abstract_float:*": { "subcaseMS": 23.106 },
+ "webgpu:shader,execution,expression,call,builtin,pow:f16:*": { "subcaseMS": 816.063 },
+ "webgpu:shader,execution,expression,call,builtin,pow:f32:*": { "subcaseMS": 151.269 },
+ "webgpu:shader,execution,expression,call,builtin,quantizeToF16:f32:*": { "subcaseMS": 11.063 },
+ "webgpu:shader,execution,expression,call,builtin,radians:abstract_float:*": { "subcaseMS": 492.827 },
+ "webgpu:shader,execution,expression,call,builtin,radians:f16:*": { "subcaseMS": 18.707 },
+ "webgpu:shader,execution,expression,call,builtin,radians:f32:*": { "subcaseMS": 74.432 },
+ "webgpu:shader,execution,expression,call,builtin,reflect:abstract_float:*": { "subcaseMS": 47.108 },
+ "webgpu:shader,execution,expression,call,builtin,reflect:f16_vec2:*": { "subcaseMS": 76.975 },
+ "webgpu:shader,execution,expression,call,builtin,reflect:f16_vec3:*": { "subcaseMS": 69.451 },
+ "webgpu:shader,execution,expression,call,builtin,reflect:f16_vec4:*": { "subcaseMS": 79.826 },
+ "webgpu:shader,execution,expression,call,builtin,reflect:f32_vec2:*": { "subcaseMS": 1182.226 },
+ "webgpu:shader,execution,expression,call,builtin,reflect:f32_vec3:*": { "subcaseMS": 56.326 },
+ "webgpu:shader,execution,expression,call,builtin,reflect:f32_vec4:*": { "subcaseMS": 65.250 },
+ "webgpu:shader,execution,expression,call,builtin,refract:abstract_float:*": { "subcaseMS": 114.404 },
+ "webgpu:shader,execution,expression,call,builtin,refract:f16_vec2:*": { "subcaseMS": 536.225 },
+ "webgpu:shader,execution,expression,call,builtin,refract:f16_vec3:*": { "subcaseMS": 627.450 },
+ "webgpu:shader,execution,expression,call,builtin,refract:f16_vec4:*": { "subcaseMS": 699.801 },
+ "webgpu:shader,execution,expression,call,builtin,refract:f32_vec2:*": { "subcaseMS": 33934.000 },
+ "webgpu:shader,execution,expression,call,builtin,refract:f32_vec3:*": { "subcaseMS": 503.976 },
+ "webgpu:shader,execution,expression,call,builtin,refract:f32_vec4:*": { "subcaseMS": 610.150 },
+ "webgpu:shader,execution,expression,call,builtin,reverseBits:i32:*": { "subcaseMS": 9.594 },
+ "webgpu:shader,execution,expression,call,builtin,reverseBits:u32:*": { "subcaseMS": 7.969 },
+ "webgpu:shader,execution,expression,call,builtin,round:abstract_float:*": { "subcaseMS": 19.408 },
+ "webgpu:shader,execution,expression,call,builtin,round:f16:*": { "subcaseMS": 30.509 },
+ "webgpu:shader,execution,expression,call,builtin,round:f32:*": { "subcaseMS": 12.407 },
+ "webgpu:shader,execution,expression,call,builtin,saturate:abstract_float:*": { "subcaseMS": 527.425 },
+ "webgpu:shader,execution,expression,call,builtin,saturate:f16:*": { "subcaseMS": 23.407 },
+ "webgpu:shader,execution,expression,call,builtin,saturate:f32:*": { "subcaseMS": 116.275 },
+ "webgpu:shader,execution,expression,call,builtin,select:scalar:*": { "subcaseMS": 6.882 },
+ "webgpu:shader,execution,expression,call,builtin,select:vector:*": { "subcaseMS": 7.096 },
+ "webgpu:shader,execution,expression,call,builtin,sign:abstract_float:*": { "subcaseMS": 412.925 },
+ "webgpu:shader,execution,expression,call,builtin,sign:abstract_int:*": { "subcaseMS": 25.806 },
+ "webgpu:shader,execution,expression,call,builtin,sign:f16:*": { "subcaseMS": 25.103 },
+ "webgpu:shader,execution,expression,call,builtin,sign:f32:*": { "subcaseMS": 8.188 },
+ "webgpu:shader,execution,expression,call,builtin,sign:i32:*": { "subcaseMS": 10.225 },
+ "webgpu:shader,execution,expression,call,builtin,sin:abstract_float:*": { "subcaseMS": 19.206 },
+ "webgpu:shader,execution,expression,call,builtin,sin:f16:*": { "subcaseMS": 8.707 },
+ "webgpu:shader,execution,expression,call,builtin,sin:f32:*": { "subcaseMS": 26.826 },
+ "webgpu:shader,execution,expression,call,builtin,sinh:abstract_float:*": { "subcaseMS": 22.009 },
+ "webgpu:shader,execution,expression,call,builtin,sinh:f16:*": { "subcaseMS": 58.288 },
+ "webgpu:shader,execution,expression,call,builtin,sinh:f32:*": { "subcaseMS": 11.038 },
+ "webgpu:shader,execution,expression,call,builtin,smoothstep:abstract_float:*": { "subcaseMS": 23.807 },
+ "webgpu:shader,execution,expression,call,builtin,smoothstep:f16:*": { "subcaseMS": 616.457 },
+ "webgpu:shader,execution,expression,call,builtin,smoothstep:f32:*": { "subcaseMS": 88.063 },
+ "webgpu:shader,execution,expression,call,builtin,sqrt:abstract_float:*": { "subcaseMS": 19.004 },
+ "webgpu:shader,execution,expression,call,builtin,sqrt:f16:*": { "subcaseMS": 22.908 },
+ "webgpu:shader,execution,expression,call,builtin,sqrt:f32:*": { "subcaseMS": 10.813 },
+ "webgpu:shader,execution,expression,call,builtin,step:abstract_float:*": { "subcaseMS": 19.104 },
+ "webgpu:shader,execution,expression,call,builtin,step:f16:*": { "subcaseMS": 32.508 },
+ "webgpu:shader,execution,expression,call,builtin,step:f32:*": { "subcaseMS": 291.363 },
+ "webgpu:shader,execution,expression,call,builtin,storageBarrier:barrier:*": { "subcaseMS": 0.801 },
+ "webgpu:shader,execution,expression,call,builtin,storageBarrier:stage:*": { "subcaseMS": 2.402 },
+ "webgpu:shader,execution,expression,call,builtin,tan:abstract_float:*": { "subcaseMS": 31.007 },
+ "webgpu:shader,execution,expression,call,builtin,tan:f16:*": { "subcaseMS": 116.157 },
+ "webgpu:shader,execution,expression,call,builtin,tan:f32:*": { "subcaseMS": 13.532 },
+ "webgpu:shader,execution,expression,call,builtin,tanh:abstract_float:*": { "subcaseMS": 18.406 },
+ "webgpu:shader,execution,expression,call,builtin,tanh:f16:*": { "subcaseMS": 75.982 },
+ "webgpu:shader,execution,expression,call,builtin,tanh:f32:*": { "subcaseMS": 32.719 },
+ "webgpu:shader,execution,expression,call,builtin,textureDimension:depth:*": { "subcaseMS": 20.801 },
+ "webgpu:shader,execution,expression,call,builtin,textureDimension:external:*": { "subcaseMS": 1.700 },
+ "webgpu:shader,execution,expression,call,builtin,textureDimension:sampled:*": { "subcaseMS": 16.506 },
+ "webgpu:shader,execution,expression,call,builtin,textureDimension:storage:*": { "subcaseMS": 25.907 },
+ "webgpu:shader,execution,expression,call,builtin,textureGather:depth_2d_coords:*": { "subcaseMS": 11.601 },
+ "webgpu:shader,execution,expression,call,builtin,textureGather:depth_3d_coords:*": { "subcaseMS": 2.200 },
+ "webgpu:shader,execution,expression,call,builtin,textureGather:depth_array_2d_coords:*": { "subcaseMS": 23.801 },
+ "webgpu:shader,execution,expression,call,builtin,textureGather:depth_array_3d_coords:*": { "subcaseMS": 10.200 },
+ "webgpu:shader,execution,expression,call,builtin,textureGather:sampled_2d_coords:*": { "subcaseMS": 343.301 },
+ "webgpu:shader,execution,expression,call,builtin,textureGather:sampled_3d_coords:*": { "subcaseMS": 63.200 },
+ "webgpu:shader,execution,expression,call,builtin,textureGather:sampled_array_2d_coords:*": { "subcaseMS": 304.401 },
+ "webgpu:shader,execution,expression,call,builtin,textureGather:sampled_array_3d_coords:*": { "subcaseMS": 60.700 },
+ "webgpu:shader,execution,expression,call,builtin,textureGatherCompare:array_2d_coords:*": { "subcaseMS": 291.301 },
+ "webgpu:shader,execution,expression,call,builtin,textureGatherCompare:array_3d_coords:*": { "subcaseMS": 191.101 },
+ "webgpu:shader,execution,expression,call,builtin,textureGatherCompare:sampled_array_2d_coords:*": { "subcaseMS": 57.600 },
+ "webgpu:shader,execution,expression,call,builtin,textureGatherCompare:sampled_array_3d_coords:*": { "subcaseMS": 10.101 },
+ "webgpu:shader,execution,expression,call,builtin,textureLoad:arrayed:*": { "subcaseMS": 30.501 },
+ "webgpu:shader,execution,expression,call,builtin,textureLoad:depth:*": { "subcaseMS": 3.200 },
+ "webgpu:shader,execution,expression,call,builtin,textureLoad:external:*": { "subcaseMS": 1.401 },
+ "webgpu:shader,execution,expression,call,builtin,textureLoad:multisampled:*": { "subcaseMS": 11.601 },
+ "webgpu:shader,execution,expression,call,builtin,textureLoad:sampled_1d:*": { "subcaseMS": 83.312 },
+ "webgpu:shader,execution,expression,call,builtin,textureLoad:sampled_2d:*": { "subcaseMS": 96.737 },
+ "webgpu:shader,execution,expression,call,builtin,textureLoad:sampled_3d:*": { "subcaseMS": 158.534 },
+ "webgpu:shader,execution,expression,call,builtin,textureNumLayers:arrayed:*": { "subcaseMS": 8.102 },
+ "webgpu:shader,execution,expression,call,builtin,textureNumLayers:sampled:*": { "subcaseMS": 2.101 },
+ "webgpu:shader,execution,expression,call,builtin,textureNumLayers:storage:*": { "subcaseMS": 8.000 },
+ "webgpu:shader,execution,expression,call,builtin,textureNumLevels:depth:*": { "subcaseMS": 3.801 },
+ "webgpu:shader,execution,expression,call,builtin,textureNumLevels:sampled:*": { "subcaseMS": 6.201 },
+ "webgpu:shader,execution,expression,call,builtin,textureNumSamples:depth:*": { "subcaseMS": 1.101 },
+ "webgpu:shader,execution,expression,call,builtin,textureNumSamples:sampled:*": { "subcaseMS": 6.600 },
+ "webgpu:shader,execution,expression,call,builtin,textureSample:control_flow:*": { "subcaseMS": 2.801 },
+ "webgpu:shader,execution,expression,call,builtin,textureSample:depth_2d_coords:*": { "subcaseMS": 12.301 },
+ "webgpu:shader,execution,expression,call,builtin,textureSample:depth_3d_coords:*": { "subcaseMS": 2.101 },
+ "webgpu:shader,execution,expression,call,builtin,textureSample:depth_array_2d_coords:*": { "subcaseMS": 92.601 },
+ "webgpu:shader,execution,expression,call,builtin,textureSample:depth_array_3d_coords:*": { "subcaseMS": 20.301 },
+ "webgpu:shader,execution,expression,call,builtin,textureSample:sampled_1d_coords:*": { "subcaseMS": 1.200 },
+ "webgpu:shader,execution,expression,call,builtin,textureSample:sampled_2d_coords:*": { "subcaseMS": 12.500 },
+ "webgpu:shader,execution,expression,call,builtin,textureSample:sampled_3d_coords:*": { "subcaseMS": 36.002 },
+ "webgpu:shader,execution,expression,call,builtin,textureSample:sampled_array_2d_coords:*": { "subcaseMS": 92.500 },
+ "webgpu:shader,execution,expression,call,builtin,textureSample:sampled_array_3d_coords:*": { "subcaseMS": 20.200 },
+ "webgpu:shader,execution,expression,call,builtin,textureSample:stage:*": { "subcaseMS": 3.000 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleBias:arrayed_2d_coords:*": { "subcaseMS": 585.100 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleBias:arrayed_3d_coords:*": { "subcaseMS": 121.600 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleBias:control_flow:*": { "subcaseMS": 2.502 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleBias:sampled_2d_coords:*": { "subcaseMS": 48.601 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleBias:sampled_3d_coords:*": { "subcaseMS": 133.600 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleBias:stage:*": { "subcaseMS": 2.803 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleCompare:2d_coords:*": { "subcaseMS": 24.000 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleCompare:3d_coords:*": { "subcaseMS": 9.000 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleCompare:arrayed_2d_coords:*": { "subcaseMS": 295.601 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleCompare:arrayed_3d_coords:*": { "subcaseMS": 60.301 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleCompare:control_flow:*": { "subcaseMS": 2.702 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleCompare:stage:*": { "subcaseMS": 7.701 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleCompareLevel:2d_coords:*": { "subcaseMS": 30.401 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleCompareLevel:3d_coords:*": { "subcaseMS": 10.301 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleCompareLevel:arrayed_2d_coords:*": { "subcaseMS": 705.100 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleCompareLevel:arrayed_3d_coords:*": { "subcaseMS": 622.700 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleCompareLevel:control_flow:*": { "subcaseMS": 2.202 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleCompareLevel:stage:*": { "subcaseMS": 7.901 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleGrad:sampled_2d_coords:*": { "subcaseMS": 82.401 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleGrad:sampled_3d_coords:*": { "subcaseMS": 309.101 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleGrad:sampled_array_2d_coords:*": { "subcaseMS": 352.900 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleGrad:sampled_array_3d_coords:*": { "subcaseMS": 332.000 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleLevel:depth_2d_coords:*": { "subcaseMS": 545.401 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleLevel:depth_3d_coords:*": { "subcaseMS": 183.000 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleLevel:depth_array_2d_coords:*": { "subcaseMS": 547.500 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleLevel:sampled_2d_coords:*": { "subcaseMS": 35.601 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleLevel:sampled_3d_coords:*": { "subcaseMS": 118.901 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleLevel:sampled_array_2d_coords:*": { "subcaseMS": 822.400 },
+ "webgpu:shader,execution,expression,call,builtin,textureSampleLevel:sampled_array_3d_coords:*": { "subcaseMS": 817.200 },
+ "webgpu:shader,execution,expression,call,builtin,textureStore:store_1d_coords:*": { "subcaseMS": 19.907 },
+ "webgpu:shader,execution,expression,call,builtin,textureStore:store_2d_coords:*": { "subcaseMS": 28.809 },
+ "webgpu:shader,execution,expression,call,builtin,textureStore:store_3d_coords:*": { "subcaseMS": 37.206 },
+ "webgpu:shader,execution,expression,call,builtin,textureStore:store_array_2d_coords:*": { "subcaseMS": 98.804 },
+ "webgpu:shader,execution,expression,call,builtin,transpose:abstract_float:*": { "subcaseMS": 755.012 },
+ "webgpu:shader,execution,expression,call,builtin,transpose:f16:*": { "subcaseMS": 33.311 },
+ "webgpu:shader,execution,expression,call,builtin,transpose:f32:*": { "subcaseMS": 75.887 },
+ "webgpu:shader,execution,expression,call,builtin,trunc:abstract_float:*": { "subcaseMS": 455.726 },
+ "webgpu:shader,execution,expression,call,builtin,trunc:f16:*": { "subcaseMS": 120.204 },
+ "webgpu:shader,execution,expression,call,builtin,trunc:f32:*": { "subcaseMS": 48.544 },
+ "webgpu:shader,execution,expression,call,builtin,unpack2x16float:unpack:*": { "subcaseMS": 11.651 },
+ "webgpu:shader,execution,expression,call,builtin,unpack2x16snorm:unpack:*": { "subcaseMS": 9.275 },
+ "webgpu:shader,execution,expression,call,builtin,unpack2x16unorm:unpack:*": { "subcaseMS": 8.701 },
+ "webgpu:shader,execution,expression,call,builtin,unpack4x8snorm:unpack:*": { "subcaseMS": 12.275 },
+ "webgpu:shader,execution,expression,call,builtin,unpack4x8unorm:unpack:*": { "subcaseMS": 11.776 },
+ "webgpu:shader,execution,expression,call,builtin,workgroupBarrier:barrier:*": { "subcaseMS": 0.701 },
+ "webgpu:shader,execution,expression,call,builtin,workgroupBarrier:stage:*": { "subcaseMS": 1.801 },
+ "webgpu:shader,execution,expression,unary,af_arithmetic:negation:*": { "subcaseMS": 2165.950 },
+ "webgpu:shader,execution,expression,unary,af_assignment:abstract:*": { "subcaseMS": 788.400 },
+ "webgpu:shader,execution,expression,unary,af_assignment:f16:*": { "subcaseMS": 1.000 },
+ "webgpu:shader,execution,expression,unary,af_assignment:f32:*": { "subcaseMS": 42.000 },
+ "webgpu:shader,execution,expression,unary,bool_conversion:bool:*": { "subcaseMS": 8.357 },
+ "webgpu:shader,execution,expression,unary,bool_conversion:f16:*": { "subcaseMS": 44.794 },
+ "webgpu:shader,execution,expression,unary,bool_conversion:f32:*": { "subcaseMS": 41.276 },
+ "webgpu:shader,execution,expression,unary,bool_conversion:i32:*": { "subcaseMS": 8.219 },
+ "webgpu:shader,execution,expression,unary,bool_conversion:u32:*": { "subcaseMS": 7.401 },
+ "webgpu:shader,execution,expression,unary,bool_logical:negation:*": { "subcaseMS": 6.413 },
+ "webgpu:shader,execution,expression,unary,f16_arithmetic:negation:*": { "subcaseMS": 117.604 },
+ "webgpu:shader,execution,expression,unary,f16_conversion:bool:*": { "subcaseMS": 34.694 },
+ "webgpu:shader,execution,expression,unary,f16_conversion:f16:*": { "subcaseMS": 36.013 },
+ "webgpu:shader,execution,expression,unary,f16_conversion:f16_mat:*": { "subcaseMS": 47.109 },
+ "webgpu:shader,execution,expression,unary,f16_conversion:f32:*": { "subcaseMS": 30.900 },
+ "webgpu:shader,execution,expression,unary,f16_conversion:f32_mat:*": { "subcaseMS": 37.820 },
+ "webgpu:shader,execution,expression,unary,f16_conversion:i32:*": { "subcaseMS": 24.557 },
+ "webgpu:shader,execution,expression,unary,f16_conversion:u32:*": { "subcaseMS": 84.500 },
+ "webgpu:shader,execution,expression,unary,f32_arithmetic:negation:*": { "subcaseMS": 16.400 },
+ "webgpu:shader,execution,expression,unary,f32_conversion:bool:*": { "subcaseMS": 7.182 },
+ "webgpu:shader,execution,expression,unary,f32_conversion:f16:*": { "subcaseMS": 107.463 },
+ "webgpu:shader,execution,expression,unary,f32_conversion:f16_mat:*": { "subcaseMS": 60.170 },
+ "webgpu:shader,execution,expression,unary,f32_conversion:f32:*": { "subcaseMS": 7.538 },
+ "webgpu:shader,execution,expression,unary,f32_conversion:f32_mat:*": { "subcaseMS": 7.759 },
+ "webgpu:shader,execution,expression,unary,f32_conversion:i32:*": { "subcaseMS": 7.701 },
+ "webgpu:shader,execution,expression,unary,f32_conversion:u32:*": { "subcaseMS": 7.132 },
+ "webgpu:shader,execution,expression,unary,i32_arithmetic:negation:*": { "subcaseMS": 7.244 },
+ "webgpu:shader,execution,expression,unary,i32_complement:i32_complement:*": { "subcaseMS": 9.075 },
+ "webgpu:shader,execution,expression,unary,i32_conversion:bool:*": { "subcaseMS": 6.457 },
+ "webgpu:shader,execution,expression,unary,i32_conversion:f16:*": { "subcaseMS": 44.363 },
+ "webgpu:shader,execution,expression,unary,i32_conversion:f32:*": { "subcaseMS": 8.275 },
+ "webgpu:shader,execution,expression,unary,i32_conversion:i32:*": { "subcaseMS": 7.707 },
+ "webgpu:shader,execution,expression,unary,i32_conversion:u32:*": { "subcaseMS": 6.969 },
+ "webgpu:shader,execution,expression,unary,u32_complement:u32_complement:*": { "subcaseMS": 7.632 },
+ "webgpu:shader,execution,expression,unary,u32_conversion:abstract_int:*": { "subcaseMS": 20.406 },
+ "webgpu:shader,execution,expression,unary,u32_conversion:bool:*": { "subcaseMS": 7.713 },
+ "webgpu:shader,execution,expression,unary,u32_conversion:f16:*": { "subcaseMS": 34.251 },
+ "webgpu:shader,execution,expression,unary,u32_conversion:f32:*": { "subcaseMS": 7.913 },
+ "webgpu:shader,execution,expression,unary,u32_conversion:i32:*": { "subcaseMS": 8.319 },
+ "webgpu:shader,execution,expression,unary,u32_conversion:u32:*": { "subcaseMS": 7.057 },
+ "webgpu:shader,execution,float_parse:valid:*": { "subcaseMS": 6.801 },
+ "webgpu:shader,execution,flow_control,call:call_basic:*": { "subcaseMS": 4.901 },
+ "webgpu:shader,execution,flow_control,call:call_nested:*": { "subcaseMS": 5.500 },
+ "webgpu:shader,execution,flow_control,call:call_repeated:*": { "subcaseMS": 10.851 },
+ "webgpu:shader,execution,flow_control,complex:continue_in_switch_in_for_loop:*": { "subcaseMS": 13.650 },
+ "webgpu:shader,execution,flow_control,eval_order:1d_array_assignment:*": { "subcaseMS": 17.500 },
+ "webgpu:shader,execution,flow_control,eval_order:1d_array_compound_assignment:*": { "subcaseMS": 5.400 },
+ "webgpu:shader,execution,flow_control,eval_order:1d_array_constructor:*": { "subcaseMS": 5.600 },
+ "webgpu:shader,execution,flow_control,eval_order:1d_array_increment:*": { "subcaseMS": 5.500 },
+ "webgpu:shader,execution,flow_control,eval_order:2d_array_assignment:*": { "subcaseMS": 11.000 },
+ "webgpu:shader,execution,flow_control,eval_order:2d_array_compound_assignment:*": { "subcaseMS": 21.601 },
+ "webgpu:shader,execution,flow_control,eval_order:2d_array_constructor:*": { "subcaseMS": 11.101 },
+ "webgpu:shader,execution,flow_control,eval_order:2d_array_increment:*": { "subcaseMS": 10.601 },
+ "webgpu:shader,execution,flow_control,eval_order:array_index:*": { "subcaseMS": 5.700 },
+ "webgpu:shader,execution,flow_control,eval_order:array_index_lhs_assignment:*": { "subcaseMS": 11.301 },
+ "webgpu:shader,execution,flow_control,eval_order:array_index_lhs_member_assignment:*": { "subcaseMS": 17.101 },
+ "webgpu:shader,execution,flow_control,eval_order:array_index_via_ptrs:*": { "subcaseMS": 10.200 },
+ "webgpu:shader,execution,flow_control,eval_order:array_index_via_struct_members:*": { "subcaseMS": 6.000 },
+ "webgpu:shader,execution,flow_control,eval_order:binary_op:*": { "subcaseMS": 5.900 },
+ "webgpu:shader,execution,flow_control,eval_order:binary_op_chain:*": { "subcaseMS": 21.000 },
+ "webgpu:shader,execution,flow_control,eval_order:binary_op_chain_C_C_C_R:*": { "subcaseMS": 22.400 },
+ "webgpu:shader,execution,flow_control,eval_order:binary_op_chain_C_C_R_C:*": { "subcaseMS": 6.601 },
+ "webgpu:shader,execution,flow_control,eval_order:binary_op_chain_C_R_C_C:*": { "subcaseMS": 5.101 },
+ "webgpu:shader,execution,flow_control,eval_order:binary_op_chain_R_C_C_C:*": { "subcaseMS": 6.000 },
+ "webgpu:shader,execution,flow_control,eval_order:binary_op_lhs_const:*": { "subcaseMS": 5.401 },
+ "webgpu:shader,execution,flow_control,eval_order:binary_op_parenthesized_expr:*": { "subcaseMS": 11.000 },
+ "webgpu:shader,execution,flow_control,eval_order:binary_op_rhs_const:*": { "subcaseMS": 10.200 },
+ "webgpu:shader,execution,flow_control,eval_order:bitwise_and:*": { "subcaseMS": 5.500 },
+ "webgpu:shader,execution,flow_control,eval_order:bitwise_or:*": { "subcaseMS": 22.301 },
+ "webgpu:shader,execution,flow_control,eval_order:builtin_fn_args:*": { "subcaseMS": 20.000 },
+ "webgpu:shader,execution,flow_control,eval_order:logical_and:*": { "subcaseMS": 5.101 },
+ "webgpu:shader,execution,flow_control,eval_order:logical_or:*": { "subcaseMS": 6.801 },
+ "webgpu:shader,execution,flow_control,eval_order:matrix_index:*": { "subcaseMS": 9.900 },
+ "webgpu:shader,execution,flow_control,eval_order:matrix_index_via_ptr:*": { "subcaseMS": 19.000 },
+ "webgpu:shader,execution,flow_control,eval_order:nested_builtin_fn_args:*": { "subcaseMS": 10.500 },
+ "webgpu:shader,execution,flow_control,eval_order:nested_fn_args:*": { "subcaseMS": 11.100 },
+ "webgpu:shader,execution,flow_control,eval_order:nested_struct_constructor:*": { "subcaseMS": 10.500 },
+ "webgpu:shader,execution,flow_control,eval_order:nested_vec4_constructor:*": { "subcaseMS": 10.700 },
+ "webgpu:shader,execution,flow_control,eval_order:struct_constructor:*": { "subcaseMS": 5.701 },
+ "webgpu:shader,execution,flow_control,eval_order:user_fn_args:*": { "subcaseMS": 5.801 },
+ "webgpu:shader,execution,flow_control,eval_order:vec4_constructor:*": { "subcaseMS": 22.900 },
+ "webgpu:shader,execution,flow_control,for:for_basic:*": { "subcaseMS": 14.150 },
+ "webgpu:shader,execution,flow_control,for:for_break:*": { "subcaseMS": 5.700 },
+ "webgpu:shader,execution,flow_control,for:for_complex_condition:*": { "subcaseMS": 12.450 },
+ "webgpu:shader,execution,flow_control,for:for_complex_continuing:*": { "subcaseMS": 12.000 },
+ "webgpu:shader,execution,flow_control,for:for_complex_initalizer:*": { "subcaseMS": 11.700 },
+ "webgpu:shader,execution,flow_control,for:for_condition:*": { "subcaseMS": 6.050 },
+ "webgpu:shader,execution,flow_control,for:for_continue:*": { "subcaseMS": 10.601 },
+ "webgpu:shader,execution,flow_control,for:for_continuing:*": { "subcaseMS": 5.000 },
+ "webgpu:shader,execution,flow_control,for:for_initalizer:*": { "subcaseMS": 7.751 },
+ "webgpu:shader,execution,flow_control,for:nested_for_break:*": { "subcaseMS": 5.901 },
+ "webgpu:shader,execution,flow_control,for:nested_for_continue:*": { "subcaseMS": 12.851 },
+ "webgpu:shader,execution,flow_control,if:else_if:*": { "subcaseMS": 7.950 },
+ "webgpu:shader,execution,flow_control,if:if_false:*": { "subcaseMS": 11.201 },
+ "webgpu:shader,execution,flow_control,if:if_true:*": { "subcaseMS": 4.850 },
+ "webgpu:shader,execution,flow_control,if:nested_if_else:*": { "subcaseMS": 11.650 },
+ "webgpu:shader,execution,flow_control,loop:loop_break:*": { "subcaseMS": 6.000 },
+ "webgpu:shader,execution,flow_control,loop:loop_continue:*": { "subcaseMS": 11.200 },
+ "webgpu:shader,execution,flow_control,loop:loop_continuing_basic:*": { "subcaseMS": 12.450 },
+ "webgpu:shader,execution,flow_control,loop:nested_loops:*": { "subcaseMS": 12.900 },
+ "webgpu:shader,execution,flow_control,phony:phony_assign_call_basic:*": { "subcaseMS": 6.750 },
+ "webgpu:shader,execution,flow_control,phony:phony_assign_call_builtin:*": { "subcaseMS": 12.001 },
+ "webgpu:shader,execution,flow_control,phony:phony_assign_call_must_use:*": { "subcaseMS": 6.450 },
+ "webgpu:shader,execution,flow_control,phony:phony_assign_call_nested:*": { "subcaseMS": 12.300 },
+ "webgpu:shader,execution,flow_control,phony:phony_assign_call_nested_must_use:*": { "subcaseMS": 5.250 },
+ "webgpu:shader,execution,flow_control,return:return:*": { "subcaseMS": 4.250 },
+ "webgpu:shader,execution,flow_control,return:return_conditional_false:*": { "subcaseMS": 5.851 },
+ "webgpu:shader,execution,flow_control,return:return_conditional_true:*": { "subcaseMS": 12.650 },
+ "webgpu:shader,execution,flow_control,switch:switch:*": { "subcaseMS": 12.750 },
+ "webgpu:shader,execution,flow_control,switch:switch_default:*": { "subcaseMS": 5.400 },
+ "webgpu:shader,execution,flow_control,switch:switch_default_only:*": { "subcaseMS": 12.550 },
+ "webgpu:shader,execution,flow_control,switch:switch_multiple_case:*": { "subcaseMS": 5.550 },
+ "webgpu:shader,execution,flow_control,switch:switch_multiple_case_default:*": { "subcaseMS": 12.000 },
+ "webgpu:shader,execution,flow_control,while:while_basic:*": { "subcaseMS": 5.951 },
+ "webgpu:shader,execution,flow_control,while:while_break:*": { "subcaseMS": 12.450 },
+ "webgpu:shader,execution,flow_control,while:while_continue:*": { "subcaseMS": 5.650 },
+ "webgpu:shader,execution,flow_control,while:while_nested_break:*": { "subcaseMS": 12.701 },
+ "webgpu:shader,execution,flow_control,while:while_nested_continue:*": { "subcaseMS": 5.450 },
+ "webgpu:shader,execution,memory_model,adjacent:f16:*": { "subcaseMS": 23.625 },
+ "webgpu:shader,execution,memory_model,atomicity:atomicity:*": { "subcaseMS": 77.201 },
+ "webgpu:shader,execution,memory_model,barrier:workgroup_barrier_load_store:*": { "subcaseMS": 65.850 },
+ "webgpu:shader,execution,memory_model,barrier:workgroup_barrier_store_load:*": { "subcaseMS": 78.800 },
+ "webgpu:shader,execution,memory_model,barrier:workgroup_barrier_store_store:*": { "subcaseMS": 61.701 },
+ "webgpu:shader,execution,memory_model,coherence:corr:*": { "subcaseMS": 238.167 },
+ "webgpu:shader,execution,memory_model,coherence:corw1:*": { "subcaseMS": 250.467 },
+ "webgpu:shader,execution,memory_model,coherence:corw2:*": { "subcaseMS": 244.384 },
+ "webgpu:shader,execution,memory_model,coherence:cowr:*": { "subcaseMS": 250.484 },
+ "webgpu:shader,execution,memory_model,coherence:coww:*": { "subcaseMS": 245.850 },
+ "webgpu:shader,execution,memory_model,weak:2_plus_2_write:*": { "subcaseMS": 185.150 },
+ "webgpu:shader,execution,memory_model,weak:load_buffer:*": { "subcaseMS": 184.900 },
+ "webgpu:shader,execution,memory_model,weak:message_passing:*": { "subcaseMS": 196.550 },
+ "webgpu:shader,execution,memory_model,weak:read:*": { "subcaseMS": 185.400 },
+ "webgpu:shader,execution,memory_model,weak:store:*": { "subcaseMS": 184.500 },
+ "webgpu:shader,execution,memory_model,weak:store_buffer:*": { "subcaseMS": 185.850 },
+ "webgpu:shader,execution,padding:array_of_matCx3:*": { "subcaseMS": 8.650 },
+ "webgpu:shader,execution,padding:array_of_struct:*": { "subcaseMS": 5.801 },
+ "webgpu:shader,execution,padding:array_of_vec3:*": { "subcaseMS": 10.500 },
+ "webgpu:shader,execution,padding:matCx3:*": { "subcaseMS": 10.050 },
+ "webgpu:shader,execution,padding:struct_explicit:*": { "subcaseMS": 12.000 },
+ "webgpu:shader,execution,padding:struct_implicit:*": { "subcaseMS": 33.201 },
+ "webgpu:shader,execution,padding:struct_nested:*": { "subcaseMS": 21.400 },
+ "webgpu:shader,execution,padding:vec3:*": { "subcaseMS": 8.700 },
+ "webgpu:shader,execution,robust_access:linear_memory:*": { "subcaseMS": 5.293 },
+ "webgpu:shader,execution,robust_access_vertex:vertex_buffer_access:*": { "subcaseMS": 6.487 },
+ "webgpu:shader,execution,shader_io,compute_builtins:inputs:*": { "subcaseMS": 19.342 },
+ "webgpu:shader,execution,shader_io,shared_structs:shared_between_stages:*": { "subcaseMS": 9.601 },
+ "webgpu:shader,execution,shader_io,shared_structs:shared_with_buffer:*": { "subcaseMS": 20.701 },
+ "webgpu:shader,execution,shader_io,shared_structs:shared_with_non_entry_point_function:*": { "subcaseMS": 6.801 },
+ "webgpu:shader,execution,shadow:builtin:*": { "subcaseMS": 4.700 },
+ "webgpu:shader,execution,shadow:declaration:*": { "subcaseMS": 9.700 },
+ "webgpu:shader,execution,shadow:for_loop:*": { "subcaseMS": 17.201 },
+ "webgpu:shader,execution,shadow:if:*": { "subcaseMS": 6.700 },
+ "webgpu:shader,execution,shadow:loop:*": { "subcaseMS": 4.901 },
+ "webgpu:shader,execution,shadow:switch:*": { "subcaseMS": 4.601 },
+ "webgpu:shader,execution,shadow:while:*": { "subcaseMS": 7.400 },
+ "webgpu:shader,execution,statement,increment_decrement:frexp_exp_increment:*": { "subcaseMS": 4.700 },
+ "webgpu:shader,execution,statement,increment_decrement:scalar_i32_decrement:*": { "subcaseMS": 20.301 },
+ "webgpu:shader,execution,statement,increment_decrement:scalar_i32_decrement_underflow:*": { "subcaseMS": 4.900 },
+ "webgpu:shader,execution,statement,increment_decrement:scalar_i32_increment:*": { "subcaseMS": 17.801 },
+ "webgpu:shader,execution,statement,increment_decrement:scalar_i32_increment_overflow:*": { "subcaseMS": 9.301 },
+ "webgpu:shader,execution,statement,increment_decrement:scalar_u32_decrement:*": { "subcaseMS": 4.800 },
+ "webgpu:shader,execution,statement,increment_decrement:scalar_u32_decrement_underflow:*": { "subcaseMS": 21.600 },
+ "webgpu:shader,execution,statement,increment_decrement:scalar_u32_increment:*": { "subcaseMS": 5.900 },
+ "webgpu:shader,execution,statement,increment_decrement:scalar_u32_increment_overflow:*": { "subcaseMS": 4.700 },
+ "webgpu:shader,execution,statement,increment_decrement:vec2_element_decrement:*": { "subcaseMS": 5.200 },
+ "webgpu:shader,execution,statement,increment_decrement:vec2_element_increment:*": { "subcaseMS": 5.000 },
+ "webgpu:shader,execution,statement,increment_decrement:vec3_element_decrement:*": { "subcaseMS": 17.700 },
+ "webgpu:shader,execution,statement,increment_decrement:vec3_element_increment:*": { "subcaseMS": 4.801 },
+ "webgpu:shader,execution,statement,increment_decrement:vec4_element_decrement:*": { "subcaseMS": 5.300 },
+ "webgpu:shader,execution,statement,increment_decrement:vec4_element_increment:*": { "subcaseMS": 6.300 },
+ "webgpu:shader,execution,zero_init:compute,zero_init:*": { "subcaseMS": 2.944 },
+ "webgpu:shader,validation,const_assert,const_assert:constant_expression_assert:*": { "subcaseMS": 1.456 },
+ "webgpu:shader,validation,const_assert,const_assert:constant_expression_logical_and_assert:*": { "subcaseMS": 1.493 },
+ "webgpu:shader,validation,const_assert,const_assert:constant_expression_logical_and_no_assert:*": { "subcaseMS": 1.339 },
+ "webgpu:shader,validation,const_assert,const_assert:constant_expression_logical_or_assert:*": { "subcaseMS": 1.501 },
+ "webgpu:shader,validation,const_assert,const_assert:constant_expression_logical_or_no_assert:*": { "subcaseMS": 1.373 },
+ "webgpu:shader,validation,const_assert,const_assert:constant_expression_no_assert:*": { "subcaseMS": 1.655 },
+ "webgpu:shader,validation,const_assert,const_assert:evaluation_stage:*": { "subcaseMS": 3.367 },
+ "webgpu:shader,validation,decl,const:no_direct_recursion:*": { "subcaseMS": 0.951 },
+ "webgpu:shader,validation,decl,const:no_indirect_recursion:*": { "subcaseMS": 0.950 },
+ "webgpu:shader,validation,decl,const:no_indirect_recursion_via_array_size:*": { "subcaseMS": 2.601 },
+ "webgpu:shader,validation,decl,const:no_indirect_recursion_via_struct_attribute:*": { "subcaseMS": 1.034 },
+ "webgpu:shader,validation,decl,override:no_direct_recursion:*": { "subcaseMS": 1.000 },
+ "webgpu:shader,validation,decl,override:no_indirect_recursion:*": { "subcaseMS": 0.951 },
+ "webgpu:shader,validation,decl,ptr_spelling:let_ptr_explicit_type_matches_var:*": { "subcaseMS": 1.500 },
+ "webgpu:shader,validation,decl,ptr_spelling:let_ptr_reads:*": { "subcaseMS": 1.216 },
+ "webgpu:shader,validation,decl,ptr_spelling:let_ptr_writes:*": { "subcaseMS": 1.250 },
+ "webgpu:shader,validation,decl,ptr_spelling:ptr_address_space_never_uses_access_mode:*": { "subcaseMS": 1.141 },
+ "webgpu:shader,validation,decl,ptr_spelling:ptr_bad_store_type:*": { "subcaseMS": 0.967 },
+ "webgpu:shader,validation,decl,ptr_spelling:ptr_handle_space_invalid:*": { "subcaseMS": 1.000 },
+ "webgpu:shader,validation,decl,ptr_spelling:ptr_not_instantiable:*": { "subcaseMS": 1.310 },
+ "webgpu:shader,validation,decl,var_access_mode:explicit_access_mode:*": { "subcaseMS": 1.373 },
+ "webgpu:shader,validation,decl,var_access_mode:implicit_access_mode:*": { "subcaseMS": 1.000 },
+ "webgpu:shader,validation,decl,var_access_mode:read_access:*": { "subcaseMS": 1.177 },
+ "webgpu:shader,validation,decl,var_access_mode:write_access:*": { "subcaseMS": 1.154 },
+ "webgpu:shader,validation,expression,access,vector:vector:*": { "subcaseMS": 1.407 },
+ "webgpu:shader,validation,expression,binary,bitwise_shift:shift_left_concrete:*": { "subcaseMS": 1.216 },
+ "webgpu:shader,validation,expression,binary,bitwise_shift:shift_left_vec_size_mismatch:*": { "subcaseMS": 1.367 },
+ "webgpu:shader,validation,expression,binary,bitwise_shift:shift_right_concrete:*": { "subcaseMS": 1.237 },
+ "webgpu:shader,validation,expression,binary,bitwise_shift:shift_right_vec_size_mismatch:*": { "subcaseMS": 1.334 },
+ "webgpu:shader,validation,expression,call,builtin,abs:values:*": { "subcaseMS": 0.391 },
+ "webgpu:shader,validation,expression,call,builtin,acos:integer_argument:*": { "subcaseMS": 1.512 },
+ "webgpu:shader,validation,expression,call,builtin,acos:values:*": { "subcaseMS": 0.342 },
+ "webgpu:shader,validation,expression,call,builtin,acosh:integer_argument:*": { "subcaseMS": 1.234 },
+ "webgpu:shader,validation,expression,call,builtin,acosh:values:*": { "subcaseMS": 0.217 },
+ "webgpu:shader,validation,expression,call,builtin,asin:integer_argument:*": { "subcaseMS": 0.878 },
+ "webgpu:shader,validation,expression,call,builtin,asin:values:*": { "subcaseMS": 0.359 },
+ "webgpu:shader,validation,expression,call,builtin,asinh:integer_argument:*": { "subcaseMS": 1.267 },
+ "webgpu:shader,validation,expression,call,builtin,asinh:values:*": { "subcaseMS": 0.372 },
+ "webgpu:shader,validation,expression,call,builtin,atan2:integer_argument_x:*": { "subcaseMS": 0.912 },
+ "webgpu:shader,validation,expression,call,builtin,atan2:integer_argument_y:*": { "subcaseMS": 0.867 },
+ "webgpu:shader,validation,expression,call,builtin,atan2:values:*": { "subcaseMS": 0.359 },
+ "webgpu:shader,validation,expression,call,builtin,atan:integer_argument:*": { "subcaseMS": 1.545 },
+ "webgpu:shader,validation,expression,call,builtin,atan:values:*": { "subcaseMS": 0.335 },
+ "webgpu:shader,validation,expression,call,builtin,atanh:integer_argument:*": { "subcaseMS": 0.912 },
+ "webgpu:shader,validation,expression,call,builtin,atanh:values:*": { "subcaseMS": 0.231 },
+ "webgpu:shader,validation,expression,call,builtin,atomics:stage:*": { "subcaseMS": 1.346 },
+ "webgpu:shader,validation,expression,call,builtin,bitcast:bad_const_to_f16:*": { "subcaseMS": 0.753 },
+ "webgpu:shader,validation,expression,call,builtin,bitcast:bad_const_to_f32:*": { "subcaseMS": 0.844 },
+ "webgpu:shader,validation,expression,call,builtin,bitcast:bad_to_f16:*": { "subcaseMS": 8.518 },
+ "webgpu:shader,validation,expression,call,builtin,bitcast:bad_to_vec3h:*": { "subcaseMS": 17.641 },
+ "webgpu:shader,validation,expression,call,builtin,bitcast:bad_type_constructible:*": { "subcaseMS": 1.214 },
+ "webgpu:shader,validation,expression,call,builtin,bitcast:bad_type_nonconstructible:*": { "subcaseMS": 1.425 },
+ "webgpu:shader,validation,expression,call,builtin,bitcast:valid_vec2h:*": { "subcaseMS": 3.405 },
+ "webgpu:shader,validation,expression,call,builtin,bitcast:valid_vec4h:*": { "subcaseMS": 5.610 },
+ "webgpu:shader,validation,expression,call,builtin,ceil:integer_argument:*": { "subcaseMS": 1.456 },
+ "webgpu:shader,validation,expression,call,builtin,ceil:values:*": { "subcaseMS": 1.539 },
+ "webgpu:shader,validation,expression,call,builtin,clamp:values:*": { "subcaseMS": 0.377 },
+ "webgpu:shader,validation,expression,call,builtin,cos:integer_argument:*": { "subcaseMS": 1.601 },
+ "webgpu:shader,validation,expression,call,builtin,cos:values:*": { "subcaseMS": 0.338 },
+ "webgpu:shader,validation,expression,call,builtin,cosh:integer_argument:*": { "subcaseMS": 0.889 },
+ "webgpu:shader,validation,expression,call,builtin,cosh:values:*": { "subcaseMS": 0.272 },
+ "webgpu:shader,validation,expression,call,builtin,degrees:integer_argument:*": { "subcaseMS": 1.311 },
+ "webgpu:shader,validation,expression,call,builtin,degrees:values:*": { "subcaseMS": 0.303 },
+ "webgpu:shader,validation,expression,call,builtin,exp2:integer_argument:*": { "subcaseMS": 0.967 },
+ "webgpu:shader,validation,expression,call,builtin,exp2:values:*": { "subcaseMS": 0.410 },
+ "webgpu:shader,validation,expression,call,builtin,exp:integer_argument:*": { "subcaseMS": 1.356 },
+ "webgpu:shader,validation,expression,call,builtin,exp:values:*": { "subcaseMS": 0.311 },
+ "webgpu:shader,validation,expression,call,builtin,inverseSqrt:integer_argument:*": { "subcaseMS": 1.356 },
+ "webgpu:shader,validation,expression,call,builtin,inverseSqrt:values:*": { "subcaseMS": 0.315 },
+ "webgpu:shader,validation,expression,call,builtin,length:integer_argument:*": { "subcaseMS": 2.011 },
+ "webgpu:shader,validation,expression,call,builtin,length:scalar:*": { "subcaseMS": 0.245 },
+ "webgpu:shader,validation,expression,call,builtin,length:vec2:*": { "subcaseMS": 0.319 },
+ "webgpu:shader,validation,expression,call,builtin,length:vec3:*": { "subcaseMS": 1.401 },
+ "webgpu:shader,validation,expression,call,builtin,length:vec4:*": { "subcaseMS": 1.301 },
+ "webgpu:shader,validation,expression,call,builtin,log2:integer_argument:*": { "subcaseMS": 1.034 },
+ "webgpu:shader,validation,expression,call,builtin,log2:values:*": { "subcaseMS": 0.398 },
+ "webgpu:shader,validation,expression,call,builtin,log:integer_argument:*": { "subcaseMS": 1.134 },
+ "webgpu:shader,validation,expression,call,builtin,log:values:*": { "subcaseMS": 0.291 },
+ "webgpu:shader,validation,expression,call,builtin,modf:integer_argument:*": { "subcaseMS": 1.089 },
+ "webgpu:shader,validation,expression,call,builtin,modf:values:*": { "subcaseMS": 1.866 },
+ "webgpu:shader,validation,expression,call,builtin,radians:integer_argument:*": { "subcaseMS": 1.811 },
+ "webgpu:shader,validation,expression,call,builtin,radians:values:*": { "subcaseMS": 0.382 },
+ "webgpu:shader,validation,expression,call,builtin,round:integer_argument:*": { "subcaseMS": 1.834 },
+ "webgpu:shader,validation,expression,call,builtin,round:values:*": { "subcaseMS": 0.382 },
+ "webgpu:shader,validation,expression,call,builtin,saturate:integer_argument:*": { "subcaseMS": 1.878 },
+ "webgpu:shader,validation,expression,call,builtin,saturate:values:*": { "subcaseMS": 0.317 },
+ "webgpu:shader,validation,expression,call,builtin,sign:unsigned_integer_argument:*": { "subcaseMS": 1.120 },
+ "webgpu:shader,validation,expression,call,builtin,sign:values:*": { "subcaseMS": 0.343 },
+ "webgpu:shader,validation,expression,call,builtin,sin:integer_argument:*": { "subcaseMS": 1.189 },
+ "webgpu:shader,validation,expression,call,builtin,sin:values:*": { "subcaseMS": 0.349 },
+ "webgpu:shader,validation,expression,call,builtin,sinh:integer_argument:*": { "subcaseMS": 1.078 },
+ "webgpu:shader,validation,expression,call,builtin,sinh:values:*": { "subcaseMS": 0.357 },
+ "webgpu:shader,validation,expression,call,builtin,sqrt:integer_argument:*": { "subcaseMS": 1.356 },
+ "webgpu:shader,validation,expression,call,builtin,sqrt:values:*": { "subcaseMS": 0.302 },
+ "webgpu:shader,validation,expression,call,builtin,tan:integer_argument:*": { "subcaseMS": 1.734 },
+ "webgpu:shader,validation,expression,call,builtin,tan:values:*": { "subcaseMS": 0.350 },
+ "webgpu:shader,validation,functions,alias_analysis:aliasing_inside_function:*": { "subcaseMS": 1.200 },
+ "webgpu:shader,validation,functions,alias_analysis:member_accessors:*": { "subcaseMS": 1.656 },
+ "webgpu:shader,validation,functions,alias_analysis:one_pointer_one_module_scope:*": { "subcaseMS": 1.598 },
+ "webgpu:shader,validation,functions,alias_analysis:same_pointer_read_and_write:*": { "subcaseMS": 1.301 },
+ "webgpu:shader,validation,functions,alias_analysis:subcalls:*": { "subcaseMS": 1.673 },
+ "webgpu:shader,validation,functions,alias_analysis:two_pointers:*": { "subcaseMS": 1.537 },
+ "webgpu:shader,validation,functions,restrictions:call_arg_types_match_params:*": { "subcaseMS": 1.518 },
+ "webgpu:shader,validation,functions,restrictions:entry_point_call_target:*": { "subcaseMS": 1.734 },
+ "webgpu:shader,validation,functions,restrictions:function_parameter_matching:*": { "subcaseMS": 1.953 },
+ "webgpu:shader,validation,functions,restrictions:function_parameter_types:*": { "subcaseMS": 1.520 },
+ "webgpu:shader,validation,functions,restrictions:function_return_types:*": { "subcaseMS": 1.535 },
+ "webgpu:shader,validation,functions,restrictions:no_direct_recursion:*": { "subcaseMS": 2.500 },
+ "webgpu:shader,validation,functions,restrictions:no_indirect_recursion:*": { "subcaseMS": 1.900 },
+ "webgpu:shader,validation,functions,restrictions:param_names_must_differ:*": { "subcaseMS": 1.722 },
+ "webgpu:shader,validation,functions,restrictions:param_number_matches_call:*": { "subcaseMS": 1.803 },
+ "webgpu:shader,validation,functions,restrictions:param_scope_is_function_body:*": { "subcaseMS": 1.340 },
+ "webgpu:shader,validation,functions,restrictions:vertex_returns_position:*": { "subcaseMS": 1.201 },
+ "webgpu:shader,validation,parse,align:multi_align:*": { "subcaseMS": 1.200 },
+ "webgpu:shader,validation,parse,align:parsing:*": { "subcaseMS": 1.272 },
+ "webgpu:shader,validation,parse,align:placement:*": { "subcaseMS": 2.423 },
+ "webgpu:shader,validation,parse,align:required_alignment:*": { "subcaseMS": 1.653 },
+ "webgpu:shader,validation,parse,attribute:expressions:*": { "subcaseMS": 1.410 },
+ "webgpu:shader,validation,parse,binary_ops:all:*": { "subcaseMS": 1.301 },
+ "webgpu:shader,validation,parse,blankspace:blankspace:*": { "subcaseMS": 1.391 },
+ "webgpu:shader,validation,parse,blankspace:bom:*": { "subcaseMS": 1.101 },
+ "webgpu:shader,validation,parse,blankspace:null_characters:*": { "subcaseMS": 3.217 },
+ "webgpu:shader,validation,parse,break:placement:*": { "subcaseMS": 1.254 },
+ "webgpu:shader,validation,parse,builtin:parse:*": { "subcaseMS": 3.277 },
+ "webgpu:shader,validation,parse,builtin:placement:*": { "subcaseMS": 1.267 },
+ "webgpu:shader,validation,parse,comments:comments:*": { "subcaseMS": 1.000 },
+ "webgpu:shader,validation,parse,comments:line_comment_eof:*": { "subcaseMS": 4.500 },
+ "webgpu:shader,validation,parse,comments:line_comment_terminators:*": { "subcaseMS": 1.021 },
+ "webgpu:shader,validation,parse,comments:unterminated_block_comment:*": { "subcaseMS": 8.950 },
+ "webgpu:shader,validation,parse,const:placement:*": { "subcaseMS": 1.167 },
+ "webgpu:shader,validation,parse,const_assert:parse:*": { "subcaseMS": 1.400 },
+ "webgpu:shader,validation,parse,diagnostic:conflicting_attribute_different_location:*": { "subcaseMS": 2.257 },
+ "webgpu:shader,validation,parse,diagnostic:conflicting_attribute_same_location:*": { "subcaseMS": 1.400 },
+ "webgpu:shader,validation,parse,diagnostic:conflicting_directive:*": { "subcaseMS": 1.244 },
+ "webgpu:shader,validation,parse,diagnostic:invalid_locations:*": { "subcaseMS": 1.930 },
+ "webgpu:shader,validation,parse,diagnostic:invalid_severity:*": { "subcaseMS": 1.361 },
+ "webgpu:shader,validation,parse,diagnostic:valid_locations:*": { "subcaseMS": 1.368 },
+ "webgpu:shader,validation,parse,diagnostic:valid_params:*": { "subcaseMS": 1.475 },
+ "webgpu:shader,validation,parse,diagnostic:warning_unknown_rule:*": { "subcaseMS": 1.100 },
+ "webgpu:shader,validation,parse,discard:placement:*": { "subcaseMS": 3.357 },
+ "webgpu:shader,validation,parse,enable:enable:*": { "subcaseMS": 2.303 },
+ "webgpu:shader,validation,parse,identifiers:alias_name:*": { "subcaseMS": 1.262 },
+ "webgpu:shader,validation,parse,identifiers:function_const_name:*": { "subcaseMS": 1.298 },
+ "webgpu:shader,validation,parse,identifiers:function_let_name:*": { "subcaseMS": 1.299 },
+ "webgpu:shader,validation,parse,identifiers:function_name:*": { "subcaseMS": 1.242 },
+ "webgpu:shader,validation,parse,identifiers:function_param_name:*": { "subcaseMS": 1.219 },
+ "webgpu:shader,validation,parse,identifiers:function_var_name:*": { "subcaseMS": 1.326 },
+ "webgpu:shader,validation,parse,identifiers:module_const_name:*": { "subcaseMS": 1.211 },
+ "webgpu:shader,validation,parse,identifiers:module_var_name:*": { "subcaseMS": 1.218 },
+ "webgpu:shader,validation,parse,identifiers:non_normalized:*": { "subcaseMS": 1.101 },
+ "webgpu:shader,validation,parse,identifiers:override_name:*": { "subcaseMS": 1.228 },
+ "webgpu:shader,validation,parse,identifiers:struct_name:*": { "subcaseMS": 1.230 },
+ "webgpu:shader,validation,parse,literal:abstract_float:*": { "subcaseMS": 1.411 },
+ "webgpu:shader,validation,parse,literal:abstract_int:*": { "subcaseMS": 1.296 },
+ "webgpu:shader,validation,parse,literal:bools:*": { "subcaseMS": 2.901 },
+ "webgpu:shader,validation,parse,literal:f16:*": { "subcaseMS": 45.119 },
+ "webgpu:shader,validation,parse,literal:f32:*": { "subcaseMS": 1.393 },
+ "webgpu:shader,validation,parse,literal:i32:*": { "subcaseMS": 1.541 },
+ "webgpu:shader,validation,parse,literal:u32:*": { "subcaseMS": 1.379 },
+ "webgpu:shader,validation,parse,must_use:builtin_must_use:*": { "subcaseMS": 1.400 },
+ "webgpu:shader,validation,parse,must_use:builtin_no_must_use:*": { "subcaseMS": 1.206 },
+ "webgpu:shader,validation,parse,must_use:call:*": { "subcaseMS": 1.275 },
+ "webgpu:shader,validation,parse,must_use:declaration:*": { "subcaseMS": 1.523 },
+ "webgpu:shader,validation,parse,pipeline_stage:compute_parsing:*": { "subcaseMS": 1.000 },
+ "webgpu:shader,validation,parse,pipeline_stage:duplicate_compute_on_function:*": { "subcaseMS": 2.651 },
+ "webgpu:shader,validation,parse,pipeline_stage:duplicate_fragment_on_function:*": { "subcaseMS": 1.001 },
+ "webgpu:shader,validation,parse,pipeline_stage:duplicate_vertex_on_function:*": { "subcaseMS": 1.000 },
+ "webgpu:shader,validation,parse,pipeline_stage:fragment_parsing:*": { "subcaseMS": 2.600 },
+ "webgpu:shader,validation,parse,pipeline_stage:multiple_entry_points:*": { "subcaseMS": 1.100 },
+ "webgpu:shader,validation,parse,pipeline_stage:placement:*": { "subcaseMS": 1.388 },
+ "webgpu:shader,validation,parse,pipeline_stage:vertex_parsing:*": { "subcaseMS": 1.500 },
+ "webgpu:shader,validation,parse,semicolon:after_assignment:*": { "subcaseMS": 1.400 },
+ "webgpu:shader,validation,parse,semicolon:after_call:*": { "subcaseMS": 1.301 },
+ "webgpu:shader,validation,parse,semicolon:after_case:*": { "subcaseMS": 1.301 },
+ "webgpu:shader,validation,parse,semicolon:after_case_break:*": { "subcaseMS": 19.400 },
+ "webgpu:shader,validation,parse,semicolon:after_compound_statement:*": { "subcaseMS": 1.000 },
+ "webgpu:shader,validation,parse,semicolon:after_continuing:*": { "subcaseMS": 0.900 },
+ "webgpu:shader,validation,parse,semicolon:after_default_case:*": { "subcaseMS": 3.100 },
+ "webgpu:shader,validation,parse,semicolon:after_default_case_break:*": { "subcaseMS": 1.000 },
+ "webgpu:shader,validation,parse,semicolon:after_discard:*": { "subcaseMS": 4.400 },
+ "webgpu:shader,validation,parse,semicolon:after_enable:*": { "subcaseMS": 1.301 },
+ "webgpu:shader,validation,parse,semicolon:after_fn_const_assert:*": { "subcaseMS": 1.400 },
+ "webgpu:shader,validation,parse,semicolon:after_fn_const_decl:*": { "subcaseMS": 1.200 },
+ "webgpu:shader,validation,parse,semicolon:after_fn_var_decl:*": { "subcaseMS": 1.101 },
+ "webgpu:shader,validation,parse,semicolon:after_for:*": { "subcaseMS": 0.900 },
+ "webgpu:shader,validation,parse,semicolon:after_for_break:*": { "subcaseMS": 1.201 },
+ "webgpu:shader,validation,parse,semicolon:after_func_decl:*": { "subcaseMS": 1.200 },
+ "webgpu:shader,validation,parse,semicolon:after_if:*": { "subcaseMS": 1.100 },
+ "webgpu:shader,validation,parse,semicolon:after_if_else:*": { "subcaseMS": 1.000 },
+ "webgpu:shader,validation,parse,semicolon:after_let_decl:*": { "subcaseMS": 1.401 },
+ "webgpu:shader,validation,parse,semicolon:after_loop:*": { "subcaseMS": 1.200 },
+ "webgpu:shader,validation,parse,semicolon:after_loop_break:*": { "subcaseMS": 1.301 },
+ "webgpu:shader,validation,parse,semicolon:after_loop_break_if:*": { "subcaseMS": 1.000 },
+ "webgpu:shader,validation,parse,semicolon:after_loop_continue:*": { "subcaseMS": 1.000 },
+ "webgpu:shader,validation,parse,semicolon:after_member:*": { "subcaseMS": 4.801 },
+ "webgpu:shader,validation,parse,semicolon:after_module_const_decl:*": { "subcaseMS": 1.400 },
+ "webgpu:shader,validation,parse,semicolon:after_module_var_decl:*": { "subcaseMS": 0.901 },
+ "webgpu:shader,validation,parse,semicolon:after_return:*": { "subcaseMS": 1.201 },
+ "webgpu:shader,validation,parse,semicolon:after_struct_decl:*": { "subcaseMS": 1.000 },
+ "webgpu:shader,validation,parse,semicolon:after_switch:*": { "subcaseMS": 1.101 },
+ "webgpu:shader,validation,parse,semicolon:after_type_alias_decl:*": { "subcaseMS": 1.200 },
+ "webgpu:shader,validation,parse,semicolon:after_while:*": { "subcaseMS": 0.901 },
+ "webgpu:shader,validation,parse,semicolon:after_while_break:*": { "subcaseMS": 4.801 },
+ "webgpu:shader,validation,parse,semicolon:after_while_continue:*": { "subcaseMS": 1.200 },
+ "webgpu:shader,validation,parse,semicolon:compound_statement_multiple:*": { "subcaseMS": 0.800 },
+ "webgpu:shader,validation,parse,semicolon:compound_statement_single:*": { "subcaseMS": 1.000 },
+ "webgpu:shader,validation,parse,semicolon:function_body_multiple:*": { "subcaseMS": 0.900 },
+ "webgpu:shader,validation,parse,semicolon:function_body_single:*": { "subcaseMS": 0.800 },
+ "webgpu:shader,validation,parse,semicolon:module_scope_multiple:*": { "subcaseMS": 0.900 },
+ "webgpu:shader,validation,parse,semicolon:module_scope_single:*": { "subcaseMS": 2.100 },
+ "webgpu:shader,validation,parse,source:empty:*": { "subcaseMS": 1.101 },
+ "webgpu:shader,validation,parse,source:invalid_source:*": { "subcaseMS": 1.100 },
+ "webgpu:shader,validation,parse,source:valid_source:*": { "subcaseMS": 1.101 },
+ "webgpu:shader,validation,parse,unary_ops:all:*": { "subcaseMS": 1.000 },
+ "webgpu:shader,validation,parse,var_and_let:initializer_type:*": { "subcaseMS": 0.900 },
+ "webgpu:shader,validation,parse,var_and_let:var_access_mode_bad_other_template_contents:*": { "subcaseMS": 4.071 },
+ "webgpu:shader,validation,parse,var_and_let:var_access_mode_bad_template_delim:*": { "subcaseMS": 1.088 },
+ "webgpu:shader,validation,shader_io,binding:binding:*": { "subcaseMS": 1.240 },
+ "webgpu:shader,validation,shader_io,binding:binding_f16:*": { "subcaseMS": 0.500 },
+ "webgpu:shader,validation,shader_io,binding:binding_without_group:*": { "subcaseMS": 0.901 },
+ "webgpu:shader,validation,shader_io,builtins:duplicates:*": { "subcaseMS": 1.913 },
+ "webgpu:shader,validation,shader_io,builtins:missing_vertex_position:*": { "subcaseMS": 0.975 },
+ "webgpu:shader,validation,shader_io,builtins:nesting:*": { "subcaseMS": 2.700 },
+ "webgpu:shader,validation,shader_io,builtins:reuse_builtin_name:*": { "subcaseMS": 1.202 },
+ "webgpu:shader,validation,shader_io,builtins:stage_inout:*": { "subcaseMS": 1.231 },
+ "webgpu:shader,validation,shader_io,builtins:type:*": { "subcaseMS": 1.314 },
+ "webgpu:shader,validation,shader_io,entry_point:missing_attribute_on_param:*": { "subcaseMS": 4.801 },
+ "webgpu:shader,validation,shader_io,entry_point:missing_attribute_on_param_struct:*": { "subcaseMS": 4.676 },
+ "webgpu:shader,validation,shader_io,entry_point:missing_attribute_on_return_type:*": { "subcaseMS": 2.367 },
+ "webgpu:shader,validation,shader_io,entry_point:missing_attribute_on_return_type_struct:*": { "subcaseMS": 1.101 },
+ "webgpu:shader,validation,shader_io,entry_point:no_entry_point_provided:*": { "subcaseMS": 0.801 },
+ "webgpu:shader,validation,shader_io,group:group:*": { "subcaseMS": 1.355 },
+ "webgpu:shader,validation,shader_io,group:group_f16:*": { "subcaseMS": 0.400 },
+ "webgpu:shader,validation,shader_io,group:group_without_binding:*": { "subcaseMS": 1.100 },
+ "webgpu:shader,validation,shader_io,group_and_binding:binding_attributes:*": { "subcaseMS": 1.280 },
+ "webgpu:shader,validation,shader_io,group_and_binding:different_entry_points:*": { "subcaseMS": 1.833 },
+ "webgpu:shader,validation,shader_io,group_and_binding:function_scope:*": { "subcaseMS": 1.000 },
+ "webgpu:shader,validation,shader_io,group_and_binding:function_scope_texture:*": { "subcaseMS": 0.801 },
+ "webgpu:shader,validation,shader_io,group_and_binding:private_function_scope:*": { "subcaseMS": 1.000 },
+ "webgpu:shader,validation,shader_io,group_and_binding:private_module_scope:*": { "subcaseMS": 1.301 },
+ "webgpu:shader,validation,shader_io,group_and_binding:single_entry_point:*": { "subcaseMS": 1.380 },
+ "webgpu:shader,validation,shader_io,id:id:*": { "subcaseMS": 1.132 },
+ "webgpu:shader,validation,shader_io,id:id_fp16:*": { "subcaseMS": 1.001 },
+ "webgpu:shader,validation,shader_io,id:id_in_function:*": { "subcaseMS": 0.750 },
+ "webgpu:shader,validation,shader_io,id:id_non_override:*": { "subcaseMS": 0.767 },
+ "webgpu:shader,validation,shader_io,id:id_struct_member:*": { "subcaseMS": 0.900 },
+ "webgpu:shader,validation,shader_io,interpolate:duplicate:*": { "subcaseMS": 9.350 },
+ "webgpu:shader,validation,shader_io,interpolate:integral_types:*": { "subcaseMS": 1.657 },
+ "webgpu:shader,validation,shader_io,interpolate:interpolation_validation:*": { "subcaseMS": 1.193 },
+ "webgpu:shader,validation,shader_io,interpolate:require_location:*": { "subcaseMS": 3.000 },
+ "webgpu:shader,validation,shader_io,interpolate:type_and_sampling:*": { "subcaseMS": 1.383 },
+ "webgpu:shader,validation,shader_io,invariant:not_valid_on_user_defined_io:*": { "subcaseMS": 1.100 },
+ "webgpu:shader,validation,shader_io,invariant:parsing:*": { "subcaseMS": 1.438 },
+ "webgpu:shader,validation,shader_io,invariant:valid_only_with_vertex_position_builtin:*": { "subcaseMS": 1.461 },
+ "webgpu:shader,validation,shader_io,locations:duplicates:*": { "subcaseMS": 1.906 },
+ "webgpu:shader,validation,shader_io,locations:location_fp16:*": { "subcaseMS": 0.501 },
+ "webgpu:shader,validation,shader_io,locations:nesting:*": { "subcaseMS": 0.967 },
+ "webgpu:shader,validation,shader_io,locations:stage_inout:*": { "subcaseMS": 1.850 },
+ "webgpu:shader,validation,shader_io,locations:type:*": { "subcaseMS": 1.332 },
+ "webgpu:shader,validation,shader_io,locations:validation:*": { "subcaseMS": 1.296 },
+ "webgpu:shader,validation,shader_io,size:size:*": { "subcaseMS": 1.218 },
+ "webgpu:shader,validation,shader_io,size:size_fp16:*": { "subcaseMS": 1.500 },
+ "webgpu:shader,validation,shader_io,size:size_non_struct:*": { "subcaseMS": 0.929 },
+ "webgpu:shader,validation,shader_io,workgroup_size:workgroup_size:*": { "subcaseMS": 1.227 },
+ "webgpu:shader,validation,shader_io,workgroup_size:workgroup_size_const:*": { "subcaseMS": 3.400 },
+ "webgpu:shader,validation,shader_io,workgroup_size:workgroup_size_fp16:*": { "subcaseMS": 0.700 },
+ "webgpu:shader,validation,shader_io,workgroup_size:workgroup_size_fragment_shader:*": { "subcaseMS": 1.301 },
+ "webgpu:shader,validation,shader_io,workgroup_size:workgroup_size_function:*": { "subcaseMS": 0.800 },
+ "webgpu:shader,validation,shader_io,workgroup_size:workgroup_size_var:*": { "subcaseMS": 2.101 },
+ "webgpu:shader,validation,shader_io,workgroup_size:workgroup_size_vertex_shader:*": { "subcaseMS": 1.000 },
+ "webgpu:shader,validation,types,alias:no_direct_recursion:*": { "subcaseMS": 1.450 },
+ "webgpu:shader,validation,types,alias:no_indirect_recursion:*": { "subcaseMS": 1.000 },
+ "webgpu:shader,validation,types,alias:no_indirect_recursion_via_array_element:*": { "subcaseMS": 1.050 },
+ "webgpu:shader,validation,types,alias:no_indirect_recursion_via_array_size:*": { "subcaseMS": 2.851 },
+ "webgpu:shader,validation,types,alias:no_indirect_recursion_via_atomic:*": { "subcaseMS": 1.000 },
+ "webgpu:shader,validation,types,alias:no_indirect_recursion_via_matrix_element:*": { "subcaseMS": 0.851 },
+ "webgpu:shader,validation,types,alias:no_indirect_recursion_via_ptr_store_type:*": { "subcaseMS": 1.050 },
+ "webgpu:shader,validation,types,alias:no_indirect_recursion_via_struct_attribute:*": { "subcaseMS": 1.584 },
+ "webgpu:shader,validation,types,alias:no_indirect_recursion_via_struct_member:*": { "subcaseMS": 1.000 },
+ "webgpu:shader,validation,types,alias:no_indirect_recursion_via_vector_element:*": { "subcaseMS": 1.050 },
+ "webgpu:shader,validation,types,struct:no_direct_recursion:*": { "subcaseMS": 0.951 },
+ "webgpu:shader,validation,types,struct:no_indirect_recursion:*": { "subcaseMS": 0.901 },
+ "webgpu:shader,validation,types,struct:no_indirect_recursion_via_array_element:*": { "subcaseMS": 0.901 },
+ "webgpu:shader,validation,types,struct:no_indirect_recursion_via_array_size:*": { "subcaseMS": 0.900 },
+ "webgpu:shader,validation,types,struct:no_indirect_recursion_via_struct_attribute:*": { "subcaseMS": 1.467 },
+ "webgpu:shader,validation,types,struct:no_indirect_recursion_via_struct_member_nested_in_alias:*": { "subcaseMS": 0.950 },
+ "webgpu:shader,validation,types,vector:vector:*": { "subcaseMS": 1.295 },
+ "webgpu:shader,validation,uniformity,uniformity:basics:*": { "subcaseMS": 1.467 },
+ "webgpu:shader,validation,uniformity,uniformity:binary_expressions:*": { "subcaseMS": 1.758 },
+ "webgpu:shader,validation,uniformity,uniformity:compute_builtin_values:*": { "subcaseMS": 2.500 },
+ "webgpu:shader,validation,uniformity,uniformity:fragment_builtin_values:*": { "subcaseMS": 1.300 },
+ "webgpu:shader,validation,uniformity,uniformity:function_pointer_parameters:*": { "subcaseMS": 1.546 },
+ "webgpu:shader,validation,uniformity,uniformity:function_variables:*": { "subcaseMS": 1.573 },
+ "webgpu:shader,validation,uniformity,uniformity:functions:*": { "subcaseMS": 1.303 },
+ "webgpu:shader,validation,uniformity,uniformity:pointers:*": { "subcaseMS": 1.738 },
+ "webgpu:shader,validation,uniformity,uniformity:short_circuit_expressions:*": { "subcaseMS": 1.401 },
+ "webgpu:shader,validation,uniformity,uniformity:unary_expressions:*": { "subcaseMS": 1.279 },
+ "webgpu:util,texture,texel_data:float_texel_data_in_shader:*": { "subcaseMS": 2.042 },
+ "webgpu:util,texture,texel_data:sint_texel_data_in_shader:*": { "subcaseMS": 2.573 },
+ "webgpu:util,texture,texel_data:snorm_texel_data_in_shader:*": { "subcaseMS": 4.645 },
+ "webgpu:util,texture,texel_data:ufloat_texel_data_in_shader:*": { "subcaseMS": 2.908 },
+ "webgpu:util,texture,texel_data:uint_texel_data_in_shader:*": { "subcaseMS": 4.106 },
+ "webgpu:util,texture,texel_data:unorm_texel_data_in_shader:*": { "subcaseMS": 5.179 },
+ "webgpu:util,texture,texture_ok:float32:*": { "subcaseMS": 1.655 },
+ "webgpu:util,texture,texture_ok:norm:*": { "subcaseMS": 4.019 },
+ "webgpu:util,texture,texture_ok:snorm_min:*": { "subcaseMS": 17.250 },
+ "webgpu:web_platform,canvas,configure:alpha_mode:*": { "subcaseMS": 4.075 },
+ "webgpu:web_platform,canvas,configure:defaults:*": { "subcaseMS": 8.800 },
+ "webgpu:web_platform,canvas,configure:device:*": { "subcaseMS": 14.800 },
+ "webgpu:web_platform,canvas,configure:format:*": { "subcaseMS": 5.455 },
+ "webgpu:web_platform,canvas,configure:size_zero_after_configure:*": { "subcaseMS": 4.425 },
+ "webgpu:web_platform,canvas,configure:size_zero_before_configure:*": { "subcaseMS": 8.400 },
+ "webgpu:web_platform,canvas,configure:usage:*": { "subcaseMS": 1.087 },
+ "webgpu:web_platform,canvas,configure:viewFormats:*": { "subcaseMS": 0.899 },
+ "webgpu:web_platform,canvas,context_creation:return_type:*": { "subcaseMS": 0.700 },
+ "webgpu:web_platform,canvas,getCurrentTexture:configured:*": { "subcaseMS": 13.000 },
+ "webgpu:web_platform,canvas,getCurrentTexture:expiry:*": { "subcaseMS": 2.925 },
+ "webgpu:web_platform,canvas,getCurrentTexture:multiple_frames:*": { "subcaseMS": 32.400 },
+ "webgpu:web_platform,canvas,getCurrentTexture:resize:*": { "subcaseMS": 16.601 },
+ "webgpu:web_platform,canvas,getCurrentTexture:single_frames:*": { "subcaseMS": 10.800 },
+ "webgpu:web_platform,canvas,getPreferredCanvasFormat:value:*": { "subcaseMS": 0.200 },
+ "webgpu:web_platform,canvas,readbackFromWebGPUCanvas:drawTo2DCanvas:*": { "subcaseMS": 12.963 },
+ "webgpu:web_platform,canvas,readbackFromWebGPUCanvas:offscreenCanvas,snapshot:*": { "subcaseMS": 27.148 },
+ "webgpu:web_platform,canvas,readbackFromWebGPUCanvas:onscreenCanvas,snapshot:*": { "subcaseMS": 36.364 },
+ "webgpu:web_platform,canvas,readbackFromWebGPUCanvas:onscreenCanvas,uploadToWebGL:*": { "subcaseMS": 15.859 },
+ "webgpu:web_platform,canvas,readbackFromWebGPUCanvas:transferToImageBitmap_huge_size:*": { "subcaseMS": 571.100 },
+ "webgpu:web_platform,canvas,readbackFromWebGPUCanvas:transferToImageBitmap_unconfigured_nonzero_size:*": { "subcaseMS": 3.200 },
+ "webgpu:web_platform,canvas,readbackFromWebGPUCanvas:transferToImageBitmap_zero_size:*": { "subcaseMS": 7.551 },
+ "webgpu:web_platform,copyToTexture,ImageBitmap:copy_subrect_from_2D_Canvas:*": { "subcaseMS": 5.329 },
+ "webgpu:web_platform,copyToTexture,ImageBitmap:copy_subrect_from_ImageData:*": { "subcaseMS": 3.295 },
+ "webgpu:web_platform,copyToTexture,ImageBitmap:from_ImageData:*": { "subcaseMS": 15.061 },
+ "webgpu:web_platform,copyToTexture,ImageBitmap:from_canvas:*": { "subcaseMS": 21.438 },
+ "webgpu:web_platform,copyToTexture,ImageData:copy_subrect_from_ImageData:*": { "subcaseMS": 3.167 },
+ "webgpu:web_platform,copyToTexture,ImageData:from_ImageData:*": { "subcaseMS": 27.268 },
+ "webgpu:web_platform,copyToTexture,canvas:color_space_conversion:*": { "subcaseMS": 15.391 },
+ "webgpu:web_platform,copyToTexture,canvas:copy_contents_from_2d_context_canvas:*": { "subcaseMS": 3.437 },
+ "webgpu:web_platform,copyToTexture,canvas:copy_contents_from_bitmaprenderer_context_canvas:*": { "subcaseMS": 3.504 },
+ "webgpu:web_platform,copyToTexture,canvas:copy_contents_from_gl_context_canvas:*": { "subcaseMS": 14.659 },
+ "webgpu:web_platform,copyToTexture,canvas:copy_contents_from_gpu_context_canvas:*": { "subcaseMS": 1.859 },
+ "webgpu:web_platform,copyToTexture,image:copy_subrect_from_2D_Canvas:*": { "subcaseMS": 8.754 },
+ "webgpu:web_platform,copyToTexture,image:from_image:*": { "subcaseMS": 21.869 },
+ "webgpu:web_platform,copyToTexture,video:copy_from_video:*": { "subcaseMS": 25.101 },
+ "webgpu:web_platform,external_texture,video:importExternalTexture,compute:*": { "subcaseMS": 36.270 },
+ "webgpu:web_platform,external_texture,video:importExternalTexture,sample:*": { "subcaseMS": 33.380 },
+ "webgpu:web_platform,external_texture,video:importExternalTexture,sampleWithRotationMetadata:*": { "subcaseMS": 34.968 },
+ "webgpu:web_platform,external_texture,video:importExternalTexture,sampleWithVideoFrameWithVisibleRectParam:*": { "subcaseMS": 29.160 },
+ "webgpu:web_platform,worker,worker:worker:*": { "subcaseMS": 245.901 },
+ "_end": ""
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/README.txt
new file mode 100644
index 0000000000..3aadba27ee
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/README.txt
@@ -0,0 +1 @@
+Tests for full coverage of the shaders that can be passed to WebGPU.
\ No newline at end of file
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/README.txt
new file mode 100644
index 0000000000..52598f02ab
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/README.txt
@@ -0,0 +1 @@
+Tests that check the result of valid shader execution.
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_addition.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_addition.spec.ts
new file mode 100644
index 0000000000..0f703f0889
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_addition.spec.ts
@@ -0,0 +1,154 @@
+export const description = `
+Execution Tests for non-matrix AbstractFloat addition expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeAbstractFloat, TypeVec } from '../../../../util/conversion.js';
+import { FP, FPVector } from '../../../../util/floating_point.js';
+import { sparseF64Range, sparseVectorF64Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { onlyConstInputSource, run } from '../expression.js';
+
+import { abstractBinary } from './binary.js';
+
+const additionVectorScalarInterval = (v: readonly number[], s: number): FPVector => {
+ return FP.abstract.toVector(v.map(e => FP.abstract.additionInterval(e, s)));
+};
+
+const additionScalarVectorInterval = (s: number, v: readonly number[]): FPVector => {
+ return FP.abstract.toVector(v.map(e => FP.abstract.additionInterval(s, e)));
+};
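+
+// A minimal sketch of what the helpers above produce: for v = [1.0, 2.0] and s = 0.5,
+// additionVectorScalarInterval(v, s) yields
+// FP.abstract.toVector([FP.abstract.additionInterval(1.0, 0.5), FP.abstract.additionInterval(2.0, 0.5)]),
+// so each component of a vector result is checked against its own scalar acceptance
+// interval; additionScalarVectorInterval is the same with the operands swapped.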
+
+export const g = makeTestGroup(GPUTest);
+
+const scalar_cases = {
+ ['scalar']: () => {
+ return FP.abstract.generateScalarPairToIntervalCases(
+ sparseF64Range(),
+ sparseF64Range(),
+ 'finite',
+ FP.abstract.additionInterval
+ );
+ },
+};
+
+const vector_scalar_cases = ([2, 3, 4] as const)
+ .map(dim => ({
+ [`vec${dim}_scalar`]: () => {
+ return FP.abstract.generateVectorScalarToVectorCases(
+ sparseVectorF64Range(dim),
+ sparseF64Range(),
+ 'finite',
+ additionVectorScalarInterval
+ );
+ },
+ }))
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const scalar_vector_cases = ([2, 3, 4] as const)
+ .map(dim => ({
+ [`scalar_vec${dim}`]: () => {
+ return FP.abstract.generateScalarVectorToVectorCases(
+ sparseF64Range(),
+ sparseVectorF64Range(dim),
+ 'finite',
+ additionScalarVectorInterval
+ );
+ },
+ }))
+ .reduce((a, b) => ({ ...a, ...b }), {});
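+
+// The map/reduce above assembles one record of named case generators: mapping over
+// [2, 3, 4] yields single-key objects such as { vec2_scalar: ... } or { scalar_vec4: ... },
+// and the reduce spreads them together so the cache below can be queried per dimension
+// (see d.get(`vec${dim}_scalar`) and d.get(`scalar_vec${dim}`) in the tests further down).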
+
+export const d = makeCaseCache('binary/af_addition', {
+ ...scalar_cases,
+ ...vector_scalar_cases,
+ ...scalar_vector_cases,
+});
+
+g.test('scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x + y, where x and y are scalars
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource))
+ .fn(async t => {
+ const cases = await d.get('scalar');
+ await run(
+ t,
+ abstractBinary('+'),
+ [TypeAbstractFloat, TypeAbstractFloat],
+ TypeAbstractFloat,
+ t.params,
+ cases
+ );
+ });
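+
+// Note: every test in this file combines over onlyConstInputSource. AbstractFloat values
+// exist only during const-expression evaluation, so there is no runtime input source
+// (uniform, storage, etc.) that could supply them; the non-const sources are assumed here
+// to be inapplicable rather than merely untested.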
+
+g.test('vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x + y, where x and y are vectors
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u.combine('inputSource', onlyConstInputSource).combine('vectorize', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('scalar'); // Using vectorize to generate vector cases based on scalar cases
+ await run(
+ t,
+ abstractBinary('+'),
+ [TypeAbstractFloat, TypeAbstractFloat],
+ TypeAbstractFloat,
+ t.params,
+ cases
+ );
+ });
+
+g.test('vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x + y, where x is a vector and y is a scalar
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(`vec${dim}_scalar`);
+ await run(
+ t,
+ abstractBinary('+'),
+ [TypeVec(dim, TypeAbstractFloat), TypeAbstractFloat],
+ TypeVec(dim, TypeAbstractFloat),
+ t.params,
+ cases
+ );
+ });
+
+g.test('scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x + y, where x is a scalar and y is a vector
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(`scalar_vec${dim}`);
+ await run(
+ t,
+ abstractBinary('+'),
+ [TypeAbstractFloat, TypeVec(dim, TypeAbstractFloat)],
+ TypeVec(dim, TypeAbstractFloat),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_comparison.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_comparison.spec.ts
new file mode 100644
index 0000000000..5b8b1637b9
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_comparison.spec.ts
@@ -0,0 +1,214 @@
+export const description = `
+Execution Tests for the AbstractFloat comparison operations
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { anyOf } from '../../../../util/compare.js';
+import {
+ abstractFloat,
+ bool,
+ Scalar,
+ TypeAbstractFloat,
+ TypeBool,
+} from '../../../../util/conversion.js';
+import { flushSubnormalNumberF64, vectorF64Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, Case, run } from '../expression.js';
+
+import { binary } from './binary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+/**
+ * @returns a test case for the provided left hand & right hand values and truth function.
+ * Handles quantization and subnormals.
+ */
+function makeCase(
+ lhs: number,
+ rhs: number,
+ truthFunc: (lhs: Scalar, rhs: Scalar) => boolean
+): Case {
+ // Subnormal float values may be flushed at any time.
+ // https://www.w3.org/TR/WGSL/#floating-point-evaluation
+ const af_lhs = abstractFloat(lhs);
+ const af_rhs = abstractFloat(rhs);
+ const lhs_options = new Set([af_lhs, abstractFloat(flushSubnormalNumberF64(lhs))]);
+ const rhs_options = new Set([af_rhs, abstractFloat(flushSubnormalNumberF64(rhs))]);
+ const expected: Array<Scalar> = [];
+ lhs_options.forEach(l => {
+ rhs_options.forEach(r => {
+ const result = bool(truthFunc(l, r));
+ if (!expected.includes(result)) {
+ expected.push(result);
+ }
+ });
+ });
+
+ return { input: [af_lhs, af_rhs], expected: anyOf(...expected) };
+}
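+
+// A worked example (values chosen purely for illustration) of why the expected value is
+// anyOf(...): with lhs = -5e-324 (a subnormal) and rhs = 0 under the less_than truth
+// function, the unflushed pair evaluates to true while the flushed pair compares 0 < 0 and
+// evaluates to false, so either answer from the implementation must be accepted.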
+
+export const d = makeCaseCache('binary/af_logical', {
+ equals: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) === (rhs.value as number);
+ };
+
+ return vectorF64Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ not_equals: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) !== (rhs.value as number);
+ };
+
+ return vectorF64Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ less_than: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) < (rhs.value as number);
+ };
+
+ return vectorF64Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ less_equals: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) <= (rhs.value as number);
+ };
+
+ return vectorF64Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ greater_than: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) > (rhs.value as number);
+ };
+
+ return vectorF64Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ greater_equals: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) >= (rhs.value as number);
+ };
+
+ return vectorF64Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+});
+
+g.test('equals')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x == y
+Accuracy: Correct result
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', [allInputSources[0]] /* const */)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('equals');
+ await run(t, binary('=='), [TypeAbstractFloat, TypeAbstractFloat], TypeBool, t.params, cases);
+ });
+
+g.test('not_equals')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x != y
+Accuracy: Correct result
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', [allInputSources[0]] /* const */)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('not_equals');
+ await run(t, binary('!='), [TypeAbstractFloat, TypeAbstractFloat], TypeBool, t.params, cases);
+ });
+
+g.test('less_than')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x < y
+Accuracy: Correct result
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', [allInputSources[0]] /* const */)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('less_than');
+ await run(t, binary('<'), [TypeAbstractFloat, TypeAbstractFloat], TypeBool, t.params, cases);
+ });
+
+g.test('less_equals')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x <= y
+Accuracy: Correct result
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', [allInputSources[0]] /* const */)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('less_equals');
+ await run(t, binary('<='), [TypeAbstractFloat, TypeAbstractFloat], TypeBool, t.params, cases);
+ });
+
+g.test('greater_than')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x > y
+Accuracy: Correct result
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', [allInputSources[0]] /* const */)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('greater_than');
+ await run(t, binary('>'), [TypeAbstractFloat, TypeAbstractFloat], TypeBool, t.params, cases);
+ });
+
+g.test('greater_equals')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x >= y
+Accuracy: Correct result
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', [allInputSources[0]] /* const */)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('greater_equals');
+ await run(t, binary('>='), [TypeAbstractFloat, TypeAbstractFloat], TypeBool, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_division.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_division.spec.ts
new file mode 100644
index 0000000000..4c1765d203
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_division.spec.ts
@@ -0,0 +1,154 @@
+export const description = `
+Execution Tests for non-matrix AbstractFloat division expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeAbstractFloat, TypeVec } from '../../../../util/conversion.js';
+import { FP, FPVector } from '../../../../util/floating_point.js';
+import { sparseF64Range, sparseVectorF64Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { onlyConstInputSource, run } from '../expression.js';
+
+import { abstractBinary } from './binary.js';
+
+const divisionVectorScalarInterval = (v: readonly number[], s: number): FPVector => {
+ return FP.abstract.toVector(v.map(e => FP.abstract.divisionInterval(e, s)));
+};
+
+const divisionScalarVectorInterval = (s: number, v: readonly number[]): FPVector => {
+ return FP.abstract.toVector(v.map(e => FP.abstract.divisionInterval(s, e)));
+};
+
+export const g = makeTestGroup(GPUTest);
+
+const scalar_cases = {
+ ['scalar']: () => {
+ return FP.abstract.generateScalarPairToIntervalCases(
+ sparseF64Range(),
+ sparseF64Range(),
+ 'finite',
+ FP.abstract.divisionInterval
+ );
+ },
+};
+
+const vector_scalar_cases = ([2, 3, 4] as const)
+ .map(dim => ({
+ [`vec${dim}_scalar`]: () => {
+ return FP.abstract.generateVectorScalarToVectorCases(
+ sparseVectorF64Range(dim),
+ sparseF64Range(),
+ 'finite',
+ divisionVectorScalarInterval
+ );
+ },
+ }))
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const scalar_vector_cases = ([2, 3, 4] as const)
+ .map(dim => ({
+ [`scalar_vec${dim}`]: () => {
+ return FP.abstract.generateScalarVectorToVectorCases(
+ sparseF64Range(),
+ sparseVectorF64Range(dim),
+ 'finite',
+ divisionScalarVectorInterval
+ );
+ },
+ }))
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/af_division', {
+ ...scalar_cases,
+ ...vector_scalar_cases,
+ ...scalar_vector_cases,
+});
+
+g.test('scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x / y, where x and y are scalars
+Accuracy: 2.5 ULP for |y| in the range [2^-126, 2^126]
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource))
+ .fn(async t => {
+ const cases = await d.get('scalar');
+ await run(
+ t,
+ abstractBinary('/'),
+ [TypeAbstractFloat, TypeAbstractFloat],
+ TypeAbstractFloat,
+ t.params,
+ cases
+ );
+ });
+
+g.test('vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x / y, where x and y are vectors
+Accuracy: 2.5 ULP for |y| in the range [2^-126, 2^126]
+`
+ )
+ .params(u =>
+ u.combine('inputSource', onlyConstInputSource).combine('vectorize', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('scalar'); // Using vectorize to generate vector cases based on scalar cases
+ await run(
+ t,
+ abstractBinary('/'),
+ [TypeAbstractFloat, TypeAbstractFloat],
+ TypeAbstractFloat,
+ t.params,
+ cases
+ );
+ });
+
+g.test('vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x / y, where x is a vector and y is a scalar
+Accuracy: 2.5 ULP for |y| in the range [2^-126, 2^126]
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(`vec${dim}_scalar`);
+ await run(
+ t,
+ abstractBinary('/'),
+ [TypeVec(dim, TypeAbstractFloat), TypeAbstractFloat],
+ TypeVec(dim, TypeAbstractFloat),
+ t.params,
+ cases
+ );
+ });
+
+g.test('scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x / y, where x is a scalar and y is a vector
+Accuracy: 2.5 ULP for |y| in the range [2^-126, 2^126]
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(`scalar_vec${dim}`);
+ await run(
+ t,
+ abstractBinary('/'),
+ [TypeAbstractFloat, TypeVec(dim, TypeAbstractFloat)],
+ TypeVec(dim, TypeAbstractFloat),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_matrix_addition.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_matrix_addition.spec.ts
new file mode 100644
index 0000000000..86bddec894
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_matrix_addition.spec.ts
@@ -0,0 +1,61 @@
+export const description = `
+Execution Tests for matrix AbstractFloat addition expressions
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeAbstractFloat, TypeMat } from '../../../../util/conversion.js';
+import { FP } from '../../../../util/floating_point.js';
+import { sparseMatrixF64Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { onlyConstInputSource, run } from '../expression.js';
+
+import { abstractBinary } from './binary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Cases: matCxR
+const mat_cases = ([2, 3, 4] as const)
+ .flatMap(cols =>
+ ([2, 3, 4] as const).map(rows => ({
+ [`mat${cols}x${rows}`]: () => {
+ return FP.abstract.generateMatrixPairToMatrixCases(
+ sparseMatrixF64Range(cols, rows),
+ sparseMatrixF64Range(cols, rows),
+ 'finite',
+ FP.abstract.additionMatrixMatrixInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/af_matrix_addition', mat_cases);
+
+g.test('matrix')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x + y, where x and y are matrices
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', onlyConstInputSource)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(`mat${cols}x${rows}`);
+ await run(
+ t,
+ abstractBinary('+'),
+ [TypeMat(cols, rows, TypeAbstractFloat), TypeMat(cols, rows, TypeAbstractFloat)],
+ TypeMat(cols, rows, TypeAbstractFloat),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_matrix_subtraction.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_matrix_subtraction.spec.ts
new file mode 100644
index 0000000000..849c11611f
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_matrix_subtraction.spec.ts
@@ -0,0 +1,61 @@
+export const description = `
+Execution Tests for matrix AbstractFloat subtraction expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeAbstractFloat, TypeMat } from '../../../../util/conversion.js';
+import { FP } from '../../../../util/floating_point.js';
+import { sparseMatrixF64Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { onlyConstInputSource, run } from '../expression.js';
+
+import { abstractBinary } from './binary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Cases: matCxR
+const mat_cases = ([2, 3, 4] as const)
+ .flatMap(cols =>
+ ([2, 3, 4] as const).map(rows => ({
+ [`mat${cols}x${rows}`]: () => {
+ return FP.abstract.generateMatrixPairToMatrixCases(
+ sparseMatrixF64Range(cols, rows),
+ sparseMatrixF64Range(cols, rows),
+ 'finite',
+ FP.abstract.subtractionMatrixMatrixInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/af_matrix_subtraction', mat_cases);
+
+g.test('matrix')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x - y, where x and y are matrices
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', onlyConstInputSource)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(`mat${cols}x${rows}`);
+ await run(
+ t,
+ abstractBinary('-'),
+ [TypeMat(cols, rows, TypeAbstractFloat), TypeMat(cols, rows, TypeAbstractFloat)],
+ TypeMat(cols, rows, TypeAbstractFloat),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_multiplication.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_multiplication.spec.ts
new file mode 100644
index 0000000000..6b15812703
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_multiplication.spec.ts
@@ -0,0 +1,154 @@
+export const description = `
+Execution Tests for non-matrix AbstractFloat multiplication expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeAbstractFloat, TypeVec } from '../../../../util/conversion.js';
+import { FP, FPVector } from '../../../../util/floating_point.js';
+import { sparseF64Range, sparseVectorF64Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { onlyConstInputSource, run } from '../expression.js';
+
+import { abstractBinary } from './binary.js';
+
+const multiplicationVectorScalarInterval = (v: readonly number[], s: number): FPVector => {
+ return FP.abstract.toVector(v.map(e => FP.abstract.multiplicationInterval(e, s)));
+};
+
+const multiplicationScalarVectorInterval = (s: number, v: readonly number[]): FPVector => {
+ return FP.abstract.toVector(v.map(e => FP.abstract.multiplicationInterval(s, e)));
+};
+
+export const g = makeTestGroup(GPUTest);
+
+const scalar_cases = {
+ ['scalar']: () => {
+ return FP.abstract.generateScalarPairToIntervalCases(
+ sparseF64Range(),
+ sparseF64Range(),
+ 'finite',
+ FP.abstract.multiplicationInterval
+ );
+ },
+};
+
+const vector_scalar_cases = ([2, 3, 4] as const)
+ .map(dim => ({
+ [`vec${dim}_scalar`]: () => {
+ return FP.abstract.generateVectorScalarToVectorCases(
+ sparseVectorF64Range(dim),
+ sparseF64Range(),
+ 'finite',
+ multiplicationVectorScalarInterval
+ );
+ },
+ }))
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const scalar_vector_cases = ([2, 3, 4] as const)
+ .map(dim => ({
+ [`scalar_vec${dim}`]: () => {
+ return FP.abstract.generateScalarVectorToVectorCases(
+ sparseF64Range(),
+ sparseVectorF64Range(dim),
+ 'finite',
+ multiplicationScalarVectorInterval
+ );
+ },
+ }))
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/af_multiplication', {
+ ...scalar_cases,
+ ...vector_scalar_cases,
+ ...scalar_vector_cases,
+});
+
+g.test('scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x * y, where x and y are scalars
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource))
+ .fn(async t => {
+ const cases = await d.get('scalar');
+ await run(
+ t,
+ abstractBinary('*'),
+ [TypeAbstractFloat, TypeAbstractFloat],
+ TypeAbstractFloat,
+ t.params,
+ cases
+ );
+ });
+
+g.test('vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x * y, where x and y are vectors
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u.combine('inputSource', onlyConstInputSource).combine('vectorize', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('scalar'); // Using vectorize to generate vector cases based on scalar cases
+ await run(
+ t,
+ abstractBinary('*'),
+ [TypeAbstractFloat, TypeAbstractFloat],
+ TypeAbstractFloat,
+ t.params,
+ cases
+ );
+ });
+
+g.test('vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x * y, where x is a vector and y is a scalar
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(`vec${dim}_scalar`);
+ await run(
+ t,
+ abstractBinary('*'),
+ [TypeVec(dim, TypeAbstractFloat), TypeAbstractFloat],
+ TypeVec(dim, TypeAbstractFloat),
+ t.params,
+ cases
+ );
+ });
+
+g.test('scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x * y, where x is a scalar and y is a vector
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(`scalar_vec${dim}`);
+ await run(
+ t,
+ abstractBinary('*'),
+ [TypeAbstractFloat, TypeVec(dim, TypeAbstractFloat)],
+ TypeVec(dim, TypeAbstractFloat),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_remainder.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_remainder.spec.ts
new file mode 100644
index 0000000000..b4ce930bdb
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_remainder.spec.ts
@@ -0,0 +1,154 @@
+export const description = `
+Execution Tests for non-matrix AbstractFloat remainder expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeAbstractFloat, TypeVec } from '../../../../util/conversion.js';
+import { FP, FPVector } from '../../../../util/floating_point.js';
+import { sparseF64Range, sparseVectorF64Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { onlyConstInputSource, run } from '../expression.js';
+
+import { abstractBinary } from './binary.js';
+
+const remainderVectorScalarInterval = (v: readonly number[], s: number): FPVector => {
+ return FP.abstract.toVector(v.map(e => FP.abstract.remainderInterval(e, s)));
+};
+
+const remainderScalarVectorInterval = (s: number, v: readonly number[]): FPVector => {
+ return FP.abstract.toVector(v.map(e => FP.abstract.remainderInterval(s, e)));
+};
+
+export const g = makeTestGroup(GPUTest);
+
+const scalar_cases = {
+ ['scalar']: () => {
+ return FP.abstract.generateScalarPairToIntervalCases(
+ sparseF64Range(),
+ sparseF64Range(),
+ 'finite',
+ FP.abstract.remainderInterval
+ );
+ },
+};
+
+const vector_scalar_cases = ([2, 3, 4] as const)
+ .map(dim => ({
+ [`vec${dim}_scalar`]: () => {
+ return FP.abstract.generateVectorScalarToVectorCases(
+ sparseVectorF64Range(dim),
+ sparseF64Range(),
+ 'finite',
+ remainderVectorScalarInterval
+ );
+ },
+ }))
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const scalar_vector_cases = ([2, 3, 4] as const)
+ .map(dim => ({
+ [`scalar_vec${dim}`]: () => {
+ return FP.abstract.generateScalarVectorToVectorCases(
+ sparseF64Range(),
+ sparseVectorF64Range(dim),
+ 'finite',
+ remainderScalarVectorInterval
+ );
+ },
+ }))
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/af_remainder', {
+ ...scalar_cases,
+ ...vector_scalar_cases,
+ ...scalar_vector_cases,
+});
+
+g.test('scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x % y, where x and y are scalars
+Accuracy: Derived from x - y * trunc(x/y)
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource))
+ .fn(async t => {
+ const cases = await d.get('scalar');
+ await run(
+ t,
+ abstractBinary('%'),
+ [TypeAbstractFloat, TypeAbstractFloat],
+ TypeAbstractFloat,
+ t.params,
+ cases
+ );
+ });
+
+g.test('vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x % y, where x and y are vectors
+Accuracy: Derived from x - y * trunc(x/y)
+`
+ )
+ .params(u =>
+ u.combine('inputSource', onlyConstInputSource).combine('vectorize', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('scalar'); // Using vectorize to generate vector cases based on scalar cases
+ await run(
+ t,
+ abstractBinary('%'),
+ [TypeAbstractFloat, TypeAbstractFloat],
+ TypeAbstractFloat,
+ t.params,
+ cases
+ );
+ });
+
+g.test('vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x % y, where x is a vector and y is a scalar
+Accuracy: Derived from x - y * trunc(x/y)
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(`vec${dim}_scalar`);
+ await run(
+ t,
+ abstractBinary('%'),
+ [TypeVec(dim, TypeAbstractFloat), TypeAbstractFloat],
+ TypeVec(dim, TypeAbstractFloat),
+ t.params,
+ cases
+ );
+ });
+
+g.test('scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x % y, where x is a scalar and y is a vector
+Accuracy: Derived from x - y * trunc(x/y)
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(`scalar_vec${dim}`);
+ await run(
+ t,
+ abstractBinary('%'),
+ [TypeAbstractFloat, TypeVec(dim, TypeAbstractFloat)],
+ TypeVec(dim, TypeAbstractFloat),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_subtraction.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_subtraction.spec.ts
new file mode 100644
index 0000000000..00dc66feb9
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/af_subtraction.spec.ts
@@ -0,0 +1,154 @@
+export const description = `
+Execution Tests for non-matrix AbstractFloat subtraction expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeAbstractFloat, TypeVec } from '../../../../util/conversion.js';
+import { FP, FPVector } from '../../../../util/floating_point.js';
+import { sparseF64Range, sparseVectorF64Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { onlyConstInputSource, run } from '../expression.js';
+
+import { abstractBinary } from './binary.js';
+
+const subtractionVectorScalarInterval = (v: readonly number[], s: number): FPVector => {
+ return FP.abstract.toVector(v.map(e => FP.abstract.subtractionInterval(e, s)));
+};
+
+const subtractionScalarVectorInterval = (s: number, v: readonly number[]): FPVector => {
+ return FP.abstract.toVector(v.map(e => FP.abstract.subtractionInterval(s, e)));
+};
+
+export const g = makeTestGroup(GPUTest);
+
+const scalar_cases = {
+ ['scalar']: () => {
+ return FP.abstract.generateScalarPairToIntervalCases(
+ sparseF64Range(),
+ sparseF64Range(),
+ 'finite',
+ FP.abstract.subtractionInterval
+ );
+ },
+};
+
+const vector_scalar_cases = ([2, 3, 4] as const)
+ .map(dim => ({
+ [`vec${dim}_scalar`]: () => {
+ return FP.abstract.generateVectorScalarToVectorCases(
+ sparseVectorF64Range(dim),
+ sparseF64Range(),
+ 'finite',
+ subtractionVectorScalarInterval
+ );
+ },
+ }))
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const scalar_vector_cases = ([2, 3, 4] as const)
+ .map(dim => ({
+ [`scalar_vec${dim}`]: () => {
+ return FP.abstract.generateScalarVectorToVectorCases(
+ sparseF64Range(),
+ sparseVectorF64Range(dim),
+ 'finite',
+ subtractionScalarVectorInterval
+ );
+ },
+ }))
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/af_subtraction', {
+ ...scalar_cases,
+ ...vector_scalar_cases,
+ ...scalar_vector_cases,
+});
+
+g.test('scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x - y, where x and y are scalars
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource))
+ .fn(async t => {
+ const cases = await d.get('scalar');
+ await run(
+ t,
+ abstractBinary('-'),
+ [TypeAbstractFloat, TypeAbstractFloat],
+ TypeAbstractFloat,
+ t.params,
+ cases
+ );
+ });
+
+g.test('vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x - y, where x and y are vectors
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u.combine('inputSource', onlyConstInputSource).combine('vectorize', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('scalar'); // Using vectorize to generate vector cases based on scalar cases
+ await run(
+ t,
+ abstractBinary('-'),
+ [TypeAbstractFloat, TypeAbstractFloat],
+ TypeAbstractFloat,
+ t.params,
+ cases
+ );
+ });
+
+g.test('vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x - y, where x is a vector and y is a scalar
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(`vec${dim}_scalar`);
+ await run(
+ t,
+ abstractBinary('-'),
+ [TypeVec(dim, TypeAbstractFloat), TypeAbstractFloat],
+ TypeVec(dim, TypeAbstractFloat),
+ t.params,
+ cases
+ );
+ });
+
+g.test('scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x - y, where x is a scalar and y is a vector
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(`scalar_vec${dim}`);
+ await run(
+ t,
+ abstractBinary('-'),
+ [TypeAbstractFloat, TypeVec(dim, TypeAbstractFloat)],
+ TypeVec(dim, TypeAbstractFloat),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/binary.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/binary.ts
new file mode 100644
index 0000000000..f0b01b839b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/binary.ts
@@ -0,0 +1,21 @@
+import {
+ ShaderBuilder,
+ basicExpressionBuilder,
+ compoundAssignmentBuilder,
+ abstractFloatShaderBuilder,
+} from '../expression.js';
+
+/* @returns a ShaderBuilder that evaluates a binary operation */
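+/* e.g. binary('+') builds the expression text `((a)+(b))` for two operands a and b */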
+export function binary(op: string): ShaderBuilder {
+ return basicExpressionBuilder(values => `(${values.map(v => `(${v})`).join(op)})`);
+}
+
+/* @returns a ShaderBuilder that evaluates a compound binary operation */
+export function compoundBinary(op: string): ShaderBuilder {
+ return compoundAssignmentBuilder(op);
+}
+
+/* @returns a ShaderBuilder that evaluates a binary operation that returns AbstractFloats */
+export function abstractBinary(op: string): ShaderBuilder {
+ return abstractFloatShaderBuilder(values => `(${values.map(v => `(${v})`).join(op)})`);
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/bitwise.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/bitwise.spec.ts
new file mode 100644
index 0000000000..0d8d775352
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/bitwise.spec.ts
@@ -0,0 +1,303 @@
+export const description = `
+Execution Tests for the bitwise binary expression operations
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { i32, scalarType, u32 } from '../../../../util/conversion.js';
+import { allInputSources, run } from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+function makeBitwiseOrCases(inputType: string) {
+ const V = inputType === 'i32' ? i32 : u32;
+ const cases = [
+ // Static patterns
+ {
+ input: [V(0b00000000000000000000000000000000), V(0b00000000000000000000000000000000)],
+ expected: V(0b00000000000000000000000000000000),
+ },
+ {
+ input: [V(0b11111111111111111111111111111111), V(0b00000000000000000000000000000000)],
+ expected: V(0b11111111111111111111111111111111),
+ },
+ {
+ input: [V(0b00000000000000000000000000000000), V(0b11111111111111111111111111111111)],
+ expected: V(0b11111111111111111111111111111111),
+ },
+ {
+ input: [V(0b11111111111111111111111111111111), V(0b11111111111111111111111111111111)],
+ expected: V(0b11111111111111111111111111111111),
+ },
+ {
+ input: [V(0b10100100010010100100010010100100), V(0b00000000000000000000000000000000)],
+ expected: V(0b10100100010010100100010010100100),
+ },
+ {
+ input: [V(0b00000000000000000000000000000000), V(0b10100100010010100100010010100100)],
+ expected: V(0b10100100010010100100010010100100),
+ },
+ {
+ input: [V(0b01010010001001010010001001010010), V(0b10100100010010100100010010100100)],
+ expected: V(0b11110110011011110110011011110110),
+ },
+ ];
+ // Permute all combinations of a single bit being set for the LHS and RHS
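+  // (32 x 32 = 1024 generated cases per type)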
+ for (let i = 0; i < 32; i++) {
+ const lhs = 1 << i;
+ for (let j = 0; j < 32; j++) {
+ const rhs = 1 << j;
+ cases.push({
+ input: [V(lhs), V(rhs)],
+ expected: V(lhs | rhs),
+ });
+ }
+ }
+ return cases;
+}
+
+g.test('bitwise_or')
+ .specURL('https://www.w3.org/TR/WGSL/#bit-expr')
+ .desc(
+ `
+e1 | e2: T
+T is i32, u32, vecN<i32>, or vecN<u32>
+
+Bitwise-or. Component-wise when T is a vector.
+`
+ )
+ .params(u =>
+ u
+ .combine('type', ['i32', 'u32'] as const)
+ .combine('inputSource', allInputSources)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const type = scalarType(t.params.type);
+ const cases = makeBitwiseOrCases(t.params.type);
+
+ await run(t, binary('|'), [type, type], type, t.params, cases);
+ });
+
+g.test('bitwise_or_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#bit-expr')
+ .desc(
+ `
+e1 |= e2: T
+T is i32, u32, vecN<i32>, or vecN<u32>
+
+Bitwise-or. Component-wise when T is a vector.
+`
+ )
+ .params(u =>
+ u
+ .combine('type', ['i32', 'u32'] as const)
+ .combine('inputSource', allInputSources)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const type = scalarType(t.params.type);
+ const cases = makeBitwiseOrCases(t.params.type);
+
+ await run(t, compoundBinary('|='), [type, type], type, t.params, cases);
+ });
+
+function makeBitwiseAndCases(inputType: string) {
+ const V = inputType === 'i32' ? i32 : u32;
+ const cases = [
+ // Static patterns
+ {
+ input: [V(0b00000000000000000000000000000000), V(0b00000000000000000000000000000000)],
+ expected: V(0b00000000000000000000000000000000),
+ },
+ {
+ input: [V(0b11111111111111111111111111111111), V(0b00000000000000000000000000000000)],
+ expected: V(0b00000000000000000000000000000000),
+ },
+ {
+ input: [V(0b00000000000000000000000000000000), V(0b11111111111111111111111111111111)],
+ expected: V(0b00000000000000000000000000000000),
+ },
+ {
+ input: [V(0b11111111111111111111111111111111), V(0b11111111111111111111111111111111)],
+ expected: V(0b11111111111111111111111111111111),
+ },
+ {
+ input: [V(0b10100100010010100100010010100100), V(0b00000000000000000000000000000000)],
+ expected: V(0b00000000000000000000000000000000),
+ },
+ {
+ input: [V(0b10100100010010100100010010100100), V(0b11111111111111111111111111111111)],
+ expected: V(0b10100100010010100100010010100100),
+ },
+ {
+ input: [V(0b00000000000000000000000000000000), V(0b10100100010010100100010010100100)],
+ expected: V(0b00000000000000000000000000000000),
+ },
+ {
+ input: [V(0b11111111111111111111111111111111), V(0b10100100010010100100010010100100)],
+ expected: V(0b10100100010010100100010010100100),
+ },
+ {
+ input: [V(0b01010010001001010010001001010010), V(0b01011011101101011011101101011011)],
+ expected: V(0b01010010001001010010001001010010),
+ },
+ ];
+ // Permute all combinations of a single bit being set for the LHS and all but one bit set for the RHS
+ for (let i = 0; i < 32; i++) {
+ const lhs = 1 << i;
+ for (let j = 0; j < 32; j++) {
+ const rhs = 0xffffffff ^ (1 << j);
+ cases.push({
+ input: [V(lhs), V(rhs)],
+ expected: V(lhs & rhs),
+ });
+ }
+ }
+ return cases;
+}
+
+g.test('bitwise_and')
+ .specURL('https://www.w3.org/TR/WGSL/#bit-expr')
+ .desc(
+ `
+e1 & e2: T
+T is i32, u32, vecN<i32>, or vecN<u32>
+
+Bitwise-and. Component-wise when T is a vector.
+`
+ )
+ .params(u =>
+ u
+ .combine('type', ['i32', 'u32'] as const)
+ .combine('inputSource', allInputSources)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const type = scalarType(t.params.type);
+ const cases = makeBitwiseAndCases(t.params.type);
+ await run(t, binary('&'), [type, type], type, t.params, cases);
+ });
+
+g.test('bitwise_and_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#bit-expr')
+ .desc(
+ `
+e1 &= e2: T
+T is i32, u32, vecN<i32>, or vecN<u32>
+
+Bitwise-and. Component-wise when T is a vector.
+`
+ )
+ .params(u =>
+ u
+ .combine('type', ['i32', 'u32'] as const)
+ .combine('inputSource', allInputSources)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const type = scalarType(t.params.type);
+ const cases = makeBitwiseAndCases(t.params.type);
+ await run(t, compoundBinary('&='), [type, type], type, t.params, cases);
+ });
+
+function makeBitwiseExclusiveOrCases(inputType: string) {
+ const V = inputType === 'i32' ? i32 : u32;
+ const cases = [
+ // Static patterns
+ {
+ input: [V(0b00000000000000000000000000000000), V(0b00000000000000000000000000000000)],
+ expected: V(0b00000000000000000000000000000000),
+ },
+ {
+ input: [V(0b11111111111111111111111111111111), V(0b00000000000000000000000000000000)],
+ expected: V(0b11111111111111111111111111111111),
+ },
+ {
+ input: [V(0b00000000000000000000000000000000), V(0b11111111111111111111111111111111)],
+ expected: V(0b11111111111111111111111111111111),
+ },
+ {
+ input: [V(0b11111111111111111111111111111111), V(0b11111111111111111111111111111111)],
+ expected: V(0b00000000000000000000000000000000),
+ },
+ {
+ input: [V(0b10100100010010100100010010100100), V(0b00000000000000000000000000000000)],
+ expected: V(0b10100100010010100100010010100100),
+ },
+ {
+ input: [V(0b10100100010010100100010010100100), V(0b11111111111111111111111111111111)],
+ expected: V(0b01011011101101011011101101011011),
+ },
+ {
+ input: [V(0b00000000000000000000000000000000), V(0b10100100010010100100010010100100)],
+ expected: V(0b10100100010010100100010010100100),
+ },
+ {
+ input: [V(0b11111111111111111111111111111111), V(0b10100100010010100100010010100100)],
+ expected: V(0b01011011101101011011101101011011),
+ },
+ {
+ input: [V(0b01010010001001010010001001010010), V(0b01011011101101011011101101011011)],
+ expected: V(0b00001001100100001001100100001001),
+ },
+ ];
+ // Permute all combinations of a single bit being set for the LHS and all but one bit set for the RHS
+ for (let i = 0; i < 32; i++) {
+ const lhs = 1 << i;
+ for (let j = 0; j < 32; j++) {
+ const rhs = 0xffffffff ^ (1 << j);
+ cases.push({
+ input: [V(lhs), V(rhs)],
+ expected: V(lhs ^ rhs),
+ });
+ }
+ }
+ return cases;
+}
+
+g.test('bitwise_exclusive_or')
+ .specURL('https://www.w3.org/TR/WGSL/#bit-expr')
+ .desc(
+ `
+e1 ^ e2: T
+T is i32, u32, vecN<i32>, or vecN<u32>
+
+Bitwise-exclusive-or. Component-wise when T is a vector.
+`
+ )
+ .params(u =>
+ u
+ .combine('type', ['i32', 'u32'] as const)
+ .combine('inputSource', allInputSources)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const type = scalarType(t.params.type);
+    const cases = makeBitwiseExclusiveOrCases(t.params.type);
+ await run(t, binary('^'), [type, type], type, t.params, cases);
+ });
+
+g.test('bitwise_exclusive_or_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#bit-expr')
+ .desc(
+ `
+e1 ^= e2: T
+T is i32, u32, vecN<i32>, or vecN<u32>
+
+Bitwise-exclusive-or. Component-wise when T is a vector.
+`
+ )
+ .params(u =>
+ u
+ .combine('type', ['i32', 'u32'] as const)
+ .combine('inputSource', allInputSources)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const type = scalarType(t.params.type);
+    const cases = makeBitwiseExclusiveOrCases(t.params.type);
+ await run(t, compoundBinary('^='), [type, type], type, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/bitwise_shift.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/bitwise_shift.spec.ts
new file mode 100644
index 0000000000..5457b7ceab
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/bitwise_shift.spec.ts
@@ -0,0 +1,343 @@
+export const description = `
+Execution Tests for the bitwise shift binary expression operations
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { i32, scalarType, ScalarType, TypeU32, u32 } from '../../../../util/conversion.js';
+import { allInputSources, CaseList, run } from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+function is_unsigned(type: string) {
+ return type === 'u32';
+}
+
+const bitwidth = 32;
+
+// Returns true if e1 << e2 is valid for const evaluation
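+// e.g. for u32, 0x80000000 << 1 is rejected (a set bit would be shifted out), while for
+// i32, 0xC0000000 << 1 is accepted (the e2+1 most significant bits agree).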
+function is_valid_const_shift_left(e1: number, e1Type: string, e2: number) {
+ // Shift by 0 is always valid
+ if (e2 === 0) {
+ return true;
+ }
+
+ // Cannot shift by bitwidth or greater
+ if (e2 >= bitwidth) {
+ return false;
+ }
+
+  if (is_unsigned(e1Type)) {
+ // If T is an unsigned integer type, and any of the e2 most significant bits of e1 are 1, then invalid.
+ const must_be_zero_msb = e2;
+ const mask = ~0 << (bitwidth - must_be_zero_msb);
+ if ((e1 & mask) !== 0) {
+ return false;
+ }
+ } else {
+ // If T is a signed integer type, and the e2+1 most significant bits of e1 do
+ // not have the same bit value, then error.
+ const must_match_msb = e2 + 1;
+ const mask = ~0 << (bitwidth - must_match_msb);
+ if ((e1 & mask) !== 0 && (e1 & mask) !== mask) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Returns true if e1 >> e2 is valid for const evaluation
+function is_valid_const_shift_right(e1: number, e1Type: string, e2: number) {
+ // Shift by 0 is always valid
+ if (e2 === 0) {
+ return true;
+ }
+
+ // Cannot shift by bitwidth or greater
+ if (e2 >= bitwidth) {
+ return false;
+ }
+
+ return true;
+}
+
+// Returns all cases of shifting e1 left by [0,63]. If `is_const` is true, cases that are
+// invalid for const eval are not returned.
+function generate_shift_left_cases(e1: number, e1Type: string, is_const: boolean): CaseList {
+ const V = e1Type === 'i32' ? i32 : u32;
+ const cases: CaseList = [];
+ for (let shift = 0; shift < 64; ++shift) {
+ const e2 = shift;
+ if (is_const && !is_valid_const_shift_left(e1, e1Type, e2)) {
+ continue;
+ }
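+    // The shift amount wraps modulo the bit width (only non-const cases reach e2 >= 32).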
+    const expected = e1 << (e2 % bitwidth);
+ cases.push({ input: [V(e1), u32(e2)], expected: V(expected) });
+ }
+ return cases;
+}
+
+// Returns all cases of shifting e1 right by [0,63]. If `is_const` is true, cases that are
+// invalid for const eval are not returned.
+function generate_shift_right_cases(e1: number, e1Type: string, is_const: boolean): CaseList {
+ const V = e1Type === 'i32' ? i32 : u32;
+ const cases: CaseList = [];
+ for (let shift = 0; shift < 64; ++shift) {
+ const e2 = shift;
+ if (is_const && !is_valid_const_shift_right(e1, e1Type, e2)) {
+ continue;
+ }
+
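+    // JavaScript's >> and >>> use only the low 5 bits of the shift count, so shifts of
+    // 32..63 produce the wrapped results these non-const cases expect.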
+ let expected: number = 0;
+    if (is_unsigned(e1Type)) {
+ // zero-fill right shift
+ expected = e1 >>> e2;
+ } else {
+ // arithmetic right shift
+ expected = e1 >> e2;
+ }
+ cases.push({ input: [V(e1), u32(e2)], expected: V(expected) });
+ }
+ return cases;
+}
+
+function makeShiftLeftConcreteCases(inputType: string, inputSource: string, type: ScalarType) {
+ const V = inputType === 'i32' ? i32 : u32;
+ const is_const = inputSource === 'const';
+
+ const cases: CaseList = [
+ {
+ input: /* */ [V(0b00000000000000000000000000000001), u32(1)],
+ expected: /**/ V(0b00000000000000000000000000000010),
+ },
+ {
+ input: /* */ [V(0b00000000000000000000000000000011), u32(1)],
+ expected: /**/ V(0b00000000000000000000000000000110),
+ },
+ ];
+
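+  // Each overflow group below is only skipped when const-evaluating with the signedness
+  // for which the shift would overflow; const evaluation rejects such shifts.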
+  const add_unsigned_overflow_cases = !is_const || is_unsigned(inputType);
+  const add_signed_overflow_cases = !is_const || !is_unsigned(inputType);
+
+ if (add_unsigned_overflow_cases) {
+ // Cases that are fine for unsigned values, but would overflow (sign change) signed
+ // values when const evaluated.
+ cases.push(
+ ...[
+ {
+ input: [/* */ V(0b01000000000000000000000000000000), u32(1)],
+ expected: /**/ V(0b10000000000000000000000000000000),
+ },
+ {
+ input: [/* */ V(0b01111111111111111111111111111111), u32(1)],
+ expected: /**/ V(0b11111111111111111111111111111110),
+ },
+ {
+ input: [/* */ V(0b00000000000000000000000000000001), u32(31)],
+ expected: /**/ V(0b10000000000000000000000000000000),
+ },
+ ]
+ );
+ }
+ if (add_signed_overflow_cases) {
+ // Cases that are fine for signed values (no sign change), but would overflow
+ // unsigned values when const evaluated.
+ cases.push(
+ ...[
+ {
+ input: [/* */ V(0b11000000000000000000000000000000), u32(1)],
+ expected: /**/ V(0b10000000000000000000000000000000),
+ },
+ {
+ input: [/* */ V(0b11111111111111111111111111111111), u32(1)],
+ expected: /**/ V(0b11111111111111111111111111111110),
+ },
+ {
+ input: [/* */ V(0b11111111111111111111111111111111), u32(31)],
+ expected: /**/ V(0b10000000000000000000000000000000),
+ },
+ ]
+ );
+ }
+
+ // Generate cases that shift input value by [0,63] (invalid const eval cases are not returned).
+ cases.push(...generate_shift_left_cases(0b00000000000000000000000000000000, inputType, is_const));
+ cases.push(...generate_shift_left_cases(0b00000000000000000000000000000001, inputType, is_const));
+ cases.push(...generate_shift_left_cases(0b00000000000000000000000000000010, inputType, is_const));
+ cases.push(...generate_shift_left_cases(0b00000000000000000000000000000011, inputType, is_const));
+ cases.push(...generate_shift_left_cases(0b10000000000000000000000000000000, inputType, is_const));
+ cases.push(...generate_shift_left_cases(0b01000000000000000000000000000000, inputType, is_const));
+ cases.push(...generate_shift_left_cases(0b11000000000000000000000000000000, inputType, is_const));
+ cases.push(...generate_shift_left_cases(0b00010000001000001000010001010101, inputType, is_const));
+ cases.push(...generate_shift_left_cases(0b11101111110111110111101110101010, inputType, is_const));
+ return cases;
+}
+
+g.test('shift_left_concrete')
+ .specURL('https://www.w3.org/TR/WGSL/#bit-expr')
+ .desc(
+ `
+e1 << e2
+
+Shift left (shifted value is concrete)
+`
+ )
+ .params(u =>
+ u
+ .combine('type', ['i32', 'u32'] as const)
+ .combine('inputSource', allInputSources)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const type = scalarType(t.params.type);
+ const cases = makeShiftLeftConcreteCases(t.params.type, t.params.inputSource, type);
+ await run(t, binary('<<'), [type, TypeU32], type, t.params, cases);
+ });
+
+g.test('shift_left_concrete_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#bit-expr')
+ .desc(
+ `
+e1 <<= e2
+
+Shift left (shifted value is concrete)
+`
+ )
+ .params(u =>
+ u
+ .combine('type', ['i32', 'u32'] as const)
+ .combine('inputSource', allInputSources)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const type = scalarType(t.params.type);
+ const cases = makeShiftLeftConcreteCases(t.params.type, t.params.inputSource, type);
+ await run(t, compoundBinary('<<='), [type, TypeU32], type, t.params, cases);
+ });
+
+function makeShiftRightConcreteCases(inputType: string, inputSource: string, type: ScalarType) {
+ const V = inputType === 'i32' ? i32 : u32;
+ const is_const = inputSource === 'const';
+
+ const cases: CaseList = [
+ {
+ input: /* */ [V(0b00000000000000000000000000000001), u32(1)],
+ expected: /**/ V(0b00000000000000000000000000000000),
+ },
+ {
+ input: /* */ [V(0b00000000000000000000000000000011), u32(1)],
+ expected: /**/ V(0b00000000000000000000000000000001),
+ },
+ {
+ input: /* */ [V(0b01000000000000000000000000000000), u32(1)],
+ expected: /**/ V(0b00100000000000000000000000000000),
+ },
+ {
+ input: /* */ [V(0b01100000000000000000000000000000), u32(1)],
+ expected: /**/ V(0b00110000000000000000000000000000),
+ },
+ ];
+  if (is_unsigned(inputType)) {
+ // No sign extension
+ cases.push(
+ ...[
+ {
+ input: /* */ [V(0b10000000000000000000000000000000), u32(1)],
+ expected: /**/ V(0b01000000000000000000000000000000),
+ },
+ {
+ input: /* */ [V(0b11000000000000000000000000000000), u32(1)],
+ expected: /**/ V(0b01100000000000000000000000000000),
+ },
+ ]
+ );
+ } else {
+ cases.push(
+ // Sign extension if msb is 1
+ ...[
+ {
+ input: /* */ [V(0b10000000000000000000000000000000), u32(1)],
+ expected: /**/ V(0b11000000000000000000000000000000),
+ },
+ {
+ input: /* */ [V(0b11000000000000000000000000000000), u32(1)],
+ expected: /**/ V(0b11100000000000000000000000000000),
+ },
+ ]
+ );
+ }
+
+ // Generate cases that shift input value by [0,63] (invalid const eval cases are not returned).
+ cases.push(
+ ...generate_shift_right_cases(0b00000000000000000000000000000000, inputType, is_const)
+ );
+ cases.push(
+ ...generate_shift_right_cases(0b00000000000000000000000000000001, inputType, is_const)
+ );
+ cases.push(
+ ...generate_shift_right_cases(0b00000000000000000000000000000010, inputType, is_const)
+ );
+ cases.push(
+ ...generate_shift_right_cases(0b00000000000000000000000000000011, inputType, is_const)
+ );
+ cases.push(
+ ...generate_shift_right_cases(0b10000000000000000000000000000000, inputType, is_const)
+ );
+ cases.push(
+ ...generate_shift_right_cases(0b01000000000000000000000000000000, inputType, is_const)
+ );
+ cases.push(
+ ...generate_shift_right_cases(0b11000000000000000000000000000000, inputType, is_const)
+ );
+ cases.push(
+ ...generate_shift_right_cases(0b00010000001000001000010001010101, inputType, is_const)
+ );
+ cases.push(
+ ...generate_shift_right_cases(0b11101111110111110111101110101010, inputType, is_const)
+ );
+ return cases;
+}
+
+g.test('shift_right_concrete')
+ .specURL('https://www.w3.org/TR/WGSL/#bit-expr')
+ .desc(
+ `
+e1 >> e2
+
+Shift right (shifted value is concrete)
+`
+ )
+ .params(u =>
+ u
+ .combine('type', ['i32', 'u32'] as const)
+ .combine('inputSource', allInputSources)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const type = scalarType(t.params.type);
+ const cases = makeShiftRightConcreteCases(t.params.type, t.params.inputSource, type);
+ await run(t, binary('>>'), [type, TypeU32], type, t.params, cases);
+ });
+
+g.test('shift_right_concrete_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#bit-expr')
+ .desc(
+ `
+e1 >>= e2
+
+Shift right (shifted value is concrete)
+`
+ )
+ .params(u =>
+ u
+ .combine('type', ['i32', 'u32'] as const)
+ .combine('inputSource', allInputSources)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const type = scalarType(t.params.type);
+ const cases = makeShiftRightConcreteCases(t.params.type, t.params.inputSource, type);
+ await run(t, compoundBinary('>>='), [type, TypeU32], type, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/bool_logical.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/bool_logical.spec.ts
new file mode 100644
index 0000000000..e3aa448fe3
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/bool_logical.spec.ts
@@ -0,0 +1,187 @@
+export const description = `
+Execution Tests for the boolean binary logical expression operations
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { bool, TypeBool } from '../../../../util/conversion.js';
+import { allInputSources, run } from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Short circuiting vs no short circuiting is not tested here, it is covered in
+// src/webgpu/shader/execution/evaluation_order.spec.ts
+
+g.test('and')
+ .specURL('https://www.w3.org/TR/WGSL/#logical-expr')
+ .desc(
+ `
+Expression: e1 & e2
+Logical "and". Component-wise when T is a vector. Evaluates both e1 and e2.
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = [
+ { input: [bool(false), bool(false)], expected: bool(false) },
+ { input: [bool(true), bool(false)], expected: bool(false) },
+ { input: [bool(false), bool(true)], expected: bool(false) },
+ { input: [bool(true), bool(true)], expected: bool(true) },
+ ];
+
+ await run(t, binary('&'), [TypeBool, TypeBool], TypeBool, t.params, cases);
+ });
+
+g.test('and_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#logical-expr')
+ .desc(
+ `
+Expression: e1 &= e2
+Logical "and". Component-wise when T is a vector. Evaluates both e1 and e2.
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = [
+ { input: [bool(false), bool(false)], expected: bool(false) },
+ { input: [bool(true), bool(false)], expected: bool(false) },
+ { input: [bool(false), bool(true)], expected: bool(false) },
+ { input: [bool(true), bool(true)], expected: bool(true) },
+ ];
+
+ await run(t, compoundBinary('&='), [TypeBool, TypeBool], TypeBool, t.params, cases);
+ });
+
+g.test('and_short_circuit')
+ .specURL('https://www.w3.org/TR/WGSL/#logical-expr')
+ .desc(
+ `
+Expression: e1 && e2
+Short-circuiting "and". Yields true if both e1 and e2 are true; evaluates e2 only if e1 is true.
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = [
+ { input: [bool(false), bool(false)], expected: bool(false) },
+ { input: [bool(true), bool(false)], expected: bool(false) },
+ { input: [bool(false), bool(true)], expected: bool(false) },
+ { input: [bool(true), bool(true)], expected: bool(true) },
+ ];
+
+ await run(t, binary('&&'), [TypeBool, TypeBool], TypeBool, t.params, cases);
+ });
+
+g.test('or')
+ .specURL('https://www.w3.org/TR/WGSL/#logical-expr')
+ .desc(
+ `
+Expression: e1 | e2
+Logical "or". Component-wise when T is a vector. Evaluates both e1 and e2.
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = [
+ { input: [bool(false), bool(false)], expected: bool(false) },
+ { input: [bool(true), bool(false)], expected: bool(true) },
+ { input: [bool(false), bool(true)], expected: bool(true) },
+ { input: [bool(true), bool(true)], expected: bool(true) },
+ ];
+
+ await run(t, binary('|'), [TypeBool, TypeBool], TypeBool, t.params, cases);
+ });
+
+g.test('or_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#logical-expr')
+ .desc(
+ `
+Expression: e1 |= e2
+Logical "or". Component-wise when T is a vector. Evaluates both e1 and e2.
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = [
+ { input: [bool(false), bool(false)], expected: bool(false) },
+ { input: [bool(true), bool(false)], expected: bool(true) },
+ { input: [bool(false), bool(true)], expected: bool(true) },
+ { input: [bool(true), bool(true)], expected: bool(true) },
+ ];
+
+ await run(t, compoundBinary('|='), [TypeBool, TypeBool], TypeBool, t.params, cases);
+ });
+
+g.test('or_short_circuit')
+ .specURL('https://www.w3.org/TR/WGSL/#logical-expr')
+ .desc(
+ `
+Expression: e1 || e2
+Short-circuiting "or". Yields true if either e1 or e2 is true; evaluates e2 only if e1 is false.
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = [
+ { input: [bool(false), bool(false)], expected: bool(false) },
+ { input: [bool(true), bool(false)], expected: bool(true) },
+ { input: [bool(false), bool(true)], expected: bool(true) },
+ { input: [bool(true), bool(true)], expected: bool(true) },
+ ];
+
+ await run(t, binary('||'), [TypeBool, TypeBool], TypeBool, t.params, cases);
+ });
+
+g.test('equals')
+ .specURL('https://www.w3.org/TR/WGSL/#logical-expr')
+ .desc(
+ `
+Expression: e1 == e2
+Equality. Component-wise when T is a vector.
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = [
+ { input: [bool(false), bool(false)], expected: bool(true) },
+ { input: [bool(true), bool(false)], expected: bool(false) },
+ { input: [bool(false), bool(true)], expected: bool(false) },
+ { input: [bool(true), bool(true)], expected: bool(true) },
+ ];
+
+ await run(t, binary('=='), [TypeBool, TypeBool], TypeBool, t.params, cases);
+ });
+
+g.test('not_equals')
+ .specURL('https://www.w3.org/TR/WGSL/#logical-expr')
+ .desc(
+ `
+Expression: e1 != e2
+Inequality. Component-wise when T is a vector.
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = [
+ { input: [bool(false), bool(false)], expected: bool(false) },
+ { input: [bool(true), bool(false)], expected: bool(true) },
+ { input: [bool(false), bool(true)], expected: bool(true) },
+ { input: [bool(true), bool(true)], expected: bool(false) },
+ ];
+
+ await run(t, binary('!='), [TypeBool, TypeBool], TypeBool, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_addition.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_addition.spec.ts
new file mode 100644
index 0000000000..8948f90499
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_addition.spec.ts
@@ -0,0 +1,212 @@
+export const description = `
+Execution Tests for non-matrix f16 addition expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeF16, TypeVec } from '../../../../util/conversion.js';
+import { FP, FPVector } from '../../../../util/floating_point.js';
+import { sparseF16Range, sparseVectorF16Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+const additionVectorScalarInterval = (v: readonly number[], s: number): FPVector => {
+ return FP.f16.toVector(v.map(e => FP.f16.additionInterval(e, s)));
+};
+
+const additionScalarVectorInterval = (s: number, v: readonly number[]): FPVector => {
+ return FP.f16.toVector(v.map(e => FP.f16.additionInterval(s, e)));
+};
+
+export const g = makeTestGroup(GPUTest);
+
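+// Cases: [scalar|vecN_scalar|scalar_vecN]_[non_]const
+// Const cases are restricted to finite intervals, while non-const cases are unfiltered.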
+const scalar_cases = ([true, false] as const)
+ .map(nonConst => ({
+ [`scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateScalarPairToIntervalCases(
+ sparseF16Range(),
+ sparseF16Range(),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f16.additionInterval
+ );
+ },
+ }))
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const vector_scalar_cases = ([2, 3, 4] as const)
+ .flatMap(dim =>
+ ([true, false] as const).map(nonConst => ({
+ [`vec${dim}_scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateVectorScalarToVectorCases(
+ sparseVectorF16Range(dim),
+ sparseF16Range(),
+ nonConst ? 'unfiltered' : 'finite',
+ additionVectorScalarInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const scalar_vector_cases = ([2, 3, 4] as const)
+ .flatMap(dim =>
+ ([true, false] as const).map(nonConst => ({
+ [`scalar_vec${dim}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateScalarVectorToVectorCases(
+ sparseF16Range(),
+ sparseVectorF16Range(dim),
+ nonConst ? 'unfiltered' : 'finite',
+ additionScalarVectorInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/f16_addition', {
+ ...scalar_cases,
+ ...vector_scalar_cases,
+ ...scalar_vector_cases,
+});
+
+g.test('scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x + y, where x and y are scalars
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
+ );
+ await run(t, binary('+'), [TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
+
+g.test('vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x + y, where x and y are vectors
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('vectorize', [2, 3, 4] as const))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const' // Using vectorize to generate vector cases based on scalar cases
+ );
+ await run(t, binary('+'), [TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
+
+g.test('scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x += y
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
+ );
+ await run(t, compoundBinary('+='), [TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
+
+g.test('vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x + y, where x is a vector and y is a scalar
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
+ );
+ await run(
+ t,
+ binary('+'),
+ [TypeVec(dim, TypeF16), TypeF16],
+ TypeVec(dim, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('vector_scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x += y, where x is a vector and y is a scalar
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
+ );
+ await run(
+ t,
+ compoundBinary('+='),
+ [TypeVec(dim, TypeF16), TypeF16],
+ TypeVec(dim, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x + y, where x is a scalar and y is a vector
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `scalar_vec${dim}_const` : `scalar_vec${dim}_non_const`
+ );
+ await run(
+ t,
+ binary('+'),
+ [TypeF16, TypeVec(dim, TypeF16)],
+ TypeVec(dim, TypeF16),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_comparison.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_comparison.spec.ts
new file mode 100644
index 0000000000..ae7e1675c5
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_comparison.spec.ts
@@ -0,0 +1,280 @@
+export const description = `
+Execution Tests for the f16 comparison operations
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { anyOf } from '../../../../util/compare.js';
+import { bool, f16, Scalar, TypeBool, TypeF16 } from '../../../../util/conversion.js';
+import { flushSubnormalNumberF16, vectorF16Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, Case, run } from '../expression.js';
+
+import { binary } from './binary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+/**
+ * @returns a test case for the provided left hand & right hand values and truth function.
+ * Handles quantization and subnormals.
+ */
+function makeCase(
+ lhs: number,
+ rhs: number,
+ truthFunc: (lhs: Scalar, rhs: Scalar) => boolean
+): Case {
+ // Subnormal float values may be flushed at any time.
+ // https://www.w3.org/TR/WGSL/#floating-point-evaluation
+ const f16_lhs = f16(lhs);
+ const f16_rhs = f16(rhs);
+ const lhs_options = new Set([f16_lhs, f16(flushSubnormalNumberF16(lhs))]);
+ const rhs_options = new Set([f16_rhs, f16(flushSubnormalNumberF16(rhs))]);
+ const expected: Array<Scalar> = [];
+ lhs_options.forEach(l => {
+ rhs_options.forEach(r => {
+ const result = bool(truthFunc(l, r));
+ if (!expected.includes(result)) {
+ expected.push(result);
+ }
+ });
+ });
+
+ return { input: [f16_lhs, f16_rhs], expected: anyOf(...expected) };
+}
+
+export const d = makeCaseCache('binary/f16_logical', {
+ equals_non_const: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) === (rhs.value as number);
+ };
+
+ return vectorF16Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ equals_const: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) === (rhs.value as number);
+ };
+
+ return vectorF16Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ not_equals_non_const: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) !== (rhs.value as number);
+ };
+
+ return vectorF16Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ not_equals_const: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) !== (rhs.value as number);
+ };
+
+ return vectorF16Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ less_than_non_const: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) < (rhs.value as number);
+ };
+
+ return vectorF16Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ less_than_const: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) < (rhs.value as number);
+ };
+
+ return vectorF16Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ less_equals_non_const: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) <= (rhs.value as number);
+ };
+
+ return vectorF16Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ less_equals_const: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) <= (rhs.value as number);
+ };
+
+ return vectorF16Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ greater_than_non_const: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) > (rhs.value as number);
+ };
+
+ return vectorF16Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ greater_than_const: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) > (rhs.value as number);
+ };
+
+ return vectorF16Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ greater_equals_non_const: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) >= (rhs.value as number);
+ };
+
+ return vectorF16Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ greater_equals_const: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) >= (rhs.value as number);
+ };
+
+ return vectorF16Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+});
+
+g.test('equals')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x == y
+Accuracy: Correct result
+`
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'equals_const' : 'equals_non_const'
+ );
+ await run(t, binary('=='), [TypeF16, TypeF16], TypeBool, t.params, cases);
+ });
+
+g.test('not_equals')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x != y
+Accuracy: Correct result
+`
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'not_equals_const' : 'not_equals_non_const'
+ );
+ await run(t, binary('!='), [TypeF16, TypeF16], TypeBool, t.params, cases);
+ });
+
+g.test('less_than')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x < y
+Accuracy: Correct result
+`
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'less_than_const' : 'less_than_non_const'
+ );
+ await run(t, binary('<'), [TypeF16, TypeF16], TypeBool, t.params, cases);
+ });
+
+g.test('less_equals')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x <= y
+Accuracy: Correct result
+`
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'less_equals_const' : 'less_equals_non_const'
+ );
+ await run(t, binary('<='), [TypeF16, TypeF16], TypeBool, t.params, cases);
+ });
+
+g.test('greater_than')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x > y
+Accuracy: Correct result
+`
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'greater_than_const' : 'greater_than_non_const'
+ );
+ await run(t, binary('>'), [TypeF16, TypeF16], TypeBool, t.params, cases);
+ });
+
+g.test('greater_equals')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x >= y
+Accuracy: Correct result
+`
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'greater_equals_const' : 'greater_equals_non_const'
+ );
+ await run(t, binary('>='), [TypeF16, TypeF16], TypeBool, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_division.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_division.spec.ts
new file mode 100644
index 0000000000..c3b8fc04db
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_division.spec.ts
@@ -0,0 +1,212 @@
+export const description = `
+Execution Tests for non-matrix f16 division expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeF16, TypeVec } from '../../../../util/conversion.js';
+import { FP, FPVector } from '../../../../util/floating_point.js';
+import { sparseF16Range, sparseVectorF16Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+const divisionVectorScalarInterval = (v: readonly number[], s: number): FPVector => {
+ return FP.f16.toVector(v.map(e => FP.f16.divisionInterval(e, s)));
+};
+
+const divisionScalarVectorInterval = (s: number, v: readonly number[]): FPVector => {
+ return FP.f16.toVector(v.map(e => FP.f16.divisionInterval(s, e)));
+};
+
+export const g = makeTestGroup(GPUTest);
+
+const scalar_cases = ([true, false] as const)
+ .map(nonConst => ({
+ [`scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateScalarPairToIntervalCases(
+ sparseF16Range(),
+ sparseF16Range(),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f16.divisionInterval
+ );
+ },
+ }))
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const vector_scalar_cases = ([2, 3, 4] as const)
+ .flatMap(dim =>
+ ([true, false] as const).map(nonConst => ({
+ [`vec${dim}_scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateVectorScalarToVectorCases(
+ sparseVectorF16Range(dim),
+ sparseF16Range(),
+ nonConst ? 'unfiltered' : 'finite',
+ divisionVectorScalarInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const scalar_vector_cases = ([2, 3, 4] as const)
+ .flatMap(dim =>
+ ([true, false] as const).map(nonConst => ({
+ [`scalar_vec${dim}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateScalarVectorToVectorCases(
+ sparseF16Range(),
+ sparseVectorF16Range(dim),
+ nonConst ? 'unfiltered' : 'finite',
+ divisionScalarVectorInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/f16_division', {
+ ...scalar_cases,
+ ...vector_scalar_cases,
+ ...scalar_vector_cases,
+});
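+// A rough sketch of the cache layout (assumed here for illustration): the reduce() calls
+// above flatten the per-shape generators into a single record, so the keys are
+//   scalar_const, scalar_non_const,
+//   vec2_scalar_const ... vec4_scalar_non_const,
+//   scalar_vec2_const ... scalar_vec4_non_const
+// and d.get('vec3_scalar_const'), for example, only has to materialize that one bucket.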
+
+g.test('scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x / y, where x and y are scalars
+Accuracy: 2.5 ULP for |y| in the range [2^-14, 2^14]
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
+ );
+ await run(t, binary('/'), [TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
+
+g.test('vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x / y, where x and y are vectors
+Accuracy: 2.5 ULP for |y| in the range [2^-14, 2^14]
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('vectorize', [2, 3, 4] as const))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const' // Using vectorize to generate vector cases based on scalar cases
+ );
+ await run(t, binary('/'), [TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
+
+g.test('scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x /= y
+Accuracy: 2.5 ULP for |y| in the range [2^-14, 2^14]
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
+ );
+ await run(t, compoundBinary('/='), [TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
+
+g.test('vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x / y, where x is a vector and y is a scalar
+Accuracy: 2.5 ULP for |y| in the range [2^-14, 2^14]
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
+ );
+ await run(
+ t,
+ binary('/'),
+ [TypeVec(dim, TypeF16), TypeF16],
+ TypeVec(dim, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('vector_scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x /= y, where x is a vector and y is a scalar
+Accuracy: 2.5 ULP for |y| in the range [2^-14, 2^14]
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
+ );
+ await run(
+ t,
+ compoundBinary('/='),
+ [TypeVec(dim, TypeF16), TypeF16],
+ TypeVec(dim, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x / y, where x is a scalar and y is a vector
+Accuracy: 2.5 ULP for |y| in the range [2^-14, 2^14]
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `scalar_vec${dim}_const` : `scalar_vec${dim}_non_const`
+ );
+ await run(
+ t,
+ binary('/'),
+ [TypeF16, TypeVec(dim, TypeF16)],
+ TypeVec(dim, TypeF16),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_matrix_addition.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_matrix_addition.spec.ts
new file mode 100644
index 0000000000..fe64f41503
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_matrix_addition.spec.ts
@@ -0,0 +1,101 @@
+export const description = `
+Execution Tests for matrix f16 addition expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeF16, TypeMat } from '../../../../util/conversion.js';
+import { FP } from '../../../../util/floating_point.js';
+import { sparseMatrixF16Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Cases: matCxR_[non_]const
+const mat_cases = ([2, 3, 4] as const)
+ .flatMap(cols =>
+ ([2, 3, 4] as const).flatMap(rows =>
+ ([true, false] as const).map(nonConst => ({
+ [`mat${cols}x${rows}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateMatrixPairToMatrixCases(
+ sparseMatrixF16Range(cols, rows),
+ sparseMatrixF16Range(cols, rows),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f16.additionMatrixMatrixInterval
+ );
+ },
+ }))
+ )
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/f16_matrix_addition', mat_cases);
+
+g.test('matrix')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x + y, where x and y are matrices
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `mat${cols}x${rows}_const` : `mat${cols}x${rows}_non_const`
+ );
+ await run(
+ t,
+ binary('+'),
+ [TypeMat(cols, rows, TypeF16), TypeMat(cols, rows, TypeF16)],
+ TypeMat(cols, rows, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('matrix_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x += y, where x and y are matrices
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `mat${cols}x${rows}_const` : `mat${cols}x${rows}_non_const`
+ );
+ await run(
+ t,
+ compoundBinary('+='),
+ [TypeMat(cols, rows, TypeF16), TypeMat(cols, rows, TypeF16)],
+ TypeMat(cols, rows, TypeF16),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_matrix_matrix_multiplication.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_matrix_matrix_multiplication.spec.ts
new file mode 100644
index 0000000000..0c8b3e8c51
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_matrix_matrix_multiplication.spec.ts
@@ -0,0 +1,114 @@
+export const description = `
+Execution Tests for matrix-matrix f16 multiplication expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeF16, TypeMat } from '../../../../util/conversion.js';
+import { FP } from '../../../../util/floating_point.js';
+import { sparseMatrixF16Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Cases: matKxR_matCxK_[non_]const
+const mat_mat_cases = ([2, 3, 4] as const)
+ .flatMap(k =>
+ ([2, 3, 4] as const).flatMap(cols =>
+ ([2, 3, 4] as const).flatMap(rows =>
+ ([true, false] as const).map(nonConst => ({
+ [`mat${k}x${rows}_mat${cols}x${k}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateMatrixPairToMatrixCases(
+ sparseMatrixF16Range(k, rows),
+ sparseMatrixF16Range(cols, k),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f16.multiplicationMatrixMatrixInterval
+ );
+ },
+ }))
+ )
+ )
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/f16_matrix_matrix_multiplication', mat_mat_cases);
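+// Naming note (illustrative, assuming WGSL's column-major matCxR convention): the key
+// template above is mat${k}x${rows}_mat${cols}x${k}, so for k = 3, cols = 2, rows = 4 the
+// generated keys are 'mat3x4_mat2x3_const' / 'mat3x4_mat2x3_non_const', which is exactly the
+// lookup the tests below build from x_cols/x_rows and y_cols/y_rows.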
+
+g.test('matrix_matrix')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x * y, where x is a matrix and y is a matrix
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('common_dim', [2, 3, 4] as const)
+ .combine('x_rows', [2, 3, 4] as const)
+ .combine('y_cols', [2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const x_cols = t.params.common_dim;
+ const x_rows = t.params.x_rows;
+ const y_cols = t.params.y_cols;
+ const y_rows = t.params.common_dim;
+
+ const cases = await d.get(
+ t.params.inputSource === 'const'
+ ? `mat${x_cols}x${x_rows}_mat${y_cols}x${y_rows}_const`
+ : `mat${x_cols}x${x_rows}_mat${y_cols}x${y_rows}_non_const`
+ );
+ await run(
+ t,
+ binary('*'),
+ [TypeMat(x_cols, x_rows, TypeF16), TypeMat(y_cols, y_rows, TypeF16)],
+ TypeMat(y_cols, x_rows, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('matrix_matrix_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x *= y, where x is a matrix and y is a matrix
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('common_dim', [2, 3, 4] as const)
+ .combine('x_rows', [2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const x_cols = t.params.common_dim;
+ const x_rows = t.params.x_rows;
+ const y_cols = x_cols;
+ const y_rows = t.params.common_dim;
+
+ const cases = await d.get(
+ t.params.inputSource === 'const'
+ ? `mat${x_cols}x${x_rows}_mat${y_cols}x${y_rows}_const`
+ : `mat${x_cols}x${x_rows}_mat${y_cols}x${y_rows}_non_const`
+ );
+ await run(
+ t,
+ compoundBinary('*='),
+ [TypeMat(x_cols, x_rows, TypeF16), TypeMat(y_cols, y_rows, TypeF16)],
+ TypeMat(y_cols, x_rows, TypeF16),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_matrix_scalar_multiplication.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_matrix_scalar_multiplication.spec.ts
new file mode 100644
index 0000000000..29d4700ee6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_matrix_scalar_multiplication.spec.ts
@@ -0,0 +1,161 @@
+export const description = `
+Execution Tests for matrix-scalar and scalar-matrix f16 multiplication expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeF16, TypeMat } from '../../../../util/conversion.js';
+import { FP } from '../../../../util/floating_point.js';
+import { sparseF16Range, sparseMatrixF16Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Cases: matCxR_scalar_[non_]const
+const mat_scalar_cases = ([2, 3, 4] as const)
+ .flatMap(cols =>
+ ([2, 3, 4] as const).flatMap(rows =>
+ ([true, false] as const).map(nonConst => ({
+ [`mat${cols}x${rows}_scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateMatrixScalarToMatrixCases(
+ sparseMatrixF16Range(cols, rows),
+ sparseF16Range(),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f16.multiplicationMatrixScalarInterval
+ );
+ },
+ }))
+ )
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+// Cases: scalar_matCxR_[non_]const
+const scalar_mat_cases = ([2, 3, 4] as const)
+ .flatMap(cols =>
+ ([2, 3, 4] as const).flatMap(rows =>
+ ([true, false] as const).map(nonConst => ({
+ [`scalar_mat${cols}x${rows}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateScalarMatrixToMatrixCases(
+ sparseF16Range(),
+ sparseMatrixF16Range(cols, rows),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f16.multiplicationScalarMatrixInterval
+ );
+ },
+ }))
+ )
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/f16_matrix_scalar_multiplication', {
+ ...mat_scalar_cases,
+ ...scalar_mat_cases,
+});
+
+g.test('matrix_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x * y, where x is a matrix and y is a scalar
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(
+ t.params.inputSource === 'const'
+ ? `mat${cols}x${rows}_scalar_const`
+ : `mat${cols}x${rows}_scalar_non_const`
+ );
+ await run(
+ t,
+ binary('*'),
+ [TypeMat(cols, rows, TypeF16), TypeF16],
+ TypeMat(cols, rows, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('matrix_scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x *= y, where x is a matrix and y is a scalar
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(
+ t.params.inputSource === 'const'
+ ? `mat${cols}x${rows}_scalar_const`
+ : `mat${cols}x${rows}_scalar_non_const`
+ );
+ await run(
+ t,
+ compoundBinary('*='),
+ [TypeMat(cols, rows, TypeF16), TypeF16],
+ TypeMat(cols, rows, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('scalar_matrix')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x * y, where x is a scalar and y is a matrix
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(
+ t.params.inputSource === 'const'
+ ? `scalar_mat${cols}x${rows}_const`
+ : `scalar_mat${cols}x${rows}_non_const`
+ );
+ await run(
+ t,
+ binary('*'),
+ [TypeF16, TypeMat(cols, rows, TypeF16)],
+ TypeMat(cols, rows, TypeF16),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_matrix_subtraction.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_matrix_subtraction.spec.ts
new file mode 100644
index 0000000000..5b5f6ba04e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_matrix_subtraction.spec.ts
@@ -0,0 +1,101 @@
+export const description = `
+Execution Tests for matrix f16 subtraction expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeF16, TypeMat } from '../../../../util/conversion.js';
+import { FP } from '../../../../util/floating_point.js';
+import { sparseMatrixF16Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Cases: matCxR_[non_]const
+const mat_cases = ([2, 3, 4] as const)
+ .flatMap(cols =>
+ ([2, 3, 4] as const).flatMap(rows =>
+ ([true, false] as const).map(nonConst => ({
+ [`mat${cols}x${rows}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateMatrixPairToMatrixCases(
+ sparseMatrixF16Range(cols, rows),
+ sparseMatrixF16Range(cols, rows),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f16.subtractionMatrixMatrixInterval
+ );
+ },
+ }))
+ )
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/f16_matrix_subtraction', mat_cases);
+
+g.test('matrix')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x - y, where x and y are matrices
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `mat${cols}x${rows}_const` : `mat${cols}x${rows}_non_const`
+ );
+ await run(
+ t,
+ binary('-'),
+ [TypeMat(cols, rows, TypeF16), TypeMat(cols, rows, TypeF16)],
+ TypeMat(cols, rows, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('matrix_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x -= y, where x and y are matrices
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `mat${cols}x${rows}_const` : `mat${cols}x${rows}_non_const`
+ );
+ await run(
+ t,
+ compoundBinary('-='),
+ [TypeMat(cols, rows, TypeF16), TypeMat(cols, rows, TypeF16)],
+ TypeMat(cols, rows, TypeF16),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_matrix_vector_multiplication.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_matrix_vector_multiplication.spec.ts
new file mode 100644
index 0000000000..3e916c7fd4
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_matrix_vector_multiplication.spec.ts
@@ -0,0 +1,156 @@
+export const description = `
+Execution Tests for matrix-vector and vector-matrix f16 multiplication expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeF16, TypeMat, TypeVec } from '../../../../util/conversion.js';
+import { FP } from '../../../../util/floating_point.js';
+import { sparseMatrixF16Range, sparseVectorF16Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Cases: matCxR_vecC_[non_]const
+const mat_vec_cases = ([2, 3, 4] as const)
+ .flatMap(cols =>
+ ([2, 3, 4] as const).flatMap(rows =>
+ ([true, false] as const).map(nonConst => ({
+ [`mat${cols}x${rows}_vec${cols}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateMatrixVectorToVectorCases(
+ sparseMatrixF16Range(cols, rows),
+ sparseVectorF16Range(cols),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f16.multiplicationMatrixVectorInterval
+ );
+ },
+ }))
+ )
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+// Cases: vecR_matCxR_[non_]const
+const vec_mat_cases = ([2, 3, 4] as const)
+ .flatMap(rows =>
+ ([2, 3, 4] as const).flatMap(cols =>
+ ([true, false] as const).map(nonConst => ({
+ [`vec${rows}_mat${cols}x${rows}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateVectorMatrixToVectorCases(
+ sparseVectorF16Range(rows),
+ sparseMatrixF16Range(cols, rows),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f16.multiplicationVectorMatrixInterval
+ );
+ },
+ }))
+ )
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/f16_matrix_vector_multiplication', {
+ ...mat_vec_cases,
+ ...vec_mat_cases,
+});
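+// Shape reminder (illustrative only): with WGSL's matCxR (C columns, R rows) types,
+//   mat${cols}x${rows} * vec${cols} -> vec${rows}   (matrix_vector below)
+//   vec${rows} * mat${cols}x${rows} -> vec${cols}   (vector_matrix below)
+// which is why the two case families above key on vec${cols} vs. vec${rows}.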
+
+g.test('matrix_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x * y, where x is a matrix and y is a vector
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(
+ t.params.inputSource === 'const'
+ ? `mat${cols}x${rows}_vec${cols}_const`
+ : `mat${cols}x${rows}_vec${cols}_non_const`
+ );
+ await run(
+ t,
+ binary('*'),
+ [TypeMat(cols, rows, TypeF16), TypeVec(cols, TypeF16)],
+ TypeVec(rows, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('vector_matrix')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x * y, where x is a vector and y is a matrix
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(
+ t.params.inputSource === 'const'
+ ? `vec${rows}_mat${cols}x${rows}_const`
+ : `vec${rows}_mat${cols}x${rows}_non_const`
+ );
+ await run(
+ t,
+ binary('*'),
+ [TypeVec(rows, TypeF16), TypeMat(cols, rows, TypeF16)],
+ TypeVec(cols, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('vector_matrix_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x *= y, where x is a vector and y is a matrix
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cols = t.params.dim;
+ const rows = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const'
+ ? `vec${rows}_mat${cols}x${rows}_const`
+ : `vec${rows}_mat${cols}x${rows}_non_const`
+ );
+ await run(
+ t,
+ compoundBinary('*='),
+ [TypeVec(rows, TypeF16), TypeMat(cols, rows, TypeF16)],
+ TypeVec(cols, TypeF16),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_multiplication.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_multiplication.spec.ts
new file mode 100644
index 0000000000..10041fbc17
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_multiplication.spec.ts
@@ -0,0 +1,212 @@
+export const description = `
+Execution Tests for non-matrix f16 multiplication expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeF16, TypeVec } from '../../../../util/conversion.js';
+import { FP, FPVector } from '../../../../util/floating_point.js';
+import { sparseF16Range, sparseVectorF16Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+const multiplicationVectorScalarInterval = (v: readonly number[], s: number): FPVector => {
+ return FP.f16.toVector(v.map(e => FP.f16.multiplicationInterval(e, s)));
+};
+
+const multiplicationScalarVectorInterval = (s: number, v: readonly number[]): FPVector => {
+ return FP.f16.toVector(v.map(e => FP.f16.multiplicationInterval(s, e)));
+};
+
+export const g = makeTestGroup(GPUTest);
+
+const scalar_cases = ([true, false] as const)
+ .map(nonConst => ({
+ [`scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateScalarPairToIntervalCases(
+ sparseF16Range(),
+ sparseF16Range(),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f16.multiplicationInterval
+ );
+ },
+ }))
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const vector_scalar_cases = ([2, 3, 4] as const)
+ .flatMap(dim =>
+ ([true, false] as const).map(nonConst => ({
+ [`vec${dim}_scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateVectorScalarToVectorCases(
+ sparseVectorF16Range(dim),
+ sparseF16Range(),
+ nonConst ? 'unfiltered' : 'finite',
+ multiplicationVectorScalarInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const scalar_vector_cases = ([2, 3, 4] as const)
+ .flatMap(dim =>
+ ([true, false] as const).map(nonConst => ({
+ [`scalar_vec${dim}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateScalarVectorToVectorCases(
+ sparseF16Range(),
+ sparseVectorF16Range(dim),
+ nonConst ? 'unfiltered' : 'finite',
+ multiplicationScalarVectorInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/f16_multiplication', {
+ ...scalar_cases,
+ ...vector_scalar_cases,
+ ...scalar_vector_cases,
+});
+
+g.test('scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x * y, where x and y are scalars
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
+ );
+ await run(t, binary('*'), [TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
+
+g.test('vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x * y, where x and y are vectors
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('vectorize', [2, 3, 4] as const))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const' // Using vectorize to generate vector cases based on scalar cases
+ );
+ await run(t, binary('*'), [TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
+
+g.test('scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x *= y
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
+ );
+ await run(t, compoundBinary('*='), [TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
+
+g.test('vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x * y, where x is a vector and y is a scalar
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
+ );
+ await run(
+ t,
+ binary('*'),
+ [TypeVec(dim, TypeF16), TypeF16],
+ TypeVec(dim, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('vector_scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x *= y, where x is a vector and y is a scalar
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
+ );
+ await run(
+ t,
+ compoundBinary('*='),
+ [TypeVec(dim, TypeF16), TypeF16],
+ TypeVec(dim, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x * y, where x is a scalar and y is a vector
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `scalar_vec${dim}_const` : `scalar_vec${dim}_non_const`
+ );
+ await run(
+ t,
+ binary('*'),
+ [TypeF16, TypeVec(dim, TypeF16)],
+ TypeVec(dim, TypeF16),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_remainder.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_remainder.spec.ts
new file mode 100644
index 0000000000..801b84904b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_remainder.spec.ts
@@ -0,0 +1,212 @@
+export const description = `
+Execution Tests for non-matrix f16 remainder expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeF16, TypeVec } from '../../../../util/conversion.js';
+import { FP, FPVector } from '../../../../util/floating_point.js';
+import { sparseF16Range, sparseVectorF16Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+const remainderVectorScalarInterval = (v: readonly number[], s: number): FPVector => {
+ return FP.f16.toVector(v.map(e => FP.f16.remainderInterval(e, s)));
+};
+
+const remainderScalarVectorInterval = (s: number, v: readonly number[]): FPVector => {
+ return FP.f16.toVector(v.map(e => FP.f16.remainderInterval(s, e)));
+};
+
+export const g = makeTestGroup(GPUTest);
+
+const scalar_cases = ([true, false] as const)
+ .map(nonConst => ({
+ [`scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateScalarPairToIntervalCases(
+ sparseF16Range(),
+ sparseF16Range(),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f16.remainderInterval
+ );
+ },
+ }))
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const vector_scalar_cases = ([2, 3, 4] as const)
+ .flatMap(dim =>
+ ([true, false] as const).map(nonConst => ({
+ [`vec${dim}_scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateVectorScalarToVectorCases(
+ sparseVectorF16Range(dim),
+ sparseF16Range(),
+ nonConst ? 'unfiltered' : 'finite',
+ remainderVectorScalarInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const scalar_vector_cases = ([2, 3, 4] as const)
+ .flatMap(dim =>
+ ([true, false] as const).map(nonConst => ({
+ [`scalar_vec${dim}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateScalarVectorToVectorCases(
+ sparseF16Range(),
+ sparseVectorF16Range(dim),
+ nonConst ? 'unfiltered' : 'finite',
+ remainderScalarVectorInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/f16_remainder', {
+ ...scalar_cases,
+ ...vector_scalar_cases,
+ ...scalar_vector_cases,
+});
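+// Worked example (a sketch, not normative): WGSL defines x % y as x - y * trunc(x / y), so
+// the accepted interval for, say, 7.0h % 2.0h is derived by chaining the div -> trunc ->
+// mul -> sub intervals around 1.0 rather than from a single correctly rounded result.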
+
+g.test('scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x % y, where x and y are scalars
+Accuracy: Derived from x - y * trunc(x/y)
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
+ );
+ await run(t, binary('%'), [TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
+
+g.test('vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x % y, where x and y are vectors
+Accuracy: Derived from x - y * trunc(x/y)
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('vectorize', [2, 3, 4] as const))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
+ );
+ await run(t, binary('%'), [TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
+
+g.test('scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x %= y
+Accuracy: Derived from x - y * trunc(x/y)
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
+ );
+ await run(t, compoundBinary('%='), [TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
+
+g.test('vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x % y, where x is a vector and y is a scalar
+Accuracy: Derived from x - y * trunc(x/y)
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
+ );
+ await run(
+ t,
+ binary('%'),
+ [TypeVec(dim, TypeF16), TypeF16],
+ TypeVec(dim, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('vector_scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x %= y, where x is a vector and y is a scalar
+Accuracy: Derived from x - y * trunc(x/y)
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
+ );
+ await run(
+ t,
+ compoundBinary('%='),
+ [TypeVec(dim, TypeF16), TypeF16],
+ TypeVec(dim, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x % y, where x is a scalar and y is a vector
+Accuracy: Derived from x - y * trunc(x/y)
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `scalar_vec${dim}_const` : `scalar_vec${dim}_non_const`
+ );
+ await run(
+ t,
+ binary('%'),
+ [TypeF16, TypeVec(dim, TypeF16)],
+ TypeVec(dim, TypeF16),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_subtraction.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_subtraction.spec.ts
new file mode 100644
index 0000000000..a64d556837
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f16_subtraction.spec.ts
@@ -0,0 +1,212 @@
+export const description = `
+Execution Tests for non-matrix f16 subtraction expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeF16, TypeVec } from '../../../../util/conversion.js';
+import { FP, FPVector } from '../../../../util/floating_point.js';
+import { sparseF16Range, sparseVectorF16Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+const subtractionVectorScalarInterval = (v: readonly number[], s: number): FPVector => {
+ return FP.f16.toVector(v.map(e => FP.f16.subtractionInterval(e, s)));
+};
+
+const subtractionScalarVectorInterval = (s: number, v: readonly number[]): FPVector => {
+ return FP.f16.toVector(v.map(e => FP.f16.subtractionInterval(s, e)));
+};
+
+export const g = makeTestGroup(GPUTest);
+
+const scalar_cases = ([true, false] as const)
+ .map(nonConst => ({
+ [`scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateScalarPairToIntervalCases(
+ sparseF16Range(),
+ sparseF16Range(),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f16.subtractionInterval
+ );
+ },
+ }))
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const vector_scalar_cases = ([2, 3, 4] as const)
+ .flatMap(dim =>
+ ([true, false] as const).map(nonConst => ({
+ [`vec${dim}_scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateVectorScalarToVectorCases(
+ sparseVectorF16Range(dim),
+ sparseF16Range(),
+ nonConst ? 'unfiltered' : 'finite',
+ subtractionVectorScalarInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const scalar_vector_cases = ([2, 3, 4] as const)
+ .flatMap(dim =>
+ ([true, false] as const).map(nonConst => ({
+ [`scalar_vec${dim}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateScalarVectorToVectorCases(
+ sparseF16Range(),
+ sparseVectorF16Range(dim),
+ nonConst ? 'unfiltered' : 'finite',
+ subtractionScalarVectorInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/f16_subtraction', {
+ ...scalar_cases,
+ ...vector_scalar_cases,
+ ...scalar_vector_cases,
+});
+
+g.test('scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x - y, where x and y are scalars
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
+ );
+ await run(t, binary('-'), [TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
+
+g.test('vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x - y, where x and y are vectors
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('vectorize', [2, 3, 4] as const))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const' // Using vectorize to generate vector cases based on scalar cases
+ );
+ await run(t, binary('-'), [TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
+
+g.test('scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x -= y
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
+ );
+ await run(t, compoundBinary('-='), [TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
+
+g.test('vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x - y, where x is a vector and y is a scalar
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
+ );
+ await run(
+ t,
+ binary('-'),
+ [TypeVec(dim, TypeF16), TypeF16],
+ TypeVec(dim, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('vector_scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x -= y, where x is a vector and y is a scalar
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
+ );
+ await run(
+ t,
+ compoundBinary('-='),
+ [TypeVec(dim, TypeF16), TypeF16],
+ TypeVec(dim, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x - y, where x is a scalar and y is a vector
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `scalar_vec${dim}_const` : `scalar_vec${dim}_non_const`
+ );
+ await run(
+ t,
+ binary('-'),
+ [TypeF16, TypeVec(dim, TypeF16)],
+ TypeVec(dim, TypeF16),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_addition.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_addition.spec.ts
new file mode 100644
index 0000000000..65739f67ca
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_addition.spec.ts
@@ -0,0 +1,194 @@
+export const description = `
+Execution Tests for non-matrix f32 addition expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeF32, TypeVec } from '../../../../util/conversion.js';
+import { FP, FPVector } from '../../../../util/floating_point.js';
+import { sparseF32Range, sparseVectorF32Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+const additionVectorScalarInterval = (v: readonly number[], s: number): FPVector => {
+ return FP.f32.toVector(v.map(e => FP.f32.additionInterval(e, s)));
+};
+
+const additionScalarVectorInterval = (s: number, v: readonly number[]): FPVector => {
+ return FP.f32.toVector(v.map(e => FP.f32.additionInterval(s, e)));
+};
+
+export const g = makeTestGroup(GPUTest);
+
+const scalar_cases = ([true, false] as const)
+ .map(nonConst => ({
+ [`scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateScalarPairToIntervalCases(
+ sparseF32Range(),
+ sparseF32Range(),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f32.additionInterval
+ );
+ },
+ }))
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const vector_scalar_cases = ([2, 3, 4] as const)
+ .flatMap(dim =>
+ ([true, false] as const).map(nonConst => ({
+ [`vec${dim}_scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateVectorScalarToVectorCases(
+ sparseVectorF32Range(dim),
+ sparseF32Range(),
+ nonConst ? 'unfiltered' : 'finite',
+ additionVectorScalarInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const scalar_vector_cases = ([2, 3, 4] as const)
+ .flatMap(dim =>
+ ([true, false] as const).map(nonConst => ({
+ [`scalar_vec${dim}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateScalarVectorToVectorCases(
+ sparseF32Range(),
+ sparseVectorF32Range(dim),
+ nonConst ? 'unfiltered' : 'finite',
+ additionScalarVectorInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/f32_addition', {
+ ...scalar_cases,
+ ...vector_scalar_cases,
+ ...scalar_vector_cases,
+});
+
+g.test('scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x + y, where x and y are scalars
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
+ );
+ await run(t, binary('+'), [TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x + y, where x and y are vectors
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('vectorize', [2, 3, 4] as const))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const' // Using vectorize to generate vector cases based on scalar cases
+ );
+ await run(t, binary('+'), [TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x += y
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
+ );
+ await run(t, compoundBinary('+='), [TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x + y, where x is a vector and y is a scalar
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
+ );
+ await run(
+ t,
+ binary('+'),
+ [TypeVec(dim, TypeF32), TypeF32],
+ TypeVec(dim, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('vector_scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x += y, where x is a vector and y is a scalar
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
+ );
+ await run(
+ t,
+ compoundBinary('+='),
+ [TypeVec(dim, TypeF32), TypeF32],
+ TypeVec(dim, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x + y, where x is a scalar and y is a vector
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `scalar_vec${dim}_const` : `scalar_vec${dim}_non_const`
+ );
+ await run(
+ t,
+ binary('+'),
+ [TypeF32, TypeVec(dim, TypeF32)],
+ TypeVec(dim, TypeF32),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_comparison.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_comparison.spec.ts
new file mode 100644
index 0000000000..ef862e7757
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_comparison.spec.ts
@@ -0,0 +1,262 @@
+export const description = `
+Execution Tests for the f32 comparison operations
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { anyOf } from '../../../../util/compare.js';
+import { bool, f32, Scalar, TypeBool, TypeF32 } from '../../../../util/conversion.js';
+import { flushSubnormalNumberF32, vectorF32Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, Case, run } from '../expression.js';
+
+import { binary } from './binary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+/**
+ * @returns a test case for the provided left hand & right hand values and truth function.
+ * Handles quantization and subnormals.
+ */
+function makeCase(
+ lhs: number,
+ rhs: number,
+ truthFunc: (lhs: Scalar, rhs: Scalar) => boolean
+): Case {
+ // Subnormal float values may be flushed at any time.
+ // https://www.w3.org/TR/WGSL/#floating-point-evaluation
+ const f32_lhs = f32(lhs);
+ const f32_rhs = f32(rhs);
+ const lhs_options = new Set([f32_lhs, f32(flushSubnormalNumberF32(lhs))]);
+ const rhs_options = new Set([f32_rhs, f32(flushSubnormalNumberF32(rhs))]);
+ const expected: Array<Scalar> = [];
+ lhs_options.forEach(l => {
+ rhs_options.forEach(r => {
+ const result = bool(truthFunc(l, r));
+ if (!expected.includes(result)) {
+ expected.push(result);
+ }
+ });
+ });
+
+ return { input: [f32_lhs, f32_rhs], expected: anyOf(...expected) };
+}
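+// Example: for a subnormal `lhs` compared against 0 with `===`, `lhs_options` holds both the
+// original value and its flushed-to-zero form, so `expected` ends up containing both
+// `bool(false)` and `bool(true)` and anyOf accepts either outcome.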
+
+export const d = makeCaseCache('binary/f32_logical', {
+ equals_non_const: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) === (rhs.value as number);
+ };
+
+ return vectorF32Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ equals_const: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) === (rhs.value as number);
+ };
+
+ return vectorF32Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ not_equals_non_const: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) !== (rhs.value as number);
+ };
+
+ return vectorF32Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ not_equals_const: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) !== (rhs.value as number);
+ };
+
+ return vectorF32Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ less_than_non_const: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) < (rhs.value as number);
+ };
+
+ return vectorF32Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ less_than_const: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) < (rhs.value as number);
+ };
+
+ return vectorF32Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ less_equals_non_const: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) <= (rhs.value as number);
+ };
+
+ return vectorF32Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ less_equals_const: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) <= (rhs.value as number);
+ };
+
+ return vectorF32Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ greater_than_non_const: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) > (rhs.value as number);
+ };
+
+ return vectorF32Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ greater_than_const: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) > (rhs.value as number);
+ };
+
+ return vectorF32Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ greater_equals_non_const: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) >= (rhs.value as number);
+ };
+
+ return vectorF32Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+ greater_equals_const: () => {
+ const truthFunc = (lhs: Scalar, rhs: Scalar): boolean => {
+ return (lhs.value as number) >= (rhs.value as number);
+ };
+
+ return vectorF32Range(2).map(v => {
+ return makeCase(v[0], v[1], truthFunc);
+ });
+ },
+});
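+// Note: the `_const` and `_non_const` builders above are identical; comparisons do not need
+// the finite/unfiltered split, but keeping both keys lets the tests below select a cache
+// entry from `inputSource` with the same naming convention as the arithmetic specs.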
+
+g.test('equals')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x == y
+Accuracy: Correct result
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'equals_const' : 'equals_non_const'
+ );
+ await run(t, binary('=='), [TypeF32, TypeF32], TypeBool, t.params, cases);
+ });
+
+g.test('not_equals')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x != y
+Accuracy: Correct result
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'not_equals_const' : 'not_equals_non_const'
+ );
+ await run(t, binary('!='), [TypeF32, TypeF32], TypeBool, t.params, cases);
+ });
+
+g.test('less_than')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x < y
+Accuracy: Correct result
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'less_than_const' : 'less_than_non_const'
+ );
+ await run(t, binary('<'), [TypeF32, TypeF32], TypeBool, t.params, cases);
+ });
+
+g.test('less_equals')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x <= y
+Accuracy: Correct result
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'less_equals_const' : 'less_equals_non_const'
+ );
+ await run(t, binary('<='), [TypeF32, TypeF32], TypeBool, t.params, cases);
+ });
+
+g.test('greater_than')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x > y
+Accuracy: Correct result
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'greater_than_const' : 'greater_than_non_const'
+ );
+ await run(t, binary('>'), [TypeF32, TypeF32], TypeBool, t.params, cases);
+ });
+
+g.test('greater_equals')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x >= y
+Accuracy: Correct result
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'greater_equals_const' : 'greater_equals_non_const'
+ );
+ await run(t, binary('>='), [TypeF32, TypeF32], TypeBool, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_division.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_division.spec.ts
new file mode 100644
index 0000000000..bd3793bf8a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_division.spec.ts
@@ -0,0 +1,194 @@
+export const description = `
+Execution Tests for non-matrix f32 division expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeF32, TypeVec } from '../../../../util/conversion.js';
+import { FP, FPVector } from '../../../../util/floating_point.js';
+import { sparseF32Range, sparseVectorF32Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+const divisionVectorScalarInterval = (v: readonly number[], s: number): FPVector => {
+ return FP.f32.toVector(v.map(e => FP.f32.divisionInterval(e, s)));
+};
+
+const divisionScalarVectorInterval = (s: number, v: readonly number[]): FPVector => {
+ return FP.f32.toVector(v.map(e => FP.f32.divisionInterval(s, e)));
+};
+
+export const g = makeTestGroup(GPUTest);
+
+const scalar_cases = ([true, false] as const)
+ .map(nonConst => ({
+ [`scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateScalarPairToIntervalCases(
+ sparseF32Range(),
+ sparseF32Range(),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f32.divisionInterval
+ );
+ },
+ }))
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const vector_scalar_cases = ([2, 3, 4] as const)
+ .flatMap(dim =>
+ ([true, false] as const).map(nonConst => ({
+ [`vec${dim}_scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateVectorScalarToVectorCases(
+ sparseVectorF32Range(dim),
+ sparseF32Range(),
+ nonConst ? 'unfiltered' : 'finite',
+ divisionVectorScalarInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const scalar_vector_cases = ([2, 3, 4] as const)
+ .flatMap(dim =>
+ ([true, false] as const).map(nonConst => ({
+ [`scalar_vec${dim}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateScalarVectorToVectorCases(
+ sparseF32Range(),
+ sparseVectorF32Range(dim),
+ nonConst ? 'unfiltered' : 'finite',
+ divisionScalarVectorInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/f32_division', {
+ ...scalar_cases,
+ ...vector_scalar_cases,
+ ...scalar_vector_cases,
+});
+
+g.test('scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x / y, where x and y are scalars
+Accuracy: 2.5 ULP for |y| in the range [2^-126, 2^126]
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
+ );
+ await run(t, binary('/'), [TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x / y, where x and y are vectors
+Accuracy: 2.5 ULP for |y| in the range [2^-126, 2^126]
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('vectorize', [2, 3, 4] as const))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const' // Using vectorize to generate vector cases based on scalar cases
+ );
+ await run(t, binary('/'), [TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x /= y
+Accuracy: 2.5 ULP for |y| in the range [2^-126, 2^126]
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
+ );
+ await run(t, compoundBinary('/='), [TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x / y, where x is a vector and y is a scalar
+Accuracy: 2.5 ULP for |y| in the range [2^-126, 2^126]
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
+ );
+ await run(
+ t,
+ binary('/'),
+ [TypeVec(dim, TypeF32), TypeF32],
+ TypeVec(dim, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('vector_scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x /= y, where x is a vector and y is a scalar
+Accuracy: 2.5 ULP for |y| in the range [2^-126, 2^126]
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
+ );
+ await run(
+ t,
+ compoundBinary('/='),
+ [TypeVec(dim, TypeF32), TypeF32],
+ TypeVec(dim, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x / y, where x is a scalar and y is a vector
+Accuracy: 2.5 ULP for |y| in the range [2^-126, 2^126]
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `scalar_vec${dim}_const` : `scalar_vec${dim}_non_const`
+ );
+ await run(
+ t,
+ binary('/'),
+ [TypeF32, TypeVec(dim, TypeF32)],
+ TypeVec(dim, TypeF32),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_matrix_addition.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_matrix_addition.spec.ts
new file mode 100644
index 0000000000..9f11c3cac1
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_matrix_addition.spec.ts
@@ -0,0 +1,95 @@
+export const description = `
+Execution Tests for matrix f32 addition expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeF32, TypeMat } from '../../../../util/conversion.js';
+import { FP } from '../../../../util/floating_point.js';
+import { sparseMatrixF32Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Cases: matCxR_[non_]const
+const mat_cases = ([2, 3, 4] as const)
+ .flatMap(cols =>
+ ([2, 3, 4] as const).flatMap(rows =>
+ ([true, false] as const).map(nonConst => ({
+ [`mat${cols}x${rows}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateMatrixPairToMatrixCases(
+ sparseMatrixF32Range(cols, rows),
+ sparseMatrixF32Range(cols, rows),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f32.additionMatrixMatrixInterval
+ );
+ },
+ }))
+ )
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/f32_matrix_addition', mat_cases);
+
+g.test('matrix')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x + y, where x and y are matrices
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `mat${cols}x${rows}_const` : `mat${cols}x${rows}_non_const`
+ );
+ await run(
+ t,
+ binary('+'),
+ [TypeMat(cols, rows, TypeF32), TypeMat(cols, rows, TypeF32)],
+ TypeMat(cols, rows, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('matrix_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x += y, where x and y are matrices
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `mat${cols}x${rows}_const` : `mat${cols}x${rows}_non_const`
+ );
+ await run(
+ t,
+ compoundBinary('+='),
+ [TypeMat(cols, rows, TypeF32), TypeMat(cols, rows, TypeF32)],
+ TypeMat(cols, rows, TypeF32),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_matrix_matrix_multiplication.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_matrix_matrix_multiplication.spec.ts
new file mode 100644
index 0000000000..2c48eab187
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_matrix_matrix_multiplication.spec.ts
@@ -0,0 +1,108 @@
+export const description = `
+Execution Tests for matrix-matrix f32 multiplication expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeF32, TypeMat } from '../../../../util/conversion.js';
+import { FP } from '../../../../util/floating_point.js';
+import { sparseMatrixF32Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Cases: matKxR_matCxK_[non_]const
+const mat_mat_cases = ([2, 3, 4] as const)
+ .flatMap(k =>
+ ([2, 3, 4] as const).flatMap(cols =>
+ ([2, 3, 4] as const).flatMap(rows =>
+ ([true, false] as const).map(nonConst => ({
+ [`mat${k}x${rows}_mat${cols}x${k}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateMatrixPairToMatrixCases(
+ sparseMatrixF32Range(k, rows),
+ sparseMatrixF32Range(cols, k),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f32.multiplicationMatrixMatrixInterval
+ );
+ },
+ }))
+ )
+ )
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/f32_matrix_matrix_multiplication', mat_mat_cases);
+
+g.test('matrix_matrix')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x * y, where x is a matrix and y is a matrix
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('common_dim', [2, 3, 4] as const)
+ .combine('x_rows', [2, 3, 4] as const)
+ .combine('y_cols', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const x_cols = t.params.common_dim;
+ const x_rows = t.params.x_rows;
+ const y_cols = t.params.y_cols;
+ const y_rows = t.params.common_dim;
+
+ const cases = await d.get(
+ t.params.inputSource === 'const'
+ ? `mat${x_cols}x${x_rows}_mat${y_cols}x${y_rows}_const`
+ : `mat${x_cols}x${x_rows}_mat${y_cols}x${y_rows}_non_const`
+ );
+ await run(
+ t,
+ binary('*'),
+ [TypeMat(x_cols, x_rows, TypeF32), TypeMat(y_cols, y_rows, TypeF32)],
+ TypeMat(y_cols, x_rows, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
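+// For the compound form `x *= y` the result must have the same type as `x`, so `y` has to be
+// a square common_dim x common_dim matrix; only x's row count varies independently here.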
+g.test('matrix_matrix_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x *= y, where x is a matrix and y is a matrix
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('common_dim', [2, 3, 4] as const)
+ .combine('x_rows', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const x_cols = t.params.common_dim;
+ const x_rows = t.params.x_rows;
+ const y_cols = x_cols;
+ const y_rows = t.params.common_dim;
+
+ const cases = await d.get(
+ t.params.inputSource === 'const'
+ ? `mat${x_cols}x${x_rows}_mat${y_cols}x${y_rows}_const`
+ : `mat${x_cols}x${x_rows}_mat${y_cols}x${y_rows}_non_const`
+ );
+ await run(
+ t,
+ compoundBinary('*='),
+ [TypeMat(x_cols, x_rows, TypeF32), TypeMat(y_cols, y_rows, TypeF32)],
+ TypeMat(y_cols, x_rows, TypeF32),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_matrix_scalar_multiplication.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_matrix_scalar_multiplication.spec.ts
new file mode 100644
index 0000000000..f3d36b8382
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_matrix_scalar_multiplication.spec.ts
@@ -0,0 +1,152 @@
+export const description = `
+Execution Tests for matrix-scalar and scalar-matrix f32 multiplication expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeF32, TypeMat } from '../../../../util/conversion.js';
+import { FP } from '../../../../util/floating_point.js';
+import { sparseF32Range, sparseMatrixF32Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Cases: matCxR_scalar_[non_]const
+const mat_scalar_cases = ([2, 3, 4] as const)
+ .flatMap(cols =>
+ ([2, 3, 4] as const).flatMap(rows =>
+ ([true, false] as const).map(nonConst => ({
+ [`mat${cols}x${rows}_scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateMatrixScalarToMatrixCases(
+ sparseMatrixF32Range(cols, rows),
+ sparseF32Range(),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f32.multiplicationMatrixScalarInterval
+ );
+ },
+ }))
+ )
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+// Cases: scalar_matCxR_[non_]const
+const scalar_mat_cases = ([2, 3, 4] as const)
+ .flatMap(cols =>
+ ([2, 3, 4] as const).flatMap(rows =>
+ ([true, false] as const).map(nonConst => ({
+ [`scalar_mat${cols}x${rows}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateScalarMatrixToMatrixCases(
+ sparseF32Range(),
+ sparseMatrixF32Range(cols, rows),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f32.multiplicationScalarMatrixInterval
+ );
+ },
+ }))
+ )
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/f32_matrix_scalar_multiplication', {
+ ...mat_scalar_cases,
+ ...scalar_mat_cases,
+});
+
+g.test('matrix_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x * y, where x is a matrix and y is a scalar
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(
+ t.params.inputSource === 'const'
+ ? `mat${cols}x${rows}_scalar_const`
+ : `mat${cols}x${rows}_scalar_non_const`
+ );
+ await run(
+ t,
+ binary('*'),
+ [TypeMat(cols, rows, TypeF32), TypeF32],
+ TypeMat(cols, rows, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('matrix_scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x *= y, where x is a matrix and y is a scalar
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(
+ t.params.inputSource === 'const'
+ ? `mat${cols}x${rows}_scalar_const`
+ : `mat${cols}x${rows}_scalar_non_const`
+ );
+ await run(
+ t,
+ compoundBinary('*='),
+ [TypeMat(cols, rows, TypeF32), TypeF32],
+ TypeMat(cols, rows, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('scalar_matrix')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x * y, where x is a scalar and y is a matrix
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(
+ t.params.inputSource === 'const'
+ ? `scalar_mat${cols}x${rows}_const`
+ : `scalar_mat${cols}x${rows}_non_const`
+ );
+ await run(
+ t,
+ binary('*'),
+ [TypeF32, TypeMat(cols, rows, TypeF32)],
+ TypeMat(cols, rows, TypeF32),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_matrix_subtraction.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_matrix_subtraction.spec.ts
new file mode 100644
index 0000000000..5f101d9b27
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_matrix_subtraction.spec.ts
@@ -0,0 +1,95 @@
+export const description = `
+Execution Tests for matrix f32 subtraction expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeF32, TypeMat } from '../../../../util/conversion.js';
+import { FP } from '../../../../util/floating_point.js';
+import { sparseMatrixF32Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Cases: matCxR_[non_]const
+const mat_cases = ([2, 3, 4] as const)
+ .flatMap(cols =>
+ ([2, 3, 4] as const).flatMap(rows =>
+ ([true, false] as const).map(nonConst => ({
+ [`mat${cols}x${rows}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateMatrixPairToMatrixCases(
+ sparseMatrixF32Range(cols, rows),
+ sparseMatrixF32Range(cols, rows),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f32.subtractionMatrixMatrixInterval
+ );
+ },
+ }))
+ )
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/f32_matrix_subtraction', mat_cases);
+
+g.test('matrix')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x - y, where x and y are matrices
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `mat${cols}x${rows}_const` : `mat${cols}x${rows}_non_const`
+ );
+ await run(
+ t,
+ binary('-'),
+ [TypeMat(cols, rows, TypeF32), TypeMat(cols, rows, TypeF32)],
+ TypeMat(cols, rows, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('matrix_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x -= y, where x and y are matrices
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `mat${cols}x${rows}_const` : `mat${cols}x${rows}_non_const`
+ );
+ await run(
+ t,
+ compoundBinary('-='),
+ [TypeMat(cols, rows, TypeF32), TypeMat(cols, rows, TypeF32)],
+ TypeMat(cols, rows, TypeF32),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_matrix_vector_multiplication.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_matrix_vector_multiplication.spec.ts
new file mode 100644
index 0000000000..e6cdf16d92
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_matrix_vector_multiplication.spec.ts
@@ -0,0 +1,147 @@
+export const description = `
+Execution Tests for matrix-vector and vector-matrix f32 multiplication expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeF32, TypeMat, TypeVec } from '../../../../util/conversion.js';
+import { FP } from '../../../../util/floating_point.js';
+import { sparseMatrixF32Range, sparseVectorF32Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Cases: matCxR_vecC_[non_]const
+const mat_vec_cases = ([2, 3, 4] as const)
+ .flatMap(cols =>
+ ([2, 3, 4] as const).flatMap(rows =>
+ ([true, false] as const).map(nonConst => ({
+ [`mat${cols}x${rows}_vec${cols}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateMatrixVectorToVectorCases(
+ sparseMatrixF32Range(cols, rows),
+ sparseVectorF32Range(cols),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f32.multiplicationMatrixVectorInterval
+ );
+ },
+ }))
+ )
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+// Cases: vecR_matCxR_[non_]const
+const vec_mat_cases = ([2, 3, 4] as const)
+ .flatMap(rows =>
+ ([2, 3, 4] as const).flatMap(cols =>
+ ([true, false] as const).map(nonConst => ({
+ [`vec${rows}_mat${cols}x${rows}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateVectorMatrixToVectorCases(
+ sparseVectorF32Range(rows),
+ sparseMatrixF32Range(cols, rows),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f32.multiplicationVectorMatrixInterval
+ );
+ },
+ }))
+ )
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/f32_matrix_vector_multiplication', {
+ ...mat_vec_cases,
+ ...vec_mat_cases,
+});
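+// For mat * vec the vector length must equal the matrix column count and the result has one
+// component per row; for vec * mat the vector length must equal the row count and the result
+// has one component per column. The TypeVec arguments in the tests below follow this.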
+
+g.test('matrix_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x * y, where x is a matrix and y is a vector
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(
+ t.params.inputSource === 'const'
+ ? `mat${cols}x${rows}_vec${cols}_const`
+ : `mat${cols}x${rows}_vec${cols}_non_const`
+ );
+ await run(
+ t,
+ binary('*'),
+ [TypeMat(cols, rows, TypeF32), TypeVec(cols, TypeF32)],
+ TypeVec(rows, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('vector_matrix')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x * y, where x is a vector and y is a matrix
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(
+ t.params.inputSource === 'const'
+ ? `vec${rows}_mat${cols}x${rows}_const`
+ : `vec${rows}_mat${cols}x${rows}_non_const`
+ );
+ await run(
+ t,
+ binary('*'),
+ [TypeVec(rows, TypeF32), TypeMat(cols, rows, TypeF32)],
+ TypeVec(cols, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
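+// For the compound form `x *= y` the result must have the same type as `x`, which forces the
+// matrix to be square; hence a single `dim` parameter instead of independent cols/rows.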
+g.test('vector_matrix_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x *= y, where x is a vector and y is a matrix
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const cols = t.params.dim;
+ const rows = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const'
+ ? `vec${rows}_mat${cols}x${rows}_const`
+ : `vec${rows}_mat${cols}x${rows}_non_const`
+ );
+ await run(
+ t,
+ compoundBinary('*='),
+ [TypeVec(rows, TypeF32), TypeMat(cols, rows, TypeF32)],
+ TypeVec(cols, TypeF32),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_multiplication.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_multiplication.spec.ts
new file mode 100644
index 0000000000..38da08fd3e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_multiplication.spec.ts
@@ -0,0 +1,194 @@
+export const description = `
+Execution Tests for non-matrix f32 multiplication expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeF32, TypeVec } from '../../../../util/conversion.js';
+import { FP, FPVector } from '../../../../util/floating_point.js';
+import { sparseF32Range, sparseVectorF32Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+const multiplicationVectorScalarInterval = (v: readonly number[], s: number): FPVector => {
+ return FP.f32.toVector(v.map(e => FP.f32.multiplicationInterval(e, s)));
+};
+
+const multiplicationScalarVectorInterval = (s: number, v: readonly number[]): FPVector => {
+ return FP.f32.toVector(v.map(e => FP.f32.multiplicationInterval(s, e)));
+};
+
+export const g = makeTestGroup(GPUTest);
+
+const scalar_cases = ([true, false] as const)
+ .map(nonConst => ({
+ [`scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateScalarPairToIntervalCases(
+ sparseF32Range(),
+ sparseF32Range(),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f32.multiplicationInterval
+ );
+ },
+ }))
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const vector_scalar_cases = ([2, 3, 4] as const)
+ .flatMap(dim =>
+ ([true, false] as const).map(nonConst => ({
+ [`vec${dim}_scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateVectorScalarToVectorCases(
+ sparseVectorF32Range(dim),
+ sparseF32Range(),
+ nonConst ? 'unfiltered' : 'finite',
+ multiplicationVectorScalarInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const scalar_vector_cases = ([2, 3, 4] as const)
+ .flatMap(dim =>
+ ([true, false] as const).map(nonConst => ({
+ [`scalar_vec${dim}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateScalarVectorToVectorCases(
+ sparseF32Range(),
+ sparseVectorF32Range(dim),
+ nonConst ? 'unfiltered' : 'finite',
+ multiplicationScalarVectorInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/f32_multiplication', {
+ ...scalar_cases,
+ ...vector_scalar_cases,
+ ...scalar_vector_cases,
+});
+
+g.test('scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x * y, where x and y are scalars
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
+ );
+ await run(t, binary('*'), [TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x * y, where x and y are vectors
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('vectorize', [2, 3, 4] as const))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const' // Using vectorize to generate vector cases based on scalar cases
+ );
+ await run(t, binary('*'), [TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x *= y
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
+ );
+ await run(t, compoundBinary('*='), [TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x * y, where x is a vector and y is a scalar
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
+ );
+ await run(
+ t,
+ binary('*'),
+ [TypeVec(dim, TypeF32), TypeF32],
+ TypeVec(dim, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('vector_scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x *= y, where x is a vector and y is a scalar
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
+ );
+ await run(
+ t,
+ compoundBinary('*='),
+ [TypeVec(dim, TypeF32), TypeF32],
+ TypeVec(dim, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x * y, where x is a scalar and y is a vector
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `scalar_vec${dim}_const` : `scalar_vec${dim}_non_const`
+ );
+ await run(
+ t,
+ binary('*'),
+ [TypeF32, TypeVec(dim, TypeF32)],
+ TypeVec(dim, TypeF32),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_remainder.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_remainder.spec.ts
new file mode 100644
index 0000000000..390a7f3426
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_remainder.spec.ts
@@ -0,0 +1,194 @@
+export const description = `
+Execution Tests for non-matrix f32 remainder expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeF32, TypeVec } from '../../../../util/conversion.js';
+import { FP, FPVector } from '../../../../util/floating_point.js';
+import { sparseF32Range, sparseVectorF32Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+const remainderVectorScalarInterval = (v: readonly number[], s: number): FPVector => {
+ return FP.f32.toVector(v.map(e => FP.f32.remainderInterval(e, s)));
+};
+
+const remainderScalarVectorInterval = (s: number, v: readonly number[]): FPVector => {
+ return FP.f32.toVector(v.map(e => FP.f32.remainderInterval(s, e)));
+};
+
+export const g = makeTestGroup(GPUTest);
+
+const scalar_cases = ([true, false] as const)
+ .map(nonConst => ({
+ [`scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateScalarPairToIntervalCases(
+ sparseF32Range(),
+ sparseF32Range(),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f32.remainderInterval
+ );
+ },
+ }))
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const vector_scalar_cases = ([2, 3, 4] as const)
+ .flatMap(dim =>
+ ([true, false] as const).map(nonConst => ({
+ [`vec${dim}_scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateVectorScalarToVectorCases(
+ sparseVectorF32Range(dim),
+ sparseF32Range(),
+ nonConst ? 'unfiltered' : 'finite',
+ remainderVectorScalarInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const scalar_vector_cases = ([2, 3, 4] as const)
+ .flatMap(dim =>
+ ([true, false] as const).map(nonConst => ({
+ [`scalar_vec${dim}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateScalarVectorToVectorCases(
+ sparseF32Range(),
+ sparseVectorF32Range(dim),
+ nonConst ? 'unfiltered' : 'finite',
+ remainderScalarVectorInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/f32_remainder', {
+ ...scalar_cases,
+ ...vector_scalar_cases,
+ ...scalar_vector_cases,
+});
+
+g.test('scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x % y, where x and y are scalars
+Accuracy: Derived from x - y * trunc(x/y)
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
+ );
+ await run(t, binary('%'), [TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x % y, where x and y are vectors
+Accuracy: Derived from x - y * trunc(x/y)
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('vectorize', [2, 3, 4] as const))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const' // Using vectorize to generate vector cases based on scalar cases
+ );
+ await run(t, binary('%'), [TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x %= y
+Accuracy: Derived from x - y * trunc(x/y)
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
+ );
+ await run(t, compoundBinary('%='), [TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x % y, where x is a vector and y is a scalar
+Accuracy: Derived from x - y * trunc(x/y)
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
+ );
+ await run(
+ t,
+ binary('%'),
+ [TypeVec(dim, TypeF32), TypeF32],
+ TypeVec(dim, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('vector_scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x %= y, where x is a vector and y is a scalar
+Accuracy: Derived from x - y * trunc(x/y)
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
+ );
+ await run(
+ t,
+ compoundBinary('%='),
+ [TypeVec(dim, TypeF32), TypeF32],
+ TypeVec(dim, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x % y, where x is a scalar and y is a vector
+Accuracy: Derived from x - y * trunc(x/y)
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `scalar_vec${dim}_const` : `scalar_vec${dim}_non_const`
+ );
+ await run(
+ t,
+ binary('%'),
+ [TypeF32, TypeVec(dim, TypeF32)],
+ TypeVec(dim, TypeF32),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_subtraction.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_subtraction.spec.ts
new file mode 100644
index 0000000000..91e06b7de8
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/f32_subtraction.spec.ts
@@ -0,0 +1,194 @@
+export const description = `
+Execution Tests for non-matrix f32 subtraction expression
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeF32, TypeVec } from '../../../../util/conversion.js';
+import { FP, FPVector } from '../../../../util/floating_point.js';
+import { sparseF32Range, sparseVectorF32Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+const subtractionVectorScalarInterval = (v: readonly number[], s: number): FPVector => {
+ return FP.f32.toVector(v.map(e => FP.f32.subtractionInterval(e, s)));
+};
+
+const subtractionScalarVectorInterval = (s: number, v: readonly number[]): FPVector => {
+ return FP.f32.toVector(v.map(e => FP.f32.subtractionInterval(s, e)));
+};
+
+export const g = makeTestGroup(GPUTest);
+
+const scalar_cases = ([true, false] as const)
+ .map(nonConst => ({
+ [`scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateScalarPairToIntervalCases(
+ sparseF32Range(),
+ sparseF32Range(),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f32.subtractionInterval
+ );
+ },
+ }))
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const vector_scalar_cases = ([2, 3, 4] as const)
+ .flatMap(dim =>
+ ([true, false] as const).map(nonConst => ({
+ [`vec${dim}_scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateVectorScalarToVectorCases(
+ sparseVectorF32Range(dim),
+ sparseF32Range(),
+ nonConst ? 'unfiltered' : 'finite',
+ subtractionVectorScalarInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+const scalar_vector_cases = ([2, 3, 4] as const)
+ .flatMap(dim =>
+ ([true, false] as const).map(nonConst => ({
+ [`scalar_vec${dim}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateScalarVectorToVectorCases(
+ sparseF32Range(),
+ sparseVectorF32Range(dim),
+ nonConst ? 'unfiltered' : 'finite',
+ subtractionScalarVectorInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('binary/f32_subtraction', {
+ ...scalar_cases,
+ ...vector_scalar_cases,
+ ...scalar_vector_cases,
+});
+
+g.test('scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x - y, where x and y are scalars
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
+ );
+ await run(t, binary('-'), [TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x - y, where x and y are vectors
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('vectorize', [2, 3, 4] as const))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const' // Using vectorize to generate vector cases based on scalar cases
+ );
+ await run(t, binary('-'), [TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x -= y
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'scalar_const' : 'scalar_non_const'
+ );
+ await run(t, compoundBinary('-='), [TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x - y, where x is a vector and y is a scalar
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
+ );
+ await run(
+ t,
+ binary('-'),
+ [TypeVec(dim, TypeF32), TypeF32],
+ TypeVec(dim, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('vector_scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x -= y, where x is a vector and y is a scalar
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `vec${dim}_scalar_const` : `vec${dim}_scalar_non_const`
+ );
+ await run(
+ t,
+ compoundBinary('-='),
+ [TypeVec(dim, TypeF32), TypeF32],
+ TypeVec(dim, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: x - y, where x is a scalar and y is a vector
+Accuracy: Correctly rounded
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? `scalar_vec${dim}_const` : `scalar_vec${dim}_non_const`
+ );
+ await run(
+ t,
+ binary('-'),
+ [TypeF32, TypeVec(dim, TypeF32)],
+ TypeVec(dim, TypeF32),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/i32_arithmetic.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/i32_arithmetic.spec.ts
new file mode 100644
index 0000000000..e9b7a2407f
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/i32_arithmetic.spec.ts
@@ -0,0 +1,738 @@
+export const description = `
+Execution Tests for the i32 arithmetic binary expression operations
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { kValue } from '../../../../util/constants.js';
+import { TypeI32, TypeVec } from '../../../../util/conversion.js';
+import { sparseI32Range, vectorI32Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import {
+ allInputSources,
+ generateBinaryToI32Cases,
+ generateI32VectorBinaryToVectorCases,
+ generateVectorI32BinaryToVectorCases,
+ run,
+} from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+function i32_add(x: number, y: number): number | undefined {
+ return x + y;
+}
+
+function i32_subtract(x: number, y: number): number | undefined {
+ return x - y;
+}
+
+function i32_multiply(x: number, y: number): number | undefined {
+ return Math.imul(x, y);
+}
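+// Math.imul keeps only the low 32 bits of the product (two's-complement wrap-around), so
+// overflowing i32 multiplications yield the wrapped integer rather than a rounded double.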
+
+function i32_divide_non_const(x: number, y: number): number | undefined {
+ if (y === 0) {
+ return x;
+ }
+ if (x === kValue.i32.negative.min && y === -1) {
+ return x;
+ }
+ return x / y;
+}
+
+function i32_divide_const(x: number, y: number): number | undefined {
+ if (y === 0) {
+ return undefined;
+ }
+ if (x === kValue.i32.negative.min && y === -1) {
+ return undefined;
+ }
+ return x / y;
+}
+
+function i32_remainder_non_const(x: number, y: number): number | undefined {
+ if (y === 0) {
+ return 0;
+ }
+ if (x === kValue.i32.negative.min && y === -1) {
+ return 0;
+ }
+ return x % y;
+}
+
+function i32_remainder_const(x: number, y: number): number | undefined {
+ if (y === 0) {
+ return undefined;
+ }
+ if (x === kValue.i32.negative.min && y === -1) {
+ return undefined;
+ }
+ return x % y;
+}
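+// The `_non_const` variants encode the runtime behavior the tests expect for the two
+// exceptional inputs (division by zero and i32 MIN / -1): division returns the left operand
+// and remainder returns 0. The `_const` variants return undefined instead, so those inputs
+// are filtered out of the const case lists, since constant evaluation is expected to reject
+// them.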
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('binary/i32_arithmetic', {
+ addition: () => {
+ return generateBinaryToI32Cases(sparseI32Range(), sparseI32Range(), i32_add);
+ },
+ subtraction: () => {
+ return generateBinaryToI32Cases(sparseI32Range(), sparseI32Range(), i32_subtract);
+ },
+ multiplication: () => {
+ return generateBinaryToI32Cases(sparseI32Range(), sparseI32Range(), i32_multiply);
+ },
+ division_non_const: () => {
+ return generateBinaryToI32Cases(sparseI32Range(), sparseI32Range(), i32_divide_non_const);
+ },
+ division_const: () => {
+ return generateBinaryToI32Cases(sparseI32Range(), sparseI32Range(), i32_divide_const);
+ },
+ remainder_non_const: () => {
+ return generateBinaryToI32Cases(sparseI32Range(), sparseI32Range(), i32_remainder_non_const);
+ },
+ remainder_const: () => {
+ return generateBinaryToI32Cases(sparseI32Range(), sparseI32Range(), i32_remainder_const);
+ },
+ addition_scalar_vector2: () => {
+ return generateI32VectorBinaryToVectorCases(sparseI32Range(), vectorI32Range(2), i32_add);
+ },
+ addition_scalar_vector3: () => {
+ return generateI32VectorBinaryToVectorCases(sparseI32Range(), vectorI32Range(3), i32_add);
+ },
+ addition_scalar_vector4: () => {
+ return generateI32VectorBinaryToVectorCases(sparseI32Range(), vectorI32Range(4), i32_add);
+ },
+ addition_vector2_scalar: () => {
+ return generateVectorI32BinaryToVectorCases(vectorI32Range(2), sparseI32Range(), i32_add);
+ },
+ addition_vector3_scalar: () => {
+ return generateVectorI32BinaryToVectorCases(vectorI32Range(3), sparseI32Range(), i32_add);
+ },
+ addition_vector4_scalar: () => {
+ return generateVectorI32BinaryToVectorCases(vectorI32Range(4), sparseI32Range(), i32_add);
+ },
+ subtraction_scalar_vector2: () => {
+ return generateI32VectorBinaryToVectorCases(sparseI32Range(), vectorI32Range(2), i32_subtract);
+ },
+ subtraction_scalar_vector3: () => {
+ return generateI32VectorBinaryToVectorCases(sparseI32Range(), vectorI32Range(3), i32_subtract);
+ },
+ subtraction_scalar_vector4: () => {
+ return generateI32VectorBinaryToVectorCases(sparseI32Range(), vectorI32Range(4), i32_subtract);
+ },
+ subtraction_vector2_scalar: () => {
+ return generateVectorI32BinaryToVectorCases(vectorI32Range(2), sparseI32Range(), i32_subtract);
+ },
+ subtraction_vector3_scalar: () => {
+ return generateVectorI32BinaryToVectorCases(vectorI32Range(3), sparseI32Range(), i32_subtract);
+ },
+ subtraction_vector4_scalar: () => {
+ return generateVectorI32BinaryToVectorCases(vectorI32Range(4), sparseI32Range(), i32_subtract);
+ },
+ multiplication_scalar_vector2: () => {
+ return generateI32VectorBinaryToVectorCases(sparseI32Range(), vectorI32Range(2), i32_multiply);
+ },
+ multiplication_scalar_vector3: () => {
+ return generateI32VectorBinaryToVectorCases(sparseI32Range(), vectorI32Range(3), i32_multiply);
+ },
+ multiplication_scalar_vector4: () => {
+ return generateI32VectorBinaryToVectorCases(sparseI32Range(), vectorI32Range(4), i32_multiply);
+ },
+ multiplication_vector2_scalar: () => {
+ return generateVectorI32BinaryToVectorCases(vectorI32Range(2), sparseI32Range(), i32_multiply);
+ },
+ multiplication_vector3_scalar: () => {
+ return generateVectorI32BinaryToVectorCases(vectorI32Range(3), sparseI32Range(), i32_multiply);
+ },
+ multiplication_vector4_scalar: () => {
+ return generateVectorI32BinaryToVectorCases(vectorI32Range(4), sparseI32Range(), i32_multiply);
+ },
+ division_scalar_vector2_non_const: () => {
+ return generateI32VectorBinaryToVectorCases(
+ sparseI32Range(),
+ vectorI32Range(2),
+ i32_divide_non_const
+ );
+ },
+ division_scalar_vector3_non_const: () => {
+ return generateI32VectorBinaryToVectorCases(
+ sparseI32Range(),
+ vectorI32Range(3),
+ i32_divide_non_const
+ );
+ },
+ division_scalar_vector4_non_const: () => {
+ return generateI32VectorBinaryToVectorCases(
+ sparseI32Range(),
+ vectorI32Range(4),
+ i32_divide_non_const
+ );
+ },
+ division_vector2_scalar_non_const: () => {
+ return generateVectorI32BinaryToVectorCases(
+ vectorI32Range(2),
+ sparseI32Range(),
+ i32_divide_non_const
+ );
+ },
+ division_vector3_scalar_non_const: () => {
+ return generateVectorI32BinaryToVectorCases(
+ vectorI32Range(3),
+ sparseI32Range(),
+ i32_divide_non_const
+ );
+ },
+ division_vector4_scalar_non_const: () => {
+ return generateVectorI32BinaryToVectorCases(
+ vectorI32Range(4),
+ sparseI32Range(),
+ i32_divide_non_const
+ );
+ },
+ division_scalar_vector2_const: () => {
+ return generateI32VectorBinaryToVectorCases(
+ sparseI32Range(),
+ vectorI32Range(2),
+ i32_divide_const
+ );
+ },
+ division_scalar_vector3_const: () => {
+ return generateI32VectorBinaryToVectorCases(
+ sparseI32Range(),
+ vectorI32Range(3),
+ i32_divide_const
+ );
+ },
+ division_scalar_vector4_const: () => {
+ return generateI32VectorBinaryToVectorCases(
+ sparseI32Range(),
+ vectorI32Range(4),
+ i32_divide_const
+ );
+ },
+ division_vector2_scalar_const: () => {
+ return generateVectorI32BinaryToVectorCases(
+ vectorI32Range(2),
+ sparseI32Range(),
+ i32_divide_const
+ );
+ },
+ division_vector3_scalar_const: () => {
+ return generateVectorI32BinaryToVectorCases(
+ vectorI32Range(3),
+ sparseI32Range(),
+ i32_divide_const
+ );
+ },
+ division_vector4_scalar_const: () => {
+ return generateVectorI32BinaryToVectorCases(
+ vectorI32Range(4),
+ sparseI32Range(),
+ i32_divide_const
+ );
+ },
+ remainder_scalar_vector2_non_const: () => {
+ return generateI32VectorBinaryToVectorCases(
+ sparseI32Range(),
+ vectorI32Range(2),
+ i32_remainder_non_const
+ );
+ },
+ remainder_scalar_vector3_non_const: () => {
+ return generateI32VectorBinaryToVectorCases(
+ sparseI32Range(),
+ vectorI32Range(3),
+ i32_remainder_non_const
+ );
+ },
+ remainder_scalar_vector4_non_const: () => {
+ return generateI32VectorBinaryToVectorCases(
+ sparseI32Range(),
+ vectorI32Range(4),
+ i32_remainder_non_const
+ );
+ },
+ remainder_vector2_scalar_non_const: () => {
+ return generateVectorI32BinaryToVectorCases(
+ vectorI32Range(2),
+ sparseI32Range(),
+ i32_remainder_non_const
+ );
+ },
+ remainder_vector3_scalar_non_const: () => {
+ return generateVectorI32BinaryToVectorCases(
+ vectorI32Range(3),
+ sparseI32Range(),
+ i32_remainder_non_const
+ );
+ },
+ remainder_vector4_scalar_non_const: () => {
+ return generateVectorI32BinaryToVectorCases(
+ vectorI32Range(4),
+ sparseI32Range(),
+ i32_remainder_non_const
+ );
+ },
+ remainder_scalar_vector2_const: () => {
+ return generateI32VectorBinaryToVectorCases(
+ sparseI32Range(),
+ vectorI32Range(2),
+ i32_remainder_const
+ );
+ },
+ remainder_scalar_vector3_const: () => {
+ return generateI32VectorBinaryToVectorCases(
+ sparseI32Range(),
+ vectorI32Range(3),
+ i32_remainder_const
+ );
+ },
+ remainder_scalar_vector4_const: () => {
+ return generateI32VectorBinaryToVectorCases(
+ sparseI32Range(),
+ vectorI32Range(4),
+ i32_remainder_const
+ );
+ },
+ remainder_vector2_scalar_const: () => {
+ return generateVectorI32BinaryToVectorCases(
+ vectorI32Range(2),
+ sparseI32Range(),
+ i32_remainder_const
+ );
+ },
+ remainder_vector3_scalar_const: () => {
+ return generateVectorI32BinaryToVectorCases(
+ vectorI32Range(3),
+ sparseI32Range(),
+ i32_remainder_const
+ );
+ },
+ remainder_vector4_scalar_const: () => {
+ return generateVectorI32BinaryToVectorCases(
+ vectorI32Range(4),
+ sparseI32Range(),
+ i32_remainder_const
+ );
+ },
+});
+
+g.test('addition')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x + y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('addition');
+ await run(t, binary('+'), [TypeI32, TypeI32], TypeI32, t.params, cases);
+ });
+
+g.test('addition_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x += y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('addition');
+ await run(t, compoundBinary('+='), [TypeI32, TypeI32], TypeI32, t.params, cases);
+ });
+
+g.test('subtraction')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x - y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('subtraction');
+ await run(t, binary('-'), [TypeI32, TypeI32], TypeI32, t.params, cases);
+ });
+
+g.test('subtraction_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x -= y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('subtraction');
+ await run(t, compoundBinary('-='), [TypeI32, TypeI32], TypeI32, t.params, cases);
+ });
+
+g.test('multiplication')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x * y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('multiplication');
+ await run(t, binary('*'), [TypeI32, TypeI32], TypeI32, t.params, cases);
+ });
+
+g.test('multiplication_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x *= y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('multiplication');
+ await run(t, compoundBinary('*='), [TypeI32, TypeI32], TypeI32, t.params, cases);
+ });
+
+g.test('division')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x / y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'division_const' : 'division_non_const'
+ );
+ await run(t, binary('/'), [TypeI32, TypeI32], TypeI32, t.params, cases);
+ });
+
+g.test('division_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x /= y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'division_const' : 'division_non_const'
+ );
+ await run(t, compoundBinary('/='), [TypeI32, TypeI32], TypeI32, t.params, cases);
+ });
+
+g.test('remainder')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x % y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'remainder_const' : 'remainder_non_const'
+ );
+ await run(t, binary('%'), [TypeI32, TypeI32], TypeI32, t.params, cases);
+ });
+
+g.test('remainder_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x %= y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'remainder_const' : 'remainder_non_const'
+ );
+ await run(t, compoundBinary('%='), [TypeI32, TypeI32], TypeI32, t.params, cases);
+ });
+
+g.test('addition_scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x + y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_rhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_rhs;
+ const vec_type = TypeVec(vec_size, TypeI32);
+ const cases = await d.get(`addition_scalar_vector${vec_size}`);
+ await run(t, binary('+'), [TypeI32, vec_type], vec_type, t.params, cases);
+ });
+
+g.test('addition_vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x + y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_lhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_lhs;
+ const vec_type = TypeVec(vec_size, TypeI32);
+ const cases = await d.get(`addition_vector${vec_size}_scalar`);
+ await run(t, binary('+'), [vec_type, TypeI32], vec_type, t.params, cases);
+ });
+
+g.test('addition_vector_scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x += y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_lhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_lhs;
+ const vec_type = TypeVec(vec_size, TypeI32);
+ const cases = await d.get(`addition_vector${vec_size}_scalar`);
+ await run(t, compoundBinary('+='), [vec_type, TypeI32], vec_type, t.params, cases);
+ });
+
+g.test('subtraction_scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x - y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_rhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_rhs;
+ const vec_type = TypeVec(vec_size, TypeI32);
+ const cases = await d.get(`subtraction_scalar_vector${vec_size}`);
+ await run(t, binary('-'), [TypeI32, vec_type], vec_type, t.params, cases);
+ });
+
+g.test('subtraction_vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x - y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_lhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_lhs;
+ const vec_type = TypeVec(vec_size, TypeI32);
+ const cases = await d.get(`subtraction_vector${vec_size}_scalar`);
+ await run(t, binary('-'), [vec_type, TypeI32], vec_type, t.params, cases);
+ });
+
+g.test('subtraction_vector_scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x -= y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_lhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_lhs;
+ const vec_type = TypeVec(vec_size, TypeI32);
+ const cases = await d.get(`subtraction_vector${vec_size}_scalar`);
+ await run(t, compoundBinary('-='), [vec_type, TypeI32], vec_type, t.params, cases);
+ });
+
+g.test('multiplication_scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x * y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_rhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_rhs;
+ const vec_type = TypeVec(vec_size, TypeI32);
+ const cases = await d.get(`multiplication_scalar_vector${vec_size}`);
+ await run(t, binary('*'), [TypeI32, vec_type], vec_type, t.params, cases);
+ });
+
+g.test('multiplication_vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x * y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_lhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_lhs;
+ const vec_type = TypeVec(vec_size, TypeI32);
+ const cases = await d.get(`multiplication_vector${vec_size}_scalar`);
+ await run(t, binary('*'), [vec_type, TypeI32], vec_type, t.params, cases);
+ });
+
+g.test('multiplication_vector_scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x *= y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_lhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_lhs;
+ const vec_type = TypeVec(vec_size, TypeI32);
+ const cases = await d.get(`multiplication_vector${vec_size}_scalar`);
+ await run(t, compoundBinary('*='), [vec_type, TypeI32], vec_type, t.params, cases);
+ });
+
+g.test('division_scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x / y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_rhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_rhs;
+ const vec_type = TypeVec(vec_size, TypeI32);
+ const source = t.params.inputSource === 'const' ? 'const' : 'non_const';
+ const cases = await d.get(`division_scalar_vector${vec_size}_${source}`);
+ await run(t, binary('/'), [TypeI32, vec_type], vec_type, t.params, cases);
+ });
+
+g.test('division_vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x / y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_lhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_lhs;
+ const vec_type = TypeVec(vec_size, TypeI32);
+ const source = t.params.inputSource === 'const' ? 'const' : 'non_const';
+ const cases = await d.get(`division_vector${vec_size}_scalar_${source}`);
+ await run(t, binary('/'), [vec_type, TypeI32], vec_type, t.params, cases);
+ });
+
+g.test('division_vector_scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x /= y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_lhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_lhs;
+ const vec_type = TypeVec(vec_size, TypeI32);
+ const source = t.params.inputSource === 'const' ? 'const' : 'non_const';
+ const cases = await d.get(`division_vector${vec_size}_scalar_${source}`);
+ await run(t, compoundBinary('/='), [vec_type, TypeI32], vec_type, t.params, cases);
+ });
+
+g.test('remainder_scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x % y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_rhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_rhs;
+ const vec_type = TypeVec(vec_size, TypeI32);
+ const source = t.params.inputSource === 'const' ? 'const' : 'non_const';
+ const cases = await d.get(`remainder_scalar_vector${vec_size}_${source}`);
+ await run(t, binary('%'), [TypeI32, vec_type], vec_type, t.params, cases);
+ });
+
+g.test('remainder_vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x % y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_lhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_lhs;
+ const vec_type = TypeVec(vec_size, TypeI32);
+ const source = t.params.inputSource === 'const' ? 'const' : 'non_const';
+ const cases = await d.get(`remainder_vector${vec_size}_scalar_${source}`);
+ await run(t, binary('%'), [vec_type, TypeI32], vec_type, t.params, cases);
+ });
+
+g.test('remainder_vector_scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x %= y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_lhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_lhs;
+ const vec_type = TypeVec(vec_size, TypeI32);
+ const source = t.params.inputSource === 'const' ? 'const' : 'non_const';
+ const cases = await d.get(`remainder_vector${vec_size}_scalar_${source}`);
+ await run(t, compoundBinary('%='), [vec_type, TypeI32], vec_type, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/i32_comparison.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/i32_comparison.spec.ts
new file mode 100644
index 0000000000..dce1a2519e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/i32_comparison.spec.ts
@@ -0,0 +1,121 @@
+export const description = `
+Execution Tests for the i32 comparison expressions
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { i32, bool, TypeBool, TypeI32 } from '../../../../util/conversion.js';
+import { vectorI32Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, Case, run } from '../expression.js';
+
+import { binary } from './binary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+/**
+ * @returns a test case for the provided left hand & right hand values and
+ * expected boolean result.
+ */
+function makeCase(lhs: number, rhs: number, expected_answer: boolean): Case {
+ return { input: [i32(lhs), i32(rhs)], expected: bool(expected_answer) };
+}
+
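+// vectorI32Range(2) yields pairs of interesting i32 values; each pair is
+// reused as the (lhs, rhs) operands for every comparison operator below.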
+export const d = makeCaseCache('binary/i32_comparison', {
+ equals: () => vectorI32Range(2).map(v => makeCase(v[0], v[1], v[0] === v[1])),
+ not_equals: () => vectorI32Range(2).map(v => makeCase(v[0], v[1], v[0] !== v[1])),
+ less_than: () => vectorI32Range(2).map(v => makeCase(v[0], v[1], v[0] < v[1])),
+ less_equal: () => vectorI32Range(2).map(v => makeCase(v[0], v[1], v[0] <= v[1])),
+ greater_than: () => vectorI32Range(2).map(v => makeCase(v[0], v[1], v[0] > v[1])),
+ greater_equal: () => vectorI32Range(2).map(v => makeCase(v[0], v[1], v[0] >= v[1])),
+});
+
+g.test('equals')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x == y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('equals');
+ await run(t, binary('=='), [TypeI32, TypeI32], TypeBool, t.params, cases);
+ });
+
+g.test('not_equals')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x != y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('not_equals');
+ await run(t, binary('!='), [TypeI32, TypeI32], TypeBool, t.params, cases);
+ });
+
+g.test('less_than')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x < y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('less_than');
+ await run(t, binary('<'), [TypeI32, TypeI32], TypeBool, t.params, cases);
+ });
+
+g.test('less_equals')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x <= y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('less_equal');
+ await run(t, binary('<='), [TypeI32, TypeI32], TypeBool, t.params, cases);
+ });
+
+g.test('greater_than')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x > y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('greater_than');
+ await run(t, binary('>'), [TypeI32, TypeI32], TypeBool, t.params, cases);
+ });
+
+g.test('greater_equals')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x >= y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('greater_equal');
+ await run(t, binary('>='), [TypeI32, TypeI32], TypeBool, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/u32_arithmetic.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/u32_arithmetic.spec.ts
new file mode 100644
index 0000000000..88667e8233
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/u32_arithmetic.spec.ts
@@ -0,0 +1,725 @@
+export const description = `
+Execution Tests for the u32 arithmetic binary expression operations
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeU32, TypeVec } from '../../../../util/conversion.js';
+import { sparseU32Range, vectorU32Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import {
+ allInputSources,
+ generateBinaryToU32Cases,
+ generateU32VectorBinaryToVectorCases,
+ generateVectorU32BinaryToVectorCases,
+ run,
+} from '../expression.js';
+
+import { binary, compoundBinary } from './binary.js';
+
+function u32_add(x: number, y: number): number | undefined {
+ return x + y;
+}
+
+function u32_subtract(x: number, y: number): number | undefined {
+ return x - y;
+}
+
+function u32_multiply(x: number, y: number): number | undefined {
+ return Math.imul(x, y);
+}
+
+function u32_divide_non_const(x: number, y: number): number | undefined {
+ if (y === 0) {
+ return x;
+ }
+ return x / y;
+}
+
+function u32_divide_const(x: number, y: number): number | undefined {
+ if (y === 0) {
+ return undefined;
+ }
+ return x / y;
+}
+
+function u32_remainder_non_const(x: number, y: number): number | undefined {
+ if (y === 0) {
+ return 0;
+ }
+ return x % y;
+}
+
+function u32_remainder_const(x: number, y: number): number | undefined {
+ if (y === 0) {
+ return undefined;
+ }
+ return x % y;
+}
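+
+// Note on the `_const` vs `_non_const` builders above: for runtime
+// (non-const) evaluation WGSL defines u32 division and remainder by zero
+// to yield x and 0 respectively, which the `_non_const` builders return.
+// In const-expressions division or remainder by zero is an error, so the
+// `_const` builders return `undefined` to drop those cases.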
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('binary/u32_arithmetic', {
+ addition: () => {
+ return generateBinaryToU32Cases(sparseU32Range(), sparseU32Range(), u32_add);
+ },
+ subtraction: () => {
+ return generateBinaryToU32Cases(sparseU32Range(), sparseU32Range(), u32_subtract);
+ },
+ multiplication: () => {
+ return generateBinaryToU32Cases(sparseU32Range(), sparseU32Range(), u32_multiply);
+ },
+ division_non_const: () => {
+ return generateBinaryToU32Cases(sparseU32Range(), sparseU32Range(), u32_divide_non_const);
+ },
+ division_const: () => {
+ return generateBinaryToU32Cases(sparseU32Range(), sparseU32Range(), u32_divide_const);
+ },
+ remainder_non_const: () => {
+ return generateBinaryToU32Cases(sparseU32Range(), sparseU32Range(), u32_remainder_non_const);
+ },
+ remainder_const: () => {
+ return generateBinaryToU32Cases(sparseU32Range(), sparseU32Range(), u32_remainder_const);
+ },
+ addition_scalar_vector2: () => {
+ return generateU32VectorBinaryToVectorCases(sparseU32Range(), vectorU32Range(2), u32_add);
+ },
+ addition_scalar_vector3: () => {
+ return generateU32VectorBinaryToVectorCases(sparseU32Range(), vectorU32Range(3), u32_add);
+ },
+ addition_scalar_vector4: () => {
+ return generateU32VectorBinaryToVectorCases(sparseU32Range(), vectorU32Range(4), u32_add);
+ },
+ addition_vector2_scalar: () => {
+ return generateVectorU32BinaryToVectorCases(vectorU32Range(2), sparseU32Range(), u32_add);
+ },
+ addition_vector3_scalar: () => {
+ return generateVectorU32BinaryToVectorCases(vectorU32Range(3), sparseU32Range(), u32_add);
+ },
+ addition_vector4_scalar: () => {
+ return generateVectorU32BinaryToVectorCases(vectorU32Range(4), sparseU32Range(), u32_add);
+ },
+ subtraction_scalar_vector2: () => {
+ return generateU32VectorBinaryToVectorCases(sparseU32Range(), vectorU32Range(2), u32_subtract);
+ },
+ subtraction_scalar_vector3: () => {
+ return generateU32VectorBinaryToVectorCases(sparseU32Range(), vectorU32Range(3), u32_subtract);
+ },
+ subtraction_scalar_vector4: () => {
+ return generateU32VectorBinaryToVectorCases(sparseU32Range(), vectorU32Range(4), u32_subtract);
+ },
+ subtraction_vector2_scalar: () => {
+ return generateVectorU32BinaryToVectorCases(vectorU32Range(2), sparseU32Range(), u32_subtract);
+ },
+ subtraction_vector3_scalar: () => {
+ return generateVectorU32BinaryToVectorCases(vectorU32Range(3), sparseU32Range(), u32_subtract);
+ },
+ subtraction_vector4_scalar: () => {
+ return generateVectorU32BinaryToVectorCases(vectorU32Range(4), sparseU32Range(), u32_subtract);
+ },
+ multiplication_scalar_vector2: () => {
+ return generateU32VectorBinaryToVectorCases(sparseU32Range(), vectorU32Range(2), u32_multiply);
+ },
+ multiplication_scalar_vector3: () => {
+ return generateU32VectorBinaryToVectorCases(sparseU32Range(), vectorU32Range(3), u32_multiply);
+ },
+ multiplication_scalar_vector4: () => {
+ return generateU32VectorBinaryToVectorCases(sparseU32Range(), vectorU32Range(4), u32_multiply);
+ },
+ multiplication_vector2_scalar: () => {
+ return generateVectorU32BinaryToVectorCases(vectorU32Range(2), sparseU32Range(), u32_multiply);
+ },
+ multiplication_vector3_scalar: () => {
+ return generateVectorU32BinaryToVectorCases(vectorU32Range(3), sparseU32Range(), u32_multiply);
+ },
+ multiplication_vector4_scalar: () => {
+ return generateVectorU32BinaryToVectorCases(vectorU32Range(4), sparseU32Range(), u32_multiply);
+ },
+ division_scalar_vector2_non_const: () => {
+ return generateU32VectorBinaryToVectorCases(
+ sparseU32Range(),
+ vectorU32Range(2),
+ u32_divide_non_const
+ );
+ },
+ division_scalar_vector3_non_const: () => {
+ return generateU32VectorBinaryToVectorCases(
+ sparseU32Range(),
+ vectorU32Range(3),
+ u32_divide_non_const
+ );
+ },
+ division_scalar_vector4_non_const: () => {
+ return generateU32VectorBinaryToVectorCases(
+ sparseU32Range(),
+ vectorU32Range(4),
+ u32_divide_non_const
+ );
+ },
+ division_vector2_scalar_non_const: () => {
+ return generateVectorU32BinaryToVectorCases(
+ vectorU32Range(2),
+ sparseU32Range(),
+ u32_divide_non_const
+ );
+ },
+ division_vector3_scalar_non_const: () => {
+ return generateVectorU32BinaryToVectorCases(
+ vectorU32Range(3),
+ sparseU32Range(),
+ u32_divide_non_const
+ );
+ },
+ division_vector4_scalar_non_const: () => {
+ return generateVectorU32BinaryToVectorCases(
+ vectorU32Range(4),
+ sparseU32Range(),
+ u32_divide_non_const
+ );
+ },
+ division_scalar_vector2_const: () => {
+ return generateU32VectorBinaryToVectorCases(
+ sparseU32Range(),
+ vectorU32Range(2),
+ u32_divide_const
+ );
+ },
+ division_scalar_vector3_const: () => {
+ return generateU32VectorBinaryToVectorCases(
+ sparseU32Range(),
+ vectorU32Range(3),
+ u32_divide_const
+ );
+ },
+ division_scalar_vector4_const: () => {
+ return generateU32VectorBinaryToVectorCases(
+ sparseU32Range(),
+ vectorU32Range(4),
+ u32_divide_const
+ );
+ },
+ division_vector2_scalar_const: () => {
+ return generateVectorU32BinaryToVectorCases(
+ vectorU32Range(2),
+ sparseU32Range(),
+ u32_divide_const
+ );
+ },
+ division_vector3_scalar_const: () => {
+ return generateVectorU32BinaryToVectorCases(
+ vectorU32Range(3),
+ sparseU32Range(),
+ u32_divide_const
+ );
+ },
+ division_vector4_scalar_const: () => {
+ return generateVectorU32BinaryToVectorCases(
+ vectorU32Range(4),
+ sparseU32Range(),
+ u32_divide_const
+ );
+ },
+ remainder_scalar_vector2_non_const: () => {
+ return generateU32VectorBinaryToVectorCases(
+ sparseU32Range(),
+ vectorU32Range(2),
+ u32_remainder_non_const
+ );
+ },
+ remainder_scalar_vector3_non_const: () => {
+ return generateU32VectorBinaryToVectorCases(
+ sparseU32Range(),
+ vectorU32Range(3),
+ u32_remainder_non_const
+ );
+ },
+ remainder_scalar_vector4_non_const: () => {
+ return generateU32VectorBinaryToVectorCases(
+ sparseU32Range(),
+ vectorU32Range(4),
+ u32_remainder_non_const
+ );
+ },
+ remainder_vector2_scalar_non_const: () => {
+ return generateVectorU32BinaryToVectorCases(
+ vectorU32Range(2),
+ sparseU32Range(),
+ u32_remainder_non_const
+ );
+ },
+ remainder_vector3_scalar_non_const: () => {
+ return generateVectorU32BinaryToVectorCases(
+ vectorU32Range(3),
+ sparseU32Range(),
+ u32_remainder_non_const
+ );
+ },
+ remainder_vector4_scalar_non_const: () => {
+ return generateVectorU32BinaryToVectorCases(
+ vectorU32Range(4),
+ sparseU32Range(),
+ u32_remainder_non_const
+ );
+ },
+ remainder_scalar_vector2_const: () => {
+ return generateU32VectorBinaryToVectorCases(
+ sparseU32Range(),
+ vectorU32Range(2),
+ u32_remainder_const
+ );
+ },
+ remainder_scalar_vector3_const: () => {
+ return generateU32VectorBinaryToVectorCases(
+ sparseU32Range(),
+ vectorU32Range(3),
+ u32_remainder_const
+ );
+ },
+ remainder_scalar_vector4_const: () => {
+ return generateU32VectorBinaryToVectorCases(
+ sparseU32Range(),
+ vectorU32Range(4),
+ u32_remainder_const
+ );
+ },
+ remainder_vector2_scalar_const: () => {
+ return generateVectorU32BinaryToVectorCases(
+ vectorU32Range(2),
+ sparseU32Range(),
+ u32_remainder_const
+ );
+ },
+ remainder_vector3_scalar_const: () => {
+ return generateVectorU32BinaryToVectorCases(
+ vectorU32Range(3),
+ sparseU32Range(),
+ u32_remainder_const
+ );
+ },
+ remainder_vector4_scalar_const: () => {
+ return generateVectorU32BinaryToVectorCases(
+ vectorU32Range(4),
+ sparseU32Range(),
+ u32_remainder_const
+ );
+ },
+});
+
+g.test('addition')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x + y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('addition');
+ await run(t, binary('+'), [TypeU32, TypeU32], TypeU32, t.params, cases);
+ });
+
+g.test('addition_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x += y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('addition');
+ await run(t, compoundBinary('+='), [TypeU32, TypeU32], TypeU32, t.params, cases);
+ });
+
+g.test('subtraction')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x - y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('subtraction');
+ await run(t, binary('-'), [TypeU32, TypeU32], TypeU32, t.params, cases);
+ });
+
+g.test('subtraction_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x -= y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('subtraction');
+ await run(t, compoundBinary('-='), [TypeU32, TypeU32], TypeU32, t.params, cases);
+ });
+
+g.test('multiplication')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x * y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('multiplication');
+ await run(t, binary('*'), [TypeU32, TypeU32], TypeU32, t.params, cases);
+ });
+
+g.test('multiplication_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x *= y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('multiplication');
+ await run(t, compoundBinary('*='), [TypeU32, TypeU32], TypeU32, t.params, cases);
+ });
+
+g.test('division')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x / y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'division_const' : 'division_non_const'
+ );
+ await run(t, binary('/'), [TypeU32, TypeU32], TypeU32, t.params, cases);
+ });
+
+g.test('division_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x /= y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'division_const' : 'division_non_const'
+ );
+ await run(t, compoundBinary('/='), [TypeU32, TypeU32], TypeU32, t.params, cases);
+ });
+
+g.test('remainder')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x % y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'remainder_const' : 'remainder_non_const'
+ );
+ await run(t, binary('%'), [TypeU32, TypeU32], TypeU32, t.params, cases);
+ });
+
+g.test('remainder_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x %= y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'remainder_const' : 'remainder_non_const'
+ );
+ await run(t, compoundBinary('%='), [TypeU32, TypeU32], TypeU32, t.params, cases);
+ });
+
+g.test('addition_scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x + y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_rhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_rhs;
+ const vec_type = TypeVec(vec_size, TypeU32);
+ const cases = await d.get(`addition_scalar_vector${vec_size}`);
+ await run(t, binary('+'), [TypeU32, vec_type], vec_type, t.params, cases);
+ });
+
+g.test('addition_vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x + y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_lhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_lhs;
+ const vec_type = TypeVec(vec_size, TypeU32);
+ const cases = await d.get(`addition_vector${vec_size}_scalar`);
+ await run(t, binary('+'), [vec_type, TypeU32], vec_type, t.params, cases);
+ });
+
+g.test('addition_vector_scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x += y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_lhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_lhs;
+ const vec_type = TypeVec(vec_size, TypeU32);
+ const cases = await d.get(`addition_vector${vec_size}_scalar`);
+ await run(t, compoundBinary('+='), [vec_type, TypeU32], vec_type, t.params, cases);
+ });
+
+g.test('subtraction_scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x - y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_rhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_rhs;
+ const vec_type = TypeVec(vec_size, TypeU32);
+ const cases = await d.get(`subtraction_scalar_vector${vec_size}`);
+ await run(t, binary('-'), [TypeU32, vec_type], vec_type, t.params, cases);
+ });
+
+g.test('subtraction_vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x - y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_lhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_lhs;
+ const vec_type = TypeVec(vec_size, TypeU32);
+ const cases = await d.get(`subtraction_vector${vec_size}_scalar`);
+ await run(t, binary('-'), [vec_type, TypeU32], vec_type, t.params, cases);
+ });
+
+g.test('subtraction_vector_scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x -= y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_lhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_lhs;
+ const vec_type = TypeVec(vec_size, TypeU32);
+ const cases = await d.get(`subtraction_vector${vec_size}_scalar`);
+ await run(t, compoundBinary('-='), [vec_type, TypeU32], vec_type, t.params, cases);
+ });
+
+g.test('multiplication_scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x * y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_rhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_rhs;
+ const vec_type = TypeVec(vec_size, TypeU32);
+ const cases = await d.get(`multiplication_scalar_vector${vec_size}`);
+ await run(t, binary('*'), [TypeU32, vec_type], vec_type, t.params, cases);
+ });
+
+g.test('multiplication_vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x * y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_lhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_lhs;
+ const vec_type = TypeVec(vec_size, TypeU32);
+ const cases = await d.get(`multiplication_vector${vec_size}_scalar`);
+ await run(t, binary('*'), [vec_type, TypeU32], vec_type, t.params, cases);
+ });
+
+g.test('multiplication_vector_scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x *= y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_lhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_lhs;
+ const vec_type = TypeVec(vec_size, TypeU32);
+ const cases = await d.get(`multiplication_vector${vec_size}_scalar`);
+ await run(t, compoundBinary('*='), [vec_type, TypeU32], vec_type, t.params, cases);
+ });
+
+g.test('division_scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x / y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_rhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_rhs;
+ const vec_type = TypeVec(vec_size, TypeU32);
+ const source = t.params.inputSource === 'const' ? 'const' : 'non_const';
+ const cases = await d.get(`division_scalar_vector${vec_size}_${source}`);
+ await run(t, binary('/'), [TypeU32, vec_type], vec_type, t.params, cases);
+ });
+
+g.test('division_vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x / y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_lhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_lhs;
+ const vec_type = TypeVec(vec_size, TypeU32);
+ const source = t.params.inputSource === 'const' ? 'const' : 'non_const';
+ const cases = await d.get(`division_vector${vec_size}_scalar_${source}`);
+ await run(t, binary('/'), [vec_type, TypeU32], vec_type, t.params, cases);
+ });
+
+g.test('division_vector_scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x /= y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_lhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_lhs;
+ const vec_type = TypeVec(vec_size, TypeU32);
+ const source = t.params.inputSource === 'const' ? 'const' : 'non_const';
+ const cases = await d.get(`division_vector${vec_size}_scalar_${source}`);
+ await run(t, compoundBinary('/='), [vec_type, TypeU32], vec_type, t.params, cases);
+ });
+
+g.test('remainder_scalar_vector')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x % y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_rhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_rhs;
+ const vec_type = TypeVec(vec_size, TypeU32);
+ const source = t.params.inputSource === 'const' ? 'const' : 'non_const';
+ const cases = await d.get(`remainder_scalar_vector${vec_size}_${source}`);
+ await run(t, binary('%'), [TypeU32, vec_type], vec_type, t.params, cases);
+ });
+
+g.test('remainder_vector_scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x % y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_lhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_lhs;
+ const vec_type = TypeVec(vec_size, TypeU32);
+ const source = t.params.inputSource === 'const' ? 'const' : 'non_const';
+ const cases = await d.get(`remainder_vector${vec_size}_scalar_${source}`);
+ await run(t, binary('%'), [vec_type, TypeU32], vec_type, t.params, cases);
+ });
+
+g.test('remainder_vector_scalar_compound')
+ .specURL('https://www.w3.org/TR/WGSL/#arithmetic-expr')
+ .desc(
+ `
+Expression: x %= y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize_lhs', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const vec_size = t.params.vectorize_lhs;
+ const vec_type = TypeVec(vec_size, TypeU32);
+ const source = t.params.inputSource === 'const' ? 'const' : 'non_const';
+ const cases = await d.get(`remainder_vector${vec_size}_scalar_${source}`);
+ await run(t, compoundBinary('%='), [vec_type, TypeU32], vec_type, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/u32_comparison.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/u32_comparison.spec.ts
new file mode 100644
index 0000000000..1f693da5fd
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/binary/u32_comparison.spec.ts
@@ -0,0 +1,121 @@
+export const description = `
+Execution Tests for the u32 comparison expressions
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { u32, bool, TypeBool, TypeU32 } from '../../../../util/conversion.js';
+import { vectorU32Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, Case, run } from '../expression.js';
+
+import { binary } from './binary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+/**
+ * @returns a test case for the provided left hand & right hand values and
+ * expected boolean result.
+ */
+function makeCase(lhs: number, rhs: number, expected_answer: boolean): Case {
+ return { input: [u32(lhs), u32(rhs)], expected: bool(expected_answer) };
+}
+
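+// vectorU32Range(2) yields pairs of interesting u32 values; each pair is
+// reused as the (lhs, rhs) operands for every comparison operator below.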
+export const d = makeCaseCache('binary/u32_comparison', {
+ equals: () => vectorU32Range(2).map(v => makeCase(v[0], v[1], v[0] === v[1])),
+ not_equals: () => vectorU32Range(2).map(v => makeCase(v[0], v[1], v[0] !== v[1])),
+ less_than: () => vectorU32Range(2).map(v => makeCase(v[0], v[1], v[0] < v[1])),
+ less_equal: () => vectorU32Range(2).map(v => makeCase(v[0], v[1], v[0] <= v[1])),
+ greater_than: () => vectorU32Range(2).map(v => makeCase(v[0], v[1], v[0] > v[1])),
+ greater_equal: () => vectorU32Range(2).map(v => makeCase(v[0], v[1], v[0] >= v[1])),
+});
+
+g.test('equals')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x == y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('equals');
+ await run(t, binary('=='), [TypeU32, TypeU32], TypeBool, t.params, cases);
+ });
+
+g.test('not_equals')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x != y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('not_equals');
+ await run(t, binary('!='), [TypeU32, TypeU32], TypeBool, t.params, cases);
+ });
+
+g.test('less_than')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x < y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('less_than');
+ await run(t, binary('<'), [TypeU32, TypeU32], TypeBool, t.params, cases);
+ });
+
+g.test('less_equals')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x <= y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('less_equal');
+ await run(t, binary('<='), [TypeU32, TypeU32], TypeBool, t.params, cases);
+ });
+
+g.test('greater_than')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x > y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('greater_than');
+ await run(t, binary('>'), [TypeU32, TypeU32], TypeBool, t.params, cases);
+ });
+
+g.test('greater_equals')
+ .specURL('https://www.w3.org/TR/WGSL/#comparison-expr')
+ .desc(
+ `
+Expression: x >= y
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('greater_equal');
+ await run(t, binary('>='), [TypeU32, TypeU32], TypeBool, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/abs.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/abs.spec.ts
new file mode 100644
index 0000000000..05d5242f73
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/abs.spec.ts
@@ -0,0 +1,196 @@
+export const description = `
+Execution tests for the 'abs' builtin function
+
+S is AbstractInt, i32, or u32
+T is S or vecN<S>
+@const fn abs(e: T ) -> T
+The absolute value of e. Component-wise when T is a vector. If e is a signed
+integral scalar type and evaluates to the largest negative value, then the
+result is e. If e is an unsigned integral type, then the result is e.
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn abs(e: T ) -> T
+Returns the absolute value of e (e.g. e with a positive sign bit).
+Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { kBit } from '../../../../../util/constants.js';
+import {
+ i32Bits,
+ TypeF32,
+ TypeF16,
+ TypeI32,
+ TypeU32,
+ u32Bits,
+ TypeAbstractFloat,
+} from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullF32Range, fullF16Range, fullF64Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, onlyConstInputSource, run } from '../../expression.js';
+
+import { abstractBuiltin, builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('abs', {
+ f32: () => {
+ return FP.f32.generateScalarToIntervalCases(fullF32Range(), 'unfiltered', FP.f32.absInterval);
+ },
+ f16: () => {
+ return FP.f16.generateScalarToIntervalCases(fullF16Range(), 'unfiltered', FP.f16.absInterval);
+ },
+ abstract: () => {
+ return FP.abstract.generateScalarToIntervalCases(
+ fullF64Range(),
+ 'unfiltered',
+ FP.abstract.absInterval
+ );
+ },
+});
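+
+// Only the floating-point cases go through the FP interval framework above;
+// the u32 and i32 tests below list their expectations inline, since integer
+// abs is exact (with i32 min mapping to itself).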
+
+g.test('abstract_int')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`abstract int tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('u32')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`unsigned int tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ await run(t, builtin('abs'), [TypeU32], TypeU32, t.params, [
+ // Min and Max u32
+ { input: u32Bits(kBit.u32.min), expected: u32Bits(kBit.u32.min) },
+ { input: u32Bits(kBit.u32.max), expected: u32Bits(kBit.u32.max) },
+ // Powers of 2: 2^i for 0 <= i <= 31
+ { input: u32Bits(kBit.powTwo.to0), expected: u32Bits(kBit.powTwo.to0) },
+ { input: u32Bits(kBit.powTwo.to1), expected: u32Bits(kBit.powTwo.to1) },
+ { input: u32Bits(kBit.powTwo.to2), expected: u32Bits(kBit.powTwo.to2) },
+ { input: u32Bits(kBit.powTwo.to3), expected: u32Bits(kBit.powTwo.to3) },
+ { input: u32Bits(kBit.powTwo.to4), expected: u32Bits(kBit.powTwo.to4) },
+ { input: u32Bits(kBit.powTwo.to5), expected: u32Bits(kBit.powTwo.to5) },
+ { input: u32Bits(kBit.powTwo.to6), expected: u32Bits(kBit.powTwo.to6) },
+ { input: u32Bits(kBit.powTwo.to7), expected: u32Bits(kBit.powTwo.to7) },
+ { input: u32Bits(kBit.powTwo.to8), expected: u32Bits(kBit.powTwo.to8) },
+ { input: u32Bits(kBit.powTwo.to9), expected: u32Bits(kBit.powTwo.to9) },
+ { input: u32Bits(kBit.powTwo.to10), expected: u32Bits(kBit.powTwo.to10) },
+ { input: u32Bits(kBit.powTwo.to11), expected: u32Bits(kBit.powTwo.to11) },
+ { input: u32Bits(kBit.powTwo.to12), expected: u32Bits(kBit.powTwo.to12) },
+ { input: u32Bits(kBit.powTwo.to13), expected: u32Bits(kBit.powTwo.to13) },
+ { input: u32Bits(kBit.powTwo.to14), expected: u32Bits(kBit.powTwo.to14) },
+ { input: u32Bits(kBit.powTwo.to15), expected: u32Bits(kBit.powTwo.to15) },
+ { input: u32Bits(kBit.powTwo.to16), expected: u32Bits(kBit.powTwo.to16) },
+ { input: u32Bits(kBit.powTwo.to17), expected: u32Bits(kBit.powTwo.to17) },
+ { input: u32Bits(kBit.powTwo.to18), expected: u32Bits(kBit.powTwo.to18) },
+ { input: u32Bits(kBit.powTwo.to19), expected: u32Bits(kBit.powTwo.to19) },
+ { input: u32Bits(kBit.powTwo.to20), expected: u32Bits(kBit.powTwo.to20) },
+ { input: u32Bits(kBit.powTwo.to21), expected: u32Bits(kBit.powTwo.to21) },
+ { input: u32Bits(kBit.powTwo.to22), expected: u32Bits(kBit.powTwo.to22) },
+ { input: u32Bits(kBit.powTwo.to23), expected: u32Bits(kBit.powTwo.to23) },
+ { input: u32Bits(kBit.powTwo.to24), expected: u32Bits(kBit.powTwo.to24) },
+ { input: u32Bits(kBit.powTwo.to25), expected: u32Bits(kBit.powTwo.to25) },
+ { input: u32Bits(kBit.powTwo.to26), expected: u32Bits(kBit.powTwo.to26) },
+ { input: u32Bits(kBit.powTwo.to27), expected: u32Bits(kBit.powTwo.to27) },
+ { input: u32Bits(kBit.powTwo.to28), expected: u32Bits(kBit.powTwo.to28) },
+ { input: u32Bits(kBit.powTwo.to29), expected: u32Bits(kBit.powTwo.to29) },
+ { input: u32Bits(kBit.powTwo.to30), expected: u32Bits(kBit.powTwo.to30) },
+ { input: u32Bits(kBit.powTwo.to31), expected: u32Bits(kBit.powTwo.to31) },
+ ]);
+ });
+
+g.test('i32')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`signed int tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ await run(t, builtin('abs'), [TypeI32], TypeI32, t.params, [
+ // Min and max i32
+ // If e evaluates to the largest negative value, then the result is e.
+ { input: i32Bits(kBit.i32.negative.min), expected: i32Bits(kBit.i32.negative.min) },
+ { input: i32Bits(kBit.i32.negative.max), expected: i32Bits(kBit.i32.positive.min) },
+ { input: i32Bits(kBit.i32.positive.max), expected: i32Bits(kBit.i32.positive.max) },
+ { input: i32Bits(kBit.i32.positive.min), expected: i32Bits(kBit.i32.positive.min) },
+ // input: -1 * pow(2, n), expected: pow(2, n), for n = 0, ..., 31
+ { input: i32Bits(kBit.negPowTwo.to0), expected: i32Bits(kBit.powTwo.to0) },
+ { input: i32Bits(kBit.negPowTwo.to1), expected: i32Bits(kBit.powTwo.to1) },
+ { input: i32Bits(kBit.negPowTwo.to2), expected: i32Bits(kBit.powTwo.to2) },
+ { input: i32Bits(kBit.negPowTwo.to3), expected: i32Bits(kBit.powTwo.to3) },
+ { input: i32Bits(kBit.negPowTwo.to4), expected: i32Bits(kBit.powTwo.to4) },
+ { input: i32Bits(kBit.negPowTwo.to5), expected: i32Bits(kBit.powTwo.to5) },
+ { input: i32Bits(kBit.negPowTwo.to6), expected: i32Bits(kBit.powTwo.to6) },
+ { input: i32Bits(kBit.negPowTwo.to7), expected: i32Bits(kBit.powTwo.to7) },
+ { input: i32Bits(kBit.negPowTwo.to8), expected: i32Bits(kBit.powTwo.to8) },
+ { input: i32Bits(kBit.negPowTwo.to9), expected: i32Bits(kBit.powTwo.to9) },
+ { input: i32Bits(kBit.negPowTwo.to10), expected: i32Bits(kBit.powTwo.to10) },
+ { input: i32Bits(kBit.negPowTwo.to11), expected: i32Bits(kBit.powTwo.to11) },
+ { input: i32Bits(kBit.negPowTwo.to12), expected: i32Bits(kBit.powTwo.to12) },
+ { input: i32Bits(kBit.negPowTwo.to13), expected: i32Bits(kBit.powTwo.to13) },
+ { input: i32Bits(kBit.negPowTwo.to14), expected: i32Bits(kBit.powTwo.to14) },
+ { input: i32Bits(kBit.negPowTwo.to15), expected: i32Bits(kBit.powTwo.to15) },
+ { input: i32Bits(kBit.negPowTwo.to16), expected: i32Bits(kBit.powTwo.to16) },
+ { input: i32Bits(kBit.negPowTwo.to17), expected: i32Bits(kBit.powTwo.to17) },
+ { input: i32Bits(kBit.negPowTwo.to18), expected: i32Bits(kBit.powTwo.to18) },
+ { input: i32Bits(kBit.negPowTwo.to19), expected: i32Bits(kBit.powTwo.to19) },
+ { input: i32Bits(kBit.negPowTwo.to20), expected: i32Bits(kBit.powTwo.to20) },
+ { input: i32Bits(kBit.negPowTwo.to21), expected: i32Bits(kBit.powTwo.to21) },
+ { input: i32Bits(kBit.negPowTwo.to22), expected: i32Bits(kBit.powTwo.to22) },
+ { input: i32Bits(kBit.negPowTwo.to23), expected: i32Bits(kBit.powTwo.to23) },
+ { input: i32Bits(kBit.negPowTwo.to24), expected: i32Bits(kBit.powTwo.to24) },
+ { input: i32Bits(kBit.negPowTwo.to25), expected: i32Bits(kBit.powTwo.to25) },
+ { input: i32Bits(kBit.negPowTwo.to26), expected: i32Bits(kBit.powTwo.to26) },
+ { input: i32Bits(kBit.negPowTwo.to27), expected: i32Bits(kBit.powTwo.to27) },
+ { input: i32Bits(kBit.negPowTwo.to28), expected: i32Bits(kBit.powTwo.to28) },
+ { input: i32Bits(kBit.negPowTwo.to29), expected: i32Bits(kBit.powTwo.to29) },
+ { input: i32Bits(kBit.negPowTwo.to30), expected: i32Bits(kBit.powTwo.to30) },
+ { input: i32Bits(kBit.negPowTwo.to31), expected: i32Bits(kBit.powTwo.to31) },
+ ]);
+ });
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u
+ .combine('inputSource', onlyConstInputSource)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('abstract');
+ await run(t, abstractBuiltin('abs'), [TypeAbstractFloat], TypeAbstractFloat, t.params, cases);
+ });
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`float 32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('f32');
+ await run(t, builtin('abs'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, builtin('abs'), [TypeF16], TypeF16, t.params, cases);
+ });
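
The i32 rows above pair each negative power of two with its positive counterpart, and the final to31 row encodes the two's-complement edge case: abs(-2^31) is not representable, so the expected value wraps back to -2^31 itself. Below is an illustrative standalone sketch of that behaviour in plain TypeScript bit arithmetic; it uses none of the CTS helpers (`i32Bits`, `kBit`) and only mirrors the values the rows describe.

```ts
// Illustrative sketch (plain TypeScript, no CTS helpers): abs() over
// two's-complement i32 powers of two, mirroring the rows above.
function absI32(x: number): number {
  // `| 0` wraps the result to i32, so abs(-2^31) overflows back to -2^31.
  return Math.abs(x) | 0;
}

for (let k = 2; k <= 31; ++k) {
  const input = -(2 ** k) | 0;    // the negPowTwo.toK value as a signed i32
  const expected = (2 ** k) | 0;  // the powTwo.toK bit pattern as a signed i32
  console.assert(absI32(input) === expected);
}
// k = 31 also passes: both sides wrap to -2147483648.
```
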
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/acos.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/acos.spec.ts
new file mode 100644
index 0000000000..5755c07905
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/acos.spec.ts
@@ -0,0 +1,78 @@
+export const description = `
+Execution tests for the 'acos' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn acos(e: T ) -> T
+Returns the arc cosine of e. Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { linearRange, fullF32Range, fullF16Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+const f32_inputs = [
+ ...linearRange(-1, 1, 100), // acos is defined on [-1, 1]
+ ...fullF32Range(),
+];
+
+const f16_inputs = [
+ ...linearRange(-1, 1, 100), // acos is defined on [-1, 1]
+ ...fullF16Range(),
+];
+
+export const d = makeCaseCache('acos', {
+ f32_const: () => {
+ return FP.f32.generateScalarToIntervalCases(f32_inputs, 'finite', FP.f32.acosInterval);
+ },
+ f32_non_const: () => {
+ return FP.f32.generateScalarToIntervalCases(f32_inputs, 'unfiltered', FP.f32.acosInterval);
+ },
+ f16_const: () => {
+ return FP.f16.generateScalarToIntervalCases(f16_inputs, 'finite', FP.f16.acosInterval);
+ },
+ f16_non_const: () => {
+ return FP.f16.generateScalarToIntervalCases(f16_inputs, 'unfiltered', FP.f16.acosInterval);
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f32_const' : 'f32_non_const');
+ await run(t, builtin('acos'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f16_const' : 'f16_non_const');
+ await run(t, builtin('acos'), [TypeF16], TypeF16, t.params, cases);
+ });
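
acos is only defined on [-1, 1], which is why the input list front-loads 100 evenly spaced points in that interval before appending the full f32/f16 ranges; out-of-domain inputs have no finite result, which is presumably what the 'finite' filter on the `*_const` cache entries screens out for constant evaluation. A minimal plain-Math illustration of the domain, independent of the FP interval helpers:

```ts
// Inside [-1, 1] acos has a real value; outside it Math.acos returns NaN
// (the WGSL result is indeterminate there).
for (const x of [-1, -0.5, 0, 0.5, 1, 1.5, -2]) {
  console.log(x, Math.acos(x)); // 1.5 and -2 print NaN
}
```
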
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/acosh.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/acosh.spec.ts
new file mode 100644
index 0000000000..cc78ce3eee
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/acosh.spec.ts
@@ -0,0 +1,81 @@
+export const description = `
+Execution tests for the 'acosh' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn acosh(e: T ) -> T
+Returns the hyperbolic arc cosine of e. The result is 0 when e < 1.
+Computes the non-negative functional inverse of cosh.
+Component-wise when T is a vector.
+Note: The result is not mathematically meaningful when e < 1.
+
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { biasedRange, fullF32Range, fullF16Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+const f32_inputs = [
+ ...biasedRange(1, 2, 100), // x near 1 can be problematic to implement
+ ...fullF32Range(),
+];
+const f16_inputs = [
+ ...biasedRange(1, 2, 100), // x near 1 can be problematic to implement
+ ...fullF16Range(),
+];
+
+export const d = makeCaseCache('acosh', {
+ f32_const: () => {
+ return FP.f32.generateScalarToIntervalCases(f32_inputs, 'finite', ...FP.f32.acoshIntervals);
+ },
+ f32_non_const: () => {
+ return FP.f32.generateScalarToIntervalCases(f32_inputs, 'unfiltered', ...FP.f32.acoshIntervals);
+ },
+ f16_const: () => {
+ return FP.f16.generateScalarToIntervalCases(f16_inputs, 'finite', ...FP.f16.acoshIntervals);
+ },
+ f16_non_const: () => {
+ return FP.f16.generateScalarToIntervalCases(f16_inputs, 'unfiltered', ...FP.f16.acoshIntervals);
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f32_const' : 'f32_non_const');
+ await run(t, builtin('acosh'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f16_const' : 'f16_non_const');
+ await run(t, builtin('acosh'), [TypeF16], TypeF16, t.params, cases);
+ });
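
The extra `biasedRange(1, 2, 100)` samples target the tricky end of acosh's domain: acosh(1) is exactly 0 and the derivative 1/sqrt(x^2 - 1) diverges as x approaches 1 from above, so implementations tend to lose accuracy there. A plain-Math sketch of that steep growth (values rounded; no FP helpers involved):

```ts
// acosh climbs from 0 very steeply just above x = 1, which is why the inputs
// bias extra sample points into [1, 2].
for (const x of [1, 1.0001, 1.01, 1.1, 2]) {
  console.log(x, Math.acosh(x));
}
// 1 -> 0, 1.0001 -> ~0.0141, 1.01 -> ~0.141, 1.1 -> ~0.444, 2 -> ~1.317
```
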
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/all.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/all.spec.ts
new file mode 100644
index 0000000000..9a2938c1d5
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/all.spec.ts
@@ -0,0 +1,92 @@
+export const description = `
+Execution tests for the 'all' builtin function
+
+S is a bool
+T is S or vecN<S>
+@const fn all(e: T) -> bool
+Returns e if e is scalar.
+Returns true if each component of e is true if e is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import {
+ False,
+ True,
+ TypeBool,
+ TypeVec,
+ vec2,
+ vec3,
+ vec4,
+} from '../../../../../util/conversion.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('bool')
+ .specURL('https://www.w3.org/TR/WGSL/#logical-builtin-functions')
+ .desc(`bool tests`)
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('overload', ['scalar', 'vec2', 'vec3', 'vec4'] as const)
+ )
+ .fn(async t => {
+ const overloads = {
+ scalar: {
+ type: TypeBool,
+ cases: [
+ { input: False, expected: False },
+ { input: True, expected: True },
+ ],
+ },
+ vec2: {
+ type: TypeVec(2, TypeBool),
+ cases: [
+ { input: vec2(False, False), expected: False },
+ { input: vec2(True, False), expected: False },
+ { input: vec2(False, True), expected: False },
+ { input: vec2(True, True), expected: True },
+ ],
+ },
+ vec3: {
+ type: TypeVec(3, TypeBool),
+ cases: [
+ { input: vec3(False, False, False), expected: False },
+ { input: vec3(True, False, False), expected: False },
+ { input: vec3(False, True, False), expected: False },
+ { input: vec3(True, True, False), expected: False },
+ { input: vec3(False, False, True), expected: False },
+ { input: vec3(True, False, True), expected: False },
+ { input: vec3(False, True, True), expected: False },
+ { input: vec3(True, True, True), expected: True },
+ ],
+ },
+ vec4: {
+ type: TypeVec(4, TypeBool),
+ cases: [
+ { input: vec4(False, False, False, False), expected: False },
+ { input: vec4(False, True, False, False), expected: False },
+ { input: vec4(False, False, True, False), expected: False },
+ { input: vec4(False, True, True, False), expected: False },
+ { input: vec4(False, False, False, True), expected: False },
+ { input: vec4(False, True, False, True), expected: False },
+ { input: vec4(False, False, True, True), expected: False },
+ { input: vec4(False, True, True, True), expected: False },
+ { input: vec4(True, False, False, False), expected: False },
+ { input: vec4(True, False, False, True), expected: False },
+ { input: vec4(True, False, True, False), expected: False },
+ { input: vec4(True, False, True, True), expected: False },
+ { input: vec4(True, True, False, False), expected: False },
+ { input: vec4(True, True, False, True), expected: False },
+ { input: vec4(True, True, True, False), expected: False },
+ { input: vec4(True, True, True, True), expected: True },
+ ],
+ },
+ };
+ const overload = overloads[t.params.overload];
+
+ await run(t, builtin('all'), [overload.type], TypeBool, t.params, overload.cases);
+ });
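
The expected column is just the AND reduction of the input components. The truth tables above could equally be generated rather than listed; the sketch below does that with plain booleans (not the CTS `True`/`False` scalar values) and confirms that exactly one combination per vector width is expected to be true.

```ts
// Generate every boolean combination for a vecN and reduce with AND,
// reproducing the expected column of the tables above.
function allCases(n: number): { input: boolean[]; expected: boolean }[] {
  const cases: { input: boolean[]; expected: boolean }[] = [];
  for (let bits = 0; bits < 1 << n; ++bits) {
    const input = Array.from({ length: n }, (_, i) => ((bits >> i) & 1) === 1);
    cases.push({ input, expected: input.every(c => c) });
  }
  return cases;
}

for (const n of [2, 3, 4]) {
  console.log(n, allCases(n).filter(c => c.expected).length); // always 1
}
```
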
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/any.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/any.spec.ts
new file mode 100644
index 0000000000..19ed7d186f
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/any.spec.ts
@@ -0,0 +1,92 @@
+export const description = `
+Execution tests for the 'any' builtin function
+
+S is a bool
+T is S or vecN<S>
+@const fn any(e: T) -> bool
+Returns e if e is scalar.
+Returns true if any component of e is true if e is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import {
+ False,
+ True,
+ TypeBool,
+ TypeVec,
+ vec2,
+ vec3,
+ vec4,
+} from '../../../../../util/conversion.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('bool')
+ .specURL('https://www.w3.org/TR/WGSL/#logical-builtin-functions')
+ .desc(`bool tests`)
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('overload', ['scalar', 'vec2', 'vec3', 'vec4'] as const)
+ )
+ .fn(async t => {
+ const overloads = {
+ scalar: {
+ type: TypeBool,
+ cases: [
+ { input: False, expected: False },
+ { input: True, expected: True },
+ ],
+ },
+ vec2: {
+ type: TypeVec(2, TypeBool),
+ cases: [
+ { input: vec2(False, False), expected: False },
+ { input: vec2(True, False), expected: True },
+ { input: vec2(False, True), expected: True },
+ { input: vec2(True, True), expected: True },
+ ],
+ },
+ vec3: {
+ type: TypeVec(3, TypeBool),
+ cases: [
+ { input: vec3(False, False, False), expected: False },
+ { input: vec3(True, False, False), expected: True },
+ { input: vec3(False, True, False), expected: True },
+ { input: vec3(True, True, False), expected: True },
+ { input: vec3(False, False, True), expected: True },
+ { input: vec3(True, False, True), expected: True },
+ { input: vec3(False, True, True), expected: True },
+ { input: vec3(True, True, True), expected: True },
+ ],
+ },
+ vec4: {
+ type: TypeVec(4, TypeBool),
+ cases: [
+ { input: vec4(False, False, False, False), expected: False },
+ { input: vec4(False, True, False, False), expected: True },
+ { input: vec4(False, False, True, False), expected: True },
+ { input: vec4(False, True, True, False), expected: True },
+ { input: vec4(False, False, False, True), expected: True },
+ { input: vec4(False, True, False, True), expected: True },
+ { input: vec4(False, False, True, True), expected: True },
+ { input: vec4(False, True, True, True), expected: True },
+ { input: vec4(True, False, False, False), expected: True },
+ { input: vec4(True, False, False, True), expected: True },
+ { input: vec4(True, False, True, False), expected: True },
+ { input: vec4(True, False, True, True), expected: True },
+ { input: vec4(True, True, False, False), expected: True },
+ { input: vec4(True, True, False, True), expected: True },
+ { input: vec4(True, True, True, False), expected: True },
+ { input: vec4(True, True, True, True), expected: True },
+ ],
+ },
+ };
+ const overload = overloads[t.params.overload];
+
+ await run(t, builtin('any'), [overload.type], TypeBool, t.params, overload.cases);
+ });
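
These tables mirror the `all` tables above with an OR reduction instead of an AND; equivalently, `any(v)` should agree with negating `all` applied to the negated components. A small illustrative De Morgan check on plain booleans (not part of the test itself):

```ts
// any(v) === !all(!v): check De Morgan's identity over every vec4 combination.
for (let bits = 0; bits < 16; ++bits) {
  const v = Array.from({ length: 4 }, (_, i) => ((bits >> i) & 1) === 1);
  const anyResult = v.some(c => c);
  const viaAll = !v.map(c => !c).every(c => c);
  console.assert(anyResult === viaAll);
}
```
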
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/arrayLength.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/arrayLength.spec.ts
new file mode 100644
index 0000000000..e5c20391d8
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/arrayLength.spec.ts
@@ -0,0 +1,306 @@
+export const description = `
+Execution tests for the 'arrayLength' builtin function.
+
+fn arrayLength(e: ptr<storage,array<T>> ) -> u32
+Returns the number of elements in the runtime-sized array.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { align } from '../../../../../util/math.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// List of array element types to test.
+const kTestTypes = [
+ { type: 'u32', stride: 4 },
+ { type: 'i32', stride: 4 },
+ { type: 'f32', stride: 4 },
+ { type: 'f16', stride: 2 },
+ { type: 'vec2<u32>', stride: 8 },
+ { type: 'vec2<i32>', stride: 8 },
+ { type: 'vec2<f32>', stride: 8 },
+ { type: 'vec2<f16>', stride: 4 },
+ { type: 'vec3<u32>', stride: 16 },
+ { type: 'vec3<i32>', stride: 16 },
+ { type: 'vec3<f32>', stride: 16 },
+ { type: 'vec3<f16>', stride: 8 },
+ { type: 'vec4<u32>', stride: 16 },
+ { type: 'vec4<i32>', stride: 16 },
+ { type: 'vec4<f32>', stride: 16 },
+ { type: 'vec4<f16>', stride: 8 },
+ { type: 'mat2x2<f32>', stride: 16 },
+ { type: 'mat2x3<f32>', stride: 32 },
+ { type: 'mat2x4<f32>', stride: 32 },
+ { type: 'mat3x2<f32>', stride: 24 },
+ { type: 'mat3x3<f32>', stride: 48 },
+ { type: 'mat3x4<f32>', stride: 48 },
+ { type: 'mat4x2<f32>', stride: 32 },
+ { type: 'mat4x3<f32>', stride: 64 },
+ { type: 'mat4x4<f32>', stride: 64 },
+ { type: 'mat2x2<f16>', stride: 8 },
+ { type: 'mat2x3<f16>', stride: 16 },
+ { type: 'mat2x4<f16>', stride: 16 },
+ { type: 'mat3x2<f16>', stride: 12 },
+ { type: 'mat3x3<f16>', stride: 24 },
+ { type: 'mat3x4<f16>', stride: 24 },
+ { type: 'mat4x2<f16>', stride: 16 },
+ { type: 'mat4x3<f16>', stride: 32 },
+ { type: 'mat4x4<f16>', stride: 32 },
+ { type: 'atomic<u32>', stride: 4 },
+ { type: 'atomic<i32>', stride: 4 },
+ { type: 'array<u32,4>', stride: 16 },
+ { type: 'array<i32,4>', stride: 16 },
+ { type: 'array<f32,4>', stride: 16 },
+ { type: 'array<f16,4>', stride: 8 },
+ // Structures - see declarations below.
+ { type: 'ElemStruct', stride: 4 },
+ { type: 'ElemStruct_ImplicitPadding', stride: 16 },
+ { type: 'ElemStruct_ExplicitPadding', stride: 32 },
+] as const;
+
+// Declarations for structures used as array element types.
+const kWgslStructures = `
+struct ElemStruct { a : u32 }
+struct ElemStruct_ImplicitPadding { a : vec3<u32> }
+struct ElemStruct_ExplicitPadding { @align(32) a : u32 }
+`;
+
+/**
+ * Run a shader and check that the array length is correct.
+ *
+ * @param t The test object
+ * @param wgsl The shader source
+ * @param stride The stride in bytes of the array element type
+ * @param offset The offset in bytes of the array from the start of the binding
+ * @param buffer_size The size in bytes of the buffer to allocate
+ * @param binding_size The size in bytes of the binding
+ * @param binding_offset The offset in bytes of the binding
+ * @param expected The array of expected values after running the shader
+ */
+function runShaderTest(
+ t: GPUTest,
+ wgsl: string,
+ stride: number,
+ offset: number,
+ buffer_size: number,
+ binding_size: number,
+ binding_offset: number
+): void {
+ // Create the compute pipeline.
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({ code: wgsl }),
+ entryPoint: 'main',
+ },
+ });
+
+ // Create the buffer that will contain the runtime-sized array.
+ const buffer = t.device.createBuffer({
+ size: buffer_size,
+ usage: GPUBufferUsage.STORAGE,
+ });
+
+ // Create the buffer that will receive the array length.
+ const lengthBuffer = t.device.createBuffer({
+ size: 4,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+
+ // Set up bindings.
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: { buffer, size: binding_size, offset: binding_offset } },
+ { binding: 1, resource: { buffer: lengthBuffer } },
+ ],
+ });
+
+ // Run the shader.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(1);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+
+ // Check the length.
+ const length = (binding_size - offset) / stride;
+ t.expectGPUBufferValuesEqual(lengthBuffer, new Uint32Array([length]));
+}
+
+/**
+ * Test whether a WGSL type string requires the f16 extension.
+ *
+ * @param test_type The wgsl type for testing
+ */
+function typeRequiresF16(test_type: string): boolean {
+ return test_type.includes('f16');
+}
+
+/**
+ * Generate the necessary WGSL header for the tested type, i.e. the f16 enable directive when required.
+ *
+ * @param test_type The wgsl type for testing
+ */
+function shaderHeader(test_type: string): string {
+ return typeRequiresF16(test_type) ? 'enable f16;\n\n' : '';
+}
+
+g.test('single_element')
+ .specURL('https://www.w3.org/TR/WGSL/#arrayLength-builtin')
+ .desc(
+ `Test the arrayLength() builtin with a binding that is just large enough for a single element.
+
+ Test parameters:
+ - type: The WGSL type to use as the array element type.
+ - stride: The stride in bytes of the array element type.
+ `
+ )
+ .params(u => u.combineWithParams(kTestTypes))
+ .beforeAllSubcases(t => {
+ if (typeRequiresF16(t.params.type)) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const wgsl =
+ shaderHeader(t.params.type) +
+ kWgslStructures +
+ `
+ @group(0) @binding(0) var<storage, read_write> buffer : array<${t.params.type}>;
+ @group(0) @binding(1) var<storage, read_write> length : u32;
+ @compute @workgroup_size(1)
+ fn main() {
+ length = arrayLength(&buffer);
+ }
+ `;
+ let buffer_size: number = t.params.stride;
+    // Round the binding size up to a multiple of 4.
+ buffer_size = buffer_size + ((~buffer_size + 1) & 3);
+ runShaderTest(t, wgsl, t.params.stride, 0, buffer_size, buffer_size, 0);
+ });
+
+g.test('multiple_elements')
+ .specURL('https://www.w3.org/TR/WGSL/#arrayLength-builtin')
+ .desc(
+ `Test the arrayLength() builtin with a binding that is large enough for multiple elements.
+
+    We test sizes that are not exact multiples of the array element strides, to check that the
+    length is correctly rounded down to a whole number of elements.
+
+ Test parameters:
+ - buffer_size: The size in bytes of the buffer.
+ - type: The WGSL type to use as the array element type.
+ - stride: The stride in bytes of the array element type.
+ `
+ )
+ .params(u =>
+ u.combine('buffer_size', [640, 1004, 1048576] as const).combineWithParams(kTestTypes)
+ )
+ .beforeAllSubcases(t => {
+ if (typeRequiresF16(t.params.type)) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const wgsl =
+ shaderHeader(t.params.type) +
+ kWgslStructures +
+ `
+ @group(0) @binding(0) var<storage, read_write> buffer : array<${t.params.type}>;
+ @group(0) @binding(1) var<storage, read_write> length : u32;
+ @compute @workgroup_size(1)
+ fn main() {
+ length = arrayLength(&buffer);
+ }
+ `;
+ runShaderTest(t, wgsl, t.params.stride, 0, t.params.buffer_size, t.params.buffer_size, 0);
+ });
+
+g.test('struct_member')
+ .specURL('https://www.w3.org/TR/WGSL/#arrayLength-builtin')
+ .desc(
+ `Test the arrayLength() builtin with an array that is inside a structure.
+
+    We include offsets that are not exact multiples of the array element strides, to check that
+    the length is correctly rounded down to a whole number of elements.
+
+ Test parameters:
+ - member_offset: The offset (in bytes) of the array member from the start of the struct.
+ - type: The WGSL type to use as the array element type.
+ - stride: The stride in bytes of the array element type.
+ `
+ )
+ .params(u => u.combine('member_offset', [0, 4, 20] as const).combineWithParams(kTestTypes))
+ .beforeAllSubcases(t => {
+ if (typeRequiresF16(t.params.type)) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const member_offset = align(t.params.member_offset, t.params.stride);
+ const wgsl =
+ shaderHeader(t.params.type) +
+ kWgslStructures +
+ `
+ alias ArrayType = array<${t.params.type}>;
+ struct Struct {
+ ${t.params.member_offset > 0 ? `@size(${member_offset}) padding : u32,` : ``}
+ arr : ArrayType,
+ }
+ @group(0) @binding(0) var<storage, read_write> buffer : Struct;
+ @group(0) @binding(1) var<storage, read_write> length : u32;
+ @compute @workgroup_size(1)
+ fn main() {
+ length = arrayLength(&buffer.arr);
+ }
+ `;
+ const buffer_size = 1048576;
+ runShaderTest(t, wgsl, t.params.stride, member_offset, buffer_size, buffer_size, 0);
+ });
+
+g.test('binding_subregion')
+ .specURL('https://www.w3.org/TR/WGSL/#arrayLength-builtin')
+ .desc(
+ `Test the arrayLength() builtin when used with a binding that starts at a non-zero offset and
+ does not fill the entire buffer.
+ `
+ )
+ .fn(t => {
+ const wgsl = `
+ @group(0) @binding(0) var<storage, read_write> buffer : array<vec3<f32>>;
+ @group(0) @binding(1) var<storage, read_write> length : u32;
+ @compute @workgroup_size(1)
+ fn main() {
+ length = arrayLength(&buffer);
+ }
+ `;
+ const stride = 16;
+ const buffer_size = 1024;
+ const binding_size = 640;
+ const binding_offset = 256;
+ runShaderTest(t, wgsl, stride, 0, buffer_size, binding_size, binding_offset);
+ });
+
+g.test('read_only')
+ .specURL('https://www.w3.org/TR/WGSL/#arrayLength-builtin')
+ .desc(
+ `Test the arrayLength() builtin when used with a read-only storage buffer.
+ `
+ )
+ .fn(t => {
+ const wgsl = `
+ @group(0) @binding(0) var<storage, read> buffer : array<vec3<f32>>;
+ @group(0) @binding(1) var<storage, read_write> length : u32;
+ @compute @workgroup_size(1)
+ fn main() {
+ length = arrayLength(&buffer);
+ }
+ `;
+ const stride = 16;
+ const buffer_size = 1024;
+ runShaderTest(t, wgsl, stride, 0, buffer_size, buffer_size, 0);
+ });
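
In every variant, the CPU-side expectation comes from `runShaderTest()`'s `(binding_size - offset) / stride` computation. The division is not floored explicitly, but packing the result into a `Uint32Array` truncates it toward zero, which matches WGSL's rule that `arrayLength()` counts only whole elements. A small sketch applying that formula to a few of the parameter combinations above:

```ts
// Expected arrayLength() for (binding size, offset into binding, element stride).
function expectedLength(bindingSize: number, offset: number, stride: number): number {
  return new Uint32Array([(bindingSize - offset) / stride])[0]; // truncates like the test
}

console.log(expectedLength(640, 0, 16));     // 40     vec3<f32> in the binding_subregion test
console.log(expectedLength(1004, 0, 24));    // 41     mat3x2<f32>, 1004 / 24 = 41.83...
console.log(expectedLength(1048576, 20, 4)); // 262139 u32 array at offset 20 inside a struct
```
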
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/asin.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/asin.spec.ts
new file mode 100644
index 0000000000..8d18ebb303
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/asin.spec.ts
@@ -0,0 +1,78 @@
+export const description = `
+Execution tests for the 'asin' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn asin(e: T ) -> T
+Returns the arc sine of e. Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { linearRange, fullF32Range, fullF16Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+const f32_inputs = [
+ ...linearRange(-1, 1, 100), // asin is defined on [-1, 1]
+ ...fullF32Range(),
+];
+
+const f16_inputs = [
+ ...linearRange(-1, 1, 100), // asin is defined on [-1, 1]
+ ...fullF16Range(),
+];
+
+export const d = makeCaseCache('asin', {
+ f32_const: () => {
+ return FP.f32.generateScalarToIntervalCases(f32_inputs, 'finite', FP.f32.asinInterval);
+ },
+ f32_non_const: () => {
+ return FP.f32.generateScalarToIntervalCases(f32_inputs, 'unfiltered', FP.f32.asinInterval);
+ },
+ f16_const: () => {
+ return FP.f16.generateScalarToIntervalCases(f16_inputs, 'finite', FP.f16.asinInterval);
+ },
+ f16_non_const: () => {
+ return FP.f16.generateScalarToIntervalCases(f16_inputs, 'unfiltered', FP.f16.asinInterval);
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f32_const' : 'f32_non_const');
+ await run(t, builtin('asin'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f16_const' : 'f16_non_const');
+ await run(t, builtin('asin'), [TypeF16], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/asinh.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/asinh.spec.ts
new file mode 100644
index 0000000000..9a8384e090
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/asinh.spec.ts
@@ -0,0 +1,65 @@
+export const description = `
+Execution tests for the 'asinh' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn asinh(e: T ) -> T
+Returns the hyperbolic arc sine of e.
+Computes the functional inverse of sinh.
+Component-wise when T is a vector.
+
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullF32Range, fullF16Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('asinh', {
+ f32: () => {
+ return FP.f32.generateScalarToIntervalCases(fullF32Range(), 'unfiltered', FP.f32.asinhInterval);
+ },
+ f16: () => {
+ return FP.f16.generateScalarToIntervalCases(fullF16Range(), 'unfiltered', FP.f16.asinhInterval);
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float test`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('f32');
+ await run(t, builtin('asinh'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, builtin('asinh'), [TypeF16], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atan.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atan.spec.ts
new file mode 100644
index 0000000000..3d0d3e6725
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atan.spec.ts
@@ -0,0 +1,80 @@
+export const description = `
+Execution tests for the 'atan' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn atan(e: T ) -> T
+Returns the arc tangent of e. Component-wise when T is a vector.
+
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullF32Range, fullF16Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+const known_values = [-Math.sqrt(3), -1, -1 / Math.sqrt(3), 0, 1, 1 / Math.sqrt(3), Math.sqrt(3)];
+
+const f32_inputs = [...known_values, ...fullF32Range()];
+const f16_inputs = [...known_values, ...fullF16Range()];
+
+export const d = makeCaseCache('atan', {
+ f32_const: () => {
+ return FP.f32.generateScalarToIntervalCases(f32_inputs, 'finite', FP.f32.atanInterval);
+ },
+ f32_non_const: () => {
+ return FP.f32.generateScalarToIntervalCases(f32_inputs, 'unfiltered', FP.f32.atanInterval);
+ },
+ f16_const: () => {
+ return FP.f16.generateScalarToIntervalCases(f16_inputs, 'finite', FP.f16.atanInterval);
+ },
+ f16_non_const: () => {
+ return FP.f16.generateScalarToIntervalCases(f16_inputs, 'unfiltered', FP.f16.atanInterval);
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+f32 tests
+
+TODO(#792): Decide what the ground-truth is for these tests. [1]
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f32_const' : 'f32_non_const');
+ await run(t, builtin('atan'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f16_const' : 'f16_non_const');
+ await run(t, builtin('atan'), [TypeF16], TypeF16, t.params, cases);
+ });
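
The `known_values` list holds the tangents of the standard angles, so the exact arctangents that the acceptance intervals bracket are simple multiples of pi. A plain-Math spot check of those values (illustrative only, not using the FP machinery):

```ts
// Tangents of the standard angles and the exact arctangents they map to.
const known: Array<[number, number]> = [
  [-Math.sqrt(3),     -Math.PI / 3],
  [-1,                -Math.PI / 4],
  [-1 / Math.sqrt(3), -Math.PI / 6],
  [0,                  0],
  [1 / Math.sqrt(3),   Math.PI / 6],
  [1,                  Math.PI / 4],
  [Math.sqrt(3),       Math.PI / 3],
];
for (const [x, angle] of known) {
  console.assert(Math.abs(Math.atan(x) - angle) < 1e-15);
}
```
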
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atan2.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atan2.spec.ts
new file mode 100644
index 0000000000..fbace73dd2
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atan2.spec.ts
@@ -0,0 +1,83 @@
+export const description = `
+Execution tests for the 'atan2' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn atan2(e1: T ,e2: T ) -> T
+Returns the arc tangent of e1 over e2. Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { linearRange, sparseF32Range, sparseF16Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+const cases = (['f32', 'f16'] as const)
+ .flatMap(kind =>
+ ([true, false] as const).map(nonConst => ({
+ [`${kind}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ const fp = FP[kind];
+ // Using sparse range since there are N^2 cases being generated, and also including extra values
+      // around 0, where there is a discontinuity at which implementations may behave badly.
+ const numeric_range = [
+ ...(kind === 'f32' ? sparseF32Range() : sparseF16Range()),
+ ...linearRange(fp.constants().negative.max, fp.constants().positive.min, 10),
+ ];
+ return fp.generateScalarPairToIntervalCases(
+ numeric_range,
+ numeric_range,
+ nonConst ? 'unfiltered' : 'finite',
+ fp.atan2Interval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('atan2', cases);
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+f32 tests
+
+TODO(#792): Decide what the ground-truth is for these tests. [1]
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(`f32_${t.params.inputSource === 'const' ? 'const' : 'non_const'}`);
+ await run(t, builtin('atan2'), [TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(`f16_${t.params.inputSource === 'const' ? 'const' : 'non_const'}`);
+ await run(t, builtin('atan2'), [TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
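
The flatMap/reduce above is a compact way of declaring the same four cache entries that the other trig specs write out by hand (`f32_const`, `f32_non_const`, `f16_const`, `f16_non_const`), with the 'finite' filter for const evaluation and 'unfiltered' otherwise. An illustrative expansion of just the keys and filters it produces (a sketch, not CTS code):

```ts
// What the flatMap/reduce expands to: one entry per (kind, constness) pair.
const expanded = (['f32', 'f16'] as const).flatMap(kind =>
  ([true, false] as const).map(nonConst => ({
    key: `${kind}_${nonConst ? 'non_const' : 'const'}`,
    filter: nonConst ? 'unfiltered' : 'finite',
  }))
);
console.log(expanded.map(e => e.key));
// [ 'f32_non_const', 'f32_const', 'f16_non_const', 'f16_const' ]
```
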
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atanh.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atanh.spec.ts
new file mode 100644
index 0000000000..90f322a7ea
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atanh.spec.ts
@@ -0,0 +1,87 @@
+export const description = `
+Execution tests for the 'atanh' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn atanh(e: T ) -> T
+Returns the hyperbolic arc tangent of e. The result is 0 when abs(e) ≥ 1.
+Computes the functional inverse of tanh.
+Component-wise when T is a vector.
+Note: The result is not mathematically meaningful when abs(e) >= 1.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { kValue } from '../../../../../util/constants.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { biasedRange, fullF32Range, fullF16Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+const f32_inputs = [
+ ...biasedRange(kValue.f32.negative.less_than_one, -0.9, 20), // discontinuity at x = -1
+ -1,
+ ...biasedRange(kValue.f32.positive.less_than_one, 0.9, 20), // discontinuity at x = 1
+ 1,
+ ...fullF32Range(),
+];
+const f16_inputs = [
+ ...biasedRange(kValue.f16.negative.less_than_one, -0.9, 20), // discontinuity at x = -1
+ -1,
+ ...biasedRange(kValue.f16.positive.less_than_one, 0.9, 20), // discontinuity at x = 1
+ 1,
+ ...fullF16Range(),
+];
+
+export const d = makeCaseCache('atanh', {
+ f32_const: () => {
+ return FP.f32.generateScalarToIntervalCases(f32_inputs, 'finite', FP.f32.atanhInterval);
+ },
+ f32_non_const: () => {
+ return FP.f32.generateScalarToIntervalCases(f32_inputs, 'unfiltered', FP.f32.atanhInterval);
+ },
+ f16_const: () => {
+ return FP.f16.generateScalarToIntervalCases(f16_inputs, 'finite', FP.f16.atanhInterval);
+ },
+ f16_non_const: () => {
+ return FP.f16.generateScalarToIntervalCases(f16_inputs, 'unfiltered', FP.f16.atanhInterval);
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f32_const' : 'f32_non_const');
+ await run(t, builtin('atanh'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f16_const' : 'f16_non_const');
+ await run(t, builtin('atanh'), [TypeF16], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicAdd.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicAdd.spec.ts
new file mode 100644
index 0000000000..37d3ce5292
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicAdd.spec.ts
@@ -0,0 +1,101 @@
+export const description = `
+Atomically read, add and store value.
+
+ * Load the original value pointed to by atomic_ptr.
+ * Obtain a new value by adding the value v.
+ * Store the new value using atomic_ptr.
+
+Returns the original value stored in the atomic object.
+`;
+
+import { makeTestGroup } from '../../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../../gpu_test.js';
+
+import {
+ dispatchSizes,
+ workgroupSizes,
+ runStorageVariableTest,
+ runWorkgroupVariableTest,
+ typedArrayCtor,
+} from './harness.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('add_storage')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-rmw')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicAdd(atomic_ptr: ptr<AS, atomic<T>, read_write>, v: T) -> T
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(t => {
+ const numInvocations = t.params.workgroupSize * t.params.dispatchSize;
+ // Allocate one extra element to ensure it doesn't get modified
+ const bufferNumElements = 2;
+
+ const initValue = 0;
+ const op = `atomicAdd(&output[0], 1)`;
+ const expected = new (typedArrayCtor(t.params.scalarType))(bufferNumElements);
+ expected[0] = numInvocations;
+
+ runStorageVariableTest({
+ t,
+ workgroupSize: t.params.workgroupSize,
+ dispatchSize: t.params.dispatchSize,
+ bufferNumElements,
+ initValue,
+ op,
+ expected,
+ });
+ });
+
+g.test('add_workgroup')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-rmw')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicAdd(atomic_ptr: ptr<AS, atomic<T>, read_write>, v: T) -> T
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(t => {
+ // Allocate one extra element to ensure it doesn't get modified
+ const wgNumElements = 2;
+
+ const initValue = 0;
+ const op = `atomicAdd(&wg[0], 1)`;
+
+ const expected = new (typedArrayCtor(t.params.scalarType))(
+ wgNumElements * t.params.dispatchSize
+ );
+ for (let d = 0; d < t.params.dispatchSize; ++d) {
+ const wg = expected.subarray(d * wgNumElements);
+ wg[0] = t.params.workgroupSize;
+ }
+
+ runWorkgroupVariableTest({
+ t,
+ workgroupSize: t.params.workgroupSize,
+ dispatchSize: t.params.dispatchSize,
+ wgNumElements,
+ initValue,
+ op,
+ expected,
+ });
+ });
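
Since every invocation adds 1 to the same element, the storage expectation is simply workgroupSize x dispatchSize, and in the workgroup variant each workgroup's element 0 independently reaches workgroupSize. The sketch below is a CPU-side analogue using JavaScript's Atomics; it is illustrative only, the sizes 16 and 4 are made-up example values rather than the harness's workgroupSizes/dispatchSizes, and it assumes an environment where SharedArrayBuffer is available.

```ts
// CPU-side analogue of add_storage: N "invocations" each atomically add 1 to
// element 0; element 1 is the extra element that must stay untouched.
const workgroupSize = 16; // example value only
const dispatchSize = 4;   // example value only
const buf = new Int32Array(new SharedArrayBuffer(2 * Int32Array.BYTES_PER_ELEMENT));
for (let i = 0; i < workgroupSize * dispatchSize; ++i) {
  Atomics.add(buf, 0, 1);
}
console.log(buf[0] === workgroupSize * dispatchSize, buf[1] === 0); // true true
```
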
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicAnd.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicAnd.spec.ts
new file mode 100644
index 0000000000..ed5cfa84a3
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicAnd.spec.ts
@@ -0,0 +1,135 @@
+export const description = `
+Atomically read, AND, and store value.
+
+* Load the original value pointed to by atomic_ptr.
+* Obtain a new value by ANDing it with the value v.
+* Store the new value using atomic_ptr.
+
+Returns the original value stored in the atomic object.
+`;
+
+import { makeTestGroup } from '../../../../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../../../../common/util/data_tables.js';
+import { GPUTest } from '../../../../../../gpu_test.js';
+
+import {
+ dispatchSizes,
+ workgroupSizes,
+ runStorageVariableTest,
+ runWorkgroupVariableTest,
+ kMapId,
+ typedArrayCtor,
+} from './harness.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('and_storage')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-rmw')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicAnd(atomic_ptr: ptr<AS, atomic<T>, read_write>, v: T) -> T
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('mapId', keysOf(kMapId))
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(t => {
+ const numInvocations = t.params.workgroupSize * t.params.dispatchSize;
+
+    // Allocate an output buffer with one bit per invocation (packed into 32-bit words), plus one extra word for validation
+ const bufferNumElements = Math.max(1, numInvocations / 32) + 1;
+
+    // Start with all bits high, then use atomicAnd to clear the bit for the mapped global id.
+ // Note: Both WGSL and JS will shift left 1 by id modulo 32.
+ const initValue = 0xffffffff;
+
+ const scalarType = t.params.scalarType;
+ const mapId = kMapId[t.params.mapId];
+ const extra = mapId.wgsl(numInvocations); // Defines map_id()
+ const op = `
+ let i = map_id(u32(id));
+ atomicAnd(&output[i / 32], ~(${scalarType}(1) << i))
+ `;
+
+ const expected = new (typedArrayCtor(scalarType))(bufferNumElements).fill(initValue);
+ for (let id = 0; id < numInvocations; ++id) {
+ const i = mapId.f(id, numInvocations);
+ expected[Math.floor(i / 32)] &= ~(1 << i);
+ }
+
+ runStorageVariableTest({
+ t,
+ workgroupSize: t.params.workgroupSize,
+ dispatchSize: t.params.dispatchSize,
+ bufferNumElements,
+ initValue,
+ op,
+ expected,
+ extra,
+ });
+ });
+
+g.test('and_workgroup')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-rmw')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicAnd(atomic_ptr: ptr<AS, atomic<T>, read_write>, v: T) -> T
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('mapId', keysOf(kMapId))
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(t => {
+ const numInvocations = t.params.workgroupSize;
+
+    // Allocate a workgroup array with one bit per invocation (packed into 32-bit words), plus one extra word for validation
+ const wgNumElements = Math.max(1, numInvocations / 32) + 1;
+
+    // Start with all bits high, then use atomicAnd to clear the bit for the mapped global id.
+ // Note: Both WGSL and JS will shift left 1 by id modulo 32.
+ const initValue = 0xffffffff;
+
+ const scalarType = t.params.scalarType;
+ const mapId = kMapId[t.params.mapId];
+ const extra = mapId.wgsl(numInvocations); // Defines map_id()
+ const op = `
+ let i = map_id(u32(id));
+ atomicAnd(&wg[i / 32], ~(${scalarType}(1) << i))
+ `;
+
+ const expected = new (typedArrayCtor(scalarType))(wgNumElements * t.params.dispatchSize).fill(
+ initValue
+ );
+ for (let d = 0; d < t.params.dispatchSize; ++d) {
+ for (let id = 0; id < numInvocations; ++id) {
+ const wg = expected.subarray(d * wgNumElements);
+ const i = mapId.f(id, numInvocations);
+ wg[Math.floor(i / 32)] &= ~(1 << i);
+ }
+ }
+
+ runWorkgroupVariableTest({
+ t,
+ workgroupSize: t.params.workgroupSize,
+ dispatchSize: t.params.dispatchSize,
+ wgNumElements,
+ initValue,
+ op,
+ expected,
+ extra,
+ });
+ });
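
Each invocation clears exactly one bit of the 0xffffffff-initialised words, and the expected buffers above are rebuilt on the CPU with the same `~(1 << i)` mask. As the comments note, both WGSL and JavaScript take the shift count modulo 32, so `i / 32` selects the word while the wrapped shift selects the bit within it. A standalone numeric sketch of that indexing (64 is just an example invocation count):

```ts
// Clear bit i for 64 "invocations"; the extra trailing word stays untouched.
const numInvocations = 64;
const words = new Uint32Array(numInvocations / 32 + 1).fill(0xffffffff);
for (let i = 0; i < numInvocations; ++i) {
  // (1 << i) shifts by i % 32 in both JS and WGSL, so i / 32 picks the word.
  words[Math.floor(i / 32)] &= ~(1 << i);
}
console.log(Array.from(words, w => w.toString(16))); // [ '0', '0', 'ffffffff' ]
```
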
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicCompareExchangeWeak.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicCompareExchangeWeak.spec.ts
new file mode 100644
index 0000000000..2556bb744b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicCompareExchangeWeak.spec.ts
@@ -0,0 +1,742 @@
+export const description = `
+Performs the following steps atomically:
+ * Load the original value pointed to by atomic_ptr.
+ * Compare the original value to the value v using an equality operation.
+ * Store the value v only if the result of the equality comparison was true.
+
+Returns a two member structure, where the first member, old_value, is the original
+value of the atomic object and the second member, exchanged, is whether or not
+the comparison succeeded.
+
+Note: the equality comparison may spuriously fail on some implementations.
+That is, the exchanged member of the result may be false even if the old_value
+member of the result equals cmp.
+`;
+
+import { makeTestGroup } from '../../../../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../../../../common/util/data_tables.js';
+import { assert } from '../../../../../../../common/util/util.js';
+import { GPUTest } from '../../../../../../gpu_test.js';
+
+import {
+ dispatchSizes,
+ workgroupSizes,
+ typedArrayCtor,
+ kMapId,
+ onlyWorkgroupSizes,
+} from './harness.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('compare_exchange_weak_storage_basic')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-rmw')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicCompareExchangeWeak(atomic_ptr: ptr<AS, atomic<T>, read_write>, cmp: T, v: T) -> __atomic_compare_exchange_result<T>
+
+struct __atomic_compare_exchange_result<T> {
+ old_value : T, // old value stored in the atomic
+ exchanged : bool, // true if the exchange was done
+}
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('mapId', keysOf(kMapId))
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(async t => {
+ const numInvocations = t.params.workgroupSize * t.params.dispatchSize;
+ const bufferNumElements = numInvocations;
+ const scalarType = t.params.scalarType;
+ const mapId = kMapId[t.params.mapId];
+ const extra = mapId.wgsl(numInvocations, t.params.scalarType); // Defines map_id()
+
+ const wgsl =
+ `
+ @group(0) @binding(0)
+ var<storage, read_write> input : array<atomic<${scalarType}>>;
+
+ @group(0) @binding(1)
+ var<storage, read_write> output : array<${scalarType}>;
+
+ @group(0) @binding(2)
+ var<storage, read_write> exchanged : array<${scalarType}>;
+
+ @compute @workgroup_size(${t.params.workgroupSize})
+ fn main(
+ @builtin(global_invocation_id) global_invocation_id : vec3<u32>,
+ ) {
+ let id = ${scalarType}(global_invocation_id[0]);
+
+ // Exchange every third value
+ var comp = id + 1;
+ if (id % 3 == 0) {
+ comp = id;
+ }
+ let r = atomicCompareExchangeWeak(&input[id], comp, map_id(id * 2));
+
+ // Store results
+ output[id] = r.old_value;
+ if (r.exchanged) {
+ exchanged[id] = 1;
+ } else {
+ exchanged[id] = 0;
+ }
+ }
+ ` + extra;
+
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({ code: wgsl }),
+ entryPoint: 'main',
+ },
+ });
+
+ const arrayType = typedArrayCtor(scalarType);
+
+ // Create input buffer with values [0..n]
+ const inputBuffer = t.device.createBuffer({
+ size: bufferNumElements * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ mappedAtCreation: true,
+ });
+ t.trackForCleanup(inputBuffer);
+ const data = new arrayType(inputBuffer.getMappedRange());
+ data.forEach((_, i) => (data[i] = i));
+ inputBuffer.unmap();
+
+ const outputBuffer = t.device.createBuffer({
+ size: bufferNumElements * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(outputBuffer);
+
+ const exchangedBuffer = t.device.createBuffer({
+ size: bufferNumElements * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(exchangedBuffer);
+
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: { buffer: inputBuffer } },
+ { binding: 1, resource: { buffer: outputBuffer } },
+ { binding: 2, resource: { buffer: exchangedBuffer } },
+ ],
+ });
+
+ // Run the shader.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(t.params.dispatchSize);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+
+ // Output buffer should be the same as the initial input buffer as it contains
+ // values returned from atomicCompareExchangeWeak
+ const outputExpected = new (typedArrayCtor(t.params.scalarType))(bufferNumElements);
+ outputExpected.forEach((_, i) => (outputExpected[i] = i));
+ t.expectGPUBufferValuesEqual(outputBuffer, outputExpected);
+
+ // Read back exchanged buffer
+ const exchangedBufferResult = await t.readGPUBufferRangeTyped(exchangedBuffer, {
+ type: arrayType,
+ typedLength: exchangedBuffer.size / arrayType.BYTES_PER_ELEMENT,
+ });
+
+ // The input buffer should have been modified to a computed value for every third value,
+ // unless the comparison spuriously failed.
+ const inputExpected = new (typedArrayCtor(t.params.scalarType))(bufferNumElements);
+ inputExpected.forEach((_, i) => {
+ if (i % 3 === 0 && exchangedBufferResult.data[i]) {
+ inputExpected[i] = mapId.f(i * 2, numInvocations);
+ } else {
+ inputExpected[i] = i; // No change
+ }
+ });
+ t.expectGPUBufferValuesEqual(inputBuffer, inputExpected);
+ });
+
+g.test('compare_exchange_weak_workgroup_basic')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-rmw')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicCompareExchangeWeak(atomic_ptr: ptr<AS, atomic<T>, read_write>, cmp: T, v: T) -> __atomic_compare_exchange_result<T>
+
+struct __atomic_compare_exchange_result<T> {
+ old_value : T, // old value stored in the atomic
+ exchanged : bool, // true if the exchange was done
+}
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('mapId', keysOf(kMapId))
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(async t => {
+ const numInvocations = t.params.workgroupSize;
+ const wgNumElements = numInvocations;
+ const scalarType = t.params.scalarType;
+ const dispatchSize = t.params.dispatchSize;
+ const mapId = kMapId[t.params.mapId];
+ const extra = mapId.wgsl(numInvocations, t.params.scalarType); // Defines map_id()
+
+ const wgsl =
+ `
+ var<workgroup> wg: array<atomic<${scalarType}>, ${wgNumElements}>;
+
+ @group(0) @binding(0)
+ var<storage, read_write> output: array<${scalarType}, ${wgNumElements * dispatchSize}>;
+
+ @group(0) @binding(1)
+ var<storage, read_write> exchanged: array<${scalarType}, ${wgNumElements * dispatchSize}>;
+
+ // Result of each workgroup is written to output[workgroup_id.x]
+ @group(0) @binding(2)
+ var<storage, read_write> wg_copy: array<${scalarType}, ${wgNumElements * dispatchSize}>;
+
+ @compute @workgroup_size(${t.params.workgroupSize})
+ fn main(
+ @builtin(local_invocation_index) local_invocation_index: u32,
+ @builtin(workgroup_id) workgroup_id : vec3<u32>
+ ) {
+ let id = ${scalarType}(local_invocation_index);
+ let global_id = ${scalarType}(workgroup_id.x * ${wgNumElements} + local_invocation_index);
+
+ // Initialize wg[id] with this invocations global id
+ atomicStore(&wg[id], global_id);
+
+ // Exchange every third value
+ var comp = global_id + 1;
+ if (global_id % 3 == 0) {
+ comp = global_id;
+ }
+ let r = atomicCompareExchangeWeak(&wg[id], comp, map_id(global_id * 2));
+
+ // Store results
+ output[global_id] = r.old_value;
+ if (r.exchanged) {
+ exchanged[global_id] = 1;
+ } else {
+ exchanged[global_id] = 0;
+ }
+
+ // Copy new value into wg_copy
+ wg_copy[global_id] = atomicLoad(&wg[id]);
+ }
+ ` + extra;
+
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({ code: wgsl }),
+ entryPoint: 'main',
+ },
+ });
+
+ const arrayType = typedArrayCtor(scalarType);
+
+ const outputBuffer = t.device.createBuffer({
+ size: wgNumElements * dispatchSize * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(outputBuffer);
+
+ const wgCopyBuffer = t.device.createBuffer({
+ size: wgNumElements * dispatchSize * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+    t.trackForCleanup(wgCopyBuffer);
+
+ const exchangedBuffer = t.device.createBuffer({
+ size: wgNumElements * dispatchSize * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(exchangedBuffer);
+
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: { buffer: outputBuffer } },
+ { binding: 1, resource: { buffer: exchangedBuffer } },
+ { binding: 2, resource: { buffer: wgCopyBuffer } },
+ ],
+ });
+
+ // Run the shader.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(dispatchSize);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+
+ // Output buffer should be the same as the initial wg buffer as it contains
+ // values returned from atomicCompareExchangeWeak
+ const outputExpected = new (typedArrayCtor(t.params.scalarType))(wgNumElements * dispatchSize);
+ outputExpected.forEach((_, i) => (outputExpected[i] = i));
+ t.expectGPUBufferValuesEqual(outputBuffer, outputExpected);
+
+ // Read back exchanged buffer
+ const exchangedBufferResult = await t.readGPUBufferRangeTyped(exchangedBuffer, {
+ type: arrayType,
+ typedLength: exchangedBuffer.size / arrayType.BYTES_PER_ELEMENT,
+ });
+
+ // And the wg copy buffer should have been modified to a computed value for every third value,
+ // unless the comparison spuriously failed.
+ const wgCopyBufferExpected = new (typedArrayCtor(t.params.scalarType))(
+ wgNumElements * dispatchSize
+ );
+ wgCopyBufferExpected.forEach((_, i) => {
+ if (i % 3 === 0 && exchangedBufferResult.data[i]) {
+ wgCopyBufferExpected[i] = mapId.f(i * 2, numInvocations);
+ } else {
+ wgCopyBufferExpected[i] = i; // No change
+ }
+ });
+ t.expectGPUBufferValuesEqual(wgCopyBuffer, wgCopyBufferExpected);
+ });
+
+g.test('compare_exchange_weak_storage_advanced')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-rmw')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicCompareExchangeWeak(atomic_ptr: ptr<AS, atomic<T>, read_write>, cmp: T, v: T) -> __atomic_compare_exchange_result<T>
+
+struct __atomic_compare_exchange_result<T> {
+ old_value : T, // old value stored in the atomic
+ exchanged : bool, // true if the exchange was done
+}
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', onlyWorkgroupSizes) //
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(async t => {
+ const numInvocations = t.params.workgroupSize;
+ const scalarType = t.params.scalarType;
+
+ // Number of rounds in which every invocation attempts to exchange a value at the same memory address
+ const numWrites = 4;
+
+ const bufferNumElements = numInvocations * numWrites;
+ const pingPongValues = [24, 68];
+
+ const wgsl = `
+ @group(0) @binding(0)
+ var<storage, read_write> data : atomic<${scalarType}>;
+
+ @group(0) @binding(1)
+ var<storage, read_write> old_values : array<${scalarType}>;
+
+ @group(0) @binding(2)
+ var<storage, read_write> exchanged : array<${scalarType}>;
+
+ fn ping_pong_value(i: u32) -> ${scalarType} {
+ if (i % 2 == 0) {
+ return ${pingPongValues[0]};
+ } else {
+ return ${pingPongValues[1]};
+ }
+ }
+
+ @compute @workgroup_size(${t.params.workgroupSize})
+ fn main(
+ @builtin(global_invocation_id) global_invocation_id : vec3<u32>,
+ ) {
+ let id = ${scalarType}(global_invocation_id[0]);
+
+ // Each invocation attempts to write an alternating (ping-pong) value, once per loop.
+ // The data value is initialized with the first of the two ping-pong values.
+ // Only one invocation per loop iteration should succeed. Note the workgroupBarrier() used
+ // to synchronize each invocation in the loop.
+ // The reason we alternate is in case atomicCompareExchangeWeak spuriously fails:
+ // If all invocations of one iteration spuriously fail, the very next iteration will also
+ // fail since the value will not have been exchanged; however, the subsequent one will succeed
+ // (assuming not all iterations spuriously fail yet again).
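+ // For example, starting from the first ping-pong value:
+ //   i=0: compare = first value, exchange to the second value
+ //   i=1: compare = second value, exchange back to the first value
+ //   ... and so on, alternating every iteration.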
+
+ for (var i = 0u; i < ${numWrites}u; i++) {
+ let compare = ping_pong_value(i);
+ let next = ping_pong_value(i + 1);
+
+ let r = atomicCompareExchangeWeak(&data, compare, next);
+
+ let slot = i * ${numInvocations}u + u32(id);
+ old_values[slot] = r.old_value;
+ if (r.exchanged) {
+ exchanged[slot] = 1;
+ } else {
+ exchanged[slot] = 0;
+ }
+
+ workgroupBarrier();
+ }
+ }
+ `;
+
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({ code: wgsl }),
+ entryPoint: 'main',
+ },
+ });
+
+ const arrayType = typedArrayCtor(scalarType);
+ const defaultValue = 99999999;
+
+ // Create single-value data buffer initialized to the first ping-pong value
+ const dataBuffer = t.device.createBuffer({
+ size: 1 * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ mappedAtCreation: true,
+ });
+ {
+ const data = new arrayType(dataBuffer.getMappedRange());
+ data[0] = pingPongValues[0];
+ dataBuffer.unmap();
+ }
+ t.trackForCleanup(dataBuffer);
+
+ const oldValuesBuffer = t.device.createBuffer({
+ size: bufferNumElements * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ mappedAtCreation: true,
+ });
+ t.trackForCleanup(oldValuesBuffer);
+ {
+ const data = new arrayType(oldValuesBuffer.getMappedRange());
+ data.fill(defaultValue);
+ oldValuesBuffer.unmap();
+ }
+
+ const exchangedBuffer = t.device.createBuffer({
+ size: bufferNumElements * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ mappedAtCreation: true,
+ });
+ t.trackForCleanup(exchangedBuffer);
+ {
+ const data = new arrayType(exchangedBuffer.getMappedRange());
+ data.fill(defaultValue);
+ exchangedBuffer.unmap();
+ }
+
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: { buffer: dataBuffer } },
+ { binding: 1, resource: { buffer: oldValuesBuffer } },
+ { binding: 2, resource: { buffer: exchangedBuffer } },
+ ],
+ });
+
+ // Run the shader.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(1);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+
+ // Read back buffers
+ const oldValuesBufferResult = (
+ await t.readGPUBufferRangeTyped(oldValuesBuffer, {
+ type: arrayType,
+ typedLength: oldValuesBuffer.size / arrayType.BYTES_PER_ELEMENT,
+ })
+ ).data;
+ const exchangedBufferResult = (
+ await t.readGPUBufferRangeTyped(exchangedBuffer, {
+ type: arrayType,
+ typedLength: exchangedBuffer.size / arrayType.BYTES_PER_ELEMENT,
+ })
+ ).data;
+
+ for (let w = 0; w < numWrites; ++w) {
+ const offset = w * numInvocations;
+ const exchanged = exchangedBufferResult.subarray(offset, offset + numInvocations);
+ const oldValues = oldValuesBufferResult.subarray(offset, offset + numInvocations);
+
+ const dumpValues = () => {
+ return `
+ For write: ${w}
+ exchanged: ${exchanged}
+ oldValues: ${oldValues}`;
+ };
+
+ // Only one of the invocations should have succeeded in exchanging - or none if spurious failures occurred
+ const noExchanges = exchanged.every(v => v === 0);
+ if (noExchanges) {
+ // Spurious failure, all values in oldValues should be the default value
+ if (!oldValues.every(v => v === defaultValue)) {
+ t.fail(
+ `Spurious failure detected, expected only default value of ${defaultValue} in oldValues buffer.${dumpValues()}`
+ );
+ return;
+ }
+ } else {
+ // Only one invocation should have exchanged its value
+ if (exchanged.filter(v => v === 1).length !== 1) {
+ t.fail(`More than one invocation exchanged its value.${dumpValues()}`);
+ return;
+ }
+
+ // Get its index
+ const idx = exchanged.findIndex(v => v === 1);
+ assert(idx !== -1);
+
+ // Its output should contain the old value after exchange
+ const oldValue = pingPongValues[w % 2];
+ if (oldValues[idx] !== oldValue) {
+ t.fail(
+ `oldValues[${idx}] expected to contain old value from exchange: ${oldValue}.${dumpValues()}`
+ );
+ return;
+ }
+
+ // The rest of oldValues should either contain the old value or the newly exchanged value,
+ // depending on whether they executed atomicCompareExchangeWeak before or after invocation 'idx'.
+ const oldValuesRest = oldValues.filter((_, i) => i !== idx);
+ if (!oldValuesRest.every(v => pingPongValues.includes(v))) {
+ t.fail(
+ `Values in oldValues buffer should be one of '${pingPongValues}', except at index '${idx}' where it is '${oldValue}'.${dumpValues()}`
+ );
+ return;
+ }
+ }
+ }
+ });
+
+g.test('compare_exchange_weak_workgroup_advanced')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-rmw')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicCompareExchangeWeak(atomic_ptr: ptr<AS, atomic<T>, read_write>, cmp: T, v: T) -> __atomic_compare_exchange_result<T>
+
+struct __atomic_compare_exchange_result<T> {
+ old_value : T, // old value stored in the atomic
+ exchanged : bool, // true if the exchange was done
+}
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', onlyWorkgroupSizes) //
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(async t => {
+ const numInvocations = t.params.workgroupSize;
+ const scalarType = t.params.scalarType;
+
+ // Number of rounds in which every invocation attempts to exchange a value at the same memory address
+ const numWrites = 4;
+
+ const bufferNumElements = numInvocations * numWrites;
+ const pingPongValues = [24, 68];
+
+ const wgsl = `
+ var<workgroup> wg: atomic<${scalarType}>;
+
+ @group(0) @binding(0)
+ var<storage, read_write> old_values : array<${scalarType}>;
+
+ @group(0) @binding(1)
+ var<storage, read_write> exchanged : array<${scalarType}>;
+
+ fn ping_pong_value(i: u32) -> ${scalarType} {
+ if (i % 2 == 0) {
+ return ${pingPongValues[0]};
+ } else {
+ return ${pingPongValues[1]};
+ }
+ }
+
+ @compute @workgroup_size(${t.params.workgroupSize})
+ fn main(
+ @builtin(local_invocation_index) local_invocation_index: u32,
+ @builtin(workgroup_id) workgroup_id : vec3<u32>
+ ) {
+ let id = ${scalarType}(local_invocation_index);
+
+ // Each invocation attempts to write an alternating (ping-pong) value, once per loop.
+ // The input value is initialized with the first of the two ping-pong values.
+ // Only one invocation per loop iteration should succeed. Note the workgroupBarrier() used
+ // to synchronize each invocation in the loop.
+ // The reason we alternate is in case atomicCompareExchangeWeak spuriously fails:
+ // If all invocations of one iteration spuriously fail, the very next iteration will also
+ // fail since the value will not have been exchanged; however, the subsequent one will succeed
+ // (assuming not all iterations spuriously fail yet again).
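+ // For example: iteration 0 compares the first value and exchanges to the second,
+ // iteration 1 compares the second and exchanges back to the first, and so on.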
+
+ // Initialize wg
+ if (local_invocation_index == 0) {
+ atomicStore(&wg, ${pingPongValues[0]});
+ }
+ workgroupBarrier();
+
+ for (var i = 0u; i < ${numWrites}u; i++) {
+ let compare = ping_pong_value(i);
+ let next = ping_pong_value(i + 1);
+
+ let r = atomicCompareExchangeWeak(&wg, compare, next);
+
+ let slot = i * ${numInvocations}u + u32(id);
+ old_values[slot] = r.old_value;
+ if (r.exchanged) {
+ exchanged[slot] = 1;
+ } else {
+ exchanged[slot] = 0;
+ }
+
+ workgroupBarrier();
+ }
+ }
+ `;
+
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({ code: wgsl }),
+ entryPoint: 'main',
+ },
+ });
+
+ const arrayType = typedArrayCtor(scalarType);
+ const defaultValue = 99999999;
+
+ const oldValuesBuffer = t.device.createBuffer({
+ size: bufferNumElements * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ mappedAtCreation: true,
+ });
+ t.trackForCleanup(oldValuesBuffer);
+ {
+ const data = new arrayType(oldValuesBuffer.getMappedRange());
+ data.fill(defaultValue);
+ oldValuesBuffer.unmap();
+ }
+
+ const exchangedBuffer = t.device.createBuffer({
+ size: bufferNumElements * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ mappedAtCreation: true,
+ });
+ t.trackForCleanup(exchangedBuffer);
+ {
+ const data = new arrayType(exchangedBuffer.getMappedRange());
+ data.fill(defaultValue);
+ exchangedBuffer.unmap();
+ }
+
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: { buffer: oldValuesBuffer } },
+ { binding: 1, resource: { buffer: exchangedBuffer } },
+ ],
+ });
+
+ // Run the shader.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(1);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+
+ // Read back buffers
+ const oldValuesBufferResult = (
+ await t.readGPUBufferRangeTyped(oldValuesBuffer, {
+ type: arrayType,
+ typedLength: oldValuesBuffer.size / arrayType.BYTES_PER_ELEMENT,
+ })
+ ).data;
+ const exchangedBufferResult = (
+ await t.readGPUBufferRangeTyped(exchangedBuffer, {
+ type: arrayType,
+ typedLength: exchangedBuffer.size / arrayType.BYTES_PER_ELEMENT,
+ })
+ ).data;
+
+ for (let w = 0; w < numWrites; ++w) {
+ const offset = w * numInvocations;
+ const exchanged = exchangedBufferResult.subarray(offset, offset + numInvocations);
+ const oldValues = oldValuesBufferResult.subarray(offset, offset + numInvocations);
+
+ const dumpValues = () => {
+ return `
+ For write: ${w}
+ exchanged: ${exchanged}
+ oldValues: ${oldValues}`;
+ };
+
+ // Only one of the invocations should have succeeded in exchanging - or none if spurious failures occurred
+ const noExchanges = exchanged.every(v => v === 0);
+ if (noExchanges) {
+ // Spurious failure, all values in oldValues should be the default value
+ if (!oldValues.every(v => v === defaultValue)) {
+ t.fail(
+ `Spurious failure detected, expected only default value of ${defaultValue} in oldValues buffer.${dumpValues()}`
+ );
+ return;
+ }
+ } else {
+ // Only one invocation should have exchanged its value
+ if (exchanged.filter(v => v === 1).length !== 1) {
+ t.fail(`More than one invocation exchanged its value.${dumpValues()}`);
+ return;
+ }
+
+ // Get its index
+ const idx = exchanged.findIndex(v => v === 1);
+ assert(idx !== -1);
+
+ // Its output should contain the old value after exchange
+ const oldValue = pingPongValues[w % 2];
+ if (oldValues[idx] !== oldValue) {
+ t.fail(
+ `oldValues[${idx}] expected to contain old value from exchange: ${oldValue}.${dumpValues()}`
+ );
+ return;
+ }
+
+ // The rest of oldValues should either contain the old value or the newly exchanged value,
+ // depending on whether they executed atomicCompareExchangeWeak before or after invocation 'idx'.
+ const oldValuesRest = oldValues.filter((_, i) => i !== idx);
+ if (!oldValuesRest.every(v => pingPongValues.includes(v))) {
+ t.fail(
+ `Values in oldValues buffer should be one of '${pingPongValues}', except at index '${idx}' where it is '${oldValue}'.${dumpValues()}`
+ );
+ return;
+ }
+ }
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicExchange.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicExchange.spec.ts
new file mode 100644
index 0000000000..540ac16b07
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicExchange.spec.ts
@@ -0,0 +1,470 @@
+export const description = `
+Atomically stores the value v in the atomic object pointed to by atomic_ptr and returns the original value stored in the atomic object.
+`;
+
+import { makeTestGroup } from '../../../../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../../../../common/util/data_tables.js';
+import { GPUTest } from '../../../../../../gpu_test.js';
+import { checkElementsEqual } from '../../../../../../util/check_contents.js';
+
+import { dispatchSizes, workgroupSizes, typedArrayCtor, kMapId } from './harness.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('exchange_storage_basic')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-rmw')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicExchange(atomic_ptr: ptr<AS, atomic<T>, read_write>, v: T) -> T
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('mapId', keysOf(kMapId))
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(t => {
+ const numInvocations = t.params.workgroupSize * t.params.dispatchSize;
+ const bufferNumElements = numInvocations;
+ const scalarType = t.params.scalarType;
+ const mapId = kMapId[t.params.mapId];
+ const extra = mapId.wgsl(numInvocations, t.params.scalarType); // Defines map_id()
+
+ const wgsl =
+ `
+ @group(0) @binding(0)
+ var<storage, read_write> input : array<atomic<${scalarType}>>;
+
+ @group(0) @binding(1)
+ var<storage, read_write> output : array<${scalarType}>;
+
+ @compute @workgroup_size(${t.params.workgroupSize})
+ fn main(
+ @builtin(global_invocation_id) global_invocation_id : vec3<u32>,
+ ) {
+ let id = ${scalarType}(global_invocation_id[0]);
+
+ output[id] = atomicExchange(&input[id], map_id(id * 2));
+ }
+ ` + extra;
+
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({ code: wgsl }),
+ entryPoint: 'main',
+ },
+ });
+
+ const arrayType = typedArrayCtor(scalarType);
+
+ // Create input buffer with values [0..n]
+ const inputBuffer = t.device.createBuffer({
+ size: bufferNumElements * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ mappedAtCreation: true,
+ });
+ t.trackForCleanup(inputBuffer);
+ const data = new arrayType(inputBuffer.getMappedRange());
+ data.forEach((_, i) => (data[i] = i));
+ inputBuffer.unmap();
+
+ const outputBuffer = t.device.createBuffer({
+ size: bufferNumElements * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(outputBuffer);
+
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: { buffer: inputBuffer } },
+ { binding: 1, resource: { buffer: outputBuffer } },
+ ],
+ });
+
+ // Run the shader.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(t.params.dispatchSize);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+
+ // Output buffer should be the same as the initial input buffer as it contains
+ // values returned from atomicExchange
+ const outputExpected = new (typedArrayCtor(t.params.scalarType))(bufferNumElements);
+ outputExpected.forEach((_, i) => (outputExpected[i] = i));
+ t.expectGPUBufferValuesEqual(outputBuffer, outputExpected);
+
+ // And the input buffer should have been modified to a computed value
+ const inputExpected = new (typedArrayCtor(t.params.scalarType))(bufferNumElements);
+ inputExpected.forEach((_, i) => (inputExpected[i] = mapId.f(i * 2, numInvocations)));
+ t.expectGPUBufferValuesEqual(inputBuffer, inputExpected);
+ });
+
+g.test('exchange_workgroup_basic')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-rmw')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicExchange(atomic_ptr: ptr<AS, atomic<T>, read_write>, v: T) -> T
+
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('mapId', keysOf(kMapId))
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(t => {
+ const numInvocations = t.params.workgroupSize;
+ const wgNumElements = numInvocations;
+ const scalarType = t.params.scalarType;
+ const dispatchSize = t.params.dispatchSize;
+ const mapId = kMapId[t.params.mapId];
+ const extra = mapId.wgsl(numInvocations, t.params.scalarType); // Defines map_id()
+
+ const wgsl =
+ `
+ var<workgroup> wg: array<atomic<${scalarType}>, ${wgNumElements}>;
+
+ // Result of each workgroup is written to output[workgroup_id.x]
+ @group(0) @binding(0)
+ var<storage, read_write> output: array<${scalarType}, ${wgNumElements * dispatchSize}>;
+
+ @group(0) @binding(1)
+ var<storage, read_write> wg_copy: array<${scalarType}, ${wgNumElements * dispatchSize}>;
+
+ @compute @workgroup_size(${t.params.workgroupSize})
+ fn main(
+ @builtin(local_invocation_index) local_invocation_index: u32,
+ @builtin(workgroup_id) workgroup_id : vec3<u32>
+ ) {
+ let id = ${scalarType}(local_invocation_index);
+ let global_id = ${scalarType}(workgroup_id.x * ${wgNumElements} + local_invocation_index);
+
+ // Initialize wg[id] with this invocation's global id
+ atomicStore(&wg[id], global_id);
+ workgroupBarrier();
+
+ // Test atomicExchange, storing old value into output
+ output[global_id] = atomicExchange(&wg[id], map_id(global_id * 2));
+
+ // Copy new value into wg_copy
+ wg_copy[global_id] = atomicLoad(&wg[id]);
+ }
+ ` + extra;
+
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({ code: wgsl }),
+ entryPoint: 'main',
+ },
+ });
+
+ const arrayType = typedArrayCtor(scalarType);
+
+ const outputBuffer = t.device.createBuffer({
+ size: wgNumElements * dispatchSize * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(outputBuffer);
+
+ const wgCopyBuffer = t.device.createBuffer({
+ size: wgNumElements * dispatchSize * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(wgCopyBuffer);
+
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: { buffer: outputBuffer } },
+ { binding: 1, resource: { buffer: wgCopyBuffer } },
+ ],
+ });
+
+ // Run the shader.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(dispatchSize);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+
+ // Output buffer should be the same as the initial wg buffer as it contains
+ // values returned from atomicExchange
+ const outputExpected = new (typedArrayCtor(t.params.scalarType))(wgNumElements * dispatchSize);
+ outputExpected.forEach((_, i) => (outputExpected[i] = i));
+ t.expectGPUBufferValuesEqual(outputBuffer, outputExpected);
+
+ // And the wg copy buffer should have been modified to a computed value
+ const wgCopyBufferExpected = new (typedArrayCtor(t.params.scalarType))(
+ wgNumElements * dispatchSize
+ );
+ wgCopyBufferExpected.forEach(
+ (_, i) => (wgCopyBufferExpected[i] = mapId.f(i * 2, numInvocations))
+ );
+ t.expectGPUBufferValuesEqual(wgCopyBuffer, wgCopyBufferExpected);
+ });
+
+g.test('exchange_storage_advanced')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-rmw')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicExchange(atomic_ptr: ptr<AS, atomic<T>, read_write>, v: T) -> T
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('mapId', keysOf(kMapId))
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(async t => {
+ const numInvocations = t.params.workgroupSize * t.params.dispatchSize;
+ const bufferNumElements = numInvocations;
+ const scalarType = t.params.scalarType;
+ const mapId = kMapId[t.params.mapId];
+ const extra = mapId.wgsl(numInvocations, t.params.scalarType); // Defines map_id()
+
+ const wgsl =
+ `
+ @group(0) @binding(0)
+ var<storage, read_write> input : atomic<${scalarType}>;
+
+ @group(0) @binding(1)
+ var<storage, read_write> output : array<${scalarType}>;
+
+ @compute @workgroup_size(${t.params.workgroupSize})
+ fn main(
+ @builtin(global_invocation_id) global_invocation_id : vec3<u32>,
+ ) {
+ let id = ${scalarType}(global_invocation_id[0]);
+
+ // All invocations exchange with same single memory address, and we store
+ // the old value at the current invocation's location in the output buffer.
+ output[id] = atomicExchange(&input, map_id(id));
+ }
+ ` + extra;
+
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({ code: wgsl }),
+ entryPoint: 'main',
+ },
+ });
+
+ const arrayType = typedArrayCtor(scalarType);
+
+ // Create input buffer of size 1 with initial value 0
+ const inputBuffer = t.device.createBuffer({
+ size: 1 * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(inputBuffer);
+
+ const outputBuffer = t.device.createBuffer({
+ size: bufferNumElements * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(outputBuffer);
+
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: { buffer: inputBuffer } },
+ { binding: 1, resource: { buffer: outputBuffer } },
+ ],
+ });
+
+ // Run the shader.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(t.params.dispatchSize);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+
+ // Read back buffers
+ const inputBufferResult = await t.readGPUBufferRangeTyped(inputBuffer, {
+ type: arrayType,
+ typedLength: inputBuffer.size / arrayType.BYTES_PER_ELEMENT,
+ });
+ const outputBufferResult = await t.readGPUBufferRangeTyped(outputBuffer, {
+ type: arrayType,
+ typedLength: outputBuffer.size / arrayType.BYTES_PER_ELEMENT,
+ });
+
+ // The one value in the input buffer plus all values in the output buffer
+ // should contain initial value 0 plus map_id(0..n), unsorted.
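+ // Each atomicExchange returns the previous contents and leaves its own value behind,
+ // so 0 and each map_id(id) appear exactly once across input and output.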
+ const values = new arrayType([...inputBufferResult.data, ...outputBufferResult.data]);
+
+ const expected = new arrayType(values.length);
+ expected.forEach((_, i) => {
+ if (i === 0) {
+ expected[0] = 0;
+ } else {
+ expected[i] = mapId.f(i - 1, numInvocations);
+ }
+ });
+
+ // Sort both arrays and compare
+ values.sort();
+ expected.sort(); // Sort because we store hashed results when mapId == 'remap'
+ t.expectOK(checkElementsEqual(values, expected));
+ });
+
+g.test('exchange_workgroup_advanced')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-rmw')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicExchange(atomic_ptr: ptr<AS, atomic<T>, read_write>, v: T) -> T
+
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('mapId', keysOf(kMapId))
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(async t => {
+ const numInvocations = t.params.workgroupSize;
+ const scalarType = t.params.scalarType;
+ const dispatchSize = t.params.dispatchSize;
+ const mapId = kMapId[t.params.mapId];
+ const extra = mapId.wgsl(numInvocations, t.params.scalarType); // Defines map_id()
+
+ const wgsl =
+ `
+ var<workgroup> wg: atomic<${scalarType}>;
+
+ // Will contain the atomicExchange result for each invocation, indexed by its global id
+ @group(0) @binding(0)
+ var<storage, read_write> output: array<${scalarType}, ${numInvocations * dispatchSize}>;
+
+ // Will contain the final value of wg for each workgroup in the dispatch
+ @group(0) @binding(1)
+ var<storage, read_write> wg_copy: array<${scalarType}, ${dispatchSize}>;
+
+ @compute @workgroup_size(${t.params.workgroupSize})
+ fn main(
+ @builtin(local_invocation_index) local_invocation_index: u32,
+ @builtin(workgroup_id) workgroup_id : vec3<u32>
+ ) {
+ let id = ${scalarType}(local_invocation_index);
+ let global_id = ${scalarType}(workgroup_id.x * ${numInvocations} + local_invocation_index);
+
+ // All invocations exchange with same single memory address, and we store
+ // the old value at the current invocation's location in the output buffer.
+ output[global_id] = atomicExchange(&wg, map_id(id));
+
+ // Once all invocations have completed, the first one copies the final exchanged value
+ // to wg_copy for this dispatch (workgroup_id.x)
+ workgroupBarrier();
+ if (local_invocation_index == 0u) {
+ wg_copy[workgroup_id.x] = atomicLoad(&wg);
+ }
+ }
+ ` + extra;
+
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({ code: wgsl }),
+ entryPoint: 'main',
+ },
+ });
+
+ const arrayType = typedArrayCtor(scalarType);
+
+ const outputBuffer = t.device.createBuffer({
+ size: numInvocations * dispatchSize * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(outputBuffer);
+
+ const wgCopyBuffer = t.device.createBuffer({
+ size: dispatchSize * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(wgCopyBuffer);
+
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: { buffer: outputBuffer } },
+ { binding: 1, resource: { buffer: wgCopyBuffer } },
+ ],
+ });
+
+ // Run the shader.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(dispatchSize);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+
+ // Read back buffers
+ const outputBufferResult = await t.readGPUBufferRangeTyped(outputBuffer, {
+ type: arrayType,
+ typedLength: outputBuffer.size / arrayType.BYTES_PER_ELEMENT,
+ });
+ const wgCopyBufferResult = await t.readGPUBufferRangeTyped(wgCopyBuffer, {
+ type: arrayType,
+ typedLength: wgCopyBuffer.size / arrayType.BYTES_PER_ELEMENT,
+ });
+
+ // For each dispatch, the one value in wgCopyBuffer plus all values in the output buffer
+ // should contain initial value 0 plus map_id(0..n), unsorted.
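+ // As in the storage variant, every stored value is handed off exactly once: it is either
+ // returned as some invocation's old value or remains as the final wg value.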
+
+ // Expected values for each dispatch
+ const expected = new arrayType(numInvocations + 1);
+ expected.forEach((_, i) => {
+ if (i === 0) {
+ expected[0] = 0;
+ } else {
+ expected[i] = mapId.f(i - 1, numInvocations);
+ }
+ });
+ expected.sort(); // Sort because we store hashed results when mapId == 'remap'
+
+ // Test values for each dispatch
+ for (let d = 0; d < dispatchSize; ++d) {
+ // Get values for this dispatch
+ const dispatchOffset = d * numInvocations;
+ const values = new arrayType([
+ wgCopyBufferResult.data[d], // Last 'wg' value for this dispatch
+ ...outputBufferResult.data.subarray(dispatchOffset, dispatchOffset + numInvocations), // Rest of the returned values
+ ]);
+
+ values.sort();
+ t.expectOK(checkElementsEqual(values, expected));
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicLoad.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicLoad.spec.ts
new file mode 100644
index 0000000000..2aac7bb9b9
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicLoad.spec.ts
@@ -0,0 +1,192 @@
+export const description = `
+Returns the atomically loaded value pointed to by atomic_ptr. It does not modify the object.
+`;
+
+import { makeTestGroup } from '../../../../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../../../../common/util/data_tables.js';
+import { GPUTest } from '../../../../../../gpu_test.js';
+
+import { dispatchSizes, workgroupSizes, typedArrayCtor, kMapId } from './harness.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('load_storage')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-load')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicLoad(atomic_ptr: ptr<AS, atomic<T>, read_write>) -> T
+
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('mapId', keysOf(kMapId))
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(t => {
+ const numInvocations = t.params.workgroupSize * t.params.dispatchSize;
+ const bufferNumElements = numInvocations;
+ const scalarType = t.params.scalarType;
+ const mapId = kMapId[t.params.mapId];
+
+ const wgsl = `
+ @group(0) @binding(0)
+ var<storage, read_write> input : array<atomic<${scalarType}>>;
+
+ @group(0) @binding(1)
+ var<storage, read_write> output : array<${scalarType}>;
+
+ @compute @workgroup_size(${t.params.workgroupSize})
+ fn main(
+ @builtin(global_invocation_id) global_invocation_id : vec3<u32>,
+ ) {
+ let id = ${scalarType}(global_invocation_id[0]);
+ output[id] = atomicLoad(&input[id]);
+ }
+ `;
+
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({ code: wgsl }),
+ entryPoint: 'main',
+ },
+ });
+
+ const arrayType = typedArrayCtor(scalarType);
+
+ // Create input buffer with values [map_id(0)..map_id(n)]
+ const inputBuffer = t.device.createBuffer({
+ size: bufferNumElements * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ mappedAtCreation: true,
+ });
+ t.trackForCleanup(inputBuffer);
+ const data = new arrayType(inputBuffer.getMappedRange());
+ data.forEach((_, i) => (data[i] = mapId.f(i, numInvocations)));
+ inputBuffer.unmap();
+
+ const outputBuffer = t.device.createBuffer({
+ size: bufferNumElements * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(outputBuffer);
+
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: { buffer: inputBuffer } },
+ { binding: 1, resource: { buffer: outputBuffer } },
+ ],
+ });
+
+ // Run the shader.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(t.params.dispatchSize);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+
+ // Both input and output buffer should be the same now
+ const expected = new (typedArrayCtor(t.params.scalarType))(bufferNumElements);
+ expected.forEach((_, i) => (expected[i] = mapId.f(i, numInvocations)));
+ t.expectGPUBufferValuesEqual(inputBuffer, expected);
+ t.expectGPUBufferValuesEqual(outputBuffer, expected);
+ });
+
+g.test('load_workgroup')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-load')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicLoad(atomic_ptr: ptr<AS, atomic<T>, read_write>) -> T
+
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('mapId', keysOf(kMapId))
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(t => {
+ const numInvocations = t.params.workgroupSize;
+ const wgNumElements = numInvocations;
+ const scalarType = t.params.scalarType;
+ const dispatchSize = t.params.dispatchSize;
+ const mapId = kMapId[t.params.mapId];
+ const extra = mapId.wgsl(numInvocations, t.params.scalarType); // Defines map_id()
+
+ const wgsl =
+ `
+ var<workgroup> wg: array<atomic<${scalarType}>, ${wgNumElements}>;
+
+ // Result of each workgroup is written to output[workgroup_id.x]
+ @group(0) @binding(0)
+ var<storage, read_write> output: array<${scalarType}, ${wgNumElements * dispatchSize}>;
+
+ @compute @workgroup_size(${t.params.workgroupSize})
+ fn main(
+ @builtin(local_invocation_index) local_invocation_index: u32,
+ @builtin(workgroup_id) workgroup_id : vec3<u32>
+ ) {
+ let id = ${scalarType}(local_invocation_index);
+ let global_id = ${scalarType}(workgroup_id.x * ${wgNumElements} + local_invocation_index);
+
+ // Initialize wg[id] with this invocation's global id (mapped)
+ atomicStore(&wg[id], map_id(global_id));
+ workgroupBarrier();
+
+ // Test atomic loading of value at wg[id] and store result in output[global_id]
+ output[global_id] = atomicLoad(&wg[id]);
+ }
+ ` + extra;
+
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({ code: wgsl }),
+ entryPoint: 'main',
+ },
+ });
+
+ const arrayType = typedArrayCtor(scalarType);
+
+ const outputBuffer = t.device.createBuffer({
+ size: wgNumElements * dispatchSize * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(outputBuffer);
+
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer: outputBuffer } }],
+ });
+
+ // Run the shader.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(dispatchSize);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+
+ // Expected values should be map_id(0..n)
+ const expected = new (typedArrayCtor(t.params.scalarType))(
+ wgNumElements * t.params.dispatchSize
+ );
+ expected.forEach((_, i) => (expected[i] = mapId.f(i, numInvocations)));
+
+ t.expectGPUBufferValuesEqual(outputBuffer, expected);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicMax.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicMax.spec.ts
new file mode 100644
index 0000000000..066d673018
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicMax.spec.ts
@@ -0,0 +1,101 @@
+export const description = `
+Atomically read, max and store value.
+
+* Load the original value pointed to by atomic_ptr.
+* Obtains a new value by taking the max with the value v.
+* Store the new value using atomic_ptr.
+
+Returns the original value stored in the atomic object.
+`;
+
+import { makeTestGroup } from '../../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../../gpu_test.js';
+
+import {
+ dispatchSizes,
+ workgroupSizes,
+ runStorageVariableTest,
+ runWorkgroupVariableTest,
+ typedArrayCtor,
+} from './harness.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('max_storage')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-rmw')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicMax(atomic_ptr: ptr<AS, atomic<T>, read_write>, v: T) -> T
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(t => {
+ const numInvocations = t.params.workgroupSize * t.params.dispatchSize;
+ // Allocate one extra element to ensure it doesn't get modified
+ const bufferNumElements = 2;
+
+ const initValue = 0;
+ const op = `atomicMax(&output[0], id)`;
+ const expected = new (typedArrayCtor(t.params.scalarType))(bufferNumElements);
+ expected[0] = numInvocations - 1;
+
+ runStorageVariableTest({
+ t,
+ workgroupSize: t.params.workgroupSize,
+ dispatchSize: t.params.dispatchSize,
+ bufferNumElements,
+ initValue,
+ op,
+ expected,
+ });
+ });
+
+g.test('max_workgroup')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-rmw')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicMax(atomic_ptr: ptr<AS, atomic<T>, read_write>, v: T) -> T
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(t => {
+ // Allocate one extra element to ensure it doesn't get modified
+ const wgNumElements = 2;
+
+ const initValue = 0;
+ const op = `atomicMax(&wg[0], id)`;
+
+ const expected = new (typedArrayCtor(t.params.scalarType))(
+ wgNumElements * t.params.dispatchSize
+ ).fill(initValue);
+ for (let d = 0; d < t.params.dispatchSize; ++d) {
+ const wg = expected.subarray(d * wgNumElements);
+ wg[0] = t.params.workgroupSize - 1;
+ }
+
+ runWorkgroupVariableTest({
+ t,
+ workgroupSize: t.params.workgroupSize,
+ dispatchSize: t.params.dispatchSize,
+ wgNumElements,
+ initValue,
+ op,
+ expected,
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicMin.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicMin.spec.ts
new file mode 100644
index 0000000000..ad880c4182
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicMin.spec.ts
@@ -0,0 +1,100 @@
+export const description = `
+Atomically read, min and store value.
+
+* Load the original value pointed to by atomic_ptr.
+* Obtains a new value by taking the min with the value v.
+* Store the new value using atomic_ptr.
+
+Returns the original value stored in the atomic object.
+`;
+
+import { makeTestGroup } from '../../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../../gpu_test.js';
+
+import {
+ dispatchSizes,
+ workgroupSizes,
+ runStorageVariableTest,
+ runWorkgroupVariableTest,
+ typedArrayCtor,
+} from './harness.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('min_storage')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-rmw')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicMin(atomic_ptr: ptr<AS, atomic<T>, read_write>, v: T) -> T
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(t => {
+ // Allocate one extra element to ensure it doesn't get modified
+ const bufferNumElements = 2;
+
+ const initValue = t.params.scalarType === 'u32' ? 0xffffffff : 0x7fffffff;
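+ // These are the largest representable u32 and i32 values, so every invocation id
+ // compares less than or equal to the initial value.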
+ const op = `atomicMin(&output[0], id)`;
+ const expected = new (typedArrayCtor(t.params.scalarType))(bufferNumElements).fill(initValue);
+ expected[0] = 0;
+
+ runStorageVariableTest({
+ t,
+ workgroupSize: t.params.workgroupSize,
+ dispatchSize: t.params.dispatchSize,
+ bufferNumElements,
+ initValue,
+ op,
+ expected,
+ });
+ });
+
+g.test('min_workgroup')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-rmw')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicMin(atomic_ptr: ptr<AS, atomic<T>, read_write>, v: T) -> T
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(t => {
+ // Allocate one extra element to ensure it doesn't get modified
+ const wgNumElements = 2;
+
+ const initValue = t.params.scalarType === 'u32' ? 0xffffffff : 0x7fffffff;
+ const op = `atomicMin(&wg[0], id)`;
+
+ const expected = new (typedArrayCtor(t.params.scalarType))(
+ wgNumElements * t.params.dispatchSize
+ ).fill(initValue);
+ for (let d = 0; d < t.params.dispatchSize; ++d) {
+ const wg = expected.subarray(d * wgNumElements);
+ wg[0] = 0;
+ }
+
+ runWorkgroupVariableTest({
+ t,
+ workgroupSize: t.params.workgroupSize,
+ dispatchSize: t.params.dispatchSize,
+ wgNumElements,
+ initValue,
+ op,
+ expected,
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicOr.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicOr.spec.ts
new file mode 100644
index 0000000000..3892d41b38
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicOr.spec.ts
@@ -0,0 +1,131 @@
+export const description = `
+Atomically read, or and store value.
+
+* Load the original value pointed to by atomic_ptr.
+* Obtains a new value by or'ing with the value v.
+* Store the new value using atomic_ptr.
+
+Returns the original value stored in the atomic object.
+`;
+
+import { makeTestGroup } from '../../../../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../../../../common/util/data_tables.js';
+import { GPUTest } from '../../../../../../gpu_test.js';
+
+import {
+ dispatchSizes,
+ workgroupSizes,
+ runStorageVariableTest,
+ runWorkgroupVariableTest,
+ kMapId,
+ typedArrayCtor,
+} from './harness.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('or_storage')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-rmw')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicOr(atomic_ptr: ptr<AS, atomic<T>, read_write>, v: T) -> T
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('mapId', keysOf(kMapId))
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(t => {
+ const numInvocations = t.params.workgroupSize * t.params.dispatchSize;
+
+ // Allocate an output buffer with enough elements to hold one bit per invocation, plus one extra element to check it is not modified
+ const bufferNumElements = Math.max(1, numInvocations / 32) + 1;
+
+ // Start with all bits low, then use atomicOr to set the bit for each mapped global id.
+ // Note: Both WGSL and JS will shift left 1 by id modulo 32.
+ const initValue = 0;
+
+ const scalarType = t.params.scalarType;
+ const mapId = kMapId[t.params.mapId];
+ const extra = mapId.wgsl(numInvocations); // Defines map_id()
+ const op = `
+ let i = map_id(u32(id));
+ atomicOr(&output[i / 32], ${scalarType}(1) << i)
+ `;
+ const expected = new (typedArrayCtor(scalarType))(bufferNumElements);
+ for (let id = 0; id < numInvocations; ++id) {
+ const i = mapId.f(id, numInvocations);
+ expected[Math.floor(i / 32)] |= 1 << i;
+ }
+
+ runStorageVariableTest({
+ t,
+ workgroupSize: t.params.workgroupSize,
+ dispatchSize: t.params.dispatchSize,
+ bufferNumElements,
+ initValue,
+ op,
+ expected,
+ extra,
+ });
+ });
+
+g.test('or_workgroup')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-rmw')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicOr(atomic_ptr: ptr<AS, atomic<T>, read_write>, v: T) -> T
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('mapId', keysOf(kMapId))
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(t => {
+ const numInvocations = t.params.workgroupSize;
+
+ // Allocate a workgroup array with enough elements to hold one bit per invocation, plus one extra element to check it is not modified
+ const wgNumElements = Math.max(1, numInvocations / 32) + 1;
+
+ // Start with all bits low, then use atomicOr to set the bit for each mapped local id.
+ // Note: Both WGSL and JS will shift left 1 by id modulo 32.
+ const initValue = 0;
+
+ const scalarType = t.params.scalarType;
+ const mapId = kMapId[t.params.mapId];
+ const extra = mapId.wgsl(numInvocations); // Defines map_id()
+ const op = `
+ let i = map_id(u32(id));
+ atomicOr(&wg[i / 32], ${scalarType}(1) << i)
+ `;
+ const expected = new (typedArrayCtor(scalarType))(wgNumElements * t.params.dispatchSize);
+ for (let d = 0; d < t.params.dispatchSize; ++d) {
+ for (let id = 0; id < numInvocations; ++id) {
+ const wg = expected.subarray(d * wgNumElements);
+ const i = mapId.f(id, numInvocations);
+ wg[Math.floor(i / 32)] |= 1 << i;
+ }
+ }
+
+ runWorkgroupVariableTest({
+ t,
+ workgroupSize: t.params.workgroupSize,
+ dispatchSize: t.params.dispatchSize,
+ wgNumElements,
+ initValue,
+ op,
+ expected,
+ extra,
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicStore.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicStore.spec.ts
new file mode 100644
index 0000000000..18ff72975d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicStore.spec.ts
@@ -0,0 +1,301 @@
+export const description = `
+Atomically stores the value v in the atomic object pointed to by atomic_ptr.
+`;
+
+import { makeTestGroup } from '../../../../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../../../../common/util/data_tables.js';
+import { GPUTest } from '../../../../../../gpu_test.js';
+
+import {
+ dispatchSizes,
+ workgroupSizes,
+ runStorageVariableTest,
+ runWorkgroupVariableTest,
+ typedArrayCtor,
+ kMapId,
+} from './harness.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('store_storage_basic')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-store')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicStore(atomic_ptr: ptr<AS, atomic<T>, read_write>, v: T)
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('mapId', keysOf(kMapId))
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(t => {
+ const numInvocations = t.params.workgroupSize * t.params.dispatchSize;
+ const bufferNumElements = numInvocations;
+ const mapId = kMapId[t.params.mapId];
+ const extra = mapId.wgsl(numInvocations, t.params.scalarType); // Defines map_id()
+
+ const initValue = 0;
+ const op = `atomicStore(&output[id], map_id(id))`;
+ const expected = new (typedArrayCtor(t.params.scalarType))(bufferNumElements);
+ expected.forEach((_, i) => (expected[i] = mapId.f(i, numInvocations)));
+
+ runStorageVariableTest({
+ t,
+ workgroupSize: t.params.workgroupSize,
+ dispatchSize: t.params.dispatchSize,
+ bufferNumElements,
+ initValue,
+ op,
+ expected,
+ extra,
+ });
+ });
+
+g.test('store_workgroup_basic')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-store')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicStore(atomic_ptr: ptr<AS, atomic<T>, read_write>, v: T)
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('mapId', keysOf(kMapId))
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(t => {
+ const numInvocations = t.params.workgroupSize;
+ const wgNumElements = numInvocations;
+ const mapId = kMapId[t.params.mapId];
+ const extra = mapId.wgsl(numInvocations, t.params.scalarType); // Defines map_id()
+
+ const initValue = 0;
+ const op = `atomicStore(&wg[id], map_id(global_id))`;
+ const expected = new (typedArrayCtor(t.params.scalarType))(
+ wgNumElements * t.params.dispatchSize
+ );
+ expected.forEach((_, i) => (expected[i] = mapId.f(i, numInvocations)));
+
+ runWorkgroupVariableTest({
+ t,
+ workgroupSize: t.params.workgroupSize,
+ dispatchSize: t.params.dispatchSize,
+ wgNumElements,
+ initValue,
+ op,
+ expected,
+ extra,
+ });
+ });
+
+g.test('store_storage_advanced')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-store')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicStore(atomic_ptr: ptr<AS, atomic<T>, read_write>, v: T)
+
+Tests that multiple invocations of atomicStore to the same location returns
+one of the values written.
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('mapId', keysOf(kMapId))
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(async t => {
+ const numInvocations = t.params.workgroupSize * t.params.dispatchSize;
+ const scalarType = t.params.scalarType;
+ const mapId = kMapId[t.params.mapId];
+ const extra = mapId.wgsl(numInvocations, t.params.scalarType); // Defines map_id()
+
+ const wgsl =
+ `
+ @group(0) @binding(0)
+ var<storage, read_write> output : array<atomic<${scalarType}>>;
+
+ @compute @workgroup_size(${t.params.workgroupSize})
+ fn main(
+ @builtin(global_invocation_id) global_invocation_id : vec3<u32>,
+ ) {
+ let id = ${scalarType}(global_invocation_id[0]);
+
+ // All invocations store to the same location
+ atomicStore(&output[0], map_id(id));
+ }
+ ` + extra;
+
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({ code: wgsl }),
+ entryPoint: 'main',
+ },
+ });
+
+ const arrayType = typedArrayCtor(scalarType);
+
+ // Output buffer has only 1 element
+ const outputBuffer = t.device.createBuffer({
+ size: 1 * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(outputBuffer);
+
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer: outputBuffer } }],
+ });
+
+ // Run the shader.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(t.params.dispatchSize);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+
+ // Read back the buffer
+ const outputBufferResult = (
+ await t.readGPUBufferRangeTyped(outputBuffer, {
+ type: arrayType,
+ typedLength: outputBuffer.size / arrayType.BYTES_PER_ELEMENT,
+ })
+ ).data;
+
+ // All invocations wrote to output[0], so validate that it contains one
+ // of the possible computed values.
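+ // The order of the atomic stores is not specified, so any one of them may be the last writer.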
+ const expected_one_of = new arrayType(numInvocations);
+ expected_one_of.forEach((_, i) => (expected_one_of[i] = mapId.f(i, numInvocations)));
+
+ if (!expected_one_of.includes(outputBufferResult[0])) {
+ t.fail(
+ `Unexpected value in output[0]: '${outputBufferResult[0]}', expected value to be one of: ${expected_one_of}`
+ );
+ }
+ });
+
+g.test('store_workgroup_advanced')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-store')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicStore(atomic_ptr: ptr<AS, atomic<T>, read_write>, v: T)
+
+Tests that multiple invocations of atomicStore to the same location returns
+one of the values written.
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('mapId', keysOf(kMapId))
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(async t => {
+ const numInvocations = t.params.workgroupSize;
+ const scalarType = t.params.scalarType;
+ const dispatchSize = t.params.dispatchSize;
+ const mapId = kMapId[t.params.mapId];
+ const extra = mapId.wgsl(numInvocations, t.params.scalarType); // Defines map_id()
+
+ const wgsl =
+ `
+ var<workgroup> wg: atomic<${scalarType}>;
+
+ // Result of each workgroup is written to output[workgroup_id.x]
+ @group(0) @binding(0)
+ var<storage, read_write> output: array<${scalarType}, ${dispatchSize}>;
+
+ @compute @workgroup_size(${t.params.workgroupSize})
+ fn main(
+ @builtin(local_invocation_index) local_invocation_index: u32,
+ @builtin(workgroup_id) workgroup_id : vec3<u32>
+ ) {
+ let id = ${scalarType}(local_invocation_index);
+
+ // All invocations of a given dispatch store to the same location.
+ // In the end, the final value should be one of the mapped ids, chosen nondeterministically.
+ atomicStore(&wg, map_id(id));
+
+ // Once all invocations have completed, the first one copies the result
+ // to output for this dispatch (workgroup_id.x)
+ workgroupBarrier();
+ if (local_invocation_index == 0u) {
+ output[workgroup_id.x] = atomicLoad(&wg);
+ }
+ }
+ ` + extra;
+
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({ code: wgsl }),
+ entryPoint: 'main',
+ },
+ });
+
+ const arrayType = typedArrayCtor(scalarType);
+
+ const outputBuffer = t.device.createBuffer({
+ size: dispatchSize * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(outputBuffer);
+
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer: outputBuffer } }],
+ });
+
+ // Run the shader.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(dispatchSize);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+
+ // Read back the buffer
+ const outputBufferResult = (
+ await t.readGPUBufferRangeTyped(outputBuffer, {
+ type: arrayType,
+ typedLength: outputBuffer.size / arrayType.BYTES_PER_ELEMENT,
+ })
+ ).data;
+
+ // Each dispatch wrote to a single atomic workgroup var that was copied
+ // to outputBuffer[dispatch]. Validate that each value in the output buffer
+ // is one of the possible computed values.
+ const expected_one_of = new arrayType(numInvocations);
+ expected_one_of.forEach((_, i) => (expected_one_of[i] = mapId.f(i, numInvocations)));
+
+ for (let d = 0; d < dispatchSize; d++) {
+ if (!expected_one_of.includes(outputBufferResult[d])) {
+ t.fail(
+ `Unexpected value in output[${d}]: '${outputBufferResult[d]}', expected value to be one of: ${expected_one_of}`
+ );
+ }
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicSub.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicSub.spec.ts
new file mode 100644
index 0000000000..6cea190299
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicSub.spec.ts
@@ -0,0 +1,101 @@
+export const description = `
+Atomically read, subtract and store value.
+
+* Load the original value pointed to by atomic_ptr.
+* Obtains a new value by subtracting with the value v.
+* Store the new value using atomic_ptr.
+
+Returns the original value stored in the atomic object.
+`;
+
+import { makeTestGroup } from '../../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../../gpu_test.js';
+
+import {
+ dispatchSizes,
+ workgroupSizes,
+ runStorageVariableTest,
+ runWorkgroupVariableTest,
+ typedArrayCtor,
+} from './harness.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('sub_storage')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-rmw')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicSub(atomic_ptr: ptr<AS, atomic<T>, read_write>, v: T) -> T
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(t => {
+ const numInvocations = t.params.workgroupSize * t.params.dispatchSize;
+ // Allocate one extra element to ensure it doesn't get modified
+ const bufferNumElements = 2;
+
+ const initValue = 0;
+ const op = `atomicSub(&output[0], 1)`;
+ const expected = new (typedArrayCtor(t.params.scalarType))(bufferNumElements);
+ expected[0] = -1 * numInvocations;
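+ // For u32, assigning a negative number to the typed array wraps modulo 2^32,
+ // matching WGSL's wrapping u32 subtraction.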
+
+ runStorageVariableTest({
+ t,
+ workgroupSize: t.params.workgroupSize,
+ dispatchSize: t.params.dispatchSize,
+ bufferNumElements,
+ initValue,
+ op,
+ expected,
+ });
+ });
+
+g.test('sub_workgroup')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-rmw')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicSub(atomic_ptr: ptr<AS, atomic<T>, read_write>, v: T) -> T
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(t => {
+ // Allocate one extra element to ensure it doesn't get modified
+ const wgNumElements = 2;
+
+ const initValue = 0;
+ const op = `atomicSub(&wg[0], 1)`;
+
+ const expected = new (typedArrayCtor(t.params.scalarType))(
+ wgNumElements * t.params.dispatchSize
+ );
+ for (let d = 0; d < t.params.dispatchSize; ++d) {
+ const wg = expected.subarray(d * wgNumElements);
+ wg[0] = -1 * t.params.workgroupSize;
+ }
+
+ runWorkgroupVariableTest({
+ t,
+ workgroupSize: t.params.workgroupSize,
+ dispatchSize: t.params.dispatchSize,
+ wgNumElements,
+ initValue,
+ op,
+ expected,
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicXor.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicXor.spec.ts
new file mode 100644
index 0000000000..99192fd9fe
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/atomicXor.spec.ts
@@ -0,0 +1,135 @@
+export const description = `
+Atomically read, xor and store value.
+
+* Load the original value pointed to by atomic_ptr.
+* Obtain a new value by xor'ing it with the value v.
+* Store the new value using atomic_ptr.
+
+Returns the original value stored in the atomic object.
+`;
+
+import { makeTestGroup } from '../../../../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../../../../common/util/data_tables.js';
+import { GPUTest } from '../../../../../../gpu_test.js';
+
+import {
+ dispatchSizes,
+ workgroupSizes,
+ runStorageVariableTest,
+ runWorkgroupVariableTest,
+ kMapId,
+ typedArrayCtor,
+} from './harness.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('xor_storage')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-rmw')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicXor(atomic_ptr: ptr<AS, atomic<T>, read_write>, v: T) -> T
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('mapId', keysOf(kMapId))
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(t => {
+ const numInvocations = t.params.workgroupSize * t.params.dispatchSize;
+
+    // Size the output buffer to hold one bit per invocation (one 32-bit element per 32
+    // invocations), plus one extra element to validate that it is left untouched.
+ const bufferNumElements = Math.max(1, numInvocations / 32) + 1;
+
+    // Initialize every u32 in the buffer to a fixed, arbitrary bit pattern, then atomicXor the bit for each mapped global id.
+ // Note: Both WGSL and JS will shift left 1 by id modulo 32.
+ const initValue = 0b11000011010110100000111100111100;
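+    // Concrete example of the note above, assuming a mapped id i of 37: the shader flips bit
+    // 37 % 32 = 5 of output[37 / 32] = output[1], and the JS expectation loop below does the
+    // same, since `1 << 37` in JS also evaluates to 32.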
+
+ const scalarType = t.params.scalarType;
+ const mapId = kMapId[t.params.mapId];
+ const extra = mapId.wgsl(numInvocations); // Defines map_id()
+ const op = `
+ let i = map_id(u32(id));
+ atomicXor(&output[i / 32], ${scalarType}(1) << i)
+ `;
+
+ const expected = new (typedArrayCtor(scalarType))(bufferNumElements).fill(initValue);
+ for (let id = 0; id < numInvocations; ++id) {
+ const i = mapId.f(id, numInvocations);
+ expected[Math.floor(i / 32)] ^= 1 << i;
+ }
+
+ runStorageVariableTest({
+ t,
+ workgroupSize: t.params.workgroupSize,
+ dispatchSize: t.params.dispatchSize,
+ bufferNumElements,
+ initValue,
+ op,
+ expected,
+ extra,
+ });
+ });
+
+g.test('xor_workgroup')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-rmw')
+ .desc(
+ `
+AS is storage or workgroup
+T is i32 or u32
+
+fn atomicXor(atomic_ptr: ptr<AS, atomic<T>, read_write>, v: T) -> T
+`
+ )
+ .params(u =>
+ u
+ .combine('workgroupSize', workgroupSizes)
+ .combine('dispatchSize', dispatchSizes)
+ .combine('mapId', keysOf(kMapId))
+ .combine('scalarType', ['u32', 'i32'])
+ )
+ .fn(t => {
+ const numInvocations = t.params.workgroupSize;
+
+    // Size the workgroup array to hold one bit per invocation (one 32-bit element per 32
+    // invocations), plus one extra element to validate that it is left untouched.
+ const wgNumElements = Math.max(1, numInvocations / 32) + 1;
+
+    // Initialize every u32 in the workgroup array to a fixed, arbitrary bit pattern, then atomicXor the bit for each mapped global id.
+ // Note: Both WGSL and JS will shift left 1 by id modulo 32.
+ const initValue = 0b11000011010110100000111100111100;
+
+ const scalarType = t.params.scalarType;
+ const mapId = kMapId[t.params.mapId];
+ const extra = mapId.wgsl(numInvocations); // Defines map_id()
+ const op = `
+ let i = map_id(u32(id));
+ atomicXor(&wg[i / 32], ${scalarType}(1) << i)
+ `;
+
+ const expected = new (typedArrayCtor(scalarType))(wgNumElements * t.params.dispatchSize).fill(
+ initValue
+ );
+ for (let d = 0; d < t.params.dispatchSize; ++d) {
+ for (let id = 0; id < numInvocations; ++id) {
+ const wg = expected.subarray(d * wgNumElements);
+ const i = mapId.f(id, numInvocations);
+ wg[Math.floor(i / 32)] ^= 1 << i;
+ }
+ }
+
+ runWorkgroupVariableTest({
+ t,
+ workgroupSize: t.params.workgroupSize,
+ dispatchSize: t.params.dispatchSize,
+ wgNumElements,
+ initValue,
+ op,
+ expected,
+ extra,
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/harness.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/harness.ts
new file mode 100644
index 0000000000..ed02467f80
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/atomics/harness.ts
@@ -0,0 +1,208 @@
+import {
+ assert,
+ TypedArrayBufferView,
+ TypedArrayBufferViewConstructor,
+} from '../../../../../../../common/util/util.js';
+import { GPUTest } from '../../../../../../gpu_test.js';
+
+// Use these in combination.
+export const workgroupSizes = [1, 2, 32, 64];
+export const dispatchSizes = [1, 4, 8, 16];
+
+// Use this alone - dispatch size should be 1.
+export const onlyWorkgroupSizes = [1, 2, 4, 8, 16, 32, 64, 128, 256];
+
+export const kMapId = {
+ passthrough: {
+ f: (id: number, _max: number) => id,
+ wgsl: (_max: number, scalarType = 'u32') =>
+ `fn map_id(id: ${scalarType}) -> ${scalarType} { return id; }`,
+ },
+ remap: {
+ f: (id: number, max: number) => (((id >>> 0) * 14957) ^ (((id >>> 0) * 26561) >> 2)) % max,
+ wgsl: (max: number, scalarType = 'u32') =>
+ `fn map_id(id: ${scalarType}) -> ${scalarType} { return ((id * 14957) ^ ((id * 26561) >> 2)) % ${max}; }`,
+ },
+};
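+// Illustrative examples (the argument values here are chosen for exposition only):
+//   kMapId.passthrough.f(3, 8) === 3
+//   kMapId.remap.f(3, 8) === ((3 * 14957) ^ ((3 * 26561) >> 2)) % 8 === 7
+// The remap variant deterministically scrambles invocation ids; it is not guaranteed to be a
+// permutation of [0, max).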
+
+export function typedArrayCtor(scalarType: string): TypedArrayBufferViewConstructor {
+ switch (scalarType) {
+ case 'u32':
+ return Uint32Array;
+ case 'i32':
+ return Int32Array;
+ default:
+      assert(false, 'Atomic variables can only be u32 or i32');
+ return Uint8Array;
+ }
+}
+
+export function runStorageVariableTest({
+ t,
+ workgroupSize, // Workgroup X-size
+ dispatchSize, // Dispatch X-size
+ bufferNumElements, // Number of 32-bit elements in output buffer
+ initValue, // 32-bit initial value used to fill output buffer
+ // Atomic op source executed by the compute shader, NOTE: 'id' is global_invocation_id.x,
+ // and `output` is a storage array of atomics.
+ op,
+ expected, // Expected values array to compare against output buffer
+ extra, // Optional extra WGSL source
+}: {
+ t: GPUTest;
+ workgroupSize: number;
+ dispatchSize: number;
+ bufferNumElements: number;
+ initValue: number;
+ op: string;
+ expected: TypedArrayBufferView;
+ extra?: string;
+}) {
+ assert(expected.length === bufferNumElements, "'expected' buffer size is incorrect");
+
+ const scalarType = expected instanceof Uint32Array ? 'u32' : 'i32';
+ const arrayType = typedArrayCtor(scalarType);
+
+ const wgsl = `
+ @group(0) @binding(0)
+ var<storage, read_write> output : array<atomic<${scalarType}>>;
+
+ @compute @workgroup_size(${workgroupSize})
+ fn main(
+ @builtin(global_invocation_id) global_invocation_id : vec3<u32>,
+ ) {
+ let id = ${scalarType}(global_invocation_id[0]);
+ ${op};
+ }
+ ${extra || ''}
+ `;
+
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({ code: wgsl }),
+ entryPoint: 'main',
+ },
+ });
+
+ const outputBuffer = t.device.createBuffer({
+ size: bufferNumElements * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ mappedAtCreation: true,
+ });
+  t.trackForCleanup(outputBuffer);
+  // Fill the output buffer with the initial value.
+  const data = new arrayType(outputBuffer.getMappedRange());
+  data.fill(initValue);
+ outputBuffer.unmap();
+
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer: outputBuffer } }],
+ });
+
+ // Run the shader.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(dispatchSize);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+
+ t.expectGPUBufferValuesEqual(outputBuffer, expected);
+}
+
+export function runWorkgroupVariableTest({
+ t,
+ workgroupSize, // Workgroup X-size
+ dispatchSize, // Dispatch X-size
+ wgNumElements, // Number of 32-bit elements in 'wg' array. Output buffer is sized to wgNumElements * dispatchSize.
+ initValue, // 32-bit initial value used to fill 'wg' array
+  // Atomic op source executed by the compute shader, NOTE: 'id' is local_invocation_index,
+  // `wg` is a workgroup array of atomics of size `wgNumElements`, and `output` is a storage array of
+  // non-atomics of size `wgNumElements * dispatchSize` into which each dispatch copies its `wg`
+  // (dispatch 0 to the first wgNumElements elements, dispatch 1 to the next wgNumElements elements, etc.).
+ op,
+ expected, // Expected values array to compare against output buffer
+ extra, // Optional extra WGSL source
+}: {
+ t: GPUTest;
+ workgroupSize: number;
+ dispatchSize: number;
+ wgNumElements: number;
+ initValue: number;
+ op: string;
+ expected: TypedArrayBufferView;
+ extra?: string;
+}) {
+ assert(expected.length === wgNumElements * dispatchSize, "'expected' buffer size is incorrect");
+
+ const scalarType = expected instanceof Uint32Array ? 'u32' : 'i32';
+ const arrayType = typedArrayCtor(scalarType);
+
+ const wgsl = `
+ var<workgroup> wg: array<atomic<${scalarType}>, ${wgNumElements}>;
+
+    // Each workgroup's results are copied to output, starting at output[workgroup_id.x * wgNumElements]
+ @group(0) @binding(0)
+ var<storage, read_write> output: array<${scalarType}, ${wgNumElements * dispatchSize}>;
+
+ @compute @workgroup_size(${workgroupSize})
+ fn main(
+ @builtin(local_invocation_index) local_invocation_index: u32,
+ @builtin(workgroup_id) workgroup_id : vec3<u32>
+ ) {
+ let id = ${scalarType}(local_invocation_index);
+ let global_id = ${scalarType}(workgroup_id.x * ${wgNumElements} + local_invocation_index);
+
+ // Initialize workgroup array
+ if (local_invocation_index == 0) {
+ for (var i = 0u; i < ${wgNumElements}; i++) {
+ atomicStore(&wg[i], bitcast<${scalarType}>(${initValue}u));
+ }
+ }
+ workgroupBarrier();
+
+ ${op};
+
+ // Copy results to output buffer
+ workgroupBarrier();
+ if (local_invocation_index == 0) {
+ for (var i = 0u; i < ${wgNumElements}; i++) {
+ output[(workgroup_id.x * ${wgNumElements}) + i] = atomicLoad(&wg[i]);
+ }
+ }
+ }
+ ${extra || ''}
+ `;
+
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({ code: wgsl }),
+ entryPoint: 'main',
+ },
+ });
+
+ const outputBuffer = t.device.createBuffer({
+ size: wgNumElements * dispatchSize * arrayType.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer: outputBuffer } }],
+ });
+
+ // Run the shader.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(dispatchSize);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+
+ t.expectGPUBufferValuesEqual(outputBuffer, expected);
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/bitcast.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/bitcast.spec.ts
new file mode 100644
index 0000000000..390129f2c7
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/bitcast.spec.ts
@@ -0,0 +1,1275 @@
+export const description = `
+Execution tests for the 'bitcast' builtin function
+
+@const @must_use fn bitcast<T>(e: T ) -> T
+T is a concrete numeric scalar or concrete numeric vector
+Identity function.
+
+@const @must_use fn bitcast<T>(e: S ) -> T
+@const @must_use fn bitcast<vecN<T>>(e: vecN<S> ) -> vecN<T>
+S is i32, u32, f32
+T is i32, u32, f32, and T is not S
+Reinterpretation of bits. Beware non-normal f32 values.
+
+@const @must_use fn bitcast<T>(e: vec2<f16> ) -> T
+@const @must_use fn bitcast<vec2<T>>(e: vec4<f16> ) -> vec2<T>
+@const @must_use fn bitcast<vec2<f16>>(e: T ) -> vec2<f16>
+@const @must_use fn bitcast<vec4<f16>>(e: vec2<T> ) -> vec4<f16>
+T is i32, u32, f32
+`;
+
+import { TestParams } from '../../../../../../common/framework/fixture.js';
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { assert } from '../../../../../../common/util/util.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { Comparator, alwaysPass, anyOf } from '../../../../../util/compare.js';
+import { kBit, kValue } from '../../../../../util/constants.js';
+import {
+ f32,
+ i32,
+ u32,
+ f16,
+ TypeF32,
+ TypeI32,
+ TypeU32,
+ TypeF16,
+ TypeVec,
+ Vector,
+ Scalar,
+ toVector,
+} from '../../../../../util/conversion.js';
+import { FPInterval, FP } from '../../../../../util/floating_point.js';
+import {
+ fullF32Range,
+ fullI32Range,
+ fullU32Range,
+ fullF16Range,
+ linearRange,
+ isSubnormalNumberF32,
+ isSubnormalNumberF16,
+ cartesianProduct,
+ isFiniteF32,
+ isFiniteF16,
+} from '../../../../../util/math.js';
+import {
+ reinterpretI32AsF32,
+ reinterpretI32AsU32,
+ reinterpretF32AsI32,
+ reinterpretF32AsU32,
+ reinterpretU32AsF32,
+ reinterpretU32AsI32,
+ reinterpretU16AsF16,
+ reinterpretF16AsU16,
+} from '../../../../../util/reinterpret.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run, ShaderBuilder } from '../../expression.js';
+
+import { builtinWithPredeclaration } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+const numNaNs = 11;
+const f32InfAndNaNInU32: number[] = [
+ // Cover NaNs evenly in integer space.
+ // The positive NaN with the lowest integer representation is the integer
+ // for infinity, plus one.
+ // The positive NaN with the highest integer representation is i32.max (!)
+ ...linearRange(kBit.f32.positive.infinity + 1, kBit.i32.positive.max, numNaNs),
+ // The negative NaN with the lowest integer representation is the integer
+ // for negative infinity, plus one.
+ // The negative NaN with the highest integer representation is u32.max (!)
+ ...linearRange(kBit.f32.negative.infinity + 1, kBit.u32.max, numNaNs),
+ kBit.f32.positive.infinity,
+ kBit.f32.negative.infinity,
+];
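+// That is, positive NaNs occupy the bit patterns 0x7f800001..0x7fffffff and negative NaNs occupy
+// 0xff800001..0xffffffff; the lists above sample each range evenly and also include both infinity
+// bit patterns (0x7f800000 and 0xff800000).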
+const f32InfAndNaNInF32 = f32InfAndNaNInU32.map(u => reinterpretU32AsF32(u));
+const f32InfAndNaNInI32 = f32InfAndNaNInU32.map(u => reinterpretU32AsI32(u));
+
+const f32ZerosInU32 = [0, kBit.f32.negative.zero];
+const f32ZerosInF32 = f32ZerosInU32.map(u => reinterpretU32AsF32(u));
+const f32ZerosInI32 = f32ZerosInU32.map(u => reinterpretU32AsI32(u));
+const f32ZerosInterval: FPInterval = new FPInterval('f32', -0.0, 0.0);
+
+// f32FiniteRange is a list of finite f32s. fullF32Range() already
+// has +0, so we only need to add -0.
+const f32FiniteRange: number[] = [...fullF32Range(), kValue.f32.negative.zero];
+const f32RangeWithInfAndNaN: number[] = [...f32FiniteRange, ...f32InfAndNaNInF32];
+
+// F16 values, finite, Inf/NaN, and zeros. Represented in float and u16.
+const f16FiniteInF16: number[] = [...fullF16Range(), kValue.f16.negative.zero];
+const f16FiniteInU16: number[] = f16FiniteInF16.map(u => reinterpretF16AsU16(u));
+
+const f16InfAndNaNInU16: number[] = [
+ // Cover NaNs evenly in integer space.
+ // The positive NaN with the lowest integer representation is the integer
+ // for infinity, plus one.
+ // The positive NaN with the highest integer representation is u16 0x7fff i.e. 32767.
+ ...linearRange(kBit.f16.positive.infinity + 1, 32767, numNaNs).map(v => Math.ceil(v)),
+ // The negative NaN with the lowest integer representation is the integer
+ // for negative infinity, plus one.
+ // The negative NaN with the highest integer representation is u16 0xffff i.e. 65535
+ ...linearRange(kBit.f16.negative.infinity + 1, 65535, numNaNs).map(v => Math.floor(v)),
+ kBit.f16.positive.infinity,
+ kBit.f16.negative.infinity,
+];
+const f16InfAndNaNInF16 = f16InfAndNaNInU16.map(u => reinterpretU16AsF16(u));
+
+const f16ZerosInU16 = [kBit.f16.negative.zero, 0];
+
+// f16 interval that matches +/-0.0.
+const f16ZerosInterval: FPInterval = new FPInterval('f16', -0.0, 0.0);
+
+/**
+ * @returns a u32 whose lower and higher 16 bits are, respectively, the first and
+ * second element of the given array of two u16, packed little-endian.
+ */
+function u16x2ToU32(u16x2: readonly number[]): number {
+ assert(u16x2.length === 2);
+ // Create a DataView with 4 bytes buffer.
+ const buffer = new ArrayBuffer(4);
+ const view = new DataView(buffer);
+ // Enforce little-endian.
+ view.setUint16(0, u16x2[0], true);
+ view.setUint16(2, u16x2[1], true);
+ return view.getUint32(0, true);
+}
+
+/**
+ * @returns an array of two u16 holding, respectively, the lower and higher 16 bits
+ * of the given u32, unpacked little-endian.
+ */
+function u32ToU16x2(u32: number): number[] {
+ // Create a DataView with 4 bytes buffer.
+ const buffer = new ArrayBuffer(4);
+ const view = new DataView(buffer);
+ // Enforce little-endian.
+ view.setUint32(0, u32, true);
+ return [view.getUint16(0, true), view.getUint16(2, true)];
+}
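+// For example (illustrative bit patterns): u16x2ToU32([0x3c00, 0xc000]) === 0xc0003c00, i.e. the
+// first element ends up in the low 16 bits, and u32ToU16x2(0xc0003c00) returns [0x3c00, 0xc000].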
+
+/**
+ * @returns a vec2<f16> from an array of two u16, each reinterpreted as f16.
+ */
+function u16x2ToVec2F16(u16x2: number[]): Vector {
+ assert(u16x2.length === 2);
+ return toVector(u16x2.map(reinterpretU16AsF16), f16);
+}
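+// For example, u16x2ToVec2F16([0x3c00, 0xc000]) is vec2<f16>(1.0, -2.0), since 0x3c00 and 0xc000
+// are the f16 bit patterns of 1.0 and -2.0 respectively.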
+
+/**
+ * @returns a vec4<f16> from an array of four u16, each reinterpreted as f16.
+ */
+function u16x4ToVec4F16(u16x4: number[]): Vector {
+ assert(u16x4.length === 4);
+ return toVector(u16x4.map(reinterpretU16AsF16), f16);
+}
+
+/**
+ * @returns true if and only if a given u32 can bitcast to a vec2<f16> with all elements
+ * being finite f16 values.
+ */
+function canU32BitcastToFiniteVec2F16(u32: number): boolean {
+ return u32ToU16x2(u32)
+ .map(u16 => isFiniteF16(reinterpretU16AsF16(u16)))
+ .reduce((a, b) => a && b, true);
+}
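+// For example, canU32BitcastToFiniteVec2F16(0x3c003c00) is true (both halves are f16 1.0), while
+// canU32BitcastToFiniteVec2F16(0x7c000000) is false (the upper half 0x7c00 is f16 +Inf).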
+
+/**
+ * @returns an array of N elements with the i-th element being an array of len elements
+ * [a_i, a_((i+1)%N), ..., a_((i+len-1)%N)], for the input array of N elements [a_1, ... a_N]
+ * and the given len. For example, slidingSlice([1, 2, 3], 2) results in
+ * [[1, 2], [2, 3], [3, 1]].
+ * This helper function is used for generating vector cases from scalar values array.
+ */
+function slidingSlice(input: number[], len: number) {
+ const result: number[][] = [];
+ for (let i = 0; i < input.length; i++) {
+ const sub: number[] = [];
+ for (let j = 0; j < len; j++) {
+ sub.push(input[(i + j) % input.length]);
+ }
+ result.push(sub);
+ }
+ return result;
+}
+
+// Interesting vec2<f16> values (zeros, Inf, and NaN) for test cases.
+// vec2<f16> values that have at least one Inf/NaN f16 element, reinterpreted as u32/i32.
+const f16Vec2InfAndNaNInU32 = [
+ ...cartesianProduct(f16InfAndNaNInU16, [...f16InfAndNaNInU16, ...f16FiniteInU16]),
+ ...cartesianProduct(f16FiniteInU16, f16InfAndNaNInU16),
+].map(u16x2ToU32);
+const f16Vec2InfAndNaNInI32 = f16Vec2InfAndNaNInU32.map(u => reinterpretU32AsI32(u));
+// vec2<f16> values with two f16 0.0 element, reinterpreted as u32/i32.
+const f16Vec2ZerosInU32 = cartesianProduct(f16ZerosInU16, f16ZerosInU16).map(u16x2ToU32);
+const f16Vec2ZerosInI32 = f16Vec2ZerosInU32.map(u => reinterpretU32AsI32(u));
+
+// i32/u32/f32 range for bitcasting to vec2<f16>
+// u32 values for bitcasting to vec2<f16> finite, Inf, and NaN.
+const u32RangeForF16Vec2FiniteInfNaN: number[] = [
+ ...fullU32Range(),
+ ...f16Vec2ZerosInU32,
+ ...f16Vec2InfAndNaNInU32,
+];
+// u32 values for bitcasting to finite only vec2<f16>, used for constant evaluation.
+const u32RangeForF16Vec2Finite: number[] = u32RangeForF16Vec2FiniteInfNaN.filter(
+ canU32BitcastToFiniteVec2F16
+);
+// i32 values for bitcasting to vec2<f16> finite, zeros, Inf, and NaN.
+const i32RangeForF16Vec2FiniteInfNaN: number[] = [
+ ...fullI32Range(),
+ ...f16Vec2ZerosInI32,
+ ...f16Vec2InfAndNaNInI32,
+];
+// i32 values for bitcasting to finite only vec2<f16>, used for constant evaluation.
+const i32RangeForF16Vec2Finite: number[] = i32RangeForF16Vec2FiniteInfNaN.filter(u =>
+ canU32BitcastToFiniteVec2F16(reinterpretI32AsU32(u))
+);
+// f32 values with finite/Inf/NaN f32, for bitcasting to vec2<f16> finite, zeros, Inf, and NaN.
+const f32RangeWithInfAndNaNForF16Vec2FiniteInfNaN: number[] = [
+ ...f32RangeWithInfAndNaN,
+ ...u32RangeForF16Vec2FiniteInfNaN.map(reinterpretU32AsF32),
+];
+// Finite f32 values for bitcasting to finite only vec2<f16>, used for constant evaluation.
+const f32FiniteRangeForF16Vec2Finite: number[] = f32RangeWithInfAndNaNForF16Vec2FiniteInfNaN
+ .filter(isFiniteF32)
+ .filter(u => canU32BitcastToFiniteVec2F16(reinterpretF32AsU32(u)));
+
+// vec2<f16> cases for bitcasting to i32/u32/f32, by combining f16 values into pairs
+const f16Vec2FiniteInU16x2 = slidingSlice(f16FiniteInU16, 2);
+const f16Vec2FiniteInfNanInU16x2 = slidingSlice([...f16FiniteInU16, ...f16InfAndNaNInU16], 2);
+// vec4<f16> cases for bitcasting to vec2<i32/u32/f32>, by combining f16 values 4-by-4
+const f16Vec2FiniteInU16x4 = slidingSlice(f16FiniteInU16, 4);
+const f16Vec2FiniteInfNanInU16x4 = slidingSlice([...f16FiniteInU16, ...f16InfAndNaNInU16], 4);
+
+// alwaysPass comparators for i32/u32/f32 cases. For f32/f16 we also use the unbounded interval,
+// which allows per-element unbounded expectations for vectors.
+const anyF32 = alwaysPass('any f32');
+const anyI32 = alwaysPass('any i32');
+const anyU32 = alwaysPass('any u32');
+
+// Unbounded FPInterval
+const f32UnboundedInterval = FP.f32.constants().unboundedInterval;
+const f16UnboundedInterval = FP.f16.constants().unboundedInterval;
+
+// i32 and u32 cases for bitcasting to f32.
+// i32 cases for bitcasting to f32 finite, zeros, Inf, and NaN.
+const i32RangeForF32FiniteInfNaN: number[] = [
+ ...fullI32Range(),
+ ...f32ZerosInI32,
+ ...f32InfAndNaNInI32,
+];
+// i32 cases for bitcasting to f32 finite only.
+const i32RangeForF32Finite: number[] = i32RangeForF32FiniteInfNaN.filter(i =>
+ isFiniteF32(reinterpretI32AsF32(i))
+);
+// u32 cases for bitcasting to f32 finite, zeros, Inf, and NaN.
+const u32RangeForF32FiniteInfNaN: number[] = [
+ ...fullU32Range(),
+ ...f32ZerosInU32,
+ ...f32InfAndNaNInU32,
+];
+// u32 cases for bitcasting to f32 finite only.
+const u32RangeForF32Finite: number[] = u32RangeForF32FiniteInfNaN.filter(u =>
+ isFiniteF32(reinterpretU32AsF32(u))
+);
+
+/**
+ * @returns a Comparator for checking if a f32 value is a valid
+ * bitcast conversion from f32.
+ */
+function bitcastF32ToF32Comparator(f: number): Comparator {
+ if (!isFiniteF32(f)) return anyF32;
+ const acceptable: number[] = [f, ...(isSubnormalNumberF32(f) ? f32ZerosInF32 : [])];
+ return anyOf(...acceptable.map(f32));
+}
+
+/**
+ * @returns a Comparator for checking if a u32 value is a valid
+ * bitcast conversion from f32.
+ */
+function bitcastF32ToU32Comparator(f: number): Comparator {
+ if (!isFiniteF32(f)) return anyU32;
+ const acceptable: number[] = [
+ reinterpretF32AsU32(f),
+ ...(isSubnormalNumberF32(f) ? f32ZerosInU32 : []),
+ ];
+ return anyOf(...acceptable.map(u32));
+}
+
+/**
+ * @returns a Comparator for checking if a i32 value is a valid
+ * bitcast conversion from f32.
+ */
+function bitcastF32ToI32Comparator(f: number): Comparator {
+ if (!isFiniteF32(f)) return anyI32;
+ const acceptable: number[] = [
+ reinterpretF32AsI32(f),
+ ...(isSubnormalNumberF32(f) ? f32ZerosInI32 : []),
+ ];
+ return anyOf(...acceptable.map(i32));
+}
+
+/**
+ * @returns a Comparator for checking if a f32 value is a valid
+ * bitcast conversion from i32.
+ */
+function bitcastI32ToF32Comparator(i: number): Comparator {
+ const f: number = reinterpretI32AsF32(i);
+  if (!isFiniteF32(f)) return anyF32;
+ // Positive or negative zero bit pattern map to any zero.
+ if (f32ZerosInI32.includes(i)) return anyOf(...f32ZerosInF32.map(f32));
+ const acceptable: number[] = [f, ...(isSubnormalNumberF32(f) ? f32ZerosInF32 : [])];
+ return anyOf(...acceptable.map(f32));
+}
+
+/**
+ * @returns a Comparator for checking if a f32 value is a valid
+ * bitcast conversion from u32.
+ */
+function bitcastU32ToF32Comparator(u: number): Comparator {
+ const f: number = reinterpretU32AsF32(u);
+  if (!isFiniteF32(f)) return anyF32;
+ // Positive or negative zero bit pattern map to any zero.
+ if (f32ZerosInU32.includes(u)) return anyOf(...f32ZerosInF32.map(f32));
+ const acceptable: number[] = [f, ...(isSubnormalNumberF32(f) ? f32ZerosInF32 : [])];
+ return anyOf(...acceptable.map(f32));
+}
+
+/**
+ * @returns an array of expected f16 FPIntervals for the given bitcast f16 value, which may be
+ * subnormal, Inf, or NaN. Test cases that bitcast to a vector of f16 use this function to get
+ * per-element expectations and build the vector expectation using cartesianProduct.
+ */
+function generateF16ExpectationIntervals(bitcastedF16Value: number): FPInterval[] {
+ // If the bitcasted f16 value is inf or nan, the result is unbounded
+ if (!isFiniteF16(bitcastedF16Value)) {
+ return [f16UnboundedInterval];
+ }
+  // If the bitcast f16 value is +/-0.0, the result can be either +0.0 or -0.0. Note that in JS -0.0 === 0.0.
+ if (bitcastedF16Value === 0.0) {
+ return [f16ZerosInterval];
+ }
+ const exactInterval = FP.f16.toInterval(bitcastedF16Value);
+  // If the bitcast f16 value is subnormal, it may also be flushed to +/-0.0.
+ return [exactInterval, ...(isSubnormalNumberF16(bitcastedF16Value) ? [f16ZerosInterval] : [])];
+}
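+// For example, generateF16ExpectationIntervals(1.0) is [FP.f16.toInterval(1.0)], while for a
+// subnormal input such as 2 ** -24 it is [FP.f16.toInterval(2 ** -24), f16ZerosInterval], since
+// implementations may flush subnormals to zero.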
+
+/**
+ * @returns a Comparator for checking if a f16 value is a valid
+ * bitcast conversion from f16.
+ */
+function bitcastF16ToF16Comparator(f: number): Comparator {
+ if (!isFiniteF16(f)) return anyOf(f16UnboundedInterval);
+ return anyOf(...generateF16ExpectationIntervals(f));
+}
+
+/**
+ * @returns a Comparator for checking if a vec2<f16> is a valid bitcast
+ * conversion from u32.
+ */
+function bitcastU32ToVec2F16Comparator(u: number): Comparator {
+ const bitcastedVec2F16InU16x2 = u32ToU16x2(u).map(reinterpretU16AsF16);
+  // Generate the expectation for the vec2<f16> result by generating expected intervals for each
+  // element and then taking the cartesian product.
+ const expectedIntervalsCombination = cartesianProduct(
+ ...bitcastedVec2F16InU16x2.map(generateF16ExpectationIntervals)
+ );
+ return anyOf(...expectedIntervalsCombination);
+}
+
+/**
+ * @returns a Comparator for checking if a vec2<f16> value is a valid
+ * bitcast conversion from i32.
+ */
+function bitcastI32ToVec2F16Comparator(i: number): Comparator {
+ const bitcastedVec2F16InU16x2 = u32ToU16x2(reinterpretI32AsU32(i)).map(reinterpretU16AsF16);
+  // Generate the expectation for the vec2<f16> result by generating expected intervals for each
+  // element and then taking the cartesian product.
+ const expectedIntervalsCombination = cartesianProduct(
+ ...bitcastedVec2F16InU16x2.map(generateF16ExpectationIntervals)
+ );
+ return anyOf(...expectedIntervalsCombination);
+}
+
+/**
+ * @returns a Comparator for checking if a vec2<f16> value is a valid
+ * bitcast conversion from f32.
+ */
+function bitcastF32ToVec2F16Comparator(f: number): Comparator {
+ // If input f32 is not finite, it can be evaluated to any value and thus any result f16 vec2 is
+ // possible.
+ if (!isFiniteF32(f)) {
+ return anyOf([f16UnboundedInterval, f16UnboundedInterval]);
+ }
+ const bitcastedVec2F16InU16x2 = u32ToU16x2(reinterpretF32AsU32(f)).map(reinterpretU16AsF16);
+  // Generate the expectation for the vec2<f16> result by generating expected intervals for each
+  // element and then taking the cartesian product.
+ const expectedIntervalsCombination = cartesianProduct(
+ ...bitcastedVec2F16InU16x2.map(generateF16ExpectationIntervals)
+ );
+ return anyOf(...expectedIntervalsCombination);
+}
+
+/**
+ * @returns a Comparator for checking if a vec4<f16> is a valid
+ * bitcast conversion from vec2<u32>.
+ */
+function bitcastVec2U32ToVec4F16Comparator(u32x2: number[]): Comparator {
+ assert(u32x2.length === 2);
+ const bitcastedVec4F16InU16x4 = u32x2.flatMap(u32ToU16x2).map(reinterpretU16AsF16);
+  // Generate the expectation for the vec4<f16> result by generating expected intervals for each
+  // element and then taking the cartesian product.
+ const expectedIntervalsCombination = cartesianProduct(
+ ...bitcastedVec4F16InU16x4.map(generateF16ExpectationIntervals)
+ );
+ return anyOf(...expectedIntervalsCombination);
+}
+
+/**
+ * @returns a Comparator for checking if a vec4<f16> is a valid
+ * bitcast conversion from vec2<i32>.
+ */
+function bitcastVec2I32ToVec4F16Comparator(i32x2: number[]): Comparator {
+ assert(i32x2.length === 2);
+ const bitcastedVec4F16InU16x4 = i32x2
+ .map(reinterpretI32AsU32)
+ .flatMap(u32ToU16x2)
+ .map(reinterpretU16AsF16);
+  // Generate the expectation for the vec4<f16> result by generating expected intervals for each
+  // element and then taking the cartesian product.
+ const expectedIntervalsCombination = cartesianProduct(
+ ...bitcastedVec4F16InU16x4.map(generateF16ExpectationIntervals)
+ );
+ return anyOf(...expectedIntervalsCombination);
+}
+
+/**
+ * @returns a Comparator for checking if a vec4<f16> is a valid
+ * bitcast conversion from vec2<f32>.
+ */
+function bitcastVec2F32ToVec4F16Comparator(f32x2: number[]): Comparator {
+ assert(f32x2.length === 2);
+ const bitcastedVec4F16InU16x4 = f32x2
+ .map(reinterpretF32AsU32)
+ .flatMap(u32ToU16x2)
+ .map(reinterpretU16AsF16);
+  // Generate the expectation for the vec4<f16> result by generating expected intervals for each
+  // element and then taking the cartesian product.
+ const expectedIntervalsCombination = cartesianProduct(
+ ...bitcastedVec4F16InU16x4.map(generateF16ExpectationIntervals)
+ );
+ return anyOf(...expectedIntervalsCombination);
+}
+
+// Structure that stores the expectations for a single 32-bit scalar/element bitcast from two f16 values.
+interface ExpectionFor32BitsScalarFromF16x2 {
+  // possibleExpectations is a Scalar array if the expectation is for i32/u32 and an FPInterval array
+  // for f32. Note that if the expectation for i32/u32 is unbounded, possibleExpectations is meaningless.
+ possibleExpectations: (Scalar | FPInterval)[];
+ isUnbounded: boolean;
+}
+
+/**
+ * @returns the array of possible 16-bit patterns, represented as u16, that a given
+ * finite f16 (represented as u16) may bitcast to, accounting for possible subnormal
+ * flushing. Used to build up 32-bit or larger results.
+ */
+function possibleBitsInU16FromFiniteF16InU16(f16InU16: number): number[] {
+ const h = reinterpretU16AsF16(f16InU16);
+ assert(isFiniteF16(h));
+ return [f16InU16, ...(isSubnormalNumberF16(h) ? f16ZerosInU16 : [])];
+}
+
+/**
+ * @returns the expectation for a single 32-bit scalar bitcast from a given pair of
+ * f16 values, as an ExpectionFor32BitsScalarFromF16x2.
+ */
+function possible32BitScalarIntervalsFromF16x2(
+ f16x2InU16x2: number[],
+ type: 'i32' | 'u32' | 'f32'
+): ExpectionFor32BitsScalarFromF16x2 {
+ assert(f16x2InU16x2.length === 2);
+ let reinterpretFromU32: (x: number) => number;
+ let expectationsForValue: (x: number) => Scalar[] | FPInterval[];
+ let unboundedExpectations: FPInterval[] | Scalar[];
+ if (type === 'u32') {
+ reinterpretFromU32 = (x: number) => x;
+ expectationsForValue = x => [u32(x)];
+ // Scalar expectation can not express "unbounded" for i32 and u32, so use 0 here as a
+ // placeholder, and the possibleExpectations should be ignored if the result is unbounded.
+ unboundedExpectations = [u32(0)];
+ } else if (type === 'i32') {
+ reinterpretFromU32 = (x: number) => reinterpretU32AsI32(x);
+ expectationsForValue = x => [i32(x)];
+ // Scalar expectation can not express "unbounded" for i32 and u32, so use 0 here as a
+ // placeholder, and the possibleExpectations should be ignored if the result is unbounded.
+ unboundedExpectations = [i32(0)];
+ } else {
+ assert(type === 'f32');
+ reinterpretFromU32 = (x: number) => reinterpretU32AsF32(x);
+ expectationsForValue = x => {
+ // Handle the possible Inf/NaN/zeros and subnormal cases for f32 result.
+ if (!isFiniteF32(x)) {
+ return [f32UnboundedInterval];
+ }
+      // If the resulting f32 value is +/-0.0, the result can be either +0.0 or -0.0. Note that in JS -0.0 === 0.0.
+ if (x === 0.0) {
+ return [f32ZerosInterval];
+ }
+ const exactInterval = FP.f32.toInterval(x);
+      // If the resulting f32 value is subnormal, it may also be flushed to +/-0.0.
+ return [exactInterval, ...(isSubnormalNumberF32(x) ? [f32ZerosInterval] : [])];
+ };
+ unboundedExpectations = [f32UnboundedInterval];
+ }
+  // Return the unbounded expectation if an f16 Inf/NaN occurs.
+ if (
+ !isFiniteF16(reinterpretU16AsF16(f16x2InU16x2[0])) ||
+ !isFiniteF16(reinterpretU16AsF16(f16x2InU16x2[1]))
+ ) {
+ return { possibleExpectations: unboundedExpectations, isUnbounded: true };
+ }
+ const possibleU16Bits = f16x2InU16x2.map(possibleBitsInU16FromFiniteF16InU16);
+ const possibleExpectations = cartesianProduct(...possibleU16Bits).flatMap<Scalar | FPInterval>(
+ (possibleBitsU16x2: readonly number[]) => {
+ assert(possibleBitsU16x2.length === 2);
+ return expectationsForValue(reinterpretFromU32(u16x2ToU32(possibleBitsU16x2)));
+ }
+ );
+ return { possibleExpectations, isUnbounded: false };
+}
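+// For example, possible32BitScalarIntervalsFromF16x2([0x3c00, 0x4000], 'u32'), i.e. the pair
+// (1.0h, 2.0h) of normal finite values, yields { possibleExpectations: [u32(0x40003c00)],
+// isUnbounded: false }, since there is no subnormal flushing or Inf/NaN to account for.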
+
+/**
+ * @returns a Comparator for checking if a u32 value is a valid
+ * bitcast conversion from vec2 f16.
+ */
+function bitcastVec2F16ToU32Comparator(vec2F16InU16x2: number[]): Comparator {
+ assert(vec2F16InU16x2.length === 2);
+ const expectations = possible32BitScalarIntervalsFromF16x2(vec2F16InU16x2, 'u32');
+ // Return alwaysPass if result is expected unbounded.
+ if (expectations.isUnbounded) {
+ return anyU32;
+ }
+ return anyOf(...expectations.possibleExpectations);
+}
+
+/**
+ * @returns a Comparator for checking if a i32 value is a valid
+ * bitcast conversion from vec2 f16.
+ */
+function bitcastVec2F16ToI32Comparator(vec2F16InU16x2: number[]): Comparator {
+ assert(vec2F16InU16x2.length === 2);
+ const expectations = possible32BitScalarIntervalsFromF16x2(vec2F16InU16x2, 'i32');
+ // Return alwaysPass if result is expected unbounded.
+ if (expectations.isUnbounded) {
+ return anyI32;
+ }
+ return anyOf(...expectations.possibleExpectations);
+}
+
+/**
+ * @returns a Comparator for checking if an f32 value is a valid
+ * bitcast conversion from vec2 f16.
+ */
+function bitcastVec2F16ToF32Comparator(vec2F16InU16x2: number[]): Comparator {
+ assert(vec2F16InU16x2.length === 2);
+ const expectations = possible32BitScalarIntervalsFromF16x2(vec2F16InU16x2, 'f32');
+ // Return alwaysPass if result is expected unbounded.
+ if (expectations.isUnbounded) {
+ return anyF32;
+ }
+ return anyOf(...expectations.possibleExpectations);
+}
+
+/**
+ * @returns a Comparator for checking if a vec2 u32 value is a valid
+ * bitcast conversion from vec4 f16.
+ */
+function bitcastVec4F16ToVec2U32Comparator(vec4F16InU16x4: number[]): Comparator {
+ assert(vec4F16InU16x4.length === 4);
+ const expectationsPerElement = [vec4F16InU16x4.slice(0, 2), vec4F16InU16x4.slice(2, 4)].map(e =>
+ possible32BitScalarIntervalsFromF16x2(e, 'u32')
+ );
+  // Return alwaysPass if any element is expected to be unbounded. Although only one element of the
+  // result vector may be unbounded, we currently have no way to build a comparator that expects
+  // just one element of an i32/u32 vector to be unbounded.
+ if (expectationsPerElement.map(e => e.isUnbounded).reduce((a, b) => a || b, false)) {
+ return alwaysPass('any vec2<u32>');
+ }
+ return anyOf(
+ ...cartesianProduct(...expectationsPerElement.map(e => e.possibleExpectations)).map(
+ e => new Vector(e as Scalar[])
+ )
+ );
+}
+
+/**
+ * @returns a Comparator for checking if a vec2 i32 value is a valid
+ * bitcast conversion from vec4 f16.
+ */
+function bitcastVec4F16ToVec2I32Comparator(vec4F16InU16x4: number[]): Comparator {
+ assert(vec4F16InU16x4.length === 4);
+ const expectationsPerElement = [vec4F16InU16x4.slice(0, 2), vec4F16InU16x4.slice(2, 4)].map(e =>
+ possible32BitScalarIntervalsFromF16x2(e, 'i32')
+ );
+  // Return alwaysPass if any element is expected to be unbounded. Although only one element of the
+  // result vector may be unbounded, we currently have no way to build a comparator that expects
+  // just one element of an i32/u32 vector to be unbounded.
+ if (expectationsPerElement.map(e => e.isUnbounded).reduce((a, b) => a || b, false)) {
+ return alwaysPass('any vec2<i32>');
+ }
+ return anyOf(
+ ...cartesianProduct(...expectationsPerElement.map(e => e.possibleExpectations)).map(
+ e => new Vector(e as Scalar[])
+ )
+ );
+}
+
+/**
+ * @returns a Comparator for checking if a vec2 f32 value is a valid
+ * bitcast conversion from vec4 f16.
+ */
+function bitcastVec4F16ToVec2F32Comparator(vec4F16InU16x4: number[]): Comparator {
+ assert(vec4F16InU16x4.length === 4);
+ const expectationsPerElement = [vec4F16InU16x4.slice(0, 2), vec4F16InU16x4.slice(2, 4)].map(e =>
+ possible32BitScalarIntervalsFromF16x2(e, 'f32')
+ );
+ return anyOf(
+ ...cartesianProduct(...expectationsPerElement.map(e => e.possibleExpectations)).map(e => [
+ e[0] as FPInterval,
+ e[1] as FPInterval,
+ ])
+ );
+}
+
+export const d = makeCaseCache('bitcast', {
+ // Identity Cases
+ i32_to_i32: () => fullI32Range().map(e => ({ input: i32(e), expected: i32(e) })),
+ u32_to_u32: () => fullU32Range().map(e => ({ input: u32(e), expected: u32(e) })),
+ f32_inf_nan_to_f32: () =>
+ f32RangeWithInfAndNaN.map(e => ({
+ input: f32(e),
+ expected: bitcastF32ToF32Comparator(e),
+ })),
+ f32_to_f32: () =>
+ f32FiniteRange.map(e => ({ input: f32(e), expected: bitcastF32ToF32Comparator(e) })),
+ f16_inf_nan_to_f16: () =>
+ [...f16FiniteInF16, ...f16InfAndNaNInF16].map(e => ({
+ input: f16(e),
+ expected: bitcastF16ToF16Comparator(e),
+ })),
+ f16_to_f16: () =>
+ f16FiniteInF16.map(e => ({ input: f16(e), expected: bitcastF16ToF16Comparator(e) })),
+
+ // i32,u32,f32 to different i32,u32,f32
+ i32_to_u32: () => fullI32Range().map(e => ({ input: i32(e), expected: u32(e) })),
+ i32_to_f32: () =>
+ i32RangeForF32Finite.map(e => ({
+ input: i32(e),
+ expected: bitcastI32ToF32Comparator(e),
+ })),
+ i32_to_f32_inf_nan: () =>
+ i32RangeForF32FiniteInfNaN.map(e => ({
+ input: i32(e),
+ expected: bitcastI32ToF32Comparator(e),
+ })),
+ u32_to_i32: () => fullU32Range().map(e => ({ input: u32(e), expected: i32(e) })),
+ u32_to_f32: () =>
+ u32RangeForF32Finite.map(e => ({
+ input: u32(e),
+ expected: bitcastU32ToF32Comparator(e),
+ })),
+ u32_to_f32_inf_nan: () =>
+ u32RangeForF32FiniteInfNaN.map(e => ({
+ input: u32(e),
+ expected: bitcastU32ToF32Comparator(e),
+ })),
+ f32_inf_nan_to_i32: () =>
+ f32RangeWithInfAndNaN.map(e => ({
+ input: f32(e),
+ expected: bitcastF32ToI32Comparator(e),
+ })),
+ f32_to_i32: () =>
+ f32FiniteRange.map(e => ({ input: f32(e), expected: bitcastF32ToI32Comparator(e) })),
+
+ f32_inf_nan_to_u32: () =>
+ f32RangeWithInfAndNaN.map(e => ({
+ input: f32(e),
+ expected: bitcastF32ToU32Comparator(e),
+ })),
+ f32_to_u32: () =>
+ f32FiniteRange.map(e => ({ input: f32(e), expected: bitcastF32ToU32Comparator(e) })),
+
+ // i32,u32,f32 to vec2<f16>
+ u32_to_vec2_f16_inf_nan: () =>
+ u32RangeForF16Vec2FiniteInfNaN.map(e => ({
+ input: u32(e),
+ expected: bitcastU32ToVec2F16Comparator(e),
+ })),
+ u32_to_vec2_f16: () =>
+ u32RangeForF16Vec2Finite.map(e => ({
+ input: u32(e),
+ expected: bitcastU32ToVec2F16Comparator(e),
+ })),
+ i32_to_vec2_f16_inf_nan: () =>
+ i32RangeForF16Vec2FiniteInfNaN.map(e => ({
+ input: i32(e),
+ expected: bitcastI32ToVec2F16Comparator(e),
+ })),
+ i32_to_vec2_f16: () =>
+ i32RangeForF16Vec2Finite.map(e => ({
+ input: i32(e),
+ expected: bitcastI32ToVec2F16Comparator(e),
+ })),
+ f32_inf_nan_to_vec2_f16_inf_nan: () =>
+ f32RangeWithInfAndNaNForF16Vec2FiniteInfNaN.map(e => ({
+ input: f32(e),
+ expected: bitcastF32ToVec2F16Comparator(e),
+ })),
+ f32_to_vec2_f16: () =>
+ f32FiniteRangeForF16Vec2Finite.map(e => ({
+ input: f32(e),
+ expected: bitcastF32ToVec2F16Comparator(e),
+ })),
+
+ // vec2<i32>, vec2<u32>, vec2<f32> to vec4<f16>
+ vec2_i32_to_vec4_f16_inf_nan: () =>
+ slidingSlice(i32RangeForF16Vec2FiniteInfNaN, 2).map(e => ({
+ input: toVector(e, i32),
+ expected: bitcastVec2I32ToVec4F16Comparator(e),
+ })),
+ vec2_i32_to_vec4_f16: () =>
+ slidingSlice(i32RangeForF16Vec2Finite, 2).map(e => ({
+ input: toVector(e, i32),
+ expected: bitcastVec2I32ToVec4F16Comparator(e),
+ })),
+ vec2_u32_to_vec4_f16_inf_nan: () =>
+ slidingSlice(u32RangeForF16Vec2FiniteInfNaN, 2).map(e => ({
+ input: toVector(e, u32),
+ expected: bitcastVec2U32ToVec4F16Comparator(e),
+ })),
+ vec2_u32_to_vec4_f16: () =>
+ slidingSlice(u32RangeForF16Vec2Finite, 2).map(e => ({
+ input: toVector(e, u32),
+ expected: bitcastVec2U32ToVec4F16Comparator(e),
+ })),
+ vec2_f32_inf_nan_to_vec4_f16_inf_nan: () =>
+ slidingSlice(f32RangeWithInfAndNaNForF16Vec2FiniteInfNaN, 2).map(e => ({
+ input: toVector(e, f32),
+ expected: bitcastVec2F32ToVec4F16Comparator(e),
+ })),
+ vec2_f32_to_vec4_f16: () =>
+ slidingSlice(f32FiniteRangeForF16Vec2Finite, 2).map(e => ({
+ input: toVector(e, f32),
+ expected: bitcastVec2F32ToVec4F16Comparator(e),
+ })),
+
+ // vec2<f16> to i32, u32, f32
+ vec2_f16_to_u32: () =>
+ f16Vec2FiniteInU16x2.map(e => ({
+ input: u16x2ToVec2F16(e),
+ expected: bitcastVec2F16ToU32Comparator(e),
+ })),
+ vec2_f16_inf_nan_to_u32: () =>
+ f16Vec2FiniteInfNanInU16x2.map(e => ({
+ input: u16x2ToVec2F16(e),
+ expected: bitcastVec2F16ToU32Comparator(e),
+ })),
+ vec2_f16_to_i32: () =>
+ f16Vec2FiniteInU16x2.map(e => ({
+ input: u16x2ToVec2F16(e),
+ expected: bitcastVec2F16ToI32Comparator(e),
+ })),
+ vec2_f16_inf_nan_to_i32: () =>
+ f16Vec2FiniteInfNanInU16x2.map(e => ({
+ input: u16x2ToVec2F16(e),
+ expected: bitcastVec2F16ToI32Comparator(e),
+ })),
+ vec2_f16_to_f32_finite: () =>
+ f16Vec2FiniteInU16x2
+ .filter(u16x2 => isFiniteF32(reinterpretU32AsF32(u16x2ToU32(u16x2))))
+ .map(e => ({
+ input: u16x2ToVec2F16(e),
+ expected: bitcastVec2F16ToF32Comparator(e),
+ })),
+ vec2_f16_inf_nan_to_f32: () =>
+ f16Vec2FiniteInfNanInU16x2.map(e => ({
+ input: u16x2ToVec2F16(e),
+ expected: bitcastVec2F16ToF32Comparator(e),
+ })),
+
+ // vec4<f16> to vec2 of i32, u32, f32
+ vec4_f16_to_vec2_u32: () =>
+ f16Vec2FiniteInU16x4.map(e => ({
+ input: u16x4ToVec4F16(e),
+ expected: bitcastVec4F16ToVec2U32Comparator(e),
+ })),
+ vec4_f16_inf_nan_to_vec2_u32: () =>
+ f16Vec2FiniteInfNanInU16x4.map(e => ({
+ input: u16x4ToVec4F16(e),
+ expected: bitcastVec4F16ToVec2U32Comparator(e),
+ })),
+ vec4_f16_to_vec2_i32: () =>
+ f16Vec2FiniteInU16x4.map(e => ({
+ input: u16x4ToVec4F16(e),
+ expected: bitcastVec4F16ToVec2I32Comparator(e),
+ })),
+ vec4_f16_inf_nan_to_vec2_i32: () =>
+ f16Vec2FiniteInfNanInU16x4.map(e => ({
+ input: u16x4ToVec4F16(e),
+ expected: bitcastVec4F16ToVec2I32Comparator(e),
+ })),
+ vec4_f16_to_vec2_f32_finite: () =>
+ f16Vec2FiniteInU16x4
+ .filter(
+ u16x4 =>
+ isFiniteF32(reinterpretU32AsF32(u16x2ToU32(u16x4.slice(0, 2)))) &&
+ isFiniteF32(reinterpretU32AsF32(u16x2ToU32(u16x4.slice(2, 4))))
+ )
+ .map(e => ({
+ input: u16x4ToVec4F16(e),
+ expected: bitcastVec4F16ToVec2F32Comparator(e),
+ })),
+ vec4_f16_inf_nan_to_vec2_f32: () =>
+ f16Vec2FiniteInfNanInU16x4.map(e => ({
+ input: u16x4ToVec4F16(e),
+ expected: bitcastVec4F16ToVec2F32Comparator(e),
+ })),
+});
+
+/**
+ * @returns a ShaderBuilder that generates a call to bitcast,
+ * using the appropriate destination type, which optionally can be
+ * a WGSL type alias.
+ */
+function bitcastBuilder(canonicalDestType: string, params: TestParams): ShaderBuilder {
+ const destType = params.vectorize
+ ? `vec${params.vectorize}<${canonicalDestType}>`
+ : canonicalDestType;
+
+ return builtinWithPredeclaration(
+ `bitcast<${destType}>`,
+ params.alias ? `alias myalias = ${destType};` : ''
+ );
+}
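+// For example, with canonicalDestType 'f32' and params { vectorize: 2, alias: true, ... }, the
+// returned builder produces call expressions of the form `bitcast<vec2<f32>>(e)` and predeclares
+// `alias myalias = vec2<f32>;`.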
+
+// Identity cases
+g.test('i32_to_i32')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+ .desc(`bitcast i32 to i32 tests`)
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ .combine('alias', [false, true])
+ )
+ .fn(async t => {
+ const cases = await d.get('i32_to_i32');
+ await run(t, bitcastBuilder('i32', t.params), [TypeI32], TypeI32, t.params, cases);
+ });
+
+g.test('u32_to_u32')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+ .desc(`bitcast u32 to u32 tests`)
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ .combine('alias', [false, true])
+ )
+ .fn(async t => {
+ const cases = await d.get('u32_to_u32');
+ await run(t, bitcastBuilder('u32', t.params), [TypeU32], TypeU32, t.params, cases);
+ });
+
+g.test('f32_to_f32')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+ .desc(`bitcast f32 to f32 tests`)
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ .combine('alias', [false, true])
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ // Infinities and NaNs are errors in const-eval.
+ t.params.inputSource === 'const' ? 'f32_to_f32' : 'f32_inf_nan_to_f32'
+ );
+ await run(t, bitcastBuilder('f32', t.params), [TypeF32], TypeF32, t.params, cases);
+ });
+
+// To i32 from u32, f32
+g.test('u32_to_i32')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+ .desc(`bitcast u32 to i32 tests`)
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ .combine('alias', [false, true])
+ )
+ .fn(async t => {
+ const cases = await d.get('u32_to_i32');
+ await run(t, bitcastBuilder('i32', t.params), [TypeU32], TypeI32, t.params, cases);
+ });
+
+g.test('f32_to_i32')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+ .desc(`bitcast f32 to i32 tests`)
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ .combine('alias', [false, true])
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ // Infinities and NaNs are errors in const-eval.
+ t.params.inputSource === 'const' ? 'f32_to_i32' : 'f32_inf_nan_to_i32'
+ );
+ await run(t, bitcastBuilder('i32', t.params), [TypeF32], TypeI32, t.params, cases);
+ });
+
+// To u32 from i32, f32
+g.test('i32_to_u32')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+ .desc(`bitcast i32 to u32 tests`)
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ .combine('alias', [false, true])
+ )
+ .fn(async t => {
+ const cases = await d.get('i32_to_u32');
+ await run(t, bitcastBuilder('u32', t.params), [TypeI32], TypeU32, t.params, cases);
+ });
+
+g.test('f32_to_u32')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+  .desc(`bitcast f32 to u32 tests`)
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ .combine('alias', [false, true])
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ // Infinities and NaNs are errors in const-eval.
+ t.params.inputSource === 'const' ? 'f32_to_u32' : 'f32_inf_nan_to_u32'
+ );
+ await run(t, bitcastBuilder('u32', t.params), [TypeF32], TypeU32, t.params, cases);
+ });
+
+// To f32 from i32, u32
+g.test('i32_to_f32')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+ .desc(`bitcast i32 to f32 tests`)
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ .combine('alias', [false, true])
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ // Infinities and NaNs are errors in const-eval.
+ t.params.inputSource === 'const' ? 'i32_to_f32' : 'i32_to_f32_inf_nan'
+ );
+ await run(t, bitcastBuilder('f32', t.params), [TypeI32], TypeF32, t.params, cases);
+ });
+
+g.test('u32_to_f32')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+ .desc(`bitcast u32 to f32 tests`)
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ .combine('alias', [false, true])
+ )
+ .fn(async t => {
+ const cases = await d.get(
+ // Infinities and NaNs are errors in const-eval.
+ t.params.inputSource === 'const' ? 'u32_to_f32' : 'u32_to_f32_inf_nan'
+ );
+ await run(t, bitcastBuilder('f32', t.params), [TypeU32], TypeF32, t.params, cases);
+ });
+
+// 16 bit types
+
+// f16 cases
+
+// f16: Identity
+g.test('f16_to_f16')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+ .desc(`bitcast f16 to f16 tests`)
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ .combine('alias', [false, true])
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ // Infinities and NaNs are errors in const-eval.
+ t.params.inputSource === 'const' ? 'f16_to_f16' : 'f16_inf_nan_to_f16'
+ );
+ await run(t, bitcastBuilder('f16', t.params), [TypeF16], TypeF16, t.params, cases);
+ });
+
+// f16: 32-bit scalar numeric to vec2<f16>
+g.test('i32_to_vec2h')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+ .desc(`bitcast i32 to vec2h tests`)
+ .params(u => u.combine('inputSource', allInputSources).combine('alias', [false, true]))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ // Infinities and NaNs are errors in const-eval.
+ t.params.inputSource === 'const' ? 'i32_to_vec2_f16' : 'i32_to_vec2_f16_inf_nan'
+ );
+ await run(
+ t,
+ bitcastBuilder('vec2<f16>', t.params),
+ [TypeI32],
+ TypeVec(2, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('u32_to_vec2h')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+ .desc(`bitcast u32 to vec2h tests`)
+ .params(u => u.combine('inputSource', allInputSources).combine('alias', [false, true]))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ // Infinities and NaNs are errors in const-eval.
+ t.params.inputSource === 'const' ? 'u32_to_vec2_f16' : 'u32_to_vec2_f16_inf_nan'
+ );
+ await run(
+ t,
+ bitcastBuilder('vec2<f16>', t.params),
+ [TypeU32],
+ TypeVec(2, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f32_to_vec2h')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+  .desc(`bitcast f32 to vec2h tests`)
+ .params(u => u.combine('inputSource', allInputSources).combine('alias', [false, true]))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ // Infinities and NaNs are errors in const-eval.
+ t.params.inputSource === 'const' ? 'f32_to_vec2_f16' : 'f32_inf_nan_to_vec2_f16_inf_nan'
+ );
+ await run(
+ t,
+ bitcastBuilder('vec2<f16>', t.params),
+ [TypeF32],
+ TypeVec(2, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+// f16: vec2<32-bit scalar numeric> to vec4<f16>
+g.test('vec2i_to_vec4h')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+ .desc(`bitcast vec2i to vec4h tests`)
+ .params(u => u.combine('inputSource', allInputSources).combine('alias', [false, true]))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ // Infinities and NaNs are errors in const-eval.
+ t.params.inputSource === 'const' ? 'vec2_i32_to_vec4_f16' : 'vec2_i32_to_vec4_f16_inf_nan'
+ );
+ await run(
+ t,
+ bitcastBuilder('vec4<f16>', t.params),
+ [TypeVec(2, TypeI32)],
+ TypeVec(4, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('vec2u_to_vec4h')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+ .desc(`bitcast vec2u to vec4h tests`)
+ .params(u => u.combine('inputSource', allInputSources).combine('alias', [false, true]))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ // Infinities and NaNs are errors in const-eval.
+ t.params.inputSource === 'const' ? 'vec2_u32_to_vec4_f16' : 'vec2_u32_to_vec4_f16_inf_nan'
+ );
+ await run(
+ t,
+ bitcastBuilder('vec4<f16>', t.params),
+ [TypeVec(2, TypeU32)],
+ TypeVec(4, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('vec2f_to_vec4h')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+  .desc(`bitcast vec2f to vec4h tests`)
+ .params(u => u.combine('inputSource', allInputSources).combine('alias', [false, true]))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ // Infinities and NaNs are errors in const-eval.
+ t.params.inputSource === 'const'
+ ? 'vec2_f32_to_vec4_f16'
+ : 'vec2_f32_inf_nan_to_vec4_f16_inf_nan'
+ );
+ await run(
+ t,
+ bitcastBuilder('vec4<f16>', t.params),
+ [TypeVec(2, TypeF32)],
+ TypeVec(4, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+// f16: vec2<f16> to 32-bit scalar numeric
+g.test('vec2h_to_i32')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+ .desc(`bitcast vec2h to i32 tests`)
+ .params(u => u.combine('inputSource', allInputSources).combine('alias', [false, true]))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ // Infinities and NaNs are errors in const-eval.
+ t.params.inputSource === 'const' ? 'vec2_f16_to_i32' : 'vec2_f16_inf_nan_to_i32'
+ );
+ await run(t, bitcastBuilder('i32', t.params), [TypeVec(2, TypeF16)], TypeI32, t.params, cases);
+ });
+
+g.test('vec2h_to_u32')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+ .desc(`bitcast vec2h to u32 tests`)
+ .params(u => u.combine('inputSource', allInputSources).combine('alias', [false, true]))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ // Infinities and NaNs are errors in const-eval.
+ t.params.inputSource === 'const' ? 'vec2_f16_to_u32' : 'vec2_f16_inf_nan_to_u32'
+ );
+ await run(t, bitcastBuilder('u32', t.params), [TypeVec(2, TypeF16)], TypeU32, t.params, cases);
+ });
+
+g.test('vec2h_to_f32')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+ .desc(`bitcast vec2h to f32 tests`)
+ .params(u => u.combine('inputSource', allInputSources).combine('alias', [false, true]))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ // Infinities and NaNs are errors in const-eval.
+ t.params.inputSource === 'const' ? 'vec2_f16_to_f32_finite' : 'vec2_f16_inf_nan_to_f32'
+ );
+ await run(t, bitcastBuilder('f32', t.params), [TypeVec(2, TypeF16)], TypeF32, t.params, cases);
+ });
+
+// f16: vec4<f16> to vec2<32-bit scalar numeric>
+g.test('vec4h_to_vec2i')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+ .desc(`bitcast vec4h to vec2i tests`)
+ .params(u => u.combine('inputSource', allInputSources).combine('alias', [false, true]))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ // Infinities and NaNs are errors in const-eval.
+ t.params.inputSource === 'const' ? 'vec4_f16_to_vec2_i32' : 'vec4_f16_inf_nan_to_vec2_i32'
+ );
+ await run(
+ t,
+ bitcastBuilder('vec2<i32>', t.params),
+ [TypeVec(4, TypeF16)],
+ TypeVec(2, TypeI32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('vec4h_to_vec2u')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+ .desc(`bitcast vec4h to vec2u tests`)
+ .params(u => u.combine('inputSource', allInputSources).combine('alias', [false, true]))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ // Infinities and NaNs are errors in const-eval.
+ t.params.inputSource === 'const' ? 'vec4_f16_to_vec2_u32' : 'vec4_f16_inf_nan_to_vec2_u32'
+ );
+ await run(
+ t,
+ bitcastBuilder('vec2<u32>', t.params),
+ [TypeVec(4, TypeF16)],
+ TypeVec(2, TypeU32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('vec4h_to_vec2f')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+ .desc(`bitcast vec4h to vec2f tests`)
+ .params(u => u.combine('inputSource', allInputSources).combine('alias', [false, true]))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ // Infinities and NaNs are errors in const-eval.
+ t.params.inputSource === 'const'
+ ? 'vec4_f16_to_vec2_f32_finite'
+ : 'vec4_f16_inf_nan_to_vec2_f32'
+ );
+ await run(
+ t,
+ bitcastBuilder('vec2<f32>', t.params),
+ [TypeVec(4, TypeF16)],
+ TypeVec(2, TypeF32),
+ t.params,
+ cases
+ );
+ });
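For reference, the bit packing these vec2h/vec4h bitcast tests exercise can be sketched host-side as follows. This is an illustrative standalone snippet, not part of the patch or the CTS harness; it assumes that component e[0] of the vec2<f16> occupies the low 16 bits of the 32-bit result.

    // Reinterpret two f16 bit patterns as one 32-bit value, mirroring
    // bitcast<i32>(vec2<f16>) and bitcast<f32>(vec2<f16>).
    function packTwoF16Bits(lo: number, hi: number): number {
      // Assumption: lo is the bit pattern of e[0], hi that of e[1].
      return (((hi & 0xffff) << 16) | (lo & 0xffff)) >>> 0;
    }

    const bitsOnePointFive = 0x3e00; // f16 bit pattern for 1.5
    const bitsOne = 0x3c00; // f16 bit pattern for 1.0
    const packed = packTwoF16Bits(bitsOnePointFive, bitsOne); // 0x3c003e00

    const view = new DataView(new ArrayBuffer(4));
    view.setUint32(0, packed, true);
    console.log(view.getInt32(0, true)); // the same 32 bits read back as i32
    console.log(view.getFloat32(0, true)); // the same 32 bits read back as f32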
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/builtin.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/builtin.ts
new file mode 100644
index 0000000000..282feea703
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/builtin.ts
@@ -0,0 +1,24 @@
+import {
+ abstractFloatShaderBuilder,
+ basicExpressionBuilder,
+ basicExpressionWithPredeclarationBuilder,
+ ShaderBuilder,
+} from '../../expression.js';
+
+/** @returns a ShaderBuilder that calls the builtin with the given name */
+export function builtin(name: string): ShaderBuilder {
+ return basicExpressionBuilder(values => `${name}(${values.join(', ')})`);
+}
+
+/** @returns a ShaderBuilder that calls the builtin with the given name that returns AbstractFloats */
+export function abstractBuiltin(name: string): ShaderBuilder {
+ return abstractFloatShaderBuilder(values => `${name}(${values.join(', ')})`);
+}
+
+/** @returns a ShaderBuilder that calls the builtin with the given name and has given predeclaration */
+export function builtinWithPredeclaration(name: string, predeclaration: string): ShaderBuilder {
+ return basicExpressionWithPredeclarationBuilder(
+ values => `${name}(${values.join(', ')})`,
+ predeclaration
+ );
+}
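The three builders above differ only in how the surrounding shader is declared; the expression text they emit is the same call syntax. A minimal stand-in for that text generation (hypothetical helper, not the real ShaderBuilder machinery in ../../expression.js):

    // Hypothetical illustration of the call expression the builders generate.
    const callExpr = (name: string, values: string[]): string =>
      `${name}(${values.join(', ')})`;

    console.log(callExpr('ceil', ['v.x'])); // "ceil(v.x)"
    console.log(callExpr('clamp', ['e', 'low', 'high'])); // "clamp(e, low, high)"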
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/ceil.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/ceil.spec.ts
new file mode 100644
index 0000000000..6cdf90986b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/ceil.spec.ts
@@ -0,0 +1,101 @@
+export const description = `
+Execution tests for the 'ceil' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn ceil(e: T ) -> T
+Returns the ceiling of e. Component-wise when T is a vector.
+
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullF32Range, fullF16Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('ceil', {
+ f32: () => {
+ return FP.f32.generateScalarToIntervalCases(
+ [
+ // Small positive numbers
+ 0.1,
+ 0.9,
+ 1.0,
+ 1.1,
+ 1.9,
+ // Small negative numbers
+ -0.1,
+ -0.9,
+ -1.0,
+ -1.1,
+ -1.9,
+ 0x80000000, // https://github.com/gpuweb/cts/issues/2766
+ ...fullF32Range(),
+ ],
+ 'unfiltered',
+ FP.f32.ceilInterval
+ );
+ },
+ f16: () => {
+ return FP.f16.generateScalarToIntervalCases(
+ [
+ // Small positive numbers
+ 0.1,
+ 0.9,
+ 1.0,
+ 1.1,
+ 1.9,
+ // Small negative numbers
+ -0.1,
+ -0.9,
+ -1.0,
+ -1.1,
+ -1.9,
+ 0x8000, // https://github.com/gpuweb/cts/issues/2766
+ ...fullF16Range(),
+ ],
+ 'unfiltered',
+ FP.f16.ceilInterval
+ );
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('f32');
+ await run(t, builtin('ceil'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, builtin('ceil'), [TypeF16], TypeF16, t.params, cases);
+ });
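Because WGSL specifies ceil as correctly rounded, the acceptance interval for a finite f32 input collapses to a single value. A host-side sketch of that expectation (assumption-level only; FP.f32.ceilInterval additionally handles infinities, NaN, and out-of-range results):

    // Sketch only: quantize the input to f32 storage, then take the exact ceil.
    const ceilExpectationF32 = (x: number): number => Math.ceil(Math.fround(x));

    console.log(ceilExpectationF32(1.1)); // 2
    console.log(ceilExpectationF32(-1.9)); // -1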
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/clamp.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/clamp.spec.ts
new file mode 100644
index 0000000000..0113fd656f
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/clamp.spec.ts
@@ -0,0 +1,195 @@
+export const description = `
+Execution tests for the 'clamp' builtin function
+
+S is AbstractInt, i32, or u32
+T is S or vecN<S>
+@const fn clamp(e: T , low: T, high: T) -> T
+Returns min(max(e,low),high). Component-wise when T is a vector.
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const clamp(e: T , low: T , high: T) -> T
+Returns either min(max(e,low),high), or the median of the three values e, low, high.
+Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { kValue } from '../../../../../util/constants.js';
+import {
+ ScalarType,
+ TypeF32,
+ TypeF16,
+ TypeI32,
+ TypeU32,
+ TypeAbstractFloat,
+} from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { sparseF32Range, sparseF16Range, sparseF64Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, Case, onlyConstInputSource, run } from '../../expression.js';
+
+import { abstractBuiltin, builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+const u32Values = [0, 1, 2, 3, 0x70000000, 0x80000000, kValue.u32.max];
+
+const i32Values = [
+ kValue.i32.negative.min,
+ -3,
+ -2,
+ -1,
+ 0,
+ 1,
+ 2,
+ 3,
+ 0x70000000,
+ kValue.i32.positive.max,
+];
+
+export const d = makeCaseCache('clamp', {
+ u32_non_const: () => {
+ return generateIntegerTestCases(u32Values, TypeU32, 'non-const');
+ },
+ u32_const: () => {
+ return generateIntegerTestCases(u32Values, TypeU32, 'const');
+ },
+ i32_non_const: () => {
+ return generateIntegerTestCases(i32Values, TypeI32, 'non-const');
+ },
+ i32_const: () => {
+ return generateIntegerTestCases(i32Values, TypeI32, 'const');
+ },
+ f32_const: () => {
+ return generateFloatTestCases(sparseF32Range(), 'f32', 'const');
+ },
+ f32_non_const: () => {
+ return generateFloatTestCases(sparseF32Range(), 'f32', 'non-const');
+ },
+ f16_const: () => {
+ return generateFloatTestCases(sparseF16Range(), 'f16', 'const');
+ },
+ f16_non_const: () => {
+ return generateFloatTestCases(sparseF16Range(), 'f16', 'non-const');
+ },
+ abstract: () => {
+ return generateFloatTestCases(sparseF64Range(), 'abstract', 'const');
+ },
+});
+
+/** @returns a set of clamp test cases from an ascending list of integer values */
+function generateIntegerTestCases(
+ test_values: Array<number>,
+ type: ScalarType,
+ stage: 'const' | 'non-const'
+): Array<Case> {
+ return test_values.flatMap(low =>
+ test_values.flatMap(high =>
+ stage === 'const' && low > high
+ ? []
+ : test_values.map(e => ({
+ input: [type.create(e), type.create(low), type.create(high)],
+ expected: type.create(Math.min(Math.max(e, low), high)),
+ }))
+ )
+ );
+}
+
+function generateFloatTestCases(
+ test_values: readonly number[],
+ trait: 'f32' | 'f16' | 'abstract',
+ stage: 'const' | 'non-const'
+): Array<Case> {
+ return test_values.flatMap(low =>
+ test_values.flatMap(high =>
+ stage === 'const' && low > high
+ ? []
+ : test_values.flatMap(e => {
+ const c = FP[trait].makeScalarTripleToIntervalCase(
+ e,
+ low,
+ high,
+ stage === 'const' ? 'finite' : 'unfiltered',
+ ...FP[trait].clampIntervals
+ );
+ return c === undefined ? [] : [c];
+ })
+ )
+ );
+}
+
+g.test('abstract_int')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`abstract int tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('u32')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`u32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'u32_const' : 'u32_non_const');
+ await run(t, builtin('clamp'), [TypeU32, TypeU32, TypeU32], TypeU32, t.params, cases);
+ });
+
+g.test('i32')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`i32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'i32_const' : 'i32_non_const');
+ await run(t, builtin('clamp'), [TypeI32, TypeI32, TypeI32], TypeI32, t.params, cases);
+ });
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u
+ .combine('inputSource', onlyConstInputSource)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('abstract');
+ await run(
+ t,
+ abstractBuiltin('clamp'),
+ [TypeAbstractFloat, TypeAbstractFloat, TypeAbstractFloat],
+ TypeAbstractFloat,
+ t.params,
+ cases
+ );
+ });
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f32_const' : 'f32_non_const');
+ await run(t, builtin('clamp'), [TypeF32, TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f16_const' : 'f16_non_const');
+ await run(t, builtin('clamp'), [TypeF16, TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
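The integer case generator above emits low > high triples only for the non-const stage and skips them under const evaluation. A self-contained sketch of the same filtering and expectation (hypothetical helper name, not the CTS function):

    // Mirrors the flatMap structure of generateIntegerTestCases.
    function integerClampCases(values: number[], stage: 'const' | 'non-const') {
      return values.flatMap(low =>
        values.flatMap(high =>
          stage === 'const' && low > high
            ? []
            : values.map(e => ({ e, low, high, expected: Math.min(Math.max(e, low), high) }))
        )
      );
    }

    console.log(integerClampCases([0, 1, 2], 'const').length); // 18: 6 pairs with low <= high, times 3 values of e
    console.log(integerClampCases([0, 1, 2], 'non-const').length); // 27: all 9 pairs, times 3 values of e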
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/cos.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/cos.spec.ts
new file mode 100644
index 0000000000..723bca2efd
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/cos.spec.ts
@@ -0,0 +1,84 @@
+export const description = `
+Execution tests for the 'cos' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn cos(e: T ) -> T
+Returns the cosine of e. Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullF32Range, fullF16Range, linearRange } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('cos', {
+ f32: () => {
+ return FP.f32.generateScalarToIntervalCases(
+ [
+ // Well-defined accuracy range
+ ...linearRange(-Math.PI, Math.PI, 1000),
+ ...fullF32Range(),
+ ],
+ 'unfiltered',
+ FP.f32.cosInterval
+ );
+ },
+ f16: () => {
+ return FP.f16.generateScalarToIntervalCases(
+ [
+ // Well-defined accuracy range
+ ...linearRange(-Math.PI, Math.PI, 1000),
+ ...fullF16Range(),
+ ],
+ 'unfiltered',
+ FP.f16.cosInterval
+ );
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+f32 tests
+
+TODO(#792): Decide what the ground-truth is for these tests. [1]
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('f32');
+ await run(t, builtin('cos'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, builtin('cos'), [TypeF16], TypeF16, t.params, cases);
+ });
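The case lists above concentrate samples in [-π, π], the "well-defined accuracy range" noted in the comments, before appending the full f32/f16 range. A stand-in for that sampling (linearRange itself is defined in util/math.ts; this sketch assumes inclusive endpoints):

    // Assumption-level stand-in for linearRange(begin, end, count), count >= 2.
    function linearSamples(begin: number, end: number, count: number): number[] {
      return Array.from({ length: count }, (_, i) => begin + (i / (count - 1)) * (end - begin));
    }

    const wellDefinedRegion = linearSamples(-Math.PI, Math.PI, 1000);
    console.log(wellDefinedRegion[0], wellDefinedRegion[999]); // -π and π, 1000 evenly spaced samples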
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/cosh.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/cosh.spec.ts
new file mode 100644
index 0000000000..37fb961c98
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/cosh.spec.ts
@@ -0,0 +1,68 @@
+export const description = `
+Execution tests for the 'cosh' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn cosh(e: T ) -> T
+Returns the hyperbolic cosine of e. Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullF32Range, fullF16Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('cosh', {
+ f32_const: () => {
+ return FP.f32.generateScalarToIntervalCases(fullF32Range(), 'finite', FP.f32.coshInterval);
+ },
+ f32_non_const: () => {
+ return FP.f32.generateScalarToIntervalCases(fullF32Range(), 'unfiltered', FP.f32.coshInterval);
+ },
+ f16_const: () => {
+ return FP.f16.generateScalarToIntervalCases(fullF16Range(), 'finite', FP.f16.coshInterval);
+ },
+ f16_non_const: () => {
+ return FP.f16.generateScalarToIntervalCases(fullF16Range(), 'unfiltered', FP.f16.coshInterval);
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f32_const' : 'f32_non_const');
+ await run(t, builtin('cosh'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f16_const' : 'f16_non_const');
+ await run(t, builtin('cosh'), [TypeF16], TypeF16, t.params, cases);
+ });
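The const caches above pass 'finite' rather than 'unfiltered', dropping cases whose results overflow; cosh overflows f32 (and f16) long before the sampled input range runs out. A quick host-side illustration (sketch only, with hypothetical helper names):

    // cosh exceeds the f32 maximum (~3.4e38) well inside the f32 input range.
    const F32_MAX = 3.4028234663852886e38;
    const coshOverflowsF32 = (x: number): boolean => Math.cosh(Math.fround(x)) > F32_MAX;

    console.log(coshOverflowsF32(10)); // false (cosh(10) is about 1.1e4)
    console.log(coshOverflowsF32(100)); // true (cosh(100) is about 1.3e43)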
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/countLeadingZeros.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/countLeadingZeros.spec.ts
new file mode 100644
index 0000000000..cfae4bb6e0
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/countLeadingZeros.spec.ts
@@ -0,0 +1,250 @@
+export const description = `
+Execution tests for the 'countLeadingZeros' builtin function
+
+S is i32 or u32
+T is S or vecN<S>
+@const fn countLeadingZeros(e: T ) -> T
+The number of consecutive 0 bits starting from the most significant bit of e,
+when T is a scalar type.
+Component-wise when T is a vector.
+Also known as "clz" in some languages.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeU32, u32Bits, u32, TypeI32, i32Bits, i32 } from '../../../../../util/conversion.js';
+import { allInputSources, Config, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('u32')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`u32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cfg: Config = t.params;
+ await run(t, builtin('countLeadingZeros'), [TypeU32], TypeU32, cfg, [
+ // Zero
+ { input: u32Bits(0b00000000000000000000000000000000), expected: u32(32) },
+
+ // One
+ { input: u32Bits(0b00000000000000000000000000000001), expected: u32(31) },
+
+ // 0's after leading 1
+ { input: u32Bits(0b00000000000000000000000000000010), expected: u32(30) },
+ { input: u32Bits(0b00000000000000000000000000000100), expected: u32(29) },
+ { input: u32Bits(0b00000000000000000000000000001000), expected: u32(28) },
+ { input: u32Bits(0b00000000000000000000000000010000), expected: u32(27) },
+ { input: u32Bits(0b00000000000000000000000000100000), expected: u32(26) },
+ { input: u32Bits(0b00000000000000000000000001000000), expected: u32(25) },
+ { input: u32Bits(0b00000000000000000000000010000000), expected: u32(24) },
+ { input: u32Bits(0b00000000000000000000000100000000), expected: u32(23) },
+ { input: u32Bits(0b00000000000000000000001000000000), expected: u32(22) },
+ { input: u32Bits(0b00000000000000000000010000000000), expected: u32(21) },
+ { input: u32Bits(0b00000000000000000000100000000000), expected: u32(20) },
+ { input: u32Bits(0b00000000000000000001000000000000), expected: u32(19) },
+ { input: u32Bits(0b00000000000000000010000000000000), expected: u32(18) },
+ { input: u32Bits(0b00000000000000000100000000000000), expected: u32(17) },
+ { input: u32Bits(0b00000000000000001000000000000000), expected: u32(16) },
+ { input: u32Bits(0b00000000000000010000000000000000), expected: u32(15) },
+ { input: u32Bits(0b00000000000000100000000000000000), expected: u32(14) },
+ { input: u32Bits(0b00000000000001000000000000000000), expected: u32(13) },
+ { input: u32Bits(0b00000000000010000000000000000000), expected: u32(12) },
+ { input: u32Bits(0b00000000000100000000000000000000), expected: u32(11) },
+ { input: u32Bits(0b00000000001000000000000000000000), expected: u32(10) },
+ { input: u32Bits(0b00000000010000000000000000000000), expected: u32(9) },
+ { input: u32Bits(0b00000000100000000000000000000000), expected: u32(8) },
+ { input: u32Bits(0b00000001000000000000000000000000), expected: u32(7) },
+ { input: u32Bits(0b00000010000000000000000000000000), expected: u32(6) },
+ { input: u32Bits(0b00000100000000000000000000000000), expected: u32(5) },
+ { input: u32Bits(0b00001000000000000000000000000000), expected: u32(4) },
+ { input: u32Bits(0b00010000000000000000000000000000), expected: u32(3) },
+ { input: u32Bits(0b00100000000000000000000000000000), expected: u32(2) },
+ { input: u32Bits(0b01000000000000000000000000000000), expected: u32(1) },
+ { input: u32Bits(0b10000000000000000000000000000000), expected: u32(0) },
+
+ // 1's after leading 1
+ { input: u32Bits(0b00000000000000000000000000000011), expected: u32(30) },
+ { input: u32Bits(0b00000000000000000000000000000111), expected: u32(29) },
+ { input: u32Bits(0b00000000000000000000000000001111), expected: u32(28) },
+ { input: u32Bits(0b00000000000000000000000000011111), expected: u32(27) },
+ { input: u32Bits(0b00000000000000000000000000111111), expected: u32(26) },
+ { input: u32Bits(0b00000000000000000000000001111111), expected: u32(25) },
+ { input: u32Bits(0b00000000000000000000000011111111), expected: u32(24) },
+ { input: u32Bits(0b00000000000000000000000111111111), expected: u32(23) },
+ { input: u32Bits(0b00000000000000000000001111111111), expected: u32(22) },
+ { input: u32Bits(0b00000000000000000000011111111111), expected: u32(21) },
+ { input: u32Bits(0b00000000000000000000111111111111), expected: u32(20) },
+ { input: u32Bits(0b00000000000000000001111111111111), expected: u32(19) },
+ { input: u32Bits(0b00000000000000000011111111111111), expected: u32(18) },
+ { input: u32Bits(0b00000000000000000111111111111111), expected: u32(17) },
+ { input: u32Bits(0b00000000000000001111111111111111), expected: u32(16) },
+ { input: u32Bits(0b00000000000000011111111111111111), expected: u32(15) },
+ { input: u32Bits(0b00000000000000111111111111111111), expected: u32(14) },
+ { input: u32Bits(0b00000000000001111111111111111111), expected: u32(13) },
+ { input: u32Bits(0b00000000000011111111111111111111), expected: u32(12) },
+ { input: u32Bits(0b00000000000111111111111111111111), expected: u32(11) },
+ { input: u32Bits(0b00000000001111111111111111111111), expected: u32(10) },
+ { input: u32Bits(0b00000000011111111111111111111111), expected: u32(9) },
+ { input: u32Bits(0b00000000111111111111111111111111), expected: u32(8) },
+ { input: u32Bits(0b00000001111111111111111111111111), expected: u32(7) },
+ { input: u32Bits(0b00000011111111111111111111111111), expected: u32(6) },
+ { input: u32Bits(0b00000111111111111111111111111111), expected: u32(5) },
+ { input: u32Bits(0b00001111111111111111111111111111), expected: u32(4) },
+ { input: u32Bits(0b00011111111111111111111111111111), expected: u32(3) },
+ { input: u32Bits(0b00111111111111111111111111111111), expected: u32(2) },
+ { input: u32Bits(0b01111111111111111111111111111111), expected: u32(1) },
+ { input: u32Bits(0b11111111111111111111111111111111), expected: u32(0) },
+
+ // random after leading 1
+ { input: u32Bits(0b00000000000000000000000000000110), expected: u32(29) },
+ { input: u32Bits(0b00000000000000000000000000001101), expected: u32(28) },
+ { input: u32Bits(0b00000000000000000000000000011101), expected: u32(27) },
+ { input: u32Bits(0b00000000000000000000000000111001), expected: u32(26) },
+ { input: u32Bits(0b00000000000000000000000001101111), expected: u32(25) },
+ { input: u32Bits(0b00000000000000000000000011111111), expected: u32(24) },
+ { input: u32Bits(0b00000000000000000000000111101111), expected: u32(23) },
+ { input: u32Bits(0b00000000000000000000001111111111), expected: u32(22) },
+ { input: u32Bits(0b00000000000000000000011111110001), expected: u32(21) },
+ { input: u32Bits(0b00000000000000000000111011011101), expected: u32(20) },
+ { input: u32Bits(0b00000000000000000001101101111111), expected: u32(19) },
+ { input: u32Bits(0b00000000000000000011111111011111), expected: u32(18) },
+ { input: u32Bits(0b00000000000000000101111001110101), expected: u32(17) },
+ { input: u32Bits(0b00000000000000001101111011110111), expected: u32(16) },
+ { input: u32Bits(0b00000000000000011111111111110011), expected: u32(15) },
+ { input: u32Bits(0b00000000000000111111111110111111), expected: u32(14) },
+ { input: u32Bits(0b00000000000001111111011111111111), expected: u32(13) },
+ { input: u32Bits(0b00000000000011111111111111111111), expected: u32(12) },
+ { input: u32Bits(0b00000000000111110101011110111111), expected: u32(11) },
+ { input: u32Bits(0b00000000001111101111111111110111), expected: u32(10) },
+ { input: u32Bits(0b00000000011111111111010000101111), expected: u32(9) },
+ { input: u32Bits(0b00000000111111111111001111111011), expected: u32(8) },
+ { input: u32Bits(0b00000001111111011111101111111111), expected: u32(7) },
+ { input: u32Bits(0b00000011101011111011110111111011), expected: u32(6) },
+ { input: u32Bits(0b00000111111110111111111111111111), expected: u32(5) },
+ { input: u32Bits(0b00001111000000011011011110111111), expected: u32(4) },
+ { input: u32Bits(0b00011110101111011111111111111111), expected: u32(3) },
+ { input: u32Bits(0b00110110111111100111111110111101), expected: u32(2) },
+ { input: u32Bits(0b01010111111101111111011111011111), expected: u32(1) },
+ { input: u32Bits(0b11100010011110101101101110101111), expected: u32(0) },
+ ]);
+ });
+
+g.test('i32')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`i32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cfg: Config = t.params;
+ await run(t, builtin('countLeadingZeros'), [TypeI32], TypeI32, cfg, [
+ // Zero
+ { input: i32Bits(0b00000000000000000000000000000000), expected: i32(32) },
+
+ // One
+ { input: i32Bits(0b00000000000000000000000000000001), expected: i32(31) },
+
+ // 0's after leading 1
+ { input: i32Bits(0b00000000000000000000000000000010), expected: i32(30) },
+ { input: i32Bits(0b00000000000000000000000000000100), expected: i32(29) },
+ { input: i32Bits(0b00000000000000000000000000001000), expected: i32(28) },
+ { input: i32Bits(0b00000000000000000000000000010000), expected: i32(27) },
+ { input: i32Bits(0b00000000000000000000000000100000), expected: i32(26) },
+ { input: i32Bits(0b00000000000000000000000001000000), expected: i32(25) },
+ { input: i32Bits(0b00000000000000000000000010000000), expected: i32(24) },
+ { input: i32Bits(0b00000000000000000000000100000000), expected: i32(23) },
+ { input: i32Bits(0b00000000000000000000001000000000), expected: i32(22) },
+ { input: i32Bits(0b00000000000000000000010000000000), expected: i32(21) },
+ { input: i32Bits(0b00000000000000000000100000000000), expected: i32(20) },
+ { input: i32Bits(0b00000000000000000001000000000000), expected: i32(19) },
+ { input: i32Bits(0b00000000000000000010000000000000), expected: i32(18) },
+ { input: i32Bits(0b00000000000000000100000000000000), expected: i32(17) },
+ { input: i32Bits(0b00000000000000001000000000000000), expected: i32(16) },
+ { input: i32Bits(0b00000000000000010000000000000000), expected: i32(15) },
+ { input: i32Bits(0b00000000000000100000000000000000), expected: i32(14) },
+ { input: i32Bits(0b00000000000001000000000000000000), expected: i32(13) },
+ { input: i32Bits(0b00000000000010000000000000000000), expected: i32(12) },
+ { input: i32Bits(0b00000000000100000000000000000000), expected: i32(11) },
+ { input: i32Bits(0b00000000001000000000000000000000), expected: i32(10) },
+ { input: i32Bits(0b00000000010000000000000000000000), expected: i32(9) },
+ { input: i32Bits(0b00000000100000000000000000000000), expected: i32(8) },
+ { input: i32Bits(0b00000001000000000000000000000000), expected: i32(7) },
+ { input: i32Bits(0b00000010000000000000000000000000), expected: i32(6) },
+ { input: i32Bits(0b00000100000000000000000000000000), expected: i32(5) },
+ { input: i32Bits(0b00001000000000000000000000000000), expected: i32(4) },
+ { input: i32Bits(0b00010000000000000000000000000000), expected: i32(3) },
+ { input: i32Bits(0b00100000000000000000000000000000), expected: i32(2) },
+ { input: i32Bits(0b01000000000000000000000000000000), expected: i32(1) },
+ { input: i32Bits(0b10000000000000000000000000000000), expected: i32(0) },
+
+ // 1's after leading 1
+ { input: i32Bits(0b00000000000000000000000000000011), expected: i32(30) },
+ { input: i32Bits(0b00000000000000000000000000000111), expected: i32(29) },
+ { input: i32Bits(0b00000000000000000000000000001111), expected: i32(28) },
+ { input: i32Bits(0b00000000000000000000000000011111), expected: i32(27) },
+ { input: i32Bits(0b00000000000000000000000000111111), expected: i32(26) },
+ { input: i32Bits(0b00000000000000000000000001111111), expected: i32(25) },
+ { input: i32Bits(0b00000000000000000000000011111111), expected: i32(24) },
+ { input: i32Bits(0b00000000000000000000000111111111), expected: i32(23) },
+ { input: i32Bits(0b00000000000000000000001111111111), expected: i32(22) },
+ { input: i32Bits(0b00000000000000000000011111111111), expected: i32(21) },
+ { input: i32Bits(0b00000000000000000000111111111111), expected: i32(20) },
+ { input: i32Bits(0b00000000000000000001111111111111), expected: i32(19) },
+ { input: i32Bits(0b00000000000000000011111111111111), expected: i32(18) },
+ { input: i32Bits(0b00000000000000000111111111111111), expected: i32(17) },
+ { input: i32Bits(0b00000000000000001111111111111111), expected: i32(16) },
+ { input: i32Bits(0b00000000000000011111111111111111), expected: i32(15) },
+ { input: i32Bits(0b00000000000000111111111111111111), expected: i32(14) },
+ { input: i32Bits(0b00000000000001111111111111111111), expected: i32(13) },
+ { input: i32Bits(0b00000000000011111111111111111111), expected: i32(12) },
+ { input: i32Bits(0b00000000000111111111111111111111), expected: i32(11) },
+ { input: i32Bits(0b00000000001111111111111111111111), expected: i32(10) },
+ { input: i32Bits(0b00000000011111111111111111111111), expected: i32(9) },
+ { input: i32Bits(0b00000000111111111111111111111111), expected: i32(8) },
+ { input: i32Bits(0b00000001111111111111111111111111), expected: i32(7) },
+ { input: i32Bits(0b00000011111111111111111111111111), expected: i32(6) },
+ { input: i32Bits(0b00000111111111111111111111111111), expected: i32(5) },
+ { input: i32Bits(0b00001111111111111111111111111111), expected: i32(4) },
+ { input: i32Bits(0b00011111111111111111111111111111), expected: i32(3) },
+ { input: i32Bits(0b00111111111111111111111111111111), expected: i32(2) },
+ { input: i32Bits(0b01111111111111111111111111111111), expected: i32(1) },
+ { input: i32Bits(0b11111111111111111111111111111111), expected: i32(0) },
+
+ // random after leading 1
+ { input: i32Bits(0b00000000000000000000000000000110), expected: i32(29) },
+ { input: i32Bits(0b00000000000000000000000000001101), expected: i32(28) },
+ { input: i32Bits(0b00000000000000000000000000011101), expected: i32(27) },
+ { input: i32Bits(0b00000000000000000000000000111001), expected: i32(26) },
+ { input: i32Bits(0b00000000000000000000000001101111), expected: i32(25) },
+ { input: i32Bits(0b00000000000000000000000011111111), expected: i32(24) },
+ { input: i32Bits(0b00000000000000000000000111101111), expected: i32(23) },
+ { input: i32Bits(0b00000000000000000000001111111111), expected: i32(22) },
+ { input: i32Bits(0b00000000000000000000011111110001), expected: i32(21) },
+ { input: i32Bits(0b00000000000000000000111011011101), expected: i32(20) },
+ { input: i32Bits(0b00000000000000000001101101111111), expected: i32(19) },
+ { input: i32Bits(0b00000000000000000011111111011111), expected: i32(18) },
+ { input: i32Bits(0b00000000000000000101111001110101), expected: i32(17) },
+ { input: i32Bits(0b00000000000000001101111011110111), expected: i32(16) },
+ { input: i32Bits(0b00000000000000011111111111110011), expected: i32(15) },
+ { input: i32Bits(0b00000000000000111111111110111111), expected: i32(14) },
+ { input: i32Bits(0b00000000000001111111011111111111), expected: i32(13) },
+ { input: i32Bits(0b00000000000011111111111111111111), expected: i32(12) },
+ { input: i32Bits(0b00000000000111110101011110111111), expected: i32(11) },
+ { input: i32Bits(0b00000000001111101111111111110111), expected: i32(10) },
+ { input: i32Bits(0b00000000011111111111010000101111), expected: i32(9) },
+ { input: i32Bits(0b00000000111111111111001111111011), expected: i32(8) },
+ { input: i32Bits(0b00000001111111011111101111111111), expected: i32(7) },
+ { input: i32Bits(0b00000011101011111011110111111011), expected: i32(6) },
+ { input: i32Bits(0b00000111111110111111111111111111), expected: i32(5) },
+ { input: i32Bits(0b00001111000000011011011110111111), expected: i32(4) },
+ { input: i32Bits(0b00011110101111011111111111111111), expected: i32(3) },
+ { input: i32Bits(0b00110110111111100111111110111101), expected: i32(2) },
+ { input: i32Bits(0b01010111111101111111011111011111), expected: i32(1) },
+ { input: i32Bits(0b11100010011110101101101110101111), expected: i32(0) },
+ ]);
+ });
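The expected values in both tables can be spot-checked on the host with JavaScript's Math.clz32, which computes the same leading-zero count (sketch only, not part of the test):

    // Spot-check a few rows from the tables above.
    console.log(Math.clz32(0b00000000000000000000000000000000)); // 32
    console.log(Math.clz32(0b00000000000000000000011111110001)); // 21
    console.log(Math.clz32(0b11100010011110101101101110101111)); // 0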
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/countOneBits.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/countOneBits.spec.ts
new file mode 100644
index 0000000000..f0be916285
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/countOneBits.spec.ts
@@ -0,0 +1,249 @@
+export const description = `
+Execution tests for the 'countOneBits' builtin function
+
+S is i32 or u32
+T is S or vecN<S>
+@const fn countOneBits(e: T ) -> T
+The number of 1 bits in the representation of e.
+Also known as "population count".
+Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeU32, u32Bits, u32, TypeI32, i32Bits, i32 } from '../../../../../util/conversion.js';
+import { allInputSources, Config, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('u32')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`u32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cfg: Config = t.params;
+ await run(t, builtin('countOneBits'), [TypeU32], TypeU32, cfg, [
+ // Zero
+ { input: u32Bits(0b00000000000000000000000000000000), expected: u32(0) },
+
+ // One
+ { input: u32Bits(0b00000000000000000000000000000001), expected: u32(1) },
+
+ // 0's after leading 1
+ { input: u32Bits(0b00000000000000000000000000000010), expected: u32(1) },
+ { input: u32Bits(0b00000000000000000000000000000100), expected: u32(1) },
+ { input: u32Bits(0b00000000000000000000000000001000), expected: u32(1) },
+ { input: u32Bits(0b00000000000000000000000000010000), expected: u32(1) },
+ { input: u32Bits(0b00000000000000000000000000100000), expected: u32(1) },
+ { input: u32Bits(0b00000000000000000000000001000000), expected: u32(1) },
+ { input: u32Bits(0b00000000000000000000000010000000), expected: u32(1) },
+ { input: u32Bits(0b00000000000000000000000100000000), expected: u32(1) },
+ { input: u32Bits(0b00000000000000000000001000000000), expected: u32(1) },
+ { input: u32Bits(0b00000000000000000000010000000000), expected: u32(1) },
+ { input: u32Bits(0b00000000000000000000100000000000), expected: u32(1) },
+ { input: u32Bits(0b00000000000000000001000000000000), expected: u32(1) },
+ { input: u32Bits(0b00000000000000000010000000000000), expected: u32(1) },
+ { input: u32Bits(0b00000000000000000100000000000000), expected: u32(1) },
+ { input: u32Bits(0b00000000000000001000000000000000), expected: u32(1) },
+ { input: u32Bits(0b00000000000000010000000000000000), expected: u32(1) },
+ { input: u32Bits(0b00000000000000100000000000000000), expected: u32(1) },
+ { input: u32Bits(0b00000000000001000000000000000000), expected: u32(1) },
+ { input: u32Bits(0b00000000000010000000000000000000), expected: u32(1) },
+ { input: u32Bits(0b00000000000100000000000000000000), expected: u32(1) },
+ { input: u32Bits(0b00000000001000000000000000000000), expected: u32(1) },
+ { input: u32Bits(0b00000000010000000000000000000000), expected: u32(1) },
+ { input: u32Bits(0b00000000100000000000000000000000), expected: u32(1) },
+ { input: u32Bits(0b00000001000000000000000000000000), expected: u32(1) },
+ { input: u32Bits(0b00000010000000000000000000000000), expected: u32(1) },
+ { input: u32Bits(0b00000100000000000000000000000000), expected: u32(1) },
+ { input: u32Bits(0b00001000000000000000000000000000), expected: u32(1) },
+ { input: u32Bits(0b00010000000000000000000000000000), expected: u32(1) },
+ { input: u32Bits(0b00100000000000000000000000000000), expected: u32(1) },
+ { input: u32Bits(0b01000000000000000000000000000000), expected: u32(1) },
+ { input: u32Bits(0b10000000000000000000000000000000), expected: u32(1) },
+
+ // 1's after leading 1
+ { input: u32Bits(0b00000000000000000000000000000011), expected: u32(2) },
+ { input: u32Bits(0b00000000000000000000000000000111), expected: u32(3) },
+ { input: u32Bits(0b00000000000000000000000000001111), expected: u32(4) },
+ { input: u32Bits(0b00000000000000000000000000011111), expected: u32(5) },
+ { input: u32Bits(0b00000000000000000000000000111111), expected: u32(6) },
+ { input: u32Bits(0b00000000000000000000000001111111), expected: u32(7) },
+ { input: u32Bits(0b00000000000000000000000011111111), expected: u32(8) },
+ { input: u32Bits(0b00000000000000000000000111111111), expected: u32(9) },
+ { input: u32Bits(0b00000000000000000000001111111111), expected: u32(10) },
+ { input: u32Bits(0b00000000000000000000011111111111), expected: u32(11) },
+ { input: u32Bits(0b00000000000000000000111111111111), expected: u32(12) },
+ { input: u32Bits(0b00000000000000000001111111111111), expected: u32(13) },
+ { input: u32Bits(0b00000000000000000011111111111111), expected: u32(14) },
+ { input: u32Bits(0b00000000000000000111111111111111), expected: u32(15) },
+ { input: u32Bits(0b00000000000000001111111111111111), expected: u32(16) },
+ { input: u32Bits(0b00000000000000011111111111111111), expected: u32(17) },
+ { input: u32Bits(0b00000000000000111111111111111111), expected: u32(18) },
+ { input: u32Bits(0b00000000000001111111111111111111), expected: u32(19) },
+ { input: u32Bits(0b00000000000011111111111111111111), expected: u32(20) },
+ { input: u32Bits(0b00000000000111111111111111111111), expected: u32(21) },
+ { input: u32Bits(0b00000000001111111111111111111111), expected: u32(22) },
+ { input: u32Bits(0b00000000011111111111111111111111), expected: u32(23) },
+ { input: u32Bits(0b00000000111111111111111111111111), expected: u32(24) },
+ { input: u32Bits(0b00000001111111111111111111111111), expected: u32(25) },
+ { input: u32Bits(0b00000011111111111111111111111111), expected: u32(26) },
+ { input: u32Bits(0b00000111111111111111111111111111), expected: u32(27) },
+ { input: u32Bits(0b00001111111111111111111111111111), expected: u32(28) },
+ { input: u32Bits(0b00011111111111111111111111111111), expected: u32(29) },
+ { input: u32Bits(0b00111111111111111111111111111111), expected: u32(30) },
+ { input: u32Bits(0b01111111111111111111111111111111), expected: u32(31) },
+ { input: u32Bits(0b11111111111111111111111111111111), expected: u32(32) },
+
+ // random after leading 1
+ { input: u32Bits(0b00000000000000000000000000000110), expected: u32(2) },
+ { input: u32Bits(0b00000000000000000000000000001101), expected: u32(3) },
+ { input: u32Bits(0b00000000000000000000000000011101), expected: u32(4) },
+ { input: u32Bits(0b00000000000000000000000000111001), expected: u32(4) },
+ { input: u32Bits(0b00000000000000000000000001101111), expected: u32(6) },
+ { input: u32Bits(0b00000000000000000000000011111111), expected: u32(8) },
+ { input: u32Bits(0b00000000000000000000000111101111), expected: u32(8) },
+ { input: u32Bits(0b00000000000000000000001111111111), expected: u32(10) },
+ { input: u32Bits(0b00000000000000000000011111110001), expected: u32(8) },
+ { input: u32Bits(0b00000000000000000000111011011101), expected: u32(9) },
+ { input: u32Bits(0b00000000000000000001101101111111), expected: u32(11) },
+ { input: u32Bits(0b00000000000000000011111111011111), expected: u32(13) },
+ { input: u32Bits(0b00000000000000000101111001110101), expected: u32(10) },
+ { input: u32Bits(0b00000000000000001101111011110111), expected: u32(13) },
+ { input: u32Bits(0b00000000000000011111111111110011), expected: u32(15) },
+ { input: u32Bits(0b00000000000000111111111110111111), expected: u32(17) },
+ { input: u32Bits(0b00000000000001111111011111111111), expected: u32(18) },
+ { input: u32Bits(0b00000000000011111111111111111111), expected: u32(20) },
+ { input: u32Bits(0b00000000000111110101011110111111), expected: u32(17) },
+ { input: u32Bits(0b00000000001111101111111111110111), expected: u32(20) },
+ { input: u32Bits(0b00000000011111111111010000101111), expected: u32(17) },
+ { input: u32Bits(0b00000000111111111111001111111011), expected: u32(21) },
+ { input: u32Bits(0b00000001111111011111101111111111), expected: u32(23) },
+ { input: u32Bits(0b00000011101011111011110111111011), expected: u32(21) },
+ { input: u32Bits(0b00000111111110111111111111111111), expected: u32(26) },
+ { input: u32Bits(0b00001111000000011011011110111111), expected: u32(18) },
+ { input: u32Bits(0b00011110101111011111111111111111), expected: u32(26) },
+ { input: u32Bits(0b00110110111111100111111110111101), expected: u32(24) },
+ { input: u32Bits(0b01010111111101111111011111011111), expected: u32(26) },
+ { input: u32Bits(0b11100010011110101101101110101111), expected: u32(21) },
+ ]);
+ });
+
+g.test('i32')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`i32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cfg: Config = t.params;
+ await run(t, builtin('countOneBits'), [TypeI32], TypeI32, cfg, [
+ // Zero
+ { input: i32Bits(0b00000000000000000000000000000000), expected: i32(0) },
+
+ // One
+ { input: i32Bits(0b00000000000000000000000000000001), expected: i32(1) },
+
+ // 0's after leading 1
+ { input: i32Bits(0b00000000000000000000000000000010), expected: i32(1) },
+ { input: i32Bits(0b00000000000000000000000000000100), expected: i32(1) },
+ { input: i32Bits(0b00000000000000000000000000001000), expected: i32(1) },
+ { input: i32Bits(0b00000000000000000000000000010000), expected: i32(1) },
+ { input: i32Bits(0b00000000000000000000000000100000), expected: i32(1) },
+ { input: i32Bits(0b00000000000000000000000001000000), expected: i32(1) },
+ { input: i32Bits(0b00000000000000000000000010000000), expected: i32(1) },
+ { input: i32Bits(0b00000000000000000000000100000000), expected: i32(1) },
+ { input: i32Bits(0b00000000000000000000001000000000), expected: i32(1) },
+ { input: i32Bits(0b00000000000000000000010000000000), expected: i32(1) },
+ { input: i32Bits(0b00000000000000000000100000000000), expected: i32(1) },
+ { input: i32Bits(0b00000000000000000001000000000000), expected: i32(1) },
+ { input: i32Bits(0b00000000000000000010000000000000), expected: i32(1) },
+ { input: i32Bits(0b00000000000000000100000000000000), expected: i32(1) },
+ { input: i32Bits(0b00000000000000001000000000000000), expected: i32(1) },
+ { input: i32Bits(0b00000000000000010000000000000000), expected: i32(1) },
+ { input: i32Bits(0b00000000000000100000000000000000), expected: i32(1) },
+ { input: i32Bits(0b00000000000001000000000000000000), expected: i32(1) },
+ { input: i32Bits(0b00000000000010000000000000000000), expected: i32(1) },
+ { input: i32Bits(0b00000000000100000000000000000000), expected: i32(1) },
+ { input: i32Bits(0b00000000001000000000000000000000), expected: i32(1) },
+ { input: i32Bits(0b00000000010000000000000000000000), expected: i32(1) },
+ { input: i32Bits(0b00000000100000000000000000000000), expected: i32(1) },
+ { input: i32Bits(0b00000001000000000000000000000000), expected: i32(1) },
+ { input: i32Bits(0b00000010000000000000000000000000), expected: i32(1) },
+ { input: i32Bits(0b00000100000000000000000000000000), expected: i32(1) },
+ { input: i32Bits(0b00001000000000000000000000000000), expected: i32(1) },
+ { input: i32Bits(0b00010000000000000000000000000000), expected: i32(1) },
+ { input: i32Bits(0b00100000000000000000000000000000), expected: i32(1) },
+ { input: i32Bits(0b01000000000000000000000000000000), expected: i32(1) },
+ { input: i32Bits(0b10000000000000000000000000000000), expected: i32(1) },
+
+ // 1's after leading 1
+ { input: i32Bits(0b00000000000000000000000000000011), expected: i32(2) },
+ { input: i32Bits(0b00000000000000000000000000000111), expected: i32(3) },
+ { input: i32Bits(0b00000000000000000000000000001111), expected: i32(4) },
+ { input: i32Bits(0b00000000000000000000000000011111), expected: i32(5) },
+ { input: i32Bits(0b00000000000000000000000000111111), expected: i32(6) },
+ { input: i32Bits(0b00000000000000000000000001111111), expected: i32(7) },
+ { input: i32Bits(0b00000000000000000000000011111111), expected: i32(8) },
+ { input: i32Bits(0b00000000000000000000000111111111), expected: i32(9) },
+ { input: i32Bits(0b00000000000000000000001111111111), expected: i32(10) },
+ { input: i32Bits(0b00000000000000000000011111111111), expected: i32(11) },
+ { input: i32Bits(0b00000000000000000000111111111111), expected: i32(12) },
+ { input: i32Bits(0b00000000000000000001111111111111), expected: i32(13) },
+ { input: i32Bits(0b00000000000000000011111111111111), expected: i32(14) },
+ { input: i32Bits(0b00000000000000000111111111111111), expected: i32(15) },
+ { input: i32Bits(0b00000000000000001111111111111111), expected: i32(16) },
+ { input: i32Bits(0b00000000000000011111111111111111), expected: i32(17) },
+ { input: i32Bits(0b00000000000000111111111111111111), expected: i32(18) },
+ { input: i32Bits(0b00000000000001111111111111111111), expected: i32(19) },
+ { input: i32Bits(0b00000000000011111111111111111111), expected: i32(20) },
+ { input: i32Bits(0b00000000000111111111111111111111), expected: i32(21) },
+ { input: i32Bits(0b00000000001111111111111111111111), expected: i32(22) },
+ { input: i32Bits(0b00000000011111111111111111111111), expected: i32(23) },
+ { input: i32Bits(0b00000000111111111111111111111111), expected: i32(24) },
+ { input: i32Bits(0b00000001111111111111111111111111), expected: i32(25) },
+ { input: i32Bits(0b00000011111111111111111111111111), expected: i32(26) },
+ { input: i32Bits(0b00000111111111111111111111111111), expected: i32(27) },
+ { input: i32Bits(0b00001111111111111111111111111111), expected: i32(28) },
+ { input: i32Bits(0b00011111111111111111111111111111), expected: i32(29) },
+ { input: i32Bits(0b00111111111111111111111111111111), expected: i32(30) },
+ { input: i32Bits(0b01111111111111111111111111111111), expected: i32(31) },
+ { input: i32Bits(0b11111111111111111111111111111111), expected: i32(32) },
+
+ // random after leading 1
+ { input: i32Bits(0b00000000000000000000000000000110), expected: i32(2) },
+ { input: i32Bits(0b00000000000000000000000000001101), expected: i32(3) },
+ { input: i32Bits(0b00000000000000000000000000011101), expected: i32(4) },
+ { input: i32Bits(0b00000000000000000000000000111001), expected: i32(4) },
+ { input: i32Bits(0b00000000000000000000000001101111), expected: i32(6) },
+ { input: i32Bits(0b00000000000000000000000011111111), expected: i32(8) },
+ { input: i32Bits(0b00000000000000000000000111101111), expected: i32(8) },
+ { input: i32Bits(0b00000000000000000000001111111111), expected: i32(10) },
+ { input: i32Bits(0b00000000000000000000011111110001), expected: i32(8) },
+ { input: i32Bits(0b00000000000000000000111011011101), expected: i32(9) },
+ { input: i32Bits(0b00000000000000000001101101111111), expected: i32(11) },
+ { input: i32Bits(0b00000000000000000011111111011111), expected: i32(13) },
+ { input: i32Bits(0b00000000000000000101111001110101), expected: i32(10) },
+ { input: i32Bits(0b00000000000000001101111011110111), expected: i32(13) },
+ { input: i32Bits(0b00000000000000011111111111110011), expected: i32(15) },
+ { input: i32Bits(0b00000000000000111111111110111111), expected: i32(17) },
+ { input: i32Bits(0b00000000000001111111011111111111), expected: i32(18) },
+ { input: i32Bits(0b00000000000011111111111111111111), expected: i32(20) },
+ { input: i32Bits(0b00000000000111110101011110111111), expected: i32(17) },
+ { input: i32Bits(0b00000000001111101111111111110111), expected: i32(20) },
+ { input: i32Bits(0b00000000011111111111010000101111), expected: i32(17) },
+ { input: i32Bits(0b00000000111111111111001111111011), expected: i32(21) },
+ { input: i32Bits(0b00000001111111011111101111111111), expected: i32(23) },
+ { input: i32Bits(0b00000011101011111011110111111011), expected: i32(21) },
+ { input: i32Bits(0b00000111111110111111111111111111), expected: i32(26) },
+ { input: i32Bits(0b00001111000000011011011110111111), expected: i32(18) },
+ { input: i32Bits(0b00011110101111011111111111111111), expected: i32(26) },
+ { input: i32Bits(0b00110110111111100111111110111101), expected: i32(24) },
+ { input: i32Bits(0b01010111111101111111011111011111), expected: i32(26) },
+ { input: i32Bits(0b11100010011110101101101110101111), expected: i32(21) },
+ ]);
+ });
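Similarly, the countOneBits expectations above can be spot-checked with a simple host-side population count (sketch only):

    // Count set bits of a 32-bit value via its binary string representation.
    const popcount32 = (v: number): number => (v >>> 0).toString(2).split('1').length - 1;

    console.log(popcount32(0b00000000000000000000111011011101)); // 9
    console.log(popcount32(0b11111111111111111111111111111111)); // 32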
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/countTrailingZeros.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/countTrailingZeros.spec.ts
new file mode 100644
index 0000000000..d0b3198f49
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/countTrailingZeros.spec.ts
@@ -0,0 +1,250 @@
+export const description = `
+Execution tests for the 'countTrailingZeros' builtin function
+
+S is i32 or u32
+T is S or vecN<S>
+@const fn countTrailingZeros(e: T ) -> T
+The number of consecutive 0 bits starting from the least significant bit of e,
+when T is a scalar type.
+Component-wise when T is a vector.
+Also known as "ctz" in some languages.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { i32, i32Bits, TypeI32, u32, TypeU32, u32Bits } from '../../../../../util/conversion.js';
+import { allInputSources, Config, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('u32')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`u32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cfg: Config = t.params;
+ await run(t, builtin('countTrailingZeros'), [TypeU32], TypeU32, cfg, [
+ // Zero
+ { input: u32Bits(0b00000000000000000000000000000000), expected: u32(32) },
+
+ // High bit
+ { input: u32Bits(0b10000000000000000000000000000000), expected: u32(31) },
+
+ // 0's before trailing 1
+ { input: u32Bits(0b00000000000000000000000000000001), expected: u32(0) },
+ { input: u32Bits(0b00000000000000000000000000000010), expected: u32(1) },
+ { input: u32Bits(0b00000000000000000000000000000100), expected: u32(2) },
+ { input: u32Bits(0b00000000000000000000000000001000), expected: u32(3) },
+ { input: u32Bits(0b00000000000000000000000000010000), expected: u32(4) },
+ { input: u32Bits(0b00000000000000000000000000100000), expected: u32(5) },
+ { input: u32Bits(0b00000000000000000000000001000000), expected: u32(6) },
+ { input: u32Bits(0b00000000000000000000000010000000), expected: u32(7) },
+ { input: u32Bits(0b00000000000000000000000100000000), expected: u32(8) },
+ { input: u32Bits(0b00000000000000000000001000000000), expected: u32(9) },
+ { input: u32Bits(0b00000000000000000000010000000000), expected: u32(10) },
+ { input: u32Bits(0b00000000000000000000100000000000), expected: u32(11) },
+ { input: u32Bits(0b00000000000000000001000000000000), expected: u32(12) },
+ { input: u32Bits(0b00000000000000000010000000000000), expected: u32(13) },
+ { input: u32Bits(0b00000000000000000100000000000000), expected: u32(14) },
+ { input: u32Bits(0b00000000000000001000000000000000), expected: u32(15) },
+ { input: u32Bits(0b00000000000000010000000000000000), expected: u32(16) },
+ { input: u32Bits(0b00000000000000100000000000000000), expected: u32(17) },
+ { input: u32Bits(0b00000000000001000000000000000000), expected: u32(18) },
+ { input: u32Bits(0b00000000000010000000000000000000), expected: u32(19) },
+ { input: u32Bits(0b00000000000100000000000000000000), expected: u32(20) },
+ { input: u32Bits(0b00000000001000000000000000000000), expected: u32(21) },
+ { input: u32Bits(0b00000000010000000000000000000000), expected: u32(22) },
+ { input: u32Bits(0b00000000100000000000000000000000), expected: u32(23) },
+ { input: u32Bits(0b00000001000000000000000000000000), expected: u32(24) },
+ { input: u32Bits(0b00000010000000000000000000000000), expected: u32(25) },
+ { input: u32Bits(0b00000100000000000000000000000000), expected: u32(26) },
+ { input: u32Bits(0b00001000000000000000000000000000), expected: u32(27) },
+ { input: u32Bits(0b00010000000000000000000000000000), expected: u32(28) },
+ { input: u32Bits(0b00100000000000000000000000000000), expected: u32(29) },
+ { input: u32Bits(0b01000000000000000000000000000000), expected: u32(30) },
+
+ // 1's before trailing 1
+ { input: u32Bits(0b11111111111111111111111111111111), expected: u32(0) },
+ { input: u32Bits(0b11111111111111111111111111111110), expected: u32(1) },
+ { input: u32Bits(0b11111111111111111111111111111100), expected: u32(2) },
+ { input: u32Bits(0b11111111111111111111111111111000), expected: u32(3) },
+ { input: u32Bits(0b11111111111111111111111111110000), expected: u32(4) },
+ { input: u32Bits(0b11111111111111111111111111100000), expected: u32(5) },
+ { input: u32Bits(0b11111111111111111111111111000000), expected: u32(6) },
+ { input: u32Bits(0b11111111111111111111111110000000), expected: u32(7) },
+ { input: u32Bits(0b11111111111111111111111100000000), expected: u32(8) },
+ { input: u32Bits(0b11111111111111111111111000000000), expected: u32(9) },
+ { input: u32Bits(0b11111111111111111111110000000000), expected: u32(10) },
+ { input: u32Bits(0b11111111111111111111100000000000), expected: u32(11) },
+ { input: u32Bits(0b11111111111111111111000000000000), expected: u32(12) },
+ { input: u32Bits(0b11111111111111111110000000000000), expected: u32(13) },
+ { input: u32Bits(0b11111111111111111100000000000000), expected: u32(14) },
+ { input: u32Bits(0b11111111111111111000000000000000), expected: u32(15) },
+ { input: u32Bits(0b11111111111111110000000000000000), expected: u32(16) },
+ { input: u32Bits(0b11111111111111100000000000000000), expected: u32(17) },
+ { input: u32Bits(0b11111111111111000000000000000000), expected: u32(18) },
+ { input: u32Bits(0b11111111111110000000000000000000), expected: u32(19) },
+ { input: u32Bits(0b11111111111100000000000000000000), expected: u32(20) },
+ { input: u32Bits(0b11111111111000000000000000000000), expected: u32(21) },
+ { input: u32Bits(0b11111111110000000000000000000000), expected: u32(22) },
+ { input: u32Bits(0b11111111100000000000000000000000), expected: u32(23) },
+ { input: u32Bits(0b11111111000000000000000000000000), expected: u32(24) },
+ { input: u32Bits(0b11111110000000000000000000000000), expected: u32(25) },
+ { input: u32Bits(0b11111100000000000000000000000000), expected: u32(26) },
+ { input: u32Bits(0b11111000000000000000000000000000), expected: u32(27) },
+ { input: u32Bits(0b11110000000000000000000000000000), expected: u32(28) },
+ { input: u32Bits(0b11100000000000000000000000000000), expected: u32(29) },
+ { input: u32Bits(0b11000000000000000000000000000000), expected: u32(30) },
+
+ // random before trailing 1
+ { input: u32Bits(0b11110000001111111101111010001111), expected: u32(0) },
+ { input: u32Bits(0b11011110111111100101110011110010), expected: u32(1) },
+ { input: u32Bits(0b11110111011011111111010000111100), expected: u32(2) },
+ { input: u32Bits(0b11010011011101111111010011101000), expected: u32(3) },
+ { input: u32Bits(0b11010111110111110001111110110000), expected: u32(4) },
+ { input: u32Bits(0b11111101111101111110101111100000), expected: u32(5) },
+ { input: u32Bits(0b11111001111011111001111011000000), expected: u32(6) },
+ { input: u32Bits(0b11001110110111110111111010000000), expected: u32(7) },
+ { input: u32Bits(0b11101111011111101110101100000000), expected: u32(8) },
+ { input: u32Bits(0b11111101111011111111111000000000), expected: u32(9) },
+ { input: u32Bits(0b10011111011101110110110000000000), expected: u32(10) },
+ { input: u32Bits(0b11111111101101111011100000000000), expected: u32(11) },
+ { input: u32Bits(0b11111011010110111011000000000000), expected: u32(12) },
+ { input: u32Bits(0b00111101010000111010000000000000), expected: u32(13) },
+ { input: u32Bits(0b11111011110001101100000000000000), expected: u32(14) },
+ { input: u32Bits(0b10111111010111111000000000000000), expected: u32(15) },
+ { input: u32Bits(0b11011101111010110000000000000000), expected: u32(16) },
+ { input: u32Bits(0b01110100110110100000000000000000), expected: u32(17) },
+ { input: u32Bits(0b11100111001011000000000000000000), expected: u32(18) },
+ { input: u32Bits(0b11111001110110000000000000000000), expected: u32(19) },
+ { input: u32Bits(0b00110100100100000000000000000000), expected: u32(20) },
+ { input: u32Bits(0b11111010011000000000000000000000), expected: u32(21) },
+ { input: u32Bits(0b00000010110000000000000000000000), expected: u32(22) },
+ { input: u32Bits(0b11100111100000000000000000000000), expected: u32(23) },
+ { input: u32Bits(0b00101101000000000000000000000000), expected: u32(24) },
+ { input: u32Bits(0b11011010000000000000000000000000), expected: u32(25) },
+ { input: u32Bits(0b11010100000000000000000000000000), expected: u32(26) },
+ { input: u32Bits(0b10111000000000000000000000000000), expected: u32(27) },
+ { input: u32Bits(0b01110000000000000000000000000000), expected: u32(28) },
+ { input: u32Bits(0b10100000000000000000000000000000), expected: u32(29) },
+ ]);
+ });
+
+g.test('i32')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`i32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cfg: Config = t.params;
+ await run(t, builtin('countTrailingZeros'), [TypeI32], TypeI32, cfg, [
+ // Zero
+ { input: i32Bits(0b00000000000000000000000000000000), expected: i32(32) },
+
+ // High bit
+ { input: i32Bits(0b10000000000000000000000000000000), expected: i32(31) },
+
+ // 0's before trailing 1
+ { input: i32Bits(0b00000000000000000000000000000001), expected: i32(0) },
+ { input: i32Bits(0b00000000000000000000000000000010), expected: i32(1) },
+ { input: i32Bits(0b00000000000000000000000000000100), expected: i32(2) },
+ { input: i32Bits(0b00000000000000000000000000001000), expected: i32(3) },
+ { input: i32Bits(0b00000000000000000000000000010000), expected: i32(4) },
+ { input: i32Bits(0b00000000000000000000000000100000), expected: i32(5) },
+ { input: i32Bits(0b00000000000000000000000001000000), expected: i32(6) },
+ { input: i32Bits(0b00000000000000000000000010000000), expected: i32(7) },
+ { input: i32Bits(0b00000000000000000000000100000000), expected: i32(8) },
+ { input: i32Bits(0b00000000000000000000001000000000), expected: i32(9) },
+ { input: i32Bits(0b00000000000000000000010000000000), expected: i32(10) },
+ { input: i32Bits(0b00000000000000000000100000000000), expected: i32(11) },
+ { input: i32Bits(0b00000000000000000001000000000000), expected: i32(12) },
+ { input: i32Bits(0b00000000000000000010000000000000), expected: i32(13) },
+ { input: i32Bits(0b00000000000000000100000000000000), expected: i32(14) },
+ { input: i32Bits(0b00000000000000001000000000000000), expected: i32(15) },
+ { input: i32Bits(0b00000000000000010000000000000000), expected: i32(16) },
+ { input: i32Bits(0b00000000000000100000000000000000), expected: i32(17) },
+ { input: i32Bits(0b00000000000001000000000000000000), expected: i32(18) },
+ { input: i32Bits(0b00000000000010000000000000000000), expected: i32(19) },
+ { input: i32Bits(0b00000000000100000000000000000000), expected: i32(20) },
+ { input: i32Bits(0b00000000001000000000000000000000), expected: i32(21) },
+ { input: i32Bits(0b00000000010000000000000000000000), expected: i32(22) },
+ { input: i32Bits(0b00000000100000000000000000000000), expected: i32(23) },
+ { input: i32Bits(0b00000001000000000000000000000000), expected: i32(24) },
+ { input: i32Bits(0b00000010000000000000000000000000), expected: i32(25) },
+ { input: i32Bits(0b00000100000000000000000000000000), expected: i32(26) },
+ { input: i32Bits(0b00001000000000000000000000000000), expected: i32(27) },
+ { input: i32Bits(0b00010000000000000000000000000000), expected: i32(28) },
+ { input: i32Bits(0b00100000000000000000000000000000), expected: i32(29) },
+ { input: i32Bits(0b01000000000000000000000000000000), expected: i32(30) },
+
+ // 1's before trailing 1
+ { input: i32Bits(0b11111111111111111111111111111111), expected: i32(0) },
+ { input: i32Bits(0b11111111111111111111111111111110), expected: i32(1) },
+ { input: i32Bits(0b11111111111111111111111111111100), expected: i32(2) },
+ { input: i32Bits(0b11111111111111111111111111111000), expected: i32(3) },
+ { input: i32Bits(0b11111111111111111111111111110000), expected: i32(4) },
+ { input: i32Bits(0b11111111111111111111111111100000), expected: i32(5) },
+ { input: i32Bits(0b11111111111111111111111111000000), expected: i32(6) },
+ { input: i32Bits(0b11111111111111111111111110000000), expected: i32(7) },
+ { input: i32Bits(0b11111111111111111111111100000000), expected: i32(8) },
+ { input: i32Bits(0b11111111111111111111111000000000), expected: i32(9) },
+ { input: i32Bits(0b11111111111111111111110000000000), expected: i32(10) },
+ { input: i32Bits(0b11111111111111111111100000000000), expected: i32(11) },
+ { input: i32Bits(0b11111111111111111111000000000000), expected: i32(12) },
+ { input: i32Bits(0b11111111111111111110000000000000), expected: i32(13) },
+ { input: i32Bits(0b11111111111111111100000000000000), expected: i32(14) },
+ { input: i32Bits(0b11111111111111111000000000000000), expected: i32(15) },
+ { input: i32Bits(0b11111111111111110000000000000000), expected: i32(16) },
+ { input: i32Bits(0b11111111111111100000000000000000), expected: i32(17) },
+ { input: i32Bits(0b11111111111111000000000000000000), expected: i32(18) },
+ { input: i32Bits(0b11111111111110000000000000000000), expected: i32(19) },
+ { input: i32Bits(0b11111111111100000000000000000000), expected: i32(20) },
+ { input: i32Bits(0b11111111111000000000000000000000), expected: i32(21) },
+ { input: i32Bits(0b11111111110000000000000000000000), expected: i32(22) },
+ { input: i32Bits(0b11111111100000000000000000000000), expected: i32(23) },
+ { input: i32Bits(0b11111111000000000000000000000000), expected: i32(24) },
+ { input: i32Bits(0b11111110000000000000000000000000), expected: i32(25) },
+ { input: i32Bits(0b11111100000000000000000000000000), expected: i32(26) },
+ { input: i32Bits(0b11111000000000000000000000000000), expected: i32(27) },
+ { input: i32Bits(0b11110000000000000000000000000000), expected: i32(28) },
+ { input: i32Bits(0b11100000000000000000000000000000), expected: i32(29) },
+ { input: i32Bits(0b11000000000000000000000000000000), expected: i32(30) },
+
+ // random before trailing 1
+ { input: i32Bits(0b11110000001111111101111010001111), expected: i32(0) },
+ { input: i32Bits(0b11011110111111100101110011110010), expected: i32(1) },
+ { input: i32Bits(0b11110111011011111111010000111100), expected: i32(2) },
+ { input: i32Bits(0b11010011011101111111010011101000), expected: i32(3) },
+ { input: i32Bits(0b11010111110111110001111110110000), expected: i32(4) },
+ { input: i32Bits(0b11111101111101111110101111100000), expected: i32(5) },
+ { input: i32Bits(0b11111001111011111001111011000000), expected: i32(6) },
+ { input: i32Bits(0b11001110110111110111111010000000), expected: i32(7) },
+ { input: i32Bits(0b11101111011111101110101100000000), expected: i32(8) },
+ { input: i32Bits(0b11111101111011111111111000000000), expected: i32(9) },
+ { input: i32Bits(0b10011111011101110110110000000000), expected: i32(10) },
+ { input: i32Bits(0b11111111101101111011100000000000), expected: i32(11) },
+ { input: i32Bits(0b11111011010110111011000000000000), expected: i32(12) },
+ { input: i32Bits(0b00111101010000111010000000000000), expected: i32(13) },
+ { input: i32Bits(0b11111011110001101100000000000000), expected: i32(14) },
+ { input: i32Bits(0b10111111010111111000000000000000), expected: i32(15) },
+ { input: i32Bits(0b11011101111010110000000000000000), expected: i32(16) },
+ { input: i32Bits(0b01110100110110100000000000000000), expected: i32(17) },
+ { input: i32Bits(0b11100111001011000000000000000000), expected: i32(18) },
+ { input: i32Bits(0b11111001110110000000000000000000), expected: i32(19) },
+ { input: i32Bits(0b00110100100100000000000000000000), expected: i32(20) },
+ { input: i32Bits(0b11111010011000000000000000000000), expected: i32(21) },
+ { input: i32Bits(0b00000010110000000000000000000000), expected: i32(22) },
+ { input: i32Bits(0b11100111100000000000000000000000), expected: i32(23) },
+ { input: i32Bits(0b00101101000000000000000000000000), expected: i32(24) },
+ { input: i32Bits(0b11011010000000000000000000000000), expected: i32(25) },
+ { input: i32Bits(0b11010100000000000000000000000000), expected: i32(26) },
+ { input: i32Bits(0b10111000000000000000000000000000), expected: i32(27) },
+ { input: i32Bits(0b01110000000000000000000000000000), expected: i32(28) },
+ { input: i32Bits(0b10100000000000000000000000000000), expected: i32(29) },
+ ]);
+ });
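+
+// For reference (illustrative sketch, not used by the harness; helper name is hypothetical):
+// the expected values above can be reproduced in plain JS as
+//   const ctz = (x: number): number => ((x >>> 0) === 0 ? 32 : 31 - Math.clz32(x & -x));
+// e.g. ctz(0b1000) === 3 and ctz(0) === 32.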
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/cross.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/cross.spec.ts
new file mode 100644
index 0000000000..2b0b3e58ce
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/cross.spec.ts
@@ -0,0 +1,113 @@
+export const description = `
+Execution tests for the 'cross' builtin function
+
+T is AbstractFloat, f32, or f16
+@const fn cross(e1: vec3<T> ,e2: vec3<T>) -> vec3<T>
+Returns the cross product of e1 and e2.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeAbstractFloat, TypeF16, TypeF32, TypeVec } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { sparseVectorF64Range, vectorF16Range, vectorF32Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, onlyConstInputSource, run } from '../../expression.js';
+
+import { abstractBuiltin, builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('cross', {
+ f32_const: () => {
+ return FP.f32.generateVectorPairToVectorCases(
+ vectorF32Range(3),
+ vectorF32Range(3),
+ 'finite',
+ FP.f32.crossInterval
+ );
+ },
+ f32_non_const: () => {
+ return FP.f32.generateVectorPairToVectorCases(
+ vectorF32Range(3),
+ vectorF32Range(3),
+ 'unfiltered',
+ FP.f32.crossInterval
+ );
+ },
+ f16_const: () => {
+ return FP.f16.generateVectorPairToVectorCases(
+ vectorF16Range(3),
+ vectorF16Range(3),
+ 'finite',
+ FP.f16.crossInterval
+ );
+ },
+ f16_non_const: () => {
+ return FP.f16.generateVectorPairToVectorCases(
+ vectorF16Range(3),
+ vectorF16Range(3),
+ 'unfiltered',
+ FP.f16.crossInterval
+ );
+ },
+ abstract: () => {
+ return FP.abstract.generateVectorPairToVectorCases(
+ sparseVectorF64Range(3),
+ sparseVectorF64Range(3),
+ 'finite',
+ FP.abstract.crossInterval
+ );
+ },
+});
+
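+// For reference (illustrative only): crossInterval brackets the componentwise formula
+//   cross(a, b) = [a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x]
+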
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u => u.combine('inputSource', onlyConstInputSource))
+ .fn(async t => {
+ const cases = await d.get('abstract');
+ await run(
+ t,
+ abstractBuiltin('cross'),
+ [TypeVec(3, TypeAbstractFloat), TypeVec(3, TypeAbstractFloat)],
+ TypeVec(3, TypeAbstractFloat),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f32_const' : 'f32_non_const');
+ await run(
+ t,
+ builtin('cross'),
+ [TypeVec(3, TypeF32), TypeVec(3, TypeF32)],
+ TypeVec(3, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f16_const' : 'f16_non_const');
+ await run(
+ t,
+ builtin('cross'),
+ [TypeVec(3, TypeF16), TypeVec(3, TypeF16)],
+ TypeVec(3, TypeF16),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/degrees.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/degrees.spec.ts
new file mode 100644
index 0000000000..f82153ffca
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/degrees.spec.ts
@@ -0,0 +1,95 @@
+export const description = `
+Execution tests for the 'degrees' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn degrees(e1: T ) -> T
+Converts radians to degrees, approximating e1 × 180 ÷ π. Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeAbstractFloat, TypeF16, TypeF32 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullF16Range, fullF32Range, fullF64Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, onlyConstInputSource, run } from '../../expression.js';
+
+import { abstractBuiltin, builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('degrees', {
+ f32_const: () => {
+ return FP.f32.generateScalarToIntervalCases(fullF32Range(), 'finite', FP.f32.degreesInterval);
+ },
+ f32_non_const: () => {
+ return FP.f32.generateScalarToIntervalCases(
+ fullF32Range(),
+ 'unfiltered',
+ FP.f32.degreesInterval
+ );
+ },
+ f16_const: () => {
+ return FP.f16.generateScalarToIntervalCases(fullF16Range(), 'finite', FP.f16.degreesInterval);
+ },
+ f16_non_const: () => {
+ return FP.f16.generateScalarToIntervalCases(
+ fullF16Range(),
+ 'unfiltered',
+ FP.f16.degreesInterval
+ );
+ },
+ abstract: () => {
+ return FP.abstract.generateScalarToIntervalCases(
+ fullF64Range(),
+ 'finite',
+ FP.abstract.degreesInterval
+ );
+ },
+});
+
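+// For reference (illustrative only): degreesInterval accepts results near x * 180 / Math.PI,
+// e.g. degrees(Math.PI) ≈ 180 and degrees(Math.PI / 2) ≈ 90.
+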
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u
+ .combine('inputSource', onlyConstInputSource)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('abstract');
+ await run(
+ t,
+ abstractBuiltin('degrees'),
+ [TypeAbstractFloat],
+ TypeAbstractFloat,
+ t.params,
+ cases
+ );
+ });
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f32_const' : 'f32_non_const');
+ await run(t, builtin('degrees'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f16_const' : 'f16_non_const');
+ await run(t, builtin('degrees'), [TypeF16], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/determinant.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/determinant.spec.ts
new file mode 100644
index 0000000000..f08f4f0b6b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/determinant.spec.ts
@@ -0,0 +1,137 @@
+export const description = `
+Execution tests for the 'determinant' builtin function
+
+T is AbstractFloat, f32, or f16
+@const fn determinant(e: matCxC<T> ) -> T
+Returns the determinant of e.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16, TypeMat } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Accuracy for determinant is only defined when the elements e are integers with
+// |e| < quadroot(2**21) [~38], due to the computational complexity of calculating
+// the general solution for 4x4, so custom matrices are used.
+//
+// Note: For 2x2 and 3x3 the limits are the square root and cube root instead of the
+// quadroot, but the tighter 4x4 limit is used for all cases for simplicity.
+const kDeterminantValues = [-38, -10, -5, -1, 0, 1, 5, 10, 38];
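+// (2**21 = 2_097_152 and 2_097_152 ** 0.25 ≈ 38.05, which is where the ±38 bound above comes from.)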
+
+const kDeterminantMatrixValues = {
+ 2: kDeterminantValues.map((f, idx) => [
+ [idx % 4 === 0 ? f : idx, idx % 4 === 1 ? f : -idx],
+ [idx % 4 === 2 ? f : -idx, idx % 4 === 3 ? f : idx],
+ ]),
+ 3: kDeterminantValues.map((f, idx) => [
+ [idx % 9 === 0 ? f : idx, idx % 9 === 1 ? f : -idx, idx % 9 === 2 ? f : idx],
+ [idx % 9 === 3 ? f : -idx, idx % 9 === 4 ? f : idx, idx % 9 === 5 ? f : -idx],
+ [idx % 9 === 6 ? f : idx, idx % 9 === 7 ? f : -idx, idx % 9 === 8 ? f : idx],
+ ]),
+ 4: kDeterminantValues.map((f, idx) => [
+ [
+ idx % 16 === 0 ? f : idx,
+ idx % 16 === 1 ? f : -idx,
+ idx % 16 === 2 ? f : idx,
+ idx % 16 === 3 ? f : -idx,
+ ],
+ [
+ idx % 16 === 4 ? f : -idx,
+ idx % 16 === 5 ? f : idx,
+ idx % 16 === 6 ? f : -idx,
+ idx % 16 === 7 ? f : idx,
+ ],
+ [
+ idx % 16 === 8 ? f : idx,
+ idx % 16 === 9 ? f : -idx,
+ idx % 16 === 10 ? f : idx,
+ idx % 16 === 11 ? f : -idx,
+ ],
+ [
+ idx % 16 === 12 ? f : -idx,
+ idx % 16 === 13 ? f : idx,
+ idx % 16 === 14 ? f : -idx,
+ idx % 16 === 15 ? f : idx,
+ ],
+ ]),
+};
+
+// Cases: f32_matDxD_[non_]const
+const f32_cases = ([2, 3, 4] as const)
+ .flatMap(dim =>
+ ([true, false] as const).map(nonConst => ({
+ [`f32_mat${dim}x${dim}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateMatrixToScalarCases(
+ kDeterminantMatrixValues[dim],
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f32.determinantInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+// Cases: f16_matDxD_[non_]const
+const f16_cases = ([2, 3, 4] as const)
+ .flatMap(dim =>
+ ([true, false] as const).map(nonConst => ({
+ [`f16_mat${dim}x${dim}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateMatrixToScalarCases(
+ kDeterminantMatrixValues[dim],
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f16.determinantInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('determinant', {
+ ...f32_cases,
+ ...f16_cases,
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#matrix-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u => u.combine('inputSource', allInputSources).combine('dimension', [2, 3, 4] as const))
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#matrix-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const'
+ ? `f32_mat${dim}x${dim}_const`
+ : `f32_mat${dim}x${dim}_non_const`
+ );
+ await run(t, builtin('determinant'), [TypeMat(dim, dim, TypeF32)], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#matrix-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u => u.combine('inputSource', allInputSources).combine('dim', [2, 3, 4] as const))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const dim = t.params.dim;
+ const cases = await d.get(
+ t.params.inputSource === 'const'
+ ? `f16_mat${dim}x${dim}_const`
+ : `f16_mat${dim}x${dim}_non_const`
+ );
+ await run(t, builtin('determinant'), [TypeMat(dim, dim, TypeF16)], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/distance.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/distance.spec.ts
new file mode 100644
index 0000000000..13cddf6403
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/distance.spec.ts
@@ -0,0 +1,241 @@
+export const description = `
+Execution tests for the 'distance' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn distance(e1: T ,e2: T ) -> S
+Returns the distance between e1 and e2 (e.g. length(e1-e2)).
+
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16, TypeVec } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import {
+ fullF32Range,
+ fullF16Range,
+ sparseVectorF32Range,
+ sparseVectorF16Range,
+} from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Cases: f32_vecN_[non_]const
+const f32_vec_cases = ([2, 3, 4] as const)
+ .flatMap(n =>
+ ([true, false] as const).map(nonConst => ({
+ [`f32_vec${n}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateVectorPairToIntervalCases(
+ sparseVectorF32Range(n),
+ sparseVectorF32Range(n),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f32.distanceInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+// Cases: f16_vecN_[non_]const
+const f16_vec_cases = ([2, 3, 4] as const)
+ .flatMap(n =>
+ ([true, false] as const).map(nonConst => ({
+ [`f16_vec${n}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateVectorPairToIntervalCases(
+ sparseVectorF16Range(n),
+ sparseVectorF16Range(n),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f16.distanceInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('distance', {
+ f32_const: () => {
+ return FP.f32.generateScalarPairToIntervalCases(
+ fullF32Range(),
+ fullF32Range(),
+ 'finite',
+ FP.f32.distanceInterval
+ );
+ },
+ f32_non_const: () => {
+ return FP.f32.generateScalarPairToIntervalCases(
+ fullF32Range(),
+ fullF32Range(),
+ 'unfiltered',
+ FP.f32.distanceInterval
+ );
+ },
+ ...f32_vec_cases,
+ f16_const: () => {
+ return FP.f16.generateScalarPairToIntervalCases(
+ fullF16Range(),
+ fullF16Range(),
+ 'finite',
+ FP.f16.distanceInterval
+ );
+ },
+ f16_non_const: () => {
+ return FP.f16.generateScalarPairToIntervalCases(
+ fullF16Range(),
+ fullF16Range(),
+ 'unfiltered',
+ FP.f16.distanceInterval
+ );
+ },
+ ...f16_vec_cases,
+});
+
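+// For reference (illustrative only): for scalars distance(a, b) is |a - b|; for vectors it is
+// the Euclidean length of the difference, e.g. Math.hypot(...a.map((v, i) => v - b[i])).
+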
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f32_const' : 'f32_non_const');
+ await run(t, builtin('distance'), [TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f32_vec2')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f32 tests using vec2s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f32_vec2_const' : 'f32_vec2_non_const'
+ );
+ await run(
+ t,
+ builtin('distance'),
+ [TypeVec(2, TypeF32), TypeVec(2, TypeF32)],
+ TypeF32,
+ t.params,
+ cases
+ );
+ });
+
+g.test('f32_vec3')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f32 tests using vec3s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f32_vec3_const' : 'f32_vec3_non_const'
+ );
+ await run(
+ t,
+ builtin('distance'),
+ [TypeVec(3, TypeF32), TypeVec(3, TypeF32)],
+ TypeF32,
+ t.params,
+ cases
+ );
+ });
+
+g.test('f32_vec4')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f32 tests using vec4s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f32_vec4_const' : 'f32_vec4_non_const'
+ );
+ await run(
+ t,
+ builtin('distance'),
+ [TypeVec(4, TypeF32), TypeVec(4, TypeF32)],
+ TypeF32,
+ t.params,
+ cases
+ );
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f16_const' : 'f16_non_const');
+ await run(t, builtin('distance'), [TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
+
+g.test('f16_vec2')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f16 tests using vec2s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f16_vec2_const' : 'f16_vec2_non_const'
+ );
+ await run(
+ t,
+ builtin('distance'),
+ [TypeVec(2, TypeF16), TypeVec(2, TypeF16)],
+ TypeF16,
+ t.params,
+ cases
+ );
+ });
+
+g.test('f16_vec3')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f16 tests using vec3s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f16_vec3_const' : 'f16_vec3_non_const'
+ );
+ await run(
+ t,
+ builtin('distance'),
+ [TypeVec(3, TypeF16), TypeVec(3, TypeF16)],
+ TypeF16,
+ t.params,
+ cases
+ );
+ });
+
+g.test('f16_vec4')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f16 tests using vec4s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f16_vec4_const' : 'f16_vec4_non_const'
+ );
+ await run(
+ t,
+ builtin('distance'),
+ [TypeVec(4, TypeF16), TypeVec(4, TypeF16)],
+ TypeF16,
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dot.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dot.spec.ts
new file mode 100644
index 0000000000..2726546183
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dot.spec.ts
@@ -0,0 +1,182 @@
+export const description = `
+Execution tests for the 'dot' builtin function
+
+T is AbstractInt, AbstractFloat, i32, u32, f32, or f16
+@const fn dot(e1: vecN<T>,e2: vecN<T>) -> T
+Returns the dot product of e1 and e2.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16, TypeVec } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { sparseVectorF32Range, vectorF32Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Cases: [f32|f16]_vecN_[non_]const
+const cases = (['f32', 'f16'] as const)
+ .flatMap(trait =>
+ ([2, 3, 4] as const).flatMap(N =>
+ ([true, false] as const).map(nonConst => ({
+ [`${trait}_vec${N}_${nonConst ? 'non_const' : 'const'}`]: () => {
+          // vec3 and vec4 require calculating all possible permutations, so their runtime per
+          // test is much longer; only sparse vectors are used for them.
+ return FP[trait].generateVectorPairToIntervalCases(
+ N === 2 ? vectorF32Range(2) : sparseVectorF32Range(N),
+ N === 2 ? vectorF32Range(2) : sparseVectorF32Range(N),
+ nonConst ? 'unfiltered' : 'finite',
+ FP[trait].dotInterval
+ );
+ },
+ }))
+ )
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('dot', cases);
+
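+// For reference (illustrative only): dotInterval brackets the exact sum
+//   dot(a, b) = a.reduce((acc, v, i) => acc + v * b[i], 0)
+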
+g.test('abstract_int')
+ .specURL('https://www.w3.org/TR/WGSL/#vector-builtin-functions')
+ .desc(`abstract int tests`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .unimplemented();
+
+g.test('i32')
+ .specURL('https://www.w3.org/TR/WGSL/#vector-builtin-functions')
+ .desc(`i32 tests`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .unimplemented();
+
+g.test('u32')
+ .specURL('https://www.w3.org/TR/WGSL/#vector-builtin-functions')
+ .desc(`u32 tests`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .unimplemented();
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#vector-builtin-functions')
+  .desc(`abstract float tests`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .unimplemented();
+
+g.test('f32_vec2')
+ .specURL('https://www.w3.org/TR/WGSL/#vector-builtin-functions')
+ .desc(`f32 tests using vec2s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f32_vec2_const' : 'f32_vec2_non_const'
+ );
+ await run(
+ t,
+ builtin('dot'),
+ [TypeVec(2, TypeF32), TypeVec(2, TypeF32)],
+ TypeF32,
+ t.params,
+ cases
+ );
+ });
+
+g.test('f32_vec3')
+ .specURL('https://www.w3.org/TR/WGSL/#vector-builtin-functions')
+ .desc(`f32 tests using vec3s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f32_vec3_const' : 'f32_vec3_non_const'
+ );
+ await run(
+ t,
+ builtin('dot'),
+ [TypeVec(3, TypeF32), TypeVec(3, TypeF32)],
+ TypeF32,
+ t.params,
+ cases
+ );
+ });
+
+g.test('f32_vec4')
+ .specURL('https://www.w3.org/TR/WGSL/#vector-builtin-functions')
+ .desc(`f32 tests using vec4s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f32_vec4_const' : 'f32_vec4_non_const'
+ );
+ await run(
+ t,
+ builtin('dot'),
+ [TypeVec(4, TypeF32), TypeVec(4, TypeF32)],
+ TypeF32,
+ t.params,
+ cases
+ );
+ });
+
+g.test('f16_vec2')
+ .specURL('https://www.w3.org/TR/WGSL/#vector-builtin-functions')
+ .desc(`f16 tests using vec2s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f16_vec2_const' : 'f16_vec2_non_const'
+ );
+ await run(
+ t,
+ builtin('dot'),
+ [TypeVec(2, TypeF16), TypeVec(2, TypeF16)],
+ TypeF16,
+ t.params,
+ cases
+ );
+ });
+
+g.test('f16_vec3')
+ .specURL('https://www.w3.org/TR/WGSL/#vector-builtin-functions')
+ .desc(`f16 tests using vec3s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f16_vec3_const' : 'f16_vec3_non_const'
+ );
+ await run(
+ t,
+ builtin('dot'),
+ [TypeVec(3, TypeF16), TypeVec(3, TypeF16)],
+ TypeF16,
+ t.params,
+ cases
+ );
+ });
+
+g.test('f16_vec4')
+ .specURL('https://www.w3.org/TR/WGSL/#vector-builtin-functions')
+ .desc(`f16 tests using vec4s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f16_vec4_const' : 'f16_vec4_non_const'
+ );
+ await run(
+ t,
+ builtin('dot'),
+ [TypeVec(4, TypeF16), TypeVec(4, TypeF16)],
+ TypeF16,
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dpdx.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dpdx.spec.ts
new file mode 100644
index 0000000000..287a51c699
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dpdx.spec.ts
@@ -0,0 +1,23 @@
+export const description = `
+Execution tests for the 'dpdx' builtin function
+
+T is f32 or vecN<f32>
+fn dpdx(e:T) -> T
+Partial derivative of e with respect to window x coordinates.
+The result is the same as either dpdxFine(e) or dpdxCoarse(e).
+
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { allInputSources } from '../../expression.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#derivative-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dpdxCoarse.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dpdxCoarse.spec.ts
new file mode 100644
index 0000000000..67a75bb010
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dpdxCoarse.spec.ts
@@ -0,0 +1,22 @@
+export const description = `
+Execution tests for the 'dpdxCoarse' builtin function
+
+T is f32 or vecN<f32>
+fn dpdxCoarse(e:T) ->T
+Returns the partial derivative of e with respect to window x coordinates using local differences.
+This may result in fewer unique positions than dpdxFine(e).
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { allInputSources } from '../../expression.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#derivative-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dpdxFine.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dpdxFine.spec.ts
new file mode 100644
index 0000000000..91d65b990b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dpdxFine.spec.ts
@@ -0,0 +1,21 @@
+export const description = `
+Execution tests for the 'dpdxFine' builtin function
+
+T is f32 or vecN<f32>
+fn dpdxFine(e:T) ->T
+Returns the partial derivative of e with respect to window x coordinates.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { allInputSources } from '../../expression.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#derivative-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dpdy.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dpdy.spec.ts
new file mode 100644
index 0000000000..0cd9cafdb9
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dpdy.spec.ts
@@ -0,0 +1,22 @@
+export const description = `
+Execution tests for the 'dpdy' builtin function
+
+T is f32 or vecN<f32>
+fn dpdy(e:T) ->T
+Partial derivative of e with respect to window y coordinates.
+The result is the same as either dpdyFine(e) or dpdyCoarse(e).
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { allInputSources } from '../../expression.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#derivative-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dpdyCoarse.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dpdyCoarse.spec.ts
new file mode 100644
index 0000000000..f06869fdc2
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dpdyCoarse.spec.ts
@@ -0,0 +1,22 @@
+export const description = `
+Execution tests for the 'dpdyCoarse' builtin function
+
+T is f32 or vecN<f32>
+fn dpdyCoarse(e:T) ->T
+Returns the partial derivative of e with respect to window y coordinates using local differences.
+This may result in fewer unique positions than dpdyFine(e).
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { allInputSources } from '../../expression.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#derivative-builtin-functions')
+  .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dpdyFine.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dpdyFine.spec.ts
new file mode 100644
index 0000000000..e09761de95
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/dpdyFine.spec.ts
@@ -0,0 +1,21 @@
+export const description = `
+Execution tests for the 'dpdyFine' builtin function
+
+T is f32 or vecN<f32>
+fn dpdyFine(e:T) ->T
+Returns the partial derivative of e with respect to window y coordinates.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { allInputSources } from '../../expression.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#derivative-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/exp.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/exp.spec.ts
new file mode 100644
index 0000000000..8b1ced3cab
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/exp.spec.ts
@@ -0,0 +1,90 @@
+export const description = `
+Execution tests for the 'exp' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn exp(e1: T ) -> T
+Returns the natural exponentiation of e1 (e.g. e^e1). Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { kValue } from '../../../../../util/constants.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { biasedRange, linearRange } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// floor(ln(max f32 value)) = 88, so exp(88) will be within range of a f32, but exp(89) will not
+// floor(ln(max f64 value)) = 709, so exp(709) can be handled by the testing framework, but exp(710) will misbehave
+const f32_inputs = [
+ 0, // Returns 1 by definition
+ -89, // Returns subnormal value
+ kValue.f32.negative.min, // Closest to returning 0 as possible
+ ...biasedRange(kValue.f32.negative.max, -88, 100),
+ ...biasedRange(kValue.f32.positive.min, 88, 100),
+ ...linearRange(89, 709, 10), // Overflows f32, but not f64
+];
+
+// floor(ln(max f16 value)) = 11, so exp(11) will be within range of a f16, but exp(12) will not
+const f16_inputs = [
+ 0, // Returns 1 by definition
+ -12, // Returns subnormal value
+ kValue.f16.negative.min, // Closest to returning 0 as possible
+ ...biasedRange(kValue.f16.negative.max, -11, 100),
+ ...biasedRange(kValue.f16.positive.min, 11, 100),
+ ...linearRange(12, 709, 10), // Overflows f16, but not f64
+];
+
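+// Sanity check for the cut-offs above (illustrative): Math.log(3.40282346e38) ≈ 88.72 and
+// Math.log(65504) ≈ 11.09, so 88 and 11 are the largest integer inputs whose exp stays finite.
+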
+export const d = makeCaseCache('exp', {
+ f32_const: () => {
+ return FP.f32.generateScalarToIntervalCases(f32_inputs, 'finite', FP.f32.expInterval);
+ },
+ f32_non_const: () => {
+ return FP.f32.generateScalarToIntervalCases(f32_inputs, 'unfiltered', FP.f32.expInterval);
+ },
+ f16_const: () => {
+ return FP.f16.generateScalarToIntervalCases(f16_inputs, 'finite', FP.f16.expInterval);
+ },
+ f16_non_const: () => {
+ return FP.f16.generateScalarToIntervalCases(f16_inputs, 'unfiltered', FP.f16.expInterval);
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f32_const' : 'f32_non_const');
+ await run(t, builtin('exp'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f16_const' : 'f16_non_const');
+ await run(t, builtin('exp'), [TypeF16], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/exp2.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/exp2.spec.ts
new file mode 100644
index 0000000000..67e123cb30
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/exp2.spec.ts
@@ -0,0 +1,90 @@
+export const description = `
+Execution tests for the 'exp2' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn exp2(e: T ) -> T
+Returns 2 raised to the power e (e.g. 2^e). Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { kValue } from '../../../../../util/constants.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { biasedRange, linearRange } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// floor(log2(max f32 value)) = 127, so exp2(127) will be within range of a f32, but exp2(128) will not
+// floor(log2(max f64 value)) = 1023, so exp2(1023) can be handled by the testing framework, but exp2(1024) will misbehave
+const f32_inputs = [
+ 0, // Returns 1 by definition
+ -128, // Returns subnormal value
+ kValue.f32.negative.min, // Closest to returning 0 as possible
+ ...biasedRange(kValue.f32.negative.max, -127, 100),
+ ...biasedRange(kValue.f32.positive.min, 127, 100),
+ ...linearRange(128, 1023, 10), // Overflows f32, but not f64
+];
+
+// floor(log2(max f16 value)) = 15, so exp2(15) will be within range of a f16, but exp2(16) will not
+const f16_inputs = [
+ 0, // Returns 1 by definition
+ -16, // Returns subnormal value
+ kValue.f16.negative.min, // Closest to returning 0 as possible
+ ...biasedRange(kValue.f16.negative.max, -15, 100),
+ ...biasedRange(kValue.f16.positive.min, 15, 100),
+ ...linearRange(16, 1023, 10), // Overflows f16, but not f64
+];
+
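+// Sanity check for the cut-offs above (illustrative): Math.log2(3.40282346e38) is just under
+// 128 and Math.log2(65504) ≈ 15.999, so 127 and 15 are the largest integer inputs whose exp2
+// stays finite.
+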
+export const d = makeCaseCache('exp2', {
+ f32_const: () => {
+ return FP.f32.generateScalarToIntervalCases(f32_inputs, 'finite', FP.f32.exp2Interval);
+ },
+ f32_non_const: () => {
+ return FP.f32.generateScalarToIntervalCases(f32_inputs, 'unfiltered', FP.f32.exp2Interval);
+ },
+ f16_const: () => {
+ return FP.f16.generateScalarToIntervalCases(f16_inputs, 'finite', FP.f16.exp2Interval);
+ },
+ f16_non_const: () => {
+ return FP.f16.generateScalarToIntervalCases(f16_inputs, 'unfiltered', FP.f16.exp2Interval);
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f32_const' : 'f32_non_const');
+ await run(t, builtin('exp2'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f16_const' : 'f16_non_const');
+ await run(t, builtin('exp2'), [TypeF16], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/extractBits.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/extractBits.spec.ts
new file mode 100644
index 0000000000..d535bf5d74
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/extractBits.spec.ts
@@ -0,0 +1,337 @@
+export const description = `
+Execution tests for the 'extractBits' builtin function
+
+T is u32 or vecN<u32>
+@const fn extractBits(e: T, offset: u32, count: u32) -> T
+Reads bits from an integer, without sign extension.
+
+When T is a scalar type, then:
+ w is the bit width of T
+ o = min(offset,w)
+ c = min(count, w - o)
+
+The result is 0 if c is 0.
+Otherwise, bits 0..c-1 of the result are copied from bits o..o+c-1 of e.
+Other bits of the result are 0.
+Component-wise when T is a vector.
+
+
+T is i32 or vecN<i32>
+@const fn extractBits(e: T, offset: u32, count: u32) -> T
+Reads bits from an integer, with sign extension.
+
+When T is a scalar type, then:
+ w is the bit width of T
+ o = min(offset,w)
+ c = min(count, w - o)
+
+The result is 0 if c is 0.
+Otherwise, bits 0..c-1 of the result are copied from bits o..o+c-1 of e.
+Other bits of the result are the same as bit c-1 of the result.
+Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import {
+ i32Bits,
+ TypeI32,
+ u32,
+ TypeU32,
+ u32Bits,
+ vec2,
+ vec3,
+ vec4,
+ TypeVec,
+} from '../../../../../util/conversion.js';
+import { allInputSources, Config, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
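+// Reference semantics for the u32 overload (illustrative sketch, not used by the harness;
+// helper name is hypothetical):
+//   const extractBitsU32 = (e: number, offset: number, count: number): number => {
+//     const o = Math.min(offset, 32);
+//     const c = Math.min(count, 32 - o);
+//     if (c === 0) return 0;
+//     return c === 32 ? e >>> 0 : ((e >>> o) & ((1 << c) - 1)) >>> 0;
+//   };
+// The i32 overload behaves the same, except the result is sign-extended from bit c-1.
+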
+g.test('u32')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`u32 tests`)
+ .params(u => u.combine('inputSource', allInputSources).combine('width', [1, 2, 3, 4]))
+ .fn(async t => {
+ const cfg: Config = t.params;
+
+ const T = t.params.width === 1 ? TypeU32 : TypeVec(t.params.width, TypeU32);
+
+ const V = (x: number, y?: number, z?: number, w?: number) => {
+ y = y === undefined ? x : y;
+ z = z === undefined ? x : z;
+ w = w === undefined ? x : w;
+
+ switch (t.params.width) {
+ case 1:
+ return u32Bits(x);
+ case 2:
+ return vec2(u32Bits(x), u32Bits(y));
+ case 3:
+ return vec3(u32Bits(x), u32Bits(y), u32Bits(z));
+ default:
+ return vec4(u32Bits(x), u32Bits(y), u32Bits(z), u32Bits(w));
+ }
+ };
+
+ const all_1 = V(0b11111111111111111111111111111111);
+ const all_0 = V(0b00000000000000000000000000000000);
+ const low_1 = V(0b00000000000000000000000000000001);
+ const high_1 = V(0b10000000000000000000000000000000);
+ const pattern = V(
+ 0b00000000000111011100000000000000,
+ 0b11111111111000000011111111111111,
+ 0b00000000010101010101000000000000,
+ 0b00000000001010101010100000000000
+ );
+
+ const cases = [
+ { input: [all_0, u32(0), u32(32)], expected: all_0 },
+ { input: [all_0, u32(1), u32(10)], expected: all_0 },
+ { input: [all_0, u32(2), u32(5)], expected: all_0 },
+ { input: [all_0, u32(0), u32(1)], expected: all_0 },
+ { input: [all_0, u32(31), u32(1)], expected: all_0 },
+
+ { input: [all_1, u32(0), u32(32)], expected: all_1 },
+ {
+ input: [all_1, u32(1), u32(10)],
+ expected: V(0b00000000000000000000001111111111),
+ },
+ {
+ input: [all_1, u32(2), u32(5)],
+ expected: V(0b00000000000000000000000000011111),
+ },
+ { input: [all_1, u32(0), u32(1)], expected: low_1 },
+ { input: [all_1, u32(31), u32(1)], expected: low_1 },
+
+ // Patterns
+ { input: [pattern, u32(0), u32(32)], expected: pattern },
+ {
+ input: [pattern, u32(1), u32(31)],
+ expected: V(
+ 0b00000000000011101110000000000000,
+ 0b01111111111100000001111111111111,
+ 0b00000000001010101010100000000000,
+ 0b00000000000101010101010000000000
+ ),
+ },
+ {
+ input: [pattern, u32(14), u32(18)],
+ expected: V(
+ 0b00000000000000000000000001110111,
+ 0b00000000000000111111111110000000,
+ 0b00000000000000000000000101010101,
+ 0b00000000000000000000000010101010
+ ),
+ },
+ {
+ input: [pattern, u32(14), u32(7)],
+ expected: V(
+ 0b00000000000000000000000001110111,
+ 0b00000000000000000000000000000000,
+ 0b00000000000000000000000001010101,
+ 0b00000000000000000000000000101010
+ ),
+ },
+ {
+ input: [pattern, u32(14), u32(4)],
+ expected: V(
+ 0b00000000000000000000000000000111,
+ 0b00000000000000000000000000000000,
+ 0b00000000000000000000000000000101,
+ 0b00000000000000000000000000001010
+ ),
+ },
+ {
+ input: [pattern, u32(14), u32(3)],
+ expected: V(
+ 0b00000000000000000000000000000111,
+ 0b00000000000000000000000000000000,
+ 0b00000000000000000000000000000101,
+ 0b00000000000000000000000000000010
+ ),
+ },
+ {
+ input: [pattern, u32(18), u32(3)],
+ expected: V(
+ 0b00000000000000000000000000000111,
+ 0b00000000000000000000000000000000,
+ 0b00000000000000000000000000000101,
+ 0b00000000000000000000000000000010
+ ),
+ },
+ { input: [low_1, u32(0), u32(1)], expected: low_1 },
+ { input: [high_1, u32(31), u32(1)], expected: low_1 },
+
+ // Zero count
+ { input: [all_1, u32(0), u32(0)], expected: all_0 },
+ { input: [all_0, u32(0), u32(0)], expected: all_0 },
+ { input: [low_1, u32(0), u32(0)], expected: all_0 },
+ { input: [high_1, u32(31), u32(0)], expected: all_0 },
+ { input: [pattern, u32(0), u32(0)], expected: all_0 },
+ ];
+
+ if (t.params.inputSource !== 'const') {
+ cases.push(
+ ...[
+ // End overflow
+ { input: [low_1, u32(0), u32(99)], expected: low_1 },
+ { input: [high_1, u32(31), u32(99)], expected: low_1 },
+ { input: [pattern, u32(0), u32(99)], expected: pattern },
+ {
+ input: [pattern, u32(14), u32(99)],
+ expected: V(
+ 0b00000000000000000000000001110111,
+ 0b00000000000000111111111110000000,
+ 0b00000000000000000000000101010101,
+ 0b00000000000000000000000010101010
+ ),
+ },
+ ]
+ );
+ }
+
+ await run(t, builtin('extractBits'), [T, TypeU32, TypeU32], T, cfg, cases);
+ });
+
+g.test('i32')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`i32 tests`)
+ .params(u => u.combine('inputSource', allInputSources).combine('width', [1, 2, 3, 4]))
+ .fn(async t => {
+ const cfg: Config = t.params;
+
+ const T = t.params.width === 1 ? TypeI32 : TypeVec(t.params.width, TypeI32);
+
+ const V = (x: number, y?: number, z?: number, w?: number) => {
+ y = y === undefined ? x : y;
+ z = z === undefined ? x : z;
+ w = w === undefined ? x : w;
+
+ switch (t.params.width) {
+ case 1:
+ return i32Bits(x);
+ case 2:
+ return vec2(i32Bits(x), i32Bits(y));
+ case 3:
+ return vec3(i32Bits(x), i32Bits(y), i32Bits(z));
+ default:
+ return vec4(i32Bits(x), i32Bits(y), i32Bits(z), i32Bits(w));
+ }
+ };
+
+ const all_1 = V(0b11111111111111111111111111111111);
+ const all_0 = V(0b00000000000000000000000000000000);
+ const low_1 = V(0b00000000000000000000000000000001);
+ const high_1 = V(0b10000000000000000000000000000000);
+ const pattern = V(
+ 0b00000000000111011100000000000000,
+ 0b11111111111000000011111111111111,
+ 0b00000000010101010101000000000000,
+ 0b00000000001010101010100000000000
+ );
+
+ const cases = [
+ { input: [all_0, u32(0), u32(32)], expected: all_0 },
+ { input: [all_0, u32(1), u32(10)], expected: all_0 },
+ { input: [all_0, u32(2), u32(5)], expected: all_0 },
+ { input: [all_0, u32(0), u32(1)], expected: all_0 },
+ { input: [all_0, u32(31), u32(1)], expected: all_0 },
+
+ { input: [all_1, u32(0), u32(32)], expected: all_1 },
+ { input: [all_1, u32(1), u32(10)], expected: all_1 },
+ { input: [all_1, u32(2), u32(5)], expected: all_1 },
+ { input: [all_1, u32(0), u32(1)], expected: all_1 },
+ { input: [all_1, u32(31), u32(1)], expected: all_1 },
+
+ // Patterns
+ { input: [pattern, u32(0), u32(32)], expected: pattern },
+ {
+ input: [pattern, u32(1), u32(31)],
+ expected: V(
+ 0b00000000000011101110000000000000,
+ 0b11111111111100000001111111111111,
+ 0b00000000001010101010100000000000,
+ 0b00000000000101010101010000000000
+ ),
+ },
+ {
+ input: [pattern, u32(14), u32(18)],
+ expected: V(
+ 0b00000000000000000000000001110111,
+ 0b11111111111111111111111110000000,
+ 0b00000000000000000000000101010101,
+ 0b00000000000000000000000010101010
+ ),
+ },
+ {
+ input: [pattern, u32(14), u32(7)],
+ expected: V(
+ 0b11111111111111111111111111110111,
+ 0b00000000000000000000000000000000,
+ 0b11111111111111111111111111010101,
+ 0b00000000000000000000000000101010
+ ),
+ },
+ {
+ input: [pattern, u32(14), u32(4)],
+ expected: V(
+ 0b00000000000000000000000000000111,
+ 0b00000000000000000000000000000000,
+ 0b00000000000000000000000000000101,
+ 0b11111111111111111111111111111010
+ ),
+ },
+ {
+ input: [pattern, u32(14), u32(3)],
+ expected: V(
+ 0b11111111111111111111111111111111,
+ 0b00000000000000000000000000000000,
+ 0b11111111111111111111111111111101,
+ 0b00000000000000000000000000000010
+ ),
+ },
+ {
+ input: [pattern, u32(18), u32(3)],
+ expected: V(
+ 0b11111111111111111111111111111111,
+ 0b00000000000000000000000000000000,
+ 0b11111111111111111111111111111101,
+ 0b00000000000000000000000000000010
+ ),
+ },
+ { input: [low_1, u32(0), u32(1)], expected: all_1 },
+ { input: [high_1, u32(31), u32(1)], expected: all_1 },
+
+ // Zero count
+ { input: [all_1, u32(0), u32(0)], expected: all_0 },
+ { input: [all_0, u32(0), u32(0)], expected: all_0 },
+ { input: [low_1, u32(0), u32(0)], expected: all_0 },
+ { input: [high_1, u32(31), u32(0)], expected: all_0 },
+ { input: [pattern, u32(0), u32(0)], expected: all_0 },
+ ];
+
+ if (t.params.inputSource !== 'const') {
+ cases.push(
+ ...[
+ // End overflow
+ { input: [low_1, u32(0), u32(99)], expected: low_1 },
+ { input: [high_1, u32(31), u32(99)], expected: all_1 },
+ { input: [pattern, u32(0), u32(99)], expected: pattern },
+ {
+ input: [pattern, u32(14), u32(99)],
+ expected: V(
+ 0b00000000000000000000000001110111,
+ 0b11111111111111111111111110000000,
+ 0b00000000000000000000000101010101,
+ 0b00000000000000000000000010101010
+ ),
+ },
+ ]
+ );
+ }
+
+ await run(t, builtin('extractBits'), [T, TypeU32, TypeU32], T, cfg, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/faceForward.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/faceForward.spec.ts
new file mode 100644
index 0000000000..6b6794fb9f
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/faceForward.spec.ts
@@ -0,0 +1,256 @@
+export const description = `
+Execution tests for the 'faceForward' builtin function
+
+T is vecN<AbstractFloat>, vecN<f32>, or vecN<f16>
+@const fn faceForward(e1: T ,e2: T ,e3: T ) -> T
+Returns e1 if dot(e2,e3) is negative, and -e1 otherwise.
+`;
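+
+// Illustrative sketch of the rule under test, assumed from the description
+// above (`faceForwardRef` is a hypothetical name, not a CTS utility):
+//   const faceForwardRef = (e1: number[], e2: number[], e3: number[]) =>
+//     e2.reduce((acc, v, i) => acc + v * e3[i], 0) < 0 ? e1 : e1.map(v => -v);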
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { ROArrayArray } from '../../../../../../common/util/types.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { anyOf } from '../../../../../util/compare.js';
+import { toVector, TypeF32, TypeF16, TypeVec } from '../../../../../util/conversion.js';
+import { FP, FPKind, FPVector } from '../../../../../util/floating_point.js';
+import {
+ cartesianProduct,
+ sparseVectorF32Range,
+ sparseVectorF16Range,
+} from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, Case, IntervalFilter, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Using a bespoke implementation of make*Case and generate*Cases here
+// since faceForwardIntervals is the only builtin with the API signature
+// (vec, vec, vec) -> vec
+//
+// Additionally, faceForward has significant complexities around it, because
+// `dot` is calculated as part of its operation, but the result of dot isn't
+// used to calculate the builtin's result.
+
+/**
+ * @returns a Case for `faceForward`
+ * @param kind what kind of floating point numbers are being operated on
+ * @param x the `x` param for the case
+ * @param y the `y` param for the case
+ * @param z the `z` param for the case
+ * @param check what interval checking to apply
+ */
+function makeCase(
+ kind: FPKind,
+ x: readonly number[],
+ y: readonly number[],
+ z: readonly number[],
+ check: IntervalFilter
+): Case | undefined {
+ const fp = FP[kind];
+ x = x.map(fp.quantize);
+ y = y.map(fp.quantize);
+ z = z.map(fp.quantize);
+
+ const results = FP[kind].faceForwardIntervals(x, y, z);
+ if (check === 'finite' && results.some(r => r === undefined)) {
+ return undefined;
+ }
+
+ // Stripping the undefined results, since undefined is used to signal that an OOB
+ // could occur within the calculation that isn't reflected in the result
+ // intervals.
+ const define_results = results.filter((r): r is FPVector => r !== undefined);
+
+ return {
+ input: [
+ toVector(x, fp.scalarBuilder),
+ toVector(y, fp.scalarBuilder),
+ toVector(z, fp.scalarBuilder),
+ ],
+ expected: anyOf(...define_results),
+ };
+}
+
+/**
+ * @returns an array of Cases for `faceForward`
+ * @param kind what kind of floating point numbers are being operated on
+ * @param xs array of inputs to try for the `x` param
+ * @param ys array of inputs to try for the `y` param
+ * @param zs array of inputs to try for the `z` param
+ * @param check what interval checking to apply
+ */
+function generateCases(
+ kind: FPKind,
+ xs: ROArrayArray<number>,
+ ys: ROArrayArray<number>,
+ zs: ROArrayArray<number>,
+ check: IntervalFilter
+): Case[] {
+ // The shared generate*Cases helpers don't fit the (vec, vec, vec) -> vec
+ // signature, so the three input sets are combined with `cartesianProduct` directly
+ return cartesianProduct(xs, ys, zs)
+ .map(e => makeCase(kind, e[0], e[1], e[2], check))
+ .filter((c): c is Case => c !== undefined);
+}
+
+// Cases: f32_vecN_[non_]const
+const f32_vec_cases = ([2, 3, 4] as const)
+ .flatMap(n =>
+ ([true, false] as const).map(nonConst => ({
+ [`f32_vec${n}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return generateCases(
+ 'f32',
+ sparseVectorF32Range(n),
+ sparseVectorF32Range(n),
+ sparseVectorF32Range(n),
+ nonConst ? 'unfiltered' : 'finite'
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+// Cases: f16_vecN_[non_]const
+const f16_vec_cases = ([2, 3, 4] as const)
+ .flatMap(n =>
+ ([true, false] as const).map(nonConst => ({
+ [`f16_vec${n}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return generateCases(
+ 'f16',
+ sparseVectorF16Range(n),
+ sparseVectorF16Range(n),
+ sparseVectorF16Range(n),
+ nonConst ? 'unfiltered' : 'finite'
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('faceForward', {
+ ...f32_vec_cases,
+ ...f16_vec_cases,
+});
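+
+// The cache keys above ('f32_vec2_const', 'f32_vec2_non_const', ... ,
+// 'f16_vec4_non_const') are what the tests below request via d.get(),
+// selecting the const or non_const variant based on t.params.inputSource.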
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u => u.combine('inputSource', allInputSources).combine('vectorize', [2, 3, 4] as const))
+ .unimplemented();
+
+g.test('f32_vec2')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f32 tests using vec2s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f32_vec2_const' : 'f32_vec2_non_const'
+ );
+ await run(
+ t,
+ builtin('faceForward'),
+ [TypeVec(2, TypeF32), TypeVec(2, TypeF32), TypeVec(2, TypeF32)],
+ TypeVec(2, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f32_vec3')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f32 tests using vec3s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f32_vec3_const' : 'f32_vec3_non_const'
+ );
+ await run(
+ t,
+ builtin('faceForward'),
+ [TypeVec(3, TypeF32), TypeVec(3, TypeF32), TypeVec(3, TypeF32)],
+ TypeVec(3, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f32_vec4')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f32 tests using vec4s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f32_vec4_const' : 'f32_vec4_non_const'
+ );
+ await run(
+ t,
+ builtin('faceForward'),
+ [TypeVec(4, TypeF32), TypeVec(4, TypeF32), TypeVec(4, TypeF32)],
+ TypeVec(4, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f16_vec2')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f16 tests using vec2s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f16_vec2_const' : 'f16_vec2_non_const'
+ );
+ await run(
+ t,
+ builtin('faceForward'),
+ [TypeVec(2, TypeF16), TypeVec(2, TypeF16), TypeVec(2, TypeF16)],
+ TypeVec(2, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f16_vec3')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f16 tests using vec3s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f16_vec3_const' : 'f16_vec3_non_const'
+ );
+ await run(
+ t,
+ builtin('faceForward'),
+ [TypeVec(3, TypeF16), TypeVec(3, TypeF16), TypeVec(3, TypeF16)],
+ TypeVec(3, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f16_vec4')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f16 tests using vec4s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f16_vec4_const' : 'f16_vec4_non_const'
+ );
+ await run(
+ t,
+ builtin('faceForward'),
+ [TypeVec(4, TypeF16), TypeVec(4, TypeF16), TypeVec(4, TypeF16)],
+ TypeVec(4, TypeF16),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/firstLeadingBit.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/firstLeadingBit.spec.ts
new file mode 100644
index 0000000000..26216563cd
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/firstLeadingBit.spec.ts
@@ -0,0 +1,350 @@
+export const description = `
+Execution tests for the 'firstLeadingBit' builtin function
+
+T is u32 or vecN<u32>
+@const fn firstLeadingBit(e: T ) -> T
+For scalar T, the result is: T(-1) if e is zero.
+Otherwise the position of the most significant 1 bit in e.
+Component-wise when T is a vector.
+
+T is i32 or vecN<i32>
+@const fn firstLeadingBit(e: T ) -> T
+For scalar T, the result is: -1 if e is 0 or -1.
+Otherwise the position of the most significant bit in e that is different from e’s sign bit.
+Component-wise when T is a vector.
+`;
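+
+// Illustrative reference of the behaviour exercised below, assumed from the
+// description (hypothetical helpers, not CTS utilities):
+//   const firstLeadingBitU32 = (e: number) => (e === 0 ? -1 : 31 - Math.clz32(e));
+//   const firstLeadingBitI32 = (e: number) =>
+//     e === 0 || e === -1 ? -1 : 31 - Math.clz32(e < 0 ? ~e : e);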
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { i32, i32Bits, TypeI32, u32, TypeU32, u32Bits } from '../../../../../util/conversion.js';
+import { allInputSources, Config, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('u32')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`u32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cfg: Config = t.params;
+ await run(t, builtin('firstLeadingBit'), [TypeU32], TypeU32, cfg, [
+ // Zero
+ { input: u32Bits(0b00000000000000000000000000000000), expected: u32(-1) },
+
+ // One
+ { input: u32Bits(0b00000000000000000000000000000001), expected: u32(0) },
+
+ // 0's after leading 1
+ { input: u32Bits(0b00000000000000000000000000000010), expected: u32(1) },
+ { input: u32Bits(0b00000000000000000000000000000100), expected: u32(2) },
+ { input: u32Bits(0b00000000000000000000000000001000), expected: u32(3) },
+ { input: u32Bits(0b00000000000000000000000000010000), expected: u32(4) },
+ { input: u32Bits(0b00000000000000000000000000100000), expected: u32(5) },
+ { input: u32Bits(0b00000000000000000000000001000000), expected: u32(6) },
+ { input: u32Bits(0b00000000000000000000000010000000), expected: u32(7) },
+ { input: u32Bits(0b00000000000000000000000100000000), expected: u32(8) },
+ { input: u32Bits(0b00000000000000000000001000000000), expected: u32(9) },
+ { input: u32Bits(0b00000000000000000000010000000000), expected: u32(10) },
+ { input: u32Bits(0b00000000000000000000100000000000), expected: u32(11) },
+ { input: u32Bits(0b00000000000000000001000000000000), expected: u32(12) },
+ { input: u32Bits(0b00000000000000000010000000000000), expected: u32(13) },
+ { input: u32Bits(0b00000000000000000100000000000000), expected: u32(14) },
+ { input: u32Bits(0b00000000000000001000000000000000), expected: u32(15) },
+ { input: u32Bits(0b00000000000000010000000000000000), expected: u32(16) },
+ { input: u32Bits(0b00000000000000100000000000000000), expected: u32(17) },
+ { input: u32Bits(0b00000000000001000000000000000000), expected: u32(18) },
+ { input: u32Bits(0b00000000000010000000000000000000), expected: u32(19) },
+ { input: u32Bits(0b00000000000100000000000000000000), expected: u32(20) },
+ { input: u32Bits(0b00000000001000000000000000000000), expected: u32(21) },
+ { input: u32Bits(0b00000000010000000000000000000000), expected: u32(22) },
+ { input: u32Bits(0b00000000100000000000000000000000), expected: u32(23) },
+ { input: u32Bits(0b00000001000000000000000000000000), expected: u32(24) },
+ { input: u32Bits(0b00000010000000000000000000000000), expected: u32(25) },
+ { input: u32Bits(0b00000100000000000000000000000000), expected: u32(26) },
+ { input: u32Bits(0b00001000000000000000000000000000), expected: u32(27) },
+ { input: u32Bits(0b00010000000000000000000000000000), expected: u32(28) },
+ { input: u32Bits(0b00100000000000000000000000000000), expected: u32(29) },
+ { input: u32Bits(0b01000000000000000000000000000000), expected: u32(30) },
+ { input: u32Bits(0b10000000000000000000000000000000), expected: u32(31) },
+
+ // 1's after leading 1
+ { input: u32Bits(0b00000000000000000000000000000011), expected: u32(1) },
+ { input: u32Bits(0b00000000000000000000000000000111), expected: u32(2) },
+ { input: u32Bits(0b00000000000000000000000000001111), expected: u32(3) },
+ { input: u32Bits(0b00000000000000000000000000011111), expected: u32(4) },
+ { input: u32Bits(0b00000000000000000000000000111111), expected: u32(5) },
+ { input: u32Bits(0b00000000000000000000000001111111), expected: u32(6) },
+ { input: u32Bits(0b00000000000000000000000011111111), expected: u32(7) },
+ { input: u32Bits(0b00000000000000000000000111111111), expected: u32(8) },
+ { input: u32Bits(0b00000000000000000000001111111111), expected: u32(9) },
+ { input: u32Bits(0b00000000000000000000011111111111), expected: u32(10) },
+ { input: u32Bits(0b00000000000000000000111111111111), expected: u32(11) },
+ { input: u32Bits(0b00000000000000000001111111111111), expected: u32(12) },
+ { input: u32Bits(0b00000000000000000011111111111111), expected: u32(13) },
+ { input: u32Bits(0b00000000000000000111111111111111), expected: u32(14) },
+ { input: u32Bits(0b00000000000000001111111111111111), expected: u32(15) },
+ { input: u32Bits(0b00000000000000011111111111111111), expected: u32(16) },
+ { input: u32Bits(0b00000000000000111111111111111111), expected: u32(17) },
+ { input: u32Bits(0b00000000000001111111111111111111), expected: u32(18) },
+ { input: u32Bits(0b00000000000011111111111111111111), expected: u32(19) },
+ { input: u32Bits(0b00000000000111111111111111111111), expected: u32(20) },
+ { input: u32Bits(0b00000000001111111111111111111111), expected: u32(21) },
+ { input: u32Bits(0b00000000011111111111111111111111), expected: u32(22) },
+ { input: u32Bits(0b00000000111111111111111111111111), expected: u32(23) },
+ { input: u32Bits(0b00000001111111111111111111111111), expected: u32(24) },
+ { input: u32Bits(0b00000011111111111111111111111111), expected: u32(25) },
+ { input: u32Bits(0b00000111111111111111111111111111), expected: u32(26) },
+ { input: u32Bits(0b00001111111111111111111111111111), expected: u32(27) },
+ { input: u32Bits(0b00011111111111111111111111111111), expected: u32(28) },
+ { input: u32Bits(0b00111111111111111111111111111111), expected: u32(29) },
+ { input: u32Bits(0b01111111111111111111111111111111), expected: u32(30) },
+ { input: u32Bits(0b11111111111111111111111111111111), expected: u32(31) },
+
+ // random after leading 1
+ { input: u32Bits(0b00000000000000000000000000000110), expected: u32(2) },
+ { input: u32Bits(0b00000000000000000000000000001101), expected: u32(3) },
+ { input: u32Bits(0b00000000000000000000000000011101), expected: u32(4) },
+ { input: u32Bits(0b00000000000000000000000000111001), expected: u32(5) },
+ { input: u32Bits(0b00000000000000000000000001101111), expected: u32(6) },
+ { input: u32Bits(0b00000000000000000000000011111111), expected: u32(7) },
+ { input: u32Bits(0b00000000000000000000000111101111), expected: u32(8) },
+ { input: u32Bits(0b00000000000000000000001111111111), expected: u32(9) },
+ { input: u32Bits(0b00000000000000000000011111110001), expected: u32(10) },
+ { input: u32Bits(0b00000000000000000000111011011101), expected: u32(11) },
+ { input: u32Bits(0b00000000000000000001101101111111), expected: u32(12) },
+ { input: u32Bits(0b00000000000000000011111111011111), expected: u32(13) },
+ { input: u32Bits(0b00000000000000000101111001110101), expected: u32(14) },
+ { input: u32Bits(0b00000000000000001101111011110111), expected: u32(15) },
+ { input: u32Bits(0b00000000000000011111111111110011), expected: u32(16) },
+ { input: u32Bits(0b00000000000000111111111110111111), expected: u32(17) },
+ { input: u32Bits(0b00000000000001111111011111111111), expected: u32(18) },
+ { input: u32Bits(0b00000000000011111111111111111111), expected: u32(19) },
+ { input: u32Bits(0b00000000000111110101011110111111), expected: u32(20) },
+ { input: u32Bits(0b00000000001111101111111111110111), expected: u32(21) },
+ { input: u32Bits(0b00000000011111111111010000101111), expected: u32(22) },
+ { input: u32Bits(0b00000000111111111111001111111011), expected: u32(23) },
+ { input: u32Bits(0b00000001111111011111101111111111), expected: u32(24) },
+ { input: u32Bits(0b00000011101011111011110111111011), expected: u32(25) },
+ { input: u32Bits(0b00000111111110111111111111111111), expected: u32(26) },
+ { input: u32Bits(0b00001111000000011011011110111111), expected: u32(27) },
+ { input: u32Bits(0b00011110101111011111111111111111), expected: u32(28) },
+ { input: u32Bits(0b00110110111111100111111110111101), expected: u32(29) },
+ { input: u32Bits(0b01010111111101111111011111011111), expected: u32(30) },
+ { input: u32Bits(0b11100010011110101101101110101111), expected: u32(31) },
+ ]);
+ });
+
+g.test('i32')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`i32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cfg: Config = t.params;
+ await run(t, builtin('firstLeadingBit'), [TypeI32], TypeI32, cfg, [
+ // Zero
+ { input: i32Bits(0b00000000000000000000000000000000), expected: i32(-1) },
+
+ // Negative One
+ { input: i32Bits(0b11111111111111111111111111111111), expected: i32(-1) },
+
+ // One
+ { input: i32Bits(0b00000000000000000000000000000001), expected: i32(0) },
+
+ // Positive: 0's after leading 1
+ { input: i32Bits(0b00000000000000000000000000000010), expected: i32(1) },
+ { input: i32Bits(0b00000000000000000000000000000100), expected: i32(2) },
+ { input: i32Bits(0b00000000000000000000000000001000), expected: i32(3) },
+ { input: i32Bits(0b00000000000000000000000000010000), expected: i32(4) },
+ { input: i32Bits(0b00000000000000000000000000100000), expected: i32(5) },
+ { input: i32Bits(0b00000000000000000000000001000000), expected: i32(6) },
+ { input: i32Bits(0b00000000000000000000000010000000), expected: i32(7) },
+ { input: i32Bits(0b00000000000000000000000100000000), expected: i32(8) },
+ { input: i32Bits(0b00000000000000000000001000000000), expected: i32(9) },
+ { input: i32Bits(0b00000000000000000000010000000000), expected: i32(10) },
+ { input: i32Bits(0b00000000000000000000100000000000), expected: i32(11) },
+ { input: i32Bits(0b00000000000000000001000000000000), expected: i32(12) },
+ { input: i32Bits(0b00000000000000000010000000000000), expected: i32(13) },
+ { input: i32Bits(0b00000000000000000100000000000000), expected: i32(14) },
+ { input: i32Bits(0b00000000000000001000000000000000), expected: i32(15) },
+ { input: i32Bits(0b00000000000000010000000000000000), expected: i32(16) },
+ { input: i32Bits(0b00000000000000100000000000000000), expected: i32(17) },
+ { input: i32Bits(0b00000000000001000000000000000000), expected: i32(18) },
+ { input: i32Bits(0b00000000000010000000000000000000), expected: i32(19) },
+ { input: i32Bits(0b00000000000100000000000000000000), expected: i32(20) },
+ { input: i32Bits(0b00000000001000000000000000000000), expected: i32(21) },
+ { input: i32Bits(0b00000000010000000000000000000000), expected: i32(22) },
+ { input: i32Bits(0b00000000100000000000000000000000), expected: i32(23) },
+ { input: i32Bits(0b00000001000000000000000000000000), expected: i32(24) },
+ { input: i32Bits(0b00000010000000000000000000000000), expected: i32(25) },
+ { input: i32Bits(0b00000100000000000000000000000000), expected: i32(26) },
+ { input: i32Bits(0b00001000000000000000000000000000), expected: i32(27) },
+ { input: i32Bits(0b00010000000000000000000000000000), expected: i32(28) },
+ { input: i32Bits(0b00100000000000000000000000000000), expected: i32(29) },
+ { input: i32Bits(0b01000000000000000000000000000000), expected: i32(30) },
+
+ // Negative: 0's after leading 0
+ { input: i32Bits(0b11111111111111111111111111111110), expected: i32(0) },
+ { input: i32Bits(0b11111111111111111111111111111100), expected: i32(1) },
+ { input: i32Bits(0b11111111111111111111111111111000), expected: i32(2) },
+ { input: i32Bits(0b11111111111111111111111111110000), expected: i32(3) },
+ { input: i32Bits(0b11111111111111111111111111100000), expected: i32(4) },
+ { input: i32Bits(0b11111111111111111111111111000000), expected: i32(5) },
+ { input: i32Bits(0b11111111111111111111111110000000), expected: i32(6) },
+ { input: i32Bits(0b11111111111111111111111100000000), expected: i32(7) },
+ { input: i32Bits(0b11111111111111111111111000000000), expected: i32(8) },
+ { input: i32Bits(0b11111111111111111111110000000000), expected: i32(9) },
+ { input: i32Bits(0b11111111111111111111100000000000), expected: i32(10) },
+ { input: i32Bits(0b11111111111111111111000000000000), expected: i32(11) },
+ { input: i32Bits(0b11111111111111111110000000000000), expected: i32(12) },
+ { input: i32Bits(0b11111111111111111100000000000000), expected: i32(13) },
+ { input: i32Bits(0b11111111111111111000000000000000), expected: i32(14) },
+ { input: i32Bits(0b11111111111111110000000000000000), expected: i32(15) },
+ { input: i32Bits(0b11111111111111100000000000000000), expected: i32(16) },
+ { input: i32Bits(0b11111111111111000000000000000000), expected: i32(17) },
+ { input: i32Bits(0b11111111111110000000000000000000), expected: i32(18) },
+ { input: i32Bits(0b11111111111100000000000000000000), expected: i32(19) },
+ { input: i32Bits(0b11111111111000000000000000000000), expected: i32(20) },
+ { input: i32Bits(0b11111111110000000000000000000000), expected: i32(21) },
+ { input: i32Bits(0b11111111100000000000000000000000), expected: i32(22) },
+ { input: i32Bits(0b11111111000000000000000000000000), expected: i32(23) },
+ { input: i32Bits(0b11111110000000000000000000000000), expected: i32(24) },
+ { input: i32Bits(0b11111100000000000000000000000000), expected: i32(25) },
+ { input: i32Bits(0b11111000000000000000000000000000), expected: i32(26) },
+ { input: i32Bits(0b11110000000000000000000000000000), expected: i32(27) },
+ { input: i32Bits(0b11100000000000000000000000000000), expected: i32(28) },
+ { input: i32Bits(0b11000000000000000000000000000000), expected: i32(29) },
+ { input: i32Bits(0b10000000000000000000000000000000), expected: i32(30) },
+
+ // Positive: 1's after leading 1
+ { input: i32Bits(0b00000000000000000000000000000011), expected: i32(1) },
+ { input: i32Bits(0b00000000000000000000000000000111), expected: i32(2) },
+ { input: i32Bits(0b00000000000000000000000000001111), expected: i32(3) },
+ { input: i32Bits(0b00000000000000000000000000011111), expected: i32(4) },
+ { input: i32Bits(0b00000000000000000000000000111111), expected: i32(5) },
+ { input: i32Bits(0b00000000000000000000000001111111), expected: i32(6) },
+ { input: i32Bits(0b00000000000000000000000011111111), expected: i32(7) },
+ { input: i32Bits(0b00000000000000000000000111111111), expected: i32(8) },
+ { input: i32Bits(0b00000000000000000000001111111111), expected: i32(9) },
+ { input: i32Bits(0b00000000000000000000011111111111), expected: i32(10) },
+ { input: i32Bits(0b00000000000000000000111111111111), expected: i32(11) },
+ { input: i32Bits(0b00000000000000000001111111111111), expected: i32(12) },
+ { input: i32Bits(0b00000000000000000011111111111111), expected: i32(13) },
+ { input: i32Bits(0b00000000000000000111111111111111), expected: i32(14) },
+ { input: i32Bits(0b00000000000000001111111111111111), expected: i32(15) },
+ { input: i32Bits(0b00000000000000011111111111111111), expected: i32(16) },
+ { input: i32Bits(0b00000000000000111111111111111111), expected: i32(17) },
+ { input: i32Bits(0b00000000000001111111111111111111), expected: i32(18) },
+ { input: i32Bits(0b00000000000011111111111111111111), expected: i32(19) },
+ { input: i32Bits(0b00000000000111111111111111111111), expected: i32(20) },
+ { input: i32Bits(0b00000000001111111111111111111111), expected: i32(21) },
+ { input: i32Bits(0b00000000011111111111111111111111), expected: i32(22) },
+ { input: i32Bits(0b00000000111111111111111111111111), expected: i32(23) },
+ { input: i32Bits(0b00000001111111111111111111111111), expected: i32(24) },
+ { input: i32Bits(0b00000011111111111111111111111111), expected: i32(25) },
+ { input: i32Bits(0b00000111111111111111111111111111), expected: i32(26) },
+ { input: i32Bits(0b00001111111111111111111111111111), expected: i32(27) },
+ { input: i32Bits(0b00011111111111111111111111111111), expected: i32(28) },
+ { input: i32Bits(0b00111111111111111111111111111111), expected: i32(29) },
+ { input: i32Bits(0b01111111111111111111111111111111), expected: i32(30) },
+
+ // Negative: 1's after leading 0
+ { input: i32Bits(0b11111111111111111111111111111101), expected: i32(1) },
+ { input: i32Bits(0b11111111111111111111111111111011), expected: i32(2) },
+ { input: i32Bits(0b11111111111111111111111111110111), expected: i32(3) },
+ { input: i32Bits(0b11111111111111111111111111101111), expected: i32(4) },
+ { input: i32Bits(0b11111111111111111111111111011111), expected: i32(5) },
+ { input: i32Bits(0b11111111111111111111111110111111), expected: i32(6) },
+ { input: i32Bits(0b11111111111111111111111101111111), expected: i32(7) },
+ { input: i32Bits(0b11111111111111111111111011111111), expected: i32(8) },
+ { input: i32Bits(0b11111111111111111111110111111111), expected: i32(9) },
+ { input: i32Bits(0b11111111111111111111101111111111), expected: i32(10) },
+ { input: i32Bits(0b11111111111111111111011111111111), expected: i32(11) },
+ { input: i32Bits(0b11111111111111111110111111111111), expected: i32(12) },
+ { input: i32Bits(0b11111111111111111101111111111111), expected: i32(13) },
+ { input: i32Bits(0b11111111111111111011111111111111), expected: i32(14) },
+ { input: i32Bits(0b11111111111111110111111111111111), expected: i32(15) },
+ { input: i32Bits(0b11111111111111101111111111111111), expected: i32(16) },
+ { input: i32Bits(0b11111111111111011111111111111111), expected: i32(17) },
+ { input: i32Bits(0b11111111111110111111111111111111), expected: i32(18) },
+ { input: i32Bits(0b11111111111101111111111111111111), expected: i32(19) },
+ { input: i32Bits(0b11111111111011111111111111111111), expected: i32(20) },
+ { input: i32Bits(0b11111111110111111111111111111111), expected: i32(21) },
+ { input: i32Bits(0b11111111101111111111111111111111), expected: i32(22) },
+ { input: i32Bits(0b11111111011111111111111111111111), expected: i32(23) },
+ { input: i32Bits(0b11111110111111111111111111111111), expected: i32(24) },
+ { input: i32Bits(0b11111101111111111111111111111111), expected: i32(25) },
+ { input: i32Bits(0b11111011111111111111111111111111), expected: i32(26) },
+ { input: i32Bits(0b11110111111111111111111111111111), expected: i32(27) },
+ { input: i32Bits(0b11101111111111111111111111111111), expected: i32(28) },
+ { input: i32Bits(0b11011111111111111111111111111111), expected: i32(29) },
+ { input: i32Bits(0b10111111111111111111111111111111), expected: i32(30) },
+
+ // Positive: random after leading 1
+ { input: i32Bits(0b00000000000000000000000000000110), expected: i32(2) },
+ { input: i32Bits(0b00000000000000000000000000001101), expected: i32(3) },
+ { input: i32Bits(0b00000000000000000000000000011101), expected: i32(4) },
+ { input: i32Bits(0b00000000000000000000000000111001), expected: i32(5) },
+ { input: i32Bits(0b00000000000000000000000001101111), expected: i32(6) },
+ { input: i32Bits(0b00000000000000000000000011111111), expected: i32(7) },
+ { input: i32Bits(0b00000000000000000000000111101111), expected: i32(8) },
+ { input: i32Bits(0b00000000000000000000001111111111), expected: i32(9) },
+ { input: i32Bits(0b00000000000000000000011111110001), expected: i32(10) },
+ { input: i32Bits(0b00000000000000000000111011011101), expected: i32(11) },
+ { input: i32Bits(0b00000000000000000001101101111111), expected: i32(12) },
+ { input: i32Bits(0b00000000000000000011111111011111), expected: i32(13) },
+ { input: i32Bits(0b00000000000000000101111001110101), expected: i32(14) },
+ { input: i32Bits(0b00000000000000001101111011110111), expected: i32(15) },
+ { input: i32Bits(0b00000000000000011111111111110011), expected: i32(16) },
+ { input: i32Bits(0b00000000000000111111111110111111), expected: i32(17) },
+ { input: i32Bits(0b00000000000001111111011111111111), expected: i32(18) },
+ { input: i32Bits(0b00000000000011111111111111111111), expected: i32(19) },
+ { input: i32Bits(0b00000000000111110101011110111111), expected: i32(20) },
+ { input: i32Bits(0b00000000001111101111111111110111), expected: i32(21) },
+ { input: i32Bits(0b00000000011111111111010000101111), expected: i32(22) },
+ { input: i32Bits(0b00000000111111111111001111111011), expected: i32(23) },
+ { input: i32Bits(0b00000001111111011111101111111111), expected: i32(24) },
+ { input: i32Bits(0b00000011101011111011110111111011), expected: i32(25) },
+ { input: i32Bits(0b00000111111110111111111111111111), expected: i32(26) },
+ { input: i32Bits(0b00001111000000011011011110111111), expected: i32(27) },
+ { input: i32Bits(0b00011110101111011111111111111111), expected: i32(28) },
+ { input: i32Bits(0b00110110111111100111111110111101), expected: i32(29) },
+ { input: i32Bits(0b01010111111101111111011111011111), expected: i32(30) },
+
+ // Negative: random after leading 0
+ { input: i32Bits(0b11111111111111111111111111111010), expected: i32(2) },
+ { input: i32Bits(0b11111111111111111111111111110110), expected: i32(3) },
+ { input: i32Bits(0b11111111111111111111111111101101), expected: i32(4) },
+ { input: i32Bits(0b11111111111111111111111111011101), expected: i32(5) },
+ { input: i32Bits(0b11111111111111111111111110111001), expected: i32(6) },
+ { input: i32Bits(0b11111111111111111111111101101111), expected: i32(7) },
+ { input: i32Bits(0b11111111111111111111111011111111), expected: i32(8) },
+ { input: i32Bits(0b11111111111111111111110111101111), expected: i32(9) },
+ { input: i32Bits(0b11111111111111111111101111111111), expected: i32(10) },
+ { input: i32Bits(0b11111111111111111111011111110001), expected: i32(11) },
+ { input: i32Bits(0b11111111111111111110111011011101), expected: i32(12) },
+ { input: i32Bits(0b11111111111111111101101101111111), expected: i32(13) },
+ { input: i32Bits(0b11111111111111111011111111011111), expected: i32(14) },
+ { input: i32Bits(0b11111111111111110101111001110101), expected: i32(15) },
+ { input: i32Bits(0b11111111111111101101111011110111), expected: i32(16) },
+ { input: i32Bits(0b11111111111111011111111111110011), expected: i32(17) },
+ { input: i32Bits(0b11111111111110111111111110111111), expected: i32(18) },
+ { input: i32Bits(0b11111111111101111111011111111111), expected: i32(19) },
+ { input: i32Bits(0b11111111111011111111111111111111), expected: i32(20) },
+ { input: i32Bits(0b11111111110111110101011110111111), expected: i32(21) },
+ { input: i32Bits(0b11111111101111101111111111110111), expected: i32(22) },
+ { input: i32Bits(0b11111111011111111111010000101111), expected: i32(23) },
+ { input: i32Bits(0b11111110111111111111001111111011), expected: i32(24) },
+ { input: i32Bits(0b11111101111111011111101111111111), expected: i32(25) },
+ { input: i32Bits(0b11111011101011111011110111111011), expected: i32(26) },
+ { input: i32Bits(0b11110111111110111111111111111111), expected: i32(27) },
+ { input: i32Bits(0b11101111000000011011011110111111), expected: i32(28) },
+ { input: i32Bits(0b11011110101111011111111111111111), expected: i32(29) },
+ { input: i32Bits(0b10110110111111100111111110111101), expected: i32(30) },
+ ]);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/firstTrailingBit.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/firstTrailingBit.spec.ts
new file mode 100644
index 0000000000..5c65f59d28
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/firstTrailingBit.spec.ts
@@ -0,0 +1,250 @@
+export const description = `
+Execution tests for the 'firstTrailingBit' builtin function
+
+S is i32, u32
+T is S or vecN<S>
+@const fn firstTrailingBit(e: T ) -> T
+For scalar T, the result is: T(-1) if e is zero.
+Otherwise the position of the least significant 1 bit in e.
+Component-wise when T is a vector.
+
+`;
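+
+// Illustrative reference of the behaviour exercised below, assumed from the
+// description (hypothetical helper, not a CTS utility):
+//   const firstTrailingBitRef = (e: number) => (e === 0 ? -1 : 31 - Math.clz32(e & -e));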
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { i32, i32Bits, TypeI32, u32, TypeU32, u32Bits } from '../../../../../util/conversion.js';
+import { allInputSources, Config, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('u32')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`u32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cfg: Config = t.params;
+ await run(t, builtin('firstTrailingBit'), [TypeU32], TypeU32, cfg, [
+ // Zero
+ { input: u32Bits(0b00000000000000000000000000000000), expected: u32(-1) },
+
+ // High bit
+ { input: u32Bits(0b10000000000000000000000000000000), expected: u32(31) },
+
+ // 0's before trailing 1
+ { input: u32Bits(0b00000000000000000000000000000001), expected: u32(0) },
+ { input: u32Bits(0b00000000000000000000000000000010), expected: u32(1) },
+ { input: u32Bits(0b00000000000000000000000000000100), expected: u32(2) },
+ { input: u32Bits(0b00000000000000000000000000001000), expected: u32(3) },
+ { input: u32Bits(0b00000000000000000000000000010000), expected: u32(4) },
+ { input: u32Bits(0b00000000000000000000000000100000), expected: u32(5) },
+ { input: u32Bits(0b00000000000000000000000001000000), expected: u32(6) },
+ { input: u32Bits(0b00000000000000000000000010000000), expected: u32(7) },
+ { input: u32Bits(0b00000000000000000000000100000000), expected: u32(8) },
+ { input: u32Bits(0b00000000000000000000001000000000), expected: u32(9) },
+ { input: u32Bits(0b00000000000000000000010000000000), expected: u32(10) },
+ { input: u32Bits(0b00000000000000000000100000000000), expected: u32(11) },
+ { input: u32Bits(0b00000000000000000001000000000000), expected: u32(12) },
+ { input: u32Bits(0b00000000000000000010000000000000), expected: u32(13) },
+ { input: u32Bits(0b00000000000000000100000000000000), expected: u32(14) },
+ { input: u32Bits(0b00000000000000001000000000000000), expected: u32(15) },
+ { input: u32Bits(0b00000000000000010000000000000000), expected: u32(16) },
+ { input: u32Bits(0b00000000000000100000000000000000), expected: u32(17) },
+ { input: u32Bits(0b00000000000001000000000000000000), expected: u32(18) },
+ { input: u32Bits(0b00000000000010000000000000000000), expected: u32(19) },
+ { input: u32Bits(0b00000000000100000000000000000000), expected: u32(20) },
+ { input: u32Bits(0b00000000001000000000000000000000), expected: u32(21) },
+ { input: u32Bits(0b00000000010000000000000000000000), expected: u32(22) },
+ { input: u32Bits(0b00000000100000000000000000000000), expected: u32(23) },
+ { input: u32Bits(0b00000001000000000000000000000000), expected: u32(24) },
+ { input: u32Bits(0b00000010000000000000000000000000), expected: u32(25) },
+ { input: u32Bits(0b00000100000000000000000000000000), expected: u32(26) },
+ { input: u32Bits(0b00001000000000000000000000000000), expected: u32(27) },
+ { input: u32Bits(0b00010000000000000000000000000000), expected: u32(28) },
+ { input: u32Bits(0b00100000000000000000000000000000), expected: u32(29) },
+ { input: u32Bits(0b01000000000000000000000000000000), expected: u32(30) },
+
+ // 1's before trailing 1
+ { input: u32Bits(0b11111111111111111111111111111111), expected: u32(0) },
+ { input: u32Bits(0b11111111111111111111111111111110), expected: u32(1) },
+ { input: u32Bits(0b11111111111111111111111111111100), expected: u32(2) },
+ { input: u32Bits(0b11111111111111111111111111111000), expected: u32(3) },
+ { input: u32Bits(0b11111111111111111111111111110000), expected: u32(4) },
+ { input: u32Bits(0b11111111111111111111111111100000), expected: u32(5) },
+ { input: u32Bits(0b11111111111111111111111111000000), expected: u32(6) },
+ { input: u32Bits(0b11111111111111111111111110000000), expected: u32(7) },
+ { input: u32Bits(0b11111111111111111111111100000000), expected: u32(8) },
+ { input: u32Bits(0b11111111111111111111111000000000), expected: u32(9) },
+ { input: u32Bits(0b11111111111111111111110000000000), expected: u32(10) },
+ { input: u32Bits(0b11111111111111111111100000000000), expected: u32(11) },
+ { input: u32Bits(0b11111111111111111111000000000000), expected: u32(12) },
+ { input: u32Bits(0b11111111111111111110000000000000), expected: u32(13) },
+ { input: u32Bits(0b11111111111111111100000000000000), expected: u32(14) },
+ { input: u32Bits(0b11111111111111111000000000000000), expected: u32(15) },
+ { input: u32Bits(0b11111111111111110000000000000000), expected: u32(16) },
+ { input: u32Bits(0b11111111111111100000000000000000), expected: u32(17) },
+ { input: u32Bits(0b11111111111111000000000000000000), expected: u32(18) },
+ { input: u32Bits(0b11111111111110000000000000000000), expected: u32(19) },
+ { input: u32Bits(0b11111111111100000000000000000000), expected: u32(20) },
+ { input: u32Bits(0b11111111111000000000000000000000), expected: u32(21) },
+ { input: u32Bits(0b11111111110000000000000000000000), expected: u32(22) },
+ { input: u32Bits(0b11111111100000000000000000000000), expected: u32(23) },
+ { input: u32Bits(0b11111111000000000000000000000000), expected: u32(24) },
+ { input: u32Bits(0b11111110000000000000000000000000), expected: u32(25) },
+ { input: u32Bits(0b11111100000000000000000000000000), expected: u32(26) },
+ { input: u32Bits(0b11111000000000000000000000000000), expected: u32(27) },
+ { input: u32Bits(0b11110000000000000000000000000000), expected: u32(28) },
+ { input: u32Bits(0b11100000000000000000000000000000), expected: u32(29) },
+ { input: u32Bits(0b11000000000000000000000000000000), expected: u32(30) },
+
+ // random before trailing 1
+ { input: u32Bits(0b11110000001111111101111010001111), expected: u32(0) },
+ { input: u32Bits(0b11011110111111100101110011110010), expected: u32(1) },
+ { input: u32Bits(0b11110111011011111111010000111100), expected: u32(2) },
+ { input: u32Bits(0b11010011011101111111010011101000), expected: u32(3) },
+ { input: u32Bits(0b11010111110111110001111110110000), expected: u32(4) },
+ { input: u32Bits(0b11111101111101111110101111100000), expected: u32(5) },
+ { input: u32Bits(0b11111001111011111001111011000000), expected: u32(6) },
+ { input: u32Bits(0b11001110110111110111111010000000), expected: u32(7) },
+ { input: u32Bits(0b11101111011111101110101100000000), expected: u32(8) },
+ { input: u32Bits(0b11111101111011111111111000000000), expected: u32(9) },
+ { input: u32Bits(0b10011111011101110110110000000000), expected: u32(10) },
+ { input: u32Bits(0b11111111101101111011100000000000), expected: u32(11) },
+ { input: u32Bits(0b11111011010110111011000000000000), expected: u32(12) },
+ { input: u32Bits(0b00111101010000111010000000000000), expected: u32(13) },
+ { input: u32Bits(0b11111011110001101100000000000000), expected: u32(14) },
+ { input: u32Bits(0b10111111010111111000000000000000), expected: u32(15) },
+ { input: u32Bits(0b11011101111010110000000000000000), expected: u32(16) },
+ { input: u32Bits(0b01110100110110100000000000000000), expected: u32(17) },
+ { input: u32Bits(0b11100111001011000000000000000000), expected: u32(18) },
+ { input: u32Bits(0b11111001110110000000000000000000), expected: u32(19) },
+ { input: u32Bits(0b00110100100100000000000000000000), expected: u32(20) },
+ { input: u32Bits(0b11111010011000000000000000000000), expected: u32(21) },
+ { input: u32Bits(0b00000010110000000000000000000000), expected: u32(22) },
+ { input: u32Bits(0b11100111100000000000000000000000), expected: u32(23) },
+ { input: u32Bits(0b00101101000000000000000000000000), expected: u32(24) },
+ { input: u32Bits(0b11011010000000000000000000000000), expected: u32(25) },
+ { input: u32Bits(0b11010100000000000000000000000000), expected: u32(26) },
+ { input: u32Bits(0b10111000000000000000000000000000), expected: u32(27) },
+ { input: u32Bits(0b01110000000000000000000000000000), expected: u32(28) },
+ { input: u32Bits(0b10100000000000000000000000000000), expected: u32(29) },
+ ]);
+ });
+
+g.test('i32')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`i32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cfg: Config = t.params;
+ await run(t, builtin('firstTrailingBit'), [TypeI32], TypeI32, cfg, [
+ // Zero
+ { input: i32Bits(0b00000000000000000000000000000000), expected: i32(-1) },
+
+ // High bit
+ { input: i32Bits(0b10000000000000000000000000000000), expected: i32(31) },
+
+ // 0's before trailing 1
+ { input: i32Bits(0b00000000000000000000000000000001), expected: i32(0) },
+ { input: i32Bits(0b00000000000000000000000000000010), expected: i32(1) },
+ { input: i32Bits(0b00000000000000000000000000000100), expected: i32(2) },
+ { input: i32Bits(0b00000000000000000000000000001000), expected: i32(3) },
+ { input: i32Bits(0b00000000000000000000000000010000), expected: i32(4) },
+ { input: i32Bits(0b00000000000000000000000000100000), expected: i32(5) },
+ { input: i32Bits(0b00000000000000000000000001000000), expected: i32(6) },
+ { input: i32Bits(0b00000000000000000000000010000000), expected: i32(7) },
+ { input: i32Bits(0b00000000000000000000000100000000), expected: i32(8) },
+ { input: i32Bits(0b00000000000000000000001000000000), expected: i32(9) },
+ { input: i32Bits(0b00000000000000000000010000000000), expected: i32(10) },
+ { input: i32Bits(0b00000000000000000000100000000000), expected: i32(11) },
+ { input: i32Bits(0b00000000000000000001000000000000), expected: i32(12) },
+ { input: i32Bits(0b00000000000000000010000000000000), expected: i32(13) },
+ { input: i32Bits(0b00000000000000000100000000000000), expected: i32(14) },
+ { input: i32Bits(0b00000000000000001000000000000000), expected: i32(15) },
+ { input: i32Bits(0b00000000000000010000000000000000), expected: i32(16) },
+ { input: i32Bits(0b00000000000000100000000000000000), expected: i32(17) },
+ { input: i32Bits(0b00000000000001000000000000000000), expected: i32(18) },
+ { input: i32Bits(0b00000000000010000000000000000000), expected: i32(19) },
+ { input: i32Bits(0b00000000000100000000000000000000), expected: i32(20) },
+ { input: i32Bits(0b00000000001000000000000000000000), expected: i32(21) },
+ { input: i32Bits(0b00000000010000000000000000000000), expected: i32(22) },
+ { input: i32Bits(0b00000000100000000000000000000000), expected: i32(23) },
+ { input: i32Bits(0b00000001000000000000000000000000), expected: i32(24) },
+ { input: i32Bits(0b00000010000000000000000000000000), expected: i32(25) },
+ { input: i32Bits(0b00000100000000000000000000000000), expected: i32(26) },
+ { input: i32Bits(0b00001000000000000000000000000000), expected: i32(27) },
+ { input: i32Bits(0b00010000000000000000000000000000), expected: i32(28) },
+ { input: i32Bits(0b00100000000000000000000000000000), expected: i32(29) },
+ { input: i32Bits(0b01000000000000000000000000000000), expected: i32(30) },
+
+ // 1's before trailing 1
+ { input: i32Bits(0b11111111111111111111111111111111), expected: i32(0) },
+ { input: i32Bits(0b11111111111111111111111111111110), expected: i32(1) },
+ { input: i32Bits(0b11111111111111111111111111111100), expected: i32(2) },
+ { input: i32Bits(0b11111111111111111111111111111000), expected: i32(3) },
+ { input: i32Bits(0b11111111111111111111111111110000), expected: i32(4) },
+ { input: i32Bits(0b11111111111111111111111111100000), expected: i32(5) },
+ { input: i32Bits(0b11111111111111111111111111000000), expected: i32(6) },
+ { input: i32Bits(0b11111111111111111111111110000000), expected: i32(7) },
+ { input: i32Bits(0b11111111111111111111111100000000), expected: i32(8) },
+ { input: i32Bits(0b11111111111111111111111000000000), expected: i32(9) },
+ { input: i32Bits(0b11111111111111111111110000000000), expected: i32(10) },
+ { input: i32Bits(0b11111111111111111111100000000000), expected: i32(11) },
+ { input: i32Bits(0b11111111111111111111000000000000), expected: i32(12) },
+ { input: i32Bits(0b11111111111111111110000000000000), expected: i32(13) },
+ { input: i32Bits(0b11111111111111111100000000000000), expected: i32(14) },
+ { input: i32Bits(0b11111111111111111000000000000000), expected: i32(15) },
+ { input: i32Bits(0b11111111111111110000000000000000), expected: i32(16) },
+ { input: i32Bits(0b11111111111111100000000000000000), expected: i32(17) },
+ { input: i32Bits(0b11111111111111000000000000000000), expected: i32(18) },
+ { input: i32Bits(0b11111111111110000000000000000000), expected: i32(19) },
+ { input: i32Bits(0b11111111111100000000000000000000), expected: i32(20) },
+ { input: i32Bits(0b11111111111000000000000000000000), expected: i32(21) },
+ { input: i32Bits(0b11111111110000000000000000000000), expected: i32(22) },
+ { input: i32Bits(0b11111111100000000000000000000000), expected: i32(23) },
+ { input: i32Bits(0b11111111000000000000000000000000), expected: i32(24) },
+ { input: i32Bits(0b11111110000000000000000000000000), expected: i32(25) },
+ { input: i32Bits(0b11111100000000000000000000000000), expected: i32(26) },
+ { input: i32Bits(0b11111000000000000000000000000000), expected: i32(27) },
+ { input: i32Bits(0b11110000000000000000000000000000), expected: i32(28) },
+ { input: i32Bits(0b11100000000000000000000000000000), expected: i32(29) },
+ { input: i32Bits(0b11000000000000000000000000000000), expected: i32(30) },
+
+ // random before trailing 1
+ { input: i32Bits(0b11110000001111111101111010001111), expected: i32(0) },
+ { input: i32Bits(0b11011110111111100101110011110010), expected: i32(1) },
+ { input: i32Bits(0b11110111011011111111010000111100), expected: i32(2) },
+ { input: i32Bits(0b11010011011101111111010011101000), expected: i32(3) },
+ { input: i32Bits(0b11010111110111110001111110110000), expected: i32(4) },
+ { input: i32Bits(0b11111101111101111110101111100000), expected: i32(5) },
+ { input: i32Bits(0b11111001111011111001111011000000), expected: i32(6) },
+ { input: i32Bits(0b11001110110111110111111010000000), expected: i32(7) },
+ { input: i32Bits(0b11101111011111101110101100000000), expected: i32(8) },
+ { input: i32Bits(0b11111101111011111111111000000000), expected: i32(9) },
+ { input: i32Bits(0b10011111011101110110110000000000), expected: i32(10) },
+ { input: i32Bits(0b11111111101101111011100000000000), expected: i32(11) },
+ { input: i32Bits(0b11111011010110111011000000000000), expected: i32(12) },
+ { input: i32Bits(0b00111101010000111010000000000000), expected: i32(13) },
+ { input: i32Bits(0b11111011110001101100000000000000), expected: i32(14) },
+ { input: i32Bits(0b10111111010111111000000000000000), expected: i32(15) },
+ { input: i32Bits(0b11011101111010110000000000000000), expected: i32(16) },
+ { input: i32Bits(0b01110100110110100000000000000000), expected: i32(17) },
+ { input: i32Bits(0b11100111001011000000000000000000), expected: i32(18) },
+ { input: i32Bits(0b11111001110110000000000000000000), expected: i32(19) },
+ { input: i32Bits(0b00110100100100000000000000000000), expected: i32(20) },
+ { input: i32Bits(0b11111010011000000000000000000000), expected: i32(21) },
+ { input: i32Bits(0b00000010110000000000000000000000), expected: i32(22) },
+ { input: i32Bits(0b11100111100000000000000000000000), expected: i32(23) },
+ { input: i32Bits(0b00101101000000000000000000000000), expected: i32(24) },
+ { input: i32Bits(0b11011010000000000000000000000000), expected: i32(25) },
+ { input: i32Bits(0b11010100000000000000000000000000), expected: i32(26) },
+ { input: i32Bits(0b10111000000000000000000000000000), expected: i32(27) },
+ { input: i32Bits(0b01110000000000000000000000000000), expected: i32(28) },
+ { input: i32Bits(0b10100000000000000000000000000000), expected: i32(29) },
+ ]);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/floor.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/floor.spec.ts
new file mode 100644
index 0000000000..873a6772c3
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/floor.spec.ts
@@ -0,0 +1,96 @@
+export const description = `
+Execution tests for the 'floor' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn floor(e: T ) -> T
+Returns the floor of e. Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16, TypeAbstractFloat } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullF32Range, fullF16Range, fullF64Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, onlyConstInputSource, run } from '../../expression.js';
+
+import { abstractBuiltin, builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+const kSmallMagnitudeTestValues = [0.1, 0.9, 1.0, 1.1, 1.9, -0.1, -0.9, -1.0, -1.1, -1.9];
+
+export const d = makeCaseCache('floor', {
+ f32: () => {
+ return FP.f32.generateScalarToIntervalCases(
+ [
+ ...kSmallMagnitudeTestValues,
+ ...fullF32Range(),
+ 0x8000_0000, // https://github.com/gpuweb/cts/issues/2766
+ ],
+ 'unfiltered',
+ FP.f32.floorInterval
+ );
+ },
+ f16: () => {
+ return FP.f16.generateScalarToIntervalCases(
+ [
+ ...kSmallMagnitudeTestValues,
+ ...fullF16Range(),
+ 0x8000, // https://github.com/gpuweb/cts/issues/2766
+ ],
+ 'unfiltered',
+ FP.f16.floorInterval
+ );
+ },
+ abstract: () => {
+ return FP.abstract.generateScalarToIntervalCases(
+ [
+ ...kSmallMagnitudeTestValues,
+ ...fullF64Range(),
+ 0x8000_0000_0000_0000, // https://github.com/gpuweb/cts/issues/2766
+ ],
+ 'unfiltered',
+ FP.abstract.floorInterval
+ );
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u
+ .combine('inputSource', onlyConstInputSource)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('abstract');
+ await run(t, abstractBuiltin('floor'), [TypeAbstractFloat], TypeAbstractFloat, t.params, cases);
+ });
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('f32');
+ await run(t, builtin('floor'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, builtin('floor'), [TypeF16], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/fma.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/fma.spec.ts
new file mode 100644
index 0000000000..701f9d7ca9
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/fma.spec.ts
@@ -0,0 +1,113 @@
+export const description = `
+Execution tests for the 'fma' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn fma(e1: T ,e2: T ,e3: T ) -> T
+Returns e1 * e2 + e3. Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16, TypeAbstractFloat } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { sparseF32Range, sparseF16Range, sparseF64Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, onlyConstInputSource, run } from '../../expression.js';
+
+import { abstractBuiltin, builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('fma', {
+ f32_const: () => {
+ return FP.f32.generateScalarTripleToIntervalCases(
+ sparseF32Range(),
+ sparseF32Range(),
+ sparseF32Range(),
+ 'finite',
+ FP.f32.fmaInterval
+ );
+ },
+ f32_non_const: () => {
+ return FP.f32.generateScalarTripleToIntervalCases(
+ sparseF32Range(),
+ sparseF32Range(),
+ sparseF32Range(),
+ 'unfiltered',
+ FP.f32.fmaInterval
+ );
+ },
+ f16_const: () => {
+ return FP.f16.generateScalarTripleToIntervalCases(
+ sparseF16Range(),
+ sparseF16Range(),
+ sparseF16Range(),
+ 'finite',
+ FP.f16.fmaInterval
+ );
+ },
+ f16_non_const: () => {
+ return FP.f16.generateScalarTripleToIntervalCases(
+ sparseF16Range(),
+ sparseF16Range(),
+ sparseF16Range(),
+ 'unfiltered',
+ FP.f16.fmaInterval
+ );
+ },
+ abstract: () => {
+ return FP.abstract.generateScalarTripleToIntervalCases(
+ sparseF64Range(),
+ sparseF64Range(),
+ sparseF64Range(),
+ 'finite',
+ FP.abstract.fmaInterval
+ );
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u
+ .combine('inputSource', onlyConstInputSource)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('abstract');
+ await run(
+ t,
+ abstractBuiltin('fma'),
+ [TypeAbstractFloat, TypeAbstractFloat, TypeAbstractFloat],
+ TypeAbstractFloat,
+ t.params,
+ cases
+ );
+ });
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f32_const' : 'f32_non_const');
+ await run(t, builtin('fma'), [TypeF32, TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f16_const' : 'f16_non_const');
+ await run(t, builtin('fma'), [TypeF16, TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/fract.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/fract.spec.ts
new file mode 100644
index 0000000000..44ea31fde2
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/fract.spec.ts
@@ -0,0 +1,103 @@
+export const description = `
+Execution tests for the 'fract' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn fract(e: T ) -> T
+Returns the fractional part of e, computed as e - floor(e).
+Component-wise when T is a vector.
+`;
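+
+// The inline case comments below (e.g. "~-0.1 -> ~0.9") follow directly from
+// e - floor(e); a trivial reference sketch (hypothetical helper):
+//   const fractRef = (e: number) => e - Math.floor(e);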
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullF32Range, fullF16Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('fract', {
+ f32: () => {
+ return FP.f32.generateScalarToIntervalCases(
+ [
+ 0.5, // 0.5 -> 0.5
+ 0.9, // ~0.9 -> ~0.9
+ 1, // 1 -> 0
+ 2, // 2 -> 0
+ 1.11, // ~1.11 -> ~0.11
+ 10.0001, // ~10.0001 -> ~0.0001
+ -0.1, // ~-0.1 -> ~0.9
+ -0.5, // -0.5 -> 0.5
+ -0.9, // ~-0.9 -> ~0.1
+ -1, // -1 -> 0
+ -2, // -2 -> 0
+ -1.11, // ~-1.11 -> ~0.89
+ -10.0001, // -10.0001 -> ~0.9999
+ 0x80000000, // https://github.com/gpuweb/cts/issues/2766
+ ...fullF32Range(),
+ ],
+ 'unfiltered',
+ FP.f32.fractInterval
+ );
+ },
+ f16: () => {
+ return FP.f16.generateScalarToIntervalCases(
+ [
+ 0.5, // 0.5 -> 0.5
+ 0.9, // ~0.9 -> ~0.9
+ 1, // 1 -> 0
+ 2, // 2 -> 0
+ 1.11, // ~1.11 -> ~0.11
+ 10.0078125, // 10.0078125 -> 0.0078125
+ -0.1, // ~-0.1 -> ~0.9
+ -0.5, // -0.5 -> 0.5
+ -0.9, // ~-0.9 -> ~0.1
+ -1, // -1 -> 0
+ -2, // -2 -> 0
+ -1.11, // ~-1.11 -> ~0.89
+ -10.0078125, // -10.0078125 -> 0.9921875
+ 658.5, // 658.5 -> 0.5
+ ...fullF16Range(),
+ ],
+ 'unfiltered',
+ FP.f16.fractInterval
+ );
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('f32');
+ await run(t, builtin('fract'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, builtin('fract'), [TypeF16], TypeF16, t.params, cases);
+ });
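A worked instance of the definition quoted in this file's header, matching the inline comment "-1.11 -> ~0.89" in the f32 case list (reference arithmetic only, not the interval check the test actually performs):

  // fract(e) = e - floor(e)
  //   fract(-1.11) = -1.11 - floor(-1.11) = -1.11 - (-2) = 0.89
  const fractRef = (e: number): number => e - Math.floor(e);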
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/frexp.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/frexp.spec.ts
new file mode 100644
index 0000000000..ffe672b08c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/frexp.spec.ts
@@ -0,0 +1,475 @@
+export const description = `
+Execution tests for the 'frexp' builtin function
+
+S is f32 or f16
+T is S or vecN<S>
+
+@const fn frexp(e: T) -> result_struct
+
+Splits e into a significand and exponent of the form significand * 2^exponent.
+Returns the result_struct for the appropriate overload.
+
+
+The magnitude of the significand is in the range [0.5, 1.0), or is 0.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { skipUndefined } from '../../../../../util/compare.js';
+import {
+ i32,
+ Scalar,
+ toVector,
+ TypeF32,
+ TypeF16,
+ TypeI32,
+ TypeVec,
+ Vector,
+} from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import {
+ frexp,
+ fullF16Range,
+ fullF32Range,
+ vectorF16Range,
+ vectorF32Range,
+} from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import {
+ allInputSources,
+ basicExpressionBuilder,
+ Case,
+ run,
+ ShaderBuilder,
+} from '../../expression.js';
+
+export const g = makeTestGroup(GPUTest);
+
+/* @returns a ShaderBuilder that evaluates frexp and returns .fract from the result structure */
+function fractBuilder(): ShaderBuilder {
+ return basicExpressionBuilder(value => `frexp(${value}).fract`);
+}
+
+/* @returns a ShaderBuilder that evaluates frexp and returns .exp from the result structure */
+function expBuilder(): ShaderBuilder {
+ return basicExpressionBuilder(value => `frexp(${value}).exp`);
+}
+
+/* @returns a fract Case for a given scalar or vector input */
+function makeVectorCaseFract(v: number | readonly number[], trait: 'f32' | 'f16'): Case {
+ const fp = FP[trait];
+ let toInput: (n: readonly number[]) => Scalar | Vector;
+ let toOutput: (n: readonly number[]) => Scalar | Vector;
+ if (v instanceof Array) {
+ // Input is vector
+ toInput = (n: readonly number[]) => toVector(n, fp.scalarBuilder);
+ toOutput = (n: readonly number[]) => toVector(n, fp.scalarBuilder);
+ } else {
+    // Input is a scalar; wrap it in an array so it can be processed like the vector case.
+ v = [v];
+ toInput = (n: readonly number[]) => fp.scalarBuilder(n[0]);
+ toOutput = (n: readonly number[]) => fp.scalarBuilder(n[0]);
+ }
+
+ v = v.map(fp.quantize);
+ if (v.some(e => e !== 0 && fp.isSubnormal(e))) {
+ return { input: toInput(v), expected: skipUndefined(undefined) };
+ }
+
+ const fs = v.map(e => {
+ return frexp(e, trait).fract;
+ });
+
+ return { input: toInput(v), expected: toOutput(fs) };
+}
+
+/* @returns an exp Case for a given scalar or vector input */
+function makeVectorCaseExp(v: number | readonly number[], trait: 'f32' | 'f16'): Case {
+ const fp = FP[trait];
+ let toInput: (n: readonly number[]) => Scalar | Vector;
+ let toOutput: (n: readonly number[]) => Scalar | Vector;
+ if (v instanceof Array) {
+ // Input is vector
+ toInput = (n: readonly number[]) => toVector(n, fp.scalarBuilder);
+ toOutput = (n: readonly number[]) => toVector(n, i32);
+ } else {
+    // Input is a scalar; wrap it in an array so it can be processed like the vector case.
+ v = [v];
+ toInput = (n: readonly number[]) => fp.scalarBuilder(n[0]);
+ toOutput = (n: readonly number[]) => i32(n[0]);
+ }
+
+ v = v.map(fp.quantize);
+ if (v.some(e => e !== 0 && fp.isSubnormal(e))) {
+ return { input: toInput(v), expected: skipUndefined(undefined) };
+ }
+
+ const fs = v.map(e => {
+ return frexp(e, trait).exp;
+ });
+
+ return { input: toInput(v), expected: toOutput(fs) };
+}
+
+export const d = makeCaseCache('frexp', {
+ f32_fract: () => {
+ return fullF32Range().map(v => makeVectorCaseFract(v, 'f32'));
+ },
+ f32_exp: () => {
+ return fullF32Range().map(v => makeVectorCaseExp(v, 'f32'));
+ },
+ f32_vec2_fract: () => {
+ return vectorF32Range(2).map(v => makeVectorCaseFract(v, 'f32'));
+ },
+ f32_vec2_exp: () => {
+ return vectorF32Range(2).map(v => makeVectorCaseExp(v, 'f32'));
+ },
+ f32_vec3_fract: () => {
+ return vectorF32Range(3).map(v => makeVectorCaseFract(v, 'f32'));
+ },
+ f32_vec3_exp: () => {
+ return vectorF32Range(3).map(v => makeVectorCaseExp(v, 'f32'));
+ },
+ f32_vec4_fract: () => {
+ return vectorF32Range(4).map(v => makeVectorCaseFract(v, 'f32'));
+ },
+ f32_vec4_exp: () => {
+ return vectorF32Range(4).map(v => makeVectorCaseExp(v, 'f32'));
+ },
+ f16_fract: () => {
+ return fullF16Range().map(v => makeVectorCaseFract(v, 'f16'));
+ },
+ f16_exp: () => {
+ return fullF16Range().map(v => makeVectorCaseExp(v, 'f16'));
+ },
+ f16_vec2_fract: () => {
+ return vectorF16Range(2).map(v => makeVectorCaseFract(v, 'f16'));
+ },
+ f16_vec2_exp: () => {
+ return vectorF16Range(2).map(v => makeVectorCaseExp(v, 'f16'));
+ },
+ f16_vec3_fract: () => {
+ return vectorF16Range(3).map(v => makeVectorCaseFract(v, 'f16'));
+ },
+ f16_vec3_exp: () => {
+ return vectorF16Range(3).map(v => makeVectorCaseExp(v, 'f16'));
+ },
+ f16_vec4_fract: () => {
+ return vectorF16Range(4).map(v => makeVectorCaseFract(v, 'f16'));
+ },
+ f16_vec4_exp: () => {
+ return vectorF16Range(4).map(v => makeVectorCaseExp(v, 'f16'));
+ },
+});
+
+g.test('f32_fract')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is f32
+
+struct __frexp_result_f32 {
+ fract : f32, // fract part
+ exp : i32 // exponent part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get('f32_fract');
+ await run(t, fractBuilder(), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f32_exp')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is f32
+
+struct __frexp_result_f32 {
+ fract : f32, // fract part
+ exp : i32 // exponent part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get('f32_exp');
+ await run(t, expBuilder(), [TypeF32], TypeI32, t.params, cases);
+ });
+
+g.test('f32_vec2_fract')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec2<f32>
+
+struct __frexp_result_vec2_f32 {
+ fract : vec2<f32>, // fract part
+ exp : vec2<i32> // exponent part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get('f32_vec2_fract');
+ await run(t, fractBuilder(), [TypeVec(2, TypeF32)], TypeVec(2, TypeF32), t.params, cases);
+ });
+
+g.test('f32_vec2_exp')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec2<f32>
+
+struct __frexp_result_vec2_f32 {
+ fract : vec2<f32>, // fractional part
+ exp : vec2<i32> // exponent part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get('f32_vec2_exp');
+ await run(t, expBuilder(), [TypeVec(2, TypeF32)], TypeVec(2, TypeI32), t.params, cases);
+ });
+
+g.test('f32_vec3_fract')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec3<f32>
+
+struct __frexp_result_vec3_f32 {
+ fract : vec3<f32>, // fractional part
+ exp : vec3<i32> // exponent part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get('f32_vec3_fract');
+ await run(t, fractBuilder(), [TypeVec(3, TypeF32)], TypeVec(3, TypeF32), t.params, cases);
+ });
+
+g.test('f32_vec3_exp')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec3<f32>
+
+struct __frexp_result_vec3_f32 {
+ fract : vec3<f32>, // fractional part
+ exp : vec3<i32> // exponent part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get('f32_vec3_exp');
+ await run(t, expBuilder(), [TypeVec(3, TypeF32)], TypeVec(3, TypeI32), t.params, cases);
+ });
+
+g.test('f32_vec4_fract')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec4<f32>
+
+struct __frexp_result_vec4_f32 {
+ fract : vec4<f32>, // fractional part
+ exp : vec4<i32> // exponent part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get('f32_vec4_fract');
+ await run(t, fractBuilder(), [TypeVec(4, TypeF32)], TypeVec(4, TypeF32), t.params, cases);
+ });
+
+g.test('f32_vec4_exp')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec4<f32>
+
+struct __frexp_result_vec4_f32 {
+ fract : vec4<f32>, // fractional part
+ exp : vec4<i32> // exponent part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get('f32_vec4_exp');
+ await run(t, expBuilder(), [TypeVec(4, TypeF32)], TypeVec(4, TypeI32), t.params, cases);
+ });
+
+g.test('f16_fract')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is f16
+
+struct __frexp_result_f16 {
+ fract : f16, // fract part
+ exp : i32 // exponent part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16_fract');
+ await run(t, fractBuilder(), [TypeF16], TypeF16, t.params, cases);
+ });
+
+g.test('f16_exp')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is f16
+
+struct __frexp_result_f16 {
+ fract : f16, // fract part
+ exp : i32 // exponent part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16_exp');
+ await run(t, expBuilder(), [TypeF16], TypeI32, t.params, cases);
+ });
+
+g.test('f16_vec2_fract')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec2<f16>
+
+struct __frexp_result_vec2_f16 {
+ fract : vec2<f16>, // fract part
+ exp : vec2<i32> // exponent part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16_vec2_fract');
+ await run(t, fractBuilder(), [TypeVec(2, TypeF16)], TypeVec(2, TypeF16), t.params, cases);
+ });
+
+g.test('f16_vec2_exp')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec2<f16>
+
+struct __frexp_result_vec2_f16 {
+ fract : vec2<f16>, // fractional part
+ exp : vec2<i32> // exponent part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16_vec2_exp');
+ await run(t, expBuilder(), [TypeVec(2, TypeF16)], TypeVec(2, TypeI32), t.params, cases);
+ });
+
+g.test('f16_vec3_fract')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec3<f16>
+
+struct __frexp_result_vec3_f16 {
+ fract : vec3<f16>, // fractional part
+ exp : vec3<i32> // exponent part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16_vec3_fract');
+ await run(t, fractBuilder(), [TypeVec(3, TypeF16)], TypeVec(3, TypeF16), t.params, cases);
+ });
+
+g.test('f16_vec3_exp')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec3<f16>
+
+struct __frexp_result_vec3_f16 {
+ fract : vec3<f16>, // fractional part
+ exp : vec3<i32> // exponent part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16_vec3_exp');
+ await run(t, expBuilder(), [TypeVec(3, TypeF16)], TypeVec(3, TypeI32), t.params, cases);
+ });
+
+g.test('f16_vec4_fract')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec4<f16>
+
+struct __frexp_result_vec4_f16 {
+ fract : vec4<f16>, // fractional part
+ exp : vec4<i32> // exponent part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16_vec4_fract');
+ await run(t, fractBuilder(), [TypeVec(4, TypeF16)], TypeVec(4, TypeF16), t.params, cases);
+ });
+
+g.test('f16_vec4_exp')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec4<f16>
+
+struct __frexp_result_vec4_f16 {
+ fract : vec4<f16>, // fractional part
+ exp : vec4<i32> // exponent part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16_vec4_exp');
+ await run(t, expBuilder(), [TypeVec(4, TypeF16)], TypeVec(4, TypeI32), t.params, cases);
+ });
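A worked instance of the split these helpers compute (values chosen for illustration): 12.0 = 0.75 * 2^4, so frexp(12.0) yields fract = 0.75 and exp = 4, with |fract| in [0.5, 1.0) as the header states. The makeVectorCase* helpers above skip non-zero subnormal inputs via skipUndefined, presumably because an implementation may flush subnormals to zero, which would leave both struct members implementation-defined.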
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/fwidth.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/fwidth.spec.ts
new file mode 100644
index 0000000000..7c6f0232a9
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/fwidth.spec.ts
@@ -0,0 +1,21 @@
+export const description = `
+Execution tests for the 'fwidth' builtin function
+
+T is f32 or vecN<f32>
+fn fwidth(e:T) ->T
+Returns abs(dpdx(e)) + abs(dpdy(e)).
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { allInputSources } from '../../expression.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#derivative-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/fwidthCoarse.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/fwidthCoarse.spec.ts
new file mode 100644
index 0000000000..9f93237934
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/fwidthCoarse.spec.ts
@@ -0,0 +1,21 @@
+export const description = `
+Execution tests for the 'fwidthCoarse' builtin function
+
+T is f32 or vecN<f32>
+fn fwidthCoarse(e:T) ->T
+Returns abs(dpdxCoarse(e)) + abs(dpdyCoarse(e)).
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { allInputSources } from '../../expression.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#derivative-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/fwidthFine.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/fwidthFine.spec.ts
new file mode 100644
index 0000000000..b08c293228
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/fwidthFine.spec.ts
@@ -0,0 +1,21 @@
+export const description = `
+Execution tests for the 'fwidthFine' builtin function
+
+T is f32 or vecN<f32>
+fn fwidthFine(e:T) ->T
+Returns abs(dpdxFine(e)) + abs(dpdyFine(e)).
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { allInputSources } from '../../expression.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#derivative-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/insertBits.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/insertBits.spec.ts
new file mode 100644
index 0000000000..1068e76252
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/insertBits.spec.ts
@@ -0,0 +1,386 @@
+export const description = `
+Execution tests for the 'insertBits' builtin function
+
+S is i32 or u32
+T is S or vecN<S>
+@const fn insertBits(e: T, newbits:T, offset: u32, count: u32) -> T Sets bits in an integer.
+
+When T is a scalar type, then:
+ w is the bit width of T
+ o = min(offset,w)
+ c = min(count, w - o)
+
+The result is e if c is 0.
+Otherwise, bits o..o+c-1 of the result are copied from bits 0..c-1 of newbits.
+Other bits of the result are copied from e.
+Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import {
+ i32Bits,
+ TypeI32,
+ u32,
+ TypeU32,
+ u32Bits,
+ vec2,
+ vec3,
+ vec4,
+ TypeVec,
+} from '../../../../../util/conversion.js';
+import { allInputSources, Config, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('integer')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`integer tests`)
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('signed', [false, true])
+ .combine('width', [1, 2, 3, 4])
+ )
+ .fn(async t => {
+ const cfg: Config = t.params;
+ const scalarType = t.params.signed ? TypeI32 : TypeU32;
+ const T = t.params.width === 1 ? scalarType : TypeVec(t.params.width, scalarType);
+
+ const V = (x: number, y?: number, z?: number, w?: number) => {
+ y = y === undefined ? x : y;
+ z = z === undefined ? x : z;
+ w = w === undefined ? x : w;
+
+ if (t.params.signed) {
+ switch (t.params.width) {
+ case 1:
+ return i32Bits(x);
+ case 2:
+ return vec2(i32Bits(x), i32Bits(y));
+ case 3:
+ return vec3(i32Bits(x), i32Bits(y), i32Bits(z));
+ default:
+ return vec4(i32Bits(x), i32Bits(y), i32Bits(z), i32Bits(w));
+ }
+ } else {
+ switch (t.params.width) {
+ case 1:
+ return u32Bits(x);
+ case 2:
+ return vec2(u32Bits(x), u32Bits(y));
+ case 3:
+ return vec3(u32Bits(x), u32Bits(y), u32Bits(z));
+ default:
+ return vec4(u32Bits(x), u32Bits(y), u32Bits(z), u32Bits(w));
+ }
+ }
+ };
+
+ const all_1 = V(0b11111111111111111111111111111111);
+ const all_0 = V(0b00000000000000000000000000000000);
+ const low_1 = V(0b00000000000000000000000000000001);
+ const low_0 = V(0b11111111111111111111111111111110);
+ const high_1 = V(0b10000000000000000000000000000000);
+ const high_0 = V(0b01111111111111111111111111111111);
+ const pattern = V(
+ 0b10001001010100100010010100100010,
+ 0b11001110001100111000110011100011,
+ 0b10101010101010101010101010101010,
+ 0b01010101010101010101010101010101
+ );
+
+ const cases = [
+ { input: [all_0, all_0, u32(0), u32(32)], expected: all_0 },
+ { input: [all_0, all_0, u32(1), u32(10)], expected: all_0 },
+ { input: [all_0, all_0, u32(2), u32(5)], expected: all_0 },
+ { input: [all_0, all_0, u32(0), u32(1)], expected: all_0 },
+ { input: [all_0, all_0, u32(31), u32(1)], expected: all_0 },
+
+ { input: [all_0, all_1, u32(0), u32(32)], expected: all_1 },
+ { input: [all_1, all_0, u32(0), u32(32)], expected: all_0 },
+ { input: [all_0, all_1, u32(0), u32(1)], expected: low_1 },
+ { input: [all_1, all_0, u32(0), u32(1)], expected: low_0 },
+ { input: [all_0, all_1, u32(31), u32(1)], expected: high_1 },
+ { input: [all_1, all_0, u32(31), u32(1)], expected: high_0 },
+ { input: [all_0, all_1, u32(1), u32(10)], expected: V(0b00000000000000000000011111111110) },
+ { input: [all_1, all_0, u32(1), u32(10)], expected: V(0b11111111111111111111100000000001) },
+ { input: [all_0, all_1, u32(2), u32(5)], expected: V(0b00000000000000000000000001111100) },
+ { input: [all_1, all_0, u32(2), u32(5)], expected: V(0b11111111111111111111111110000011) },
+
+ // Patterns
+ { input: [all_0, pattern, u32(0), u32(32)], expected: pattern },
+ { input: [all_1, pattern, u32(0), u32(32)], expected: pattern },
+ {
+ input: [all_0, pattern, u32(1), u32(31)],
+ expected: V(
+ 0b00010010101001000100101001000100,
+ 0b10011100011001110001100111000110,
+ 0b01010101010101010101010101010100,
+ 0b10101010101010101010101010101010
+ ),
+ },
+ {
+ input: [all_1, pattern, u32(1), u32(31)],
+ expected: V(
+ 0b00010010101001000100101001000101,
+ 0b10011100011001110001100111000111,
+ 0b01010101010101010101010101010101,
+ 0b10101010101010101010101010101011
+ ),
+ },
+ {
+ input: [all_0, pattern, u32(14), u32(18)],
+ expected: V(
+ 0b10001001010010001000000000000000,
+ 0b11100011001110001100000000000000,
+ 0b10101010101010101000000000000000,
+ 0b01010101010101010100000000000000
+ ),
+ },
+ {
+ input: [all_1, pattern, u32(14), u32(18)],
+ expected: V(
+ 0b10001001010010001011111111111111,
+ 0b11100011001110001111111111111111,
+ 0b10101010101010101011111111111111,
+ 0b01010101010101010111111111111111
+ ),
+ },
+ {
+ input: [all_0, pattern, u32(14), u32(7)],
+ expected: V(
+ 0b00000000000010001000000000000000,
+ 0b00000000000110001100000000000000,
+ 0b00000000000010101000000000000000,
+ 0b00000000000101010100000000000000
+ ),
+ },
+ {
+ input: [all_1, pattern, u32(14), u32(7)],
+ expected: V(
+ 0b11111111111010001011111111111111,
+ 0b11111111111110001111111111111111,
+ 0b11111111111010101011111111111111,
+ 0b11111111111101010111111111111111
+ ),
+ },
+ {
+ input: [all_0, pattern, u32(14), u32(4)],
+ expected: V(
+ 0b00000000000000001000000000000000,
+ 0b00000000000000001100000000000000,
+ 0b00000000000000101000000000000000,
+ 0b00000000000000010100000000000000
+ ),
+ },
+ {
+ input: [all_1, pattern, u32(14), u32(4)],
+ expected: V(
+ 0b11111111111111001011111111111111,
+ 0b11111111111111001111111111111111,
+ 0b11111111111111101011111111111111,
+ 0b11111111111111010111111111111111
+ ),
+ },
+ {
+ input: [all_0, pattern, u32(14), u32(3)],
+ expected: V(
+ 0b00000000000000001000000000000000,
+ 0b00000000000000001100000000000000,
+ 0b00000000000000001000000000000000,
+ 0b00000000000000010100000000000000
+ ),
+ },
+ {
+ input: [all_1, pattern, u32(14), u32(3)],
+ expected: V(
+ 0b11111111111111101011111111111111,
+ 0b11111111111111101111111111111111,
+ 0b11111111111111101011111111111111,
+ 0b11111111111111110111111111111111
+ ),
+ },
+ {
+ input: [all_0, pattern, u32(18), u32(3)],
+ expected: V(
+ 0b00000000000010000000000000000000,
+ 0b00000000000011000000000000000000,
+ 0b00000000000010000000000000000000,
+ 0b00000000000101000000000000000000
+ ),
+ },
+ {
+ input: [all_1, pattern, u32(18), u32(3)],
+ expected: V(
+ 0b11111111111010111111111111111111,
+ 0b11111111111011111111111111111111,
+ 0b11111111111010111111111111111111,
+ 0b11111111111101111111111111111111
+ ),
+ },
+ {
+ input: [pattern, all_0, u32(1), u32(31)],
+ expected: V(
+ 0b00000000000000000000000000000000,
+ 0b00000000000000000000000000000001,
+ 0b00000000000000000000000000000000,
+ 0b00000000000000000000000000000001
+ ),
+ },
+ {
+ input: [pattern, all_1, u32(1), u32(31)],
+ expected: V(
+ 0b11111111111111111111111111111110,
+ 0b11111111111111111111111111111111,
+ 0b11111111111111111111111111111110,
+ 0b11111111111111111111111111111111
+ ),
+ },
+ {
+ input: [pattern, all_0, u32(14), u32(18)],
+ expected: V(
+ 0b00000000000000000010010100100010,
+ 0b00000000000000000000110011100011,
+ 0b00000000000000000010101010101010,
+ 0b00000000000000000001010101010101
+ ),
+ },
+ {
+ input: [pattern, all_1, u32(14), u32(18)],
+ expected: V(
+ 0b11111111111111111110010100100010,
+ 0b11111111111111111100110011100011,
+ 0b11111111111111111110101010101010,
+ 0b11111111111111111101010101010101
+ ),
+ },
+ {
+ input: [pattern, all_0, u32(14), u32(7)],
+ expected: V(
+ 0b10001001010000000010010100100010,
+ 0b11001110001000000000110011100011,
+ 0b10101010101000000010101010101010,
+ 0b01010101010000000001010101010101
+ ),
+ },
+ {
+ input: [pattern, all_1, u32(14), u32(7)],
+ expected: V(
+ 0b10001001010111111110010100100010,
+ 0b11001110001111111100110011100011,
+ 0b10101010101111111110101010101010,
+ 0b01010101010111111101010101010101
+ ),
+ },
+ {
+ input: [pattern, all_0, u32(14), u32(4)],
+ expected: V(
+ 0b10001001010100000010010100100010,
+ 0b11001110001100000000110011100011,
+ 0b10101010101010000010101010101010,
+ 0b01010101010101000001010101010101
+ ),
+ },
+ {
+ input: [pattern, all_1, u32(14), u32(4)],
+ expected: V(
+ 0b10001001010100111110010100100010,
+ 0b11001110001100111100110011100011,
+ 0b10101010101010111110101010101010,
+ 0b01010101010101111101010101010101
+ ),
+ },
+ {
+ input: [pattern, all_0, u32(14), u32(3)],
+ expected: V(
+ 0b10001001010100100010010100100010,
+ 0b11001110001100100000110011100011,
+ 0b10101010101010100010101010101010,
+ 0b01010101010101000001010101010101
+ ),
+ },
+ {
+ input: [pattern, all_1, u32(14), u32(3)],
+ expected: V(
+ 0b10001001010100111110010100100010,
+ 0b11001110001100111100110011100011,
+ 0b10101010101010111110101010101010,
+ 0b01010101010101011101010101010101
+ ),
+ },
+ {
+ input: [pattern, all_0, u32(18), u32(3)],
+ expected: V(
+ 0b10001001010000100010010100100010,
+ 0b11001110001000111000110011100011,
+ 0b10101010101000101010101010101010,
+ 0b01010101010000010101010101010101
+ ),
+ },
+ {
+ input: [pattern, all_1, u32(18), u32(3)],
+ expected: V(
+ 0b10001001010111100010010100100010,
+ 0b11001110001111111000110011100011,
+ 0b10101010101111101010101010101010,
+ 0b01010101010111010101010101010101
+ ),
+ },
+ {
+ input: [pattern, pattern, u32(18), u32(3)],
+ expected: V(
+ 0b10001001010010100010010100100010,
+ 0b11001110001011111000110011100011,
+ 0b10101010101010101010101010101010,
+ 0b01010101010101010101010101010101
+ ),
+ },
+ {
+ input: [pattern, pattern, u32(14), u32(7)],
+ expected: V(
+ 0b10001001010010001010010100100010,
+ 0b11001110001110001100110011100011,
+ 0b10101010101010101010101010101010,
+ 0b01010101010101010101010101010101
+ ),
+ },
+
+ // Zero count
+ { input: [pattern, all_1, u32(0), u32(0)], expected: pattern },
+ { input: [pattern, all_1, u32(1), u32(0)], expected: pattern },
+ { input: [pattern, all_1, u32(2), u32(0)], expected: pattern },
+ { input: [pattern, all_1, u32(31), u32(0)], expected: pattern },
+ { input: [pattern, all_1, u32(32), u32(0)], expected: pattern },
+ { input: [pattern, all_1, u32(0), u32(0)], expected: pattern },
+ ];
+
+ if (t.params.inputSource !== 'const') {
+ cases.push(
+ ...[
+ // Start overflow
+ { input: [all_0, pattern, u32(50), u32(3)], expected: all_0 },
+ { input: [all_1, pattern, u32(50), u32(3)], expected: all_1 },
+ { input: [pattern, pattern, u32(50), u32(3)], expected: pattern },
+
+ // End overflow
+ { input: [all_0, pattern, u32(0), u32(99)], expected: pattern },
+ { input: [all_1, pattern, u32(0), u32(99)], expected: pattern },
+ { input: [all_0, low_1, u32(31), u32(99)], expected: high_1 },
+ {
+ input: [pattern, pattern, u32(20), u32(99)],
+ expected: V(
+ 0b01010010001000100010010100100010,
+ 0b11001110001100111000110011100011,
+ 0b10101010101010101010101010101010,
+ 0b01010101010101010101010101010101
+ ),
+ },
+ ]
+ );
+ }
+
+ await run(t, builtin('insertBits'), [T, T, TypeU32, TypeU32], T, cfg, cases);
+ });
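Worked instances of the clamping rules quoted in this file's header, for the 32-bit types used here (w = 32):

  // offset = 50, count = 3  ->  o = min(50, 32) = 32, c = min(3, 32 - 32) = 0,
  //                             so the result is e unchanged ("Start overflow" cases above).
  // offset = 0,  count = 99 ->  o = 0, c = min(99, 32 - 0) = 32,
  //                             so every bit comes from newbits ("End overflow" cases above).

Those overflow cases are appended only when inputSource is not 'const', presumably because out-of-range offset/count values are rejected during constant evaluation rather than clamped.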
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/inversesqrt.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/inversesqrt.spec.ts
new file mode 100644
index 0000000000..3e83816387
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/inversesqrt.spec.ts
@@ -0,0 +1,81 @@
+export const description = `
+Execution tests for the 'inverseSqrt' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn inverseSqrt(e: T ) -> T
+Returns the reciprocal of sqrt(e). Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { kValue } from '../../../../../util/constants.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { biasedRange, linearRange } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('inverseSqrt', {
+ f32: () => {
+ return FP.f32.generateScalarToIntervalCases(
+ [
+ // 0 < x <= 1 linearly spread
+ ...linearRange(kValue.f32.positive.min, 1, 100),
+ // 1 <= x < 2^32, biased towards 1
+ ...biasedRange(1, 2 ** 32, 1000),
+ ],
+ 'unfiltered',
+ FP.f32.inverseSqrtInterval
+ );
+ },
+ f16: () => {
+ return FP.f16.generateScalarToIntervalCases(
+ [
+ // 0 < x <= 1 linearly spread
+ ...linearRange(kValue.f16.positive.min, 1, 100),
+ // 1 <= x < 2^15, biased towards 1
+ ...biasedRange(1, 2 ** 15, 1000),
+ ],
+ 'unfiltered',
+ FP.f16.inverseSqrtInterval
+ );
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('f32');
+ await run(t, builtin('inverseSqrt'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, builtin('inverseSqrt'), [TypeF16], TypeF16, t.params, cases);
+ });
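A worked instance of the definition in this file's header: inverseSqrt(4.0) = 1 / sqrt(4.0) = 0.5. The two sampled ranges in the case cache cover (0, 1] with linear spacing and [1, 2^32) for f32 (or [1, 2^15) for f16) with spacing biased toward 1.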
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/ldexp.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/ldexp.spec.ts
new file mode 100644
index 0000000000..3829867752
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/ldexp.spec.ts
@@ -0,0 +1,121 @@
+export const description = `
+Execution tests for the 'ldexp' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+
+K is AbstractInt, i32
+I is K or vecN<K>, where
+ I is a scalar if T is a scalar, or a vector when T is a vector
+
+@const fn ldexp(e1: T ,e2: I ) -> T
+Returns e1 * 2^e2. Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { assert } from '../../../../../../common/util/util.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { anyOf } from '../../../../../util/compare.js';
+import { i32, TypeF32, TypeF16, TypeI32 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import {
+ biasedRange,
+ quantizeToI32,
+ sparseF32Range,
+ sparseI32Range,
+ sparseF16Range,
+} from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, Case, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+const bias = {
+ f32: 127,
+ f16: 15,
+} as const;
+
+// ldexpInterval's return interval doesn't cover the flush-to-zero cases when e2 + bias <= 0, so
+// those cases need special handling here.
+// See the comment block on ldexpInterval for more details.
+// e2 is an integer (i32), while e1 is a float.
+const makeCase = (trait: 'f32' | 'f16', e1: number, e2: number): Case => {
+ const FPTrait = FP[trait];
+ e1 = FPTrait.quantize(e1);
+ // e2 should be in i32 range for the convinience.
+ assert(-2147483648 <= e2 && e2 <= 2147483647, 'e2 should be in i32 range');
+ e2 = quantizeToI32(e2);
+
+ const expected = FPTrait.ldexpInterval(e1, e2);
+
+ // Result may be zero if e2 + bias <= 0
+ if (e2 + bias[trait] <= 0) {
+ return {
+ input: [FPTrait.scalarBuilder(e1), i32(e2)],
+ expected: anyOf(expected, FPTrait.constants().zeroInterval),
+ };
+ }
+
+ return { input: [FPTrait.scalarBuilder(e1), i32(e2)], expected };
+};
+
+export const d = makeCaseCache('ldexp', {
+ f32_non_const: () => {
+ return sparseF32Range().flatMap(e1 => sparseI32Range().map(e2 => makeCase('f32', e1, e2)));
+ },
+ f32_const: () => {
+ return sparseF32Range().flatMap(e1 =>
+ biasedRange(-bias.f32 - 10, bias.f32 + 1, 10).flatMap(e2 =>
+ FP.f32.isFinite(e1 * 2 ** quantizeToI32(e2)) ? makeCase('f32', e1, e2) : []
+ )
+ );
+ },
+ f16_non_const: () => {
+ return sparseF16Range().flatMap(e1 => sparseI32Range().map(e2 => makeCase('f16', e1, e2)));
+ },
+ f16_const: () => {
+ return sparseF16Range().flatMap(e1 =>
+ biasedRange(-bias.f16 - 10, bias.f16 + 1, 10).flatMap(e2 =>
+ FP.f16.isFinite(e1 * 2 ** quantizeToI32(e2)) ? makeCase('f16', e1, e2) : []
+ )
+ );
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+  .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f32_const' : 'f32_non_const');
+ await run(t, builtin('ldexp'), [TypeF32, TypeI32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f16_const' : 'f16_non_const');
+ await run(t, builtin('ldexp'), [TypeF16, TypeI32], TypeF16, t.params, cases);
+ });
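A worked instance of the flush-to-zero branch in makeCase above, for f32 (bias = 127): ldexp(1.0, -130) = 1.0 * 2^-130, and -130 + 127 = -3 <= 0, so the mathematically exact result is subnormal and may legally be flushed to zero. The expected value therefore becomes anyOf(FP.f32.ldexpInterval(1.0, -130), FP.f32.constants().zeroInterval), which is exactly what the code constructs.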
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/length.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/length.spec.ts
new file mode 100644
index 0000000000..85c1f85169
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/length.spec.ts
@@ -0,0 +1,178 @@
+export const description = `
+Execution tests for the 'length' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn length(e: T ) -> f32
+Returns the length of e (e.g. abs(e) if T is a scalar, or sqrt(e[0]^2 + e[1]^2 + ...) if T is a vector).
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16, TypeVec } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import {
+ fullF32Range,
+ fullF16Range,
+ vectorF32Range,
+ vectorF16Range,
+} from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Cases: f32_vecN_[non_]const
+const f32_vec_cases = ([2, 3, 4] as const)
+ .flatMap(n =>
+ ([true, false] as const).map(nonConst => ({
+ [`f32_vec${n}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateVectorToIntervalCases(
+ vectorF32Range(n),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f32.lengthInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+// Cases: f16_vecN_[non_]const
+const f16_vec_cases = ([2, 3, 4] as const)
+ .flatMap(n =>
+ ([true, false] as const).map(nonConst => ({
+ [`f16_vec${n}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateVectorToIntervalCases(
+ vectorF16Range(n),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f16.lengthInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('length', {
+ f32: () => {
+ return FP.f32.generateScalarToIntervalCases(
+ fullF32Range(),
+ 'unfiltered',
+ FP.f32.lengthInterval
+ );
+ },
+ ...f32_vec_cases,
+ f16: () => {
+ return FP.f16.generateScalarToIntervalCases(
+ fullF16Range(),
+ 'unfiltered',
+ FP.f16.lengthInterval
+ );
+ },
+ ...f16_vec_cases,
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get('f32');
+ await run(t, builtin('length'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f32_vec2')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f32 tests using vec2s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f32_vec2_const' : 'f32_vec2_non_const'
+ );
+ await run(t, builtin('length'), [TypeVec(2, TypeF32)], TypeF32, t.params, cases);
+ });
+
+g.test('f32_vec3')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f32 tests using vec3s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f32_vec3_const' : 'f32_vec3_non_const'
+ );
+ await run(t, builtin('length'), [TypeVec(3, TypeF32)], TypeF32, t.params, cases);
+ });
+
+g.test('f32_vec4')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f32 tests using vec4s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f32_vec4_const' : 'f32_vec4_non_const'
+ );
+ await run(t, builtin('length'), [TypeVec(4, TypeF32)], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, builtin('length'), [TypeF16], TypeF16, t.params, cases);
+ });
+
+g.test('f16_vec2')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f16 tests using vec2s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f16_vec2_const' : 'f16_vec2_non_const'
+ );
+ await run(t, builtin('length'), [TypeVec(2, TypeF16)], TypeF16, t.params, cases);
+ });
+
+g.test('f16_vec3')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f16 tests using vec3s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f16_vec3_const' : 'f16_vec3_non_const'
+ );
+ await run(t, builtin('length'), [TypeVec(3, TypeF16)], TypeF16, t.params, cases);
+ });
+
+g.test('f16_vec4')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f16 tests using vec4s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f16_vec4_const' : 'f16_vec4_non_const'
+ );
+ await run(t, builtin('length'), [TypeVec(4, TypeF16)], TypeF16, t.params, cases);
+ });
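Worked instances of the definition quoted in this file's header (reference arithmetic only):

  // length(-2.5)           = abs(-2.5)           = 2.5
  // length(vec2(3.0, 4.0)) = sqrt(3.0^2 + 4.0^2) = 5.0

The vector case caches are split into *_const and *_non_const variants presumably because the intermediate sum of squares can overflow: the 'finite' filter drops such cases for the const input source, while 'unfiltered' keeps them for runtime evaluation.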
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/log.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/log.spec.ts
new file mode 100644
index 0000000000..ac60e2b1bc
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/log.spec.ts
@@ -0,0 +1,89 @@
+export const description = `
+Execution tests for the 'log' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn log(e: T ) -> T
+Returns the natural logarithm of e. Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { kValue } from '../../../../../util/constants.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { biasedRange, fullF32Range, fullF16Range, linearRange } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// log's accuracy is defined in three regions { [0, 0.5), [0.5, 2.0], (2.0, +∞) }
+const f32_inputs = [
+ ...linearRange(kValue.f32.positive.min, 0.5, 20),
+ ...linearRange(0.5, 2.0, 20),
+ ...biasedRange(2.0, 2 ** 32, 1000),
+ ...fullF32Range(),
+];
+const f16_inputs = [
+ ...linearRange(kValue.f16.positive.min, 0.5, 20),
+ ...linearRange(0.5, 2.0, 20),
+ ...biasedRange(2.0, 2 ** 32, 1000),
+ ...fullF16Range(),
+];
+
+export const d = makeCaseCache('log', {
+ f32_const: () => {
+ return FP.f32.generateScalarToIntervalCases(f32_inputs, 'finite', FP.f32.logInterval);
+ },
+ f32_non_const: () => {
+ return FP.f32.generateScalarToIntervalCases(f32_inputs, 'unfiltered', FP.f32.logInterval);
+ },
+ f16_const: () => {
+ return FP.f16.generateScalarToIntervalCases(f16_inputs, 'finite', FP.f16.logInterval);
+ },
+ f16_non_const: () => {
+ return FP.f16.generateScalarToIntervalCases(f16_inputs, 'unfiltered', FP.f16.logInterval);
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+f32 tests
+
+TODO(#792): Decide what the ground-truth is for these tests. [1]
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f32_const' : 'f32_non_const');
+ await run(t, builtin('log'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f16_const' : 'f16_non_const');
+ await run(t, builtin('log'), [TypeF16], TypeF16, t.params, cases);
+ });
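The sampled inputs above mirror the three accuracy regions named in the comment (shown for f32; the f16 list is analogous):

  // linearRange(kValue.f32.positive.min, 0.5, 20)  -> 20 evenly spaced samples of [positive.min, 0.5]
  // linearRange(0.5, 2.0, 20)                      -> samples of [0.5, 2.0]
  // biasedRange(2.0, 2 ** 32, 1000)                -> samples above 2.0, biased toward 2.0
  // fullF32Range()                                 -> broad coverage, including non-positive inputs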
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/log2.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/log2.spec.ts
new file mode 100644
index 0000000000..37931579b9
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/log2.spec.ts
@@ -0,0 +1,89 @@
+export const description = `
+Execution tests for the 'log2' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn log2(e: T ) -> T
+Returns the base-2 logarithm of e. Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { kValue } from '../../../../../util/constants.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { biasedRange, fullF32Range, fullF16Range, linearRange } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// log2's accuracy is defined in three regions { [0, 0.5), [0.5, 2.0], (2.0, +∞) }
+const f32_inputs = [
+ ...linearRange(kValue.f32.positive.min, 0.5, 20),
+ ...linearRange(0.5, 2.0, 20),
+ ...biasedRange(2.0, 2 ** 32, 1000),
+ ...fullF32Range(),
+];
+const f16_inputs = [
+ ...linearRange(kValue.f16.positive.min, 0.5, 20),
+ ...linearRange(0.5, 2.0, 20),
+ ...biasedRange(2.0, 2 ** 32, 1000),
+ ...fullF16Range(),
+];
+
+export const d = makeCaseCache('log2', {
+ f32_const: () => {
+ return FP.f32.generateScalarToIntervalCases(f32_inputs, 'finite', FP.f32.log2Interval);
+ },
+ f32_non_const: () => {
+ return FP.f32.generateScalarToIntervalCases(f32_inputs, 'unfiltered', FP.f32.log2Interval);
+ },
+ f16_const: () => {
+ return FP.f16.generateScalarToIntervalCases(f16_inputs, 'finite', FP.f16.log2Interval);
+ },
+ f16_non_const: () => {
+ return FP.f16.generateScalarToIntervalCases(f16_inputs, 'unfiltered', FP.f16.log2Interval);
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+f32 tests
+
+TODO(#792): Decide what the ground-truth is for these tests. [1]
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f32_const' : 'f32_non_const');
+ await run(t, builtin('log2'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f16_const' : 'f16_non_const');
+ await run(t, builtin('log2'), [TypeF16], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/max.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/max.spec.ts
new file mode 100644
index 0000000000..6654b4951c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/max.spec.ts
@@ -0,0 +1,165 @@
+export const description = `
+Execution tests for the 'max' builtin function
+
+S is AbstractInt, i32, or u32
+T is S or vecN<S>
+@const fn max(e1: T ,e2: T) -> T
+Returns e2 if e1 is less than e2, and e1 otherwise. Component-wise when T is a vector.
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn max(e1: T ,e2: T) -> T
+Returns e2 if e1 is less than e2, and e1 otherwise.
+If one operand is a NaN, the other is returned.
+If both operands are NaNs, a NaN is returned.
+Component-wise when T is a vector.
+
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import {
+ i32,
+ TypeF32,
+ TypeF16,
+ TypeI32,
+ TypeU32,
+ u32,
+ TypeAbstractFloat,
+} from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullF32Range, fullF16Range, sparseF64Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, Case, onlyConstInputSource, run } from '../../expression.js';
+
+import { abstractBuiltin, builtin } from './builtin.js';
+
+/** Generate set of max test cases from list of interesting values */
+function generateTestCases(
+ values: Array<number>,
+ makeCase: (x: number, y: number) => Case
+): Array<Case> {
+ const cases = new Array<Case>();
+ values.forEach(e => {
+ values.forEach(f => {
+ cases.push(makeCase(e, f));
+ });
+ });
+ return cases;
+}
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('max', {
+ f32: () => {
+ return FP.f32.generateScalarPairToIntervalCases(
+ fullF32Range(),
+ fullF32Range(),
+ 'unfiltered',
+ FP.f32.maxInterval
+ );
+ },
+ f16: () => {
+ return FP.f16.generateScalarPairToIntervalCases(
+ fullF16Range(),
+ fullF16Range(),
+ 'unfiltered',
+ FP.f16.maxInterval
+ );
+ },
+ abstract: () => {
+ return FP.abstract.generateScalarPairToIntervalCases(
+ sparseF64Range(),
+ sparseF64Range(),
+ 'unfiltered',
+ FP.abstract.maxInterval
+ );
+ },
+});
+
+g.test('abstract_int')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`abstract int tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('u32')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`u32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const makeCase = (x: number, y: number): Case => {
+ return { input: [u32(x), u32(y)], expected: u32(Math.max(x, y)) };
+ };
+
+ const test_values: Array<number> = [0, 1, 2, 0x70000000, 0x80000000, 0xffffffff];
+ const cases = generateTestCases(test_values, makeCase);
+
+ await run(t, builtin('max'), [TypeU32, TypeU32], TypeU32, t.params, cases);
+ });
+
+g.test('i32')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`i32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const makeCase = (x: number, y: number): Case => {
+ return { input: [i32(x), i32(y)], expected: i32(Math.max(x, y)) };
+ };
+
+ const test_values: Array<number> = [-0x70000000, -2, -1, 0, 1, 2, 0x70000000];
+ const cases = generateTestCases(test_values, makeCase);
+
+ await run(t, builtin('max'), [TypeI32, TypeI32], TypeI32, t.params, cases);
+ });
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u
+ .combine('inputSource', onlyConstInputSource)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('abstract');
+ await run(
+ t,
+ abstractBuiltin('max'),
+ [TypeAbstractFloat, TypeAbstractFloat],
+ TypeAbstractFloat,
+ t.params,
+ cases
+ );
+ });
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('f32');
+ await run(t, builtin('max'), [TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, builtin('max'), [TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
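generateTestCases builds the full cross product of the listed values, so n interesting values produce n^2 cases. A hypothetical two-value list with the u32 makeCase used above would expand to:

  // [0, 1]  ->  max(0, 0) = 0, max(0, 1) = 1, max(1, 0) = 1, max(1, 1) = 1

which guarantees that boundary values such as 0x80000000 and 0xffffffff are paired against every other entry. The same helper is repeated in min.spec.ts below.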
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/min.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/min.spec.ts
new file mode 100644
index 0000000000..6c05319546
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/min.spec.ts
@@ -0,0 +1,164 @@
+export const description = `
+Execution tests for the 'min' builtin function
+
+S is AbstractInt, i32, or u32
+T is S or vecN<S>
+@const fn min(e1: T ,e2: T) -> T
+Returns e1 if e1 is less than e2, and e2 otherwise. Component-wise when T is a vector.
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn min(e1: T ,e2: T) -> T
+Returns e2 if e2 is less than e1, and e1 otherwise.
+If one operand is a NaN, the other is returned.
+If both operands are NaNs, a NaN is returned.
+Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import {
+ i32,
+ TypeF32,
+ TypeF16,
+ TypeI32,
+ TypeU32,
+ u32,
+ TypeAbstractFloat,
+} from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullF32Range, fullF16Range, sparseF64Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, Case, onlyConstInputSource, run } from '../../expression.js';
+
+import { abstractBuiltin, builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('min', {
+ f32: () => {
+ return FP.f32.generateScalarPairToIntervalCases(
+ fullF32Range(),
+ fullF32Range(),
+ 'unfiltered',
+ FP.f32.minInterval
+ );
+ },
+ f16: () => {
+ return FP.f16.generateScalarPairToIntervalCases(
+ fullF16Range(),
+ fullF16Range(),
+ 'unfiltered',
+ FP.f16.minInterval
+ );
+ },
+ abstract: () => {
+ return FP.abstract.generateScalarPairToIntervalCases(
+ sparseF64Range(),
+ sparseF64Range(),
+ 'unfiltered',
+ FP.abstract.minInterval
+ );
+ },
+});
+
+/** Generate set of min test cases from list of interesting values */
+function generateTestCases(
+ values: Array<number>,
+ makeCase: (x: number, y: number) => Case
+): Array<Case> {
+ const cases = new Array<Case>();
+ values.forEach(e => {
+ values.forEach(f => {
+ cases.push(makeCase(e, f));
+ });
+ });
+ return cases;
+}
+
+g.test('abstract_int')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`abstract int tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('u32')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`u32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const makeCase = (x: number, y: number): Case => {
+ return { input: [u32(x), u32(y)], expected: u32(Math.min(x, y)) };
+ };
+
+ const test_values: Array<number> = [0, 1, 2, 0x70000000, 0x80000000, 0xffffffff];
+ const cases = generateTestCases(test_values, makeCase);
+
+ await run(t, builtin('min'), [TypeU32, TypeU32], TypeU32, t.params, cases);
+ });
+
+g.test('i32')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`i32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const makeCase = (x: number, y: number): Case => {
+ return { input: [i32(x), i32(y)], expected: i32(Math.min(x, y)) };
+ };
+
+ const test_values: Array<number> = [-0x70000000, -2, -1, 0, 1, 2, 0x70000000];
+ const cases = generateTestCases(test_values, makeCase);
+
+ await run(t, builtin('min'), [TypeI32, TypeI32], TypeI32, t.params, cases);
+ });
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u
+ .combine('inputSource', onlyConstInputSource)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('abstract');
+ await run(
+ t,
+ abstractBuiltin('min'),
+ [TypeAbstractFloat, TypeAbstractFloat],
+ TypeAbstractFloat,
+ t.params,
+ cases
+ );
+ });
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('f32');
+ await run(t, builtin('min'), [TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, builtin('min'), [TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/mix.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/mix.spec.ts
new file mode 100644
index 0000000000..95e9f6b310
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/mix.spec.ts
@@ -0,0 +1,275 @@
+export const description = `
+Execution tests for the 'mix' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn mix(e1: T, e2: T, e3: T) -> T
+Returns the linear blend of e1 and e2 (e.g. e1*(1-e3)+e2*e3). Component-wise when T is a vector.
+
+T is AbstractFloat, f32, or f16
+T2 is vecN<T>
+@const fn mix(e1: T2, e2: T2, e3: T) -> T2
+Returns the component-wise linear blend of e1 and e2, using scalar blending factor e3 for each component.
+Same as mix(e1,e2,T2(e3)).
+
+`;
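+
+// A worked example of the blend formula above (illustrative only):
+//   mix(2.0, 10.0, 0.25) = 2.0 * (1 - 0.25) + 10.0 * 0.25 = 1.5 + 2.5 = 4.0
+// With the scalar-factor overload, mix(vec2(0.0, 8.0), vec2(4.0, 0.0), 0.5)
+// yields vec2(2.0, 4.0).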
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeVec, TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import {
+ sparseF32Range,
+ sparseF16Range,
+ sparseVectorF32Range,
+ sparseVectorF16Range,
+} from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Cases: f32_vecN_scalar_[non_]const
+const f32_vec_scalar_cases = ([2, 3, 4] as const)
+ .flatMap(n =>
+ ([true, false] as const).map(nonConst => ({
+ [`f32_vec${n}_scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateVectorPairScalarToVectorComponentWiseCase(
+ sparseVectorF32Range(n),
+ sparseVectorF32Range(n),
+ sparseF32Range(),
+ nonConst ? 'unfiltered' : 'finite',
+ ...FP.f32.mixIntervals
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+// Cases: f16_vecN_scalar_[non_]const
+const f16_vec_scalar_cases = ([2, 3, 4] as const)
+ .flatMap(n =>
+ ([true, false] as const).map(nonConst => ({
+ [`f16_vec${n}_scalar_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateVectorPairScalarToVectorComponentWiseCase(
+ sparseVectorF16Range(n),
+ sparseVectorF16Range(n),
+ sparseF16Range(),
+ nonConst ? 'unfiltered' : 'finite',
+ ...FP.f16.mixIntervals
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('mix', {
+ f32_const: () => {
+ return FP.f32.generateScalarTripleToIntervalCases(
+ sparseF32Range(),
+ sparseF32Range(),
+ sparseF32Range(),
+ 'finite',
+ ...FP.f32.mixIntervals
+ );
+ },
+ f32_non_const: () => {
+ return FP.f32.generateScalarTripleToIntervalCases(
+ sparseF32Range(),
+ sparseF32Range(),
+ sparseF32Range(),
+ 'unfiltered',
+ ...FP.f32.mixIntervals
+ );
+ },
+ ...f32_vec_scalar_cases,
+ f16_const: () => {
+ return FP.f16.generateScalarTripleToIntervalCases(
+ sparseF16Range(),
+ sparseF16Range(),
+ sparseF16Range(),
+ 'finite',
+ ...FP.f16.mixIntervals
+ );
+ },
+ f16_non_const: () => {
+ return FP.f16.generateScalarTripleToIntervalCases(
+ sparseF16Range(),
+ sparseF16Range(),
+ sparseF16Range(),
+ 'unfiltered',
+ ...FP.f16.mixIntervals
+ );
+ },
+ ...f16_vec_scalar_cases,
+});
+
+g.test('abstract_float_matching')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract_float test with matching third param`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('abstract_float_nonmatching_vec2')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract_float tests with two vec2<abstract_float> params and scalar third param`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .unimplemented();
+
+g.test('abstract_float_nonmatching_vec3')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract_float tests with two vec3<abstract_float> params and scalar third param`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .unimplemented();
+
+g.test('abstract_float_nonmatching_vec4')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract_float tests with two vec4<abstract_float> params and scalar third param`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .unimplemented();
+
+g.test('f32_matching')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 test with matching third param`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f32_const' : 'f32_non_const');
+ await run(t, builtin('mix'), [TypeF32, TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f32_nonmatching_vec2')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests with two vec2<f32> params and scalar third param`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f32_vec2_scalar_const' : 'f32_vec2_scalar_non_const'
+ );
+ await run(
+ t,
+ builtin('mix'),
+ [TypeVec(2, TypeF32), TypeVec(2, TypeF32), TypeF32],
+ TypeVec(2, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f32_nonmatching_vec3')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests with two vec3<f32> params and scalar third param`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f32_vec3_scalar_const' : 'f32_vec3_scalar_non_const'
+ );
+ await run(
+ t,
+ builtin('mix'),
+ [TypeVec(3, TypeF32), TypeVec(3, TypeF32), TypeF32],
+ TypeVec(3, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f32_nonmatching_vec4')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests with two vec4<f32> params and scalar third param`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f32_vec4_scalar_const' : 'f32_vec4_scalar_non_const'
+ );
+ await run(
+ t,
+ builtin('mix'),
+ [TypeVec(4, TypeF32), TypeVec(4, TypeF32), TypeF32],
+ TypeVec(4, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f16_matching')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 test with matching third param`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f16_const' : 'f16_non_const');
+ await run(t, builtin('mix'), [TypeF16, TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
+
+g.test('f16_nonmatching_vec2')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests with two vec2<f16> params and scalar third param`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f16_vec2_scalar_const' : 'f16_vec2_scalar_non_const'
+ );
+ await run(
+ t,
+ builtin('mix'),
+ [TypeVec(2, TypeF16), TypeVec(2, TypeF16), TypeF16],
+ TypeVec(2, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f16_nonmatching_vec3')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests with two vec3<f16> params and scalar third param`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f16_vec3_scalar_const' : 'f16_vec3_scalar_non_const'
+ );
+ await run(
+ t,
+ builtin('mix'),
+ [TypeVec(3, TypeF16), TypeVec(3, TypeF16), TypeF16],
+ TypeVec(3, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f16_nonmatching_vec4')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests with two vec4<f16> params and scalar third param`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f16_vec4_scalar_const' : 'f16_vec4_scalar_non_const'
+ );
+ await run(
+ t,
+ builtin('mix'),
+ [TypeVec(4, TypeF16), TypeVec(4, TypeF16), TypeF16],
+ TypeVec(4, TypeF16),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/modf.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/modf.spec.ts
new file mode 100644
index 0000000000..1a3d8a2850
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/modf.spec.ts
@@ -0,0 +1,661 @@
+export const description = `
+Execution tests for the 'modf' builtin function
+
+T is f32 or f16 or AbstractFloat
+@const fn modf(e:T) -> result_struct
+Splits |e| into fractional and whole number parts.
+The fractional part is (|e| % 1.0), and the whole part is |e| minus the fractional part.
+Returns the result_struct for the given type.
+
+S is f32 or f16 or AbstractFloat
+T is vecN<S>
+@const fn modf(e:T) -> result_struct
+Splits the components of |e| into fractional and whole number parts.
+The |i|'th component of the whole and fractional parts equal the whole and fractional parts of modf(e[i]).
+Returns the result_struct for the given type.
+
+`;
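+
+// A worked example of the split above (illustrative only):
+//   modf(1.75)  -> { fract:  0.75, whole:  1.0 }
+//   modf(-1.75) -> { fract: -0.75, whole: -1.0 }  (both parts keep the sign of e)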
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import {
+ toVector,
+ TypeAbstractFloat,
+ TypeF16,
+ TypeF32,
+ TypeVec,
+} from '../../../../../util/conversion.js';
+import { FP, FPKind } from '../../../../../util/floating_point.js';
+import {
+ fullF16Range,
+ fullF32Range,
+ fullF64Range,
+ vectorF16Range,
+ vectorF32Range,
+ vectorF64Range,
+} from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import {
+ abstractFloatShaderBuilder,
+ allInputSources,
+ basicExpressionBuilder,
+ Case,
+ onlyConstInputSource,
+ run,
+ ShaderBuilder,
+} from '../../expression.js';
+
+export const g = makeTestGroup(GPUTest);
+
+/** @returns a ShaderBuilder that evaluates modf and returns .whole from the result structure */
+function wholeBuilder(): ShaderBuilder {
+ return basicExpressionBuilder(value => `modf(${value}).whole`);
+}
+
+/** @returns a ShaderBuilder that evaluates modf and returns .fract from the result structure */
+function fractBuilder(): ShaderBuilder {
+ return basicExpressionBuilder(value => `modf(${value}).fract`);
+}
+
+/** @returns a ShaderBuilder that evaluates modf and returns .whole from the result structure for AbstractFloats */
+function abstractWholeBuilder(): ShaderBuilder {
+ return abstractFloatShaderBuilder(value => `modf(${value}).whole`);
+}
+
+/** @returns a ShaderBuilder that evaluates modf and returns .fract from the result structure for AbstractFloats */
+function abstractFractBuilder(): ShaderBuilder {
+ return abstractFloatShaderBuilder(value => `modf(${value}).fract`);
+}
+
+/** @returns a fract Case for a scalar input */
+function makeScalarCaseFract(kind: FPKind, n: number): Case {
+ const fp = FP[kind];
+ n = fp.quantize(n);
+ const result = fp.modfInterval(n).fract;
+
+ return { input: fp.scalarBuilder(n), expected: result };
+}
+
+/** @returns a whole Case for a scalar input */
+function makeScalarCaseWhole(kind: FPKind, n: number): Case {
+ const fp = FP[kind];
+ n = fp.quantize(n);
+ const result = fp.modfInterval(n).whole;
+
+ return { input: fp.scalarBuilder(n), expected: result };
+}
+
+/** @returns a fract Case for a given vector input */
+function makeVectorCaseFract(kind: FPKind, v: readonly number[]): Case {
+ const fp = FP[kind];
+ v = v.map(fp.quantize);
+ const fs = v.map(e => {
+ return fp.modfInterval(e).fract;
+ });
+
+ return { input: toVector(v, fp.scalarBuilder), expected: fs };
+}
+
+/** @returns a whole Case for a given vector input */
+function makeVectorCaseWhole(kind: FPKind, v: readonly number[]): Case {
+ const fp = FP[kind];
+ v = v.map(fp.quantize);
+ const ws = v.map(e => {
+ return fp.modfInterval(e).whole;
+ });
+
+ return { input: toVector(v, fp.scalarBuilder), expected: ws };
+}
+
+const scalar_range = {
+ f32: fullF32Range(),
+ f16: fullF16Range(),
+ abstract: fullF64Range(),
+};
+
+const vector_range = {
+ f32: {
+ 2: vectorF32Range(2),
+ 3: vectorF32Range(3),
+ 4: vectorF32Range(4),
+ },
+ f16: {
+ 2: vectorF16Range(2),
+ 3: vectorF16Range(3),
+ 4: vectorF16Range(4),
+ },
+ abstract: {
+ 2: vectorF64Range(2),
+ 3: vectorF64Range(3),
+ 4: vectorF64Range(4),
+ },
+};
+
+// Cases: [f32|f16|abstract]_[fract|whole]
+const scalar_cases = (['f32', 'f16', 'abstract'] as const)
+ .flatMap(kind =>
+ (['whole', 'fract'] as const).map(portion => ({
+ [`${kind}_${portion}`]: () => {
+ const makeCase = portion === 'whole' ? makeScalarCaseWhole : makeScalarCaseFract;
+ return scalar_range[kind].map(makeCase.bind(null, kind));
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+// Cases: [f32|f16|abstract]_vecN_[fract|whole]
+const vec_cases = (['f32', 'f16', 'abstract'] as const)
+ .flatMap(kind =>
+ ([2, 3, 4] as const).flatMap(n =>
+ (['whole', 'fract'] as const).map(portion => ({
+ [`${kind}_vec${n}_${portion}`]: () => {
+ const makeCase = portion === 'whole' ? makeVectorCaseWhole : makeVectorCaseFract;
+ return vector_range[kind][n].map(makeCase.bind(null, kind));
+ },
+ }))
+ )
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('modf', {
+ ...scalar_cases,
+ ...vec_cases,
+});
+
+g.test('f32_fract')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is f32
+
+struct __modf_result_f32 {
+ fract : f32, // fractional part
+ whole : f32 // whole part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get('f32_fract');
+ await run(t, fractBuilder(), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f32_whole')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is f32
+
+struct __modf_result_f32 {
+ fract : f32, // fractional part
+ whole : f32 // whole part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get('f32_whole');
+ await run(t, wholeBuilder(), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f32_vec2_fract')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec2<f32>
+
+struct __modf_result_vec2_f32 {
+ fract : vec2<f32>, // fractional part
+ whole : vec2<f32> // whole part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get('f32_vec2_fract');
+ await run(t, fractBuilder(), [TypeVec(2, TypeF32)], TypeVec(2, TypeF32), t.params, cases);
+ });
+
+g.test('f32_vec2_whole')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec2<f32>
+
+struct __modf_result_vec2_f32 {
+ fract : vec2<f32>, // fractional part
+ whole : vec2<f32> // whole part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get('f32_vec2_whole');
+ await run(t, wholeBuilder(), [TypeVec(2, TypeF32)], TypeVec(2, TypeF32), t.params, cases);
+ });
+
+g.test('f32_vec3_fract')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec3<f32>
+
+struct __modf_result_vec3_f32 {
+ fract : vec3<f32>, // fractional part
+ whole : vec3<f32> // whole part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get('f32_vec3_fract');
+ await run(t, fractBuilder(), [TypeVec(3, TypeF32)], TypeVec(3, TypeF32), t.params, cases);
+ });
+
+g.test('f32_vec3_whole')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec3<f32>
+
+struct __modf_result_vec3_f32 {
+ fract : vec3<f32>, // fractional part
+ whole : vec3<f32> // whole part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get('f32_vec3_whole');
+ await run(t, wholeBuilder(), [TypeVec(3, TypeF32)], TypeVec(3, TypeF32), t.params, cases);
+ });
+
+g.test('f32_vec4_fract')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec4<f32>
+
+struct __modf_result_vec4_f32 {
+ fract : vec4<f32>, // fractional part
+ whole : vec4<f32> // whole part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get('f32_vec4_fract');
+ await run(t, fractBuilder(), [TypeVec(4, TypeF32)], TypeVec(4, TypeF32), t.params, cases);
+ });
+
+g.test('f32_vec4_whole')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec4<f32>
+
+struct __modf_result_vec4_f32 {
+ fract : vec4<f32>, // fractional part
+ whole : vec4<f32> // whole part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get('f32_vec4_whole');
+ await run(t, wholeBuilder(), [TypeVec(4, TypeF32)], TypeVec(4, TypeF32), t.params, cases);
+ });
+
+g.test('f16_fract')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is f16
+
+struct __modf_result_f16 {
+ fract : f16, // fractional part
+ whole : f16 // whole part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get('f16_fract');
+ await run(t, fractBuilder(), [TypeF16], TypeF16, t.params, cases);
+ });
+
+g.test('f16_whole')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is f16
+
+struct __modf_result_f16 {
+ fract : f16, // fractional part
+ whole : f16 // whole part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get('f16_whole');
+ await run(t, wholeBuilder(), [TypeF16], TypeF16, t.params, cases);
+ });
+
+g.test('f16_vec2_fract')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec2<f16>
+
+struct __modf_result_vec2_f16 {
+ fract : vec2<f16>, // fractional part
+ whole : vec2<f16> // whole part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get('f16_vec2_fract');
+ await run(t, fractBuilder(), [TypeVec(2, TypeF16)], TypeVec(2, TypeF16), t.params, cases);
+ });
+
+g.test('f16_vec2_whole')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec2<f16>
+
+struct __modf_result_vec2_f16 {
+ fract : vec2<f16>, // fractional part
+ whole : vec2<f16> // whole part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get('f16_vec2_whole');
+ await run(t, wholeBuilder(), [TypeVec(2, TypeF16)], TypeVec(2, TypeF16), t.params, cases);
+ });
+
+g.test('f16_vec3_fract')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec3<f16>
+
+struct __modf_result_vec3_f16 {
+ fract : vec3<f16>, // fractional part
+ whole : vec3<f16> // whole part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get('f16_vec3_fract');
+ await run(t, fractBuilder(), [TypeVec(3, TypeF16)], TypeVec(3, TypeF16), t.params, cases);
+ });
+
+g.test('f16_vec3_whole')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec3<f16>
+
+struct __modf_result_vec3_f16 {
+ fract : vec3<f16>, // fractional part
+ whole : vec3<f16> // whole part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get('f16_vec3_whole');
+ await run(t, wholeBuilder(), [TypeVec(3, TypeF16)], TypeVec(3, TypeF16), t.params, cases);
+ });
+
+g.test('f16_vec4_fract')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec4<f16>
+
+struct __modf_result_vec4_f16 {
+ fract : vec4<f16>, // fractional part
+ whole : vec4<f16> // whole part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get('f16_vec4_fract');
+ await run(t, fractBuilder(), [TypeVec(4, TypeF16)], TypeVec(4, TypeF16), t.params, cases);
+ });
+
+g.test('f16_vec4_whole')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec4<f16>
+
+struct __modf_result_vec4_f16 {
+ fract : vec4<f16>, // fractional part
+ whole : vec4<f16> // whole part
+}
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get('f16_vec4_whole');
+ await run(t, wholeBuilder(), [TypeVec(4, TypeF16)], TypeVec(4, TypeF16), t.params, cases);
+ });
+
+g.test('abstract_fract')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is AbstractFloat
+
+struct __modf_result_abstract {
+ fract : AbstractFloat, // fractional part
+ whole : AbstractFloat // whole part
+}
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource))
+ .fn(async t => {
+ const cases = await d.get('abstract_fract');
+ await run(t, abstractFractBuilder(), [TypeAbstractFloat], TypeAbstractFloat, t.params, cases);
+ });
+
+g.test('abstract_whole')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is AbstractFloat
+
+struct __modf_result_abstract {
+ fract : AbstractFloat, // fractional part
+ whole : AbstractFloat // whole part
+}
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource))
+ .fn(async t => {
+ const cases = await d.get('abstract_whole');
+ await run(t, abstractWholeBuilder(), [TypeAbstractFloat], TypeAbstractFloat, t.params, cases);
+ });
+
+g.test('abstract_vec2_fract')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec2<abstract>
+
+struct __modf_result_vec2_abstract {
+ fract : vec2<abstract>, // fractional part
+ whole : vec2<abstract> // whole part
+}
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource))
+ .fn(async t => {
+ const cases = await d.get('abstract_vec2_fract');
+ await run(
+ t,
+ abstractFractBuilder(),
+ [TypeVec(2, TypeAbstractFloat)],
+ TypeVec(2, TypeAbstractFloat),
+ t.params,
+ cases
+ );
+ });
+
+g.test('abstract_vec2_whole')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec2<abstract>
+
+struct __modf_result_vec2_abstract {
+ fract : vec2<abstract>, // fractional part
+ whole : vec2<abstract> // whole part
+}
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource))
+ .fn(async t => {
+ const cases = await d.get('abstract_vec2_whole');
+ await run(
+ t,
+ abstractWholeBuilder(),
+ [TypeVec(2, TypeAbstractFloat)],
+ TypeVec(2, TypeAbstractFloat),
+ t.params,
+ cases
+ );
+ });
+
+g.test('abstract_vec3_fract')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec3<abstract>
+
+struct __modf_result_vec3_abstract {
+ fract : vec3<abstract>, // fractional part
+ whole : vec3<abstract> // whole part
+}
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource))
+ .fn(async t => {
+ const cases = await d.get('abstract_vec3_fract');
+ await run(
+ t,
+ abstractFractBuilder(),
+ [TypeVec(3, TypeAbstractFloat)],
+ TypeVec(3, TypeAbstractFloat),
+ t.params,
+ cases
+ );
+ });
+
+g.test('abstract_vec3_whole')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec3<abstract>
+
+struct __modf_result_vec3_abstract {
+ fract : vec3<abstract>, // fractional part
+ whole : vec3<abstract> // whole part
+}
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource))
+ .fn(async t => {
+ const cases = await d.get('abstract_vec3_whole');
+ await run(
+ t,
+ abstractWholeBuilder(),
+ [TypeVec(3, TypeAbstractFloat)],
+ TypeVec(3, TypeAbstractFloat),
+ t.params,
+ cases
+ );
+ });
+
+g.test('abstract_vec4_fract')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec4<abstract>
+
+struct __modf_result_vec4_abstract {
+ fract : vec4<abstract>, // fractional part
+ whole : vec4<abstract> // whole part
+}
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource))
+ .fn(async t => {
+ const cases = await d.get('abstract_vec4_fract');
+ await run(
+ t,
+ abstractFractBuilder(),
+ [TypeVec(4, TypeAbstractFloat)],
+ TypeVec(4, TypeAbstractFloat),
+ t.params,
+ cases
+ );
+ });
+
+g.test('abstract_vec4_whole')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+T is vec4<abstract>
+
+struct __modf_result_vec4_abstract {
+ fract : vec4<abstract>, // fractional part
+ whole : vec4<abstract> // whole part
+}
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource))
+ .fn(async t => {
+ const cases = await d.get('abstract_vec4_whole');
+ await run(
+ t,
+ abstractWholeBuilder(),
+ [TypeVec(4, TypeAbstractFloat)],
+ TypeVec(4, TypeAbstractFloat),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/normalize.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/normalize.spec.ts
new file mode 100644
index 0000000000..615617b448
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/normalize.spec.ts
@@ -0,0 +1,137 @@
+export const description = `
+Execution tests for the 'normalize' builtin function
+
+T is AbstractFloat, f32, or f16
+@const fn normalize(e: vecN<T> ) -> vecN<T>
+Returns a unit vector in the same direction as e.
+`;
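+
+// A worked example (illustrative only): normalize(vec2(3.0, 4.0)) divides by
+// length(vec2(3.0, 4.0)) = 5.0, giving vec2(0.6, 0.8).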
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16, TypeVec } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { vectorF32Range, vectorF16Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Cases: f32_vecN_[non_]const
+const f32_vec_cases = ([2, 3, 4] as const)
+ .flatMap(n =>
+ ([true, false] as const).map(nonConst => ({
+ [`f32_vec${n}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateVectorToVectorCases(
+ vectorF32Range(n),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f32.normalizeInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+// Cases: f16_vecN_[non_]const
+const f16_vec_cases = ([2, 3, 4] as const)
+ .flatMap(n =>
+ ([true, false] as const).map(nonConst => ({
+ [`f16_vec${n}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateVectorToVectorCases(
+ vectorF16Range(n),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f16.normalizeInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('normalize', {
+ ...f32_vec_cases,
+ ...f16_vec_cases,
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32_vec2')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f32 tests using vec2s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f32_vec2_const' : 'f32_vec2_non_const'
+ );
+ await run(t, builtin('normalize'), [TypeVec(2, TypeF32)], TypeVec(2, TypeF32), t.params, cases);
+ });
+
+g.test('f32_vec3')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f32 tests using vec3s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f32_vec3_const' : 'f32_vec3_non_const'
+ );
+ await run(t, builtin('normalize'), [TypeVec(3, TypeF32)], TypeVec(3, TypeF32), t.params, cases);
+ });
+
+g.test('f32_vec4')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f32 tests using vec4s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f32_vec4_const' : 'f32_vec4_non_const'
+ );
+ await run(t, builtin('normalize'), [TypeVec(4, TypeF32)], TypeVec(4, TypeF32), t.params, cases);
+ });
+
+g.test('f16_vec2')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f16 tests using vec2s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f16_vec2_const' : 'f16_vec2_non_const'
+ );
+ await run(t, builtin('normalize'), [TypeVec(2, TypeF16)], TypeVec(2, TypeF16), t.params, cases);
+ });
+
+g.test('f16_vec3')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f16 tests using vec3s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f16_vec3_const' : 'f16_vec3_non_const'
+ );
+ await run(t, builtin('normalize'), [TypeVec(3, TypeF16)], TypeVec(3, TypeF16), t.params, cases);
+ });
+
+g.test('f16_vec4')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f16 tests using vec4s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f16_vec4_const' : 'f16_vec4_non_const'
+ );
+ await run(t, builtin('normalize'), [TypeVec(4, TypeF16)], TypeVec(4, TypeF16), t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/pack2x16float.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/pack2x16float.spec.ts
new file mode 100644
index 0000000000..790e54720c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/pack2x16float.spec.ts
@@ -0,0 +1,88 @@
+export const description = `
+Converts two floating point values to half-precision floating point numbers, and then combines them into one u32 value.
+Component e[i] of the input is converted to an IEEE-754 binary16 value,
+which is then placed in bits 16 × i through 16 × i + 15 of the result.
+`;
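+
+// A worked example of the packing above (illustrative only):
+//   pack2x16float(vec2(1.0, 2.0)):
+//     e[0] = 1.0 -> binary16 0x3c00 (bits 0..15)
+//     e[1] = 2.0 -> binary16 0x4000 (bits 16..31)
+//   result: 0x40003c00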
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { anyOf, skipUndefined } from '../../../../../util/compare.js';
+import {
+ f32,
+ pack2x16float,
+ TypeF32,
+ TypeU32,
+ TypeVec,
+ u32,
+ vec2,
+} from '../../../../../util/conversion.js';
+import { cartesianProduct, fullF32Range, quantizeToF32 } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, Case, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// pack2x16float has somewhat unusual behaviour, specifically around how it is
+// supposed to behave when values go out of bounds (OOB) and when they are
+// considered to have gone OOB, so it has its own bespoke case-generation
+// implementation.
+
+/**
+ * @returns a Case for `pack2x16float`
+ * @param param0 first param for the case
+ * @param param1 second param for the case
+ * @param filter_undefined should inputs that cause an undefined expectation be
+ * filtered out, needed for const-eval
+ */
+function makeCase(param0: number, param1: number, filter_undefined: boolean): Case | undefined {
+ param0 = quantizeToF32(param0);
+ param1 = quantizeToF32(param1);
+
+ const results = pack2x16float(param0, param1);
+ if (filter_undefined && results.some(r => r === undefined)) {
+ return undefined;
+ }
+
+ return {
+ input: [vec2(f32(param0), f32(param1))],
+ expected: anyOf(
+ ...results.map(r => (r === undefined ? skipUndefined(undefined) : skipUndefined(u32(r))))
+ ),
+ };
+}
+
+/**
+ * @returns an array of Cases for `pack2x16float`
+ * @param param0s array of inputs to try for the first param
+ * @param param1s array of inputs to try for the second param
+ * @param filter_undefined should inputs that cause an undefined expectation be
+ * filtered out, needed for const-eval
+ */
+function generateCases(param0s: number[], param1s: number[], filter_undefined: boolean): Case[] {
+ return cartesianProduct(param0s, param1s)
+ .map(e => makeCase(e[0], e[1], filter_undefined))
+ .filter((c): c is Case => c !== undefined);
+}
+
+export const d = makeCaseCache('pack2x16float', {
+ f32_const: () => {
+ return generateCases(fullF32Range(), fullF32Range(), true);
+ },
+ f32_non_const: () => {
+ return generateCases(fullF32Range(), fullF32Range(), false);
+ },
+});
+
+g.test('pack')
+ .specURL('https://www.w3.org/TR/WGSL/#pack-builtin-functions')
+ .desc(
+ `
+@const fn pack2x16float(e: vec2<f32>) -> u32
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f32_const' : 'f32_non_const');
+ await run(t, builtin('pack2x16float'), [TypeVec(2, TypeF32)], TypeU32, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/pack2x16snorm.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/pack2x16snorm.spec.ts
new file mode 100644
index 0000000000..54bb21f6c6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/pack2x16snorm.spec.ts
@@ -0,0 +1,55 @@
+export const description = `
+Converts two normalized floating point values to 16-bit signed integers, and then combines them into one u32 value.
+Component e[i] of the input is converted to a 16-bit twos complement integer value
+⌊ 0.5 + 32767 × min(1, max(-1, e[i])) ⌋ which is then placed in
+bits 16 × i through 16 × i + 15 of the result.
+`;
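+
+// A worked example of the formula above (illustrative only):
+//   pack2x16snorm(vec2(1.0, -1.0)):
+//     e[0]: floor(0.5 + 32767 *  1.0) =  32767 -> 0x7fff (bits 0..15)
+//     e[1]: floor(0.5 + 32767 * -1.0) = -32767 -> 0x8001 (bits 16..31)
+//   result: 0x80017fff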
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { kValue } from '../../../../../util/constants.js';
+import {
+ f32,
+ pack2x16snorm,
+ TypeF32,
+ TypeU32,
+ TypeVec,
+ u32,
+ vec2,
+} from '../../../../../util/conversion.js';
+import { quantizeToF32, vectorF32Range } from '../../../../../util/math.js';
+import { allInputSources, Case, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('pack')
+ .specURL('https://www.w3.org/TR/WGSL/#pack-builtin-functions')
+ .desc(
+ `
+@const fn pack2x16snorm(e: vec2<f32>) -> u32
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const makeCase = (x: number, y: number): Case => {
+ x = quantizeToF32(x);
+ y = quantizeToF32(y);
+ return { input: [vec2(f32(x), f32(y))], expected: u32(pack2x16snorm(x, y)) };
+ };
+
+ // Returns a value normalized to [-1, 1].
+ const normalizeF32 = (n: number): number => {
+ return n / kValue.f32.positive.max;
+ };
+
+ const cases: Array<Case> = vectorF32Range(2).flatMap(v => {
+ return [
+ makeCase(...(v as [number, number])),
+ makeCase(...(v.map(normalizeF32) as [number, number])),
+ ];
+ });
+
+ await run(t, builtin('pack2x16snorm'), [TypeVec(2, TypeF32)], TypeU32, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/pack2x16unorm.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/pack2x16unorm.spec.ts
new file mode 100644
index 0000000000..a875a9c7e1
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/pack2x16unorm.spec.ts
@@ -0,0 +1,55 @@
+export const description = `
+Converts two normalized floating point values to 16-bit unsigned integers, and then combines them into one u32 value.
+Component e[i] of the input is converted to a 16-bit unsigned integer value
+⌊ 0.5 + 65535 × min(1, max(0, e[i])) ⌋ which is then placed in
+bits 16 × i through 16 × i + 15 of the result.
+`;
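+
+// A worked example of the formula above (illustrative only):
+//   pack2x16unorm(vec2(0.5, 1.0)):
+//     e[0]: floor(0.5 + 65535 * 0.5) = 32768 -> 0x8000 (bits 0..15)
+//     e[1]: floor(0.5 + 65535 * 1.0) = 65535 -> 0xffff (bits 16..31)
+//   result: 0xffff8000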
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { kValue } from '../../../../../util/constants.js';
+import {
+ f32,
+ pack2x16unorm,
+ TypeF32,
+ TypeU32,
+ TypeVec,
+ u32,
+ vec2,
+} from '../../../../../util/conversion.js';
+import { quantizeToF32, vectorF32Range } from '../../../../../util/math.js';
+import { allInputSources, Case, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('pack')
+ .specURL('https://www.w3.org/TR/WGSL/#pack-builtin-functions')
+ .desc(
+ `
+@const fn pack2x16unorm(e: vec2<f32>) -> u32
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const makeCase = (x: number, y: number): Case => {
+ x = quantizeToF32(x);
+ y = quantizeToF32(y);
+ return { input: [vec2(f32(x), f32(y))], expected: u32(pack2x16unorm(x, y)) };
+ };
+
+ // Returns a value normalized to [0, 1].
+ const normalizeF32 = (n: number): number => {
+ return n > 0 ? n / kValue.f32.positive.max : n / kValue.f32.negative.min;
+ };
+
+ const cases: Array<Case> = vectorF32Range(2).flatMap(v => {
+ return [
+ makeCase(...(v as [number, number])),
+ makeCase(...(v.map(normalizeF32) as [number, number])),
+ ];
+ });
+
+ await run(t, builtin('pack2x16unorm'), [TypeVec(2, TypeF32)], TypeU32, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/pack4x8snorm.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/pack4x8snorm.spec.ts
new file mode 100644
index 0000000000..de0463e9fc
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/pack4x8snorm.spec.ts
@@ -0,0 +1,60 @@
+export const description = `
+Converts four normalized floating point values to 8-bit signed integers, and then combines them into one u32 value.
+Component e[i] of the input is converted to an 8-bit twos complement integer value
+⌊ 0.5 + 127 × min(1, max(-1, e[i])) ⌋ which is then placed in
+bits 8 × i through 8 × i + 7 of the result.
+`;
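+
+// A worked example of the formula above (illustrative only):
+//   pack4x8snorm(vec4(1.0, 0.0, -1.0, 0.5)):
+//     components -> 127 (0x7f), 0 (0x00), -127 (0x81), 64 (0x40),
+//     packed into bytes 0..3 respectively
+//   result: 0x4081007f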
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { kValue } from '../../../../../util/constants.js';
+import {
+ f32,
+ pack4x8snorm,
+ Scalar,
+ TypeF32,
+ TypeU32,
+ TypeVec,
+ u32,
+ vec4,
+} from '../../../../../util/conversion.js';
+import { quantizeToF32, vectorF32Range } from '../../../../../util/math.js';
+import { allInputSources, Case, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('pack')
+ .specURL('https://www.w3.org/TR/WGSL/#pack-builtin-functions')
+ .desc(
+ `
+@const fn pack4x8snorm(e: vec4<f32>) -> u32
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const makeCase = (vals: [number, number, number, number]): Case => {
+ const vals_f32 = new Array<Scalar>(4) as [Scalar, Scalar, Scalar, Scalar];
+ for (const idx in vals) {
+ vals[idx] = quantizeToF32(vals[idx]);
+ vals_f32[idx] = f32(vals[idx]);
+ }
+
+ return { input: [vec4(...vals_f32)], expected: u32(pack4x8snorm(...vals)) };
+ };
+
+ // Returns a value normalized to [-1, 1].
+ const normalizeF32 = (n: number): number => {
+ return n / kValue.f32.positive.max;
+ };
+
+ const cases: Array<Case> = vectorF32Range(4).flatMap(v => {
+ return [
+ makeCase(v as [number, number, number, number]),
+ makeCase(v.map(normalizeF32) as [number, number, number, number]),
+ ];
+ });
+
+ await run(t, builtin('pack4x8snorm'), [TypeVec(4, TypeF32)], TypeU32, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/pack4x8unorm.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/pack4x8unorm.spec.ts
new file mode 100644
index 0000000000..b670e92fbb
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/pack4x8unorm.spec.ts
@@ -0,0 +1,60 @@
+export const description = `
+Converts four normalized floating point values to 8-bit unsigned integers, and then combines them into one u32 value.
+Component e[i] of the input is converted to an 8-bit unsigned integer value
+⌊ 0.5 + 255 × min(1, max(0, e[i])) ⌋ which is then placed in
+bits 8 × i through 8 × i + 7 of the result.
+`;
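+
+// A worked example of the formula above (illustrative only):
+//   pack4x8unorm(vec4(0.0, 0.25, 0.5, 1.0)):
+//     components -> 0 (0x00), 64 (0x40), 128 (0x80), 255 (0xff),
+//     packed into bytes 0..3 respectively
+//   result: 0xff804000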
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { kValue } from '../../../../../util/constants.js';
+import {
+ f32,
+ pack4x8unorm,
+ Scalar,
+ TypeF32,
+ TypeU32,
+ TypeVec,
+ u32,
+ vec4,
+} from '../../../../../util/conversion.js';
+import { quantizeToF32, vectorF32Range } from '../../../../../util/math.js';
+import { allInputSources, Case, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('pack')
+ .specURL('https://www.w3.org/TR/WGSL/#pack-builtin-functions')
+ .desc(
+ `
+@const fn pack4x8unorm(e: vec4<f32>) -> u32
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const makeCase = (vals: [number, number, number, number]): Case => {
+ const vals_f32 = new Array<Scalar>(4) as [Scalar, Scalar, Scalar, Scalar];
+ for (const idx in vals) {
+ vals[idx] = quantizeToF32(vals[idx]);
+ vals_f32[idx] = f32(vals[idx]);
+ }
+
+ return { input: [vec4(...vals_f32)], expected: u32(pack4x8unorm(...vals)) };
+ };
+
+ // Returns a value normalized to [0, 1].
+ const normalizeF32 = (n: number): number => {
+ return n > 0 ? n / kValue.f32.positive.max : n / kValue.f32.negative.min;
+ };
+
+ const cases: Array<Case> = vectorF32Range(4).flatMap(v => {
+ return [
+ makeCase(v as [number, number, number, number]),
+ makeCase(v.map(normalizeF32) as [number, number, number, number]),
+ ];
+ });
+
+ await run(t, builtin('pack4x8unorm'), [TypeVec(4, TypeF32)], TypeU32, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/pow.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/pow.spec.ts
new file mode 100644
index 0000000000..f9b4fe1cfa
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/pow.spec.ts
@@ -0,0 +1,88 @@
+export const description = `
+Execution tests for the 'pow' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn pow(e1: T ,e2: T ) -> T
+Returns e1 raised to the power e2. Component-wise when T is a vector.
+`;
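+
+// A worked example (illustrative only): pow(2.0, 10.0) = 1024.0 and
+// pow(9.0, 0.5) = 3.0. The accuracy intervals for the generated cases come
+// from FP.f32.powInterval / FP.f16.powInterval below.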
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullF32Range, fullF16Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('pow', {
+ f32_const: () => {
+ return FP.f32.generateScalarPairToIntervalCases(
+ fullF32Range(),
+ fullF32Range(),
+ 'finite',
+ FP.f32.powInterval
+ );
+ },
+ f32_non_const: () => {
+ return FP.f32.generateScalarPairToIntervalCases(
+ fullF32Range(),
+ fullF32Range(),
+ 'unfiltered',
+ FP.f32.powInterval
+ );
+ },
+ f16_const: () => {
+ return FP.f16.generateScalarPairToIntervalCases(
+ fullF16Range(),
+ fullF16Range(),
+ 'finite',
+ FP.f16.powInterval
+ );
+ },
+ f16_non_const: () => {
+ return FP.f16.generateScalarPairToIntervalCases(
+ fullF16Range(),
+ fullF16Range(),
+ 'unfiltered',
+ FP.f16.powInterval
+ );
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f32_const' : 'f32_non_const');
+ await run(t, builtin('pow'), [TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f16_const' : 'f16_non_const');
+ await run(t, builtin('pow'), [TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/quantizeToF16.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/quantizeToF16.spec.ts
new file mode 100644
index 0000000000..b37d4c5afb
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/quantizeToF16.spec.ts
@@ -0,0 +1,70 @@
+export const description = `
+Execution tests for the 'quantizeToF16' builtin function
+
+T is f32 or vecN<f32>
+@const fn quantizeToF16(e: T ) -> T
+Quantizes a 32-bit floating point value e as if e were converted to an IEEE 754
+binary16 value, and then converted back to an IEEE 754 binary32 value.
+Component-wise when T is a vector.
+`;
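+
+// A worked example (illustrative only): quantizeToF16(0.5) returns 0.5
+// unchanged, since 0.5 is exactly representable in binary16, while
+// quantizeToF16(0.1) snaps to a neighbouring binary16 value
+// (e.g. 0x2e66 = 0.0999755859375).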
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { kValue } from '../../../../../util/constants.js';
+import { TypeF32 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullF16Range, fullF32Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('quantizeToF16', {
+ f32_const: () => {
+ return FP.f32.generateScalarToIntervalCases(
+ [
+ kValue.f16.negative.min,
+ kValue.f16.negative.max,
+ kValue.f16.negative.subnormal.min,
+ kValue.f16.negative.subnormal.max,
+ kValue.f16.positive.subnormal.min,
+ kValue.f16.positive.subnormal.max,
+ kValue.f16.positive.min,
+ kValue.f16.positive.max,
+ ...fullF16Range(),
+ ],
+ 'finite',
+ FP.f32.quantizeToF16Interval
+ );
+ },
+ f32_non_const: () => {
+ return FP.f32.generateScalarToIntervalCases(
+ [
+ kValue.f16.negative.min,
+ kValue.f16.negative.max,
+ kValue.f16.negative.subnormal.min,
+ kValue.f16.negative.subnormal.max,
+ kValue.f16.positive.subnormal.min,
+ kValue.f16.positive.subnormal.max,
+ kValue.f16.positive.min,
+ kValue.f16.positive.max,
+ ...fullF32Range(),
+ ],
+ 'unfiltered',
+ FP.f32.quantizeToF16Interval
+ );
+ },
+});
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f32_const' : 'f32_non_const');
+ await run(t, builtin('quantizeToF16'), [TypeF32], TypeF32, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/radians.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/radians.spec.ts
new file mode 100644
index 0000000000..63ae45b656
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/radians.spec.ts
@@ -0,0 +1,90 @@
+export const description = `
+Execution tests for the 'radians' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn radians(e1: T ) -> T
+Converts degrees to radians, approximating e1 * π / 180.
+Component-wise when T is a vector
+`;
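+
+// A worked example (illustrative only): radians(180.0) ≈ 3.14159265 (π)
+// and radians(90.0) ≈ 1.57079633 (π / 2).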
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeAbstractFloat, TypeF16, TypeF32 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullF16Range, fullF32Range, fullF64Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, onlyConstInputSource, run } from '../../expression.js';
+
+import { abstractBuiltin, builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('radians', {
+ f32: () => {
+ return FP.f32.generateScalarToIntervalCases(
+ fullF32Range(),
+ 'unfiltered',
+ FP.f32.radiansInterval
+ );
+ },
+ f16: () => {
+ return FP.f16.generateScalarToIntervalCases(
+ fullF16Range(),
+ 'unfiltered',
+ FP.f16.radiansInterval
+ );
+ },
+ abstract: () => {
+ return FP.abstract.generateScalarToIntervalCases(
+      fullF64Range(),
+ 'unfiltered',
+ FP.abstract.radiansInterval
+ );
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u
+ .combine('inputSource', onlyConstInputSource)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('abstract');
+ await run(
+ t,
+ abstractBuiltin('radians'),
+ [TypeAbstractFloat],
+ TypeAbstractFloat,
+ t.params,
+ cases
+ );
+ });
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('f32');
+ await run(t, builtin('radians'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, builtin('radians'), [TypeF16], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/reflect.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/reflect.spec.ts
new file mode 100644
index 0000000000..2614c4e686
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/reflect.spec.ts
@@ -0,0 +1,180 @@
+export const description = `
+Execution tests for the 'reflect' builtin function
+
+T is vecN<AbstractFloat>, vecN<f32>, or vecN<f16>
+@const fn reflect(e1: T, e2: T ) -> T
+For the incident vector e1 and surface orientation e2, returns the reflection
+direction e1-2*dot(e2,e1)*e2.
+`;
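+
+// A worked example of the formula above (illustrative only): for
+// e1 = vec2(1.0, -1.0) and unit surface orientation e2 = vec2(0.0, 1.0),
+//   reflect(e1, e2) = e1 - 2 * dot(e2, e1) * e2
+//                   = vec2(1.0, -1.0) + vec2(0.0, 2.0) = vec2(1.0, 1.0)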
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16, TypeVec } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { sparseVectorF32Range, sparseVectorF16Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Cases: f32_vecN_[non_]const
+const f32_vec_cases = ([2, 3, 4] as const)
+ .flatMap(n =>
+ ([true, false] as const).map(nonConst => ({
+ [`f32_vec${n}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateVectorPairToVectorCases(
+ sparseVectorF32Range(n),
+ sparseVectorF32Range(n),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f32.reflectInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+// Cases: f16_vecN_[non_]const
+const f16_vec_cases = ([2, 3, 4] as const)
+ .flatMap(n =>
+ ([true, false] as const).map(nonConst => ({
+ [`f16_vec${n}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateVectorPairToVectorCases(
+ sparseVectorF16Range(n),
+ sparseVectorF16Range(n),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f16.reflectInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('reflect', {
+ ...f32_vec_cases,
+ ...f16_vec_cases,
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u => u.combine('inputSource', allInputSources).combine('vectorize', [2, 3, 4] as const))
+ .unimplemented();
+
+g.test('f32_vec2')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f32 tests using vec2s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f32_vec2_const' : 'f32_vec2_non_const'
+ );
+ await run(
+ t,
+ builtin('reflect'),
+ [TypeVec(2, TypeF32), TypeVec(2, TypeF32)],
+ TypeVec(2, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f32_vec3')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f32 tests using vec3s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f32_vec3_const' : 'f32_vec3_non_const'
+ );
+ await run(
+ t,
+ builtin('reflect'),
+ [TypeVec(3, TypeF32), TypeVec(3, TypeF32)],
+ TypeVec(3, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f32_vec4')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f32 tests using vec4s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f32_vec4_const' : 'f32_vec4_non_const'
+ );
+ await run(
+ t,
+ builtin('reflect'),
+ [TypeVec(4, TypeF32), TypeVec(4, TypeF32)],
+ TypeVec(4, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f16_vec2')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f16 tests using vec2s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f16_vec2_const' : 'f16_vec2_non_const'
+ );
+ await run(
+ t,
+ builtin('reflect'),
+ [TypeVec(2, TypeF16), TypeVec(2, TypeF16)],
+ TypeVec(2, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f16_vec3')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f16 tests using vec3s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f16_vec3_const' : 'f16_vec3_non_const'
+ );
+ await run(
+ t,
+ builtin('reflect'),
+ [TypeVec(3, TypeF16), TypeVec(3, TypeF16)],
+ TypeVec(3, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f16_vec4')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f16 tests using vec4s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f16_vec4_const' : 'f16_vec4_non_const'
+ );
+ await run(
+ t,
+ builtin('reflect'),
+ [TypeVec(4, TypeF16), TypeVec(4, TypeF16)],
+ TypeVec(4, TypeF16),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/refract.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/refract.spec.ts
new file mode 100644
index 0000000000..be1a76b437
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/refract.spec.ts
@@ -0,0 +1,253 @@
+export const description = `
+Execution tests for the 'refract' builtin function
+
+T is vecN<I>
+I is AbstractFloat, f32, or f16
+@const fn refract(e1: T, e2: T, e3: I) -> T
+For the incident vector e1 and surface normal e2, and the ratio of indices of
+refraction e3, let k = 1.0 - e3 * e3 * (1.0 - dot(e2, e1) * dot(e2, e1)).
+If k < 0.0, returns the refraction vector 0.0; otherwise returns the refraction
+vector e3 * e1 - (e3 * dot(e2, e1) + sqrt(k)) * e2.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { ROArrayArray } from '../../../../../../common/util/types.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { toVector, TypeF32, TypeF16, TypeVec } from '../../../../../util/conversion.js';
+import { FP, FPKind } from '../../../../../util/floating_point.js';
+import {
+ sparseVectorF32Range,
+ sparseVectorF16Range,
+ sparseF32Range,
+ sparseF16Range,
+} from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, Case, IntervalFilter, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
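+// Editorial sketch (not part of the CTS): a plain-number reference for the refract
+// formula quoted in the description above. `refractRef` is an illustrative helper
+// only; the tests below rely on FP.*.refractInterval.
+const refractRef = (e1: readonly number[], e2: readonly number[], e3: number): number[] => {
+  const d = e1.reduce((sum, x, i) => sum + x * e2[i], 0); // dot(e2, e1)
+  const k = 1.0 - e3 * e3 * (1.0 - d * d);
+  // k < 0 means total internal reflection: the spec defines the result as the zero vector.
+  return k < 0.0
+    ? e1.map(() => 0.0)
+    : e1.map((x, i) => e3 * x - (e3 * d + Math.sqrt(k)) * e2[i]);
+};
+// e.g. refractRef([0, -1, 0], [0, 1, 0], 0.5) yields [0, -1, 0] (perpendicular incidence).
+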
+// Using a bespoke implementation of make*Case and generate*Cases here
+// since refract is the only builtin with the API signature
+// (vec, vec, scalar) -> vec
+
+/**
+ * @returns a Case for `refract`
+ * @param kind what type of floating point numbers to operate on
+ * @param i the `i` param for the case
+ * @param s the `s` param for the case
+ * @param r the `r` param for the case
+ * @param check what interval checking to apply
+ */
+function makeCase(
+ kind: FPKind,
+ i: readonly number[],
+ s: readonly number[],
+ r: number,
+ check: IntervalFilter
+): Case | undefined {
+ const fp = FP[kind];
+ i = i.map(fp.quantize);
+ s = s.map(fp.quantize);
+ r = fp.quantize(r);
+
+ const vectors = fp.refractInterval(i, s, r);
+ if (check === 'finite' && vectors.some(e => !e.isFinite())) {
+ return undefined;
+ }
+
+ return {
+ input: [toVector(i, fp.scalarBuilder), toVector(s, fp.scalarBuilder), fp.scalarBuilder(r)],
+ expected: fp.refractInterval(i, s, r),
+ };
+}
+
+/**
+ * @returns an array of Cases for `refract`
+ * @param kind what type of floating point numbers to operate on
+ * @param param_is array of inputs to try for the `i` param
+ * @param param_ss array of inputs to try for the `s` param
+ * @param param_rs array of inputs to try for the `r` param
+ * @param check what interval checking to apply
+ */
+function generateCases(
+ kind: FPKind,
+ param_is: ROArrayArray<number>,
+ param_ss: ROArrayArray<number>,
+ param_rs: readonly number[],
+ check: IntervalFilter
+): Case[] {
+ // Cannot use `cartesianProduct` here due to heterogeneous param types
+ return param_is
+ .flatMap(i => {
+ return param_ss.flatMap(s => {
+ return param_rs.map(r => {
+ return makeCase(kind, i, s, r, check);
+ });
+ });
+ })
+ .filter((c): c is Case => c !== undefined);
+}
+
+// Cases: f32_vecN_[non_]const
+const f32_vec_cases = ([2, 3, 4] as const)
+ .flatMap(n =>
+ ([true, false] as const).map(nonConst => ({
+ [`f32_vec${n}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return generateCases(
+ 'f32',
+ sparseVectorF32Range(n),
+ sparseVectorF32Range(n),
+ sparseF32Range(),
+ nonConst ? 'unfiltered' : 'finite'
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+// Cases: f16_vecN_[non_]const
+const f16_vec_cases = ([2, 3, 4] as const)
+ .flatMap(n =>
+ ([true, false] as const).map(nonConst => ({
+ [`f16_vec${n}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return generateCases(
+ 'f16',
+ sparseVectorF16Range(n),
+ sparseVectorF16Range(n),
+ sparseF16Range(),
+ nonConst ? 'unfiltered' : 'finite'
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('refract', {
+ ...f32_vec_cases,
+ ...f16_vec_cases,
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u => u.combine('inputSource', allInputSources).combine('vectorize', [2, 3, 4] as const))
+ .unimplemented();
+
+g.test('f32_vec2')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f32 tests using vec2s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f32_vec2_const' : 'f32_vec2_non_const'
+ );
+ await run(
+ t,
+ builtin('refract'),
+ [TypeVec(2, TypeF32), TypeVec(2, TypeF32), TypeF32],
+ TypeVec(2, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f32_vec3')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f32 tests using vec3s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f32_vec3_const' : 'f32_vec3_non_const'
+ );
+ await run(
+ t,
+ builtin('refract'),
+ [TypeVec(3, TypeF32), TypeVec(3, TypeF32), TypeF32],
+ TypeVec(3, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f32_vec4')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f32 tests using vec4s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f32_vec4_const' : 'f32_vec4_non_const'
+ );
+ await run(
+ t,
+ builtin('refract'),
+ [TypeVec(4, TypeF32), TypeVec(4, TypeF32), TypeF32],
+ TypeVec(4, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f16_vec2')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f16 tests using vec2s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f16_vec2_const' : 'f16_vec2_non_const'
+ );
+ await run(
+ t,
+ builtin('refract'),
+ [TypeVec(2, TypeF16), TypeVec(2, TypeF16), TypeF16],
+ TypeVec(2, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f16_vec3')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f16 tests using vec3s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f16_vec3_const' : 'f16_vec3_non_const'
+ );
+ await run(
+ t,
+ builtin('refract'),
+ [TypeVec(3, TypeF16), TypeVec(3, TypeF16), TypeF16],
+ TypeVec(3, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f16_vec4')
+ .specURL('https://www.w3.org/TR/WGSL/#numeric-builtin-functions')
+ .desc(`f16 tests using vec4s`)
+ .params(u => u.combine('inputSource', allInputSources))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(
+ t.params.inputSource === 'const' ? 'f16_vec4_const' : 'f16_vec4_non_const'
+ );
+ await run(
+ t,
+ builtin('refract'),
+ [TypeVec(4, TypeF16), TypeVec(4, TypeF16), TypeF16],
+ TypeVec(4, TypeF16),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/reverseBits.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/reverseBits.spec.ts
new file mode 100644
index 0000000000..6acb359822
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/reverseBits.spec.ts
@@ -0,0 +1,250 @@
+export const description = `
+Execution tests for the 'reverseBits' builtin function
+
+S is i32, u32
+T is S or vecN<S>
+@const fn reverseBits(e: T) -> T
+Reverses the bits in e: The bit at position k of the result equals the bit at position 31-k of e.
+Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeU32, u32Bits, TypeI32, i32Bits } from '../../../../../util/conversion.js';
+import { allInputSources, Config, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
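+// Editorial sketch (not part of the CTS): a reference bit-reversal matching the
+// description above, useful when reading the expected values in the tables below.
+// `reverseBits32Ref` is an illustrative helper only.
+const reverseBits32Ref = (x: number): number => {
+  let result = 0;
+  for (let k = 0; k < 32; k++) {
+    // Bit k of the input becomes bit 31 - k of the result.
+    result = ((result << 1) | ((x >>> k) & 1)) >>> 0;
+  }
+  return result;
+};
+// e.g. reverseBits32Ref(0b1) === 0x80000000, reverseBits32Ref(0b110) === 0x60000000
+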
+g.test('u32')
+ .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`u32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cfg: Config = t.params;
+ // prettier-ignore
+ await run(t, builtin('reverseBits'), [TypeU32], TypeU32, cfg, [
+ // Zero
+ { input: u32Bits(0b00000000000000000000000000000000), expected: u32Bits(0b00000000000000000000000000000000) },
+
+ // One
+ { input: u32Bits(0b00000000000000000000000000000001), expected: u32Bits(0b10000000000000000000000000000000) },
+
+ // 0's after leading 1
+ { input: u32Bits(0b00000000000000000000000000000010), expected: u32Bits(0b01000000000000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000000000000100), expected: u32Bits(0b00100000000000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000000000001000), expected: u32Bits(0b00010000000000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000000000010000), expected: u32Bits(0b00001000000000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000000000100000), expected: u32Bits(0b00000100000000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000000001000000), expected: u32Bits(0b00000010000000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000000010000000), expected: u32Bits(0b00000001000000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000000100000000), expected: u32Bits(0b00000000100000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000001000000000), expected: u32Bits(0b00000000010000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000010000000000), expected: u32Bits(0b00000000001000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000100000000000), expected: u32Bits(0b00000000000100000000000000000000) },
+ { input: u32Bits(0b00000000000000000001000000000000), expected: u32Bits(0b00000000000010000000000000000000) },
+ { input: u32Bits(0b00000000000000000010000000000000), expected: u32Bits(0b00000000000001000000000000000000) },
+ { input: u32Bits(0b00000000000000000100000000000000), expected: u32Bits(0b00000000000000100000000000000000) },
+ { input: u32Bits(0b00000000000000001000000000000000), expected: u32Bits(0b00000000000000010000000000000000) },
+ { input: u32Bits(0b00000000000000010000000000000000), expected: u32Bits(0b00000000000000001000000000000000) },
+ { input: u32Bits(0b00000000000000100000000000000000), expected: u32Bits(0b00000000000000000100000000000000) },
+ { input: u32Bits(0b00000000000001000000000000000000), expected: u32Bits(0b00000000000000000010000000000000) },
+ { input: u32Bits(0b00000000000010000000000000000000), expected: u32Bits(0b00000000000000000001000000000000) },
+ { input: u32Bits(0b00000000000100000000000000000000), expected: u32Bits(0b00000000000000000000100000000000) },
+ { input: u32Bits(0b00000000001000000000000000000000), expected: u32Bits(0b00000000000000000000010000000000) },
+ { input: u32Bits(0b00000000010000000000000000000000), expected: u32Bits(0b00000000000000000000001000000000) },
+ { input: u32Bits(0b00000000100000000000000000000000), expected: u32Bits(0b00000000000000000000000100000000) },
+ { input: u32Bits(0b00000001000000000000000000000000), expected: u32Bits(0b00000000000000000000000010000000) },
+ { input: u32Bits(0b00000010000000000000000000000000), expected: u32Bits(0b00000000000000000000000001000000) },
+ { input: u32Bits(0b00000100000000000000000000000000), expected: u32Bits(0b00000000000000000000000000100000) },
+ { input: u32Bits(0b00001000000000000000000000000000), expected: u32Bits(0b00000000000000000000000000010000) },
+ { input: u32Bits(0b00010000000000000000000000000000), expected: u32Bits(0b00000000000000000000000000001000) },
+ { input: u32Bits(0b00100000000000000000000000000000), expected: u32Bits(0b00000000000000000000000000000100) },
+ { input: u32Bits(0b01000000000000000000000000000000), expected: u32Bits(0b00000000000000000000000000000010) },
+ { input: u32Bits(0b10000000000000000000000000000000), expected: u32Bits(0b00000000000000000000000000000001) },
+
+ // 1's after leading 1
+ { input: u32Bits(0b00000000000000000000000000000011), expected: u32Bits(0b11000000000000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000000000000111), expected: u32Bits(0b11100000000000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000000000001111), expected: u32Bits(0b11110000000000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000000000011111), expected: u32Bits(0b11111000000000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000000000111111), expected: u32Bits(0b11111100000000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000000001111111), expected: u32Bits(0b11111110000000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000000011111111), expected: u32Bits(0b11111111000000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000000111111111), expected: u32Bits(0b11111111100000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000001111111111), expected: u32Bits(0b11111111110000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000011111111111), expected: u32Bits(0b11111111111000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000111111111111), expected: u32Bits(0b11111111111100000000000000000000) },
+ { input: u32Bits(0b00000000000000000001111111111111), expected: u32Bits(0b11111111111110000000000000000000) },
+ { input: u32Bits(0b00000000000000000011111111111111), expected: u32Bits(0b11111111111111000000000000000000) },
+ { input: u32Bits(0b00000000000000000111111111111111), expected: u32Bits(0b11111111111111100000000000000000) },
+ { input: u32Bits(0b00000000000000001111111111111111), expected: u32Bits(0b11111111111111110000000000000000) },
+ { input: u32Bits(0b00000000000000011111111111111111), expected: u32Bits(0b11111111111111111000000000000000) },
+ { input: u32Bits(0b00000000000000111111111111111111), expected: u32Bits(0b11111111111111111100000000000000) },
+ { input: u32Bits(0b00000000000001111111111111111111), expected: u32Bits(0b11111111111111111110000000000000) },
+ { input: u32Bits(0b00000000000011111111111111111111), expected: u32Bits(0b11111111111111111111000000000000) },
+ { input: u32Bits(0b00000000000111111111111111111111), expected: u32Bits(0b11111111111111111111100000000000) },
+ { input: u32Bits(0b00000000001111111111111111111111), expected: u32Bits(0b11111111111111111111110000000000) },
+ { input: u32Bits(0b00000000011111111111111111111111), expected: u32Bits(0b11111111111111111111111000000000) },
+ { input: u32Bits(0b00000000111111111111111111111111), expected: u32Bits(0b11111111111111111111111100000000) },
+ { input: u32Bits(0b00000001111111111111111111111111), expected: u32Bits(0b11111111111111111111111110000000) },
+ { input: u32Bits(0b00000011111111111111111111111111), expected: u32Bits(0b11111111111111111111111111000000) },
+ { input: u32Bits(0b00000111111111111111111111111111), expected: u32Bits(0b11111111111111111111111111100000) },
+ { input: u32Bits(0b00001111111111111111111111111111), expected: u32Bits(0b11111111111111111111111111110000) },
+ { input: u32Bits(0b00011111111111111111111111111111), expected: u32Bits(0b11111111111111111111111111111000) },
+ { input: u32Bits(0b00111111111111111111111111111111), expected: u32Bits(0b11111111111111111111111111111100) },
+ { input: u32Bits(0b01111111111111111111111111111111), expected: u32Bits(0b11111111111111111111111111111110) },
+ { input: u32Bits(0b11111111111111111111111111111111), expected: u32Bits(0b11111111111111111111111111111111) },
+
+ // random after leading 1
+ { input: u32Bits(0b00000000000000000000000000000110), expected: u32Bits(0b01100000000000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000000000001101), expected: u32Bits(0b10110000000000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000000000011101), expected: u32Bits(0b10111000000000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000000000111001), expected: u32Bits(0b10011100000000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000000001101111), expected: u32Bits(0b11110110000000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000000011111111), expected: u32Bits(0b11111111000000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000000111101111), expected: u32Bits(0b11110111100000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000001111111111), expected: u32Bits(0b11111111110000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000011111110001), expected: u32Bits(0b10001111111000000000000000000000) },
+ { input: u32Bits(0b00000000000000000000111011011101), expected: u32Bits(0b10111011011100000000000000000000) },
+ { input: u32Bits(0b00000000000000000001101101111111), expected: u32Bits(0b11111110110110000000000000000000) },
+ { input: u32Bits(0b00000000000000000011111111011111), expected: u32Bits(0b11111011111111000000000000000000) },
+ { input: u32Bits(0b00000000000000000101111001110101), expected: u32Bits(0b10101110011110100000000000000000) },
+ { input: u32Bits(0b00000000000000001101111011110111), expected: u32Bits(0b11101111011110110000000000000000) },
+ { input: u32Bits(0b00000000000000011111111111110011), expected: u32Bits(0b11001111111111111000000000000000) },
+ { input: u32Bits(0b00000000000000111111111110111111), expected: u32Bits(0b11111101111111111100000000000000) },
+ { input: u32Bits(0b00000000000001111111011111111111), expected: u32Bits(0b11111111111011111110000000000000) },
+ { input: u32Bits(0b00000000000011111111111111111111), expected: u32Bits(0b11111111111111111111000000000000) },
+ { input: u32Bits(0b00000000000111110101011110111111), expected: u32Bits(0b11111101111010101111100000000000) },
+ { input: u32Bits(0b00000000001111101111111111110111), expected: u32Bits(0b11101111111111110111110000000000) },
+ { input: u32Bits(0b00000000011111111111010000101111), expected: u32Bits(0b11110100001011111111111000000000) },
+ { input: u32Bits(0b00000000111111111111001111111011), expected: u32Bits(0b11011111110011111111111100000000) },
+ { input: u32Bits(0b00000001111111011111101111111111), expected: u32Bits(0b11111111110111111011111110000000) },
+ { input: u32Bits(0b00000011101011111011110111111011), expected: u32Bits(0b11011111101111011111010111000000) },
+ { input: u32Bits(0b00000111111110111111111111111111), expected: u32Bits(0b11111111111111111101111111100000) },
+ { input: u32Bits(0b00001111000000011011011110111111), expected: u32Bits(0b11111101111011011000000011110000) },
+ { input: u32Bits(0b00011110101111011111111111111111), expected: u32Bits(0b11111111111111111011110101111000) },
+ { input: u32Bits(0b00110110111111100111111110111101), expected: u32Bits(0b10111101111111100111111101101100) },
+ { input: u32Bits(0b01010111111101111111011111011111), expected: u32Bits(0b11111011111011111110111111101010) },
+ { input: u32Bits(0b11100010011110101101101110101111), expected: u32Bits(0b11110101110110110101111001000111) },
+ ]);
+ });
+
+g.test('i32')
+  .specURL('https://www.w3.org/TR/WGSL/#integer-builtin-functions')
+ .desc(`i32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cfg: Config = t.params;
+ // prettier-ignore
+ await run(t, builtin('reverseBits'), [TypeI32], TypeI32, cfg, [
+ // Zero
+ { input: i32Bits(0b00000000000000000000000000000000), expected: i32Bits(0b00000000000000000000000000000000) },
+
+ // One
+ { input: i32Bits(0b00000000000000000000000000000001), expected: i32Bits(0b10000000000000000000000000000000) },
+
+ // 0's after leading 1
+ { input: i32Bits(0b00000000000000000000000000000010), expected: i32Bits(0b01000000000000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000000000000100), expected: i32Bits(0b00100000000000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000000000001000), expected: i32Bits(0b00010000000000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000000000010000), expected: i32Bits(0b00001000000000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000000000100000), expected: i32Bits(0b00000100000000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000000001000000), expected: i32Bits(0b00000010000000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000000010000000), expected: i32Bits(0b00000001000000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000000100000000), expected: i32Bits(0b00000000100000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000001000000000), expected: i32Bits(0b00000000010000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000010000000000), expected: i32Bits(0b00000000001000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000100000000000), expected: i32Bits(0b00000000000100000000000000000000) },
+ { input: i32Bits(0b00000000000000000001000000000000), expected: i32Bits(0b00000000000010000000000000000000) },
+ { input: i32Bits(0b00000000000000000010000000000000), expected: i32Bits(0b00000000000001000000000000000000) },
+ { input: i32Bits(0b00000000000000000100000000000000), expected: i32Bits(0b00000000000000100000000000000000) },
+ { input: i32Bits(0b00000000000000001000000000000000), expected: i32Bits(0b00000000000000010000000000000000) },
+ { input: i32Bits(0b00000000000000010000000000000000), expected: i32Bits(0b00000000000000001000000000000000) },
+ { input: i32Bits(0b00000000000000100000000000000000), expected: i32Bits(0b00000000000000000100000000000000) },
+ { input: i32Bits(0b00000000000001000000000000000000), expected: i32Bits(0b00000000000000000010000000000000) },
+ { input: i32Bits(0b00000000000010000000000000000000), expected: i32Bits(0b00000000000000000001000000000000) },
+ { input: i32Bits(0b00000000000100000000000000000000), expected: i32Bits(0b00000000000000000000100000000000) },
+ { input: i32Bits(0b00000000001000000000000000000000), expected: i32Bits(0b00000000000000000000010000000000) },
+ { input: i32Bits(0b00000000010000000000000000000000), expected: i32Bits(0b00000000000000000000001000000000) },
+ { input: i32Bits(0b00000000100000000000000000000000), expected: i32Bits(0b00000000000000000000000100000000) },
+ { input: i32Bits(0b00000001000000000000000000000000), expected: i32Bits(0b00000000000000000000000010000000) },
+ { input: i32Bits(0b00000010000000000000000000000000), expected: i32Bits(0b00000000000000000000000001000000) },
+ { input: i32Bits(0b00000100000000000000000000000000), expected: i32Bits(0b00000000000000000000000000100000) },
+ { input: i32Bits(0b00001000000000000000000000000000), expected: i32Bits(0b00000000000000000000000000010000) },
+ { input: i32Bits(0b00010000000000000000000000000000), expected: i32Bits(0b00000000000000000000000000001000) },
+ { input: i32Bits(0b00100000000000000000000000000000), expected: i32Bits(0b00000000000000000000000000000100) },
+ { input: i32Bits(0b01000000000000000000000000000000), expected: i32Bits(0b00000000000000000000000000000010) },
+ { input: i32Bits(0b10000000000000000000000000000000), expected: i32Bits(0b00000000000000000000000000000001) },
+
+ // 1's after leading 1
+ { input: i32Bits(0b00000000000000000000000000000011), expected: i32Bits(0b11000000000000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000000000000111), expected: i32Bits(0b11100000000000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000000000001111), expected: i32Bits(0b11110000000000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000000000011111), expected: i32Bits(0b11111000000000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000000000111111), expected: i32Bits(0b11111100000000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000000001111111), expected: i32Bits(0b11111110000000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000000011111111), expected: i32Bits(0b11111111000000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000000111111111), expected: i32Bits(0b11111111100000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000001111111111), expected: i32Bits(0b11111111110000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000011111111111), expected: i32Bits(0b11111111111000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000111111111111), expected: i32Bits(0b11111111111100000000000000000000) },
+ { input: i32Bits(0b00000000000000000001111111111111), expected: i32Bits(0b11111111111110000000000000000000) },
+ { input: i32Bits(0b00000000000000000011111111111111), expected: i32Bits(0b11111111111111000000000000000000) },
+ { input: i32Bits(0b00000000000000000111111111111111), expected: i32Bits(0b11111111111111100000000000000000) },
+ { input: i32Bits(0b00000000000000001111111111111111), expected: i32Bits(0b11111111111111110000000000000000) },
+ { input: i32Bits(0b00000000000000011111111111111111), expected: i32Bits(0b11111111111111111000000000000000) },
+ { input: i32Bits(0b00000000000000111111111111111111), expected: i32Bits(0b11111111111111111100000000000000) },
+ { input: i32Bits(0b00000000000001111111111111111111), expected: i32Bits(0b11111111111111111110000000000000) },
+ { input: i32Bits(0b00000000000011111111111111111111), expected: i32Bits(0b11111111111111111111000000000000) },
+ { input: i32Bits(0b00000000000111111111111111111111), expected: i32Bits(0b11111111111111111111100000000000) },
+ { input: i32Bits(0b00000000001111111111111111111111), expected: i32Bits(0b11111111111111111111110000000000) },
+ { input: i32Bits(0b00000000011111111111111111111111), expected: i32Bits(0b11111111111111111111111000000000) },
+ { input: i32Bits(0b00000000111111111111111111111111), expected: i32Bits(0b11111111111111111111111100000000) },
+ { input: i32Bits(0b00000001111111111111111111111111), expected: i32Bits(0b11111111111111111111111110000000) },
+ { input: i32Bits(0b00000011111111111111111111111111), expected: i32Bits(0b11111111111111111111111111000000) },
+ { input: i32Bits(0b00000111111111111111111111111111), expected: i32Bits(0b11111111111111111111111111100000) },
+ { input: i32Bits(0b00001111111111111111111111111111), expected: i32Bits(0b11111111111111111111111111110000) },
+ { input: i32Bits(0b00011111111111111111111111111111), expected: i32Bits(0b11111111111111111111111111111000) },
+ { input: i32Bits(0b00111111111111111111111111111111), expected: i32Bits(0b11111111111111111111111111111100) },
+ { input: i32Bits(0b01111111111111111111111111111111), expected: i32Bits(0b11111111111111111111111111111110) },
+ { input: i32Bits(0b11111111111111111111111111111111), expected: i32Bits(0b11111111111111111111111111111111) },
+
+ // random after leading 1
+ { input: i32Bits(0b00000000000000000000000000000110), expected: i32Bits(0b01100000000000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000000000001101), expected: i32Bits(0b10110000000000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000000000011101), expected: i32Bits(0b10111000000000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000000000111001), expected: i32Bits(0b10011100000000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000000001101111), expected: i32Bits(0b11110110000000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000000011111111), expected: i32Bits(0b11111111000000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000000111101111), expected: i32Bits(0b11110111100000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000001111111111), expected: i32Bits(0b11111111110000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000011111110001), expected: i32Bits(0b10001111111000000000000000000000) },
+ { input: i32Bits(0b00000000000000000000111011011101), expected: i32Bits(0b10111011011100000000000000000000) },
+ { input: i32Bits(0b00000000000000000001101101111111), expected: i32Bits(0b11111110110110000000000000000000) },
+ { input: i32Bits(0b00000000000000000011111111011111), expected: i32Bits(0b11111011111111000000000000000000) },
+ { input: i32Bits(0b00000000000000000101111001110101), expected: i32Bits(0b10101110011110100000000000000000) },
+ { input: i32Bits(0b00000000000000001101111011110111), expected: i32Bits(0b11101111011110110000000000000000) },
+ { input: i32Bits(0b00000000000000011111111111110011), expected: i32Bits(0b11001111111111111000000000000000) },
+ { input: i32Bits(0b00000000000000111111111110111111), expected: i32Bits(0b11111101111111111100000000000000) },
+ { input: i32Bits(0b00000000000001111111011111111111), expected: i32Bits(0b11111111111011111110000000000000) },
+ { input: i32Bits(0b00000000000011111111111111111111), expected: i32Bits(0b11111111111111111111000000000000) },
+ { input: i32Bits(0b00000000000111110101011110111111), expected: i32Bits(0b11111101111010101111100000000000) },
+ { input: i32Bits(0b00000000001111101111111111110111), expected: i32Bits(0b11101111111111110111110000000000) },
+ { input: i32Bits(0b00000000011111111111010000101111), expected: i32Bits(0b11110100001011111111111000000000) },
+ { input: i32Bits(0b00000000111111111111001111111011), expected: i32Bits(0b11011111110011111111111100000000) },
+ { input: i32Bits(0b00000001111111011111101111111111), expected: i32Bits(0b11111111110111111011111110000000) },
+ { input: i32Bits(0b00000011101011111011110111111011), expected: i32Bits(0b11011111101111011111010111000000) },
+ { input: i32Bits(0b00000111111110111111111111111111), expected: i32Bits(0b11111111111111111101111111100000) },
+ { input: i32Bits(0b00001111000000011011011110111111), expected: i32Bits(0b11111101111011011000000011110000) },
+ { input: i32Bits(0b00011110101111011111111111111111), expected: i32Bits(0b11111111111111111011110101111000) },
+ { input: i32Bits(0b00110110111111100111111110111101), expected: i32Bits(0b10111101111111100111111101101100) },
+ { input: i32Bits(0b01010111111101111111011111011111), expected: i32Bits(0b11111011111011111110111111101010) },
+ { input: i32Bits(0b11100010011110101101101110101111), expected: i32Bits(0b11110101110110110101111001000111) },
+ ]);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/round.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/round.spec.ts
new file mode 100644
index 0000000000..bd40ed4b2a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/round.spec.ts
@@ -0,0 +1,79 @@
+export const description = `
+Execution tests for the 'round' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn round(e: T) -> T
+Result is the integer k nearest to e, as a floating point value.
+When e lies halfway between integers k and k+1, the result is k when k is even,
+and k+1 when k is odd.
+Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullF32Range, fullF16Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
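+// Editorial sketch (not part of the CTS): round-half-to-even as described above.
+// `roundHalfEvenRef` is an illustrative helper only; the tests rely on FP.*.roundInterval.
+const roundHalfEvenRef = (x: number): number => {
+  const k = Math.floor(x);
+  const frac = x - k;
+  if (frac < 0.5) return k;
+  if (frac > 0.5) return k + 1;
+  // Exactly halfway between k and k + 1: pick whichever is even.
+  return k % 2 === 0 ? k : k + 1;
+};
+// e.g. roundHalfEvenRef(2.5) === 2, roundHalfEvenRef(3.5) === 4, roundHalfEvenRef(-2.5) === -2
+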
+export const d = makeCaseCache('round', {
+ f32: () => {
+ return FP.f32.generateScalarToIntervalCases(
+ [
+      0x80000000, // https://github.com/gpuweb/cts/issues/2766
+ ...fullF32Range(),
+ ],
+ 'unfiltered',
+ FP.f32.roundInterval
+ );
+ },
+ f16: () => {
+ return FP.f16.generateScalarToIntervalCases(
+ [
+ 0x8000, // https://github.com/gpuweb/cts/issues/2766
+ ...fullF16Range(),
+ ],
+ 'unfiltered',
+ FP.f16.roundInterval
+ );
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('f32');
+ await run(t, builtin('round'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, builtin('round'), [TypeF16], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/saturate.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/saturate.spec.ts
new file mode 100644
index 0000000000..2f16502921
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/saturate.spec.ts
@@ -0,0 +1,100 @@
+export const description = `
+Execution tests for the 'saturate' builtin function
+
+S is AbstractFloat, f32, or f16
+T is S or vecN<S>
+@const fn saturate(e: T) -> T
+Returns clamp(e, 0.0, 1.0). Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeAbstractFloat, TypeF16, TypeF32 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullF16Range, fullF32Range, fullF64Range, linearRange } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, onlyConstInputSource, run } from '../../expression.js';
+
+import { abstractBuiltin, builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
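+// Editorial sketch (not part of the CTS): saturate is clamp to [0.0, 1.0], as in the
+// description above. `saturateRef` is an illustrative helper only.
+const saturateRef = (x: number): number => Math.min(Math.max(x, 0.0), 1.0);
+// e.g. saturateRef(-0.5) === 0, saturateRef(0.25) === 0.25, saturateRef(3) === 1
+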
+export const d = makeCaseCache('saturate', {
+ f32: () => {
+ return FP.f32.generateScalarToIntervalCases(
+ [
+ // Non-clamped values
+ ...linearRange(0.0, 1.0, 20),
+ ...fullF32Range(),
+ ],
+ 'unfiltered',
+ FP.f32.saturateInterval
+ );
+ },
+ f16: () => {
+ return FP.f16.generateScalarToIntervalCases(
+ [
+ // Non-clamped values
+ ...linearRange(0.0, 1.0, 20),
+ ...fullF16Range(),
+ ],
+ 'unfiltered',
+ FP.f16.saturateInterval
+ );
+ },
+ abstract: () => {
+ return FP.abstract.generateScalarToIntervalCases(
+ [
+ // Non-clamped values
+ ...linearRange(0.0, 1.0, 20),
+ ...fullF64Range(),
+ ],
+ 'unfiltered',
+ FP.abstract.saturateInterval
+ );
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u
+ .combine('inputSource', onlyConstInputSource)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('abstract');
+ await run(
+ t,
+ abstractBuiltin('saturate'),
+ [TypeAbstractFloat],
+ TypeAbstractFloat,
+ t.params,
+ cases
+ );
+ });
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('f32');
+ await run(t, builtin('saturate'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, builtin('saturate'), [TypeF16], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/select.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/select.spec.ts
new file mode 100644
index 0000000000..c64f989f42
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/select.spec.ts
@@ -0,0 +1,253 @@
+export const description = `
+Execution tests for the 'select' builtin function
+
+T is scalar, abstract numeric type, or vector
+@const fn select(f: T, t: T, cond: bool) -> T
+Returns t when cond is true, and f otherwise.
+
+T is scalar or abstract numeric type
+@const fn select(f: vecN<T>, t: vecN<T>, cond: vecN<bool>) -> vecN<T>
+Component-wise selection. Result component i is evaluated as select(f[i],t[i],cond[i]).
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import {
+ VectorType,
+ TypeVec,
+ TypeBool,
+ TypeF32,
+ TypeF16,
+ TypeI32,
+ TypeU32,
+ f32,
+ f16,
+ i32,
+ u32,
+ False,
+ True,
+ bool,
+ vec2,
+ vec3,
+ vec4,
+ abstractFloat,
+ TypeAbstractFloat,
+} from '../../../../../util/conversion.js';
+import { run, CaseList, allInputSources } from '../../expression.js';
+
+import { abstractBuiltin, builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
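+// Editorial sketch (not part of the CTS): the two select overloads from the
+// description above, over plain JS values. `selectRef` and `selectVecRef` are
+// illustrative helpers only.
+const selectRef = <T>(f: T, t: T, cond: boolean): T => (cond ? t : f);
+const selectVecRef = <T>(f: readonly T[], t: readonly T[], cond: readonly boolean[]): T[] =>
+  f.map((fi, i) => (cond[i] ? t[i] : fi));
+// e.g. selectVecRef([1, 2], [10, 20], [false, true]) yields [1, 20]
+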
+function makeBool(n: number) {
+ return bool((n & 1) === 1);
+}
+
+type scalarKind = 'b' | 'af' | 'f' | 'h' | 'i' | 'u';
+
+const dataType = {
+ b: {
+ type: TypeBool,
+ constructor: makeBool,
+ },
+ af: {
+ type: TypeAbstractFloat,
+ constructor: abstractFloat,
+ },
+ f: {
+ type: TypeF32,
+ constructor: f32,
+ },
+ h: {
+ type: TypeF16,
+ constructor: f16,
+ },
+ i: {
+ type: TypeI32,
+ constructor: i32,
+ },
+ u: {
+ type: TypeU32,
+ constructor: u32,
+ },
+};
+
+g.test('scalar')
+ .specURL('https://www.w3.org/TR/WGSL/#logical-builtin-functions')
+ .desc(`scalar tests`)
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('component', ['b', 'af', 'f', 'h', 'i', 'u'] as const)
+ .combine('overload', ['scalar', 'vec2', 'vec3', 'vec4'] as const)
+ )
+ .beforeAllSubcases(t => {
+ if (t.params.component === 'h') {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ }
+ t.skipIf(t.params.component === 'af' && t.params.inputSource !== 'const');
+ })
+ .fn(async t => {
+ const componentType = dataType[t.params.component as scalarKind].type;
+ const cons = dataType[t.params.component as scalarKind].constructor;
+
+ // Create the scalar values that will be selected from, either as scalars
+ // or vectors.
+ //
+ // Each boolean will select between c[k] and c[k+4]. Those values must
+ // always compare as different. The tricky case is boolean, where the parity
+ // has to be different, i.e. c[k]-c[k+4] must be odd.
+ const c = [0, 1, 2, 3, 5, 6, 7, 8].map(i => cons(i));
+ // Now form vectors that will have different components from each other.
+ const v2a = vec2(c[0], c[1]);
+ const v2b = vec2(c[4], c[5]);
+ const v3a = vec3(c[0], c[1], c[2]);
+ const v3b = vec3(c[4], c[5], c[6]);
+ const v4a = vec4(c[0], c[1], c[2], c[3]);
+ const v4b = vec4(c[4], c[5], c[6], c[7]);
+
+ const overloads = {
+ scalar: {
+ type: componentType,
+ cases: [
+ { input: [c[0], c[1], False], expected: c[0] },
+ { input: [c[0], c[1], True], expected: c[1] },
+ ],
+ },
+ vec2: {
+ type: TypeVec(2, componentType),
+ cases: [
+ { input: [v2a, v2b, False], expected: v2a },
+ { input: [v2a, v2b, True], expected: v2b },
+ ],
+ },
+ vec3: {
+ type: TypeVec(3, componentType),
+ cases: [
+ { input: [v3a, v3b, False], expected: v3a },
+ { input: [v3a, v3b, True], expected: v3b },
+ ],
+ },
+ vec4: {
+ type: TypeVec(4, componentType),
+ cases: [
+ { input: [v4a, v4b, False], expected: v4a },
+ { input: [v4a, v4b, True], expected: v4b },
+ ],
+ },
+ };
+ const overload = overloads[t.params.overload];
+
+ await run(
+ t,
+ t.params.component === 'af' ? abstractBuiltin('select') : builtin('select'),
+ [overload.type, overload.type, TypeBool],
+ overload.type,
+ t.params,
+ overload.cases
+ );
+ });
+
+g.test('vector')
+ .specURL('https://www.w3.org/TR/WGSL/#logical-builtin-functions')
+ .desc(`vector tests`)
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('component', ['b', 'af', 'f', 'h', 'i', 'u'] as const)
+ .combine('overload', ['vec2', 'vec3', 'vec4'] as const)
+ )
+ .beforeAllSubcases(t => {
+ if (t.params.component === 'h') {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ }
+ t.skipIf(t.params.component === 'af' && t.params.inputSource !== 'const');
+ })
+ .fn(async t => {
+ const componentType = dataType[t.params.component as scalarKind].type;
+ const cons = dataType[t.params.component as scalarKind].constructor;
+
+ // Create the scalar values that will be selected from.
+ //
+ // Each boolean will select between c[k] and c[k+4]. Those values must
+ // always compare as different. The tricky case is boolean, where the parity
+ // has to be different, i.e. c[k]-c[k+4] must be odd.
+ const c = [0, 1, 2, 3, 5, 6, 7, 8].map(i => cons(i));
+ const T = True;
+ const F = False;
+
+ let tests: { dataType: VectorType; boolType: VectorType; cases: CaseList };
+
+ switch (t.params.overload) {
+ case 'vec2': {
+ const a = vec2(c[0], c[1]);
+ const b = vec2(c[4], c[5]);
+ tests = {
+ dataType: TypeVec(2, componentType),
+ boolType: TypeVec(2, TypeBool),
+ cases: [
+ { input: [a, b, vec2(F, F)], expected: vec2(a.x, a.y) },
+ { input: [a, b, vec2(F, T)], expected: vec2(a.x, b.y) },
+ { input: [a, b, vec2(T, F)], expected: vec2(b.x, a.y) },
+ { input: [a, b, vec2(T, T)], expected: vec2(b.x, b.y) },
+ ],
+ };
+ break;
+ }
+ case 'vec3': {
+ const a = vec3(c[0], c[1], c[2]);
+ const b = vec3(c[4], c[5], c[6]);
+ tests = {
+ dataType: TypeVec(3, componentType),
+ boolType: TypeVec(3, TypeBool),
+ cases: [
+ { input: [a, b, vec3(F, F, F)], expected: vec3(a.x, a.y, a.z) },
+ { input: [a, b, vec3(F, F, T)], expected: vec3(a.x, a.y, b.z) },
+ { input: [a, b, vec3(F, T, F)], expected: vec3(a.x, b.y, a.z) },
+ { input: [a, b, vec3(F, T, T)], expected: vec3(a.x, b.y, b.z) },
+ { input: [a, b, vec3(T, F, F)], expected: vec3(b.x, a.y, a.z) },
+ { input: [a, b, vec3(T, F, T)], expected: vec3(b.x, a.y, b.z) },
+ { input: [a, b, vec3(T, T, F)], expected: vec3(b.x, b.y, a.z) },
+ { input: [a, b, vec3(T, T, T)], expected: vec3(b.x, b.y, b.z) },
+ ],
+ };
+ break;
+ }
+ case 'vec4': {
+ const a = vec4(c[0], c[1], c[2], c[3]);
+ const b = vec4(c[4], c[5], c[6], c[7]);
+ tests = {
+ dataType: TypeVec(4, componentType),
+ boolType: TypeVec(4, TypeBool),
+ cases: [
+ { input: [a, b, vec4(F, F, F, F)], expected: vec4(a.x, a.y, a.z, a.w) },
+ { input: [a, b, vec4(F, F, F, T)], expected: vec4(a.x, a.y, a.z, b.w) },
+ { input: [a, b, vec4(F, F, T, F)], expected: vec4(a.x, a.y, b.z, a.w) },
+ { input: [a, b, vec4(F, F, T, T)], expected: vec4(a.x, a.y, b.z, b.w) },
+ { input: [a, b, vec4(F, T, F, F)], expected: vec4(a.x, b.y, a.z, a.w) },
+ { input: [a, b, vec4(F, T, F, T)], expected: vec4(a.x, b.y, a.z, b.w) },
+ { input: [a, b, vec4(F, T, T, F)], expected: vec4(a.x, b.y, b.z, a.w) },
+ { input: [a, b, vec4(F, T, T, T)], expected: vec4(a.x, b.y, b.z, b.w) },
+ { input: [a, b, vec4(T, F, F, F)], expected: vec4(b.x, a.y, a.z, a.w) },
+ { input: [a, b, vec4(T, F, F, T)], expected: vec4(b.x, a.y, a.z, b.w) },
+ { input: [a, b, vec4(T, F, T, F)], expected: vec4(b.x, a.y, b.z, a.w) },
+ { input: [a, b, vec4(T, F, T, T)], expected: vec4(b.x, a.y, b.z, b.w) },
+ { input: [a, b, vec4(T, T, F, F)], expected: vec4(b.x, b.y, a.z, a.w) },
+ { input: [a, b, vec4(T, T, F, T)], expected: vec4(b.x, b.y, a.z, b.w) },
+ { input: [a, b, vec4(T, T, T, F)], expected: vec4(b.x, b.y, b.z, a.w) },
+ { input: [a, b, vec4(T, T, T, T)], expected: vec4(b.x, b.y, b.z, b.w) },
+ ],
+ };
+ break;
+ }
+ }
+
+ await run(
+ t,
+ t.params.component === 'af' ? abstractBuiltin('select') : builtin('select'),
+ [tests.dataType, tests.dataType, tests.boolType],
+ tests.dataType,
+ t.params,
+ tests.cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/sign.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/sign.spec.ts
new file mode 100644
index 0000000000..a147acf6fb
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/sign.spec.ts
@@ -0,0 +1,109 @@
+export const description = `
+Execution tests for the 'sign' builtin function
+
+S is AbstractFloat, AbstractInt, i32, f32, f16
+T is S or vecN<S>
+@const fn sign(e: T) -> T
+Returns the sign of e. Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import {
+ i32,
+ TypeF32,
+ TypeF16,
+ TypeI32,
+ TypeAbstractFloat,
+} from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import {
+ fullF32Range,
+ fullF16Range,
+ fullI32Range,
+ fullF64Range,
+} from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, onlyConstInputSource, run } from '../../expression.js';
+
+import { abstractBuiltin, builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('sign', {
+ f32: () => {
+ return FP.f32.generateScalarToIntervalCases(fullF32Range(), 'unfiltered', FP.f32.signInterval);
+ },
+ f16: () => {
+ return FP.f16.generateScalarToIntervalCases(fullF16Range(), 'unfiltered', FP.f16.signInterval);
+ },
+ abstract_float: () => {
+ return FP.abstract.generateScalarToIntervalCases(
+ fullF64Range(),
+ 'unfiltered',
+ FP.abstract.signInterval
+ );
+ },
+ i32: () =>
+ fullI32Range().map(i => {
+ const signFunc = (i: number): number => (i < 0 ? -1 : i > 0 ? 1 : 0);
+ return { input: [i32(i)], expected: i32(signFunc(i)) };
+ }),
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#sign-builtin')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u
+ .combine('inputSource', onlyConstInputSource)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('abstract_float');
+ await run(t, abstractBuiltin('sign'), [TypeAbstractFloat], TypeAbstractFloat, t.params, cases);
+ });
+
+g.test('abstract_int')
+ .specURL('https://www.w3.org/TR/WGSL/#sign-builtin')
+ .desc(`abstract int tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('i32')
+ .specURL('https://www.w3.org/TR/WGSL/#sign-builtin')
+ .desc(`i32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('i32');
+ await run(t, builtin('sign'), [TypeI32], TypeI32, t.params, cases);
+ });
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#sign-builtin')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('f32');
+ await run(t, builtin('sign'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#sign-builtin')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, builtin('sign'), [TypeF16], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/sin.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/sin.spec.ts
new file mode 100644
index 0000000000..4ab3ae7a3d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/sin.spec.ts
@@ -0,0 +1,84 @@
+export const description = `
+Execution tests for the 'sin' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn sin(e: T) -> T
+Returns the sine of e. Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullF32Range, fullF16Range, linearRange } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
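+// Editorial note (assumption, based on the WGSL accuracy table): sin only has a
+// specified error bound on [-π, π], so the cases below sample that interval densely
+// via linearRange and then add fullF32Range()/fullF16Range() values to exercise
+// inputs where the result is only loosely constrained.
+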
+export const d = makeCaseCache('sin', {
+ f32: () => {
+ return FP.f32.generateScalarToIntervalCases(
+ [
+ // Well-defined accuracy range
+ ...linearRange(-Math.PI, Math.PI, 1000),
+ ...fullF32Range(),
+ ],
+ 'unfiltered',
+ FP.f32.sinInterval
+ );
+ },
+ f16: () => {
+ return FP.f16.generateScalarToIntervalCases(
+ [
+ // Well-defined accuracy range
+ ...linearRange(-Math.PI, Math.PI, 1000),
+ ...fullF16Range(),
+ ],
+ 'unfiltered',
+ FP.f16.sinInterval
+ );
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(
+ `
+f32 tests
+
+TODO(#792): Decide what the ground-truth is for these tests.
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('f32');
+ await run(t, builtin('sin'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, builtin('sin'), [TypeF16], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/sinh.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/sinh.spec.ts
new file mode 100644
index 0000000000..d9b93a3dc8
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/sinh.spec.ts
@@ -0,0 +1,68 @@
+export const description = `
+Execution tests for the 'sinh' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn sinh(e: T) -> T
+Returns the hyperbolic sine of e. Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullF32Range, fullF16Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
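+// Editorial sketch (not part of the CTS): the reference function being bounded is
+// sinh(x) = (e^x - e^-x) / 2, which overflows f32 for |x| around 90 and above; the
+// 'finite' vs 'unfiltered' split below presumably exists so that const-evaluated
+// cases avoid non-finite expectations. `sinhRef` is an illustrative helper only.
+const sinhRef = (x: number): number => (Math.exp(x) - Math.exp(-x)) / 2;
+// e.g. sinhRef(0) === 0; sinhRef(100) ≈ 1.34e43, well past the f32 maximum (~3.4e38).
+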
+export const d = makeCaseCache('sinh', {
+ f32_const: () => {
+ return FP.f32.generateScalarToIntervalCases(fullF32Range(), 'finite', FP.f32.sinhInterval);
+ },
+ f32_non_const: () => {
+ return FP.f32.generateScalarToIntervalCases(fullF32Range(), 'unfiltered', FP.f32.sinhInterval);
+ },
+ f16_const: () => {
+ return FP.f16.generateScalarToIntervalCases(fullF16Range(), 'finite', FP.f16.sinhInterval);
+ },
+ f16_non_const: () => {
+ return FP.f16.generateScalarToIntervalCases(fullF16Range(), 'unfiltered', FP.f16.sinhInterval);
+ },
+});
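+
+// sinh(x) = (e^x - e^-x) / 2 grows without bound, so many inputs in
+// fullF32Range / fullF16Range overflow. The *_const cases therefore use the
+// 'finite' filter (overflow during constant evaluation is an error rather than
+// a value), while the *_non_const cases keep every input ('unfiltered').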
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f32_const' : 'f32_non_const');
+ await run(t, builtin('sinh'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f16_const' : 'f16_non_const');
+ await run(t, builtin('sinh'), [TypeF16], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/smoothstep.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/smoothstep.spec.ts
new file mode 100644
index 0000000000..20d2a4edbc
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/smoothstep.spec.ts
@@ -0,0 +1,94 @@
+export const description = `
+Execution tests for the 'smoothstep' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn smoothstep(low: T , high: T , x: T ) -> T
+Returns the smooth Hermite interpolation between 0 and 1.
+Component-wise when T is a vector.
+For scalar T, the result is t * t * (3.0 - 2.0 * t), where t = clamp((x - low) / (high - low), 0.0, 1.0).
+`;
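+
+// Worked example of the scalar formula above (illustrative only, not used by
+// the tests): smoothstep(low=0, high=4, x=1) gives t = clamp(1/4, 0, 1) = 0.25
+// and a result of 0.25 * 0.25 * (3.0 - 2.0 * 0.25) = 0.15625.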
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { sparseF32Range, sparseF16Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('smoothstep', {
+ f32_const: () => {
+ return FP.f32.generateScalarTripleToIntervalCases(
+ sparseF32Range(),
+ sparseF32Range(),
+ sparseF32Range(),
+ 'finite',
+ FP.f32.smoothStepInterval
+ );
+ },
+ f32_non_const: () => {
+ return FP.f32.generateScalarTripleToIntervalCases(
+ sparseF32Range(),
+ sparseF32Range(),
+ sparseF32Range(),
+ 'unfiltered',
+ FP.f32.smoothStepInterval
+ );
+ },
+ f16_const: () => {
+ return FP.f16.generateScalarTripleToIntervalCases(
+ sparseF16Range(),
+ sparseF16Range(),
+ sparseF16Range(),
+ 'finite',
+ FP.f16.smoothStepInterval
+ );
+ },
+ f16_non_const: () => {
+ return FP.f16.generateScalarTripleToIntervalCases(
+ sparseF16Range(),
+ sparseF16Range(),
+ sparseF16Range(),
+ 'unfiltered',
+ FP.f16.smoothStepInterval
+ );
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f32_const' : 'f32_non_const');
+ await run(t, builtin('smoothstep'), [TypeF32, TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f16_const' : 'f16_non_const');
+ await run(t, builtin('smoothstep'), [TypeF16, TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/sqrt.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/sqrt.spec.ts
new file mode 100644
index 0000000000..a092438043
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/sqrt.spec.ts
@@ -0,0 +1,68 @@
+export const description = `
+Execution tests for the 'sqrt' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn sqrt(e: T ) -> T
+Returns the square root of e. Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullF32Range, fullF16Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('sqrt', {
+ f32_const: () => {
+ return FP.f32.generateScalarToIntervalCases(fullF32Range(), 'finite', FP.f32.sqrtInterval);
+ },
+ f32_non_const: () => {
+ return FP.f32.generateScalarToIntervalCases(fullF32Range(), 'unfiltered', FP.f32.sqrtInterval);
+ },
+ f16_const: () => {
+ return FP.f16.generateScalarToIntervalCases(fullF16Range(), 'finite', FP.f16.sqrtInterval);
+ },
+ f16_non_const: () => {
+ return FP.f16.generateScalarToIntervalCases(fullF16Range(), 'unfiltered', FP.f16.sqrtInterval);
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f32_const' : 'f32_non_const');
+ await run(t, builtin('sqrt'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f16_const' : 'f16_non_const');
+ await run(t, builtin('sqrt'), [TypeF16], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/step.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/step.spec.ts
new file mode 100644
index 0000000000..752e2676e6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/step.spec.ts
@@ -0,0 +1,87 @@
+export const description = `
+Execution tests for the 'step' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn step(edge: T ,x: T ) -> T
+Returns 1.0 if edge ≤ x, and 0.0 otherwise. Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { anyOf } from '../../../../../util/compare.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullF32Range, fullF16Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, Case, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// stepInterval's return value can't always be interpreted as a single acceptance
+// interval: the valid result may be exactly 0.0, exactly 1.0, or either of them,
+// but it will never be a value in the open interval (0.0, 1.0).
+// See the comment block on stepInterval for more details.
+const makeCase = (trait: 'f32' | 'f16', edge: number, x: number): Case => {
+ const FPTrait = FP[trait];
+ edge = FPTrait.quantize(edge);
+ x = FPTrait.quantize(x);
+ const expected = FPTrait.stepInterval(edge, x);
+
+ // [0, 0], [1, 1], or [-∞, +∞] cases
+ if (expected.isPoint() || !expected.isFinite()) {
+ return { input: [FPTrait.scalarBuilder(edge), FPTrait.scalarBuilder(x)], expected };
+ }
+
+ // [0, 1] case, valid result is either 0.0 or 1.0.
+ const zeroInterval = FPTrait.toInterval(0);
+ const oneInterval = FPTrait.toInterval(1);
+ return {
+ input: [FPTrait.scalarBuilder(edge), FPTrait.scalarBuilder(x)],
+ expected: anyOf(zeroInterval, oneInterval),
+ };
+};
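+
+// For example (assuming exactly representable, clearly ordered inputs),
+// makeCase('f32', 0.5, 1.0) expects exactly 1.0 and makeCase('f32', 1.0, 0.5)
+// expects exactly 0.0, while a pair whose quantized edge and x make the
+// comparison ambiguous accepts either 0.0 or 1.0 via anyOf.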
+
+export const d = makeCaseCache('step', {
+ f32: () => {
+ return fullF32Range().flatMap(edge => fullF32Range().map(x => makeCase('f32', edge, x)));
+ },
+ f16: () => {
+ return fullF16Range().flatMap(edge => fullF16Range().map(x => makeCase('f16', edge, x)));
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('f32');
+ await run(t, builtin('step'), [TypeF32, TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, builtin('step'), [TypeF16, TypeF16], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/storageBarrier.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/storageBarrier.spec.ts
new file mode 100644
index 0000000000..f376db4472
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/storageBarrier.spec.ts
@@ -0,0 +1,38 @@
+export const description = `
+'storageBarrier' affects memory and atomic operations in the storage address space.
+
+All synchronization functions execute a control barrier with Acquire/Release memory ordering.
+That is, all synchronization functions, and affected memory and atomic operations are ordered
+in program order relative to the synchronization function. Additionally, the affected memory
+and atomic operations program-ordered before the synchronization function must be visible to
+all other threads in the workgroup before any affected memory or atomic operation program-ordered
+after the synchronization function is executed by a member of the workgroup. All synchronization
+functions use the Workgroup memory scope. All synchronization functions have a Workgroup
+execution scope.
+
+All synchronization functions must only be used in the compute shader stage.
+`;
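+
+// Illustrative WGSL pattern (placeholder names, not exercised by the tests
+// below): one invocation's storage write is made visible to its workgroup
+// before another invocation reads it back:
+//   data[i] = f(i);
+//   storageBarrier();
+//   let neighbour = data[(i + 1u) % n];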
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('stage')
+ .specURL('https://www.w3.org/TR/WGSL/#sync-builtin-functions')
+ .desc(
+ `
+All synchronization functions must only be used in the compute shader stage.
+`
+ )
+ .params(u => u.combine('stage', ['vertex', 'fragment', 'compute'] as const))
+ .unimplemented();
+
+g.test('barrier')
+ .specURL('https://www.w3.org/TR/WGSL/#sync-builtin-functions')
+ .desc(
+ `
+fn storageBarrier()
+`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/tan.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/tan.spec.ts
new file mode 100644
index 0000000000..be3bdee046
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/tan.spec.ts
@@ -0,0 +1,78 @@
+export const description = `
+Execution tests for the 'tan' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn tan(e: T ) -> T
+Returns the tangent of e. Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullF32Range, fullF16Range, linearRange } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('tan', {
+ f32: () => {
+ return FP.f32.generateScalarToIntervalCases(
+ [
+ // Defined accuracy range
+ ...linearRange(-Math.PI, Math.PI, 100),
+ ...fullF32Range(),
+ ],
+ 'unfiltered',
+ FP.f32.tanInterval
+ );
+ },
+ f16: () => {
+ return FP.f16.generateScalarToIntervalCases(
+ [
+ // Defined accuracy range
+ ...linearRange(-Math.PI, Math.PI, 100),
+ ...fullF16Range(),
+ ],
+ 'unfiltered',
+ FP.f16.tanInterval
+ );
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('f32');
+ await run(t, builtin('tan'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, builtin('tan'), [TypeF16], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/tanh.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/tanh.spec.ts
new file mode 100644
index 0000000000..3aca5b924b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/tanh.spec.ts
@@ -0,0 +1,62 @@
+export const description = `
+Execution tests for the 'tanh' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn tanh(e: T ) -> T
+Returns the hyperbolic tangent of e. Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeF16 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullF32Range, fullF16Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('tanh', {
+ f32: () => {
+ return FP.f32.generateScalarToIntervalCases(fullF32Range(), 'unfiltered', FP.f32.tanhInterval);
+ },
+ f16: () => {
+ return FP.f16.generateScalarToIntervalCases(fullF16Range(), 'unfiltered', FP.f16.tanhInterval);
+ },
+});
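+
+// tanh is bounded to (-1, 1) and finite for every finite input, so a single
+// 'unfiltered' case set can serve both the const and non-const input sources;
+// there is no need for the *_const ('finite') variants used by sinh above.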
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('f32');
+ await run(t, builtin('tanh'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, builtin('tanh'), [TypeF16], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureDimension.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureDimension.spec.ts
new file mode 100644
index 0000000000..0ecb9964cf
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureDimension.spec.ts
@@ -0,0 +1,160 @@
+export const description = `
+Execution tests for the 'textureDimensions' builtin function
+
+The dimensions of the texture in texels.
+For textures based on cubes, the results are the dimensions of each face of the cube.
+Cube faces are square, so the x and y components of the result are equal.
+If level is outside the range [0, textureNumLevels(t)) then any valid value for the return type may be returned.
+`;
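+
+// For example, for a 256x128 texture_2d, textureDimensions(t) returns
+// vec2<u32>(256, 128) and textureDimensions(t, 1) returns vec2<u32>(128, 64),
+// since each mip level halves each dimension (rounding down, to a minimum of 1).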
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('sampled')
+ .specURL('https://www.w3.org/TR/WGSL/#texturedimensions')
+ .desc(
+ `
+T: f32, i32, u32
+
+fn textureDimensions(t: texture_1d<T>) -> u32
+fn textureDimensions(t: texture_1d<T>, level: u32) -> u32
+fn textureDimensions(t: texture_2d<T>) -> vec2<u32>
+fn textureDimensions(t: texture_2d<T>, level: u32) -> vec2<u32>
+fn textureDimensions(t: texture_2d_array<T>) -> vec2<u32>
+fn textureDimensions(t: texture_2d_array<T>, level: u32) -> vec2<u32>
+fn textureDimensions(t: texture_3d<T>) -> vec3<u32>
+fn textureDimensions(t: texture_3d<T>, level: u32) -> vec3<u32>
+fn textureDimensions(t: texture_cube<T>) -> vec2<u32>
+fn textureDimensions(t: texture_cube<T>, level: u32) -> vec2<u32>
+fn textureDimensions(t: texture_cube_array<T>) -> vec2<u32>
+fn textureDimensions(t: texture_cube_array<T>, level: u32) -> vec2<u32>
+fn textureDimensions(t: texture_multisampled_2d<T>)-> vec2<u32>
+
+Parameters:
+ * t: the sampled texture
+ * level:
+ - The mip level, with level 0 containing a full size version of the texture.
+ - If omitted, the dimensions of level 0 are returned.
+`
+ )
+ .params(u =>
+ u
+ .combine('texture_type', [
+ 'texture_1d',
+ 'texture_2d',
+ 'texture_2d_array',
+ 'texture_3d',
+ 'texture_cube',
+ 'texture_cube_array',
+ 'texture_multisampled_2d',
+ ] as const)
+ .beginSubcases()
+ .combine('sampled_type', ['f32-only', 'i32', 'u32'] as const)
+ .combine('level', [undefined, 0, 1, 'textureNumLevels', 'textureNumLevels+1'] as const)
+ )
+ .unimplemented();
+
+g.test('depth')
+ .specURL('https://www.w3.org/TR/WGSL/#texturedimensions')
+ .desc(
+ `
+fn textureDimensions(t: texture_depth_2d) -> vec2<u32>
+fn textureDimensions(t: texture_depth_2d, level: u32) -> vec2<u32>
+fn textureDimensions(t: texture_depth_2d_array) -> vec2<u32>
+fn textureDimensions(t: texture_depth_2d_array, level: u32) -> vec2<u32>
+fn textureDimensions(t: texture_depth_cube) -> vec2<u32>
+fn textureDimensions(t: texture_depth_cube, level: u32) -> vec2<u32>
+fn textureDimensions(t: texture_depth_cube_array) -> vec2<u32>
+fn textureDimensions(t: texture_depth_cube_array, level: u32) -> vec2<u32>
+fn textureDimensions(t: texture_depth_multisampled_2d)-> vec2<u32>
+
+Parameters:
+ * t: the depth or multisampled texture
+ * level:
+ - The mip level, with level 0 containing a full size version of the texture.
+ - If omitted, the dimensions of level 0 are returned.
+`
+ )
+ .params(u =>
+ u
+ .combine('texture_type', [
+ 'texture_depth_2d',
+ 'texture_depth_2d_array',
+ 'texture_depth_cube',
+ 'texture_depth_cube_array',
+ 'texture_depth_multisampled_2d',
+ ])
+ .beginSubcases()
+ .combine('level', [undefined, 0, 1, 'textureNumLevels', 'textureNumLevels+1'] as const)
+ )
+ .unimplemented();
+
+g.test('storage')
+ .specURL('https://www.w3.org/TR/WGSL/#texturedimensions')
+ .desc(
+ `
+F: rgba8unorm
+ rgba8snorm
+ rgba8uint
+ rgba8sint
+ rgba16uint
+ rgba16sint
+ rgba16float
+ r32uint
+ r32sint
+ r32float
+ rg32uint
+ rg32sint
+ rg32float
+ rgba32uint
+ rgba32sint
+ rgba32float
+A: read, write, read_write
+
+fn textureDimensions(t: texture_storage_1d<F,A>) -> u32
+fn textureDimensions(t: texture_storage_2d<F,A>) -> vec2<u32>
+fn textureDimensions(t: texture_storage_2d_array<F,A>) -> vec2<u32>
+fn textureDimensions(t: texture_storage_3d<F,A>) -> vec3<u32>
+
+Parameters:
+ * t: the storage texture
+`
+ )
+ .params(u =>
+ u
+ .combine('texel_format', [
+ 'rgba8unorm',
+ 'rgba8snorm',
+ 'rgba8uint',
+ 'rgba8sint',
+ 'rgba16uint',
+ 'rgba16sint',
+ 'rgba16float',
+ 'r32uint',
+ 'r32sint',
+ 'r32float',
+ 'rg32uint',
+ 'rg32sint',
+ 'rg32float',
+ 'rgba32uint',
+ 'rgba32sint',
+ 'rgba32float',
+ ] as const)
+ .beginSubcases()
+ .combine('access_mode', ['read', 'write', 'read_write'] as const)
+ )
+ .unimplemented();
+
+g.test('external')
+ .specURL('https://www.w3.org/TR/WGSL/#texturedimensions')
+ .desc(
+ `
+fn textureDimensions(t: texture_external) -> vec2<u32>
+
+Parameters:
+ * t: the external texture
+`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureGather.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureGather.spec.ts
new file mode 100644
index 0000000000..40b331efab
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureGather.spec.ts
@@ -0,0 +1,270 @@
+export const description = `
+Execution tests for the 'textureGather' builtin function
+
+A texture gather operation reads from a 2D, 2D array, cube, or cube array texture, computing a four-component vector as follows:
+ * Find the four texels that would be used in a sampling operation with linear filtering, from mip level 0:
+ - Use the specified coordinate, array index (when present), and offset (when present).
+ - The texels are adjacent, forming a square, when considering their texture space coordinates (u,v).
+ - Selected texels at the texture edge, cube face edge, or cube corners are handled as in ordinary texture sampling.
+ * For each texel, read one channel and convert it into a scalar value.
+ - For non-depth textures, a zero-based component parameter specifies the channel to use.
+ * If the texture format supports the specified channel, i.e. has more than component channels:
+ - Yield scalar value v[component] when the texel value is v.
+ * Otherwise:
+ - Yield 0.0 when component is 1 or 2.
+ - Yield 1.0 when component is 3 (the alpha channel).
+ - For depth textures, yield the texel value. (Depth textures only have one channel.)
+ * Yield the four-component vector, arranging scalars produced by the previous step into components according to the relative coordinates of the texels, as follows:
+ - Result component Relative texel coordinate
+ x (umin,vmax)
+ y (umax,vmax)
+ z (umax,vmin)
+ w (umin,vmin)
+`;
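+
+// Illustrative WGSL usage (not part of these tests): gathering the green
+// channel (component 1) of the 2x2 footprint around uv into one vec4, with
+// .x/.y/.z/.w taken from the (umin,vmax), (umax,vmax), (umax,vmin), (umin,vmin)
+// texels respectively:
+//   let greens : vec4<f32> = textureGather(1, t, s, uv);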
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+
+import { generateCoordBoundaries, generateOffsets } from './utils.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('sampled_2d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturegather')
+ .desc(
+ `
+C: i32, u32
+T: i32, u32, f32
+
+fn textureGather(component: C, t: texture_2d<T>, s: sampler, coords: vec2<f32>) -> vec4<T>
+fn textureGather(component: C, t: texture_2d<T>, s: sampler, coords: vec2<f32>, offset: vec2<i32>) -> vec4<T>
+
+Parameters:
+ * component:
+ - The index of the channel to read from the selected texels.
+ - When provided, the component expression must be a creation-time expression (e.g. 1).
+ - Its value must be at least 0 and at most 3. Values outside of this range will result in a shader-creation error.
+ * t: The sampled texture to read from
+ * s: The sampler type
+ * coords: The texture coordinates
+ * offset:
+ - The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ This offset is applied before applying any texture wrapping modes.
+ - The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ - Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('T', ['f32-only', 'i32', 'u32'] as const)
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('C_value', [-1, 0, 1, 2, 3, 4] as const)
+ .combine('coords', generateCoordBoundaries(2))
+ .combine('offset', generateOffsets(2))
+ )
+ .unimplemented();
+
+g.test('sampled_3d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturegather')
+ .desc(
+ `
+C: i32, u32
+T: i32, u32, f32
+
+fn textureGather(component: C, t: texture_cube<T>, s: sampler, coords: vec3<f32>) -> vec4<T>
+
+Parameters:
+ * component:
+ - The index of the channel to read from the selected texels.
+ - When provided, the component expression must be a creation-time expression (e.g. 1).
+ - Its value must be at least 0 and at most 3. Values outside of this range will result in a shader-creation error.
+ * t: The sampled texture to read from
+ * s: The sampler type
+ * coords: The texture coordinates
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('T', ['f32-only', 'i32', 'u32'] as const)
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('C_value', [-1, 0, 1, 2, 3, 4] as const)
+ .combine('coords', generateCoordBoundaries(3))
+ )
+ .unimplemented();
+
+g.test('sampled_array_2d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturegather')
+ .desc(
+ `
+C: i32, u32
+T: i32, u32, f32
+
+fn textureGather(component: C, t: texture_2d_array<T>, s: sampler, coords: vec2<f32>, array_index: C) -> vec4<T>
+fn textureGather(component: C, t: texture_2d_array<T>, s: sampler, coords: vec2<f32>, array_index: C, offset: vec2<i32>) -> vec4<T>
+
+Parameters:
+ * component:
+ - The index of the channel to read from the selected texels.
+ - When provided, the component expression must be a creation-time expression (e.g. 1).
+ - Its value must be at least 0 and at most 3. Values outside of this range will result in a shader-creation error.
+ * t: The sampled texture to read from
+ * s: The sampler type
+ * coords: The texture coordinates
+ * array_index: The 0-based texture array index
+ * offset:
+ - The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ This offset is applied before applying any texture wrapping modes.
+ - The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ - Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('T', ['f32-only', 'i32', 'u32'] as const)
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('C_value', [-1, 0, 1, 2, 3, 4] as const)
+ .combine('coords', generateCoordBoundaries(2))
+ /* array_index not param'd as out-of-bounds is implementation specific */
+ .combine('offset', generateOffsets(2))
+ )
+ .unimplemented();
+
+g.test('sampled_array_3d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturegather')
+ .desc(
+ `
+C: i32, u32
+T: i32, u32, f32
+
+fn textureGather(component: C, t: texture_cube_array<T>, s: sampler, coords: vec3<f32>, array_index: C) -> vec4<T>
+
+Parameters:
+ * component:
+ - The index of the channel to read from the selected texels.
+ - When provided, the component expression must be a creation-time expression (e.g. 1).
+ - Its value must be at least 0 and at most 3. Values outside of this range will result in a shader-creation error.
+ * t: The sampled texture to read from
+ * s: The sampler type
+ * coords: The texture coordinates
+ * array_index: The 0-based texture array index
+`
+ )
+ .paramsSubcasesOnly(
+ u =>
+ u
+ .combine('T', ['f32-only', 'i32', 'u32'] as const)
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('C_value', [-1, 0, 1, 2, 3, 4] as const)
+ .combine('coords', generateCoordBoundaries(3))
+ /* array_index not param'd as out-of-bounds is implementation specific */
+ )
+ .unimplemented();
+
+g.test('depth_2d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturegather')
+ .desc(
+ `
+fn textureGather(t: texture_depth_2d, s: sampler, coords: vec2<f32>) -> vec4<f32>
+fn textureGather(t: texture_depth_2d, s: sampler, coords: vec2<f32>, offset: vec2<i32>) -> vec4<f32>
+
+Parameters:
+ * t: The depth texture to read from
+ * s: The sampler type
+ * coords: The texture coordinates
+ * offset:
+ - The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ This offset is applied before applying any texture wrapping modes.
+ - The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ - Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('coords', generateCoordBoundaries(2))
+ .combine('offset', generateOffsets(2))
+ )
+ .unimplemented();
+
+g.test('depth_3d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturegather')
+ .desc(
+ `
+fn textureGather(t: texture_depth_cube, s: sampler, coords: vec3<f32>) -> vec4<f32>
+
+Parameters:
+ * t: The depth texture to read from
+ * s: The sampler type
+ * coords: The texture coordinates
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('coords', generateCoordBoundaries(3))
+ )
+ .unimplemented();
+
+g.test('depth_array_2d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturegather')
+ .desc(
+ `
+C: i32, u32
+
+fn textureGather(t: texture_depth_2d_array, s: sampler, coords: vec2<f32>, array_index: C) -> vec4<f32>
+fn textureGather(t: texture_depth_2d_array, s: sampler, coords: vec2<f32>, array_index: C, offset: vec2<i32>) -> vec4<f32>
+
+Parameters:
+ * t: The depth texture to read from
+ * s: The sampler type
+ * coords: The texture coordinates
+ * array_index: The 0-based texture array index
+ * offset:
+ - The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ This offset is applied before applying any texture wrapping modes.
+ - The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ - Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('coords', generateCoordBoundaries(2))
+ /* array_index not param'd as out-of-bounds is implementation specific */
+ .combine('offset', generateOffsets(2))
+ )
+ .unimplemented();
+
+g.test('depth_array_3d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturegather')
+ .desc(
+ `
+C: i32, u32
+
+fn textureGather(t: texture_depth_cube_array, s: sampler, coords: vec3<f32>, array_index: C) -> vec4<f32>
+
+Parameters:
+ * t: The depth texture to read from
+ * s: The sampler type
+ * coords: The texture coordinates
+ * array_index: The 0-based texture array index
+`
+ )
+ .paramsSubcasesOnly(
+ u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('coords', generateCoordBoundaries(3))
+ /* array_index not param'd as out-of-bounds is implementation specific */
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureGatherCompare.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureGatherCompare.spec.ts
new file mode 100644
index 0000000000..c743883ce8
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureGatherCompare.spec.ts
@@ -0,0 +1,134 @@
+export const description = `
+Execution tests for the 'textureGatherCompare' builtin function
+
+A texture gather compare operation performs a depth comparison on four texels in a depth texture and collects the results into a single vector, as follows:
+ * Find the four texels that would be used in a depth sampling operation with linear filtering, from mip level 0:
+ - Use the specified coordinate, array index (when present), and offset (when present).
+ - The texels are adjacent, forming a square, when considering their texture space coordinates (u,v).
+ - Selected texels at the texture edge, cube face edge, or cube corners are handled as in ordinary texture sampling.
+ * For each texel, perform a comparison against the depth reference value, yielding a 0.0 or 1.0 value, as controlled by the comparison sampler parameters.
+ * Yield the four-component vector where the components are the comparison results with the texels with relative texel coordinates as follows:
+
+ Result component Relative texel coordinate
+ x (umin,vmax)
+ y (umax,vmax)
+ z (umax,vmin)
+ w (umin,vmin)
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+
+import { generateCoordBoundaries, generateOffsets } from './utils.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('array_2d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturegathercompare')
+ .desc(
+ `
+C: i32, u32
+
+fn textureGatherCompare(t: texture_depth_2d_array, s: sampler_comparison, coords: vec2<f32>, array_index: C, depth_ref: f32) -> vec4<f32>
+fn textureGatherCompare(t: texture_depth_2d_array, s: sampler_comparison, coords: vec2<f32>, array_index: C, depth_ref: f32, offset: vec2<i32>) -> vec4<f32>
+
+Parameters:
+ * t: The depth texture to read from
+ * s: The sampler_comparison
+ * coords: The texture coordinates
+ * array_index: The 0-based array index.
+ * depth_ref: The reference value to compare the sampled depth value against
+ * offset:
+ - The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ This offset is applied before applying any texture wrapping modes.
+ - The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ - Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('C_value', [-1, 0, 1, 2, 3, 4])
+ .combine('coords', generateCoordBoundaries(2))
+ .combine('depth_ref', [-1 /* smaller ref */, 0 /* equal ref */, 1 /* larger ref */] as const)
+ .combine('offset', generateOffsets(2))
+ )
+ .unimplemented();
+
+g.test('array_3d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturegathercompare')
+ .desc(
+ `
+C: i32, u32
+
+fn textureGatherCompare(t: texture_depth_cube_array, s: sampler_comparison, coords: vec3<f32>, array_index: C, depth_ref: f32) -> vec4<f32>
+
+Parameters:
+ * t: The depth texture to read from
+ * s: The sampler_comparison
+ * coords: The texture coordinates
+ * array_index: The 0-based array index.
+ * depth_ref: The reference value to compare the sampled depth value against
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('C_value', [-1, 0, 1, 2, 3, 4])
+ .combine('coords', generateCoordBoundaries(3))
+ .combine('depth_ref', [-1 /* smaller ref */, 0 /* equal ref */, 1 /* larger ref */] as const)
+ )
+ .unimplemented();
+
+g.test('sampled_array_2d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturegathercompare')
+ .desc(
+ `
+fn textureGatherCompare(t: texture_depth_2d, s: sampler_comparison, coords: vec2<f32>, depth_ref: f32) -> vec4<f32>
+fn textureGatherCompare(t: texture_depth_2d, s: sampler_comparison, coords: vec2<f32>, depth_ref: f32, offset: vec2<i32>) -> vec4<f32>
+
+Parameters:
+ * t: The depth texture to read from
+ * s: The sampler_comparison
+ * coords: The texture coordinates
+ * depth_ref: The reference value to compare the sampled depth value against
+ * offset:
+ - The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ This offset is applied before applying any texture wrapping modes.
+ - The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ - Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('coords', generateCoordBoundaries(2))
+ .combine('depth_ref', [-1 /* smaller ref */, 0 /* equal ref */, 1 /* larger ref */] as const)
+ .combine('offset', generateOffsets(2))
+ )
+ .unimplemented();
+
+g.test('sampled_array_3d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturegathercompare')
+ .desc(
+ `
+fn textureGatherCompare(t: texture_depth_cube, s: sampler_comparison, coords: vec3<f32>, depth_ref: f32) -> vec4<f32>
+
+Parameters:
+ * t: The depth texture to read from
+ * s: The sampler_comparison
+ * coords: The texture coordinates
+ * depth_ref: The reference value to compare the sampled depth value against
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('coords', generateCoordBoundaries(3))
+ .combine('depth_ref', [-1 /* smaller ref */, 0 /* equal ref */, 1 /* larger ref */] as const)
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureLoad.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureLoad.spec.ts
new file mode 100644
index 0000000000..30cc4fff52
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureLoad.spec.ts
@@ -0,0 +1,185 @@
+export const description = `
+Execution tests for the 'textureLoad' builtin function
+
+Reads a single texel from a texture without sampling or filtering.
+
+Returns the unfiltered texel data.
+
+An out of bounds access occurs if:
+ * any element of coords is outside the range [0, textureDimensions(t, level)) for the corresponding element, or
+ * array_index is outside the range [0, textureNumLayers(t)), or
+ * level is outside the range [0, textureNumLevels(t))
+
+If an out of bounds access occurs, the built-in function returns one of:
+ * The data for some texel within bounds of the texture
+ * A vector (0,0,0,0) or (0,0,0,1) of the appropriate type for non-depth textures
+ * 0.0 for depth textures
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+
+import { generateCoordBoundaries } from './utils.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('sampled_1d')
+ .specURL('https://www.w3.org/TR/WGSL/#textureload')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureLoad(t: texture_1d<T>, coords: C, level: C) -> vec4<T>
+
+Parameters:
+ * t: The sampled texture to read from
+ * coords: The 0-based texel coordinate
+ * level: The mip level, with level 0 containing a full size version of the texture
+`
+ )
+ .params(u =>
+ u
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('coords', generateCoordBoundaries(1))
+ .combine('level', [-1, 0, `numlevels-1`, `numlevels`] as const)
+ )
+ .unimplemented();
+
+g.test('sampled_2d')
+ .specURL('https://www.w3.org/TR/WGSL/#textureload')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureLoad(t: texture_2d<T>, coords: vec2<C>, level: C) -> vec4<T>
+
+Parameters:
+ * t: The sampled texture to read from
+ * coords: The 0-based texel coordinate
+ * level: The mip level, with level 0 containing a full size version of the texture
+`
+ )
+ .params(u =>
+ u
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('coords', generateCoordBoundaries(2))
+ .combine('level', [-1, 0, `numlevels-1`, `numlevels`] as const)
+ )
+ .unimplemented();
+
+g.test('sampled_3d')
+ .specURL('https://www.w3.org/TR/WGSL/#textureload')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureLoad(t: texture_3d<T>, coords: vec3<C>, level: C) -> vec4<T>
+
+Parameters:
+ * t: The sampled texture to read from
+ * coords: The 0-based texel coordinate
+ * level: The mip level, with level 0 containing a full size version of the texture
+`
+ )
+ .params(u =>
+ u
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('coords', generateCoordBoundaries(3))
+ .combine('level', [-1, 0, `numlevels-1`, `numlevels`] as const)
+ )
+ .unimplemented();
+
+g.test('multisampled')
+ .specURL('https://www.w3.org/TR/WGSL/#textureload')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureLoad(t: texture_multisampled_2d<T>, coords: vec2<C>, sample_index: C)-> vec4<T>
+fn textureLoad(t: texture_depth_multisampled_2d, coords: vec2<C>, sample_index: C)-> f32
+
+Parameters:
+ * t: The sampled texture to read from
+ * coords: The 0-based texel coordinate
+ * sample_index: The 0-based sample index of the multisampled texture
+`
+ )
+ .params(u =>
+ u
+ .combine('texture_type', [
+ 'texture_multisampled_2d',
+ 'texture_depth_multisampled_2d',
+ ] as const)
+ .beginSubcases()
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('coords', generateCoordBoundaries(2))
+ .combine('sample_index', [-1, 0, `sampleCount-1`, `sampleCount`] as const)
+ )
+ .unimplemented();
+
+g.test('depth')
+ .specURL('https://www.w3.org/TR/WGSL/#textureload')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureLoad(t: texture_depth_2d, coords: vec2<C>, level: C) -> f32
+
+Parameters:
+ * t: The sampled texture to read from
+ * coords: The 0-based texel coordinate
+ * level: The mip level, with level 0 containing a full size version of the texture
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('coords', generateCoordBoundaries(2))
+ .combine('level', [-1, 0, `numlevels-1`, `numlevels`] as const)
+ )
+ .unimplemented();
+
+g.test('external')
+ .specURL('https://www.w3.org/TR/WGSL/#textureload')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureLoad(t: texture_external, coords: vec2<C>) -> vec4<f32>
+
+Parameters:
+ * t: The sampled texture to read from
+ * coords: The 0-based texel coordinate
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u.combine('C', ['i32', 'u32'] as const).combine('coords', generateCoordBoundaries(2))
+ )
+ .unimplemented();
+
+g.test('arrayed')
+ .specURL('https://www.w3.org/TR/WGSL/#textureload')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureLoad(t: texture_2d_array<T>, coords: vec2<C>, array_index: C, level: C) -> vec4<T>
+fn textureLoad(t: texture_depth_2d_array, coords: vec2<C>, array_index: C, level: C) -> f32
+
+Parameters:
+ * t: The sampled texture to read from
+ * coords: The 0-based texel coordinate
+ * array_index: The 0-based texture array index
+ * level: The mip level, with level 0 containing a full size version of the texture
+`
+ )
+ .params(u =>
+ u
+ .combine('texture_type', ['texture_2d_array', 'texture_depth_2d_array'] as const)
+ .beginSubcases()
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('coords', generateCoordBoundaries(2))
+ .combine('array_index', [-1, 0, `numlayers-1`, `numlayers`] as const)
+ .combine('level', [-1, 0, `numlevels-1`, `numlevels`] as const)
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureNumLayers.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureNumLayers.spec.ts
new file mode 100644
index 0000000000..b845301161
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureNumLayers.spec.ts
@@ -0,0 +1,100 @@
+export const description = `
+Execution tests for the 'textureNumLayers' builtin function
+
+Returns the number of layers (elements) of an array texture.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('sampled')
+ .specURL('https://www.w3.org/TR/WGSL/#texturenumlayers')
+ .desc(
+ `
+T, a sampled type.
+
+fn textureNumLayers(t: texture_2d_array<T>) -> u32
+fn textureNumLayers(t: texture_cube_array<T>) -> u32
+
+Parameters
+ * t The sampled array texture.
+`
+ )
+ .params(u =>
+ u
+ .combine('texture_type', ['texture_2d_array', 'texture_cube_array'] as const)
+ .beginSubcases()
+ .combine('sampled_type', ['f32-only', 'i32', 'u32'] as const)
+ )
+ .unimplemented();
+
+g.test('arrayed')
+ .specURL('https://www.w3.org/TR/WGSL/#texturenumlayers')
+ .desc(
+ `
+fn textureNumLayers(t: texture_depth_2d_array) -> u32
+fn textureNumLayers(t: texture_depth_cube_array) -> u32
+
+Parameters
+ * t The depth array texture.
+`
+ )
+ .params(u =>
+ u.combine('texture_type', ['texture_depth_2d_array', 'texture_depth_cube_array'] as const)
+ )
+ .unimplemented();
+
+g.test('storage')
+ .specURL('https://www.w3.org/TR/WGSL/#texturenumlayers')
+ .desc(
+ `
+F: rgba8unorm
+ rgba8snorm
+ rgba8uint
+ rgba8sint
+ rgba16uint
+ rgba16sint
+ rgba16float
+ r32uint
+ r32sint
+ r32float
+ rg32uint
+ rg32sint
+ rg32float
+ rgba32uint
+ rgba32sint
+ rgba32float
+A: read, write, read_write
+
+fn textureNumLayers(t: texture_storage_2d_array<F,A>) -> u32
+
+Parameters
+ * t The storage array texture.
+`
+ )
+ .params(u =>
+ u
+ .beginSubcases()
+ .combine('texel_format', [
+ 'rgba8unorm',
+ 'rgba8snorm',
+ 'rgba8uint',
+ 'rgba8sint',
+ 'rgba16uint',
+ 'rgba16sint',
+ 'rgba16float',
+ 'r32uint',
+ 'r32sint',
+ 'r32float',
+ 'rg32uint',
+ 'rg32sint',
+ 'rg32float',
+ 'rgba32uint',
+ 'rgba32sint',
+ 'rgba32float',
+ ] as const)
+ .combine('access_mode', ['read', 'write', 'read_write'] as const)
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureNumLevels.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureNumLevels.spec.ts
new file mode 100644
index 0000000000..4204397b23
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureNumLevels.spec.ts
@@ -0,0 +1,65 @@
+export const description = `
+Execution tests for the 'textureNumLevels' builtin function
+
+Returns the number of mip levels of a texture.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('sampled')
+ .specURL('https://www.w3.org/TR/WGSL/#texturenumlevels')
+ .desc(
+ `
+T, a sampled type.
+
+fn textureNumLevels(t: texture_1d<T>) -> u32
+fn textureNumLevels(t: texture_2d<T>) -> u32
+fn textureNumLevels(t: texture_2d_array<T>) -> u32
+fn textureNumLevels(t: texture_3d<T>) -> u32
+fn textureNumLevels(t: texture_cube<T>) -> u32
+fn textureNumLevels(t: texture_cube_array<T>) -> u32
+
+Parameters
+ * t The sampled texture.
+`
+ )
+ .params(u =>
+ u
+ .combine('texture_type', [
+ 'texture_1d',
+ 'texture_2d',
+ 'texture_2d_array',
+ 'texture_3d',
+ 'texture_cube',
+ 'texture_cube_array',
+ ] as const)
+ .beginSubcases()
+ .combine('sampled_type', ['f32-only', 'i32', 'u32'] as const)
+ )
+ .unimplemented();
+
+g.test('depth')
+ .specURL('https://www.w3.org/TR/WGSL/#texturenumlevels')
+ .desc(
+ `
+fn textureNumLevels(t: texture_depth_2d) -> u32
+fn textureNumLevels(t: texture_depth_2d_array) -> u32
+fn textureNumLevels(t: texture_depth_cube) -> u32
+fn textureNumLevels(t: texture_depth_cube_array) -> u32
+
+Parameters
+ * t The depth texture.
+`
+ )
+ .params(u =>
+ u.combine('texture_type', [
+ 'texture_depth_2d',
+ 'texture_depth_2d_array',
+ 'texture_depth_cube',
+ 'texture_depth_cube_array',
+ ] as const)
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureNumSamples.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureNumSamples.spec.ts
new file mode 100644
index 0000000000..26bda6cd48
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureNumSamples.spec.ts
@@ -0,0 +1,37 @@
+export const description = `
+Execution tests for the 'textureNumSamples' builtin function
+
+Returns the number of samples per texel in a multisampled texture.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('sampled')
+ .specURL('https://www.w3.org/TR/WGSL/#texturenumsamples')
+ .desc(
+ `
+T, a sampled type.
+
+fn textureNumSamples(t: texture_multisampled_2d<T>) -> u32
+
+Parameters
+ * t The multisampled texture.
+`
+ )
+ .params(u => u.beginSubcases().combine('sampled_type', ['f32-only', 'i32', 'u32'] as const))
+ .unimplemented();
+
+g.test('depth')
+ .specURL('https://www.w3.org/TR/WGSL/#texturenumsamples')
+ .desc(
+ `
+fn textureNumSamples(t: texture_depth_multisampled_2d) -> u32
+
+Parameters
+ * t The multisampled texture.
+`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureSample.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureSample.spec.ts
new file mode 100644
index 0000000000..f5b01dfc63
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureSample.spec.ts
@@ -0,0 +1,273 @@
+export const description = `
+Samples a texture.
+
+Must only be used in a fragment shader stage.
+Must only be invoked in uniform control flow.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+
+import { generateCoordBoundaries, generateOffsets } from './utils.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('stage')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesample')
+ .desc(
+ `
+Tests that 'textureSample' can only be called in 'fragment' shaders.
+`
+ )
+ .params(u => u.combine('stage', ['fragment', 'vertex', 'compute'] as const))
+ .unimplemented();
+
+g.test('control_flow')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesample')
+ .desc(
+ `
+Tests that 'textureSample' can only be called in uniform control flow.
+`
+ )
+ .params(u => u.combine('stage', ['fragment', 'vertex', 'compute'] as const))
+ .unimplemented();
+
+g.test('sampled_1d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesample')
+ .desc(
+ `
+fn textureSample(t: texture_1d<f32>, s: sampler, coords: f32) -> vec4<f32>
+
+Parameters:
+ * t The sampled, depth, or external texture to sample.
+ * s The sampler type.
+ * coords The texture coordinates used for sampling.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'] as const)
+ .combine('coords', generateCoordBoundaries(1))
+ )
+ .unimplemented();
+
+g.test('sampled_2d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesample')
+ .desc(
+ `
+fn textureSample(t: texture_2d<f32>, s: sampler, coords: vec2<f32>) -> vec4<f32>
+fn textureSample(t: texture_2d<f32>, s: sampler, coords: vec2<f32>, offset: vec2<i32>) -> vec4<f32>
+
+Parameters:
+ * t The sampled, depth, or external texture to sample.
+ * s The sampler type.
+ * coords The texture coordinates used for sampling.
+ * offset
+ * The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ * This offset is applied before applying any texture wrapping modes.
+ * The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ * Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'] as const)
+ .combine('coords', generateCoordBoundaries(2))
+ .combine('offset', generateOffsets(2))
+ )
+ .unimplemented();
+
+g.test('sampled_3d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesample')
+ .desc(
+ `
+fn textureSample(t: texture_3d<f32>, s: sampler, coords: vec3<f32>) -> vec4<f32>
+fn textureSample(t: texture_3d<f32>, s: sampler, coords: vec3<f32>, offset: vec3<i32>) -> vec4<f32>
+fn textureSample(t: texture_cube<f32>, s: sampler, coords: vec3<f32>) -> vec4<f32>
+
+Parameters:
+ * t The sampled, depth, or external texture to sample.
+ * s The sampler type.
+ * coords The texture coordinates used for sampling.
+ * offset
+ * The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ * This offset is applied before applying any texture wrapping modes.
+ * The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ * Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .params(u =>
+ u
+ .combine('texture_type', ['texture_3d', 'texture_cube'] as const)
+ .beginSubcases()
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'] as const)
+ .combine('coords', generateCoordBoundaries(3))
+ .combine('offset', generateOffsets(3))
+ )
+ .unimplemented();
+
+g.test('depth_2d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesample')
+ .desc(
+ `
+fn textureSample(t: texture_depth_2d, s: sampler, coords: vec2<f32>) -> f32
+fn textureSample(t: texture_depth_2d, s: sampler, coords: vec2<f32>, offset: vec2<i32>) -> f32
+
+Parameters:
+ * t The sampled, depth, or external texture to sample.
+ * s The sampler type.
+ * coords The texture coordinates used for sampling.
+ * offset
+ * The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ * This offset is applied before applying any texture wrapping modes.
+ * The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ * Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'] as const)
+ .combine('coords', generateCoordBoundaries(2))
+ .combine('offset', generateOffsets(2))
+ )
+ .unimplemented();
+
+g.test('sampled_array_2d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesample')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureSample(t: texture_2d_array<f32>, s: sampler, coords: vec2<f32>, array_index: C) -> vec4<f32>
+fn textureSample(t: texture_2d_array<f32>, s: sampler, coords: vec2<f32>, array_index: C, offset: vec2<i32>) -> vec4<f32>
+
+Parameters:
+ * t The sampled, depth, or external texture to sample.
+ * s The sampler type.
+ * coords The texture coordinates used for sampling.
+ * array_index The 0-based texture array index to sample.
+ * offset
+ * The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ * This offset is applied before applying any texture wrapping modes.
+ * The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ * Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('C_value', [-1, 0, 1, 2, 3, 4] as const)
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'] as const)
+ .combine('coords', generateCoordBoundaries(2))
+ /* array_index not param'd as out-of-bounds is implementation specific */
+ .combine('offset', generateOffsets(2))
+ )
+ .unimplemented();
+
+g.test('sampled_array_3d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesample')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureSample(t: texture_cube_array<f32>, s: sampler, coords: vec3<f32>, array_index: C) -> vec4<f32>
+
+Parameters:
+ * t The sampled, depth, or external texture to sample.
+ * s The sampler type.
+ * coords The texture coordinates used for sampling.
+ * array_index The 0-based texture array index to sample.
+`
+ )
+ .paramsSubcasesOnly(
+ u =>
+ u
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('C_value', [-1, 0, 1, 2, 3, 4] as const)
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'] as const)
+ .combine('coords', generateCoordBoundaries(3))
+ /* array_index not param'd as out-of-bounds is implementation specific */
+ )
+ .unimplemented();
+
+g.test('depth_3d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesample')
+ .desc(
+ `
+fn textureSample(t: texture_depth_cube, s: sampler, coords: vec3<f32>) -> f32
+
+Parameters:
+ * t The sampled, depth, or external texture to sample.
+ * s The sampler type.
+ * coords The texture coordinates used for sampling.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'] as const)
+ .combine('coords', generateCoordBoundaries(3))
+ )
+ .unimplemented();
+
+g.test('depth_array_2d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesample')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureSample(t: texture_depth_2d_array, s: sampler, coords: vec2<f32>, array_index: C) -> f32
+fn textureSample(t: texture_depth_2d_array, s: sampler, coords: vec2<f32>, array_index: C, offset: vec2<i32>) -> f32
+
+Parameters:
+ * t The sampled, depth, or external texture to sample.
+ * s The sampler type.
+ * coords The texture coordinates used for sampling.
+ * array_index The 0-based texture array index to sample.
+ * offset
+ * The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ * This offset is applied before applying any texture wrapping modes.
+ * The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ * Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('C_value', [-1, 0, 1, 2, 3, 4] as const)
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'] as const)
+ .combine('coords', generateCoordBoundaries(2))
+ /* array_index not param'd as out-of-bounds is implementation specific */
+ .combine('offset', generateOffsets(2))
+ )
+ .unimplemented();
+
+g.test('depth_array_3d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesample')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureSample(t: texture_depth_cube_array, s: sampler, coords: vec3<f32>, array_index: C) -> f32
+
+Parameters:
+ * t The sampled, depth, or external texture to sample.
+ * s The sampler type.
+ * coords The texture coordinates used for sampling.
+ * array_index The 0-based texture array index to sample.
+`
+ )
+ .paramsSubcasesOnly(
+ u =>
+ u
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('C_value', [-1, 0, 1, 2, 3, 4] as const)
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'] as const)
+ .combine('coords', generateCoordBoundaries(3))
+ /* array_index not param'd as out-of-bounds is implementation specific */
+ )
+ .unimplemented();
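
A rough, non-CTS sketch of what one 'sampled_2d_coords' subcase above could
eventually compile. The binding slots, entry-point name, and the 64x64 texture
size implied by the divisor are illustrative assumptions; only the textureSample
call shape comes from the signatures quoted in the descriptions.

// Hedged sketch (TypeScript holding a WGSL string), not part of the CTS.
const textureSample2dWGSL = `
  @group(0) @binding(0) var t: texture_2d<f32>;
  @group(0) @binding(1) var s: sampler;

  @fragment
  fn fsMain(@builtin(position) pos: vec4<f32>) -> @location(0) vec4<f32> {
    let coords = pos.xy / 64.0;
    // The offset must be a const-expression with components in [-8, 7].
    return textureSample(t, s, coords, vec2<i32>(1, 2));
  }
`;
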
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureSampleBias.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureSampleBias.spec.ts
new file mode 100644
index 0000000000..786bce4830
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureSampleBias.spec.ts
@@ -0,0 +1,163 @@
+export const description = `
+Execution tests for the 'textureSampleBias' builtin function
+
+Samples a texture with a bias applied to the mip level.
+Must only be used in a fragment shader stage.
+Must only be invoked in uniform control flow.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+
+import { generateCoordBoundaries, generateOffsets } from './utils.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('stage')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplebias')
+ .desc(
+ `
+Tests that 'textureSampleBias' can only be called in 'fragment' shaders.
+`
+ )
+ .params(u => u.combine('stage', ['fragment', 'vertex', 'compute'] as const))
+ .unimplemented();
+
+g.test('control_flow')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplebias')
+ .desc(
+ `
+Tests that 'textureSampleBias' can only be called in uniform control flow.
+`
+ )
+ .params(u => u.combine('stage', ['fragment', 'vertex', 'compute'] as const))
+ .unimplemented();
+
+g.test('sampled_2d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplebias')
+ .desc(
+ `
+fn textureSampleBias(t: texture_2d<f32>, s: sampler, coords: vec2<f32>, bias: f32) -> vec4<f32>
+fn textureSampleBias(t: texture_2d<f32>, s: sampler, coords: vec2<f32>, bias: f32, offset: vec2<i32>) -> vec4<f32>
+
+Parameters:
+ * t: The sampled texture to read from
+ * s: The sampler type
+ * coords: The texture coordinates
+ * bias: The bias to apply to the mip level before sampling. bias must be between -16.0 and 15.99.
+ * offset:
+ - The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ This offset is applied before applying any texture wrapping modes.
+ - The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ - Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('coords', generateCoordBoundaries(2))
+ .combine('bias', [-16.1, -16, 0, 1, 15.99, 16] as const)
+ .combine('offset', generateOffsets(2))
+ )
+ .unimplemented();
+
+g.test('sampled_3d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplebias')
+ .desc(
+ `
+fn textureSampleBias(t: texture_3d<f32>, s: sampler, coords: vec3<f32>, bias: f32) -> vec4<f32>
+fn textureSampleBias(t: texture_3d<f32>, s: sampler, coords: vec3<f32>, bias: f32, offset: vec3<i32>) -> vec4<f32>
+fn textureSampleBias(t: texture_cube<f32>, s: sampler, coords: vec3<f32>, bias: f32) -> vec4<f32>
+
+Parameters:
+ * t: The sampled texture to read from
+ * s: The sampler type
+ * coords: The texture coordinates
+ * bias: The bias to apply to the mip level before sampling. bias must be between -16.0 and 15.99.
+ * offset:
+ - The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ This offset is applied before applying any texture wrapping modes.
+ - The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ - Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .params(u =>
+ u
+ .combine('texture_type', ['texture_3d', 'texture_cube'] as const)
+ .beginSubcases()
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('coords', generateCoordBoundaries(3))
+ .combine('bias', [-16.1, -16, 0, 1, 15.99, 16] as const)
+ .combine('offset', generateOffsets(3))
+ )
+ .unimplemented();
+
+g.test('arrayed_2d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplebias')
+ .desc(
+ `
+C: i32, u32
+
+fn textureSampleBias(t: texture_2d_array<f32>, s: sampler, coords: vec2<f32>, array_index: C, bias: f32) -> vec4<f32>
+fn textureSampleBias(t: texture_2d_array<f32>, s: sampler, coords: vec2<f32>, array_index: C, bias: f32, offset: vec2<i32>) -> vec4<f32>
+
+Parameters:
+ * t: The sampled texture to read from
+ * s: The sampler type
+ * coords: The texture coordinates
+ * array_index: The 0-based texture array index to sample.
+ * bias: The bias to apply to the mip level before sampling. bias must be between -16.0 and 15.99.
+ * offset:
+ - The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ This offset is applied before applying any texture wrapping modes.
+ - The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ - Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('coords', generateCoordBoundaries(2))
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('C_value', [-1, 0, 1, 2, 3, 4] as const)
+ /* array_index not param'd as out-of-bounds is implementation specific */
+ .combine('bias', [-16.1, -16, 0, 1, 15.99, 16] as const)
+ .combine('offset', generateOffsets(2))
+ )
+ .unimplemented();
+
+g.test('arrayed_3d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplebias')
+ .desc(
+ `
+C: i32, u32
+
+fn textureSampleBias(t: texture_cube_array<f32>, s: sampler, coords: vec3<f32>, array_index: C, bias: f32) -> vec4<f32>
+
+Parameters:
+ * t: The sampled texture to read from
+ * s: The sampler type
+ * coords: The texture coordinates
+ * array_index: The 0-based texture array index to sample.
+ * bias: The bias to apply to the mip level before sampling. bias must be between -16.0 and 15.99.
+ * offset:
+ - The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ This offset is applied before applying any texture wrapping modes.
+ - The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ - Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('coords', generateCoordBoundaries(3))
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('C_value', [-1, 0, 1, 2, 3, 4] as const)
+ /* array_index not param'd as out-of-bounds is implementation specific */
+ .combine('bias', [-16.1, -16, 0, 1, 15.99, 16] as const)
+ )
+ .unimplemented();
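
A hedged sketch of the WGSL a 'sampled_2d_coords' bias subcase might exercise;
the binding layout and values are illustrative, but the bias argument stays
inside the [-16.0, 15.99] range the descriptions call out.

// Hedged sketch (not CTS code): bias shifts the implicitly computed mip level.
const textureSampleBiasWGSL = `
  @group(0) @binding(0) var t: texture_2d<f32>;
  @group(0) @binding(1) var s: sampler;

  @fragment
  fn fsMain(@builtin(position) pos: vec4<f32>) -> @location(0) vec4<f32> {
    return textureSampleBias(t, s, pos.xy / 64.0, 1.5, vec2<i32>(-8, 7));
  }
`;
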
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureSampleCompare.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureSampleCompare.spec.ts
new file mode 100644
index 0000000000..9f723fac2e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureSampleCompare.spec.ts
@@ -0,0 +1,145 @@
+export const description = `
+Samples a depth texture and compares the sampled depth values against a reference value.
+
+Must only be used in a fragment shader stage.
+Must only be invoked in uniform control flow.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+
+import { generateCoordBoundaries, generateOffsets } from './utils.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('stage')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplecompare')
+ .desc(
+ `
+Tests that 'textureSampleCompare' can only be called in 'fragment' shaders.
+`
+ )
+ .params(u => u.combine('stage', ['fragment', 'vertex', 'compute'] as const))
+ .unimplemented();
+
+g.test('control_flow')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplecompare')
+ .desc(
+ `
+Tests that 'textureSampleCompare' can only be called in uniform control flow.
+`
+ )
+ .params(u => u.combine('stage', ['fragment', 'vertex', 'compute'] as const))
+ .unimplemented();
+
+g.test('2d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplecompare')
+ .desc(
+ `
+fn textureSampleCompare(t: texture_depth_2d, s: sampler_comparison, coords: vec2<f32>, depth_ref: f32) -> f32
+fn textureSampleCompare(t: texture_depth_2d, s: sampler_comparison, coords: vec2<f32>, depth_ref: f32, offset: vec2<i32>) -> f32
+
+Parameters:
+ * t The depth texture to sample.
+ * s The sampler_comparison type.
+ * coords The texture coordinates used for sampling.
+ * depth_ref The reference value to compare the sampled depth value against.
+ * offset
+ * The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ * This offset is applied before applying any texture wrapping modes.
+ * The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ * Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('coords', generateCoordBoundaries(2))
+ .combine('depth_ref', [-1 /* smaller ref */, 0 /* equal ref */, 1 /* larger ref */] as const)
+ .combine('offset', generateOffsets(2))
+ )
+ .unimplemented();
+
+g.test('3d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplecompare')
+ .desc(
+ `
+fn textureSampleCompare(t: texture_depth_cube, s: sampler_comparison, coords: vec3<f32>, depth_ref: f32) -> f32
+
+Parameters:
+ * t The depth texture to sample.
+ * s The sampler_comparison type.
+ * coords The texture coordinates used for sampling.
+ * depth_ref The reference value to compare the sampled depth value against.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('coords', generateCoordBoundaries(3))
+ .combine('depth_ref', [-1 /* smaller ref */, 0 /* equal ref */, 1 /* larger ref */] as const)
+ )
+ .unimplemented();
+
+g.test('arrayed_2d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplecompare')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureSampleCompare(t: texture_depth_2d_array, s: sampler_comparison, coords: vec2<f32>, array_index: C, depth_ref: f32) -> f32
+fn textureSampleCompare(t: texture_depth_2d_array, s: sampler_comparison, coords: vec2<f32>, array_index: C, depth_ref: f32, offset: vec2<i32>) -> f32
+
+Parameters:
+ * t The depth texture to sample.
+ * s The sampler_comparison type.
+ * coords The texture coordinates used for sampling.
+ * array_index: The 0-based texture array index to sample.
+ * depth_ref The reference value to compare the sampled depth value against.
+ * offset
+ * The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ * This offset is applied before applying any texture wrapping modes.
+ * The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ * Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('coords', generateCoordBoundaries(2))
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('C_value', [-1, 0, 1, 2, 3, 4] as const)
+ /* array_index not param'd as out-of-bounds is implementation specific */
+ .combine('depth_ref', [-1 /* smaller ref */, 0 /* equal ref */, 1 /* larger ref */] as const)
+ .combine('offset', generateOffsets(2))
+ )
+ .unimplemented();
+
+g.test('arrayed_3d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplecompare')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureSampleCompare(t: texture_depth_cube_array, s: sampler_comparison, coords: vec3<f32>, array_index: C, depth_ref: f32) -> f32
+
+Parameters:
+ * t The depth texture to sample.
+ * s The sampler_comparison type.
+ * coords The texture coordinates used for sampling.
+ * array_index: The 0-based texture array index to sample.
+ * depth_ref The reference value to compare the sampled depth value against.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('coords', generateCoordBoundaries(3))
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('C_value', [-1, 0, 1, 2, 3, 4] as const)
+ /* array_index not param'd as out-of-bounds is implementation specific */
+ .combine('depth_ref', [-1 /* smaller ref */, 0 /* equal ref */, 1 /* larger ref */] as const)
+ )
+ .unimplemented();
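
For orientation, a hedged sketch of a shadow-map style use of textureSampleCompare;
bindings, the 0.5 reference depth, and the entry-point name are illustrative.

// Hedged sketch (not CTS code): the builtin returns the filtered result of
// comparing each sampled depth texel against depth_ref using the sampler's
// compare function.
const textureSampleCompareWGSL = `
  @group(0) @binding(0) var t: texture_depth_2d;
  @group(0) @binding(1) var s: sampler_comparison;

  @fragment
  fn fsMain(@builtin(position) pos: vec4<f32>) -> @location(0) vec4<f32> {
    let visibility = textureSampleCompare(t, s, pos.xy / 64.0, 0.5);
    return vec4<f32>(visibility);
  }
`;
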
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureSampleCompareLevel.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureSampleCompareLevel.spec.ts
new file mode 100644
index 0000000000..500df8a6ec
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureSampleCompareLevel.spec.ts
@@ -0,0 +1,149 @@
+export const description = `
+Samples a depth texture and compares the sampled depth values against a reference value.
+
+The textureSampleCompareLevel function is the same as textureSampleCompare, except that:
+
+ * textureSampleCompareLevel always samples texels from mip level 0.
+ * The function does not compute derivatives.
+ * There is no requirement for textureSampleCompareLevel to be invoked in uniform control flow.
+ * textureSampleCompareLevel may be invoked in any shader stage.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+
+import { generateCoordBoundaries, generateOffsets } from './utils.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('stage')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplecomparelevel')
+ .desc(
+ `
+Tests that 'textureSampleCompareLevel' may be called in any shader stage.
+`
+ )
+ .params(u => u.combine('stage', ['fragment', 'vertex', 'compute'] as const))
+ .unimplemented();
+
+g.test('control_flow')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplecomparelevel')
+ .desc(
+ `
+Tests that 'textureSampleCompareLevel' may be called in non-uniform control flow.
+`
+ )
+ .params(u => u.combine('stage', ['fragment', 'vertex', 'compute'] as const))
+ .unimplemented();
+
+g.test('2d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplecomparelevel')
+ .desc(
+ `
+fn textureSampleCompareLevel(t: texture_depth_2d, s: sampler_comparison, coords: vec2<f32>, depth_ref: f32) -> f32
+fn textureSampleCompareLevel(t: texture_depth_2d, s: sampler_comparison, coords: vec2<f32>, depth_ref: f32, offset: vec2<i32>) -> f32
+
+Parameters:
+ * t The depth texture to sample.
+ * s The sampler_comparison type.
+ * coords The texture coordinates used for sampling.
+ * depth_ref The reference value to compare the sampled depth value against.
+ * offset
+ * The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ * This offset is applied before applying any texture wrapping modes.
+ * The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ * Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('coords', generateCoordBoundaries(2))
+ .combine('depth_ref', [-1 /* smaller ref */, 0 /* equal ref */, 1 /* larger ref */] as const)
+ .combine('offset', generateOffsets(2))
+ )
+ .unimplemented();
+
+g.test('3d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplecomparelevel')
+ .desc(
+ `
+fn textureSampleCompareLevel(t: texture_depth_cube, s: sampler_comparison, coords: vec3<f32>, depth_ref: f32) -> f32
+
+Parameters:
+ * t The depth texture to sample.
+ * s The sampler_comparison type.
+ * coords The texture coordinates used for sampling.
+ * depth_ref The reference value to compare the sampled depth value against.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('coords', generateCoordBoundaries(3))
+ .combine('depth_ref', [-1 /* smaller ref */, 0 /* equal ref */, 1 /* larger ref */] as const)
+ )
+ .unimplemented();
+
+g.test('arrayed_2d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplecomparelevel')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureSampleCompareLevel(t: texture_depth_2d_array, s: sampler_comparison, coords: vec2<f32>, array_index: C, depth_ref: f32) -> f32
+fn textureSampleCompareLevel(t: texture_depth_2d_array, s: sampler_comparison, coords: vec2<f32>, array_index: C, depth_ref: f32, offset: vec2<i32>) -> f32
+
+Parameters:
+ * t The depth texture to sample.
+ * s The sampler_comparison type.
+ * coords The texture coordinates used for sampling.
+ * array_index: The 0-based texture array index to sample.
+ * depth_ref The reference value to compare the sampled depth value against.
+ * offset
+ * The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ * This offset is applied before applying any texture wrapping modes.
+ * The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ * Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('coords', generateCoordBoundaries(2))
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('C_value', [-1, 0, 1, 2, 3, 4] as const)
+ /* array_index not param'd as out-of-bounds is implementation specific */
+ .combine('depth_ref', [-1 /* smaller ref */, 0 /* equal ref */, 1 /* larger ref */] as const)
+ .combine('offset', generateOffsets(2))
+ )
+ .unimplemented();
+
+g.test('arrayed_3d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplecomparelevel')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureSampleCompareLevel(t: texture_depth_cube_array, s: sampler_comparison, coords: vec3<f32>, array_index: C, depth_ref: f32) -> f32
+
+Parameters:
+ * t The depth texture to sample.
+ * s The sampler_comparison type.
+ * coords The texture coordinates used for sampling.
+ * array_index: The 0-based texture array index to sample.
+ * depth_ref The reference value to compare the sampled depth value against.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('coords', generateCoordBoundaries(3))
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('C_value', [-1, 0, 1, 2, 3, 4] as const)
+ /* array_index not param'd as out-of-bounds is implementation specific */
+ .combine('depth_ref', [-1 /* smaller ref */, 0 /* equal ref */, 1 /* larger ref */] as const)
+ )
+ .unimplemented();
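
Because textureSampleCompareLevel needs no derivatives, it is legal outside
fragment shaders; a hedged compute-stage sketch follows (bindings, sizes, and
the reference depth are illustrative assumptions, not CTS code).

const textureSampleCompareLevelWGSL = `
  @group(0) @binding(0) var t: texture_depth_2d;
  @group(0) @binding(1) var s: sampler_comparison;
  @group(0) @binding(2) var<storage, read_write> result: array<f32>;

  @compute @workgroup_size(1)
  fn csMain(@builtin(global_invocation_id) id: vec3<u32>) {
    // Always samples mip level 0; usable in any stage and non-uniform control flow.
    let coords = vec2<f32>(f32(id.x), f32(id.y)) / 64.0;
    result[id.x] = textureSampleCompareLevel(t, s, coords, 0.5);
  }
`;
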
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureSampleGrad.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureSampleGrad.spec.ts
new file mode 100644
index 0000000000..e0d754ece3
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureSampleGrad.spec.ts
@@ -0,0 +1,136 @@
+export const description = `
+Samples a texture using explicit gradients.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+
+import { generateCoordBoundaries, generateOffsets } from './utils.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('sampled_2d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplegrad')
+ .desc(
+ `
+fn textureSampleGrad(t: texture_2d<f32>, s: sampler, coords: vec2<f32>, ddx: vec2<f32>, ddy: vec2<f32>) -> vec4<f32>
+fn textureSampleGrad(t: texture_2d<f32>, s: sampler, coords: vec2<f32>, ddx: vec2<f32>, ddy: vec2<f32>, offset: vec2<i32>) -> vec4<f32>
+
+Parameters:
+ * t The sampled texture.
+ * s The sampler type.
+ * coords The texture coordinates used for sampling.
+ * ddx The x direction derivative vector used to compute the sampling locations
+ * ddy The y direction derivative vector used to compute the sampling locations
+ * offset
+ * The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ * This offset is applied before applying any texture wrapping modes.
+ * The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ * Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('coords', generateCoordBoundaries(2))
+ .combine('offset', generateOffsets(2))
+ )
+ .unimplemented();
+
+g.test('sampled_3d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplegrad')
+ .desc(
+ `
+fn textureSampleGrad(t: texture_3d<f32>, s: sampler, coords: vec3<f32>, ddx: vec3<f32>, ddy: vec3<f32>) -> vec4<f32>
+fn textureSampleGrad(t: texture_3d<f32>, s: sampler, coords: vec3<f32>, ddx: vec3<f32>, ddy: vec3<f32>, offset: vec3<i32>) -> vec4<f32>
+fn textureSampleGrad(t: texture_cube<f32>, s: sampler, coords: vec3<f32>, ddx: vec3<f32>, ddy: vec3<f32>) -> vec4<f32>
+
+Parameters:
+ * t The sampled texture.
+ * s The sampler type.
+ * coords The texture coordinates used for sampling.
+ * ddx The x direction derivative vector used to compute the sampling locations
+ * ddy The y direction derivative vector used to compute the sampling locations
+ * offset
+ * The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ * This offset is applied before applying any texture wrapping modes.
+ * The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ * Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('coords', generateCoordBoundaries(3))
+ .combine('offset', generateOffsets(3))
+ )
+ .unimplemented();
+
+g.test('sampled_array_2d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplegrad')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureSampleGrad(t: texture_2d_array<f32>, s: sampler, coords: vec2<f32>, array_index: C, ddx: vec2<f32>, ddy: vec2<f32>) -> vec4<f32>
+fn textureSampleGrad(t: texture_2d_array<f32>, s: sampler, coords: vec2<f32>, array_index: C, ddx: vec2<f32>, ddy: vec2<f32>, offset: vec2<i32>) -> vec4<f32>
+
+Parameters:
+ * t The sampled texture.
+ * s The sampler type.
+ * coords The texture coordinates used for sampling.
+ * array_index The 0-based texture array index to sample.
+ * ddx The x direction derivative vector used to compute the sampling locations
+ * ddy The y direction derivative vector used to compute the sampling locations
+ * offset
+ * The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ * This offset is applied before applying any texture wrapping modes.
+ * The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ * Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('C_value', [-1, 0, 1, 2, 3, 4] as const)
+ .combine('coords', generateCoordBoundaries(2))
+ /* array_index not param'd as out-of-bounds is implementation specific */
+ .combine('offset', generateOffsets(2))
+ )
+ .unimplemented();
+
+g.test('sampled_array_3d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplegrad')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureSampleGrad(t: texture_cube_array<f32>, s: sampler, coords: vec3<f32>, array_index: C, ddx: vec3<f32>, ddy: vec3<f32>) -> vec4<f32>
+
+Parameters:
+ * t The sampled texture.
+ * s The sampler type.
+ * coords The texture coordinates used for sampling.
+ * array_index The 0-based texture array index to sample.
+ * ddx The x direction derivative vector used to compute the sampling locations
+ * ddy The y direction derivative vector used to compute the sampling locations
+ * offset
+ * The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ * This offset is applied before applying any texture wrapping modes.
+ * The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ * Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('C_value', [-1, 0, 1, 2, 3, 4] as const)
+ .combine('coords', generateCoordBoundaries(3))
+ )
+ .unimplemented();
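
A hedged sketch of textureSampleGrad with explicit derivatives standing in for
the implicit ones a plain textureSample would compute; the 1/64 gradients assume
an illustrative 64x64 texture and are not taken from the CTS.

const textureSampleGradWGSL = `
  @group(0) @binding(0) var t: texture_2d<f32>;
  @group(0) @binding(1) var s: sampler;

  @fragment
  fn fsMain(@builtin(position) pos: vec4<f32>) -> @location(0) vec4<f32> {
    let coords = pos.xy / 64.0;
    // Explicit x/y gradients drive mip selection instead of neighbouring invocations.
    let gx = vec2<f32>(1.0 / 64.0, 0.0);
    let gy = vec2<f32>(0.0, 1.0 / 64.0);
    return textureSampleGrad(t, s, coords, gx, gy);
  }
`;
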
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureSampleLevel.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureSampleLevel.spec.ts
new file mode 100644
index 0000000000..f8073c65d6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureSampleLevel.spec.ts
@@ -0,0 +1,274 @@
+export const description = `
+Samples a texture using an explicit mip level.
+
+Because the mip level is explicit, no derivatives are computed, so this builtin
+may be used in any shader stage and in non-uniform control flow.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+
+import { generateCoordBoundaries, generateOffsets } from './utils.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('sampled_2d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplelevel')
+ .desc(
+ `
+fn textureSampleLevel(t: texture_2d<f32>, s: sampler, coords: vec2<f32>, level: f32) -> vec4<f32>
+fn textureSampleLevel(t: texture_2d<f32>, s: sampler, coords: vec2<f32>, level: f32, offset: vec2<i32>) -> vec4<f32>
+
+Parameters:
+ * t The sampled or depth texture to sample.
+ * s The sampler type.
+ * coords The texture coordinates used for sampling.
+ * level
+ * The mip level, with level 0 containing a full size version of the texture.
+ * For the functions where level is a f32, fractional values may interpolate between
+ two levels if the format is filterable according to the Texture Format Capabilities.
+ * When not specified, mip level 0 is sampled.
+ * offset
+ * The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ * This offset is applied before applying any texture wrapping modes.
+ * The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ * Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('coords', generateCoordBoundaries(2))
+ .combine('offset', generateOffsets(2))
+ .combine('level', [undefined, 0, 1, 'textureNumLevels', 'textureNumLevels+1'] as const)
+ )
+ .unimplemented();
+
+g.test('sampled_array_2d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplelevel')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureSampleLevel(t: texture_2d_array<f32>, s: sampler, coords: vec2<f32>, array_index: C, level: f32) -> vec4<f32>
+fn textureSampleLevel(t: texture_2d_array<f32>, s: sampler, coords: vec2<f32>, array_index: C, level: f32, offset: vec2<i32>) -> vec4<f32>
+
+Parameters:
+ * t The sampled or depth texture to sample.
+ * s The sampler type.
+ * coords The texture coordinates used for sampling.
+ * array_index The 0-based texture array index to sample.
+ * level
+ * The mip level, with level 0 containing a full size version of the texture.
+ * For the functions where level is a f32, fractional values may interpolate between
+ two levels if the format is filterable according to the Texture Format Capabilities.
+ * When not specified, mip level 0 is sampled.
+ * offset
+ * The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ * This offset is applied before applying any texture wrapping modes.
+ * The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ * Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('C_value', [-1, 0, 1, 2, 3, 4] as const)
+ .combine('coords', generateCoordBoundaries(2))
+ .combine('offset', generateOffsets(2))
+ /* array_index not param'd as out-of-bounds is implementation specific */
+ .combine('level', [undefined, 0, 1, 'textureNumLevels', 'textureNumLevels+1'] as const)
+ )
+ .unimplemented();
+
+g.test('sampled_3d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplelevel')
+ .desc(
+ `
+fn textureSampleLevel(t: texture_3d<f32>, s: sampler, coords: vec3<f32>, level: f32) -> vec4<f32>
+fn textureSampleLevel(t: texture_3d<f32>, s: sampler, coords: vec3<f32>, level: f32, offset: vec3<i32>) -> vec4<f32>
+fn textureSampleLevel(t: texture_cube<f32>, s: sampler, coords: vec3<f32>, level: f32) -> vec4<f32>
+
+Parameters:
+ * t The sampled or depth texture to sample.
+ * s The sampler type.
+ * coords The texture coordinates used for sampling.
+ * level
+ * The mip level, with level 0 containing a full size version of the texture.
+ * For the functions where level is a f32, fractional values may interpolate between
+ two levels if the format is filterable according to the Texture Format Capabilities.
+ * When not specified, mip level 0 is sampled.
+ * offset
+ * The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ * This offset is applied before applying any texture wrapping modes.
+ * The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ * Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .params(u =>
+ u
+ .combine('texture_type', ['texture_3d', 'texture_cube'] as const)
+ .beginSubcases()
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('coords', generateCoordBoundaries(3))
+ .combine('offset', generateOffsets(3))
+ .combine('level', [undefined, 0, 1, 'textureNumLevels', 'textureNumLevels+1'] as const)
+ )
+ .unimplemented();
+
+g.test('sampled_array_3d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplelevel')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureSampleLevel(t: texture_cube_array<f32>, s: sampler, coords: vec3<f32>, array_index: C, level: f32) -> vec4<f32>
+
+Parameters:
+ * t The sampled or depth texture to sample.
+ * s The sampler type.
+ * coords The texture coordinates used for sampling.
+ * array_index The 0-based texture array index to sample.
+ * level
+ * The mip level, with level 0 containing a full size version of the texture.
+ * For the functions where level is a f32, fractional values may interpolate between
+ two levels if the format is filterable according to the Texture Format Capabilities.
+ * When not specified, mip level 0 is sampled.
+ * offset
+ * The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ * This offset is applied before applying any texture wrapping modes.
+ * The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ * Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('C_value', [-1, 0, 1, 2, 3, 4] as const)
+ .combine('coords', generateCoordBoundaries(3))
+ .combine('offset', generateOffsets(3))
+ /* array_index not param'd as out-of-bounds is implementation specific */
+ .combine('level', [undefined, 0, 1, 'textureNumLevels', 'textureNumLevels+1'] as const)
+ )
+ .unimplemented();
+
+g.test('depth_2d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplelevel')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureSampleLevel(t: texture_depth_2d, s: sampler, coords: vec2<f32>, level: C) -> f32
+fn textureSampleLevel(t: texture_depth_2d, s: sampler, coords: vec2<f32>, level: C, offset: vec2<i32>) -> f32
+
+Parameters:
+ * t The sampled or depth texture to sample.
+ * s The sampler type.
+ * coords The texture coordinates used for sampling.
+ * level
+ * The mip level, with level 0 containing a full size version of the texture.
+ * For the functions where level is a f32, fractional values may interpolate between
+ two levels if the format is filterable according to the Texture Format Capabilities.
+ * When not specified, mip level 0 is sampled.
+ * offset
+ * The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ * This offset is applied before applying any texture wrapping modes.
+ * The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ * Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('C_value', [-1, 0, 1, 2, 3, 4] as const)
+ .combine('coords', generateCoordBoundaries(2))
+ .combine('offset', generateOffsets(2))
+ .combine('level', [undefined, 0, 1, 'textureNumLevels', 'textureNumLevels+1'] as const)
+ )
+ .unimplemented();
+
+g.test('depth_array_2d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplelevel')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureSampleLevel(t: texture_depth_2d_array, s: sampler, coords: vec2<f32>, array_index: C, level: C) -> f32
+fn textureSampleLevel(t: texture_depth_2d_array, s: sampler, coords: vec2<f32>, array_index: C, level: C, offset: vec2<i32>) -> f32
+
+Parameters:
+ * t The sampled or depth texture to sample.
+ * s The sampler type.
+ * array_index The 0-based texture array index to sample.
+ * coords The texture coordinates used for sampling.
+ * level
+ * The mip level, with level 0 containing a full size version of the texture.
+ * For the functions where level is a f32, fractional values may interpolate between
+ two levels if the format is filterable according to the Texture Format Capabilities.
+ * When not specified, mip level 0 is sampled.
+ * offset
+ * The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ * This offset is applied before applying any texture wrapping modes.
+ * The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ * Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .paramsSubcasesOnly(u =>
+ u
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('C_value', [-1, 0, 1, 2, 3, 4] as const)
+ .combine('coords', generateCoordBoundaries(2))
+ .combine('offset', generateOffsets(2))
+ /* array_index not param'd as out-of-bounds is implementation specific */
+ .combine('level', [undefined, 0, 1, 'textureNumLevels', 'textureNumLevels+1'] as const)
+ )
+ .unimplemented();
+
+g.test('depth_3d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturesamplelevel')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureSampleLevel(t: texture_depth_cube, s: sampler, coords: vec3<f32>, level: C) -> f32
+fn textureSampleLevel(t: texture_depth_cube_array, s: sampler, coords: vec3<f32>, array_index: C, level: C) -> f32
+
+Parameters:
+ * t The sampled or depth texture to sample.
+ * s The sampler type.
+ * coords The texture coordinates used for sampling.
+ * level
+ * The mip level, with level 0 containing a full size version of the texture.
+ * For the functions where level is a f32, fractional values may interpolate between
+ two levels if the format is filterable according to the Texture Format Capabilities.
+ * When not specified, mip level 0 is sampled.
+ * offset
+ * The optional texel offset applied to the unnormalized texture coordinate before sampling the texture.
+ * This offset is applied before applying any texture wrapping modes.
+ * The offset expression must be a creation-time expression (e.g. vec2<i32>(1, 2)).
+ * Each offset component must be at least -8 and at most 7.
+ Values outside of this range will result in a shader-creation error.
+`
+ )
+ .params(u =>
+ u
+ .combine('texture_type', ['texture_depth_cube', 'texture_depth_cube_array'] as const)
+ .beginSubcases()
+ .combine('S', ['clamp-to-edge', 'repeat', 'mirror-repeat'])
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('C_value', [-1, 0, 1, 2, 3, 4] as const)
+ .combine('coords', generateCoordBoundaries(3))
+ /* array_index not param'd as out-of-bounds is implementation specific */
+ .combine('level', [undefined, 0, 1, 'textureNumLevels', 'textureNumLevels+1'] as const)
+ )
+ .unimplemented();
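
A hedged sketch showing why the explicit level matters: with no derivatives
required, textureSampleLevel can run in a compute shader. The bindings and the
chosen level are illustrative, not CTS code.

const textureSampleLevelWGSL = `
  @group(0) @binding(0) var t: texture_2d<f32>;
  @group(0) @binding(1) var s: sampler;
  @group(0) @binding(2) var<storage, read_write> result: array<vec4<f32>>;

  @compute @workgroup_size(1)
  fn csMain(@builtin(global_invocation_id) id: vec3<u32>) {
    let coords = vec2<f32>(f32(id.x), f32(id.y)) / 64.0;
    result[id.x] = textureSampleLevel(t, s, coords, 2.0); // sample mip level 2
  }
`;
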
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureStore.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureStore.spec.ts
new file mode 100644
index 0000000000..efef971e24
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/textureStore.spec.ts
@@ -0,0 +1,122 @@
+export const description = `
+Writes a single texel to a texture.
+
+The channel format T depends on the storage texel format F.
+See the texel format table for the mapping of texel format to channel format.
+
+Note: An out-of-bounds access occurs if:
+ * any element of coords is outside the range [0, textureDimensions(t)) for the corresponding element, or
+ * array_index is outside the range of [0, textureNumLayers(t))
+
+If an out-of-bounds access occurs, the built-in function may do any of the following:
+ * not be executed
+ * store value to some in bounds texel
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TexelFormats } from '../../../../types.js';
+
+import { generateCoordBoundaries } from './utils.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('store_1d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturestore')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureStore(t: texture_storage_1d<F,write>, coords: C, value: vec4<T>)
+
+Parameters:
+ * t The write-only storage texture to write to.
+ * coords The 0-based texel coordinate.
+ * value The new texel value
+`
+ )
+ .params(u =>
+ u
+ .combineWithParams(TexelFormats)
+ .beginSubcases()
+ .combine('coords', generateCoordBoundaries(1))
+ .combine('C', ['i32', 'u32'] as const)
+ )
+ .unimplemented();
+
+g.test('store_2d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturestore')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureStore(t: texture_storage_2d<F,write>, coords: vec2<C>, value: vec4<T>)
+
+Parameters:
+ * t The write-only storage texture to write to.
+ * coords The 0-based texel coordinates.
+ * value The new texel value
+`
+ )
+ .params(u =>
+ u
+ .combineWithParams(TexelFormats)
+ .beginSubcases()
+ .combine('coords', generateCoordBoundaries(2))
+ .combine('C', ['i32', 'u32'] as const)
+ )
+ .unimplemented();
+
+g.test('store_array_2d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturestore')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureStore(t: texture_storage_2d_array<F,write>, coords: vec2<C>, array_index: C, value: vec4<T>)
+
+Parameters:
+ * t The write-only storage texture to write to.
+ * array_index The 0-based texture array index
+ * coords The 0-based texel coordinates.
+ * value The new texel value
+`
+ )
+ .params(
+ u =>
+ u
+ .combineWithParams(TexelFormats)
+ .beginSubcases()
+ .combine('coords', generateCoordBoundaries(2))
+ .combine('C', ['i32', 'u32'] as const)
+ .combine('C_value', [-1, 0, 1, 2, 3, 4] as const)
+ /* array_index not param'd as out-of-bounds is implementation specific */
+ )
+ .unimplemented();
+
+g.test('store_3d_coords')
+ .specURL('https://www.w3.org/TR/WGSL/#texturestore')
+ .desc(
+ `
+C is i32 or u32
+
+fn textureStore(t: texture_storage_3d<F,write>, coords: vec3<C>, value: vec4<T>)
+
+Parameters:
+ * t The write-only storage texture to write to.
+ * coords The 0-based texel coordinates.
+ * value The new texel value
+`
+ )
+ .params(u =>
+ u
+ .combineWithParams(TexelFormats)
+ .beginSubcases()
+ .combine('coords', generateCoordBoundaries(3))
+ .combine('C', ['i32', 'u32'] as const)
+ )
+ .unimplemented();
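
A hedged sketch of a textureStore call; the rgba8unorm format is just one of the
formats the tests parameterize over, and the binding slot is illustrative.

const textureStoreWGSL = `
  @group(0) @binding(0) var outTex: texture_storage_2d<rgba8unorm, write>;

  @compute @workgroup_size(1)
  fn csMain(@builtin(global_invocation_id) id: vec3<u32>) {
    // Writes one texel; coords may be signed or unsigned integer vectors.
    textureStore(outTex, vec2<i32>(id.xy), vec4<f32>(1.0, 0.0, 0.0, 1.0));
  }
`;
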
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/transpose.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/transpose.spec.ts
new file mode 100644
index 0000000000..6fd4887f35
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/transpose.spec.ts
@@ -0,0 +1,158 @@
+export const description = `
+Execution tests for the 'transpose' builtin function
+
+T is AbstractFloat, f32, or f16
+@const fn transpose(e: matRxC<T>) -> matCxR<T>
+Returns the transpose of e.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeAbstractFloat, TypeF16, TypeF32, TypeMat } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import {
+ sparseMatrixF16Range,
+ sparseMatrixF32Range,
+ sparseMatrixF64Range,
+} from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, onlyConstInputSource, run } from '../../expression.js';
+
+import { abstractBuiltin, builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Cases: f32_matCxR_[non_]const
+const f32_cases = ([2, 3, 4] as const)
+ .flatMap(cols =>
+ ([2, 3, 4] as const).flatMap(rows =>
+ ([true, false] as const).map(nonConst => ({
+ [`f32_mat${cols}x${rows}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateMatrixToMatrixCases(
+ sparseMatrixF32Range(cols, rows),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f32.transposeInterval
+ );
+ },
+ }))
+ )
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+// Cases: f16_matCxR_[non_]const
+const f16_cases = ([2, 3, 4] as const)
+ .flatMap(cols =>
+ ([2, 3, 4] as const).flatMap(rows =>
+ ([true, false] as const).map(nonConst => ({
+ [`f16_mat${cols}x${rows}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f16.generateMatrixToMatrixCases(
+ sparseMatrixF16Range(cols, rows),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f16.transposeInterval
+ );
+ },
+ }))
+ )
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+// Cases: abstract_matCxR
+const abstract_cases = ([2, 3, 4] as const)
+ .flatMap(cols =>
+ ([2, 3, 4] as const).map(rows => ({
+ [`abstract_mat${cols}x${rows}`]: () => {
+ return FP.abstract.generateMatrixToMatrixCases(
+ sparseMatrixF64Range(cols, rows),
+ 'finite',
+ FP.abstract.transposeInterval
+ );
+ },
+ }))
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('transpose', {
+ ...f32_cases,
+ ...f16_cases,
+ ...abstract_cases,
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#matrix-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u
+ .combine('inputSource', onlyConstInputSource)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(`abstract_mat${cols}x${rows}`);
+ await run(
+ t,
+ abstractBuiltin('transpose'),
+ [TypeMat(cols, rows, TypeAbstractFloat)],
+ TypeMat(rows, cols, TypeAbstractFloat),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#matrix-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(
+ t.params.inputSource === 'const'
+ ? `f32_mat${cols}x${rows}_const`
+ : `f32_mat${cols}x${rows}_non_const`
+ );
+ await run(
+ t,
+ builtin('transpose'),
+ [TypeMat(cols, rows, TypeF32)],
+ TypeMat(rows, cols, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#matrix-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(
+ t.params.inputSource === 'const'
+ ? `f16_mat${cols}x${rows}_const`
+ : `f16_mat${cols}x${rows}_non_const`
+ );
+ await run(
+ t,
+ builtin('transpose'),
+ [TypeMat(cols, rows, TypeF16)],
+ TypeMat(rows, cols, TypeF16),
+ t.params,
+ cases
+ );
+ });
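
The expectation these cases encode is just the mathematical transpose: a matCxR
input yields a matRxC result. A hedged TypeScript reference (not CTS code),
treating a matrix as an array of columns to match WGSL's matCxR layout:

function transposeRef(m: number[][]): number[][] {
  const cols = m.length;
  const rows = m[0].length;
  const out: number[][] = [];
  for (let r = 0; r < rows; ++r) {
    const col: number[] = [];
    for (let c = 0; c < cols; ++c) {
      col.push(m[c][r]);
    }
    out.push(col);
  }
  return out;
}

// A mat2x3 (2 columns of 3 rows) becomes a mat3x2:
// transposeRef([[1, 2, 3], [4, 5, 6]]) returns [[1, 4], [2, 5], [3, 6]].
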
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/trunc.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/trunc.spec.ts
new file mode 100644
index 0000000000..63cd8470f5
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/trunc.spec.ts
@@ -0,0 +1,75 @@
+export const description = `
+Execution tests for the 'trunc' builtin function
+
+S is AbstractFloat, f32, f16
+T is S or vecN<S>
+@const fn trunc(e: T ) -> T
+Returns the nearest whole number whose absolute value is less than or equal to the absolute value of e.
+Component-wise when T is a vector.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeAbstractFloat, TypeF16, TypeF32 } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullF32Range, fullF64Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, onlyConstInputSource, run } from '../../expression.js';
+
+import { abstractBuiltin, builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('trunc', {
+ f32: () => {
+ return FP.f32.generateScalarToIntervalCases(fullF32Range(), 'unfiltered', FP.f32.truncInterval);
+ },
+ f16: () => {
+ return FP.f16.generateScalarToIntervalCases(fullF32Range(), 'unfiltered', FP.f16.truncInterval);
+ },
+ abstract: () => {
+ return FP.abstract.generateScalarToIntervalCases(
+ fullF64Range(),
+ 'unfiltered',
+ FP.abstract.truncInterval
+ );
+ },
+});
+
+g.test('abstract_float')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`abstract float tests`)
+ .params(u =>
+ u
+ .combine('inputSource', onlyConstInputSource)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('abstract');
+ await run(t, abstractBuiltin('trunc'), [TypeAbstractFloat], TypeAbstractFloat, t.params, cases);
+ });
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f32 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('f32');
+ await run(t, builtin('trunc'), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#float-builtin-functions')
+ .desc(`f16 tests`)
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, builtin('trunc'), [TypeF16], TypeF16, t.params, cases);
+ });
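
For intuition only: trunc rounds toward zero, which for finite inputs matches
JavaScript's Math.trunc; the actual cases compare against FP.*.truncInterval
acceptance intervals rather than exact values.

// Hedged illustration, not CTS code.
const truncExamples: Array<[number, number]> = [
  [1.9, 1],
  [-1.9, -1],
  [0.5, 0],
  [-0.5, -0],
];
for (const [input, expected] of truncExamples) {
  console.assert(Object.is(Math.trunc(input), expected), `trunc(${input})`);
}
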
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/unpack2x16float.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/unpack2x16float.spec.ts
new file mode 100644
index 0000000000..4a0bf075e9
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/unpack2x16float.spec.ts
@@ -0,0 +1,48 @@
+export const description = `
+Decomposes a 32-bit value into two 16-bit chunks, and reinterprets each chunk as
+a floating point value.
+Component i of the result is the f32 representation of v, where v is the
+interpretation of bits 16×i through 16×i+15 of e as an IEEE-754 binary16 value.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeU32, TypeVec } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullU32Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('unpack2x16float', {
+ u32_const: () => {
+ return FP.f32.generateU32ToIntervalCases(
+ fullU32Range(),
+ 'finite',
+ FP.f32.unpack2x16floatInterval
+ );
+ },
+ u32_non_const: () => {
+ return FP.f32.generateU32ToIntervalCases(
+ fullU32Range(),
+ 'unfiltered',
+ FP.f32.unpack2x16floatInterval
+ );
+ },
+});
+
+g.test('unpack')
+ .specURL('https://www.w3.org/TR/WGSL/#unpack-builtin-functions')
+ .desc(
+ `
+@const fn unpack2x16float(e: u32) -> vec2<f32>
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'u32_const' : 'u32_non_const');
+ await run(t, builtin('unpack2x16float'), [TypeU32], TypeVec(2, TypeF32), t.params, cases);
+ });
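
A hedged reference sketch of the bit layout under test: the low 16 bits become
component 0 and the high 16 bits component 1, each decoded as an IEEE-754
binary16 value. The real cases check acceptance intervals, not exact equality.

function unpack2x16floatRef(e: number): [number, number] {
  const half = (bits: number): number => {
    const sign = (bits >> 15) === 0 ? 1 : -1;
    const exp = (bits >> 10) & 0x1f;
    const frac = bits & 0x3ff;
    if (exp === 0) return sign * frac * 2 ** -24; // zero or subnormal
    if (exp === 0x1f) return frac === 0 ? sign * Infinity : NaN;
    return sign * (1 + frac / 1024) * 2 ** (exp - 15); // normal value
  };
  return [half(e & 0xffff), half((e >>> 16) & 0xffff)];
}
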
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/unpack2x16snorm.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/unpack2x16snorm.spec.ts
new file mode 100644
index 0000000000..195cfd9a01
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/unpack2x16snorm.spec.ts
@@ -0,0 +1,48 @@
+export const description = `
+Decomposes a 32-bit value into two 16-bit chunks, then reinterprets each chunk
+as a signed normalized floating point value.
+Component i of the result is max(v ÷ 32767, -1), where v is the interpretation
+of bits 16×i through 16×i+15 of e as a twos-complement signed integer.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeU32, TypeVec } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullU32Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('unpack2x16snorm', {
+ u32_const: () => {
+ return FP.f32.generateU32ToIntervalCases(
+ fullU32Range(),
+ 'finite',
+ FP.f32.unpack2x16snormInterval
+ );
+ },
+ u32_non_const: () => {
+ return FP.f32.generateU32ToIntervalCases(
+ fullU32Range(),
+ 'unfiltered',
+ FP.f32.unpack2x16snormInterval
+ );
+ },
+});
+
+g.test('unpack')
+ .specURL('https://www.w3.org/TR/WGSL/#unpack-builtin-functions')
+ .desc(
+ `
+@const fn unpack2x16snorm(e: u32) -> vec2<f32>
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'u32_const' : 'u32_non_const');
+ await run(t, builtin('unpack2x16snorm'), [TypeU32], TypeVec(2, TypeF32), t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/unpack2x16unorm.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/unpack2x16unorm.spec.ts
new file mode 100644
index 0000000000..16b4e6397c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/unpack2x16unorm.spec.ts
@@ -0,0 +1,48 @@
+export const description = `
+Decomposes a 32-bit value into two 16-bit chunks, then reinterprets each chunk
+as an unsigned normalized floating point value.
+Component i of the result is v ÷ 65535, where v is the interpretation of bits
+16×i through 16×i+15 of e as an unsigned integer.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeU32, TypeVec } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullU32Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('unpack2x16unorm', {
+ u32_const: () => {
+ return FP.f32.generateU32ToIntervalCases(
+ fullU32Range(),
+ 'finite',
+ FP.f32.unpack2x16unormInterval
+ );
+ },
+ u32_non_const: () => {
+ return FP.f32.generateU32ToIntervalCases(
+ fullU32Range(),
+ 'unfiltered',
+ FP.f32.unpack2x16unormInterval
+ );
+ },
+});
+
+g.test('unpack')
+ .specURL('https://www.w3.org/TR/WGSL/#unpack-builtin-functions')
+ .desc(
+ `
+@const fn unpack2x16unorm(e: u32) -> vec2<f32>
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'u32_const' : 'u32_non_const');
+ await run(t, builtin('unpack2x16unorm'), [TypeU32], TypeVec(2, TypeF32), t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/unpack4x8snorm.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/unpack4x8snorm.spec.ts
new file mode 100644
index 0000000000..7ea8d51918
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/unpack4x8snorm.spec.ts
@@ -0,0 +1,48 @@
+export const description = `
+Decomposes a 32-bit value into four 8-bit chunks, then reinterprets each chunk
+as a signed normalized floating point value.
+Component i of the result is max(v ÷ 127, -1), where v is the interpretation of
+bits 8×i through 8×i+7 of e as a twos-complement signed integer.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeU32, TypeVec } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullU32Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('unpack4x8snorm', {
+ u32_const: () => {
+ return FP.f32.generateU32ToIntervalCases(
+ fullU32Range(),
+ 'finite',
+ FP.f32.unpack4x8snormInterval
+ );
+ },
+ u32_non_const: () => {
+ return FP.f32.generateU32ToIntervalCases(
+ fullU32Range(),
+ 'unfiltered',
+ FP.f32.unpack4x8snormInterval
+ );
+ },
+});
+
+g.test('unpack')
+ .specURL('https://www.w3.org/TR/WGSL/#unpack-builtin-functions')
+ .desc(
+ `
+@const fn unpack4x8snorm(e: u32) -> vec4<f32>
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'u32_const' : 'u32_non_const');
+ await run(t, builtin('unpack4x8snorm'), [TypeU32], TypeVec(4, TypeF32), t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/unpack4x8unorm.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/unpack4x8unorm.spec.ts
new file mode 100644
index 0000000000..bf54d23c12
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/unpack4x8unorm.spec.ts
@@ -0,0 +1,48 @@
+export const description = `
+Decomposes a 32-bit value into four 8-bit chunks, then reinterprets each chunk
+as an unsigned normalized floating point value.
+Component i of the result is v ÷ 255, where v is the interpretation of bits 8×i
+through 8×i+7 of e as an unsigned integer.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+import { TypeF32, TypeU32, TypeVec } from '../../../../../util/conversion.js';
+import { FP } from '../../../../../util/floating_point.js';
+import { fullU32Range } from '../../../../../util/math.js';
+import { makeCaseCache } from '../../case_cache.js';
+import { allInputSources, run } from '../../expression.js';
+
+import { builtin } from './builtin.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('unpack4x8unorm', {
+ u32_const: () => {
+ return FP.f32.generateU32ToIntervalCases(
+ fullU32Range(),
+ 'finite',
+ FP.f32.unpack4x8unormInterval
+ );
+ },
+ u32_non_const: () => {
+ return FP.f32.generateU32ToIntervalCases(
+ fullU32Range(),
+ 'unfiltered',
+ FP.f32.unpack4x8unormInterval
+ );
+ },
+});
+
+g.test('unpack')
+ .specURL('https://www.w3.org/TR/WGSL/#unpack-builtin-functions')
+ .desc(
+ `
+@const fn unpack4x8unorm(e: u32) -> vec4<f32>
+`
+ )
+ .params(u => u.combine('inputSource', allInputSources))
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'u32_const' : 'u32_non_const');
+ await run(t, builtin('unpack4x8unorm'), [TypeU32], TypeVec(4, TypeF32), t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/utils.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/utils.ts
new file mode 100644
index 0000000000..9cbee00939
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/utils.ts
@@ -0,0 +1,45 @@
+/**
+ * Generates the boundary entries for the given number of dimensions
+ *
+ * @param numDimensions: The number of dimensions to generate for
+ * @returns an array of generated coord boundaries
+ */
+export function generateCoordBoundaries(numDimensions: number) {
+ const ret = ['in-bounds'];
+
+ if (numDimensions < 1 || numDimensions > 3) {
+ throw new Error(`invalid numDimensions: ${numDimensions}`);
+ }
+
+ const name = 'xyz';
+ for (let i = 0; i < numDimensions; ++i) {
+ for (const j of ['min', 'max']) {
+ for (const k of ['wrap', 'boundary']) {
+ ret.push(`${name[i]}-${j}-${k}`);
+ }
+ }
+ }
+
+ return ret;
+}
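+
+// Example output (illustrative): generateCoordBoundaries(1) yields
+// ['in-bounds', 'x-min-wrap', 'x-min-boundary', 'x-max-wrap', 'x-max-boundary'];
+// 2D and 3D inputs add the corresponding 'y-*' and 'z-*' entries.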
+
+/**
+ * Generates a set of offset values to attempt in the range [-9, 8].
+ *
+ * @param numDimensions: The number of dimensions to generate for
+ * @return an array of generated offset values
+ */
+export function generateOffsets(numDimensions: number) {
+ if (numDimensions < 2 || numDimensions > 3) {
+ throw new Error(`generateOffsets: invalid numDimensions: ${numDimensions}`);
+ }
+ const ret: Array<undefined | Array<number>> = [undefined];
+ for (const val of [-9, -8, 0, 1, 7, 8]) {
+ const v = [];
+ for (let i = 0; i < numDimensions; ++i) {
+ v.push(val);
+ }
+ ret.push(v);
+ }
+ return ret;
+}
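+
+// Example output (illustrative): generateOffsets(2) yields
+// [undefined, [-9, -9], [-8, -8], [0, 0], [1, 1], [7, 7], [8, 8]];
+// the leading `undefined` entry covers the "no offset supplied" case.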
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/workgroupBarrier.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/workgroupBarrier.spec.ts
new file mode 100644
index 0000000000..74e0f12325
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/call/builtin/workgroupBarrier.spec.ts
@@ -0,0 +1,38 @@
+export const description = `
+'workgroupBarrier' affects memory and atomic operations in the workgroup address space.
+
+All synchronization functions execute a control barrier with Acquire/Release memory ordering.
+That is, all synchronization functions, and affected memory and atomic operations are ordered
+in program order relative to the synchronization function. Additionally, the affected memory
+and atomic operations program-ordered before the synchronization function must be visible to
+all other threads in the workgroup before any affected memory or atomic operation program-ordered
+after the synchronization function is executed by a member of the workgroup. All synchronization
+functions use the Workgroup memory scope. All synchronization functions have a Workgroup
+execution scope.
+
+All synchronization functions must only be used in the compute shader stage.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('stage')
+ .specURL('https://www.w3.org/TR/WGSL/#sync-builtin-functions')
+ .desc(
+ `
+All synchronization functions must only be used in the compute shader stage.
+`
+ )
+ .params(u => u.combine('stage', ['vertex', 'fragment', 'compute'] as const))
+ .unimplemented();
+
+g.test('barrier')
+ .specURL('https://www.w3.org/TR/WGSL/#sync-builtin-functions')
+ .desc(
+ `
+fn workgroupBarrier()
+`
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/case_cache.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/case_cache.ts
new file mode 100644
index 0000000000..ff82792d64
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/case_cache.ts
@@ -0,0 +1,200 @@
+import { Cacheable, dataCache } from '../../../../common/framework/data_cache.js';
+import { unreachable } from '../../../../common/util/util.js';
+import BinaryStream from '../../../util/binary_stream.js';
+import { deserializeComparator, serializeComparator } from '../../../util/compare.js';
+import {
+ Scalar,
+ Vector,
+ serializeValue,
+ deserializeValue,
+ Matrix,
+ Value,
+} from '../../../util/conversion.js';
+import {
+ deserializeFPInterval,
+ FPInterval,
+ serializeFPInterval,
+} from '../../../util/floating_point.js';
+import { flatten2DArray, unflatten2DArray } from '../../../util/math.js';
+
+import { Case, CaseList, Expectation, isComparator } from './expression.js';
+
+enum SerializedExpectationKind {
+ Value,
+ Interval,
+ Interval1DArray,
+ Interval2DArray,
+ Array,
+ Comparator,
+}
+
+/** serializeExpectation() serializes an Expectation to a BinaryStream */
+export function serializeExpectation(s: BinaryStream, e: Expectation) {
+ if (e instanceof Scalar || e instanceof Vector || e instanceof Matrix) {
+ s.writeU8(SerializedExpectationKind.Value);
+ serializeValue(s, e);
+ return;
+ }
+ if (e instanceof FPInterval) {
+ s.writeU8(SerializedExpectationKind.Interval);
+ serializeFPInterval(s, e);
+ return;
+ }
+ if (e instanceof Array) {
+ if (e[0] instanceof Array) {
+ e = e as FPInterval[][];
+ const cols = e.length;
+ const rows = e[0].length;
+ s.writeU8(SerializedExpectationKind.Interval2DArray);
+ s.writeU16(cols);
+ s.writeU16(rows);
+ s.writeArray(flatten2DArray(e), serializeFPInterval);
+ } else {
+ e = e as FPInterval[];
+ s.writeU8(SerializedExpectationKind.Interval1DArray);
+ s.writeArray(e, serializeFPInterval);
+ }
+ return;
+ }
+ if (isComparator(e)) {
+ s.writeU8(SerializedExpectationKind.Comparator);
+ serializeComparator(s, e);
+ return;
+ }
+ unreachable(`cannot serialize Expectation ${e}`);
+}
+
+/** deserializeExpectation() deserializes an Expectation from a BinaryStream */
+export function deserializeExpectation(s: BinaryStream): Expectation {
+ const kind = s.readU8();
+ switch (kind) {
+ case SerializedExpectationKind.Value: {
+ return deserializeValue(s);
+ }
+ case SerializedExpectationKind.Interval: {
+ return deserializeFPInterval(s);
+ }
+ case SerializedExpectationKind.Interval1DArray: {
+ return s.readArray(deserializeFPInterval);
+ }
+ case SerializedExpectationKind.Interval2DArray: {
+ const cols = s.readU16();
+ const rows = s.readU16();
+ return unflatten2DArray(s.readArray(deserializeFPInterval), cols, rows);
+ }
+ case SerializedExpectationKind.Comparator: {
+ return deserializeComparator(s);
+ }
+ default: {
+ unreachable(`invalid serialized expectation kind: ${kind}`);
+ }
+ }
+}
+
+/** serializeCase() serializes a Case to a BinaryStream */
+export function serializeCase(s: BinaryStream, c: Case) {
+ s.writeCond(c.input instanceof Array, {
+ if_true: () => {
+ // c.input is array
+ s.writeArray(c.input as Value[], serializeValue);
+ },
+ if_false: () => {
+ // c.input is not array
+ serializeValue(s, c.input as Value);
+ },
+ });
+ serializeExpectation(s, c.expected);
+}
+
+/** deserializeCase() deserializes a Case from a BinaryStream */
+export function deserializeCase(s: BinaryStream): Case {
+ const input = s.readCond({
+ if_true: () => {
+ // c.input is array
+ return s.readArray(deserializeValue);
+ },
+ if_false: () => {
+ // c.input is not array
+ return deserializeValue(s);
+ },
+ });
+ const expected = deserializeExpectation(s);
+ return { input, expected };
+}
+
+/** CaseListBuilder is a function that builds a CaseList */
+export type CaseListBuilder = () => CaseList;
+
+/**
+ * CaseCache is a cache of CaseList.
+ * CaseCache implements the Cacheable interface, so the cases can be pre-built
+ * and stored in the data cache, reducing computation costs at CTS runtime.
+ */
+export class CaseCache implements Cacheable<Record<string, CaseList>> {
+ /**
+ * Constructor
+ * @param name the name of the cache. This must be globally unique.
+ * @param builders a Record of case-list name to case-list builder.
+ */
+ constructor(name: string, builders: Record<string, CaseListBuilder>) {
+ this.path = `webgpu/shader/execution/case-cache/${name}.bin`;
+ this.builders = builders;
+ }
+
+ /** get() returns the list of cases with the given name */
+ public async get(name: string): Promise<CaseList> {
+ const data = await dataCache.fetch(this);
+ return data[name];
+ }
+
+ /**
+ * build() implements the Cacheable.build interface.
+ * @returns the data.
+ */
+ build(): Promise<Record<string, CaseList>> {
+ const built: Record<string, CaseList> = {};
+ for (const name in this.builders) {
+ const cases = this.builders[name]();
+ built[name] = cases;
+ }
+ return Promise.resolve(built);
+ }
+
+ /**
+ * serialize() implements the Cacheable.serialize interface.
+ * @returns the serialized data.
+ */
+ serialize(data: Record<string, CaseList>): Uint8Array {
+ const maxSize = 32 << 20; // 32MB - max size for a file
+ const stream = new BinaryStream(new ArrayBuffer(maxSize));
+ stream.writeU32(Object.keys(data).length);
+ for (const name in data) {
+ stream.writeString(name);
+ stream.writeArray(data[name], serializeCase);
+ }
+ return stream.buffer();
+ }
+
+ /**
+ * deserialize() implements the Cacheable.deserialize interface.
+   * @returns the deserialized data.
+ */
+ deserialize(array: Uint8Array): Record<string, CaseList> {
+ const s = new BinaryStream(array.buffer);
+ const casesByName: Record<string, CaseList> = {};
+ const numRecords = s.readU32();
+ for (let i = 0; i < numRecords; i++) {
+ const name = s.readString();
+ const cases = s.readArray(deserializeCase);
+ casesByName[name] = cases;
+ }
+ return casesByName;
+ }
+
+ public readonly path: string;
+ private readonly builders: Record<string, CaseListBuilder>;
+}
+
+export function makeCaseCache(name: string, builders: Record<string, CaseListBuilder>): CaseCache {
+ return new CaseCache(name, builders);
+}
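+
+// Typical usage (sketch, mirroring the spec files above): a spec file builds a
+// named cache of case lists once at module scope, then each test awaits the
+// list it needs at runtime.
+//
+//   export const d = makeCaseCache('unpack2x16float', {
+//     u32_const: () =>
+//       FP.f32.generateU32ToIntervalCases(fullU32Range(), 'finite', FP.f32.unpack2x16floatInterval),
+//   });
+//   ...
+//   const cases = await d.get('u32_const');
+//
+// When the cache has been pre-built and stored in the data cache, get()
+// deserializes the stored CaseList instead of invoking the builder.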
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/expression.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/expression.ts
new file mode 100644
index 0000000000..f85516f29b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/expression.ts
@@ -0,0 +1,1436 @@
+import { globalTestConfig } from '../../../../common/framework/test_config.js';
+import { ROArrayArray } from '../../../../common/util/types.js';
+import { assert, objectEquals, unreachable } from '../../../../common/util/util.js';
+import { GPUTest } from '../../../gpu_test.js';
+import { compare, Comparator, ComparatorImpl } from '../../../util/compare.js';
+import { kValue } from '../../../util/constants.js';
+import {
+ ScalarType,
+ Scalar,
+ Type,
+ TypeVec,
+ TypeU32,
+ Value,
+ Vector,
+ VectorType,
+ u32,
+ i32,
+ Matrix,
+ MatrixType,
+ ScalarBuilder,
+ scalarTypeOf,
+} from '../../../util/conversion.js';
+import { FPInterval } from '../../../util/floating_point.js';
+import {
+ cartesianProduct,
+ QuantizeFunc,
+ quantizeToI32,
+ quantizeToU32,
+} from '../../../util/math.js';
+
+export type Expectation =
+ | Value
+ | FPInterval
+ | readonly FPInterval[]
+ | ROArrayArray<FPInterval>
+ | Comparator;
+
+/** @returns true if this Expectation is actually a Comparator */
+export function isComparator(e: Expectation): e is Comparator {
+ return !(
+ e instanceof FPInterval ||
+ e instanceof Scalar ||
+ e instanceof Vector ||
+ e instanceof Matrix ||
+ e instanceof Array
+ );
+}
+
+/** @returns the input if it is already a Comparator, otherwise wraps it in a 'value' comparator */
+export function toComparator(input: Expectation): Comparator {
+ if (isComparator(input)) {
+ return input;
+ }
+
+ return { compare: got => compare(got, input as Value), kind: 'value' };
+}
+
+/** Case is a single expression test case. */
+export type Case = {
+ // The input value(s)
+ input: Value | ReadonlyArray<Value>;
+ // The expected result, or function to check the result
+ expected: Expectation;
+};
+
+/** CaseList is a list of Cases */
+export type CaseList = Array<Case>;
+
+/** The input value source */
+export type InputSource =
+ | 'const' // Shader creation time constant values (@const)
+ | 'uniform' // Uniform buffer
+ | 'storage_r' // Read-only storage buffer
+ | 'storage_rw'; // Read-write storage buffer
+
+/** All possible input sources */
+export const allInputSources: InputSource[] = ['const', 'uniform', 'storage_r', 'storage_rw'];
+
+/** Just constant input source */
+export const onlyConstInputSource: InputSource[] = ['const'];
+
+/** Configuration for running an expression test */
+export type Config = {
+ // Where the input values are read from
+ inputSource: InputSource;
+ // If defined, scalar test cases will be packed into vectors of the given
+ // width, which must be 2, 3 or 4.
+ // Requires that all parameters of the expression overload are of a scalar
+ // type, and the return type of the expression overload is also a scalar type.
+ // If the number of test cases is not a multiple of the vector width, then the
+ // last scalar value is repeated to fill the last vector value.
+ vectorize?: number;
+};
+
+// Helper for returning the stride for a given Type
+function valueStride(ty: Type): number {
+  // AbstractFloats are passed out of the shader via a struct of 2x u32s, and
+  // containers are unpacked as arrays
+ if (scalarTypeOf(ty).kind === 'abstract-float') {
+ if (ty instanceof ScalarType) {
+ return 16;
+ }
+ if (ty instanceof VectorType) {
+ if (ty.width === 2) {
+ return 16;
+ }
+ // vec3s have padding to make them the same size as vec4s
+ return 32;
+ }
+ if (ty instanceof MatrixType) {
+ switch (ty.cols) {
+ case 2:
+ switch (ty.rows) {
+ case 2:
+ return 32;
+ case 3:
+ return 64;
+ case 4:
+ return 64;
+ }
+ break;
+ case 3:
+ switch (ty.rows) {
+ case 2:
+ return 48;
+ case 3:
+ return 96;
+ case 4:
+ return 96;
+ }
+ break;
+ case 4:
+ switch (ty.rows) {
+ case 2:
+ return 64;
+ case 3:
+ return 128;
+ case 4:
+ return 128;
+ }
+ break;
+ }
+ }
+ unreachable(`AbstractFloats have not yet been implemented for ${ty.toString()}`);
+ }
+
+ if (ty instanceof MatrixType) {
+ switch (ty.cols) {
+ case 2:
+ switch (ty.rows) {
+ case 2:
+ return 16;
+ case 3:
+ return 32;
+ case 4:
+ return 32;
+ }
+ break;
+ case 3:
+ switch (ty.rows) {
+ case 2:
+ return 32;
+ case 3:
+ return 64;
+ case 4:
+ return 64;
+ }
+ break;
+ case 4:
+ switch (ty.rows) {
+ case 2:
+ return 32;
+ case 3:
+ return 64;
+ case 4:
+ return 64;
+ }
+ break;
+ }
+ unreachable(
+ `Attempted to get stride length for a matrix with dimensions (${ty.cols}x${ty.rows}), which isn't currently handled`
+ );
+ }
+
+ // Handles scalars and vectors
+ return 16;
+}
+
+// Helper for summing up all of the stride values for an array of Types
+function valueStrides(tys: Type[]): number {
+ return tys.map(valueStride).reduce((sum, c) => sum + c);
+}
+
+// Helper for returning the WGSL storage type for the given Type.
+function storageType(ty: Type): Type {
+ if (ty instanceof ScalarType) {
+ assert(ty.kind !== 'f64', `No storage type defined for 'f64' values`);
+ assert(
+ ty.kind !== 'abstract-float',
+ `Custom handling is implemented for 'abstract-float' values`
+ );
+ if (ty.kind === 'bool') {
+ return TypeU32;
+ }
+ }
+ if (ty instanceof VectorType) {
+ return TypeVec(ty.width, storageType(ty.elementType) as ScalarType);
+ }
+ return ty;
+}
+
+// Helper for converting a value of the type 'ty' from the storage type.
+function fromStorage(ty: Type, expr: string): string {
+ if (ty instanceof ScalarType) {
+ assert(ty.kind !== 'abstract-float', `AbstractFloat values should not be in input storage`);
+    assert(ty.kind !== 'f64', `No storage type defined for 'f64' values`);
+ if (ty.kind === 'bool') {
+ return `${expr} != 0u`;
+ }
+ }
+ if (ty instanceof VectorType) {
+ assert(
+ ty.elementType.kind !== 'abstract-float',
+ `AbstractFloat values cannot appear in input storage`
+ );
+    assert(ty.elementType.kind !== 'f64', `No storage type defined for 'f64' values`);
+ if (ty.elementType.kind === 'bool') {
+ return `${expr} != vec${ty.width}<u32>(0u)`;
+ }
+ }
+ return expr;
+}
+
+// Helper for converting a value of the type 'ty' to the storage type.
+function toStorage(ty: Type, expr: string): string {
+ if (ty instanceof ScalarType) {
+ assert(
+ ty.kind !== 'abstract-float',
+ `AbstractFloat values have custom code for writing to storage`
+ );
+ assert(ty.kind !== 'f64', `No storage type defined for 'f64' values`);
+ if (ty.kind === 'bool') {
+ return `select(0u, 1u, ${expr})`;
+ }
+ }
+ if (ty instanceof VectorType) {
+ assert(
+ ty.elementType.kind !== 'abstract-float',
+ `AbstractFloat values have custom code for writing to storage`
+ );
+    assert(ty.elementType.kind !== 'f64', `No storage type defined for 'f64' values`);
+ if (ty.elementType.kind === 'bool') {
+ return `select(vec${ty.width}<u32>(0u), vec${ty.width}<u32>(1u), ${expr})`;
+ }
+ }
+ return expr;
+}
+
+// A PipelineCache is a map of WGSL shader source to a built pipeline
+type PipelineCache = Map<String, GPUComputePipeline>;
+
+/**
+ * Searches for an entry with the given key, adding and returning the result of calling
+ * `create` if the entry was not found.
+ * @param map the cache map
+ * @param key the entry's key
+ * @param create the function used to construct a value, if not found in the cache
+ * @returns the value, either fetched from the cache, or newly built.
+ */
+function getOrCreate<K, V>(map: Map<K, V>, key: K, create: () => V) {
+ const existing = map.get(key);
+ if (existing !== undefined) {
+ return existing;
+ }
+ const value = create();
+ map.set(key, value);
+ return value;
+}
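+
+// Example use (illustrative): memoize a compiled pipeline keyed by its WGSL
+// source, as done in buildPipeline() below.
+//   const pipeline = getOrCreate(pipelineCache, source, () =>
+//     t.device.createComputePipeline({ layout: 'auto', compute: { module, entryPoint: 'main' } })
+//   );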
+
+/**
+ * Runs the list of expression tests, possibly splitting the tests into multiple
+ * dispatches to keep the input data within the buffer binding limits.
+ * run() will pack the scalar test cases into a smaller set of vectorized tests
+ * if `cfg.vectorize` is defined.
+ * @param t the GPUTest
+ * @param shaderBuilder the shader builder function
+ * @param parameterTypes the list of expression parameter types
+ * @param resultType the return type for the expression overload
+ * @param cfg test configuration values
+ * @param cases list of test cases
+ * @param batch_size override the calculated casesPerBatch.
+ */
+export async function run(
+ t: GPUTest,
+ shaderBuilder: ShaderBuilder,
+ parameterTypes: Array<Type>,
+ resultType: Type,
+ cfg: Config = { inputSource: 'storage_r' },
+ cases: CaseList,
+ batch_size?: number
+) {
+ // If the 'vectorize' config option was provided, pack the cases into vectors.
+ if (cfg.vectorize !== undefined) {
+ const packed = packScalarsToVector(parameterTypes, resultType, cases, cfg.vectorize);
+ cases = packed.cases;
+ parameterTypes = packed.parameterTypes;
+ resultType = packed.resultType;
+ }
+
+ // The size of the input buffer may exceed the maximum buffer binding size,
+ // so chunk the tests up into batches that fit into the limits. We also split
+ // the cases into smaller batches to help with shader compilation performance.
+ const casesPerBatch = (function () {
+ if (batch_size) {
+ return batch_size;
+ }
+ switch (cfg.inputSource) {
+ case 'const':
+ // Some drivers are slow to optimize shaders with many constant values,
+ // or statements. 32 is an empirically picked number of cases that works
+ // well for most drivers.
+ return 32;
+ case 'uniform':
+ // Some drivers are slow to build pipelines with large uniform buffers.
+ // 2k appears to be a sweet-spot when benchmarking.
+ return Math.floor(
+ Math.min(1024 * 2, t.device.limits.maxUniformBufferBindingSize) /
+ valueStrides(parameterTypes)
+ );
+ case 'storage_r':
+ case 'storage_rw':
+ return Math.floor(
+ t.device.limits.maxStorageBufferBindingSize / valueStrides(parameterTypes)
+ );
+ }
+ })();
+
+ // A cache to hold built shader pipelines.
+ const pipelineCache = new Map<String, GPUComputePipeline>();
+
+ // Submit all the cases in batches, rate-limiting to ensure not too many
+ // batches are in flight simultaneously.
+ const maxBatchesInFlight = 5;
+ let batchesInFlight = 0;
+ let resolvePromiseBlockingBatch: (() => void) | undefined = undefined;
+ const batchFinishedCallback = () => {
+ batchesInFlight -= 1;
+ // If there is any batch waiting on a previous batch to finish,
+ // unblock it now, and clear the resolve callback.
+ if (resolvePromiseBlockingBatch) {
+ resolvePromiseBlockingBatch();
+ resolvePromiseBlockingBatch = undefined;
+ }
+ };
+
+ const processBatch = async (batchCases: CaseList) => {
+ const checkBatch = await submitBatch(
+ t,
+ shaderBuilder,
+ parameterTypes,
+ resultType,
+ batchCases,
+ cfg.inputSource,
+ pipelineCache
+ );
+ checkBatch();
+ void t.queue.onSubmittedWorkDone().finally(batchFinishedCallback);
+ };
+
+ const pendingBatches = [];
+
+ for (let i = 0; i < cases.length; i += casesPerBatch) {
+ const batchCases = cases.slice(i, Math.min(i + casesPerBatch, cases.length));
+
+ if (batchesInFlight > maxBatchesInFlight) {
+ await new Promise<void>(resolve => {
+ // There should only be one batch waiting at a time.
+ assert(resolvePromiseBlockingBatch === undefined);
+ resolvePromiseBlockingBatch = resolve;
+ });
+ }
+ batchesInFlight += 1;
+
+ pendingBatches.push(processBatch(batchCases));
+ }
+
+ await Promise.all(pendingBatches);
+}
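+
+// Typical call site (sketch, mirroring the spec files above): a spec file
+// fetches its cached cases and hands them to run(), forwarding the test
+// parameters (inputSource and, for scalar overloads, an optional vectorize
+// width) as the Config.
+//   const cases = await d.get(t.params.inputSource === 'const' ? 'u32_const' : 'u32_non_const');
+//   await run(t, builtin('unpack2x16float'), [TypeU32], TypeVec(2, TypeF32), t.params, cases);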
+
+/**
+ * Submits the list of expression tests. The input data must fit within the
+ * buffer binding limits of the given inputSource.
+ * @param t the GPUTest
+ * @param shaderBuilder the shader builder function
+ * @param parameterTypes the list of expression parameter types
+ * @param resultType the return type for the expression overload
+ * @param cases list of test cases that fit within the binding limits of the device
+ * @param inputSource the source of the input values
+ * @param pipelineCache the cache of compute pipelines, shared between batches
+ * @returns a function that checks the results are as expected
+ */
+async function submitBatch(
+ t: GPUTest,
+ shaderBuilder: ShaderBuilder,
+ parameterTypes: Array<Type>,
+ resultType: Type,
+ cases: CaseList,
+ inputSource: InputSource,
+ pipelineCache: PipelineCache
+): Promise<() => void> {
+ // Construct a buffer to hold the results of the expression tests
+ const outputBufferSize = cases.length * valueStride(resultType);
+ const outputBuffer = t.device.createBuffer({
+ size: outputBufferSize,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.COPY_DST | GPUBufferUsage.STORAGE,
+ });
+
+ const [pipeline, group] = await buildPipeline(
+ t,
+ shaderBuilder,
+ parameterTypes,
+ resultType,
+ cases,
+ inputSource,
+ outputBuffer,
+ pipelineCache
+ );
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, group);
+ pass.dispatchWorkgroups(1);
+ pass.end();
+
+ // Heartbeat to ensure CTS runners know we're alive.
+ globalTestConfig.testHeartbeatCallback();
+
+ t.queue.submit([encoder.finish()]);
+
+ // Return a function that can check the results of the shader
+ return () => {
+ const checkExpectation = (outputData: Uint8Array) => {
+ // Read the outputs from the output buffer
+ const outputs = new Array<Value>(cases.length);
+ for (let i = 0; i < cases.length; i++) {
+ outputs[i] = resultType.read(outputData, i * valueStride(resultType));
+ }
+
+ // The list of expectation failures
+ const errs: string[] = [];
+
+ // For each case...
+ for (let caseIdx = 0; caseIdx < cases.length; caseIdx++) {
+ const c = cases[caseIdx];
+ const got = outputs[caseIdx];
+ const cmp = toComparator(c.expected).compare(got);
+ if (!cmp.matched) {
+ errs.push(`(${c.input instanceof Array ? c.input.join(', ') : c.input})
+ returned: ${cmp.got}
+ expected: ${cmp.expected}`);
+ }
+ }
+
+ return errs.length > 0 ? new Error(errs.join('\n\n')) : undefined;
+ };
+
+ // Heartbeat to ensure CTS runners know we're alive.
+ globalTestConfig.testHeartbeatCallback();
+
+ t.expectGPUBufferValuesPassCheck(outputBuffer, checkExpectation, {
+ type: Uint8Array,
+ typedLength: outputBufferSize,
+ });
+ };
+}
+
+/**
+ * map is a helper for returning a new array with each element of `v`
+ * transformed with `fn`.
+ * If `v` is not an array, then `fn` is called with (v, 0).
+ */
+function map<T, U>(v: T | readonly T[], fn: (value: T, index?: number) => U): U[] {
+ if (v instanceof Array) {
+ return v.map(fn);
+ }
+ return [fn(v, 0)];
+}
+
+/**
+ * ShaderBuilder is a function used to construct the WGSL shader used by an
+ * expression test.
+ * @param parameterTypes the list of expression parameter types
+ * @param resultType the return type for the expression overload
+ * @param cases list of test cases that fit within the binding limits of the device
+ * @param inputSource the source of the input values
+ */
+export type ShaderBuilder = (
+ parameterTypes: Array<Type>,
+ resultType: Type,
+ cases: CaseList,
+ inputSource: InputSource
+) => string;
+
+/**
+ * Helper that returns the WGSL to declare the output storage buffer for a shader
+ */
+function wgslOutputs(resultType: Type, count: number): string {
+ let output_struct = undefined;
+ if (scalarTypeOf(resultType).kind !== 'abstract-float') {
+ output_struct = `
+struct Output {
+ @size(${valueStride(resultType)}) value : ${storageType(resultType)}
+};`;
+ } else {
+ if (resultType instanceof ScalarType) {
+ output_struct = `struct AF {
+ low: u32,
+ high: u32,
+};
+
+struct Output {
+ @size(${valueStride(resultType)}) value: AF,
+};`;
+ }
+ if (resultType instanceof VectorType) {
+ const dim = resultType.width;
+ output_struct = `struct AF {
+ low: u32,
+ high: u32,
+};
+
+struct Output {
+ @size(${valueStride(resultType)}) value: array<AF, ${dim}>,
+};`;
+ }
+
+ if (resultType instanceof MatrixType) {
+ const cols = resultType.cols;
+ const rows = resultType.rows === 2 ? 2 : 4; // 3 element rows have a padding element
+ output_struct = `struct AF {
+ low: u32,
+ high: u32,
+};
+
+struct Output {
+ @size(${valueStride(resultType)}) value: array<array<AF, ${rows}>, ${cols}>,
+};`;
+ }
+
+ assert(output_struct !== undefined, `No implementation for result type '${resultType}'`);
+ }
+
+ return `${output_struct}
+@group(0) @binding(0) var<storage, read_write> outputs : array<Output, ${count}>;
+`;
+}
+
+/**
+ * Helper that returns the WGSL to declare the values array for a shader
+ */
+function wgslValuesArray(
+ parameterTypes: Array<Type>,
+ resultType: Type,
+ cases: CaseList,
+ expressionBuilder: ExpressionBuilder
+): string {
+ return `
+const values = array(
+ ${cases.map(c => expressionBuilder(map(c.input, v => v.wgsl()))).join(',\n ')}
+);`;
+}
+
+/**
+ * Helper that returns the WGSL 'var' declaration for the given input source
+ */
+function wgslInputVar(inputSource: InputSource, count: number) {
+ switch (inputSource) {
+ case 'storage_r':
+ return `@group(0) @binding(1) var<storage, read> inputs : array<Input, ${count}>;`;
+ case 'storage_rw':
+ return `@group(0) @binding(1) var<storage, read_write> inputs : array<Input, ${count}>;`;
+ case 'uniform':
+ return `@group(0) @binding(1) var<uniform> inputs : array<Input, ${count}>;`;
+ }
+ throw new Error(`InputSource ${inputSource} does not use an input var`);
+}
+
+/**
+ * Helper that returns the WGSL header that goes before any other declaration;
+ * currently this includes the f16 enable directive when necessary.
+ */
+function wgslHeader(parameterTypes: Array<Type>, resultType: Type) {
+ const usedF16 =
+ scalarTypeOf(resultType).kind === 'f16' ||
+ parameterTypes.some((ty: Type) => scalarTypeOf(ty).kind === 'f16');
+ const header = usedF16 ? 'enable f16;\n' : '';
+ return header;
+}
+
+/**
+ * ExpressionBuilder returns the WGSL used to evaluate an expression with the
+ * given input values.
+ */
+export type ExpressionBuilder = (values: ReadonlyArray<string>) => string;
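+
+// Example (illustrative, hypothetical name): an ExpressionBuilder for a
+// two-parameter builtin simply splices the WGSL value expressions into a call:
+//   const atan2Builder: ExpressionBuilder = values => `atan2(${values[0]}, ${values[1]})`;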
+
+/**
+ * Returns a ShaderBuilder that builds a basic expression test shader.
+ * @param expressionBuilder the expression builder
+ */
+function basicExpressionShaderBody(
+ expressionBuilder: ExpressionBuilder,
+ parameterTypes: Array<Type>,
+ resultType: Type,
+ cases: CaseList,
+ inputSource: InputSource
+): string {
+ assert(
+ scalarTypeOf(resultType).kind !== 'abstract-float',
+    `abstractFloatShaderBuilder should be used when result type is 'abstract-float'`
+ );
+ if (inputSource === 'const') {
+ //////////////////////////////////////////////////////////////////////////
+ // Constant eval
+ //////////////////////////////////////////////////////////////////////////
+ let body = '';
+ if (parameterTypes.some(ty => scalarTypeOf(ty).kind === 'abstract-float')) {
+ // Directly assign the expression to the output, to avoid an
+ // intermediate store, which will concretize the value early
+ body = cases
+ .map(
+ (c, i) =>
+ ` outputs[${i}].value = ${toStorage(
+ resultType,
+ expressionBuilder(map(c.input, v => v.wgsl()))
+ )};`
+ )
+ .join('\n ');
+ } else if (globalTestConfig.unrollConstEvalLoops) {
+ body = cases
+ .map((_, i) => {
+ const value = `values[${i}]`;
+ return ` outputs[${i}].value = ${toStorage(resultType, value)};`;
+ })
+ .join('\n ');
+ } else {
+ body = `
+ for (var i = 0u; i < ${cases.length}; i++) {
+ outputs[i].value = ${toStorage(resultType, `values[i]`)};
+ }`;
+ }
+
+ return `
+${wgslOutputs(resultType, cases.length)}
+
+${wgslValuesArray(parameterTypes, resultType, cases, expressionBuilder)}
+
+@compute @workgroup_size(1)
+fn main() {
+${body}
+}`;
+ } else {
+ //////////////////////////////////////////////////////////////////////////
+ // Runtime eval
+ //////////////////////////////////////////////////////////////////////////
+
+ // returns the WGSL expression to load the ith parameter of the given type from the input buffer
+ const paramExpr = (ty: Type, i: number) => fromStorage(ty, `inputs[i].param${i}`);
+
+ // resolves to the expression that calls the builtin
+ const expr = toStorage(resultType, expressionBuilder(parameterTypes.map(paramExpr)));
+
+ return `
+struct Input {
+${parameterTypes
+ .map((ty, i) => ` @size(${valueStride(ty)}) param${i} : ${storageType(ty)},`)
+ .join('\n')}
+};
+
+${wgslOutputs(resultType, cases.length)}
+
+${wgslInputVar(inputSource, cases.length)}
+
+@compute @workgroup_size(1)
+fn main() {
+ for (var i = 0; i < ${cases.length}; i++) {
+ outputs[i].value = ${expr};
+ }
+}
+`;
+ }
+}
+
+/**
+ * Returns a ShaderBuilder that builds a basic expression test shader.
+ * @param expressionBuilder the expression builder
+ */
+export function basicExpressionBuilder(expressionBuilder: ExpressionBuilder): ShaderBuilder {
+ return (
+ parameterTypes: Array<Type>,
+ resultType: Type,
+ cases: CaseList,
+ inputSource: InputSource
+ ) => {
+ return `\
+${wgslHeader(parameterTypes, resultType)}
+
+${basicExpressionShaderBody(expressionBuilder, parameterTypes, resultType, cases, inputSource)}`;
+ };
+}
+
+/**
+ * Returns a ShaderBuilder that builds a basic expression test shader with the given
+ * predeclaration string, which is emitted after the WGSL header (i.e. any enable
+ * directives) but before anything else.
+ * @param expressionBuilder the expression builder
+ * @param predeclaration the predeclaration string
+ */
+export function basicExpressionWithPredeclarationBuilder(
+ expressionBuilder: ExpressionBuilder,
+ predeclaration: string
+): ShaderBuilder {
+ return (
+ parameterTypes: Array<Type>,
+ resultType: Type,
+ cases: CaseList,
+ inputSource: InputSource
+ ) => {
+ return `\
+${wgslHeader(parameterTypes, resultType)}
+
+${predeclaration}
+
+${basicExpressionShaderBody(expressionBuilder, parameterTypes, resultType, cases, inputSource)}`;
+ };
+}
+
+/**
+ * Returns a ShaderBuilder that builds a compound assignment operator test shader.
+ * @param op the compound operator
+ */
+export function compoundAssignmentBuilder(op: string): ShaderBuilder {
+ return (
+ parameterTypes: Array<Type>,
+ resultType: Type,
+ cases: CaseList,
+ inputSource: InputSource
+ ) => {
+ //////////////////////////////////////////////////////////////////////////
+ // Input validation
+ //////////////////////////////////////////////////////////////////////////
+ if (parameterTypes.length !== 2) {
+      throw new Error(`compoundBinaryOp() requires exactly two parameter values per case`);
+ }
+ const lhsType = parameterTypes[0];
+ const rhsType = parameterTypes[1];
+ if (!objectEquals(lhsType, resultType)) {
+ throw new Error(
+ `compoundBinaryOp() requires result type (${resultType}) to be equal to the LHS type (${lhsType})`
+ );
+ }
+ if (inputSource === 'const') {
+ //////////////////////////////////////////////////////////////////////////
+ // Constant eval
+ //////////////////////////////////////////////////////////////////////////
+ let body = '';
+ if (globalTestConfig.unrollConstEvalLoops) {
+ body = cases
+ .map((_, i) => {
+ return `
+ var ret_${i} = lhs[${i}];
+ ret_${i} ${op} rhs[${i}];
+ outputs[${i}].value = ${storageType(resultType)}(ret_${i});`;
+ })
+ .join('\n ');
+ } else {
+ body = `
+ for (var i = 0u; i < ${cases.length}; i++) {
+ var ret = lhs[i];
+ ret ${op} rhs[i];
+ outputs[i].value = ${storageType(resultType)}(ret);
+ }`;
+ }
+
+ const values = cases.map(c => (c.input as Value[]).map(v => v.wgsl()));
+
+ return `
+${wgslHeader(parameterTypes, resultType)}
+${wgslOutputs(resultType, cases.length)}
+
+const lhs = array(
+${values.map(c => `${c[0]}`).join(',\n ')}
+ );
+const rhs = array(
+${values.map(c => `${c[1]}`).join(',\n ')}
+);
+
+@compute @workgroup_size(1)
+fn main() {
+${body}
+}`;
+ } else {
+ //////////////////////////////////////////////////////////////////////////
+ // Runtime eval
+ //////////////////////////////////////////////////////////////////////////
+ return `
+${wgslHeader(parameterTypes, resultType)}
+${wgslOutputs(resultType, cases.length)}
+
+struct Input {
+ @size(${valueStride(lhsType)}) lhs : ${storageType(lhsType)},
+ @size(${valueStride(rhsType)}) rhs : ${storageType(rhsType)},
+}
+
+${wgslInputVar(inputSource, cases.length)}
+
+@compute @workgroup_size(1)
+fn main() {
+ for (var i = 0; i < ${cases.length}; i++) {
+ var ret = ${lhsType}(inputs[i].lhs);
+ ret ${op} ${rhsType}(inputs[i].rhs);
+ outputs[i].value = ${storageType(resultType)}(ret);
+ }
+}
+`;
+ }
+ };
+}
+
+/**
+ * @returns a string that extracts the value of an AbstractFloat into an output
+ * destination
+ * @param expr expression for an AbstractFloat value; if working with vectors or
+ *             matrices, this string needs to include indexing into the
+ *             container.
+ * @param case_idx index in the case output array to assign the result
+ * @param accessor string describing how to access the AbstractFloat element
+ *                 that needs to be operated on.
+ *                 For scalars this should be left as ''.
+ *                 For vectors this will be an indexing operation,
+ *                 i.e. '[i]'
+ *                 For matrices this will be a double indexing operation,
+ *                 i.e. '[c][r]'
+ */
+function abstractFloatSnippet(expr: string, case_idx: number, accessor: string = ''): string {
+ // AbstractFloats are f64s under the hood. WebGPU does not support
+ // putting f64s in buffers, so the result needs to be split up into u32s
+ // and rebuilt in the test framework.
+ //
+ // Since there is no 64-bit data type that can be used as an element for a
+ // vector or a matrix in WGSL, the testing framework needs to pass the u32s
+ // via a struct with two u32s, and deconstruct vectors and matrices into
+ // arrays.
+ //
+ // This is complicated by the fact that user defined functions cannot
+ // take/return AbstractFloats, and AbstractFloats cannot be stored in
+ // variables, so the code cannot just inject a simple utility function
+ // at the top of the shader, instead this snippet needs to be inlined
+ // everywhere the test needs to return an AbstractFloat.
+ //
+ // select is used below, since ifs are not available during constant
+  // eval. This has the side effect that short-circuiting doesn't occur, so
+ // both sides of the select have to evaluate and be valid.
+ //
+ // This snippet implements FTZ for subnormals to bypass the need for
+ // complex subnormal specific logic.
+ //
+ // Expressions resulting in subnormals can still be reasonably tested,
+ // since this snippet will return 0 with the correct sign, which is
+ // always in the acceptance interval for a subnormal result, since an
+ // implementation may FTZ.
+ //
+  // Documentation for the snippet working with scalar results is included here
+  // as a comment rather than in the emitted WGSL, since shader length affects
+  // compilation time significantly on some backends. The code for vectors and
+  // matrices is basically the same thing, with extra indexing operations.
+ //
+ // Snippet with documentation:
+ // const kExponentBias = 1022;
+ //
+ // // Detect if the value is zero or subnormal, so that FTZ behaviour
+ // // can occur
+ // const subnormal_or_zero : bool = (${expr} <= ${kValue.f64.positive.subnormal.max}) && (${expr} >= ${kValue.f64.negative.subnormal.min});
+ //
+ // // MSB of the upper u32 is 1 if the value is negative, otherwise 0
+ // // Extract the sign bit early, so that abs() can be used with
+ // // frexp() so negative cases do not need to be handled
+ // const sign_bit : u32 = select(0, 0x80000000, ${expr} < 0);
+ //
+ // // Use frexp() to obtain the exponent and fractional parts, and
+ // // then perform FTZ if needed
+ // const f = frexp(abs(${expr}));
+ // const f_fract = select(f.fract, 0, subnormal_or_zero);
+ // const f_exp = select(f.exp, -kExponentBias, subnormal_or_zero);
+ //
+ // // Adjust for the exponent bias and shift for storing in bits
+ // // [20..31] of the upper u32
+ // const exponent_bits : u32 = (f_exp + kExponentBias) << 20;
+ //
+ // // Extract the portion of the mantissa that appears in upper u32 as
+ // // a float for later use
+ // const high_mantissa = ldexp(f_fract, 21);
+ //
+ // // Extract the portion of the mantissa that appears in upper u32 as
+ // // as bits. This value is masked, because normals will explicitly
+ // // have the implicit leading 1 that should not be in the final
+ // // result.
+ // const high_mantissa_bits : u32 = u32(ldexp(f_fract, 21)) & 0x000fffff;
+ //
+ // // Calculate the mantissa stored in the lower u32 as a float
+ // const low_mantissa = f_fract - ldexp(floor(high_mantissa), -21);
+ //
+ // // Convert the lower u32 mantissa to bits
+ // const low_mantissa_bits = u32(ldexp(low_mantissa, 53));
+ //
+ // outputs[${i}].value.high = sign_bit | exponent_bits | high_mantissa_bits;
+ // outputs[${i}].value.low = low_mantissa_bits;
+ // prettier-ignore
+ return ` {
+ const kExponentBias = 1022;
+ const subnormal_or_zero : bool = (${expr}${accessor} <= ${kValue.f64.positive.subnormal.max}) && (${expr}${accessor} >= ${kValue.f64.negative.subnormal.min});
+ const sign_bit : u32 = select(0, 0x80000000, ${expr}${accessor} < 0);
+ const f = frexp(abs(${expr}${accessor}));
+ const f_fract = select(f.fract, 0, subnormal_or_zero);
+ const f_exp = select(f.exp, -kExponentBias, subnormal_or_zero);
+ const exponent_bits : u32 = (f_exp + kExponentBias) << 20;
+ const high_mantissa = ldexp(f_fract, 21);
+ const high_mantissa_bits : u32 = u32(ldexp(f_fract, 21)) & 0x000fffff;
+ const low_mantissa = f_fract - ldexp(floor(high_mantissa), -21);
+ const low_mantissa_bits = u32(ldexp(low_mantissa, 53));
+ outputs[${case_idx}].value${accessor}.high = sign_bit | exponent_bits | high_mantissa_bits;
+ outputs[${case_idx}].value${accessor}.low = low_mantissa_bits;
+ }`;
+}
+
+/** @returns a string for a specific case that has a AbstractFloat result */
+function abstractFloatCaseBody(expr: string, resultType: Type, i: number): string {
+ if (resultType instanceof ScalarType) {
+ return abstractFloatSnippet(expr, i);
+ }
+
+ if (resultType instanceof VectorType) {
+ return [...Array(resultType.width).keys()]
+ .map(idx => abstractFloatSnippet(expr, i, `[${idx}]`))
+ .join(' \n');
+ }
+
+ if (resultType instanceof MatrixType) {
+ const cols = resultType.cols;
+ const rows = resultType.rows;
+ const results: String[] = [...Array(cols * rows)];
+
+ for (let c = 0; c < cols; c++) {
+ for (let r = 0; r < rows; r++) {
+ results[c * rows + r] = abstractFloatSnippet(expr, i, `[${c}][${r}]`);
+ }
+ }
+
+ return results.join(' \n');
+ }
+
+ unreachable(`Results of type '${resultType}' not yet implemented`);
+}
+
+/**
+ * @returns a ShaderBuilder that builds a test shader that handles AbstractFloat results.
+ * @param expressionBuilder an expression builder that will return AbstractFloats
+ */
+export function abstractFloatShaderBuilder(expressionBuilder: ExpressionBuilder): ShaderBuilder {
+ return (
+ parameterTypes: Array<Type>,
+ resultType: Type,
+ cases: CaseList,
+ inputSource: InputSource
+ ) => {
+ assert(inputSource === 'const', 'AbstractFloat results are only defined for const-eval');
+ assert(
+ scalarTypeOf(resultType).kind === 'abstract-float',
+ `Expected resultType of 'abstract-float', received '${scalarTypeOf(resultType).kind}' instead`
+ );
+
+ const body = cases
+ .map((c, i) => {
+ const expr = `${expressionBuilder(map(c.input, v => v.wgsl()))}`;
+ return abstractFloatCaseBody(expr, resultType, i);
+ })
+ .join('\n ');
+
+ return `
+${wgslHeader(parameterTypes, resultType)}
+
+${wgslOutputs(resultType, cases.length)}
+
+@compute @workgroup_size(1)
+fn main() {
+${body}
+}`;
+ };
+}
+
+/**
+ * Constructs and returns a GPUComputePipeline and GPUBindGroup for running a
+ * batch of test cases. If a pre-created pipeline can be found in
+ * `pipelineCache`, then this may be returned instead of creating a new
+ * pipeline.
+ * @param t the GPUTest
+ * @param shaderBuilder the shader builder
+ * @param parameterTypes the list of expression parameter types
+ * @param resultType the return type for the expression overload
+ * @param cases list of test cases that fit within the binding limits of the device
+ * @param inputSource the source of the input values
+ * @param outputBuffer the buffer that will hold the output values of the tests
+ * @param pipelineCache the cache of compute pipelines, shared between batches
+ */
+async function buildPipeline(
+ t: GPUTest,
+ shaderBuilder: ShaderBuilder,
+ parameterTypes: Array<Type>,
+ resultType: Type,
+ cases: CaseList,
+ inputSource: InputSource,
+ outputBuffer: GPUBuffer,
+ pipelineCache: PipelineCache
+): Promise<[GPUComputePipeline, GPUBindGroup]> {
+ cases.forEach(c => {
+ const inputTypes = c.input instanceof Array ? c.input.map(i => i.type) : [c.input.type];
+ if (!objectEquals(inputTypes, parameterTypes)) {
+ const input_str = `[${inputTypes.join(',')}]`;
+ const param_str = `[${parameterTypes.join(',')}]`;
+ throw new Error(
+ `case input types ${input_str} do not match provided runner parameter types ${param_str}`
+ );
+ }
+ });
+
+ const source = shaderBuilder(parameterTypes, resultType, cases, inputSource);
+
+ switch (inputSource) {
+ case 'const': {
+ // build the shader module
+ const module = t.device.createShaderModule({ code: source });
+
+ // build the pipeline
+ const pipeline = await t.device.createComputePipelineAsync({
+ layout: 'auto',
+ compute: { module, entryPoint: 'main' },
+ });
+
+ // build the bind group
+ const group = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer: outputBuffer } }],
+ });
+
+ return [pipeline, group];
+ }
+
+ case 'uniform':
+ case 'storage_r':
+ case 'storage_rw': {
+ // Input values come from a uniform or storage buffer
+
+ // size in bytes of the input buffer
+ const inputSize = cases.length * valueStrides(parameterTypes);
+
+ // Holds all the parameter values for all cases
+ const inputData = new Uint8Array(inputSize);
+
+ // Pack all the input parameter values into the inputData buffer
+ {
+ const caseStride = valueStrides(parameterTypes);
+ for (let caseIdx = 0; caseIdx < cases.length; caseIdx++) {
+ const caseBase = caseIdx * caseStride;
+ let offset = caseBase;
+ for (let paramIdx = 0; paramIdx < parameterTypes.length; paramIdx++) {
+ const params = cases[caseIdx].input;
+ if (params instanceof Array) {
+ params[paramIdx].copyTo(inputData, offset);
+ } else {
+ params.copyTo(inputData, offset);
+ }
+ offset += valueStride(parameterTypes[paramIdx]);
+ }
+ }
+ }
+
+ // build the compute pipeline, if the shader hasn't been compiled already.
+ const pipeline = getOrCreate(pipelineCache, source, () => {
+ // build the shader module
+ const module = t.device.createShaderModule({ code: source });
+
+ // build the pipeline
+ return t.device.createComputePipeline({
+ layout: 'auto',
+ compute: { module, entryPoint: 'main' },
+ });
+ });
+
+ // build the input buffer
+ const inputBuffer = t.makeBufferWithContents(
+ inputData,
+ GPUBufferUsage.COPY_SRC |
+ (inputSource === 'uniform' ? GPUBufferUsage.UNIFORM : GPUBufferUsage.STORAGE)
+ );
+
+ // build the bind group
+ const group = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: { buffer: outputBuffer } },
+ { binding: 1, resource: { buffer: inputBuffer } },
+ ],
+ });
+
+ return [pipeline, group];
+ }
+ }
+}
+
+/**
+ * Packs a list of scalar test cases into a smaller list of vector cases.
+ * Requires that all parameters of the expression overload are of a scalar type,
+ * and the return type of the expression overload is also a scalar type.
+ * If `cases.length` is not a multiple of `vectorWidth`, then the last scalar
+ * test case value is repeated to fill the vector value.
+ */
+function packScalarsToVector(
+ parameterTypes: Array<Type>,
+ resultType: Type,
+ cases: CaseList,
+ vectorWidth: number
+): { cases: CaseList; parameterTypes: Array<Type>; resultType: Type } {
+ // Validate that the parameters and return type are all vectorizable
+ for (let i = 0; i < parameterTypes.length; i++) {
+ const ty = parameterTypes[i];
+ if (!(ty instanceof ScalarType)) {
+ throw new Error(
+        `packScalarsToVector() can only be used on scalar parameter types, but the ${i}'th parameter type is a ${ty}`
+ );
+ }
+ }
+ if (!(resultType instanceof ScalarType)) {
+ throw new Error(
+      `packScalarsToVector() can only be used with a scalar return type, but the return type is a ${resultType}`
+ );
+ }
+
+ const packedCases: Array<Case> = [];
+ const packedParameterTypes = parameterTypes.map(p => TypeVec(vectorWidth, p as ScalarType));
+ const packedResultType = new VectorType(vectorWidth, resultType);
+
+ const clampCaseIdx = (idx: number) => Math.min(idx, cases.length - 1);
+
+ let caseIdx = 0;
+ while (caseIdx < cases.length) {
+ // Construct the vectorized inputs from the scalar cases
+ const packedInputs = new Array<Vector>(parameterTypes.length);
+ for (let paramIdx = 0; paramIdx < parameterTypes.length; paramIdx++) {
+ const inputElements = new Array<Scalar>(vectorWidth);
+ for (let i = 0; i < vectorWidth; i++) {
+ const input = cases[clampCaseIdx(caseIdx + i)].input;
+ inputElements[i] = (input instanceof Array ? input[paramIdx] : input) as Scalar;
+ }
+ packedInputs[paramIdx] = new Vector(inputElements);
+ }
+
+ // Gather the comparators for the packed cases
+ const cmp_impls = new Array<ComparatorImpl>(vectorWidth);
+ for (let i = 0; i < vectorWidth; i++) {
+ cmp_impls[i] = toComparator(cases[clampCaseIdx(caseIdx + i)].expected).compare;
+ }
+ const comparators: Comparator = {
+ compare: (got: Value) => {
+ let matched = true;
+ const gElements = new Array<string>(vectorWidth);
+ const eElements = new Array<string>(vectorWidth);
+ for (let i = 0; i < vectorWidth; i++) {
+ const d = cmp_impls[i]((got as Vector).elements[i]);
+ matched = matched && d.matched;
+ gElements[i] = d.got;
+ eElements[i] = d.expected;
+ }
+ return {
+ matched,
+ got: `${packedResultType}(${gElements.join(', ')})`,
+ expected: `${packedResultType}(${eElements.join(', ')})`,
+ };
+ },
+ kind: 'packed',
+ };
+
+ // Append the new packed case
+ packedCases.push({ input: packedInputs, expected: comparators });
+ caseIdx += vectorWidth;
+ }
+
+ return {
+ cases: packedCases,
+ parameterTypes: packedParameterTypes,
+ resultType: packedResultType,
+ };
+}
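+
+// Worked example (illustrative): with vectorWidth = 4, four scalar cases such
+// as { input: f32(1.5), expected: f32(1.0) } are packed into one case whose
+// input is a vec4<f32> of the four scalar inputs and whose expectation is a
+// 'packed' comparator that checks each lane against the corresponding scalar
+// expectation; a trailing partial batch repeats the last scalar case to fill
+// the vector.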
+
+/**
+ * Indicates bounds that acceptance intervals need to be within to avoid inputs
+ * being filtered out. This is used for const-eval tests, since going OOB will
+ * cause a validation error not an execution error.
+ */
+export type IntervalFilter =
+ | 'finite' // Expected to be finite in the interval numeric space
+ | 'unfiltered'; // No expectations
+
+/**
+ * A function that performs a binary operation on x and y, and returns the expected
+ * result.
+ */
+export interface BinaryOp {
+ (x: number, y: number): number | undefined;
+}
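+
+// Example (illustrative, hypothetical name): a BinaryOp returns `undefined` to
+// signal that a case should be skipped, e.g. division with a zero divisor:
+//   const safeDiv: BinaryOp = (x, y) => (y === 0 ? undefined : x / y);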
+
+/**
+ * @returns array of Case for the input params with op applied
+ * @param param0s array of inputs to try for the first param
+ * @param param1s array of inputs to try for the second param
+ * @param op callback called on each pair of inputs to produce each case
+ * @param quantize function to quantize all values
+ * @param scalarize function to convert numbers to Scalars
+ */
+function generateScalarBinaryToScalarCases(
+ param0s: readonly number[],
+ param1s: readonly number[],
+ op: BinaryOp,
+ quantize: QuantizeFunc,
+ scalarize: ScalarBuilder
+): Case[] {
+ param0s = param0s.map(quantize);
+ param1s = param1s.map(quantize);
+ return cartesianProduct(param0s, param1s).reduce((cases, e) => {
+ const expected = op(e[0], e[1]);
+ if (expected !== undefined) {
+ cases.push({ input: [scalarize(e[0]), scalarize(e[1])], expected: scalarize(expected) });
+ }
+ return cases;
+ }, new Array<Case>());
+}
+
+/**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param param0s array of inputs to try for the first param
+ * @param param1s array of inputs to try for the second param
+ * @param op callback called on each pair of inputs to produce each case
+ */
+export function generateBinaryToI32Cases(
+ param0s: readonly number[],
+ param1s: readonly number[],
+ op: BinaryOp
+) {
+ return generateScalarBinaryToScalarCases(param0s, param1s, op, quantizeToI32, i32);
+}
+
+/**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param param0s array of inputs to try for the first param
+ * @param param1s array of inputs to try for the second param
+ * @param op callback called on each pair of inputs to produce each case
+ */
+export function generateBinaryToU32Cases(
+ param0s: readonly number[],
+ param1s: readonly number[],
+ op: BinaryOp
+) {
+ return generateScalarBinaryToScalarCases(param0s, param1s, op, quantizeToU32, u32);
+}
+
+/**
+ * @returns a Case for the input params with op applied
+ * @param scalar scalar param
+ * @param vector vector param (2, 3, or 4 elements)
+ * @param op the op to apply to scalar and vector
+ * @param quantize function to quantize all values in vectors and scalars
+ * @param scalarize function to convert numbers to Scalars
+ */
+function makeScalarVectorBinaryToVectorCase(
+ scalar: number,
+ vector: readonly number[],
+ op: BinaryOp,
+ quantize: QuantizeFunc,
+ scalarize: ScalarBuilder
+): Case | undefined {
+ scalar = quantize(scalar);
+ vector = vector.map(quantize);
+ const result = vector.map(v => op(scalar, v));
+ if (result.includes(undefined)) {
+ return undefined;
+ }
+ return {
+ input: [scalarize(scalar), new Vector(vector.map(scalarize))],
+ expected: new Vector((result as readonly number[]).map(scalarize)),
+ };
+}
+
+/**
+ * @returns array of Case for the input params with op applied
+ * @param scalars array of scalar params
+ * @param vectors array of vector params (2, 3, or 4 elements)
+ * @param op the op to apply to each pair of scalar and vector
+ * @param quantize function to quantize all values in vectors and scalars
+ * @param scalarize function to convert numbers to Scalars
+ */
+function generateScalarVectorBinaryToVectorCases(
+ scalars: readonly number[],
+ vectors: ROArrayArray<number>,
+ op: BinaryOp,
+ quantize: QuantizeFunc,
+ scalarize: ScalarBuilder
+): Case[] {
+ const cases = new Array<Case>();
+ scalars.forEach(s => {
+ vectors.forEach(v => {
+ const c = makeScalarVectorBinaryToVectorCase(s, v, op, quantize, scalarize);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ });
+ });
+ return cases;
+}
+
+/**
+ * @returns a Case for the input params with op applied
+ * @param vector vector param (2, 3, or 4 elements)
+ * @param scalar scalar param
+ * @param op the op to apply to vector and scalar
+ * @param quantize function to quantize all values in vectors and scalars
+ * @param scalarize function to convert numbers to Scalars
+ */
+function makeVectorScalarBinaryToVectorCase(
+ vector: readonly number[],
+ scalar: number,
+ op: BinaryOp,
+ quantize: QuantizeFunc,
+ scalarize: ScalarBuilder
+): Case | undefined {
+ vector = vector.map(quantize);
+ scalar = quantize(scalar);
+ const result = vector.map(v => op(v, scalar));
+ if (result.includes(undefined)) {
+ return undefined;
+ }
+ return {
+ input: [new Vector(vector.map(scalarize)), scalarize(scalar)],
+ expected: new Vector((result as readonly number[]).map(scalarize)),
+ };
+}
+
+/**
+ * @returns array of Case for the input params with op applied
+ * @param vectors array of vector params (2, 3, or 4 elements)
+ * @param scalars array of scalar params
+ * @param op the op to apply to each pair of vector and scalar
+ * @param quantize function to quantize all values in vectors and scalars
+ * @param scalarize function to convert numbers to Scalars
+ */
+function generateVectorScalarBinaryToVectorCases(
+ vectors: ROArrayArray<number>,
+ scalars: readonly number[],
+ op: BinaryOp,
+ quantize: QuantizeFunc,
+ scalarize: ScalarBuilder
+): Case[] {
+ const cases = new Array<Case>();
+ scalars.forEach(s => {
+ vectors.forEach(v => {
+ const c = makeVectorScalarBinaryToVectorCase(v, s, op, quantize, scalarize);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ });
+ });
+ return cases;
+}
+
+/**
+ * @returns array of Case for the input params with op applied
+ * @param scalars array of scalar params
+ * @param vectors array of vector params (2, 3, or 4 elements)
+ * @param op the op to apply to each pair of scalar and vector
+ */
+export function generateU32VectorBinaryToVectorCases(
+ scalars: readonly number[],
+ vectors: ROArrayArray<number>,
+ op: BinaryOp
+): Case[] {
+ return generateScalarVectorBinaryToVectorCases(scalars, vectors, op, quantizeToU32, u32);
+}
+
+/**
+ * @returns array of Case for the input params with op applied
+ * @param vectors array of vector params (2, 3, or 4 elements)
+ * @param scalars array of scalar params
+ * @param op the op to apply to each pair of vector and scalar
+ */
+export function generateVectorU32BinaryToVectorCases(
+ vectors: ROArrayArray<number>,
+ scalars: readonly number[],
+ op: BinaryOp
+): Case[] {
+ return generateVectorScalarBinaryToVectorCases(vectors, scalars, op, quantizeToU32, u32);
+}
+
+/**
+ * @returns array of Case for the input params with op applied
+ * @param scalars array of scalar params
+ * @param vectors array of vector params (2, 3, or 4 elements)
+ * @param op the op to apply to each pair of scalar and vector
+ */
+export function generateI32VectorBinaryToVectorCases(
+ scalars: readonly number[],
+ vectors: ROArrayArray<number>,
+ op: BinaryOp
+): Case[] {
+ return generateScalarVectorBinaryToVectorCases(scalars, vectors, op, quantizeToI32, i32);
+}
+
+/**
+ * @returns array of Case for the input params with op applied
+ * @param vectors array of vector params (2, 3, or 4 elements)
+ * @param scalars array of scalar params
+ * @param op the op to apply to each pair of vector and scalar
+ */
+export function generateVectorI32BinaryToVectorCases(
+ vectors: ROArrayArray<number>,
+ scalars: readonly number[],
+ op: BinaryOp
+): Case[] {
+ return generateVectorScalarBinaryToVectorCases(vectors, scalars, op, quantizeToI32, i32);
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/af_arithmetic.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/af_arithmetic.spec.ts
new file mode 100644
index 0000000000..182c0d76a9
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/af_arithmetic.spec.ts
@@ -0,0 +1,43 @@
+export const description = `
+Execution Tests for AbstractFloat arithmetic unary expression operations
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeAbstractFloat } from '../../../../util/conversion.js';
+import { FP } from '../../../../util/floating_point.js';
+import { fullF64Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { onlyConstInputSource, run } from '../expression.js';
+
+import { abstractUnary } from './unary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('unary/af_arithmetic', {
+ negation: () => {
+ return FP.abstract.generateScalarToIntervalCases(
+ fullF64Range({ neg_norm: 250, neg_sub: 20, pos_sub: 20, pos_norm: 250 }),
+ 'unfiltered',
+ FP.abstract.negationInterval
+ );
+ },
+});
+
+g.test('negation')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: -x
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u
+ .combine('inputSource', onlyConstInputSource)
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('negation');
+ await run(t, abstractUnary('-'), [TypeAbstractFloat], TypeAbstractFloat, t.params, cases, 1);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/af_assignment.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/af_assignment.spec.ts
new file mode 100644
index 0000000000..141d87d0f2
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/af_assignment.spec.ts
@@ -0,0 +1,112 @@
+export const description = `
+Execution Tests for assignment of AbstractFloats
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { kValue } from '../../../../util/constants.js';
+import { abstractFloat, TypeAbstractFloat, TypeF16, TypeF32 } from '../../../../util/conversion.js';
+import { FP } from '../../../../util/floating_point.js';
+import { filteredF64Range, fullF64Range, isSubnormalNumberF64 } from '../../../../util/math.js';
+import { reinterpretU64AsF64 } from '../../../../util/reinterpret.js';
+import { makeCaseCache } from '../case_cache.js';
+import {
+ abstractFloatShaderBuilder,
+ basicExpressionBuilder,
+ onlyConstInputSource,
+ run,
+ ShaderBuilder,
+} from '../expression.js';
+
+function concrete_assignment(): ShaderBuilder {
+ return basicExpressionBuilder(value => `${value}`);
+}
+
+function abstract_assignment(): ShaderBuilder {
+ return abstractFloatShaderBuilder(value => `${value}`);
+}
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('unary/af_assignment', {
+ abstract: () => {
+ const inputs = [
+ // Values that are useful for debugging the underlying framework/shader code, since it cannot be directly unit tested.
+ 0,
+ 0.5,
+      -0.5,
+ 1,
+ -1,
+      reinterpretU64AsF64(0x8000_0000_0000_0001n), // smallest magnitude negative subnormal with non-zero mantissa
+      reinterpretU64AsF64(0x0000_0000_0000_0001n), // smallest magnitude positive subnormal with non-zero mantissa
+      reinterpretU64AsF64(0x800a_aaaa_5555_5555n), // negative subnormal with obvious pattern
+      reinterpretU64AsF64(0x000a_aaaa_5555_5555n), // positive subnormal with obvious pattern
+      reinterpretU64AsF64(0x8010_0000_0000_0001n), // smallest magnitude negative normal with non-zero mantissa
+ reinterpretU64AsF64(0x0010_0000_0000_0001n), // smallest magnitude positive normal with non-zero mantissa
+ reinterpretU64AsF64(0xf555_5555_aaaa_aaaan), // negative normal with obvious pattern
+ reinterpretU64AsF64(0x5555_5555_aaaa_aaaan), // positive normal with obvious pattern
+ reinterpretU64AsF64(0xffef_ffff_ffff_ffffn), // largest magnitude negative normal
+ reinterpretU64AsF64(0x7fef_ffff_ffff_ffffn), // largest magnitude positive normal
+ // WebGPU implementation stressing values
+ ...fullF64Range(),
+ ];
+ return inputs.map(f => {
+ return {
+ input: abstractFloat(f),
+ expected: isSubnormalNumberF64(f) ? abstractFloat(0) : abstractFloat(f),
+ };
+ });
+ },
+ f32: () => {
+ return filteredF64Range(kValue.f32.negative.min, kValue.f32.positive.max).map(f => {
+ return { input: abstractFloat(f), expected: FP.f32.correctlyRoundedInterval(f) };
+ });
+ },
+ f16: () => {
+ return filteredF64Range(kValue.f16.negative.min, kValue.f16.positive.max).map(f => {
+ return { input: abstractFloat(f), expected: FP.f16.correctlyRoundedInterval(f) };
+ });
+ },
+});
+
+g.test('abstract')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-conversion')
+ .desc(
+ `
+testing that extracting abstract floats works
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource))
+ .fn(async t => {
+ const cases = await d.get('abstract');
+ await run(t, abstract_assignment(), [TypeAbstractFloat], TypeAbstractFloat, t.params, cases, 1);
+ });
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-conversion')
+ .desc(
+ `
+concretizing to f32
+`
+ )
+ .params(u => u.combine('inputSource', onlyConstInputSource))
+ .fn(async t => {
+ const cases = await d.get('f32');
+ await run(t, concrete_assignment(), [TypeAbstractFloat], TypeF32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-conversion')
+ .desc(
+ `
+concretizing to f16
+`
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .params(u => u.combine('inputSource', onlyConstInputSource))
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, concrete_assignment(), [TypeAbstractFloat], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/bool_conversion.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/bool_conversion.spec.ts
new file mode 100644
index 0000000000..8fcfed339f
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/bool_conversion.spec.ts
@@ -0,0 +1,174 @@
+export const description = `
+Execution Tests for the boolean conversion operations
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { anyOf } from '../../../../util/compare.js';
+import {
+ bool,
+ f32,
+ f16,
+ i32,
+ Scalar,
+ TypeBool,
+ TypeF32,
+ TypeF16,
+ TypeI32,
+ TypeU32,
+ u32,
+} from '../../../../util/conversion.js';
+import {
+ fullF32Range,
+ fullF16Range,
+ fullI32Range,
+ fullU32Range,
+ isSubnormalNumberF32,
+ isSubnormalNumberF16,
+} from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run, ShaderBuilder } from '../expression.js';
+
+import { unary } from './unary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('unary/bool_conversion', {
+ bool: () => {
+ return [
+ { input: bool(true), expected: bool(true) },
+ { input: bool(false), expected: bool(false) },
+ ];
+ },
+ u32: () => {
+ return fullU32Range().map(u => {
+ return { input: u32(u), expected: u === 0 ? bool(false) : bool(true) };
+ });
+ },
+ i32: () => {
+ return fullI32Range().map(i => {
+ return { input: i32(i), expected: i === 0 ? bool(false) : bool(true) };
+ });
+ },
+ f32: () => {
+ return fullF32Range().map(f => {
+ const expected: Scalar[] = [];
+ if (f !== 0) {
+ expected.push(bool(true));
+ }
+ if (isSubnormalNumberF32(f)) {
+ expected.push(bool(false));
+ }
+ return { input: f32(f), expected: anyOf(...expected) };
+ });
+ },
+ f16: () => {
+ return fullF16Range().map(f => {
+ const expected: Scalar[] = [];
+ if (f !== 0) {
+ expected.push(bool(true));
+ }
+ if (isSubnormalNumberF16(f)) {
+ expected.push(bool(false));
+ }
+ return { input: f16(f), expected: anyOf(...expected) };
+ });
+ },
+});
+
+/** Generate expression builder based on how the test case is to be vectorized */
+function vectorizeToExpression(vectorize: undefined | 2 | 3 | 4): ShaderBuilder {
+ return vectorize === undefined ? unary('bool') : unary(`vec${vectorize}<bool>`);
+}
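+
+// Given the imported `unary` builder, vectorizeToExpression(undefined) produces
+// expressions of the form `bool(x)`, while e.g. vectorizeToExpression(3) produces
+// `vec3<bool>(x)`, so the same case list exercises both scalar and vector constructors.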
+
+g.test('bool')
+ .specURL('https://www.w3.org/TR/WGSL/#value-constructor-builtin-function')
+ .desc(
+ `
+bool(e), where e is a bool
+
+Identity operation
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('bool');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeBool], TypeBool, t.params, cases);
+ });
+
+g.test('u32')
+ .specURL('https://www.w3.org/TR/WGSL/#bool-builtin')
+ .desc(
+ `
+bool(e), where e is a u32
+
+Coercion to boolean.
+The result is false if e is 0, and true otherwise.
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('u32');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeU32], TypeBool, t.params, cases);
+ });
+
+g.test('i32')
+ .specURL('https://www.w3.org/TR/WGSL/#value-constructor-builtin-function')
+ .desc(
+ `
+bool(e), where e is a i32
+
+Coercion to boolean.
+The result is false if e is 0, and true otherwise.
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('i32');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeI32], TypeBool, t.params, cases);
+ });
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#value-constructor-builtin-function')
+ .desc(
+ `
+bool(e), where e is a f32
+
+Coercion to boolean.
+The result is false if e is 0.0 or -0.0, and true otherwise.
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('f32');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeF32], TypeBool, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#value-constructor-builtin-function')
+ .desc(
+ `
+bool(e), where e is a f16
+
+Coercion to boolean.
+The result is false if e is 0.0 or -0.0, and true otherwise.
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeF16], TypeBool, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/bool_logical.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/bool_logical.spec.ts
new file mode 100644
index 0000000000..01eaaab43a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/bool_logical.spec.ts
@@ -0,0 +1,33 @@
+export const description = `
+Execution Tests for the boolean unary logical expression operations
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { bool, TypeBool } from '../../../../util/conversion.js';
+import { allInputSources, run } from '../expression.js';
+
+import { unary } from './unary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('negation')
+ .specURL('https://www.w3.org/TR/WGSL/#logical-expr')
+ .desc(
+ `
+Expression: !e
+
+Logical negation. The result is true when e is false and false when e is true. Component-wise when T is a vector.
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = [
+ { input: bool(true), expected: bool(false) },
+ { input: bool(false), expected: bool(true) },
+ ];
+
+ await run(t, unary('!'), [TypeBool], TypeBool, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/f16_arithmetic.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/f16_arithmetic.spec.ts
new file mode 100644
index 0000000000..83d7579c07
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/f16_arithmetic.spec.ts
@@ -0,0 +1,44 @@
+export const description = `
+Execution Tests for the f16 arithmetic unary expression operations
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeF16 } from '../../../../util/conversion.js';
+import { FP } from '../../../../util/floating_point.js';
+import { fullF16Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { unary } from './unary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('unary/f16_arithmetic', {
+ negation: () => {
+ return FP.f16.generateScalarToIntervalCases(
+ fullF16Range({ neg_norm: 250, neg_sub: 20, pos_sub: 20, pos_norm: 250 }),
+ 'unfiltered',
+ FP.f16.negationInterval
+ );
+ },
+});
+
+g.test('negation')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: -x
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get('negation');
+ await run(t, unary('-'), [TypeF16], TypeF16, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/f16_conversion.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/f16_conversion.spec.ts
new file mode 100644
index 0000000000..9eb84f0270
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/f16_conversion.spec.ts
@@ -0,0 +1,301 @@
+export const description = `
+Execution Tests for the f16 conversion operations
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import {
+ bool,
+ f16,
+ i32,
+ TypeBool,
+ TypeF32,
+ TypeF16,
+ TypeI32,
+ TypeMat,
+ TypeU32,
+ u32,
+} from '../../../../util/conversion.js';
+import { FP, FPInterval } from '../../../../util/floating_point.js';
+import {
+ fullF32Range,
+ fullF16Range,
+ fullI32Range,
+ fullU32Range,
+ sparseMatrixF32Range,
+ sparseMatrixF16Range,
+} from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run, ShaderBuilder } from '../expression.js';
+
+import { unary } from './unary.js';
+
+export const g = makeTestGroup(GPUTest);
+
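+// Finite f16 bounds, expressed as an f32 interval. The *_const cases below filter
+// their inputs to this interval, since inputs outside it (e.g. u32 values above
+// 65504, the largest finite f16) would be rejected during const evaluation rather
+// than producing a runtime result.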
+const f16FiniteRangeInterval = new FPInterval(
+ 'f32',
+ FP.f16.constants().negative.min,
+ FP.f16.constants().positive.max
+);
+
+// Cases: f32_matCxR_[non_]const
+// Note that f32 values may not be exactly representable in f16 and/or may be out of f16 range.
+const f32_mat_cases = ([2, 3, 4] as const)
+ .flatMap(cols =>
+ ([2, 3, 4] as const).flatMap(rows =>
+ ([true, false] as const).map(nonConst => ({
+ [`f32_mat${cols}x${rows}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateMatrixToMatrixCases(
+ sparseMatrixF32Range(cols, rows),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f16.correctlyRoundedMatrix
+ );
+ },
+ }))
+ )
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+// Cases: f16_matCxR_[non_]const
+const f16_mat_cases = ([2, 3, 4] as const)
+ .flatMap(cols =>
+ ([2, 3, 4] as const).flatMap(rows =>
+ ([true, false] as const).map(nonConst => ({
+ [`f16_mat${cols}x${rows}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ // Input matrix is of f16 types, use f16.generateMatrixToMatrixCases.
+ return FP.f16.generateMatrixToMatrixCases(
+ sparseMatrixF16Range(cols, rows),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f16.correctlyRoundedMatrix
+ );
+ },
+ }))
+ )
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('unary/f16_conversion', {
+ bool: () => {
+ return [
+ { input: bool(true), expected: f16(1.0) },
+ { input: bool(false), expected: f16(0.0) },
+ ];
+ },
+ u32_non_const: () => {
+ return [...fullU32Range(), 65504].map(u => {
+ return { input: u32(u), expected: FP.f16.correctlyRoundedInterval(u) };
+ });
+ },
+ u32_const: () => {
+ return [...fullU32Range(), 65504]
+ .filter(v => f16FiniteRangeInterval.contains(v))
+ .map(u => {
+ return { input: u32(u), expected: FP.f16.correctlyRoundedInterval(u) };
+ });
+ },
+ i32_non_const: () => {
+ return [...fullI32Range(), 65504, -65504].map(i => {
+ return { input: i32(i), expected: FP.f16.correctlyRoundedInterval(i) };
+ });
+ },
+ i32_const: () => {
+ return [...fullI32Range(), 65504, -65504]
+ .filter(v => f16FiniteRangeInterval.contains(v))
+ .map(i => {
+ return { input: i32(i), expected: FP.f16.correctlyRoundedInterval(i) };
+ });
+ },
+  // Note that f32 values may not be exactly representable in f16 and/or may be out of f16 range.
+ f32_non_const: () => {
+ return FP.f32.generateScalarToIntervalCases(
+ [...fullF32Range(), 65535.996, -65535.996],
+ 'unfiltered',
+ FP.f16.correctlyRoundedInterval
+ );
+ },
+ f32_const: () => {
+ return FP.f32.generateScalarToIntervalCases(
+ [...fullF32Range(), 65535.996, -65535.996],
+ 'finite',
+ FP.f16.correctlyRoundedInterval
+ );
+ },
+  // Conversion from f16 to f16 is an identity operation.
+ f16: () => {
+ return fullF16Range().map(f => {
+ return { input: f16(f), expected: FP.f16.correctlyRoundedInterval(f) };
+ });
+ },
+ ...f32_mat_cases,
+ ...f16_mat_cases,
+});
+
+/** Generate a ShaderBuilder based on how the test case is to be vectorized */
+function vectorizeToExpression(vectorize: undefined | 2 | 3 | 4): ShaderBuilder {
+ return vectorize === undefined ? unary('f16') : unary(`vec${vectorize}<f16>`);
+}
+
+/** Generate a ShaderBuilder for a matrix of the provided dimensions */
+function matrixExpression(cols: number, rows: number): ShaderBuilder {
+ return unary(`mat${cols}x${rows}<f16>`);
+}
+
+g.test('bool')
+ .specURL('https://www.w3.org/TR/WGSL/#value-constructor-builtin-function')
+ .desc(
+ `
+f16(e), where e is a bool
+
+The result is 1.0 if e is true and 0.0 otherwise
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get('bool');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeBool], TypeF16, t.params, cases);
+ });
+
+g.test('u32')
+ .specURL('https://www.w3.org/TR/WGSL/#bool-builtin')
+ .desc(
+ `
+f16(e), where e is a u32
+
+Converted to f16, +/-Inf if out of range
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'u32_const' : 'u32_non_const');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeU32], TypeF16, t.params, cases);
+ });
+
+g.test('i32')
+ .specURL('https://www.w3.org/TR/WGSL/#value-constructor-builtin-function')
+ .desc(
+ `
+f16(e), where e is a i32
+
+Converted to f16, +/-Inf if out of range
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'i32_const' : 'i32_non_const');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeI32], TypeF16, t.params, cases);
+ });
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#value-constructor-builtin-function')
+ .desc(
+ `
+f16(e), where e is a f32
+
+Correctly rounded to f16
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get(t.params.inputSource === 'const' ? 'f32_const' : 'f32_non_const');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeF32], TypeF16, t.params, cases);
+ });
+
+g.test('f32_mat')
+ .specURL('https://www.w3.org/TR/WGSL/#matrix-builtin-functions')
+ .desc(`f32 matrix to f16 matrix tests`)
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(
+ t.params.inputSource === 'const'
+ ? `f32_mat${cols}x${rows}_const`
+ : `f32_mat${cols}x${rows}_non_const`
+ );
+ await run(
+ t,
+      matrixExpression(cols, rows),
+ [TypeMat(cols, rows, TypeF32)],
+ TypeMat(cols, rows, TypeF16),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#value-constructor-builtin-function')
+ .desc(
+ `
+f16(e), where e is a f16
+
+Identical.
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeF16], TypeF16, t.params, cases);
+ });
+
+g.test('f16_mat')
+ .specURL('https://www.w3.org/TR/WGSL/#matrix-builtin-functions')
+ .desc(`f16 matrix to f16 matrix tests, expected identical`)
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(
+ t.params.inputSource === 'const'
+ ? `f16_mat${cols}x${rows}_const`
+ : `f16_mat${cols}x${rows}_non_const`
+ );
+ await run(
+ t,
+      matrixExpression(cols, rows),
+ [TypeMat(cols, rows, TypeF16)],
+ TypeMat(cols, rows, TypeF16),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/f32_arithmetic.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/f32_arithmetic.spec.ts
new file mode 100644
index 0000000000..f53cff46d8
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/f32_arithmetic.spec.ts
@@ -0,0 +1,41 @@
+export const description = `
+Execution Tests for the f32 arithmetic unary expression operations
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { TypeF32 } from '../../../../util/conversion.js';
+import { FP } from '../../../../util/floating_point.js';
+import { fullF32Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { unary } from './unary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('unary/f32_arithmetic', {
+ negation: () => {
+ return FP.f32.generateScalarToIntervalCases(
+ fullF32Range({ neg_norm: 250, neg_sub: 20, pos_sub: 20, pos_norm: 250 }),
+ 'unfiltered',
+ FP.f32.negationInterval
+ );
+ },
+});
+
+g.test('negation')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: -x
+Accuracy: Correctly rounded
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('negation');
+ await run(t, unary('-'), [TypeF32], TypeF32, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/f32_conversion.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/f32_conversion.spec.ts
new file mode 100644
index 0000000000..223b13c2d5
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/f32_conversion.spec.ts
@@ -0,0 +1,257 @@
+export const description = `
+Execution Tests for the f32 conversion operations
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import {
+ bool,
+ f32,
+ f16,
+ i32,
+ TypeBool,
+ TypeF32,
+ TypeF16,
+ TypeI32,
+ TypeMat,
+ TypeU32,
+ u32,
+} from '../../../../util/conversion.js';
+import { FP } from '../../../../util/floating_point.js';
+import {
+ fullF32Range,
+ fullF16Range,
+ fullI32Range,
+ fullU32Range,
+ sparseMatrixF32Range,
+ sparseMatrixF16Range,
+} from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run, ShaderBuilder } from '../expression.js';
+
+import { unary } from './unary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Cases: f32_matCxR_[non_]const
+const f32_mat_cases = ([2, 3, 4] as const)
+ .flatMap(cols =>
+ ([2, 3, 4] as const).flatMap(rows =>
+ ([true, false] as const).map(nonConst => ({
+ [`f32_mat${cols}x${rows}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ return FP.f32.generateMatrixToMatrixCases(
+ sparseMatrixF32Range(cols, rows),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f32.correctlyRoundedMatrix
+ );
+ },
+ }))
+ )
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+// Cases: f16_matCxR_[non_]const
+// Note that all f16 values are exactly representable in f32.
+const f16_mat_cases = ([2, 3, 4] as const)
+ .flatMap(cols =>
+ ([2, 3, 4] as const).flatMap(rows =>
+ ([true, false] as const).map(nonConst => ({
+ [`f16_mat${cols}x${rows}_${nonConst ? 'non_const' : 'const'}`]: () => {
+ // Input matrix is of f16 types, use f16.generateMatrixToMatrixCases.
+ return FP.f16.generateMatrixToMatrixCases(
+ sparseMatrixF16Range(cols, rows),
+ nonConst ? 'unfiltered' : 'finite',
+ FP.f32.correctlyRoundedMatrix
+ );
+ },
+ }))
+ )
+ )
+ .reduce((a, b) => ({ ...a, ...b }), {});
+
+export const d = makeCaseCache('unary/f32_conversion', {
+ bool: () => {
+ return [
+ { input: bool(true), expected: f32(1.0) },
+ { input: bool(false), expected: f32(0.0) },
+ ];
+ },
+ u32: () => {
+ return fullU32Range().map(u => {
+ return { input: u32(u), expected: FP.f32.correctlyRoundedInterval(u) };
+ });
+ },
+ i32: () => {
+ return fullI32Range().map(i => {
+ return { input: i32(i), expected: FP.f32.correctlyRoundedInterval(i) };
+ });
+ },
+ f32: () => {
+ return fullF32Range().map(f => {
+ return { input: f32(f), expected: FP.f32.correctlyRoundedInterval(f) };
+ });
+ },
+ // All f16 values are exactly representable in f32.
+ f16: () => {
+ return fullF16Range().map(f => {
+ return { input: f16(f), expected: FP.f32.correctlyRoundedInterval(f) };
+ });
+ },
+ ...f32_mat_cases,
+ ...f16_mat_cases,
+});
+
+/** Generate a ShaderBuilder based on how the test case is to be vectorized */
+function vectorizeToExpression(vectorize: undefined | 2 | 3 | 4): ShaderBuilder {
+ return vectorize === undefined ? unary('f32') : unary(`vec${vectorize}<f32>`);
+}
+
+/** Generate a ShaderBuilder for a matrix of the provided dimensions */
+function matrixExpression(cols: number, rows: number): ShaderBuilder {
+ return unary(`mat${cols}x${rows}<f32>`);
+}
+
+g.test('bool')
+ .specURL('https://www.w3.org/TR/WGSL/#value-constructor-builtin-function')
+ .desc(
+ `
+f32(e), where e is a bool
+
+The result is 1.0 if e is true and 0.0 otherwise
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('bool');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeBool], TypeF32, t.params, cases);
+ });
+
+g.test('u32')
+ .specURL('https://www.w3.org/TR/WGSL/#bool-builtin')
+ .desc(
+ `
+f32(e), where e is a u32
+
+Converted to f32
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('u32');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeU32], TypeF32, t.params, cases);
+ });
+
+g.test('i32')
+ .specURL('https://www.w3.org/TR/WGSL/#value-constructor-builtin-function')
+ .desc(
+ `
+f32(e), where e is a i32
+
+Converted to f32
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('i32');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeI32], TypeF32, t.params, cases);
+ });
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#value-constructor-builtin-function')
+ .desc(
+ `
+f32(e), where e is a f32
+
+Identity operation
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('f32');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeF32], TypeF32, t.params, cases);
+ });
+
+g.test('f32_mat')
+ .specURL('https://www.w3.org/TR/WGSL/#matrix-builtin-functions')
+  .desc(`f32 matrix to f32 matrix tests`)
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(
+ t.params.inputSource === 'const'
+ ? `f32_mat${cols}x${rows}_const`
+ : `f32_mat${cols}x${rows}_non_const`
+ );
+ await run(
+ t,
+      matrixExpression(cols, rows),
+ [TypeMat(cols, rows, TypeF32)],
+ TypeMat(cols, rows, TypeF32),
+ t.params,
+ cases
+ );
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#value-constructor-builtin-function')
+ .desc(
+ `
+f32(e), where e is a f16
+
+Expect the same value, since all f16 values are exactly representable in f32.
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeF16], TypeF32, t.params, cases);
+ });
+
+g.test('f16_mat')
+ .specURL('https://www.w3.org/TR/WGSL/#matrix-builtin-functions')
+ .desc(`f16 matrix to f32 matrix tests`)
+ .params(u =>
+ u
+ .combine('inputSource', allInputSources)
+ .combine('cols', [2, 3, 4] as const)
+ .combine('rows', [2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(async t => {
+ const cols = t.params.cols;
+ const rows = t.params.rows;
+ const cases = await d.get(
+ t.params.inputSource === 'const'
+ ? `f16_mat${cols}x${rows}_const`
+ : `f16_mat${cols}x${rows}_non_const`
+ );
+ await run(
+ t,
+      matrixExpression(cols, rows),
+ [TypeMat(cols, rows, TypeF16)],
+ TypeMat(cols, rows, TypeF32),
+ t.params,
+ cases
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/i32_arithmetic.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/i32_arithmetic.spec.ts
new file mode 100644
index 0000000000..14519b8967
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/i32_arithmetic.spec.ts
@@ -0,0 +1,37 @@
+export const description = `
+Execution Tests for the i32 arithmetic unary expression operations
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { i32, TypeI32 } from '../../../../util/conversion.js';
+import { fullI32Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { unary } from './unary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('unary/i32_arithmetic', {
+ negation: () => {
+ return fullI32Range().map(e => {
+ return { input: i32(e), expected: i32(-e) };
+ });
+ },
+});
+
+g.test('negation')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+Expression: -x
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('negation');
+ await run(t, unary('-'), [TypeI32], TypeI32, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/i32_complement.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/i32_complement.spec.ts
new file mode 100644
index 0000000000..e8bda51b51
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/i32_complement.spec.ts
@@ -0,0 +1,37 @@
+export const description = `
+Execution Tests for the i32 bitwise complement operation
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { i32, TypeI32 } from '../../../../util/conversion.js';
+import { fullI32Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { unary } from './unary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('unary/i32_complement', {
+ complement: () => {
+ return fullI32Range().map(e => {
+ return { input: i32(e), expected: i32(~e) };
+ });
+ },
+});
+
+g.test('i32_complement')
+ .specURL('https://www.w3.org/TR/WGSL/#bit-expr')
+ .desc(
+ `
+Expression: ~x
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('complement');
+ await run(t, unary('~'), [TypeI32], TypeI32, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/i32_conversion.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/i32_conversion.spec.ts
new file mode 100644
index 0000000000..a77aa0e4d3
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/i32_conversion.spec.ts
@@ -0,0 +1,196 @@
+export const description = `
+Execution Tests for the i32 conversion operations
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { kValue } from '../../../../util/constants.js';
+import {
+ bool,
+ f32,
+ f16,
+ i32,
+ TypeBool,
+ TypeF32,
+ TypeF16,
+ TypeI32,
+ TypeU32,
+ u32,
+} from '../../../../util/conversion.js';
+import {
+ fullF32Range,
+ fullF16Range,
+ fullI32Range,
+ fullU32Range,
+ quantizeToF32,
+ quantizeToF16,
+} from '../../../../util/math.js';
+import { reinterpretU32AsI32 } from '../../../../util/reinterpret.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run, ShaderBuilder } from '../expression.js';
+
+import { unary } from './unary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('unary/i32_conversion', {
+ bool: () => {
+ return [
+ { input: bool(true), expected: i32(1) },
+ { input: bool(false), expected: i32(0) },
+ ];
+ },
+ u32: () => {
+ return fullU32Range().map(u => {
+ return { input: u32(u), expected: i32(reinterpretU32AsI32(u)) };
+ });
+ },
+ i32: () => {
+ return fullI32Range().map(i => {
+ return { input: i32(i), expected: i32(i) };
+ });
+ },
+ f32: () => {
+ return fullF32Range().map(f => {
+ // Handles zeros and subnormals
+ if (Math.abs(f) < 1.0) {
+ return { input: f32(f), expected: i32(0) };
+ }
+
+ if (f <= kValue.i32.negative.min) {
+ return { input: f32(f), expected: i32(kValue.i32.negative.min) };
+ }
+
+ if (f >= kValue.i32.positive.max) {
+ return { input: f32(f), expected: i32(kValue.i32.positive.max) };
+ }
+
+      // All f32 values no larger than 2^24 have a precise integer part and a fractional part; just
+      // trunc towards 0 to get the resulting integer.
+ if (Math.abs(f) <= 2 ** 24) {
+ return { input: f32(f), expected: i32(Math.trunc(f)) };
+ }
+
+      // All f32s between 2 ** 24 and kValue.i32.negative.min/.positive.max are
+      // integers, so in theory one could use them directly, except that the numbers
+      // are actually f64 internally, so they need to be quantized to f32 first.
+      // Cannot just use trunc here, since that might produce an i32 value that
+      // is precise in f64, but not in f32.
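+      // For example, 2 ** 24 + 1 = 16777217 is not representable in f32 and quantizes
+      // to 16777216, so the expectation must be built from the quantized value.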
+ return { input: f32(f), expected: i32(quantizeToF32(f)) };
+ });
+ },
+ f16: () => {
+ // Note that finite f16 values are always in range of i32.
+ return fullF16Range().map(f => {
+ // Handles zeros and subnormals
+ if (Math.abs(f) < 1.0) {
+ return { input: f16(f), expected: i32(0) };
+ }
+
+      // All f16 values no larger than 2^12 have a precise integer part and a fractional part; just
+      // trunc towards 0 to get the resulting integer.
+ if (Math.abs(f) <= 2 ** 12) {
+ return { input: f16(f), expected: i32(Math.trunc(f)) };
+ }
+
+      // All f16s larger than 2 ** 12 are integers, so in theory one could use them directly, except
+      // that the numbers are actually f64 internally, so they need to be quantized to f16 first.
+      // Cannot just use trunc here, since that might produce an i32 value that is precise in f64,
+      // but not in f16.
+ return { input: f16(f), expected: i32(quantizeToF16(f)) };
+ });
+ },
+});
+
+/** Generate a ShaderBuilder based on how the test case is to be vectorized */
+function vectorizeToExpression(vectorize: undefined | 2 | 3 | 4): ShaderBuilder {
+ return vectorize === undefined ? unary('i32') : unary(`vec${vectorize}<i32>`);
+}
+
+g.test('bool')
+ .specURL('https://www.w3.org/TR/WGSL/#value-constructor-builtin-function')
+ .desc(
+ `
+i32(e), where e is a bool
+
+The result is 1i if e is true and 0i otherwise
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('bool');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeBool], TypeI32, t.params, cases);
+ });
+
+g.test('u32')
+ .specURL('https://www.w3.org/TR/WGSL/#bool-builtin')
+ .desc(
+ `
+i32(e), where e is a u32
+
+Reinterpretation of bits
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('u32');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeU32], TypeI32, t.params, cases);
+ });
+
+g.test('i32')
+ .specURL('https://www.w3.org/TR/WGSL/#value-constructor-builtin-function')
+ .desc(
+ `
+i32(e), where e is a i32
+
+Identity operation
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('i32');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeI32], TypeI32, t.params, cases);
+ });
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#value-constructor-builtin-function')
+ .desc(
+ `
+i32(e), where e is a f32
+
+e is converted to i32, rounding towards zero
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('f32');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeF32], TypeI32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#value-constructor-builtin-function')
+ .desc(
+ `
+i32(e), where e is a f16
+
+e is converted to i32, rounding towards zero
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeF16], TypeI32, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/u32_complement.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/u32_complement.spec.ts
new file mode 100644
index 0000000000..446e0918bd
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/u32_complement.spec.ts
@@ -0,0 +1,37 @@
+export const description = `
+Execution Tests for the u32 bitwise complement operation
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { u32, TypeU32 } from '../../../../util/conversion.js';
+import { fullU32Range } from '../../../../util/math.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run } from '../expression.js';
+
+import { unary } from './unary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('unary/u32_complement', {
+ complement: () => {
+ return fullU32Range().map(e => {
+ return { input: u32(e), expected: u32(~e) };
+ });
+ },
+});
+
+g.test('u32_complement')
+ .specURL('https://www.w3.org/TR/WGSL/#bit-expr')
+ .desc(
+ `
+Expression: ~x
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('complement');
+ await run(t, unary('~'), [TypeU32], TypeU32, t.params, cases);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/u32_conversion.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/u32_conversion.spec.ts
new file mode 100644
index 0000000000..87dc6e7a5d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/u32_conversion.spec.ts
@@ -0,0 +1,206 @@
+export const description = `
+Execution Tests for the u32 conversion operations
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../../gpu_test.js';
+import { kValue } from '../../../../util/constants.js';
+import {
+ bool,
+ f32,
+ f16,
+ i32,
+ TypeBool,
+ TypeF32,
+ TypeF16,
+ TypeI32,
+ TypeU32,
+ u32,
+} from '../../../../util/conversion.js';
+import {
+ fullF32Range,
+ fullF16Range,
+ fullI32Range,
+ fullU32Range,
+ quantizeToF32,
+ quantizeToF16,
+} from '../../../../util/math.js';
+import { reinterpretI32AsU32 } from '../../../../util/reinterpret.js';
+import { makeCaseCache } from '../case_cache.js';
+import { allInputSources, run, ShaderBuilder } from '../expression.js';
+
+import { unary } from './unary.js';
+
+export const g = makeTestGroup(GPUTest);
+
+export const d = makeCaseCache('unary/u32_conversion', {
+ bool: () => {
+ return [
+ { input: bool(true), expected: u32(1) },
+ { input: bool(false), expected: u32(0) },
+ ];
+ },
+ u32: () => {
+ return fullU32Range().map(u => {
+ return { input: u32(u), expected: u32(u) };
+ });
+ },
+ i32: () => {
+ return fullI32Range().map(i => {
+ return { input: i32(i), expected: u32(reinterpretI32AsU32(i)) };
+ });
+ },
+ f32: () => {
+ return fullF32Range().map(f => {
+ // Handles zeros, subnormals, and negatives
+ if (f < 1.0) {
+ return { input: f32(f), expected: u32(0) };
+ }
+
+ if (f >= kValue.u32.max) {
+ return { input: f32(f), expected: u32(kValue.u32.max) };
+ }
+
+      // All f32 values no larger than 2^24 have a precise integer part and a fractional part; just
+      // trunc towards 0 to get the resulting integer.
+ if (f <= 2 ** 24) {
+ return { input: f32(f), expected: u32(Math.floor(f)) };
+ }
+
+      // All f32s between 2 ** 24 and kValue.u32.max are integers, so in theory
+      // one could use them directly, except that the numbers are actually f64
+      // internally, so they need to be quantized to f32 first.
+      // Cannot just use floor here, since that might produce a u32 value that
+      // is precise in f64, but not in f32.
+ return { input: f32(f), expected: u32(quantizeToF32(f)) };
+ });
+ },
+ f16: () => {
+ // Note that all positive finite f16 values are in range of u32.
+ return fullF16Range().map(f => {
+ // Handles zeros, subnormals, and negatives
+ if (f < 1.0) {
+ return { input: f16(f), expected: u32(0) };
+ }
+
+      // All f16 values no larger than 2^12 have a precise integer part and a fractional part; just
+      // trunc towards 0 to get the resulting integer.
+ if (f <= 2 ** 12) {
+ return { input: f16(f), expected: u32(Math.trunc(f)) };
+ }
+
+      // All f16s larger than 2 ** 12 are integers, so in theory one could use them directly, except
+      // that the numbers are actually f64 internally, so they need to be quantized to f16 first.
+      // Cannot just use trunc here, since that might produce a u32 value that is precise in f64,
+      // but not in f16.
+ return { input: f16(f), expected: u32(quantizeToF16(f)) };
+ });
+ },
+});
+
+/** Generate a ShaderBuilder based on how the test case is to be vectorized */
+function vectorizeToExpression(vectorize: undefined | 2 | 3 | 4): ShaderBuilder {
+ return vectorize === undefined ? unary('u32') : unary(`vec${vectorize}<u32>`);
+}
+
+g.test('bool')
+ .specURL('https://www.w3.org/TR/WGSL/#value-constructor-builtin-function')
+ .desc(
+ `
+u32(e), where e is a bool
+
+The result is 1u if e is true and 0u otherwise
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('bool');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeBool], TypeU32, t.params, cases);
+ });
+
+g.test('u32')
+ .specURL('https://www.w3.org/TR/WGSL/#bool-builtin')
+ .desc(
+ `
+u32(e), where e is a u32
+
+Identity operation
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('u32');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeU32], TypeU32, t.params, cases);
+ });
+
+g.test('i32')
+ .specURL('https://www.w3.org/TR/WGSL/#value-constructor-builtin-function')
+ .desc(
+ `
+u32(e), where e is a i32
+
+Reinterpretation of bits
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('i32');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeI32], TypeU32, t.params, cases);
+ });
+
+g.test('f32')
+ .specURL('https://www.w3.org/TR/WGSL/#value-constructor-builtin-function')
+ .desc(
+ `
+u32(e), where e is a f32
+
+e is converted to u32, rounding towards zero
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(async t => {
+ const cases = await d.get('f32');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeF32], TypeU32, t.params, cases);
+ });
+
+g.test('f16')
+ .specURL('https://www.w3.org/TR/WGSL/#value-constructor-builtin-function')
+ .desc(
+ `
+u32(e), where e is a f16
+
+e is converted to u32, rounding towards zero
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(async t => {
+ const cases = await d.get('f16');
+ await run(t, vectorizeToExpression(t.params.vectorize), [TypeF16], TypeU32, t.params, cases);
+ });
+
+g.test('abstract_int')
+ .specURL('https://www.w3.org/TR/WGSL/#value-constructor-builtin-function')
+ .desc(
+ `
+u32(e), where e is an AbstractInt
+
+Identity operation if e can be represented in u32; otherwise it produces a shader-creation error
+`
+ )
+ .params(u =>
+ u.combine('inputSource', allInputSources).combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .unimplemented();
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/unary.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/unary.ts
new file mode 100644
index 0000000000..160e465178
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/expression/unary/unary.ts
@@ -0,0 +1,15 @@
+import {
+ abstractFloatShaderBuilder,
+ basicExpressionBuilder,
+ ShaderBuilder,
+} from '../expression.js';
+
+/** @returns a ShaderBuilder that evaluates a prefix unary operation */
+export function unary(op: string): ShaderBuilder {
+ return basicExpressionBuilder(value => `${op}(${value})`);
+}
+
+/** @returns a ShaderBuilder that evaluates a prefix unary operation that returns AbstractFloats */
+export function abstractUnary(op: string): ShaderBuilder {
+ return abstractFloatShaderBuilder(value => `${op}(${value})`);
+}
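+
+// For example, unary('-') builds expressions of the form `-(x)`, and abstractUnary('-')
+// builds the same expression through the AbstractFloat-aware shader builder.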
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/float_parse.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/float_parse.spec.ts
new file mode 100644
index 0000000000..c532bfe419
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/float_parse.spec.ts
@@ -0,0 +1,131 @@
+export const description = `
+Execution Tests for float parsing cases
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { keysOf } from '../../../common/util/data_tables.js';
+import { iterRange } from '../../../common/util/util.js';
+import { GPUTest } from '../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+/**
+ * Run a shader and check that the buffer output matches expectations.
+ *
+ * @param t The test object
+ * @param wgsl The shader source
+ * @param expected The array of expected values after running the shader
+ */
+function runShaderTest(t: GPUTest, wgsl: string, expected: Float32Array): void {
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({ code: wgsl }),
+ entryPoint: 'main',
+ },
+ });
+
+ // Allocate a buffer and fill it with 0xdeadbeef words.
+ const outputBuffer = t.makeBufferWithContents(
+ new Float32Array([...iterRange(expected.length, _i => 0xdeadbeef)]),
+ GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC
+ );
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer: outputBuffer } }],
+ });
+
+ // Run the shader.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(1);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+
+  // Check that the sentinel values were overwritten with the expected results.
+ t.expectGPUBufferValuesEqual(outputBuffer, expected);
+}
+
+const kTestFloats = {
+ small_pos_zero_exp: {
+ src:
+ '0.' +
+ '00000000000000000000000000000000000000000000000000' + // 50
+ '00000000000000000000000000000000000000000000000000' + // 100
+ '00000000000000000000000000000000000000000000000000' + // 150
+ '00000000000000000000000000000000000000000000000000' + // 200
+ '00000000000000000000000000000000000000000000000000' + // 250
+ '00000000000000000000000000000000000000000000000000' + // 300
+ '00000000000000000000000000000000000000000000000000' + // 350
+ '1e+0',
+ result: 0.0,
+ },
+ small_pos_non_zero_exp: {
+ src:
+ '0.' +
+ '00000000000000000000000000000000000000000000000000' + // 50
+ '00000000000000000000000000000000000000000000000000' + // 100
+ '00000000000000000000000000000000000000000000000000' + // 150
+ '00000000000000000000000000000000000000000000000000' + // 200
+ '00000000000000000000000000000000000000000000000000' + // 250
+ '00000000000000000000000000000000000000000000000000' + // 300
+ '00000000000000000000000000000000000000000000000000' + // 350
+ '1e+10',
+ result: 0.0,
+ },
+ pos_exp_neg_result: {
+ src:
+ '0.' +
+ '00000000000000000000000000000000000000000000000000' + // 50
+ '00000000000000000000000000000000000000000000000000' + // 100
+ '00000000000000000000000000000000000000000000000000' + // 150
+ '00000000000000000000000000000000000000000000000000' + // 200
+ '00000000000000000000000000000000000000000000000000' + // 250
+ '00000000000000000000000000000000000000000000000000' + // 300
+ '00000000000000000000000000000000000000000000000000' + // 350
+ '1e+300',
+ result: 1e-51,
+ },
+ no_exp: {
+ src:
+ '0.' +
+ '00000000000000000000000000000000000000000000000000' + // 50
+ '00000000000000000000000000000000000000000000000000' + // 100
+ '00000000000000000000000000000000000000000000000000' + // 150
+ '00000000000000000000000000000000000000000000000000' + // 200
+ '00000000000000000000000000000000000000000000000000' + // 250
+ '00000000000000000000000000000000000000000000000000' + // 300
+ '00000000000000000000000000000000000000000000000000' + // 350
+ '1',
+ result: 0.0,
+ },
+ large_number_small_exp: {
+ src:
+ '1' +
+ '00000000000000000000000000000000000000000000000000' + // 50
+ '00000000000000000000000000000000000000000000000000' + // 100
+ '.0e-350',
+ result: 1e-251,
+ },
+};
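+// For reference, the expected results follow from the digit counts marked above: for
+// example, 'pos_exp_neg_result' parses 0.<350 zeros>1e+300, i.e. 1e-351 * 1e+300 = 1e-51,
+// while the 'small_pos_*' and 'no_exp' cases evaluate to magnitudes far smaller than any
+// non-zero f32 value, so they expect 0.0.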
+
+g.test('valid')
+ .desc(`Test that floats are parsed correctly`)
+ .params(u => u.combine('value', keysOf(kTestFloats)))
+ .fn(t => {
+ const data = kTestFloats[t.params.value];
+ const wgsl = `
+ struct S {
+ val: f32,
+ }
+ @group(0) @binding(0) var<storage, read_write> buffer : S;
+
+ @compute @workgroup_size(1)
+ fn main() {
+ buffer = S(${data.src});
+ }
+ `;
+ runShaderTest(t, wgsl, new Float32Array([data.result]));
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/call.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/call.spec.ts
new file mode 100644
index 0000000000..b86134a58c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/call.spec.ts
@@ -0,0 +1,83 @@
+export const description = `
+Flow control tests for function calls.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+import { runFlowControlTest } from './harness.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('call_basic')
+ .desc('Test that flow control enters a called function')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ f();
+ ${f.expect_order(2)}
+`,
+ extra: `
+fn f() {
+ ${f.expect_order(1)}
+}`,
+ }));
+ });
+
+g.test('call_nested')
+  .desc('Test that flow control enters nested function calls')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ a();
+ ${f.expect_order(6)}
+`,
+ extra: `
+fn a() {
+ ${f.expect_order(1)}
+ b();
+ ${f.expect_order(5)}
+}
+fn b() {
+ ${f.expect_order(2)}
+ c();
+ ${f.expect_order(4)}
+}
+fn c() {
+ ${f.expect_order(3)}
+}`,
+ }));
+ });
+
+g.test('call_repeated')
+  .desc('Test that flow control enters repeated function calls')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ a();
+ ${f.expect_order(10)}
+`,
+ extra: `
+fn a() {
+ ${f.expect_order(1)}
+ b();
+ ${f.expect_order(5)}
+ b();
+ ${f.expect_order(9)}
+}
+fn b() {
+ ${f.expect_order(2, 6)}
+ c();
+ ${f.expect_order(4, 8)}
+}
+fn c() {
+ ${f.expect_order(3, 7)}
+}`,
+ }));
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/complex.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/complex.spec.ts
new file mode 100644
index 0000000000..ed1a6e840b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/complex.spec.ts
@@ -0,0 +1,42 @@
+export const description = `
+Flow control tests for interesting complex cases.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+import { runFlowControlTest } from './harness.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('continue_in_switch_in_for_loop')
+ .desc('Test flow control for a continue statement in a switch, in a for-loop')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f =>
+ `
+ ${f.expect_order(0)}
+ for (var i = ${f.value(0)}; i < 3; i++) {
+ ${f.expect_order(1, 4, 6)}
+ switch (i) {
+ case 2: {
+ ${f.expect_order(7)}
+ break;
+ }
+ case 1: {
+ ${f.expect_order(5)}
+ continue;
+ }
+ default: {
+ ${f.expect_order(2)}
+ break;
+ }
+ }
+ ${f.expect_order(3, 8)}
+ }
+ ${f.expect_order(9)}
+`
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/eval_order.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/eval_order.spec.ts
new file mode 100644
index 0000000000..5a07646a0d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/eval_order.spec.ts
@@ -0,0 +1,1007 @@
+export const description = `
+Flow control tests for expression evaluation order.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+import { runFlowControlTest } from './harness.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('binary_op')
+ .desc('Test that a binary operator evaluates the LHS then the RHS')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ let l = lhs() + rhs();
+ ${f.expect_order(3)}
+`,
+ extra: `
+fn lhs() -> i32 {
+ ${f.expect_order(1)}
+ return 0;
+}
+fn rhs() -> i32 {
+ ${f.expect_order(2)}
+ return 0;
+}`,
+ }));
+ });
+
+g.test('binary_op_rhs_const')
+ .desc('Test that a binary operator evaluates the LHS, when the RHS is a constant expression')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ let l = lhs() + rhs();
+ ${f.expect_order(2)}
+`,
+ extra: `
+fn lhs() -> i32 {
+ ${f.expect_order(1)}
+ return 0;
+}
+fn rhs() -> i32 {
+ return 0;
+}`,
+ }));
+ });
+
+g.test('binary_op_lhs_const')
+ .desc('Test that a binary operator evaluates the RHS, when the LHS is a constant expression')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ let l = lhs() + rhs();
+ ${f.expect_order(2)}
+`,
+ extra: `
+fn lhs() -> i32 {
+ return 0;
+}
+fn rhs() -> i32 {
+ ${f.expect_order(1)}
+ return 0;
+}`,
+ }));
+ });
+
+g.test('binary_op_chain')
+ .desc('Test that a binary operator chain evaluates left-to-right')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ let l = a() + b() - c() * d();
+ ${f.expect_order(5)}
+`,
+ extra: `
+fn a() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+fn b() -> i32 {
+ ${f.expect_order(2)}
+ return 1;
+}
+fn c() -> i32 {
+ ${f.expect_order(3)}
+ return 1;
+}
+fn d() -> i32 {
+ ${f.expect_order(4)}
+ return 1;
+}`,
+ }));
+ });
+
+g.test('binary_op_chain_R_C_C_C')
+ .desc(
+ 'Test evaluation order of a binary operator chain with a runtime-expression for the left-most expression'
+ )
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ let l = f() + 1 + 2 + 3;
+ ${f.expect_order(2)}
+`,
+ extra: `
+fn f() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+`,
+ }));
+ });
+
+g.test('binary_op_chain_C_R_C_C')
+ .desc(
+    'Test evaluation order of a binary operator chain with a runtime-expression for the second-left-most expression'
+ )
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ let l = 1 + f() + 2 + 3;
+ ${f.expect_order(2)}
+ `,
+ extra: `
+fn f() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+ `,
+ }));
+ });
+
+g.test('binary_op_chain_C_C_R_C')
+ .desc(
+    'Test evaluation order of a binary operator chain with a runtime-expression for the second-right-most expression'
+ )
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ let l = 1 + 2 + f() + 3;
+ ${f.expect_order(2)}
+`,
+ extra: `
+fn f() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+ `,
+ }));
+ });
+
+g.test('binary_op_chain_C_C_C_R')
+ .desc(
+ 'Test evaluation order of a binary operator chain with a runtime-expression for the right-most expression'
+ )
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ let l = 1 + 2 + 3 + f();
+ ${f.expect_order(2)}
+ `,
+ extra: `
+fn f() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+ `,
+ }));
+ });
+
+g.test('binary_op_parenthesized_expr')
+ .desc('Test that a parenthesized binary operator expression evaluates left-to-right')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ let x = (a() + b()) - (c() * d());
+ ${f.expect_order(5)}
+ let y = a() + (b() - c()) * d();
+ ${f.expect_order(10)}
+`,
+ extra: `
+fn a() -> i32 {
+ ${f.expect_order(1, 6)}
+ return 1;
+}
+fn b() -> i32 {
+ ${f.expect_order(2, 7)}
+ return 1;
+}
+fn c() -> i32 {
+ ${f.expect_order(3, 8)}
+ return 1;
+}
+fn d() -> i32 {
+ ${f.expect_order(4, 9)}
+ return 1;
+}`,
+ }));
+ });
+
+g.test('array_index')
+ .desc('Test that array indices are evaluated left-to-right')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ var arr : array<array<array<i32, 8>, 8>, 8>;
+ ${f.expect_order(0)}
+ let x = arr[a()][b()][c()];
+ ${f.expect_order(4)}
+`,
+ extra: `
+fn a() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+fn b() -> i32 {
+ ${f.expect_order(2)}
+ return 1;
+}
+fn c() -> i32 {
+ ${f.expect_order(3)}
+ return 1;
+}`,
+ }));
+ });
+
+g.test('array_index_lhs_assignment')
+ .desc(
+ 'Test that array indices are evaluated left-to-right, when indexing the LHS of an assignment'
+ )
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ var arr : array<array<array<i32, 8>, 8>, 8>;
+ ${f.expect_order(0)}
+ arr[a()][b()][c()] = ~d();
+ ${f.expect_order(5)}
+`,
+ extra: `
+fn a() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+fn b() -> i32 {
+ ${f.expect_order(2)}
+ return 1;
+}
+fn c() -> i32 {
+ ${f.expect_order(3)}
+ return 1;
+}
+fn d() -> i32 {
+ ${f.expect_order(4)}
+ return 1;
+}`,
+ }));
+ });
+
+g.test('array_index_lhs_member_assignment')
+ .desc(
+ 'Test that array indices are evaluated left-to-right, when indexing with member-accessors in the LHS of an assignment'
+ )
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ var arr : array<array<S, 8>, 8>;
+ ${f.expect_order(0)}
+ arr[a()][b()].member[c()] = d();
+ ${f.expect_order(5)}
+`,
+ extra: `
+struct S {
+ member : array<i32, 8>,
+}
+fn a() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+fn b() -> i32 {
+ ${f.expect_order(2)}
+ return 1;
+}
+fn c() -> i32 {
+ ${f.expect_order(3)}
+ return 1;
+}
+fn d() -> i32 {
+ ${f.expect_order(4)}
+ return 1;
+}`,
+ }));
+ });
+
+g.test('array_index_via_ptrs')
+ .desc('Test that array indices are evaluated in order, when used via pointers')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ var arr : array<array<array<i32, 8>, 8>, 8>;
+ ${f.expect_order(0)}
+ let p0 = &arr;
+ ${f.expect_order(1)}
+ let p1 = &(*p0)[a()];
+ ${f.expect_order(3)}
+ let p2 = &(*p1)[b()];
+ ${f.expect_order(5)}
+ let p3 = &(*p2)[c()];
+ ${f.expect_order(7)}
+ let p4 = *p3;
+`,
+ extra: `
+fn a() -> i32 {
+ ${f.expect_order(2)}
+ return 1;
+}
+fn b() -> i32 {
+ ${f.expect_order(4)}
+ return 1;
+}
+fn c() -> i32 {
+ ${f.expect_order(6)}
+ return 1;
+}`,
+ }));
+ });
+
+g.test('array_index_via_struct_members')
+ .desc('Test that array indices are evaluated in order, when accessed via structure members')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ var x : X;
+ ${f.expect_order(0)}
+ let r = x.y[a()].z[b()].a[c()];
+ ${f.expect_order(4)}
+`,
+ extra: `
+struct X {
+ y : array<Y, 3>,
+};
+struct Y {
+ z : array<Z, 3>,
+};
+struct Z {
+ a : array<i32, 3>,
+};
+fn a() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+fn b() -> i32 {
+ ${f.expect_order(2)}
+ return 1;
+}
+fn c() -> i32 {
+ ${f.expect_order(3)}
+ return 1;
+}`,
+ }));
+ });
+
+g.test('matrix_index')
+ .desc('Test that matrix indices are evaluated left-to-right')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ var mat : mat4x4<f32>;
+ ${f.expect_order(0)}
+ let x = mat[a()][b()];
+ ${f.expect_order(3)}
+`,
+ extra: `
+fn a() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+fn b() -> i32 {
+ ${f.expect_order(2)}
+ return 1;
+}`,
+ }));
+ });
+
+g.test('matrix_index_via_ptr')
+ .desc('Test that matrix indices are evaluated in order, when used via pointers')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ var mat : mat4x4<f32>;
+ ${f.expect_order(0)}
+ let p0 = &mat;
+ ${f.expect_order(1)}
+ let p1 = &(*p0)[a()];
+ ${f.expect_order(3)}
+ let v = (*p1)[b()];
+ ${f.expect_order(5)}
+`,
+ extra: `
+fn a() -> i32 {
+ ${f.expect_order(2)}
+ return 1;
+}
+fn b() -> i32 {
+ ${f.expect_order(4)}
+ return 1;
+}`,
+ }));
+ });
+
+g.test('logical_and')
+ .desc(
+    'Test that a chain of logical-AND expressions is evaluated left-to-right, stopping at the first false'
+ )
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ let l = a() && b() && c();
+ ${f.expect_order(3)}
+`,
+ extra: `
+fn a() -> bool {
+ ${f.expect_order(1)}
+ return true;
+}
+fn b() -> bool {
+ ${f.expect_order(2)}
+ return false;
+}
+fn c() -> bool {
+ ${f.expect_not_reached()}
+ return true;
+}
+`,
+ }));
+ });
+
+g.test('logical_or')
+ .desc(
+    'Test that a chain of logical-OR expressions is evaluated left-to-right, stopping at the first true'
+ )
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ let l = a() || b() || c();
+ ${f.expect_order(3)}
+`,
+ extra: `
+fn a() -> bool {
+ ${f.expect_order(1)}
+ return false;
+}
+fn b() -> bool {
+ ${f.expect_order(2)}
+ return true;
+}
+fn c() -> bool {
+ ${f.expect_not_reached()}
+ return true;
+}
+`,
+ }));
+ });
+
+g.test('bitwise_and')
+ .desc(
+    'Test that a chain of bitwise-AND expressions is evaluated left-to-right, with no short-circuiting'
+ )
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ let l = a() & b() & c();
+ ${f.expect_order(4)}
+`,
+ extra: `
+fn a() -> bool {
+ ${f.expect_order(1)}
+ return true;
+}
+fn b() -> bool {
+ ${f.expect_order(2)}
+ return false;
+}
+fn c() -> bool {
+ ${f.expect_order(3)}
+ return true;
+}
+`,
+ }));
+ });
+
+g.test('bitwise_or')
+ .desc(
+    'Test that a chain of bitwise-OR expressions is evaluated left-to-right, with no short-circuiting'
+ )
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ let l = a() | b() | c();
+ ${f.expect_order(4)}
+`,
+ extra: `
+fn a() -> bool {
+ ${f.expect_order(1)}
+ return false;
+}
+fn b() -> bool {
+ ${f.expect_order(2)}
+ return true;
+}
+fn c() -> bool {
+ ${f.expect_order(3)}
+ return true;
+}
+`,
+ }));
+ });
+
+g.test('user_fn_args')
+ .desc('Test user function call arguments are evaluated left-to-right')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ let l = f(a(), b(), c());
+ ${f.expect_order(5)}
+`,
+ extra: `
+fn a() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+fn b() -> i32 {
+ ${f.expect_order(2)}
+ return 2;
+}
+fn c() -> i32 {
+ ${f.expect_order(3)}
+ return 3;
+}
+fn f(x : i32, y : i32, z : i32) -> i32 {
+ ${f.expect_order(4)}
+ return x + y + z;
+}`,
+ }));
+ });
+
+g.test('nested_fn_args')
+  .desc('Test nested user function call arguments are evaluated left-to-right')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ let l = g(c(a(), b()), f(d(), e()));
+ ${f.expect_order(8)}
+`,
+ extra: `
+fn a() -> i32 {
+ ${f.expect_order(1)}
+ return 0;
+}
+fn b() -> i32 {
+ ${f.expect_order(2)}
+ return 0;
+}
+fn c(x : i32, y : i32) -> i32 {
+ ${f.expect_order(3)}
+ return x + y;
+}
+fn d() -> i32 {
+ ${f.expect_order(4)}
+ return 0;
+}
+fn e() -> i32 {
+ ${f.expect_order(5)}
+ return 0;
+}
+fn f(x : i32, y : i32) -> i32 {
+ ${f.expect_order(6)}
+ return x + y;
+}
+fn g(x : i32, y : i32) -> i32 {
+ ${f.expect_order(7)}
+ return x + y;
+}`,
+ }));
+ });
+
+g.test('builtin_fn_args')
+ .desc('Test builtin function call arguments are evaluated left-to-right')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ let l = mix(a(), b(), c());
+ ${f.expect_order(4)}
+`,
+ extra: `
+fn a() -> f32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+fn b() -> f32 {
+ ${f.expect_order(2)}
+ return 2;
+}
+fn c() -> f32 {
+ ${f.expect_order(3)}
+ return 3;
+}
+`,
+ }));
+ });
+
+g.test('nested_builtin_fn_args')
+ .desc('Test nested builtin function call arguments are evaluated left-to-right')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ let l = mix(a(), mix(b(), c(), d()), e());
+ ${f.expect_order(6)}
+`,
+ extra: `
+fn a() -> f32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+fn b() -> f32 {
+ ${f.expect_order(2)}
+ return 2;
+}
+fn c() -> f32 {
+ ${f.expect_order(3)}
+ return 3;
+}
+fn d() -> f32 {
+ ${f.expect_order(4)}
+ return 3;
+}
+fn e() -> f32 {
+ ${f.expect_order(5)}
+ return 3;
+}
+`,
+ }));
+ });
+
+g.test('1d_array_constructor')
+ .desc('Test arguments of an array constructor are evaluated left-to-right')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ let v = array(a(), b(), c(), d());
+ ${f.expect_order(5)}
+`,
+ extra: `
+fn a() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+fn b() -> i32 {
+ ${f.expect_order(2)}
+ return 2;
+}
+fn c() -> i32 {
+ ${f.expect_order(3)}
+ return 1;
+}
+fn d() -> i32 {
+ ${f.expect_order(4)}
+ return 2;
+}
+`,
+ }));
+ });
+
+g.test('2d_array_constructor')
+ .desc('Test arguments of a 2D array constructor are evaluated left-to-right')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ let v = array(array(a(), b()), array(c(), d()));
+ ${f.expect_order(5)}
+`,
+ extra: `
+fn a() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+fn b() -> i32 {
+ ${f.expect_order(2)}
+ return 2;
+}
+fn c() -> i32 {
+ ${f.expect_order(3)}
+ return 1;
+}
+fn d() -> i32 {
+ ${f.expect_order(4)}
+ return 2;
+}
+`,
+ }));
+ });
+
+g.test('vec4_constructor')
+ .desc('Test arguments of a vector constructor are evaluated left-to-right')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ let v = vec4(a(), b(), c(), d());
+ ${f.expect_order(5)}
+`,
+ extra: `
+fn a() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+fn b() -> i32 {
+ ${f.expect_order(2)}
+ return 2;
+}
+fn c() -> i32 {
+ ${f.expect_order(3)}
+ return 1;
+}
+fn d() -> i32 {
+ ${f.expect_order(4)}
+ return 2;
+}
+`,
+ }));
+ });
+
+g.test('nested_vec4_constructor')
+ .desc('Test arguments of a nested vector constructor are evaluated left-to-right')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ let v = vec4(a(), vec2(b(), c()), d());
+ ${f.expect_order(5)}
+`,
+ extra: `
+fn a() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+fn b() -> i32 {
+ ${f.expect_order(2)}
+ return 2;
+}
+fn c() -> i32 {
+ ${f.expect_order(3)}
+ return 1;
+}
+fn d() -> i32 {
+ ${f.expect_order(4)}
+ return 2;
+}
+`,
+ }));
+ });
+
+g.test('struct_constructor')
+ .desc('Test arguments of a structure constructor are evaluated left-to-right')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ let v = S(a(), b(), c(), d());
+ ${f.expect_order(5)}
+`,
+ extra: `
+struct S {
+ a : i32,
+ b : i32,
+ c : i32,
+ d : i32,
+}
+fn a() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+fn b() -> i32 {
+ ${f.expect_order(2)}
+ return 2;
+}
+fn c() -> i32 {
+ ${f.expect_order(3)}
+ return 1;
+}
+fn d() -> i32 {
+ ${f.expect_order(4)}
+ return 2;
+}
+`,
+ }));
+ });
+
+g.test('nested_struct_constructor')
+ .desc('Test arguments of a nested structure constructor are evaluated left-to-right')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ let v = Y(a(), X(b(), c()), d());
+ ${f.expect_order(5)}
+`,
+ extra: `
+struct Y {
+ a : i32,
+ x : X,
+ c : i32,
+}
+struct X {
+ b : i32,
+ c : i32,
+}
+fn a() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+fn b() -> i32 {
+ ${f.expect_order(2)}
+ return 2;
+}
+fn c() -> i32 {
+ ${f.expect_order(3)}
+ return 1;
+}
+fn d() -> i32 {
+ ${f.expect_order(4)}
+ return 2;
+}
+`,
+ }));
+ });
+
+g.test('1d_array_assignment')
+ .desc('Test LHS of an array element assignment is evaluated before RHS')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ var arr : array<i32, 8>;
+ ${f.expect_order(0)}
+ arr[a()] = arr[b()];
+ ${f.expect_order(3)}
+`,
+ extra: `
+fn a() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+fn b() -> i32 {
+ ${f.expect_order(2)}
+ return 2;
+}
+`,
+ }));
+ });
+
+g.test('2d_array_assignment')
+ .desc('Test LHS of 2D-array element assignment is evaluated before RHS')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ var arr : array<array<i32, 8>, 8>;
+ ${f.expect_order(0)}
+ arr[a()][b()] = arr[c()][d()];
+ ${f.expect_order(5)}
+`,
+ extra: `
+fn a() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+fn b() -> i32 {
+ ${f.expect_order(2)}
+ return 2;
+}
+fn c() -> i32 {
+ ${f.expect_order(3)}
+ return 1;
+}
+fn d() -> i32 {
+ ${f.expect_order(4)}
+ return 2;
+}
+`,
+ }));
+ });
+
+g.test('1d_array_compound_assignment')
+ .desc('Test LHS of an array element compound assignment is evaluated before RHS')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ var arr : array<i32, 8>;
+ ${f.expect_order(0)}
+ arr[a()] += arr[b()];
+ ${f.expect_order(3)}
+`,
+ extra: `
+fn a() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+fn b() -> i32 {
+ ${f.expect_order(2)}
+ return 2;
+}
+`,
+ }));
+ });
+
+g.test('2d_array_compound_assignment')
+ .desc('Test LHS of a 2D-array element compound assignment is evaluated before RHS')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ var arr : array<array<i32, 8>, 8>;
+ ${f.expect_order(0)}
+ arr[a()][b()] += arr[c()][d()];
+ ${f.expect_order(5)}
+`,
+ extra: `
+fn a() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+fn b() -> i32 {
+ ${f.expect_order(2)}
+ return 2;
+}
+fn c() -> i32 {
+ ${f.expect_order(3)}
+ return 1;
+}
+fn d() -> i32 {
+ ${f.expect_order(4)}
+ return 2;
+}
+`,
+ }));
+ });
+
+g.test('1d_array_increment')
+ .desc('Test index of an array element increment is evaluated only once')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ var arr : array<i32, 8>;
+ ${f.expect_order(0)}
+ arr[a()]++;
+ ${f.expect_order(2)}
+`,
+ extra: `
+fn a() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+`,
+ }));
+ });
+
+g.test('2d_array_increment')
+ .desc('Test index of a 2D-array element increment is evaluated only once')
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ var arr : array<array<i32, 8>, 8>;
+ ${f.expect_order(0)}
+ arr[a()][b()]++;
+ ${f.expect_order(3)}
+`,
+ extra: `
+fn a() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+fn b() -> i32 {
+ ${f.expect_order(2)}
+ return 1;
+}
+`,
+ }));
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/for.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/for.spec.ts
new file mode 100644
index 0000000000..898b8a0e04
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/for.spec.ts
@@ -0,0 +1,271 @@
+export const description = `
+Flow control tests for for-loops.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+import { runFlowControlTest } from './harness.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('for_basic')
+ .desc('Test that flow control executes a for-loop body the correct number of times')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f =>
+ `
+ ${f.expect_order(0)}
+ for (var i = ${f.value(0)}; i < ${f.value(3)}; i++) {
+ ${f.expect_order(1, 2, 3)}
+ }
+ ${f.expect_order(4)}
+`
+ );
+ });
+
+g.test('for_break')
+ .desc('Test that flow control exits a for-loop when reaching a break statement')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f =>
+ `
+ ${f.expect_order(0)}
+ for (var i = ${f.value(0)}; i < ${f.value(5)}; i++) {
+ ${f.expect_order(1, 3, 5, 7)}
+ if (i == 3) {
+ break;
+ ${f.expect_not_reached()}
+ }
+ ${f.expect_order(2, 4, 6)}
+ }
+ ${f.expect_order(8)}
+`
+ );
+ });
+
+g.test('for_continue')
+ .desc('Test flow control for a for-loop continue statement')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f =>
+ `
+ ${f.expect_order(0)}
+ for (var i = ${f.value(0)}; i < ${f.value(5)}; i++) {
+ ${f.expect_order(1, 3, 5, 7, 8)}
+ if (i == 3) {
+ continue;
+ ${f.expect_not_reached()}
+ }
+ ${f.expect_order(2, 4, 6, 9)}
+ }
+ ${f.expect_order(10)}
+`
+ );
+ });
+
+g.test('for_initalizer')
+ .desc('Test flow control for a for-loop initializer')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ for (var i = initializer(); i < ${f.value(3)}; i++) {
+ ${f.expect_order(2, 3, 4)}
+ }
+ ${f.expect_order(5)}
+`,
+ extra: `
+fn initializer() -> i32 {
+ ${f.expect_order(1)}
+ return ${f.value(0)};
+}
+`,
+ }));
+ });
+
+g.test('for_complex_initalizer')
+ .desc('Test flow control for a complex for-loop initializer')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ for (var i = initializer(max(a(), b())); i < ${f.value(5)}; i++) {
+ ${f.expect_order(4, 5, 6)}
+ }
+ ${f.expect_order(7)}
+`,
+ extra: `
+fn a() -> i32 {
+ ${f.expect_order(1)}
+ return ${f.value(1)};
+}
+fn b() -> i32 {
+ ${f.expect_order(2)}
+ return ${f.value(2)};
+}
+fn initializer(v : i32) -> i32 {
+ ${f.expect_order(3)}
+ return v;
+}
+`,
+ }));
+ });
+
+g.test('for_condition')
+ .desc('Test flow control for a for-loop condition')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ for (var i = ${f.value(0)}; condition(i); i++) {
+ ${f.expect_order(2, 4, 6)}
+ }
+ ${f.expect_order(8)}
+`,
+ extra: `
+fn condition(i : i32) -> bool {
+ ${f.expect_order(1, 3, 5, 7)}
+ return i < ${f.value(3)};
+}
+`,
+ }));
+ });
+
+g.test('for_complex_condition')
+  .desc('Test flow control for a complex for-loop condition')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ for (var i = ${f.value(0)}; condition(i, a() * b()); i++) {
+ ${f.expect_order(4, 8)}
+ }
+ ${f.expect_order(12)}
+`,
+ extra: `
+fn a() -> i32 {
+ ${f.expect_order(1, 5, 9)}
+ return ${f.value(1)};
+}
+fn b() -> i32 {
+ ${f.expect_order(2, 6, 10)}
+ return ${f.value(2)};
+}
+fn condition(i : i32, j : i32) -> bool {
+ ${f.expect_order(3, 7, 11)}
+ return i < j;
+}
+`,
+ }));
+ });
+
+g.test('for_continuing')
+ .desc('Test flow control for a for-loop continuing statement')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ for (var i = ${f.value(0)}; i < ${f.value(3)}; i = cont(i)) {
+ ${f.expect_order(1, 3, 5)}
+ }
+ ${f.expect_order(7)}
+`,
+ extra: `
+fn cont(i : i32) -> i32 {
+ ${f.expect_order(2, 4, 6)}
+ return i + 1;
+}
+`,
+ }));
+ });
+
+g.test('for_complex_continuing')
+  .desc('Test flow control for a complex for-loop continuing statement')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ for (var i = ${f.value(0)}; i < ${f.value(3)}; i += cont(a(), b())) {
+ ${f.expect_order(1, 5, 9)}
+ }
+ ${f.expect_order(13)}
+`,
+ extra: `
+fn a() -> i32 {
+ ${f.expect_order(2, 6, 10)}
+ return ${f.value(1)};
+}
+fn b() -> i32 {
+ ${f.expect_order(3, 7, 11)}
+ return ${f.value(2)};
+}
+fn cont(i : i32, j : i32) -> i32 {
+ ${f.expect_order(4, 8, 12)}
+ return j >> u32(i);
+}
+`,
+ }));
+ });
+
+g.test('nested_for_break')
+  .desc('Test flow control for a break statement in a for-loop nested inside an outer for-loop')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f =>
+ `
+ ${f.expect_order(0)}
+ for (var i = ${f.value(0)}; i < ${f.value(2)}; i++) {
+ ${f.expect_order(1, 5)}
+ for (var i = ${f.value(5)}; i < ${f.value(7)}; i++) {
+ ${f.expect_order(2, 4, 6, 8)}
+ if (i == ${f.value(6)}) {
+ break;
+ ${f.expect_not_reached()}
+ }
+ ${f.expect_order(3, 7)}
+ }
+ }
+ ${f.expect_order(9)}
+`
+ );
+ });
+
+g.test('nested_for_continue')
+  .desc('Test flow control for a continue statement in a for-loop nested inside an outer for-loop')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f =>
+ `
+ ${f.expect_order(0)}
+ for (var i = ${f.value(0)}; i < ${f.value(2)}; i++) {
+ ${f.expect_order(1, 5)}
+ for (var i = ${f.value(5)}; i < ${f.value(7)}; i++) {
+ ${f.expect_order(2, 3, 6, 7)}
+ if (i == ${f.value(5)}) {
+ continue;
+ ${f.expect_not_reached()}
+ }
+ ${f.expect_order(4, 8)}
+ }
+ }
+ ${f.expect_order(9)}
+`
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/harness.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/harness.ts
new file mode 100644
index 0000000000..885e32bd4a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/harness.ts
@@ -0,0 +1,312 @@
+import { Colors } from '../../../../common/util/colors.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+/**
+ * Options for runFlowControlTest()
+ */
+interface FlowControlTest extends GPUTest {
+ params: {
+ /**
+ * If true, then constant values will be placed into a storage buffer,
+ * preventing the shader compiler from knowing the value at compile time.
+ * This can prevent constant folding, loop unrolling, dead-code
+     * optimizations etc, which could all affect the tests.
+ */
+ preventValueOptimizations?: boolean;
+ };
+}
+
+/**
+ * The builder interface for the runFlowControlTest() callback.
+ * This interface is intended to be used to inject WGSL logic into the test
+ * shader.
+ * @see runFlowControlTest
+ */
+interface FlowControlTestBuilder {
+ /**
+ * Emits a value into the shader.
+ * If the FlowControlTest.params.preventValueOptimizations flag is enabled,
+ * then value() emits an expression to load the given value from a storage
+ * buffer, preventing the shader compiler from knowing the value at compile
+ * time. This can prevent constant folding, loop unrolling, dead-code
+   * optimizations etc, which could all affect the tests.
+ */
+ value(v: number | boolean): string;
+
+ /**
+ * Emits an expectation that the statement will be executed at the given
+ * chronological events.
+ * @param event one or more chronological events, the first being 0.
+ */
+ expect_order(...event: number[]): string;
+
+ /**
+ * Emits an expectation that the statement will not be reached.
+ */
+ expect_not_reached(): string;
+}
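+// With preventValueOptimizations enabled, value(3) expands to an expression such as
+// `inputs[0]` and value(true) to `inputs[1] != 0`; otherwise the literals `3` and `true`
+// are emitted directly (see the value() implementation in runFlowControlTest below).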
+
+/**
+ * Builds, runs then checks the output of a flow control shader test.
+ *
+ * `build_wgsl` is a function that's called to build the WGSL shader.
+ * This function takes a FlowControlTestBuilder as the single argument, and
+ * returns either a string which is embedded into the WGSL entrypoint function,
+ * or an object of the signature `{ entrypoint: string; extra: string }` which
+ * contains the entrypoint code, along with additional module-scope code.
+ *
+ * The FlowControlTestBuilder should be used to insert expectations into WGSL to
+ * validate control flow. FlowControlTestBuilder also can be used to add values
+ * to the shader which cannot be optimized away.
+ *
+ * Example, testing that an if-statement behaves as expected:
+ *
+ * ```
+ * runFlowControlTest(t, f =>
+ * `
+ * ${f.expect_order(0)}
+ * if (${f.value(true)}) {
+ * ${f.expect_order(1)}
+ * } else {
+ * ${f.expect_not_reached()}
+ * }
+ * ${f.expect_order(2)}
+ * `);
+ * ```
+ *
+ * @param t The test object
+ * @param build_wgsl The shader builder function that takes a
+ * FlowControlTestBuilder as the single argument, and returns either a WGSL
+ * string which is embedded into the WGSL entrypoint function, or a structure
+ * with entrypoint-scoped WGSL code and extra module-scope WGSL code.
+ */
+export function runFlowControlTest(
+ t: FlowControlTest,
+ build_wgsl: (builder: FlowControlTestBuilder) => string | { entrypoint: string; extra: string }
+) {
+ const inputData = new Array<number>();
+
+ type ExpectedEvents = {
+ kind: 'events';
+ stack: string | undefined;
+ values: number[];
+ counter: number;
+ };
+ type ExpectedNotReached = {
+ kind: 'not-reached';
+ stack: string | undefined;
+ };
+
+ const expectations = new Array<ExpectedEvents | ExpectedNotReached>();
+
+ const build_wgsl_result = build_wgsl({
+ value: v => {
+ if (t.params.preventValueOptimizations) {
+ if (typeof v === 'boolean') {
+ inputData.push(v ? 1 : 0);
+ return `inputs[${inputData.length - 1}] != 0`;
+ }
+ inputData.push(v);
+ return `inputs[${inputData.length - 1}]`;
+ } else {
+ return `${v}`;
+ }
+ },
+ expect_order: (...expected) => {
+ expectations.push({
+ kind: 'events',
+ stack: Error().stack,
+ values: expected,
+ counter: 0,
+ });
+      // Expectation ids start from 1 to distinguish them from the initial value 0.
+ return `push_output(${expectations.length}); // expect_order(${expected.join(', ')})`;
+ },
+ expect_not_reached: () => {
+ expectations.push({
+ kind: 'not-reached',
+ stack: Error().stack,
+ });
+      // Expectation ids start from 1 to distinguish them from the initial value 0.
+ return `push_output(${expectations.length}); // expect_not_reached()`;
+ },
+ });
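+  // As an example of the generated instrumentation, the first expect_order(0) in the test
+  // source expands to `push_output(1); // expect_order(0)`, and the checks below map the
+  // recorded value 1 back to expectations[0].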
+
+ const built_wgsl =
+ typeof build_wgsl_result === 'string'
+ ? { entrypoint: build_wgsl_result, extra: '' }
+ : build_wgsl_result;
+
+  const main_wgsl = built_wgsl;
+
+ const wgsl = `
+struct Outputs {
+ count : u32,
+ data : array<u32>,
+};
+@group(0) @binding(0) var<storage, read> inputs : array<i32>;
+@group(0) @binding(1) var<storage, read_write> outputs : Outputs;
+
+fn push_output(value : u32) {
+ outputs.data[outputs.count] = value;
+ outputs.count++;
+}
+
+@compute @workgroup_size(1)
+fn main() {
+ _ = &inputs;
+ _ = &outputs;
+ ${main_wgsl.entrypoint}
+}
+${main_wgsl.extra}
+`;
+
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({ code: wgsl }),
+ entryPoint: 'main',
+ },
+ });
+
+ // If there are no inputs, just put a single value in the buffer to keep
+ // makeBufferWithContents() happy.
+ if (inputData.length === 0) {
+ inputData.push(0);
+ }
+
+ const inputBuffer = t.makeBufferWithContents(new Uint32Array(inputData), GPUBufferUsage.STORAGE);
+
+ const maxOutputValues = 1000;
+ const outputBuffer = t.device.createBuffer({
+ size: 4 * (1 + maxOutputValues),
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: { buffer: inputBuffer } },
+ { binding: 1, resource: { buffer: outputBuffer } },
+ ],
+ });
+
+ // Run the shader.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(1);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+
+ t.eventualExpectOK(
+ t
+ .readGPUBufferRangeTyped(outputBuffer, {
+ type: Uint32Array,
+ typedLength: outputBuffer.size / 4,
+ })
+ .then(outputs => {
+ // outputs[0] is the number of outputted values
+ // outputs[1..N] holds the outputted values
+ const outputCount = outputs.data[0];
+ if (outputCount > maxOutputValues) {
+ return new Error(
+ `output data count (${outputCount}) exceeds limit of ${maxOutputValues}`
+ );
+ }
+
+ // returns an Error with the given message and WGSL source
+ const fail = (err: string) => Error(`${err}\nWGSL:\n${Colors.dim(Colors.blue(wgsl))}`);
+
+ // returns a string that shows the outputted values to help understand the whole trace.
+ const print_output_value = () => {
+ const subarray = outputs.data.subarray(1, outputCount + 1);
+ return `Output values (length: ${outputCount}): ${subarray.join(', ')}`;
+ };
+
+ // returns a colorized string of the expect_order() call, highlighting
+ // the event number that caused an error.
+ const expect_order_err = (expectation: ExpectedEvents, err_idx: number) => {
+ let out = 'expect_order(';
+ for (let i = 0; i < expectation.values.length; i++) {
+ if (i > 0) {
+ out += ', ';
+ }
+ if (i < err_idx) {
+ out += Colors.green(`${expectation.values[i]}`);
+ } else if (i > err_idx) {
+ out += Colors.dim(`${expectation.values[i]}`);
+ } else {
+ out += Colors.red(`${expectation.values[i]}`);
+ }
+ }
+ out += ')';
+ return out;
+ };
+
+ // Each of the outputted values represents an event
+ // Check that each event is as expected
+ for (let event = 0; event < outputCount; event++) {
+ const eventValue = outputs.data[1 + event]; // outputs.data[0] is count
+          // Expectation ids start from 1; 0 is an invalid value.
+ if (eventValue === 0) {
+ return fail(
+              `outputs.data[${event}] is the initial value 0, which doesn't refer to any valid expectation\n${print_output_value()}`
+ );
+ }
+ const expectationIndex = eventValue - 1;
+ if (expectationIndex >= expectations.length) {
+ return fail(
+ `outputs.data[${event}] value (${expectationIndex}) exceeds number of expectations (${
+ expectations.length
+ })\n${print_output_value()}`
+ );
+ }
+ const expectation = expectations[expectationIndex];
+ switch (expectation.kind) {
+ case 'not-reached':
+ return fail(
+ `expect_not_reached() reached at event ${event}\n${print_output_value()}\n${
+ expectation.stack
+ }`
+ );
+ case 'events':
+ if (expectation.counter >= expectation.values.length) {
+ return fail(
+ `${expect_order_err(
+ expectation,
+ expectation.counter
+                )} unexpectedly reached at event ${Colors.red(
+ `${event}`
+ )}\n${print_output_value()}\n${expectation.stack}`
+ );
+ }
+ if (event !== expectation.values[expectation.counter]) {
+ return fail(
+ `${expect_order_err(expectation, expectation.counter)} expected event ${
+ expectation.values[expectation.counter]
+ }, got ${event}\n${print_output_value()}\n${expectation.stack}`
+ );
+ }
+
+ expectation.counter++;
+ break;
+ }
+ }
+
+ // Finally check that all expect_order() calls were reached
+ for (const expectation of expectations) {
+ if (expectation.kind === 'events' && expectation.counter !== expectation.values.length) {
+ return fail(
+ `${expect_order_err(expectation, expectation.counter)} event ${
+ expectation.values[expectation.counter]
+ } was not reached\n${expectation.stack}\n${print_output_value()}`
+ );
+ }
+ }
+ outputs.cleanup();
+ return undefined;
+ })
+ );
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/if.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/if.spec.ts
new file mode 100644
index 0000000000..9bc5ca209d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/if.spec.ts
@@ -0,0 +1,102 @@
+export const description = `
+Flow control tests for if-statements.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+import { runFlowControlTest } from './harness.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('if_true')
+ .desc(
+ "Test that flow control executes the 'true' block of an if statement and not the 'false' block"
+ )
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f => `
+ ${f.expect_order(0)}
+ if (${f.value(true)}) {
+ ${f.expect_order(1)}
+ } else {
+ ${f.expect_not_reached()}
+ }
+ ${f.expect_order(2)}
+`
+ );
+ });
+
+g.test('if_false')
+ .desc(
+ "Test that flow control executes the 'false' block of an if statement and not the 'true' block"
+ )
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f => `
+ ${f.expect_order(0)}
+ if (${f.value(false)}) {
+ ${f.expect_not_reached()}
+ } else {
+ ${f.expect_order(1)}
+ }
+ ${f.expect_order(2)}
+`
+ );
+ });
+
+g.test('else_if')
+ .desc("Test that flow control executes the correct 'else if' block of an if statement")
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f => `
+ ${f.expect_order(0)}
+ if (${f.value(false)}) {
+ ${f.expect_not_reached()}
+ } else if (${f.value(false)}) {
+ ${f.expect_not_reached()}
+ } else if (${f.value(true)}) {
+ ${f.expect_order(1)}
+ } else if (${f.value(false)}) {
+ ${f.expect_not_reached()}
+ }
+ ${f.expect_order(2)}
+`
+ );
+ });
+
+g.test('nested_if_else')
+ .desc('Test flow control for nested if-else statements')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f => `
+${f.expect_order(0)}
+if (${f.value(true)}) {
+ ${f.expect_order(1)}
+ if (${f.value(false)}) {
+ ${f.expect_not_reached()}
+ } else {
+ ${f.expect_order(2)}
+ if (${f.value(true)}) {
+ ${f.expect_order(3)}
+ } else {
+ ${f.expect_not_reached()}
+ }
+ ${f.expect_order(4)}
+ }
+ ${f.expect_order(5)}
+} else {
+ ${f.expect_not_reached()}
+}
+${f.expect_order(6)}
+`
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/loop.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/loop.spec.ts
new file mode 100644
index 0000000000..18d0e5d1ee
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/loop.spec.ts
@@ -0,0 +1,125 @@
+export const description = `
+Flow control tests for loops.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+import { runFlowControlTest } from './harness.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('loop_break')
+ .desc('Test that flow control exits a loop when reaching a break statement')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f =>
+ `
+ ${f.expect_order(0)}
+ var i = ${f.value(0)};
+ loop {
+ ${f.expect_order(1, 3, 5, 7)}
+ if i == 3 {
+ break;
+ }
+ ${f.expect_order(2, 4, 6)}
+ i++;
+ }
+ ${f.expect_order(8)}
+`
+ );
+ });
+
+g.test('loop_continue')
+ .desc('Test flow control for a loop continue statement')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f =>
+ `
+ ${f.expect_order(0)}
+ var i = ${f.value(0)};
+ loop {
+ ${f.expect_order(1, 3, 5, 7, 8)}
+ if i == 3 {
+ i++;
+ continue;
+ ${f.expect_not_reached()}
+ }
+ ${f.expect_order(2, 4, 6, 9)}
+ if i == 4 {
+ break;
+ }
+ i++;
+ }
+ ${f.expect_order(10)}
+`
+ );
+ });
+
+g.test('loop_continuing_basic')
+ .desc('Test basic flow control for a loop continuing block')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f =>
+ `
+ ${f.expect_order(0)}
+ var i = ${f.value(0)};
+ loop {
+ ${f.expect_order(1, 3, 5)}
+ i++;
+
+ continuing {
+ ${f.expect_order(2, 4, 6)}
+ break if i == 3;
+ }
+ }
+ ${f.expect_order(7)}
+`
+ );
+ });
+
+g.test('nested_loops')
+ .desc('Test flow control for a loop nested in another loop')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f =>
+ `
+ ${f.expect_order(0)}
+ var i = ${f.value(0)};
+ loop {
+ ${f.expect_order(1, 11, 21)}
+ if i == ${f.value(6)} {
+ ${f.expect_order(22)}
+ break;
+ ${f.expect_not_reached()}
+ }
+ ${f.expect_order(2, 12)}
+ loop {
+ i++;
+ ${f.expect_order(3, 6, 9, 13, 16, 19)}
+ if (i % ${f.value(3)}) == 0 {
+ ${f.expect_order(10, 20)}
+ break;
+ ${f.expect_not_reached()}
+ }
+ ${f.expect_order(4, 7, 14, 17)}
+ if (i & ${f.value(1)}) == 0 {
+ ${f.expect_order(8, 15)}
+ continue;
+ ${f.expect_not_reached()}
+ }
+ ${f.expect_order(5, 18)}
+ }
+ }
+ ${f.expect_order(23)}
+`
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/phony.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/phony.spec.ts
new file mode 100644
index 0000000000..1821f407ac
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/phony.spec.ts
@@ -0,0 +1,135 @@
+export const description = `
+Flow control tests for phony assignments.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+import { runFlowControlTest } from './harness.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('phony_assign_call_basic')
+  .desc('Test flow control for a phony assignment with a single function call')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ _ = f();
+ ${f.expect_order(2)}
+`,
+ extra: `
+fn f() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+`,
+ }));
+ });
+
+g.test('phony_assign_call_must_use')
+ .desc(
+    'Test flow control for a phony assignment with a single function call annotated with @must_use'
+ )
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+ ${f.expect_order(0)}
+ _ = f();
+ ${f.expect_order(2)}
+`,
+ extra: `
+@must_use
+fn f() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+`,
+ }));
+ });
+
+g.test('phony_assign_call_nested')
+  .desc('Test flow control for a phony assignment with nested function calls')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+${f.expect_order(0)}
+_ = c(a(), b());
+${f.expect_order(4)}
+`,
+ extra: `
+fn a() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+fn b() -> i32 {
+ ${f.expect_order(2)}
+ return 1;
+}
+fn c(x : i32, y : i32) -> i32 {
+ ${f.expect_order(3)}
+ return x + y;
+}
+`,
+ }));
+ });
+
+g.test('phony_assign_call_nested_must_use')
+ .desc(
+    'Test flow control for a phony assignment with nested function calls, all annotated with @must_use'
+ )
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+${f.expect_order(0)}
+_ = c(a(), b());
+${f.expect_order(4)}
+`,
+ extra: `
+@must_use
+fn a() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+@must_use
+fn b() -> i32 {
+ ${f.expect_order(2)}
+ return 1;
+}
+@must_use
+fn c(x : i32, y : i32) -> i32 {
+ ${f.expect_order(3)}
+ return x + y;
+}
+`,
+ }));
+ });
+
+g.test('phony_assign_call_builtin')
+ .desc(
+    'Test flow control for a phony assignment with a builtin call, with two function calls as arguments'
+ )
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(t, f => ({
+ entrypoint: `
+${f.expect_order(0)}
+_ = max(a(), b());
+${f.expect_order(3)}
+`,
+ extra: `
+fn a() -> i32 {
+ ${f.expect_order(1)}
+ return 1;
+}
+fn b() -> i32 {
+ ${f.expect_order(2)}
+ return 1;
+}
+`,
+ }));
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/return.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/return.spec.ts
new file mode 100644
index 0000000000..ddfb20044c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/return.spec.ts
@@ -0,0 +1,56 @@
+export const description = `
+Flow control tests for return statements.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+import { runFlowControlTest } from './harness.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('return')
+ .desc("Test that flow control does not execute after a 'return' statement")
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f => `
+ ${f.expect_order(0)}
+ return;
+ ${f.expect_not_reached()}
+`
+ );
+ });
+
+g.test('return_conditional_true')
+ .desc("Test that flow control does not execute after a 'return' statement in a if (true) block")
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f => `
+ ${f.expect_order(0)}
+ if (${f.value(true)}) {
+ return;
+ }
+ ${f.expect_not_reached()}
+`
+ );
+ });
+
+g.test('return_conditional_false')
+ .desc("Test that flow control does not execute after a 'return' statement in a if (false) block")
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f => `
+ ${f.expect_order(0)}
+ if (${f.value(false)}) {
+ return;
+ }
+ ${f.expect_order(1)}
+`
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/switch.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/switch.spec.ts
new file mode 100644
index 0000000000..e500729614
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/switch.spec.ts
@@ -0,0 +1,156 @@
+export const description = `
+Flow control tests for switch statements.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+import { runFlowControlTest } from './harness.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('switch')
+ .desc('Test that flow control executes the correct switch case block')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f => `
+ ${f.expect_order(0)}
+ switch (${f.value(1)}) {
+ case 0: {
+ ${f.expect_not_reached()}
+ break;
+ }
+ case 1: {
+ ${f.expect_order(1)}
+ break;
+ }
+ case 2: {
+ ${f.expect_not_reached()}
+ break;
+ }
+ default: {
+ ${f.expect_not_reached()}
+ break;
+ }
+ }
+ ${f.expect_order(2)}
+`
+ );
+ });
+
+g.test('switch_multiple_case')
+ .desc(
+ 'Test that flow control executes the correct switch case block with multiple cases per block'
+ )
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f => `
+ ${f.expect_order(0)}
+ switch (${f.value(2)}) {
+ case 0, 1: {
+ ${f.expect_not_reached()}
+ break;
+ }
+ case 2, 3: {
+ ${f.expect_order(1)}
+ break;
+ }
+ default: {
+ ${f.expect_not_reached()}
+ break;
+ }
+ }
+ ${f.expect_order(2)}
+`
+ );
+ });
+
+g.test('switch_multiple_case_default')
+ .desc(
+ 'Test that flow control executes the correct switch case block with multiple cases per block (combined with default)'
+ )
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f => `
+ ${f.expect_order(0)}
+ switch (${f.value(2)}) {
+ case 0, 1: {
+ ${f.expect_not_reached()}
+ break;
+ }
+ case 2, 3, default: {
+ ${f.expect_order(1)}
+ break;
+ }
+ }
+ ${f.expect_order(2)}
+ switch (${f.value(1)}) {
+ case 0, 1: {
+ ${f.expect_order(3)}
+ break;
+ }
+ case 2, 3, default: {
+ ${f.expect_not_reached()}
+ break;
+ }
+ }
+ ${f.expect_order(4)}
+`
+ );
+ });
+
+g.test('switch_default')
+ .desc('Test that flow control executes the switch default block')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f => `
+${f.expect_order(0)}
+switch (${f.value(4)}) {
+ case 0: {
+ ${f.expect_not_reached()}
+ break;
+ }
+ case 1: {
+ ${f.expect_not_reached()}
+ break;
+ }
+ case 2: {
+ ${f.expect_not_reached()}
+ break;
+ }
+ default: {
+ ${f.expect_order(1)}
+ break;
+ }
+}
+${f.expect_order(2)}
+`
+ );
+ });
+
+g.test('switch_default_only')
+ .desc('Test that flow control executes the switch default block, which is the only case')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f => `
+${f.expect_order(0)}
+switch (${f.value(4)}) {
+default: {
+ ${f.expect_order(1)}
+ break;
+}
+}
+${f.expect_order(2)}
+`
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/while.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/while.spec.ts
new file mode 100644
index 0000000000..88ce6838a5
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/flow_control/while.spec.ts
@@ -0,0 +1,140 @@
+export const description = `
+Flow control tests for while-loops.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+import { runFlowControlTest } from './harness.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('while_basic')
+ .desc('Test that flow control executes a while-loop body the correct number of times')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f =>
+ `
+ ${f.expect_order(0)}
+ var i = ${f.value(0)};
+ while (i < ${f.value(5)}) {
+ ${f.expect_order(1, 2, 3, 4, 5)}
+ i++;
+ }
+ ${f.expect_order(6)}
+`
+ );
+ });
+
+g.test('while_break')
+ .desc('Test that flow control exits a while-loop when reaching a break statement')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f =>
+ `
+ ${f.expect_order(0)}
+ var i = ${f.value(0)};
+ while (i < ${f.value(5)}) {
+ ${f.expect_order(1, 3, 5, 7)}
+ if (i == 3) {
+ break;
+ ${f.expect_not_reached()}
+ }
+ ${f.expect_order(2, 4, 6)}
+ i++;
+ }
+ ${f.expect_order(8)}
+`
+ );
+ });
+
+g.test('while_continue')
+ .desc('Test flow control for a while-loop continue statement')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f =>
+ `
+ ${f.expect_order(0)}
+ var i = ${f.value(0)};
+ while (i < ${f.value(5)}) {
+ ${f.expect_order(1, 3, 5, 7, 8)}
+ if (i == 3) {
+ i++;
+ continue;
+ ${f.expect_not_reached()}
+ }
+ ${f.expect_order(2, 4, 6, 9)}
+ i++;
+ }
+ ${f.expect_order(10)}
+`
+ );
+ });
+
+g.test('while_nested_break')
+ .desc('Test that flow control exits a nested while-loop when reaching a break statement')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f =>
+ `
+ ${f.expect_order(0)}
+ var i = ${f.value(0)};
+ while (i < ${f.value(3)}) {
+ ${f.expect_order(1, 5, 11)}
+ i++;
+ var j = ${f.value(0)};
+ while (j < i) {
+ ${f.expect_order(2, 6, 8, 12)}
+ j++;
+ if ((i+j) & 2) == 0 {
+ ${f.expect_order(9, 13)}
+ break;
+ ${f.expect_not_reached()}
+ }
+ ${f.expect_order(3, 7)}
+ }
+ ${f.expect_order(4, 10, 14)}
+ }
+ ${f.expect_order(15)}
+`
+ );
+ });
+
+g.test('while_nested_continue')
+ .desc('Test flow control for a nested while-loop with a continue statement')
+ .params(u => u.combine('preventValueOptimizations', [true, false]))
+ .fn(t => {
+ runFlowControlTest(
+ t,
+ f =>
+ `
+ ${f.expect_order(0)}
+ var i = ${f.value(0)};
+ while (i < ${f.value(3)}) {
+ ${f.expect_order(1, 5, 11)}
+ i++;
+ var j = ${f.value(0)};
+ while (j < i) {
+ ${f.expect_order(2, 6, 8, 12, 14, 16)}
+ j++;
+ if ((i+j) & 2) == 0 {
+ ${f.expect_order(9, 13, 15)}
+ continue;
+ ${f.expect_not_reached()}
+ }
+ ${f.expect_order(3, 7, 17)}
+ }
+ ${f.expect_order(4, 10, 18)}
+ }
+ ${f.expect_order(19)}
+`
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/memory_model/adjacent.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/memory_model/adjacent.spec.ts
new file mode 100644
index 0000000000..332cca2931
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/memory_model/adjacent.spec.ts
@@ -0,0 +1,272 @@
+export const description = `
+Tests writes from different invocations to adjacent scalars do not interfere.
+This is especially interesting when the scalar type is narrower than 32-bits.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+import { PRNG } from '../../../util/prng.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Algorithm: with N invocations, N is even:
+// srcBuffer: An array of random scalar values. Avoids unsupported values like infinity and NaN.
+// resultBuffer: A result array
+// pattern: 0|1|2|3
+// Pattern 0: Identity: invocation i: dst[i] = src[i]
+// Pattern 1: Try to prevent write coalescing.
+// Even elements stay in place.
+// Reverse order of odd elements.
+// invocation 2k: dst[2k] = src[2k]
+// invocation 2k+1: dst[2k+1] = src[N - (2k+1)]
+// Example: with N=6
+// dst[0] = src[0]
+// dst[1] = src[5]
+// dst[2] = src[2]
+// dst[3] = src[3]
+// dst[4] = src[4]
+// dst[5] = src[1]
+// Pattern 2: Try to prevent write coalescing.
+// Reverse order of even elements.
+// Odd elements stay in place.
+// invocation 2k: dst[2k] = src[N - 2 - 2k]
+// invocation 2k+1: dst[2k+1] = src[2k+1]
+// Example: with N=6
+// dst[0] = src[4]
+// dst[1] = src[1]
+// dst[2] = src[2]
+// dst[3] = src[3]
+// dst[4] = src[0]
+// dst[5] = src[5]
+// Pattern 3: Reverse elements: dst[i] = src[N-1-i]
+// addressSpace: workgroup|storage
+// Where dst is allocated.
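+//
+// As a quick reference (derived from the patterns above), the invocation that reads src[i]
+// writes to dst index:
+//   pattern 0: i
+//   pattern 1: i even ? i         : N - i
+//   pattern 2: i even ? N - 2 - i : i
+//   pattern 3: N - 1 - i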
+
+type AddressSpace = 'workgroup' | 'storage';
+type Pattern = 0 | 1 | 2 | 3;
+const kAddressSpaces = ['workgroup', 'storage'] as const;
+const kPatterns = [0, 1, 2, 3] as const;
+
+interface AdjacentWritesTest extends GPUTest {
+ params: {
+ pattern: Pattern;
+ addressSpace: AddressSpace;
+ };
+}
+
+// For simplicity, make the entire source (and destination) array fit
+// in workgroup memory.
+// We can count on at least 16384 bytes of workgroup memory (the default
+// maxComputeWorkgroupStorageSize limit).
+const kNumValues = 4096; // Assumed even
+const kWorkgroupSize = 128; // Use 1-dimensional workgroups.
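+// Each invocation copies exactly one element, so the test dispatches
+// kNumValues / kWorkgroupSize (= 32) workgroups (see runTest below).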
+
+/**
+ * @returns an integer for the bit pattern of a random finite f16 value.
+ * Consumes values from `prng`.
+ *
+ * @param prng - a pseudo-random number generator.
+ */
+function randomFiniteF16(prng: PRNG): number {
+ const exponent_bits = 0x7c00;
+  // With any reasonable random number stream, the average number of trips
+  // around this loop is about 32/31 (~1.03): the 5-bit exponent field is all
+  // ones with probability 1/32, and only those candidates are rejected.
+ let candidate: number;
+ do {
+ candidate = prng.randomU32() & 0xffff;
+ // Non-finite f16 values have all 1 bits in the exponent.
+ } while ((candidate & exponent_bits) === exponent_bits);
+ return candidate;
+}
+
+/**
+ * Fills array `arr` with random finite f16 values.
+ * Consumes values from `prng`.
+ *
+ * @param prng - a pseudo-random number generator.
+ * @param arr - the array to fill. Assume it is already correctly sized.
+ */
+function fillWithRandomFiniteF16(prng: PRNG, arr: Uint16Array) {
+ for (let i = 0; i < arr.length; i++) {
+ arr[i] = randomFiniteF16(prng);
+ }
+}
+
+/**
+ * @returns the expression for the destination index, based on `pattern`.
+ *
+ * @param i the WGSL string for the source index
+ * @param pattern the indexing pattern
+ */
+function getDstIndexExpression(i: string, pattern: Pattern): string {
+ switch (pattern) {
+ case 0:
+ return `${i}`;
+ case 1:
+ // Even elements map to themselves.
+ // Odd elements map to the reversed order of odd elements.
+ return `select(${kNumValues} - ${i}, ${i}, (${i} & 1) == 0)`;
+ case 2:
+      // Even elements map to the reversed order of even elements.
+ // Since N is even, element 0 should get index N-2. (!)
+ // Odd elements map to themselves.
+ return `select(${i}, ${kNumValues} - 2 - ${i}, (${i} & 1) == 0)`;
+ case 3:
+      return `${kNumValues} - 1 - ${i}`;
+ }
+}
+
+/**
+ * Computes the reference (correct) result for the given source array and indexing pattern.
+ *
+ * @param pattern the indexing pattern
+ * @param src the source array
+ * @param dst the array to fill with values transferred from `src`
+ */
+function computeReference(pattern: Pattern, src: Uint16Array, dst: Uint16Array) {
+ for (let i = 0; i < src.length; i++) {
+ const isEven = (i & 1) === 0;
+ switch (pattern) {
+ case 0:
+ dst[i] = src[i];
+ break;
+ case 1:
+ if (isEven) {
+ dst[i] = src[i];
+ } else {
+ dst[src.length - i] = src[i];
+ }
+ break;
+ case 2:
+ if (isEven) {
+ dst[kNumValues - 2 - i] = src[i];
+ } else {
+ dst[i] = src[i];
+ }
+ break;
+ case 3:
+ dst[src.length - 1 - i] = src[i];
+ break;
+ }
+ }
+}
+
+/**
+ * @returns the source text for a shader that copies elements from a source
+ * buffer to a destination buffer, while remapping indices according to the
+ * specified pattern.
+ *
+ * @param p contains the address space and pattern
+ */
+function makeShaderText(p: { addressSpace: AddressSpace; pattern: Pattern }): string {
+  // When the destination buffer is in 'storage', write directly to it.
+  // Otherwise, the destination is in workgroup memory, and the output buffer
+  // needs a different name.
+ const dstBuf = p.addressSpace === 'storage' ? 'dst' : 'dstBuf';
+
+ const parts: string[] = [];
+
+ parts.push(`
+ enable f16;
+ @group(0) @binding(0) var<storage> src: array<f16>;
+ @group(0) @binding(1) var<storage,read_write> ${dstBuf}: array<f16>;
+ `);
+
+ if (p.addressSpace === 'workgroup') {
+ parts.push(`var<workgroup> dst: array<f16,${kNumValues}>;`);
+ }
+
+ parts.push(`
+ @compute @workgroup_size(${kWorkgroupSize})
+ fn adjacent_writes(@builtin(global_invocation_id) gid: vec3u) {
+ let srcIndex = gid.x;
+ let dstIndex = ${getDstIndexExpression('srcIndex', p.pattern)};
+ dst[dstIndex] = src[srcIndex];
+ `);
+
+ if (p.addressSpace === 'workgroup') {
+ // Copy to the output buffer.
+ // The barrier is not necessary here, but it should prevent
+ // the compiler from being clever and optimizing away the
+ // intermediate write to workgroup memory.
+ parts.push(` workgroupBarrier();`);
+ parts.push(` ${dstBuf}[dstIndex] = dst[dstIndex];`);
+ }
+ parts.push('}');
+
+ return parts.join('\n');
+}
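+
+// For illustration only (an approximate sketch, not emitted by the test): for
+// { addressSpace: 'workgroup', pattern: 0 }, the generated WGSL is roughly:
+//
+//   enable f16;
+//   @group(0) @binding(0) var<storage> src: array<f16>;
+//   @group(0) @binding(1) var<storage,read_write> dstBuf: array<f16>;
+//   var<workgroup> dst: array<f16,4096>;
+//
+//   @compute @workgroup_size(128)
+//   fn adjacent_writes(@builtin(global_invocation_id) gid: vec3u) {
+//     let srcIndex = gid.x;
+//     let dstIndex = srcIndex;
+//     dst[dstIndex] = src[srcIndex];
+//     workgroupBarrier();
+//     dstBuf[dstIndex] = dst[dstIndex];
+//   }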
+
+/**
+ * Runs the test on the GPU, generating random source data and
+ * checking the results against the expected permutation of that data.
+ *
+ * @param t the AdjacentWritesTest specification.
+ */
+function runTest(t: AdjacentWritesTest) {
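+  // Derive a deterministic seed that is distinct for each (pattern, addressSpace)
+  // combination, so every case uses its own reproducible random data.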
+ const seed = ((t.params.pattern as number) + 1) * (t.params.addressSpace as string).length;
+ const prng = new PRNG(seed);
+
+ const expected = new Uint16Array(kNumValues);
+
+ const bytesPerScalar = 2; // f16 is 2 bytes wide.
+ const bufByteSize = kNumValues * bytesPerScalar;
+ const hostSrcBuf = t.device.createBuffer({
+ size: bufByteSize,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.MAP_WRITE,
+ mappedAtCreation: true,
+ });
+ {
+ const hostSrcUint16 = new Uint16Array(hostSrcBuf.getMappedRange());
+ fillWithRandomFiniteF16(prng, hostSrcUint16);
+ computeReference(t.params.pattern, hostSrcUint16, expected);
+ hostSrcBuf.unmap();
+ }
+
+ const srcBuf = t.device.createBuffer({
+ size: bufByteSize,
+ usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.STORAGE,
+ });
+ const dstBuf = t.device.createBuffer({
+ size: bufByteSize,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.STORAGE,
+ });
+
+ const shaderText = makeShaderText(t.params);
+ const shader = t.device.createShaderModule({ code: shaderText });
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: { module: shader, entryPoint: 'adjacent_writes' },
+ });
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ { binding: 0, resource: { buffer: srcBuf } },
+ { binding: 1, resource: { buffer: dstBuf } },
+ ],
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ encoder.copyBufferToBuffer(hostSrcBuf, 0, srcBuf, 0, bufByteSize);
+
+ const computeEncoder = encoder.beginComputePass();
+ computeEncoder.setPipeline(pipeline);
+ computeEncoder.setBindGroup(0, bindGroup);
+ computeEncoder.dispatchWorkgroups(kNumValues / kWorkgroupSize);
+ computeEncoder.end();
+
+ const commands = encoder.finish();
+ t.device.queue.submit([commands]);
+
+ t.expectGPUBufferValuesEqual(dstBuf, expected);
+}
+
+g.test('f16')
+ .desc(
+ `Check that writes by different invocations to adjacent f16 values in an array do not interfere with each other.`
+ )
+ .params(u => u.combine('addressSpace', kAddressSpaces).combine('pattern', kPatterns))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(t => runTest(t));
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/memory_model/atomicity.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/memory_model/atomicity.spec.ts
new file mode 100644
index 0000000000..371eee5f92
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/memory_model/atomicity.spec.ts
@@ -0,0 +1,102 @@
+export const description = `Tests for the atomicity of atomic read-modify-write instructions.`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+import {
+ MemoryModelTestParams,
+ MemoryModelTester,
+ buildTestShader,
+ TestType,
+ buildResultShader,
+ ResultType,
+ MemoryType,
+} from './memory_model_setup.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// A reasonable parameter set, determined heuristically.
+const memoryModelTestParams: MemoryModelTestParams = {
+ workgroupSize: 256,
+ testingWorkgroups: 512,
+ maxWorkgroups: 1024,
+ shufflePct: 100,
+ barrierPct: 100,
+ memStressPct: 100,
+ memStressIterations: 1024,
+ memStressStoreFirstPct: 50,
+ memStressStoreSecondPct: 50,
+ preStressPct: 100,
+ preStressIterations: 1024,
+ preStressStoreFirstPct: 50,
+ preStressStoreSecondPct: 50,
+ scratchMemorySize: 2048,
+ stressLineSize: 64,
+ stressTargetLines: 2,
+ stressStrategyBalancePct: 50,
+ permuteFirst: 109,
+ permuteSecond: 419,
+ memStride: 4,
+ aliasedMemory: false,
+ numBehaviors: 4,
+};
+
+const storageMemoryTestCode = `
+ let r0 = atomicAdd(&test_locations.value[x_0], 0u);
+ atomicStore(&test_locations.value[x_1], 2u);
+ atomicStore(&results.value[id_0].r0, r0);
+`;
+
+const workgroupMemoryTestCode = `
+ let r0 = atomicAdd(&wg_test_locations[x_0], 0u);
+ atomicStore(&wg_test_locations[x_1], 2u);
+ workgroupBarrier();
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_0].r0, r0);
+ atomicStore(&test_locations.value[shuffled_workgroup * workgroupXSize * stress_params.mem_stride * 2u + x_1], atomicLoad(&wg_test_locations[x_1]));
+`;
+
+const resultCode = `
+ if ((r0 == 0u && mem_x_0 == 2u)) {
+ atomicAdd(&test_results.seq0, 1u);
+ } else if ((r0 == 2u && mem_x_0 == 1u)) {
+ atomicAdd(&test_results.seq1, 1u);
+ } else if ((r0 == 0u && mem_x_0 == 1u)) {
+ atomicAdd(&test_results.weak, 1u);
+ }
+`;
+
+g.test('atomicity')
+ .desc(
+ `Checks whether a store on one thread can interrupt an atomic RMW on a second thread. If the read returned by
+ the RMW instruction is the initial value of memory (0), but the final value in memory is 1, then the atomic write
+ in the second thread occurred in between the read and the write of the RMW.
+ `
+ )
+ .paramsSimple([
+ {
+ memType: MemoryType.AtomicStorageClass,
+ testType: TestType.InterWorkgroup,
+ _testCode: storageMemoryTestCode,
+ },
+ {
+ memType: MemoryType.AtomicStorageClass,
+ testType: TestType.IntraWorkgroup,
+ _testCode: storageMemoryTestCode,
+ },
+ {
+ memType: MemoryType.AtomicWorkgroupClass,
+ testType: TestType.IntraWorkgroup,
+ _testCode: workgroupMemoryTestCode,
+ },
+ ])
+ .fn(async t => {
+ const testShader = buildTestShader(t.params._testCode, t.params.memType, t.params.testType);
+ const resultShader = buildResultShader(resultCode, t.params.testType, ResultType.FourBehavior);
+ const memModelTester = new MemoryModelTester(
+ t,
+ memoryModelTestParams,
+ testShader,
+ resultShader
+ );
+ await memModelTester.run(10, 3);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/memory_model/barrier.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/memory_model/barrier.spec.ts
new file mode 100644
index 0000000000..478ae28a7a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/memory_model/barrier.spec.ts
@@ -0,0 +1,250 @@
+export const description = `
+Tests for non-atomic memory synchronization within a workgroup in the presence of a WebGPU barrier`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+import {
+ MemoryModelTestParams,
+ MemoryModelTester,
+ kAccessValueTypes,
+ buildTestShader,
+ MemoryType,
+ TestType,
+ buildResultShader,
+ ResultType,
+} from './memory_model_setup.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// A reasonable parameter set, determined heuristically.
+const memoryModelTestParams: MemoryModelTestParams = {
+ workgroupSize: 256,
+ testingWorkgroups: 512,
+ maxWorkgroups: 1024,
+ shufflePct: 100,
+ barrierPct: 100,
+ memStressPct: 100,
+ memStressIterations: 1024,
+ memStressStoreFirstPct: 50,
+ memStressStoreSecondPct: 50,
+ preStressPct: 100,
+ preStressIterations: 1024,
+ preStressStoreFirstPct: 50,
+ preStressStoreSecondPct: 50,
+ scratchMemorySize: 2048,
+ stressLineSize: 64,
+ stressTargetLines: 2,
+ stressStrategyBalancePct: 50,
+ permuteFirst: 109,
+ permuteSecond: 419,
+ memStride: 4,
+ aliasedMemory: false,
+ numBehaviors: 2,
+};
+
+// The three kinds of non-atomic access pairs tested.
+// rw: read -> barrier -> write
+// wr: write -> barrier -> read
+// ww: write -> barrier -> write
+type AccessPair = 'rw' | 'wr' | 'ww';
+
+// Test the non-atomic memory types.
+const kMemTypes = [MemoryType.NonAtomicStorageClass, MemoryType.NonAtomicWorkgroupClass] as const;
+
+const storageMemoryBarrierStoreLoadTestCode = `
+ test_locations.value[x_0] = 1;
+ workgroupBarrier();
+ let r0 = u32(test_locations.value[x_1]);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_1].r0, r0);
+`;
+
+const workgroupMemoryBarrierStoreLoadTestCode = `
+ wg_test_locations[x_0] = 1;
+ workgroupBarrier();
+ let r0 = u32(wg_test_locations[x_1]);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_1].r0, r0);
+`;
+
+const storageMemoryBarrierLoadStoreTestCode = `
+ let r0 = u32(test_locations.value[x_0]);
+ workgroupBarrier();
+ test_locations.value[x_1] = 1;
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_0].r0, r0);
+`;
+
+const workgroupMemoryBarrierLoadStoreTestCode = `
+ let r0 = u32(wg_test_locations[x_0]);
+ workgroupBarrier();
+ wg_test_locations[x_1] = 1;
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_0].r0, r0);
+`;
+
+const storageMemoryBarrierStoreStoreTestCode = `
+ test_locations.value[x_0] = 1;
+ storageBarrier();
+ test_locations.value[x_1] = 2;
+`;
+
+const workgroupMemoryBarrierStoreStoreTestCode = `
+ wg_test_locations[x_0] = 1;
+ workgroupBarrier();
+ wg_test_locations[x_1] = 2;
+ workgroupBarrier();
+ test_locations.value[shuffled_workgroup * workgroupXSize * stress_params.mem_stride * 2u + x_1] = wg_test_locations[x_1];
+`;
+
+function getTestCode(p: { memType: MemoryType; accessPair: AccessPair }): string {
+ switch (p.accessPair) {
+ case 'rw':
+ return p.memType === MemoryType.NonAtomicStorageClass
+ ? storageMemoryBarrierLoadStoreTestCode
+ : workgroupMemoryBarrierLoadStoreTestCode;
+ case 'wr':
+ return p.memType === MemoryType.NonAtomicStorageClass
+ ? storageMemoryBarrierStoreLoadTestCode
+ : workgroupMemoryBarrierStoreLoadTestCode;
+ case 'ww':
+ return p.memType === MemoryType.NonAtomicStorageClass
+ ? storageMemoryBarrierStoreStoreTestCode
+ : workgroupMemoryBarrierStoreStoreTestCode;
+ }
+}
+
+g.test('workgroup_barrier_store_load')
+ .desc(
+ `Checks whether the workgroup barrier properly synchronizes a non-atomic write and read on
+ separate threads in the same workgroup. Within a workgroup, the barrier should force an invocation
+ after the barrier to read a write from an invocation before the barrier.
+ `
+ )
+ .params(u =>
+ u
+ .combine('accessValueType', kAccessValueTypes)
+ .combine('memType', kMemTypes)
+ .combine('accessPair', ['wr'] as const)
+ )
+ .beforeAllSubcases(t => {
+ if (t.params.accessValueType === 'f16') {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(async t => {
+ const resultCode = `
+ if (r0 == 1u) {
+ atomicAdd(&test_results.seq, 1u);
+ } else if (r0 == 0u) {
+ atomicAdd(&test_results.weak, 1u);
+ }
+ `;
+ const testShader = buildTestShader(
+ getTestCode(t.params),
+ t.params.memType,
+ TestType.IntraWorkgroup
+ );
+ const resultShader = buildResultShader(
+ resultCode,
+ TestType.IntraWorkgroup,
+ ResultType.TwoBehavior
+ );
+ const memModelTester = new MemoryModelTester(
+ t,
+ memoryModelTestParams,
+ testShader,
+ resultShader,
+ t.params.accessValueType
+ );
+ await memModelTester.run(15, 1);
+ });
+
+g.test('workgroup_barrier_load_store')
+ .desc(
+ `Checks whether the workgroup barrier properly synchronizes a non-atomic write and read on
+ separate threads in the same workgroup. Within a workgroup, the barrier should force an invocation
+ before the barrier to not read the write from an invocation after the barrier.
+ `
+ )
+ .params(u =>
+ u
+ .combine('accessValueType', kAccessValueTypes)
+ .combine('memType', kMemTypes)
+ .combine('accessPair', ['rw'] as const)
+ )
+ .beforeAllSubcases(t => {
+ if (t.params.accessValueType === 'f16') {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(async t => {
+ const resultCode = `
+ if (r0 == 0u) {
+ atomicAdd(&test_results.seq, 1u);
+ } else if (r0 == 1u) {
+ atomicAdd(&test_results.weak, 1u);
+ }
+ `;
+ const testShader = buildTestShader(
+ getTestCode(t.params),
+ t.params.memType,
+ TestType.IntraWorkgroup
+ );
+ const resultShader = buildResultShader(
+ resultCode,
+ TestType.IntraWorkgroup,
+ ResultType.TwoBehavior
+ );
+ const memModelTester = new MemoryModelTester(
+ t,
+ memoryModelTestParams,
+ testShader,
+ resultShader,
+ t.params.accessValueType
+ );
+ await memModelTester.run(12, 1);
+ });
+
+g.test('workgroup_barrier_store_store')
+ .desc(
+ `Checks whether the workgroup barrier properly synchronizes non-atomic writes on
+ separate threads in the same workgroup. Within a workgroup, the barrier should force the value in memory
+ to be the result of the write after the barrier, not the write before.
+ `
+ )
+ .params(u =>
+ u
+ .combine('accessValueType', kAccessValueTypes)
+ .combine('memType', kMemTypes)
+ .combine('accessPair', ['ww'] as const)
+ )
+ .beforeAllSubcases(t => {
+ if (t.params.accessValueType === 'f16') {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(async t => {
+ const resultCode = `
+ if (mem_x_0 == 2u) {
+ atomicAdd(&test_results.seq, 1u);
+ } else if (mem_x_0 == 1u) {
+ atomicAdd(&test_results.weak, 1u);
+ }
+ `;
+ const testShader = buildTestShader(
+ getTestCode(t.params),
+ t.params.memType,
+ TestType.IntraWorkgroup
+ );
+ const resultShader = buildResultShader(
+ resultCode,
+ TestType.IntraWorkgroup,
+ ResultType.TwoBehavior
+ );
+ const memModelTester = new MemoryModelTester(
+ t,
+ memoryModelTestParams,
+ testShader,
+ resultShader,
+ t.params.accessValueType
+ );
+ await memModelTester.run(10, 1);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/memory_model/coherence.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/memory_model/coherence.spec.ts
new file mode 100644
index 0000000000..742db51169
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/memory_model/coherence.spec.ts
@@ -0,0 +1,525 @@
+export const description = `
+Tests that all threads see a sequentially consistent view of the order of memory
+accesses to a single memory location. Uses a parallel testing strategy along with stressing
+threads to increase coverage of possible bugs.`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+import {
+ MemoryModelTestParams,
+ MemoryModelTester,
+ buildTestShader,
+ MemoryType,
+ TestType,
+ buildResultShader,
+ ResultType,
+} from './memory_model_setup.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// A reasonable parameter set, determined heuristically.
+const memoryModelTestParams: MemoryModelTestParams = {
+ workgroupSize: 256,
+ testingWorkgroups: 39,
+ maxWorkgroups: 952,
+ shufflePct: 0,
+ barrierPct: 0,
+ memStressPct: 0,
+ memStressIterations: 1024,
+ memStressStoreFirstPct: 50,
+ memStressStoreSecondPct: 50,
+ preStressPct: 0,
+ preStressIterations: 1024,
+ preStressStoreFirstPct: 50,
+ preStressStoreSecondPct: 50,
+ scratchMemorySize: 2048,
+ stressLineSize: 64,
+ stressTargetLines: 2,
+ stressStrategyBalancePct: 50,
+ permuteFirst: 109,
+ permuteSecond: 1,
+ memStride: 1,
+ aliasedMemory: true,
+ numBehaviors: 4,
+};
+
+const storageMemoryCorrTestCode = `
+ atomicStore(&test_locations.value[x_0], 1u);
+ let r0 = atomicLoad(&test_locations.value[x_1]);
+ let r1 = atomicLoad(&test_locations.value[y_1]);
+ atomicStore(&results.value[id_1].r0, r0);
+ atomicStore(&results.value[id_1].r1, r1);
+`;
+
+const workgroupStorageMemoryCorrTestCode = `
+ atomicStore(&test_locations.value[x_0], 1u);
+ let r0 = atomicLoad(&test_locations.value[x_1]);
+ let r1 = atomicLoad(&test_locations.value[y_1]);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_1].r0, r0);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_1].r1, r1);
+`;
+
+const storageMemoryCorrRMWTestCode = `
+ atomicExchange(&test_locations.value[x_0], 1u);
+ let r0 = atomicLoad(&test_locations.value[x_1]);
+ let r1 = atomicAdd(&test_locations.value[y_1], 0u);
+ atomicStore(&results.value[id_1].r0, r0);
+ atomicStore(&results.value[id_1].r1, r1);
+`;
+
+const workgroupStorageMemoryCorrRMWTestCode = `
+ atomicExchange(&test_locations.value[x_0], 1u);
+ let r0 = atomicLoad(&test_locations.value[x_1]);
+ let r1 = atomicAdd(&test_locations.value[y_1], 0u);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_1].r0, r0);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_1].r1, r1);
+`;
+
+const workgroupMemoryCorrTestCode = `
+ atomicStore(&wg_test_locations[x_0], 1u);
+ let r0 = atomicLoad(&wg_test_locations[x_1]);
+ let r1 = atomicLoad(&wg_test_locations[y_1]);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_1].r0, r0);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_1].r1, r1);
+`;
+
+const workgroupMemoryCorrRMWTestCode = `
+ atomicExchange(&wg_test_locations[x_0], 1u);
+ let r0 = atomicLoad(&wg_test_locations[x_1]);
+ let r1 = atomicAdd(&wg_test_locations[y_1], 0u);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_1].r0, r0);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_1].r1, r1);
+`;
+
+g.test('corr')
+ .desc(
+ `Ensures two reads on one thread cannot observe an inconsistent view of a write on a second thread.
+    The first thread writes the value 1 to some location x, and the second thread reads x twice in a row.
+ If the first read returns 1 but the second read returns 0, then there has been a coherence violation.
+ `
+ )
+ .paramsSimple([
+ {
+ memType: MemoryType.AtomicStorageClass,
+ testType: TestType.InterWorkgroup,
+ _testCode: storageMemoryCorrTestCode,
+ },
+ {
+ memType: MemoryType.AtomicStorageClass,
+ testType: TestType.InterWorkgroup,
+ _testCode: storageMemoryCorrRMWTestCode,
+ extraFlags: 'rmw_variant',
+ },
+ {
+ memType: MemoryType.AtomicStorageClass,
+ testType: TestType.IntraWorkgroup,
+ _testCode: workgroupStorageMemoryCorrTestCode,
+ },
+ {
+ memType: MemoryType.AtomicStorageClass,
+ testType: TestType.IntraWorkgroup,
+ _testCode: workgroupStorageMemoryCorrRMWTestCode,
+ extraFlags: 'rmw_variant',
+ },
+ {
+ memType: MemoryType.AtomicWorkgroupClass,
+ testType: TestType.IntraWorkgroup,
+ _testCode: workgroupMemoryCorrTestCode,
+ },
+ {
+ memType: MemoryType.AtomicWorkgroupClass,
+ testType: TestType.IntraWorkgroup,
+ _testCode: workgroupMemoryCorrRMWTestCode,
+ extraFlags: 'rmw_variant',
+ },
+ ])
+ .fn(async t => {
+ const resultCode = `
+ if ((r0 == 0u && r1 == 0u)) {
+ atomicAdd(&test_results.seq0, 1u);
+ } else if ((r0 == 1u && r1 == 1u)) {
+ atomicAdd(&test_results.seq1, 1u);
+ } else if ((r0 == 0u && r1 == 1u)) {
+ atomicAdd(&test_results.interleaved, 1u);
+ } else if ((r0 == 1u && r1 == 0u)) {
+ atomicAdd(&test_results.weak, 1u);
+ }
+ `;
+ const testShader = buildTestShader(t.params._testCode, t.params.memType, t.params.testType);
+ const resultShader = buildResultShader(resultCode, t.params.testType, ResultType.FourBehavior);
+ const memModelTester = new MemoryModelTester(
+ t,
+ memoryModelTestParams,
+ testShader,
+ resultShader
+ );
+ await memModelTester.run(60, 3);
+ });
+
+const storageMemoryCowwTestCode = `
+ atomicStore(&test_locations.value[x_0], 1u);
+ atomicStore(&test_locations.value[y_0], 2u);
+`;
+
+const storageMemoryCowwRMWTestCode = `
+ atomicExchange(&test_locations.value[x_0], 1u);
+ atomicStore(&test_locations.value[y_0], 2u);
+`;
+
+const workgroupMemoryCowwTestCode = `
+ atomicStore(&wg_test_locations[x_0], 1u);
+ atomicStore(&wg_test_locations[y_0], 2u);
+ workgroupBarrier();
+ atomicStore(&test_locations.value[shuffled_workgroup * workgroupXSize * stress_params.mem_stride * 2u + x_0], atomicLoad(&wg_test_locations[x_0]));
+`;
+
+const workgroupMemoryCowwRMWTestCode = `
+ atomicExchange(&wg_test_locations[x_0], 1u);
+ atomicStore(&wg_test_locations[y_0], 2u);
+ workgroupBarrier();
+ atomicStore(&test_locations.value[shuffled_workgroup * workgroupXSize * stress_params.mem_stride * 2u + x_0], atomicLoad(&wg_test_locations[x_0]));
+`;
+
+g.test('coww')
+ .desc(
+ `Ensures two writes on one thread do not lead to incoherent results. The thread first writes 1 to
+ some location x and then writes 2 to the same location. If the value in memory after the test finishes
+ is 1, then there has been a coherence violation.
+ `
+ )
+ .paramsSimple([
+ {
+ memType: MemoryType.AtomicStorageClass,
+ testType: TestType.InterWorkgroup,
+ _testCode: storageMemoryCowwTestCode,
+ },
+ {
+ memType: MemoryType.AtomicStorageClass,
+ testType: TestType.InterWorkgroup,
+ _testCode: storageMemoryCowwRMWTestCode,
+ extraFlags: 'rmw_variant',
+ },
+ {
+ memType: MemoryType.AtomicStorageClass,
+ testType: TestType.IntraWorkgroup,
+ _testCode: storageMemoryCowwTestCode,
+ },
+ {
+ memType: MemoryType.AtomicStorageClass,
+ testType: TestType.IntraWorkgroup,
+ _testCode: storageMemoryCowwRMWTestCode,
+ extraFlags: 'rmw_variant',
+ },
+ {
+ memType: MemoryType.AtomicWorkgroupClass,
+ testType: TestType.IntraWorkgroup,
+ _testCode: workgroupMemoryCowwTestCode,
+ },
+ {
+ memType: MemoryType.AtomicWorkgroupClass,
+ testType: TestType.IntraWorkgroup,
+ _testCode: workgroupMemoryCowwRMWTestCode,
+ extraFlags: 'rmw_variant',
+ },
+ ])
+ .fn(async t => {
+ const resultCode = `
+ if (mem_x_0 == 2u) {
+ atomicAdd(&test_results.seq, 1u);
+ } else if (mem_x_0 == 1u) {
+ atomicAdd(&test_results.weak, 1u);
+ }
+ `;
+ const testShader = buildTestShader(t.params._testCode, t.params.memType, t.params.testType);
+ const resultShader = buildResultShader(resultCode, t.params.testType, ResultType.TwoBehavior);
+ const params = {
+ ...memoryModelTestParams,
+ numBehaviors: 2,
+ };
+ const memModelTester = new MemoryModelTester(t, params, testShader, resultShader);
+ await memModelTester.run(60, 1);
+ });
+
+const storageMemoryCowrTestCode = `
+ atomicStore(&test_locations.value[x_0], 1u);
+ let r0 = atomicLoad(&test_locations.value[y_0]);
+ atomicStore(&test_locations.value[x_1], 2u);
+ atomicStore(&results.value[id_0].r0, r0);
+`;
+
+const workgroupStorageMemoryCowrTestCode = `
+ atomicStore(&test_locations.value[x_0], 1u);
+ let r0 = atomicLoad(&test_locations.value[y_0]);
+ atomicStore(&test_locations.value[x_1], 2u);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_0].r0, r0);
+`;
+
+const storageMemoryCowrRMWTestCode = `
+ atomicExchange(&test_locations.value[x_0], 1u);
+ let r0 = atomicAdd(&test_locations.value[y_0], 0u);
+ atomicExchange(&test_locations.value[x_1], 2u);
+ atomicStore(&results.value[id_0].r0, r0);
+`;
+
+const workgroupStorageMemoryCowrRMWTestCode = `
+ atomicExchange(&test_locations.value[x_0], 1u);
+ let r0 = atomicAdd(&test_locations.value[y_0], 0u);
+ atomicExchange(&test_locations.value[x_1], 2u);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_0].r0, r0);
+`;
+
+const workgroupMemoryCowrTestCode = `
+ atomicStore(&wg_test_locations[x_0], 1u);
+ let r0 = atomicLoad(&wg_test_locations[y_0]);
+ atomicStore(&wg_test_locations[x_1], 2u);
+ workgroupBarrier();
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_0].r0, r0);
+ atomicStore(&test_locations.value[shuffled_workgroup * workgroupXSize * stress_params.mem_stride * 2u + x_1], atomicLoad(&wg_test_locations[x_1]));
+`;
+
+const workgroupMemoryCowrRMWTestCode = `
+ atomicExchange(&wg_test_locations[x_0], 1u);
+ let r0 = atomicAdd(&wg_test_locations[y_0], 0u);
+ atomicExchange(&wg_test_locations[x_1], 2u);
+ workgroupBarrier();
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_0].r0, r0);
+ atomicStore(&test_locations.value[shuffled_workgroup * workgroupXSize * stress_params.mem_stride * 2u + x_1], atomicLoad(&wg_test_locations[x_1]));
+`;
+
+g.test('cowr')
+ .desc(
+ `The first thread first writes 1 to some location x and then reads x. The second thread writes 2 to x.
+ If the first thread reads the value 2 and the value in memory at the end of the test is 1, then the read
+ and write on the first thread have been reordered, a coherence violation.
+ `
+ )
+ .paramsSimple([
+ {
+ memType: MemoryType.AtomicStorageClass,
+ testType: TestType.InterWorkgroup,
+ _testCode: storageMemoryCowrTestCode,
+ },
+ {
+ memType: MemoryType.AtomicStorageClass,
+ testType: TestType.InterWorkgroup,
+ _testCode: storageMemoryCowrRMWTestCode,
+ extraFlags: 'rmw_variant',
+ },
+ {
+ memType: MemoryType.AtomicStorageClass,
+ testType: TestType.IntraWorkgroup,
+ _testCode: workgroupStorageMemoryCowrTestCode,
+ },
+ {
+ memType: MemoryType.AtomicStorageClass,
+ testType: TestType.IntraWorkgroup,
+ _testCode: workgroupStorageMemoryCowrRMWTestCode,
+ extraFlags: 'rmw_variant',
+ },
+ {
+ memType: MemoryType.AtomicWorkgroupClass,
+ testType: TestType.IntraWorkgroup,
+ _testCode: workgroupMemoryCowrTestCode,
+ },
+ {
+ memType: MemoryType.AtomicWorkgroupClass,
+ testType: TestType.IntraWorkgroup,
+ _testCode: workgroupMemoryCowrRMWTestCode,
+ extraFlags: 'rmw_variant',
+ },
+ ])
+ .fn(async t => {
+ const resultCode = `
+ if ((r0 == 1u && mem_x_0 == 2u)) {
+ atomicAdd(&test_results.seq0, 1u);
+ } else if ((r0 == 1u && mem_x_0 == 1u)) {
+ atomicAdd(&test_results.seq1, 1u);
+ } else if ((r0 == 2u && mem_x_0 == 2u)) {
+ atomicAdd(&test_results.interleaved, 1u);
+ } else if ((r0 == 2u && mem_x_0 == 1u)) {
+ atomicAdd(&test_results.weak, 1u);
+ }
+ `;
+ const testShader = buildTestShader(t.params._testCode, t.params.memType, t.params.testType);
+ const resultShader = buildResultShader(resultCode, t.params.testType, ResultType.FourBehavior);
+ const memModelTester = new MemoryModelTester(
+ t,
+ memoryModelTestParams,
+ testShader,
+ resultShader
+ );
+ await memModelTester.run(60, 3);
+ });
+
+const storageMemoryCorw1TestCode = `
+ let r0 = atomicLoad(&test_locations.value[x_0]);
+ atomicStore(&test_locations.value[x_0], 1u);
+ workgroupBarrier();
+ atomicStore(&results.value[id_0].r0, r0);
+`;
+
+const workgroupStorageMemoryCorw1TestCode = `
+ let r0 = atomicLoad(&test_locations.value[x_0]);
+ atomicStore(&test_locations.value[y_0], 1u);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_0].r0, r0);
+`;
+
+const workgroupMemoryCorw1TestCode = `
+ let r0 = atomicLoad(&wg_test_locations[x_0]);
+ atomicStore(&wg_test_locations[y_0], 1u);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_0].r0, r0);
+`;
+
+g.test('corw1')
+ .desc(
+ `One thread first reads from a memory location x and then writes 1 to x. If the read observes the subsequent
+ write, there has been a coherence violation.
+ `
+ )
+ .paramsSimple([
+ {
+ memType: MemoryType.AtomicStorageClass,
+ testType: TestType.InterWorkgroup,
+ _testCode: storageMemoryCorw1TestCode,
+ },
+ {
+ memType: MemoryType.AtomicStorageClass,
+ testType: TestType.IntraWorkgroup,
+ _testCode: workgroupStorageMemoryCorw1TestCode,
+ },
+ {
+ memType: MemoryType.AtomicWorkgroupClass,
+ testType: TestType.IntraWorkgroup,
+ _testCode: workgroupMemoryCorw1TestCode,
+ },
+ ])
+ .fn(async t => {
+ const resultCode = `
+ if (r0 == 0u) {
+ atomicAdd(&test_results.seq, 1u);
+ } else if (r0 == 1u) {
+ atomicAdd(&test_results.weak, 1u);
+ }
+ `;
+ const testShader = buildTestShader(t.params._testCode, t.params.memType, t.params.testType);
+ const resultShader = buildResultShader(resultCode, t.params.testType, ResultType.TwoBehavior);
+ const params = {
+ ...memoryModelTestParams,
+ numBehaviors: 2,
+ };
+ const memModelTester = new MemoryModelTester(t, params, testShader, resultShader);
+ await memModelTester.run(60, 1);
+ });
+
+const storageMemoryCorw2TestCode = `
+ let r0 = atomicLoad(&test_locations.value[x_0]);
+ atomicStore(&test_locations.value[y_0], 1u);
+ atomicStore(&test_locations.value[x_1], 2u);
+ atomicStore(&results.value[id_0].r0, r0);
+`;
+
+const workgroupStorageMemoryCorw2TestCode = `
+ let r0 = atomicLoad(&test_locations.value[x_0]);
+ atomicStore(&test_locations.value[y_0], 1u);
+ atomicStore(&test_locations.value[x_1], 2u);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_0].r0, r0);
+`;
+
+const storageMemoryCorw2RMWTestCode = `
+ let r0 = atomicLoad(&test_locations.value[x_0]);
+ atomicStore(&test_locations.value[y_0], 1u);
+ atomicExchange(&test_locations.value[x_1], 2u);
+ atomicStore(&results.value[id_0].r0, r0);
+`;
+
+const workgroupStorageMemoryCorw2RMWTestCode = `
+ let r0 = atomicLoad(&test_locations.value[x_0]);
+ atomicStore(&test_locations.value[y_0], 1u);
+ atomicExchange(&test_locations.value[x_1], 2u);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_0].r0, r0);
+`;
+
+const workgroupMemoryCorw2TestCode = `
+ let r0 = atomicLoad(&wg_test_locations[x_0]);
+ atomicStore(&wg_test_locations[y_0], 1u);
+ atomicStore(&wg_test_locations[x_1], 2u);
+ workgroupBarrier();
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_0].r0, r0);
+ atomicStore(&test_locations.value[shuffled_workgroup * workgroupXSize * stress_params.mem_stride * 2u + x_1], atomicLoad(&wg_test_locations[x_1]));
+`;
+
+const workgroupMemoryCorw2RMWTestCode = `
+ let r0 = atomicLoad(&wg_test_locations[x_0]);
+ atomicStore(&wg_test_locations[y_0], 1u);
+ atomicExchange(&wg_test_locations[x_1], 2u);
+ workgroupBarrier();
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_0].r0, r0);
+ atomicStore(&test_locations.value[shuffled_workgroup * workgroupXSize * stress_params.mem_stride * 2u + x_1], atomicLoad(&wg_test_locations[x_1]));
+`;
+
+g.test('corw2')
+ .desc(
+ `The first thread reads from some memory location x, and then writes 1 to x. The second thread
+ writes 2 to x. If the first thread reads the value 2, but the value in memory after the test
+ completes is 1, then the instructions on the first thread have been re-ordered, leading to a
+ coherence violation.
+ `
+ )
+ .paramsSimple([
+ {
+ memType: MemoryType.AtomicStorageClass,
+ testType: TestType.InterWorkgroup,
+ _testCode: storageMemoryCorw2TestCode,
+ },
+ {
+ memType: MemoryType.AtomicStorageClass,
+ testType: TestType.InterWorkgroup,
+ _testCode: storageMemoryCorw2RMWTestCode,
+ extraFlags: 'rmw_variant',
+ },
+ {
+ memType: MemoryType.AtomicStorageClass,
+ testType: TestType.IntraWorkgroup,
+ _testCode: workgroupStorageMemoryCorw2TestCode,
+ },
+ {
+ memType: MemoryType.AtomicStorageClass,
+ testType: TestType.IntraWorkgroup,
+ _testCode: workgroupStorageMemoryCorw2RMWTestCode,
+ extraFlags: 'rmw_variant',
+ },
+ {
+ memType: MemoryType.AtomicWorkgroupClass,
+ testType: TestType.IntraWorkgroup,
+ _testCode: workgroupMemoryCorw2TestCode,
+ },
+ {
+ memType: MemoryType.AtomicWorkgroupClass,
+ testType: TestType.IntraWorkgroup,
+ _testCode: workgroupMemoryCorw2RMWTestCode,
+ extraFlags: 'rmw_variant',
+ },
+ ])
+ .fn(async t => {
+ const resultCode = `
+ if ((r0 == 0u && mem_x_0 == 2u)) {
+ atomicAdd(&test_results.seq0, 1u);
+ } else if ((r0 == 2u && mem_x_0 == 1u)) {
+ atomicAdd(&test_results.seq1, 1u);
+ } else if ((r0 == 0u && mem_x_0 == 1u)) {
+ atomicAdd(&test_results.interleaved, 1u);
+ } else if ((r0 == 2u && mem_x_0 == 2u)) {
+ atomicAdd(&test_results.weak, 1u);
+ }
+ `;
+ const testShader = buildTestShader(t.params._testCode, t.params.memType, t.params.testType);
+ const resultShader = buildResultShader(resultCode, t.params.testType, ResultType.FourBehavior);
+ const memModelTester = new MemoryModelTester(
+ t,
+ memoryModelTestParams,
+ testShader,
+ resultShader
+ );
+ await memModelTester.run(60, 3);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/memory_model/memory_model_setup.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/memory_model/memory_model_setup.ts
new file mode 100644
index 0000000000..f8e5b9034c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/memory_model/memory_model_setup.ts
@@ -0,0 +1,1118 @@
+import { GPUTest } from '../../../gpu_test.js';
+import { checkElementsPassPredicate } from '../../../util/check_contents.js';
+
+/* All buffer sizes are counted in units of 4-byte words. */
+
+/**
+ * The value type loaded and stored from memory.
+ * This is what the WGSL spec calls 'store type' for the locations being accessed.
+ * The GPU buffers are sized assuming this type is at most 4 bytes.
+ *
+ * 'u32' is the default case; it can be atomically loaded and stored.
+ * 'f16' is interesting because it is not 32-bits, and can't be the store type
+ * for atomic accesses.
+ */
+export type AccessValueType = 'f16' | 'u32';
+export const kAccessValueTypes = ['f16', 'u32'] as const;
+
+/* Parameter values are set heuristically, typically by a time-intensive search. */
+export type MemoryModelTestParams = {
+ /* Number of invocations per workgroup. The workgroups are 1-dimensional. */
+ workgroupSize: number;
+ /** The number of workgroups to assign to running the test. */
+ testingWorkgroups: number;
+ /**
+ * Run no more than this many workgroups. Must be >= the number of testing workgroups. Non-testing workgroups are used
+ * to stress other memory locations.
+ */
+ maxWorkgroups: number;
+ /** The percentage of iterations to shuffle the workgroup ids. */
+ shufflePct: number;
+ /** The percentage of iterations to run the bounded spin-loop barrier. */
+ barrierPct: number;
+ /** The percentage of iterations to run memory stress using non-testing workgroups. */
+ memStressPct: number;
+ /** The number of iterations to run the memory stress pattern. */
+ memStressIterations: number;
+ /** The percentage of iterations the first instruction in the stress pattern should be a store. */
+ memStressStoreFirstPct: number;
+ /** The percentage of iterations the second instruction in the stress pattern should be a store. */
+ memStressStoreSecondPct: number;
+ /** The percentage of iterations for testing threads to run stress before running the test. */
+ preStressPct: number;
+ /** Same as for memStressIterations. */
+ preStressIterations: number;
+ /** The percentage of iterations the first instruction in the pre-stress pattern should be a store. */
+ preStressStoreFirstPct: number;
+ /** The percentage of iterations the second instruction in the pre-stress pattern should be a store. */
+ preStressStoreSecondPct: number;
+ /** The size of the scratch memory region, used for stressing threads. */
+ scratchMemorySize: number;
+ /** The size of each block of memory stressing threads access. */
+ stressLineSize: number;
+ /** The number of blocks of memory to assign stressing threads to. */
+ stressTargetLines: number;
+ /** How non-testing threads are assigned to stressing locations. 100 means all iterations use a round robin approach, 0 means all use a chunking approach. */
+ stressStrategyBalancePct: number;
+ /** Used to permute thread ids within a workgroup, so more random pairings are created between threads coordinating on a test. */
+ permuteFirst: number;
+ /** Used to create distance between memory locations used in a test. Set this to 1 for memory that should be aliased. */
+ permuteSecond: number;
+ /** The distance (in number of 4 byte intervals) between any two memory locations used for testing. */
+ memStride: number;
+ /** For tests that access one memory location, but use dynamic addresses to avoid compiler optimization, aliased memory should be set to true. */
+ aliasedMemory: boolean;
+ /** The number of possible behaviors that a test can have. */
+ numBehaviors: number;
+};
+
+/** The number of memory locations accessed by a test. Currently, only tests with up to 2 memory locations are supported. */
+const numMemLocations = 2;
+
+/** The number of read outputs per test that need to be analyzed in the result aggregation shader. Currently, only tests with up to 2 read outputs are supported. */
+const numReadOutputs = 2;
+
+/** Represents a device buffer and a utility buffer for resetting memory and copying parameters. */
+type BufferWithSource = {
+ /** Buffer used by shader code. */
+ deviceBuf: GPUBuffer;
+  /** Buffer populated from the host side; its data is copied to the device buffer for use by the shader. */
+ srcBuf: GPUBuffer;
+ /** Size in bytes of the buffer. */
+ size: number;
+};
+
+/** Specifies the buffers used during a memory model test. */
+type MemoryModelBuffers = {
+ /** This is the memory region that testing threads read from and write to. */
+ testLocations: BufferWithSource;
+ /** This buffer collects the results of reads for analysis in the result aggregation shader. */
+ readResults: BufferWithSource;
+ /** This buffer is the aggregated results of every testing thread, and is used to check for test success/failure. */
+ testResults: BufferWithSource;
+ /** This buffer stores the shuffled workgroup ids for use during testing. Read-only in the shader. */
+ shuffledWorkgroups: BufferWithSource;
+ /** This is the bounded spin-loop barrier, used to temporally align testing threads. */
+ barrier: BufferWithSource;
+ /** Memory region for stressing threads to read to and write from. */
+ scratchpad: BufferWithSource;
+ /** The memory locations in the scratch region that stressing threads access. */
+ scratchMemoryLocations: BufferWithSource;
+ /** Parameters that are used by the shader to calculate memory locations and perform stress. */
+ stressParams: BufferWithSource;
+};
+
+/** The number of stress params to add to the stress params buffer. */
+const numStressParams = 12;
+const barrierParamIndex = 0;
+const memStressIndex = 1;
+const memStressIterationsIndex = 2;
+const memStressPatternIndex = 3;
+const preStressIndex = 4;
+const preStressIterationsIndex = 5;
+const preStressPatternIndex = 6;
+const permuteFirstIndex = 7;
+const permuteSecondIndex = 8;
+const testingWorkgroupsIndex = 9;
+const memStrideIndex = 10;
+const memLocationOffsetIndex = 11;
+
+/**
+ * All memory used in these tests is counted in four-byte words, so this value is used to correctly set the byte size of
+ * buffers that are read from/written to during tests and for storing test results.
+ */
+const bytesPerWord = 4;
+
+/**
+ * Returns the shader preamble based on the access value type:
+ * - enable directives, if necessary
+ * - the type alias for AccessValueType
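+ *
+ * For example, shaderPreamble('f16') returns "enable f16;\nalias AccessValueTy = f16;\n".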
+ */
+function shaderPreamble(accessValueType: AccessValueType): string {
+ if (accessValueType === 'f16') {
+ return 'enable f16;\nalias AccessValueTy = f16;\n';
+ }
+ return `alias AccessValueTy = ${accessValueType};\n`;
+}
+
+/**
+ * Implements setup code necessary to run a memory model test. A test consists of two parts:
+ * 1.) A test shader that runs a specified memory model litmus test and attempts to reveal a weak (disallowed) behavior.
+ * At a high level, a test shader consists of a set of testing workgroups where every invocation executes the litmus test
+ * on a set of test locations, and a set of stressing workgroups where every invocation accesses a specified memory location
+ * in a random pattern.
+ *
+ * The main buffer variables are:
+ *
+ * `test_locations`: invocations access entries in this array, trying to
+ * evoke weak behaviours.
+ *
+ * This is array<AccessValueTy> or array<atomic<u32>>.
+ * AccessValueTy is either f16 or u32.
+ * Note that atomic<u32> is only used when AccessValueTy is u32.
+ *
+ * `results`: holds the observed values, which is where we can see
+ * whether a weak behaviour was observed.
+ *
+ * This is an array<atomic<u32>>.
+ *
+ * The others are used to parameterize and stress the main activity.
+ *
+ * 2.) A result shader that takes the output of the test shader, which consists of the memory locations accessed during the test
+ *    and the results of any reads made during the test, and aggregates them based on the possible behaviors of the test.
+ *
+ * The first two buffer variables are the same buffers as for the test shader:
+ *
+ * `test_locations` is the same as `test_locations` from the test shader,
+ * but is mapped as array<AccessValueTy>.
+ *
+ * `read_results` is the same buffer as `results` from the test shader.
+ *
+ * The other variables are used to accumulate a summary that counts the weak behaviours stimulated and recorded by the
+ * test shader.
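+ *
+ * Typical usage from a *.spec.ts file (mirroring atomicity.spec.ts and the other spec files):
+ *
+ *   const testShader = buildTestShader(testCode, memType, testType);
+ *   const resultShader = buildResultShader(resultCode, testType, ResultType.FourBehavior);
+ *   const memModelTester = new MemoryModelTester(t, memoryModelTestParams, testShader, resultShader);
+ *   await memModelTester.run(iterations, weakIndex);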
+ */
+export class MemoryModelTester {
+ protected test: GPUTest;
+ protected params: MemoryModelTestParams;
+ protected buffers: MemoryModelBuffers;
+ protected testPipeline: GPUComputePipeline;
+ protected testBindGroup: GPUBindGroup;
+ protected resultPipeline: GPUComputePipeline;
+ protected resultBindGroup: GPUBindGroup;
+
+ /** Sets up a memory model test by initializing buffers and pipeline layouts. */
+ constructor(
+ t: GPUTest,
+ params: MemoryModelTestParams,
+ testShader: string,
+ resultShader: string,
+ accessValueType: AccessValueType = 'u32'
+ ) {
+ this.test = t;
+ this.params = params;
+
+ testShader = shaderPreamble(accessValueType) + testShader;
+ resultShader = shaderPreamble(accessValueType) + resultShader;
+
+ // set up buffers
+ const testingThreads = this.params.workgroupSize * this.params.testingWorkgroups;
+ const testLocationsSize =
+ testingThreads * numMemLocations * this.params.memStride * bytesPerWord;
+ const testLocationsBuffer: BufferWithSource = {
+ deviceBuf: this.test.device.createBuffer({
+ size: testLocationsSize,
+ usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.STORAGE,
+ }),
+ srcBuf: this.test.device.createBuffer({
+ size: testLocationsSize,
+ usage: GPUBufferUsage.COPY_SRC,
+ }),
+ size: testLocationsSize,
+ };
+
+ const readResultsSize = testingThreads * numReadOutputs * bytesPerWord;
+ const readResultsBuffer: BufferWithSource = {
+ deviceBuf: this.test.device.createBuffer({
+ size: readResultsSize,
+ usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.STORAGE,
+ }),
+ srcBuf: this.test.device.createBuffer({
+ size: readResultsSize,
+ usage: GPUBufferUsage.COPY_SRC,
+ }),
+ size: readResultsSize,
+ };
+
+ const testResultsSize = this.params.numBehaviors * bytesPerWord;
+ const testResultsBuffer: BufferWithSource = {
+ deviceBuf: this.test.device.createBuffer({
+ size: testResultsSize,
+ usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ }),
+ srcBuf: this.test.device.createBuffer({
+ size: testResultsSize,
+ usage: GPUBufferUsage.COPY_SRC,
+ }),
+ size: testResultsSize,
+ };
+
+ const shuffledWorkgroupsSize = this.params.maxWorkgroups * bytesPerWord;
+ const shuffledWorkgroupsBuffer: BufferWithSource = {
+ deviceBuf: this.test.device.createBuffer({
+ size: shuffledWorkgroupsSize,
+ usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.STORAGE,
+ }),
+ srcBuf: this.test.device.createBuffer({
+ size: shuffledWorkgroupsSize,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.MAP_WRITE,
+ }),
+ size: shuffledWorkgroupsSize,
+ };
+
+ const barrierSize = bytesPerWord;
+ const barrierBuffer: BufferWithSource = {
+ deviceBuf: this.test.device.createBuffer({
+ size: barrierSize,
+ usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.STORAGE,
+ }),
+ srcBuf: this.test.device.createBuffer({
+ size: barrierSize,
+ usage: GPUBufferUsage.COPY_SRC,
+ }),
+ size: barrierSize,
+ };
+
+ const scratchpadSize = this.params.scratchMemorySize * bytesPerWord;
+ const scratchpadBuffer: BufferWithSource = {
+ deviceBuf: this.test.device.createBuffer({
+ size: scratchpadSize,
+ usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.STORAGE,
+ }),
+ srcBuf: this.test.device.createBuffer({
+ size: scratchpadSize,
+ usage: GPUBufferUsage.COPY_SRC,
+ }),
+ size: scratchpadSize,
+ };
+
+ const scratchMemoryLocationsSize = this.params.maxWorkgroups * bytesPerWord;
+ const scratchMemoryLocationsBuffer: BufferWithSource = {
+ deviceBuf: this.test.device.createBuffer({
+ size: scratchMemoryLocationsSize,
+ usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.STORAGE,
+ }),
+ srcBuf: this.test.device.createBuffer({
+ size: scratchMemoryLocationsSize,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.MAP_WRITE,
+ }),
+ size: scratchMemoryLocationsSize,
+ };
+
+ const stressParamsSize = numStressParams * bytesPerWord;
+ const stressParamsBuffer: BufferWithSource = {
+ deviceBuf: this.test.device.createBuffer({
+ size: stressParamsSize,
+ usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.UNIFORM,
+ }),
+ srcBuf: this.test.device.createBuffer({
+ size: stressParamsSize,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.MAP_WRITE,
+ }),
+ size: stressParamsSize,
+ };
+
+ this.buffers = {
+ testLocations: testLocationsBuffer,
+ readResults: readResultsBuffer,
+ testResults: testResultsBuffer,
+ shuffledWorkgroups: shuffledWorkgroupsBuffer,
+ barrier: barrierBuffer,
+ scratchpad: scratchpadBuffer,
+ scratchMemoryLocations: scratchMemoryLocationsBuffer,
+ stressParams: stressParamsBuffer,
+ };
+
+ // set up pipeline layouts
+ const testLayout = this.test.device.createBindGroupLayout({
+ entries: [
+ { binding: 0, visibility: GPUShaderStage.COMPUTE, buffer: { type: 'storage' } },
+ { binding: 1, visibility: GPUShaderStage.COMPUTE, buffer: { type: 'storage' } },
+ { binding: 2, visibility: GPUShaderStage.COMPUTE, buffer: { type: 'read-only-storage' } },
+ { binding: 3, visibility: GPUShaderStage.COMPUTE, buffer: { type: 'storage' } },
+ { binding: 4, visibility: GPUShaderStage.COMPUTE, buffer: { type: 'storage' } },
+ { binding: 5, visibility: GPUShaderStage.COMPUTE, buffer: { type: 'storage' } },
+ { binding: 6, visibility: GPUShaderStage.COMPUTE, buffer: { type: 'uniform' } },
+ ],
+ });
+ this.testPipeline = this.test.device.createComputePipeline({
+ layout: this.test.device.createPipelineLayout({
+ bindGroupLayouts: [testLayout],
+ }),
+ compute: {
+ module: this.test.device.createShaderModule({
+ code: testShader,
+ }),
+ entryPoint: 'main',
+ },
+ });
+ this.testBindGroup = this.test.device.createBindGroup({
+ entries: [
+ { binding: 0, resource: { buffer: this.buffers.testLocations.deviceBuf } },
+ { binding: 1, resource: { buffer: this.buffers.readResults.deviceBuf } },
+ { binding: 2, resource: { buffer: this.buffers.shuffledWorkgroups.deviceBuf } },
+ { binding: 3, resource: { buffer: this.buffers.barrier.deviceBuf } },
+ { binding: 4, resource: { buffer: this.buffers.scratchpad.deviceBuf } },
+ { binding: 5, resource: { buffer: this.buffers.scratchMemoryLocations.deviceBuf } },
+ { binding: 6, resource: { buffer: this.buffers.stressParams.deviceBuf } },
+ ],
+ layout: testLayout,
+ });
+
+ const resultLayout = this.test.device.createBindGroupLayout({
+ entries: [
+ { binding: 0, visibility: GPUShaderStage.COMPUTE, buffer: { type: 'storage' } },
+ { binding: 1, visibility: GPUShaderStage.COMPUTE, buffer: { type: 'storage' } },
+ { binding: 2, visibility: GPUShaderStage.COMPUTE, buffer: { type: 'storage' } },
+ { binding: 3, visibility: GPUShaderStage.COMPUTE, buffer: { type: 'uniform' } },
+ ],
+ });
+ this.resultPipeline = this.test.device.createComputePipeline({
+ layout: this.test.device.createPipelineLayout({
+ bindGroupLayouts: [resultLayout],
+ }),
+ compute: {
+ module: this.test.device.createShaderModule({
+ code: resultShader,
+ }),
+ entryPoint: 'main',
+ },
+ });
+ this.resultBindGroup = this.test.device.createBindGroup({
+ entries: [
+ { binding: 0, resource: { buffer: this.buffers.testLocations.deviceBuf } },
+ { binding: 1, resource: { buffer: this.buffers.readResults.deviceBuf } },
+ { binding: 2, resource: { buffer: this.buffers.testResults.deviceBuf } },
+ { binding: 3, resource: { buffer: this.buffers.stressParams.deviceBuf } },
+ ],
+ layout: resultLayout,
+ });
+ }
+
+ /**
+ * Run the test for the specified number of iterations. Checks the testResults buffer on the weakIndex; if
+ * this value is not 0 then the test has failed. The number of iterations is chosen per test so that the
+ * full set of tests meets some time budget while still being reasonably effective at uncovering issues.
+ * Currently, we aim for each test to complete in under one second.
+ */
+ async run(iterations: number, weakIndex: number): Promise<void> {
+ for (let i = 0; i < iterations; i++) {
+ const numWorkgroups = this.getRandomInRange(
+ this.params.testingWorkgroups,
+ this.params.maxWorkgroups
+ );
+ await this.setShuffledWorkgroups(numWorkgroups);
+ await this.setScratchLocations(numWorkgroups);
+ await this.setStressParams();
+ const encoder = this.test.device.createCommandEncoder();
+ this.copyBufferToBuffer(encoder, this.buffers.testLocations);
+ this.copyBufferToBuffer(encoder, this.buffers.readResults);
+ this.copyBufferToBuffer(encoder, this.buffers.testResults);
+ this.copyBufferToBuffer(encoder, this.buffers.barrier);
+ this.copyBufferToBuffer(encoder, this.buffers.shuffledWorkgroups);
+ this.copyBufferToBuffer(encoder, this.buffers.scratchpad);
+ this.copyBufferToBuffer(encoder, this.buffers.scratchMemoryLocations);
+ this.copyBufferToBuffer(encoder, this.buffers.stressParams);
+
+ const testPass = encoder.beginComputePass();
+ testPass.setPipeline(this.testPipeline);
+ testPass.setBindGroup(0, this.testBindGroup);
+ testPass.dispatchWorkgroups(numWorkgroups);
+ testPass.end();
+
+ const resultPass = encoder.beginComputePass();
+ resultPass.setPipeline(this.resultPipeline);
+ resultPass.setBindGroup(0, this.resultBindGroup);
+ resultPass.dispatchWorkgroups(this.params.testingWorkgroups);
+ resultPass.end();
+
+ this.test.device.queue.submit([encoder.finish()]);
+ this.test.expectGPUBufferValuesPassCheck(
+ this.buffers.testResults.deviceBuf,
+ this.checkWeakIndex(weakIndex),
+ {
+ type: Uint32Array,
+ typedLength: this.params.numBehaviors,
+ }
+ );
+ }
+ }
+
+ /** Returns a function that checks whether the test passes, given a weak index and the test results buffer. */
+ protected checkWeakIndex(weakIndex: number): (a: Uint32Array) => Error | undefined {
+ const checkResult = this.checkResult(weakIndex);
+ const resultPrinter = this.resultPrinter(weakIndex);
+ return function (a: Uint32Array): Error | undefined {
+ return checkElementsPassPredicate(a, checkResult, {
+ predicatePrinter: [{ leftHeader: 'expected ==', getValueForCell: resultPrinter }],
+ });
+ };
+ }
+
+ /**
+ * Returns a function that checks whether the specified weak index's value is not equal to 0.
+ * If the weak index's value is not 0, it means the test has observed a behavior disallowed by the memory model and
+ * is considered a test failure.
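+   *
+   * For example, with weakIndex 3 the returned predicate accepts element (i = 3, v = 0) but
+   * rejects (i = 3, v = 1), and accepts any value at every other index.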
+ */
+ protected checkResult(weakIndex: number): (i: number, v: number) => boolean {
+ return function (i: number, v: number): boolean {
+ if (i === weakIndex && v > 0) {
+ return false;
+ }
+ return true;
+ };
+ }
+
+ /** Returns a printer function that visualizes the results of checking the test results. */
+ protected resultPrinter(weakIndex: number): (i: number) => string | number {
+ return function (i: number): string | number {
+ if (i === weakIndex) {
+ return 0;
+ } else {
+ return 'any value';
+ }
+ };
+ }
+
+ /** Utility method that simplifies copying source buffers to device buffers. */
+ protected copyBufferToBuffer(encoder: GPUCommandEncoder, buffer: BufferWithSource): void {
+ encoder.copyBufferToBuffer(buffer.srcBuf, 0, buffer.deviceBuf, 0, buffer.size);
+ }
+
+  /** Returns a random integer in the range [0, max). */
+ protected getRandomInt(max: number): number {
+ return Math.floor(Math.random() * max);
+ }
+
+  /** Returns a random integer in the range [min, max). */
+ protected getRandomInRange(min: number, max: number): number {
+ if (min === max) {
+ return min;
+ } else {
+ const offset = this.getRandomInt(max - min);
+ return min + offset;
+ }
+ }
+
+  /** Permutes the given array in place using a simple Fisher-Yates shuffle algorithm. */
+ protected shuffleArray(a: number[]): void {
+ for (let i = a.length - 1; i >= 0; i--) {
+ const toSwap = this.getRandomInt(i + 1);
+ const temp = a[toSwap];
+ a[toSwap] = a[i];
+ a[i] = temp;
+ }
+ }
+
+ /**
+ * Shuffles the order of workgroup ids, so that threads operating on the same memory location are not always in
+ * consecutive workgroups.
+ */
+ protected async setShuffledWorkgroups(numWorkgroups: number): Promise<void> {
+ await this.buffers.shuffledWorkgroups.srcBuf.mapAsync(GPUMapMode.WRITE);
+ const shuffledWorkgroupsBuffer = this.buffers.shuffledWorkgroups.srcBuf.getMappedRange();
+ const shuffledWorkgroupsArray = new Uint32Array(shuffledWorkgroupsBuffer);
+ for (let i = 0; i < numWorkgroups; i++) {
+ shuffledWorkgroupsArray[i] = i;
+ }
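+    // With probability shufflePct, shuffle the workgroup ids (Fisher-Yates); otherwise leave them in order.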
+ if (this.getRandomInt(100) < this.params.shufflePct) {
+ for (let i = numWorkgroups - 1; i > 0; i--) {
+ const x = this.getRandomInt(i + 1);
+ const temp = shuffledWorkgroupsArray[i];
+ shuffledWorkgroupsArray[i] = shuffledWorkgroupsArray[x];
+ shuffledWorkgroupsArray[x] = temp;
+ }
+ }
+ this.buffers.shuffledWorkgroups.srcBuf.unmap();
+ }
+
+  /** Sets the memory locations that stressing workgroups will access. Uses either a chunking or round-robin assignment strategy. */
+ protected async setScratchLocations(numWorkgroups: number): Promise<void> {
+ await this.buffers.scratchMemoryLocations.srcBuf.mapAsync(GPUMapMode.WRITE);
+ const scratchLocationsArrayBuffer = this.buffers.scratchMemoryLocations.srcBuf.getMappedRange();
+ const scratchLocationsArray = new Uint32Array(scratchLocationsArrayBuffer);
+ const scratchNumRegions = this.params.scratchMemorySize / this.params.stressLineSize;
+ const scratchRegions = [...Array(scratchNumRegions).keys()];
+ this.shuffleArray(scratchRegions);
+ for (let i = 0; i < this.params.stressTargetLines; i++) {
+ const region = scratchRegions[i];
+ const locInRegion = this.getRandomInt(this.params.stressLineSize);
+ if (this.getRandomInt(100) < this.params.stressStrategyBalancePct) {
+ // In the round-robin case, the current scratch location is striped across all workgroups.
+ for (let j = i; j < numWorkgroups; j += this.params.stressTargetLines) {
+ scratchLocationsArray[j] = region * this.params.stressLineSize + locInRegion;
+ }
+ } else {
+ // In the chunking case, the current scratch location is assigned to a block of workgroups. The final scratch
+ // location may be assigned to more workgroups, if the number of scratch locations does not cleanly divide the
+ // number of workgroups.
+ const workgroupsPerLocation = numWorkgroups / this.params.stressTargetLines;
+ for (let j = 0; j < workgroupsPerLocation; j++) {
+ scratchLocationsArray[i * workgroupsPerLocation + j] =
+ region * this.params.stressLineSize + locInRegion;
+ }
+ if (
+ i === this.params.stressTargetLines - 1 &&
+ numWorkgroups % this.params.stressTargetLines !== 0
+ ) {
+ for (let j = 0; j < numWorkgroups % this.params.stressTargetLines; j++) {
+ scratchLocationsArray[numWorkgroups - j - 1] =
+ region * this.params.stressLineSize + locInRegion;
+ }
+ }
+ }
+ }
+ this.buffers.scratchMemoryLocations.srcBuf.unmap();
+ }
+
+ /** Sets the parameters that are used by the shader to calculate memory locations and perform stress. */
+ protected async setStressParams(): Promise<void> {
+ await this.buffers.stressParams.srcBuf.mapAsync(GPUMapMode.WRITE);
+ const stressParamsArrayBuffer = this.buffers.stressParams.srcBuf.getMappedRange();
+ const stressParamsArray = new Uint32Array(stressParamsArrayBuffer);
+ if (this.getRandomInt(100) < this.params.barrierPct) {
+ stressParamsArray[barrierParamIndex] = 1;
+ } else {
+ stressParamsArray[barrierParamIndex] = 0;
+ }
+ if (this.getRandomInt(100) < this.params.memStressPct) {
+ stressParamsArray[memStressIndex] = 1;
+ } else {
+ stressParamsArray[memStressIndex] = 0;
+ }
+ stressParamsArray[memStressIterationsIndex] = this.params.memStressIterations;
+ const memStressStoreFirst = this.getRandomInt(100) < this.params.memStressStoreFirstPct;
+ const memStressStoreSecond = this.getRandomInt(100) < this.params.memStressStoreSecondPct;
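+    // Encode the stress access pattern as expected by the shader's do_stress function:
+    // 0 = store-store, 1 = store-load, 2 = load-store, 3 = load-load.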
+ let memStressPattern;
+ if (memStressStoreFirst && memStressStoreSecond) {
+ memStressPattern = 0;
+ } else if (memStressStoreFirst && !memStressStoreSecond) {
+ memStressPattern = 1;
+ } else if (!memStressStoreFirst && memStressStoreSecond) {
+ memStressPattern = 2;
+ } else {
+ memStressPattern = 3;
+ }
+ stressParamsArray[memStressPatternIndex] = memStressPattern;
+ if (this.getRandomInt(100) < this.params.preStressPct) {
+ stressParamsArray[preStressIndex] = 1;
+ } else {
+ stressParamsArray[preStressIndex] = 0;
+ }
+ stressParamsArray[preStressIterationsIndex] = this.params.preStressIterations;
+ const preStressStoreFirst = this.getRandomInt(100) < this.params.preStressStoreFirstPct;
+ const preStressStoreSecond = this.getRandomInt(100) < this.params.preStressStoreSecondPct;
+ let preStressPattern;
+ if (preStressStoreFirst && preStressStoreSecond) {
+ preStressPattern = 0;
+ } else if (preStressStoreFirst && !preStressStoreSecond) {
+ preStressPattern = 1;
+ } else if (!preStressStoreFirst && preStressStoreSecond) {
+ preStressPattern = 2;
+ } else {
+ preStressPattern = 3;
+ }
+ stressParamsArray[preStressPatternIndex] = preStressPattern;
+ stressParamsArray[permuteFirstIndex] = this.params.permuteFirst;
+ stressParamsArray[permuteSecondIndex] = this.params.permuteSecond;
+ stressParamsArray[testingWorkgroupsIndex] = this.params.testingWorkgroups;
+ stressParamsArray[memStrideIndex] = this.params.memStride;
+ if (this.params.aliasedMemory) {
+ stressParamsArray[memLocationOffsetIndex] = 0;
+ } else {
+ stressParamsArray[memLocationOffsetIndex] = this.params.memStride;
+ }
+ this.buffers.stressParams.srcBuf.unmap();
+ }
+}
+
+/** Defines common data structures used in memory model test shaders. */
+const shaderMemStructures = `
+ struct Memory {
+ value: array<AccessValueTy>
+ };
+
+ struct AtomicMemory {
+ value: array<atomic<u32>>
+ };
+
+ struct IndexMemory {
+ value: array<u32>
+ };
+
+ struct ReadResult {
+ r0: atomic<u32>,
+ r1: atomic<u32>,
+ };
+
+ struct ReadResults {
+ value: array<ReadResult>
+ };
+
+ struct StressParamsMemory {
+ do_barrier: u32,
+ mem_stress: u32,
+ mem_stress_iterations: u32,
+ mem_stress_pattern: u32,
+ pre_stress: u32,
+ pre_stress_iterations: u32,
+ pre_stress_pattern: u32,
+ permute_first: u32,
+ permute_second: u32,
+ testing_workgroups: u32,
+ mem_stride: u32,
+ location_offset: u32,
+ };
+`;
+
+/**
+ * Structure to hold the counts of occurrences of the possible behaviors of a two-thread, four-instruction test.
+ * "seq0" means the first invocation's instructions are observed to have occurred before the second invocation's instructions.
+ * "seq1" means the second invocation's instructions are observed to have occurred before the first invocation's instructions.
+ * "interleaved" means there was an observation of some interleaving of instructions between the two invocations.
+ * "weak" means there was an observation of some ordering of instructions that is inconsistent with the WebGPU memory model.
+ */
+const fourBehaviorTestResultStructure = `
+ struct TestResults {
+ seq0: atomic<u32>,
+ seq1: atomic<u32>,
+ interleaved: atomic<u32>,
+ weak: atomic<u32>,
+ };
+`;
+
+/**
+ * Defines the possible behaviors of a two instruction test. Used to test the behavior of non-atomic memory with barriers and
+ * one-thread coherence tests.
+ * "seq" means that the expected, sequential behavior occurred.
+ * "weak" means that an unexpected, inconsistent behavior occurred.
+ */
+const twoBehaviorTestResultStructure = `
+ struct TestResults {
+ seq: atomic<u32>,
+ weak: atomic<u32>,
+ };
+`;
+
+/** Common bindings used in the test shader phase of a test. */
+const commonTestShaderBindings = `
+ @group(0) @binding(1) var<storage, read_write> results : ReadResults;
+ @group(0) @binding(2) var<storage, read> shuffled_workgroups : IndexMemory;
+ @group(0) @binding(3) var<storage, read_write> barrier : AtomicMemory;
+ @group(0) @binding(4) var<storage, read_write> scratchpad : IndexMemory;
+ @group(0) @binding(5) var<storage, read_write> scratch_locations : IndexMemory;
+ @group(0) @binding(6) var<uniform> stress_params : StressParamsMemory;
+`;
+
+/** The combined bindings for a test on atomic memory. */
+const atomicTestShaderBindings = [
+ `
+ @group(0) @binding(0) var<storage, read_write> test_locations : AtomicMemory;
+`,
+ commonTestShaderBindings,
+].join('\n');
+
+/** The combined bindings for a test on non-atomic memory. */
+const nonAtomicTestShaderBindings = [
+ `
+ @group(0) @binding(0) var<storage, read_write> test_locations : Memory;
+`,
+ commonTestShaderBindings,
+].join('\n');
+
+/** Bindings used in the result aggregation phase of the test. */
+const resultShaderBindings = `
+ @group(0) @binding(0) var<storage, read_write> test_locations : Memory;
+ @group(0) @binding(1) var<storage, read_write> read_results : ReadResults;
+ @group(0) @binding(2) var<storage, read_write> test_results : TestResults;
+ @group(0) @binding(3) var<uniform> stress_params : StressParamsMemory;
+`;
+
+/**
+ * For tests that operate on workgroup memory, include this definition. 3584 memory locations is
+ * large enough to accommodate the maximum memory size needed per workgroup for testing, which is
+ * 256 invocations per workgroup x 2 memory locations x 7 (memStride, or max stride between successive memory locations).
+ * Should change to a pipeline overridable constant when possible.
+ */
+const atomicWorkgroupMemory = `
+ var<workgroup> wg_test_locations: array<atomic<u32>, 3584>;
+`;
+
+/**
+ * For tests that operate on non-atomic workgroup memory, include this definition. 3584 memory locations
+ * is large enough to accommodate the maximum memory size needed per workgroup for testing.
+ */
+const nonAtomicWorkgroupMemory = `
+ var<workgroup> wg_test_locations: array<AccessValueTy, 3584>;
+`;
+
+/**
+ * Functions used to calculate memory locations for each invocation, for both testing and result aggregation.
+ * The permute function produces a permutation of ids by multiplying by a factor coprime to the id range and taking
+ * the result modulo that range. The stripe workgroup function ensures that invocations coordinating on a test are
+ * spread out across different workgroups.
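+ * For example, with a factor of 3 and a range of 8, permute_id maps ids 0..7 to 0, 3, 6, 1, 4, 7, 2, 5,
+ * visiting every id exactly once because the factor and the range are coprime.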
+ */
+const memoryLocationFunctions = `
+ fn permute_id(id: u32, factor: u32, mask: u32) -> u32 {
+ return (id * factor) % mask;
+ }
+
+ fn stripe_workgroup(workgroup_id: u32, local_id: u32) -> u32 {
+ return (workgroup_id + 1u + local_id % (stress_params.testing_workgroups - 1u)) % stress_params.testing_workgroups;
+ }
+`;
+
+/** Functions that help add stress to the test. */
+const testShaderFunctions = `
+  // Force the invocations in the workgroup to wait for each other, but without the general memory ordering
+ // effects of a control barrier. The barrier spins until either all invocations have incremented the atomic
+ // variable or 1024 loops have occurred. 1024 was chosen because it gives more time for invocations to enter
+ // the barrier but does not overly reduce testing throughput.
+ fn spin(limit: u32) {
+ var i : u32 = 0u;
+ var bar_val : u32 = atomicAdd(&barrier.value[0], 1u);
+ loop {
+ if (i == 1024u || bar_val >= limit) {
+ break;
+ }
+ bar_val = atomicAdd(&barrier.value[0], 0u);
+ i = i + 1u;
+ }
+ }
+
+ // Perform iterations of stress, depending on the specified pattern. Pattern 0 is store-store, pattern 1 is store-load,
+  // pattern 2 is load-store, and pattern 3 is load-load. The extra if condition (if tmpX > 100000u) is used to avoid
+ // the compiler optimizing out unused loads, where 100,000 is larger than the maximum number of stress iterations used
+ // in any test.
+ fn do_stress(iterations: u32, pattern: u32, workgroup_id: u32) {
+ let addr = scratch_locations.value[workgroup_id];
+ switch(pattern) {
+ case 0u: {
+ for(var i: u32 = 0u; i < iterations; i = i + 1u) {
+ scratchpad.value[addr] = i;
+ scratchpad.value[addr] = i + 1u;
+ }
+ }
+ case 1u: {
+ for(var i: u32 = 0u; i < iterations; i = i + 1u) {
+ scratchpad.value[addr] = i;
+ let tmp1: u32 = scratchpad.value[addr];
+ if (tmp1 > 100000u) {
+ scratchpad.value[addr] = i;
+ break;
+ }
+ }
+ }
+ case 2u: {
+ for(var i: u32 = 0u; i < iterations; i = i + 1u) {
+ let tmp1: u32 = scratchpad.value[addr];
+ if (tmp1 > 100000u) {
+ scratchpad.value[addr] = i;
+ break;
+ }
+ scratchpad.value[addr] = i;
+ }
+ }
+ case 3u: {
+ for(var i: u32 = 0u; i < iterations; i = i + 1u) {
+ let tmp1: u32 = scratchpad.value[addr];
+ if (tmp1 > 100000u) {
+ scratchpad.value[addr] = i;
+ break;
+ }
+ let tmp2: u32 = scratchpad.value[addr];
+ if (tmp2 > 100000u) {
+ scratchpad.value[addr] = i;
+ break;
+ }
+ }
+ }
+ default: {
+ }
+ }
+ }
+`;
+
+/**
+ * Entry point to both test and result shaders. One-dimensional workgroup size is hardcoded to 256, until
+ * pipeline overridable constants are supported.
+ */
+const shaderEntryPoint = `
+ // Change to pipeline overridable constant when possible.
+ const workgroupXSize = 256u;
+ @compute @workgroup_size(workgroupXSize) fn main(
+ @builtin(local_invocation_id) local_invocation_id : vec3<u32>,
+ @builtin(workgroup_id) workgroup_id : vec3<u32>) {
+`;
+
+/** All test shaders first calculate the shuffled workgroup. */
+const testShaderCommonHeader = `
+ let shuffled_workgroup = shuffled_workgroups.value[workgroup_id[0]];
+ if (shuffled_workgroup < stress_params.testing_workgroups) {
+`;
+
+/**
+ * All test shaders must calculate addresses for memory locations used in the test. Not all these addresses are
+ * used in every test, but no test uses more than these addresses.
+ */
+const testShaderCommonCalculations = `
+ let x_0 = id_0 * stress_params.mem_stride * 2u;
+ let y_0 = permute_id(id_0, stress_params.permute_second, total_ids) * stress_params.mem_stride * 2u + stress_params.location_offset;
+ let x_1 = id_1 * stress_params.mem_stride * 2u;
+ let y_1 = permute_id(id_1, stress_params.permute_second, total_ids) * stress_params.mem_stride * 2u + stress_params.location_offset;
+ if (stress_params.pre_stress == 1u) {
+ do_stress(stress_params.pre_stress_iterations, stress_params.pre_stress_pattern, shuffled_workgroup);
+ }
+`;
+
+/**
+ * An inter-workgroup test calculates two sets of memory locations that are guaranteed to be in separate workgroups.
+ * If the bounded spin-loop barrier is called, it attempts to wait for all invocations in all workgroups.
+ */
+const interWorkgroupTestShaderCode = [
+ `
+ let total_ids = workgroupXSize * stress_params.testing_workgroups;
+ let id_0 = shuffled_workgroup * workgroupXSize + local_invocation_id[0];
+ let new_workgroup = stripe_workgroup(shuffled_workgroup, local_invocation_id[0]);
+ let id_1 = new_workgroup * workgroupXSize + permute_id(local_invocation_id[0], stress_params.permute_first, workgroupXSize);
+`,
+ testShaderCommonCalculations,
+ `
+ if (stress_params.do_barrier == 1u) {
+ spin(workgroupXSize * stress_params.testing_workgroups);
+ }
+`,
+].join('\n');
+
+/**
+ * An intra-workgroup test calculates two sets of memory locations that are guaranteed to be in the same workgroup.
+ * If the bounded spin-loop barrier is called, it attempts to wait for all invocations in the same workgroup.
+ */
+const intraWorkgroupTestShaderCode = [
+ `
+ let total_ids = workgroupXSize;
+ let id_0 = local_invocation_id[0];
+ let id_1 = permute_id(local_invocation_id[0], stress_params.permute_first, workgroupXSize);
+`,
+ testShaderCommonCalculations,
+ `
+ if (stress_params.do_barrier == 1u) {
+ spin(workgroupXSize);
+ }
+`,
+].join('\n');
+
+/**
+ * Tests that operate on storage memory and communicate with invocations in the same workgroup must offset their locations
+ * relative to global memory.
+ */
+const storageIntraWorkgroupTestShaderCode = `
+ let total_ids = workgroupXSize;
+ let id_0 = local_invocation_id[0];
+ let id_1 = permute_id(local_invocation_id[0], stress_params.permute_first, workgroupXSize);
+ let x_0 = (shuffled_workgroup * workgroupXSize + id_0) * stress_params.mem_stride * 2u;
+ let y_0 = (shuffled_workgroup * workgroupXSize + permute_id(id_0, stress_params.permute_second, total_ids)) * stress_params.mem_stride * 2u + stress_params.location_offset;
+ let x_1 = (shuffled_workgroup * workgroupXSize + id_1) * stress_params.mem_stride * 2u;
+ let y_1 = (shuffled_workgroup * workgroupXSize + permute_id(id_1, stress_params.permute_second, total_ids)) * stress_params.mem_stride * 2u + stress_params.location_offset;
+ if (stress_params.pre_stress == 1u) {
+ do_stress(stress_params.pre_stress_iterations, stress_params.pre_stress_pattern, shuffled_workgroup);
+ }
+ if (stress_params.do_barrier == 1u) {
+ spin(workgroupXSize);
+ }
+`;
+
+/** All test shaders may perform stress with non-testing threads. */
+const testShaderCommonFooter = `
+ } else if (stress_params.mem_stress == 1u) {
+ do_stress(stress_params.mem_stress_iterations, stress_params.mem_stress_pattern, shuffled_workgroup);
+ }
+ }
+`;
+
+/**
+ * All result shaders must calculate memory locations used in the test. Not all these locations are
+ * used in every result shader, but no result shader uses more than these locations.
+ *
+ * Each value read from test_locations is converted from AccessValueTy to u32
+ * before storing it in the read result. This assumes u32(AccessValueTy)
+ * is either an identity function u32(u32) or a value-converting overload such
+ * as u32(f16).
+ */
+const resultShaderCommonCalculations = `
+ let id_0 = workgroup_id[0] * workgroupXSize + local_invocation_id[0];
+ let x_0 = id_0 * stress_params.mem_stride * 2u;
+ let mem_x_0 = u32(test_locations.value[x_0]);
+ let r0 = atomicLoad(&read_results.value[id_0].r0);
+ let r1 = atomicLoad(&read_results.value[id_0].r1);
+`;
+
+/** Common result shader code for an inter-workgroup test. */
+const interWorkgroupResultShaderCode = [
+ resultShaderCommonCalculations,
+ `
+ let total_ids = workgroupXSize * stress_params.testing_workgroups;
+ let y_0 = permute_id(id_0, stress_params.permute_second, total_ids) * stress_params.mem_stride * 2u + stress_params.location_offset;
+ let mem_y_0 = u32(test_locations.value[y_0]);
+`,
+].join('\n');
+
+/** Common result shader code for an intra-workgroup test. */
+const intraWorkgroupResultShaderCode = [
+ resultShaderCommonCalculations,
+ `
+ let total_ids = workgroupXSize;
+ let y_0 = (workgroup_id[0] * workgroupXSize + permute_id(local_invocation_id[0], stress_params.permute_second, total_ids)) * stress_params.mem_stride * 2u + stress_params.location_offset;
+ let mem_y_0 = u32(test_locations.value[y_0]);
+`,
+].join('\n');
+
+/** Ending bracket for result shaders. */
+const resultShaderCommonFooter = `
+}
+`;
+
+/** The common shader code for test shaders that perform atomic storage class memory litmus tests. */
+const storageMemoryAtomicTestShaderCode = [
+ shaderMemStructures,
+ atomicTestShaderBindings,
+ memoryLocationFunctions,
+ testShaderFunctions,
+ shaderEntryPoint,
+ testShaderCommonHeader,
+].join('\n');
+
+/** The common shader code for test shaders that perform non-atomic storage class memory litmus tests. */
+const storageMemoryNonAtomicTestShaderCode = [
+ shaderMemStructures,
+ nonAtomicTestShaderBindings,
+ memoryLocationFunctions,
+ testShaderFunctions,
+ shaderEntryPoint,
+ testShaderCommonHeader,
+].join('\n');
+
+/** The common shader code for test shaders that perform atomic workgroup class memory litmus tests. */
+const workgroupMemoryAtomicTestShaderCode = [
+ shaderMemStructures,
+ atomicTestShaderBindings,
+ atomicWorkgroupMemory,
+ memoryLocationFunctions,
+ testShaderFunctions,
+ shaderEntryPoint,
+ testShaderCommonHeader,
+].join('\n');
+
+/** The common shader code for test shaders that perform non-atomic workgroup class memory litmus tests. */
+const workgroupMemoryNonAtomicTestShaderCode = [
+ shaderMemStructures,
+ nonAtomicTestShaderBindings,
+ nonAtomicWorkgroupMemory,
+ memoryLocationFunctions,
+ testShaderFunctions,
+ shaderEntryPoint,
+ testShaderCommonHeader,
+].join('\n');
+
+/** The common shader code for all result shaders. */
+const resultShaderCommonCode = [
+ shaderMemStructures,
+ resultShaderBindings,
+ memoryLocationFunctions,
+ shaderEntryPoint,
+].join('\n');
+
+/**
+ * Defines the types of possible memory a test is operating on. Used as part of the process of building shader code from
+ * its composite parts.
+ */
+export enum MemoryType {
+ /** Atomic memory in the storage address space. */
+ AtomicStorageClass = 'atomic_storage',
+ /** Non-atomic memory in the storage address space. */
+ NonAtomicStorageClass = 'non_atomic_storage',
+ /** Atomic memory in the workgroup address space. */
+ AtomicWorkgroupClass = 'atomic_workgroup',
+ /** Non-atomic memory in the workgroup address space. */
+ NonAtomicWorkgroupClass = 'non_atomic_workgroup',
+}
+
+/**
+ * Defines the relative positions of two invocations coordinating on a test. Used as part of the process of building shader
+ * code from its composite parts.
+ */
+export enum TestType {
+ /** A test consists of two invocations in different workgroups. */
+ InterWorkgroup = 'inter_workgroup',
+ /** A test consists of two invocations in the same workgroup. */
+ IntraWorkgroup = 'intra_workgroup',
+}
+
+/** Defines the number of behaviors a test may have. */
+export enum ResultType {
+ TwoBehavior,
+ FourBehavior,
+}
+
+/**
+ * Given test code that performs the actual sequence of loads and stores, as well as a memory type and test type, returns
+ * a complete test shader.
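+ * For example, a caller could build an intra-workgroup test over atomic workgroup memory with
+ * buildTestShader(litmusTestCode, MemoryType.AtomicWorkgroupClass, TestType.IntraWorkgroup), where
+ * litmusTestCode is a placeholder for the WGSL snippet that performs the actual loads and stores
+ * (see weak.spec.ts for concrete uses).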
+ */
+export function buildTestShader(
+ testCode: string,
+ memoryType: MemoryType,
+ testType: TestType
+): string {
+ let memoryTypeCode;
+ let isStorageAS = false;
+ switch (memoryType) {
+ case MemoryType.AtomicStorageClass:
+ memoryTypeCode = storageMemoryAtomicTestShaderCode;
+ isStorageAS = true;
+ break;
+ case MemoryType.NonAtomicStorageClass:
+ memoryTypeCode = storageMemoryNonAtomicTestShaderCode;
+ isStorageAS = true;
+ break;
+ case MemoryType.AtomicWorkgroupClass:
+ memoryTypeCode = workgroupMemoryAtomicTestShaderCode;
+ break;
+ case MemoryType.NonAtomicWorkgroupClass:
+ memoryTypeCode = workgroupMemoryNonAtomicTestShaderCode;
+ }
+ let testTypeCode;
+ switch (testType) {
+ case TestType.InterWorkgroup:
+ testTypeCode = interWorkgroupTestShaderCode;
+ break;
+ case TestType.IntraWorkgroup:
+ if (isStorageAS) {
+ testTypeCode = storageIntraWorkgroupTestShaderCode;
+ } else {
+ testTypeCode = intraWorkgroupTestShaderCode;
+ }
+ }
+ return [memoryTypeCode, testTypeCode, testCode, testShaderCommonFooter].join('\n');
+}
+
+/**
+ * Given result code that aggregates the possible behaviors of a test across all instances, as well as a test type and
+ * number of behaviors, returns a complete result shader.
+ */
+export function buildResultShader(
+ resultCode: string,
+ testType: TestType,
+ resultType: ResultType
+): string {
+ let resultStructure;
+ switch (resultType) {
+ case ResultType.TwoBehavior:
+ resultStructure = twoBehaviorTestResultStructure;
+ break;
+ case ResultType.FourBehavior:
+ resultStructure = fourBehaviorTestResultStructure;
+ }
+ let testTypeCode;
+ switch (testType) {
+ case TestType.InterWorkgroup:
+ testTypeCode = interWorkgroupResultShaderCode;
+ break;
+ case TestType.IntraWorkgroup:
+ testTypeCode = intraWorkgroupResultShaderCode;
+ }
+ return [
+ resultStructure,
+ resultShaderCommonCode,
+ testTypeCode,
+ resultCode,
+ resultShaderCommonFooter,
+ ].join('\n');
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/memory_model/weak.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/memory_model/weak.spec.ts
new file mode 100644
index 0000000000..68f86a7d00
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/memory_model/weak.spec.ts
@@ -0,0 +1,429 @@
+export const description = `
+Tests for properties of the WebGPU memory model involving two memory locations.
+Specifically, the acquire/release ordering provided by WebGPU's barriers can be used to disallow
+weak behaviors in several classic memory model litmus tests.`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+import {
+ MemoryModelTestParams,
+ MemoryModelTester,
+ buildTestShader,
+ MemoryType,
+ TestType,
+ buildResultShader,
+ ResultType,
+} from './memory_model_setup.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// A reasonable parameter set, determined heuristically.
+const memoryModelTestParams: MemoryModelTestParams = {
+ workgroupSize: 256,
+ testingWorkgroups: 739,
+ maxWorkgroups: 885,
+ shufflePct: 0,
+ barrierPct: 0,
+ memStressPct: 0,
+ memStressIterations: 1024,
+ memStressStoreFirstPct: 50,
+ memStressStoreSecondPct: 50,
+ preStressPct: 100,
+ preStressIterations: 33,
+ preStressStoreFirstPct: 0,
+ preStressStoreSecondPct: 100,
+ scratchMemorySize: 1408,
+ stressLineSize: 4,
+ stressTargetLines: 11,
+ stressStrategyBalancePct: 0,
+ permuteFirst: 109,
+ permuteSecond: 419,
+ memStride: 2,
+ aliasedMemory: false,
+ numBehaviors: 4,
+};
+
+const workgroupMemoryMessagePassingTestCode = `
+ atomicStore(&wg_test_locations[x_0], 1u);
+ workgroupBarrier();
+ atomicStore(&wg_test_locations[y_0], 1u);
+ let r0 = atomicLoad(&wg_test_locations[y_1]);
+ workgroupBarrier();
+ let r1 = atomicLoad(&wg_test_locations[x_1]);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_1].r0, r0);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_1].r1, r1);
+`;
+
+const storageMemoryMessagePassingTestCode = `
+ atomicStore(&test_locations.value[x_0], 1u);
+ storageBarrier();
+ atomicStore(&test_locations.value[y_0], 1u);
+ let r0 = atomicLoad(&test_locations.value[y_1]);
+ storageBarrier();
+ let r1 = atomicLoad(&test_locations.value[x_1]);
+ atomicStore(&results.value[shuffled_workgroup * u32(workgroupXSize) + id_1].r0, r0);
+ atomicStore(&results.value[shuffled_workgroup * u32(workgroupXSize) + id_1].r1, r1);
+`;
+
+g.test('message_passing')
+ .desc(
+ `Checks whether two reads on one thread can observe two writes in another thread in a way
+ that is inconsistent with sequential consistency. In the message passing litmus test, one
+ thread writes the value 1 to some location x and then 1 to some location y. The second thread
+ reads y and then x. If the second thread reads y == 1 and x == 0, then sequential consistency
+ has not been respected. The acquire/release semantics of WebGPU's barrier functions should disallow
+ this behavior within a workgroup.
+ `
+ )
+ .paramsSimple([
+ { memType: MemoryType.AtomicWorkgroupClass, _testCode: workgroupMemoryMessagePassingTestCode },
+ { memType: MemoryType.AtomicStorageClass, _testCode: storageMemoryMessagePassingTestCode },
+ ])
+ .fn(async t => {
+ const testShader = buildTestShader(
+ t.params._testCode,
+ t.params.memType,
+ TestType.IntraWorkgroup
+ );
+ const messagePassingResultShader = buildResultShader(
+ `
+ if ((r0 == 0u && r1 == 0u)) {
+ atomicAdd(&test_results.seq0, 1u);
+ } else if ((r0 == 1u && r1 == 1u)) {
+ atomicAdd(&test_results.seq1, 1u);
+ } else if ((r0 == 0u && r1 == 1u)) {
+ atomicAdd(&test_results.interleaved, 1u);
+ } else if ((r0 == 1u && r1 == 0u)) {
+ atomicAdd(&test_results.weak, 1u);
+ }
+ `,
+ TestType.IntraWorkgroup,
+ ResultType.FourBehavior
+ );
+ const memModelTester = new MemoryModelTester(
+ t,
+ memoryModelTestParams,
+ testShader,
+ messagePassingResultShader
+ );
+ await memModelTester.run(40, 3);
+ });
+
+const workgroupMemoryStoreTestCode = `
+ atomicStore(&wg_test_locations[x_0], 2u);
+ workgroupBarrier();
+ atomicStore(&wg_test_locations[y_0], 1u);
+ let r0 = atomicLoad(&wg_test_locations[y_1]);
+ workgroupBarrier();
+ atomicStore(&wg_test_locations[x_1], 1u);
+ workgroupBarrier();
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_1].r0, r0);
+ atomicStore(&test_locations.value[shuffled_workgroup * workgroupXSize * stress_params.mem_stride * 2u + x_1], atomicLoad(&wg_test_locations[x_1]));
+`;
+
+const storageMemoryStoreTestCode = `
+ atomicStore(&test_locations.value[x_0], 2u);
+ storageBarrier();
+ atomicStore(&test_locations.value[y_0], 1u);
+ let r0 = atomicLoad(&test_locations.value[y_1]);
+ storageBarrier();
+ atomicStore(&test_locations.value[x_1], 1u);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_1].r0, r0);
+`;
+
+g.test('store')
+ .desc(
+ `In the store litmus test, one thread writes 2 to some memory location x and then 1 to some memory location
+ y. A second thread reads the value of y and then writes 1 to x. If the read on the second thread returns 1,
+ but the value of x in memory after the test ends is 2, then there has been a re-ordering which is not allowed
+ when using WebGPU's barriers.
+ `
+ )
+ .paramsSimple([
+ { memType: MemoryType.AtomicWorkgroupClass, _testCode: workgroupMemoryStoreTestCode },
+ { memType: MemoryType.AtomicStorageClass, _testCode: storageMemoryStoreTestCode },
+ ])
+ .fn(async t => {
+ const testShader = buildTestShader(
+ t.params._testCode,
+ t.params.memType,
+ TestType.IntraWorkgroup
+ );
+ const messagePassingResultShader = buildResultShader(
+ `
+ if ((r0 == 1u && mem_x_0 == 1u)) {
+ atomicAdd(&test_results.seq0, 1u);
+ } else if ((r0 == 0u && mem_x_0 == 2u)) {
+ atomicAdd(&test_results.seq1, 1u);
+ } else if ((r0 == 0u && mem_x_0 == 1u)) {
+ atomicAdd(&test_results.interleaved, 1u);
+ } else if ((r0 == 1u && mem_x_0 == 2u)) {
+ atomicAdd(&test_results.weak, 1u);
+ }
+ `,
+ TestType.IntraWorkgroup,
+ ResultType.FourBehavior
+ );
+ const memModelTester = new MemoryModelTester(
+ t,
+ memoryModelTestParams,
+ testShader,
+ messagePassingResultShader
+ );
+ await memModelTester.run(40, 3);
+ });
+
+const workgroupMemoryLoadBufferTestCode = `
+ let r0 = atomicLoad(&wg_test_locations[y_0]);
+ workgroupBarrier();
+ atomicStore(&wg_test_locations[x_0], 1u);
+ let r1 = atomicLoad(&wg_test_locations[x_1]);
+ workgroupBarrier();
+ atomicStore(&wg_test_locations[y_1], 1u);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_0].r0, r0);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_1].r1, r1);
+`;
+
+const storageMemoryLoadBufferTestCode = `
+ let r0 = atomicLoad(&test_locations.value[y_0]);
+ storageBarrier();
+ atomicStore(&test_locations.value[x_0], 1u);
+ let r1 = atomicLoad(&test_locations.value[x_1]);
+ storageBarrier();
+ atomicStore(&test_locations.value[y_1], 1u);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_0].r0, r0);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_1].r1, r1);
+`;
+
+g.test('load_buffer')
+ .desc(
+ `In the load buffer litmus test, one thread reads from memory location y and then writes 1 to memory location x.
+ A second thread reads from x and then writes 1 to y. If both threads read the value 0, then the loads have been
+    buffered or re-ordered, which is not allowed when using WebGPU's barriers.
+ `
+ )
+ .paramsSimple([
+ { memType: MemoryType.AtomicWorkgroupClass, _testCode: workgroupMemoryLoadBufferTestCode },
+ { memType: MemoryType.AtomicStorageClass, _testCode: storageMemoryLoadBufferTestCode },
+ ])
+ .fn(async t => {
+ const testShader = buildTestShader(
+ t.params._testCode,
+ t.params.memType,
+ TestType.IntraWorkgroup
+ );
+ const messagePassingResultShader = buildResultShader(
+ `
+ if ((r0 == 1u && r1 == 0u)) {
+ atomicAdd(&test_results.seq0, 1u);
+ } else if ((r0 == 0u && r1 == 1u)) {
+ atomicAdd(&test_results.seq1, 1u);
+ } else if ((r0 == 0u && r1 == 0u)) {
+ atomicAdd(&test_results.interleaved, 1u);
+ } else if ((r0 == 1u && r1 == 1u)) {
+ atomicAdd(&test_results.weak, 1u);
+ }
+ `,
+ TestType.IntraWorkgroup,
+ ResultType.FourBehavior
+ );
+ const memModelTester = new MemoryModelTester(
+ t,
+ memoryModelTestParams,
+ testShader,
+ messagePassingResultShader
+ );
+ await memModelTester.run(40, 3);
+ });
+
+const workgroupMemoryReadTestCode = `
+ atomicStore(&wg_test_locations[x_0], 1u);
+ workgroupBarrier();
+ atomicExchange(&wg_test_locations[y_0], 1u);
+ atomicExchange(&wg_test_locations[y_1], 2u);
+ workgroupBarrier();
+ let r0 = atomicLoad(&wg_test_locations[x_1]);
+ workgroupBarrier();
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_1].r0, r0);
+ atomicStore(&test_locations.value[shuffled_workgroup * workgroupXSize * stress_params.mem_stride * 2u + y_1], atomicLoad(&wg_test_locations[y_1]));
+`;
+
+const storageMemoryReadTestCode = `
+ atomicStore(&test_locations.value[x_0], 1u);
+ storageBarrier();
+ atomicExchange(&test_locations.value[y_0], 1u);
+ atomicExchange(&test_locations.value[y_1], 2u);
+ storageBarrier();
+ let r0 = atomicLoad(&test_locations.value[x_1]);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_1].r0, r0);
+`;
+
+g.test('read')
+ .desc(
+ `In the read litmus test, one thread writes 1 to memory location x and then 1 to memory location y. A second thread
+ first writes 2 to y and then reads from x. If the value read by the second thread is 0 but the value in memory of y
+ after the test completes is 2, then there has been some re-ordering of instructions disallowed when using WebGPU's
+ barrier. Additionally, both writes to y are RMWs, so that the barrier forces the correct acquire/release memory ordering
+ synchronization.
+ `
+ )
+ .paramsSimple([
+ { memType: MemoryType.AtomicWorkgroupClass, _testCode: workgroupMemoryReadTestCode },
+ { memType: MemoryType.AtomicStorageClass, _testCode: storageMemoryReadTestCode },
+ ])
+ .fn(async t => {
+ const testShader = buildTestShader(
+ t.params._testCode,
+ t.params.memType,
+ TestType.IntraWorkgroup
+ );
+ const messagePassingResultShader = buildResultShader(
+ `
+ if ((r0 == 1u && mem_y_0 == 2u)) {
+ atomicAdd(&test_results.seq0, 1u);
+ } else if ((r0 == 0u && mem_y_0 == 1u)) {
+ atomicAdd(&test_results.seq1, 1u);
+ } else if ((r0 == 1u && mem_y_0 == 1u)) {
+ atomicAdd(&test_results.interleaved, 1u);
+ } else if ((r0 == 0u && mem_y_0 == 2u)) {
+ atomicAdd(&test_results.weak, 1u);
+ }
+ `,
+ TestType.IntraWorkgroup,
+ ResultType.FourBehavior
+ );
+ const memModelTester = new MemoryModelTester(
+ t,
+ memoryModelTestParams,
+ testShader,
+ messagePassingResultShader
+ );
+ await memModelTester.run(40, 3);
+ });
+
+const workgroupMemoryStoreBufferTestCode = `
+ atomicStore(&wg_test_locations[x_0], 1u);
+ workgroupBarrier();
+ let r0 = atomicAdd(&wg_test_locations[y_0], 0u);
+ atomicExchange(&wg_test_locations[y_1], 1u);
+ workgroupBarrier();
+ let r1 = atomicLoad(&wg_test_locations[x_1]);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_0].r0, r0);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_1].r1, r1);
+`;
+
+const storageMemoryStoreBufferTestCode = `
+ atomicStore(&test_locations.value[x_0], 1u);
+ storageBarrier();
+ let r0 = atomicAdd(&test_locations.value[y_0], 0u);
+ atomicExchange(&test_locations.value[y_1], 1u);
+ storageBarrier();
+ let r1 = atomicLoad(&test_locations.value[x_1]);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_0].r0, r0);
+ atomicStore(&results.value[shuffled_workgroup * workgroupXSize + id_1].r1, r1);
+`;
+
+g.test('store_buffer')
+ .desc(
+ `In the store buffer litmus test, one thread writes 1 to memory location x and then reads from memory location
+ y. A second thread writes 1 to y and then reads from x. If both reads return 0, then stores have been buffered
+ or some other re-ordering has occurred that is disallowed by WebGPU's barriers. Additionally, both the read
+ and store to y are RMWs to achieve the necessary synchronization across threads.
+ `
+ )
+ .paramsSimple([
+ { memType: MemoryType.AtomicWorkgroupClass, _testCode: workgroupMemoryStoreBufferTestCode },
+ { memType: MemoryType.AtomicStorageClass, _testCode: storageMemoryStoreBufferTestCode },
+ ])
+ .fn(async t => {
+ const testShader = buildTestShader(
+ t.params._testCode,
+ t.params.memType,
+ TestType.IntraWorkgroup
+ );
+ const messagePassingResultShader = buildResultShader(
+ `
+ if ((r0 == 1u && r1 == 0u)) {
+ atomicAdd(&test_results.seq0, 1u);
+ } else if ((r0 == 0u && r1 == 1u)) {
+ atomicAdd(&test_results.seq1, 1u);
+ } else if ((r0 == 1u && r1 == 1u)) {
+ atomicAdd(&test_results.interleaved, 1u);
+ } else if ((r0 == 0u && r1 == 0u)) {
+ atomicAdd(&test_results.weak, 1u);
+ }
+ `,
+ TestType.IntraWorkgroup,
+ ResultType.FourBehavior
+ );
+ const memModelTester = new MemoryModelTester(
+ t,
+ memoryModelTestParams,
+ testShader,
+ messagePassingResultShader
+ );
+ await memModelTester.run(40, 3);
+ });
+
+const workgroupMemory2P2WTestCode = `
+ atomicStore(&wg_test_locations[x_0], 2u);
+ workgroupBarrier();
+ atomicExchange(&wg_test_locations[y_0], 1u);
+ atomicExchange(&wg_test_locations[y_1], 2u);
+ workgroupBarrier();
+ atomicStore(&wg_test_locations[x_1], 1u);
+ workgroupBarrier();
+ atomicStore(&test_locations.value[shuffled_workgroup * workgroupXSize * stress_params.mem_stride * 2u + x_1], atomicLoad(&wg_test_locations[x_1]));
+ atomicStore(&test_locations.value[shuffled_workgroup * workgroupXSize * stress_params.mem_stride * 2u + y_1], atomicLoad(&wg_test_locations[y_1]));
+`;
+
+const storageMemory2P2WTestCode = `
+ atomicStore(&test_locations.value[x_0], 2u);
+ storageBarrier();
+ atomicExchange(&test_locations.value[y_0], 1u);
+ atomicExchange(&test_locations.value[y_1], 2u);
+ storageBarrier();
+ atomicStore(&test_locations.value[x_1], 1u);
+`;
+
+g.test('2_plus_2_write')
+ .desc(
+ `In the 2+2 write litmus test, one thread stores 2 to memory location x and then 1 to memory location y.
+ A second thread stores 2 to y and then 1 to x. If at the end of the test both memory locations are set to 2,
+ then some disallowed re-ordering has occurred. Both writes to y are RMWs to achieve the required synchronization.
+ `
+ )
+ .paramsSimple([
+ { memType: MemoryType.AtomicWorkgroupClass, _testCode: workgroupMemory2P2WTestCode },
+ { memType: MemoryType.AtomicStorageClass, _testCode: storageMemory2P2WTestCode },
+ ])
+ .fn(async t => {
+ const testShader = buildTestShader(
+ t.params._testCode,
+ t.params.memType,
+ TestType.IntraWorkgroup
+ );
+ const messagePassingResultShader = buildResultShader(
+ `
+ if ((mem_x_0 == 1u && mem_y_0 == 2u)) {
+ atomicAdd(&test_results.seq0, 1u);
+ } else if ((mem_x_0 == 2u && mem_y_0 == 1u)) {
+ atomicAdd(&test_results.seq1, 1u);
+ } else if ((mem_x_0 == 1u && mem_y_0 == 1u)) {
+ atomicAdd(&test_results.interleaved, 1u);
+ } else if ((mem_x_0 == 2u && mem_y_0 == 2u)) {
+ atomicAdd(&test_results.weak, 1u);
+ }
+ `,
+ TestType.IntraWorkgroup,
+ ResultType.FourBehavior
+ );
+ const memModelTester = new MemoryModelTester(
+ t,
+ memoryModelTestParams,
+ testShader,
+ messagePassingResultShader
+ );
+ await memModelTester.run(40, 3);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/padding.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/padding.spec.ts
new file mode 100644
index 0000000000..3a3671bcc3
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/padding.spec.ts
@@ -0,0 +1,406 @@
+export const description = `
+Execution Tests for preservation of padding bytes in structures and arrays.
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { iterRange } from '../../../common/util/util.js';
+import { GPUTest } from '../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+/**
+ * Run a shader and check that the buffer output matches expectations.
+ *
+ * @param t The test object
+ * @param wgsl The shader source
+ * @param expected The array of expected values after running the shader
+ */
+function runShaderTest(t: GPUTest, wgsl: string, expected: Uint32Array): void {
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({ code: wgsl }),
+ entryPoint: 'main',
+ },
+ });
+
+ // Allocate a buffer and fill it with 0xdeadbeef words.
+ const outputBuffer = t.makeBufferWithContents(
+ new Uint32Array([...iterRange(expected.length, _i => 0xdeadbeef)]),
+ GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC
+ );
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer: outputBuffer } }],
+ });
+
+ // Run the shader.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(1);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+
+ // Check that only the non-padding bytes were modified.
+ t.expectGPUBufferValuesEqual(outputBuffer, expected);
+}
+
+g.test('struct_implicit')
+ .desc(
+ `Test that padding bytes in between structure members are preserved.
+
+ This test defines a structure that has implicit padding and creates a read-write storage
+ buffer with that structure type. The shader assigns the whole variable at once, and we
+ then test that data in the padding bytes was preserved.
+ `
+ )
+ .fn(t => {
+ const wgsl = `
+ struct S {
+ a : u32,
+ // 12 bytes of padding
+ b : vec3<u32>,
+ // 4 bytes of padding
+ c : vec2<u32>,
+ // 8 bytes of padding
+ }
+ @group(0) @binding(0) var<storage, read_write> buffer : S;
+
+ @compute @workgroup_size(1)
+ fn main() {
+ buffer = S(0x12345678, vec3(0xabcdef01), vec2(0x98765432));
+ }
+ `;
+ runShaderTest(
+ t,
+ wgsl,
+ new Uint32Array([
+ // a : u32
+ 0x12345678, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
+ // b : vec3<u32>
+ 0xabcdef01, 0xabcdef01, 0xabcdef01, 0xdeadbeef,
+ // c : vec2<u32>
+ 0x98765432, 0x98765432, 0xdeadbeef, 0xdeadbeef,
+ ])
+ );
+ });
+
+g.test('struct_explicit')
+ .desc(
+ `Test that padding bytes in between structure members are preserved.
+
+ This test defines a structure with explicit padding attributes and creates a read-write storage
+ buffer with that structure type. The shader assigns the whole variable at once, and we
+ then test that data in the padding bytes was preserved.
+ `
+ )
+ .fn(t => {
+ const wgsl = `
+ struct S {
+ a : u32,
+ // 12 bytes of padding
+ @align(16) @size(20) b : u32,
+ // 16 bytes of padding
+ @size(12) c : u32,
+ // 8 bytes of padding
+ }
+ @group(0) @binding(0) var<storage, read_write> buffer : S;
+
+ @compute @workgroup_size(1)
+ fn main() {
+ buffer = S(0x12345678, 0xabcdef01, 0x98765432);
+ }
+ `;
+ runShaderTest(
+ t,
+ wgsl,
+ new Uint32Array([
+ // a : u32
+ 0x12345678, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
+ // @align(16) @size(20) b : u32
+ 0xabcdef01, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
+ // @size(12) c : u32
+ 0x98765432, 0xdeadbeef, 0xdeadbeef,
+ ])
+ );
+ });
+
+g.test('struct_nested')
+ .desc(
+ `Test that padding bytes in nested structures are preserved.
+
+ This test defines a set of nested structures that have padding and creates a read-write storage
+ buffer with the root structure type. The shader assigns the whole variable at once, and we
+ then test that data in the padding bytes was preserved.
+ `
+ )
+ .fn(t => {
+ const wgsl = `
+ // Size of S1 is 48 bytes.
+ // Alignment of S1 is 16 bytes.
+ struct S1 {
+ a : u32,
+ // 12 bytes of padding
+ b : vec3<u32>,
+ // 4 bytes of padding
+ c : vec2<u32>,
+ // 8 bytes of padding
+ }
+
+ // Size of S2 is 112 bytes.
+ // Alignment of S2 is 48 bytes.
+ struct S2 {
+ a2 : u32,
+ // 12 bytes of padding
+ b2 : S1,
+ c2 : S1,
+ }
+
+ // Size of S3 is 144 bytes.
+ // Alignment of S3 is 48 bytes.
+ struct S3 {
+ a3 : S1,
+ b3 : S2,
+ c3 : S2,
+ }
+
+ @group(0) @binding(0) var<storage, read_write> buffer : S3;
+
+ @compute @workgroup_size(1)
+ fn main() {
+ buffer = S3();
+ }
+ `;
+ runShaderTest(
+ t,
+ wgsl,
+ new Uint32Array([
+ // a3 : S1
+ // a3.a1 : u32
+ 0x00000000, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
+ // a3.b1 : vec3<u32>
+ 0x00000000, 0x00000000, 0x00000000, 0xdeadbeef,
+ // a3.c1 : vec2<u32>
+ 0x00000000, 0x00000000, 0xdeadbeef, 0xdeadbeef,
+
+ // b3 : S2
+ // b3.a2 : u32
+ 0x00000000, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
+ // b3.b2 : S1
+ // b3.b2.a1 : u32
+ 0x00000000, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
+ // b3.b2.b1 : vec3<u32>
+ 0x00000000, 0x00000000, 0x00000000, 0xdeadbeef,
+ // b3.b2.c1 : vec2<u32>
+ 0x00000000, 0x00000000, 0xdeadbeef, 0xdeadbeef,
+ // b3.c2 : S1
+ // b3.c2.a1 : u32
+ 0x00000000, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
+ // b3.c2.b1 : vec3<u32>
+ 0x00000000, 0x00000000, 0x00000000, 0xdeadbeef,
+ // b3.c2.c1 : vec2<u32>
+ 0x00000000, 0x00000000, 0xdeadbeef, 0xdeadbeef,
+
+ // c3 : S2
+ // c3.a2 : u32
+ 0x00000000, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
+ // c3.b2 : S1
+ // c3.b2.a1 : u32
+ 0x00000000, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
+ // c3.b2.b1 : vec3<u32>
+ 0x00000000, 0x00000000, 0x00000000, 0xdeadbeef,
+ // c3.b2.c1 : vec2<u32>
+ 0x00000000, 0x00000000, 0xdeadbeef, 0xdeadbeef,
+ // c3.c2 : S1
+ // c3.c2.a1 : u32
+ 0x00000000, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef,
+ // c3.c2.b1 : vec3<u32>
+ 0x00000000, 0x00000000, 0x00000000, 0xdeadbeef,
+ // c3.c2.c1 : vec2<u32>
+ 0x00000000, 0x00000000, 0xdeadbeef, 0xdeadbeef,
+ ])
+ );
+ });
+
+g.test('array_of_vec3')
+ .desc(
+ `Test that padding bytes in between array elements are preserved.
+
+    This test creates a read-write storage buffer with type array<vec3, 4>. The shader
+ assigns the whole variable at once, and we then test that data in the padding bytes was
+ preserved.
+ `
+ )
+ .fn(t => {
+ const wgsl = `
+ @group(0) @binding(0) var<storage, read_write> buffer : array<vec3<u32>, 4>;
+
+ @compute @workgroup_size(1)
+ fn main() {
+ buffer = array<vec3<u32>, 4>(
+ vec3(0x12345678),
+ vec3(0xabcdef01),
+ vec3(0x98765432),
+ vec3(0x0f0f0f0f),
+ );
+ }
+ `;
+ runShaderTest(
+ t,
+ wgsl,
+ new Uint32Array([
+ // buffer[0]
+ 0x12345678, 0x12345678, 0x12345678, 0xdeadbeef,
+ // buffer[1]
+ 0xabcdef01, 0xabcdef01, 0xabcdef01, 0xdeadbeef,
+ // buffer[2]
+ 0x98765432, 0x98765432, 0x98765432, 0xdeadbeef,
+        // buffer[3]
+ 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xdeadbeef,
+ ])
+ );
+ });
+
+g.test('array_of_struct')
+ .desc(
+ `Test that padding bytes in between array elements are preserved.
+
+    This test creates a read-write storage buffer with type array<S, 3>, where S is a
+ structure that contains padding bytes. The shader assigns the whole variable at once, and we
+ then test that data in the padding bytes was preserved.
+ `
+ )
+ .fn(t => {
+ const wgsl = `
+ struct S {
+ a : u32,
+ b : vec3<u32>,
+ }
+ @group(0) @binding(0) var<storage, read_write> buffer : array<S, 3>;
+
+ @compute @workgroup_size(1)
+ fn main() {
+ buffer = array<S, 3>(
+ S(0x12345678, vec3(0x0f0f0f0f)),
+ S(0xabcdef01, vec3(0x7c7c7c7c)),
+ S(0x98765432, vec3(0x18181818)),
+ );
+ }
+ `;
+ runShaderTest(
+ t,
+ wgsl,
+ new Uint32Array([
+ // buffer[0]
+ 0x12345678, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f,
+ 0xdeadbeef,
+ // buffer[1]
+ 0xabcdef01, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0x7c7c7c7c, 0x7c7c7c7c, 0x7c7c7c7c,
+ 0xdeadbeef,
+ // buffer[2]
+ 0x98765432, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0x18181818, 0x18181818, 0x18181818,
+ 0xdeadbeef,
+ ])
+ );
+ });
+
+g.test('vec3')
+ .desc(
+    `Test that padding bytes are preserved when assigning to a variable of type vec3 (without a struct).
+ `
+ )
+ .fn(t => {
+ const wgsl = `
+ @group(0) @binding(0) var<storage, read_write> buffer : vec3<u32>;
+
+ @compute @workgroup_size(1)
+ fn main() {
+ buffer = vec3<u32>(0x12345678, 0xabcdef01, 0x98765432);
+ }
+ `;
+ runShaderTest(t, wgsl, new Uint32Array([0x12345678, 0xabcdef01, 0x98765432, 0xdeadbeef]));
+ });
+
+g.test('matCx3')
+ .desc(
+    `Test that padding bytes are preserved when assigning to a variable of type matCx3.
+ `
+ )
+ .params(u =>
+ u
+ .combine('columns', [2, 3, 4] as const)
+ .combine('use_struct', [true, false] as const)
+ .beginSubcases()
+ )
+ .fn(t => {
+ const cols = t.params.columns;
+ const wgsl = `
+ alias Mat = mat${cols}x3<f32>;
+ ${t.params.use_struct ? `struct S { m : Mat } alias Type = S;` : `alias Type = Mat;`}
+ @group(0) @binding(0) var<storage, read_write> buffer : Type;
+
+ @compute @workgroup_size(1)
+ fn main() {
+ var m : Mat;
+ for (var c = 0u; c < ${cols}; c++) {
+ m[c] = vec3(f32(c*3 + 1), f32(c*3 + 2), f32(c*3 + 3));
+ }
+ buffer = Type(m);
+ }
+ `;
+ const f_values = new Float32Array(cols * 4);
+ const u_values = new Uint32Array(f_values.buffer);
+ for (let c = 0; c < cols; c++) {
+ f_values[c * 4 + 0] = c * 3 + 1;
+ f_values[c * 4 + 1] = c * 3 + 2;
+ f_values[c * 4 + 2] = c * 3 + 3;
+ u_values[c * 4 + 3] = 0xdeadbeef;
+ }
+ runShaderTest(t, wgsl, u_values);
+ });
+
+g.test('array_of_matCx3')
+ .desc(
+ `Test that padding bytes in between array elements are preserved.
+
+    This test creates a read-write storage buffer with type array<matCx3<f32>, 4>. The
+ shader assigns the whole variable at once, and we then test that data in the padding bytes was
+ preserved.
+ `
+ )
+ .params(u =>
+ u
+ .combine('columns', [2, 3, 4] as const)
+ .combine('use_struct', [true, false] as const)
+ .beginSubcases()
+ )
+ .fn(t => {
+ const cols = t.params.columns;
+ const wgsl = `
+ alias Mat = mat${cols}x3<f32>;
+ ${t.params.use_struct ? `struct S { m : Mat } alias Type = S;` : `alias Type = Mat;`}
+ @group(0) @binding(0) var<storage, read_write> buffer : array<Type, 4>;
+
+ @compute @workgroup_size(1)
+ fn main() {
+ var m : Mat;
+ for (var c = 0u; c < ${cols}; c++) {
+ m[c] = vec3(f32(c*3 + 1), f32(c*3 + 2), f32(c*3 + 3));
+ }
+ buffer = array<Type, 4>(Type(m), Type(m * 2), Type(m * 3), Type(m * 4));
+ }
+ `;
+ const f_values = new Float32Array(cols * 4 * 4);
+ const u_values = new Uint32Array(f_values.buffer);
+ for (let i = 0; i < 4; i++) {
+ for (let c = 0; c < cols; c++) {
+ f_values[i * (cols * 4) + c * 4 + 0] = (c * 3 + 1) * (i + 1);
+ f_values[i * (cols * 4) + c * 4 + 1] = (c * 3 + 2) * (i + 1);
+ f_values[i * (cols * 4) + c * 4 + 2] = (c * 3 + 3) * (i + 1);
+ u_values[i * (cols * 4) + c * 4 + 3] = 0xdeadbeef;
+ }
+ }
+ runShaderTest(t, wgsl, u_values);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/robust_access.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/robust_access.spec.ts
new file mode 100644
index 0000000000..965dd283dd
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/robust_access.spec.ts
@@ -0,0 +1,480 @@
+export const description = `
+Tests to check datatype clamping in shaders is correctly implemented for all indexable types
+(vectors, matrices, sized/unsized arrays) visible to shaders in various ways.
+
+TODO: add tests to check that textureLoad operations stay in-bounds.
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { assert } from '../../../common/util/util.js';
+import { GPUTest } from '../../gpu_test.js';
+import { align } from '../../util/math.js';
+import { generateTypes, supportedScalarTypes, supportsAtomics } from '../types.js';
+
+export const g = makeTestGroup(GPUTest);
+
+const kMaxU32 = 0xffff_ffff;
+const kMaxI32 = 0x7fff_ffff;
+const kMinI32 = -0x8000_0000;
+
+/**
+ * Wraps the provided source into a harness that checks that calling `runTest()` returns 0.
+ *
+ * Non-test bindings are in bind group 1, including:
+ * - `constants.zero`: a dynamically-uniform `0u` value.
+ */
+async function runShaderTest(
+ t: GPUTest,
+ stage: GPUShaderStageFlags,
+ testSource: string,
+ layout: GPUPipelineLayout,
+ testBindings: GPUBindGroupEntry[],
+ dynamicOffsets?: number[]
+): Promise<void> {
+ assert(stage === GPUShaderStage.COMPUTE, 'Only know how to deal with compute for now');
+
+ // Contains just zero (for now).
+ const constantsBuffer = t.device.createBuffer({ size: 4, usage: GPUBufferUsage.UNIFORM });
+
+ const resultBuffer = t.device.createBuffer({
+ size: 4,
+ usage: GPUBufferUsage.COPY_SRC | GPUBufferUsage.STORAGE,
+ });
+
+ const source = `
+struct Constants {
+ zero: u32
+};
+@group(1) @binding(0) var<uniform> constants: Constants;
+
+struct Result {
+ value: u32
+};
+@group(1) @binding(1) var<storage, read_write> result: Result;
+
+${testSource}
+
+@compute @workgroup_size(1)
+fn main() {
+ _ = constants.zero; // Ensure constants buffer is statically-accessed
+ result.value = runTest();
+}`;
+
+ t.debug(source);
+ const module = t.device.createShaderModule({ code: source });
+ const pipeline = await t.device.createComputePipelineAsync({
+ layout,
+ compute: { module, entryPoint: 'main' },
+ });
+
+ const group = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(1),
+ entries: [
+ { binding: 0, resource: { buffer: constantsBuffer } },
+ { binding: 1, resource: { buffer: resultBuffer } },
+ ],
+ });
+
+ const testGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: testBindings,
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, testGroup, dynamicOffsets);
+ pass.setBindGroup(1, group);
+ pass.dispatchWorkgroups(1);
+ pass.end();
+
+ t.queue.submit([encoder.finish()]);
+
+ t.expectGPUBufferValuesEqual(resultBuffer, new Uint32Array([0]));
+}
+
+/** Fill an ArrayBuffer with sentinel values, except clear a region to zero. */
+function testFillArrayBuffer(
+ array: ArrayBuffer,
+ type: 'u32' | 'i32' | 'f32',
+ { zeroByteStart, zeroByteCount }: { zeroByteStart: number; zeroByteCount: number }
+) {
+ const constructor = { u32: Uint32Array, i32: Int32Array, f32: Float32Array }[type];
+ assert(zeroByteCount % constructor.BYTES_PER_ELEMENT === 0);
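+  // Fill the whole buffer with the sentinel value 42, then zero out the requested byte range.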
+ new constructor(array).fill(42);
+ new constructor(array, zeroByteStart, zeroByteCount / constructor.BYTES_PER_ELEMENT).fill(0);
+}
+
+/**
+ * Generate a bunch of indexable types (vec, mat, sized/unsized array) for testing.
+ */
+
+g.test('linear_memory')
+ .desc(
+ `For each indexable data type (vec, mat, sized/unsized array, of various scalar types), attempts
+ to access (read, write, atomic load/store) a region of memory (buffer or internal) at various
+ (signed/unsigned) indices. Checks that the accesses conform to robust access (OOB reads only
+ return bound memory, OOB writes don't write OOB).
+
+ TODO: Test in/out storage classes.
+ TODO: Test vertex and fragment stages.
+ TODO: Test using a dynamic offset instead of a static offset into uniform/storage bindings.
+ TODO: Test types like vec2<atomic<i32>>, if that's allowed.
+ TODO: Test exprIndexAddon as constexpr.
+ TODO: Test exprIndexAddon as pipeline-overridable constant expression.
+ `
+ )
+ .params(u =>
+ u
+ .combineWithParams([
+ { addressSpace: 'storage', storageMode: 'read', access: 'read', dynamicOffset: false },
+ {
+ addressSpace: 'storage',
+ storageMode: 'read_write',
+ access: 'read',
+ dynamicOffset: false,
+ },
+ {
+ addressSpace: 'storage',
+ storageMode: 'read_write',
+ access: 'write',
+ dynamicOffset: false,
+ },
+ { addressSpace: 'storage', storageMode: 'read', access: 'read', dynamicOffset: true },
+ { addressSpace: 'storage', storageMode: 'read_write', access: 'read', dynamicOffset: true },
+ {
+ addressSpace: 'storage',
+ storageMode: 'read_write',
+ access: 'write',
+ dynamicOffset: true,
+ },
+ { addressSpace: 'uniform', access: 'read', dynamicOffset: false },
+ { addressSpace: 'uniform', access: 'read', dynamicOffset: true },
+ { addressSpace: 'private', access: 'read' },
+ { addressSpace: 'private', access: 'write' },
+ { addressSpace: 'function', access: 'read' },
+ { addressSpace: 'function', access: 'write' },
+ { addressSpace: 'workgroup', access: 'read' },
+ { addressSpace: 'workgroup', access: 'write' },
+ ] as const)
+ .combineWithParams([
+ { containerType: 'array' },
+ { containerType: 'matrix' },
+ { containerType: 'vector' },
+ ] as const)
+ .combineWithParams([
+ { shadowingMode: 'none' },
+ { shadowingMode: 'module-scope' },
+ { shadowingMode: 'function-scope' },
+ ])
+ .expand('isAtomic', p => (supportsAtomics(p) ? [false, true] : [false]))
+ .beginSubcases()
+ .expand('baseType', supportedScalarTypes)
+ .expandWithParams(generateTypes)
+ )
+ .fn(async t => {
+ const {
+ addressSpace,
+ storageMode,
+ access,
+ dynamicOffset,
+ isAtomic,
+ containerType,
+ baseType,
+ type,
+ shadowingMode,
+ _kTypeInfo,
+ } = t.params;
+
+ assert(_kTypeInfo !== undefined, 'not an indexable type');
+ assert('arrayLength' in _kTypeInfo);
+
+ let usesCanary = false;
+ let globalSource = '';
+ let testFunctionSource = '';
+ const testBufferSize = 512;
+ const bufferBindingOffset = 256;
+ /** Undefined if no buffer binding is needed */
+ let bufferBindingSize: number | undefined = undefined;
+
+ // Declare the data that will be accessed to check robust access, as a buffer or a struct
+ // in the global scope or inside the test function itself.
+ const structDecl = `
+struct S {
+ startCanary: array<u32, 10>,
+ data: ${type},
+ endCanary: array<u32, 10>,
+};`;
+
+ const testGroupBGLEntires: GPUBindGroupLayoutEntry[] = [];
+ switch (addressSpace) {
+ case 'uniform':
+ case 'storage':
+ {
+ assert(_kTypeInfo.layout !== undefined);
+ const layout = _kTypeInfo.layout;
+ bufferBindingSize = align(layout.size, layout.alignment);
+ const qualifiers = addressSpace === 'storage' ? `storage, ${storageMode}` : addressSpace;
+ globalSource += `
+struct TestData {
+ data: ${type},
+};
+@group(0) @binding(0) var<${qualifiers}> s: TestData;`;
+
+ testGroupBGLEntires.push({
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE,
+ buffer: {
+ type:
+ addressSpace === 'uniform'
+ ? 'uniform'
+ : storageMode === 'read'
+ ? 'read-only-storage'
+ : 'storage',
+ hasDynamicOffset: dynamicOffset,
+ },
+ });
+ }
+ break;
+
+ case 'private':
+ case 'workgroup':
+ usesCanary = true;
+ globalSource += structDecl;
+ globalSource += `var<${addressSpace}> s: S;`;
+ break;
+
+ case 'function':
+ usesCanary = true;
+ globalSource += structDecl;
+ testFunctionSource += 'var s: S;';
+ break;
+ }
+
+ // Build the test function that will do the tests.
+
+ // If we use a local canary declared in the shader, initialize it.
+ if (usesCanary) {
+ testFunctionSource += `
+ for (var i = 0u; i < 10u; i = i + 1u) {
+ s.startCanary[i] = 0xFFFFFFFFu;
+ s.endCanary[i] = 0xFFFFFFFFu;
+ }`;
+ }
+
+ /** Returns a different number each time, kind of like a `__LINE__` to ID the failing check. */
+ const nextErrorReturnValue = (() => {
+ let errorReturnValue = 0x1000;
+ return () => {
+ ++errorReturnValue;
+ return `0x${errorReturnValue.toString(16)}u`;
+ };
+ })();
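+    // (Successive calls return '0x1001u', '0x1002u', '0x1003u', and so on.)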
+
+ // This is here, instead of in subcases, so only a single shader is needed to test many modes.
+ for (const indexSigned of [false, true]) {
+ const indicesToTest = indexSigned
+ ? [
+ // Exactly in bounds (should be OK)
+ '0',
+ `${_kTypeInfo.arrayLength} - 1`,
+ // Exactly out of bounds
+ '-1',
+ `${_kTypeInfo.arrayLength}`,
+ // Far out of bounds
+ '-1000000',
+ '1000000',
+ `${kMinI32}`,
+ `${kMaxI32}`,
+ ]
+ : [
+ // Exactly in bounds (should be OK)
+ '0u',
+ `${_kTypeInfo.arrayLength}u - 1u`,
+ // Exactly out of bounds
+ `${_kTypeInfo.arrayLength}u`,
+ // Far out of bounds
+ '1000000u',
+ `${kMaxU32}u`,
+ `${kMaxI32}u`,
+ ];
+
+ const indexTypeLiteral = indexSigned ? '0' : '0u';
+ const indexTypeCast = indexSigned ? 'i32' : 'u32';
+ for (const exprIndexAddon of [
+ '', // No addon
+ ` + ${indexTypeLiteral}`, // Add a literal 0
+ ` + ${indexTypeCast}(constants.zero)`, // Add a uniform 0
+ ]) {
+ // Produce the accesses to the variable.
+ for (const indexToTest of indicesToTest) {
+ testFunctionSource += `
+ {
+ let index = (${indexToTest})${exprIndexAddon};`;
+ const exprZeroElement = `${_kTypeInfo.elementBaseType}()`;
+ const exprElement = `s.data[index]`;
+
+ switch (access) {
+ case 'read':
+ {
+ let exprLoadElement = isAtomic ? `atomicLoad(&${exprElement})` : exprElement;
+ if (addressSpace === 'uniform' && containerType === 'array') {
+ // Scalar types will be wrapped in a vec4 to satisfy array element size
+ // requirements for the uniform address space, so we need an additional index
+ // accessor expression.
+ exprLoadElement += '[0]';
+ }
+ let condition = `${exprLoadElement} != ${exprZeroElement}`;
+ if (containerType === 'matrix') condition = `any(${condition})`;
+ testFunctionSource += `
+ if (${condition}) { return ${nextErrorReturnValue()}; }`;
+ }
+ break;
+
+ case 'write':
+ if (isAtomic) {
+ testFunctionSource += `
+ atomicStore(&s.data[index], ${exprZeroElement});`;
+ } else {
+ testFunctionSource += `
+ s.data[index] = ${exprZeroElement};`;
+ }
+ break;
+ }
+ testFunctionSource += `
+ }`;
+ }
+ }
+ }
+
+ // Check that the canaries haven't been modified
+ if (usesCanary) {
+ testFunctionSource += `
+ for (var i = 0u; i < 10u; i = i + 1u) {
+ if (s.startCanary[i] != 0xFFFFFFFFu) {
+ return ${nextErrorReturnValue()};
+ }
+ if (s.endCanary[i] != 0xFFFFFFFFu) {
+ return ${nextErrorReturnValue()};
+ }
+ }`;
+ }
+
+ // Shadowing case declarations
+ let moduleScopeShadowDecls = '';
+ let functionScopeShadowDecls = '';
+
+ switch (shadowingMode) {
+ case 'module-scope':
+ // Shadow the builtins likely used by robustness as module-scope variables
+ moduleScopeShadowDecls = `
+var<private> min = 0;
+var<private> max = 0;
+var<private> arrayLength = 0;
+`;
+ // Make sure that these are referenced by the function.
+ // This ensures that compilers don't strip away unused variables.
+ functionScopeShadowDecls = `
+ _ = min;
+ _ = max;
+ _ = arrayLength;
+`;
+ break;
+ case 'function-scope':
+ // Shadow the builtins likely used by robustness as function-scope variables
+ functionScopeShadowDecls = `
+ let min = 0;
+ let max = 0;
+ let arrayLength = 0;
+`;
+ break;
+ }
+
+ // Run the test
+
+ // First aggregate the test source
+ const testSource = `
+${globalSource}
+${moduleScopeShadowDecls}
+
+fn runTest() -> u32 {
+ ${functionScopeShadowDecls}
+ ${testFunctionSource}
+ return 0u;
+}`;
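+    // The generated runTest() returns 0u when every access behaves robustly, or a unique nonzero
+    // code from nextErrorReturnValue identifying the first check that failed.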
+
+ const layout = t.device.createPipelineLayout({
+ bindGroupLayouts: [
+ t.device.createBindGroupLayout({
+ entries: testGroupBGLEntires,
+ }),
+ t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE,
+ buffer: {
+ type: 'uniform',
+ },
+ },
+ {
+ binding: 1,
+ visibility: GPUShaderStage.COMPUTE,
+ buffer: {
+ type: 'storage',
+ },
+ },
+ ],
+ }),
+ ],
+ });
+
+ // Run it.
+ if (bufferBindingSize !== undefined && baseType !== 'bool') {
+ const expectedData = new ArrayBuffer(testBufferSize);
+ const bufferBindingEnd = bufferBindingOffset + bufferBindingSize;
+ testFillArrayBuffer(expectedData, baseType, {
+ zeroByteStart: bufferBindingOffset,
+ zeroByteCount: bufferBindingSize,
+ });
+
+ // Create a buffer that contains zeroes in the allowed access area, and 42s everywhere else.
+ const testBuffer = t.makeBufferWithContents(
+ new Uint8Array(expectedData),
+ GPUBufferUsage.COPY_SRC |
+ GPUBufferUsage.UNIFORM |
+ GPUBufferUsage.STORAGE |
+ GPUBufferUsage.COPY_DST
+ );
+
+ // Run the shader, accessing the buffer.
+ await runShaderTest(
+ t,
+ GPUShaderStage.COMPUTE,
+ testSource,
+ layout,
+ [
+ {
+ binding: 0,
+ resource: {
+ buffer: testBuffer,
+ offset: dynamicOffset ? 0 : bufferBindingOffset,
+ size: bufferBindingSize,
+ },
+ },
+ ],
+ dynamicOffset ? [bufferBindingOffset] : undefined
+ );
+
+ // Check that content of the buffer outside of the allowed area didn't change.
+ const expectedBytes = new Uint8Array(expectedData);
+ t.expectGPUBufferValuesEqual(testBuffer, expectedBytes.subarray(0, bufferBindingOffset), 0);
+ t.expectGPUBufferValuesEqual(
+ testBuffer,
+ expectedBytes.subarray(bufferBindingEnd, testBufferSize),
+ bufferBindingEnd
+ );
+ } else {
+ await runShaderTest(t, GPUShaderStage.COMPUTE, testSource, layout, []);
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/robust_access_vertex.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/robust_access_vertex.spec.ts
new file mode 100644
index 0000000000..de90301592
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/robust_access_vertex.spec.ts
@@ -0,0 +1,607 @@
+export const description = `
+Test that vertex attributes behave correctly (no crash / data leak) when accessed out of bounds.
+
+Test coverage:
+
+The following is parameterized (all combinations tested):
+
+1) Draw call type? (drawIndexed, drawIndirect, drawIndexedIndirect)
+ - Run the draw call using an index buffer and/or an indirect buffer.
+ - Doesn't test direct draw, as vertex buffer OOB accesses are CPU-validated and treated as validation errors.
+ - Also, instance step mode vertex buffer OOB accesses are CPU-validated for drawIndexed, so we only test
+   robust access for vertex step mode vertex buffers.
+
+2) Draw call parameter (vertexCount, firstVertex, indexCount, firstIndex, baseVertex, instanceCount,
+ vertexCountInIndexBuffer)
+ - The parameter which goes out of bounds. Filtered depending on the draw call type.
+ - vertexCount, firstVertex: used for drawIndirect only, to test vertex step mode buffer OOB
+ - instanceCount: used for both drawIndirect and drawIndexedIndirect, to test instance step mode buffer OOB
+ - baseVertex, vertexCountInIndexBuffer: used for both drawIndexed and drawIndexedIndirect, to test
+   vertex step mode buffer OOB. vertexCountInIndexBuffer indicates how many vertices are used
+   within the index buffer, i.e. [0, 1, ..., vertexCountInIndexBuffer-1].
+ - indexCount, firstIndex: used for drawIndexedIndirect only, to validate vertex buffer access when
+   the index buffer read is itself OOB. This never happens in drawIndexed, which has CPU-side index
+   buffer OOB validation.
+
+3) Attribute type (float32, float32x2, float32x3, float32x4)
+ - The input attribute type in the vertex shader
+
+4) Error scale (0, 1, 4, 10^2, 10^4, 10^6)
+ - Offset to add to the correct draw call parameter
+ - 0 for the control case
+
+5) Additional vertex buffers (0, +4)
+ - Tests that no OOB occurs if more vertex buffers are used
+
+6) Partial last number and offset vertex buffer (false, true)
+ - Tricky cases that make vertex buffer OOB.
+ - With partial last number enabled, the vertex buffer size will be 1 byte less than needed, making the
+   last vertex OOB by 1 byte.
+ - Offset vertex buffer will bind the vertex buffer to the render pass with a 4-byte offset, causing OOB.
+ - For drawIndexed, these two flags are suppressed for the instance step mode vertex buffer to make sure
+   it passes the CPU validation.
+
+The tests have one instance step mode vertex buffer bound for instanced attributes, to make sure
+instanceCount / firstInstance are tested.
+
+The tests include multiple attributes per vertex buffer.
+
+The vertex buffers are filled by repeating a few values randomly chosen for each test until the
+end of the buffer.
+
+The tests run a render pipeline which verifies the following:
+1) All vertex attribute values occur in the buffer or are 0 (for the control case they can't be 0)
+2) All vertex_index values are within the index buffer range or are 0
+
+TODO:
+Currently firstInstance is not tested, as for drawIndexed it is CPU-validated, and for drawIndirect
+and drawIndexedIndirect it should always be 0. Once there is an extension that allows it to be non-zero,
+it should be added to the drawCallTestParameter list.
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { assert } from '../../../common/util/util.js';
+import { GPUTest, TextureTestMixin } from '../../gpu_test.js';
+
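+// As an illustration of the parameterization: with drawCallTestParameter 'vertexCount' and
+// errorScale 4, the nominal vertexCount of 4 becomes 8, so drawIndirect fetches vertex step mode
+// buffers past their end; robust access requires those extra fetches to yield values already in
+// the buffer, or 0.
+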
+// Encapsulates a draw call (either indexed or non-indexed)
+class DrawCall {
+ private test: GPUTest;
+ private vertexBuffers: GPUBuffer[];
+
+ // Add a float offset when binding vertex buffer
+ private offsetVertexBuffer: boolean;
+
+  // Keep the instance step mode vertex buffer in range, in order to test vertex step
+  // mode buffer OOB in drawIndexed. Setting this to true suppresses partialLastNumber
+  // and offsetVertexBuffer for the instance step mode vertex buffer.
+ private keepInstanceStepModeBufferInRange: boolean;
+
+ // Draw
+ public vertexCount: number;
+ public firstVertex: number;
+
+ // DrawIndexed
+ public vertexCountInIndexBuffer: number; // For generating index buffer in drawIndexed and drawIndexedIndirect
+ public indexCount: number; // For accessing index buffer in drawIndexed and drawIndexedIndirect
+ public firstIndex: number;
+ public baseVertex: number;
+
+ // Both Draw and DrawIndexed
+ public instanceCount: number;
+ public firstInstance: number;
+
+ constructor({
+ test,
+ vertexArrays,
+ vertexCount,
+ partialLastNumber,
+ offsetVertexBuffer,
+ keepInstanceStepModeBufferInRange,
+ }: {
+ test: GPUTest;
+ vertexArrays: Float32Array[];
+ vertexCount: number;
+ partialLastNumber: boolean;
+ offsetVertexBuffer: boolean;
+ keepInstanceStepModeBufferInRange: boolean;
+ }) {
+ this.test = test;
+
+ // Default arguments (valid call)
+ this.vertexCount = vertexCount;
+ this.firstVertex = 0;
+ this.vertexCountInIndexBuffer = vertexCount;
+ this.indexCount = vertexCount;
+ this.firstIndex = 0;
+ this.baseVertex = 0;
+ this.instanceCount = vertexCount;
+ this.firstInstance = 0;
+
+ this.offsetVertexBuffer = offsetVertexBuffer;
+ this.keepInstanceStepModeBufferInRange = keepInstanceStepModeBufferInRange;
+
+    // Since vertexCountInIndexBuffer is mutable, generation of the index buffer is deferred to right before the draw call
+
+ // Generate vertex buffer
+ this.vertexBuffers = vertexArrays.map((v, i) => {
+ if (i === 0 && keepInstanceStepModeBufferInRange) {
+ // Suppress partialLastNumber for the first vertex buffer, aka the instance step mode buffer
+ return this.generateVertexBuffer(v, false);
+ } else {
+ return this.generateVertexBuffer(v, partialLastNumber);
+ }
+ });
+ }
+
+ // Insert a draw call into |pass| with specified type
+ public insertInto(pass: GPURenderPassEncoder, indexed: boolean, indirect: boolean) {
+ if (indexed) {
+ if (indirect) {
+ this.drawIndexedIndirect(pass);
+ } else {
+ this.drawIndexed(pass);
+ }
+ } else {
+ if (indirect) {
+ this.drawIndirect(pass);
+ } else {
+ this.draw(pass);
+ }
+ }
+ }
+
+ // Insert a draw call into |pass|
+ public draw(pass: GPURenderPassEncoder) {
+ this.bindVertexBuffers(pass);
+ pass.draw(this.vertexCount, this.instanceCount, this.firstVertex, this.firstInstance);
+ }
+
+ // Insert an indexed draw call into |pass|
+ public drawIndexed(pass: GPURenderPassEncoder) {
+ // Generate index buffer
+ const indexArray = new Uint32Array(this.vertexCountInIndexBuffer).map((_, i) => i);
+ const indexBuffer = this.test.makeBufferWithContents(indexArray, GPUBufferUsage.INDEX);
+ this.bindVertexBuffers(pass);
+ pass.setIndexBuffer(indexBuffer, 'uint32');
+ pass.drawIndexed(
+ this.indexCount,
+ this.instanceCount,
+ this.firstIndex,
+ this.baseVertex,
+ this.firstInstance
+ );
+ }
+
+ // Insert an indirect draw call into |pass|
+ public drawIndirect(pass: GPURenderPassEncoder) {
+ this.bindVertexBuffers(pass);
+ pass.drawIndirect(this.generateIndirectBuffer(), 0);
+ }
+
+ // Insert an indexed indirect draw call into |pass|
+ public drawIndexedIndirect(pass: GPURenderPassEncoder) {
+ // Generate index buffer
+ const indexArray = new Uint32Array(this.vertexCountInIndexBuffer).map((_, i) => i);
+ const indexBuffer = this.test.makeBufferWithContents(indexArray, GPUBufferUsage.INDEX);
+ this.bindVertexBuffers(pass);
+ pass.setIndexBuffer(indexBuffer, 'uint32');
+ pass.drawIndexedIndirect(this.generateIndexedIndirectBuffer(), 0);
+ }
+
+ // Bind all vertex buffers generated
+ private bindVertexBuffers(pass: GPURenderPassEncoder) {
+ let currSlot = 0;
+ for (let i = 0; i < this.vertexBuffers.length; i++) {
+ if (i === 0 && this.keepInstanceStepModeBufferInRange) {
+ // Keep the instance step mode buffer in range
+ pass.setVertexBuffer(currSlot++, this.vertexBuffers[i], 0);
+ } else {
+ pass.setVertexBuffer(currSlot++, this.vertexBuffers[i], this.offsetVertexBuffer ? 4 : 0);
+ }
+ }
+ }
+
+ // Create a vertex buffer from |vertexArray|
+ // If |partialLastNumber| is true, delete one byte off the end
+ private generateVertexBuffer(vertexArray: Float32Array, partialLastNumber: boolean): GPUBuffer {
+ let size = vertexArray.byteLength;
+ let length = vertexArray.length;
+ if (partialLastNumber) {
+ size -= 1; // Shave off one byte from the buffer size.
+ length -= 1; // And one whole element from the writeBuffer.
+ }
+ const buffer = this.test.device.createBuffer({
+ size,
+ usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST, // Ensure that buffer can be used by writeBuffer
+ });
+ this.test.device.queue.writeBuffer(buffer, 0, vertexArray.slice(0, length));
+ return buffer;
+ }
+
+ // Create an indirect buffer containing draw call values
+ private generateIndirectBuffer(): GPUBuffer {
+ const indirectArray = new Int32Array([
+ this.vertexCount,
+ this.instanceCount,
+ this.firstVertex,
+ this.firstInstance,
+ ]);
+ return this.test.makeBufferWithContents(indirectArray, GPUBufferUsage.INDIRECT);
+ }
+
+ // Create an indirect buffer containing indexed draw call values
+ private generateIndexedIndirectBuffer(): GPUBuffer {
+ const indirectArray = new Int32Array([
+ this.indexCount,
+ this.instanceCount,
+ this.firstIndex,
+ this.baseVertex,
+ this.firstInstance,
+ ]);
+ return this.test.makeBufferWithContents(indirectArray, GPUBufferUsage.INDIRECT);
+ }
+}
+
+// Parameterize different sized types
+interface VertexInfo {
+ wgslType: string;
+ sizeInBytes: number;
+ validationFunc: string;
+}
+
+const typeInfoMap: { [k: string]: VertexInfo } = {
+ float32: {
+ wgslType: 'f32',
+ sizeInBytes: 4,
+ validationFunc: 'return valid(v);',
+ },
+ float32x2: {
+ wgslType: 'vec2<f32>',
+ sizeInBytes: 8,
+ validationFunc: 'return valid(v.x) && valid(v.y);',
+ },
+ float32x3: {
+ wgslType: 'vec3<f32>',
+ sizeInBytes: 12,
+ validationFunc: 'return valid(v.x) && valid(v.y) && valid(v.z);',
+ },
+ float32x4: {
+ wgslType: 'vec4<f32>',
+ sizeInBytes: 16,
+ validationFunc: `return (valid(v.x) && valid(v.y) && valid(v.z) && valid(v.w)) ||
+ (v.x == 0.0 && v.y == 0.0 && v.z == 0.0 && (v.w == 0.0 || v.w == 1.0));`,
+ },
+};
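+// float32x4 additionally accepts (0, 0, 0, 0) and (0, 0, 0, 1), since an out-of-bounds fetch of a
+// four-component attribute may legitimately produce either as its zero value.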
+
+class F extends TextureTestMixin(GPUTest) {
+ generateBufferContents(
+ numVertices: number,
+ attributesPerBuffer: number,
+ typeInfo: VertexInfo,
+ arbitraryValues: number[],
+ bufferCount: number
+ ): Float32Array[] {
+ // Make an array big enough for the vertices, attributes, and size of each element
+ const vertexArray = new Float32Array(
+ numVertices * attributesPerBuffer * (typeInfo.sizeInBytes / 4)
+ );
+
+ for (let i = 0; i < vertexArray.length; ++i) {
+ vertexArray[i] = arbitraryValues[i % arbitraryValues.length];
+ }
+
+    // Only the first buffer is instance step mode; all others are vertex step mode buffers
+ assert(bufferCount >= 2);
+ const bufferContents: Float32Array[] = [];
+ for (let i = 0; i < bufferCount; i++) {
+ bufferContents.push(vertexArray);
+ }
+
+ return bufferContents;
+ }
+
+ generateVertexBufferDescriptors(
+ bufferCount: number,
+ attributesPerBuffer: number,
+ format: GPUVertexFormat
+ ) {
+ const typeInfo = typeInfoMap[format];
+ // Vertex buffer descriptors
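+    // For example, bufferCount: 2, attributesPerBuffer: 2, format: 'float32x2' produces two layouts
+    // with arrayStride 16: buffer 0 (instance step) holds attributes 0 and 1 at offsets 0 and 8,
+    // and buffer 1 (vertex step) holds attributes 2 and 3 at the same offsets.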
+ const buffers: GPUVertexBufferLayout[] = [];
+ {
+ let currAttribute = 0;
+ for (let i = 0; i < bufferCount; i++) {
+ buffers.push({
+ arrayStride: attributesPerBuffer * typeInfo.sizeInBytes,
+ stepMode: i === 0 ? 'instance' : 'vertex',
+ attributes: Array(attributesPerBuffer)
+ .fill(0)
+ .map((_, i) => ({
+ shaderLocation: currAttribute++,
+ offset: i * typeInfo.sizeInBytes,
+ format,
+ })),
+ });
+ }
+ }
+ return buffers;
+ }
+
+ generateVertexShaderCode({
+ bufferCount,
+ attributesPerBuffer,
+ validValues,
+ typeInfo,
+ vertexIndexOffset,
+ numVertices,
+ isIndexed,
+ }: {
+ bufferCount: number;
+ attributesPerBuffer: number;
+ validValues: number[];
+ typeInfo: VertexInfo;
+ vertexIndexOffset: number;
+ numVertices: number;
+ isIndexed: boolean;
+ }): string {
+ // Create layout and attributes listing
+ let layoutStr = 'struct Attributes {';
+ const attributeNames = [];
+ {
+ let currAttribute = 0;
+ for (let i = 0; i < bufferCount; i++) {
+ for (let j = 0; j < attributesPerBuffer; j++) {
+ layoutStr += `@location(${currAttribute}) a_${currAttribute} : ${typeInfo.wgslType},\n`;
+ attributeNames.push(`a_${currAttribute}`);
+ currAttribute++;
+ }
+ }
+ }
+ layoutStr += '};';
+
+ const vertexShaderCode: string = `
+ ${layoutStr}
+
+ fn valid(f : f32) -> bool {
+ return ${validValues.map(v => `f == ${v}.0`).join(' || ')};
+ }
+
+ fn validationFunc(v : ${typeInfo.wgslType}) -> bool {
+ ${typeInfo.validationFunc}
+ }
+
+ @vertex fn main(
+ @builtin(vertex_index) VertexIndex : u32,
+ attributes : Attributes
+ ) -> @builtin(position) vec4<f32> {
+ var attributesInBounds = ${attributeNames
+ .map(a => `validationFunc(attributes.${a})`)
+ .join(' && ')};
+
+ var indexInBoundsCountFromBaseVertex =
+ (VertexIndex >= ${vertexIndexOffset}u &&
+ VertexIndex < ${vertexIndexOffset + numVertices}u);
+ var indexInBounds = VertexIndex == 0u || indexInBoundsCountFromBaseVertex;
+
+ var Position : vec4<f32>;
+ if (attributesInBounds && (${!isIndexed} || indexInBounds)) {
+          // Success case: move the vertex to the right of the viewport to show that at least one case succeeded
+ Position = vec4<f32>(0.5, 0.0, 0.0, 1.0);
+ } else {
+ // Failure case, move the vertex to the left of the viewport
+ Position = vec4<f32>(-0.5, 0.0, 0.0, 1.0);
+ }
+ return Position;
+ }`;
+ return vertexShaderCode;
+ }
+
+ createRenderPipeline({
+ bufferCount,
+ attributesPerBuffer,
+ validValues,
+ typeInfo,
+ vertexIndexOffset,
+ numVertices,
+ isIndexed,
+ buffers,
+ }: {
+ bufferCount: number;
+ attributesPerBuffer: number;
+ validValues: number[];
+ typeInfo: VertexInfo;
+ vertexIndexOffset: number;
+ numVertices: number;
+ isIndexed: boolean;
+ buffers: GPUVertexBufferLayout[];
+ }): GPURenderPipeline {
+ const pipeline = this.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: this.device.createShaderModule({
+ code: this.generateVertexShaderCode({
+ bufferCount,
+ attributesPerBuffer,
+ validValues,
+ typeInfo,
+ vertexIndexOffset,
+ numVertices,
+ isIndexed,
+ }),
+ }),
+ entryPoint: 'main',
+ buffers,
+ },
+ fragment: {
+ module: this.device.createShaderModule({
+ code: `
+ @fragment fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(1.0, 0.0, 0.0, 1.0);
+ }`,
+ }),
+ entryPoint: 'main',
+ targets: [{ format: 'rgba8unorm' }],
+ },
+ primitive: { topology: 'point-list' },
+ });
+ return pipeline;
+ }
+
+ doTest({
+ bufferCount,
+ attributesPerBuffer,
+ dataType,
+ validValues,
+ vertexIndexOffset,
+ numVertices,
+ isIndexed,
+ isIndirect,
+ drawCall,
+ }: {
+ bufferCount: number;
+ attributesPerBuffer: number;
+ dataType: GPUVertexFormat;
+ validValues: number[];
+ vertexIndexOffset: number;
+ numVertices: number;
+ isIndexed: boolean;
+ isIndirect: boolean;
+ drawCall: DrawCall;
+ }): void {
+ // Vertex buffer descriptors
+ const buffers: GPUVertexBufferLayout[] = this.generateVertexBufferDescriptors(
+ bufferCount,
+ attributesPerBuffer,
+ dataType
+ );
+
+ // Pipeline setup, texture setup
+ const pipeline = this.createRenderPipeline({
+ bufferCount,
+ attributesPerBuffer,
+ validValues,
+ typeInfo: typeInfoMap[dataType],
+ vertexIndexOffset,
+ numVertices,
+ isIndexed,
+ buffers,
+ });
+
+ const colorAttachment = this.device.createTexture({
+ format: 'rgba8unorm',
+ size: { width: 2, height: 1, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ const colorAttachmentView = colorAttachment.createView();
+
+ const encoder = this.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorAttachmentView,
+ storeOp: 'store',
+ clearValue: { r: 0.0, g: 1.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ },
+ ],
+ });
+ pass.setPipeline(pipeline);
+
+ // Run the draw variant
+ drawCall.insertInto(pass, isIndexed, isIndirect);
+
+ pass.end();
+ this.device.queue.submit([encoder.finish()]);
+
+ // Validate we see green on the left pixel, showing that no failure case is detected
+ this.expectSinglePixelComparisonsAreOkInTexture({ texture: colorAttachment }, [
+ { coord: { x: 0, y: 0 }, exp: new Uint8Array([0x00, 0xff, 0x00, 0xff]) },
+ ]);
+ }
+}
+
+export const g = makeTestGroup(F);
+
+g.test('vertex_buffer_access')
+ .params(
+ u =>
+ u
+ .combineWithParams([
+ { indexed: false, indirect: true },
+ { indexed: true, indirect: false },
+ { indexed: true, indirect: true },
+ ])
+ .expand('drawCallTestParameter', function* (p) {
+ if (p.indexed) {
+ yield* ['baseVertex', 'vertexCountInIndexBuffer'] as const;
+ if (p.indirect) {
+ yield* ['indexCount', 'instanceCount', 'firstIndex'] as const;
+ }
+ } else if (p.indirect) {
+ yield* ['vertexCount', 'instanceCount', 'firstVertex'] as const;
+ }
+ })
+ .combine('type', Object.keys(typeInfoMap) as GPUVertexFormat[])
+ .combine('additionalBuffers', [0, 4])
+ .combine('partialLastNumber', [false, true])
+ .combine('offsetVertexBuffer', [false, true])
+ .combine('errorScale', [0, 1, 4, 10 ** 2, 10 ** 4, 10 ** 6])
+ .unless(p => p.drawCallTestParameter === 'instanceCount' && p.errorScale > 10 ** 4) // To avoid timeout
+ )
+ .fn(t => {
+ const p = t.params;
+ const typeInfo = typeInfoMap[p.type];
+
+ // Number of vertices to draw
+ const numVertices = 4;
+ // Each buffer is bound to this many attributes (2 would mean 2 attributes per buffer)
+ const attributesPerBuffer = 2;
+ // Some arbitrary values to fill our buffer with to avoid collisions with other tests
+ const arbitraryValues = [990, 685, 446, 175];
+
+ // A valid value is 0 or one in the buffer
+ const validValues =
+ p.errorScale === 0 && !p.offsetVertexBuffer && !p.partialLastNumber
+ ? arbitraryValues // Control case with no OOB access, must read back valid values in buffer
+ : [0, ...arbitraryValues]; // Testing case with OOB access, can be 0 for OOB data
+
+ // Generate vertex buffer contents. Only the first buffer is instance step mode, all others are vertex step mode
+ const bufferCount = p.additionalBuffers + 2; // At least one instance step mode and one vertex step mode buffer
+ const bufferContents = t.generateBufferContents(
+ numVertices,
+ attributesPerBuffer,
+ typeInfo,
+ arbitraryValues,
+ bufferCount
+ );
+
+ // Mutable draw call
+ const draw = new DrawCall({
+ test: t,
+ vertexArrays: bufferContents,
+ vertexCount: numVertices,
+ partialLastNumber: p.partialLastNumber,
+ offsetVertexBuffer: p.offsetVertexBuffer,
+ keepInstanceStepModeBufferInRange: p.indexed && !p.indirect, // keep instance step mode buffer in range for drawIndexed
+ });
+
+ // Offset the draw call parameter we are testing by |errorScale|
+ draw[p.drawCallTestParameter] += p.errorScale;
+  // Offset the range checks for vertex_index in the shader if we use baseVertex
+ let vertexIndexOffset = 0;
+ if (p.drawCallTestParameter === 'baseVertex') {
+ vertexIndexOffset += p.errorScale;
+ }
+
+ t.doTest({
+ bufferCount,
+ attributesPerBuffer,
+ dataType: p.type,
+ validValues,
+ vertexIndexOffset,
+ numVertices,
+ isIndexed: p.indexed,
+ isIndirect: p.indirect,
+ drawCall: draw,
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/shader_io/compute_builtins.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/shader_io/compute_builtins.spec.ts
new file mode 100644
index 0000000000..fcf3159c64
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/shader_io/compute_builtins.spec.ts
@@ -0,0 +1,297 @@
+export const description = `Test compute shader builtin variables`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { iterRange } from '../../../../common/util/util.js';
+import { GPUTest } from '../../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// Test that the values for each input builtin are correct.
+g.test('inputs')
+ .desc(`Test compute shader builtin inputs values`)
+ .params(u =>
+ u
+ .combine('method', ['param', 'struct', 'mixed'] as const)
+ .combine('dispatch', ['direct', 'indirect'] as const)
+ .combineWithParams([
+ {
+ groupSize: { x: 1, y: 1, z: 1 },
+ numGroups: { x: 1, y: 1, z: 1 },
+ },
+ {
+ groupSize: { x: 8, y: 4, z: 2 },
+ numGroups: { x: 1, y: 1, z: 1 },
+ },
+ {
+ groupSize: { x: 1, y: 1, z: 1 },
+ numGroups: { x: 8, y: 4, z: 2 },
+ },
+ {
+ groupSize: { x: 3, y: 7, z: 5 },
+ numGroups: { x: 13, y: 9, z: 11 },
+ },
+ ] as const)
+ .beginSubcases()
+ )
+ .fn(t => {
+ const invocationsPerGroup = t.params.groupSize.x * t.params.groupSize.y * t.params.groupSize.z;
+ const totalInvocations =
+ invocationsPerGroup * t.params.numGroups.x * t.params.numGroups.y * t.params.numGroups.z;
+
+ // Generate the structures, parameters, and builtin expressions used in the shader.
+ let params = '';
+ let structures = '';
+ let local_id = '';
+ let local_index = '';
+ let global_id = '';
+ let group_id = '';
+ let num_groups = '';
+ switch (t.params.method) {
+ case 'param':
+ params = `
+ @builtin(local_invocation_id) local_id : vec3<u32>,
+ @builtin(local_invocation_index) local_index : u32,
+ @builtin(global_invocation_id) global_id : vec3<u32>,
+ @builtin(workgroup_id) group_id : vec3<u32>,
+ @builtin(num_workgroups) num_groups : vec3<u32>,
+ `;
+ local_id = 'local_id';
+ local_index = 'local_index';
+ global_id = 'global_id';
+ group_id = 'group_id';
+ num_groups = 'num_groups';
+ break;
+ case 'struct':
+ structures = `struct Inputs {
+ @builtin(local_invocation_id) local_id : vec3<u32>,
+ @builtin(local_invocation_index) local_index : u32,
+ @builtin(global_invocation_id) global_id : vec3<u32>,
+ @builtin(workgroup_id) group_id : vec3<u32>,
+ @builtin(num_workgroups) num_groups : vec3<u32>,
+ };`;
+ params = `inputs : Inputs`;
+ local_id = 'inputs.local_id';
+ local_index = 'inputs.local_index';
+ global_id = 'inputs.global_id';
+ group_id = 'inputs.group_id';
+ num_groups = 'inputs.num_groups';
+ break;
+ case 'mixed':
+ structures = `struct InputsA {
+ @builtin(local_invocation_index) local_index : u32,
+ @builtin(global_invocation_id) global_id : vec3<u32>,
+ };
+ struct InputsB {
+ @builtin(workgroup_id) group_id : vec3<u32>
+ };`;
+ params = `@builtin(local_invocation_id) local_id : vec3<u32>,
+ inputsA : InputsA,
+ inputsB : InputsB,
+ @builtin(num_workgroups) num_groups : vec3<u32>,`;
+ local_id = 'local_id';
+ local_index = 'inputsA.local_index';
+ global_id = 'inputsA.global_id';
+ group_id = 'inputsB.group_id';
+ num_groups = 'num_groups';
+ break;
+ }
+
+ // WGSL shader that stores every builtin value to a buffer, for every invocation in the grid.
+ const wgsl = `
+ struct S {
+ data : array<u32>
+ };
+ struct V {
+ data : array<vec3<u32>>
+ };
+ @group(0) @binding(0) var<storage, read_write> local_id_out : V;
+ @group(0) @binding(1) var<storage, read_write> local_index_out : S;
+ @group(0) @binding(2) var<storage, read_write> global_id_out : V;
+ @group(0) @binding(3) var<storage, read_write> group_id_out : V;
+ @group(0) @binding(4) var<storage, read_write> num_groups_out : V;
+
+ ${structures}
+
+ const group_width = ${t.params.groupSize.x}u;
+ const group_height = ${t.params.groupSize.y}u;
+ const group_depth = ${t.params.groupSize.z}u;
+
+ @compute @workgroup_size(group_width, group_height, group_depth)
+ fn main(
+ ${params}
+ ) {
+ let group_index = ((${group_id}.z * ${num_groups}.y) + ${group_id}.y) * ${num_groups}.x + ${group_id}.x;
+ let global_index = group_index * ${invocationsPerGroup}u + ${local_index};
+ local_id_out.data[global_index] = ${local_id};
+ local_index_out.data[global_index] = ${local_index};
+ global_id_out.data[global_index] = ${global_id};
+ group_id_out.data[global_index] = ${group_id};
+ num_groups_out.data[global_index] = ${num_groups};
+ }
+ `;
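+    // The shader linearizes workgroups the same way checkEachIndex() below does:
+    // group_index = (gz * ny + gy) * nx + gx. For numGroups {x: 13, y: 9, z: 11}, workgroup
+    // (gx, gy, gz) = (2, 3, 4) gets group_index (4 * 9 + 3) * 13 + 2 = 509.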
+
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({
+ code: wgsl,
+ }),
+ entryPoint: 'main',
+ },
+ });
+
+    // Helper to create a `size`-byte buffer with binding number `binding`, and record a matching bind group entry.
+ function createBuffer(size: number, binding: number) {
+ const buffer = t.device.createBuffer({
+ size,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(buffer);
+
+ bindGroupEntries.push({
+ binding,
+ resource: {
+ buffer,
+ },
+ });
+
+ return buffer;
+ }
+
+ // Create the output buffers.
+ const bindGroupEntries: GPUBindGroupEntry[] = [];
+ const localIdBuffer = createBuffer(totalInvocations * 16, 0);
+ const localIndexBuffer = createBuffer(totalInvocations * 4, 1);
+ const globalIdBuffer = createBuffer(totalInvocations * 16, 2);
+ const groupIdBuffer = createBuffer(totalInvocations * 16, 3);
+ const numGroupsBuffer = createBuffer(totalInvocations * 16, 4);
+
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: bindGroupEntries,
+ });
+
+ // Run the shader.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ switch (t.params.dispatch) {
+ case 'direct':
+ pass.dispatchWorkgroups(t.params.numGroups.x, t.params.numGroups.y, t.params.numGroups.z);
+ break;
+ case 'indirect': {
+ const dispatchBuffer = t.device.createBuffer({
+ size: 3 * Uint32Array.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.INDIRECT,
+ mappedAtCreation: true,
+ });
+ t.trackForCleanup(dispatchBuffer);
+ const dispatchData = new Uint32Array(dispatchBuffer.getMappedRange());
+ dispatchData[0] = t.params.numGroups.x;
+ dispatchData[1] = t.params.numGroups.y;
+ dispatchData[2] = t.params.numGroups.z;
+ dispatchBuffer.unmap();
+ pass.dispatchWorkgroupsIndirect(dispatchBuffer, 0);
+ break;
+ }
+ }
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+
+ type vec3 = { x: number; y: number; z: number };
+
+ // Helper to check that the vec3<u32> value at each index of the provided `output` buffer
+ // matches the expected value for that invocation, as generated by the `getBuiltinValue`
+ // function. The `name` parameter is the builtin name, used for error messages.
+ const checkEachIndex = (
+ output: Uint32Array,
+ name: string,
+ getBuiltinValue: (groupId: vec3, localId: vec3) => vec3
+ ) => {
+ // Loop over workgroups.
+ for (let gz = 0; gz < t.params.numGroups.z; gz++) {
+ for (let gy = 0; gy < t.params.numGroups.y; gy++) {
+ for (let gx = 0; gx < t.params.numGroups.x; gx++) {
+ // Loop over invocations within a group.
+ for (let lz = 0; lz < t.params.groupSize.z; lz++) {
+ for (let ly = 0; ly < t.params.groupSize.y; ly++) {
+ for (let lx = 0; lx < t.params.groupSize.x; lx++) {
+ const groupIndex = (gz * t.params.numGroups.y + gy) * t.params.numGroups.x + gx;
+ const localIndex = (lz * t.params.groupSize.y + ly) * t.params.groupSize.x + lx;
+ const globalIndex = groupIndex * invocationsPerGroup + localIndex;
+ const expected = getBuiltinValue(
+ { x: gx, y: gy, z: gz },
+ { x: lx, y: ly, z: lz }
+ );
+ if (output[globalIndex * 4 + 0] !== expected.x) {
+ return new Error(
+ `${name}.x failed at group(${gx},${gy},${gz}) local(${lx},${ly},${lz}))\n` +
+ ` expected: ${expected.x}\n` +
+ ` got: ${output[globalIndex * 4 + 0]}`
+ );
+ }
+ if (output[globalIndex * 4 + 1] !== expected.y) {
+ return new Error(
+ `${name}.y failed at group(${gx},${gy},${gz}) local(${lx},${ly},${lz}))\n` +
+ ` expected: ${expected.y}\n` +
+ ` got: ${output[globalIndex * 4 + 1]}`
+ );
+ }
+ if (output[globalIndex * 4 + 2] !== expected.z) {
+ return new Error(
+ `${name}.z failed at group(${gx},${gy},${gz}) local(${lx},${ly},${lz}))\n` +
+ ` expected: ${expected.z}\n` +
+ ` got: ${output[globalIndex * 4 + 2]}`
+ );
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ return undefined;
+ };
+
+ // Check @builtin(local_invocation_index) values.
+ t.expectGPUBufferValuesEqual(
+ localIndexBuffer,
+ new Uint32Array([...iterRange(totalInvocations, x => x % invocationsPerGroup)])
+ );
+
+ // Check @builtin(local_invocation_id) values.
+ t.expectGPUBufferValuesPassCheck(
+ localIdBuffer,
+ outputData => checkEachIndex(outputData, 'local_invocation_id', (_, localId) => localId),
+ { type: Uint32Array, typedLength: totalInvocations * 4 }
+ );
+
+ // Check @builtin(global_invocation_id) values.
+ const getGlobalId = (groupId: vec3, localId: vec3) => {
+ return {
+ x: groupId.x * t.params.groupSize.x + localId.x,
+ y: groupId.y * t.params.groupSize.y + localId.y,
+ z: groupId.z * t.params.groupSize.z + localId.z,
+ };
+ };
+ t.expectGPUBufferValuesPassCheck(
+ globalIdBuffer,
+ outputData => checkEachIndex(outputData, 'global_invocation_id', getGlobalId),
+ { type: Uint32Array, typedLength: totalInvocations * 4 }
+ );
+
+ // Check @builtin(workgroup_id) values.
+ t.expectGPUBufferValuesPassCheck(
+ groupIdBuffer,
+ outputData => checkEachIndex(outputData, 'workgroup_id', (groupId, _) => groupId),
+ { type: Uint32Array, typedLength: totalInvocations * 4 }
+ );
+
+ // Check @builtin(num_workgroups) values.
+ t.expectGPUBufferValuesPassCheck(
+ numGroupsBuffer,
+ outputData => checkEachIndex(outputData, 'num_workgroups', () => t.params.numGroups),
+ { type: Uint32Array, typedLength: totalInvocations * 4 }
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/shader_io/shared_structs.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/shader_io/shared_structs.spec.ts
new file mode 100644
index 0000000000..65f53eaa8d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/shader_io/shared_structs.spec.ts
@@ -0,0 +1,332 @@
+export const description = `Test the shared use of structures containing entry point IO attributes`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { GPUTest, TextureTestMixin } from '../../../gpu_test.js';
+import { checkElementsEqual } from '../../../util/check_contents.js';
+
+export const g = makeTestGroup(TextureTestMixin(GPUTest));
+
+g.test('shared_with_buffer')
+ .desc(
+ `Test sharing an entry point IO struct with a buffer.
+
+ This test defines a structure that contains both builtin attributes and layout attributes,
+ and uses that structure as both an entry point input and the store type of a storage buffer.
+ The builtin attributes should be ignored when used for the storage buffer, and the layout
+ attributes should be ignored when used as an entry point IO parameter.
+ `
+ )
+ .fn(t => {
+ // Set the dispatch parameters such that we get some interesting (non-zero) built-in variables.
+ const wgsize = new Uint32Array([8, 4, 2]);
+ const numGroups = new Uint32Array([4, 2, 8]);
+
+ // Pick a single invocation to copy the input structure to the output buffer.
+ const targetLocalIndex = 13;
+ const targetGroup = new Uint32Array([2, 1, 5]);
+
+ // The test shader defines a structure that contains members decorated with built-in variable
+ // attributes, and also layout attributes for the storage buffer.
+ const wgsl = `
+ struct S {
+ /* byte offset: 0 */ @size(32) @builtin(workgroup_id) group_id : vec3<u32>,
+ /* byte offset: 32 */ @builtin(local_invocation_index) local_index : u32,
+ /* byte offset: 64 */ @align(64) @builtin(num_workgroups) numGroups : vec3<u32>,
+ };
+
+ @group(0) @binding(0)
+ var<storage, read_write> outputs : S;
+
+ @compute @workgroup_size(${wgsize[0]}, ${wgsize[1]}, ${wgsize[2]})
+ fn main(inputs : S) {
+ if (inputs.group_id.x == ${targetGroup[0]}u &&
+ inputs.group_id.y == ${targetGroup[1]}u &&
+ inputs.group_id.z == ${targetGroup[2]}u &&
+ inputs.local_index == ${targetLocalIndex}u) {
+ outputs = inputs;
+ }
+ }
+ `;
+
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({ code: wgsl }),
+ entryPoint: 'main',
+ },
+ });
+
+ // Allocate a buffer to hold the output structure.
+ const bufferNumElements = 32;
+ const outputBuffer = t.device.createBuffer({
+ size: bufferNumElements * Uint32Array.BYTES_PER_ELEMENT,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer: outputBuffer } }],
+ });
+
+ // Run the shader.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(numGroups[0], numGroups[1], numGroups[2]);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+
+ // Check the output values.
+ const checkOutput = (outputs: Uint32Array) => {
+ if (checkElementsEqual(outputs.slice(0, 3), targetGroup)) {
+ return new Error(
+ `group_id comparison failed\n` +
+ ` expected: ${targetGroup}\n` +
+ ` got: ${outputs.slice(0, 3)}`
+ );
+ }
+ if (outputs[8] !== targetLocalIndex) {
+ return new Error(
+ `local_index comparison failed\n` +
+ ` expected: ${targetLocalIndex}\n` +
+ ` got: ${outputs[8]}`
+ );
+ }
+ if (checkElementsEqual(outputs.slice(16, 19), numGroups)) {
+ return new Error(
+ `numGroups comparison failed\n` +
+ ` expected: ${numGroups}\n` +
+ ` got: ${outputs.slice(16, 19)}`
+ );
+ }
+ return undefined;
+ };
+ t.expectGPUBufferValuesPassCheck(outputBuffer, outputData => checkOutput(outputData), {
+ type: Uint32Array,
+ typedLength: bufferNumElements,
+ });
+ });
+
+g.test('shared_between_stages')
+ .desc(
+ `Test sharing an entry point IO struct between different pipeline stages.
+
+ This test defines an entry point IO structure, and uses it as both the output of a vertex
+ shader and the input to a fragment shader.
+ `
+ )
+ .fn(t => {
+ const size = [31, 31];
+ const wgsl = `
+ struct Interface {
+ @builtin(position) position : vec4<f32>,
+ @location(0) color : f32,
+ };
+
+ var<private> vertices : array<vec2<f32>, 3> = array<vec2<f32>, 3>(
+ vec2<f32>(-0.7, -0.7),
+ vec2<f32>( 0.0, 0.7),
+ vec2<f32>( 0.7, -0.7),
+ );
+
+ @vertex
+ fn vert_main(@builtin(vertex_index) index : u32) -> Interface {
+ return Interface(vec4<f32>(vertices[index], 0.0, 1.0), 1.0);
+ }
+
+ @fragment
+ fn frag_main(inputs : Interface) -> @location(0) vec4<f32> {
+ // Toggle red vs green based on the x position.
+ var color = vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ if (inputs.position.x > f32(${size[0] / 2})) {
+ color.r = inputs.color;
+ } else {
+ color.g = inputs.color;
+ }
+ return color;
+ }
+ `;
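+    // With a 31x31 target, the comparison against f32(15.5) splits the triangle down the middle:
+    // fragments in pixel columns 16..30 (position.x > 15.5) are red; those in columns 0..15 are green.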
+
+ // Set up the render pipeline.
+ const module = t.device.createShaderModule({ code: wgsl });
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'vert_main',
+ },
+ fragment: {
+ module,
+ entryPoint: 'frag_main',
+ targets: [
+ {
+ format: 'rgba8unorm',
+ },
+ ],
+ },
+ });
+
+    // Draw the half-red/half-green triangle.
+ const renderTarget = t.device.createTexture({
+ size,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ format: 'rgba8unorm',
+ });
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ clearValue: [0, 0, 0, 0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ pass.setPipeline(pipeline);
+ pass.draw(3);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+
+ // Test a few points to make sure we rendered a half-red/half-green triangle.
+ const redPixel = new Uint8Array([255, 0, 0, 255]);
+ const greenPixel = new Uint8Array([0, 255, 0, 255]);
+ const blackPixel = new Uint8Array([0, 0, 0, 0]);
+ t.expectSinglePixelComparisonsAreOkInTexture({ texture: renderTarget }, [
+ // Red pixels
+ { coord: { x: 16, y: 15 }, exp: redPixel },
+ { coord: { x: 16, y: 8 }, exp: redPixel },
+ { coord: { x: 22, y: 20 }, exp: redPixel },
+ // Green pixels
+ { coord: { x: 14, y: 15 }, exp: greenPixel },
+ { coord: { x: 14, y: 8 }, exp: greenPixel },
+ { coord: { x: 8, y: 20 }, exp: greenPixel },
+ // Black pixels
+ { coord: { x: 2, y: 2 }, exp: blackPixel },
+ { coord: { x: 2, y: 28 }, exp: blackPixel },
+ { coord: { x: 28, y: 2 }, exp: blackPixel },
+ { coord: { x: 28, y: 28 }, exp: blackPixel },
+ ]);
+ });
+
+g.test('shared_with_non_entry_point_function')
+ .desc(
+ `Test sharing an entry point IO struct with a non entry point function.
+
+ This test defines structures that contain builtin and location attributes, and uses those
+ structures as parameter and return types for entry point functions and regular functions.
+ `
+ )
+ .fn(t => {
+ // The test shader defines structures that contain members decorated with built-in variable
+ // attributes and user-defined IO. These structures are passed to and returned from regular
+ // functions.
+ const wgsl = `
+ struct Inputs {
+ @builtin(vertex_index) index : u32,
+ @location(0) color : vec4<f32>,
+ };
+ struct Outputs {
+ @builtin(position) position : vec4<f32>,
+ @location(0) color : vec4<f32>,
+ };
+
+ var<private> vertices : array<vec2<f32>, 3> = array<vec2<f32>, 3>(
+ vec2<f32>(-0.7, -0.7),
+ vec2<f32>( 0.0, 0.7),
+ vec2<f32>( 0.7, -0.7),
+ );
+
+ fn process(in : Inputs) -> Outputs {
+ var out : Outputs;
+ out.position = vec4<f32>(vertices[in.index], 0.0, 1.0);
+ out.color = in.color;
+ return out;
+ }
+
+ @vertex
+ fn vert_main(inputs : Inputs) -> Outputs {
+ return process(inputs);
+ }
+
+ @fragment
+ fn frag_main(@location(0) color : vec4<f32>) -> @location(0) vec4<f32> {
+ return color;
+ }
+ `;
+
+ // Set up the render pipeline.
+ const module = t.device.createShaderModule({ code: wgsl });
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'vert_main',
+ buffers: [
+ {
+ attributes: [
+ {
+ shaderLocation: 0,
+ format: 'float32x4',
+ offset: 0,
+ },
+ ],
+ arrayStride: 4 * Float32Array.BYTES_PER_ELEMENT,
+ },
+ ],
+ },
+ fragment: {
+ module,
+ entryPoint: 'frag_main',
+ targets: [
+ {
+ format: 'rgba8unorm',
+ },
+ ],
+ },
+ });
+
+ // Draw a triangle.
+ // The vertex buffer contains the vertex colors (all red).
+ const vertexBuffer = t.makeBufferWithContents(
+ new Float32Array([1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0]),
+ GPUBufferUsage.VERTEX
+ );
+ const renderTarget = t.device.createTexture({
+ size: [31, 31],
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ format: 'rgba8unorm',
+ });
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ clearValue: [0, 0, 0, 0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ pass.setPipeline(pipeline);
+ pass.setVertexBuffer(0, vertexBuffer);
+ pass.draw(3);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+
+ // Test a few points to make sure we rendered a red triangle.
+ const redPixel = new Uint8Array([255, 0, 0, 255]);
+ const blackPixel = new Uint8Array([0, 0, 0, 0]);
+ t.expectSinglePixelComparisonsAreOkInTexture({ texture: renderTarget }, [
+ // Red pixels
+ { coord: { x: 15, y: 15 }, exp: redPixel },
+ { coord: { x: 15, y: 8 }, exp: redPixel },
+ { coord: { x: 8, y: 20 }, exp: redPixel },
+ { coord: { x: 22, y: 20 }, exp: redPixel },
+ // Black pixels
+ { coord: { x: 2, y: 2 }, exp: blackPixel },
+ { coord: { x: 2, y: 28 }, exp: blackPixel },
+ { coord: { x: 28, y: 2 }, exp: blackPixel },
+ { coord: { x: 28, y: 28 }, exp: blackPixel },
+ ]);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/shadow.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/shadow.spec.ts
new file mode 100644
index 0000000000..92ec6cca18
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/shadow.spec.ts
@@ -0,0 +1,406 @@
+export const description = `
+Execution Tests for shadowing
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { iterRange } from '../../../common/util/util.js';
+import { GPUTest } from '../../gpu_test.js';
+
+export const g = makeTestGroup(GPUTest);
+
+/**
+ * Run a shader and check that the buffer output matches expectations.
+ *
+ * @param t The test object
+ * @param wgsl The shader source
+ * @param expected The array of expected values after running the shader
+ */
+function runShaderTest(t: GPUTest, wgsl: string, expected: Uint32Array): void {
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({ code: wgsl }),
+ entryPoint: 'main',
+ },
+ });
+
+ // Allocate a buffer and fill it with 0xdeadbeef words.
+ const outputBuffer = t.makeBufferWithContents(
+ new Uint32Array([...iterRange(expected.length, _i => 0xdeadbeef)]),
+ GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC
+ );
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer: outputBuffer } }],
+ });
+
+ // Run the shader.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(1);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+
+  // Check that the output values match the expected values.
+ t.expectGPUBufferValuesEqual(outputBuffer, expected);
+}
+
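+// Each struct S below contains only u32 members (arrays of u32 have a 4-byte stride), so the
+// struct has no padding and its members map one-to-one, in declaration order, onto the expected
+// Uint32Array passed to runShaderTest.
+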
+g.test('declaration')
+ .desc(`Test that shadowing is handled correctly`)
+ .fn(t => {
+ const wgsl = `
+ struct S {
+ my_var_start: u32,
+ my_var_block_shadow: u32,
+ my_var_unshadow: u32,
+ my_var_param_shadow: u32,
+ my_var_param_reshadow: u32,
+ my_var_after_func: u32,
+
+ my_const_start: u32,
+ my_const_block_shadow: u32,
+ my_const_unshadow: u32,
+ my_const_param_shadow: u32,
+ my_const_param_reshadow: u32,
+ my_const_after_func: u32,
+
+ my_let_block_shadow: u32,
+ my_let_param_reshadow: u32,
+ my_let_after_func: u32,
+
+ my_func_param_shadow: u32,
+ my_func_shadow: u32,
+ }
+ @group(0) @binding(0) var<storage, read_write> buffer : S;
+
+ var<private> my_var: u32 = 1;
+ const my_const: u32 = 100;
+
+ @compute @workgroup_size(1)
+ fn main() {
+ let my_let = 200u;
+
+ buffer.my_var_start = my_var; // 1
+ buffer.my_const_start = my_const; // 100
+
+ {
+ var my_var: u32 = 10;
+ const my_const: u32 = 110;
+
+ buffer.my_var_block_shadow = my_var; // 10
+ buffer.my_const_block_shadow = my_const; // 110
+
+ let my_let = 210u;
+ buffer.my_let_block_shadow = my_let; // 210
+ }
+
+ buffer.my_var_unshadow = my_var; // 1
+ buffer.my_const_unshadow = my_const; // 100
+
+ my_func(20, 120, my_let, 300);
+
+ buffer.my_var_after_func = my_var; // 1
+ buffer.my_const_after_func = my_const; // 100
+ buffer.my_let_after_func = my_let; // 200;
+ };
+
+ // Note, defined after |main|
+ fn my_func(my_var: u32, my_const: u32, my_let: u32, my_func: u32) {
+ buffer.my_var_param_shadow = my_var; // 20
+ buffer.my_const_param_shadow = my_const; // 120
+
+ buffer.my_func_param_shadow = my_func; // 300
+
+ // Need block here because of scoping rules for parameters
+ {
+ var my_var = 30u;
+ const my_const = 130u;
+
+ buffer.my_var_param_reshadow = my_var; // 30
+ buffer.my_const_param_reshadow = my_const; // 130
+
+ let my_let = 220u;
+ buffer.my_let_param_reshadow = my_let; // 220
+
+ let my_func: u32 = 310;
+ buffer.my_func_shadow = my_func; // 310
+ }
+ }
+ `;
+ runShaderTest(
+ t,
+ wgsl,
+ new Uint32Array([
+ // my_var
+ 1, // my_var_start
+ 10, // my_var_block_shadow
+ 1, // my_var_unshadow
+ 20, // my_var_param_shadow
+ 30, // my_var_param_reshadow
+ 1, // my_var_after_func
+ // my_const
+ 100, // my_const_start
+ 110, // my_const_block_shadow
+ 100, // my_const_unshadow
+ 120, // my_const_param_shadow
+ 130, // my_const_param_reshadow
+ 100, // my_const_after_func
+ // my_let
+ 210, // my_let_block_shadow
+ 220, // my_let_param_reshadow
+ 200, // my_let_after_func
+ // my_func
+ 300, // my_func_param_shadow
+ 310, // my_func_shadow
+ ])
+ );
+ });
+
+g.test('builtin')
+ .desc(`Test that shadowing a builtin name is handled correctly`)
+ .fn(t => {
+ const wgsl = `
+ struct S {
+ my_max_shadow: u32,
+ max_call: u32,
+ }
+ @group(0) @binding(0) var<storage, read_write> buffer : S;
+
+ @compute @workgroup_size(1)
+ fn main() {
+ let max = 400u;
+ buffer.my_max_shadow = max;
+
+ my_func();
+ };
+
+ fn my_func() {
+ buffer.max_call = max(310u, 410u);
+ }
+ `;
+ runShaderTest(
+ t,
+ wgsl,
+ new Uint32Array([
+ // my_max
+ 400, // my_max_shadow
+ 410, // max_call
+ ])
+ );
+ });
+
+g.test('for_loop')
+ .desc(`Test that shadowing is handled correctly with for loops`)
+ .fn(t => {
+ const wgsl = `
+ struct S {
+ my_idx_before: u32,
+ my_idx_loop: array<u32, 2>,
+ my_idx_after: u32,
+ }
+ @group(0) @binding(0) var<storage, read_write> buffer : S;
+
+ @compute @workgroup_size(1)
+ fn main() {
+ var my_idx = 500u;
+ buffer.my_idx_before = my_idx; // 500;
+ for (var my_idx = 0u; my_idx < 2u; my_idx++) {
+ let pos = my_idx;
+ var my_idx = 501u + my_idx;
+ buffer.my_idx_loop[pos] = my_idx; // 501, 502
+ }
+ buffer.my_idx_after = my_idx; // 500;
+ };
+ `;
+ runShaderTest(
+ t,
+ wgsl,
+ new Uint32Array([
+ 500, // my_idx_before
+ 501, // my_idx_loop[0]
+ 502, // my_idx_loop[1]
+ 500, // my_idx_after
+ ])
+ );
+ });
+
+g.test('while')
+ .desc(`Test that shadowing is handled correctly with while loops`)
+ .fn(t => {
+ const wgsl = `
+ struct S {
+ my_idx_before: u32,
+ my_idx_loop: array<u32, 2>,
+ my_idx_after: u32,
+ }
+ @group(0) @binding(0) var<storage, read_write> buffer : S;
+
+ @compute @workgroup_size(1)
+ fn main() {
+ var my_idx = 0u;
+ buffer.my_idx_before = my_idx; // 0;
+
+ var counter = 0u;
+ while (counter < 2) {
+ var my_idx = 500u + counter;
+ buffer.my_idx_loop[counter] = my_idx; // 500, 501
+
+ counter += 1;
+ }
+
+      buffer.my_idx_after = my_idx; // 0;
+ };
+ `;
+ runShaderTest(
+ t,
+ wgsl,
+ new Uint32Array([
+ 0, // my_idx_before
+ 500, // my_idx_loop[0]
+ 501, // my_idx_loop[1]
+ 0, // my_idx_after
+ ])
+ );
+ });
+
+g.test('loop')
+ .desc(`Test that shadowing is handled correctly with loops`)
+ .fn(t => {
+ const wgsl = `
+ struct S {
+ my_idx_before: u32,
+ my_idx_loop: array<u32, 2>,
+ my_idx_continuing: array<u32, 2>,
+ my_idx_after: u32,
+ }
+ @group(0) @binding(0) var<storage, read_write> buffer : S;
+
+ @compute @workgroup_size(1)
+ fn main() {
+ var my_idx = 0u;
+ buffer.my_idx_before = my_idx; // 0;
+
+ var counter = 0u;
+ loop {
+ var my_idx = 500u + counter;
+ buffer.my_idx_loop[counter] = my_idx; // 500, 501
+
+
+ continuing {
+ var my_idx = 600u + counter;
+ buffer.my_idx_continuing[counter] = my_idx; // 600, 601
+
+ counter += 1;
+ break if counter == 2;
+ }
+ }
+        buffer.my_idx_after = my_idx; // 0;
+ };
+ `;
+ runShaderTest(
+ t,
+ wgsl,
+ new Uint32Array([
+ 0, // my_idx_before
+ 500, // my_idx_loop[0]
+ 501, // my_idx_loop[1]
+ 600, // my_idx_continuing[0]
+ 601, // my_idx_continuing[1]
+ 0, // my_idx_after
+ ])
+ );
+ });
+
+g.test('switch')
+ .desc(`Test that shadowing is handled correctly with a switch`)
+ .fn(t => {
+ const wgsl = `
+ struct S {
+ my_idx_before: u32,
+ my_idx_case: u32,
+ my_idx_default: u32,
+ my_idx_after: u32,
+ }
+ @group(0) @binding(0) var<storage, read_write> buffer : S;
+
+ @compute @workgroup_size(1)
+ fn main() {
+ var my_idx = 0u;
+ buffer.my_idx_before = my_idx; // 0;
+
+ for (var i = 0; i < 2; i++) {
+ switch (i) {
+ case 0: {
+ var my_idx = 10u;
+ buffer.my_idx_case = my_idx; // 10
+ }
+ default: {
+ var my_idx = 20u;
+ buffer.my_idx_default = my_idx; // 20
+ }
+ }
+ }
+
+      buffer.my_idx_after = my_idx; // 0;
+ };
+ `;
+ runShaderTest(
+ t,
+ wgsl,
+ new Uint32Array([
+ 0, // my_idx_before
+ 10, // my_idx_case
+ 20, // my_idx_default
+ 0, // my_idx_after
+ ])
+ );
+ });
+
+g.test('if')
+  .desc(`Test that shadowing is handled correctly with an if statement`)
+ .fn(t => {
+ const wgsl = `
+ struct S {
+ my_idx_before: u32,
+ my_idx_if: u32,
+ my_idx_elseif: u32,
+ my_idx_else: u32,
+ my_idx_after: u32,
+ }
+ @group(0) @binding(0) var<storage, read_write> buffer : S;
+
+ @compute @workgroup_size(1)
+ fn main() {
+ var my_idx = 0u;
+ buffer.my_idx_before = my_idx; // 0;
+
+ for (var i = 0; i < 3; i++) {
+ if i == 0 {
+ var my_idx = 10u;
+ buffer.my_idx_if = my_idx; // 10
+ } else if i == 1 {
+ var my_idx = 20u;
+ buffer.my_idx_elseif = my_idx; // 20
+ } else {
+ var my_idx = 30u;
+ buffer.my_idx_else = my_idx; // 30
+ }
+ }
+
+      buffer.my_idx_after = my_idx; // 0;
+ };
+ `;
+ runShaderTest(
+ t,
+ wgsl,
+ new Uint32Array([
+ 0, // my_idx_before
+ 10, // my_idx_if
+ 20, // my_idx_elseif
+ 30, // my_idx_else
+ 0, // my_idx_after
+ ])
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/statement/increment_decrement.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/statement/increment_decrement.spec.ts
new file mode 100644
index 0000000000..fc0f60b15d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/statement/increment_decrement.spec.ts
@@ -0,0 +1,381 @@
+export const description = `
+Increment and decrement statement tests.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { TypedArrayBufferView } from '../../../../common/util/util.js';
+import { GPUTest } from '../../../gpu_test.js';
+import { kValue } from '../../../util/constants.js';
+
+export const g = makeTestGroup(GPUTest);
+
+/**
+ * Builds, runs, then checks the output of a statement shader test.
+ *
+ * @param t The test object
+ * @param fmt The WGSL store type of the output buffer elements (e.g. 'i32' or 'u32')
+ * @param values The expected contents of the output buffer after the shader runs
+ * @param wgsl_main The WGSL statements embedded into the body of the compute entry point
+ */
+export function runStatementTest(
+ t: GPUTest,
+ fmt: string,
+ values: TypedArrayBufferView,
+ wgsl_main: string
+) {
+ const wgsl = `
+struct Outputs {
+ data : array<${fmt}>,
+};
+var<private> count: u32 = 0;
+
+@group(0) @binding(1) var<storage, read_write> outputs : Outputs;
+
+fn push_output(value : ${fmt}) {
+ outputs.data[count] = value;
+ count += 1;
+}
+
+@compute @workgroup_size(1)
+fn main() {
+ _ = &outputs;
+ ${wgsl_main}
+}
+`;
+
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({ code: wgsl }),
+ entryPoint: 'main',
+ },
+ });
+
+ const maxOutputValues = 1000;
+ const outputBuffer = t.device.createBuffer({
+ size: 4 * (1 + maxOutputValues),
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 1, resource: { buffer: outputBuffer } }],
+ });
+
+ // Run the shader.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(1);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+
+ t.expectGPUBufferValuesEqual(outputBuffer, values);
+}
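+
+// For reference, a call like runStatementTest(t, 'u32', expectedValues, 'push_output(1u);')
+// generates roughly this WGSL: a read_write storage buffer `outputs` whose only member is an
+// array<u32>, a private `count`, and a push_output() helper that appends one value per call,
+// with the provided statements inlined into the @compute entry point `main`.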
+
+g.test('scalar_i32_increment')
+ .desc('Tests increment of scalar i32 values')
+ .fn(t => {
+ runStatementTest(
+ t,
+ 'i32',
+ new Int32Array([-9, 11, kValue.i32.negative.min + 1, kValue.i32.positive.max, 1]),
+ `
+ var a: i32 = -10;
+ var b: i32 = 10;
+ var c: i32 = ${kValue.i32.negative.min};
+ var d: i32 = ${kValue.i32.positive.max - 1};
+ var e: i32 = 0;
+
+ a++;
+ b++;
+ c++;
+ d++;
+ e++;
+
+ push_output(a);
+ push_output(b);
+ push_output(c);
+ push_output(d);
+ push_output(e);
+`
+ );
+ });
+
+g.test('scalar_i32_increment_overflow')
+ .desc('Tests increment of scalar i32 values which overflows')
+ .fn(t => {
+ runStatementTest(
+ t,
+ 'i32',
+ new Int32Array([kValue.i32.negative.min]),
+ `
+ var a: i32 = ${kValue.i32.positive.max};
+ a++;
+ push_output(a);
+`
+ );
+ });
+
+g.test('scalar_u32_increment')
+ .desc('Tests increment of scalar u32 values')
+ .fn(t => {
+ runStatementTest(
+ t,
+ 'u32',
+ new Uint32Array([1, 11, kValue.u32.max]),
+ `
+ var a: u32 = 0;
+ var b: u32 = 10;
+ var c: u32 = ${kValue.u32.max - 1};
+
+ a++;
+ b++;
+ c++;
+
+ push_output(a);
+ push_output(b);
+ push_output(c);
+`
+ );
+ });
+
+g.test('scalar_u32_increment_overflow')
+ .desc('Tests increment of scalar u32 values which overflows')
+ .fn(t => {
+ runStatementTest(
+ t,
+ 'u32',
+ new Uint32Array([0]),
+ `
+ var a: u32 = ${kValue.u32.max};
+ a++;
+ push_output(a);
+`
+ );
+ });
+
+g.test('scalar_i32_decrement')
+ .desc('Tests decrement of scalar i32 values')
+ .fn(t => {
+ runStatementTest(
+ t,
+ 'i32',
+ new Int32Array([-11, 9, kValue.i32.negative.min, kValue.i32.positive.max - 1, -1]),
+ `
+ var a: i32 = -10;
+ var b: i32 = 10;
+ var c: i32 = ${kValue.i32.negative.min + 1};
+ var d: i32 = ${kValue.i32.positive.max};
+ var e: i32 = 0;
+
+ a--;
+ b--;
+ c--;
+ d--;
+ e--;
+
+ push_output(a);
+ push_output(b);
+ push_output(c);
+ push_output(d);
+ push_output(e);
+`
+ );
+ });
+
+g.test('scalar_i32_decrement_underflow')
+ .desc('Tests decrement of scalar i32 values which underflow')
+ .fn(t => {
+ runStatementTest(
+ t,
+ 'i32',
+ new Int32Array([kValue.i32.positive.max]),
+ `
+ var a: i32 = ${kValue.i32.negative.min};
+ a--;
+ push_output(a);
+`
+ );
+ });
+
+g.test('scalar_u32_decrement')
+ .desc('Tests decrement of scalar u32 values')
+ .fn(t => {
+ runStatementTest(
+ t,
+ 'u32',
+ new Uint32Array([0, 9, kValue.u32.max - 1]),
+ `
+ var a: u32 = 1;
+ var b: u32 = 10;
+ var c: u32 = ${kValue.u32.max};
+
+ a--;
+ b--;
+ c--;
+
+ push_output(a);
+ push_output(b);
+ push_output(c);
+`
+ );
+ });
+
+g.test('scalar_u32_decrement_underflow')
+ .desc('Tests decrement of scalar u32 values which underflow')
+ .fn(t => {
+ runStatementTest(
+ t,
+ 'u32',
+ new Uint32Array([kValue.u32.max]),
+ `
+ var a: u32 = 0;
+ a--;
+ push_output(a);
+`
+ );
+ });
+
+g.test('vec2_element_increment')
+  .desc('Tests increment of vec2 values')
+ .fn(t => {
+ runStatementTest(
+ t,
+ 'i32',
+ new Int32Array([-9, 11]),
+ `
+ var a = vec2(-10, 10);
+
+ a.x++;
+ a.g++;
+
+ push_output(a.x);
+ push_output(a.y);
+`
+ );
+ });
+
+g.test('vec3_element_increment')
+ .desc('Tests increment of vec3 values')
+ .fn(t => {
+ runStatementTest(
+ t,
+ 'i32',
+ new Int32Array([-9, 11, kValue.i32.negative.min + 1]),
+ `
+ var a = vec3(-10, 10, ${kValue.i32.negative.min});
+
+ a.x++;
+ a.g++;
+ a.z++;
+
+ push_output(a.x);
+ push_output(a.y);
+ push_output(a.z);
+`
+ );
+ });
+
+g.test('vec4_element_increment')
+ .desc('Tests increment of vec4 values')
+ .fn(t => {
+ runStatementTest(
+ t,
+ 'i32',
+ new Int32Array([-9, 11, kValue.i32.negative.min + 1, kValue.i32.positive.max]),
+ `
+ var a: vec4<i32> = vec4(-10, 10, ${kValue.i32.negative.min}, ${kValue.i32.positive.max - 1});
+
+ a.x++;
+ a.g++;
+ a.z++;
+ a.a++;
+
+ push_output(a.x);
+ push_output(a.y);
+ push_output(a.z);
+ push_output(a.w);
+`
+ );
+ });
+
+g.test('vec2_element_decrement')
+ .desc('Tests decrement of vec2 values')
+ .fn(t => {
+ runStatementTest(
+ t,
+ 'i32',
+ new Int32Array([-11, 9]),
+ `
+ var a = vec2(-10, 10);
+
+ a.x--;
+ a.g--;
+
+ push_output(a.x);
+ push_output(a.y);
+`
+ );
+ });
+
+g.test('vec3_element_decrement')
+ .desc('Tests decrement of vec3 values')
+ .fn(t => {
+ runStatementTest(
+ t,
+ 'i32',
+ new Int32Array([-11, 9, kValue.i32.negative.min]),
+ `
+ var a = vec3(-10, 10, ${kValue.i32.negative.min + 1});
+
+ a.x--;
+ a.g--;
+ a.z--;
+
+ push_output(a.x);
+ push_output(a.y);
+ push_output(a.z);
+`
+ );
+ });
+
+g.test('vec4_element_decrement')
+ .desc('Tests decrement of vec4 values')
+ .fn(t => {
+ runStatementTest(
+ t,
+ 'i32',
+ new Int32Array([-11, 9, kValue.i32.negative.min, kValue.i32.positive.max - 1]),
+ `
+ var a: vec4<i32> = vec4(-10, 10, ${kValue.i32.negative.min + 1}, ${kValue.i32.positive.max});
+
+ a.x--;
+ a.g--;
+ a.z--;
+ a.a--;
+
+ push_output(a.x);
+ push_output(a.y);
+ push_output(a.z);
+ push_output(a.w);
+`
+ );
+ });
+
+g.test('frexp_exp_increment')
+ .desc('Tests increment can be used on a frexp field')
+ .fn(t => {
+ runStatementTest(
+ t,
+ 'i32',
+ new Int32Array([2]),
+ `
+ var a = frexp(1.23);
+ a.exp++;
+ push_output(a.exp);
+`
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/zero_init.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/zero_init.spec.ts
new file mode 100644
index 0000000000..e03a72f8df
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/execution/zero_init.spec.ts
@@ -0,0 +1,546 @@
+export const description = `Test that variables in the shader are zero initialized`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { iterRange, unreachable } from '../../../common/util/util.js';
+import { GPUTest } from '../../gpu_test.js';
+import {
+ ScalarType,
+ kVectorContainerTypes,
+ kVectorContainerTypeInfo,
+ kMatrixContainerTypes,
+ kMatrixContainerTypeInfo,
+ supportedScalarTypes,
+ supportsAtomics,
+} from '../types.js';
+
+type ShaderTypeInfo =
+ | { type: 'container'; containerType: 'array'; elementType: ShaderTypeInfo; length: number }
+ | { type: 'container'; containerType: 'struct'; members: readonly ShaderTypeInfo[] }
+ | {
+ type: 'container';
+ containerType: keyof typeof kVectorContainerTypeInfo | keyof typeof kMatrixContainerTypeInfo;
+ scalarType: ScalarType;
+ }
+ | { type: 'scalar'; scalarType: ScalarType; isAtomic: boolean };
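+
+// For example, an array<vec4<f32>, 2> is represented as
+//   { type: 'container', containerType: 'array', length: 2,
+//     elementType: { type: 'container', containerType: 'vec4', scalarType: 'f32' } }.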
+
+function prettyPrint(t: ShaderTypeInfo): string {
+ switch (t.type) {
+ case 'container':
+ switch (t.containerType) {
+ case 'array':
+ return `array<${prettyPrint(t.elementType)}, ${t.length}>`;
+ case 'struct':
+ return `struct { ${t.members.map(m => prettyPrint(m)).join(', ')} }`;
+ default:
+ return `${t.containerType}<${prettyPrint({
+ type: 'scalar',
+ scalarType: t.scalarType,
+ isAtomic: false,
+ })}>`;
+ }
+ break;
+ case 'scalar':
+ if (t.isAtomic) {
+ return `atomic<${t.scalarType}>`;
+ }
+ return t.scalarType;
+ }
+}
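+
+// For example, a two-member struct of a u32 and a vec3<i32> prints as 'struct { u32, vec3<i32> }'.
+// The strings are used below both as readable test parameters and as de-duplication keys for
+// generated struct types.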
+
+export const g = makeTestGroup(GPUTest);
+g.test('compute,zero_init')
+ .desc(
+    `Test that uninitialized variables in the workgroup, private, and function address spaces are initialized to zero.`
+ )
+ .params(u =>
+ u
+ // Only workgroup, function, and private variables can be declared without data bound to them.
+ // The implementation's shader translator should ensure these values are initialized.
+ .combine('addressSpace', ['workgroup', 'private', 'function'] as const)
+ .expand('workgroupSize', ({ addressSpace }) => {
+ switch (addressSpace) {
+ case 'workgroup':
+ return [
+ [1, 1, 1],
+ [1, 32, 1],
+ [64, 1, 1],
+ [1, 1, 48],
+ [1, 47, 1],
+ [33, 1, 1],
+ [1, 1, 63],
+ [8, 8, 2],
+ [7, 7, 3],
+ ];
+ case 'function':
+ case 'private':
+ return [[1, 1, 1]];
+ }
+ })
+ .beginSubcases()
+ // Fewer subcases: Only 0 and 2. If double-nested containers work, single-nested should too.
+ .combine('_containerDepth', [0, 2])
+ .expandWithParams(function* (p) {
+ const kElementCounts = [
+ [], // Not used. Depth 0 is always scalars.
+ [1, 3, 67], // Test something above the workgroup size.
+ [1, 3],
+ ] as const;
+ const kMemberCounts = [1, 3] as const;
+
+ const memoizedTypes: ShaderTypeInfo[][] = [];
+
+ function generateTypesMemo(depth: number): ShaderTypeInfo[] {
+ if (memoizedTypes[depth] === undefined) {
+ memoizedTypes[depth] = Array.from(generateTypes(depth));
+ }
+ return memoizedTypes[depth];
+ }
+
+ function* generateTypes(depth: number): Generator<ShaderTypeInfo> {
+ if (depth === 0) {
+ for (const isAtomic of supportsAtomics({
+ ...p,
+ access: 'read_write',
+ storageMode: undefined,
+ containerType: 'scalar',
+ })
+ ? [true, false]
+ : [false]) {
+ for (const scalarType of supportedScalarTypes({ isAtomic, ...p })) {
+ // Fewer subcases: For nested types, skip atomic u32 and non-atomic i32.
+ if (p._containerDepth > 0) {
+ if (scalarType === 'u32' && isAtomic) continue;
+ if (scalarType === 'i32' && !isAtomic) continue;
+ }
+
+ yield {
+ type: 'scalar',
+ scalarType,
+ isAtomic,
+ };
+ if (!isAtomic) {
+ // Vector types
+ for (const vectorType of kVectorContainerTypes) {
+ // Fewer subcases: For nested types, only include
+ // vec2<u32>, vec3<i32>, and vec4<f32>
+ if (p._containerDepth > 0) {
+ if (
+ !(
+ (vectorType === 'vec2' && scalarType === 'u32') ||
+ (vectorType === 'vec3' && scalarType === 'i32') ||
+ (vectorType === 'vec4' && scalarType === 'f32')
+ )
+ ) {
+ continue;
+ }
+ }
+ yield {
+ type: 'container',
+ containerType: vectorType,
+ scalarType,
+ };
+ }
+ // Matrices can only be f32.
+ if (scalarType === 'f32') {
+ for (const matrixType of kMatrixContainerTypes) {
+ yield {
+ type: 'container',
+ containerType: matrixType,
+ scalarType,
+ };
+ }
+ }
+ }
+ }
+ }
+ return;
+ }
+
+ for (const containerType of ['array', 'struct']) {
+ const innerTypes = generateTypesMemo(depth - 1);
+ switch (containerType) {
+ case 'array':
+ for (const elementCount of kElementCounts[depth]) {
+ for (const innerType of innerTypes) {
+ yield {
+ type: 'container',
+ containerType,
+ elementType: innerType,
+ length: elementCount,
+ };
+ }
+ }
+ break;
+ case 'struct':
+ for (const memberCount of kMemberCounts) {
+ const memberIndices = new Array(memberCount);
+ for (let m = 0; m < memberCount; ++m) {
+ memberIndices[m] = m;
+ }
+
+ // Don't generate all possible combinations of inner struct members,
+ // because that's in the millions. Instead, just round-robin through
+ // to pick member types. Loop through the types, concatenated forward
+ // and backward, three times to produce a bounded but variable set of
+ // types.
+ const memberTypes = [...innerTypes, ...[...innerTypes].reverse()];
+ const seenTypes = new Set();
+ let typeIndex = 0;
+ while (typeIndex < memberTypes.length * 3) {
+ const prevTypeIndex = typeIndex;
+ const members: ShaderTypeInfo[] = [];
+ for (const m of memberIndices) {
+ members[m] = memberTypes[typeIndex % memberTypes.length];
+ typeIndex += 1;
+ }
+
+ const t: ShaderTypeInfo = {
+ type: 'container',
+ containerType,
+ members,
+ };
+ const serializedT = prettyPrint(t);
+ if (seenTypes.has(serializedT)) {
+ // We produced an identical type. shuffle the member indices,
+ // "revert" typeIndex back to where it was before this loop, and
+ // shift it by one. This helps ensure we don't loop forever, and
+ // that we produce a different type on the next iteration.
+ memberIndices.push(memberIndices.shift());
+ typeIndex = prevTypeIndex + 1;
+ continue;
+ }
+ seenTypes.add(serializedT);
+ yield t;
+ }
+ }
+ break;
+ }
+ }
+ }
+
+ for (const t of generateTypesMemo(p._containerDepth)) {
+ yield {
+ shaderTypeParam: prettyPrint(t),
+ _type: t,
+ };
+ }
+ })
+ )
+ .batch(15)
+ .fn(async t => {
+ const { workgroupSize } = t.params;
+ const { maxComputeInvocationsPerWorkgroup } = t.device.limits;
+ const numWorkgroupInvocations = workgroupSize.reduce((a, b) => a * b);
+ t.skipIf(
+ numWorkgroupInvocations > maxComputeInvocationsPerWorkgroup,
+ `workgroupSize: ${workgroupSize} > maxComputeInvocationsPerWorkgroup: ${maxComputeInvocationsPerWorkgroup}`
+ );
+
+ let moduleScope = `
+ struct Output {
+ failed : atomic<u32>
+ }
+ @group(0) @binding(0) var<storage, read_write> output : Output;
+
+      // This uniform value is always zero. Adding it to the loop bounds below prevents shader
+      // compilers from fully unrolling the massive loops generated by these tests.
+ @group(0) @binding(1) var<uniform> zero : u32;
+ `;
+ let functionScope = '';
+
+ const declaredStructTypes = new Map<ShaderTypeInfo, string>();
+ const typeDecl = (function ensureType(
+ typeName: string,
+ type: ShaderTypeInfo,
+ depth: number = 0
+ ): string {
+ switch (type.type) {
+ case 'container':
+ switch (type.containerType) {
+ case 'array':
+ return `array<${ensureType(
+ `${typeName}_ArrayElement`,
+ type.elementType,
+ depth + 1
+ )}, ${type.length}>`;
+ case 'struct': {
+ if (declaredStructTypes.has(type)) {
+ return declaredStructTypes.get(type)!;
+ }
+
+ const members = type.members
+ .map((member, i) => {
+ return `\n member${i} : ${ensureType(
+ `${typeName}_Member${i}`,
+ member,
+ depth + 1
+ )},`;
+ })
+ .join('');
+ declaredStructTypes.set(type, typeName);
+ moduleScope += `\nstruct ${typeName} {`;
+ moduleScope += members;
+ moduleScope += '\n};';
+
+ return typeName;
+ }
+ default:
+ return `${type.containerType}<${ensureType(
+ typeName,
+ {
+ type: 'scalar',
+ scalarType: type.scalarType,
+ isAtomic: false,
+ },
+ depth + 1
+ )}>`;
+ }
+ break;
+ case 'scalar':
+ return type.isAtomic ? `atomic<${type.scalarType}>` : type.scalarType;
+ }
+ })('TestType', t.params._type);
+
+ switch (t.params.addressSpace) {
+ case 'workgroup':
+ case 'private':
+ moduleScope += `\nvar<${t.params.addressSpace}> testVar: ${typeDecl};`;
+ break;
+ case 'function':
+ functionScope += `\nvar testVar: ${typeDecl};`;
+ break;
+ }
+
+ const checkZeroCode = (function checkZero(
+ value: string,
+ type: ShaderTypeInfo,
+ depth: number = 0
+ ): string {
+ switch (type.type) {
+ case 'container':
+ switch (type.containerType) {
+ case 'array':
+ return `\nfor (var i${depth} = 0u; i${depth} < ${
+ type.length
+ }u + zero; i${depth} = i${depth} + 1u) {
+ ${checkZero(`${value}[i${depth}]`, type.elementType, depth + 1)}
+ }`;
+ case 'struct':
+ return type.members
+ .map((member, i) => {
+ return checkZero(`${value}.member${i}`, member, depth + 1);
+ })
+ .join('\n');
+ default:
+ if (type.containerType.indexOf('vec') !== -1) {
+ const length = type.containerType[3];
+ return `\nfor (var i${depth} = 0u; i${depth} < ${length}u + zero; i${depth} = i${depth} + 1u) {
+ ${checkZero(
+ `${value}[i${depth}]`,
+ {
+ type: 'scalar',
+ scalarType: type.scalarType,
+ isAtomic: false,
+ },
+ depth + 1
+ )}
+ }`;
+ } else if (type.containerType.indexOf('mat') !== -1) {
+ const cols = type.containerType[3];
+ const rows = type.containerType[5];
+ return `\nfor (var c${depth} = 0u; c${depth} < ${cols}u + zero; c${depth} = c${depth} + 1u) {
+ for (var r${depth} = 0u; r${depth} < ${rows}u; r${depth} = r${depth} + 1u) {
+ ${checkZero(
+ `${value}[c${depth}][r${depth}]`,
+ {
+ type: 'scalar',
+ scalarType: type.scalarType,
+ isAtomic: false,
+ },
+ depth + 1
+ )}
+ }
+ }`;
+ } else {
+ unreachable();
+ }
+ }
+ break;
+ case 'scalar': {
+ let expected;
+ switch (type.scalarType) {
+ case 'bool':
+ expected = 'false';
+ break;
+ case 'f32':
+ expected = '0.0';
+ break;
+ case 'i32':
+ expected = '0';
+ break;
+ case 'u32':
+ expected = '0u';
+ break;
+ }
+ if (type.isAtomic) {
+ value = `atomicLoad(&${value})`;
+ }
+
+          // Note: this could have an early return, but we omit it because it makes
+          // the tests fail with DXGI_ERROR_DEVICE_HUNG on Windows.
+ return `\nif (${value} != ${expected}) { atomicStore(&output.failed, 1u); }`;
+ }
+ }
+ })('testVar', t.params._type);
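+
+    // checkZeroCode now holds WGSL that walks every element of testVar and sets output.failed
+    // to 1u if anything is non-zero; e.g. for a vec2<u32> it is roughly:
+    //   for (var i0 = 0u; i0 < 2u + zero; i0 = i0 + 1u) {
+    //     if (testVar[i0] != 0u) { atomicStore(&output.failed, 1u); }
+    //   }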
+
+ const wgsl = `
+ ${moduleScope}
+ @compute @workgroup_size(${t.params.workgroupSize})
+ fn main() {
+ ${functionScope}
+ ${checkZeroCode}
+ _ = zero;
+ }
+ `;
+
+ if (t.params.addressSpace === 'workgroup') {
+      // First, run a separate shader that fills the maximum amount of workgroup memory with a
+      // known non-zero pattern, so that any reused workgroup memory holds stale data which the
+      // test shader's zero-initialization must overwrite.
+ const wg_memory_limits = t.device.limits.maxComputeWorkgroupStorageSize;
+ const wg_x_dim = t.device.limits.maxComputeWorkgroupSizeX;
+
+ const wgsl = `
+ @group(0) @binding(0) var<storage, read> inputs : array<u32>;
+ @group(0) @binding(1) var<storage, read_write> outputs : array<u32>;
+ var<workgroup> wg_mem : array<u32, ${wg_memory_limits} / 4>;
+
+ @compute @workgroup_size(${wg_x_dim})
+ fn fill(@builtin(local_invocation_index) lid : u32) {
+ const num_u32_per_invocation = ${wg_memory_limits} / (4 * ${wg_x_dim});
+
+ for (var i = 0u; i < num_u32_per_invocation; i++) {
+ let idx = num_u32_per_invocation * lid + i;
+ wg_mem[idx] = inputs[idx];
+ }
+ workgroupBarrier();
+ // Copy out to avoid wg_mem being elided.
+ for (var i = 0u; i < num_u32_per_invocation; i++) {
+ let idx = num_u32_per_invocation * lid + i;
+ outputs[idx] = wg_mem[idx];
+ }
+ }
+ `;
+
+ const fillLayout = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE,
+ buffer: { type: 'read-only-storage' },
+ },
+ {
+ binding: 1,
+ visibility: GPUShaderStage.COMPUTE,
+ buffer: { type: 'storage' },
+ },
+ ],
+ });
+
+ const fillPipeline = await t.device.createComputePipelineAsync({
+ layout: t.device.createPipelineLayout({ bindGroupLayouts: [fillLayout] }),
+ label: 'Workgroup Fill Pipeline',
+ compute: {
+ module: t.device.createShaderModule({
+ code: wgsl,
+ }),
+ entryPoint: 'fill',
+ },
+ });
+
+ const inputBuffer = t.makeBufferWithContents(
+ new Uint32Array([...iterRange(wg_memory_limits / 4, _i => 0xdeadbeef)]),
+ GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST
+ );
+ t.trackForCleanup(inputBuffer);
+ const outputBuffer = t.device.createBuffer({
+ size: wg_memory_limits,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(outputBuffer);
+
+ const bg = t.device.createBindGroup({
+ layout: fillPipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: {
+ buffer: inputBuffer,
+ },
+ },
+ {
+ binding: 1,
+ resource: {
+ buffer: outputBuffer,
+ },
+ },
+ ],
+ });
+
+ const e = t.device.createCommandEncoder();
+ const p = e.beginComputePass();
+ p.setPipeline(fillPipeline);
+ p.setBindGroup(0, bg);
+ p.dispatchWorkgroups(1);
+ p.end();
+ t.queue.submit([e.finish()]);
+ }
+
+ const pipeline = await t.device.createComputePipelineAsync({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({
+ code: wgsl,
+ }),
+ entryPoint: 'main',
+ },
+ });
+
+ const resultBuffer = t.device.createBuffer({
+ size: 4,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+ t.trackForCleanup(resultBuffer);
+
+ const zeroBuffer = t.device.createBuffer({
+ size: 4,
+ usage: GPUBufferUsage.UNIFORM,
+ });
+ t.trackForCleanup(zeroBuffer);
+
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: {
+ buffer: resultBuffer,
+ },
+ },
+ {
+ binding: 1,
+ resource: {
+ buffer: zeroBuffer,
+ },
+ },
+ ],
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(1);
+ pass.end();
+ t.queue.submit([encoder.finish()]);
+ t.expectGPUBufferValuesEqual(resultBuffer, new Uint32Array([0]));
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/regression/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/regression/README.txt
new file mode 100644
index 0000000000..eff2f830eb
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/regression/README.txt
@@ -0,0 +1,2 @@
+One-off tests that reproduce shader bugs found in implementations to prevent the bugs from
+appearing again.
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/types.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/types.ts
new file mode 100644
index 0000000000..799ea3affb
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/types.ts
@@ -0,0 +1,289 @@
+import { keysOf } from '../../common/util/data_tables.js';
+import { assert } from '../../common/util/util.js';
+import { align } from '../util/math.js';
+
+const kArrayLength = 3;
+
+export type Requirement = 'never' | 'may' | 'must'; // never is the same as "must not"
+export type ContainerType = 'scalar' | 'vector' | 'matrix' | 'atomic' | 'array';
+export type ScalarType = 'i32' | 'u32' | 'f32' | 'bool';
+
+export const HostSharableTypes = ['i32', 'u32', 'f32'] as const;
+
+/** Info for each plain scalar type. */
+export const kScalarTypeInfo =
+ /* prettier-ignore */ {
+ 'i32': { layout: { alignment: 4, size: 4 }, supportsAtomics: true, arrayLength: 1, innerLength: 0 },
+ 'u32': { layout: { alignment: 4, size: 4 }, supportsAtomics: true, arrayLength: 1, innerLength: 0 },
+ 'f32': { layout: { alignment: 4, size: 4 }, supportsAtomics: false, arrayLength: 1, innerLength: 0 },
+ 'bool': { layout: undefined, supportsAtomics: false, arrayLength: 1, innerLength: 0 },
+} as const;
+/** List of all plain scalar types. */
+export const kScalarTypes = keysOf(kScalarTypeInfo);
+
+/** Info for each vecN<> container type. */
+export const kVectorContainerTypeInfo =
+ /* prettier-ignore */ {
+ 'vec2': { layout: { alignment: 8, size: 8 }, arrayLength: 2 , innerLength: 0 },
+ 'vec3': { layout: { alignment: 16, size: 12 }, arrayLength: 3 , innerLength: 0 },
+ 'vec4': { layout: { alignment: 16, size: 16 }, arrayLength: 4 , innerLength: 0 },
+} as const;
+/** List of all vecN<> container types. */
+export const kVectorContainerTypes = keysOf(kVectorContainerTypeInfo);
+
+/** Info for each matNxN<> container type. */
+export const kMatrixContainerTypeInfo =
+ /* prettier-ignore */ {
+ 'mat2x2': { layout: { alignment: 8, size: 16 }, arrayLength: 2, innerLength: 2 },
+ 'mat3x2': { layout: { alignment: 8, size: 24 }, arrayLength: 3, innerLength: 2 },
+ 'mat4x2': { layout: { alignment: 8, size: 32 }, arrayLength: 4, innerLength: 2 },
+ 'mat2x3': { layout: { alignment: 16, size: 32 }, arrayLength: 2, innerLength: 3 },
+ 'mat3x3': { layout: { alignment: 16, size: 48 }, arrayLength: 3, innerLength: 3 },
+ 'mat4x3': { layout: { alignment: 16, size: 64 }, arrayLength: 4, innerLength: 3 },
+ 'mat2x4': { layout: { alignment: 16, size: 32 }, arrayLength: 2, innerLength: 4 },
+ 'mat3x4': { layout: { alignment: 16, size: 48 }, arrayLength: 3, innerLength: 4 },
+ 'mat4x4': { layout: { alignment: 16, size: 64 }, arrayLength: 4, innerLength: 4 },
+} as const;
+/** List of all matNxN<> container types. */
+export const kMatrixContainerTypes = keysOf(kMatrixContainerTypeInfo);
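+
+// In the matrix table above, arrayLength is the column count and innerLength is the column
+// vector width: a matCxR<f32> is laid out as C columns of vecR<f32>, each column padded to the
+// vector's alignment (e.g. mat2x3: 2 columns * 16 bytes = size 32, alignment 16).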
+
+export type AddressSpace = 'storage' | 'uniform' | 'private' | 'function' | 'workgroup' | 'handle';
+export type AccessMode = 'read' | 'write' | 'read_write';
+export type Scope = 'module' | 'function';
+
+export const kAccessModeInfo = {
+ read: { read: true, write: false },
+ write: { read: false, write: true },
+ read_write: { read: true, write: true },
+} as const;
+
+export type AddressSpaceInfo = {
+ // Variables in this address space must be declared in what scope?
+ scope: Scope;
+
+ // True if a variable in this address space requires a binding.
+ binding: boolean;
+
+ // Spell the address space in var declarations?
+ spell: Requirement;
+
+ // Access modes for ordinary accesses (loads, stores).
+ // The first one is the default.
+ // This is empty for the 'handle' address space where access is opaque.
+ accessModes: readonly AccessMode[];
+
+ // Spell the access mode in var declarations?
+ // 7.3 var Declarations
+ // The access mode always has a default value, and except for variables
+ // in the storage address space, must not be specified in the WGSL source.
+ // See §13.3 Address Spaces.
+ spellAccessMode: Requirement;
+};
+
+export const kAddressSpaceInfo: Record<string, AddressSpaceInfo> = {
+ storage: {
+ scope: 'module',
+ binding: true,
+ spell: 'must',
+ accessModes: ['read', 'read_write'],
+ spellAccessMode: 'may',
+ },
+ uniform: {
+ scope: 'module',
+ binding: true,
+ spell: 'must',
+ accessModes: ['read'],
+ spellAccessMode: 'never',
+ },
+ private: {
+ scope: 'module',
+ binding: false,
+ spell: 'must',
+ accessModes: ['read_write'],
+ spellAccessMode: 'never',
+ },
+ workgroup: {
+ scope: 'module',
+ binding: false,
+ spell: 'must',
+ accessModes: ['read_write'],
+ spellAccessMode: 'never',
+ },
+ function: {
+ scope: 'function',
+ binding: false,
+ spell: 'may',
+ accessModes: ['read_write'],
+ spellAccessMode: 'never',
+ },
+ handle: {
+ scope: 'module',
+ binding: true,
+ spell: 'never',
+ accessModes: [],
+ spellAccessMode: 'never',
+ },
+} as const;
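+
+// Reading the table: 'storage' variables, for example, are module-scope, require a binding,
+// must spell the address space, and may spell an access mode, e.g.
+//   @group(0) @binding(0) var<storage, read_write> buf : array<u32>;
+// while 'function' variables may omit the address space entirely, e.g. `var x : i32;` in a body.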
+
+/** List of texel formats and their shader representation */
+export const TexelFormats = [
+ { format: 'rgba8unorm', _shaderType: 'f32' },
+ { format: 'rgba8snorm', _shaderType: 'f32' },
+ { format: 'rgba8uint', _shaderType: 'u32' },
+ { format: 'rgba8sint', _shaderType: 'i32' },
+ { format: 'rgba16uint', _shaderType: 'u32' },
+ { format: 'rgba16sint', _shaderType: 'i32' },
+ { format: 'rgba16float', _shaderType: 'f32' },
+ { format: 'r32uint', _shaderType: 'u32' },
+ { format: 'r32sint', _shaderType: 'i32' },
+ { format: 'r32float', _shaderType: 'f32' },
+ { format: 'rg32uint', _shaderType: 'u32' },
+ { format: 'rg32sint', _shaderType: 'i32' },
+ { format: 'rg32float', _shaderType: 'f32' },
+  { format: 'rgba32uint', _shaderType: 'u32' },
+ { format: 'rgba32sint', _shaderType: 'i32' },
+ { format: 'rgba32float', _shaderType: 'f32' },
+] as const;
+
+/**
+ * Generates a variety of types (scalar, vector, matrix, sized/unsized array) for testing.
+ */
+export function* generateTypes({
+ addressSpace,
+ baseType,
+ containerType,
+ isAtomic = false,
+}: {
+ addressSpace: AddressSpace;
+ /** Base scalar type (i32/u32/f32/bool). */
+ baseType: ScalarType;
+ /** Container type (scalar/vector/matrix/array) */
+ containerType: ContainerType;
+ /** Whether to wrap the baseType in `atomic<>`. */
+ isAtomic?: boolean;
+}) {
+ const scalarInfo = kScalarTypeInfo[baseType];
+ if (isAtomic) {
+ assert(scalarInfo.supportsAtomics, 'type does not support atomics');
+ }
+ const scalarType = isAtomic ? `atomic<${baseType}>` : baseType;
+
+ // Storage and uniform require host-sharable types.
+ if (addressSpace === 'storage' || addressSpace === 'uniform') {
+ assert(isHostSharable(baseType), 'type ' + baseType.toString() + ' is not host sharable');
+ }
+
+ // Scalar types
+ if (containerType === 'scalar') {
+ yield {
+ type: `${scalarType}`,
+ _kTypeInfo: {
+ elementBaseType: `${scalarType}`,
+ ...scalarInfo,
+ },
+ };
+ }
+
+ // Vector types
+ if (containerType === 'vector') {
+ for (const vectorType of kVectorContainerTypes) {
+ yield {
+ type: `${vectorType}<${scalarType}>`,
+ _kTypeInfo: { elementBaseType: baseType, ...kVectorContainerTypeInfo[vectorType] },
+ };
+ }
+ }
+
+ if (containerType === 'matrix') {
+ // Matrices can only be f32.
+ if (baseType === 'f32') {
+ for (const matrixType of kMatrixContainerTypes) {
+ const matrixInfo = kMatrixContainerTypeInfo[matrixType];
+ yield {
+ type: `${matrixType}<${scalarType}>`,
+ _kTypeInfo: {
+ elementBaseType: `vec${matrixInfo.innerLength}<${scalarType}>`,
+ ...matrixInfo,
+ },
+ };
+ }
+ }
+ }
+
+ // Array types
+ if (containerType === 'array') {
+ const arrayTypeInfo = {
+ elementBaseType: `${baseType}`,
+ arrayLength: kArrayLength,
+ layout: scalarInfo.layout
+ ? {
+ alignment: scalarInfo.layout.alignment,
+ size:
+ addressSpace === 'uniform'
+ ? // Uniform storage class must have array elements aligned to 16.
+ kArrayLength *
+ arrayStride({
+ ...scalarInfo.layout,
+ alignment: 16,
+ })
+ : kArrayLength * arrayStride(scalarInfo.layout),
+ }
+ : undefined,
+ };
+
+ // Sized
+ if (addressSpace === 'uniform') {
+ yield {
+ type: `array<vec4<${scalarType}>,${kArrayLength}>`,
+ _kTypeInfo: arrayTypeInfo,
+ };
+ } else {
+ yield { type: `array<${scalarType},${kArrayLength}>`, _kTypeInfo: arrayTypeInfo };
+ }
+ // Unsized
+ if (addressSpace === 'storage') {
+ yield { type: `array<${scalarType}>`, _kTypeInfo: arrayTypeInfo };
+ }
+ }
+
+ function arrayStride(elementLayout: { size: number; alignment: number }) {
+ return align(elementLayout.size, elementLayout.alignment);
+ }
+
+ function isHostSharable(baseType: ScalarType) {
+ for (const sharableType of HostSharableTypes) {
+ if (sharableType === baseType) return true;
+ }
+ return false;
+ }
+}
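+
+// For example, generateTypes({ addressSpace: 'storage', baseType: 'u32', containerType: 'vector' })
+// yields { type: 'vec2<u32>', ... }, { type: 'vec3<u32>', ... }, and { type: 'vec4<u32>', ... },
+// each _kTypeInfo carrying the element base type and the layout from the tables above.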
+
+/** Atomic access requires scalar/array container type and storage/workgroup memory. */
+export function supportsAtomics(p: {
+ addressSpace: string;
+ storageMode: AccessMode | undefined;
+ access: string;
+ containerType: ContainerType;
+}) {
+ return (
+ ((p.addressSpace === 'storage' && p.storageMode === 'read_write') ||
+ p.addressSpace === 'workgroup') &&
+ (p.containerType === 'scalar' || p.containerType === 'array')
+ );
+}
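+
+// For example, atomics are supported for a read_write 'storage' or a 'workgroup' scalar/array,
+// but never for 'uniform', 'private', or 'function' variables.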
+
+/** Generates an iterator of supported base types (i32/u32/f32/bool) */
+export function* supportedScalarTypes(p: { isAtomic: boolean; addressSpace: string }) {
+ for (const scalarType of kScalarTypes) {
+ const info = kScalarTypeInfo[scalarType];
+
+ // Test atomics only on supported scalar types.
+ if (p.isAtomic && !info.supportsAtomics) continue;
+
+ // Storage and uniform require host-sharable types.
+ const isHostShared = p.addressSpace === 'storage' || p.addressSpace === 'uniform';
+ if (isHostShared && info.layout === undefined) continue;
+
+ yield scalarType;
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/README.txt
new file mode 100644
index 0000000000..3fd3b075ae
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/README.txt
@@ -0,0 +1 @@
+Positive and negative tests for all the validation rules of the shading language.
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/const_assert/const_assert.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/const_assert/const_assert.spec.ts
new file mode 100644
index 0000000000..caaabc54d3
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/const_assert/const_assert.spec.ts
@@ -0,0 +1,201 @@
+export const description = `Validation tests for const_assert`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+/**
+ * Builds a const_assert() statement.
+ * @param expr the constant expression
+ * @param scope module-scope or function-scope constant expression
+ * @returns the WGSL code
+ */
+function buildStaticAssert(expr: string, scope: 'module' | 'function') {
+ const stmt = `const_assert (${expr});`;
+ return scope === 'module' ? stmt : `fn f() { ${stmt} }`;
+}
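+
+// For example, buildStaticAssert('one == 1', 'module') returns "const_assert (one == 1);",
+// while buildStaticAssert('one == 1', 'function') wraps that same statement in "fn f() { ... }".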
+
+interface Condition {
+ expr: string;
+ val: boolean;
+}
+
+interface MappedCondition {
+ [key: string]: Condition;
+}
+
+const kConditionCases: MappedCondition = {
+ any_false: { expr: `any(vec3(false, false, false))`, val: false },
+ any_true: { expr: `any(vec3(false, true, false))`, val: true },
+ binary_op_eq_const_false: { expr: `one + 5 == two`, val: false },
+ binary_op_eq_const_true: { expr: `one + 1 == two`, val: true },
+ const_eq_literal_float_false: { expr: `one == 0.0`, val: false },
+ const_eq_literal_float_true: { expr: `one == 1.0`, val: true },
+ const_eq_literal_int_false: { expr: `one == 10`, val: false },
+ const_eq_literal_int_true: { expr: `one == 1`, val: true },
+ literal_false: { expr: `false`, val: false },
+ literal_not_false: { expr: `!false`, val: true },
+ literal_not_true: { expr: `!true`, val: false },
+ literal_true: { expr: `true`, val: true },
+ min_max_false: { expr: `min(three, max(two, one)) == 3`, val: false },
+ min_max_true: { expr: `min(three, max(two, one)) == 2`, val: true },
+ variable_false: { expr: `is_false`, val: false },
+ variable_not_false: { expr: `!is_false`, val: true },
+ variable_not_true: { expr: `!is_true`, val: false },
+ variable_true: { expr: `is_true`, val: true },
+};
+
+const kConditionConstants = `
+const one = 1;
+const two = 2;
+const three = 3;
+const is_true = true;
+const is_false = false;
+`;
+
+g.test('constant_expression_no_assert')
+ .desc(`Test that const_assert does not assert on a true conditional expression`)
+ .params(u =>
+ u
+ .combine('case', keysOf(kConditionCases))
+ .combine('scope', ['module', 'function'] as const)
+ .beginSubcases()
+ )
+ .fn(t => {
+ const expr = kConditionCases[t.params.case].expr;
+ const val = kConditionCases[t.params.case].val;
+ t.expectCompileResult(
+ true,
+ kConditionConstants + buildStaticAssert(val ? expr : `!(${expr})`, t.params.scope)
+ );
+ });
+
+g.test('constant_expression_assert')
+ .desc(`Test that const_assert does assert on a false conditional expression`)
+ .params(u =>
+ u
+ .combine('case', keysOf(kConditionCases))
+ .combine('scope', ['module', 'function'] as const)
+ .beginSubcases()
+ )
+ .fn(t => {
+ const expr = kConditionCases[t.params.case].expr;
+ const val = kConditionCases[t.params.case].val;
+ t.expectCompileResult(
+ false,
+ kConditionConstants + buildStaticAssert(val ? `!(${expr})` : expr, t.params.scope)
+ );
+ });
+
+g.test('constant_expression_logical_or_no_assert')
+ .desc(
+ `Test that const_assert does not assert on a condition expression that contains a logical-or which evaluates to true`
+ )
+ .params(u =>
+ u
+ .combine('lhs', keysOf(kConditionCases))
+ .combine('rhs', keysOf(kConditionCases))
+ .combine('scope', ['module', 'function'] as const)
+ .beginSubcases()
+ )
+ .fn(t => {
+ const expr = `(${kConditionCases[t.params.lhs].expr}) || (${
+ kConditionCases[t.params.rhs].expr
+ })`;
+ const val = kConditionCases[t.params.lhs].val || kConditionCases[t.params.rhs].val;
+ t.expectCompileResult(
+ true,
+ kConditionConstants + buildStaticAssert(val ? expr : `!(${expr})`, t.params.scope)
+ );
+ });
+
+g.test('constant_expression_logical_or_assert')
+ .desc(
+ `Test that const_assert does assert on a condition expression that contains a logical-or which evaluates to false`
+ )
+ .params(u =>
+ u
+ .combine('lhs', keysOf(kConditionCases))
+ .combine('rhs', keysOf(kConditionCases))
+ .combine('scope', ['module', 'function'] as const)
+ .beginSubcases()
+ )
+ .fn(t => {
+ const expr = `(${kConditionCases[t.params.lhs].expr}) || (${
+ kConditionCases[t.params.rhs].expr
+ })`;
+ const val = kConditionCases[t.params.lhs].val || kConditionCases[t.params.rhs].val;
+ t.expectCompileResult(
+ false,
+ kConditionConstants + buildStaticAssert(val ? `!(${expr})` : expr, t.params.scope)
+ );
+ });
+
+g.test('constant_expression_logical_and_no_assert')
+ .desc(
+ `Test that const_assert does not assert on a condition expression that contains a logical-and which evaluates to true`
+ )
+ .params(u =>
+ u
+ .combine('lhs', keysOf(kConditionCases))
+ .combine('rhs', keysOf(kConditionCases))
+ .combine('scope', ['module', 'function'] as const)
+ .beginSubcases()
+ )
+ .fn(t => {
+ const expr = `(${kConditionCases[t.params.lhs].expr}) && (${
+ kConditionCases[t.params.rhs].expr
+ })`;
+ const val = kConditionCases[t.params.lhs].val && kConditionCases[t.params.rhs].val;
+ t.expectCompileResult(
+ true,
+ kConditionConstants + buildStaticAssert(val ? expr : `!(${expr})`, t.params.scope)
+ );
+ });
+
+g.test('constant_expression_logical_and_assert')
+ .desc(
+ `Test that const_assert does assert on a condition expression that contains a logical-and which evaluates to false`
+ )
+ .params(u =>
+ u
+ .combine('lhs', keysOf(kConditionCases))
+ .combine('rhs', keysOf(kConditionCases))
+ .combine('scope', ['module', 'function'] as const)
+ .beginSubcases()
+ )
+ .fn(t => {
+ const expr = `(${kConditionCases[t.params.lhs].expr}) && (${
+ kConditionCases[t.params.rhs].expr
+ })`;
+ const val = kConditionCases[t.params.lhs].val && kConditionCases[t.params.rhs].val;
+ t.expectCompileResult(
+ false,
+ kConditionConstants + buildStaticAssert(val ? `!(${expr})` : expr, t.params.scope)
+ );
+ });
+
+g.test('evaluation_stage')
+ .desc(`Test that the const_assert expression must be a constant expression.`)
+ .params(u =>
+ u
+ .combine('scope', ['module', 'function'] as const)
+ .combine('stage', ['constant', 'override', 'runtime'] as const)
+ .beginSubcases()
+ )
+ .fn(t => {
+ const staticAssert = buildStaticAssert('value', t.params.scope);
+ switch (t.params.stage) {
+ case 'constant':
+ t.expectCompileResult(true, `const value = true;\n${staticAssert}`);
+ break;
+ case 'override':
+ t.expectCompileResult(false, `override value = true;\n${staticAssert}`);
+ break;
+ case 'runtime':
+ t.expectCompileResult(false, `var<private> value = true;\n${staticAssert}`);
+ break;
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/decl/const.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/decl/const.spec.ts
new file mode 100644
index 0000000000..6ded2480c7
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/decl/const.spec.ts
@@ -0,0 +1,61 @@
+export const description = `
+Validation tests for const declarations
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+g.test('no_direct_recursion')
+ .desc('Test that direct recursion of const declarations is rejected')
+ .params(u => u.combine('target', ['a', 'b']))
+ .fn(t => {
+ const wgsl = `
+const a : i32 = 42;
+const b : i32 = ${t.params.target};
+`;
+ t.expectCompileResult(t.params.target === 'a', wgsl);
+ });
+
+g.test('no_indirect_recursion')
+ .desc('Test that indirect recursion of const declarations is rejected')
+ .params(u => u.combine('target', ['a', 'b']))
+ .fn(t => {
+ const wgsl = `
+const a : i32 = 42;
+const b : i32 = c;
+const c : i32 = ${t.params.target};
+`;
+ t.expectCompileResult(t.params.target === 'a', wgsl);
+ });
+
+g.test('no_indirect_recursion_via_array_size')
+ .desc('Test that indirect recursion of const declarations via array size expressions is rejected')
+ .params(u => u.combine('target', ['a', 'b']))
+ .fn(t => {
+ const wgsl = `
+const a = 4;
+const b = c[0];
+const c = array<i32, ${t.params.target}>(4, 4, 4, 4);
+`;
+ t.expectCompileResult(t.params.target === 'a', wgsl);
+ });
+
+g.test('no_indirect_recursion_via_struct_attribute')
+ .desc('Test that indirect recursion of const declarations via struct members is rejected')
+ .params(u =>
+ u //
+ .combine('target', ['a', 'b'])
+ .combine('attribute', ['align', 'location', 'size'])
+ )
+ .fn(t => {
+ const wgsl = `
+struct S {
+ @${t.params.attribute}(${t.params.target}) a : i32
+}
+const a = 4;
+const b = S(4).a;
+`;
+ t.expectCompileResult(t.params.target === 'a', wgsl);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/decl/override.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/decl/override.spec.ts
new file mode 100644
index 0000000000..82a35a2f59
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/decl/override.spec.ts
@@ -0,0 +1,31 @@
+export const description = `
+Validation tests for override declarations
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+g.test('no_direct_recursion')
+ .desc('Test that direct recursion of override declarations is rejected')
+ .params(u => u.combine('target', ['a', 'b']))
+ .fn(t => {
+ const wgsl = `
+override a : i32 = 42;
+override b : i32 = ${t.params.target};
+`;
+ t.expectCompileResult(t.params.target === 'a', wgsl);
+ });
+
+g.test('no_indirect_recursion')
+ .desc('Test that indirect recursion of override declarations is rejected')
+ .params(u => u.combine('target', ['a', 'b']))
+ .fn(t => {
+ const wgsl = `
+override a : i32 = 42;
+override b : i32 = c;
+override c : i32 = ${t.params.target};
+`;
+ t.expectCompileResult(t.params.target === 'a', wgsl);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/decl/ptr_spelling.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/decl/ptr_spelling.spec.ts
new file mode 100644
index 0000000000..7ce122e518
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/decl/ptr_spelling.spec.ts
@@ -0,0 +1,153 @@
+export const description = `
+Validate spelling of pointer types.
+
+Pointer types may appear in WGSL source, e.g. in alias declarations, let declarations,
+and function parameter lists.
+
+They are parameterized by:
+- address space, always
+- store type
+- and access mode, as specified by the table in Address Spaces.
+  Concretely, only the 'storage' address space allows an explicit access mode, and it
+  allows 'read' and 'read_write'.
+
+A pointer type can be spelled only if it corresponds to a variable that could be
+declared in the program. So we need to test combinations against possible variable
+declarations.
+`;
+
+// This file tests spelling of the pointer type on let-declared pointers.
+//
+// Spelling of pointer-typed parameters on user-declared functions is tested by
+// webgpu:shader,validation,functions,restrictions:function_parameter_types:"*"
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { AddressSpace, kAccessModeInfo, kAddressSpaceInfo } from '../../types.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+import {
+ pointerType,
+ explicitSpaceExpander,
+ accessModeExpander,
+ getVarDeclShader,
+ supportsWrite,
+ ShaderStage,
+} from './util.js';
+
+// Address spaces that can hold an i32 variable.
+const kNonHandleAddressSpaces = keysOf(kAddressSpaceInfo).filter(
+ as => as !== 'handle'
+) as AddressSpace[];
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+g.test('let_ptr_explicit_type_matches_var')
+ .desc(
+ 'Let-declared pointer with explicit type initialized from var with same address space and access mode'
+ )
+ .specURL('https://w3.org/TR#ref-ptr-types')
+ .params(u =>
+ u // Generate non-handle variables in all valid permutations of address space and access mode.
+ .combine('addressSpace', kNonHandleAddressSpaces)
+ .expand('explicitSpace', explicitSpaceExpander)
+ .combine('explicitAccess', [false, true])
+ .expand('accessMode', accessModeExpander)
+ .combine('stage', ['compute' as ShaderStage]) // Only need to check compute shaders
+ // Vary the store type.
+ .combine('ptrStoreType', ['i32', 'u32'])
+ )
+ .fn(t => {
+ // Match the address space and access mode.
+ const prog = getVarDeclShader(t.params, `let p: ${pointerType(t.params)} = &x;`);
+ const ok = t.params.ptrStoreType === 'i32'; // The store type matches the variable's store type.
+
+ t.expectCompileResult(ok, prog);
+ });
+
+g.test('let_ptr_reads')
+ .desc('Validate reading via ptr when permitted by access mode')
+ .params(u =>
+ u // Generate non-handle variables in all valid permutations of address space and access mode.
+ .combine('addressSpace', kNonHandleAddressSpaces)
+ .expand('explicitSpace', explicitSpaceExpander)
+ .combine('explicitAccess', [false, true])
+ .expand('accessMode', accessModeExpander)
+ .combine('stage', ['compute' as ShaderStage]) // Only need to check compute shaders
+ .combine('inferPtrType', [false, true])
+ .combine('ptrStoreType', ['i32'])
+ )
+ .fn(t => {
+ // Try reading through the pointer.
+    const typePart = t.params.inferPtrType ? '' : `: ${pointerType(t.params)}`;
+ const prog = getVarDeclShader(t.params, `let p${typePart} = &x; let read = *p;`);
+ const ok = true; // We can always read.
+
+ t.expectCompileResult(ok, prog);
+ });
+
+g.test('let_ptr_writes')
+ .desc('Validate writing via ptr when permitted by access mode')
+ .specURL('https://w3.org/TR#ref-ptr-types')
+ .params(u =>
+ u // Generate non-handle variables in all valid permutations of address space and access mode.
+ .combine('addressSpace', kNonHandleAddressSpaces)
+ .expand('explicitSpace', explicitSpaceExpander)
+ .combine('explicitAccess', [false, true])
+ .expand('accessMode', accessModeExpander)
+ .combine('stage', ['compute' as ShaderStage]) // Only need to check compute shaders
+ .combine('inferPtrType', [false, true])
+ .combine('ptrStoreType', ['i32'])
+ )
+ .fn(t => {
+ // Try writing through the pointer.
+    const typePart = t.params.inferPtrType ? '' : `: ${pointerType(t.params)}`;
+ const prog = getVarDeclShader(t.params, `let p${typePart} = &x; *p = 42;`);
+ const ok = supportsWrite(t.params);
+
+ t.expectCompileResult(ok, prog);
+ });
+
+g.test('ptr_handle_space_invalid').fn(t => {
+ t.expectCompileResult(false, 'alias p = ptr<handle,u32>;');
+});
+
+g.test('ptr_bad_store_type')
+ .params(u => u.combine('storeType', ['undeclared', 'clamp', '1']))
+ .fn(t => {
+ t.expectCompileResult(false, `alias p = ptr<private,${t.params.storeType}>;`);
+ });
+
+g.test('ptr_address_space_never_uses_access_mode')
+ .params(u =>
+ u
+ .combine(
+ 'addressSpace',
+ keysOf(kAddressSpaceInfo).filter(i => kAddressSpaceInfo[i].spellAccessMode === 'never')
+ )
+ .combine('accessMode', keysOf(kAccessModeInfo))
+ )
+ .fn(t => {
+    const prog = `alias pty = ptr<${t.params.addressSpace},u32,${t.params.accessMode}>;`;
+ t.expectCompileResult(false, prog);
+ });
+
+const kStoreTypeNotInstantiable: Record<string, string> = {
+ ptr: 'alias p = ptr<storage,ptr<private,i32>>;',
+ privateAtomic: 'alias p = ptr<private,atomic<u32>>;',
+ functionAtomic: 'alias p = ptr<function,atomic<u32>>;',
+ uniformAtomic: 'alias p = ptr<uniform,atomic<u32>>;',
+ workgroupRTArray: 'alias p = ptr<workgroup,array<i32>>;',
+ uniformRTArray: 'alias p = ptr<uniform,array<i32>>;',
+ privateRTArray: 'alias p = ptr<private,array<i32>>;',
+ functionRTArray: 'alias p = ptr<function,array<i32>>;',
+ RTArrayNotLast: 'struct S { a: array<i32>, b: i32 } alias p = ptr<storage,S>;',
+  nestedRTArray: 'struct S { a: array<i32>, b: i32 } struct T { s: S } alias p = ptr<storage,T>;',
+} as const;
+
+g.test('ptr_not_instantiable')
+ .desc(
+ 'Validate that ptr type must correspond to a variable that could be declared somewhere; test bad cases'
+ )
+ .params(u => u.combine('case', keysOf(kStoreTypeNotInstantiable)))
+ .fn(t => {
+ t.expectCompileResult(false, kStoreTypeNotInstantiable[t.params.case]);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/decl/util.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/decl/util.ts
new file mode 100644
index 0000000000..ab1b08e12a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/decl/util.ts
@@ -0,0 +1,163 @@
+import {
+ AccessMode,
+ AddressSpace,
+ AddressSpaceInfo,
+ kAccessModeInfo,
+ kAddressSpaceInfo,
+} from '../../types.js';
+
+/** An enumerator of shader stages */
+export type ShaderStage = 'vertex' | 'fragment' | 'compute';
+
+/** The list of all shader stages */
+export const kShaderStages = ['vertex', 'fragment', 'compute'] as const;
+
+/**
+ * declareEntryPoint emits the WGSL to declare an entry point with the given name, stage and body.
+ * The generated function will have an appropriate return type and return statement, so that `body`
+ * does not have to change between stages.
+ * @param arg - specifies the optional entry point function name (defaults to 'main'), the
+ * shader stage, and the body of the function, excluding any automatically generated return
+ * statements.
+ * @returns the WGSL string for the entry point
+ */
+export function declareEntryPoint(arg: {
+ name?: string;
+ stage: ShaderStage;
+ body: string;
+}): string {
+ if (arg.name === undefined) {
+ arg.name = 'main';
+ }
+ switch (arg.stage) {
+ case 'vertex':
+ return `@vertex
+fn ${arg.name}() -> @builtin(position) vec4f {
+ ${arg.body}
+ return vec4f();
+}`;
+ case 'fragment':
+ return `@fragment
+fn ${arg.name}() {
+ ${arg.body}
+}`;
+ case 'compute':
+ return `@compute @workgroup_size(1)
+fn ${arg.name}() {
+ ${arg.body}
+}`;
+ }
+}
+
+/**
+ * @returns a WGSL var declaration with given parameters for variable 'x' and
+ * store type i32.
+ */
+export function declareVarX(addressSpace: AddressSpace | '', accessMode: AccessMode | ''): string {
+ const parts: string[] = [];
+ if (addressSpace && kAddressSpaceInfo[addressSpace].binding) parts.push('@group(0) @binding(0) ');
+ parts.push('var');
+
+ const template_parts: string[] = [];
+ if (addressSpace) template_parts.push(addressSpace);
+ if (accessMode) template_parts.push(accessMode);
+ if (template_parts.length > 0) parts.push(`<${template_parts.join(',')}>`);
+
+ parts.push(' x: i32;');
+ return parts.join('');
+}
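+
+// For example, declareVarX('storage', 'read_write') returns
+// "@group(0) @binding(0) var<storage,read_write> x: i32;", and declareVarX('', '') returns
+// "var x: i32;".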
+
+/**
+ * @returns the valid choices for whether to explicitly spell the address space:
+ * [true] when the address space must be spelled, and [true, false] otherwise.
+ */
+export function explicitSpaceExpander(p: { addressSpace: AddressSpace }): readonly boolean[] {
+ const info = kAddressSpaceInfo[p.addressSpace];
+ return info.spell === 'must' ? [true] : [true, false];
+}
+
+/**
+ * @returns the access modes to test for the given address space: its usable access modes
+ * when an explicit access mode is to be spelled, or [''] (no explicit access mode) otherwise.
+ */
+export function accessModeExpander(p: {
+ addressSpace: AddressSpace;
+ explicitAccess: boolean; // Whether the access mode will be emitted.
+}): readonly (AccessMode | '')[] {
+ const info = kAddressSpaceInfo[p.addressSpace];
+ return p.explicitAccess && info.spellAccessMode !== 'never' ? info.accessModes : [''];
+}
+
+/**
+ * @returns a WGSL program with a single variable declaration, with the
+ * given parameterization
+ */
+export function getVarDeclShader(
+ p: {
+ addressSpace: AddressSpace; // Address space for the variable.
+ explicitSpace: boolean; // Should the address space be explicitly spelled?
+ accessMode: AccessMode | ''; // What access mode to use.
+ explicitAccess: boolean; // Should the access mode be explicitly spelled?
+ stage: ShaderStage; // What shader stage to use.
+ },
+ additionalBody?: string
+): string {
+ const info = kAddressSpaceInfo[p.addressSpace];
+ const decl = declareVarX(
+ p.explicitSpace ? p.addressSpace : '',
+ p.explicitAccess ? p.accessMode : ''
+ );
+
+ additionalBody = additionalBody ?? '';
+
+ switch (info.scope) {
+ case 'module':
+ return decl + '\n' + declareEntryPoint({ stage: p.stage, body: additionalBody });
+
+ case 'function':
+ return declareEntryPoint({ stage: p.stage, body: decl + '\n' + additionalBody });
+ }
+}
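+
+// For example, a module-scope case such as { addressSpace: 'private', explicitSpace: true,
+// accessMode: '', explicitAccess: false, stage: 'compute' } produces roughly:
+//   var<private> x: i32;
+//   @compute @workgroup_size(1)
+//   fn main() { ...additionalBody... }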
+
+/**
+ * @returns the WGSL spelling of a pointer type corresponding to a variable
+ * declared with the given parameters.
+ */
+export function pointerType(p: {
+ addressSpace: AddressSpace; // Address space to use if p.explicitSpace
+ explicitSpace: boolean; // If false, use 'function' address space
+ accessMode: AccessMode | ''; // The access mode to use, if any
+ ptrStoreType: string; // The store type.
+}): string {
+ const space = p.explicitSpace ? p.addressSpace : 'function';
+ const modePart = p.accessMode ? ',' + p.accessMode : '';
+ return `ptr<${space},${p.ptrStoreType}${modePart}>`;
+}
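+
+// For example, pointerType({ addressSpace: 'storage', explicitSpace: true,
+// accessMode: 'read_write', ptrStoreType: 'i32' }) returns 'ptr<storage,i32,read_write>'.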
+
+/** @returns the effective access mode for the given experiment. */
+export function effectiveAccessMode(
+ info: AddressSpaceInfo,
+ accessMode: AccessMode | ''
+): AccessMode {
+ return accessMode || info.accessModes[0]; // default is first.
+}
+
+/** @returns whether the setup allows reads */
+export function supportsRead(p: {
+ addressSpace: AddressSpace;
+ accessMode: AccessMode | '';
+}): boolean {
+ const info = kAddressSpaceInfo[p.addressSpace];
+ const mode = effectiveAccessMode(info, p.accessMode);
+ return info.accessModes.includes(mode) && kAccessModeInfo[mode].read;
+}
+
+/** @returns whether the setup allows writes */
+export function supportsWrite(p: {
+ addressSpace: AddressSpace;
+ accessMode: AccessMode | '';
+}): boolean {
+ const info = kAddressSpaceInfo[p.addressSpace];
+ const mode = effectiveAccessMode(info, p.accessMode);
+ return info.accessModes.includes(mode) && kAccessModeInfo[mode].write;
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/decl/var_access_mode.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/decl/var_access_mode.spec.ts
new file mode 100644
index 0000000000..467f61d0da
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/decl/var_access_mode.spec.ts
@@ -0,0 +1,116 @@
+export const description = `
+7.3 var Declarations
+
+The access mode always has a default value, and except for variables in the
+storage address space, must not be specified in the WGSL source. See §13.3 Address Spaces.
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { AddressSpace, kAccessModeInfo, kAddressSpaceInfo } from '../../types.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+import {
+ explicitSpaceExpander,
+ getVarDeclShader,
+ accessModeExpander,
+ supportsRead,
+ supportsWrite,
+ ShaderStage,
+} from './util.js';
+
+// Address spaces that can hold an i32 variable.
+const kNonHandleAddressSpaces = keysOf(kAddressSpaceInfo).filter(
+ as => as !== 'handle'
+) as AddressSpace[];
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+g.test('explicit_access_mode')
+ .desc('Validate uses of an explicit access mode on a var declaration')
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#var-decls')
+ .params(
+ u =>
+ u
+ .combine('addressSpace', kNonHandleAddressSpaces)
+ .combine('explicitSpace', [true, false])
+ // Only keep cases where:
+ // *if* the address space must be specified on a var decl (e.g. var<private>)
+ // then the address space will actually be specified in this test case.
+ .filter(t => kAddressSpaceInfo[t.addressSpace].spell !== 'must' || t.explicitSpace)
+ .combine('explicitAccess', [true])
+ .combine('accessMode', keysOf(kAccessModeInfo))
+ .combine('stage', ['compute' as ShaderStage]) // Only need to check compute shaders
+ )
+ .fn(t => {
+ const prog = getVarDeclShader(t.params);
+ const info = kAddressSpaceInfo[t.params.addressSpace];
+
+ const ok =
+ // The address space must be explicitly specified.
+ t.params.explicitSpace &&
+ // The address space must allow an access mode to be spelled, and the
+ // access mode must be in the list of modes for the address space.
+ info.spellAccessMode !== 'never' &&
+ info.accessModes.includes(t.params.accessMode);
+
+ t.expectCompileResult(ok, prog);
+ });
+
+g.test('implicit_access_mode')
+ .desc('Validate an implicit access mode on a var declaration')
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#var-decls')
+ .params(
+ u =>
+ u
+ .combine('addressSpace', kNonHandleAddressSpaces)
+ .expand('explicitSpace', explicitSpaceExpander)
+ .combine('explicitAccess', [false])
+ .combine('accessMode', [''] as const)
+ .combine('stage', ['compute' as ShaderStage]) // Only need to check compute shaders
+ )
+ .fn(t => {
+ const prog = getVarDeclShader(t.params);
+
+ // 7.3 var Declarations
+ // "The access mode always has a default value,.."
+ const ok = true;
+
+ t.expectCompileResult(ok, prog);
+ });
+
+g.test('read_access')
+ .desc('A variable can be read from when the access mode permits')
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#var-decls')
+ .params(
+ u =>
+ u
+ .combine('addressSpace', kNonHandleAddressSpaces)
+ .expand('explicitSpace', explicitSpaceExpander)
+ .combine('explicitAccess', [false, true])
+ .expand('accessMode', accessModeExpander)
+ .combine('stage', ['compute' as ShaderStage]) // Only need to check compute shaders
+ )
+ .fn(t => {
+ const prog = getVarDeclShader(t.params, 'let copy = x;');
+ const ok = supportsRead(t.params);
+ t.expectCompileResult(ok, prog);
+ });
+
+g.test('write_access')
+ .desc('A variable can be written to when the access mode permits')
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#var-decls')
+ .params(
+ u =>
+ u
+ .combine('addressSpace', kNonHandleAddressSpaces)
+ .expand('explicitSpace', explicitSpaceExpander)
+ .combine('explicitAccess', [false, true])
+ .expand('accessMode', accessModeExpander)
+ .combine('stage', ['compute' as ShaderStage]) // Only need to check compute shaders
+ )
+ .fn(t => {
+ const prog = getVarDeclShader(t.params, 'x = 0;');
+ const ok = supportsWrite(t.params);
+ t.expectCompileResult(ok, prog);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/access/vector.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/access/vector.spec.ts
new file mode 100644
index 0000000000..0294fc2d56
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/access/vector.spec.ts
@@ -0,0 +1,223 @@
+export const description = `
+Validation tests for vector accesses
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../../common/util/data_tables.js';
+import { ShaderValidationTest } from '../../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kCases = {
+ // indexing with literal
+ literal_0: { wgsl: 'let r : T = v[0];', ok: true },
+ literal_1: { wgsl: 'let r : T = v[1];', ok: true },
+ literal_2: { wgsl: 'let r : T = v[2];', ok: (width: number) => width > 2 },
+ literal_3: { wgsl: 'let r : T = v[3];', ok: (width: number) => width > 3 },
+ literal_0i: { wgsl: 'let r : T = v[0i];', ok: true },
+ literal_1i: { wgsl: 'let r : T = v[1i];', ok: true },
+ literal_2i: { wgsl: 'let r : T = v[2i];', ok: (width: number) => width > 2 },
+ literal_3i: { wgsl: 'let r : T = v[3i];', ok: (width: number) => width > 3 },
+ literal_0u: { wgsl: 'let r : T = v[0u];', ok: true },
+ literal_1u: { wgsl: 'let r : T = v[1u];', ok: true },
+ literal_2u: { wgsl: 'let r : T = v[2u];', ok: (width: number) => width > 2 },
+ literal_3u: { wgsl: 'let r : T = v[3u];', ok: (width: number) => width > 3 },
+
+ // indexing with 'const' variable
+ const_0: { wgsl: 'const i = 0; let r : T = v[i];', ok: true },
+ const_1: { wgsl: 'const i = 1; let r : T = v[i];', ok: true },
+ const_2: { wgsl: 'const i = 2; let r : T = v[i];', ok: (width: number) => width > 2 },
+ const_3: { wgsl: 'const i = 3; let r : T = v[i];', ok: (width: number) => width > 3 },
+ const_0i: { wgsl: 'const i = 0i; let r : T = v[i];', ok: true },
+ const_1i: { wgsl: 'const i = 1i; let r : T = v[i];', ok: true },
+ const_2i: { wgsl: 'const i = 2i; let r : T = v[i];', ok: (width: number) => width > 2 },
+ const_3i: { wgsl: 'const i = 3i; let r : T = v[i];', ok: (width: number) => width > 3 },
+ const_0u: { wgsl: 'const i = 0u; let r : T = v[i];', ok: true },
+ const_1u: { wgsl: 'const i = 1u; let r : T = v[i];', ok: true },
+ const_2u: { wgsl: 'const i = 2u; let r : T = v[i];', ok: (width: number) => width > 2 },
+ const_3u: { wgsl: 'const i = 3u; let r : T = v[i];', ok: (width: number) => width > 3 },
+
+ // indexing with 'let' variable
+ let_0: { wgsl: 'let i = 0; let r : T = v[i];', ok: true },
+ let_1: { wgsl: 'let i = 1; let r : T = v[i];', ok: true },
+ let_2: { wgsl: 'let i = 2; let r : T = v[i];', ok: true },
+ let_3: { wgsl: 'let i = 3; let r : T = v[i];', ok: true },
+ let_0i: { wgsl: 'let i = 0i; let r : T = v[i];', ok: true },
+ let_1i: { wgsl: 'let i = 1i; let r : T = v[i];', ok: true },
+ let_2i: { wgsl: 'let i = 2i; let r : T = v[i];', ok: true },
+ let_3i: { wgsl: 'let i = 3i; let r : T = v[i];', ok: true },
+ let_0u: { wgsl: 'let i = 0u; let r : T = v[i];', ok: true },
+ let_1u: { wgsl: 'let i = 1u; let r : T = v[i];', ok: true },
+ let_2u: { wgsl: 'let i = 2u; let r : T = v[i];', ok: true },
+ let_3u: { wgsl: 'let i = 3u; let r : T = v[i];', ok: true },
+
+ // indexing with 'var' variable
+ var_0: { wgsl: 'var i = 0; let r : T = v[i];', ok: true },
+ var_1: { wgsl: 'var i = 1; let r : T = v[i];', ok: true },
+ var_2: { wgsl: 'var i = 2; let r : T = v[i];', ok: true },
+ var_3: { wgsl: 'var i = 3; let r : T = v[i];', ok: true },
+ var_0i: { wgsl: 'var i = 0i; let r : T = v[i];', ok: true },
+ var_1i: { wgsl: 'var i = 1i; let r : T = v[i];', ok: true },
+ var_2i: { wgsl: 'var i = 2i; let r : T = v[i];', ok: true },
+ var_3i: { wgsl: 'var i = 3i; let r : T = v[i];', ok: true },
+ var_0u: { wgsl: 'var i = 0u; let r : T = v[i];', ok: true },
+ var_1u: { wgsl: 'var i = 1u; let r : T = v[i];', ok: true },
+ var_2u: { wgsl: 'var i = 2u; let r : T = v[i];', ok: true },
+ var_3u: { wgsl: 'var i = 3u; let r : T = v[i];', ok: true },
+
+ // indexing with const expression
+ const_expr_0: { wgsl: 'let r : T = v[0 / 2];', ok: true },
+ const_expr_1: { wgsl: 'let r : T = v[2 / 2];', ok: true },
+ const_expr_2: { wgsl: 'let r : T = v[4 / 2];', ok: (width: number) => width > 2 },
+ const_expr_3: { wgsl: 'let r : T = v[6 / 2];', ok: (width: number) => width > 3 },
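+  // The trig-based cases below rely on tan(1.10714872) ≈ tan(atan(2)) ≈ 2 and
+  // tan(1.24904577) ≈ tan(atan(3)) ≈ 3; adding 0.5 and truncating yields indices 2 and 3.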
+ const_expr_2_via_trig: {
+ wgsl: 'let r : T = v[i32(tan(1.10714872) + 0.5)];',
+ ok: (width: number) => width > 2,
+ },
+ const_expr_3_via_trig: {
+ wgsl: 'let r : T = v[u32(tan(1.24904577) + 0.5)];',
+ ok: (width: number) => width > 3,
+ },
+ const_expr_2_via_vec2: {
+ wgsl: 'let r : T = v[vec2(3, 2)[1]];',
+ ok: (width: number) => width > 2,
+ },
+ const_expr_3_via_vec2: {
+ wgsl: 'let r : T = v[vec2(3, 2).x];',
+ ok: (width: number) => width > 3,
+ },
+ const_expr_2_via_vec2u: {
+ wgsl: 'let r : T = v[vec2u(3, 2)[1]];',
+ ok: (width: number) => width > 2,
+ },
+ const_expr_3_via_vec2i: {
+ wgsl: 'let r : T = v[vec2i(3, 2).x];',
+ ok: (width: number) => width > 3,
+ },
+ const_expr_2_via_array: {
+ wgsl: 'let r : T = v[array<i32, 2>(3, 2)[1]];',
+ ok: (width: number) => width > 2,
+ },
+ const_expr_3_via_array: {
+ wgsl: 'let r : T = v[array<i32, 2>(3, 2)[0]];',
+ ok: (width: number) => width > 3,
+ },
+ const_expr_2_via_struct: {
+ wgsl: 'let r : T = v[S(2).i];',
+ ok: (width: number) => width > 2,
+ },
+ const_expr_3_via_struct: {
+ wgsl: 'let r : T = v[S(3).i];',
+ ok: (width: number) => width > 3,
+ },
+
+ // single element convenience name accesses
+ x: { wgsl: 'let r : T = v.x;', ok: true },
+ y: { wgsl: 'let r : T = v.y;', ok: true },
+ z: { wgsl: 'let r : T = v.z;', ok: (width: number) => width > 2 },
+ w: { wgsl: 'let r : T = v.w;', ok: (width: number) => width > 3 },
+ r: { wgsl: 'let r : T = v.r;', ok: true },
+ g: { wgsl: 'let r : T = v.g;', ok: true },
+ b: { wgsl: 'let r : T = v.b;', ok: (width: number) => width > 2 },
+ a: { wgsl: 'let r : T = v.a;', ok: (width: number) => width > 3 },
+
+ // swizzles
+ xy: { wgsl: 'let r : vec2<T> = v.xy;', ok: true },
+ yx: { wgsl: 'let r : vec2<T> = v.yx;', ok: true },
+ xyx: { wgsl: 'let r : vec3<T> = v.xyx;', ok: true },
+ xyz: { wgsl: 'let r : vec3<T> = v.xyz;', ok: (width: number) => width > 2 },
+ zyx: { wgsl: 'let r : vec3<T> = v.zyx;', ok: (width: number) => width > 2 },
+ xyxy: { wgsl: 'let r : vec4<T> = v.xyxy;', ok: true },
+ xyxz: { wgsl: 'let r : vec4<T> = v.xyxz;', ok: (width: number) => width > 2 },
+ xyzw: { wgsl: 'let r : vec4<T> = v.xyzw;', ok: (width: number) => width > 3 },
+ yxwz: { wgsl: 'let r : vec4<T> = v.yxwz;', ok: (width: number) => width > 3 },
+ rg: { wgsl: 'let r : vec2<T> = v.rg;', ok: true },
+ gr: { wgsl: 'let r : vec2<T> = v.gr;', ok: true },
+ rgg: { wgsl: 'let r : vec3<T> = v.rgg;', ok: true },
+ rgb: { wgsl: 'let r : vec3<T> = v.rgb;', ok: (width: number) => width > 2 },
+ grb: { wgsl: 'let r : vec3<T> = v.grb;', ok: (width: number) => width > 2 },
+ rgbr: { wgsl: 'let r : vec4<T> = v.rgbr;', ok: (width: number) => width > 2 },
+ rgba: { wgsl: 'let r : vec4<T> = v.rgba;', ok: (width: number) => width > 3 },
+ gbra: { wgsl: 'let r : vec4<T> = v.gbra;', ok: (width: number) => width > 3 },
+
+ // swizzle chains
+ xy_yx: { wgsl: 'let r : vec2<T> = v.xy.yx;', ok: true },
+ xyx_xxy: { wgsl: 'let r : vec3<T> = v.xyx.xxy;', ok: true },
+ xyz_zyx: { wgsl: 'let r : vec3<T> = v.xyz.zyx;', ok: (width: number) => width > 2 },
+ xyxy_rrgg: { wgsl: 'let r : vec4<T> = v.xyxy.rrgg;', ok: true },
+ rbrg_xyzw: { wgsl: 'let r : vec4<T> = v.rbrg.xyzw;', ok: (width: number) => width > 2 },
+ xyxz_rbg_yx: { wgsl: 'let r : vec2<T> = v.xyxz.rbg.yx;', ok: (width: number) => width > 2 },
+ wxyz_bga_xy: { wgsl: 'let r : vec2<T> = v.wxyz.bga.xy;', ok: (width: number) => width > 3 },
+
+ // error: invalid convenience letterings
+ xq: { wgsl: 'let r : vec2<T> = v.xq;', ok: false },
+ py: { wgsl: 'let r : vec2<T> = v.py;', ok: false },
+
+ // error: mixed convenience letterings
+ xg: { wgsl: 'let r : vec2<T> = v.xg;', ok: false },
+ ryb: { wgsl: 'let r : vec3<T> = v.ryb;', ok: false },
+ xgza: { wgsl: 'let r : vec4<T> = v.xgza;', ok: false },
+
+ // error: too many swizzle elements
+ xxxxx: { wgsl: 'let r = v.xxxxx;', ok: false },
+ rrrrr: { wgsl: 'let r = v.rrrrr;', ok: false },
+ yxwxy: { wgsl: 'let r = v.yxwxy;', ok: false },
+ rgbar: { wgsl: 'let r = v.rgbar;', ok: false },
+
+ // error: invalid index value
+ literal_5: { wgsl: 'let r : T = v[5];', ok: false },
+ literal_minus_1: { wgsl: 'let r : T = v[-1];', ok: false },
+
+ // error: invalid index type
+ float_idx: { wgsl: 'let r : T = v[1.0];', ok: false },
+ bool_idx: { wgsl: 'let r : T = v[true];', ok: false },
+ array_idx: { wgsl: 'let r : T = v[array<i32, 2>()];', ok: false },
+};
+
+g.test('vector')
+  .desc('Tests validation of vector indexing and swizzles')
+ .params(u =>
+ u
+ .combine('case', keysOf(kCases)) //
+ .combine('vector_decl', ['const', 'let', 'var', 'param'] as const)
+ .combine('vector_width', [2, 3, 4] as const)
+ .combine('element_type', ['i32', 'u32', 'f32', 'f16', 'bool'] as const)
+ )
+ .beforeAllSubcases(t => {
+ if (t.params.element_type === 'f16') {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const c = kCases[t.params.case];
+ const enables = t.params.element_type === 'f16' ? 'enable f16;' : '';
+ const prefix = `${enables}
+
+alias T = ${t.params.element_type};
+
+struct S {
+ i : i32,
+}
+
+@compute @workgroup_size(1)
+`;
+ const code =
+ t.params.vector_decl === 'param'
+ ? `${prefix}
+fn main() {
+ F(vec${t.params.vector_width}<T>());
+}
+
+fn F(v : vec${t.params.vector_width}<T>) {
+ ${c.wgsl}
+}
+`
+ : `${prefix}
+fn main() {
+ ${t.params.vector_decl} v = vec${t.params.vector_width}<T>();
+ ${c.wgsl}
+}
+`;
+ const pass = typeof c.ok === 'function' ? c.ok(t.params.vector_width) : c.ok;
+ t.expectCompileResult(pass, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/binary/bitwise_shift.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/binary/bitwise_shift.spec.ts
new file mode 100644
index 0000000000..5f7b995ded
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/binary/bitwise_shift.spec.ts
@@ -0,0 +1,166 @@
+export const description = `
+Validation tests for the bitwise shift binary expression operations
+`;
+
+import { makeTestGroup } from '../../../../../common/framework/test_group.js';
+import { ShaderValidationTest } from '../../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+// Reinterprets v as a signed 32-bit integer.
+// Required because JS binary literals are always interpreted as unsigned values.
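+// Example: signed(0b11111111111111111111111111111111) === -1.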
+function signed(v: number): number {
+ return new Int32Array([v])[0];
+}
+
+// Returns the vector form of input value `v` with `size` components, or `v` unchanged if `size` is undefined.
+function vectorize(v: string, size: number | undefined): string {
+ if (size !== undefined) {
+ return `vec${size}(${v})`;
+ }
+ return v;
+}
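+// Example: vectorize('1u', 3) === 'vec3(1u)', while vectorize('1u', undefined) === '1u'.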
+
+const kLeftShiftCases = [
+ // rhs >= bitwidth fails
+ { lhs: `0u`, rhs: `31u`, pass: true },
+ { lhs: `0u`, rhs: `32u`, pass: false },
+ { lhs: `0u`, rhs: `33u`, pass: false },
+ { lhs: `0u`, rhs: `1000u`, pass: false },
+ { lhs: `0u`, rhs: `0xFFFFFFFFu`, pass: false },
+
+ { lhs: `0i`, rhs: `31u`, pass: true },
+ { lhs: `0i`, rhs: `32u`, pass: false },
+ { lhs: `0i`, rhs: `33u`, pass: false },
+ { lhs: `0i`, rhs: `1000u`, pass: false },
+ { lhs: `0i`, rhs: `0xFFFFFFFFu`, pass: false },
+
+ // Signed overflow (sign change)
+ { lhs: `${0b01000000000000000000000000000000}i`, rhs: `1u`, pass: false },
+ { lhs: `${0b01111111111111111111111111111111}i`, rhs: `1u`, pass: false },
+ { lhs: `${0b00000000000000000000000000000001}i`, rhs: `31u`, pass: false },
+ // Same cases should pass if lhs is unsigned
+ { lhs: `${0b01000000000000000000000000000000}u`, rhs: `1u`, pass: true },
+ { lhs: `${0b01111111111111111111111111111111}u`, rhs: `1u`, pass: true },
+ { lhs: `${0b00000000000000000000000000000001}u`, rhs: `31u`, pass: true },
+
+ // Unsigned overflow
+ { lhs: `${0b11000000000000000000000000000000}u`, rhs: `1u`, pass: false },
+ { lhs: `${0b11111111111111111111111111111111}u`, rhs: `1u`, pass: false },
+ { lhs: `${0b11111111111111111111111111111111}u`, rhs: `31u`, pass: false },
+ // Same cases should pass if lhs is signed
+ { lhs: `${signed(0b11000000000000000000000000000000)}i`, rhs: `1u`, pass: true },
+ { lhs: `${signed(0b11111111111111111111111111111111)}i`, rhs: `1u`, pass: true },
+ { lhs: `${signed(0b11111111111111111111111111111111)}i`, rhs: `31u`, pass: true },
+
+ // Shift by negative is an error
+ { lhs: `1`, rhs: `-1`, pass: false },
+ { lhs: `1i`, rhs: `-1`, pass: false },
+ { lhs: `1u`, rhs: `-1`, pass: false },
+];
+
+g.test('shift_left_concrete')
+ .desc('Tests validation of binary left shift of concrete values')
+ .params(u =>
+ u
+ .combine('case', kLeftShiftCases) //
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(t => {
+ const lhs = t.params.case.lhs;
+ const rhs = t.params.case.rhs;
+ const vec_size = t.params.vectorize;
+
+ const code = `
+@compute @workgroup_size(1)
+fn main() {
+ const r = ${vectorize(lhs, vec_size)} << ${vectorize(rhs, vec_size)};
+}
+ `;
+ t.expectCompileResult(t.params.case.pass, code);
+ });
+
+g.test('shift_left_vec_size_mismatch')
+ .desc('Tests validation of binary left shift of vectors with mismatched sizes')
+ .params(u =>
+ u
+ .combine('vectorize_lhs', [2, 3, 4]) //
+ .combine('vectorize_rhs', [2, 3, 4])
+ )
+ .fn(t => {
+ const lhs = `1`;
+ const rhs = `1`;
+ const lhs_vec_size = t.params.vectorize_lhs;
+ const rhs_vec_size = t.params.vectorize_rhs;
+ const code = `
+@compute @workgroup_size(1)
+fn main() {
+ const r = ${vectorize(lhs, lhs_vec_size)} << ${vectorize(rhs, rhs_vec_size)};
+}
+ `;
+ const pass = lhs_vec_size === rhs_vec_size;
+ t.expectCompileResult(pass, code);
+ });
+
+const kRightShiftCases = [
+ // rhs >= bitwidth fails
+ { lhs: `0u`, rhs: `31u`, pass: true },
+ { lhs: `0u`, rhs: `32u`, pass: false },
+ { lhs: `0u`, rhs: `33u`, pass: false },
+ { lhs: `0u`, rhs: `1000u`, pass: false },
+ { lhs: `0u`, rhs: `0xFFFFFFFFu`, pass: false },
+
+ { lhs: `0i`, rhs: `31u`, pass: true },
+ { lhs: `0i`, rhs: `32u`, pass: false },
+ { lhs: `0i`, rhs: `33u`, pass: false },
+ { lhs: `0i`, rhs: `1000u`, pass: false },
+ { lhs: `0i`, rhs: `0xFFFFFFFFu`, pass: false },
+
+ // Shift by negative is an error
+ { lhs: `1`, rhs: `-1`, pass: false },
+ { lhs: `1i`, rhs: `-1`, pass: false },
+ { lhs: `1u`, rhs: `-1`, pass: false },
+];
+
+g.test('shift_right_concrete')
+ .desc('Tests validation of binary right shift of concrete values')
+ .params(u =>
+ u
+ .combine('case', kRightShiftCases) //
+ .combine('vectorize', [undefined, 2, 3, 4] as const)
+ )
+ .fn(t => {
+ const lhs = t.params.case.lhs;
+ const rhs = t.params.case.rhs;
+ const vec_size = t.params.vectorize;
+
+ const code = `
+@compute @workgroup_size(1)
+fn main() {
+ const r = ${vectorize(lhs, vec_size)} >> ${vectorize(rhs, vec_size)};
+}
+ `;
+ t.expectCompileResult(t.params.case.pass, code);
+ });
+
+g.test('shift_right_vec_size_mismatch')
+ .desc('Tests validation of binary right shift of vectors with mismatched sizes')
+ .params(u =>
+ u
+ .combine('vectorize_lhs', [2, 3, 4]) //
+ .combine('vectorize_rhs', [2, 3, 4])
+ )
+ .fn(t => {
+ const lhs = `1`;
+ const rhs = `1`;
+ const lhs_vec_size = t.params.vectorize_lhs;
+ const rhs_vec_size = t.params.vectorize_rhs;
+ const code = `
+@compute @workgroup_size(1)
+fn main() {
+ const r = ${vectorize(lhs, lhs_vec_size)} >> ${vectorize(rhs, rhs_vec_size)};
+}
+ `;
+ const pass = lhs_vec_size === rhs_vec_size;
+ t.expectCompileResult(pass, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/abs.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/abs.spec.ts
new file mode 100644
index 0000000000..32ecb0cbc8
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/abs.spec.ts
@@ -0,0 +1,54 @@
+const builtin = 'abs';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ elementType,
+ kAllFloatAndIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ stageSupportsType,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatAndIntegerScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() never errors
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .expand('value', u => fullRangeForType(kValuesTypes[u.type]))
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const expectedResult = true; // abs() should never error
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [kValuesTypes[t.params.type].create(t.params.value)],
+ t.params.stage
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/acos.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/acos.spec.ts
new file mode 100644
index 0000000000..82171ed4b1
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/acos.spec.ts
@@ -0,0 +1,78 @@
+const builtin = 'acos';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatScalarsAndVectors,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ kMinusTwoToTwo,
+ stageSupportsType,
+ unique,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() rejects invalid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .expand('value', u => unique(kMinusTwoToTwo, fullRangeForType(kValuesTypes[u.type])))
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const expectedResult = Math.abs(t.params.value) <= 1;
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [kValuesTypes[t.params.type].create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(0)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/acosh.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/acosh.spec.ts
new file mode 100644
index 0000000000..a7ab8d83f9
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/acosh.spec.ts
@@ -0,0 +1,80 @@
+const builtin = 'acosh';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatScalarsAndVectors,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { isRepresentable } from '../../../../../util/floating_point.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ kMinusTwoToTwo,
+ stageSupportsType,
+ unique,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() rejects invalid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .expand('value', u => unique(fullRangeForType(kValuesTypes[u.type]), kMinusTwoToTwo))
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const type = kValuesTypes[t.params.type];
+ const expectedResult = isRepresentable(Math.acosh(t.params.value), elementType(type));
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [type.create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(1)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/asin.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/asin.spec.ts
new file mode 100644
index 0000000000..8af7706169
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/asin.spec.ts
@@ -0,0 +1,78 @@
+const builtin = 'asin';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatScalarsAndVectors,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ kMinusTwoToTwo,
+ stageSupportsType,
+ unique,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() rejects invalid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .expand('value', u => unique(kMinusTwoToTwo, fullRangeForType(kValuesTypes[u.type])))
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const expectedResult = Math.abs(t.params.value) <= 1;
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [kValuesTypes[t.params.type].create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(0)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/asinh.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/asinh.spec.ts
new file mode 100644
index 0000000000..4558d30966
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/asinh.spec.ts
@@ -0,0 +1,82 @@
+const builtin = 'asinh';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatScalarsAndVectors,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { isRepresentable } from '../../../../../util/floating_point.js';
+import { linearRange } from '../../../../../util/math.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ stageSupportsType,
+ unique,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() rejects invalid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .expand('value', u =>
+ unique(fullRangeForType(kValuesTypes[u.type]), linearRange(-2000, 2000, 10))
+ )
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const type = kValuesTypes[t.params.type];
+ const expectedResult = isRepresentable(Math.asinh(t.params.value), elementType(type));
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [type.create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(1)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/atan.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/atan.spec.ts
new file mode 100644
index 0000000000..3080f4e971
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/atan.spec.ts
@@ -0,0 +1,79 @@
+const builtin = 'atan';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatScalarsAndVectors,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ kMinus3PiTo3Pi,
+ stageSupportsType,
+ unique,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() never errors
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .expand('value', u => unique(kMinus3PiTo3Pi, fullRangeForType(kValuesTypes[u.type])))
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const type = kValuesTypes[t.params.type];
+ const expectedResult = true;
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [type.create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(0)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/atan2.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/atan2.spec.ts
new file mode 100644
index 0000000000..33f1970697
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/atan2.spec.ts
@@ -0,0 +1,106 @@
+const builtin = 'atan2';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ TypeF32,
+ Vector,
+ VectorType,
+ elementType,
+ kAllFloatScalarsAndVectors,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { isRepresentable } from '../../../../../util/floating_point.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ kSparseMinus3PiTo3Pi,
+ stageSupportsType,
+ unique,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() rejects invalid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .expand('y', u => unique(kSparseMinus3PiTo3Pi, fullRangeForType(kValuesTypes[u.type], 4)))
+ .expand('x', u => unique(kSparseMinus3PiTo3Pi, fullRangeForType(kValuesTypes[u.type], 4)))
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const type = kValuesTypes[t.params.type];
+ const expectedResult = isRepresentable(
+ Math.abs(Math.atan2(t.params.y, t.params.x)),
+ elementType(type)
+ );
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [type.create(t.params.y), type.create(t.params.x)],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument_y')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const yTy = kIntegerArgumentTypes[t.params.type];
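+    // Give x a floating-point type of matching shape so that only y's type determines the result.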
+ const xTy = yTy instanceof Vector ? new VectorType(yTy.size, TypeF32) : TypeF32;
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ yTy === TypeF32,
+ [yTy.create(1), xTy.create(1)],
+ 'constant'
+ );
+ });
+
+g.test('integer_argument_x')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const xTy = kIntegerArgumentTypes[t.params.type];
+ const yTy = xTy instanceof Vector ? new VectorType(xTy.size, TypeF32) : TypeF32;
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ xTy === TypeF32,
+ [yTy.create(1), xTy.create(1)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/atanh.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/atanh.spec.ts
new file mode 100644
index 0000000000..63a96f0f70
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/atanh.spec.ts
@@ -0,0 +1,78 @@
+const builtin = 'atanh';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatScalarsAndVectors,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ kMinusTwoToTwo,
+ stageSupportsType,
+ unique,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() rejects invalid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .expand('value', u => unique(kMinusTwoToTwo, fullRangeForType(kValuesTypes[u.type])))
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const expectedResult = Math.abs(t.params.value) < 1;
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [kValuesTypes[t.params.type].create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(0)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/atomics.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/atomics.spec.ts
new file mode 100644
index 0000000000..57c5aae613
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/atomics.spec.ts
@@ -0,0 +1,70 @@
+export const description = `
+Validation tests for atomic builtins.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../../../common/util/data_tables.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kAtomicOps = {
+ add: { src: 'atomicAdd(&a,1)' },
+ sub: { src: 'atomicSub(&a,1)' },
+ max: { src: 'atomicMax(&a,1)' },
+ min: { src: 'atomicMin(&a,1)' },
+ and: { src: 'atomicAnd(&a,1)' },
+ or: { src: 'atomicOr(&a,1)' },
+ xor: { src: 'atomicXor(&a,1)' },
+ load: { src: 'atomicLoad(&a)' },
+ store: { src: 'atomicStore(&a,1)' },
+ exchange: { src: 'atomicExchange(&a,1)' },
+ compareexchangeweak: { src: 'atomicCompareExchangeWeak(&a,1,1)' },
+};
+
+g.test('stage')
+ .specURL('https://www.w3.org/TR/WGSL/#atomic-rmw')
+ .desc(
+ `
+Atomic built-in functions must not be used in a vertex shader stage.
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', ['fragment', 'vertex', 'compute'] as const) //
+ .combine('atomicOp', keysOf(kAtomicOps))
+ )
+ .fn(t => {
+ const atomicOp = kAtomicOps[t.params.atomicOp].src;
+ let code = `
+@group(0) @binding(0) var<storage, read_write> a: atomic<i32>;
+`;
+
+ switch (t.params.stage) {
+ case 'compute':
+ code += `
+@compute @workgroup_size(1,1,1) fn main() {
+ ${atomicOp};
+}`;
+ break;
+
+ case 'fragment':
+ code += `
+@fragment fn main() -> @location(0) vec4<f32> {
+ ${atomicOp};
+ return vec4<f32>();
+}`;
+ break;
+
+ case 'vertex':
+ code += `
+@vertex fn vmain() -> @builtin(position) vec4<f32> {
+ ${atomicOp};
+ return vec4<f32>();
+}`;
+ break;
+ }
+
+ const pass = t.params.stage !== 'vertex';
+ t.expectCompileResult(pass, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/bitcast.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/bitcast.spec.ts
new file mode 100644
index 0000000000..20c2c40664
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/bitcast.spec.ts
@@ -0,0 +1,393 @@
+export const description = `
+Validation negative tests for bitcast builtins.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../../../common/util/data_tables.js';
+import { assert } from '../../../../../../common/util/util.js';
+import { kBit } from '../../../../../util/constants.js';
+import { linearRange } from '../../../../../util/math.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+// A VectorCase specifies the number of components a vector type has,
+// and which component will have a bad value.
+// Use width = 1 to indicate a scalar.
+type VectorCase = { width: number; badIndex: number };
+const kVectorCases: Record<string, VectorCase> = {
+ v1_b0: { width: 1, badIndex: 0 },
+ v2_b0: { width: 2, badIndex: 0 },
+ v2_b1: { width: 2, badIndex: 1 },
+ v3_b0: { width: 3, badIndex: 0 },
+ v3_b1: { width: 3, badIndex: 1 },
+ v3_b2: { width: 3, badIndex: 2 },
+ v4_b0: { width: 4, badIndex: 0 },
+ v4_b1: { width: 4, badIndex: 1 },
+ v4_b2: { width: 4, badIndex: 2 },
+ v4_b3: { width: 4, badIndex: 3 },
+};
+
+const numNaNs = 4;
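+// IEEE-754 binary32 encodings: +infinity is 0x7f800000, -infinity is 0xff800000.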
+const f32InfAndNaNInU32: number[] = [
+ // Cover NaNs evenly in integer space.
+ // The positive NaN with the lowest integer representation is the integer
+ // for infinity, plus one.
+ // The positive NaN with the highest integer representation is i32.max (!)
+ ...linearRange(kBit.f32.positive.infinity + 1, kBit.i32.positive.max, numNaNs),
+ // The negative NaN with the lowest integer representation is the integer
+ // for negative infinity, plus one.
+ // The negative NaN with the highest integer representation is u32.max (!)
+ ...linearRange(kBit.f32.negative.infinity + 1, kBit.u32.max, numNaNs),
+ kBit.f32.positive.infinity,
+ kBit.f32.negative.infinity,
+];
+
+g.test('bad_const_to_f32')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+It is a shader-creation error if any const-expression of floating-point type evaluates to NaN or infinity.
+`
+ )
+ .params(u =>
+ u
+ .combine('fromScalarType', ['i32', 'u32'] as const)
+ .combine('vectorize', keysOf(kVectorCases))
+ .beginSubcases()
+      // Also validate that test cases that do not use a bad bit pattern compile successfully
+ .combine('useBadValue', [true, false])
+ .expand('bitBadValue', p =>
+ p.useBadValue ? ([...f32InfAndNaNInU32] as const) : [0 as const]
+ )
+ )
+ .fn(t => {
+ // For scalar cases, generate code like:
+ // const f = bitcast<f32>(i32(u32(0x7f800000)));
+ // For vector cases, generate code where one component is bad. In this case
+ // width=4 and badIndex=2
+    //   const f = bitcast<vec4f>(vec4<i32>(0,0,i32(u32(0x7f800000)),0));
+ const vectorize = kVectorCases[t.params.vectorize];
+ const width = vectorize.width;
+ const badIndex = vectorize.badIndex;
+ const badScalar = `${t.params.fromScalarType}(u32(${t.params.bitBadValue}))`;
+ const destType = width === 1 ? 'f32' : `vec${width}f`;
+ const srcType =
+ width === 1 ? t.params.fromScalarType : `vec${width}<${t.params.fromScalarType}>`;
+ const components = [...Array(width).keys()]
+ .map(i => (i === badIndex ? badScalar : '0'))
+ .join(',');
+ const code = `const f = bitcast<${destType}>(${srcType}(${components}));`;
+ t.expectCompileResult(!t.params.useBadValue, code);
+ });
+
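+// IEEE-754 binary16 encodings: +infinity is 0x7c00, -infinity is 0xfc00.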
+const f16InfAndNaNInU16: number[] = [
+ // Cover NaNs evenly in integer space.
+ // The positive NaN with the lowest integer representation is the integer
+ // for infinity, plus one.
+ // The positive NaN with the highest integer representation is i16.max = 32767
+ ...linearRange(kBit.f16.positive.infinity + 1, 32767, numNaNs),
+ // The negative NaN with the lowest integer representation is the integer
+ // for negative infinity, plus one.
+ // The negative NaN with the highest integer representation is u16.max = 65535
+ ...linearRange(kBit.f16.negative.infinity + 1, 65535, numNaNs),
+ kBit.f16.positive.infinity,
+ kBit.f16.negative.infinity,
+];
+
+/**
+ * @returns a u32 whose lower and upper 16 bits are the two elements of the
+ * given array of two u16 values, respectively, in little-endian order.
+ */
+function u16x2ToU32(u16x2: number[]): number {
+ assert(u16x2.length === 2);
+  // Create a DataView over a 4-byte buffer.
+ const buffer = new ArrayBuffer(4);
+ const view = new DataView(buffer);
+ // Enforce little-endian.
+ view.setUint16(0, u16x2[0], true);
+ view.setUint16(2, u16x2[1], true);
+ return view.getUint32(0, true);
+}
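+// Example: u16x2ToU32([0x7c00, 0x0001]) === 0x00017c00 (the first element occupies the low 16 bits).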
+
+g.test('bad_const_to_f16')
+ .specURL('https://www.w3.org/TR/WGSL/#floating-point-evaluation')
+ .desc(
+ `
+It is a shader-creation error if any const-expression of floating-point type evaluates to NaN or infinity.
+`
+ )
+ .params(u =>
+ u
+ .combine('fromScalarType', ['i32', 'u32'] as const)
+ .combine('vectorize', keysOf(kVectorCases))
+ // Only test valid bitcast to vec2<f16> or vec4<f16>
+ .filter(p => kVectorCases[p.vectorize].width % 2 === 0)
+ .beginSubcases()
+      // Also validate that test cases that do not use a bad bit pattern compile successfully
+ .combine('useBadValue', [true, false])
+ .expand('bitBadValue', p =>
+ p.useBadValue ? ([...f16InfAndNaNInU16] as const) : [0 as const]
+ )
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(t => {
+ // For width = 2 generate code like:
+ // const f = bitcast<vec2<f16>>(i32(u32(0x7f800000)));
+ // And for width = 4:
+ // const f = bitcast<vec4<f16>>(vec2<i32>(0,i32(u32(0x7f800000))));
+ const vectorize = kVectorCases[t.params.vectorize];
+ const width = vectorize.width;
+ const badIndex = vectorize.badIndex;
+
+    // Only bitcast to vec2<f16> or vec4<f16> is valid.
+ assert(width === 2 || width === 4);
+
+ // Put the bad f16 bits into lower 16 bits of source element if bad index is 0 or 2, else higher 16 bits.
+ const badSrcElemBitsInU32 = u16x2ToU32(
+ badIndex % 2 === 0 ? [t.params.bitBadValue, 0] : [0, t.params.bitBadValue]
+ );
+ const badScalar = `${t.params.fromScalarType}(u32(${badSrcElemBitsInU32}))`;
+
+ const destType = `vec${width}<f16>`;
+ const srcType = width === 2 ? t.params.fromScalarType : `vec2<${t.params.fromScalarType}>`;
+ const components = [...Array(width / 2).keys()]
+ .map(i => (i === badIndex >> 1 ? badScalar : '0'))
+ .join(',');
+ const code = `
+ enable f16;
+ const f = bitcast<${destType}>(${srcType}(${components}));`;
+ t.expectCompileResult(!t.params.useBadValue, code);
+ });
+
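+// Build all nine matCxR type names for C, R in {2, 3, 4}, in both f32 and f16 variants.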
+const f32_matrix_types = [2, 3, 4]
+ .map(i => [2, 3, 4].map(j => `mat${i}x${j}f`))
+ .reduce((a, c) => a.concat(c), []);
+const f16_matrix_types = [2, 3, 4]
+ .map(i => [2, 3, 4].map(j => `mat${i}x${j}<f16>`))
+ .reduce((a, c) => a.concat(c), []);
+const bool_types = ['bool', ...[2, 3, 4].map(i => `vec${i}<bool>`)];
+
+g.test('bad_type_constructible')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+ .desc(
+ `
+Bitcast only applies to concrete numeric scalars and concrete numeric vectors.
+Test constructible types.
+`
+ )
+ .params(u =>
+ u
+ .combine('type', [
+ ...f32_matrix_types,
+ ...f16_matrix_types,
+ ...bool_types,
+ 'array<i32,2>',
+ 'S',
+ ])
+ .combine('direction', ['to', 'from'])
+ )
+ .beforeAllSubcases(t => {
+ if (t.params.type.includes('f16')) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const T = t.params.type;
+ const enable_directives = t.params.type.includes('f16') ? 'enable f16;\n' : '';
+ const preamble = T === 'S' ? 'struct S { a:i32 } ' : '';
+    // For the 'from' direction, create a value of type T using zero-value construction: T().
+ const srcVal = t.params.direction === 'to' ? '0' : `${T}()`;
+ const destType = t.params.direction === 'to' ? T : 'i32';
+ const code = enable_directives + preamble + `const x = bitcast<${destType}>(${srcVal});`;
+ t.expectCompileResult(false, code);
+ });
+
+g.test('bad_type_nonconstructible')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+ .desc(
+ `
+Bitcast only applies to concrete numeric scalars and concrete numeric vectors.
+Test non-constructible types.
+`
+ )
+ .params(u => u.combine('var', ['s', 't', 'b', 'p']).combine('direction', ['to', 'from']))
+ .fn(t => {
+ const typeOf: Record<string, string> = {
+ s: 'sampler',
+ t: 'texture_depth_2d',
+ b: 'array<i32>',
+ p: 'ptr<private,i32>',
+ };
+ const srcVal = t.params.direction === 'to' ? '0' : t.params.var;
+ const destType = t.params.direction === 'to' ? typeOf[t.params.var] : 'i32';
+ const code = `
+ @group(0) @binding(0) var s: sampler;
+ @group(0) @binding(1) var t: texture_depth_2d;
+ @group(0) @binding(2) var<storage> b: array<i32>;
+ var<private> v: i32;
+ @compute @workgroup_size(1)
+ fn main() {
+ let p = &v;
+ let x = bitcast<${destType}>(${srcVal});
+ }
+ `;
+ t.expectCompileResult(false, code);
+ });
+
+g.test('bad_to_vec3h')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+ .desc(
+ `
+Can't bitcast a numeric type to or from vec3<f16> because it is 48 bits wide
+and no other numeric type has that size.
+`
+ )
+ .params(u =>
+ u
+ .combine('other_type', [
+ 'bool',
+ 'u32',
+ 'i32',
+ 'f32',
+ 'vec2<bool>',
+ 'vec3<bool>',
+ 'vec4<bool>',
+ 'vec2u',
+ 'vec3u',
+ 'vec4u',
+ 'vec2i',
+ 'vec3i',
+ 'vec4i',
+ 'vec2f',
+ 'vec3f',
+ 'vec4f',
+ 'vec2h',
+ 'vec4h',
+ ] as const)
+ .combine('direction', ['to', 'from'] as const)
+ .combine('type', ['vec3<f16>', 'vec3h'])
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(t => {
+ const src_type = t.params.direction === 'to' ? t.params.type : t.params.other_type;
+ const dst_type = t.params.direction === 'from' ? t.params.type : t.params.other_type;
+ const code = `
+enable f16;
+@fragment
+fn main() {
+ var src : ${src_type};
+ let dst = bitcast<${dst_type}>(src);
+}`;
+ t.expectCompileResult(false, code);
+ });
+
+g.test('bad_to_f16')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+ .desc(
+ `
+Can't bitcast non-16-bit types to or from f16 because f16 is 16 bits wide
+and no other type has that size.
+`
+ )
+ .params(u =>
+ u
+ .combine('other_type', [
+ 'bool',
+ 'u32',
+ 'i32',
+ 'f32',
+ 'vec2<bool>',
+ 'vec3<bool>',
+ 'vec4<bool>',
+ 'vec2u',
+ 'vec3u',
+ 'vec4u',
+ 'vec2i',
+ 'vec3i',
+ 'vec4i',
+ 'vec2f',
+ 'vec3f',
+ 'vec4f',
+ 'vec2h',
+ 'vec3h',
+ 'vec4h',
+ ] as const)
+ .combine('direction', ['to', 'from'] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(t => {
+ const src_type = t.params.direction === 'to' ? 'f16' : t.params.other_type;
+ const dst_type = t.params.direction === 'from' ? 'f16' : t.params.other_type;
+ const code = `
+enable f16;
+@fragment
+fn main() {
+ var src : ${src_type};
+ let dst = bitcast<${dst_type}>(src);
+}`;
+ t.expectCompileResult(false, code);
+ });
+
+g.test('valid_vec2h')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+ .desc(`Check valid vec2<f16> bitcasts`)
+ .params(u =>
+ u
+ .combine('other_type', ['u32', 'i32', 'f32'] as const)
+ .combine('type', ['vec2<f16>', 'vec2h'] as const)
+ .combine('direction', ['to', 'from'] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(t => {
+ const src_type = t.params.direction === 'to' ? t.params.type : t.params.other_type;
+ const dst_type = t.params.direction === 'from' ? t.params.type : t.params.other_type;
+ const code = `
+enable f16;
+@fragment
+fn main() {
+ var src : ${src_type};
+ let dst = bitcast<${dst_type}>(src);
+}`;
+ t.expectCompileResult(true, code);
+ });
+
+g.test('valid_vec4h')
+ .specURL('https://www.w3.org/TR/WGSL/#bitcast-builtin')
+  .desc(`Check valid vec4<f16> bitcasts`)
+ .params(u =>
+ u
+ .combine('other_type', [
+ 'vec2<u32>',
+ 'vec2u',
+ 'vec2<i32>',
+ 'vec2i',
+ 'vec2<f32>',
+ 'vec2f',
+ ] as const)
+ .combine('type', ['vec4<f16>', 'vec4h'] as const)
+ .combine('direction', ['to', 'from'] as const)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(t => {
+ const src_type = t.params.direction === 'to' ? t.params.type : t.params.other_type;
+ const dst_type = t.params.direction === 'from' ? t.params.type : t.params.other_type;
+ const code = `
+enable f16;
+@fragment
+fn main() {
+ var src : ${src_type};
+ let dst = bitcast<${dst_type}>(src);
+}`;
+ t.expectCompileResult(true, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/ceil.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/ceil.spec.ts
new file mode 100644
index 0000000000..0f287907f8
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/ceil.spec.ts
@@ -0,0 +1,75 @@
+const builtin = 'ceil';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatScalarsAndVectors,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ stageSupportsType,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() never errors
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .expand('value', u => fullRangeForType(kValuesTypes[u.type]))
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const expectedResult = true; // ceil() should never error
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [kValuesTypes[t.params.type].create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(0)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/clamp.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/clamp.spec.ts
new file mode 100644
index 0000000000..1cf28ffc2b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/clamp.spec.ts
@@ -0,0 +1,57 @@
+const builtin = 'clamp';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ elementType,
+ kAllFloatAndIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ stageSupportsType,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatAndIntegerScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() rejects invalid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .expand('e', u => fullRangeForType(kValuesTypes[u.type], 3))
+ .expand('low', u => fullRangeForType(kValuesTypes[u.type], 4))
+ .expand('high', u => fullRangeForType(kValuesTypes[u.type], 4))
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const type = kValuesTypes[t.params.type];
+ const expectedResult = t.params.low <= t.params.high;
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [type.create(t.params.e), type.create(t.params.low), type.create(t.params.high)],
+ t.params.stage
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/const_override_validation.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/const_override_validation.ts
new file mode 100644
index 0000000000..86b88cb159
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/const_override_validation.ts
@@ -0,0 +1,202 @@
+import { assert, unreachable } from '../../../../../../common/util/util.js';
+import { kValue } from '../../../../../util/constants.js';
+import {
+ Type,
+ TypeF16,
+ Value,
+ elementType,
+ elementsOf,
+ isAbstractType,
+} from '../../../../../util/conversion.js';
+import { fullF16Range, fullF32Range, fullF64Range, linearRange } from '../../../../../util/math.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+/// A linear sweep from -2 to 2
+export const kMinusTwoToTwo = linearRange(-2, 2, 10);
+
+/// An array of values ranging from -3π to 3π, with a focus on multiples of π
+export const kMinus3PiTo3Pi = [
+ -3 * Math.PI,
+ -2.999 * Math.PI,
+
+ -2.501 * Math.PI,
+ -2.5 * Math.PI,
+ -2.499 * Math.PI,
+
+ -2.001 * Math.PI,
+ -2.0 * Math.PI,
+ -1.999 * Math.PI,
+
+ -1.501 * Math.PI,
+ -1.5 * Math.PI,
+ -1.499 * Math.PI,
+
+ -1.001 * Math.PI,
+ -1.0 * Math.PI,
+ -0.999 * Math.PI,
+
+ -0.501 * Math.PI,
+ -0.5 * Math.PI,
+ -0.499 * Math.PI,
+
+ -0.001,
+ 0,
+ 0.001,
+
+ 0.499 * Math.PI,
+ 0.5 * Math.PI,
+ 0.501 * Math.PI,
+
+ 0.999 * Math.PI,
+ 1.0 * Math.PI,
+ 1.001 * Math.PI,
+
+ 1.499 * Math.PI,
+ 1.5 * Math.PI,
+ 1.501 * Math.PI,
+
+ 1.999 * Math.PI,
+ 2.0 * Math.PI,
+ 2.001 * Math.PI,
+
+ 2.499 * Math.PI,
+ 2.5 * Math.PI,
+ 2.501 * Math.PI,
+
+ 2.999 * Math.PI,
+ 3 * Math.PI,
+] as const;
+
+/// A minimal array of values ranging from -3π to 3π, with a focus on multiples
+/// of π. Used when multiple parameters are passed in, since the number of cases
+/// grows as the square (or a higher power) of this list's length.
+export const kSparseMinus3PiTo3Pi = [
+ -3 * Math.PI,
+ -2.5 * Math.PI,
+ -2.0 * Math.PI,
+ -1.5 * Math.PI,
+ -1.0 * Math.PI,
+ -0.5 * Math.PI,
+ 0,
+ 0.5 * Math.PI,
+ Math.PI,
+ 1.5 * Math.PI,
+ 2.0 * Math.PI,
+ 2.5 * Math.PI,
+ 3 * Math.PI,
+] as const;
+
+/// The evaluation stages to test
+export const kConstantAndOverrideStages = ['constant', 'override'] as const;
+
+export type ConstantOrOverrideStage = 'constant' | 'override';
+
+/**
+ * @returns true if the evaluation stage `stage` supports expressions of type `type`.
+ */
+export function stageSupportsType(stage: ConstantOrOverrideStage, type: Type) {
+ if (stage === 'override' && isAbstractType(elementType(type)!)) {
+ // Abstract numerics are concretized before being used in an override expression.
+ return false;
+ }
+ return true;
+}
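+// For example, stageSupportsType('override', TypeF32) is true, while passing an
+// abstract-numeric type (such as abstract-float) with the 'override' stage returns
+// false, because abstract numerics are concretized before they can appear in an
+// override expression.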
+
+/**
+ * Runs a validation test to check whether a call to `builtin` evaluates with or without
+ * error at shader-creation time (constant stage) or pipeline-creation time (override stage).
+ * @param t the ShaderValidationTest
+ * @param builtin the name of the builtin
+ * @param expectedResult false if an error is expected, true if no error is expected
+ * @param args the arguments to pass to the builtin
+ * @param stage the evaluation stage
+ */
+export function validateConstOrOverrideBuiltinEval(
+ t: ShaderValidationTest,
+ builtin: string,
+ expectedResult: boolean,
+ args: Value[],
+ stage: ConstantOrOverrideStage
+) {
+ const elTys = args.map(arg => elementType(arg.type)!);
+ const enables = elTys.some(ty => ty === TypeF16) ? 'enable f16;' : '';
+
+ switch (stage) {
+ case 'constant': {
+ t.expectCompileResult(
+ expectedResult,
+ `${enables}
+const v = ${builtin}(${args.map(arg => arg.wgsl()).join(', ')});`
+ );
+ break;
+ }
+ case 'override': {
+ assert(!elTys.some(ty => isAbstractType(ty)));
+ const constants: Record<string, number> = {};
+ const overrideDecls: string[] = [];
+ const callArgs: string[] = [];
+ let numOverrides = 0;
+ for (const arg of args) {
+ const argOverrides: string[] = [];
+ for (const el of elementsOf(arg)) {
+ const name = `o${numOverrides++}`;
+ overrideDecls.push(`override ${name} : ${el.type};`);
+ argOverrides.push(name);
+ constants[name] = Number(el.value);
+ }
+ callArgs.push(`${arg.type}(${argOverrides.join(', ')})`);
+ }
+ t.expectPipelineResult({
+ expectedResult,
+ code: `${enables}
+${overrideDecls.join('\n')}
+var<private> v = ${builtin}(${callArgs.join(', ')});`,
+ constants,
+ reference: ['v'],
+ });
+ break;
+ }
+ }
+}
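+// Sketch of the WGSL this helper produces (exact text is illustrative): a call like
+//   validateConstOrOverrideBuiltinEval(t, 'sqrt', true, [TypeF32.create(4)], 'constant')
+// checks compilation of roughly
+//   const v = sqrt(f32(4));
+// while the 'override' stage declares one override per element and builds a pipeline,
+// roughly
+//   override o0 : f32;
+//   var<private> v = sqrt(f32(o0));
+// with o0 = 4 supplied through the pipeline's constants record.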
+
+/** @returns a sweep of representable values for the element type of `type` */
+export function fullRangeForType(type: Type, count?: number) {
+ if (count === undefined) {
+ count = 25;
+ }
+ switch (elementType(type)?.kind) {
+ case 'abstract-float':
+ return fullF64Range({
+ pos_sub: Math.ceil((count * 1) / 5),
+ pos_norm: Math.ceil((count * 4) / 5),
+ });
+ case 'f32':
+ return fullF32Range({
+ pos_sub: Math.ceil((count * 1) / 5),
+ pos_norm: Math.ceil((count * 4) / 5),
+ });
+ case 'f16':
+ return fullF16Range({
+ pos_sub: Math.ceil((count * 1) / 5),
+ pos_norm: Math.ceil((count * 4) / 5),
+ });
+ case 'i32':
+ return linearRange(kValue.i32.negative.min, kValue.i32.positive.max, count).map(f =>
+ Math.floor(f)
+ );
+ case 'u32':
+ return linearRange(0, kValue.u32.max, count).map(f => Math.floor(f));
+ }
+ unreachable();
+}
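+// For example, fullRangeForType(TypeI32, 5) yields 5 integers spread linearly from
+// the minimum to the maximum i32, while fullRangeForType(TypeF32) asks fullF32Range
+// for roughly 25 values weighted 1:4 between positive subnormals and positive
+// normals (the exact contents depend on fullF32Range's defaults).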
+
+/** @returns all the values in the provided arrays with duplicates removed */
+export function unique<T>(...arrays: Array<readonly T[]>): T[] {
+ const set = new Set<T>();
+ for (const arr of arrays) {
+ for (const item of arr) {
+ set.add(item);
+ }
+ }
+ return [...set];
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/cos.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/cos.spec.ts
new file mode 100644
index 0000000000..b65593ccaa
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/cos.spec.ts
@@ -0,0 +1,77 @@
+const builtin = 'cos';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatScalarsAndVectors,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ kMinus3PiTo3Pi,
+ stageSupportsType,
+ unique,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() rejects invalid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .expand('value', u => unique(kMinus3PiTo3Pi, fullRangeForType(kValuesTypes[u.type])))
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ true,
+ [kValuesTypes[t.params.type].create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(0)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/cosh.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/cosh.spec.ts
new file mode 100644
index 0000000000..126fc19e7e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/cosh.spec.ts
@@ -0,0 +1,78 @@
+const builtin = 'cosh';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatScalarsAndVectors,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { isRepresentable } from '../../../../../util/floating_point.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ stageSupportsType,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() rejects invalid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .expand('value', u => fullRangeForType(kValuesTypes[u.type]))
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const type = kValuesTypes[t.params.type];
+ const expectedResult = isRepresentable(Math.cosh(t.params.value), elementType(type));
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [type.create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(0)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/degrees.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/degrees.spec.ts
new file mode 100644
index 0000000000..154455857d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/degrees.spec.ts
@@ -0,0 +1,78 @@
+const builtin = 'degrees';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatScalarsAndVectors,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { isRepresentable } from '../../../../../util/floating_point.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ stageSupportsType,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() inputs rejects invalid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .expand('value', u => fullRangeForType(kValuesTypes[u.type]))
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const type = kValuesTypes[t.params.type];
+ const expectedResult = isRepresentable((t.params.value * 180) / Math.PI, elementType(type));
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [type.create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(1)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/exp.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/exp.spec.ts
new file mode 100644
index 0000000000..244e91f2ae
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/exp.spec.ts
@@ -0,0 +1,102 @@
+const builtin = 'exp';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import { kValue } from '../../../../../util/constants.js';
+import {
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatScalarsAndVectors,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { isRepresentable } from '../../../../../util/floating_point.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ kConstantAndOverrideStages,
+ stageSupportsType,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() rejects invalid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .combine('value', [
+ -1e2,
+ -1e3,
+ -4,
+ -3,
+ -2,
+ -1,
+ -1e-1,
+ -1e-2,
+ -1e-3,
+ 0,
+ 1e-3,
+ 1e-2,
+ 1e-1,
+ 1,
+ 2,
+ 3,
+ 4,
+ 1e2,
+ 1e3,
+ Math.log2(kValue.f16.positive.max) - 0.1,
+ Math.log2(kValue.f16.positive.max) + 0.1,
+ Math.log2(kValue.f32.positive.max) - 0.1,
+ Math.log2(kValue.f32.positive.max) + 0.1,
+ ])
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const type = kValuesTypes[t.params.type];
+ const expectedResult = isRepresentable(Math.exp(t.params.value), elementType(type));
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [type.create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(0)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/exp2.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/exp2.spec.ts
new file mode 100644
index 0000000000..9addbc076b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/exp2.spec.ts
@@ -0,0 +1,102 @@
+const builtin = 'exp2';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import { kValue } from '../../../../../util/constants.js';
+import {
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatScalarsAndVectors,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { isRepresentable } from '../../../../../util/floating_point.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ kConstantAndOverrideStages,
+ stageSupportsType,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() rejects invalid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .combine('value', [
+ -1e2,
+ -1e3,
+ -4,
+ -3,
+ -2,
+ -1,
+ -1e-1,
+ -1e-2,
+ -1e-3,
+ 0,
+ 1e-3,
+ 1e-2,
+ 1e-1,
+ 1,
+ 2,
+ 3,
+ 4,
+ 1e2,
+ 1e3,
+ Math.log2(kValue.f16.positive.max) - 0.1,
+ Math.log2(kValue.f16.positive.max) + 0.1,
+ Math.log2(kValue.f32.positive.max) - 0.1,
+ Math.log2(kValue.f32.positive.max) + 0.1,
+ ])
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const type = kValuesTypes[t.params.type];
+ const expectedResult = isRepresentable(Math.pow(2, t.params.value), elementType(type));
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [type.create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(0)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/inverseSqrt.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/inverseSqrt.spec.ts
new file mode 100644
index 0000000000..b2813cbe0a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/inverseSqrt.spec.ts
@@ -0,0 +1,81 @@
+const builtin = 'inverseSqrt';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatScalarsAndVectors,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { isRepresentable } from '../../../../../util/floating_point.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ kMinusTwoToTwo,
+ stageSupportsType,
+ unique,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() inputs rejects invalid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .expand('value', u => unique(kMinusTwoToTwo, fullRangeForType(kValuesTypes[u.type])))
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const type = kValuesTypes[t.params.type];
+ const expectedResult =
+ t.params.value > 0 && isRepresentable(1 / Math.sqrt(t.params.value), elementType(type));
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [type.create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(1)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/length.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/length.spec.ts
new file mode 100644
index 0000000000..60fbe6e285
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/length.spec.ts
@@ -0,0 +1,221 @@
+const builtin = 'length';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ ScalarType,
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatScalars,
+ kAllFloatVector2,
+ kAllFloatVector3,
+ kAllFloatVector4,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { isRepresentable } from '../../../../../util/floating_point.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ stageSupportsType,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+/**
+ * Calculates the result of a call to length() on a vector formed from `vec` with
+ * element type `type`, along with whether the intermediate and final values are
+ * representable.
+ */
+function calculate(
+ vec: number[],
+ type: ScalarType
+): {
+ /**
+ * True iff the sum of the squares can be represented by the data type.
+   * @note The specification does not enforce how, or with what precision,
+   * length() is calculated. If the intermediate is not representable but the
+   * result is representable, then the test case is skipped, as it is undefined
+   * whether the evaluation should error.
+ */
+ isIntermediateRepresentable: boolean;
+ /** True iff the result of length() can be represented by the data type. */
+ isResultRepresentable: boolean;
+ /** The computed value of length(). */
+ result: number;
+} {
+ const squareSum = vec.reduce((prev, curr) => prev + curr * curr, 0);
+ const result = Math.sqrt(squareSum);
+ return {
+ isIntermediateRepresentable: isRepresentable(squareSum, type),
+ isResultRepresentable: isRepresentable(result, type),
+ result,
+ };
+}
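+// For example, with f16 (max finite value 65504) the vector [300, 300] has a square
+// sum of 180000, which is not representable even though length() itself (~424.26) is;
+// calculate() then reports isIntermediateRepresentable === false with
+// isResultRepresentable === true, and the filters below drop such ambiguous cases.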
+
+const kScalarTypes = objectsToRecord(kAllFloatScalars);
+
+g.test('scalar')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() with
+the input scalar value always compiles without error
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kScalarTypes))
+ .filter(u => stageSupportsType(u.stage, kScalarTypes[u.type]))
+ .beginSubcases()
+ .expand('value', u => fullRangeForType(kScalarTypes[u.type]))
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kScalarTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ // We only validate with numbers known to be representable by the type
+ const expectedResult = true;
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [kScalarTypes[t.params.type].create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kVec2Types = objectsToRecord(kAllFloatVector2);
+
+g.test('vec2')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() with a vec2 compiles with valid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kVec2Types))
+ .filter(u => stageSupportsType(u.stage, kVec2Types[u.type]))
+ .beginSubcases()
+ .expand('x', u => fullRangeForType(kVec2Types[u.type], 5))
+ .expand('y', u => fullRangeForType(kVec2Types[u.type], 5))
+ .expand('_result', u => [calculate([u.x, u.y], elementType(kVec2Types[u.type]))])
+ .filter(u => u._result.isResultRepresentable === u._result.isIntermediateRepresentable)
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kVec2Types[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const expectedResult = t.params._result.isResultRepresentable;
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [kVec2Types[t.params.type].create([t.params.x, t.params.y])],
+ t.params.stage
+ );
+ });
+
+const kVec3Types = objectsToRecord(kAllFloatVector3);
+
+g.test('vec3')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() with a vec3 compiles with valid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kVec3Types))
+ .filter(u => stageSupportsType(u.stage, kVec3Types[u.type]))
+ .beginSubcases()
+ .expand('x', u => fullRangeForType(kVec3Types[u.type], 4))
+ .expand('y', u => fullRangeForType(kVec3Types[u.type], 4))
+ .expand('z', u => fullRangeForType(kVec3Types[u.type], 4))
+ .expand('_result', u => [calculate([u.x, u.y, u.z], elementType(kVec3Types[u.type]))])
+ .filter(u => u._result.isResultRepresentable === u._result.isIntermediateRepresentable)
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kVec3Types[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const expectedResult = t.params._result.isResultRepresentable;
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [kVec3Types[t.params.type].create([t.params.x, t.params.y, t.params.z])],
+ t.params.stage
+ );
+ });
+
+const kVec4Types = objectsToRecord(kAllFloatVector4);
+
+g.test('vec4')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() with a vec4 compiles with valid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kVec4Types))
+ .filter(u => stageSupportsType(u.stage, kVec4Types[u.type]))
+ .beginSubcases()
+ .expand('x', u => fullRangeForType(kVec4Types[u.type], 3))
+ .expand('y', u => fullRangeForType(kVec4Types[u.type], 3))
+ .expand('z', u => fullRangeForType(kVec4Types[u.type], 3))
+ .expand('w', u => fullRangeForType(kVec4Types[u.type], 3))
+ .expand('_result', u => [calculate([u.x, u.y, u.z, u.w], elementType(kVec4Types[u.type]))])
+ .filter(u => u._result.isResultRepresentable === u._result.isIntermediateRepresentable)
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kVec4Types[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const expectedResult = t.params._result.isResultRepresentable;
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [kVec4Types[t.params.type].create([t.params.x, t.params.y, t.params.z, t.params.w])],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(1)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/log.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/log.spec.ts
new file mode 100644
index 0000000000..5d84d0c0be
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/log.spec.ts
@@ -0,0 +1,76 @@
+const builtin = 'log';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatScalarsAndVectors,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ stageSupportsType,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() inputs rejects invalid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .expand('value', u => fullRangeForType(kValuesTypes[u.type]))
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const expectedResult = t.params.value > 0;
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [kValuesTypes[t.params.type].create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(1)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/log2.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/log2.spec.ts
new file mode 100644
index 0000000000..60f32d99c7
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/log2.spec.ts
@@ -0,0 +1,76 @@
+const builtin = 'log2';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatScalarsAndVectors,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ stageSupportsType,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() inputs rejects invalid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .expand('value', u => fullRangeForType(kValuesTypes[u.type]))
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const expectedResult = t.params.value > 0;
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [kValuesTypes[t.params.type].create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(1)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/modf.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/modf.spec.ts
new file mode 100644
index 0000000000..b890f9026e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/modf.spec.ts
@@ -0,0 +1,76 @@
+const builtin = 'modf';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatScalarsAndVectors,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ stageSupportsType,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() rejects invalid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .expand('value', u => fullRangeForType(kValuesTypes[u.type]))
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const expectedResult = true; // Result should always be representable by the type
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [kValuesTypes[t.params.type].create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(0)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/radians.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/radians.spec.ts
new file mode 100644
index 0000000000..dd432ac194
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/radians.spec.ts
@@ -0,0 +1,76 @@
+const builtin = 'radians';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatScalarsAndVectors,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ stageSupportsType,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() inputs rejects invalid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .expand('value', u => fullRangeForType(kValuesTypes[u.type]))
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+    const expectedResult = true; // The result is always smaller in magnitude than the input, so it cannot go out of range.
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [kValuesTypes[t.params.type].create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(1)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/round.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/round.spec.ts
new file mode 100644
index 0000000000..3a4ea0408a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/round.spec.ts
@@ -0,0 +1,84 @@
+const builtin = 'round';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatScalarsAndVectors,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { fpTraitsFor } from '../../../../../util/floating_point.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ stageSupportsType,
+ unique,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() inputs rejects invalid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .expand('value', u => {
+ const constants = fpTraitsFor(elementType(kValuesTypes[u.type])).constants();
+ return unique(fullRangeForType(kValuesTypes[u.type]), [
+ constants.negative.min + 0.1,
+ constants.positive.max - 0.1,
+ ]);
+ })
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const expectedResult = true; // Result should always be representable by the type
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [kValuesTypes[t.params.type].create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(1)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/saturate.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/saturate.spec.ts
new file mode 100644
index 0000000000..1c7aa66a65
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/saturate.spec.ts
@@ -0,0 +1,76 @@
+const builtin = 'saturate';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatScalarsAndVectors,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ stageSupportsType,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() inputs rejects invalid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .expand('value', u => fullRangeForType(kValuesTypes[u.type]))
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const expectedResult = true; // Result should always be representable by the type
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [kValuesTypes[t.params.type].create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(1)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/sign.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/sign.spec.ts
new file mode 100644
index 0000000000..f844961aee
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/sign.spec.ts
@@ -0,0 +1,79 @@
+const builtin = 'sign';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatAndSignedIntegerScalarsAndVectors,
+ kAllUnsignedIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ stageSupportsType,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatAndSignedIntegerScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() inputs rejects invalid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .expand('value', u => fullRangeForType(kValuesTypes[u.type]))
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const expectedResult = true; // Result should always be representable by the type
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [kValuesTypes[t.params.type].create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kUnsignedIntegerArgumentTypes = objectsToRecord([
+ TypeF32,
+ ...kAllUnsignedIntegerScalarsAndVectors,
+]);
+
+g.test('unsigned_integer_argument')
+ .desc(
+ `
+Validates that scalar and vector unsigned integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kUnsignedIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kUnsignedIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(1)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/sin.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/sin.spec.ts
new file mode 100644
index 0000000000..3822fccd3a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/sin.spec.ts
@@ -0,0 +1,77 @@
+const builtin = 'sin';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatScalarsAndVectors,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ kMinus3PiTo3Pi,
+ stageSupportsType,
+ unique,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() rejects invalid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .expand('value', u => unique(kMinus3PiTo3Pi, fullRangeForType(kValuesTypes[u.type])))
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ true,
+ [kValuesTypes[t.params.type].create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(0)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/sinh.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/sinh.spec.ts
new file mode 100644
index 0000000000..09f48751fc
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/sinh.spec.ts
@@ -0,0 +1,78 @@
+const builtin = 'sinh';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatScalarsAndVectors,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { isRepresentable } from '../../../../../util/floating_point.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ stageSupportsType,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() rejects invalid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .expand('value', u => fullRangeForType(kValuesTypes[u.type]))
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const type = kValuesTypes[t.params.type];
+ const expectedResult = isRepresentable(Math.sinh(t.params.value), elementType(type));
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [type.create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(0)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/sqrt.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/sqrt.spec.ts
new file mode 100644
index 0000000000..a570ce4bc0
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/sqrt.spec.ts
@@ -0,0 +1,81 @@
+const builtin = 'sqrt';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatScalarsAndVectors,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { isRepresentable } from '../../../../../util/floating_point.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ kMinusTwoToTwo,
+ stageSupportsType,
+ unique,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() inputs rejects invalid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .expand('value', u => unique(kMinusTwoToTwo, fullRangeForType(kValuesTypes[u.type])))
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const type = kValuesTypes[t.params.type];
+ const expectedResult =
+ t.params.value >= 0 && isRepresentable(Math.sqrt(t.params.value), elementType(type));
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [type.create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(1)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/tan.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/tan.spec.ts
new file mode 100644
index 0000000000..b9744643f6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/expression/call/builtin/tan.spec.ts
@@ -0,0 +1,83 @@
+const builtin = 'tan';
+export const description = `
+Validation tests for the ${builtin}() builtin.
+`;
+
+import { makeTestGroup } from '../../../../../../common/framework/test_group.js';
+import { keysOf, objectsToRecord } from '../../../../../../common/util/data_tables.js';
+import {
+ TypeF16,
+ TypeF32,
+ elementType,
+ kAllFloatScalarsAndVectors,
+ kAllIntegerScalarsAndVectors,
+} from '../../../../../util/conversion.js';
+import { fpTraitsFor } from '../../../../../util/floating_point.js';
+import { ShaderValidationTest } from '../../../shader_validation_test.js';
+
+import {
+ fullRangeForType,
+ kConstantAndOverrideStages,
+ kMinus3PiTo3Pi,
+ stageSupportsType,
+ unique,
+ validateConstOrOverrideBuiltinEval,
+} from './const_override_validation.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValuesTypes = objectsToRecord(kAllFloatScalarsAndVectors);
+
+g.test('values')
+ .desc(
+ `
+Validates that constant evaluation and override evaluation of ${builtin}() rejects invalid values
+`
+ )
+ .params(u =>
+ u
+ .combine('stage', kConstantAndOverrideStages)
+ .combine('type', keysOf(kValuesTypes))
+ .filter(u => stageSupportsType(u.stage, kValuesTypes[u.type]))
+ .beginSubcases()
+ .expand('value', u => unique(kMinus3PiTo3Pi, fullRangeForType(kValuesTypes[u.type])))
+ )
+ .beforeAllSubcases(t => {
+ if (elementType(kValuesTypes[t.params.type]) === TypeF16) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const type = kValuesTypes[t.params.type];
+ const fp = fpTraitsFor(elementType(type));
+ const smallestPositive = fp.constants().positive.min;
+ const v = fp.quantize(t.params.value);
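+    // Mirrors tan(x) = sin(x) / cos(x): expect evaluation to succeed only when
+    // |cos(x)| of the quantized input exceeds smallestPositive, so the division
+    // cannot blow up.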
+ const expectedResult = Math.abs(Math.cos(v)) > smallestPositive;
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ expectedResult,
+ [type.create(t.params.value)],
+ t.params.stage
+ );
+ });
+
+const kIntegerArgumentTypes = objectsToRecord([TypeF32, ...kAllIntegerScalarsAndVectors]);
+
+g.test('integer_argument')
+ .desc(
+ `
+Validates that scalar and vector integer arguments are rejected by ${builtin}()
+`
+ )
+ .params(u => u.combine('type', keysOf(kIntegerArgumentTypes)))
+ .fn(t => {
+ const type = kIntegerArgumentTypes[t.params.type];
+ validateConstOrOverrideBuiltinEval(
+ t,
+ builtin,
+ /* expectedResult */ type === TypeF32,
+ [type.create(0)],
+ 'constant'
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/functions/alias_analysis.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/functions/alias_analysis.spec.ts
new file mode 100644
index 0000000000..ba39485449
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/functions/alias_analysis.spec.ts
@@ -0,0 +1,202 @@
+export const description = `Validation tests for function alias analysis`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+interface Use {
+ is_write: boolean;
+ gen: (ref: string) => string;
+}
+
+const kUses: Record<string, Use> = {
+ no_access: { is_write: false, gen: ref => `{ let p = &*&${ref}; }` },
+ assign: { is_write: true, gen: ref => `${ref} = 42;` },
+ compound_assign_lhs: { is_write: true, gen: ref => `${ref} += 1;` },
+ compound_assign_rhs: { is_write: false, gen: ref => `{ var tmp : i32; tmp += ${ref}; }` },
+ increment: { is_write: true, gen: ref => `${ref}++;` },
+ binary_lhs: { is_write: false, gen: ref => `_ = ${ref} + 1;` },
+ binary_rhs: { is_write: false, gen: ref => `_ = 1 + ${ref};` },
+ unary_minus: { is_write: false, gen: ref => `_ = -${ref};` },
+ bitcast: { is_write: false, gen: ref => `_ = bitcast<f32>(${ref});` },
+ convert: { is_write: false, gen: ref => `_ = f32(${ref});` },
+ builtin_arg: { is_write: false, gen: ref => `_ = abs(${ref});` },
+ index_access: { is_write: false, gen: ref => `{ var arr : array<i32, 4>; _ = arr[${ref}]; }` },
+ let_init: { is_write: false, gen: ref => `{ let tmp = ${ref}; }` },
+ var_init: { is_write: false, gen: ref => `{ var tmp = ${ref}; }` },
+ return: { is_write: false, gen: ref => `{ return ${ref}; }` },
+ switch_cond: { is_write: false, gen: ref => `switch(${ref}) { default { break; } }` },
+};
+
+type UseName = keyof typeof kUses;
+
+function shouldPass(aliased: boolean, ...uses: UseName[]): boolean {
+ // Expect fail if the pointers are aliased and at least one of the accesses is a write.
+ // If either of the accesses is a "no access" then expect pass.
+ return !aliased || !uses.some(u => kUses[u].is_write) || uses.includes('no_access');
+}
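+// For example, when the pointers are aliased, callee(&x, &x) with one write
+// (e.g. '*pa = 42;') and a read through the other pointer is rejected, while two
+// reads, or a write paired with a 'no_access' use, are accepted.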
+
+g.test('two_pointers')
+ .desc(`Test aliasing of two pointers passed to a function.`)
+ .params(u =>
+ u
+ .combine('address_space', ['private', 'function'] as const)
+ .combine('a_use', keysOf(kUses))
+ .combine('b_use', keysOf(kUses))
+ .combine('aliased', [true, false])
+ .beginSubcases()
+ )
+ .fn(t => {
+ const code = `
+${t.params.address_space === 'private' ? `var<private> x : i32; var<private> y : i32;` : ``}
+
+fn callee(pa : ptr<${t.params.address_space}, i32>,
+ pb : ptr<${t.params.address_space}, i32>) -> i32 {
+ ${kUses[t.params.a_use].gen(`*pa`)}
+ ${kUses[t.params.b_use].gen(`*pb`)}
+ return 0;
+}
+
+fn caller() {
+ ${t.params.address_space === 'function' ? `var x : i32; var y : i32;` : ``}
+ callee(&x, ${t.params.aliased ? `&x` : `&y`});
+}
+`;
+ t.expectCompileResult(shouldPass(t.params.aliased, t.params.a_use, t.params.b_use), code);
+ });
+
+g.test('one_pointer_one_module_scope')
+ .desc(`Test aliasing of a pointer with a direct access to a module-scope variable.`)
+ .params(u =>
+ u
+ .combine('a_use', keysOf(kUses))
+ .combine('b_use', keysOf(kUses))
+ .combine('aliased', [true, false])
+ .beginSubcases()
+ )
+ .fn(t => {
+ const code = `
+var<private> x : i32;
+var<private> y : i32;
+
+fn callee(pb : ptr<private, i32>) -> i32 {
+ ${kUses[t.params.a_use].gen(`x`)}
+ ${kUses[t.params.b_use].gen(`*pb`)}
+ return 0;
+}
+
+fn caller() {
+ callee(${t.params.aliased ? `&x` : `&y`});
+}
+`;
+ t.expectCompileResult(shouldPass(t.params.aliased, t.params.a_use, t.params.b_use), code);
+ });
+
+g.test('subcalls')
+ .desc(`Test aliasing of two pointers passed to a function, and then passed to other functions.`)
+ .params(u =>
+ u
+ .combine('a_use', ['no_access', 'assign', 'binary_lhs'] as UseName[])
+ .combine('b_use', ['no_access', 'assign', 'binary_lhs'] as UseName[])
+ .combine('aliased', [true, false])
+ .beginSubcases()
+ )
+ .fn(t => {
+ const code = `
+var<private> x : i32;
+var<private> y : i32;
+
+fn subcall_no_access(p : ptr<private, i32>) {
+ let pp = &*p;
+}
+
+fn subcall_binary_lhs(p : ptr<private, i32>) -> i32 {
+ return *p + 1;
+}
+
+fn subcall_assign(p : ptr<private, i32>) {
+ *p = 42;
+}
+
+fn callee(pa : ptr<private, i32>, pb : ptr<private, i32>) -> i32 {
+ let new_pa = &*pa;
+ let new_pb = &*pb;
+ subcall_${t.params.a_use}(new_pa);
+ subcall_${t.params.b_use}(new_pb);
+ return 0;
+}
+
+fn caller() {
+ callee(&x, ${t.params.aliased ? `&x` : `&y`});
+}
+`;
+ t.expectCompileResult(shouldPass(t.params.aliased, t.params.a_use, t.params.b_use), code);
+ });
+
+g.test('member_accessors')
+ .desc(`Test aliasing of two pointers passed to a function and used with member accessors.`)
+ .params(u =>
+ u
+ .combine('a_use', ['no_access', 'assign', 'binary_lhs'] as UseName[])
+ .combine('b_use', ['no_access', 'assign', 'binary_lhs'] as UseName[])
+ .combine('aliased', [true, false])
+ .beginSubcases()
+ )
+ .fn(t => {
+ const code = `
+struct S { a : i32 }
+
+var<private> x : S;
+var<private> y : S;
+
+fn callee(pa : ptr<private, S>,
+ pb : ptr<private, S>) -> i32 {
+ ${kUses[t.params.a_use].gen(`(*pa).a`)}
+ ${kUses[t.params.b_use].gen(`(*pb).a`)}
+ return 0;
+}
+
+fn caller() {
+ callee(&x, ${t.params.aliased ? `&x` : `&y`});
+}
+`;
+ t.expectCompileResult(shouldPass(t.params.aliased, t.params.a_use, t.params.b_use), code);
+ });
+
+g.test('same_pointer_read_and_write')
+ .desc(`Test that we can read from and write to the same pointer.`)
+ .params(u => u.beginSubcases())
+ .fn(t => {
+ const code = `
+var<private> v : i32;
+
+fn callee(p : ptr<private, i32>) {
+ *p = *p + 1;
+}
+
+fn caller() {
+ callee(&v);
+}
+`;
+ t.expectCompileResult(true, code);
+ });
+
+g.test('aliasing_inside_function')
+ .desc(`Test that we can alias pointers inside a function.`)
+ .params(u => u.beginSubcases())
+ .fn(t => {
+ const code = `
+var<private> v : i32;
+
+fn foo() {
+ var v : i32;
+ let p1 = &v;
+ let p2 = &v;
+ *p1 = 42;
+ *p2 = 42;
+}
+`;
+ t.expectCompileResult(true, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/functions/restrictions.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/functions/restrictions.spec.ts
new file mode 100644
index 0000000000..b6affd14d6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/functions/restrictions.spec.ts
@@ -0,0 +1,757 @@
+export const description = `Validation tests for function restrictions`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+interface VertexPosCase {
+ name: string;
+ value: string;
+ valid: boolean;
+}
+
+const kVertexPosCases: Record<string, VertexPosCase> = {
+ bare_position: { name: `@builtin(position) vec4f`, value: `vec4f()`, valid: true },
+ nested_position: { name: `pos_struct`, value: `pos_struct()`, valid: true },
+ no_bare_position: { name: `vec4f`, value: `vec4f()`, valid: false },
+ no_nested_position: { name: `no_pos_struct`, value: `no_pos_struct()`, valid: false },
+};
+
+g.test('vertex_returns_position')
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#function-restriction')
+ .desc(`Test that a vertex shader should return position`)
+ .params(u => u.combine('case', keysOf(kVertexPosCases)))
+ .fn(t => {
+ const testcase = kVertexPosCases[t.params.case];
+ const code = `
+struct pos_struct {
+ @builtin(position) pos : vec4f
+}
+
+struct no_pos_struct {
+ @location(0) x : vec4f
+}
+
+@vertex
+fn main() -> ${testcase.name} {
+ return ${testcase.value};
+}`;
+
+ t.expectCompileResult(testcase.valid, code);
+ });
+
+g.test('entry_point_call_target')
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#function-restriction')
+ .desc(`Test that an entry point cannot be the target of a function call`)
+ .params(u =>
+ u
+ .combine('stage', ['@fragment', '@vertex', '@compute @workgroup_size(1,1,1)'] as const)
+ .combine('entry_point', ['with', 'without'] as const)
+ )
+ .fn(t => {
+ const use_attr = t.params.entry_point === 'with';
+ let ret_attr = '';
+ if (use_attr && t.params.stage === '@vertex') {
+ ret_attr = '@builtin(position)';
+ }
+ const ret = t.params.stage.indexOf('@vertex') === 0 ? `-> ${ret_attr} vec4f` : '';
+ const ret_value = t.params.stage.indexOf('@vertex') === 0 ? `return vec4f();` : '';
+ const call = t.params.stage.indexOf('@vertex') === 0 ? 'let tmp = bar();' : 'bar();';
+ const stage_attr = use_attr ? t.params.stage : '';
+ const code = `
+${stage_attr}
+fn bar() ${ret} {
+ ${ret_value}
+}
+
+fn foo() {
+ ${call}
+}
+`;
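+    // With the stage attribute applied, 'bar' is an entry point, and calling an entry
+    // point from another function must fail.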
+ t.expectCompileResult(!use_attr, code);
+ });
+
+interface RetTypeCase {
+ name: string;
+ value: string;
+ valid: boolean;
+}
+
+const kFunctionRetTypeCases: Record<string, RetTypeCase> = {
+  // Constructible types.
+ u32: { name: `u32`, value: ``, valid: true },
+ i32: { name: `i32`, value: ``, valid: true },
+ f32: { name: `f32`, value: ``, valid: true },
+ bool: { name: `bool`, value: ``, valid: true },
+ f16: { name: `f16`, value: ``, valid: true },
+ vec2: { name: `vec2u`, value: ``, valid: true },
+ vec3: { name: `vec3i`, value: ``, valid: true },
+ vec4: { name: `vec4f`, value: ``, valid: true },
+ mat2x2: { name: `mat2x2f`, value: ``, valid: true },
+ mat2x3: { name: `mat2x3f`, value: ``, valid: true },
+ mat2x4: { name: `mat2x4f`, value: ``, valid: true },
+ mat3x2: { name: `mat3x2f`, value: ``, valid: true },
+ mat3x3: { name: `mat3x3f`, value: ``, valid: true },
+ mat3x4: { name: `mat3x4f`, value: ``, valid: true },
+ mat4x2: { name: `mat4x2f`, value: ``, valid: true },
+ mat4x3: { name: `mat4x3f`, value: ``, valid: true },
+ mat4x4: { name: `mat4x4f`, value: ``, valid: true },
+ array1: { name: `array<u32, 4>`, value: ``, valid: true },
+ array2: { name: `array<vec2f, 2>`, value: ``, valid: true },
+ array3: { name: `array<constructible, 4>`, value: ``, valid: true },
+ array4: { name: `array<mat2x2f, 4>`, value: ``, valid: true },
+ array5: { name: `array<bool, 4>`, value: ``, valid: true },
+ struct1: { name: `constructible`, value: ``, valid: true },
+ struct2: { name: `struct_with_array`, value: ``, valid: true },
+
+ // Non-constructible types.
+ runtime_array: { name: `array<u32>`, value: ``, valid: false },
+ runtime_struct: { name: `runtime_array_struct`, value: ``, valid: false },
+ override_array: { name: `array<u32, override_size>`, value: ``, valid: false },
+ atomic_u32: { name: `atomic<u32>`, value: `atomic_wg`, valid: false },
+ atomic_struct: { name: `atomic_struct`, value: ``, valid: false },
+ texture_sample: { name: `texture_2d<f32>`, value: `t`, valid: false },
+ texture_depth: { name: `texture_depth_2d`, value: `t_depth`, valid: false },
+ texture_multisampled: {
+ name: `texture_multisampled_2d<f32>`,
+ value: `t_multisampled`,
+ valid: false,
+ },
+ texture_storage: {
+ name: `texture_storage_2d<rgba8unorm, write>`,
+ value: `t_storage`,
+ valid: false,
+ },
+ sampler: { name: `sampler`, value: `s`, valid: false },
+ sampler_comparison: { name: `sampler_comparison`, value: `s_depth`, valid: false },
+ ptr: { name: `ptr<workgroup, atomic<u32>>`, value: `&atomic_wg`, valid: false },
+};
+
+g.test('function_return_types')
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#function-restriction')
+ .desc(`Test that function return types must be constructible`)
+ .params(u => u.combine('case', keysOf(kFunctionRetTypeCases)))
+ .beforeAllSubcases(t => {
+ if (kFunctionRetTypeCases[t.params.case].name === 'f16') {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const testcase = kFunctionRetTypeCases[t.params.case];
+ const enable = testcase.name === 'f16' ? 'enable f16;' : '';
+ const value = testcase.value === '' ? `${testcase.name}()` : testcase.value;
+ const code = `
+${enable}
+
+struct runtime_array_struct {
+ arr : array<u32>
+}
+
+struct constructible {
+ a : i32,
+ b : u32,
+ c : f32,
+ d : bool,
+}
+
+struct struct_with_array {
+ a : array<constructible, 4>
+}
+
+struct atomic_struct {
+ a : atomic<u32>
+};
+
+override override_size : u32;
+
+var<workgroup> atomic_wg : atomic<u32>;
+
+@group(0) @binding(0)
+var t : texture_2d<f32>;
+@group(0) @binding(1)
+var s : sampler;
+@group(0) @binding(2)
+var s_depth : sampler_comparison;
+@group(0) @binding(3)
+var t_storage : texture_storage_2d<rgba8unorm, write>;
+@group(0) @binding(4)
+var t_depth : texture_depth_2d;
+@group(0) @binding(5)
+var t_multisampled : texture_multisampled_2d<f32>;
+@group(0) @binding(6)
+var t_external : texture_external;
+
+fn foo() -> ${testcase.name} {
+ return ${value};
+}`;
+
+ t.expectCompileResult(testcase.valid, code);
+ });
+
+interface ParamTypeCase {
+ name: string;
+ valid: boolean;
+}
+
+const kFunctionParamTypeCases: Record<string, ParamTypeCase> = {
+  // Constructible types.
+ u32: { name: `u32`, valid: true },
+ i32: { name: `i32`, valid: true },
+ f32: { name: `f32`, valid: true },
+ bool: { name: `bool`, valid: true },
+ f16: { name: `f16`, valid: true },
+ vec2: { name: `vec2u`, valid: true },
+ vec3: { name: `vec3i`, valid: true },
+ vec4: { name: `vec4f`, valid: true },
+ mat2x2: { name: `mat2x2f`, valid: true },
+ mat2x3: { name: `mat2x3f`, valid: true },
+ mat2x4: { name: `mat2x4f`, valid: true },
+ mat3x2: { name: `mat3x2f`, valid: true },
+ mat3x3: { name: `mat3x3f`, valid: true },
+ mat3x4: { name: `mat3x4f`, valid: true },
+ mat4x2: { name: `mat4x2f`, valid: true },
+ mat4x3: { name: `mat4x3f`, valid: true },
+ mat4x4: { name: `mat4x4f`, valid: true },
+ array1: { name: `array<u32, 4>`, valid: true },
+ array2: { name: `array<vec2f, 2>`, valid: true },
+ array3: { name: `array<constructible, 4>`, valid: true },
+ array4: { name: `array<mat2x2f, 4>`, valid: true },
+ array5: { name: `array<bool, 4>`, valid: true },
+ struct1: { name: `constructible`, valid: true },
+ struct2: { name: `struct_with_array`, valid: true },
+
+ // Non-constructible types.
+ runtime_array: { name: `array<u32>`, valid: false },
+ runtime_struct: { name: `runtime_array_struct`, valid: false },
+ override_array: { name: `array<u32, override_size>`, valid: false },
+ atomic_u32: { name: `atomic<u32>`, valid: false },
+ atomic_struct: { name: `atomic_struct`, valid: false },
+
+ // Textures and samplers.
+ texture_sample: { name: `texture_2d<f32>`, valid: true },
+ texture_depth: { name: `texture_depth_2d`, valid: true },
+ texture_multisampled: {
+ name: `texture_multisampled_2d<f32>`,
+ valid: true,
+ },
+ texture_storage: { name: `texture_storage_2d<rgba8unorm, write>`, valid: true },
+ sampler: { name: `sampler`, valid: true },
+ sampler_comparison: { name: `sampler_comparison`, valid: true },
+
+ // Valid pointers.
+ ptr1: { name: `ptr<function, u32>`, valid: true },
+ ptr2: { name: `ptr<function, constructible>`, valid: true },
+ ptr3: { name: `ptr<private, u32>`, valid: true },
+ ptr4: { name: `ptr<private, constructible>`, valid: true },
+
+ // Invalid pointers.
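+  // Core WGSL restricts pointer-typed parameters to the 'function' and 'private'
+  // address spaces; the cases below use other address spaces or are otherwise
+  // malformed pointer types.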
+ ptr5: { name: `ptr<storage, u32>`, valid: false },
+ ptr6: { name: `ptr<storage, u32, read>`, valid: false },
+ ptr7: { name: `ptr<storage, u32, read_write>`, valid: false },
+ ptr8: { name: `ptr<uniform, u32>`, valid: false },
+ ptr9: { name: `ptr<workgroup, u32>`, valid: false },
+ ptr10: { name: `ptr<handle, u32>`, valid: false }, // Can't spell handle address space
+ ptr12: { name: `ptr<not_an_address_space, u32>`, valid: false },
+ ptr13: { name: `ptr<storage>`, valid: false }, // No store type
+ ptr14: { name: `ptr<private,clamp>`, valid: false }, // Invalid store type
+ ptr15: { name: `ptr<private,u32,read>`, valid: false }, // Can't specify access mode
+ ptr16: { name: `ptr<private,u32,write>`, valid: false }, // Can't specify access mode
+ ptr17: { name: `ptr<private,u32,read_write>`, valid: false }, // Can't specify access mode
+ ptrWorkgroupAtomic: { name: `ptr<workgroup, atomic<u32>>`, valid: false },
+ ptrWorkgroupNestedAtomic: { name: `ptr<workgroup, array<atomic<u32>,1>>`, valid: false },
+};
+
+g.test('function_parameter_types')
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#function-restriction')
+ .desc(`Test validation of user-declared function parameter types`)
+ .params(u => u.combine('case', keysOf(kFunctionParamTypeCases)))
+ .beforeAllSubcases(t => {
+ if (kFunctionParamTypeCases[t.params.case].name === 'f16') {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const testcase = kFunctionParamTypeCases[t.params.case];
+ const enable = testcase.name === 'f16' ? 'enable f16;' : '';
+ const code = `
+${enable}
+
+struct runtime_array_struct {
+ arr : array<u32>
+}
+
+struct constructible {
+ a : i32,
+ b : u32,
+ c : f32,
+ d : bool,
+}
+
+struct struct_with_array {
+ a : array<constructible, 4>
+}
+
+fn foo(param : ${testcase.name}) {
+}`;
+
+ t.expectCompileResult(testcase.valid, code);
+ });
+
+interface ParamValueCase {
+ value: string;
+ matches: string[];
+}
+
+const kFunctionParamValueCases: Record<string, ParamValueCase> = {
+ // Values
+ u32_literal: { value: `0u`, matches: ['u32'] },
+ i32_literal: { value: `0i`, matches: ['i32'] },
+ f32_literal: { value: `0f`, matches: ['f32'] },
+ bool_literal: { value: `false`, matches: ['bool'] },
+ abstract_int_literal: { value: `0`, matches: ['u32', 'i32', 'f32', 'f16'] },
+ abstract_float_literal: { value: `0.0`, matches: ['f32', 'f16'] },
+ vec2u_constructor: { value: `vec2u()`, matches: ['vec2'] },
+ vec2i_constructor: { value: `vec2i()`, matches: [] },
+ vec2f_constructor: { value: `vec2f()`, matches: [] },
+ vec2b_constructor: { value: `vec2<bool>()`, matches: [] },
+ vec3u_constructor: { value: `vec3u()`, matches: [] },
+ vec3i_constructor: { value: `vec3i()`, matches: ['vec3'] },
+ vec3f_constructor: { value: `vec3f()`, matches: [] },
+ vec3b_constructor: { value: `vec3<bool>()`, matches: [] },
+ vec4u_constructor: { value: `vec4u()`, matches: [] },
+ vec4i_constructor: { value: `vec4i()`, matches: [] },
+ vec4f_constructor: { value: `vec4f()`, matches: ['vec4'] },
+ vec4b_constructor: { value: `vec4<bool>()`, matches: [] },
+ vec2_abstract_int: { value: `vec2(0,0)`, matches: ['vec2'] },
+ vec2_abstract_float: { value: `vec2(0.0,0)`, matches: [] },
+ vec3_abstract_int: { value: `vec3(0,0,0)`, matches: ['vec3'] },
+ vec3_abstract_float: { value: `vec3(0.0,0,0)`, matches: [] },
+ vec4_abstract_int: { value: `vec4(0,0,0,0)`, matches: ['vec4'] },
+ vec4_abstract_float: { value: `vec4(0.0,0,0,0)`, matches: ['vec4'] },
+ mat2x2_constructor: { value: `mat2x2f()`, matches: ['mat2x2'] },
+ mat2x3_constructor: { value: `mat2x3f()`, matches: ['mat2x3'] },
+ mat2x4_constructor: { value: `mat2x4f()`, matches: ['mat2x4'] },
+ mat3x2_constructor: { value: `mat3x2f()`, matches: ['mat3x2'] },
+ mat3x3_constructor: { value: `mat3x3f()`, matches: ['mat3x3'] },
+ mat3x4_constructor: { value: `mat3x4f()`, matches: ['mat3x4'] },
+ mat4x2_constructor: { value: `mat4x2f()`, matches: ['mat4x2'] },
+ mat4x3_constructor: { value: `mat4x3f()`, matches: ['mat4x3'] },
+ mat4x4_constructor: { value: `mat4x4f()`, matches: ['mat4x4'] },
+ array1_constructor: { value: `array<u32, 4>()`, matches: ['array1'] },
+ array2_constructor: { value: `array<vec2f, 2>()`, matches: ['array2'] },
+ array3_constructor: { value: `array<constructible, 4>()`, matches: ['array3'] },
+ array4_constructor: { value: `array<mat2x2f, 4>()`, matches: ['array4'] },
+ array5_constructor: { value: `array<bool, 4>()`, matches: ['array5'] },
+ struct1_constructor: { value: `constructible()`, matches: ['struct1'] },
+ struct2_constructor: { value: `struct_with_array()`, matches: ['struct2'] },
+
+ // Variable references
+ g_u32: { value: `g_u32`, matches: ['u32'] },
+ g_i32: { value: `g_i32`, matches: ['i32'] },
+ g_f32: { value: `g_f32`, matches: ['f32'] },
+ g_bool: { value: `g_bool`, matches: ['bool'] },
+ g_vec2: { value: `g_vec2`, matches: ['vec2'] },
+ g_vec3: { value: `g_vec3`, matches: ['vec3'] },
+ g_vec4: { value: `g_vec4`, matches: ['vec4'] },
+ g_mat2x2: { value: `g_mat2x2`, matches: ['mat2x2'] },
+ g_mat2x3: { value: `g_mat2x3`, matches: ['mat2x3'] },
+ g_mat2x4: { value: `g_mat2x4`, matches: ['mat2x4'] },
+ g_mat3x2: { value: `g_mat3x2`, matches: ['mat3x2'] },
+ g_mat3x3: { value: `g_mat3x3`, matches: ['mat3x3'] },
+ g_mat3x4: { value: `g_mat3x4`, matches: ['mat3x4'] },
+ g_mat4x2: { value: `g_mat4x2`, matches: ['mat4x2'] },
+ g_mat4x3: { value: `g_mat4x3`, matches: ['mat4x3'] },
+ g_mat4x4: { value: `g_mat4x4`, matches: ['mat4x4'] },
+ g_array1: { value: `g_array1`, matches: ['array1'] },
+ g_array2: { value: `g_array2`, matches: ['array2'] },
+ g_array3: { value: `g_array3`, matches: ['array3'] },
+ g_array4: { value: `g_array4`, matches: ['array4'] },
+ g_array5: { value: `g_array5`, matches: ['array5'] },
+ g_constructible: { value: `g_constructible`, matches: ['struct1'] },
+ g_struct_with_array: { value: `g_struct_with_array`, matches: ['struct2'] },
+ f_u32: { value: `f_u32`, matches: ['u32'] },
+ f_i32: { value: `f_i32`, matches: ['i32'] },
+ f_f32: { value: `f_f32`, matches: ['f32'] },
+ f_bool: { value: `f_bool`, matches: ['bool'] },
+ f_vec2: { value: `f_vec2`, matches: ['vec2'] },
+ f_vec3: { value: `f_vec3`, matches: ['vec3'] },
+ f_vec4: { value: `f_vec4`, matches: ['vec4'] },
+ f_mat2x2: { value: `f_mat2x2`, matches: ['mat2x2'] },
+ f_mat2x3: { value: `f_mat2x3`, matches: ['mat2x3'] },
+ f_mat2x4: { value: `f_mat2x4`, matches: ['mat2x4'] },
+ f_mat3x2: { value: `f_mat3x2`, matches: ['mat3x2'] },
+ f_mat3x3: { value: `f_mat3x3`, matches: ['mat3x3'] },
+ f_mat3x4: { value: `f_mat3x4`, matches: ['mat3x4'] },
+ f_mat4x2: { value: `f_mat4x2`, matches: ['mat4x2'] },
+ f_mat4x3: { value: `f_mat4x3`, matches: ['mat4x3'] },
+ f_mat4x4: { value: `f_mat4x4`, matches: ['mat4x4'] },
+ f_array1: { value: `f_array1`, matches: ['array1'] },
+ f_array2: { value: `f_array2`, matches: ['array2'] },
+ f_array3: { value: `f_array3`, matches: ['array3'] },
+ f_array4: { value: `f_array4`, matches: ['array4'] },
+ f_array5: { value: `f_array5`, matches: ['array5'] },
+ f_constructible: { value: `f_constructible`, matches: ['struct1'] },
+ f_struct_with_array: { value: `f_struct_with_array`, matches: ['struct2'] },
+ g_index_u32: { value: `g_constructible.b`, matches: ['u32'] },
+ g_index_i32: { value: `g_constructible.a`, matches: ['i32'] },
+ g_index_f32: { value: `g_constructible.c`, matches: ['f32'] },
+ g_index_bool: { value: `g_constructible.d`, matches: ['bool'] },
+ f_index_u32: { value: `f_constructible.b`, matches: ['u32'] },
+ f_index_i32: { value: `f_constructible.a`, matches: ['i32'] },
+ f_index_f32: { value: `f_constructible.c`, matches: ['f32'] },
+ f_index_bool: { value: `f_constructible.d`, matches: ['bool'] },
+ g_array_index_u32: { value: `g_struct_with_array.a[0].b`, matches: ['u32'] },
+ g_array_index_i32: { value: `g_struct_with_array.a[1].a`, matches: ['i32'] },
+ g_array_index_f32: { value: `g_struct_with_array.a[2].c`, matches: ['f32'] },
+ g_array_index_bool: { value: `g_struct_with_array.a[3].d`, matches: ['bool'] },
+ f_array_index_u32: { value: `f_struct_with_array.a[0].b`, matches: ['u32'] },
+ f_array_index_i32: { value: `f_struct_with_array.a[1].a`, matches: ['i32'] },
+ f_array_index_f32: { value: `f_struct_with_array.a[2].c`, matches: ['f32'] },
+ f_array_index_bool: { value: `f_struct_with_array.a[3].d`, matches: ['bool'] },
+
+ // Textures and samplers
+ texture_sample: { value: `t`, matches: ['texture_sample'] },
+ texture_depth: { value: `t_depth`, matches: ['texture_depth'] },
+ texture_multisampled: { value: `t_multisampled`, matches: ['texture_multisampled'] },
+ texture_storage: { value: `t_storage`, matches: ['texture_storage'] },
+ texture_external: { value: `t_external`, matches: ['texture_external'] },
+ sampler: { value: `s`, matches: ['sampler'] },
+ sampler_comparison: { value: `s_depth`, matches: ['sampler_comparison'] },
+
+ // Pointers
+ ptr1: { value: `&f_u32`, matches: ['ptr1'] },
+ ptr2: { value: `&f_constructible`, matches: ['ptr2'] },
+ ptr3: { value: `&g_u32`, matches: ['ptr3'] },
+ ptr4: { value: `&g_constructible`, matches: ['ptr4'] },
+
+ // Invalid pointers
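+  // In core WGSL a pointer argument must be the address of an entire function- or
+  // private-scope variable, so pointers into composites and pointers to
+  // storage/uniform variables are not expected to match any parameter type above.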
+ ptr5: { value: `&f_constructible.b`, matches: [] },
+ ptr6: { value: `&g_constructible.b`, matches: [] },
+ ptr7: { value: `&f_struct_with_array.a[1].b`, matches: [] },
+ ptr8: { value: `&g_struct_with_array.a[2]`, matches: [] },
+ ptr9: { value: `&ro_constructible.b`, matches: [] },
+ ptr10: { value: `&rw_constructible`, matches: [] },
+ ptr11: { value: `&uniform_constructible`, matches: [] },
+ ptr12: { value: `&ro_constructible`, matches: [] },
+};
+
+function parameterMatches(decl: string, matches: string[]): boolean {
+ for (const val of matches) {
+ if (decl === val) {
+ return true;
+ }
+ }
+ return false;
+}
+
+g.test('function_parameter_matching')
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#function-restriction')
+ .desc(
+    `Test that call argument types match the declared parameter types of user-declared functions`
+ )
+ .params(u =>
+ u
+ .combine('decl', keysOf(kFunctionParamTypeCases))
+ .combine('arg', keysOf(kFunctionParamValueCases))
+ .filter(u => {
+ return kFunctionParamTypeCases[u.decl].valid;
+ })
+ )
+ .beforeAllSubcases(t => {
+ if (kFunctionParamTypeCases[t.params.decl].name === 'f16') {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const param = kFunctionParamTypeCases[t.params.decl];
+ const arg = kFunctionParamValueCases[t.params.arg];
+ const enable = param.name === 'f16' ? 'enable f16;' : '';
+ const code = `
+${enable}
+
+struct runtime_array_struct {
+ arr : array<u32>
+}
+
+struct constructible {
+ a : i32,
+ b : u32,
+ c : f32,
+ d : bool,
+}
+
+struct host_shareable {
+ a : i32,
+ b : u32,
+ c : f32,
+}
+
+struct struct_with_array {
+ a : array<constructible, 4>
+}
+@group(0) @binding(0)
+var t : texture_2d<f32>;
+@group(0) @binding(1)
+var s : sampler;
+@group(0) @binding(2)
+var s_depth : sampler_comparison;
+@group(0) @binding(3)
+var t_storage : texture_storage_2d<rgba8unorm, write>;
+@group(0) @binding(4)
+var t_depth : texture_depth_2d;
+@group(0) @binding(5)
+var t_multisampled : texture_multisampled_2d<f32>;
+@group(0) @binding(6)
+var t_external : texture_external;
+
+@group(1) @binding(0)
+var<storage> ro_constructible : host_shareable;
+@group(1) @binding(1)
+var<storage, read_write> rw_constructible : host_shareable;
+@group(1) @binding(2)
+var<uniform> uniform_constructible : host_shareable;
+
+fn bar(param : ${param.name}) { }
+
+var<private> g_u32 : u32;
+var<private> g_i32 : i32;
+var<private> g_f32 : f32;
+var<private> g_bool : bool;
+var<private> g_vec2 : vec2u;
+var<private> g_vec3 : vec3i;
+var<private> g_vec4 : vec4f;
+var<private> g_mat2x2 : mat2x2f;
+var<private> g_mat2x3 : mat2x3f;
+var<private> g_mat2x4 : mat2x4f;
+var<private> g_mat3x2 : mat3x2f;
+var<private> g_mat3x3 : mat3x3f;
+var<private> g_mat3x4 : mat3x4f;
+var<private> g_mat4x2 : mat4x2f;
+var<private> g_mat4x3 : mat4x3f;
+var<private> g_mat4x4 : mat4x4f;
+var<private> g_array1 : array<u32, 4>;
+var<private> g_array2 : array<vec2f, 2>;
+var<private> g_array3 : array<constructible, 4>;
+var<private> g_array4 : array<mat2x2f, 4>;
+var<private> g_array5 : array<bool, 4>;
+var<private> g_constructible : constructible;
+var<private> g_struct_with_array : struct_with_array;
+
+fn foo() {
+ var f_u32 : u32;
+ var f_i32 : i32;
+ var f_f32 : f32;
+ var f_bool : bool;
+ var f_vec2 : vec2u;
+ var f_vec3 : vec3i;
+ var f_vec4 : vec4f;
+ var f_mat2x2 : mat2x2f;
+ var f_mat2x3 : mat2x3f;
+ var f_mat2x4 : mat2x4f;
+ var f_mat3x2 : mat3x2f;
+ var f_mat3x3 : mat3x3f;
+ var f_mat3x4 : mat3x4f;
+ var f_mat4x2 : mat4x2f;
+ var f_mat4x3 : mat4x3f;
+ var f_mat4x4 : mat4x4f;
+ var f_array1 : array<u32, 4>;
+ var f_array2 : array<vec2f, 2>;
+ var f_array3 : array<constructible, 4>;
+ var f_array4 : array<mat2x2f, 4>;
+ var f_array5 : array<bool, 4>;
+ var f_constructible : constructible;
+ var f_struct_with_array : struct_with_array;
+
+ bar(${arg.value});
+}
+`;
+
+ t.expectCompileResult(parameterMatches(t.params.decl, arg.matches), code);
+ });
+
+g.test('no_direct_recursion')
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#function-restriction')
+ .desc(`Test that functions cannot be directly recursive`)
+ .fn(t => {
+ const code = `
+fn foo() {
+ foo();
+}`;
+
+ t.expectCompileResult(false, code);
+ });
+
+g.test('no_indirect_recursion')
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#function-restriction')
+ .desc(`Test that functions cannot be indirectly recursive`)
+ .fn(t => {
+ const code = `
+fn bar() {
+ foo();
+}
+fn foo() {
+ bar();
+}`;
+
+ t.expectCompileResult(false, code);
+ });
+
+g.test('param_names_must_differ')
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#function-declaration-sec')
+ .desc(`Test that function parameters must have different names`)
+ .params(u => u.combine('p1', ['a', 'b', 'c'] as const).combine('p2', ['a', 'b', 'c'] as const))
+ .fn(t => {
+ const code = `fn foo(${t.params.p1} : u32, ${t.params.p2} : f32) { }`;
+ t.expectCompileResult(t.params.p1 !== t.params.p2, code);
+ });
+
+const kParamUseCases: Record<string, string> = {
+ body: `fn foo(param : u32) {
+ let tmp = param;
+ }`,
+ var: `var<private> v : u32 = param;
+ fn foo(param : u32) { }`,
+ const: `const c : u32 = param;
+ fn foo(param : u32) { }`,
+ override: `override o : u32 = param;
+ fn foo(param : u32) { }`,
+ function: `fn bar() { let tmp = param; }
+ fn foo(param : u32) { }`,
+};
+
+g.test('param_scope_is_function_body')
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#function-declaration-sec')
+ .desc(`Test that function parameters are only in scope in the function body`)
+ .params(u => u.combine('use', keysOf(kParamUseCases)))
+ .fn(t => {
+ t.expectCompileResult(t.params.use === 'body', kParamUseCases[t.params.use]);
+ });
+
+g.test('param_number_matches_call')
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#function-calls')
+  .desc(`Test that function calls pass the same number of arguments as the function has parameters`)
+ .params(u =>
+ u
+ .combine('num_args', [0, 1, 2, 3, 4, 255] as const)
+ .combine('num_params', [0, 1, 2, 3, 4, 255] as const)
+ )
+ .fn(t => {
+ let code = `
+ fn bar(`;
+ for (let i = 0; i < t.params.num_params; i++) {
+ code += `p${i} : u32,`;
+ }
+ code += `) { }\n`;
+ code += `fn foo() {\nbar(`;
+ for (let i = 0; i < t.params.num_args; i++) {
+ code += `0,`;
+ }
+ code += `);\n}`;
+ t.expectCompileResult(t.params.num_args === t.params.num_params, code);
+ });
+
+const kParamsTypes = ['u32', 'i32', 'f32'];
+
+interface ArgValue {
+ value: string;
+ matches: string[];
+}
+
+const kArgValues: Record<string, ArgValue> = {
+ abstract_int: {
+ value: '0',
+ matches: ['u32', 'i32', 'f32'],
+ },
+ abstract_float: {
+ value: '0.0',
+ matches: ['f32'],
+ },
+ unsigned_int: {
+ value: '0u',
+ matches: ['u32'],
+ },
+ signed_int: {
+ value: '0i',
+ matches: ['i32'],
+ },
+ float: {
+ value: '0f',
+ matches: ['f32'],
+ },
+};
+
+function checkArgTypeMatch(param_type: string, arg_matches: string[]): boolean {
+ for (const match of arg_matches) {
+ if (match === param_type) {
+ return true;
+ }
+ }
+ return false;
+}
+
+g.test('call_arg_types_match_params')
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#function-calls')
+  .desc(`Test that each argument type matches the corresponding parameter type, in order`)
+ .params(u =>
+ u
+ .combine('num_args', [1, 2, 3] as const)
+ .combine('p1_type', kParamsTypes)
+ .combine('p2_type', kParamsTypes)
+ .combine('p3_type', kParamsTypes)
+ .combine('arg1_value', keysOf(kArgValues))
+ .combine('arg2_value', keysOf(kArgValues))
+ .combine('arg3_value', keysOf(kArgValues))
+ )
+ .fn(t => {
+ let code = `
+ fn bar(`;
+ for (let i = 0; i < t.params.num_args; i++) {
+ switch (i) {
+ case 0:
+ default: {
+ code += `p${i} : ${t.params.p1_type},`;
+ break;
+ }
+ case 1: {
+ code += `p${i} : ${t.params.p2_type},`;
+ break;
+ }
+ case 2: {
+ code += `p${i} : ${t.params.p3_type},`;
+ break;
+ }
+ }
+ }
+ code += `) { }
+ fn foo() {
+ bar(`;
+ for (let i = 0; i < t.params.num_args; i++) {
+ switch (i) {
+ case 0:
+ default: {
+ code += `${kArgValues[t.params.arg1_value].value},`;
+ break;
+ }
+ case 1: {
+ code += `${kArgValues[t.params.arg2_value].value},`;
+ break;
+ }
+ case 2: {
+ code += `${kArgValues[t.params.arg3_value].value},`;
+ break;
+ }
+ }
+ }
+ code += `);\n}`;
+
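+    // The call is valid only if each argument's type (including feasible conversions
+    // of abstract numerics) matches the corresponding parameter type, in order.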
+ let res = checkArgTypeMatch(t.params.p1_type, kArgValues[t.params.arg1_value].matches);
+ if (res && t.params.num_args > 1) {
+ res = checkArgTypeMatch(t.params.p2_type, kArgValues[t.params.arg2_value].matches);
+ }
+ if (res && t.params.num_args > 2) {
+ res = checkArgTypeMatch(t.params.p3_type, kArgValues[t.params.arg3_value].matches);
+ }
+ t.expectCompileResult(res, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/align.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/align.spec.ts
new file mode 100644
index 0000000000..d912da6404
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/align.spec.ts
@@ -0,0 +1,341 @@
+export const description = `Validation tests for @align`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kTests = {
+ blank: {
+ src: '',
+ pass: true,
+ },
+ one: {
+ src: '@align(1)',
+ pass: true,
+ },
+ four_a: {
+ src: '@align(4)',
+ pass: true,
+ },
+ four_i: {
+ src: '@align(4i)',
+ pass: true,
+ },
+ four_u: {
+ src: '@align(4u)',
+ pass: true,
+ },
+ four_hex: {
+ src: '@align(0x4)',
+ pass: true,
+ },
+ trailing_comma: {
+ src: '@align(4,)',
+ pass: true,
+ },
+ const_u: {
+ src: '@align(u_val)',
+ pass: true,
+ },
+ const_i: {
+ src: '@align(i_val)',
+ pass: true,
+ },
+ const_expr: {
+ src: '@align(i_val + 4 - 6)',
+ pass: true,
+ },
+ large: {
+ src: '@align(1073741824)',
+ pass: true,
+ },
+ tabs: {
+ src: '@\talign\t(4)',
+ pass: true,
+ },
+ comment: {
+ src: '@/*comment*/align/*comment*/(4)',
+ pass: true,
+ },
+ misspelling: {
+ src: '@malign(4)',
+ pass: false,
+ },
+ empty: {
+ src: '@align()',
+ pass: false,
+ },
+ missing_left_paren: {
+ src: '@align 4)',
+ pass: false,
+ },
+ missing_right_paren: {
+ src: '@align(4',
+ pass: false,
+ },
+ multiple_values: {
+ src: '@align(4, 2)',
+ pass: false,
+ },
+ non_power_two: {
+ src: '@align(3)',
+ pass: false,
+ },
+ const_f: {
+ src: '@align(f_val)',
+ pass: false,
+ },
+ one_f: {
+ src: '@align(1.0)',
+ pass: false,
+ },
+ four_f: {
+ src: '@align(4f)',
+ pass: false,
+ },
+ four_h: {
+ src: '@align(4h)',
+ pass: false,
+ },
+ no_params: {
+ src: '@align',
+ pass: false,
+ },
+ zero_a: {
+ src: '@align(0)',
+ pass: false,
+ },
+ negative: {
+ src: '@align(-4)',
+ pass: false,
+ },
+ large_no_power_two: {
+ src: '@align(2147483646)',
+ pass: false,
+ },
+ larger_than_max_i32: {
+ src: '@align(2147483648)',
+ pass: false,
+ },
+};
+
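+// Per the cases above, the @align argument must be a positive, power-of-two integer
+// const-expression; floating-point values, zero, and negative values are rejected.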
+g.test('parsing')
+ .desc(`Test that @align is parsed correctly.`)
+ .params(u => u.combine('align', keysOf(kTests)))
+ .fn(t => {
+ const src = kTests[t.params.align].src;
+ const code = `
+const i_val: i32 = 4;
+const u_val: u32 = 4;
+const f_val: f32 = 4.2;
+struct B {
+ ${src} a: i32,
+}
+
+@group(0) @binding(0)
+var<uniform> uniform_buffer: B;
+
+@fragment
+fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(.4, .2, .3, .1);
+}`;
+ t.expectCompileResult(kTests[t.params.align].pass, code);
+ });
+
+g.test('required_alignment')
+  .desc('Test that an @align with an invalid size is an error')
+ .params(u =>
+ u
+ .combine('address_space', ['storage', 'uniform'])
+ .combine('align', [1, 2, 'alignment', 32])
+ .combine('type', [
+ { name: 'i32', storage: 4, uniform: 4 },
+ { name: 'u32', storage: 4, uniform: 4 },
+ { name: 'f32', storage: 4, uniform: 4 },
+ { name: 'f16', storage: 2, uniform: 2 },
+ { name: 'atomic<i32>', storage: 4, uniform: 4 },
+ { name: 'vec2<i32>', storage: 8, uniform: 8 },
+ { name: 'vec2<f16>', storage: 4, uniform: 4 },
+ { name: 'vec3<u32>', storage: 16, uniform: 16 },
+ { name: 'vec3<f16>', storage: 8, uniform: 8 },
+ { name: 'vec4<f32>', storage: 16, uniform: 16 },
+ { name: 'vec4<f16>', storage: 8, uniform: 8 },
+ { name: 'mat2x2<f32>', storage: 8, uniform: 8 },
+ { name: 'mat3x2<f32>', storage: 8, uniform: 8 },
+ { name: 'mat4x2<f32>', storage: 8, uniform: 8 },
+ { name: 'mat2x2<f16>', storage: 4, uniform: 4 },
+ { name: 'mat3x2<f16>', storage: 4, uniform: 4 },
+ { name: 'mat4x2<f16>', storage: 4, uniform: 4 },
+ { name: 'mat2x3<f32>', storage: 16, uniform: 16 },
+ { name: 'mat3x3<f32>', storage: 16, uniform: 16 },
+ { name: 'mat4x3<f32>', storage: 16, uniform: 16 },
+ { name: 'mat2x3<f16>', storage: 8, uniform: 8 },
+ { name: 'mat3x3<f16>', storage: 8, uniform: 8 },
+ { name: 'mat4x3<f16>', storage: 8, uniform: 8 },
+ { name: 'mat2x4<f32>', storage: 16, uniform: 16 },
+ { name: 'mat3x4<f32>', storage: 16, uniform: 16 },
+ { name: 'mat4x4<f32>', storage: 16, uniform: 16 },
+ { name: 'mat2x4<f16>', storage: 8, uniform: 8 },
+ { name: 'mat3x4<f16>', storage: 8, uniform: 8 },
+ { name: 'mat4x4<f16>', storage: 8, uniform: 8 },
+ { name: 'array<vec2<i32>, 2>', storage: 8, uniform: 16 },
+ { name: 'array<vec4<i32>, 2>', storage: 8, uniform: 16 },
+ { name: 'S', storage: 8, uniform: 16 },
+ ])
+ .beginSubcases()
+ )
+ .beforeAllSubcases(t => {
+ if (t.params.type.name.includes('f16')) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+    // While this would fail validation, it doesn't fail for any reason related to alignment:
+    // atomics are not allowed in the uniform address space, as they have to be read_write.
+ if (t.params.address_space === 'uniform' && t.params.type.name.startsWith('atomic')) {
+ t.skip('No atomics in uniform address space');
+ }
+
+ let code = '';
+ if (t.params.type.name.includes('f16')) {
+ code += 'enable f16;\n';
+ }
+
+    // When testing the struct case, generate the struct S definition
+ if (t.params.type.name === 'S') {
+ code += `struct S {
+ a: mat4x2<f32>, // Align 8
+ b: array<vec${
+ t.params.address_space === 'storage' ? 2 : 4
+ }<i32>, 2>, // Storage align 8, uniform 16
+ }
+ `;
+ }
+
+ let align = t.params.align;
+ if (t.params.align === 'alignment') {
+ // Alignment value listed in the spec
+ if (t.params.address_space === 'storage') {
+ align = `${t.params.type.storage}`;
+ } else {
+ align = `${t.params.type.uniform}`;
+ }
+ }
+
+ let address_space = 'uniform';
+ if (t.params.address_space === 'storage') {
+ // atomics require read_write, not just the default of read
+ address_space = 'storage, read_write';
+ }
+
+ code += `struct MyStruct {
+ @align(${align}) a: ${t.params.type.name},
+ }
+
+ @group(0) @binding(0)
+ var<${address_space}> a : MyStruct;`;
+
+ code += `
+ @fragment
+ fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(.4, .2, .3, .1);
+ }`;
+
+    // An array of `vec2` in uniform will not validate because, while the alignment of the array
+    // itself is fine, the `vec2` elements inside the array have the wrong alignment. The uniform
+    // address space requires those elements to be 16-byte aligned, which can only be achieved by
+    // using `vec4` instead.
+ const fails =
+ t.params.address_space === 'uniform' && t.params.type.name.startsWith('array<vec2');
+
+ t.expectCompileResult(!fails, code);
+ });
+
+g.test('placement')
+ .desc('Tests the locations @align is allowed to appear')
+ .params(u =>
+ u
+ .combine('scope', [
+ 'private-var',
+ 'storage-var',
+ 'struct-member',
+ 'fn-decl',
+ 'fn-param',
+ 'fn-var',
+ 'fn-return',
+ 'while-stmt',
+ undefined,
+ ] as const)
+ .combine('attribute', [
+ {
+ 'private-var': false,
+ 'storage-var': false,
+ 'struct-member': true,
+ 'fn-decl': false,
+ 'fn-param': false,
+ 'fn-var': false,
+ 'fn-return': false,
+ 'while-stmt': false,
+ },
+ ])
+ .beginSubcases()
+ )
+ .fn(t => {
+ const scope = t.params.scope;
+
+ const attr = '@align(32)';
+ const code = `
+ ${scope === 'private-var' ? attr : ''}
+ var<private> priv_var : i32;
+
+ ${scope === 'storage-var' ? attr : ''}
+ @group(0) @binding(0)
+ var<storage> stor_var : i32;
+
+ struct A {
+ ${scope === 'struct-member' ? attr : ''}
+ a : i32,
+ }
+
+ @vertex
+ ${scope === 'fn-decl' ? attr : ''}
+ fn f(
+ ${scope === 'fn-param' ? attr : ''}
+ @location(0) b : i32,
+ ) -> ${scope === 'fn-return' ? attr : ''} @builtin(position) vec4f {
+ ${scope === 'fn-var' ? attr : ''}
+ var<function> func_v : i32;
+
+ ${scope === 'while-stmt' ? attr : ''}
+ while false {}
+
+ return vec4(1, 1, 1, 1);
+ }
+ `;
+
+ t.expectCompileResult(scope === undefined || t.params.attribute[scope], code);
+ });
+
+g.test('multi_align')
+  .desc('Tests that specifying @align multiple times is an error')
+ .params(u => u.combine('multi', [true, false]))
+ .fn(t => {
+ let code = `struct A {
+ @align(128) `;
+
+ if (t.params.multi === true) {
+ code += '@align(128) ';
+ }
+
+ code += `a : i32,
+ }
+
+ @fragment
+ fn main() -> @location(0) vec4<f32> {
+ return vec4(1., 1., 1., 1.);
+ }`;
+
+ t.expectCompileResult(!t.params.multi, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/attribute.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/attribute.spec.ts
new file mode 100644
index 0000000000..5e5fb18d3c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/attribute.spec.ts
@@ -0,0 +1,87 @@
+export const description = `Validation tests for attributes`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kPossibleValues = {
+ val: '32',
+ expr: '30 + 2',
+ override: 'a_override',
+ user_func: 'a_func()',
+ const_func: 'min(4, 8)',
+ const: 'a_const',
+};
+const kAttributeUsage = {
+ align: '@align($val)',
+ binding: '@binding($val) @group(0)',
+ group: '@binding(1) @group($val)',
+ id: '@id($val)',
+ location: '@location($val)',
+ size: '@size($val)',
+ workgroup_size: '@workgroup_size($val, $val, $val)',
+};
+const kAllowedUsages = {
+ align: ['val', 'expr', 'const', 'const_func'],
+ binding: ['val', 'expr', 'const', 'const_func'],
+ group: ['val', 'expr', 'const', 'const_func'],
+ id: ['val', 'expr', 'const', 'const_func'],
+ location: ['val', 'expr', 'const', 'const_func'],
+ size: ['val', 'expr', 'const', 'const_func'],
+ workgroup_size: ['val', 'expr', 'const', 'const_func', 'override'],
+};
+
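+// Per the tables above, these attributes accept integer const-expressions (literals,
+// 'const' declarations, and const-evaluable calls such as min()); only
+// @workgroup_size additionally accepts an override declaration, and calls to
+// user-defined functions are never allowed.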
+g.test('expressions')
+ .desc(`Tests attributes which allow expressions`)
+ .params(u =>
+ u.combine('value', keysOf(kPossibleValues)).combine('attribute', keysOf(kAllowedUsages))
+ )
+ .fn(t => {
+ const attributes = {
+ align: '',
+ binding: '@binding(0) @group(0)',
+ group: '@binding(1) @group(1)',
+ id: '@id(2)',
+ location: '@location(0)',
+ size: '',
+ workgroup_size: '@workgroup_size(1)',
+ };
+
+ const val = kPossibleValues[t.params.value];
+ attributes[t.params.attribute] = kAttributeUsage[t.params.attribute].replace(/(\$val)/g, val);
+
+ const code = `
+fn a_func() -> i32 {
+ return 4;
+}
+
+const a_const = -2 + 10;
+override a_override: i32 = 2;
+
+${attributes.id} override my_id: i32 = 4;
+
+struct B {
+ ${attributes.align} ${attributes.size} a: i32,
+}
+
+${attributes.binding}
+var<uniform> uniform_buffer_1: B;
+
+${attributes.group}
+var<uniform> uniform_buffer_2: B;
+
+@fragment
+fn main() -> ${attributes.location} vec4<f32> {
+ return vec4<f32>(.4, .2, .3, .1);
+}
+
+@compute
+${attributes.workgroup_size}
+fn compute_main() {}
+`;
+
+ const pass = kAllowedUsages[t.params.attribute].includes(t.params.value);
+ t.expectCompileResult(pass, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/binary_ops.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/binary_ops.spec.ts
new file mode 100644
index 0000000000..3c82033924
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/binary_ops.spec.ts
@@ -0,0 +1,89 @@
+export const description = `Validation tests for binary ops`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kTests = {
+ and_bool_literal_bool_literal: {
+ src: `let a = true & true;`,
+ pass: true,
+ },
+ and_bool_expr_bool_expr: {
+ src: `let a = (1 == 2) & (3 == 4);`,
+ pass: true,
+ },
+ and_bool_literal_bool_expr: {
+ src: `let a = true & (1 == 2);`,
+ pass: true,
+ },
+ and_bool_expr_bool_literal: {
+ src: `let a = (1 == 2) & true;`,
+ pass: true,
+ },
+ and_bool_literal_int_literal: {
+ src: `let a = true & 1;`,
+ pass: false,
+ },
+ and_int_literal_bool_literal: {
+ src: `let a = 1 & true;`,
+ pass: false,
+ },
+ and_bool_expr_int_literal: {
+ src: `let a = (1 == 2) & 1;`,
+ pass: false,
+ },
+ and_int_literal_bool_expr: {
+ src: `let a = 1 & (1 == 2);`,
+ pass: false,
+ },
+
+ or_bool_literal_bool_literal: {
+ src: `let a = true | true;`,
+ pass: true,
+ },
+ or_bool_expr_bool_expr: {
+ src: `let a = (1 == 2) | (3 == 4);`,
+ pass: true,
+ },
+ or_bool_literal_bool_expr: {
+ src: `let a = true | (1 == 2);`,
+ pass: true,
+ },
+ or_bool_expr_bool_literal: {
+ src: `let a = (1 == 2) | true;`,
+ pass: true,
+ },
+ or_bool_literal_int_literal: {
+ src: `let a = true | 1;`,
+ pass: false,
+ },
+ or_int_literal_bool_literal: {
+ src: `let a = 1 | true;`,
+ pass: false,
+ },
+ or_bool_expr_int_literal: {
+ src: `let a = (1 == 2) | 1;`,
+ pass: false,
+ },
+ or_int_literal_bool_expr: {
+ src: `let a = 1 | (1 == 2);`,
+ pass: false,
+ },
+};
+
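+// In WGSL, '&' and '|' require both operands to have the same type: either both
+// booleans (non-short-circuiting logical and/or) or matching integer types, so
+// mixing a bool with an integer literal is an error.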
+g.test('all')
+ .desc('Test that binary operators are validated correctly')
+ .params(u => u.combine('stmt', keysOf(kTests)))
+ .fn(t => {
+ const code = `
+@vertex
+fn vtx() -> @builtin(position) vec4f {
+ ${kTests[t.params.stmt].src}
+ return vec4f(1);
+}
+ `;
+ t.expectCompileResult(kTests[t.params.stmt].pass, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/blankspace.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/blankspace.spec.ts
new file mode 100644
index 0000000000..92440d0210
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/blankspace.spec.ts
@@ -0,0 +1,65 @@
+export const description = `Validation tests for blankspace handling`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+g.test('null_characters')
+ .desc(`Test that WGSL source containing a null character is rejected.`)
+ .params(u =>
+ u
+ .combine('contains_null', [true, false])
+ .combine('placement', ['comment', 'delimiter', 'eol'])
+ .beginSubcases()
+ )
+ .fn(t => {
+ let code = '';
+ if (t.params.placement === 'comment') {
+ code = `// Here is a ${t.params.contains_null ? '\0' : 'Z'} character`;
+ } else if (t.params.placement === 'delimiter') {
+ code = `const${t.params.contains_null ? '\0' : ' '}name : i32 = 0;`;
+ } else if (t.params.placement === 'eol') {
+ code = `const name : i32 = 0;${t.params.contains_null ? '\0' : ''}`;
+ }
+ t.expectCompileResult(!t.params.contains_null, code);
+ });
+
+g.test('blankspace')
+ .desc(`Test that all blankspace characters act as delimiters.`)
+ .params(u =>
+ u
+ .combine('blankspace', [
+ ['\u0020', 'space'],
+ ['\u0009', 'horizontal_tab'],
+ ['\u000a', 'line_feed'],
+ ['\u000b', 'vertical_tab'],
+ ['\u000c', 'form_feed'],
+ ['\u000d', 'carriage_return'],
+ ['\u0085', 'next_line'],
+ ['\u200e', 'left_to_right_mark'],
+ ['\u200f', 'right_to_left_mark'],
+ ['\u2028', 'line_separator'],
+ ['\u2029', 'paragraph_separator'],
+ ])
+ .beginSubcases()
+ )
+ .fn(t => {
+ const code = `const${t.params.blankspace[0]}ident : i32 = 0;`;
+ t.expectCompileResult(true, code);
+ });
+
+g.test('bom')
+ .desc(
+ `Tests that including a BOM causes a shader compile error.
+
+Note, per RFC 3629, for protocols that forbid the use of U+FEFF as a signature, the BOM is
+treated as a "ZERO WIDTH NO-BREAK SPACE". "ZERO WIDTH NO-BREAK SPACE" is not a valid WGSL
+blankspace code point, so the BOM results in a shader compilation error.
+ `
+ )
+ .params(u => u.combine('include_bom', [true, false]))
+ .fn(t => {
+ const code = `${t.params.include_bom ? '\uFEFF' : ''}const name : i32 = 0;`;
+ t.expectCompileResult(!t.params.include_bom, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/break.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/break.spec.ts
new file mode 100644
index 0000000000..7c0f067140
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/break.spec.ts
@@ -0,0 +1,84 @@
+export const description = `Validation tests for break`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kTests = {
+ loop_break: {
+ src: 'loop { break; }',
+ pass: true,
+ },
+ loop_if_break: {
+ src: 'loop { if true { break; } }',
+ pass: true,
+ },
+ continuing_break_if: {
+ src: 'loop { continuing { break if (true); } }',
+ pass: true,
+ },
+ while_break: {
+ src: 'while true { break; }',
+ pass: true,
+ },
+ while_if_break: {
+ src: 'while true { if true { break; } }',
+ pass: true,
+ },
+ for_break: {
+ src: 'for (;;) { break; }',
+ pass: true,
+ },
+ for_if_break: {
+ src: 'for (;;) { if true { break; } }',
+ pass: true,
+ },
+ switch_case_break: {
+ src: 'switch(1) { default: { break; } }',
+ pass: true,
+ },
+ switch_case_if_break: {
+ src: 'switch(1) { default: { if true { break; } } }',
+ pass: true,
+ },
+ break: {
+ src: 'break;',
+ pass: false,
+ },
+ return_break: {
+ src: 'return break;',
+ pass: false,
+ },
+ if_break: {
+ src: 'if true { break; }',
+ pass: false,
+ },
+ continuing_break: {
+ src: 'loop { continuing { break; } }',
+ pass: false,
+ },
+ continuing_if_break: {
+ src: 'loop { continuing { if (true) { break; } } }',
+ pass: false,
+ },
+ switch_break: {
+ src: 'switch(1) { break; }',
+ pass: false,
+ },
+};
+
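+// Per the cases above, 'break' is only valid within the body of a loop, while, for,
+// or switch case (possibly nested inside ifs); a continuing block only admits
+// 'break if', and a break placed directly in a switch body outside a case is
+// rejected.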
+g.test('placement')
+ .desc('Test that break placement is validated correctly')
+ .params(u => u.combine('stmt', keysOf(kTests)))
+ .fn(t => {
+ const code = `
+@vertex
+fn vtx() -> @builtin(position) vec4f {
+ ${kTests[t.params.stmt].src}
+ return vec4f(1);
+}
+ `;
+ t.expectCompileResult(kTests[t.params.stmt].pass, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/builtin.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/builtin.spec.ts
new file mode 100644
index 0000000000..7ef5d672a2
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/builtin.spec.ts
@@ -0,0 +1,144 @@
+export const description = `Validation tests for @builtin`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kTests = {
+ pos: {
+ src: `@builtin(position)`,
+ pass: true,
+ },
+ trailing_comma: {
+ src: `@builtin(position,)`,
+ pass: true,
+ },
+ newline_in_attr: {
+ src: `@ \n builtin(position)`,
+ pass: true,
+ },
+ whitespace_in_attr: {
+ src: `@/* comment */builtin/* comment */\n\n(\t/*comment*/position/*comment*/)`,
+ pass: true,
+ },
+ invalid_name: {
+ src: `@abuiltin(position)`,
+ pass: false,
+ },
+ no_params: {
+ src: `@builtin`,
+ pass: false,
+ },
+ missing_param: {
+ src: `@builtin()`,
+ pass: false,
+ },
+ missing_parens: {
+ src: `@builtin position`,
+ pass: false,
+ },
+ missing_lparen: {
+ src: `@builtin position)`,
+ pass: false,
+ },
+ missing_rparen: {
+ src: `@builtin(position`,
+ pass: false,
+ },
+ multiple_params: {
+ src: `@builtin(position, frag_depth)`,
+ pass: false,
+ },
+ ident_param: {
+ src: `@builtin(identifier)`,
+ pass: false,
+ },
+ number_param: {
+ src: `@builtin(2)`,
+ pass: false,
+ },
+};
+
+g.test('parse')
+ .desc(`Test that @builtin is parsed correctly.`)
+ .params(u => u.combine('builtin', keysOf(kTests)))
+ .fn(t => {
+ const src = kTests[t.params.builtin].src;
+ const code = `
+@vertex
+fn main() -> ${src} vec4<f32> {
+ return vec4<f32>(.4, .2, .3, .1);
+}`;
+ t.expectCompileResult(kTests[t.params.builtin].pass, code);
+ });
+
+g.test('placement')
+ .desc('Tests the locations @builtin is allowed to appear')
+ .params(u =>
+ u
+ .combine('scope', [
+ // The fn-param and fn-ret are part of the shader_io/builtins tests
+ 'private-var',
+ 'storage-var',
+ 'struct-member',
+ 'non-ep-param',
+ 'non-ep-ret',
+ 'fn-decl',
+ 'fn-var',
+ 'while-stmt',
+ undefined,
+ ] as const)
+ .combine('attribute', [
+ {
+ 'private-var': false,
+ 'storage-var': false,
+ 'struct-member': true,
+ 'non-ep-param': false,
+ 'non-ep-ret': false,
+ 'fn-decl': false,
+ 'fn-var': false,
+ 'fn-return': false,
+ 'while-stmt': false,
+ },
+ ])
+ .beginSubcases()
+ )
+ .fn(t => {
+ const scope = t.params.scope;
+
+ const attr = '@builtin(vertex_index)';
+ const code = `
+ ${scope === 'private-var' ? attr : ''}
+ var<private> priv_var : u32;
+
+ ${scope === 'storage-var' ? attr : ''}
+ @group(0) @binding(0)
+ var<storage> stor_var : u32;
+
+ struct A {
+ ${scope === 'struct-member' ? attr : ''}
+ a : u32,
+ }
+
+ fn v(${scope === 'non-ep-param' ? attr : ''} i : u32) ->
+ ${scope === 'non-ep-ret' ? attr : ''} u32 { return 1; }
+
+ @vertex
+ ${scope === 'fn-decl' ? attr : ''}
+ fn f(
+ @location(0) b : u32,
+ ) -> @builtin(position) vec4f {
+ ${scope === 'fn-var' ? attr : ''}
+ var<function> func_v : u32;
+
+ ${scope === 'while-stmt' ? attr : ''}
+ while false {}
+
+ return vec4(1, 1, 1, 1);
+ }
+ `;
+
+ t.expectCompileResult(scope === undefined || t.params.attribute[scope], code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/comments.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/comments.spec.ts
new file mode 100644
index 0000000000..af49c49619
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/comments.spec.ts
@@ -0,0 +1,75 @@
+export const description = `Validation tests for comments`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+g.test('comments')
+ .desc(`Test that valid comments are handled correctly, including nesting.`)
+ .fn(t => {
+ const code = `
+/**
+ * Here is my shader.
+ *
+ * /* I can nest /**/ comments. */
+ * // I can nest line comments too.
+ **/
+@fragment // This is the stage
+fn main(/*
+no
+parameters
+*/) -> @location(0) vec4<f32> {
+ return/*block_comments_delimit_tokens*/vec4<f32>(.4, .2, .3, .1);
+}/* terminated block comments are OK at EOF...*/`;
+ t.expectCompileResult(true, code);
+ });
+
+g.test('line_comment_eof')
+ .desc(`Test that line comments can come at EOF.`)
+ .fn(t => {
+ const code = `
+@fragment
+fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(.4, .2, .3, .1);
+}
+// line comments are OK at EOF...`;
+ t.expectCompileResult(true, code);
+ });
+
+g.test('line_comment_terminators')
+ .desc(`Test that line comments are terminated by any blankspace other than space and \t`)
+ .params(u =>
+ u
+ .combine('blankspace', [
+ [' ', 'space'],
+ ['\t', 'tab'],
+ ['\u000a', 'line_feed'],
+ ['\u000b', 'vertical_tab'],
+ ['\u000c', 'form_feed'],
+ ['\u000d', 'carriage_return'],
+ ['\u000d\u000a', 'carriage_return_line_feed'],
+ ['\u0085', 'next_line'],
+ ['\u2028', 'line_separator'],
+ ['\u2029', 'paragraph_separator'],
+ ])
+ .beginSubcases()
+ )
+ .fn(t => {
+ const code = `// Line comment${t.params.blankspace[0]}const invalid_outside_comment = should_fail`;
+
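+    // Space and tab do not terminate a line comment, so the invalid trailing code stays
+    // commented out and compilation succeeds; every other listed blankspace ends the
+    // comment and exposes the invalid declaration.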
+ t.expectCompileResult([' ', '\t'].includes(t.params.blankspace[0]), code);
+ });
+
+g.test('unterminated_block_comment')
+ .desc(`Test that unterminated block comments cause an error`)
+ .params(u => u.combine('terminated', [true, false]).beginSubcases())
+ .fn(t => {
+ const code = `
+/**
+ * Unterminated block comment.
+ *
+ ${t.params.terminated ? '*/' : ''}`;
+
+ t.expectCompileResult(t.params.terminated, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/const.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/const.spec.ts
new file mode 100644
index 0000000000..5a50f7d210
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/const.spec.ts
@@ -0,0 +1,57 @@
+export const description = `Validation tests for @const`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+g.test('placement')
+  .desc('Tests that @const is not allowed to appear in any placement')
+ .params(u =>
+ u.combine('scope', [
+ 'private-var',
+ 'storage-var',
+ 'struct-member',
+ 'fn-decl',
+ 'fn-param',
+ 'fn-var',
+ 'fn-return',
+ 'while-stmt',
+ undefined,
+ ] as const)
+ )
+ .fn(t => {
+ const scope = t.params.scope;
+
+ const attr = '@const';
+ const code = `
+ ${scope === 'private-var' ? attr : ''}
+ var<private> priv_var : i32;
+
+ ${scope === 'storage-var' ? attr : ''}
+ @group(0) @binding(0)
+ var<storage> stor_var : i32;
+
+ struct A {
+ ${scope === 'struct-member' ? attr : ''}
+ a : i32,
+ }
+
+ @vertex
+ ${scope === 'fn-decl' ? attr : ''}
+ fn f(
+ ${scope === 'fn-param' ? attr : ''}
+ @location(0) b : i32,
+ ) -> ${scope === 'fn-return' ? attr : ''} @builtin(position) vec4f {
+ ${scope === 'fn-var' ? attr : ''}
+ var<function> func_v : i32;
+
+ ${scope === 'while-stmt' ? attr : ''}
+ while false {}
+
+ return vec4(1, 1, 1, 1);
+ }
+ `;
+
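+    // '@const' is not an attribute user code may write, so every injected placement must fail; only the attribute-free shader compiles.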
+ t.expectCompileResult(scope === undefined, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/const_assert.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/const_assert.spec.ts
new file mode 100644
index 0000000000..c5512842cf
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/const_assert.spec.ts
@@ -0,0 +1,38 @@
+export const description = `Parser validation tests for const_assert`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kCases = {
+ no_parentheses: { code: `const_assert true;`, pass: true },
+ left_parenthesis_only: { code: `const_assert(true;`, pass: false },
+ right_parenthesis_only: { code: `const_assert true);`, pass: false },
+ both_parentheses: { code: `const_assert(true);`, pass: true },
+ condition_on_newline: {
+ code: `const_assert
+true;`,
+ pass: true,
+ },
+ multiline_with_parentheses: {
+ code: `const_assert
+(
+ true
+);`,
+ pass: true,
+ },
+ invalid_expression: { code: `const_assert(1!2);`, pass: false },
+ no_condition_no_parentheses: { code: `const_assert;`, pass: false },
+ no_condition_with_parentheses: { code: `const_assert();`, pass: false },
+ not_a_boolean: { code: `const_assert 42;`, pass: false },
+};
+
+g.test('parse')
+ .desc(`Tests that the const_assert statement parses correctly.`)
+ .params(u => u.combine('case', keysOf(kCases)))
+ .fn(t => {
+ const c = kCases[t.params.case];
+ t.expectCompileResult(c.pass, c.code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/diagnostic.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/diagnostic.spec.ts
new file mode 100644
index 0000000000..154a4253ea
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/diagnostic.spec.ts
@@ -0,0 +1,201 @@
+export const description = `Validation tests for diagnostic directive and attribute`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kSpecDiagnosticRules = ['derivative_uniformity'];
+const kSpecDiagnosticSeverities = ['off', 'info', 'warning', 'error'];
+const kDiagnosticTypes = ['attribute', 'directive'];
+
+const kBadSeverities = ['none', 'warn', 'goose', 'fatal', 'severe'];
+const kBadSingleTokenRules = ['unknown', 'blahblahblah', 'derivative_uniform'];
+
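+// Builds the rule either as a module-level 'diagnostic(...)' directive or as an '@diagnostic(...)' attribute.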
+function generateDiagnostic(type: string, severity: string, rule: string): string {
+ const diagnostic = `diagnostic(${severity}, ${rule})`;
+ if (type === 'directive') {
+ return diagnostic;
+ } else {
+ return '@' + diagnostic;
+ }
+}
+
+const kValidLocations = {
+ module: (diag: string) => `${diag};`,
+ function: (diag: string) => `${diag} fn foo() { }`,
+ compound: (diag: string) => `fn foo() { ${diag} { } }`,
+ if_stmt: (diag: string) => `fn foo() { ${diag} if true { } }`,
+ if_then: (diag: string) => `fn foo() { if true ${diag} { } }`,
+ if_else: (diag: string) => `fn foo() { if true { } else ${diag} { } }`,
+ switch_stmt: (diag: string) => `fn foo() { ${diag} switch 0 { default { } } }`,
+ switch_body: (diag: string) => `fn foo() { switch 0 ${diag} { default { } } }`,
+ switch_default: (diag: string) => `fn foo() { switch 0 { default ${diag} { } } }`,
+ switch_case: (diag: string) => `fn foo() { switch 0 { case 0 ${diag} { } default { } } }`,
+ loop_stmt: (diag: string) => `fn foo() { ${diag} loop { break; } }`,
+ loop_body: (diag: string) => `fn foo() { loop ${diag} { break; } }`,
+ loop_continuing: (diag: string) => `fn foo() { loop { continuing ${diag} { break if true; } } }`,
+ while_stmt: (diag: string) => `fn foo() { ${diag} while true { break; } }`,
+ while_body: (diag: string) => `fn foo() { while true ${diag} { break; } }`,
+ for_stmt: (diag: string) => `fn foo() { ${diag} for (var i = 0; i < 10; i++) { } }`,
+ for_body: (diag: string) => `fn foo() { for (var i = 0; i < 10; i++) ${diag} { } }`,
+};
+
+const kInvalidLocations = {
+ module_var: (diag: string) => `${diag} var<private> x : u32;`,
+ module_const: (diag: string) => `${diag} const x = 0;`,
+ module_override: (diag: string) => `${diag} override x : u32;`,
+ struct: (diag: string) => `${diag} struct S { x : u32 }`,
+ struct_member: (diag: string) => ` struct S { ${diag} x : u32 }`,
+ function_params: (diag: string) => `fn foo${diag}() { }`,
+ function_var: (diag: string) => `fn foo() { ${diag} var x = 0; }`,
+ function_let: (diag: string) => `fn foo() { ${diag} let x = 0; }`,
+ function_const: (diag: string) => `fn foo() { ${diag} const x = 0; }`,
+ pre_else: (diag: string) => `fn foo() { if true { } ${diag} else { } }`,
+ pre_default: (diag: string) => `fn foo() { switch 0 { ${diag} default { } } }`,
+ pre_case: (diag: string) => `fn foo() { switch 0 { ${diag} case 0 { } default { } } }`,
+ pre_continuing: (diag: string) => `fn foo() { loop { ${diag} continuing { break if true; } } }`,
+ pre_for_params: (diag: string) => `fn foo() { for ${diag} (var i = 0; i < 10; i++) { } }`,
+};
+
+const kNestedLocations = {
+ compound: (d1: string, d2: string) => `${d1} fn foo() { ${d2} { } }`,
+ if_stmt: (d1: string, d2: string) => `fn foo() { ${d1} if true ${d2} { } }`,
+ switch_stmt: (d1: string, d2: string) => `fn foo() { ${d1} switch 0 ${d2} { default { } } }`,
+ switch_body: (d1: string, d2: string) => `fn foo() { switch 0 ${d1} { default ${d2} { } } }`,
+ switch_case: (d1: string, d2: string) =>
+ `fn foo() { switch 0 { case 0 ${d1} { } default ${d2} { } } }`,
+ loop_stmt: (d1: string, d2: string) => `fn foo() { ${d1} loop ${d2} { break; } }`,
+ while_stmt: (d1: string, d2: string) => `fn foo() { ${d1} while true ${d2} { break; } }`,
+ for_stmt: (d1: string, d2: string) => `fn foo() { ${d1} for (var i = 0; i < 10; i++) ${d2} { } }`,
+};
+
+g.test('valid_params')
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#diagnostics')
+  .desc(`Tests that the diagnostic parameters required by the spec are accepted`)
+ .params(u =>
+ u
+ .combine('severity', kSpecDiagnosticSeverities)
+ .combine('rule', kSpecDiagnosticRules)
+ .combine('type', kDiagnosticTypes)
+ )
+ .fn(t => {
+ const diag = generateDiagnostic(t.params.type, t.params.severity, t.params.rule);
+ let code = ``;
+ if (t.params.type === 'directive') {
+ code = kValidLocations['module'](diag);
+ } else {
+ code = kValidLocations['function'](diag);
+ }
+ t.expectCompileResult(true, code);
+ });
+
+g.test('invalid_severity')
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#diagnostics')
+ .desc(`Tests invalid severities are rejected`)
+ .params(u => u.combine('severity', kBadSeverities).combine('type', kDiagnosticTypes))
+ .fn(t => {
+ const diag = generateDiagnostic(t.params.type, t.params.severity, 'derivative_uniformity');
+ let code = ``;
+ if (t.params.type === 'directive') {
+ code = kValidLocations['module'](diag);
+ } else {
+ code = kValidLocations['function'](diag);
+ }
+ t.expectCompileResult(false, code);
+ });
+
+g.test('warning_unknown_rule')
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#diagnostics')
+  .desc(`Tests that unknown single-token rules issue a warning`)
+ .params(u => u.combine('type', kDiagnosticTypes).combine('rule', kBadSingleTokenRules))
+ .fn(t => {
+ const diag = generateDiagnostic(t.params.type, 'info', t.params.rule);
+ let code = ``;
+ if (t.params.type === 'directive') {
+ code = kValidLocations['module'](diag);
+ } else {
+ code = kValidLocations['function'](diag);
+ }
+ t.expectCompileWarning(true, code);
+ });
+
+g.test('valid_locations')
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#diagnostics')
+ .desc(`Tests valid locations`)
+ .params(u => u.combine('type', kDiagnosticTypes).combine('location', keysOf(kValidLocations)))
+ .fn(t => {
+ const diag = generateDiagnostic(t.params.type, 'info', 'derivative_uniformity');
+ const code = kValidLocations[t.params.location](diag);
+ let res = true;
+ if (t.params.type === 'directive') {
+ res = t.params.location === 'module';
+ } else {
+ res = t.params.location !== 'module';
+ }
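+    // When the combination is expected to be rejected, first confirm the bare location compiles without any diagnostic.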
+ if (res === false) {
+ t.expectCompileResult(true, kValidLocations[t.params.location](''));
+ }
+ t.expectCompileResult(res, code);
+ });
+
+g.test('invalid_locations')
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#diagnostics')
+ .desc(`Tests invalid locations`)
+ .params(u => u.combine('type', kDiagnosticTypes).combine('location', keysOf(kInvalidLocations)))
+ .fn(t => {
+ const diag = generateDiagnostic(t.params.type, 'info', 'derivative_uniformity');
+ t.expectCompileResult(true, kInvalidLocations[t.params.location](''));
+ t.expectCompileResult(false, kInvalidLocations[t.params.location](diag));
+ });
+
+g.test('conflicting_directive')
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#diagnostics')
+ .desc(`Tests conflicts between directives`)
+ .params(u => u.combine('s1', kSpecDiagnosticSeverities).combine('s2', kSpecDiagnosticSeverities))
+ .fn(t => {
+ const d1 = generateDiagnostic('directive', t.params.s1, 'derivative_uniformity');
+ const d2 = generateDiagnostic('directive', t.params.s2, 'derivative_uniformity');
+ const code = `${kValidLocations['module'](d1)}\n${kValidLocations['module'](d2)}`;
+ t.expectCompileResult(t.params.s1 === t.params.s2, code);
+ });
+
+g.test('conflicting_attribute_same_location')
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#diagnostics')
+ .desc(`Tests conflicts between attributes`)
+ .params(u =>
+ u
+ .combine('loc', keysOf(kValidLocations))
+ .combine('s1', kSpecDiagnosticSeverities)
+ .combine('s2', kSpecDiagnosticSeverities)
+ .filter(u => {
+ return u.loc !== 'module';
+ })
+ )
+ .fn(t => {
+ const d1 = generateDiagnostic('attribute', t.params.s1, 'derivative_uniformity');
+ const d2 = generateDiagnostic('attribute', t.params.s2, 'derivative_uniformity');
+ const diag = d1 + ' ' + d2;
+ const code = `${kValidLocations[t.params.loc](diag)}`;
+ t.expectCompileResult(t.params.s1 === t.params.s2, code);
+ });
+
+g.test('conflicting_attribute_different_location')
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#diagnostics')
+  .desc(`Tests that attributes with different severities at different nested locations do not conflict`)
+ .params(u =>
+ u
+ .combine('loc', keysOf(kNestedLocations))
+ .combine('s1', kSpecDiagnosticSeverities)
+ .combine('s2', kSpecDiagnosticSeverities)
+ .filter(u => {
+ return u.s1 !== u.s2;
+ })
+ )
+ .fn(t => {
+ const d1 = generateDiagnostic('attribute', t.params.s1, 'derivative_uniformity');
+ const d2 = generateDiagnostic('attribute', t.params.s2, 'derivative_uniformity');
+ const code = `${kNestedLocations[t.params.loc](d1, d2)}`;
+ t.expectCompileResult(true, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/discard.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/discard.spec.ts
new file mode 100644
index 0000000000..24f4692b34
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/discard.spec.ts
@@ -0,0 +1,65 @@
+export const description = `Validation tests for discard`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+g.test('placement')
+ .desc('Test that discard usage is validated')
+ .params(u =>
+ u.combine('place', ['compute', 'vertex', 'fragment', 'module', 'subfrag', 'subvert', 'subcomp'])
+ )
+ .fn(t => {
+ const pos: { [key: string]: string } = {
+ module: '',
+ subvert: '',
+ subfrag: '',
+ subcomp: '',
+ vertex: '',
+ fragment: '',
+ compute: '',
+ };
+
+ pos[t.params.place] = 'discard;';
+
+ const code = `
+${pos.module}
+
+fn subvert() {
+ ${pos.subvert}
+}
+
+@vertex
+fn vtx() -> @builtin(position) vec4f {
+ ${pos.vertex}
+ subvert();
+ return vec4f(1);
+}
+
+fn subfrag() {
+ ${pos.subfrag}
+}
+
+@fragment
+fn frag() -> @location(0) vec4f {
+ ${pos.fragment}
+ subfrag();
+ return vec4f(1);
+}
+
+fn subcomp() {
+ ${pos.subcomp}
+}
+
+@compute
+@workgroup_size(1)
+fn comp() {
+ ${pos.compute}
+ subcomp();
+}
+`;
+
+ const pass = ['fragment', 'subfrag'].includes(t.params.place);
+ t.expectCompileResult(pass, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/enable.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/enable.spec.ts
new file mode 100644
index 0000000000..230244c6b8
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/enable.spec.ts
@@ -0,0 +1,70 @@
+export const description = `Parser validation tests for enable`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kCases = {
+ f16: { code: `enable f16;`, pass: true },
+ decl_before: {
+ code: `alias i = i32;
+enable f16;`,
+ pass: false,
+ },
+ after_decl: {
+ code: `enable f16;
+alias i = i32;`,
+ pass: true,
+ },
+ const_assert_before: {
+ code: `const_assert 1 == 1;
+enable f16;`,
+ pass: false,
+ },
+ const_assert_after: {
+ code: `enable f16;
+const_assert 1 == 1;`,
+ pass: true,
+ },
+ embedded_comment: {
+ code: `/* comment
+
+*/enable f16;`,
+ pass: true,
+ },
+ parens: {
+ code: `enable(f16);`,
+ pass: false,
+ },
+ multi_line: {
+ code: `enable
+f16;`,
+ pass: true,
+ },
+ multiple_enables: {
+ code: `enable f16;
+enable f16;`,
+ pass: true,
+ },
+  multiple_entries: {
+ code: `enable f16, f16, f16;`,
+ pass: true,
+ },
+ unknown: {
+ code: `enable unknown;`,
+ pass: false,
+ },
+};
+
+g.test('enable')
+ .desc(`Tests that enables are validated correctly`)
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .params(u => u.combine('case', keysOf(kCases)))
+ .fn(t => {
+ const c = kCases[t.params.case];
+ t.expectCompileResult(c.pass, c.code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/identifiers.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/identifiers.spec.ts
new file mode 100644
index 0000000000..0dd429d0a7
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/identifiers.spec.ts
@@ -0,0 +1,407 @@
+export const description = `Validation tests for identifiers`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValidIdentifiers = new Set([
+ 'foo',
+ 'Foo',
+ 'FOO',
+ '_0',
+ '_foo0',
+ '_0foo',
+ 'foo__0',
+ 'Δέλτα',
+ 'réflexion',
+ 'Кызыл',
+ '𐰓𐰏𐰇',
+ '朝焼け',
+ 'سلام',
+ '검정',
+ 'שָׁלוֹם',
+ 'गुलाबी',
+ 'փիրուզ',
+ // Builtin type identifiers:
+ 'array',
+ 'atomic',
+ 'bool',
+ 'bf16',
+ 'bitcast',
+ 'f32',
+ 'f16',
+ 'f64',
+ 'i32',
+ 'i16',
+ 'i64',
+ 'i8',
+ 'mat2x2',
+ 'mat2x3',
+ 'mat2x4',
+ 'mat3x2',
+ 'mat3x3',
+ 'mat3x4',
+ 'mat4x2',
+ 'mat4x3',
+ 'mat4x4',
+ 'ptr',
+ 'quat',
+ 'sampler',
+ 'sampler_comparison',
+ 'signed',
+ 'texture_1d',
+ 'texture_2d',
+ 'texture_2d_array',
+ 'texture_3d',
+ 'texture_cube',
+ 'texture_cube_array',
+ 'texture_multisampled_2d',
+ 'texture_storage_1d',
+ 'texture_storage_2d',
+ 'texture_storage_2d_array',
+ 'texture_storage_3d',
+ 'texture_depth_2d',
+ 'texture_depth_2d_array',
+ 'texture_depth_cube',
+ 'texture_depth_cube_array',
+ 'texture_depth_multisampled_2d',
+ 'u32',
+ 'u16',
+ 'u64',
+ 'u8',
+ 'unsigned',
+ 'vec2',
+ 'vec3',
+ 'vec4',
+]);
+const kInvalidIdentifiers = new Set([
+ '_', // Single underscore is a syntactic token for phony assignment.
+ '__', // Leading double underscore is reserved.
+ '__foo', // Leading double underscore is reserved.
+ '0foo', // Must start with single underscore or a letter.
+ // No punctuation:
+ 'foo.bar',
+ 'foo-bar',
+ 'foo+bar',
+ 'foo#bar',
+ 'foo!bar',
+ 'foo\\bar',
+ 'foo/bar',
+ 'foo,bar',
+ 'foo@bar',
+ 'foo::bar',
+ // Keywords:
+ 'alias',
+ 'break',
+ 'case',
+ 'const',
+ 'const_assert',
+ 'continue',
+ 'continuing',
+ 'default',
+ 'diagnostic',
+ 'discard',
+ 'else',
+ 'enable',
+ 'false',
+ 'fn',
+ 'for',
+ 'if',
+ 'let',
+ 'loop',
+ 'override',
+ 'requires',
+ 'return',
+ 'struct',
+ 'switch',
+ 'true',
+ 'var',
+ 'while',
+ // Reserved Words
+ 'NULL',
+ 'Self',
+ 'abstract',
+ 'active',
+ 'alignas',
+ 'alignof',
+ 'as',
+ 'asm',
+ 'asm_fragment',
+ 'async',
+ 'attribute',
+ 'auto',
+ 'await',
+ 'become',
+ 'binding_array',
+ 'cast',
+ 'catch',
+ 'class',
+ 'co_await',
+ 'co_return',
+ 'co_yield',
+ 'coherent',
+ 'column_major',
+ 'common',
+ 'compile',
+ 'compile_fragment',
+ 'concept',
+ 'const_cast',
+ 'consteval',
+ 'constexpr',
+ 'constinit',
+ 'crate',
+ 'debugger',
+ 'decltype',
+ 'delete',
+ 'demote',
+ 'demote_to_helper',
+ 'do',
+ 'dynamic_cast',
+ 'enum',
+ 'explicit',
+ 'export',
+ 'extends',
+ 'extern',
+ 'external',
+ 'fallthrough',
+ 'filter',
+ 'final',
+ 'finally',
+ 'friend',
+ 'from',
+ 'fxgroup',
+ 'get',
+ 'goto',
+ 'groupshared',
+ 'highp',
+ 'impl',
+ 'implements',
+ 'import',
+ 'inline',
+ 'instanceof',
+ 'interface',
+ 'layout',
+ 'lowp',
+ 'macro',
+ 'macro_rules',
+ 'match',
+ 'mediump',
+ 'meta',
+ 'mod',
+ 'module',
+ 'move',
+ 'mut',
+ 'mutable',
+ 'namespace',
+ 'new',
+ 'nil',
+ 'noexcept',
+ 'noinline',
+ 'nointerpolation',
+ 'noperspective',
+ 'null',
+ 'nullptr',
+ 'of',
+ 'operator',
+ 'package',
+ 'packoffset',
+ 'partition',
+ 'pass',
+ 'patch',
+ 'pixelfragment',
+ 'precise',
+ 'precision',
+ 'premerge',
+ 'priv',
+ 'protected',
+ 'pub',
+ 'public',
+ 'readonly',
+ 'ref',
+ 'regardless',
+ 'register',
+ 'reinterpret_cast',
+ 'require',
+ 'resource',
+ 'restrict',
+ 'self',
+ 'set',
+ 'shared',
+ 'sizeof',
+ 'smooth',
+ 'snorm',
+ 'static',
+ 'static_assert',
+ 'static_cast',
+ 'std',
+ 'subroutine',
+ 'super',
+ 'target',
+ 'template',
+ 'this',
+ 'thread_local',
+ 'throw',
+ 'trait',
+ 'try',
+ 'type',
+ 'typedef',
+ 'typeid',
+ 'typename',
+ 'typeof',
+ 'union',
+ 'unless',
+ 'unorm',
+ 'unsafe',
+ 'unsized',
+ 'use',
+ 'using',
+ 'varying',
+ 'virtual',
+ 'volatile',
+ 'wgsl',
+ 'where',
+ 'with',
+ 'writeonly',
+ 'yield',
+]);
+
+g.test('module_var_name')
+ .desc(
+ `Test that valid identifiers are accepted for names of module-scope 'var's, and invalid identifiers are rejected.`
+ )
+ .params(u =>
+ u.combine('ident', new Set([...kValidIdentifiers, ...kInvalidIdentifiers])).beginSubcases()
+ )
+ .fn(t => {
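+    // Pick a declaration type that differs from the identifier so a name like 'i32' is not declared with its own type.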
+ const type = t.params.ident === 'i32' ? 'u32' : 'i32';
+ const code = `var<private> ${t.params.ident} : ${type};`;
+ t.expectCompileResult(kValidIdentifiers.has(t.params.ident), code);
+ });
+
+g.test('module_const_name')
+ .desc(
+ `Test that valid identifiers are accepted for names of module-scope 'const's, and invalid identifiers are rejected.`
+ )
+ .params(u =>
+ u.combine('ident', new Set([...kValidIdentifiers, ...kInvalidIdentifiers])).beginSubcases()
+ )
+ .fn(t => {
+ const type = t.params.ident === 'i32' ? 'u32' : 'i32';
+ const code = `const ${t.params.ident} : ${type} = 0;`;
+ t.expectCompileResult(kValidIdentifiers.has(t.params.ident), code);
+ });
+
+g.test('override_name')
+ .desc(
+ `Test that valid identifiers are accepted for names of 'override's, and invalid identifiers are rejected.`
+ )
+ .params(u =>
+ u.combine('ident', new Set([...kValidIdentifiers, ...kInvalidIdentifiers])).beginSubcases()
+ )
+ .fn(t => {
+ const type = t.params.ident === 'i32' ? 'u32' : 'i32';
+ const code = `override ${t.params.ident} : ${type} = 0;`;
+ t.expectCompileResult(kValidIdentifiers.has(t.params.ident), code);
+ });
+
+g.test('function_name')
+ .desc(
+ `Test that valid identifiers are accepted for names of functions, and invalid identifiers are rejected.`
+ )
+ .params(u =>
+ u.combine('ident', new Set([...kValidIdentifiers, ...kInvalidIdentifiers])).beginSubcases()
+ )
+ .fn(t => {
+ const code = `fn ${t.params.ident}() {}`;
+ t.expectCompileResult(kValidIdentifiers.has(t.params.ident), code);
+ });
+
+g.test('struct_name')
+ .desc(
+ `Test that valid identifiers are accepted for names of structs, and invalid identifiers are rejected.`
+ )
+ .params(u =>
+ u.combine('ident', new Set([...kValidIdentifiers, ...kInvalidIdentifiers])).beginSubcases()
+ )
+ .fn(t => {
+ const type = t.params.ident === 'i32' ? 'u32' : 'i32';
+ const code = `struct ${t.params.ident} { i : ${type} }`;
+ t.expectCompileResult(kValidIdentifiers.has(t.params.ident), code);
+ });
+
+g.test('alias_name')
+ .desc(
+ `Test that valid identifiers are accepted for names of aliases, and invalid identifiers are rejected.`
+ )
+ .params(u =>
+ u.combine('ident', new Set([...kValidIdentifiers, ...kInvalidIdentifiers])).beginSubcases()
+ )
+ .fn(t => {
+ const type = t.params.ident === 'i32' ? 'u32' : 'i32';
+ const code = `alias ${t.params.ident} = ${type};`;
+ t.expectCompileResult(kValidIdentifiers.has(t.params.ident), code);
+ });
+
+g.test('function_param_name')
+ .desc(
+ `Test that valid identifiers are accepted for names of function parameters, and invalid identifiers are rejected.`
+ )
+ .params(u =>
+ u.combine('ident', new Set([...kValidIdentifiers, ...kInvalidIdentifiers])).beginSubcases()
+ )
+ .fn(t => {
+ const type = t.params.ident === 'i32' ? 'u32' : 'i32';
+ const code = `fn F(${t.params.ident} : ${type}) {}`;
+ t.expectCompileResult(kValidIdentifiers.has(t.params.ident), code);
+ });
+
+g.test('function_const_name')
+ .desc(
+ `Test that valid identifiers are accepted for names of function-scoped 'const's, and invalid identifiers are rejected.`
+ )
+ .params(u =>
+ u.combine('ident', new Set([...kValidIdentifiers, ...kInvalidIdentifiers])).beginSubcases()
+ )
+ .fn(t => {
+ const code = `fn F() {
+ const ${t.params.ident} = 1;
+}`;
+ t.expectCompileResult(kValidIdentifiers.has(t.params.ident), code);
+ });
+
+g.test('function_let_name')
+ .desc(
+ `Test that valid identifiers are accepted for names of function-scoped 'let's, and invalid identifiers are rejected.`
+ )
+ .params(u =>
+ u.combine('ident', new Set([...kValidIdentifiers, ...kInvalidIdentifiers])).beginSubcases()
+ )
+ .fn(t => {
+ const code = `fn F() {
+ let ${t.params.ident} = 1;
+}`;
+ t.expectCompileResult(kValidIdentifiers.has(t.params.ident), code);
+ });
+
+g.test('function_var_name')
+ .desc(
+ `Test that valid identifiers are accepted for names of function-scoped 'var's, and invalid identifiers are rejected.`
+ )
+ .params(u =>
+ u.combine('ident', new Set([...kValidIdentifiers, ...kInvalidIdentifiers])).beginSubcases()
+ )
+ .fn(t => {
+ const code = `fn F() {
+ var ${t.params.ident} = 1;
+}`;
+ t.expectCompileResult(kValidIdentifiers.has(t.params.ident), code);
+ });
+
+g.test('non_normalized')
+ .desc(`Test that identifiers are not unicode normalized`)
+ .fn(t => {
+ const code = `var<private> \u212b : i32; // \u212b normalizes with NFC to \u00c5
+var<private> \u00c5 : i32;`;
+ t.expectCompileResult(true, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/literal.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/literal.spec.ts
new file mode 100644
index 0000000000..576d646a7b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/literal.spec.ts
@@ -0,0 +1,302 @@
+export const description = `Validation tests for literals`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+g.test('bools')
+ .desc(`Test that valid bools are accepted.`)
+ .params(u => u.combine('val', ['true', 'false']).beginSubcases())
+ .fn(t => {
+ const code = `var test = ${t.params.val};`;
+ t.expectCompileResult(true, t.wrapInEntryPoint(code));
+ });
+
+const kAbstractIntNonNegative = new Set([
+ '0x123', // hex number
+ '123', // signed number, no suffix
+ '0', // zero
+ '0x3f', // hex with 'f' as last character
+ '2147483647', // max signed int
+]);
+
+const kAbstractIntNegative = new Set([
+ '-0x123', // hex number
+ '-123', // signed number, no suffix
+ '-0x3f', // hex with 'f' as last character
+  '-2147483647', // negative of max signed int
+ '-2147483648', // min signed int
+]);
+
+const kI32 = new Set([
+ '94i', // signed number
+ '2147483647i', // max signed int
+ '-2147483647i', // min parsable signed int
+ 'i32(-2147483648)', // min signed int
+]);
+
+const kU32 = new Set([
+ '42u', // unsigned number
+ '0u', // min unsigned int
+ '4294967295u', // max unsigned int
+]);
+
+{
+ const kValidIntegers = new Set([
+ ...kAbstractIntNonNegative,
+ ...kAbstractIntNegative,
+ ...kI32,
+ ...kU32,
+ ]);
+ const kInvalidIntegers = new Set([
+    '0123', // Leading zero is not allowed
+ '2147483648i', // max signed int + 1
+ '-2147483649i', // min signed int - 1
+    '4294967295', // an untyped lhs will be i32, so this is too big
+ '4294967295i', // max unsigned int with i suffix
+ '4294967296u', // max unsigned int + 1
+ '-1u', // negative unsigned
+ ]);
+ g.test('abstract_int')
+ .desc(`Test that valid integers are accepted, and invalid integers are rejected.`)
+ .params(u =>
+ u.combine('val', new Set([...kValidIntegers, ...kInvalidIntegers])).beginSubcases()
+ )
+ .fn(t => {
+ const code = `var test = ${t.params.val};`;
+ t.expectCompileResult(kValidIntegers.has(t.params.val), t.wrapInEntryPoint(code));
+ });
+}
+
+{
+ const kValidI32 = new Set([...kAbstractIntNonNegative, ...kAbstractIntNegative, ...kI32]);
+ const kInvalidI32 = new Set([
+ ...kU32,
+ '2147483648', // max signed int + 1
+ '2147483648i', // max signed int + 1
+ '-2147483649', // min signed int - 1
+ '-2147483649i', // min signed int - 1
+ '1.0', // no conversion from float
+ '1.0f', // no conversion from float
+ '1.0h', // no conversion from float
+ ]);
+ g.test('i32')
+ .desc(`Test that valid signed integers are accepted, and invalid signed integers are rejected.`)
+ .params(u => u.combine('val', new Set([...kValidI32, ...kInvalidI32])).beginSubcases())
+ .beforeAllSubcases(t => {
+ if (t.params.val.includes('h')) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const { val } = t.params;
+ const code = `var test: i32 = ${val};`;
+ const extensionList = val.includes('h') ? ['f16'] : [];
+ t.expectCompileResult(kValidI32.has(val), t.wrapInEntryPoint(code, extensionList));
+ });
+}
+
+{
+ const kValidU32 = new Set([
+ ...kAbstractIntNonNegative,
+ ...kU32,
+ '4294967295', // max unsigned
+ ]);
+ const kInvalidU32 = new Set([
+ ...kAbstractIntNegative,
+ ...kI32,
+ '4294967296', // max unsigned int + 1
+    '4294967296u', // max unsigned int + 1
+ '-1', // min unsigned int - 1
+ '1.0', // no conversion from float
+ '1.0f', // no conversion from float
+ '1.0h', // no conversion from float
+ ]);
+ g.test('u32')
+ .desc(
+ `Test that valid unsigned integers are accepted, and invalid unsigned integers are rejected.`
+ )
+ .params(u => u.combine('val', new Set([...kValidU32, ...kInvalidU32])).beginSubcases())
+ .beforeAllSubcases(t => {
+ if (t.params.val.includes('h')) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const { val } = t.params;
+ const code = `var test: u32 = ${val};`;
+ const extensionList = val.includes('h') ? ['f16'] : [];
+ t.expectCompileResult(kValidU32.has(val), t.wrapInEntryPoint(code, extensionList));
+ });
+}
+
+const kF32 = new Set([
+ '0f', // Zero float
+ '0.0f', // Zero float
+ '12.223f', // float value
+ '12.f', // .f
+ '.12f', // No leading number with a f
+ '2.4e+4f', // Positive exponent with f suffix
+ '2.4e-2f', // Negative exponent with f suffix
+ '2.e+4f', // Exponent without decimals
+  '1e-4f', // Exponent without decimal point
+ '0x1P+4f', // Hex float no decimal
+]);
+
+const kF16 = new Set([
+ '0h', // Zero half
+ '1h', // Half no decimal
+ '.1h', // Half no leading value
+ '1.1e2h', // Exponent half no sign
+ '1.1E+2h', // Exponent half, plus (uppercase E)
+ '2.4e-2h', // Exponent half, negative
+ '0xep2h', // Hexfloat half lower case p
+  '0xEp-2h', // Hexfloat uppercase hex value
+ '0x3p+2h', // Hex float half positive exponent
+ '0x3.2p+2h', // Hex float with decimal half
+]);
+
+const kAbstractFloat = new Set([
+ '0.0', // Zero float without suffix
+ '.0', // Zero float without leading value
+  '12.', // Trailing decimal point, no fractional digits
+ '00012.', // Leading zeros allowed
+ '.12', // No leading digits
+ '1.2e2', // Exponent without sign (lowercase e)
+  '1.2E2', // Exponent without sign (uppercase E)
+ '1.2e+2', // positive exponent
+ '2.4e-2', // Negative exponent
+ '.1e-2', // Exponent without leading number
+ '0x.3', // Hex float, lowercase X
+ '0X.3', // Hex float, uppercase X
+ '0xa.fp+2', // Hex float, lowercase p
+ '0xa.fP+2', // Hex float, uppercase p
+ '0xE.fp+2', // Uppercase E (as hex, but matches non hex exponent char)
+ '0X1.fp-4', // Hex float negative exponent
+]);
+
+{
+ const kValidFloats = new Set([...kF32, ...kF16, ...kAbstractFloat]);
+ const kInvalidFloats = new Set([
+ '.f', // Must have a number
+ '.e-2', // Exponent without leading values
+ '1.e&2f', // Exponent invalid sign
+ '1.ef', // Exponent without value
+ '1.e+f', // Exponent sign no value
+ '0x.p2', // Hex float no value
+ '0x1p', // Hex float missing exponent
+ '0x1p^', // Hex float invalid exponent
+ '1.0e+999999999999f', // Too big
+ '0x1.0p+999999999999f', // Too big hex
+ '0x1.00000001pf0', // Mantissa too big
+ ]);
+ const kInvalidF16s = new Set([
+ '1.1eh', // Missing exponent value
+ '1.1e!2h', // Invalid exponent sign
+ '1.1e+h', // Missing exponent with sign
+ '1.0e+999999h', // Too large
+ '0x1.0p+999999h', // Too large hex
+ '0xf.h', // Having suffix "h" without "p" or "P"
+ '0x3h', // Having suffix "h" without "p" or "P"
+ ]);
+
+ g.test('abstract_float')
+ .desc(`Test that valid floats are accepted, and invalid floats are rejected`)
+ .params(u =>
+ u
+ .combine('val', new Set([...kValidFloats, ...kInvalidFloats, ...kInvalidF16s]))
+ .beginSubcases()
+ )
+ .beforeAllSubcases(t => {
+ if (kF16.has(t.params.val) || kInvalidF16s.has(t.params.val)) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const code = `var test = ${t.params.val};`;
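+      // Request the f16 extension only for half-precision literals, whether they are expected to pass or fail.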
+ const extensionList = kF16.has(t.params.val) || kInvalidF16s.has(t.params.val) ? ['f16'] : [];
+ t.expectCompileResult(
+ kValidFloats.has(t.params.val),
+ t.wrapInEntryPoint(code, extensionList)
+ );
+ });
+}
+
+{
+ const kValidF32 = new Set([
+ ...kF32,
+ ...kAbstractFloat,
+ '1', // AbstractInt
+ '-1', // AbstractInt
+ ]);
+ const kInvalidF32 = new Set([
+ ...kF16, // no conversion
+ '1u', // unsigned
+ '1i', // signed
+ '1h', // half float
+ '.f', // Must have a number
+ '.e-2', // Exponent without leading values
+ '1.e&2f', // Exponent invalid sign
+ '1.ef', // Exponent without value
+ '1.e+f', // Exponent sign no value
+ '0x.p2', // Hex float no value
+ '0x1p', // Hex float missing exponent
+ '0x1p^', // Hex float invalid exponent
+ '1.0e+999999999999f', // Too big
+ '0x1.0p+999999999999f', // Too big hex
+ '0x1.00000001pf0', // Mantissa too big
+ ]);
+
+ g.test('f32')
+ .desc(`Test that valid floats are accepted, and invalid floats are rejected`)
+ .params(u => u.combine('val', new Set([...kValidF32, ...kInvalidF32])).beginSubcases())
+ .beforeAllSubcases(t => {
+ if (kF16.has(t.params.val)) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const { val } = t.params;
+ const code = `var test: f32 = ${val};`;
+ const extensionList = kF16.has(val) ? ['f16'] : [];
+ t.expectCompileResult(kValidF32.has(val), t.wrapInEntryPoint(code, extensionList));
+ });
+}
+
+{
+ const kValidF16 = new Set([
+ ...kF16,
+ ...kAbstractFloat,
+ '1', // AbstractInt
+ '-1', // AbstractInt
+ ]);
+ const kInvalidF16 = new Set([
+ ...kF32,
+ '1i', // signed int
+ '1u', // unsigned int
+ '1f', // no conversion from f32 to f16
+ '1.1eh', // Missing exponent value
+ '1.1e!2h', // Invalid exponent sign
+ '1.1e+h', // Missing exponent with sign
+ '1.0e+999999h', // Too large
+ '0x1.0p+999999h', // Too large hex
+ ]);
+
+ g.test('f16')
+ .desc(
+ `
+Test that valid half floats are accepted, and invalid half floats are rejected
+`
+ )
+ .params(u => u.combine('val', new Set([...kValidF16, ...kInvalidF16])).beginSubcases())
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(t => {
+ const { val } = t.params;
+ const code = `var test: f16 = ${val};`;
+ const extensionList = ['f16'];
+ t.expectCompileResult(kValidF16.has(val), t.wrapInEntryPoint(code, extensionList));
+ });
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/must_use.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/must_use.spec.ts
new file mode 100644
index 0000000000..dd36fabcf6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/must_use.spec.ts
@@ -0,0 +1,269 @@
+export const description = `Validation tests for @must_use`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kMustUseDeclarations = {
+ var: {
+ code: `@must_use @group(0) @binding(0)
+ var<storage> x : array<u32>;`,
+ valid: false,
+ },
+ function_no_return: {
+ code: `@must_use fn foo() { }`,
+ valid: false,
+ },
+ function_scalar_return: {
+ code: `@must_use fn foo() -> u32 { return 0; }`,
+ valid: true,
+ },
+ function_struct_return: {
+ code: `struct S { x : u32 }
+ @must_use fn foo() -> S { return S(); }`,
+ valid: true,
+ },
+ function_var: {
+ code: `fn foo() { @must_use var x = 0; }`,
+ valid: false,
+ },
+ function_call: {
+ code: `fn bar() -> u32 { return 0; }
+ fn foo() { @must_use bar(); }`,
+ valid: false,
+ },
+ function_parameter: {
+ code: `fn foo(@must_use param : u32) -> u32 { return param; }`,
+ valid: false,
+ },
+ empty_parameter: {
+ code: `@must_use() fn foo() -> u32 { return 0; }`,
+ valid: false,
+ },
+ parameter: {
+ code: `@must_use(0) fn foo() -> u32 { return 0; }`,
+ valid: false,
+ },
+};
+
+g.test('declaration')
+  .desc(`Validate that the attribute can only be applied to a function declaration with a return type`)
+ .params(u => u.combine('test', keysOf(kMustUseDeclarations)))
+ .fn(t => {
+ const test = kMustUseDeclarations[t.params.test];
+ t.expectCompileResult(test.valid, test.code);
+ });
+
+const kMustUseCalls = {
+ phony: `_ = bar();`,
+ let: `let tmp = bar();`,
+ var: `var tmp = bar();`,
+ condition: `if bar() == 0 { }`,
+ param: `baz(bar());`,
+ statement: `bar();`,
+};
+
+g.test('call')
+  .desc(`Validate that a call to a @must_use function cannot be used as an entire function-call statement`)
+ .params(u => u.combine('use', ['@must_use', ''] as const).combine('call', keysOf(kMustUseCalls)))
+ .fn(t => {
+ const test = kMustUseCalls[t.params.call];
+ const code = `
+ fn baz(param : u32) { }
+ ${t.params.use} fn bar() -> u32 { return 0; }
+ fn foo() {
+ ${test}
+ }`;
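+    // Only a bare call statement to a @must_use function is invalid; consuming the value, or dropping the attribute, passes.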
+ const res = t.params.call !== 'statement' || t.params.use === '';
+ t.expectCompileResult(res, code);
+ });
+
+const kMustUseBuiltinCalls = {
+ // Type constructors
+ u32: `u32()`,
+ i32: `i32(0)`,
+ struct: `S()`,
+ // Reinterpretation
+ bitcast: `bitcast<f32>(8u)`,
+ // Logical
+ all: `all(vec2<bool>(true))`,
+ any: `any(vec2<bool>(true))`,
+ select: `select(0i, 1i, true)`,
+ // Array
+ arrayLength: `arrayLength(&storage_var)`,
+ // Numeric
+ abs: `abs(0.5)`,
+ acos: `acos(0.5)`,
+ acosh: `acosh(1.0)`,
+ asin: `asin(0.5)`,
+ asinh: `asinh(0.5)`,
+ atan: `atan(0.5)`,
+ atanh: `atanh(0.5)`,
+ atan2: `atan2(0.5, 0.5)`,
+ ceil: `ceil(0.5)`,
+ clamp: `clamp(0.5, 0.1, 1.0)`,
+ cos: `cos(0.5)`,
+ cosh: `cosh(0.5)`,
+ countLeadingZeros: `countLeadingZeros(0)`,
+ countOneBits: `countOneBits(0)`,
+ countTrailingZeros: `countTrailingZeros(0)`,
+ cross: `cross(vec3f(), vec3f())`,
+ degrees: `degrees(0.5)`,
+ determinant: `determinant(mat2x2f())`,
+ distance: `distance(0.5, 0.5)`,
+ dot: `dot(vec2f(0.5, 0.5), vec2f(0.5, 0.5))`,
+ exp: `exp(0.5)`,
+ exp2: `exp2(0.5)`,
+ extractBits: `extractBits(0, 0, 1)`,
+ faceForward: `faceForward(vec2f(), vec2f(), vec2f())`,
+ firstLeadingBit: `firstLeadingBit(0)`,
+ firstTrailingBit: `firstTrailingBit(0)`,
+ floor: `floor(0.5)`,
+ fma: `fma(0.5, 0.5, 0.5)`,
+ fract: `fract(0.5)`,
+ frexp: `frexp(0.5)`,
+ insertBits: `insertBits(0, 0, 0, 1)`,
+ inverseSqrt: `inverseSqrt(0.5)`,
+ ldexp: `ldexp(0.5, 1)`,
+ length: `length(0.5)`,
+ log: `log(0.5)`,
+ log2: `log2(0.5)`,
+ max: `max(0, 0)`,
+ min: `min(0, 0)`,
+ mix: `mix(0.5, 0.5, 0.5)`,
+ modf: `modf(0.5)`,
+ normalize: `normalize(vec2f(0.5, 0.5))`,
+ pow: `pow(0.5, 0.5)`,
+ quantizeToF16: `quantizeToF16(0.5)`,
+ radians: `radians(0.5)`,
+ reflect: `reflect(vec2f(0.5, 0.5), vec2f(0.5, 0.5))`,
+ refract: `refract(vec2f(0.5, 0.5), vec2f(0.5, 0.5), 0.5)`,
+ reverseBits: `reverseBits(0)`,
+ round: `round(0.5)`,
+ saturate: `saturate(0.5)`,
+ sign: `sign(0.5)`,
+ sin: `sin(0.5)`,
+ sinh: `sinh(0.5)`,
+ smoothstep: `smoothstep(0.1, 1.0, 0.5)`,
+ sqrt: `sqrt(0.5)`,
+ step: `step(0.1, 0.5)`,
+ tan: `tan(0.5)`,
+ tanh: `tanh(0.5)`,
+ transpose: `transpose(mat2x2f())`,
+ trunc: `trunc(0.5)`,
+ // Derivative
+ dpdx: `dpdx(0.5)`,
+ dpdxCoarse: `dpdxCoarse(0.5)`,
+ dpdxFine: `dpdxFine(0.5)`,
+ dpdy: `dpdy(0.5)`,
+ dpdyCoarse: `dpdyCoarse(0.5)`,
+ dpdyFine: `dpdyFine(0.5)`,
+ fwidth: `fwidth(0.5)`,
+ fwidthCoarse: `fwidthCoarse(0.5)`,
+ fwidthFine: `fwidthFine(0.5)`,
+ // Texture
+ textureDimensions: `textureDimensions(tex_2d)`,
+ textureGather: `textureGather(0, tex_2d, s, vec2f(0,0))`,
+ textureGatherCompare: `textureGatherCompare(tex_depth_2d, s_comp, vec2f(0,0), 0)`,
+ textureLoad: `textureLoad(tex_2d, vec2i(0,0), 0)`,
+ textureNumLayers: `textureNumLayers(tex_array_2d)`,
+ textureNumLevels: `textureNumLevels(tex_2d)`,
+ textureNumSamples: `textureNumSamples(tex_multi_2d)`,
+ textureSample: `textureSample(tex_2d, s, vec2f(0,0))`,
+ textureSampleBias: `textureSampleBias(tex_2d, s, vec2f(0,0), 0)`,
+ textureSampleCompare: `textureSampleCompare(tex_depth_2d, s_comp, vec2f(0,0), 0)`,
+ textureSampleCompareLevel: `textureSampleCompareLevel(tex_depth_2d, s_comp, vec2f(0,0), 0)`,
+ textureSampleGrad: `textureSampleGrad(tex_2d, s, vec2f(0,0), vec2f(0,0), vec2f(0,0))`,
+ textureSampleLevel: `textureSampleLevel(tex_2d, s, vec2f(0,0), 0)`,
+ textureSampleBaseClampToEdge: `textureSampleBaseClampToEdge(tex_2d, s, vec2f(0,0))`,
+ // Data Packing
+ pack4x8snorm: `pack4x8snorm(vec4f())`,
+ pack4x8unorm: `pack4x8unorm(vec4f())`,
+ pack2x16snorm: `pack2x16snorm(vec2f())`,
+ pack2x16unorm: `pack2x16unorm(vec2f())`,
+ pack2x16float: `pack2x16float(vec2f())`,
+ // Data Unpacking
+ unpack4x8snorm: `unpack4x8snorm(0)`,
+ unpack4x8unorm: `unpack4x8unorm(0)`,
+ unpack2x16snorm: `unpack2x16snorm(0)`,
+ unpack2x16unorm: `unpack2x16unorm(0)`,
+ unpack2x16float: `unpack2x16float(0)`,
+ // Synchronization
+ workgroupUniformLoad: `workgroupUniformLoad(&wg_var)`,
+};
+
+g.test('builtin_must_use')
+ .desc(`Validate must_use built-in functions`)
+ .params(u =>
+ u.combine('call', keysOf(kMustUseBuiltinCalls)).combine('use', [true, false] as const)
+ )
+ .fn(t => {
+ let call = kMustUseBuiltinCalls[t.params.call];
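+    // When 'use' is set, consume the result with a phony assignment; otherwise leave the bare call, which must be rejected.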
+ if (t.params.use) {
+ call = `_ = ${call}`;
+ }
+ const code = `
+struct S {
+ x : u32
+}
+
+@group(0) @binding(0)
+var<storage> storage_var : array<u32>;
+@group(0) @binding(1)
+var tex_2d : texture_2d<f32>;
+@group(0) @binding(2)
+var s : sampler;
+@group(0) @binding(3)
+var tex_depth_2d : texture_depth_2d;
+@group(0) @binding(4)
+var s_comp : sampler_comparison;
+@group(0) @binding(5)
+var tex_storage_2d : texture_storage_2d<rgba8unorm, write>;
+@group(0) @binding(6)
+var tex_multi_2d : texture_multisampled_2d<f32>;
+@group(0) @binding(7)
+var tex_array_2d : texture_2d_array<f32>;
+
+var<workgroup> wg_var : u32;
+
+fn foo() {
+ ${call};
+}`;
+
+ t.expectCompileResult(t.params.use, code);
+ });
+
+const kNoMustUseBuiltinCalls = {
+ atomicLoad: `atomicLoad(&a)`,
+ atomicAdd: `atomicAdd(&a, 0)`,
+ atomicSub: `atomicSub(&a, 0)`,
+ atomicMax: `atomicMax(&a, 0)`,
+ atomicMin: `atomicMin(&a, 0)`,
+ atomicAnd: `atomicAnd(&a, 0)`,
+ atomicOr: `atomicOr(&a, 0)`,
+ atomicXor: `atomicXor(&a, 0)`,
+ atomicExchange: `atomicExchange(&a, 0)`,
+ atomicCompareExchangeWeak: `atomicCompareExchangeWeak(&a, 0, 0)`,
+};
+
+g.test('builtin_no_must_use')
+ .desc(`Validate built-in functions without must_use`)
+ .params(u =>
+ u.combine('call', keysOf(kNoMustUseBuiltinCalls)).combine('use', [true, false] as const)
+ )
+ .fn(t => {
+ let call = kNoMustUseBuiltinCalls[t.params.call];
+ if (t.params.use) {
+ call = `_ = ${call}`;
+ }
+ const code = `
+var<workgroup> a : atomic<u32>;
+
+fn foo() {
+ ${call};
+}`;
+
+ t.expectCompileResult(true, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/pipeline_stage.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/pipeline_stage.spec.ts
new file mode 100644
index 0000000000..78dcb95782
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/pipeline_stage.spec.ts
@@ -0,0 +1,155 @@
+export const description = `Validation tests for pipeline stage`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValidVertex = new Set(['', '@vertex', '@\tvertex', '@/^comment^/vertex']);
+const kInvalidVertex = new Set(['@mvertex', '@vertex()', '@vertex )', '@vertex(']);
+g.test('vertex_parsing')
+ .desc(`Test that @vertex is parsed correctly.`)
+ .params(u => u.combine('val', new Set([...kValidVertex, ...kInvalidVertex])))
+ .fn(t => {
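+    // The params encode '*' as '^'; swap it back so '@/*comment*/vertex' is compiled as written.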
+ const v = t.params.val.replace(/\^/g, '*');
+ const r = t.params.val !== '' ? '@builtin(position)' : '';
+ const code = `
+${v}
+fn main() -> ${r} vec4<f32> {
+ return vec4<f32>(.4, .2, .3, .1);
+}`;
+ t.expectCompileResult(kValidVertex.has(t.params.val), code);
+ });
+
+const kValidFragment = new Set(['', '@fragment', '@\tfragment', '@/^comment^/fragment']);
+const kInvalidFragment = new Set(['@mfragment', '@fragment()', '@fragment )', '@fragment(']);
+g.test('fragment_parsing')
+ .desc(`Test that @fragment is parsed correctly.`)
+ .params(u => u.combine('val', new Set([...kValidFragment, ...kInvalidFragment])))
+ .fn(t => {
+ const v = t.params.val.replace(/\^/g, '*');
+ const r = t.params.val !== '' ? '@location(0)' : '';
+ const code = `
+${v}
+fn main() -> ${r} vec4<f32> {
+ return vec4<f32>(.4, .2, .3, .1);
+}`;
+ t.expectCompileResult(kValidFragment.has(t.params.val), code);
+ });
+
+const kValidCompute = new Set(['', '@compute', '@\tcompute', '@/^comment^/compute']);
+const kInvalidCompute = new Set(['@mcompute', '@compute()', '@compute )', '@compute(']);
+g.test('compute_parsing')
+ .desc(`Test that @compute is parsed correctly.`)
+ .params(u => u.combine('val', new Set([...kValidCompute, ...kInvalidCompute])))
+ .fn(t => {
+ let v = t.params.val.replace(/\^/g, '*');
+ // Always add a workgroup size unless there is no parameter
+ if (v !== '') {
+ v += '\n@workgroup_size(1)';
+ }
+ const code = `
+${v}
+fn main() {}
+`;
+ t.expectCompileResult(kValidCompute.has(t.params.val), code);
+ });
+
+g.test('multiple_entry_points')
+ .desc(`Test that multiple entry points are allowed.`)
+ .fn(t => {
+ const code = `
+@compute @workgroup_size(1) fn compute_1() {}
+@compute @workgroup_size(1) fn compute_2() {}
+
+@fragment fn frag_1() -> @location(2) vec4f { return vec4f(1); }
+@fragment fn frag_2() -> @location(2) vec4f { return vec4f(1); }
+@fragment fn frag_3() -> @location(2) vec4f { return vec4f(1); }
+
+@vertex fn vtx_1() -> @builtin(position) vec4f { return vec4f(1); }
+@vertex fn vtx_2() -> @builtin(position) vec4f { return vec4f(1); }
+@vertex fn vtx_3() -> @builtin(position) vec4f { return vec4f(1); }
+`;
+ t.expectCompileResult(true, code);
+ });
+
+g.test('duplicate_compute_on_function')
+  .desc(`Test that duplicate @compute attributes are not allowed.`)
+ .params(u => u.combine('dupe', ['', '@compute']))
+ .fn(t => {
+ const code = `
+@compute ${t.params.dupe} @workgroup_size(1) fn compute_1() {}
+`;
+ t.expectCompileResult(t.params.dupe === '', code);
+ });
+
+g.test('duplicate_fragment_on_function')
+  .desc(`Test that duplicate @fragment attributes are not allowed.`)
+ .params(u => u.combine('dupe', ['', '@fragment']))
+ .fn(t => {
+ const code = `
+@fragment ${t.params.dupe} fn vtx() -> @location(0) vec4f { return vec4f(1); }
+`;
+ t.expectCompileResult(t.params.dupe === '', code);
+ });
+
+g.test('duplicate_vertex_on_function')
+  .desc(`Test that duplicate @vertex attributes are not allowed.`)
+ .params(u => u.combine('dupe', ['', '@vertex']))
+ .fn(t => {
+ const code = `
+@vertex ${t.params.dupe} fn vtx() -> @builtin(position) vec4f { return vec4f(1); }
+`;
+ t.expectCompileResult(t.params.dupe === '', code);
+ });
+
+g.test('placement')
+  .desc('Tests the locations a pipeline-stage attribute is allowed to appear')
+ .params(u =>
+ u
+ .combine('scope', [
+ 'private-var',
+ 'storage-var',
+ 'struct-member',
+ 'fn-param',
+ 'fn-var',
+ 'fn-return',
+ 'while-stmt',
+ undefined,
+ ] as const)
+ .combine('attr', ['@compute', '@fragment', '@vertex'])
+ )
+ .fn(t => {
+ const scope = t.params.scope;
+
+ const attr = t.params.attr;
+ const code = `
+ ${scope === 'private-var' ? attr : ''}
+ var<private> priv_var : i32;
+
+ ${scope === 'storage-var' ? attr : ''}
+ @group(0) @binding(0)
+ var<storage> stor_var : i32;
+
+ struct A {
+ ${scope === 'struct-member' ? attr : ''}
+ a : i32,
+ }
+
+ @vertex
+ fn f(
+ ${scope === 'fn-param' ? attr : ''}
+ @location(0) b : i32,
+ ) -> ${scope === 'fn-return' ? attr : ''} @builtin(position) vec4f {
+ ${scope === 'fn-var' ? attr : ''}
+ var<function> func_v : i32;
+
+ ${scope === 'while-stmt' ? attr : ''}
+ while false {}
+
+ return vec4(1, 1, 1, 1);
+ }
+ `;
+
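+    // Stage attributes are only valid on function declarations; none of the injected placements qualifies, so only the attribute-free shader compiles.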
+ t.expectCompileResult(scope === undefined, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/semicolon.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/semicolon.spec.ts
new file mode 100644
index 0000000000..87cffcfafc
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/semicolon.spec.ts
@@ -0,0 +1,269 @@
+export const description = `Validation tests for semicolon placements`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+g.test('module_scope_single')
+ .desc(`Test that a semicolon can be placed at module scope.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `;`);
+ });
+
+g.test('module_scope_multiple')
+ .desc(`Test that multiple semicolons can be placed at module scope.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `;;;`);
+ });
+
+g.test('after_enable')
+ .desc(`Test that a semicolon must be placed after an enable directive.`)
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase({ requiredFeatures: ['shader-f16'] });
+ })
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `enable f16;`);
+ t.expectCompileResult(/* pass */ false, `enable f16`);
+ });
+
+g.test('after_struct_decl')
+  .desc(`Test that a semicolon can be placed after a struct declaration.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `struct S { x : i32 };`);
+ t.expectCompileResult(/* pass */ true, `struct S { x : i32 }`);
+ });
+
+g.test('after_member')
+  .desc(`Test that a semicolon must not be placed after a struct member declaration.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `struct S { x : i32 }`);
+ t.expectCompileResult(/* pass */ false, `struct S { x : i32; }`);
+ });
+
+g.test('after_func_decl')
+ .desc(`Test that a semicolon can be placed after a function declaration.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() {};`);
+ t.expectCompileResult(/* pass */ true, `fn f() {}`);
+ });
+
+g.test('after_type_alias_decl')
+  .desc(`Test that a semicolon must be placed after a type alias declaration.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `alias T = i32;`);
+ t.expectCompileResult(/* pass */ false, `alias T = i32`);
+ });
+
+g.test('after_return')
+ .desc(`Test that a semicolon must be placed after a return statement.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { return; }`);
+ t.expectCompileResult(/* pass */ false, `fn f() { return }`);
+ });
+
+g.test('after_call')
+ .desc(`Test that a semicolon must be placed after a function call.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { workgroupBarrier(); }`);
+ t.expectCompileResult(/* pass */ false, `fn f() { workgroupBarrier() }`);
+ });
+
+g.test('after_module_const_decl')
+ .desc(`Test that a semicolon must be placed after a module-scope const declaration.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `const v = 1;`);
+ t.expectCompileResult(/* pass */ false, `const v = 1`);
+ });
+
+g.test('after_fn_const_decl')
+ .desc(`Test that a semicolon must be placed after a function-scope const declaration.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { const v = 1; }`);
+ t.expectCompileResult(/* pass */ false, `fn f() { const v = 1 }`);
+ });
+
+g.test('after_module_var_decl')
+ .desc(`Test that a semicolon must be placed after a module-scope var declaration.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `var<private> v = 1;`);
+ t.expectCompileResult(/* pass */ false, `var<private> v = 1`);
+ });
+
+g.test('after_fn_var_decl')
+ .desc(`Test that a semicolon must be placed after a function-scope var declaration.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { var v = 1; }`);
+ t.expectCompileResult(/* pass */ false, `fn f() { var v = 1 }`);
+ });
+
+g.test('after_let_decl')
+ .desc(`Test that a semicolon must be placed after a let declaration.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { let v = 1; }`);
+ t.expectCompileResult(/* pass */ false, `fn f() { let v = 1 }`);
+ });
+
+g.test('after_discard')
+ .desc(`Test that a semicolon must be placed after a discard statement.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { discard; }`);
+ t.expectCompileResult(/* pass */ false, `fn f() { discard }`);
+ });
+
+g.test('after_assignment')
+ .desc(`Test that a semicolon must be placed after an assignment statement.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { var v = 1; v = 2; }`);
+ t.expectCompileResult(/* pass */ false, `fn f() { var v = 1; v = 2 }`);
+ });
+
+g.test('after_fn_const_assert')
+  .desc(`Test that a semicolon must be placed after a function-scope const_assert.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { const_assert(true); }`);
+ t.expectCompileResult(/* pass */ false, `fn f() { const_assert(true) }`);
+ });
+
+g.test('function_body_single')
+ .desc(`Test that a semicolon can be placed in a function body.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { ; }`);
+ });
+
+g.test('function_body_multiple')
+ .desc(`Test that multiple semicolons can be placed in a function body.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { ;;; }`);
+ });
+
+g.test('compound_statement_single')
+ .desc(`Test that a semicolon can be placed in a compound statement.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { { ; } }`);
+ });
+
+g.test('compound_statement_multiple')
+ .desc(`Test that multiple semicolons can be placed in a compound statement.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { { ;;; } }`);
+ });
+
+g.test('after_compound_statement')
+ .desc(`Test that a semicolon can be placed after a compound statement.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { {} ; }`);
+ });
+
+g.test('after_if')
+ .desc(`Test that a semicolon can be placed after an if-statement.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { if true {} ; }`);
+ });
+
+g.test('after_if_else')
+ .desc(`Test that a semicolon can be placed after an if-else-statement.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { if true {} else {} ; }`);
+ });
+
+g.test('after_switch')
+  .desc(`Test that a semicolon can be placed after a switch-statement.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { switch 1 { default {} } ; }`);
+ });
+
+g.test('after_case')
+ .desc(`Test that a semicolon cannot be placed after a non-default switch case.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ false, `fn f() { switch 1 { case 1 {}; default {} } }`);
+ t.expectCompileResult(/* pass */ true, `fn f() { switch 1 { case 1 {} default {} } }`);
+ });
+
+g.test('after_case_break')
+ .desc(`Test that a semicolon must be placed after a case break statement.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ false, `fn f() { switch 1 { case 1 { break } default {} } }`);
+ t.expectCompileResult(/* pass */ true, `fn f() { switch 1 { case 1 { break; } default {} } }`);
+ });
+
+g.test('after_default_case')
+ .desc(`Test that a semicolon cannot be placed after a default switch case.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ false, `fn f() { switch 1 { default {}; } }`);
+ t.expectCompileResult(/* pass */ true, `fn f() { switch 1 { default {} } }`);
+ });
+
+g.test('after_default_case_break')
+  .desc(`Test that a semicolon must be placed after a break statement inside a default switch case.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ false, `fn f() { switch 1 { default { break } } }`);
+ t.expectCompileResult(/* pass */ true, `fn f() { switch 1 { default { break; } } }`);
+ });
+
+g.test('after_for')
+ .desc(`Test that a semicolon can be placed after a for-loop.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { for (; false;) {}; }`);
+ });
+
+g.test('after_for_break')
+ .desc(`Test that a semicolon must be placed after a for-loop break statement.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { for (; false;) { break; } }`);
+ t.expectCompileResult(/* pass */ false, `fn f() { for (; false;) { break } }`);
+ });
+
+g.test('after_loop')
+ .desc(`Test that a semicolon can be placed after a loop.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { loop { break; }; }`);
+ });
+
+g.test('after_loop_break')
+ .desc(`Test that a semicolon must be placed after a loop break statement.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { loop { break; }; }`);
+ t.expectCompileResult(/* pass */ false, `fn f() { loop { break }; }`);
+ });
+
+g.test('after_loop_break_if')
+ .desc(`Test that a semicolon must be placed after a loop break-if statement.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { loop { continuing { break if true; } }; }`);
+ t.expectCompileResult(/* pass */ false, `fn f() { loop { continuing { break if true } }; }`);
+ });
+
+g.test('after_loop_continue')
+ .desc(`Test that a semicolon must be placed after a loop continue statement.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { loop { if true { continue; } { break; } } }`);
+ t.expectCompileResult(/* pass */ false, `fn f() { loop { if true { continue } { break; } } }`);
+ });
+
+g.test('after_continuing')
+  .desc(`Test that a semicolon cannot be placed after a continuing block.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ false, `fn f() { loop { break; continuing{}; } }`);
+ t.expectCompileResult(/* pass */ true, `fn f() { loop { break; continuing{} } }`);
+ });
+
+g.test('after_while')
+  .desc(`Test that a semicolon can be placed after a while-loop.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { while false {}; }`);
+ });
+
+g.test('after_while_break')
+ .desc(`Test that a semicolon must be placed after a while break statement.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { while false { break; } }`);
+ t.expectCompileResult(/* pass */ false, `fn f() { while false { break } }`);
+ });
+
+g.test('after_while_continue')
+ .desc(`Test that a semicolon must be placed after a while continue statement.`)
+ .fn(t => {
+ t.expectCompileResult(/* pass */ true, `fn f() { while false { continue; } }`);
+ t.expectCompileResult(/* pass */ false, `fn f() { while false { continue } }`);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/source.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/source.spec.ts
new file mode 100644
index 0000000000..40da5d2baf
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/source.spec.ts
@@ -0,0 +1,29 @@
+export const description = `Validation tests for source parsing`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+g.test('valid_source')
+ .desc(`Tests that a valid source is consumed successfully.`)
+ .fn(t => {
+ const code = `
+ @fragment
+ fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(.4, .2, .3, .1);
+ }`;
+ t.expectCompileResult(true, code);
+ });
+
+g.test('empty')
+ .desc(`Test that an empty source is consumed successfully.`)
+ .fn(t => {
+ t.expectCompileResult(true, '');
+ });
+
+g.test('invalid_source')
+ .desc(`Tests that a source which does not match the grammar fails.`)
+ .fn(t => {
+ t.expectCompileResult(false, 'invalid_source');
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/unary_ops.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/unary_ops.spec.ts
new file mode 100644
index 0000000000..d1963864c3
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/unary_ops.spec.ts
@@ -0,0 +1,48 @@
+export const description = `Validation tests for unary ops`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kTests = {
+ not_bool_literal: {
+ src: 'let a = !true;',
+ pass: true,
+ },
+ not_bool_expr: {
+ src: `let a = !(1 == 2);`,
+ pass: true,
+ },
+ not_not_bool_literal: {
+ src: 'let a = !!true;',
+ pass: true,
+ },
+ not_not_bool_expr: {
+ src: `let a = !!(1 == 2);`,
+ pass: true,
+ },
+ not_int_literal: {
+ src: `let a = !42;`,
+ pass: false,
+ },
+ not_int_expr: {
+ src: `let a = !(40 + 2);`,
+ pass: false,
+ },
+};
+
+g.test('all')
+ .desc('Test that unary operators are validated correctly')
+ .params(u => u.combine('stmt', keysOf(kTests)))
+ .fn(t => {
+ const code = `
+@vertex
+fn vtx() -> @builtin(position) vec4f {
+ ${kTests[t.params.stmt].src}
+ return vec4f(1);
+}
+ `;
+ t.expectCompileResult(kTests[t.params.stmt].pass, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/var_and_let.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/var_and_let.spec.ts
new file mode 100644
index 0000000000..868e0a0bea
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/parse/var_and_let.spec.ts
@@ -0,0 +1,106 @@
+export const description = `
+Positive and negative validation tests for var and let declarations.
+
+TODO: Find a better way to test arrays than using a single arbitrary size. [1]
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kTestTypes = [
+ 'f32',
+ 'i32',
+ 'u32',
+ 'bool',
+ 'vec2<f32>',
+ 'vec2<i32>',
+ 'vec2<u32>',
+ 'vec2<bool>',
+ 'vec3<f32>',
+ 'vec3<i32>',
+ 'vec3<u32>',
+ 'vec3<bool>',
+ 'vec4<f32>',
+ 'vec4<i32>',
+ 'vec4<u32>',
+ 'vec4<bool>',
+ 'mat2x2<f32>',
+ 'mat2x3<f32>',
+ 'mat2x4<f32>',
+ 'mat3x2<f32>',
+ 'mat3x3<f32>',
+ 'mat3x4<f32>',
+ 'mat4x2<f32>',
+ 'mat4x3<f32>',
+ 'mat4x4<f32>',
+  // [1]: 12 is an arbitrary size here. Find a solution to replace it.
+ 'array<f32, 12>',
+ 'array<i32, 12>',
+ 'array<u32, 12>',
+ 'array<bool, 12>',
+] as const;
+
+g.test('initializer_type')
+ .desc(
+ `
+ If present, the initializer's type must match the store type of the variable.
+ Testing scalars, vectors, and matrices of every dimension and type.
+ TODO: add test for: structs - arrays of vectors and matrices - arrays of different length
+`
+ )
+ .params(u =>
+ u
+ .combine('variableOrConstant', ['var', 'let'])
+ .beginSubcases()
+ .combine('lhsType', kTestTypes)
+ .combine('rhsType', kTestTypes)
+ )
+ .fn(t => {
+ const { variableOrConstant, lhsType, rhsType } = t.params;
+
+ const code = `
+ @fragment
+ fn main() {
+ ${variableOrConstant} a : ${lhsType} = ${rhsType}();
+ }
+ `;
+
+ const expectation = lhsType === rhsType;
+ t.expectCompileResult(expectation, code);
+ });
+
+g.test('var_access_mode_bad_other_template_contents')
+ .desc(
+ 'A variable declaration with explicit access mode with varying other template list contents'
+ )
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#var-decls')
+ .params(u =>
+ u
+ .combine('accessMode', ['read', 'read_write'])
+ .combine('prefix', ['storage,', '', ','])
+ .combine('suffix', [',storage', ',read', ',', ''])
+ )
+ .fn(t => {
+ const prog = `@group(0) @binding(0)
+ var<${t.params.prefix}${t.params.accessMode}${t.params.suffix}> x: i32;`;
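+    // Only the canonical template list `var<storage, ACCESS_MODE>` is expected to compile;
+    // any extra, repeated, or missing arguments should be rejected.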
+ const ok = t.params.prefix === 'storage,' && t.params.suffix === '';
+ t.expectCompileResult(ok, prog);
+ });
+
+g.test('var_access_mode_bad_template_delim')
+ .desc('A variable declaration has explicit access mode with varying template list delimiters')
+ .specURL('https://gpuweb.github.io/gpuweb/wgsl/#var-decls')
+ .params(u =>
+ u
+ .combine('accessMode', ['read', 'read_write'])
+ .combine('prefix', ['', '<', '>', ','])
+ .combine('suffix', ['', '<', '>', ','])
+ )
+ .fn(t => {
+ const prog = `@group(0) @binding(0)
+ var ${t.params.prefix}storage,${t.params.accessMode}${t.params.suffix} x: i32;`;
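+    // Only a properly delimited template list, `var<storage,ACCESS_MODE>`, is expected to compile.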
+ const ok = t.params.prefix === '<' && t.params.suffix === '>';
+ t.expectCompileResult(ok, prog);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/binding.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/binding.spec.ts
new file mode 100644
index 0000000000..2462025016
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/binding.spec.ts
@@ -0,0 +1,140 @@
+export const description = `Validation tests for binding`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kTests = {
+ const_expr: {
+ src: `const z = 5;
+ const y = 2;
+ @binding(z + y)`,
+ pass: true,
+ },
+ override_expr: {
+ src: `override z = 5;
+ @binding(z)`,
+ pass: false,
+ },
+
+ zero: {
+ src: `@binding(0)`,
+ pass: true,
+ },
+ one: {
+ src: `@binding(1)`,
+ pass: true,
+ },
+ comment: {
+ src: `@/* comment */binding(1)`,
+ pass: true,
+ },
+ split_line: {
+ src: '@ \n binding(1)',
+ pass: true,
+ },
+ trailing_comma: {
+ src: `@binding(1,)`,
+ pass: true,
+ },
+ int_literal: {
+ src: `@binding(1i)`,
+ pass: true,
+ },
+ uint_literal: {
+ src: `@binding(1u)`,
+ pass: true,
+ },
+ hex_literal: {
+ src: `@binding(0x1)`,
+ pass: true,
+ },
+
+ negative: {
+ src: `@binding(-1)`,
+ pass: false,
+ },
+ missing_value: {
+ src: `@binding()`,
+ pass: false,
+ },
+ missing_left_paren: {
+ src: `@binding 1)`,
+ pass: false,
+ },
+ missing_right_paren: {
+ src: `@binding(1`,
+ pass: false,
+ },
+ multiple_values: {
+ src: `@binding(1,2)`,
+ pass: false,
+ },
+ f32_val_literal: {
+ src: `@binding(1.0)`,
+ pass: false,
+ },
+ f32_val: {
+ src: `@binding(1f)`,
+ pass: false,
+ },
+ no_params: {
+ src: `@binding`,
+ pass: false,
+ },
+ misspelling: {
+ src: `@abinding(1)`,
+ pass: false,
+ },
+ multi_binding: {
+ src: `@binding(1) @binding(1)`,
+ pass: false,
+ },
+};
+g.test('binding')
+ .desc(`Test validation of binding`)
+ .params(u => u.combine('attr', keysOf(kTests)))
+ .fn(t => {
+ const code = `
+${kTests[t.params.attr].src} @group(1)
+var<storage> a: i32;
+
+@workgroup_size(1, 1, 1)
+@compute fn main() {
+ _ = a;
+}`;
+ t.expectCompileResult(kTests[t.params.attr].pass, code);
+ });
+
+g.test('binding_f16')
+ .desc(`Test validation of binding with f16`)
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(t => {
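+    // Even with shader-f16 enabled, an f16 literal is not expected to be a valid @binding value.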
+ const code = `
+@group(1) @binding(1h)
+var<storage> a: i32;
+
+@workgroup_size(1, 1, 1)
+@compute fn main() {
+ _ = a;
+}`;
+ t.expectCompileResult(false, code);
+ });
+
+g.test('binding_without_group')
+ .desc(`Test validation of binding without group`)
+ .fn(t => {
+ const code = `
+@binding(1)
+var<storage> a: i32;
+
+@workgroup_size(1, 1, 1)
+@compute fn main() {
+ _ = a;
+}`;
+ t.expectCompileResult(false, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/builtins.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/builtins.spec.ts
new file mode 100644
index 0000000000..4b32d05539
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/builtins.spec.ts
@@ -0,0 +1,277 @@
+export const description = `Validation tests for entry point built-in variables`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+import { generateShader } from './util.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+// List of all built-in variables and their stage, in|out usage, and type.
+// Taken from table in Section 15:
+// https://www.w3.org/TR/2021/WD-WGSL-20211013/#builtin-variables
+export const kBuiltins = [
+ { name: 'vertex_index', stage: 'vertex', io: 'in', type: 'u32' },
+ { name: 'instance_index', stage: 'vertex', io: 'in', type: 'u32' },
+ { name: 'position', stage: 'vertex', io: 'out', type: 'vec4<f32>' },
+ { name: 'position', stage: 'fragment', io: 'in', type: 'vec4<f32>' },
+ { name: 'front_facing', stage: 'fragment', io: 'in', type: 'bool' },
+ { name: 'frag_depth', stage: 'fragment', io: 'out', type: 'f32' },
+ { name: 'local_invocation_id', stage: 'compute', io: 'in', type: 'vec3<u32>' },
+ { name: 'local_invocation_index', stage: 'compute', io: 'in', type: 'u32' },
+ { name: 'global_invocation_id', stage: 'compute', io: 'in', type: 'vec3<u32>' },
+ { name: 'workgroup_id', stage: 'compute', io: 'in', type: 'vec3<u32>' },
+ { name: 'num_workgroups', stage: 'compute', io: 'in', type: 'vec3<u32>' },
+ { name: 'sample_index', stage: 'fragment', io: 'in', type: 'u32' },
+ { name: 'sample_mask', stage: 'fragment', io: 'in', type: 'u32' },
+ { name: 'sample_mask', stage: 'fragment', io: 'out', type: 'u32' },
+] as const;
+
+// List of types to test against.
+const kTestTypes = [
+ 'bool',
+ 'u32',
+ 'i32',
+ 'f32',
+ 'vec2<bool>',
+ 'vec2<u32>',
+ 'vec2<i32>',
+ 'vec2<f32>',
+ 'vec3<bool>',
+ 'vec3<u32>',
+ 'vec3<i32>',
+ 'vec3<f32>',
+ 'vec4<bool>',
+ 'vec4<u32>',
+ 'vec4<i32>',
+ 'vec4<f32>',
+ 'mat2x2<f32>',
+ 'mat2x3<f32>',
+ 'mat2x4<f32>',
+ 'mat3x2<f32>',
+ 'mat3x3<f32>',
+ 'mat3x4<f32>',
+ 'mat4x2<f32>',
+ 'mat4x3<f32>',
+ 'mat4x4<f32>',
+ 'atomic<u32>',
+ 'atomic<i32>',
+ 'array<bool,4>',
+ 'array<u32,4>',
+ 'array<i32,4>',
+ 'array<f32,4>',
+ 'MyStruct',
+] as const;
+
+g.test('stage_inout')
+ .desc(
+ `Test that each @builtin attribute is validated against the required stage and in/out usage for that built-in variable.`
+ )
+ .params(u =>
+ u
+ .combineWithParams(kBuiltins)
+ .combine('use_struct', [true, false] as const)
+ .combine('target_stage', ['', 'vertex', 'fragment', 'compute'] as const)
+ .combine('target_io', ['in', 'out'] as const)
+ .beginSubcases()
+ )
+ .fn(t => {
+ const code = generateShader({
+ attribute: `@builtin(${t.params.name})`,
+ type: t.params.type,
+ stage: t.params.target_stage,
+ io: t.params.target_io,
+ use_struct: t.params.use_struct,
+ });
+
+ // Expect to pass iff the built-in table contains an entry that matches.
+ const expectation = kBuiltins.some(
+ x =>
+ x.name === t.params.name &&
+ (x.stage === t.params.target_stage ||
+ (t.params.use_struct && t.params.target_stage === '')) &&
+ (x.io === t.params.target_io || t.params.target_stage === '') &&
+ x.type === t.params.type
+ );
+ t.expectCompileResult(expectation, code);
+ });
+
+g.test('type')
+ .desc(
+ `Test that each @builtin attribute is validated against the required type of that built-in variable.`
+ )
+ .params(u =>
+ u
+ .combineWithParams(kBuiltins)
+ .combine('use_struct', [true, false] as const)
+ .combine('target_type', kTestTypes)
+ .beginSubcases()
+ )
+ .fn(t => {
+ let code = '';
+
+ if (t.params.target_type === 'MyStruct') {
+ // Generate a struct that contains the correct built-in type.
+ code += 'struct MyStruct {\n';
+ code += ` value : ${t.params.type}\n`;
+ code += '};\n\n';
+ }
+
+ code += generateShader({
+ attribute: `@builtin(${t.params.name})`,
+ type: t.params.target_type,
+ stage: t.params.stage,
+ io: t.params.io,
+ use_struct: t.params.use_struct,
+ });
+
+ // Expect to pass iff the built-in table contains an entry that matches.
+ const expectation = kBuiltins.some(
+ x =>
+ x.name === t.params.name &&
+ x.stage === t.params.stage &&
+ x.io === t.params.io &&
+ x.type === t.params.target_type
+ );
+ t.expectCompileResult(expectation, code);
+ });
+
+g.test('nesting')
+ .desc(`Test validation of nested built-in variables`)
+ .params(u =>
+ u
+ .combine('target_stage', ['fragment', ''] as const)
+ .combine('target_io', ['in', 'out'] as const)
+ .beginSubcases()
+ )
+ .fn(t => {
+ // Generate a struct that contains a sample_mask builtin, nested inside another struct.
+ let code = `
+ struct Inner {
+ @builtin(sample_mask) value : u32
+ };
+ struct Outer {
+ inner : Inner
+ };`;
+
+ code += generateShader({
+ attribute: '',
+ type: 'Outer',
+ stage: t.params.target_stage,
+ io: t.params.target_io,
+ use_struct: false,
+ });
+
+ // Expect to pass only if the struct is not used for entry point IO.
+ t.expectCompileResult(t.params.target_stage === '', code);
+ });
+
+g.test('duplicates')
+ .desc(`Test that duplicated built-in variables are validated.`)
+ .params(u =>
+ u
+ // Place two @builtin(sample_mask) attributes onto the entry point function.
+ // We use `sample_mask` as it is valid as both an input and output for the same entry point.
+ // The function:
+ // - has two non-struct parameters (`p1` and `p2`)
+ // - has two struct parameters each with two members (`s1{a,b}` and `s2{a,b}`)
+ // - returns a struct with two members (`ra` and `rb`)
+ // By default, all of these variables will have unique @location() attributes.
+ .combine('first', ['p1', 's1a', 's2a', 'ra'] as const)
+ .combine('second', ['p2', 's1b', 's2b', 'rb'] as const)
+ .beginSubcases()
+ )
+ .fn(t => {
+ const p1 =
+ t.params.first === 'p1' ? '@builtin(sample_mask)' : '@location(1) @interpolate(flat)';
+ const p2 =
+ t.params.second === 'p2' ? '@builtin(sample_mask)' : '@location(2) @interpolate(flat)';
+ const s1a =
+ t.params.first === 's1a' ? '@builtin(sample_mask)' : '@location(3) @interpolate(flat)';
+ const s1b =
+ t.params.second === 's1b' ? '@builtin(sample_mask)' : '@location(4) @interpolate(flat)';
+ const s2a =
+ t.params.first === 's2a' ? '@builtin(sample_mask)' : '@location(5) @interpolate(flat)';
+ const s2b =
+ t.params.second === 's2b' ? '@builtin(sample_mask)' : '@location(6) @interpolate(flat)';
+ const ra =
+ t.params.first === 'ra' ? '@builtin(sample_mask)' : '@location(1) @interpolate(flat)';
+ const rb =
+ t.params.second === 'rb' ? '@builtin(sample_mask)' : '@location(2) @interpolate(flat)';
+ const code = `
+ struct S1 {
+ ${s1a} a : u32,
+ ${s1b} b : u32,
+ };
+ struct S2 {
+ ${s2a} a : u32,
+ ${s2b} b : u32,
+ };
+ struct R {
+ ${ra} a : u32,
+ ${rb} b : u32,
+ };
+ @fragment
+ fn main(${p1} p1 : u32,
+ ${p2} p2 : u32,
+ s1 : S1,
+ s2 : S2,
+ ) -> R {
+ return R();
+ }
+ `;
+
+ // The test should fail if both @builtin(sample_mask) attributes are on the input parameters
+    // or structures, or if they are both on the output struct. Otherwise it should pass.
+ const firstIsRet = t.params.first === 'ra';
+ const secondIsRet = t.params.second === 'rb';
+ const expectation = firstIsRet !== secondIsRet;
+ t.expectCompileResult(expectation, code);
+ });
+
+g.test('missing_vertex_position')
+ .desc(`Test that vertex shaders are required to output @builtin(position).`)
+ .params(u =>
+ u
+ .combine('use_struct', [true, false] as const)
+ .combine('attribute', ['@builtin(position)', '@location(0)'] as const)
+ .beginSubcases()
+ )
+ .fn(t => {
+ const code = `
+ struct S {
+ ${t.params.attribute} value : vec4<f32>
+ };
+
+ @vertex
+ fn main() -> ${t.params.use_struct ? 'S' : `${t.params.attribute} vec4<f32>`} {
+ return ${t.params.use_struct ? 'S' : 'vec4<f32>'}();
+ }
+ `;
+
+ // Expect to pass only when using @builtin(position).
+ t.expectCompileResult(t.params.attribute === '@builtin(position)', code);
+ });
+
+g.test('reuse_builtin_name')
+ .desc(`Test that a builtin name can be used in different contexts`)
+ .params(u =>
+ u
+ .combineWithParams(kBuiltins)
+ .combine('use', ['alias', 'struct', 'function', 'module-var', 'function-var'])
+ )
+ .fn(t => {
+ let code = '';
+ if (t.params.use === 'alias') {
+ code += `alias ${t.params.name} = i32;`;
+ } else if (t.params.use === `struct`) {
+ code += `struct ${t.params.name} { i: f32, }`;
+ } else if (t.params.use === `function`) {
+ code += `fn ${t.params.name}() {}`;
+ } else if (t.params.use === `module-var`) {
+ code += `const ${t.params.name} = 1;`;
+ } else if (t.params.use === `function-var`) {
+ code += `fn test() { let ${t.params.name} = 1; }`;
+ }
+ t.expectCompileResult(true, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/entry_point.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/entry_point.spec.ts
new file mode 100644
index 0000000000..9aa7319348
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/entry_point.spec.ts
@@ -0,0 +1,141 @@
+export const description = `Validation tests for attributes and entry point requirements`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+g.test('missing_attribute_on_param')
+ .desc(`Test that an entry point without an IO attribute on one of its parameters is rejected.`)
+ .params(u =>
+ u.combine('target_stage', ['', 'vertex', 'fragment', 'compute'] as const).beginSubcases()
+ )
+ .fn(t => {
+ const vertex_attr = t.params.target_stage === 'vertex' ? '' : '@location(1)';
+ const fragment_attr = t.params.target_stage === 'fragment' ? '' : '@location(1)';
+ const compute_attr = t.params.target_stage === 'compute' ? '' : '@builtin(workgroup_id)';
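+    // Only the attribute on parameter `b` of the targeted stage is dropped; `a` and `c` always
+    // keep valid IO attributes.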
+ const code = `
+@vertex
+fn vert_main(@location(0) a : f32,
+ ${vertex_attr} b : f32,
+@ location(2) c : f32) -> @builtin(position) vec4<f32> {
+ return vec4<f32>();
+}
+
+@fragment
+fn frag_main(@location(0) a : f32,
+ ${fragment_attr} b : f32,
+@ location(2) c : f32) {
+}
+
+@compute @workgroup_size(1)
+fn comp_main(@builtin(global_invocation_id) a : vec3<u32>,
+ ${compute_attr} b : vec3<u32>,
+ @builtin(local_invocation_id) c : vec3<u32>) {
+}
+`;
+ t.expectCompileResult(t.params.target_stage === '', code);
+ });
+
+g.test('missing_attribute_on_param_struct')
+ .desc(
+ `Test that an entry point struct parameter without an IO attribute on one of its members is rejected.`
+ )
+ .params(u =>
+ u.combine('target_stage', ['', 'vertex', 'fragment', 'compute'] as const).beginSubcases()
+ )
+ .fn(t => {
+ const vertex_attr = t.params.target_stage === 'vertex' ? '' : '@location(1)';
+ const fragment_attr = t.params.target_stage === 'fragment' ? '' : '@location(1)';
+ const compute_attr = t.params.target_stage === 'compute' ? '' : '@builtin(workgroup_id)';
+ const code = `
+struct VertexInputs {
+ @location(0) a : f32,
+ ${vertex_attr} b : f32,
+@ location(2) c : f32,
+};
+struct FragmentInputs {
+ @location(0) a : f32,
+ ${fragment_attr} b : f32,
+@ location(2) c : f32,
+};
+struct ComputeInputs {
+ @builtin(global_invocation_id) a : vec3<u32>,
+ ${compute_attr} b : vec3<u32>,
+ @builtin(local_invocation_id) c : vec3<u32>,
+};
+
+@vertex
+fn vert_main(inputs : VertexInputs) -> @builtin(position) vec4<f32> {
+ return vec4<f32>();
+}
+
+@fragment
+fn frag_main(inputs : FragmentInputs) {
+}
+
+@compute @workgroup_size(1)
+fn comp_main(inputs : ComputeInputs) {
+}
+`;
+ t.expectCompileResult(t.params.target_stage === '', code);
+ });
+
+g.test('missing_attribute_on_return_type')
+ .desc(`Test that an entry point without an IO attribute on its return type is rejected.`)
+ .params(u => u.combine('target_stage', ['', 'vertex', 'fragment'] as const).beginSubcases())
+ .fn(t => {
+ const vertex_attr = t.params.target_stage === 'vertex' ? '' : '@builtin(position)';
+ const fragment_attr = t.params.target_stage === 'fragment' ? '' : '@location(0)';
+ const code = `
+@vertex
+fn vert_main() -> ${vertex_attr} vec4<f32> {
+ return vec4<f32>();
+}
+
+@fragment
+fn frag_main() -> ${fragment_attr} vec4<f32> {
+ return vec4<f32>();
+}
+`;
+ t.expectCompileResult(t.params.target_stage === '', code);
+ });
+
+g.test('missing_attribute_on_return_type_struct')
+ .desc(
+ `Test that an entry point struct return type without an IO attribute on one of its members is rejected.`
+ )
+ .params(u => u.combine('target_stage', ['', 'vertex', 'fragment'] as const).beginSubcases())
+ .fn(t => {
+ const vertex_attr = t.params.target_stage === 'vertex' ? '' : '@location(1)';
+ const fragment_attr = t.params.target_stage === 'fragment' ? '' : '@location(1)';
+ const code = `
+struct VertexOutputs {
+ @location(0) a : f32,
+ ${vertex_attr} b : f32,
+ @builtin(position) c : vec4<f32>,
+};
+struct FragmentOutputs {
+ @location(0) a : f32,
+ ${fragment_attr} b : f32,
+@ location(2) c : f32,
+};
+
+@vertex
+fn vert_main() -> VertexOutputs {
+ return VertexOutputs();
+}
+
+@fragment
+fn frag_main() -> FragmentOutputs {
+ return FragmentOutputs();
+}
+`;
+ t.expectCompileResult(t.params.target_stage === '', code);
+ });
+
+g.test('no_entry_point_provided')
+ .desc(`Tests that a shader without an entry point is accepted`)
+ .fn(t => {
+ t.expectCompileResult(true, 'fn main() {}');
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/group.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/group.spec.ts
new file mode 100644
index 0000000000..4d37c43a99
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/group.spec.ts
@@ -0,0 +1,140 @@
+export const description = `Validation tests for group`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kTests = {
+ const_expr: {
+ src: `const z = 5;
+ const y = 2;
+ @group(z + y)`,
+ pass: true,
+ },
+ override_expr: {
+ src: `override z = 5;
+ @group(z)`,
+ pass: false,
+ },
+
+ zero: {
+ src: `@group(0)`,
+ pass: true,
+ },
+ one: {
+ src: `@group(1)`,
+ pass: true,
+ },
+ comment: {
+ src: `@/* comment */group(1)`,
+ pass: true,
+ },
+ split_line: {
+ src: '@ \n group(1)',
+ pass: true,
+ },
+ trailing_comma: {
+ src: `@group(1,)`,
+ pass: true,
+ },
+ int_literal: {
+ src: `@group(1i)`,
+ pass: true,
+ },
+ uint_literal: {
+ src: `@group(1u)`,
+ pass: true,
+ },
+ hex_literal: {
+ src: `@group(0x1)`,
+ pass: true,
+ },
+
+ negative: {
+ src: `@group(-1)`,
+ pass: false,
+ },
+ missing_value: {
+ src: `@group()`,
+ pass: false,
+ },
+ missing_left_paren: {
+ src: `@group 1)`,
+ pass: false,
+ },
+ missing_right_paren: {
+ src: `@group(1`,
+ pass: false,
+ },
+ multiple_values: {
+ src: `@group(1,2)`,
+ pass: false,
+ },
+ f32_val_literal: {
+ src: `@group(1.0)`,
+ pass: false,
+ },
+ f32_val: {
+ src: `@group(1f)`,
+ pass: false,
+ },
+ no_params: {
+ src: `@group`,
+ pass: false,
+ },
+ misspelling: {
+ src: `@agroup(1)`,
+ pass: false,
+ },
+ multi_group: {
+ src: `@group(1) @group(1)`,
+ pass: false,
+ },
+};
+g.test('group')
+ .desc(`Test validation of group`)
+ .params(u => u.combine('attr', keysOf(kTests)))
+ .fn(t => {
+ const code = `
+${kTests[t.params.attr].src} @binding(1)
+var<storage> a: i32;
+
+@workgroup_size(1, 1, 1)
+@compute fn main() {
+ _ = a;
+}`;
+ t.expectCompileResult(kTests[t.params.attr].pass, code);
+ });
+
+g.test('group_f16')
+ .desc(`Test validation of group with f16`)
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(t => {
+ const code = `
+@group(1h) @binding(1)
+var<storage> a: i32;
+
+@workgroup_size(1, 1, 1)
+@compute fn main() {
+ _ = a;
+}`;
+ t.expectCompileResult(false, code);
+ });
+
+g.test('group_without_binding')
+ .desc(`Test validation of group without binding`)
+ .fn(t => {
+ const code = `
+@group(1)
+var<storage> a: i32;
+
+@workgroup_size(1, 1, 1)
+@compute fn main() {
+ _ = a;
+}`;
+ t.expectCompileResult(false, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/group_and_binding.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/group_and_binding.spec.ts
new file mode 100644
index 0000000000..08b4b2738a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/group_and_binding.spec.ts
@@ -0,0 +1,171 @@
+export const description = `Validation tests for group and binding`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+import {
+ declareEntrypoint,
+ kResourceEmitters,
+ kResourceKindsA,
+ kResourceKindsAll,
+ kResourceKindsB,
+ ResourceDeclarationEmitter,
+} from './util.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+g.test('binding_attributes')
+  .desc(`Test that the @group and @binding attributes must both be declared.`)
+ .params(u =>
+ u
+ .combine('stage', ['vertex', 'fragment', 'compute'] as const)
+ .combine('has_group', [true, false] as const)
+ .combine('has_binding', [true, false] as const)
+ .combine('resource', kResourceKindsAll)
+ .beginSubcases()
+ )
+ .fn(t => {
+ const emitter = kResourceEmitters.get(t.params.resource) as ResourceDeclarationEmitter;
+ const code = emitter(
+ 'R',
+ t.params.has_group ? 0 : undefined,
+ t.params.has_binding ? 0 : undefined
+ );
+ const expect = t.params.has_group && t.params.has_binding;
+ t.expectCompileResult(expect, code);
+ });
+
+g.test('private_module_scope')
+ .desc(`Test validation of group and binding on private resources`)
+ .fn(t => {
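+    // @group/@binding only apply to resource variables, so they should be rejected on a
+    // module-scope private variable.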
+ const code = `
+@group(1) @binding(1)
+var<private> a: i32;
+
+@workgroup_size(1, 1, 1)
+@compute fn main() {
+ _ = a;
+}`;
+ t.expectCompileResult(false, code);
+ });
+
+g.test('private_function_scope')
+ .desc(`Test validation of group and binding on function-scope private resources`)
+ .fn(t => {
+ const code = `
+@workgroup_size(1, 1, 1)
+@compute fn main() {
+ @group(1) @binding(1)
+ var<private> a: i32;
+}`;
+ t.expectCompileResult(false, code);
+ });
+
+g.test('function_scope')
+  .desc(`Test validation of group and binding on function-scope variables`)
+ .fn(t => {
+ const code = `
+@workgroup_size(1, 1, 1)
+@compute fn main() {
+ @group(1) @binding(1)
+ var a: i32;
+}`;
+ t.expectCompileResult(false, code);
+ });
+
+g.test('function_scope_texture')
+  .desc(`Test validation of group and binding on function-scope texture variables`)
+ .fn(t => {
+ const code = `
+@workgroup_size(1, 1, 1)
+@compute fn main() {
+ @group(1) @binding(1)
+ var a: texture_2d<f32>;
+}`;
+ t.expectCompileResult(false, code);
+ });
+
+g.test('single_entry_point')
+ .desc(
+ `Test that two different resource variables in a shader must not have the same group and binding values, when considered as a pair.`
+ )
+ .params(u =>
+ u
+ .combine('stage', ['vertex', 'fragment', 'compute'] as const)
+ .combine('a_kind', kResourceKindsA)
+ .combine('b_kind', kResourceKindsB)
+ .combine('a_group', [0, 3] as const)
+ .combine('b_group', [0, 3] as const)
+ .combine('a_binding', [0, 3] as const)
+ .combine('b_binding', [0, 3] as const)
+ .combine('usage', ['direct', 'transitive'] as const)
+ .beginSubcases()
+ )
+ .fn(t => {
+ const resourceA = kResourceEmitters.get(t.params.a_kind) as ResourceDeclarationEmitter;
+ const resourceB = kResourceEmitters.get(t.params.b_kind) as ResourceDeclarationEmitter;
+ const resources = `
+${resourceA('resource_a', t.params.a_group, t.params.a_binding)}
+${resourceB('resource_b', t.params.b_group, t.params.b_binding)}
+`;
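+    // Two resources clash only when both their group and binding numbers are the same.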
+ const expect =
+ t.params.a_group !== t.params.b_group || t.params.a_binding !== t.params.b_binding;
+
+ if (t.params.usage === 'direct') {
+ const code = `
+${resources}
+${declareEntrypoint('main', t.params.stage, '_ = resource_a; _ = resource_b;')}
+`;
+ t.expectCompileResult(expect, code);
+ } else {
+ const code = `
+${resources}
+fn use_a() { _ = resource_a; }
+fn use_b() { _ = resource_b; }
+${declareEntrypoint('main', t.params.stage, 'use_a(); use_b();')}
+`;
+ t.expectCompileResult(expect, code);
+ }
+ });
+
+g.test('different_entry_points')
+ .desc(
+ `Test that resources may use the same binding points if exclusively accessed by different entry points.`
+ )
+ .params(u =>
+ u
+ .combine('a_stage', ['vertex', 'fragment', 'compute'] as const)
+ .combine('b_stage', ['vertex', 'fragment', 'compute'] as const)
+ .combine('a_kind', kResourceKindsA)
+ .combine('b_kind', kResourceKindsB)
+ .combine('usage', ['direct', 'transitive'] as const)
+ .beginSubcases()
+ )
+ .fn(t => {
+ const resourceA = kResourceEmitters.get(t.params.a_kind) as ResourceDeclarationEmitter;
+ const resourceB = kResourceEmitters.get(t.params.b_kind) as ResourceDeclarationEmitter;
+ const resources = `
+${resourceA('resource_a', /* group */ 0, /* binding */ 0)}
+${resourceB('resource_b', /* group */ 0, /* binding */ 0)}
+`;
+ const expect = true; // Binding reuse between different entry points is fine.
+
+ if (t.params.usage === 'direct') {
+ const code = `
+${resources}
+${declareEntrypoint('main_a', t.params.a_stage, '_ = resource_a;')}
+${declareEntrypoint('main_b', t.params.b_stage, '_ = resource_b;')}
+`;
+ t.expectCompileResult(expect, code);
+ } else {
+ const code = `
+${resources}
+fn use_a() { _ = resource_a; }
+fn use_b() { _ = resource_b; }
+${declareEntrypoint('main_a', t.params.a_stage, 'use_a();')}
+${declareEntrypoint('main_b', t.params.b_stage, 'use_b();')}
+`;
+ t.expectCompileResult(expect, code);
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/id.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/id.spec.ts
new file mode 100644
index 0000000000..67df4ba875
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/id.spec.ts
@@ -0,0 +1,170 @@
+export const description = `Validation tests for id`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kTests = {
+ zero: {
+ src: `@id(0)`,
+ pass: true,
+ },
+ one: {
+ src: `@id(1)`,
+ pass: true,
+ },
+ hex: {
+ src: `@id(0x1)`,
+ pass: true,
+ },
+ trailing_comma: {
+ src: `@id(1,)`,
+ pass: true,
+ },
+ i32: {
+ src: `@id(1i)`,
+ pass: true,
+ },
+ ui32: {
+ src: `@id(1u)`,
+ pass: true,
+ },
+ largest: {
+ src: `@id(65535)`,
+ pass: true,
+ },
+ newline: {
+ src: '@\nid(1)',
+ pass: true,
+ },
+ comment: {
+ src: `@/* comment */id(1)`,
+ pass: true,
+ },
+ const_expr: {
+ src: `const z = 5;
+ const y = 2;
+ @id(z + y)`,
+ pass: true,
+ },
+
+ misspelling: {
+ src: `@aid(1)`,
+ pass: false,
+ },
+ empty: {
+ src: `@id()`,
+ pass: false,
+ },
+ missing_left_paren: {
+ src: `@id 1)`,
+ pass: false,
+ },
+ missing_right_paren: {
+ src: `@id(1`,
+ pass: false,
+ },
+ multi_value: {
+ src: `@id(1, 2)`,
+ pass: false,
+ },
+  override_expr: {
+ src: `override z = 5;
+ override y = 2;
+ @id(z + y)`,
+ pass: false,
+ },
+ f32_literal: {
+ src: `@id(1.0)`,
+ pass: false,
+ },
+ f32: {
+ src: `@id(1f)`,
+ pass: false,
+ },
+ negative: {
+ src: `@id(-1)`,
+ pass: false,
+ },
+ too_large: {
+ src: `@id(65536)`,
+ pass: false,
+ },
+ no_params: {
+ src: `@id`,
+ pass: false,
+ },
+ duplicate: {
+ src: `@id(1) @id(1)`,
+ pass: false,
+ },
+};
+
+g.test('id')
+ .desc(`Test validation of id`)
+ .params(u => u.combine('attr', keysOf(kTests)))
+ .fn(t => {
+ const code = `
+${kTests[t.params.attr].src}
+override a = 4;
+
+@workgroup_size(1, 1, 1)
+@compute fn main() {}`;
+ t.expectCompileResult(kTests[t.params.attr].pass, code);
+ });
+
+g.test('id_fp16')
+ .desc(`Test validation of id with fp16`)
+ .params(u => u.combine('ext', ['', 'h']))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(t => {
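+    // An f16-suffixed literal should be rejected as an @id value even when shader-f16 is enabled.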
+ const code = `
+@id(1${t.params.ext})
+override a = 4;
+
+@workgroup_size(1, 1, 1)
+@compute fn main() {}`;
+ t.expectCompileResult(t.params.ext === '', code);
+ });
+
+g.test('id_struct_member')
+ .desc(`Test validation of id with struct member`)
+ .params(u => u.combine('id', ['@id(1) override', '@id(1)', '']))
+ .fn(t => {
+ const code = `
+struct S {
+ ${t.params.id} a: i32,
+}
+
+@workgroup_size(1, 1, 1)
+@compute fn main() {}`;
+ t.expectCompileResult(t.params.id === '', code);
+ });
+
+g.test('id_non_override')
+ .desc(`Test validation of id with non-override`)
+ .params(u => u.combine('type', ['var', 'const', 'override']))
+ .fn(t => {
+ const code = `
+@id(1) ${t.params['type']} a = 4;
+
+@workgroup_size(1, 1, 1)
+@compute fn main() {}`;
+ t.expectCompileResult(t.params['type'] === 'override', code);
+ });
+
+g.test('id_in_function')
+ .desc(`Test validation of id inside a function`)
+ .params(u => u.combine('id', ['@id(1)', '']))
+ .fn(t => {
+ const code = `
+@workgroup_size(1, 1, 1)
+@compute fn main() {
+ ${t.params['id']} var a = 4;
+}`;
+ t.expectCompileResult(t.params['id'] === '', code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/interpolate.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/interpolate.spec.ts
new file mode 100644
index 0000000000..1e18ba0bc7
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/interpolate.spec.ts
@@ -0,0 +1,217 @@
+export const description = `Validation tests for the interpolate attribute`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+import { generateShader } from './util.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+// List of valid interpolation attributes.
+const kValidInterpolationAttributes = new Set([
+ '',
+ '@interpolate(flat)',
+ '@interpolate(perspective)',
+ '@interpolate(perspective, center)',
+ '@interpolate(perspective, centroid)',
+ '@interpolate(perspective, sample)',
+ '@interpolate(linear)',
+ '@interpolate(linear, center)',
+ '@interpolate(linear, centroid)',
+ '@interpolate(linear, sample)',
+]);
+
+g.test('type_and_sampling')
+ .desc(`Test that all combinations of interpolation type and sampling are validated correctly.`)
+ .params(u =>
+ u
+ .combine('stage', ['vertex', 'fragment'] as const)
+ .combine('io', ['in', 'out'] as const)
+ .combine('use_struct', [true, false] as const)
+ .combine('type', [
+ '',
+ 'flat',
+ 'perspective',
+ 'linear',
+ 'center', // Invalid as first param
+ 'centroid', // Invalid as first param
+ 'sample', // Invalid as first param
+ ] as const)
+ .combine('sampling', [
+ '',
+ 'center',
+ 'centroid',
+ 'sample',
+ 'flat', // Invalid as second param
+ 'perspective', // Invalid as second param
+ 'linear', // Invalid as second param
+ ] as const)
+ .beginSubcases()
+ )
+ .fn(t => {
+ if (t.params.stage === 'vertex' && t.params.use_struct === false) {
+ t.skip('vertex output must include a position builtin, so must use a struct');
+ }
+
+ let interpolate = '';
+ if (t.params.type !== '' || t.params.sampling !== '') {
+ interpolate = '@interpolate(';
+ if (t.params.type !== '') {
+ interpolate += `${t.params.type}`;
+ }
+ if (t.params.sampling !== '') {
+ interpolate += `, ${t.params.sampling}`;
+ }
+ interpolate += `)`;
+ }
+ const code = generateShader({
+ attribute: '@location(0)' + interpolate,
+ type: 'f32',
+ stage: t.params.stage,
+ io: t.params.io,
+ use_struct: t.params.use_struct,
+ });
+
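+    // Expect to pass only when the assembled attribute matches one of the valid combinations above.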
+ t.expectCompileResult(kValidInterpolationAttributes.has(interpolate), code);
+ });
+
+g.test('require_location')
+ .desc(`Test that the interpolate attribute is only accepted with user-defined IO.`)
+ .params(u =>
+ u
+ .combine('stage', ['vertex', 'fragment'] as const)
+ .combine('attribute', ['@location(0)', '@builtin(position)'] as const)
+ .combine('use_struct', [true, false] as const)
+ .beginSubcases()
+ )
+ .fn(t => {
+ if (
+ t.params.stage === 'vertex' &&
+ t.params.use_struct === false &&
+ !t.params.attribute.includes('position')
+ ) {
+ t.skip('vertex output must include a position builtin, so must use a struct');
+ }
+
+ const code = generateShader({
+ attribute: t.params.attribute + `@interpolate(flat)`,
+ type: 'vec4<f32>',
+ stage: t.params.stage,
+ io: t.params.stage === 'fragment' ? 'in' : 'out',
+ use_struct: t.params.use_struct,
+ });
+ t.expectCompileResult(t.params.attribute === '@location(0)', code);
+ });
+
+g.test('integral_types')
+ .desc(`Test that the implementation requires @interpolate(flat) for integral user-defined IO.`)
+ .params(u =>
+ u
+ .combine('stage', ['vertex', 'fragment'] as const)
+ .combine('type', ['i32', 'u32', 'vec2<i32>', 'vec4<u32>'] as const)
+ .combine('use_struct', [true, false] as const)
+ .combine('attribute', kValidInterpolationAttributes)
+ .beginSubcases()
+ )
+ .fn(t => {
+ if (t.params.stage === 'vertex' && t.params.use_struct === false) {
+ t.skip('vertex output must include a position builtin, so must use a struct');
+ }
+
+ const code = generateShader({
+ attribute: '@location(0)' + t.params.attribute,
+ type: t.params.type,
+ stage: t.params.stage,
+ io: t.params.stage === 'vertex' ? 'out' : 'in',
+ use_struct: t.params.use_struct,
+ });
+
+ t.expectCompileResult(t.params.attribute === '@interpolate(flat)', code);
+ });
+
+g.test('duplicate')
+ .desc(`Test that the interpolate attribute can only be applied once.`)
+ .params(u => u.combine('attr', ['', '@interpolate(flat)'] as const))
+ .fn(t => {
+ const code = generateShader({
+ attribute: `@location(0) @interpolate(flat) ${t.params.attr}`,
+ type: 'vec4<f32>',
+ stage: 'fragment',
+ io: 'in',
+ use_struct: false,
+ });
+ t.expectCompileResult(t.params.attr === '', code);
+ });
+
+const kValidationTests = {
+ valid: {
+ src: `@interpolate(flat)`,
+ pass: true,
+ },
+ no_space: {
+ src: `@interpolate(perspective,center)`,
+ pass: true,
+ },
+ trailing_comma_one_arg: {
+ src: `@interpolate(flat,)`,
+ pass: true,
+ },
+ trailing_comma_two_arg: {
+ src: `@interpolate(perspective, center,)`,
+ pass: true,
+ },
+ newline: {
+ src: '@\ninterpolate(flat)',
+ pass: true,
+ },
+ comment: {
+ src: `@/* comment */interpolate(flat)`,
+ pass: true,
+ },
+
+ no_params: {
+ src: `@interpolate()`,
+ pass: false,
+ },
+ missing_left_paren: {
+ src: `@interpolate flat)`,
+ pass: false,
+ },
+ missing_value_and_left_paren: {
+ src: `@interpolate)`,
+ pass: false,
+ },
+ missing_right_paren: {
+ src: `@interpolate(flat`,
+ pass: false,
+ },
+ missing_parens: {
+ src: `@interpolate`,
+ pass: false,
+ },
+ missing_comma: {
+ src: `@interpolate(perspective center)`,
+ pass: false,
+ },
+ numeric: {
+ src: `@interpolate(1)`,
+ pass: false,
+ },
+ numeric_second_param: {
+ src: `@interpolate(perspective, 1)`,
+ pass: false,
+ },
+};
+
+g.test('interpolation_validation')
+ .desc(`Test validation of interpolation`)
+ .params(u => u.combine('attr', keysOf(kValidationTests)))
+ .fn(t => {
+ const code = `
+@vertex fn main(${kValidationTests[t.params.attr].src} @location(0) b: f32) ->
+ @builtin(position) vec4<f32> {
+ return vec4f(0);
+}`;
+ t.expectCompileResult(kValidationTests[t.params.attr].pass, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/invariant.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/invariant.spec.ts
new file mode 100644
index 0000000000..64ab91fa34
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/invariant.spec.ts
@@ -0,0 +1,99 @@
+export const description = `Validation tests for the invariant attribute`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+import { kBuiltins } from './builtins.spec.js';
+import { generateShader } from './util.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kTests = {
+ invariant: {
+ src: `@invariant`,
+ pass: true,
+ },
+ comment: {
+ src: `@/* comment */invariant`,
+ pass: true,
+ },
+ split_line: {
+ src: '@\ninvariant',
+ pass: true,
+ },
+ empty_parens: {
+ src: `@invariant()`,
+ pass: false,
+ },
+ value: {
+ src: `@invariant(0)`,
+ pass: false,
+ },
+ missing_right_paren: {
+ src: `@invariant(`,
+ pass: false,
+ },
+ missing_left_paren: {
+ src: `@invariant)`,
+ pass: false,
+ },
+ duplicate: {
+ src: `@invariant @invariant`,
+ pass: false,
+ },
+};
+
+g.test('parsing')
+ .desc(`Test parsing of the invariant attribute`)
+ .params(u => u.combine('attr', keysOf(kTests)))
+ .fn(t => {
+ const code = `
+ struct VertexOut {
+ @builtin(position) ${kTests[t.params.attr].src} position : vec4<f32>
+ };
+ @vertex
+ fn main() -> VertexOut {
+ return VertexOut();
+ }
+ `;
+ t.expectCompileResult(kTests[t.params.attr].pass, code);
+ });
+
+g.test('valid_only_with_vertex_position_builtin')
+ .desc(`Test that the invariant attribute is only accepted with the vertex position builtin`)
+ .params(u =>
+ u
+ .combineWithParams(kBuiltins)
+ .combine('use_struct', [true, false] as const)
+ .beginSubcases()
+ )
+ .fn(t => {
+ const code = generateShader({
+ attribute: `@builtin(${t.params.name}) @invariant`,
+ type: t.params.type,
+ stage: t.params.stage,
+ io: t.params.io,
+ use_struct: t.params.use_struct,
+ });
+
+ t.expectCompileResult(t.params.name === 'position', code);
+ });
+
+g.test('not_valid_on_user_defined_io')
+ .desc(`Test that the invariant attribute is not accepted on user-defined IO attributes.`)
+ .params(u => u.combine('use_invariant', [true, false] as const).beginSubcases())
+ .fn(t => {
+ const invariant = t.params.use_invariant ? '@invariant' : '';
+ const code = `
+ struct VertexOut {
+ @location(0) ${invariant} loc0 : vec4<f32>,
+ @builtin(position) position : vec4<f32>,
+ };
+ @vertex
+ fn main() -> VertexOut {
+ return VertexOut();
+ }
+ `;
+ t.expectCompileResult(!t.params.use_invariant, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/locations.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/locations.spec.ts
new file mode 100644
index 0000000000..8452679d71
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/locations.spec.ts
@@ -0,0 +1,382 @@
+export const description = `Validation tests for entry point user-defined IO`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+import { generateShader } from './util.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kValidLocationTypes = new Set([
+ 'f16',
+ 'f32',
+ 'i32',
+ 'u32',
+ 'vec2<f32>',
+ 'vec2<i32>',
+ 'vec2<u32>',
+ 'vec3<f32>',
+ 'vec3<i32>',
+ 'vec3<u32>',
+ 'vec4<f32>',
+ 'vec4<i32>',
+ 'vec4<u32>',
+ 'vec2h',
+ 'vec2f',
+ 'vec2i',
+ 'vec2u',
+ 'vec3h',
+ 'vec3f',
+ 'vec3i',
+ 'vec3u',
+ 'vec4h',
+ 'vec4f',
+ 'vec4i',
+ 'vec4u',
+ 'MyAlias',
+]);
+
+const kInvalidLocationTypes = new Set([
+ 'bool',
+ 'vec2<bool>',
+ 'vec3<bool>',
+ 'vec4<bool>',
+ 'mat2x2<f32>',
+ 'mat2x3<f32>',
+ 'mat2x4<f32>',
+ 'mat3x2<f32>',
+ 'mat3x3<f32>',
+ 'mat3x4<f32>',
+ 'mat4x2<f32>',
+ 'mat4x3<f32>',
+ 'mat4x4<f32>',
+ 'mat2x2f',
+ 'mat2x3f',
+ 'mat2x4f',
+ 'mat3x2f',
+ 'mat3x3f',
+ 'mat3x4f',
+ 'mat4x2f',
+ 'mat4x3f',
+ 'mat4x4f',
+ 'mat2x2h',
+ 'mat2x3h',
+ 'mat2x4h',
+ 'mat3x2h',
+ 'mat3x3h',
+ 'mat3x4h',
+ 'mat4x2h',
+ 'mat4x3h',
+ 'mat4x4h',
+ 'array<f32, 12>',
+ 'array<i32, 12>',
+ 'array<u32, 12>',
+ 'array<bool, 12>',
+ 'atomic<i32>',
+ 'atomic<u32>',
+ 'MyStruct',
+ 'texture_1d<i32>',
+ 'texture_2d<f32>',
+ 'texture_2d_array<i32>',
+ 'texture_3d<f32>',
+ 'texture_cube<u32>',
+ 'texture_cube_array<i32>',
+ 'texture_multisampled_2d<i32>',
+ 'texture_external',
+ 'texture_storage_1d<rgba8unorm, write>',
+ 'texture_storage_2d<rg32float, write>',
+ 'texture_storage_2d_array<r32float, write>',
+ 'texture_storage_3d<r32float, write>',
+ 'texture_depth_2d',
+ 'texture_depth_2d_array',
+ 'texture_depth_cube',
+ 'texture_depth_cube_array',
+ 'texture_depth_multisampled_2d',
+ 'sampler',
+ 'sampler_comparison',
+]);
+
+g.test('stage_inout')
+ .desc(`Test validation of user-defined IO stage and in/out usage`)
+ .params(u =>
+ u
+ .combine('use_struct', [true, false] as const)
+ .combine('target_stage', ['vertex', 'fragment', 'compute'] as const)
+ .combine('target_io', ['in', 'out'] as const)
+ .beginSubcases()
+ )
+ .fn(t => {
+ const code = generateShader({
+ attribute: '@location(0)',
+ type: 'f32',
+ stage: t.params.target_stage,
+ io: t.params.target_io,
+ use_struct: t.params.use_struct,
+ });
+
+ // Expect to fail for compute shaders or when used as a non-struct vertex output (since the
+ // position built-in must also be specified).
+ const expectation =
+ t.params.target_stage === 'fragment' ||
+ (t.params.target_stage === 'vertex' && (t.params.target_io === 'in' || t.params.use_struct));
+ t.expectCompileResult(expectation, code);
+ });
+
+g.test('type')
+ .desc(`Test validation of user-defined IO types`)
+ .params(u =>
+ u
+ .combine('use_struct', [true, false] as const)
+ .combine('type', new Set([...kValidLocationTypes, ...kInvalidLocationTypes]))
+ .beginSubcases()
+ )
+ .beforeAllSubcases(t => {
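+    // f16 scalars and the h-suffixed vector/matrix shorthands require the shader-f16 feature.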
+ if (
+ t.params.type === 'f16' ||
+ ((t.params.type.startsWith('mat') || t.params.type.startsWith('vec')) &&
+ t.params.type.endsWith('h'))
+ ) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ let code = '';
+
+ if (
+ t.params.type === 'f16' ||
+ ((t.params.type.startsWith('mat') || t.params.type.startsWith('vec')) &&
+ t.params.type.endsWith('h'))
+ ) {
+ code += 'enable f16;\n';
+ }
+
+ if (t.params.type === 'MyStruct') {
+ // Generate a struct that contains a valid type.
+ code += `struct MyStruct {
+ value : f32,
+ }
+ `;
+ }
+ if (t.params.type === 'MyAlias') {
+ code += 'alias MyAlias = i32;\n';
+ }
+
+ code += generateShader({
+ attribute: '@location(0) @interpolate(flat)',
+ type: t.params.type,
+ stage: 'fragment',
+ io: 'in',
+ use_struct: t.params.use_struct,
+ });
+
+ t.expectCompileResult(kValidLocationTypes.has(t.params.type), code);
+ });
+
+g.test('nesting')
+ .desc(`Test validation of nested user-defined IO`)
+ .params(u =>
+ u
+ .combine('target_stage', ['vertex', 'fragment', ''] as const)
+ .combine('target_io', ['in', 'out'] as const)
+ .beginSubcases()
+ )
+ .fn(t => {
+ let code = '';
+
+ // Generate a struct that contains a valid type.
+ code += `struct Inner {
+ @location(0) value : f32,
+ }
+ struct Outer {
+ inner : Inner,
+ }
+ `;
+
+ code += generateShader({
+ attribute: '',
+ type: 'Outer',
+ stage: t.params.target_stage,
+ io: t.params.target_io,
+ use_struct: false,
+ });
+
+ // Expect to pass only if the struct is not used for entry point IO.
+ t.expectCompileResult(t.params.target_stage === '', code);
+ });
+
+g.test('duplicates')
+ .desc(`Test that duplicated user-defined IO attributes are validated.`)
+ .params(u =>
+ u
+ // Place two @location(0) attributes onto the entry point function.
+ // The function:
+ // - has two non-struct parameters (`p1` and `p2`)
+ // - has two struct parameters each with two members (`s1{a,b}` and `s2{a,b}`)
+ // - returns a struct with two members (`ra` and `rb`)
+ // By default, all of these user-defined IO variables will have unique location attributes.
+ .combine('first', ['p1', 's1a', 's2a', 'ra'] as const)
+ .combine('second', ['p2', 's1b', 's2b', 'rb'] as const)
+ .beginSubcases()
+ )
+ .fn(t => {
+ const p1 = t.params.first === 'p1' ? '0' : '1';
+ const p2 = t.params.second === 'p2' ? '0' : '2';
+ const s1a = t.params.first === 's1a' ? '0' : '3';
+ const s1b = t.params.second === 's1b' ? '0' : '4';
+ const s2a = t.params.first === 's2a' ? '0' : '5';
+ const s2b = t.params.second === 's2b' ? '0' : '6';
+ const ra = t.params.first === 'ra' ? '0' : '1';
+ const rb = t.params.second === 'rb' ? '0' : '2';
+ const code = `
+ struct S1 {
+ @location(${s1a}) a : f32,
+ @location(${s1b}) b : f32,
+ };
+ struct S2 {
+ @location(${s2a}) a : f32,
+ @location(${s2b}) b : f32,
+ };
+ struct R {
+ @location(${ra}) a : f32,
+ @location(${rb}) b : f32,
+ };
+ @fragment
+ fn main(@location(${p1}) p1 : f32,
+ @location(${p2}) p2 : f32,
+ s1 : S1,
+ s2 : S2,
+ ) -> R {
+ return R();
+ }
+ `;
+
+ // The test should fail if both @location(0) attributes are on the input parameters or
+    // structures, or if they are both on the output struct. Otherwise it should pass.
+ const firstIsRet = t.params.first === 'ra';
+ const secondIsRet = t.params.second === 'rb';
+ const expectation = firstIsRet !== secondIsRet;
+ t.expectCompileResult(expectation, code);
+ });
+
+const kValidationTests = {
+ zero: {
+ src: `@location(0)`,
+ pass: true,
+ },
+ one: {
+ src: `@location(1)`,
+ pass: true,
+ },
+ extra_comma: {
+ src: `@location(1,)`,
+ pass: true,
+ },
+ i32: {
+ src: `@location(1i)`,
+ pass: true,
+ },
+ u32: {
+ src: `@location(1u)`,
+ pass: true,
+ },
+ hex: {
+ src: `@location(0x1)`,
+ pass: true,
+ },
+ const_expr: {
+ src: `@location(a + b)`,
+ pass: true,
+ },
+ max: {
+ src: `@location(2147483647)`,
+ pass: true,
+ },
+ newline: {
+ src: '@\nlocation(1)',
+ pass: true,
+ },
+ comment: {
+ src: `@/* comment */location(1)`,
+ pass: true,
+ },
+
+ misspelling: {
+ src: `@mlocation(1)`,
+ pass: false,
+ },
+ no_parens: {
+ src: `@location`,
+ pass: false,
+ },
+ empty_params: {
+ src: `@location()`,
+ pass: false,
+ },
+ missing_left_paren: {
+ src: `@location 1)`,
+ pass: false,
+ },
+ missing_right_paren: {
+ src: `@location(1`,
+ pass: false,
+ },
+ extra_params: {
+ src: `@location(1, 2)`,
+ pass: false,
+ },
+ f32: {
+ src: `@location(1f)`,
+ pass: false,
+ },
+ f32_literal: {
+ src: `@location(1.0)`,
+ pass: false,
+ },
+ negative: {
+ src: `@location(-1)`,
+ pass: false,
+ },
+ override_expr: {
+ src: `@location(z + y)`,
+ pass: false,
+ },
+ vec: {
+ src: `@location(vec2(1,1))`,
+ pass: false,
+ },
+};
+g.test('validation')
+ .desc(`Test validation of location`)
+ .params(u => u.combine('attr', keysOf(kValidationTests)))
+ .fn(t => {
+ const code = `
+const a = 5;
+const b = 6;
+override z = 7;
+override y = 8;
+
+@vertex fn main(
+ ${kValidationTests[t.params.attr].src} res: f32
+) -> @builtin(position) vec4f {
+ return vec4f(0);
+}`;
+ t.expectCompileResult(kValidationTests[t.params.attr].pass, code);
+ });
+
+g.test('location_fp16')
+ .desc(`Test validation of location with fp16`)
+ .params(u => u.combine('ext', ['', 'h']))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(t => {
+ const code = `
+
+@vertex fn main(
+ @location(1${t.params.ext}) res: f32
+) -> @builtin(position) vec4f {
+ return vec4f();
+}`;
+ t.expectCompileResult(t.params.ext === '', code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/size.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/size.spec.ts
new file mode 100644
index 0000000000..f81dde4a1d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/size.spec.ts
@@ -0,0 +1,212 @@
+export const description = `Validation tests for size`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kSizeTests = {
+ valid: {
+ src: `@size(4)`,
+ pass: true,
+ },
+ non_align_size: {
+ src: `@size(5)`,
+ pass: true,
+ },
+ i32: {
+ src: `@size(4i)`,
+ pass: true,
+ },
+ u32: {
+ src: `@size(4u)`,
+ pass: true,
+ },
+ constant: {
+ src: `@size(z)`,
+ pass: true,
+ },
+ trailing_comma: {
+ src: `@size(4,)`,
+ pass: true,
+ },
+ hex: {
+ src: `@size(0x4)`,
+ pass: true,
+ },
+ whitespace: {
+ src: '@\nsize(4)',
+ pass: true,
+ },
+ comment: {
+ src: `@/* comment */size(4)`,
+ pass: true,
+ },
+ large: {
+ src: `@size(2147483647)`,
+ pass: true,
+ },
+
+ misspelling: {
+ src: `@msize(4)`,
+ pass: false,
+ },
+ no_value: {
+ src: `@size()`,
+ pass: false,
+ },
+ missing_left_paren: {
+ src: `@size 4)`,
+ pass: false,
+ },
+ missing_right_paren: {
+ src: `@size(4`,
+ pass: false,
+ },
+ missing_parens: {
+ src: `@size`,
+ pass: false,
+ },
+ multiple_values: {
+ src: `@size(4, 8)`,
+ pass: false,
+ },
+ override: {
+ src: `@size(over)`,
+ pass: false,
+ },
+ zero: {
+ src: `@size(0)`,
+ pass: false,
+ },
+ negative: {
+ src: `@size(-4)`,
+ pass: false,
+ },
+ f32_literal: {
+ src: `@size(4.0)`,
+ pass: false,
+ },
+ f32: {
+ src: `@size(4f)`,
+ pass: false,
+ },
+ duplicate: {
+ src: `@size(4) @size(8)`,
+ pass: false,
+ },
+ too_small: {
+ src: `@size(1)`,
+ pass: false,
+ },
+};
+
+g.test('size')
+  .desc(`Test validation of size`)
+ .params(u => u.combine('attr', keysOf(kSizeTests)))
+ .fn(t => {
+ const code = `
+override over: i32 = 4;
+const z: i32 = 4;
+
+struct S {
+ ${kSizeTests[t.params.attr].src} a: f32,
+};
+@group(0) @binding(0)
+var<storage> a: S;
+
+@workgroup_size(1)
+@compute fn main() {
+ _ = a;
+}`;
+ t.expectCompileResult(kSizeTests[t.params.attr].pass, code);
+ });
+
+g.test('size_fp16')
+ .desc(`Test validation of size with fp16`)
+ .params(u => u.combine('ext', ['', 'h']))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(t => {
+ const code = `
+struct S {
+ @size(4${t.params.ext}) a: f32,
+}
+@group(0) @binding(0)
+var<storage> a: S;
+
+@workgroup_size(1)
+@compute fn main() {
+ _ = a;
+}`;
+ t.expectCompileResult(t.params.ext === '', code);
+ });
+
+const kNonStructTests = {
+ control: {
+ mod_src: ``,
+ func_src: ``,
+ size: 0,
+ pass: true,
+ },
+ struct: {
+ mod_src: `struct S { a: f32 }`,
+ func_src: ``,
+ size: 4,
+ pass: false,
+ },
+ constant: {
+ mod_src: `const a: f32 = 4.0;`,
+ func_src: ``,
+ size: 4,
+ pass: false,
+ },
+ vec: {
+ mod_src: ``,
+ func_src: `vec4<f32>`,
+ size: 16,
+ pass: false,
+ },
+ mat: {
+ mod_src: ``,
+ func_src: `mat4x4<f32>`,
+ size: 64,
+ pass: false,
+ },
+ array: {
+ mod_src: ``,
+ func_src: `array<f32, 4>`,
+ size: 16,
+ pass: false,
+ },
+ scalar: {
+ mod_src: ``,
+ func_src: `f32`,
+ size: 4,
+ pass: false,
+ },
+};
+
+g.test('size_non_struct')
+ .desc(`Test validation of size outside of a struct`)
+ .params(u => u.combine('attr', keysOf(kNonStructTests)))
+ .fn(t => {
+ const data = kNonStructTests[t.params.attr];
+ let code = '';
+ if (data.mod_src !== '') {
+ code += `@size(${data.size}) ${data.mod_src}`;
+ }
+
+ code += `
+@workgroup_size(1)
+@compute fn main() {
+`;
+ if (data.func_src !== '') {
+ code += `@size(${data.size}) var a: ${data.func_src};`;
+ }
+ code += '}';
+
+ t.expectCompileResult(data.pass, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/util.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/util.ts
new file mode 100644
index 0000000000..20610753e6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/util.ts
@@ -0,0 +1,196 @@
+/**
+ * Generate an entry point that uses an entry point IO variable.
+ *
+ * @param {Object} params
+ * @param params.attribute The entry point IO attribute.
+ * @param params.type The type to use for the entry point IO variable.
+ * @param params.stage The shader stage.
+ * @param params.io An "in|out" string specifying whether the entry point IO is an input or an output.
+ * @param params.use_struct True to wrap the entry point IO in a struct.
+ * @returns The generated shader code.
+ */
+export function generateShader({
+ attribute,
+ type,
+ stage,
+ io,
+ use_struct,
+}: {
+ attribute: string;
+ type: string;
+ stage: string;
+ io: string;
+ use_struct: boolean;
+}) {
+ let code = '';
+
+ if (use_struct) {
+ // Generate a struct that wraps the entry point IO variable.
+ code += 'struct S {\n';
+ code += ` ${attribute} value : ${type},\n`;
+ if (stage === 'vertex' && io === 'out' && !attribute.includes('builtin(position)')) {
+ // Add position builtin for vertex outputs.
+ code += ` @builtin(position) position : vec4<f32>,\n`;
+ }
+ code += '};\n\n';
+ }
+
+ if (stage !== '') {
+ // Generate the entry point attributes.
+ code += `@${stage}`;
+ if (stage === 'compute') {
+ code += ' @workgroup_size(1)';
+ }
+ }
+
+ // Generate the entry point parameter and return type.
+ let param = '';
+ let retType = '';
+ let retVal = '';
+ if (io === 'in') {
+ if (use_struct) {
+ param = `in : S`;
+ } else {
+ param = `${attribute} value : ${type}`;
+ }
+
+ // Vertex shaders must always return `@builtin(position)`.
+ if (stage === 'vertex') {
+ retType = `-> @builtin(position) vec4<f32>`;
+ retVal = `return vec4<f32>();`;
+ }
+ } else if (io === 'out') {
+ if (use_struct) {
+ retType = '-> S';
+ retVal = `return S();`;
+ } else {
+ retType = `-> ${attribute} ${type}`;
+ retVal = `return ${type}();`;
+ }
+ }
+
+ code += `
+ fn main(${param}) ${retType} {
+ ${retVal}
+ }
+ `;
+
+ return code;
+}
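+
+// Illustrative example (added note, not part of the upstream CTS): for a fragment
+// input outside a struct,
+//   generateShader({ attribute: '@location(0)', type: 'f32', stage: 'fragment',
+//                    io: 'in', use_struct: false })
+// returns, roughly:
+//   @fragment
+//   fn main(@location(0) value : f32)  {
+//   }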
+
+/**
+ * ResourceDeclarationEmitter is a function that emits the WGSL declaring a resource variable with
+ * the given group, binding and name.
+ */
+export type ResourceDeclarationEmitter = (name: string, group?: number, binding?: number) => string;
+
+/** Helper function for emitting a resource declaration's group and binding attributes */
+function groupAndBinding(group?: number, binding?: number): string {
+ return (
+ `${group !== undefined ? `@group(${group})` : '/* no group */'} ` +
+ `${binding !== undefined ? `@binding(${binding})` : '/* no binding */'}`
+ );
+}
+
+/** Helper function for emitting a resource declaration for the given type */
+function basicEmitter(type: string): ResourceDeclarationEmitter {
+ return (name: string, group?: number, binding?: number) =>
+ `${groupAndBinding(group, binding)} var ${name} : ${type};\n`;
+}
+
+/** Map of resource declaration name, to an emitter. */
+export const kResourceEmitters = new Map<string, ResourceDeclarationEmitter>([
+ ['texture_1d', basicEmitter('texture_1d<i32>')],
+ ['texture_2d', basicEmitter('texture_2d<i32>')],
+ ['texture_2d_array', basicEmitter('texture_2d_array<f32>')],
+ ['texture_3d', basicEmitter('texture_3d<i32>')],
+ ['texture_cube', basicEmitter('texture_cube<u32>')],
+ ['texture_cube_array', basicEmitter('texture_cube_array<u32>')],
+ ['texture_multisampled_2d', basicEmitter('texture_multisampled_2d<i32>')],
+ ['texture_external', basicEmitter('texture_external')],
+ ['texture_storage_1d', basicEmitter('texture_storage_1d<rgba8unorm, write>')],
+ ['texture_storage_2d', basicEmitter('texture_storage_2d<rgba8sint, write>')],
+ ['texture_storage_2d_array', basicEmitter('texture_storage_2d_array<r32uint, write>')],
+ ['texture_storage_3d', basicEmitter('texture_storage_3d<rg32uint, write>')],
+ ['texture_depth_2d', basicEmitter('texture_depth_2d')],
+ ['texture_depth_2d_array', basicEmitter('texture_depth_2d_array')],
+ ['texture_depth_cube', basicEmitter('texture_depth_cube')],
+ ['texture_depth_cube_array', basicEmitter('texture_depth_cube_array')],
+ ['texture_depth_multisampled_2d', basicEmitter('texture_depth_multisampled_2d')],
+ ['sampler', basicEmitter('sampler')],
+ ['sampler_comparison', basicEmitter('sampler_comparison')],
+ [
+ 'uniform',
+ (name: string, group?: number, binding?: number) =>
+ `${groupAndBinding(group, binding)} var<uniform> ${name} : array<vec4<f32>, 16>;\n`,
+ ],
+ [
+ 'storage',
+ (name: string, group?: number, binding?: number) =>
+ `${groupAndBinding(group, binding)} var<storage> ${name} : array<vec4<f32>, 16>;\n`,
+ ],
+]);
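+
+// Illustrative example (added note, not part of the upstream CTS): an emitter is looked
+// up by kind and invoked with a variable name plus optional group/binding, e.g.
+//   kResourceEmitters.get('uniform')?.('res', 0, 1)
+// returns, roughly:
+//   @group(0) @binding(1) var<uniform> res : array<vec4<f32>, 16>;
+// When group or binding is omitted, a `/* no group */` or `/* no binding */` placeholder
+// comment is emitted instead of the attribute.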
+
+/** All possible resource types for use as test permutations. */
+export const kResourceKindsAll = [
+ 'texture_1d',
+ 'texture_2d',
+ 'texture_2d_array',
+ 'texture_3d',
+ 'texture_cube',
+ 'texture_cube_array',
+ 'texture_multisampled_2d',
+ 'texture_external',
+ 'texture_storage_1d',
+ 'texture_storage_2d',
+ 'texture_storage_2d_array',
+ 'texture_storage_3d',
+ 'texture_depth_2d',
+ 'texture_depth_2d_array',
+ 'texture_depth_cube',
+ 'texture_depth_cube_array',
+ 'texture_depth_multisampled_2d',
+ 'sampler',
+ 'sampler_comparison',
+ 'uniform',
+ 'storage',
+];
+
+/** A small selection of resource declaration names, which can be used in test permutations */
+export const kResourceKindsA = ['storage', 'texture_2d', 'texture_external', 'uniform'];
+
+/** A small selection of resource declaration names, which can be used in test permutations */
+export const kResourceKindsB = ['texture_3d', 'texture_storage_1d', 'uniform'];
+
+/** An enumerator of shader stages */
+export type ShaderStage = 'vertex' | 'fragment' | 'compute';
+
+/**
+ * declareEntrypoint emits the WGSL to declare an entry point with the given name, stage and body.
+ * The generated function will have an appropriate return type and return statement, so that `body`
+ * does not have to change between stages.
+ * @param name the entry point function name
+ * @param stage the entry point stage
+ * @param body the body of the function (excluding any automatically suffixed return statements)
+ * @returns the WGSL string for the entry point
+ */
+export function declareEntrypoint(name: string, stage: ShaderStage, body: string): string {
+ switch (stage) {
+ case 'vertex':
+ return `@vertex
+fn ${name}() -> @builtin(position) vec4f {
+ ${body}
+ return vec4f();
+}`;
+ case 'fragment':
+ return `@fragment
+fn ${name}() {
+ ${body}
+}`;
+ case 'compute':
+ return `@compute @workgroup_size(1)
+fn ${name}() {
+ ${body}
+}`;
+ }
+}
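+
+// Illustrative example (added note, not part of the upstream CTS):
+//   declareEntrypoint('main', 'compute', '_ = 1;')
+// returns, roughly:
+//   @compute @workgroup_size(1)
+//   fn main() {
+//     _ = 1;
+//   }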
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/workgroup_size.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/workgroup_size.spec.ts
new file mode 100644
index 0000000000..49d2088817
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_io/workgroup_size.spec.ts
@@ -0,0 +1,300 @@
+export const description = `Validation tests for workgroup_size`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kWorkgroupSizeTests = {
+ x_only_float: {
+ src: `@workgroup_size(8f)`,
+ pass: false,
+ },
+ xy_only_float: {
+ src: `@workgroup_size(8, 8f)`,
+ pass: false,
+ },
+ xyz_float: {
+ src: `@workgroup_size(8, 8, 8f)`,
+ pass: false,
+ },
+ x_only_float_literal: {
+ src: `@workgroup_size(8.0)`,
+ pass: false,
+ },
+ xy_only_float_literal: {
+ src: `@workgroup_size(8, 8.0)`,
+ pass: false,
+ },
+ xyz_float_literal: {
+ src: `@workgroup_size(8, 8, 8.0)`,
+ pass: false,
+ },
+ empty: {
+ src: `@workgroup_size()`,
+ pass: false,
+ },
+ empty_x: {
+ src: `@workgroup_size(, 8)`,
+ pass: false,
+ },
+ empty_y: {
+ src: `@workgroup_size(8, , 8)`,
+ pass: false,
+ },
+ invalid_entry: {
+ src: `@workgroup_size(let)`,
+ pass: false,
+ },
+
+ x_only_abstract: {
+ src: `@workgroup_size(8)`,
+ pass: true,
+ },
+ xy_only_abstract: {
+ src: `@workgroup_size(8, 8)`,
+ pass: true,
+ },
+ xyz_abstract: {
+ src: `@workgroup_size(8, 8, 8)`,
+ pass: true,
+ },
+ x_only_unsigned: {
+ src: `@workgroup_size(8u)`,
+ pass: true,
+ },
+ xy_only_unsigned: {
+ src: `@workgroup_size(8u, 8u)`,
+ pass: true,
+ },
+ xyz_unsigned: {
+ src: `@workgroup_size(8u, 8u, 8u)`,
+ pass: true,
+ },
+ x_only_signed: {
+ src: `@workgroup_size(8i)`,
+ pass: true,
+ },
+ xy_only_signed: {
+ src: `@workgroup_size(8i, 8i)`,
+ pass: true,
+ },
+ xyz_signed: {
+ src: `@workgroup_size(8i, 8i, 8i)`,
+ pass: true,
+ },
+ x_only_hex: {
+ src: `@workgroup_size(0x1)`,
+ pass: true,
+ },
+ xy_only_hex: {
+ src: `@workgroup_size(0x1, 0x1)`,
+ pass: true,
+ },
+ xyz_hex: {
+ src: `@workgroup_size(0x1, 0x1, 0x1)`,
+ pass: true,
+ },
+
+ const_expr: {
+ src: `const a = 4;
+ const b = 5;
+ @workgroup_size(a, b, a + b)`,
+ pass: true,
+ },
+
+ override: {
+ src: `@id(42) override block_width = 12u;
+@workgroup_size(block_width)`,
+ pass: true,
+ },
+ override_no_default: {
+ src: `override block_width: i32;
+@workgroup_size(block_width)`,
+ pass: true,
+ },
+
+ trailing_comma_x: {
+ src: `@workgroup_size(8, )`,
+ pass: true,
+ },
+ trailing_comma_y: {
+ src: `@workgroup_size(8, 8,)`,
+ pass: true,
+ },
+ trailing_comma_z: {
+ src: `@workgroup_size(8, 8, 8,)`,
+ pass: true,
+ },
+
+ override_expr: {
+ src: `override a = 5;
+ override b = 6;
+ @workgroup_size(a, b, a + b)`,
+ pass: true,
+ },
+
+ // Mixed abstract is ok
+ mixed_abstract_signed: {
+ src: `@workgroup_size(8, 8i)`,
+ pass: true,
+ },
+ mixed_abstract_unsigned: {
+ src: `@workgroup_size(8u, 8)`,
+ pass: true,
+ },
+ // Mixed signed and unsigned is not
+ mixed_signed_unsigned: {
+ src: `@workgroup_size(8i, 8i, 8u)`,
+ pass: false,
+ },
+
+ zero_x: {
+ src: `@workgroup_size(0)`,
+ pass: false,
+ },
+ zero_y: {
+ src: `@workgroup_size(8, 0)`,
+ pass: false,
+ },
+ zero_z: {
+ src: `@workgroup_size(8, 8, 0)`,
+ pass: false,
+ },
+ negative_x: {
+ src: `@workgroup_size(-8)`,
+ pass: false,
+ },
+ negative_y: {
+ src: `@workgroup_size(8, -8)`,
+ pass: false,
+ },
+ negative_z: {
+ src: `@workgroup_size(8, 8, -8)`,
+ pass: false,
+ },
+
+ max_values: {
+ src: `@workgroup_size(256, 256, 64)`,
+ pass: true,
+ },
+
+ missing_left_paren: {
+ src: `@workgroup_size 1, 2, 3)`,
+ pass: false,
+ },
+ missing_right_paren: {
+ src: `@workgroup_size(1, 2, 3`,
+ pass: false,
+ },
+ misspelling: {
+ src: `@aworkgroup_size(1)`,
+ pass: false,
+ },
+ no_params: {
+ src: `@workgroup_size`,
+ pass: false,
+ },
+ multi_line: {
+ src: '@\nworkgroup_size(1)',
+ pass: true,
+ },
+ comment: {
+ src: `@/* comment */workgroup_size(1)`,
+ pass: true,
+ },
+
+ mix_ux: {
+ src: `@workgroup_size(1u, 1i, 1i)`,
+ pass: false,
+ },
+ mix_uy: {
+ src: `@workgroup_size(1i, 1u, 1i)`,
+ pass: false,
+ },
+ mix_uz: {
+ src: `@workgroup_size(1i, 1i, 1u)`,
+ pass: false,
+ },
+
+ duplicate: {
+ src: `@workgroup_size(1)
+@workgroup_size(2, 2, 2)`,
+ pass: false,
+ },
+};
+g.test('workgroup_size')
+ .desc(`Test validation of workgroup_size`)
+ .params(u => u.combine('attr', keysOf(kWorkgroupSizeTests)))
+ .fn(t => {
+ const code = `
+${kWorkgroupSizeTests[t.params.attr].src}
+@compute fn main() {}`;
+ t.expectCompileResult(kWorkgroupSizeTests[t.params.attr].pass, code);
+ });
+
+g.test('workgroup_size_fragment_shader')
+ .desc(`Test validation of workgroup_size on a fragment shader`)
+ .fn(t => {
+ const code = `
+@workgroup_size(1)
+@fragment fn main(@builtin(position) pos: vec4<f32>) {}`;
+ t.expectCompileResult(false, code);
+ });
+
+g.test('workgroup_size_vertex_shader')
+ .desc(`Test validation of workgroup_size on a vertex shader`)
+ .fn(t => {
+ const code = `
+@workgroup_size(1)
+@vertex fn main() -> @builtin(position) vec4<f32> {}`;
+ t.expectCompileResult(false, code);
+ });
+
+g.test('workgroup_size_function')
+ .desc(`Test validation of workgroup_size on a user-defined function`)
+ .fn(t => {
+ const code = `
+@workgroup_size(1)
+fn my_func() {}`;
+ t.expectCompileResult(false, code);
+ });
+
+g.test('workgroup_size_const')
+ .desc(`Test validation of workgroup_size on a const`)
+ .fn(t => {
+ const code = `
+@workgroup_size(1)
+const a : i32 = 4;
+
+fn my_func() {}`;
+ t.expectCompileResult(false, code);
+ });
+
+g.test('workgroup_size_var')
+ .desc(`Test validation of workgroup_size on a var`)
+ .fn(t => {
+ const code = `
+@workgroup_size(1)
+@group(1) @binding(1)
+var<storage> a: i32;
+
+fn my_func() {
+ _ = a;
+}`;
+ t.expectCompileResult(false, code);
+ });
+
+g.test('workgroup_size_fp16')
+ .desc(`Test validation of workgroup_size with fp16`)
+ .params(u => u.combine('ext', ['', 'h']))
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ })
+ .fn(t => {
+ const code = `
+@workgroup_size(1${t.params.ext})
+@compute fn main() {}`;
+ t.expectCompileResult(t.params.ext === '', code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_validation_test.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_validation_test.ts
new file mode 100644
index 0000000000..8029ef4e67
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/shader_validation_test.ts
@@ -0,0 +1,177 @@
+import { keysOf } from '../../../common/util/data_tables.js';
+import { ErrorWithExtra } from '../../../common/util/util.js';
+import { GPUTest } from '../../gpu_test.js';
+
+/**
+ * Base fixture for WGSL shader validation tests.
+ */
+export class ShaderValidationTest extends GPUTest {
+ /**
+ * Add a test expectation for whether a createShaderModule call succeeds or not.
+ *
+ * @example
+ * ```ts
+ * t.expectCompileResult(true, `wgsl code`); // Expect success
+ * t.expectCompileResult(false, `wgsl code`); // Expect validation error with any error string
+ * ```
+ */
+ expectCompileResult(expectedResult: boolean, code: string) {
+ let shaderModule: GPUShaderModule;
+ this.expectGPUError(
+ 'validation',
+ () => {
+ shaderModule = this.device.createShaderModule({ code });
+ },
+ expectedResult !== true
+ );
+
+ const error = new ErrorWithExtra('', () => ({ shaderModule }));
+ this.eventualAsyncExpectation(async () => {
+ const compilationInfo = await shaderModule!.getCompilationInfo();
+
+ // MAINTENANCE_TODO: Pretty-print error messages with source context.
+ const messagesLog = compilationInfo.messages
+ .map(m => `${m.lineNum}:${m.linePos}: ${m.type}: ${m.message}`)
+ .join('\n');
+ error.extra.compilationInfo = compilationInfo;
+
+ if (compilationInfo.messages.some(m => m.type === 'error')) {
+ if (expectedResult) {
+ error.message = `Unexpected compilationInfo 'error' message.\n` + messagesLog;
+ this.rec.validationFailed(error);
+ } else {
+ error.message = `Found expected compilationInfo 'error' message.\n` + messagesLog;
+ this.rec.debug(error);
+ }
+ } else {
+ if (!expectedResult) {
+ error.message = `Missing expected compilationInfo 'error' message.\n` + messagesLog;
+ this.rec.validationFailed(error);
+ } else {
+ error.message = `No compilationInfo 'error' messages, as expected.\n` + messagesLog;
+ this.rec.debug(error);
+ }
+ }
+ });
+ }
+
+ /**
+ * Add a test expectation for whether a createShaderModule call issues a warning.
+ *
+ * @example
+ * ```ts
+ * t.expectCompileWarning(true, `wgsl code`); // Expect compile success and any warning message
+ * t.expectCompileWarning(false, `wgsl code`); // Expect compile success and no warning messages
+ * ```
+ */
+ expectCompileWarning(expectWarning: boolean, code: string) {
+ let shaderModule: GPUShaderModule;
+ this.expectGPUError(
+ 'validation',
+ () => {
+ shaderModule = this.device.createShaderModule({ code });
+ },
+ false
+ );
+
+ const error = new ErrorWithExtra('', () => ({ shaderModule }));
+ this.eventualAsyncExpectation(async () => {
+ const compilationInfo = await shaderModule!.getCompilationInfo();
+
+ // MAINTENANCE_TODO: Pretty-print error messages with source context.
+ const messagesLog = compilationInfo.messages
+ .map(m => `${m.lineNum}:${m.linePos}: ${m.type}: ${m.message}`)
+ .join('\n');
+ error.extra.compilationInfo = compilationInfo;
+
+ if (compilationInfo.messages.some(m => m.type === 'warning')) {
+ if (expectWarning) {
+ error.message = `Found expected compilationInfo 'warning' message.\n` + messagesLog;
+ this.rec.debug(error);
+ } else {
+ error.message = `Unexpected compilationInfo 'warning' message.\n` + messagesLog;
+ this.rec.validationFailed(error);
+ }
+ } else {
+ if (expectWarning) {
+ error.message = `Missing expected compilationInfo 'warning' message.\n` + messagesLog;
+ this.rec.validationFailed(error);
+ } else {
+ error.message = `No compilationInfo 'warning' messages, as expected.\n` + messagesLog;
+ this.rec.debug(error);
+ }
+ }
+ });
+ }
+
+ /**
+ * Add a test expectation for whether a createComputePipeline call succeeds or not.
+ */
+ expectPipelineResult(args: {
+ // True if the pipeline should build without error
+ expectedResult: boolean;
+ // The WGSL shader code
+ code: string;
+ // Pipeline overridable constants
+ constants?: Record<string, GPUPipelineConstantValue>;
+ // List of additional module-scope variables the entry point needs to reference
+ reference?: string[];
+ }) {
+ const phonies: Array<string> = [];
+
+ if (args.constants !== undefined) {
+ phonies.push(...keysOf(args.constants).map(c => `_ = ${c};`));
+ }
+ if (args.reference !== undefined) {
+ phonies.push(...args.reference.map(c => `_ = ${c};`));
+ }
+
+ const code =
+ args.code +
+ `
+@compute @workgroup_size(1)
+fn main() {
+ ${phonies.join('\n')}
+}`;
+
+ let shaderModule: GPUShaderModule;
+ this.expectGPUError(
+ 'validation',
+ () => {
+ shaderModule = this.device.createShaderModule({ code });
+ },
+ false
+ );
+
+ this.expectGPUError(
+ 'validation',
+ () => {
+ this.device.createComputePipeline({
+ layout: 'auto',
+ compute: { module: shaderModule!, entryPoint: 'main', constants: args.constants },
+ });
+ },
+ !args.expectedResult
+ );
+ }
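+
+ // Illustrative usage (added note, not part of the upstream CTS): a test might check
+ // that a pipeline builds once its overridable constant is supplied, e.g.
+ //   t.expectPipelineResult({
+ //     expectedResult: true,
+ //     code: 'override wgx : u32;',
+ //     constants: { wgx: 16 },
+ //   });
+ // The helper appends a @compute entry point that references `wgx` through a phony
+ // assignment, so the constant is considered used by the pipeline.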
+
+ /**
+ * Wraps the code fragment into an entry point.
+ *
+ * @example
+ * ```ts
+ * t.wrapInEntryPoint(`var i = 0;`);
+ * ```
+ */
+ wrapInEntryPoint(code: string, enabledExtensions: string[] = []) {
+ const enableDirectives = enabledExtensions.map(x => `enable ${x};`).join('\n ');
+
+ return `
+ ${enableDirectives}
+
+ @compute @workgroup_size(1)
+ fn main() {
+ ${code}
+ }`;
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/types/alias.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/types/alias.spec.ts
new file mode 100644
index 0000000000..266b4f9a12
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/types/alias.spec.ts
@@ -0,0 +1,123 @@
+export const description = `
+Validation tests for type aliases
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+g.test('no_direct_recursion')
+ .desc('Test that direct recursion of type aliases is rejected')
+ .params(u => u.combine('target', ['i32', 'T']))
+ .fn(t => {
+ const wgsl = `alias T = ${t.params.target};`;
+ t.expectCompileResult(t.params.target === 'i32', wgsl);
+ });
+
+g.test('no_indirect_recursion')
+ .desc('Test that indirect recursion of type aliases is rejected')
+ .params(u => u.combine('target', ['i32', 'S']))
+ .fn(t => {
+ const wgsl = `
+alias S = T;
+alias T = ${t.params.target};
+`;
+ t.expectCompileResult(t.params.target === 'i32', wgsl);
+ });
+
+g.test('no_indirect_recursion_via_vector_element')
+ .desc('Test that indirect recursion of type aliases via vector element types is rejected')
+ .params(u => u.combine('target', ['i32', 'V']))
+ .fn(t => {
+ const wgsl = `
+alias V = vec4<T>;
+alias T = ${t.params.target};
+`;
+ t.expectCompileResult(t.params.target === 'i32', wgsl);
+ });
+
+g.test('no_indirect_recursion_via_matrix_element')
+ .desc('Test that indirect recursion of type aliases via matrix element types is rejected')
+ .params(u => u.combine('target', ['f32', 'M']))
+ .fn(t => {
+ const wgsl = `
+alias M = mat4x4<T>;
+alias T = ${t.params.target};
+`;
+ t.expectCompileResult(t.params.target === 'f32', wgsl);
+ });
+
+g.test('no_indirect_recursion_via_array_element')
+ .desc('Test that indirect recursion of type aliases via array element types is rejected')
+ .params(u => u.combine('target', ['i32', 'A']))
+ .fn(t => {
+ const wgsl = `
+alias A = array<T, 4>;
+alias T = ${t.params.target};
+`;
+ t.expectCompileResult(t.params.target === 'i32', wgsl);
+ });
+
+g.test('no_indirect_recursion_via_array_size')
+ .desc('Test that indirect recursion of type aliases via array size expressions is rejected')
+ .params(u => u.combine('target', ['i32', 'A']))
+ .fn(t => {
+ const wgsl = `
+alias A = array<i32, T(1)>;
+alias T = ${t.params.target};
+`;
+ t.expectCompileResult(t.params.target === 'i32', wgsl);
+ });
+
+g.test('no_indirect_recursion_via_atomic')
+ .desc('Test that indirect recursion of type aliases via atomic types is rejected')
+ .params(u => u.combine('target', ['i32', 'A']))
+ .fn(t => {
+ const wgsl = `
+alias A = atomic<T>;
+alias T = ${t.params.target};
+`;
+ t.expectCompileResult(t.params.target === 'i32', wgsl);
+ });
+
+g.test('no_indirect_recursion_via_ptr_store_type')
+ .desc('Test that indirect recursion of type aliases via pointer store types is rejected')
+ .params(u => u.combine('target', ['i32', 'P']))
+ .fn(t => {
+ const wgsl = `
+alias P = ptr<function, T>;
+alias T = ${t.params.target};
+`;
+ t.expectCompileResult(t.params.target === 'i32', wgsl);
+ });
+
+g.test('no_indirect_recursion_via_struct_member')
+ .desc('Test that indirect recursion of type aliases via struct members is rejected')
+ .params(u => u.combine('target', ['i32', 'S']))
+ .fn(t => {
+ const wgsl = `
+struct S {
+ a : T
+}
+alias T = ${t.params.target};
+`;
+ t.expectCompileResult(t.params.target === 'i32', wgsl);
+ });
+
+g.test('no_indirect_recursion_via_struct_attribute')
+ .desc('Test that indirect recursion of type aliases via struct member attributes is rejected')
+ .params(u =>
+ u //
+ .combine('target', ['i32', 'S'])
+ .combine('attribute', ['align', 'location', 'size'])
+ )
+ .fn(t => {
+ const wgsl = `
+struct S {
+ @${t.params.attribute}(T(4)) a : f32
+}
+alias T = ${t.params.target};
+`;
+ t.expectCompileResult(t.params.target === 'i32', wgsl);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/types/struct.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/types/struct.spec.ts
new file mode 100644
index 0000000000..6b192ba5bc
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/types/struct.spec.ts
@@ -0,0 +1,99 @@
+export const description = `
+Validation tests for struct types
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+g.test('no_direct_recursion')
+ .desc('Test that direct recursion of structures is rejected')
+ .params(u => u.combine('target', ['i32', 'S']))
+ .fn(t => {
+ const wgsl = `
+struct S {
+ a : ${t.params.target}
+}`;
+ t.expectCompileResult(t.params.target === 'i32', wgsl);
+ });
+
+g.test('no_indirect_recursion')
+ .desc('Test that indirect recursion of structures is rejected')
+ .params(u => u.combine('target', ['i32', 'S']))
+ .fn(t => {
+ const wgsl = `
+struct S {
+ a : T
+}
+struct T {
+ a : ${t.params.target}
+}`;
+ t.expectCompileResult(t.params.target === 'i32', wgsl);
+ });
+
+g.test('no_indirect_recursion_via_array_element')
+ .desc('Test that indirect recursion of structures via array element types is rejected')
+ .params(u => u.combine('target', ['i32', 'S']))
+ .fn(t => {
+ const wgsl = `
+struct S {
+ a : array<${t.params.target}, 4>
+}
+`;
+ t.expectCompileResult(t.params.target === 'i32', wgsl);
+ });
+
+g.test('no_indirect_recursion_via_array_size')
+ .desc('Test that indirect recursion of structures via array size expressions is rejected')
+ .params(u => u.combine('target', ['S1', 'S2']))
+ .fn(t => {
+ const wgsl = `
+struct S1 {
+ a : i32,
+}
+struct S2 {
+ a : i32,
+ b : array<i32, ${t.params.target}().a + 1>,
+}
+`;
+ t.expectCompileResult(t.params.target === 'S1', wgsl);
+ });
+
+g.test('no_indirect_recursion_via_struct_attribute')
+ .desc('Test that indirect recursion of structures via struct member attributes is rejected')
+ .params(u =>
+ u //
+ .combine('target', ['S1', 'S2'])
+ .combine('attribute', ['align', 'location', 'size'])
+ )
+ .fn(t => {
+ const wgsl = `
+struct S1 {
+ a : i32
+}
+struct S2 {
+ @${t.params.attribute}(${t.params.target}(4).a) a : i32
+}
+`;
+ t.expectCompileResult(t.params.target === 'S1', wgsl);
+ });
+
+g.test('no_indirect_recursion_via_struct_member_nested_in_alias')
+ .desc(
+ `Test that indirect recursion of structures via struct members is rejected when the member type
+ is an alias that contains the structure`
+ )
+ .params(u => u.combine('target', ['i32', 'A']))
+ .fn(t => {
+ const wgsl = `
+alias A = array<S2, 4>;
+struct S1 {
+ a : ${t.params.target}
+}
+struct S2 {
+ a : S1
+}
+`;
+ t.expectCompileResult(t.params.target === 'i32', wgsl);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/types/vector.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/types/vector.spec.ts
new file mode 100644
index 0000000000..96f6ba6815
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/types/vector.spec.ts
@@ -0,0 +1,78 @@
+export const description = `
+Validation tests for vector types
+`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kCases = {
+ // Valid vector types
+ vec2_bool: { wgsl: 'alias T = vec2<bool>;', ok: true },
+ vec3_bool: { wgsl: 'alias T = vec3<bool>;', ok: true },
+ vec4_bool: { wgsl: 'alias T = vec4<bool>;', ok: true },
+ vec2_i32: { wgsl: 'alias T = vec2<i32>;', ok: true },
+ vec3_i32: { wgsl: 'alias T = vec3<i32>;', ok: true },
+ vec4_i32: { wgsl: 'alias T = vec4<i32>;', ok: true },
+ vec2_u32: { wgsl: 'alias T = vec2<u32>;', ok: true },
+ vec3_u32: { wgsl: 'alias T = vec3<u32>;', ok: true },
+ vec4_u32: { wgsl: 'alias T = vec4<u32>;', ok: true },
+ vec2_f32: { wgsl: 'alias T = vec2<f32>;', ok: true },
+ vec3_f32: { wgsl: 'alias T = vec3<f32>;', ok: true },
+ vec4_f32: { wgsl: 'alias T = vec4<f32>;', ok: true },
+ vec2_f16: { wgsl: 'enable f16;\nalias T = vec2<f16>;', ok: true },
+ vec3_f16: { wgsl: 'enable f16;\nalias T = vec3<f16>;', ok: true },
+ vec4_f16: { wgsl: 'enable f16;\nalias T = vec4<f16>;', ok: true },
+
+ // Pre-declared type aliases
+ vec2i: { wgsl: 'const c : vec2i = vec2<i32>();', ok: true },
+ vec3i: { wgsl: 'const c : vec3i = vec3<i32>();', ok: true },
+ vec4i: { wgsl: 'const c : vec4i = vec4<i32>();', ok: true },
+ vec2u: { wgsl: 'const c : vec2u = vec2<u32>();', ok: true },
+ vec3u: { wgsl: 'const c : vec3u = vec3<u32>();', ok: true },
+ vec4u: { wgsl: 'const c : vec4u = vec4<u32>();', ok: true },
+ vec2f: { wgsl: 'const c : vec2f = vec2<f32>();', ok: true },
+ vec3f: { wgsl: 'const c : vec3f = vec3<f32>();', ok: true },
+ vec4f: { wgsl: 'const c : vec4f = vec4<f32>();', ok: true },
+ vec2h: { wgsl: 'enable f16;\nconst c : vec2h = vec2<f16>();', ok: true },
+ vec3h: { wgsl: 'enable f16;\nconst c : vec3h = vec3<f16>();', ok: true },
+ vec4h: { wgsl: 'enable f16;\nconst c : vec4h = vec4<f16>();', ok: true },
+
+ // pass
+ trailing_comma: { wgsl: 'alias T = vec3<u32,>;', ok: true },
+ aliased_el_ty: { wgsl: 'alias EL = i32;\nalias T = vec3<EL>;', ok: true },
+
+ // invalid
+ vec: { wgsl: 'alias T = vec;', ok: false },
+ vec_f32: { wgsl: 'alias T = vec<f32>;', ok: false },
+ vec1_i32: { wgsl: 'alias T = vec1<i32>;', ok: false },
+ vec5_u32: { wgsl: 'alias T = vec5<u32>;', ok: false },
+ missing_el_ty: { wgsl: 'alias T = vec3<>;', ok: false },
+ missing_t_left: { wgsl: 'alias T = vec3 u32>;', ok: false },
+ missing_t_right: { wgsl: 'alias T = vec3<u32;', ok: false },
+ vec_of_array: { wgsl: 'alias T = vec3<array<i32, 2>>;', ok: false },
+ vec_of_runtime_array: { wgsl: 'alias T = vec3<array<i32>>;', ok: false },
+ vec_of_struct: { wgsl: 'struct S { i : i32 }\nalias T = vec3<S>;', ok: false },
+ vec_of_atomic: { wgsl: 'alias T = vec3<atomic<i32>>;', ok: false },
+ vec_of_matrix: { wgsl: 'alias T = vec3<mat2x2f>;', ok: false },
+ vec_of_vec: { wgsl: 'alias T = vec3<vec2f>;', ok: false },
+ no_bool_shortform: { wgsl: 'const c : vec2b = vec2<bool>();', ok: false },
+};
+
+g.test('vector')
+ .desc('Tests validation of vector types')
+ .params(
+ u => u.combine('case', keysOf(kCases)) //
+ )
+ .beforeAllSubcases(t => {
+ const c = kCases[t.params.case];
+ if (c.wgsl.indexOf('enable f16') >= 0) {
+ t.selectDeviceOrSkipTestCase('shader-f16');
+ }
+ })
+ .fn(t => {
+ const c = kCases[t.params.case];
+ t.expectCompileResult(c.ok, c.wgsl);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/uniformity/uniformity.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/uniformity/uniformity.spec.ts
new file mode 100644
index 0000000000..41249e445d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/validation/uniformity/uniformity.spec.ts
@@ -0,0 +1,2444 @@
+export const description = `Validation tests for uniformity analysis`;
+
+import { makeTestGroup } from '../../../../common/framework/test_group.js';
+import { keysOf } from '../../../../common/util/data_tables.js';
+import { unreachable } from '../../../../common/util/util.js';
+import { ShaderValidationTest } from '../shader_validation_test.js';
+
+export const g = makeTestGroup(ShaderValidationTest);
+
+const kCollectiveOps = [
+ { op: 'textureSample', stage: 'fragment' },
+ { op: 'textureSampleBias', stage: 'fragment' },
+ { op: 'textureSampleCompare', stage: 'fragment' },
+ { op: 'dpdx', stage: 'fragment' },
+ { op: 'dpdxCoarse', stage: 'fragment' },
+ { op: 'dpdxFine', stage: 'fragment' },
+ { op: 'dpdy', stage: 'fragment' },
+ { op: 'dpdyCoarse', stage: 'fragment' },
+ { op: 'dpdyFine', stage: 'fragment' },
+ { op: 'fwidth', stage: 'fragment' },
+ { op: 'fwidthCoarse', stage: 'fragment' },
+ { op: 'fwidthFine', stage: 'fragment' },
+ { op: 'storageBarrier', stage: 'compute' },
+ { op: 'workgroupBarrier', stage: 'compute' },
+ { op: 'workgroupUniformLoad', stage: 'compute' },
+];
+
+const kConditions = [
+ { cond: 'uniform_storage_ro', expectation: true },
+ { cond: 'nonuniform_storage_ro', expectation: false },
+ { cond: 'nonuniform_storage_rw', expectation: false },
+ { cond: 'nonuniform_builtin', expectation: false },
+ { cond: 'uniform_literal', expectation: true },
+ { cond: 'uniform_const', expectation: true },
+ { cond: 'uniform_override', expectation: true },
+ { cond: 'uniform_let', expectation: true },
+ { cond: 'nonuniform_let', expectation: false },
+ { cond: 'uniform_or', expectation: true },
+ { cond: 'nonuniform_or1', expectation: false },
+ { cond: 'nonuniform_or2', expectation: false },
+ { cond: 'uniform_and', expectation: true },
+ { cond: 'nonuniform_and1', expectation: false },
+ { cond: 'nonuniform_and2', expectation: false },
+ { cond: 'uniform_func_var', expectation: true },
+ { cond: 'nonuniform_func_var', expectation: false },
+];
+
+function generateCondition(condition: string): string {
+ switch (condition) {
+ case 'uniform_storage_ro': {
+ return `ro_buffer[0] == 0`;
+ }
+ case 'nonuniform_storage_ro': {
+ return `ro_buffer[priv_var[0]] == 0`;
+ }
+ case 'nonuniform_storage_rw': {
+ return `rw_buffer[0] == 0`;
+ }
+ case 'nonuniform_builtin': {
+ return `p.x == 0`;
+ }
+ case 'uniform_literal': {
+ return `false`;
+ }
+ case 'uniform_const': {
+ return `c`;
+ }
+ case 'uniform_override': {
+ return `o == 0`;
+ }
+ case 'uniform_let': {
+ return `u_let == 0`;
+ }
+ case 'nonuniform_let': {
+ return `n_let == 0`;
+ }
+ case 'uniform_or': {
+ return `u_let == 0 || uniform_buffer.y > 1`;
+ }
+ case 'nonuniform_or1': {
+ return `u_let == 0 || n_let == 0`;
+ }
+ case 'nonuniform_or2': {
+ return `n_let == 0 || u_let == 0`;
+ }
+ case 'uniform_and': {
+ return `u_let == 0 && uniform_buffer.y > 1`;
+ }
+ case 'nonuniform_and1': {
+ return `u_let == 0 && n_let == 0`;
+ }
+ case 'nonuniform_and2': {
+ return `n_let == 0 && u_let == 0`;
+ }
+ case 'uniform_func_var': {
+ return `u_f == 0`;
+ }
+ case 'nonuniform_func_var': {
+ return `n_f == 0`;
+ }
+ default: {
+ unreachable(`Unhandled condition`);
+ }
+ }
+}
+
+function generateOp(op: string): string {
+ switch (op) {
+ case 'textureSample': {
+ return `let x = ${op}(tex, s, vec2(0,0));\n`;
+ }
+ case 'textureSampleBias': {
+ return `let x = ${op}(tex, s, vec2(0,0), 0);\n`;
+ }
+ case 'textureSampleCompare': {
+ return `let x = ${op}(tex_depth, s_comp, vec2(0,0), 0);\n`;
+ }
+ case 'storageBarrier':
+ case 'workgroupBarrier': {
+ return `${op}();\n`;
+ }
+ case 'workgroupUniformLoad': {
+ return `let x = ${op}(&wg);`;
+ }
+ case 'dpdx':
+ case 'dpdxCoarse':
+ case 'dpdxFine':
+ case 'dpdy':
+ case 'dpdyCoarse':
+ case 'dpdyFine':
+ case 'fwidth':
+ case 'fwidthCoarse':
+ case 'fwidthFine': {
+ return `let x = ${op}(0);\n`;
+ }
+ default: {
+ unreachable(`Unhandled op`);
+ }
+ }
+}
+
+function generateConditionalStatement(statement: string, condition: string, op: string): string {
+ switch (statement) {
+ case 'if': {
+ return `if ${generateCondition(condition)} {
+ ${generateOp(op)};
+ }
+ `;
+ }
+ case 'for': {
+ return `for (; ${generateCondition(condition)};) {
+ ${generateOp(op)};
+ }
+ `;
+ }
+ case 'while': {
+ return `while ${generateCondition(condition)} {
+ ${generateOp(op)};
+ }
+ `;
+ }
+ case 'switch': {
+ return `switch u32(${generateCondition(condition)}) {
+ case 0: {
+ ${generateOp(op)};
+ }
+ default: { }
+ }
+ `;
+ }
+ default: {
+ unreachable(`Unhandled statement`);
+ }
+ }
+}
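+
+// Illustrative expansion (added note, not part of the upstream CTS):
+//   generateConditionalStatement('if', 'uniform_let', 'dpdx')
+// produces, roughly:
+//   if u_let == 0 {
+//     let x = dpdx(0);
+//   }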
+
+g.test('basics')
+ .desc(`Test collective operations in simple uniform or non-uniform control flow.`)
+ .params(u =>
+ u
+ .combineWithParams(kCollectiveOps)
+ .combineWithParams(kConditions)
+ .combine('statement', ['if', 'for', 'while', 'switch'] as const)
+ .beginSubcases()
+ )
+ .fn(t => {
+ let code = `
+ @group(0) @binding(0) var s : sampler;
+ @group(0) @binding(1) var s_comp : sampler_comparison;
+ @group(0) @binding(2) var tex : texture_2d<f32>;
+ @group(0) @binding(3) var tex_depth : texture_depth_2d;
+
+ @group(1) @binding(0) var<storage, read> ro_buffer : array<f32, 4>;
+ @group(1) @binding(1) var<storage, read_write> rw_buffer : array<f32, 4>;
+ @group(1) @binding(2) var<uniform> uniform_buffer : vec4<f32>;
+
+ var<private> priv_var : array<f32, 4> = array(0,0,0,0);
+
+ const c = false;
+ override o : f32;
+`;
+
+ if (t.params.stage === 'compute') {
+ code += `var<workgroup> wg : f32;\n`;
+ code += ` @workgroup_size(16, 1, 1)`;
+ }
+ code += `@${t.params.stage}`;
+ code += `\nfn main(`;
+ if (t.params.stage === 'compute') {
+ code += `@builtin(global_invocation_id) p : vec3<u32>`;
+ } else {
+ code += `@builtin(position) p : vec4<f32>`;
+ }
+ code += `) {
+ let u_let = uniform_buffer.x;
+ let n_let = rw_buffer[0];
+ var u_f = uniform_buffer.z;
+ var n_f = rw_buffer[1];
+ `;
+
+ // Simple control statement containing the op.
+ code += generateConditionalStatement(t.params.statement, t.params.cond, t.params.op);
+
+ code += `\n}\n`;
+
+ t.expectCompileResult(t.params.expectation, code);
+ });
+
+const kFragmentBuiltinValues = [
+ {
+ builtin: `position`,
+ type: `vec4<f32>`,
+ },
+ {
+ builtin: `front_facing`,
+ type: `bool`,
+ },
+ {
+ builtin: `sample_index`,
+ type: `u32`,
+ },
+ {
+ builtin: `sample_mask`,
+ type: `u32`,
+ },
+];
+
+g.test('fragment_builtin_values')
+ .desc(`Test uniformity of fragment built-in values`)
+ .params(u => u.combineWithParams(kFragmentBuiltinValues).beginSubcases())
+ .fn(t => {
+ let cond = ``;
+ switch (t.params.type) {
+ case `u32`:
+ case `i32`:
+ case `f32`: {
+ cond = `p > 0`;
+ break;
+ }
+ case `vec4<u32>`:
+ case `vec4<i32>`:
+ case `vec4<f32>`: {
+ cond = `p.x > 0`;
+ break;
+ }
+ case `bool`: {
+ cond = `p`;
+ break;
+ }
+ default: {
+ unreachable(`Unhandled type`);
+ }
+ }
+ const code = `
+@group(0) @binding(0) var s : sampler;
+@group(0) @binding(1) var tex : texture_2d<f32>;
+
+@fragment
+fn main(@builtin(${t.params.builtin}) p : ${t.params.type}) {
+ if ${cond} {
+ let texel = textureSample(tex, s, vec2<f32>(0,0));
+ }
+}
+`;
+
+ t.expectCompileResult(true, `diagnostic(off, derivative_uniformity);\n` + code);
+ t.expectCompileResult(false, code);
+ });
+
+const kComputeBuiltinValues = [
+ {
+ builtin: `local_invocation_id`,
+ type: `vec3<f32>`,
+ uniform: false,
+ },
+ {
+ builtin: `local_invocation_index`,
+ type: `u32`,
+ uniform: false,
+ },
+ {
+ builtin: `global_invocation_id`,
+ type: `vec3<u32>`,
+ uniform: false,
+ },
+ {
+ builtin: `workgroup_id`,
+ type: `vec3<u32>`,
+ uniform: true,
+ },
+ {
+ builtin: `num_workgroups`,
+ type: `vec3<u32>`,
+ uniform: true,
+ },
+];
+
+g.test('compute_builtin_values')
+ .desc(`Test uniformity of compute built-in values`)
+ .params(u => u.combineWithParams(kComputeBuiltinValues).beginSubcases())
+ .fn(t => {
+ let cond = ``;
+ switch (t.params.type) {
+ case `u32`:
+ case `i32`:
+ case `f32`: {
+ cond = `p > 0`;
+ break;
+ }
+ case `vec3<u32>`:
+ case `vec3<i32>`:
+ case `vec3<f32>`: {
+ cond = `p.x > 0`;
+ break;
+ }
+ case `bool`: {
+ cond = `p`;
+ break;
+ }
+ default: {
+ unreachable(`Unhandled type`);
+ }
+ }
+ const code = `
+@compute @workgroup_size(16,1,1)
+fn main(@builtin(${t.params.builtin}) p : ${t.params.type}) {
+ if ${cond} {
+ workgroupBarrier();
+ }
+}
+`;
+
+ t.expectCompileResult(t.params.uniform, code);
+ });
+
+function generatePointerCheck(check: string): string {
+ if (check === `address`) {
+ return `let tmp = workgroupUniformLoad(ptr);`;
+ } else {
+ // check === `contents`
+ return `if test_val > 0 {
+ workgroupBarrier();
+ }`;
+ }
+}
+
+const kPointerCases = {
+ address_uniform_literal: {
+ code: `let ptr = &wg_array[0];`,
+ check: `address`,
+ uniform: true,
+ },
+ address_uniform_value: {
+ code: `let ptr = &wg_array[uniform_value];`,
+ check: `address`,
+ uniform: true,
+ },
+ address_nonuniform_value: {
+ code: `let ptr = &wg_array[nonuniform_value];`,
+ check: `address`,
+ uniform: false,
+ },
+ address_uniform_chain: {
+ code: `let p1 = &wg_struct.x;
+ let p2 = &(*p1)[uniform_value];
+ let p3 = &(*p2).x;
+ let ptr = &(*p3)[uniform_value];`,
+ check: `address`,
+ uniform: true,
+ },
+ address_nonuniform_chain1: {
+ code: `let p1 = &wg_struct.x;
+ let p2 = &(*p1)[nonuniform_value];
+ let p3 = &(*p2).x;
+ let ptr = &(*p3)[uniform_value];`,
+ check: `address`,
+ uniform: false,
+ },
+ address_nonuniform_chain2: {
+ code: `let p1 = &wg_struct.x;
+ let p2 = &(*p1)[uniform_value];
+ let p3 = &(*p2).x;
+ let ptr = &(*p3)[nonuniform_value];`,
+ check: `address`,
+ uniform: false,
+ },
+ wg_uniform_load_is_uniform: {
+ code: `let test_val = workgroupUniformLoad(&wg_scalar);`,
+ check: `contents`,
+ uniform: true,
+ },
+ contents_scalar_uniform1: {
+ code: `let ptr = &func_scalar;
+ let test_val = *ptr;`,
+ check: `contents`,
+ uniform: true,
+ },
+ contents_scalar_uniform2: {
+ code: `func_scalar = nonuniform_value;
+ let ptr = &func_scalar;
+ func_scalar = 0;
+ let test_val = *ptr;`,
+ check: `contents`,
+ uniform: true,
+ },
+ contents_scalar_uniform3: {
+ code: `let ptr = &func_scalar;
+ func_scalar = nonuniform_value;
+ func_scalar = uniform_value;
+ let test_val = *ptr;`,
+ check: `contents`,
+ uniform: true,
+ },
+ contents_scalar_nonuniform1: {
+ code: `func_scalar = nonuniform_value;
+ let ptr = &func_scalar;
+ let test_val = *ptr;`,
+ check: `contents`,
+ uniform: false,
+ },
+ contents_scalar_nonuniform2: {
+ code: `let ptr = &func_scalar;
+ *ptr = nonuniform_value;
+ let test_val = *ptr;`,
+ check: `contents`,
+ uniform: false,
+ },
+ contents_scalar_alias_uniform: {
+ code: `let p = &func_scalar;
+ let ptr = p;
+ let test_val = *ptr;`,
+ check: `contents`,
+ uniform: true,
+ },
+ contents_scalar_alias_nonuniform1: {
+ code: `func_scalar = nonuniform_value;
+ let p = &func_scalar;
+ let ptr = p;
+ let test_val = *ptr;`,
+ check: `contents`,
+ uniform: false,
+ },
+ contents_scalar_alias_nonuniform2: {
+ code: `let p = &func_scalar;
+ *p = nonuniform_value;
+ let ptr = p;
+ let test_val = *ptr;`,
+ check: `contents`,
+ uniform: false,
+ },
+ contents_scalar_alias_nonuniform3: {
+ code: `let p = &func_scalar;
+ let ptr = p;
+ *p = nonuniform_value;
+ let test_val = *ptr;`,
+ check: `contents`,
+ uniform: false,
+ },
+ contents_scalar_alias_nonuniform4: {
+ code: `let p = &func_scalar;
+ func_scalar = nonuniform_value;
+ let test_val = *p;`,
+ check: `contents`,
+ uniform: false,
+ },
+ contents_scalar_alias_nonuniform5: {
+ code: `let p = &func_scalar;
+ *p = nonuniform_value;
+ let test_val = func_scalar;`,
+ check: `contents`,
+ uniform: false,
+ },
+ contents_array_uniform_index: {
+ code: `let ptr = &func_array[uniform_value];
+ let test_val = *ptr;`,
+ check: `contents`,
+ uniform: true,
+ },
+ contents_array_nonuniform_index1: {
+ code: `let ptr = &func_array[nonuniform_value];
+ let test_val = *ptr;`,
+ check: `contents`,
+ uniform: false,
+ },
+ contents_array_nonuniform_index2: {
+ code: `let ptr = &func_array[lid.x];
+ let test_val = *ptr;`,
+ check: `contents`,
+ uniform: false,
+ },
+ contents_array_nonuniform_index3: {
+ code: `let ptr = &func_array[gid.x];
+ let test_val = *ptr;`,
+ check: `contents`,
+ uniform: false,
+ },
+ contents_struct_uniform: {
+ code: `let p1 = &func_struct.x[uniform_value].x[uniform_value].x[uniform_value];
+ let test_val = *p1;`,
+ check: `contents`,
+ uniform: true,
+ },
+ contents_struct_nonuniform1: {
+ code: `let p1 = &func_struct.x[nonuniform_value].x[uniform_value].x[uniform_value];
+ let test_val = *p1;`,
+ check: `contents`,
+ uniform: false,
+ },
+ contents_struct_nonuniform2: {
+ code: `let p1 = &func_struct.x[uniform_value].x[gid.x].x[uniform_value];
+ let test_val = *p1;`,
+ check: `contents`,
+ uniform: false,
+ },
+ contents_struct_nonuniform3: {
+ code: `let p1 = &func_struct.x[uniform_value].x[uniform_value].x[lid.y];
+ let test_val = *p1;`,
+ check: `contents`,
+ uniform: false,
+ },
+ contents_struct_chain_uniform: {
+ code: `let p1 = &func_struct.x;
+ let p2 = &(*p1)[uniform_value];
+ let p3 = &(*p2).x;
+ let p4 = &(*p3)[uniform_value];
+ let p5 = &(*p4).x;
+ let p6 = &(*p5)[uniform_value];
+ let test_val = *p6;`,
+ check: `contents`,
+ uniform: true,
+ },
+ contents_struct_chain_nonuniform1: {
+ code: `let p1 = &func_struct.x;
+ let p2 = &(*p1)[nonuniform_value];
+ let p3 = &(*p2).x;
+ let p4 = &(*p3)[uniform_value];
+ let p5 = &(*p4).x;
+ let p6 = &(*p5)[uniform_value];
+ let test_val = *p6;`,
+ check: `contents`,
+ uniform: false,
+ },
+ contents_struct_chain_nonuniform2: {
+ code: `let p1 = &func_struct.x;
+ let p2 = &(*p1)[uniform_value];
+ let p3 = &(*p2).x;
+ let p4 = &(*p3)[gid.x];
+ let p5 = &(*p4).x;
+ let p6 = &(*p5)[uniform_value];
+ let test_val = *p6;`,
+ check: `contents`,
+ uniform: false,
+ },
+ contents_struct_chain_nonuniform3: {
+ code: `let p1 = &func_struct.x;
+ let p2 = &(*p1)[uniform_value];
+ let p3 = &(*p2).x;
+ let p4 = &(*p3)[uniform_value];
+ let p5 = &(*p4).x;
+ let p6 = &(*p5)[lid.y];
+ let test_val = *p6;`,
+ check: `contents`,
+ uniform: false,
+ },
+};
+
+g.test('pointers')
+ .desc(`Test pointer uniformity (contents and addresses)`)
+ .params(u => u.combine('case', keysOf(kPointerCases)).beginSubcases())
+ .fn(t => {
+ const testcase = kPointerCases[t.params.case];
+ const code = `
+var<workgroup> wg_scalar : u32;
+var<workgroup> wg_array : array<u32, 16>;
+
+struct Inner {
+ x : array<u32, 4>
+}
+struct Middle {
+ x : array<Inner, 4>
+}
+struct Outer {
+ x : array<Middle, 4>
+}
+var<workgroup> wg_struct : Outer;
+
+@group(0) @binding(0)
+var<storage> uniform_value : u32;
+@group(0) @binding(1)
+var<storage, read_write> nonuniform_value : u32;
+
+@compute @workgroup_size(16, 1, 1)
+fn main(@builtin(local_invocation_id) lid : vec3<u32>,
+ @builtin(global_invocation_id) gid : vec3<u32>) {
+ var func_scalar : u32;
+ var func_array : array<u32, 16>;
+ var func_struct : Outer;
+
+ ${testcase.code}
+`;
+
+ const with_check =
+ code +
+ `
+${generatePointerCheck(testcase.check)}
+}`;
+ if (!testcase.uniform) {
+ const without_check = code + `}\n`;
+ t.expectCompileResult(true, without_check);
+ }
+ t.expectCompileResult(testcase.uniform, with_check);
+ });
+
+function expectedUniformity(uniform: string, init: string): boolean {
+ if (uniform === `always`) {
+ return true;
+ } else if (uniform === `init`) {
+ return init === `no_init` || init === `uniform`;
+ }
+
+ // uniform == `never` (or unknown values)
+ return false;
+}
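+
+// Worked examples (added note, not part of the upstream CTS): expectedUniformity('always', init)
+// is true for every init; expectedUniformity('init', init) is true only when the variable is
+// left uninitialized ('no_init') or initialized from a uniform value; 'never' always yields false.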
+
+const kFuncVarCases = {
+ no_assign: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: ``,
+ cond: `x > 0`,
+ uniform: `init`,
+ },
+ simple_uniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `x = uniform_value[0];`,
+ cond: `x > 0`,
+ uniform: `always`,
+ },
+ simple_nonuniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `x = nonuniform_value[0];`,
+ cond: `x > 0`,
+ uniform: `never`,
+ },
+ compound_assign_uniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `x += uniform_value[0];`,
+ cond: `x > 0`,
+ uniform: `init`,
+ },
+ compound_assign_nonuniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `x -= nonuniform_value[0];`,
+ cond: `x > 0`,
+ uniform: `never`,
+ },
+ unreachable_uniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `loop {
+ break;
+ x = uniform_value[0];
+ }`,
+ cond: `x > 0`,
+ uniform: `init`,
+ },
+ unreachable_nonuniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `loop {
+ break;
+ x = nonuniform_value[0];
+ }`,
+ cond: `x > 0`,
+ uniform: `init`,
+ },
+ if_no_else_uniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `if uniform_cond {
+ x = uniform_value[0];
+ }`,
+ cond: `x > 0`,
+ uniform: `init`,
+ },
+ if_no_else_nonuniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `if uniform_cond {
+ x = nonuniform_value[0];
+ }`,
+ cond: `x > 0`,
+ uniform: `never`,
+ },
+ if_no_then_uniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `if uniform_cond {
+ } else {
+ x = uniform_value[0];
+ }`,
+ cond: `x > 0`,
+ uniform: `init`,
+ },
+ if_no_then_nonuniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `if uniform_cond {
+ } else {
+ x = nonuniform_value[0];
+ }`,
+ cond: `x > 0`,
+ uniform: `never`,
+ },
+ if_else_uniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `if uniform_cond {
+ x = uniform_value[0];
+ } else {
+ x = uniform_value[1];
+ }`,
+ cond: `x > 0`,
+ uniform: `always`,
+ },
+ if_else_nonuniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `if uniform_cond {
+ x = nonuniform_value[0];
+ } else {
+ x = nonuniform_value[1];
+ }`,
+ cond: `x > 0`,
+ uniform: `never`,
+ },
+ if_else_split: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `if uniform_cond {
+ x = uniform_value[0];
+ } else {
+ x = nonuniform_value[0];
+ }`,
+ cond: `x > 0`,
+ uniform: `never`,
+ },
+ if_unreachable_else_none: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `if uniform_cond {
+ } else {
+ return;
+ }`,
+ cond: `x > 0`,
+ uniform: `init`,
+ },
+ if_unreachable_else_uniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `if uniform_cond {
+ x = uniform_value[0];
+ } else {
+ return;
+ }`,
+ cond: `x > 0`,
+ uniform: `always`,
+ },
+ if_unreachable_else_nonuniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `if uniform_cond {
+ x = nonuniform_value[0];
+ } else {
+ return;
+ }`,
+ cond: `x > 0`,
+ uniform: `never`,
+ },
+ if_unreachable_then_none: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `if uniform_cond {
+ return;
+ }`,
+ cond: `x > 0`,
+ uniform: `init`,
+ },
+ if_unreachable_then_uniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `if uniform_cond {
+ return;
+ } else {
+ x = uniform_value[0];
+ }`,
+ cond: `x > 0`,
+ uniform: `always`,
+ },
+ if_unreachable_then_nonuniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `if uniform_cond {
+ return;
+ } else {
+ x = nonuniform_value[0];
+ }`,
+ cond: `x > 0`,
+ uniform: `never`,
+ },
+ if_nonescaping_nonuniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `if uniform_cond {
+ x = nonuniform_value[0];
+ return;
+ }`,
+ cond: `x > 0`,
+ uniform: `init`,
+ },
+ loop_body_depends_on_continuing_uniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `loop {
+ if x > 0 {
+ let tmp = textureSample(t, s, vec2f(0,0));
+ }
+ continuing {
+ x = uniform_value[0];
+ break if uniform_cond;
+ }
+ }`,
+ cond: `true`, // override the standard check
+ uniform: `init`,
+ },
+ loop_body_depends_on_continuing_nonuniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `loop {
+ if x > 0 {
+ let tmp = textureSample(t, s, vec2f(0,0));
+ }
+ continuing {
+ x = nonuniform_value[0];
+ break if uniform_cond;
+ }
+ }`,
+ cond: `true`, // override the standard check
+ uniform: `never`,
+ },
+ loop_body_uniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `loop {
+ x = uniform_value[0];
+ continuing {
+ break if uniform_cond;
+ }
+ }`,
+ cond: `x > 0`,
+ uniform: `always`,
+ },
+ loop_body_nonuniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `loop {
+ x = nonuniform_value[0];
+ continuing {
+ break if uniform_cond;
+ }
+ }`,
+ cond: `x > 0`,
+ uniform: `never`,
+ },
+ loop_body_nonuniform_cond: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `loop {
+ // The assigned value is uniform, but the analysis doesn't track assigned values;
+ // the non-uniform break condition makes x non-uniform after the loop.
+ x = uniform_value[0];
+ continuing {
+ break if nonuniform_cond;
+ }
+ }`,
+ cond: `x > 0`,
+ uniform: `never`,
+ },
+ loop_unreachable_continuing: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `loop {
+ break;
+ continuing {
+ break if uniform_cond;
+ }
+ }`,
+ cond: `x > 0`,
+ uniform: `init`,
+ },
+ loop_continuing_from_body_uniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `loop {
+ x = uniform_value[0];
+ continuing {
+ if x > 0 {
+ let tmp = textureSample(t, s, vec2f(0,0));
+ }
+ break if uniform_cond;
+ }
+ }`,
+ cond: `true`, // override the standard check
+ uniform: `always`,
+ },
+ loop_continuing_from_body_nonuniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `loop {
+ x = nonuniform_value[0];
+ continuing {
+ if x > 0 {
+ let tmp = textureSample(t, s, vec2f(0,0));
+ }
+ break if uniform_cond;
+ }
+ }`,
+ cond: `true`, // override the standard check
+ uniform: `never`,
+ },
+ loop_continuing_from_body_split1: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `loop {
+ if uniform_cond {
+ x = uniform_value[0];
+ }
+ continuing {
+ if x > 0 {
+ let tmp = textureSample(t, s, vec2f(0,0));
+ }
+ break if uniform_cond;
+ }
+ }`,
+ cond: `true`, // override the standard check
+ uniform: `init`,
+ },
+ loop_continuing_from_body_split2: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `loop {
+ if uniform_cond {
+ x = nonuniform_value[0];
+ }
+ continuing {
+ if x > 0 {
+ let tmp = textureSample(t, s, vec2f(0,0));
+ }
+ break if uniform_cond;
+ }
+ }`,
+ cond: `true`, // override the standard check
+ uniform: `never`,
+ },
+ loop_continuing_from_body_split3: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `loop {
+ if uniform_cond {
+ x = uniform_value[0];
+ } else {
+ x = uniform_value[1];
+ }
+ continuing {
+ if x > 0 {
+ let tmp = textureSample(t, s, vec2f(0,0));
+ }
+ break if uniform_cond;
+ }
+ }`,
+ cond: `true`, // override the standard check
+ uniform: `always`,
+ },
+ loop_continuing_from_body_split4: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `loop {
+ if nonuniform_cond {
+ x = uniform_value[0];
+ } else {
+ x = uniform_value[1];
+ }
+ continuing {
+ if x > 0 {
+ let tmp = textureSample(t, s, vec2f(0,0));
+ }
+ break if uniform_cond;
+ }
+ }`,
+ cond: `true`, // override the standard check
+ uniform: `never`,
+ },
+ loop_continuing_from_body_split5: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `loop {
+ if nonuniform_cond {
+ x = uniform_value[0];
+ } else {
+ x = uniform_value[0];
+ }
+ continuing {
+ if x > 0 {
+ let tmp = textureSample(t, s, vec2f(0,0));
+ }
+ break if uniform_cond;
+ }
+ }`,
+ cond: `true`, // override the standard check
+ // The analysis doesn't recognize that uniform_value[0] is assigned on all paths.
+ uniform: `never`,
+ },
+ loop_in_loop_with_continue_uniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `loop {
+ loop {
+ x = nonuniform_value[0];
+ if nonuniform_cond {
+ break;
+ }
+ continue;
+ }
+ x = uniform_value[0];
+ continuing {
+ if x > 0 {
+ let tmp = textureSample(t, s, vec2f(0,0));
+ }
+ break if uniform_cond;
+ }
+ }`,
+ cond: `true`, // override the standard check
+ uniform: `always`,
+ },
+ loop_in_loop_with_continue_nonuniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `loop {
+ loop {
+ x = uniform_value[0];
+ if uniform_cond {
+ break;
+ }
+ continue;
+ }
+ x = nonuniform_value[0];
+ continuing {
+ if x > 0 {
+ let tmp = textureSample(t, s, vec2f(0,0));
+ }
+ break if uniform_cond;
+ }
+ }`,
+ cond: `true`, // override the standard check
+ uniform: `never`,
+ },
+ after_loop_with_uniform_break_uniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `loop {
+ if uniform_cond {
+ x = uniform_value[0];
+ break;
+ }
+ }`,
+ cond: `x > 0`,
+ uniform: `always`,
+ },
+ after_loop_with_uniform_break_nonuniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `loop {
+ if uniform_cond {
+ x = nonuniform_value[0];
+ break;
+ }
+ }`,
+ cond: `x > 0`,
+ uniform: `never`,
+ },
+ after_loop_with_nonuniform_break: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `loop {
+ if nonuniform_cond {
+ x = uniform_value[0];
+ break;
+ }
+ }`,
+ cond: `x > 0`,
+ uniform: `never`,
+ },
+ after_loop_with_uniform_breaks: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `loop {
+ if uniform_cond {
+ x = uniform_value[0];
+ break;
+ } else {
+ break;
+ }
+ }`,
+ cond: `x > 0`,
+ uniform: `init`,
+ },
+ switch_uniform_case: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `switch uniform_val {
+ case 0 {
+ if x > 0 {
+ let tmp = textureSample(t, s, vec2f(0,0));
+ }
+ }
+ default {
+ }
+ }`,
+ cond: `true`, // override default check
+ uniform: `init`,
+ },
+ switch_nonuniform_case: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `switch nonuniform_val {
+ case 0 {
+ if x > 0 {
+ let tmp = textureSample(t, s, vec2f(0,0));
+ }
+ }
+ default {
+ }
+ }`,
+ cond: `true`, // override default check
+ uniform: `never`,
+ },
+ after_switch_all_uniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `switch uniform_val {
+ case 0 {
+ x = uniform_value[0];
+ }
+ case 1,2 {
+ x = uniform_value[1];
+ }
+ default {
+ x = uniform_value[2];
+ }
+ }`,
+ cond: `x > 0`,
+ uniform: `always`,
+ },
+ after_switch_some_assign: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `switch uniform_val {
+ case 0 {
+ x = uniform_value[0];
+ }
+ case 1,2 {
+ x = uniform_value[1];
+ }
+ default {
+ }
+ }`,
+ cond: `x > 0`,
+ uniform: `init`,
+ },
+ after_switch_nonuniform: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `switch uniform_val {
+ case 0 {
+ x = uniform_value[0];
+ }
+ case 1,2 {
+ x = uniform_value[1];
+ }
+ default {
+ x = nonuniform_value[0];
+ }
+ }`,
+ cond: `x > 0`,
+ uniform: `never`,
+ },
+ after_switch_with_break_nonuniform1: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `switch uniform_val {
+ default {
+ if uniform_cond {
+ x = uniform_value[0];
+ break;
+ }
+ x = nonuniform_value[0];
+ }
+ }`,
+ cond: `x > 0`,
+ uniform: `never`,
+ },
+ after_switch_with_break_nonuniform2: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `switch uniform_val {
+ default {
+ x = uniform_value[0];
+ if uniform_cond {
+ x = nonuniform_value[0];
+ break;
+ }
+ }
+ }`,
+ cond: `x > 0`,
+ uniform: `never`,
+ },
+ for_loop_uniform_body: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `for (var i = 0; i < 10; i += 1) {
+ x = uniform_value[0];
+ }`,
+ cond: `x > 0`,
+ uniform: `init`,
+ },
+ for_loop_nonuniform_body: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `for (var i = 0; i < 10; i += 1) {
+ x = nonuniform_value[0];
+ }`,
+ cond: `x > 0`,
+ uniform: `never`,
+ },
+ for_loop_uniform_body_no_condition: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `for (var i = 0; ; i += 1) {
+ x = uniform_value[0];
+ if uniform_cond {
+ break;
+ }
+ }`,
+ cond: `x > 0`,
+ uniform: `always`,
+ },
+ for_loop_nonuniform_body_no_condition: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `for (var i = 0; ; i += 1) {
+ x = nonuniform_value[0];
+ if uniform_cond {
+ break;
+ }
+ }`,
+ cond: `x > 0`,
+ uniform: `never`,
+ },
+ for_loop_uniform_increment: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `for (; uniform_cond; x += uniform_value[0]) {
+ }`,
+ cond: `x > 0`,
+ uniform: `init`,
+ },
+ for_loop_nonuniform_increment: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `for (; uniform_cond; x += nonuniform_value[0]) {
+ }`,
+ cond: `x > 0`,
+ uniform: `never`,
+ },
+ for_loop_uniform_init: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `for (x = uniform_value[0]; uniform_cond; ) {
+ }`,
+ cond: `x > 0`,
+ uniform: `always`,
+ },
+ for_loop_nonuniform_init: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `for (x = nonuniform_value[0]; uniform_cond;) {
+ }`,
+ cond: `x > 0`,
+ uniform: `never`,
+ },
+ while_loop_uniform_body: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `while uniform_cond {
+ x = uniform_value[0];
+ }`,
+ cond: `x > 0`,
+ uniform: `init`,
+ },
+ while_loop_nonuniform_body: {
+ typename: `u32`,
+ typedecl: ``,
+ assignment: `while uniform_cond {
+ x = nonuniform_value[0];
+ }`,
+ cond: `x > 0`,
+ uniform: `never`,
+ },
+ partial_assignment_uniform: {
+ typename: `block`,
+ typedecl: `struct block {
+ x : u32,
+ y : u32
+ }`,
+ assignment: `x.x = uniform_value[0].x;`,
+ cond: `x.x > 0`,
+ uniform: `init`,
+ },
+ partial_assignment_nonuniform: {
+ typename: `block`,
+ typedecl: `struct block {
+ x : u32,
+ y : u32
+ }`,
+ assignment: `x.x = nonuniform_value[0].x;`,
+ cond: `x.x > 0`,
+ uniform: `never`,
+ },
+ partial_assignment_all_members_uniform: {
+ typename: `block`,
+ typedecl: `struct block {
+ x : u32,
+ y : u32
+ }`,
+ assignment: `x.x = uniform_value[0].x;
+ x.y = uniform_value[1].y;`,
+ cond: `x.x > 0`,
+ uniform: `init`,
+ },
+ partial_assignment_all_members_nonuniform: {
+ typename: `block`,
+ typedecl: `struct block {
+ x : u32,
+ y : u32
+ }`,
+ assignment: `x.x = nonuniform_value[0].x;
+ x.y = uniform_value[0].x;`,
+ cond: `x.x > 0`,
+ uniform: `never`,
+ },
+ partial_assignment_single_element_struct_uniform: {
+ typename: `block`,
+ typedecl: `struct block {
+ x : u32
+ }`,
+ assignment: `x.x = uniform_value[0].x;`,
+ cond: `x.x > 0`,
+ uniform: `init`,
+ },
+ partial_assignment_single_element_struct_nonuniform: {
+ typename: `block`,
+ typedecl: `struct block {
+ x : u32
+ }`,
+ assignment: `x.x = nonuniform_value[0].x;`,
+ cond: `x.x > 0`,
+ uniform: `never`,
+ },
+ partial_assignment_single_element_array_uniform: {
+ typename: `array<u32, 1>`,
+ typedecl: ``,
+ assignment: `x[0] = uniform_value[0][0];`,
+ cond: `x[0] > 0`,
+ uniform: `init`,
+ },
+ partial_assignment_single_element_array_nonuniform: {
+ typename: `array<u32, 1>`,
+ typedecl: ``,
+ assignment: `x[0] = nonuniform_value[0][0];`,
+ cond: `x[0] > 0`,
+ uniform: `never`,
+ },
+ nested1: {
+ typename: `block`,
+ typedecl: `struct block {
+ x : u32,
+ y : u32
+ }`,
+ assignment: `for (; uniform_cond; ) {
+ if uniform_cond {
+ x = uniform_value[0];
+ break;
+ x.y = nonuniform_value[0].y;
+ } else {
+ if uniform_cond {
+ continue;
+ }
+ x = uniform_value[1];
+ }
+ }`,
+ cond: `x.x > 0`,
+ uniform: `init`,
+ },
+ nested2: {
+ typename: `block`,
+ typedecl: `struct block {
+ x : u32,
+ y : u32
+ }`,
+ assignment: `for (; uniform_cond; ) {
+ if uniform_cond {
+ x = uniform_value[0];
+ break;
+ x.y = nonuniform_value[0].y;
+ } else {
+ if nonuniform_cond {
+ continue;
+ }
+ x = uniform_value[1];
+ }
+ }`,
+ cond: `x.x > 0`,
+ uniform: `never`,
+ },
+};
+
+const kVarInit = {
+ no_init: ``,
+ uniform: `= uniform_value[3];`,
+ nonuniform: `= nonuniform_value[3];`,
+};
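+
+// Each function-variable case above is combined with each of these initializers
+// for `x`; the case's `uniform` field ('always', 'never', or 'init') together
+// with the chosen initializer feeds expectedUniformity() to decide whether the
+// shader is expected to pass the derivative_uniformity analysis.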
+
+g.test('function_variables')
+ .desc(`Test uniformity of function variables`)
+ .params(u => u.combine('case', keysOf(kFuncVarCases)).combine('init', keysOf(kVarInit)))
+ .fn(t => {
+ const func_case = kFuncVarCases[t.params.case];
+ const code = `
+${func_case.typedecl}
+
+@group(0) @binding(0)
+var<storage> uniform_value : array<${func_case.typename}, 4>;
+@group(0) @binding(1)
+var<storage, read_write> nonuniform_value : array<${func_case.typename}, 4>;
+
+@group(1) @binding(0)
+var t : texture_2d<f32>;
+@group(1) @binding(1)
+var s : sampler;
+
+var<private> nonuniform_cond : bool = true;
+const uniform_cond : bool = true;
+var<private> nonuniform_val : u32 = 0;
+const uniform_val : u32 = 0;
+
+@fragment
+fn main() {
+ var x : ${func_case.typename} ${kVarInit[t.params.init]};
+
+ ${func_case.assignment}
+
+ if ${func_case.cond} {
+ let tmp = textureSample(t, s, vec2f(0,0));
+ }
+}
+`;
+
+ const result = expectedUniformity(func_case.uniform, t.params.init);
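+    // When a failure is expected, first verify that the same shader compiles with the
+    // derivative_uniformity diagnostic disabled, so the rejection below can be attributed
+    // to the uniformity analysis rather than an unrelated compile error.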
+ if (!result) {
+ t.expectCompileResult(true, `diagnostic(off, derivative_uniformity);\n` + code);
+ }
+ t.expectCompileResult(result, code);
+ });
+
+const kShortCircuitExpressionCases = {
+ or_uniform_uniform: {
+ code: `
+ let x = uniform_cond || uniform_cond;
+ if x {
+ let tmp = textureSample(t, s, vec2f(0,0));
+ }
+ `,
+ uniform: true,
+ },
+ or_uniform_nonuniform: {
+ code: `
+ let x = uniform_cond || nonuniform_cond;
+ if x {
+ let tmp = textureSample(t, s, vec2f(0,0));
+ }
+ `,
+ uniform: false,
+ },
+ or_nonuniform_uniform: {
+ code: `
+ let x = nonuniform_cond || uniform_cond;
+ if x {
+ let tmp = textureSample(t, s, vec2f(0,0));
+ }
+ `,
+ uniform: false,
+ },
+ or_nonuniform_nonuniform: {
+ code: `
+ let x = nonuniform_cond || nonuniform_cond;
+ if x {
+ let tmp = textureSample(t, s, vec2f(0,0));
+ }
+ `,
+ uniform: false,
+ },
+ or_uniform_first_nonuniform: {
+ code: `
+ let x = textureSample(t, s, vec2f(0,0)).x == 0 || nonuniform_cond;
+ `,
+ uniform: true,
+ },
+ or_uniform_second_nonuniform: {
+ code: `
+ let x = nonuniform_cond || textureSample(t, s, vec2f(0,0)).x == 0;
+ `,
+ uniform: false,
+ },
+ and_uniform_uniform: {
+ code: `
+ let x = uniform_cond && uniform_cond;
+ if x {
+ let tmp = textureSample(t, s, vec2f(0,0));
+ }
+ `,
+ uniform: true,
+ },
+ and_uniform_nonuniform: {
+ code: `
+ let x = uniform_cond && nonuniform_cond;
+ if x {
+ let tmp = textureSample(t, s, vec2f(0,0));
+ }
+ `,
+ uniform: false,
+ },
+ and_nonuniform_uniform: {
+ code: `
+ let x = nonuniform_cond && uniform_cond;
+ if x {
+ let tmp = textureSample(t, s, vec2f(0,0));
+ }
+ `,
+ uniform: false,
+ },
+ and_nonuniform_nonuniform: {
+ code: `
+ let x = nonuniform_cond && nonuniform_cond;
+ if x {
+ let tmp = textureSample(t, s, vec2f(0,0));
+ }
+ `,
+ uniform: false,
+ },
+ and_uniform_first_nonuniform: {
+ code: `
+ let x = textureSample(t, s, vec2f(0,0)).x == 0 && nonuniform_cond;
+ `,
+ uniform: true,
+ },
+ and_uniform_second_nonuniform: {
+ code: `
+ let x = nonuniform_cond && textureSample(t, s, vec2f(0,0)).x == 0;
+ `,
+ uniform: false,
+ },
+};
+
+const kPointerParamCases = {
+ pointer_uniform_passthrough_value: {
+ function: `fn foo(p : ptr<function, u32>) -> u32 {
+ return *p;
+ }`,
+ call: `var x = uniform_values[0];
+ let call = foo(&x);`,
+ cond: `x > 0`,
+ uniform: true,
+ },
+  pointer_nonuniform_passthrough_value: {
+    function: `fn foo(p : ptr<function, u32>) -> u32 {
+      return *p;
+    }`,
+    call: `var x = nonuniform_values[0];
+    let call = foo(&x);`,
+    cond: `x > 0`,
+    uniform: false,
+ },
+ pointer_store_uniform_value: {
+ function: `fn foo(p : ptr<function, u32>) {
+ *p = uniform_values[0];
+ }`,
+ call: `var x = nonuniform_values[0];
+ foo(&x);`,
+ cond: `x > 0`,
+ uniform: true,
+ },
+ pointer_store_nonuniform_value: {
+ function: `fn foo(p : ptr<function, u32>) {
+ *p = nonuniform_values[0];
+ }`,
+ call: `var x = uniform_values[0];
+ foo(&x);`,
+ cond: `x > 0`,
+ uniform: false,
+ },
+ pointer_depends_on_nonpointer_param_uniform: {
+ function: `fn foo(p : ptr<function, u32>, x : u32) {
+ *p = x;
+ }`,
+ call: `var x = nonuniform_values[0];
+ foo(&x, uniform_values[0]);`,
+ cond: `x > 0`,
+ uniform: true,
+ },
+ pointer_depends_on_nonpointer_param_nonuniform: {
+ function: `fn foo(p : ptr<function, u32>, x : u32) {
+ *p = x;
+ }`,
+ call: `var x = uniform_values[0];
+ foo(&x, nonuniform_values[0]);`,
+ cond: `x > 0`,
+ uniform: false,
+ },
+ pointer_depends_on_pointer_param_uniform: {
+ function: `fn foo(p : ptr<function, u32>, q : ptr<function, u32>) {
+ *p = *q;
+ }`,
+ call: `var x = nonuniform_values[0];
+ var y = uniform_values[0];
+ foo(&x, &y);`,
+ cond: `x > 0`,
+ uniform: true,
+ },
+ pointer_depends_on_pointer_param_nonuniform: {
+ function: `fn foo(p : ptr<function, u32>, q : ptr<function, u32>) {
+ *p = *q;
+ }`,
+ call: `var x = uniform_values[0];
+ var y = nonuniform_values[0];
+ foo(&x, &y);`,
+ cond: `x > 0`,
+ uniform: false,
+ },
+ pointer_codependent1: {
+ function: `fn foo(p : ptr<function, u32>, q : ptr<function, u32>) {
+ if *p > 0 {
+ *p = *q;
+ } else {
+ *q++;
+ }
+ }`,
+ call: `var x = uniform_values[0];
+ var y = uniform_values[1];
+ foo(&x, &y);
+ let a = x + y;`,
+ cond: `a > 0`,
+ uniform: true,
+ },
+ pointer_codependent2: {
+ function: `fn foo(p : ptr<function, u32>, q : ptr<function, u32>) {
+ if *p > 0 {
+ *p = *q;
+ } else {
+ *q++;
+ }
+ }`,
+ call: `var x = uniform_values[0];
+ var y = nonuniform_values[1];
+ foo(&x, &y);
+ let a = x + y;`,
+ cond: `a > 0`,
+ uniform: false,
+ },
+ pointer_codependent3: {
+ function: `fn foo(p : ptr<function, u32>, q : ptr<function, u32>) {
+ if *p > 0 {
+ *p = *q;
+ } else {
+ *q++;
+ }
+ }`,
+ call: `var x = nonuniform_values[0];
+ var y = uniform_values[1];
+ foo(&x, &y);
+ let a = x + y;`,
+ cond: `a > 0`,
+ uniform: false,
+ },
+ pointer_codependent4: {
+ function: `fn foo(p : ptr<function, u32>, q : ptr<function, u32>) {
+ if *p > 0 {
+ *p = *q;
+ } else {
+ *q++;
+ }
+ }`,
+ call: `var x = nonuniform_values[0];
+ var y = nonuniform_values[1];
+ foo(&x, &y);
+ let a = x + y;`,
+ cond: `a > 0`,
+ uniform: false,
+ },
+ uniform_param_uniform_assignment: {
+ function: `fn foo(p : ptr<function, array<u32, 2>>, idx : u32) {
+ (*p)[idx] = uniform_values[0];
+ }`,
+ call: `var x = array(uniform_values[0], uniform_values[1]);
+ foo(&x, uniform_values[3]);`,
+ cond: `x[0] > 0`,
+ uniform: true,
+ },
+ uniform_param_nonuniform_assignment: {
+ function: `fn foo(p : ptr<function, array<u32, 2>>, idx : u32) {
+ (*p)[idx] = nonuniform_values[0];
+ }`,
+ call: `var x = array(uniform_values[0], uniform_values[1]);
+ foo(&x, uniform_values[3]);`,
+ cond: `x[0] > 0`,
+ uniform: false,
+ },
+ nonuniform_param_uniform_assignment: {
+ function: `fn foo(p : ptr<function, array<u32, 2>>, idx : u32) {
+ (*p)[idx] = uniform_values[0];
+ }`,
+ call: `var x = array(uniform_values[0], uniform_values[1]);
+ foo(&x, u32(clamp(pos.x, 0, 1)));`,
+ cond: `x[0] > 0`,
+ uniform: false,
+ },
+ nonuniform_param_nonuniform_assignment: {
+ function: `fn foo(p : ptr<function, array<u32, 2>>, idx : u32) {
+ (*p)[idx] = nonuniform_values[0];
+ }`,
+ call: `var x = array(uniform_values[0], uniform_values[1]);
+ foo(&x, u32(clamp(pos.x, 0, 1)));`,
+ cond: `x[0] > 0`,
+ uniform: false,
+ },
+ required_uniform_success: {
+ function: `fn foo(p : ptr<function, u32>) {
+ if *p > 0 {
+ let tmp = textureSample(t,s,vec2f(0,0));
+ }
+ }`,
+ call: `var x = uniform_values[0];
+ foo(&x);`,
+ cond: `uniform_cond`,
+ uniform: true,
+ },
+ required_uniform_failure: {
+ function: `fn foo(p : ptr<function, u32>) {
+ if *p > 0 {
+ let tmp = textureSample(t,s,vec2f(0,0));
+ }
+ }`,
+ call: `var x = nonuniform_values[0];
+ foo(&x);`,
+ cond: `uniform_cond`,
+ uniform: false,
+ },
+ uniform_conditional_call_assign_uniform: {
+ function: `fn foo(p : ptr<function, u32>) {
+ *p = uniform_values[0];
+ }`,
+ call: `var x = uniform_values[1];
+ if uniform_cond {
+ foo(&x);
+ }`,
+ cond: `x > 0`,
+ uniform: true,
+ },
+ uniform_conditional_call_assign_nonuniform1: {
+ function: `fn foo(p : ptr<function, u32>) {
+ *p = nonuniform_values[0];
+ }`,
+ call: `var x = uniform_values[1];
+ if uniform_cond {
+ foo(&x);
+ }`,
+ cond: `x > 0`,
+ uniform: false,
+ },
+ uniform_conditional_call_assign_nonuniform2: {
+ function: `fn foo(p : ptr<function, u32>) {
+ *p = uniform_values[0];
+ }`,
+ call: `var x = nonuniform_values[1];
+ if uniform_cond {
+ foo(&x);
+ }`,
+ cond: `x > 0`,
+ uniform: false,
+ },
+ nonuniform_conditional_call_assign_uniform: {
+ function: `fn foo(p : ptr<function, u32>) {
+ *p = uniform_values[0];
+ }`,
+ call: `var x = uniform_values[1];
+ if nonuniform_cond {
+ foo(&x);
+ }`,
+ cond: `x > 0`,
+ uniform: false,
+ },
+};
+
+g.test('function_pointer_parameters')
+ .desc(`Test functions and calls with pointer parameters`)
+ .params(u => u.combine('case', keysOf(kPointerParamCases)))
+ .fn(t => {
+ const pointer_case = kPointerParamCases[t.params.case];
+ const code = `
+@group(0) @binding(0)
+var t : texture_2d<f32>;
+@group(0) @binding(1)
+var s : sampler;
+
+const uniform_cond = true;
+var<private> nonuniform_cond = true;
+
+@group(1) @binding(0)
+var<storage> uniform_values : array<u32, 4>;
+@group(1) @binding(1)
+var<storage, read_write> nonuniform_values : array<u32, 4>;
+
+${pointer_case.function}
+
+@fragment
+fn main(@builtin(position) pos : vec4f) {
+ ${pointer_case.call}
+
+ if ${pointer_case.cond} {
+ let tmp = textureSample(t,s,vec2f(0,0));
+ }
+}
+`;
+
+ const res = pointer_case.uniform;
+ if (!res) {
+ t.expectCompileResult(true, `diagnostic(off, derivative_uniformity);\n` + code);
+ }
+ t.expectCompileResult(res, code);
+ });
+
+g.test('short_circuit_expressions')
+  .desc(`Test uniformity of short-circuiting expressions`)
+ .params(u => u.combine('case', keysOf(kShortCircuitExpressionCases)))
+ .fn(t => {
+ const testcase = kShortCircuitExpressionCases[t.params.case];
+ const code = `
+@group(1) @binding(0)
+var t : texture_2d<f32>;
+@group(1) @binding(1)
+var s : sampler;
+
+const uniform_cond = true;
+var<private> nonuniform_cond = false;
+
+@fragment
+fn main() {
+ ${testcase.code}
+}
+`;
+
+ const res = testcase.uniform;
+ if (!res) {
+ t.expectCompileResult(true, `diagnostic(off, derivative_uniformity);\n` + code);
+ }
+ t.expectCompileResult(res, code);
+ });
+
+const kExpressionCases = {
+ literal: {
+ code: `1u`,
+ uniform: true,
+ },
+ uniform: {
+ code: `uniform_val`,
+ uniform: true,
+ },
+ nonuniform: {
+ code: `nonuniform_val`,
+ uniform: false,
+ },
+ uniform_index: {
+ code: `uniform_value[uniform_val]`,
+ uniform: true,
+ },
+ nonuniform_index1: {
+ code: `uniform_value[nonuniform_val]`,
+ uniform: false,
+ },
+ nonuniform_index2: {
+ code: `nonuniform_value[uniform_val]`,
+ uniform: false,
+ },
+ uniform_struct: {
+ code: `uniform_struct.x`,
+ uniform: true,
+ },
+ nonuniform_struct: {
+ code: `nonuniform_struct.x`,
+ uniform: false,
+ },
+};
+
+const kBinOps = {
+ plus: {
+ code: '+',
+ test: '> 0',
+ },
+ minus: {
+ code: '-',
+ test: '> 0',
+ },
+ times: {
+ code: '*',
+ test: '> 0',
+ },
+ div: {
+ code: '/',
+ test: '> 0',
+ },
+ rem: {
+ code: '%',
+ test: '> 0',
+ },
+ and: {
+ code: '&',
+ test: '> 0',
+ },
+ or: {
+ code: '|',
+ test: '> 0',
+ },
+ xor: {
+ code: '^',
+ test: '> 0',
+ },
+ shl: {
+ code: '<<',
+ test: '> 0',
+ },
+ shr: {
+ code: '>>',
+ test: '> 0',
+ },
+ less: {
+ code: '<',
+ test: '',
+ },
+ lessequal: {
+ code: '<=',
+ test: '',
+ },
+ greater: {
+ code: '>',
+ test: '',
+ },
+ greaterequal: {
+ code: '>=',
+ test: '',
+ },
+ equal: {
+ code: '==',
+ test: '',
+ },
+ notequal: {
+ code: '!=',
+ test: '',
+ },
+};
+
+g.test('binary_expressions')
+ .desc(`Test uniformity of binary expressions`)
+ .params(u =>
+ u
+ .combine('e1', keysOf(kExpressionCases))
+ .combine('e2', keysOf(kExpressionCases))
+ .combine('op', keysOf(kBinOps))
+ )
+ .fn(t => {
+ const e1 = kExpressionCases[t.params.e1];
+ const e2 = kExpressionCases[t.params.e2];
+ const op = kBinOps[t.params.op];
+ const code = `
+@group(0) @binding(0)
+var t : texture_2d<f32>;
+@group(0) @binding(1)
+var s : sampler;
+
+struct S {
+ x : u32
+}
+
+const uniform_struct = S(1);
+var<private> nonuniform_struct = S(1);
+
+const uniform_value : array<u32, 2> = array(1,1);
+var<private> nonuniform_value : array<u32, 2> = array(1,1);
+
+const uniform_val : u32 = 1;
+var<private> nonuniform_val : u32 = 1;
+
+@fragment
+fn main() {
+ let tmp = ${e1.code} ${op.code} ${e2.code};
+ if tmp ${op.test} {
+ let res = textureSample(t, s, vec2f(0,0));
+ }
+}
+`;
+
+ const res = e1.uniform && e2.uniform;
+ if (!res) {
+ t.expectCompileResult(true, `diagnostic(off, derivative_uniformity);\n` + code);
+ }
+ t.expectCompileResult(res, code);
+ });
+
+g.test('unary_expressions')
+  .desc(`Test uniformity of unary expressions`)
+ .params(u =>
+ u
+ .combine('e', keysOf(kExpressionCases))
+ .combine('op', ['!b_tmp', '~i_tmp > 0', '-i32(i_tmp) > 0'] as const)
+ )
+ .fn(t => {
+ const e = kExpressionCases[t.params.e];
+ const code = `
+@group(0) @binding(0)
+var t : texture_2d<f32>;
+@group(0) @binding(1)
+var s : sampler;
+
+struct S {
+ x : i32
+}
+
+const uniform_struct = S(1);
+var<private> nonuniform_struct = S(1);
+
+const uniform_value : array<i32, 2> = array(1,1);
+var<private> nonuniform_value : array<i32, 2> = array(1,1);
+
+const uniform_val : i32 = 1;
+var<private> nonuniform_val : i32 = 1;
+
+@fragment
+fn main() {
+ let i_tmp = ${e.code};
+ let b_tmp = bool(i_tmp);
+ let tmp = ${t.params.op};
+ if tmp {
+ let res = textureSample(t, s, vec2f(0,0));
+ }
+}
+`;
+
+ const res = e.uniform;
+ if (!res) {
+ t.expectCompileResult(true, `diagnostic(off, derivative_uniformity);\n` + code);
+ }
+ t.expectCompileResult(res, code);
+ });
+
+const kFunctionCases = {
+ uniform_result: {
+ function: `fn foo() -> u32 {
+ return uniform_values[0];
+ }`,
+ call: `let call = foo();`,
+ cond: `call > 0`,
+ uniform: true,
+ },
+ nonuniform_result: {
+ function: `fn foo() -> u32 {
+ return nonuniform_values[0];
+ }`,
+ call: `let call = foo();`,
+ cond: `call > 0`,
+ uniform: false,
+ },
+ nonuniform_return_is_uniform_after_call: {
+ function: `fn foo() {
+ if nonuniform_values[0] > 0 {
+ return;
+ } else {
+ return;
+ }
+ }`,
+ call: `foo();`,
+ cond: `uniform_cond`,
+ uniform: true,
+ },
+ uniform_passthrough_parameter: {
+ function: `fn foo(x : u32) -> u32 {
+ return x;
+ }`,
+ call: `let call = foo(uniform_values[0]);`,
+ cond: `call > 0`,
+ uniform: true,
+ },
+ nonuniform_passthrough_parameter: {
+ function: `fn foo(x : u32) -> u32 {
+ return x;
+ }`,
+ call: `let call = foo(nonuniform_values[0]);`,
+ cond: `call > 0`,
+ uniform: false,
+ },
+ combined_parameters1: {
+ function: `fn foo(x : u32, y : u32) -> u32 {
+ return x + y;
+ }`,
+ call: `let call = foo(uniform_values[0], uniform_values[1]);`,
+ cond: `call > 0`,
+ uniform: true,
+ },
+ combined_parameters2: {
+ function: `fn foo(x : u32, y : u32) -> u32 {
+ return x + y;
+ }`,
+ call: `let call = foo(nonuniform_values[0], uniform_values[1]);`,
+ cond: `call > 0`,
+ uniform: false,
+ },
+ combined_parameters3: {
+ function: `fn foo(x : u32, y : u32) -> u32 {
+ return x + y;
+ }`,
+ call: `let call = foo(uniform_values[0], nonuniform_values[1]);`,
+ cond: `call > 0`,
+ uniform: false,
+ },
+ combined_parameters4: {
+ function: `fn foo(x : u32, y : u32) -> u32 {
+ return x + y;
+ }`,
+ call: `let call = foo(nonuniform_values[0], nonuniform_values[1]);`,
+ cond: `call > 0`,
+ uniform: false,
+ },
+ uniform_parameter_cf_after_nonuniform_expr: {
+ function: `fn foo(x : bool, y : vec4f) -> f32 {
+ return select(0, y.x, x);
+ }`,
+ call: `let call = foo(nonuniform_cond || uniform_cond, textureSample(t,s,vec2f(0,0)));`,
+ cond: `uniform_cond`,
+ uniform: true,
+ },
+ required_uniform_function_call_in_uniform_cf: {
+ function: `fn foo() -> vec4f {
+ return textureSample(t,s,vec2f(0,0));
+ }`,
+ call: `if uniform_cond {
+ let call = foo();
+ }`,
+ cond: `uniform_cond`,
+ uniform: true,
+ },
+ required_uniform_function_call_in_nonuniform_cf: {
+ function: `fn foo() -> vec4f {
+ return textureSample(t,s,vec2f(0,0));
+ }`,
+ call: `if nonuniform_cond {
+ let call = foo();
+ }`,
+ cond: `uniform_cond`,
+ uniform: false,
+ },
+ required_uniform_function_call_in_nonuniform_cf2: {
+ function: `@diagnostic(warning, derivative_uniformity)
+ fn foo() -> vec4f {
+ return textureSample(t,s,vec2f(0,0));
+ }`,
+ call: `if nonuniform_cond {
+ let call = foo();
+ let sample = textureSample(t,s,vec2f(0,0));
+ }`,
+ cond: `uniform_cond`,
+ uniform: false,
+ },
+ required_uniform_function_call_depends_on_uniform_param: {
+ function: `fn foo(x : bool) -> vec4f {
+ if x {
+ return textureSample(t,s,vec2f(0,0));
+ }
+ return vec4f(0);
+ }`,
+ call: `let call = foo(uniform_cond);`,
+ cond: `uniform_cond`,
+ uniform: true,
+ },
+ required_uniform_function_call_depends_on_nonuniform_param: {
+ function: `fn foo(x : bool) -> vec4f {
+ if x {
+ return textureSample(t,s,vec2f(0,0));
+ }
+ return vec4f(0);
+ }`,
+ call: `let call = foo(nonuniform_cond);`,
+ cond: `uniform_cond`,
+ uniform: false,
+ },
+ dpdx_nonuniform_result: {
+ function: ``,
+ call: `let call = dpdx(1);`,
+ cond: `call > 0`,
+ uniform: false,
+ },
+ dpdy_nonuniform_result: {
+ function: ``,
+ call: `let call = dpdy(1);`,
+ cond: `call > 0`,
+ uniform: false,
+ },
+ dpdxCoarse_nonuniform_result: {
+ function: ``,
+ call: `let call = dpdxCoarse(1);`,
+ cond: `call > 0`,
+ uniform: false,
+ },
+ dpdyCoarse_nonuniform_result: {
+ function: ``,
+ call: `let call = dpdyCoarse(1);`,
+ cond: `call > 0`,
+ uniform: false,
+ },
+ dpdxFine_nonuniform_result: {
+ function: ``,
+ call: `let call = dpdxFine(1);`,
+ cond: `call > 0`,
+ uniform: false,
+ },
+ dpdyFine_nonuniform_result: {
+ function: ``,
+ call: `let call = dpdyFine(1);`,
+ cond: `call > 0`,
+ uniform: false,
+ },
+ fwidth_nonuniform_result: {
+ function: ``,
+ call: `let call = fwidth(1);`,
+ cond: `call > 0`,
+ uniform: false,
+ },
+ fwidthCoarse_nonuniform_result: {
+ function: ``,
+ call: `let call = fwidthCoarse(1);`,
+ cond: `call > 0`,
+ uniform: false,
+ },
+ fwidthFine_nonuniform_result: {
+ function: ``,
+ call: `let call = fwidthFine(1);`,
+ cond: `call > 0`,
+ uniform: false,
+ },
+ textureSample_nonuniform_result: {
+ function: ``,
+ call: `let call = textureSample(t,s,vec2f(0,0));`,
+ cond: `call.x > 0`,
+ uniform: false,
+ },
+ textureSampleBias_nonuniform_result: {
+ function: ``,
+ call: `let call = textureSampleBias(t,s,vec2f(0,0), 0);`,
+ cond: `call.x > 0`,
+ uniform: false,
+ },
+ textureSampleCompare_nonuniform_result: {
+ function: ``,
+ call: `let call = textureSampleCompare(td,sd,vec2f(0,0), 0);`,
+ cond: `call > 0`,
+ uniform: false,
+ },
+ textureDimensions_uniform_input_uniform_result: {
+ function: ``,
+ call: `let call = textureDimensions(t);`,
+ cond: `call.x > 0`,
+ uniform: true,
+ },
+ textureGather_uniform_input_uniform_result: {
+ function: ``,
+ call: `let call = textureGather(0,t,s,vec2f(0,0));`,
+ cond: `call.x > 0`,
+ uniform: true,
+ },
+ textureGatherCompare_uniform_input_uniform_result: {
+ function: ``,
+ call: `let call = textureGatherCompare(td,sd,vec2f(0,0), 0);`,
+ cond: `call.x > 0`,
+ uniform: true,
+ },
+ textureLoad_uniform_input_uniform_result: {
+ function: ``,
+ call: `let call = textureLoad(t,vec2u(0,0),0);`,
+ cond: `call.x > 0`,
+ uniform: true,
+ },
+ textureNumLayers_uniform_input_uniform_result: {
+ function: ``,
+ call: `let call = textureNumLayers(ta);`,
+ cond: `call > 0`,
+ uniform: true,
+ },
+ textureNumLevels_uniform_input_uniform_result: {
+ function: ``,
+ call: `let call = textureNumLevels(t);`,
+ cond: `call > 0`,
+ uniform: true,
+ },
+ textureNumSamples_uniform_input_uniform_result: {
+ function: ``,
+ call: `let call = textureNumSamples(ts);`,
+ cond: `call > 0`,
+ uniform: true,
+ },
+ textureSampleLevel_uniform_input_uniform_result: {
+ function: ``,
+ call: `let call = textureSampleLevel(t,s,vec2f(0,0),0);`,
+ cond: `call.x > 0`,
+ uniform: true,
+ },
+ textureSampleGrad_uniform_input_uniform_result: {
+ function: ``,
+ call: `let call = textureSampleGrad(t,s,vec2f(0,0),vec2f(0,0),vec2f(0,0));`,
+ cond: `call.x > 0`,
+ uniform: true,
+ },
+ textureSampleCompareLevel_uniform_input_uniform_result: {
+ function: ``,
+ call: `let call = textureSampleCompareLevel(td,sd,vec2f(0,0), 0);`,
+ cond: `call > 0`,
+ uniform: true,
+ },
+ textureSampleBaseClampToEdge_uniform_input_uniform_result: {
+ function: ``,
+ call: `let call = textureSampleBaseClampToEdge(t,s,vec2f(0,0));`,
+ cond: `call.x > 0`,
+ uniform: true,
+ },
+ min_uniform_input_uniform_result: {
+ function: ``,
+ call: `let call = min(0,0);`,
+ cond: `call > 0`,
+ uniform: true,
+ },
+ value_constructor_uniform_input_uniform_result: {
+ function: ``,
+ call: `let call = vec2u(0,0);`,
+ cond: `call.x > 0`,
+ uniform: true,
+ },
+};
+
+g.test('functions')
+ .desc(`Test uniformity of function calls (non-pointer parameters)`)
+ .params(u => u.combine('case', keysOf(kFunctionCases)))
+ .fn(t => {
+ const func_case = kFunctionCases[t.params.case];
+ const code = `
+@group(0) @binding(0)
+var t : texture_2d<f32>;
+@group(0) @binding(1)
+var s : sampler;
+@group(0) @binding(2)
+var td : texture_depth_2d;
+@group(0) @binding(3)
+var sd : sampler_comparison;
+@group(0) @binding(4)
+var ta : texture_2d_array<f32>;
+@group(0) @binding(5)
+var ts : texture_multisampled_2d<f32>;
+
+const uniform_cond = true;
+var<private> nonuniform_cond = true;
+
+@group(1) @binding(0)
+var<storage> uniform_values : array<u32, 4>;
+@group(1) @binding(1)
+var<storage, read_write> nonuniform_values : array<u32, 4>;
+
+${func_case.function}
+
+@fragment
+fn main() {
+ ${func_case.call}
+
+ if ${func_case.cond} {
+ let tmp = textureSample(t,s,vec2f(0,0));
+ }
+}
+`;
+
+ const res = func_case.uniform;
+ if (!res) {
+ t.expectCompileResult(true, `diagnostic(off, derivative_uniformity);\n` + code);
+ }
+ t.expectCompileResult(res, code);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/shader/values.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/values.ts
new file mode 100644
index 0000000000..38a2fe46f0
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/shader/values.ts
@@ -0,0 +1,91 @@
+export const description = `Special and sample values for WGSL scalar types`;
+
+import { assert } from '../../common/util/util.js';
+import { uint32ToFloat32 } from '../util/conversion.js';
+
+/** Returns an array of subnormal f32 numbers.
+ * Subnormals are non-zero finite numbers with the minimum representable
+ * exponent.
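+ * For example, the bit pattern 0x0000_0001 encodes the smallest positive
+ * subnormal (roughly 1.4e-45), and 0x8000_0001 its negative counterpart.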
+ */
+export function subnormalF32Examples(): Array<number> {
+ // The results, as uint32 values.
+ const result_as_bits: number[] = [];
+
+ const max_mantissa = 0x7f_ffff;
+ const sign_bits = [0, 0x8000_0000];
+ for (const sign_bit of sign_bits) {
+ // exponent bits must be zero.
+ const sign_and_exponent = sign_bit;
+
+ // Set all bits
+ result_as_bits.push(sign_and_exponent | max_mantissa);
+
+ // Set each of the lower bits individually.
+ for (let lower_bits = 1; lower_bits <= max_mantissa; lower_bits <<= 1) {
+ result_as_bits.push(sign_and_exponent | lower_bits);
+ }
+ }
+ assert(
+ result_as_bits.length === 2 * (1 + 23),
+ 'subnormal number sample count is ' + result_as_bits.length.toString()
+ );
+ return result_as_bits.map(u => uint32ToFloat32(u));
+}
+
+/** Returns an array of normal f32 numbers.
+ * Normal numbers are not zero, NaN, infinity, or subnormal.
+ */
+export function normalF32Examples(): Array<number> {
+ const result: number[] = [1.0, -2.0];
+
+ const max_mantissa_as_bits = 0x7f_ffff;
+ const min_exponent_as_bits = 0x0080_0000;
+ const max_exponent_as_bits = 0x7f00_0000; // Max normal exponent
+ const sign_bits = [0, 0x8000_0000];
+ for (const sign_bit of sign_bits) {
+ for (let e = min_exponent_as_bits; e <= max_exponent_as_bits; e += min_exponent_as_bits) {
+ const sign_and_exponent = sign_bit | e;
+
+ // Set zero mantissa bits
+ result.push(uint32ToFloat32(sign_and_exponent));
+ // Set all mantissa bits
+ result.push(uint32ToFloat32(sign_and_exponent | max_mantissa_as_bits));
+
+ // Set each of the lower bits individually.
+ for (let lower_bits = 1; lower_bits <= max_mantissa_as_bits; lower_bits <<= 1) {
+ result.push(uint32ToFloat32(sign_and_exponent | lower_bits));
+ }
+ }
+ }
+ assert(
+ result.length === 2 + 2 * 254 * 25,
+ 'normal number sample count is ' + result.length.toString()
+ );
+ return result;
+}
+
+/** Returns an array of 32-bit NaNs, as Uint32 bit patterns.
+ * NaNs have the maximum exponent and a non-zero mantissa.
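+ * For example, 0x7fc0_0000 is the canonical quiet NaN bit pattern.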
+ */
+export function nanF32BitsExamples(): Array<number> {
+ const result: number[] = [];
+ const exponent_bit = 0x7f80_0000;
+ const sign_bits = [0, 0x8000_0000];
+ for (const sign_bit of sign_bits) {
+ const sign_and_exponent = sign_bit | exponent_bit;
+ const bits = sign_and_exponent | 0x40_0000;
+ // Only the most significant bit of the mantissa is set.
+ result.push(bits);
+
+ // Quiet and signalling NaNs differ based on the most significant bit
+ // of the mantissa. Try both.
+ for (const quiet_signalling of [0, 0x40_0000]) {
+ // Set each of the lower bits.
+ for (let lower_bits = 1; lower_bits < 0x40_0000; lower_bits <<= 1) {
+ const bits = sign_and_exponent | quiet_signalling | lower_bits;
+ result.push(bits);
+ }
+ }
+ }
+ return result;
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/binary_stream.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/binary_stream.ts
new file mode 100644
index 0000000000..a6512020e6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/binary_stream.ts
@@ -0,0 +1,213 @@
+import { assert } from '../../common/util/util.js';
+
+import { float16ToUint16, uint16ToFloat16 } from './conversion.js';
+import { align } from './math.js';
+
+/**
+ * BinaryStream is a utility to efficiently encode and decode numbers to / from a Uint8Array.
+ * BinaryStream uses a single DataView over the buffer, avoiding small array allocations when
+ * reading and writing.
+ */
+export default class BinaryStream {
+ /**
+ * Constructor
+ * @param buffer the buffer to read from / write to. Array length must be a multiple of 8 bytes.
+ */
+ constructor(buffer: ArrayBufferLike) {
+ this.offset = 0;
+ this.view = new DataView(buffer);
+ }
+
+ /** buffer() returns the stream's buffer sliced to the 8-byte rounded read or write offset */
+ buffer(): Uint8Array {
+ return new Uint8Array(this.view.buffer, 0, align(this.offset, 8));
+ }
+
+ /** writeBool() writes a boolean as 255 or 0 to the buffer at the next byte offset */
+ writeBool(value: boolean) {
+ this.view.setUint8(this.offset++, value ? 255 : 0);
+ }
+
+ /** readBool() reads a boolean from the buffer at the next byte offset */
+ readBool(): boolean {
+ const val = this.view.getUint8(this.offset++);
+ assert(val === 0 || val === 255);
+ return val !== 0;
+ }
+
+ /** writeU8() writes a uint8 to the buffer at the next byte offset */
+ writeU8(value: number) {
+ this.view.setUint8(this.offset++, value);
+ }
+
+ /** readU8() reads a uint8 from the buffer at the next byte offset */
+ readU8(): number {
+ return this.view.getUint8(this.offset++);
+ }
+
+ /** writeU16() writes a uint16 to the buffer at the next 16-bit aligned offset */
+ writeU16(value: number) {
+ this.view.setUint16(this.alignedOffset(2), value, /* littleEndian */ true);
+ }
+
+ /** readU16() reads a uint16 from the buffer at the next 16-bit aligned offset */
+ readU16(): number {
+ return this.view.getUint16(this.alignedOffset(2), /* littleEndian */ true);
+ }
+
+ /** writeU32() writes a uint32 to the buffer at the next 32-bit aligned offset */
+ writeU32(value: number) {
+ this.view.setUint32(this.alignedOffset(4), value, /* littleEndian */ true);
+ }
+
+ /** readU32() reads a uint32 from the buffer at the next 32-bit aligned offset */
+ readU32(): number {
+ return this.view.getUint32(this.alignedOffset(4), /* littleEndian */ true);
+ }
+
+  /** writeI8() writes an int8 to the buffer at the next byte offset */
+ writeI8(value: number) {
+ this.view.setInt8(this.offset++, value);
+ }
+
+  /** readI8() reads an int8 from the buffer at the next byte offset */
+ readI8(): number {
+ return this.view.getInt8(this.offset++);
+ }
+
+  /** writeI16() writes an int16 to the buffer at the next 16-bit aligned offset */
+ writeI16(value: number) {
+ this.view.setInt16(this.alignedOffset(2), value, /* littleEndian */ true);
+ }
+
+  /** readI16() reads an int16 from the buffer at the next 16-bit aligned offset */
+ readI16(): number {
+ return this.view.getInt16(this.alignedOffset(2), /* littleEndian */ true);
+ }
+
+  /** writeI32() writes an int32 to the buffer at the next 32-bit aligned offset */
+ writeI32(value: number) {
+ this.view.setInt32(this.alignedOffset(4), value, /* littleEndian */ true);
+ }
+
+  /** readI32() reads an int32 from the buffer at the next 32-bit aligned offset */
+ readI32(): number {
+ return this.view.getInt32(this.alignedOffset(4), /* littleEndian */ true);
+ }
+
+ /** writeF16() writes a float16 to the buffer at the next 16-bit aligned offset */
+ writeF16(value: number) {
+ this.writeU16(float16ToUint16(value));
+ }
+
+ /** readF16() reads a float16 from the buffer at the next 16-bit aligned offset */
+ readF16(): number {
+ return uint16ToFloat16(this.readU16());
+ }
+
+ /** writeF32() writes a float32 to the buffer at the next 32-bit aligned offset */
+ writeF32(value: number) {
+ this.view.setFloat32(this.alignedOffset(4), value, /* littleEndian */ true);
+ }
+
+ /** readF32() reads a float32 from the buffer at the next 32-bit aligned offset */
+ readF32(): number {
+ return this.view.getFloat32(this.alignedOffset(4), /* littleEndian */ true);
+ }
+
+ /** writeF64() writes a float64 to the buffer at the next 64-bit aligned offset */
+ writeF64(value: number) {
+ this.view.setFloat64(this.alignedOffset(8), value, /* littleEndian */ true);
+ }
+
+ /** readF64() reads a float64 from the buffer at the next 64-bit aligned offset */
+ readF64(): number {
+ return this.view.getFloat64(this.alignedOffset(8), /* littleEndian */ true);
+ }
+
+ /**
+ * writeString() writes a length-prefixed UTF-16 string to the buffer at the next 32-bit aligned
+ * offset
+ */
+ writeString(value: string) {
+ this.writeU32(value.length);
+ for (let i = 0; i < value.length; i++) {
+ this.writeU16(value.charCodeAt(i));
+ }
+ }
+
+ /**
+   * readString() reads a length-prefixed UTF-16 string from the buffer at the next 32-bit aligned
+ * offset
+ */
+ readString(): string {
+ const len = this.readU32();
+ const codes = new Array<number>(len);
+ for (let i = 0; i < len; i++) {
+ codes[i] = this.readU16();
+ }
+ return String.fromCharCode(...codes);
+ }
+
+ /**
+ * writeArray() writes a length-prefixed array of T elements to the buffer at the next 32-bit
+ * aligned offset, using the provided callback to write the individual elements
+ */
+ writeArray<T>(value: readonly T[], writeElement: (s: BinaryStream, element: T) => void) {
+ this.writeU32(value.length);
+ for (const element of value) {
+ writeElement(this, element);
+ }
+ }
+
+ /**
+ * readArray() reads a length-prefixed array of T elements from the buffer at the next 32-bit
+ * aligned offset, using the provided callback to read the individual elements
+ */
+ readArray<T>(readElement: (s: BinaryStream) => T): T[] {
+ const len = this.readU32();
+ const array = new Array<T>(len);
+ for (let i = 0; i < len; i++) {
+ array[i] = readElement(this);
+ }
+ return array;
+ }
+
+ /**
+ * writeCond() writes the boolean condition `cond` to the buffer, then either calls if_true if
+ * `cond` is true, otherwise if_false
+ */
+ writeCond<T, F>(cond: boolean, fns: { if_true: () => T; if_false: () => F }) {
+ this.writeBool(cond);
+ if (cond) {
+ return fns.if_true();
+ } else {
+ return fns.if_false();
+ }
+ }
+
+ /**
+   * readCond() reads a boolean condition from the buffer, then calls if_true if the condition
+   * was true, otherwise if_false, and returns the callback's result
+ */
+ readCond<T, F>(fns: { if_true: () => T; if_false: () => F }) {
+ if (this.readBool()) {
+ return fns.if_true();
+ } else {
+ return fns.if_false();
+ }
+ }
+
+ /**
+ * alignedOffset() aligns this.offset to `bytes`, then increments this.offset by `bytes`.
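+   * For example, with offset = 3 and bytes = 4, this returns 4 and leaves offset at 8.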
+ * @returns the old offset aligned to the next multiple of `bytes`.
+ */
+ private alignedOffset(bytes: number) {
+ const aligned = align(this.offset, bytes);
+ this.offset = aligned + bytes;
+ return aligned;
+ }
+
+ private offset: number;
+ private view: DataView;
+}
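+
+// Minimal usage sketch (illustrative; values and buffer size are arbitrary):
+//   const out = new BinaryStream(new ArrayBuffer(64));
+//   out.writeU32(42);
+//   out.writeString('hi');
+//   const reader = new BinaryStream(out.buffer().buffer);
+//   reader.readU32();    // 42
+//   reader.readString(); // 'hi'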
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/buffer.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/buffer.ts
new file mode 100644
index 0000000000..a7d154a7e6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/buffer.ts
@@ -0,0 +1,23 @@
+import { memcpy, TypedArrayBufferView } from '../../common/util/util.js';
+
+import { align } from './math.js';
+
+/**
+ * Creates a buffer with the contents of some TypedArray.
+ * The buffer size is always rounded up to a multiple of 4 because the buffer is created with
+ * mappedAtCreation === true.
+ */
+export function makeBufferWithContents(
+ device: GPUDevice,
+ dataArray: TypedArrayBufferView,
+ usage: GPUBufferUsageFlags
+): GPUBuffer {
+ const buffer = device.createBuffer({
+ mappedAtCreation: true,
+ size: align(dataArray.byteLength, 4),
+ usage,
+ });
+ memcpy({ src: dataArray }, { dst: buffer.getMappedRange() });
+ buffer.unmap();
+ return buffer;
+}
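+
+// Usage sketch (names and data are illustrative; assumes a GPUDevice named `device`):
+//   const vertexBuffer = makeBufferWithContents(
+//     device,
+//     new Float32Array([0, 1, -1, -1, 1, -1]),
+//     GPUBufferUsage.VERTEX
+//   );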
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/check_contents.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/check_contents.ts
new file mode 100644
index 0000000000..298e7ae4a9
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/check_contents.ts
@@ -0,0 +1,272 @@
+// MAINTENANCE_TODO: The "checkThingTrue" naming is confusing; these must be used with `expectOK`
+// or the result is dropped on the floor. Rename these to things like `typedArrayIsOK`(??) to
+// make it clearer.
+// MAINTENANCE_TODO: Also, audit to make sure we aren't dropping any on the floor. Consider a
+// no-ignored-return lint check if we can find one that we can use.
+
+import {
+ assert,
+ ErrorWithExtra,
+ iterRange,
+ range,
+ TypedArrayBufferView,
+ TypedArrayBufferViewConstructor,
+} from '../../common/util/util.js';
+import { Float16Array } from '../../external/petamoriken/float16/float16.js';
+
+import { generatePrettyTable } from './pretty_diff_tables.js';
+
+/** Generate an expected value at `index`, to test for equality with the actual value. */
+export type CheckElementsGenerator = (index: number) => number;
+/** Check whether the actual `value` at `index` is as expected. */
+export type CheckElementsPredicate = (index: number, value: number) => boolean;
+/**
+ * Provides a pretty-printing implementation for a particular CheckElementsPredicate.
+ * This is an array; each element provides info to print an additional row in the error message.
+ */
+export type CheckElementsSupplementalTableRows = Array<{
+ /** Row header. */
+ leftHeader: string;
+ /**
+ * Get the value for a cell in the table with element index `index`.
+ * May be a string or a number; a number will be formatted according to the TypedArray type used.
+ */
+ getValueForCell: (index: number) => number | string;
+}>;
+
+/**
+ * Check whether two `TypedArray`s have equal contents.
+ * Returns `undefined` if the check passes, or an `Error` if not.
+ */
+export function checkElementsEqual(
+ actual: TypedArrayBufferView,
+ expected: TypedArrayBufferView
+): ErrorWithExtra | undefined {
+ assert(actual.constructor === expected.constructor, 'TypedArray type mismatch');
+ assert(actual.length === expected.length, 'size mismatch');
+
+ let failedElementsFirstMaybe: number | undefined = undefined;
+ /** Sparse array with `true` for elements that failed. */
+ const failedElements: (true | undefined)[] = [];
+ for (let i = 0; i < actual.length; ++i) {
+ if (actual[i] !== expected[i]) {
+ failedElementsFirstMaybe ??= i;
+ failedElements[i] = true;
+ }
+ }
+
+ if (failedElementsFirstMaybe === undefined) {
+ return undefined;
+ }
+
+ const failedElementsFirst = failedElementsFirstMaybe;
+ return failCheckElements({
+ actual,
+ failedElements,
+ failedElementsFirst,
+ predicatePrinter: [{ leftHeader: 'expected ==', getValueForCell: index => expected[index] }],
+ });
+}
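+
+// Usage sketch (illustrative values): a passing check returns undefined; a failing
+// one returns an ErrorWithExtra that callers typically forward to expectOK.
+//   const err = checkElementsEqual(new Uint8Array([1, 2, 3]), new Uint8Array([1, 2, 4]));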
+
+/**
+ * Check whether each value in a `TypedArray` is between the two corresponding "expected" values
+ * (either `a(i) <= actual[i] <= b(i)` or `a(i) >= actual[i] >= b(i)`).
+ */
+export function checkElementsBetween(
+ actual: TypedArrayBufferView,
+ expected: readonly [CheckElementsGenerator, CheckElementsGenerator]
+): ErrorWithExtra | undefined {
+ const error = checkElementsPassPredicate(
+ actual,
+ (index, value) =>
+ value >= Math.min(expected[0](index), expected[1](index)) &&
+ value <= Math.max(expected[0](index), expected[1](index)),
+ {
+ predicatePrinter: [
+ { leftHeader: 'between', getValueForCell: index => expected[0](index) },
+ { leftHeader: 'and', getValueForCell: index => expected[1](index) },
+ ],
+ }
+ );
+ // If there was an error, extend it with additional extras.
+ return error ? new ErrorWithExtra(error, () => ({ expected })) : undefined;
+}
+
+/**
+ * Check whether each value in a `TypedArray` is equal to one of the two corresponding "expected"
+ * values (either `actual[i] === a[i]` or `actual[i] === b[i]`)
+ */
+export function checkElementsEqualEither(
+ actual: TypedArrayBufferView,
+ expected: readonly [TypedArrayBufferView, TypedArrayBufferView]
+): ErrorWithExtra | undefined {
+ const error = checkElementsPassPredicate(
+ actual,
+ (index, value) => value === expected[0][index] || value === expected[1][index],
+ {
+ predicatePrinter: [
+ { leftHeader: 'either', getValueForCell: index => expected[0][index] },
+ { leftHeader: 'or', getValueForCell: index => expected[1][index] },
+ ],
+ }
+ );
+ // If there was an error, extend it with additional extras.
+ return error ? new ErrorWithExtra(error, () => ({ expected })) : undefined;
+}
+
+/**
+ * Check whether a `TypedArray`'s contents equal the values produced by a generator function.
+ * Returns `undefined` if the check passes, or an `Error` if not.
+ *
+ * ```text
+ * Array had unexpected contents at indices 2 through 19.
+ * Starting at index 1:
+ * actual == 0x: 00 fe ff 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 00
+ * failed -> xx xx xx xx xx xx xx xx xx xx xx xx xx xx xx xx xx
+ * expected == 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * ```
+ *
+ * ```text
+ * Array had unexpected contents at indices 2 through 29.
+ * Starting at index 1:
+ * actual == 0.000 -2.000e+100 -1.000e+100 0.000 1.000e+100 2.000e+100 3.000e+100 4.000e+100 5.000e+100 6.000e+100 7.000e+100 ...
+ * failed -> xx xx xx xx xx xx xx xx xx ...
+ * expected == 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 ...
+ * ```
+ */
+export function checkElementsEqualGenerated(
+ actual: TypedArrayBufferView,
+ generator: CheckElementsGenerator
+): ErrorWithExtra | undefined {
+ let failedElementsFirstMaybe: number | undefined = undefined;
+ /** Sparse array with `true` for elements that failed. */
+ const failedElements: (true | undefined)[] = [];
+ for (let i = 0; i < actual.length; ++i) {
+ if (actual[i] !== generator(i)) {
+ failedElementsFirstMaybe ??= i;
+ failedElements[i] = true;
+ }
+ }
+
+ if (failedElementsFirstMaybe === undefined) {
+ return undefined;
+ }
+
+ const failedElementsFirst = failedElementsFirstMaybe;
+ const error = failCheckElements({
+ actual,
+ failedElements,
+ failedElementsFirst,
+ predicatePrinter: [{ leftHeader: 'expected ==', getValueForCell: index => generator(index) }],
+ });
+ // Add more extras to the error.
+ return new ErrorWithExtra(error, () => ({ generator }));
+}
+
+/**
+ * Check whether a `TypedArray`'s values pass the provided predicate function.
+ * Returns `undefined` if the check passes, or an `Error` if not.
+ */
+export function checkElementsPassPredicate(
+ actual: TypedArrayBufferView,
+ predicate: CheckElementsPredicate,
+ { predicatePrinter }: { predicatePrinter?: CheckElementsSupplementalTableRows }
+): ErrorWithExtra | undefined {
+ let failedElementsFirstMaybe: number | undefined = undefined;
+ /** Sparse array with `true` for elements that failed. */
+ const failedElements: (true | undefined)[] = [];
+ for (let i = 0; i < actual.length; ++i) {
+ if (!predicate(i, actual[i])) {
+ failedElementsFirstMaybe ??= i;
+ failedElements[i] = true;
+ }
+ }
+
+ if (failedElementsFirstMaybe === undefined) {
+ return undefined;
+ }
+
+ const failedElementsFirst = failedElementsFirstMaybe;
+ return failCheckElements({ actual, failedElements, failedElementsFirst, predicatePrinter });
+}
+
+interface CheckElementsFailOpts {
+ actual: TypedArrayBufferView;
+ failedElements: (true | undefined)[];
+ failedElementsFirst: number;
+ predicatePrinter?: CheckElementsSupplementalTableRows;
+}
+
+/**
+ * Implements the failure case of some checkElementsX helpers above. This allows those functions to
+ * implement their checks directly without too many function indirections in between.
+ *
+ * Note: Separating this into its own function significantly speeds up the non-error case in
+ * Chromium (though this may be V8-specific behavior).
+ */
+function failCheckElements({
+ actual,
+ failedElements,
+ failedElementsFirst,
+ predicatePrinter,
+}: CheckElementsFailOpts): ErrorWithExtra {
+ const size = actual.length;
+ const ctor = actual.constructor as TypedArrayBufferViewConstructor;
+ const printAsFloat = ctor === Float16Array || ctor === Float32Array || ctor === Float64Array;
+
+ const failedElementsLast = failedElements.length - 1;
+
+ // Include one extra non-failed element at the beginning and end (if they exist), for context.
+ const printElementsStart = Math.max(0, failedElementsFirst - 1);
+ const printElementsEnd = Math.min(size, failedElementsLast + 2);
+ const printElementsCount = printElementsEnd - printElementsStart;
+
+ const numberToString = printAsFloat
+ ? (n: number) => n.toPrecision(4)
+ : (n: number) => intToPaddedHex(n, { byteLength: ctor.BYTES_PER_ELEMENT });
+ const numberPrefix = printAsFloat ? '' : '0x:';
+
+ const printActual = actual.subarray(printElementsStart, printElementsEnd);
+ const printExpected: Array<Iterable<string | number>> = [];
+ if (predicatePrinter) {
+ for (const { leftHeader, getValueForCell: cell } of predicatePrinter) {
+ printExpected.push(
+ (function* () {
+ yield* [leftHeader, ''];
+ yield* iterRange(printElementsCount, i => cell(printElementsStart + i));
+ })()
+ );
+ }
+ }
+
+ const printFailedValueMarkers = (function* () {
+ yield* ['failed ->', ''];
+ yield* range(printElementsCount, i => (failedElements[printElementsStart + i] ? 'xx' : ''));
+ })();
+
+ const opts = {
+ fillToWidth: 120,
+ numberToString,
+ };
+ const msg = `Array had unexpected contents at indices ${failedElementsFirst} through ${failedElementsLast}.
+ Starting at index ${printElementsStart}:
+${generatePrettyTable(opts, [
+ ['actual ==', numberPrefix, ...printActual],
+ printFailedValueMarkers,
+ ...printExpected,
+])}`;
+ return new ErrorWithExtra(msg, () => ({
+ actual: actual.slice(),
+ }));
+}
+
+// Helper helpers
+
+/** Convert an integral `number` into a hex string, padded to the specified `byteLength`. */
+function intToPaddedHex(number: number, { byteLength }: { byteLength: number }) {
+ assert(Number.isInteger(number), 'number must be integer');
+ let s = Math.abs(number).toString(16);
+ if (byteLength) s = s.padStart(byteLength * 2, '0');
+ if (number < 0) s = '-' + s;
+ return s;
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/color_space_conversion.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/color_space_conversion.ts
new file mode 100644
index 0000000000..a1de0e48ba
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/color_space_conversion.ts
@@ -0,0 +1,265 @@
+import { assert, unreachable } from '../../common/util/util.js';
+
+import { multiplyMatrices } from './math.js';
+
+// These color space conversion function definitions are copied directly from
+// CSS Color Module Level 4 Sample Code: https://drafts.csswg.org/css-color/#color-conversion-code
+// *EXCEPT* the conversion matrices are replaced with exact rational forms computed here:
+// https://github.com/kainino0x/exact_css_xyz_matrices
+// using this Rust crate: https://crates.io/crates/rgb_derivation
+// as described for sRGB on this page: https://mina86.com/2019/srgb-xyz-matrix/
+// but using the numbers from the CSS spec: https://www.w3.org/TR/css-color-4/#predefined
+
+// Sample code for color conversions
+// Conversion can also be done using ICC profiles and a Color Management System
+// For clarity, a library is used for matrix multiplication (multiply-matrices.js)
+
+// sRGB-related functions
+
+/**
+ * convert an array of sRGB values
+ * where in-gamut values are in the range [0 - 1]
+ * to linear light (un-companded) form.
+ * https://en.wikipedia.org/wiki/SRGB
+ * Extended transfer function:
+ * for negative values, linear portion is extended on reflection of axis,
+ * then reflected power function is used.
+ */
+function lin_sRGB(RGB: Array<number>) {
+ return RGB.map(val => {
+ const sign = val < 0 ? -1 : 1;
+ const abs = Math.abs(val);
+
+ if (abs < 0.04045) {
+ return val / 12.92;
+ }
+
+ return sign * Math.pow((abs + 0.055) / 1.055, 2.4);
+ });
+}
+
+/**
+ * convert an array of linear-light sRGB values in the range 0.0-1.0
+ * to gamma corrected form
+ * https://en.wikipedia.org/wiki/SRGB
+ * Extended transfer function:
+ * For negative values, the linear portion is extended by reflection across the
+ * axis, then the reflected power function is used.
+ */
+function gam_sRGB(RGB: Array<number>) {
+ return RGB.map(val => {
+ const sign = val < 0 ? -1 : 1;
+ const abs = Math.abs(val);
+
+ if (abs > 0.0031308) {
+ return sign * (1.055 * Math.pow(abs, 1 / 2.4) - 0.055);
+ }
+
+ return 12.92 * val;
+ });
+}
+
+/**
+ * convert an array of linear-light sRGB values to CIE XYZ
+ * using sRGB's own white, D65 (no chromatic adaptation)
+ */
+function lin_sRGB_to_XYZ(rgb: Array<Array<number>>) {
+ /* prettier-ignore */
+ const M = [
+ [506752 / 1228815, 87881 / 245763, 12673 / 70218],
+ [ 87098 / 409605, 175762 / 245763, 12673 / 175545],
+ [ 7918 / 409605, 87881 / 737289, 1001167 / 1053270],
+ ];
+ return multiplyMatrices(M, rgb);
+}
+
+/**
+ * convert XYZ to linear-light sRGB
+ * using sRGB's own white, D65 (no chromatic adaptation)
+ */
+function XYZ_to_lin_sRGB(XYZ: Array<Array<number>>) {
+ /* prettier-ignore */
+ const M = [
+ [ 12831 / 3959, -329 / 214, -1974 / 3959],
+ [-851781 / 878810, 1648619 / 878810, 36519 / 878810],
+ [ 705 / 12673, -2585 / 12673, 705 / 667],
+ ];
+
+ return multiplyMatrices(M, XYZ);
+}
+
+// display-p3-related functions
+
+/**
+ * convert an array of display-p3 RGB values in the range 0.0 - 1.0
+ * to linear light (un-companded) form.
+ */
+function lin_P3(RGB: Array<number>) {
+ return lin_sRGB(RGB); // same as sRGB
+}
+
+/**
+ * convert an array of linear-light display-p3 RGB in the range 0.0-1.0
+ * to gamma corrected form
+ */
+function gam_P3(RGB: Array<number>) {
+ return gam_sRGB(RGB); // same as sRGB
+}
+
+/**
+ * convert an array of linear-light display-p3 values to CIE XYZ
+ * using display-p3's D65 (no chromatic adaptation)
+ */
+function lin_P3_to_XYZ(rgb: Array<Array<number>>) {
+ /* prettier-ignore */
+ const M = [
+ [608311 / 1250200, 189793 / 714400, 198249 / 1000160],
+ [ 35783 / 156275, 247089 / 357200, 198249 / 2500400],
+ [ 0 / 1, 32229 / 714400, 5220557 / 5000800],
+ ];
+
+ return multiplyMatrices(M, rgb);
+}
+
+/**
+ * convert XYZ to linear-light P3
+ * using display-p3's own white, D65 (no chromatic adaptation)
+ */
+function XYZ_to_lin_P3(XYZ: Array<Array<number>>) {
+ /* prettier-ignore */
+ const M = [
+ [446124 / 178915, -333277 / 357830, -72051 / 178915],
+ [-14852 / 17905, 63121 / 35810, 423 / 17905],
+ [ 11844 / 330415, -50337 / 660830, 316169 / 330415],
+ ];
+
+ return multiplyMatrices(M, XYZ);
+}
+
+/**
+ * @returns the converted pixels in `{R: number, G: number, B: number, A: number}`.
+ *
+ * Follow conversion steps in CSS Color Module Level 4
+ * https://drafts.csswg.org/css-color/#predefined-to-predefined
+ * display-p3 and sRGB share the same white points.
+ */
+export function displayP3ToSrgb(pixel: { R: number; G: number; B: number; A: number }): {
+ R: number;
+ G: number;
+ B: number;
+ A: number;
+} {
+ assert(
+ pixel.R !== undefined && pixel.G !== undefined && pixel.B !== undefined,
+ 'color space conversion requires all of R, G and B components'
+ );
+
+ let rgbVec = [pixel.R, pixel.G, pixel.B];
+ rgbVec = lin_P3(rgbVec);
+ let rgbMatrix = [[rgbVec[0]], [rgbVec[1]], [rgbVec[2]]];
+ rgbMatrix = XYZ_to_lin_sRGB(lin_P3_to_XYZ(rgbMatrix));
+ rgbVec = [rgbMatrix[0][0], rgbMatrix[1][0], rgbMatrix[2][0]];
+ rgbVec = gam_sRGB(rgbVec);
+
+ pixel.R = rgbVec[0];
+ pixel.G = rgbVec[1];
+ pixel.B = rgbVec[2];
+
+ return pixel;
+}
+/**
+ * @returns the converted pixels in `{R: number, G: number, B: number, A: number}`.
+ *
+ * Follow conversion steps in CSS Color Module Level 4
+ * https://drafts.csswg.org/css-color/#predefined-to-predefined
+ * display-p3 and sRGB share the same white points.
+ */
+export function srgbToDisplayP3(pixel: { R: number; G: number; B: number; A: number }): {
+ R: number;
+ G: number;
+ B: number;
+ A: number;
+} {
+ assert(
+ pixel.R !== undefined && pixel.G !== undefined && pixel.B !== undefined,
+ 'color space conversion requires all of R, G and B components'
+ );
+
+ let rgbVec = [pixel.R, pixel.G, pixel.B];
+ rgbVec = lin_sRGB(rgbVec);
+ let rgbMatrix = [[rgbVec[0]], [rgbVec[1]], [rgbVec[2]]];
+ rgbMatrix = XYZ_to_lin_P3(lin_sRGB_to_XYZ(rgbMatrix));
+ rgbVec = [rgbMatrix[0][0], rgbMatrix[1][0], rgbMatrix[2][0]];
+ rgbVec = gam_P3(rgbVec);
+
+ pixel.R = rgbVec[0];
+ pixel.G = rgbVec[1];
+ pixel.B = rgbVec[2];
+
+ return pixel;
+}
+
+type InPlaceColorConversion = (rgba: {
+ R: number;
+ G: number;
+ B: number;
+ readonly A: number; // Alpha never changes during a conversion.
+}) => void;
+
+/**
+ * Returns a function which applies the specified colorspace/premultiplication conversion.
+ * Does not clamp, so may return values outside of the `dstColorSpace` gamut, due to either
+ * color space conversion or alpha premultiplication.
+ */
+export function makeInPlaceColorConversion({
+ srcPremultiplied,
+ dstPremultiplied,
+ srcColorSpace = 'srgb',
+ dstColorSpace = 'srgb',
+}: {
+ srcPremultiplied: boolean;
+ dstPremultiplied: boolean;
+ srcColorSpace?: PredefinedColorSpace;
+ dstColorSpace?: PredefinedColorSpace;
+}): InPlaceColorConversion {
+ const requireColorSpaceConversion = srcColorSpace !== dstColorSpace;
+ const requireUnpremultiplyAlpha =
+ srcPremultiplied && (requireColorSpaceConversion || srcPremultiplied !== dstPremultiplied);
+ const requirePremultiplyAlpha =
+ dstPremultiplied && (requireColorSpaceConversion || srcPremultiplied !== dstPremultiplied);
+
+ return rgba => {
+ assert(rgba.A >= 0.0 && rgba.A <= 1.0, 'rgba.A out of bounds');
+
+ if (requireUnpremultiplyAlpha) {
+ if (rgba.A !== 0.0) {
+ rgba.R /= rgba.A;
+ rgba.G /= rgba.A;
+ rgba.B /= rgba.A;
+ } else {
+ assert(
+ rgba.R === 0.0 && rgba.G === 0.0 && rgba.B === 0.0 && rgba.A === 0.0,
+          'Unpremultiply ops with alpha value 0.0 require all channels to equal 0.0'
+ );
+ }
+ }
+ // It's possible RGB are now > 1.
+ // This technically represents colors outside the src gamut, so no clamping yet.
+
+ if (requireColorSpaceConversion) {
+ // WebGPU currently only supports dstColorSpace = 'srgb'.
+ if (srcColorSpace === 'display-p3' && dstColorSpace === 'srgb') {
+ rgba = displayP3ToSrgb(rgba);
+ } else {
+ unreachable();
+ }
+ }
+ // Now RGB may also be negative if the src gamut is larger than the dst gamut.
+
+ if (requirePremultiplyAlpha) {
+ rgba.R *= rgba.A;
+ rgba.G *= rgba.A;
+ rgba.B *= rgba.A;
+ }
+ };
+}
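+
+// Usage sketch (illustrative values): convert an unpremultiplied display-p3 pixel
+// to premultiplied sRGB in place.
+//   const convert = makeInPlaceColorConversion({
+//     srcPremultiplied: false,
+//     dstPremultiplied: true,
+//     srcColorSpace: 'display-p3',
+//     dstColorSpace: 'srgb',
+//   });
+//   const pixel = { R: 0.5, G: 0.25, B: 0.75, A: 0.5 };
+//   convert(pixel);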
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/command_buffer_maker.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/command_buffer_maker.ts
new file mode 100644
index 0000000000..8ac663daf5
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/command_buffer_maker.ts
@@ -0,0 +1,85 @@
+import { ResourceState, GPUTestBase } from '../gpu_test.js';
+
+export const kRenderEncodeTypes = ['render pass', 'render bundle'] as const;
+export type RenderEncodeType = (typeof kRenderEncodeTypes)[number];
+export const kProgrammableEncoderTypes = ['compute pass', ...kRenderEncodeTypes] as const;
+export type ProgrammableEncoderType = (typeof kProgrammableEncoderTypes)[number];
+export const kEncoderTypes = ['non-pass', ...kProgrammableEncoderTypes] as const;
+export type EncoderType = (typeof kEncoderTypes)[number];
+
+// Look up the type of the encoder based on `T`. If `T` is a union, this will be too!
+type EncoderByEncoderType<T extends EncoderType> = {
+ 'non-pass': GPUCommandEncoder;
+ 'compute pass': GPUComputePassEncoder;
+ 'render pass': GPURenderPassEncoder;
+ 'render bundle': GPURenderBundleEncoder;
+}[T];
+
+/** See {@link webgpu/api/validation/validation_test.ValidationTest.createEncoder |
+ * GPUTest.createEncoder()}. */
+export class CommandBufferMaker<T extends EncoderType> {
+ /** `GPU___Encoder` for recording commands into. */
+ // Look up the type of the encoder based on `T`. If `T` is a union, this will be too!
+ readonly encoder: EncoderByEncoderType<T>;
+
+ /**
+ * Finish any passes, finish and record any bundles, and finish/return the command buffer. Any
+ * errors are ignored and the GPUCommandBuffer (which may be an error buffer) is returned.
+ */
+ readonly finish: () => GPUCommandBuffer;
+
+ /**
+ * Finish any passes, finish and record any bundles, and finish/return the command buffer.
+ * Checks for validation errors in (only) the appropriate finish call.
+ */
+ readonly validateFinish: (shouldSucceed: boolean) => GPUCommandBuffer;
+
+ /**
+ * Finish the command buffer and submit it. Checks for validation errors in either the submit or
+ * the appropriate finish call, depending on the state of a resource used in the encoding.
+ */
+ readonly validateFinishAndSubmit: (
+ shouldBeValid: boolean,
+ submitShouldSucceedIfValid: boolean
+ ) => void;
+
+ /**
+ * `validateFinishAndSubmit()` based on the state of a resource in the command encoder.
+ * - `finish()` should fail if the resource is 'invalid'.
+ * - Only `submit()` should fail if the resource is 'destroyed'.
+ */
+ readonly validateFinishAndSubmitGivenState: (resourceState: ResourceState) => void;
+
+ constructor(
+ t: GPUTestBase,
+ encoder: EncoderByEncoderType<EncoderType>,
+ finish: () => GPUCommandBuffer
+ ) {
+ // TypeScript introduces an intersection type here where we don't want one.
+ this.encoder = encoder as EncoderByEncoderType<T>;
+ this.finish = finish;
+
+ // Define extra methods like this, otherwise they get unbound when destructured, e.g.:
+ // const { encoder, validateFinishAndSubmit } = t.createEncoder(type);
+ // Alternatively, do not destructure, and call member functions, e.g.:
+ // const encoder = t.createEncoder(type);
+ // encoder.validateFinish(true);
+ this.validateFinish = (shouldSucceed: boolean) => {
+ return t.expectGPUError('validation', this.finish, !shouldSucceed);
+ };
+
+ this.validateFinishAndSubmit = (
+ shouldBeValid: boolean,
+ submitShouldSucceedIfValid: boolean
+ ) => {
+ const commandBuffer = this.validateFinish(shouldBeValid);
+ if (shouldBeValid) {
+ t.expectValidationError(() => t.queue.submit([commandBuffer]), !submitShouldSucceedIfValid);
+ }
+ };
+
+ this.validateFinishAndSubmitGivenState = (resourceState: ResourceState) => {
+ this.validateFinishAndSubmit(resourceState !== 'invalid', resourceState !== 'destroyed');
+ };
+ }
+}
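+
+// A minimal usage sketch, assuming a GPUTestBase `t` and a hypothetical compute `pipeline`
+// are in scope; the exact resource/state setup is elided:
+//
+//   const { encoder, validateFinishAndSubmitGivenState } = t.createEncoder('compute pass');
+//   encoder.setPipeline(pipeline);
+//   // 'destroyed': finish() should succeed, but submit() should fail.
+//   validateFinishAndSubmitGivenState('destroyed');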
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/compare.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/compare.ts
new file mode 100644
index 0000000000..45599d25f6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/compare.ts
@@ -0,0 +1,472 @@
+import { getIsBuildingDataCache } from '../../common/framework/data_cache.js';
+import { Colors } from '../../common/util/colors.js';
+import { assert, unreachable } from '../../common/util/util.js';
+import {
+ deserializeExpectation,
+ serializeExpectation,
+} from '../shader/execution/expression/case_cache.js';
+import { Expectation, toComparator } from '../shader/execution/expression/expression.js';
+
+import BinaryStream from './binary_stream.js';
+import { isFloatValue, Matrix, Scalar, Value, Vector } from './conversion.js';
+import { FPInterval } from './floating_point.js';
+
+/** Comparison describes the result of a Comparator function. */
+export interface Comparison {
+ matched: boolean; // True if the two values were considered a match
+ got: string; // The string representation of the 'got' value (possibly with markup)
+ expected: string; // The string representation of the 'expected' value (possibly with markup)
+}
+
+// All Comparators must be serializable to be used in the CaseCache.
+// New Comparators should add a new entry to SerializableComparatorKind and
+// define functionality in serialize/deserializeComparator as needed.
+//
+// 'value' and 'packed' are internal framework Comparators that exist, so that
+// the whole Case type hierarchy doesn't need to be split into Serializable vs
+// non-Serializable paths. Passing them into the CaseCache will cause a runtime
+// error.
+// 'value' and 'packed' should never be used in .spec.ts files.
+//
+export type SerializableComparatorKind = 'anyOf' | 'skipUndefined' | 'alwaysPass';
+type InternalComparatorKind = 'value' | 'packed';
+export type ComparatorKind = SerializableComparatorKind | InternalComparatorKind;
+export type ComparatorImpl = (got: Value) => Comparison;
+
+/** Comparator is a function that compares whether the provided value matches an expectation. */
+export interface Comparator {
+ compare: ComparatorImpl;
+ kind: ComparatorKind;
+ data?: Expectation | Expectation[] | string;
+}
+
+/** SerializedComparatorKind is an enum of all the possible serialized comparator kinds. */
+enum SerializedComparatorKind {
+ AnyOf,
+ SkipUndefined,
+ AlwaysPass,
+}
+
+/** serializeComparatorKind() serializes a ComparatorKind to a BinaryStream */
+function serializeComparatorKind(s: BinaryStream, value: ComparatorKind) {
+ switch (value) {
+ case 'anyOf':
+ return s.writeU8(SerializedComparatorKind.AnyOf);
+ case 'skipUndefined':
+ return s.writeU8(SerializedComparatorKind.SkipUndefined);
+ case 'alwaysPass':
+ return s.writeU8(SerializedComparatorKind.AlwaysPass);
+ }
+}
+
+/** deserializeComparatorKind() deserializes a ComparatorKind from a BinaryStream */
+function deserializeComparatorKind(s: BinaryStream): ComparatorKind {
+ const kind = s.readU8();
+ switch (kind) {
+ case SerializedComparatorKind.AnyOf:
+ return 'anyOf';
+ case SerializedComparatorKind.SkipUndefined:
+ return 'skipUndefined';
+ case SerializedComparatorKind.AlwaysPass:
+ return 'alwaysPass';
+ default:
+ unreachable(`invalid serialized ComparatorKind: ${kind}`);
+ }
+}
+
+/**
+ * compares 'got' Value to 'expected' Value, returning the Comparison information.
+ * @param got the Value obtained from the test
+ * @param expected the expected Value
+ * @returns the comparison results
+ */
+// NOTE: This function does not use objectEquals, since that does not handle
+// FP-specific corner cases correctly, i.e. that f64/f32/f16 are all considered
+// the same type for this comparison.
+function compareValue(got: Value, expected: Value): Comparison {
+ {
+ // Check types
+ const gTy = got.type;
+ const eTy = expected.type;
+ const bothFloatTypes = isFloatValue(got) && isFloatValue(expected);
+ if (gTy !== eTy && !bothFloatTypes) {
+ return {
+ matched: false,
+ got: `${Colors.red(gTy.toString())}(${got})`,
+ expected: `${Colors.red(eTy.toString())}(${expected})`,
+ };
+ }
+ }
+
+ if (got instanceof Scalar) {
+ const g = got;
+ const e = expected as Scalar;
+ const isFloat = g.type.kind === 'f64' || g.type.kind === 'f32' || g.type.kind === 'f16';
+ const matched =
+ (isFloat && (g.value as number) === (e.value as number)) || (!isFloat && g.value === e.value);
+ return {
+ matched,
+ got: g.toString(),
+ expected: matched ? Colors.green(e.toString()) : Colors.red(e.toString()),
+ };
+ }
+
+ if (got instanceof Vector) {
+ const e = expected as Vector;
+ const gLen = got.elements.length;
+ const eLen = e.elements.length;
+ let matched = gLen === eLen;
+ if (matched) {
+ // Iterating and calling compare instead of just using objectEquals to use the FP specific logic from above
+ matched = got.elements.every((_, i) => {
+ return compare(got.elements[i], e.elements[i]).matched;
+ });
+ }
+
+ return {
+ matched,
+ got: `${got.toString()}`,
+ expected: matched ? Colors.green(e.toString()) : Colors.red(e.toString()),
+ };
+ }
+
+ if (got instanceof Matrix) {
+ const e = expected as Matrix;
+ const gCols = got.type.cols;
+ const eCols = e.type.cols;
+ const gRows = got.type.rows;
+ const eRows = e.type.rows;
+ let matched = gCols === eCols && gRows === eRows;
+ if (matched) {
+ // Iterating and calling compare instead of just using objectEquals to use the FP specific logic from above
+ matched = got.elements.every((c, i) => {
+ return c.every((_, j) => {
+ return compare(got.elements[i][j], e.elements[i][j]).matched;
+ });
+ });
+ }
+
+ return {
+ matched,
+ got: `${got.toString()}`,
+ expected: matched ? Colors.green(e.toString()) : Colors.red(e.toString()),
+ };
+ }
+
+  throw new Error(`unhandled type '${typeof got}'`);
+}
+
+/**
+ * Tests if a 'got' Value is contained in the 'expected' interval, returning the Comparison information.
+ * @param got the Value obtained from the test
+ * @param expected the expected FPInterval
+ * @returns the comparison results
+ */
+function compareInterval(got: Value, expected: FPInterval): Comparison {
+ {
+ // Check type
+ const gTy = got.type;
+ if (!isFloatValue(got)) {
+ return {
+ matched: false,
+ got: `${Colors.red(gTy.toString())}(${got})`,
+ expected: `floating point value`,
+ };
+ }
+ }
+
+ if (got instanceof Scalar) {
+ const g = got.value as number;
+ const matched = expected.contains(g);
+ return {
+ matched,
+ got: g.toString(),
+ expected: matched ? Colors.green(expected.toString()) : Colors.red(expected.toString()),
+ };
+ }
+
+ // Vector results are currently not handled
+  throw new Error(`unhandled type '${typeof got}'`);
+}
+
+/**
+ * Tests if a 'got' Value is contained in the 'expected' vector, returning the Comparison information.
+ * @param got the Value obtained from the test; expected to be a Vector
+ * @param expected the expected array of FPIntervals, one for each element of the vector
+ * @returns the comparison results
+ */
+function compareVector(got: Value, expected: FPInterval[]): Comparison {
+ // Check got type
+ if (!(got instanceof Vector)) {
+ return {
+ matched: false,
+ got: `${Colors.red((typeof got).toString())}(${got})`,
+ expected: `Vector`,
+ };
+ }
+
+ // Check element type
+ {
+ const gTy = got.type.elementType;
+ if (!isFloatValue(got.elements[0])) {
+ return {
+ matched: false,
+ got: `${Colors.red(gTy.toString())}(${got})`,
+ expected: `floating point elements`,
+ };
+ }
+ }
+
+ if (got.elements.length !== expected.length) {
+ return {
+ matched: false,
+ got: `Vector of ${got.elements.length} elements`,
+ expected: `${expected.length} elements`,
+ };
+ }
+
+ const results = got.elements.map((_, idx) => {
+ const g = got.elements[idx].value as number;
+ return { match: expected[idx].contains(g), index: idx };
+ });
+
+ const failures = results.filter(v => !v.match).map(v => v.index);
+ if (failures.length !== 0) {
+    const expected_string = expected.map((v, idx) =>
+      failures.includes(idx) ? Colors.red(`[${v}]`) : Colors.green(`[${v}]`)
+    );
+ return {
+ matched: false,
+ got: `[${got.elements}]`,
+ expected: `[${expected_string}]`,
+ };
+ }
+
+ return {
+ matched: true,
+ got: `[${got.elements}]`,
+ expected: `[${Colors.green(expected.toString())}]`,
+ };
+}
+
+// Utility to get around not being able to nest template literal (``) blocks
+function convertArrayToString<T>(m: T[]): string {
+ return `[${m.join(',')}]`;
+}
+
+/**
+ * Tests if a 'got' Value is contained in the 'expected' matrix, returning the Comparison information.
+ * @param got the Value obtained from the test; expected to be a Matrix
+ * @param expected the expected array of arrays of FPIntervals, representing a column-major matrix
+ * @returns the comparison results
+ */
+function compareMatrix(got: Value, expected: FPInterval[][]): Comparison {
+ // Check got type
+ if (!(got instanceof Matrix)) {
+ return {
+ matched: false,
+ got: `${Colors.red((typeof got).toString())}(${got})`,
+ expected: `Matrix`,
+ };
+ }
+
+ // Check element type
+ {
+ const gTy = got.type.elementType;
+ if (!isFloatValue(got.elements[0][0])) {
+ return {
+ matched: false,
+ got: `${Colors.red(gTy.toString())}(${got})`,
+ expected: `floating point elements`,
+ };
+ }
+ }
+
+ // Check matrix dimensions
+ {
+ const gCols = got.elements.length;
+ const gRows = got.elements[0].length;
+ const eCols = expected.length;
+ const eRows = expected[0].length;
+
+ if (gCols !== eCols || gRows !== eRows) {
+ assert(false);
+ return {
+ matched: false,
+ got: `Matrix of ${gCols}x${gRows} elements`,
+ expected: `Matrix of ${eCols}x${eRows} elements`,
+ };
+ }
+ }
+
+ // Check that got values fall in expected intervals
+ let matched = true;
+ const expected_strings: string[][] = [...Array(got.elements.length)].map(_ => [
+ ...Array(got.elements[0].length),
+ ]);
+
+ got.elements.forEach((c, i) => {
+ c.forEach((r, j) => {
+ const g = r.value as number;
+ if (expected[i][j].contains(g)) {
+ expected_strings[i][j] = Colors.green(`[${expected[i][j]}]`);
+ } else {
+ matched = false;
+ expected_strings[i][j] = Colors.red(`[${expected[i][j]}]`);
+ }
+ });
+ });
+
+ return {
+ matched,
+ got: convertArrayToString(got.elements.map(convertArrayToString)),
+ expected: convertArrayToString(expected_strings.map(convertArrayToString)),
+ };
+}
+
+/**
+ * compare() compares 'got' to 'expected', returning the Comparison information.
+ * @param got the result obtained from the test
+ * @param expected the expected result
+ * @returns the comparison results
+ */
+export function compare(
+ got: Value,
+ expected: Value | FPInterval | FPInterval[] | FPInterval[][]
+): Comparison {
+ if (expected instanceof Array) {
+ if (expected[0] instanceof Array) {
+ expected = expected as FPInterval[][];
+ return compareMatrix(got, expected);
+ } else {
+ expected = expected as FPInterval[];
+ return compareVector(got, expected);
+ }
+ }
+
+ if (expected instanceof FPInterval) {
+ return compareInterval(got, expected);
+ }
+
+ return compareValue(got, expected);
+}
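+
+// A minimal sketch of how compare() is typically used, assuming the scalar constructors
+// from './conversion.js' (e.g. f32, u32) are in scope:
+//
+//   compare(f32(1.5), f32(1.5)).matched; // true: same type, same value
+//   compare(f32(1.5), f32(2.0)).matched; // false: value mismatch
+//   compare(u32(1), f32(1)).matched;     // false: type mismatch (u32 vs f32)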
+
+/** @returns a Comparator that checks whether a test value matches any of the provided options */
+export function anyOf(...expectations: Expectation[]): Comparator {
+ const c: Comparator = {
+ compare: (got: Value) => {
+ const failed = new Set<string>();
+ for (const e of expectations) {
+ const cmp = toComparator(e).compare(got);
+ if (cmp.matched) {
+ return cmp;
+ }
+ failed.add(cmp.expected);
+ }
+ return { matched: false, got: got.toString(), expected: [...failed].join(' or ') };
+ },
+ kind: 'anyOf',
+ };
+
+ if (getIsBuildingDataCache()) {
+ // If there's an active DataCache, and it supports storing, then append the
+ // Expectations to the result, so it can be serialized.
+ c.data = expectations;
+ }
+ return c;
+}
+
+/** @returns a Comparator that skips the test if the expectation is undefined */
+export function skipUndefined(expectation: Expectation | undefined): Comparator {
+ const c: Comparator = {
+ compare: (got: Value) => {
+ if (expectation !== undefined) {
+ return toComparator(expectation).compare(got);
+ }
+ return { matched: true, got: got.toString(), expected: `Treating 'undefined' as Any` };
+ },
+ kind: 'skipUndefined',
+ };
+
+ if (expectation !== undefined && getIsBuildingDataCache()) {
+ // If there's an active DataCache, and it supports storing, then append the
+ // Expectation to the result, so it can be serialized.
+ c.data = expectation;
+ }
+ return c;
+}
+
+/**
+ * @returns a Comparator that always passes, used to test situations where the
+ * result of computation doesn't matter, but the fact it finishes is being
+ * tested.
+ */
+export function alwaysPass(msg: string = 'always pass'): Comparator {
+ const c: Comparator = {
+ compare: (got: Value) => {
+ return { matched: true, got: got.toString(), expected: msg };
+ },
+ kind: 'alwaysPass',
+ };
+
+ if (getIsBuildingDataCache()) {
+ // If there's an active DataCache, and it supports storing, then append the
+ // message string to the result, so it can be serialized.
+ c.data = msg;
+ }
+ return c;
+}
+
+/** serializeComparator() serializes a Comparator to a BinaryStream */
+export function serializeComparator(s: BinaryStream, c: Comparator) {
+ serializeComparatorKind(s, c.kind);
+ switch (c.kind) {
+ case 'anyOf':
+ s.writeArray(c.data as Expectation[], serializeExpectation);
+ return;
+ case 'skipUndefined':
+ s.writeCond(c.data !== undefined, {
+ if_true: () => {
+ // defined data
+ serializeExpectation(s, c.data as Expectation);
+ },
+ if_false: () => {
+ // undefined data
+ },
+ });
+ return;
+ case 'alwaysPass': {
+ s.writeString(c.data as string);
+ return;
+ }
+ case 'value':
+ case 'packed': {
+ unreachable(`Serializing '${c.kind}' comparators is not allowed (${c})`);
+ break;
+ }
+ }
+  unreachable(`Unable to serialize comparator '${c}'`);
+}
+
+/** deserializeComparator() deserializes a Comparator from a BinaryStream */
+export function deserializeComparator(s: BinaryStream): Comparator {
+ const kind = deserializeComparatorKind(s);
+ switch (kind) {
+ case 'anyOf':
+ return anyOf(...s.readArray(deserializeExpectation));
+ case 'skipUndefined':
+ return s.readCond({
+ if_true: () => {
+ // defined data
+ return skipUndefined(deserializeExpectation(s));
+ },
+ if_false: () => {
+ // undefined data
+ return skipUndefined(undefined);
+ },
+ });
+ case 'alwaysPass':
+ return alwaysPass(s.readString());
+ }
+  unreachable(`Unable to deserialize comparator '${s}'`);
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/constants.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/constants.ts
new file mode 100644
index 0000000000..5ee819c64e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/constants.ts
@@ -0,0 +1,487 @@
+import {
+ reinterpretU64AsF64,
+ reinterpretF64AsU64,
+ reinterpretU32AsF32,
+ reinterpretU16AsF16,
+} from './reinterpret.js';
+
+export const kBit = {
+ // Limits of int32
+ i32: {
+ positive: {
+ min: 0x0000_0000, // 0
+ max: 0x7fff_ffff, // 2147483647
+ },
+ negative: {
+ min: 0x8000_0000, // -2147483648
+ max: 0x0000_0000, // 0
+ },
+ },
+
+ // Limits of uint32
+ u32: {
+ min: 0x0000_0000,
+ max: 0xffff_ffff,
+ },
+
+ // Limits of f64
+  // These have to be stored as BigInt hex values, since a JS `number` is an f64 internally,
+  // so 64-bit hex values are not guaranteed to be precisely representable.
+ f64: {
+ positive: {
+ min: BigInt(0x0010_0000_0000_0000n),
+ max: BigInt(0x7fef_ffff_ffff_ffffn),
+ zero: BigInt(0x0000_0000_0000_0000n),
+ subnormal: {
+ min: BigInt(0x0000_0000_0000_0001n),
+ max: BigInt(0x000f_ffff_ffff_ffffn),
+ },
+ infinity: BigInt(0x7ff0_0000_0000_0000n),
+ nearest_max: BigInt(0x7fef_ffff_ffff_fffen),
+ less_than_one: BigInt(0x3fef_ffff_ffff_ffffn),
+ pi: {
+ whole: BigInt(0x4009_21fb_5444_2d18n),
+ three_quarters: BigInt(0x4002_d97c_7f33_21d2n),
+ half: BigInt(0x3ff9_21fb_5444_2d18n),
+ third: BigInt(0x3ff0_c152_382d_7365n),
+ quarter: BigInt(0x3fe9_21fb_5444_2d18n),
+ sixth: BigInt(0x3fe0_c152_382d_7365n),
+ },
+ e: BigInt(0x4005_bf0a_8b14_5769n),
+ },
+ negative: {
+ max: BigInt(0x8010_0000_0000_0000n),
+ min: BigInt(0xffef_ffff_ffff_ffffn),
+ zero: BigInt(0x8000_0000_0000_0000n),
+ subnormal: {
+ max: BigInt(0x8000_0000_0000_0001n),
+ min: BigInt(0x800f_ffff_ffff_ffffn),
+ },
+ infinity: BigInt(0xfff0_0000_0000_0000n),
+ nearest_min: BigInt(0xffef_ffff_ffff_fffen),
+ less_than_one: BigInt(0xbfef_ffff_ffff_ffffn),
+ pi: {
+ whole: BigInt(0xc009_21fb_5444_2d18n),
+ three_quarters: BigInt(0xc002_d97c_7f33_21d2n),
+ half: BigInt(0xbff9_21fb_5444_2d18n),
+ third: BigInt(0xbff0_c152_382d_7365n),
+ quarter: BigInt(0xbfe9_21fb_5444_2d18n),
+ sixth: BigInt(0xbfe0_c152_382d_7365n),
+ },
+ },
+ max_ulp: BigInt(0x7ca0_0000_0000_0000n),
+ },
+
+ // Limits of f32
+ f32: {
+ positive: {
+ min: 0x0080_0000,
+ max: 0x7f7f_ffff,
+ zero: 0x0000_0000,
+ subnormal: {
+ min: 0x0000_0001,
+ max: 0x007f_ffff,
+ },
+ infinity: 0x7f80_0000,
+ nearest_max: 0x7f7f_fffe,
+ less_than_one: 0x3f7f_ffff,
+ pi: {
+ whole: 0x4049_0fdb,
+ three_quarters: 0x4016_cbe4,
+ half: 0x3fc9_0fdb,
+ third: 0x3f86_0a92,
+ quarter: 0x3f49_0fdb,
+ sixth: 0x3f06_0a92,
+ },
+ e: 0x402d_f854,
+ },
+ negative: {
+ max: 0x8080_0000,
+ min: 0xff7f_ffff,
+ zero: 0x8000_0000,
+ subnormal: {
+ max: 0x8000_0001,
+ min: 0x807f_ffff,
+ },
+ infinity: 0xff80_0000,
+ nearest_min: 0xff7f_fffe,
+ less_than_one: 0xbf7f_ffff,
+ pi: {
+        whole: 0xc049_0fdb,
+ three_quarters: 0xc016_cbe4,
+ half: 0xbfc9_0fdb,
+ third: 0xbf86_0a92,
+ quarter: 0xbf49_0fdb,
+ sixth: 0xbf06_0a92,
+ },
+ },
+ max_ulp: 0x7380_0000,
+ },
+
+ // Limits of f16
+ f16: {
+ positive: {
+ min: 0x0400,
+ max: 0x7bff,
+ zero: 0x0000,
+ subnormal: {
+ min: 0x0001,
+ max: 0x03ff,
+ },
+ infinity: 0x7c00,
+ nearest_max: 0x7bfe,
+ less_than_one: 0x3bff,
+ pi: {
+ whole: 0x4248,
+ three_quarters: 0x40b6,
+ half: 0x3e48,
+ third: 0x3c30,
+ quarter: 0x3a48,
+ sixth: 0x3830,
+ },
+ e: 0x416f,
+ },
+ negative: {
+ max: 0x8400,
+ min: 0xfbff,
+ zero: 0x8000,
+ subnormal: {
+ max: 0x8001,
+ min: 0x83ff,
+ },
+ infinity: 0xfc00,
+ nearest_min: 0xfbfe,
+ less_than_one: 0xbbff,
+ pi: {
+ whole: 0xc248,
+ three_quarters: 0xc0b6,
+ half: 0xbe48,
+ third: 0xbc30,
+ quarter: 0xba48,
+ sixth: 0xb830,
+ },
+ },
+ max_ulp: 0x5000,
+ },
+
+  // Uint32 representation of power(2, n) for n = {0, ..., 31}
+  // Stored as a JS `number`
+  // {to0, ..., to31} i.e. n = {0, ..., 31}
+ powTwo: {
+ to0: 0x0000_0001,
+ to1: 0x0000_0002,
+ to2: 0x0000_0004,
+ to3: 0x0000_0008,
+ to4: 0x0000_0010,
+ to5: 0x0000_0020,
+ to6: 0x0000_0040,
+ to7: 0x0000_0080,
+ to8: 0x0000_0100,
+ to9: 0x0000_0200,
+ to10: 0x0000_0400,
+ to11: 0x0000_0800,
+ to12: 0x0000_1000,
+ to13: 0x0000_2000,
+ to14: 0x0000_4000,
+ to15: 0x0000_8000,
+ to16: 0x0001_0000,
+ to17: 0x0002_0000,
+ to18: 0x0004_0000,
+ to19: 0x0008_0000,
+ to20: 0x0010_0000,
+ to21: 0x0020_0000,
+ to22: 0x0040_0000,
+ to23: 0x0080_0000,
+ to24: 0x0100_0000,
+ to25: 0x0200_0000,
+ to26: 0x0400_0000,
+ to27: 0x0800_0000,
+ to28: 0x1000_0000,
+ to29: 0x2000_0000,
+ to30: 0x4000_0000,
+ to31: 0x8000_0000,
+ },
+
+  // Int32 representation of -1 * power(2, n) for n = {0, ..., 31}
+  // Stored as a JS `number`
+  // {to0, ..., to31} i.e. n = {0, ..., 31}
+ negPowTwo: {
+ to0: 0xffff_ffff,
+ to1: 0xffff_fffe,
+ to2: 0xffff_fffc,
+ to3: 0xffff_fff8,
+ to4: 0xffff_fff0,
+ to5: 0xffff_ffe0,
+ to6: 0xffff_ffc0,
+ to7: 0xffff_ff80,
+ to8: 0xffff_ff00,
+ to9: 0xffff_fe00,
+ to10: 0xffff_fc00,
+ to11: 0xffff_f800,
+ to12: 0xffff_f000,
+ to13: 0xffff_e000,
+ to14: 0xffff_c000,
+ to15: 0xffff_8000,
+ to16: 0xffff_0000,
+ to17: 0xfffe_0000,
+ to18: 0xfffc_0000,
+ to19: 0xfff8_0000,
+ to20: 0xfff0_0000,
+ to21: 0xffe0_0000,
+ to22: 0xffc0_0000,
+ to23: 0xff80_0000,
+ to24: 0xff00_0000,
+ to25: 0xfe00_0000,
+ to26: 0xfc00_0000,
+ to27: 0xf800_0000,
+ to28: 0xf000_0000,
+ to29: 0xe000_0000,
+ to30: 0xc000_0000,
+ to31: 0x8000_0000,
+ },
+} as const;
+
+export const kValue = {
+ // Limits of i32
+ i32: {
+ positive: {
+ min: 0,
+ max: 2147483647,
+ },
+ negative: {
+ min: -2147483648,
+ max: 0,
+ },
+ },
+
+ // Limits of u32
+ u32: {
+ min: 0,
+ max: 4294967295,
+ },
+
+ // Limits of f64
+ f64: {
+ positive: {
+ min: reinterpretU64AsF64(kBit.f64.positive.min),
+ max: reinterpretU64AsF64(kBit.f64.positive.max),
+ zero: reinterpretU64AsF64(kBit.f64.positive.zero),
+ subnormal: {
+ min: reinterpretU64AsF64(kBit.f64.positive.subnormal.min),
+ max: reinterpretU64AsF64(kBit.f64.positive.subnormal.max),
+ },
+ infinity: reinterpretU64AsF64(kBit.f64.positive.infinity),
+ nearest_max: reinterpretU64AsF64(kBit.f64.positive.nearest_max),
+ less_than_one: reinterpretU64AsF64(kBit.f64.positive.less_than_one),
+ pi: {
+ whole: reinterpretU64AsF64(kBit.f64.positive.pi.whole),
+ three_quarters: reinterpretU64AsF64(kBit.f64.positive.pi.three_quarters),
+ half: reinterpretU64AsF64(kBit.f64.positive.pi.half),
+ third: reinterpretU64AsF64(kBit.f64.positive.pi.third),
+ quarter: reinterpretU64AsF64(kBit.f64.positive.pi.quarter),
+ sixth: reinterpretU64AsF64(kBit.f64.positive.pi.sixth),
+ },
+ e: reinterpretU64AsF64(kBit.f64.positive.e),
+ },
+ negative: {
+ max: reinterpretU64AsF64(kBit.f64.negative.max),
+ min: reinterpretU64AsF64(kBit.f64.negative.min),
+ zero: reinterpretU64AsF64(kBit.f64.negative.zero),
+ subnormal: {
+ max: reinterpretU64AsF64(kBit.f64.negative.subnormal.max),
+ min: reinterpretU64AsF64(kBit.f64.negative.subnormal.min),
+ },
+ infinity: reinterpretU64AsF64(kBit.f64.negative.infinity),
+ nearest_min: reinterpretU64AsF64(kBit.f64.negative.nearest_min),
+ less_than_one: reinterpretU64AsF64(kBit.f64.negative.less_than_one), // -0.999999940395
+ pi: {
+ whole: reinterpretU64AsF64(kBit.f64.negative.pi.whole),
+ three_quarters: reinterpretU64AsF64(kBit.f64.negative.pi.three_quarters),
+ half: reinterpretU64AsF64(kBit.f64.negative.pi.half),
+ third: reinterpretU64AsF64(kBit.f64.negative.pi.third),
+ quarter: reinterpretU64AsF64(kBit.f64.negative.pi.quarter),
+ sixth: reinterpretU64AsF64(kBit.f64.negative.pi.sixth),
+ },
+ },
+ max_ulp: reinterpretU64AsF64(kBit.f64.max_ulp),
+ },
+
+ // Limits of f32
+ f32: {
+ positive: {
+ min: reinterpretU32AsF32(kBit.f32.positive.min),
+ max: reinterpretU32AsF32(kBit.f32.positive.max),
+ zero: reinterpretU32AsF32(kBit.f32.positive.zero),
+ subnormal: {
+ min: reinterpretU32AsF32(kBit.f32.positive.subnormal.min),
+ max: reinterpretU32AsF32(kBit.f32.positive.subnormal.max),
+ },
+ infinity: reinterpretU32AsF32(kBit.f32.positive.infinity),
+
+ nearest_max: reinterpretU32AsF32(kBit.f32.positive.nearest_max),
+ less_than_one: reinterpretU32AsF32(kBit.f32.positive.less_than_one),
+ pi: {
+ whole: reinterpretU32AsF32(kBit.f32.positive.pi.whole),
+ three_quarters: reinterpretU32AsF32(kBit.f32.positive.pi.three_quarters),
+ half: reinterpretU32AsF32(kBit.f32.positive.pi.half),
+ third: reinterpretU32AsF32(kBit.f32.positive.pi.third),
+ quarter: reinterpretU32AsF32(kBit.f32.positive.pi.quarter),
+ sixth: reinterpretU32AsF32(kBit.f32.positive.pi.sixth),
+ },
+ e: reinterpretU32AsF32(kBit.f32.positive.e),
+ // The positive pipeline-overridable constant with the smallest magnitude
+ // which when cast to f32 will produce infinity. This comes from WGSL
+ // conversion rules and the rounding rules of WebIDL.
+ first_non_castable_pipeline_override:
+ reinterpretU32AsF32(kBit.f32.positive.max) / 2 + 2 ** 127,
+ // The positive pipeline-overridable constant with the largest magnitude
+ // which when cast to f32 will not produce infinity. This comes from WGSL
+ // conversion rules and the rounding rules of WebIDL
+ last_castable_pipeline_override: reinterpretU64AsF64(
+ reinterpretF64AsU64(reinterpretU32AsF32(kBit.f32.positive.max) / 2 + 2 ** 127) - BigInt(1)
+ ),
+ },
+ negative: {
+ max: reinterpretU32AsF32(kBit.f32.negative.max),
+ min: reinterpretU32AsF32(kBit.f32.negative.min),
+ zero: reinterpretU32AsF32(kBit.f32.negative.zero),
+ subnormal: {
+ max: reinterpretU32AsF32(kBit.f32.negative.subnormal.max),
+ min: reinterpretU32AsF32(kBit.f32.negative.subnormal.min),
+ },
+ infinity: reinterpretU32AsF32(kBit.f32.negative.infinity),
+ nearest_min: reinterpretU32AsF32(kBit.f32.negative.nearest_min),
+ less_than_one: reinterpretU32AsF32(kBit.f32.negative.less_than_one), // -0.999999940395
+ pi: {
+ whole: reinterpretU32AsF32(kBit.f32.negative.pi.whole),
+ three_quarters: reinterpretU32AsF32(kBit.f32.negative.pi.three_quarters),
+ half: reinterpretU32AsF32(kBit.f32.negative.pi.half),
+ third: reinterpretU32AsF32(kBit.f32.negative.pi.third),
+ quarter: reinterpretU32AsF32(kBit.f32.negative.pi.quarter),
+ sixth: reinterpretU32AsF32(kBit.f32.negative.pi.sixth),
+ },
+ // The negative pipeline-overridable constant with the smallest magnitude
+ // which when cast to f32 will produce infinity. This comes from WGSL
+ // conversion rules and the rounding rules of WebIDL.
+ first_non_castable_pipeline_override: -(
+ reinterpretU32AsF32(kBit.f32.positive.max) / 2 +
+ 2 ** 127
+ ),
+ // The negative pipeline-overridable constant with the largest magnitude
+ // which when cast to f32 will not produce infinity. This comes from WGSL
+ // conversion rules and the rounding rules of WebIDL.
+ last_castable_pipeline_override: -reinterpretU64AsF64(
+ reinterpretF64AsU64(reinterpretU32AsF32(kBit.f32.positive.max) / 2 + 2 ** 127) - BigInt(1)
+ ),
+ },
+ max_ulp: reinterpretU32AsF32(kBit.f32.max_ulp),
+ emax: 127,
+ },
+
+ // Limits of i16
+ i16: {
+ positive: {
+ min: 0,
+ max: 32767,
+ },
+ negative: {
+ min: -32768,
+ max: 0,
+ },
+ },
+
+ // Limits of u16
+ u16: {
+ min: 0,
+ max: 65535,
+ },
+
+ // Limits of f16
+ f16: {
+ positive: {
+ min: reinterpretU16AsF16(kBit.f16.positive.min),
+ max: reinterpretU16AsF16(kBit.f16.positive.max),
+ zero: reinterpretU16AsF16(kBit.f16.positive.zero),
+ subnormal: {
+ min: reinterpretU16AsF16(kBit.f16.positive.subnormal.min),
+ max: reinterpretU16AsF16(kBit.f16.positive.subnormal.max),
+ },
+ infinity: reinterpretU16AsF16(kBit.f16.positive.infinity),
+ nearest_max: reinterpretU16AsF16(kBit.f16.positive.nearest_max),
+ less_than_one: reinterpretU16AsF16(kBit.f16.positive.less_than_one),
+ pi: {
+ whole: reinterpretU16AsF16(kBit.f16.positive.pi.whole),
+ three_quarters: reinterpretU16AsF16(kBit.f16.positive.pi.three_quarters),
+ half: reinterpretU16AsF16(kBit.f16.positive.pi.half),
+ third: reinterpretU16AsF16(kBit.f16.positive.pi.third),
+ quarter: reinterpretU16AsF16(kBit.f16.positive.pi.quarter),
+ sixth: reinterpretU16AsF16(kBit.f16.positive.pi.sixth),
+ },
+ e: reinterpretU16AsF16(kBit.f16.positive.e),
+ // The positive pipeline-overridable constant with the smallest magnitude
+ // which when cast to f16 will produce infinity. This comes from WGSL
+ // conversion rules and the rounding rules of WebIDL.
+ first_non_castable_pipeline_override:
+ reinterpretU16AsF16(kBit.f16.positive.max) / 2 + 2 ** 15,
+ // The positive pipeline-overridable constant with the largest magnitude
+ // which when cast to f16 will not produce infinity. This comes from WGSL
+ // conversion rules and the rounding rules of WebIDL
+ last_castable_pipeline_override: reinterpretU64AsF64(
+ reinterpretF64AsU64(reinterpretU16AsF16(kBit.f16.positive.max) / 2 + 2 ** 15) - BigInt(1)
+ ),
+ },
+ negative: {
+ max: reinterpretU16AsF16(kBit.f16.negative.max),
+ min: reinterpretU16AsF16(kBit.f16.negative.min),
+ zero: reinterpretU16AsF16(kBit.f16.negative.zero),
+ subnormal: {
+ max: reinterpretU16AsF16(kBit.f16.negative.subnormal.max),
+ min: reinterpretU16AsF16(kBit.f16.negative.subnormal.min),
+ },
+ infinity: reinterpretU16AsF16(kBit.f16.negative.infinity),
+ nearest_min: reinterpretU16AsF16(kBit.f16.negative.nearest_min),
+ less_than_one: reinterpretU16AsF16(kBit.f16.negative.less_than_one), // -0.9996
+ pi: {
+ whole: reinterpretU16AsF16(kBit.f16.negative.pi.whole),
+ three_quarters: reinterpretU16AsF16(kBit.f16.negative.pi.three_quarters),
+ half: reinterpretU16AsF16(kBit.f16.negative.pi.half),
+ third: reinterpretU16AsF16(kBit.f16.negative.pi.third),
+ quarter: reinterpretU16AsF16(kBit.f16.negative.pi.quarter),
+ sixth: reinterpretU16AsF16(kBit.f16.negative.pi.sixth),
+ },
+ // The negative pipeline-overridable constant with the smallest magnitude
+ // which when cast to f16 will produce infinity. This comes from WGSL
+ // conversion rules and the rounding rules of WebIDL.
+ first_non_castable_pipeline_override: -(
+ reinterpretU16AsF16(kBit.f16.positive.max) / 2 +
+ 2 ** 15
+ ),
+ // The negative pipeline-overridable constant with the largest magnitude
+ // which when cast to f16 will not produce infinity. This comes from WGSL
+ // conversion rules and the rounding rules of WebIDL.
+ last_castable_pipeline_override: -reinterpretU64AsF64(
+ reinterpretF64AsU64(reinterpretU16AsF16(kBit.f16.positive.max) / 2 + 2 ** 15) - BigInt(1)
+ ),
+ },
+ max_ulp: reinterpretU16AsF16(kBit.f16.max_ulp),
+ emax: 15,
+ },
+
+ // Limits of i8
+ i8: {
+ positive: {
+ min: 0,
+ max: 127,
+ },
+ negative: {
+ min: -128,
+ max: 0,
+ },
+ },
+
+ // Limits of u8
+ u8: {
+ min: 0,
+ max: 255,
+ },
+} as const;
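+
+// A few spot checks relating kBit and kValue via the reinterpret helpers above; the values
+// shown are the exact doubles for the corresponding limits:
+//
+//   kValue.f32.positive.max; // 3.4028234663852886e38 (largest finite f32)
+//   kValue.f16.positive.min; // 2 ** -14              (smallest normal f16)
+//   kValue.f16.positive.max; // 65504                 (largest finite f16)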
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/conversion.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/conversion.ts
new file mode 100644
index 0000000000..d98367447d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/conversion.ts
@@ -0,0 +1,1635 @@
+import { Colors } from '../../common/util/colors.js';
+import { ROArrayArray } from '../../common/util/types.js';
+import { assert, objectEquals, TypedArrayBufferView, unreachable } from '../../common/util/util.js';
+import { Float16Array } from '../../external/petamoriken/float16/float16.js';
+
+import BinaryStream from './binary_stream.js';
+import { kBit } from './constants.js';
+import {
+ cartesianProduct,
+ clamp,
+ correctlyRoundedF16,
+ isFiniteF16,
+ isSubnormalNumberF16,
+ isSubnormalNumberF32,
+ isSubnormalNumberF64,
+} from './math.js';
+
+/**
+ * Encodes a JS `number` into a "normalized" (unorm/snorm) integer representation with `bits` bits.
+ * Input must be between -1 and 1 if signed, or 0 and 1 if unsigned.
+ *
+ * MAINTENANCE_TODO: See if performance of texel_data improves if this function is pre-specialized
+ * for a particular `bits`/`signed`.
+ */
+export function floatAsNormalizedInteger(float: number, bits: number, signed: boolean): number {
+ if (signed) {
+ assert(float >= -1 && float <= 1, () => `${float} out of bounds of snorm`);
+ const max = Math.pow(2, bits - 1) - 1;
+ return Math.round(float * max);
+ } else {
+ assert(float >= 0 && float <= 1, () => `${float} out of bounds of unorm`);
+ const max = Math.pow(2, bits) - 1;
+ return Math.round(float * max);
+ }
+}
+
+/**
+ * Decodes a JS `number` from a "normalized" (unorm/snorm) integer representation with `bits` bits.
+ * Input must be an integer in the range of the specified unorm/snorm type.
+ */
+export function normalizedIntegerAsFloat(integer: number, bits: number, signed: boolean): number {
+ assert(Number.isInteger(integer));
+ if (signed) {
+ const max = Math.pow(2, bits - 1) - 1;
+ assert(integer >= -max - 1 && integer <= max);
+ if (integer === -max - 1) {
+ integer = -max;
+ }
+ return integer / max;
+ } else {
+ const max = Math.pow(2, bits) - 1;
+ assert(integer >= 0 && integer <= max);
+ return integer / max;
+ }
+}
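+
+// Worked examples for the unorm8/snorm8 cases, following the formulas above:
+//
+//   floatAsNormalizedInteger(1.0, 8, false); // 255  (unorm8 max)
+//   normalizedIntegerAsFloat(255, 8, false); // 1.0
+//   floatAsNormalizedInteger(-1.0, 8, true); // -127 (snorm8: round(-1 * 127))
+//   normalizedIntegerAsFloat(-128, 8, true); // -1.0 (-128 is clamped to -127 first)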
+
+/**
+ * Compares 2 numbers. Returns true if the absolute value of their difference
+ * is less than or equal to maxDiff, or if they are both NaN or the
+ * same-signed infinity.
+ */
+export function numbersApproximatelyEqual(a: number, b: number, maxDiff: number = 0) {
+ return (
+ (Number.isNaN(a) && Number.isNaN(b)) ||
+ (a === Number.POSITIVE_INFINITY && b === Number.POSITIVE_INFINITY) ||
+ (a === Number.NEGATIVE_INFINITY && b === Number.NEGATIVE_INFINITY) ||
+ Math.abs(a - b) <= maxDiff
+ );
+}
+
+/**
+ * Once-allocated ArrayBuffer/views to avoid overhead of allocation when converting between numeric formats
+ *
+ * workingData* is shared between multiple functions in this file, so to avoid re-entrancy problems, make sure in
+ * functions that use it that they don't call themselves or other functions that use workingData*.
+ */
+const workingData = new ArrayBuffer(8);
+const workingDataU32 = new Uint32Array(workingData);
+const workingDataU16 = new Uint16Array(workingData);
+const workingDataU8 = new Uint8Array(workingData);
+const workingDataF32 = new Float32Array(workingData);
+const workingDataF16 = new Float16Array(workingData);
+const workingDataI16 = new Int16Array(workingData);
+const workingDataI32 = new Int32Array(workingData);
+const workingDataI8 = new Int8Array(workingData);
+const workingDataF64 = new Float64Array(workingData);
+const workingDataView = new DataView(workingData);
+
+/**
+ * Encodes a JS `number` into an IEEE754 floating point number with the specified number of
+ * sign, exponent, mantissa bits, and exponent bias.
+ * Returns the result as an integer-valued JS `number`.
+ *
+ * Does not handle clamping, overflow, or denormal inputs.
+ * On underflow (result is subnormal), rounds to (signed) zero.
+ *
+ * MAINTENANCE_TODO: Replace usages of this with numberToFloatBits.
+ */
+export function float32ToFloatBits(
+ n: number,
+ signBits: 0 | 1,
+ exponentBits: number,
+ mantissaBits: number,
+ bias: number
+): number {
+ assert(exponentBits <= 8);
+ assert(mantissaBits <= 23);
+
+ if (Number.isNaN(n)) {
+    // NaN = all exponent bits true, 1 or more mantissa bits true
+ return (((1 << exponentBits) - 1) << mantissaBits) | ((1 << mantissaBits) - 1);
+ }
+
+ workingDataView.setFloat32(0, n, true);
+ const bits = workingDataView.getUint32(0, true);
+ // bits (32): seeeeeeeefffffffffffffffffffffff
+
+ // 0 or 1
+ const sign = (bits >> 31) & signBits;
+
+ if (n === 0) {
+ if (sign === 1) {
+ // Handle negative zero.
+ return 1 << (exponentBits + mantissaBits);
+ }
+ return 0;
+ }
+
+ if (signBits === 0) {
+ assert(n >= 0);
+ }
+
+ if (!Number.isFinite(n)) {
+ // Infinity = all exponent bits true, no mantissa bits true
+ // plus the sign bit.
+ return (
+ (((1 << exponentBits) - 1) << mantissaBits) | (n < 0 ? 2 ** (exponentBits + mantissaBits) : 0)
+ );
+ }
+
+ const mantissaBitsToDiscard = 23 - mantissaBits;
+
+ // >> to remove mantissa, & to remove sign, - 127 to remove bias.
+ const exp = ((bits >> 23) & 0xff) - 127;
+
+ // Convert to the new biased exponent.
+ const newBiasedExp = bias + exp;
+ assert(newBiasedExp < 1 << exponentBits, () => `input number ${n} overflows target type`);
+
+ if (newBiasedExp <= 0) {
+ // Result is subnormal or zero. Round to (signed) zero.
+ return sign << (exponentBits + mantissaBits);
+ } else {
+ // Mask only the mantissa, and discard the lower bits.
+ const newMantissa = (bits & 0x7fffff) >> mantissaBitsToDiscard;
+ return (sign << (exponentBits + mantissaBits)) | (newBiasedExp << mantissaBits) | newMantissa;
+ }
+}
+
+/**
+ * Encodes a JS `number` into an IEEE754 16 bit floating point number.
+ * Returns the result as an integer-valued JS `number`.
+ *
+ * Does not handle clamping, overflow, or denormal inputs.
+ * On underflow (result is subnormal), rounds to (signed) zero.
+ */
+export function float32ToFloat16Bits(n: number) {
+ return float32ToFloatBits(n, 1, 5, 10, 15);
+}
+
+/**
+ * Decodes an IEEE754 16 bit floating point number into a JS `number` and returns.
+ */
+export function float16BitsToFloat32(float16Bits: number): number {
+ return floatBitsToNumber(float16Bits, kFloat16Format);
+}
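+
+// A quick round-trip sketch for the f16 helpers above (these inputs are exactly
+// representable in f16, so no rounding is involved):
+//
+//   float32ToFloat16Bits(1.0);    // 0x3c00 (sign 0, exponent 15, mantissa 0)
+//   float16BitsToFloat32(0x3c00); // 1.0
+//   float32ToFloat16Bits(-2.0);   // 0xc000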
+
+type FloatFormat = { signed: 0 | 1; exponentBits: number; mantissaBits: number; bias: number };
+
+/** FloatFormat defining IEEE754 32-bit float. */
+export const kFloat32Format = { signed: 1, exponentBits: 8, mantissaBits: 23, bias: 127 } as const;
+/** FloatFormat defining IEEE754 16-bit float. */
+export const kFloat16Format = { signed: 1, exponentBits: 5, mantissaBits: 10, bias: 15 } as const;
+/** FloatFormat for 9 bit mantissa, 5 bit exponent unsigned float */
+export const kUFloat9e5Format = { signed: 0, exponentBits: 5, mantissaBits: 9, bias: 15 } as const;
+
+/** Bitcast u32 (represented as integer Number) to f32 (represented as floating-point Number). */
+export function float32BitsToNumber(bits: number): number {
+ workingDataU32[0] = bits;
+ return workingDataF32[0];
+}
+/** Bitcast f32 (represented as floating-point Number) to u32 (represented as integer Number). */
+export function numberToFloat32Bits(number: number): number {
+ workingDataF32[0] = number;
+ return workingDataU32[0];
+}
+
+/**
+ * Decodes an IEEE754 float with the supplied format specification into a JS number.
+ *
+ * The format MUST be no larger than a 32-bit float.
+ */
+export function floatBitsToNumber(bits: number, fmt: FloatFormat): number {
+ // Pad the provided bits out to f32, then convert to a `number` with the wrong bias.
+ // E.g. for f16 to f32:
+ // - f16: S EEEEE MMMMMMMMMM
+ // ^ 000^^^^^ ^^^^^^^^^^0000000000000
+ // - f32: S eeeEEEEE MMMMMMMMMMmmmmmmmmmmmmm
+
+ const kNonSignBits = fmt.exponentBits + fmt.mantissaBits;
+ const kNonSignBitsMask = (1 << kNonSignBits) - 1;
+ const exponentAndMantissaBits = bits & kNonSignBitsMask;
+ const exponentMask = ((1 << fmt.exponentBits) - 1) << fmt.mantissaBits;
+ const infinityOrNaN = (bits & exponentMask) === exponentMask;
+ if (infinityOrNaN) {
+ const mantissaMask = (1 << fmt.mantissaBits) - 1;
+ const signBit = 2 ** kNonSignBits;
+ const isNegative = (bits & signBit) !== 0;
+ return bits & mantissaMask
+ ? Number.NaN
+ : isNegative
+ ? Number.NEGATIVE_INFINITY
+ : Number.POSITIVE_INFINITY;
+ }
+ let f32BitsWithWrongBias =
+ exponentAndMantissaBits << (kFloat32Format.mantissaBits - fmt.mantissaBits);
+ f32BitsWithWrongBias |= (bits << (31 - kNonSignBits)) & 0x8000_0000;
+ const numberWithWrongBias = float32BitsToNumber(f32BitsWithWrongBias);
+ return numberWithWrongBias * 2 ** (kFloat32Format.bias - fmt.bias);
+}
+
+/**
+ * Convert ufloat9e5 bits from rgb9e5ufloat to a JS number
+ *
+ * The difference between `floatBitsToNumber` and `ufloatBitsToNumber`
+ * is that the latter doesn't use an implicit leading bit:
+ *
+ * floatBitsToNumber = 2^(exponent - bias) * (1 + mantissa / 2 ^ numMantissaBits)
+ * ufloatM9E5BitsToNumber = 2^(exponent - bias) * (mantissa / 2 ^ numMantissaBits)
+ * = 2^(exponent - bias - numMantissaBits) * mantissa
+ */
+export function ufloatM9E5BitsToNumber(bits: number, fmt: FloatFormat): number {
+ const exponent = bits >> fmt.mantissaBits;
+ const mantissaMask = (1 << fmt.mantissaBits) - 1;
+ const mantissa = bits & mantissaMask;
+ return mantissa * 2 ** (exponent - fmt.bias - fmt.mantissaBits);
+}
+
+/**
+ * Encodes a JS `number` into an IEEE754 floating point number with the specified format.
+ * Returns the result as an integer-valued JS `number`.
+ *
+ * Does not handle clamping, overflow, or denormal inputs.
+ * On underflow (result is subnormal), rounds to (signed) zero.
+ */
+export function numberToFloatBits(number: number, fmt: FloatFormat): number {
+ return float32ToFloatBits(number, fmt.signed, fmt.exponentBits, fmt.mantissaBits, fmt.bias);
+}
+
+/**
+ * Given a floating point number (as an integer representing its bits), computes how many ULPs it is
+ * from zero.
+ *
+ * Subnormal numbers are skipped, so that 0 is one ULP from the minimum normal number.
+ * Subnormal values are flushed to 0.
+ * Positive and negative 0 are both considered to be 0 ULPs from 0.
+ */
+export function floatBitsToNormalULPFromZero(bits: number, fmt: FloatFormat): number {
+ const mask_sign = fmt.signed << (fmt.exponentBits + fmt.mantissaBits);
+ const mask_expt = ((1 << fmt.exponentBits) - 1) << fmt.mantissaBits;
+ const mask_mant = (1 << fmt.mantissaBits) - 1;
+ const mask_rest = mask_expt | mask_mant;
+
+ assert(fmt.exponentBits + fmt.mantissaBits <= 31);
+
+ const sign = bits & mask_sign ? -1 : 1;
+ const rest = bits & mask_rest;
+ const subnormal_or_zero = (bits & mask_expt) === 0;
+ const infinity_or_nan = (bits & mask_expt) === mask_expt;
+ assert(!infinity_or_nan, 'no ulp representation for infinity/nan');
+
+ // The first normal number is mask_mant+1, so subtract mask_mant to make min_normal - zero = 1ULP.
+ const abs_ulp_from_zero = subnormal_or_zero ? 0 : rest - mask_mant;
+ return sign * abs_ulp_from_zero;
+}
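+
+// Sketch of the ULP-from-zero convention, using the f16 format defined above:
+//
+//   floatBitsToNormalULPFromZero(0x0400, kFloat16Format); // 1: smallest normal f16 is 1 ULP from zero
+//   floatBitsToNormalULPFromZero(0x0001, kFloat16Format); // 0: subnormals are flushed to zero
+//   floatBitsToNormalULPFromZero(0x8400, kFloat16Format); // -1: negative counterpart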
+
+/**
+ * Encodes three JS `number` values into RGB9E5, returned as an integer-valued JS `number`.
+ *
+ * RGB9E5 represents three partial-precision floating-point numbers encoded into a single 32-bit
+ * value all sharing the same 5-bit exponent.
+ * There is no sign bit, and there is a shared 5-bit biased (15) exponent and a 9-bit
+ * mantissa for each channel. The mantissa does NOT have an implicit leading "1.",
+ * and instead has an implicit leading "0.".
+ *
+ * @see https://registry.khronos.org/OpenGL/extensions/EXT/EXT_texture_shared_exponent.txt
+ */
+export function packRGB9E5UFloat(r: number, g: number, b: number): number {
+ const N = 9; // number of mantissa bits
+ const Emax = 31; // max exponent
+ const B = 15; // exponent bias
+ const sharedexp_max = (((1 << N) - 1) / (1 << N)) * 2 ** (Emax - B);
+ const red_c = clamp(r, { min: 0, max: sharedexp_max });
+ const green_c = clamp(g, { min: 0, max: sharedexp_max });
+ const blue_c = clamp(b, { min: 0, max: sharedexp_max });
+ const max_c = Math.max(red_c, green_c, blue_c);
+ const exp_shared_p = Math.max(-B - 1, Math.floor(Math.log2(max_c))) + 1 + B;
+ const max_s = Math.floor(max_c / 2 ** (exp_shared_p - B - N) + 0.5);
+ const exp_shared = max_s === 1 << N ? exp_shared_p + 1 : exp_shared_p;
+ const scalar = 1 / 2 ** (exp_shared - B - N);
+ const red_s = Math.floor(red_c * scalar + 0.5);
+ const green_s = Math.floor(green_c * scalar + 0.5);
+ const blue_s = Math.floor(blue_c * scalar + 0.5);
+ assert(red_s >= 0 && red_s <= 0b111111111);
+ assert(green_s >= 0 && green_s <= 0b111111111);
+ assert(blue_s >= 0 && blue_s <= 0b111111111);
+ assert(exp_shared >= 0 && exp_shared <= 0b11111);
+ return ((exp_shared << 27) | (blue_s << 18) | (green_s << 9) | red_s) >>> 0;
+}
+
+/**
+ * Decodes a RGB9E5 encoded color.
+ * @see packRGB9E5UFloat
+ */
+export function unpackRGB9E5UFloat(encoded: number): { R: number; G: number; B: number } {
+ const N = 9; // number of mantissa bits
+ const B = 15; // exponent bias
+ const red_s = (encoded >>> 0) & 0b111111111;
+ const green_s = (encoded >>> 9) & 0b111111111;
+ const blue_s = (encoded >>> 18) & 0b111111111;
+ const exp_shared = (encoded >>> 27) & 0b11111;
+ const exp = Math.pow(2, exp_shared - B - N);
+ return {
+ R: exp * red_s,
+ G: exp * green_s,
+ B: exp * blue_s,
+ };
+}
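+
+// Round-trip sketch for the RGB9E5 helpers; 1.0, 0.5, and 0.25 are exactly representable
+// under the shared exponent, so the unpacked values match the inputs exactly:
+//
+//   const encoded = packRGB9E5UFloat(1.0, 0.5, 0.25);
+//   unpackRGB9E5UFloat(encoded); // { R: 1.0, G: 0.5, B: 0.25 }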
+
+/**
+ * Quantizes two f32s to f16 and then packs them in a u32
+ *
+ * This should implement the same behaviour as the builtin `pack2x16float` from
+ * WGSL.
+ *
+ * Caller is responsible for ensuring inputs are f32s
+ *
+ * @param x first f32 to be packed
+ * @param y second f32 to be packed
+ * @returns an array of possible results for pack2x16float. Elements are either
+ * a number or undefined.
+ * undefined indicates that any value is valid, since the input went
+ * out of bounds.
+ */
+export function pack2x16float(x: number, y: number): (number | undefined)[] {
+ // Generates all possible valid u16 bit fields for a given f32 to f16 conversion.
+ // Assumes FTZ for both the f32 and f16 value is allowed.
+ const generateU16s = (n: number): readonly number[] => {
+ let contains_subnormals = isSubnormalNumberF32(n);
+ const n_f16s = correctlyRoundedF16(n);
+ contains_subnormals ||= n_f16s.some(isSubnormalNumberF16);
+
+ const n_u16s = n_f16s.map(f16 => {
+ workingDataF16[0] = f16;
+ return workingDataU16[0];
+ });
+
+ const contains_poszero = n_u16s.some(u => u === kBit.f16.positive.zero);
+ const contains_negzero = n_u16s.some(u => u === kBit.f16.negative.zero);
+ if (!contains_negzero && (contains_poszero || contains_subnormals)) {
+ n_u16s.push(kBit.f16.negative.zero);
+ }
+
+ if (!contains_poszero && (contains_negzero || contains_subnormals)) {
+ n_u16s.push(kBit.f16.positive.zero);
+ }
+
+ return n_u16s;
+ };
+
+ if (!isFiniteF16(x) || !isFiniteF16(y)) {
+ // This indicates any value is valid, so it isn't worth bothering
+ // calculating the more restrictive possibilities.
+ return [undefined];
+ }
+
+ const results = new Array<number>();
+ for (const p of cartesianProduct(generateU16s(x), generateU16s(y))) {
+ assert(p.length === 2, 'cartesianProduct of 2 arrays returned an entry with not 2 elements');
+ workingDataU16[0] = p[0];
+ workingDataU16[1] = p[1];
+ results.push(workingDataU32[0]);
+ }
+
+ return results;
+}
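+
+// Sketch for pack2x16float with exactly-representable finite inputs (no subnormals or zeros
+// involved), so only one packed value is possible. On a little-endian host, x lands in the
+// low 16 bits and y in the high 16 bits via the typed-array aliasing above:
+//
+//   pack2x16float(1.0, 2.0); // [0x40003c00]  (f16(1.0) = 0x3c00, f16(2.0) = 0x4000)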
+
+/**
+ * Converts two normalized f32s to i16s and then packs them in a u32
+ *
+ * This should implement the same behaviour as the builtin `pack2x16snorm` from
+ * WGSL.
+ *
+ * Caller is responsible for ensuring inputs are normalized f32s
+ *
+ * @param x first f32 to be packed
+ * @param y second f32 to be packed
+ * @returns a number that is the expected result of pack2x16snorm.
+ */
+export function pack2x16snorm(x: number, y: number): number {
+ // Converts f32 to i16 via the pack2x16snorm formula.
+ // FTZ is not explicitly handled, because all subnormals will produce a value
+ // between 0 and 1, but significantly away from the edges, so floor goes to 0.
+ const generateI16 = (n: number): number => {
+ return Math.floor(0.5 + 32767 * Math.min(1, Math.max(-1, n)));
+ };
+
+ workingDataI16[0] = generateI16(x);
+ workingDataI16[1] = generateI16(y);
+
+ return workingDataU32[0];
+}
+
+/**
+ * Converts two normalized f32s to u16s and then packs them in a u32
+ *
+ * This should implement the same behaviour as the builtin `pack2x16unorm` from
+ * WGSL.
+ *
+ * Caller is responsible for ensuring inputs are normalized f32s
+ *
+ * @param x first f32 to be packed
+ * @param y second f32 to be packed
+ * @returns a number that is the expected result of pack2x16unorm.
+ */
+export function pack2x16unorm(x: number, y: number): number {
+ // Converts f32 to u16 via the pack2x16unorm formula.
+  // FTZ is not explicitly handled, because all subnormals produce a value at or just
+  // above 0.5 and well below 1, so the floor goes to 0.
+ const generateU16 = (n: number): number => {
+ return Math.floor(0.5 + 65535 * Math.min(1, Math.max(0, n)));
+ };
+
+ workingDataU16[0] = generateU16(x);
+ workingDataU16[1] = generateU16(y);
+
+ return workingDataU32[0];
+}
+
+/**
+ * Converts four normalized f32s to i8s and then packs them in a u32
+ *
+ * This should implement the same behaviour as the builtin `pack4x8snorm` from
+ * WGSL.
+ *
+ * Caller is responsible for ensuring inputs are normalized f32s
+ *
+ * @param vals four f32s to be packed
+ * @returns a number that is the expected result of pack4x8snorm.
+ */
+export function pack4x8snorm(...vals: [number, number, number, number]): number {
+  // Converts f32 to i8 via the pack4x8snorm formula.
+ // FTZ is not explicitly handled, because all subnormals will produce a value
+ // between 0 and 1, so floor goes to 0.
+ const generateI8 = (n: number): number => {
+ return Math.floor(0.5 + 127 * Math.min(1, Math.max(-1, n)));
+ };
+
+ for (const idx in vals) {
+ workingDataI8[idx] = generateI8(vals[idx]);
+ }
+
+ return workingDataU32[0];
+}
+
+/**
+ * Converts four normalized f32s to u8s and then packs them in a u32
+ *
+ * This should implement the same behaviour as the builtin `pack4x8unorm` from
+ * WGSL.
+ *
+ * Caller is responsible for ensuring inputs are normalized f32s
+ *
+ * @param vals four f32s to be packed
+ * @returns a number that is the expected result of pack4x8unorm.
+ */
+export function pack4x8unorm(...vals: [number, number, number, number]): number {
+ // Converts f32 to u8 via the pack4x8unorm formula.
+  // FTZ is not explicitly handled, because all subnormals produce a value at or just
+  // above 0.5 and well below 1, so the floor goes to 0.
+ const generateU8 = (n: number): number => {
+ return Math.floor(0.5 + 255 * Math.min(1, Math.max(0, n)));
+ };
+
+ for (const idx in vals) {
+ workingDataU8[idx] = generateU8(vals[idx]);
+ }
+
+ return workingDataU32[0];
+}
+
+/**
+ * Asserts that a number is within the representable range (inclusive) of the integer type with the
+ * specified number of bits and signedness.
+ *
+ * MAINTENANCE_TODO: Assert isInteger? Then this function "asserts that a number is representable"
+ * by the type.
+ */
+export function assertInIntegerRange(n: number, bits: number, signed: boolean): void {
+ if (signed) {
+ const min = -Math.pow(2, bits - 1);
+ const max = Math.pow(2, bits - 1) - 1;
+ assert(n >= min && n <= max);
+ } else {
+ const max = Math.pow(2, bits) - 1;
+ assert(n >= 0 && n <= max);
+ }
+}
+
+/**
+ * Converts a linear value into a "gamma"-encoded value using the sRGB-clamped transfer function.
+ */
+export function gammaCompress(n: number): number {
+ n = n <= 0.0031308 ? (323 * n) / 25 : (211 * Math.pow(n, 5 / 12) - 11) / 200;
+ return clamp(n, { min: 0, max: 1 });
+}
+
+/**
+ * Converts a "gamma"-encoded value into a linear value using the sRGB-clamped transfer function.
+ */
+export function gammaDecompress(n: number): number {
+ n = n <= 0.04045 ? (n * 25) / 323 : Math.pow((200 * n + 11) / 211, 12 / 5);
+ return clamp(n, { min: 0, max: 1 });
+}
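+
+// Rough sanity checks for the sRGB transfer functions above (values are approximate):
+//
+//   gammaCompress(0.5);      // ~0.7354 (linear 0.5 appears much brighter when encoded)
+//   gammaDecompress(0.7354); // ~0.5
+//   gammaCompress(0.002);    // ~0.02584 (linear segment: 12.92 * n)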
+
+/** Converts a 32-bit float value to a 32-bit unsigned integer value */
+export function float32ToUint32(f32: number): number {
+ workingDataF32[0] = f32;
+ return workingDataU32[0];
+}
+
+/** Converts a 32-bit unsigned integer value to a 32-bit float value */
+export function uint32ToFloat32(u32: number): number {
+ workingDataU32[0] = u32;
+ return workingDataF32[0];
+}
+
+/** Converts a 32-bit float value to a 32-bit signed integer value */
+export function float32ToInt32(f32: number): number {
+ workingDataF32[0] = f32;
+ return workingDataI32[0];
+}
+
+/** Converts a 32-bit unsigned integer value to a 32-bit signed integer value */
+export function uint32ToInt32(u32: number): number {
+ workingDataU32[0] = u32;
+ return workingDataI32[0];
+}
+
+/** Converts a 16-bit float value to a 16-bit unsigned integer value */
+export function float16ToUint16(f16: number): number {
+ workingDataF16[0] = f16;
+ return workingDataU16[0];
+}
+
+/** Converts a 16-bit unsigned integer value to a 16-bit float value */
+export function uint16ToFloat16(u16: number): number {
+ workingDataU16[0] = u16;
+ return workingDataF16[0];
+}
+
+/** Converts a 16-bit float value to a 16-bit signed integer value */
+export function float16ToInt16(f16: number): number {
+ workingDataF16[0] = f16;
+ return workingDataI16[0];
+}
+
+/** A type of number representable by Scalar. */
+export type ScalarKind =
+ | 'abstract-float'
+ | 'f64'
+ | 'f32'
+ | 'f16'
+ | 'u32'
+ | 'u16'
+ | 'u8'
+ | 'i32'
+ | 'i16'
+ | 'i8'
+ | 'bool';
+
+/** ScalarType describes the type of WGSL Scalar. */
+export class ScalarType {
+ readonly kind: ScalarKind; // The named type
+ readonly _size: number; // In bytes
+ readonly read: (buf: Uint8Array, offset: number) => Scalar; // reads a scalar from a buffer
+
+ constructor(kind: ScalarKind, size: number, read: (buf: Uint8Array, offset: number) => Scalar) {
+ this.kind = kind;
+ this._size = size;
+ this.read = read;
+ }
+
+ public toString(): string {
+ return this.kind;
+ }
+
+ public get size(): number {
+ return this._size;
+ }
+
+ /** Constructs a Scalar of this type with `value` */
+ public create(value: number): Scalar {
+ switch (this.kind) {
+ case 'abstract-float':
+ return abstractFloat(value);
+ case 'f64':
+ return f64(value);
+ case 'f32':
+ return f32(value);
+ case 'f16':
+ return f16(value);
+ case 'u32':
+ return u32(value);
+ case 'u16':
+ return u16(value);
+ case 'u8':
+ return u8(value);
+ case 'i32':
+ return i32(value);
+ case 'i16':
+ return i16(value);
+ case 'i8':
+ return i8(value);
+ case 'bool':
+ return bool(value !== 0);
+ }
+ }
+}
+
+/** VectorType describes the type of WGSL Vector. */
+export class VectorType {
+ readonly width: number; // Number of elements in the vector
+ readonly elementType: ScalarType; // Element type
+
+ constructor(width: number, elementType: ScalarType) {
+ this.width = width;
+ this.elementType = elementType;
+ }
+
+ /**
+ * @returns a vector constructed from the values read from the buffer at the
+ * given byte offset
+ */
+ public read(buf: Uint8Array, offset: number): Vector {
+ const elements: Array<Scalar> = [];
+ for (let i = 0; i < this.width; i++) {
+ elements[i] = this.elementType.read(buf, offset);
+ offset += this.elementType.size;
+ }
+ return new Vector(elements);
+ }
+
+ public toString(): string {
+ return `vec${this.width}<${this.elementType}>`;
+ }
+
+ public get size(): number {
+ return this.elementType.size * this.width;
+ }
+
+ /** Constructs a Vector of this type with the given values */
+ public create(value: number | readonly number[]): Vector {
+ if (value instanceof Array) {
+ assert(value.length === this.width);
+ } else {
+ value = Array(this.width).fill(value);
+ }
+ return new Vector(value.map(v => this.elementType.create(v)));
+ }
+}
+
+// Maps a string representation of a vector type to vector type.
+const vectorTypes = new Map<string, VectorType>();
+
+export function TypeVec(width: number, elementType: ScalarType): VectorType {
+  const key = `${elementType.toString()} ${width}`;
+ let ty = vectorTypes.get(key);
+ if (ty !== undefined) {
+ return ty;
+ }
+ ty = new VectorType(width, elementType);
+ vectorTypes.set(key, ty);
+ return ty;
+}
+
+/** MatrixType describes the type of WGSL Matrix. */
+export class MatrixType {
+ readonly cols: number; // Number of columns in the Matrix
+ readonly rows: number; // Number of elements per column in the Matrix
+ readonly elementType: ScalarType; // Element type
+
+ constructor(cols: number, rows: number, elementType: ScalarType) {
+ this.cols = cols;
+ this.rows = rows;
+ assert(
+ elementType.kind === 'f32' ||
+ elementType.kind === 'f16' ||
+ elementType.kind === 'abstract-float',
+ "MatrixType can only have elementType of 'f32' or 'f16' or 'abstract-float'"
+ );
+ this.elementType = elementType;
+ }
+
+ /**
+ * @returns a Matrix constructed from the values read from the buffer at the
+ * given byte offset
+ */
+ public read(buf: Uint8Array, offset: number): Matrix {
+ const elements: Scalar[][] = [...Array(this.cols)].map(_ => [...Array(this.rows)]);
+ for (let c = 0; c < this.cols; c++) {
+ for (let r = 0; r < this.rows; r++) {
+ elements[c][r] = this.elementType.read(buf, offset);
+ offset += this.elementType.size;
+ }
+
+ // vec3 columns have one element of padding, which must be skipped when reading matrices.
+ if (this.rows === 3) {
+ offset += this.elementType.size;
+ }
+ }
+ return new Matrix(elements);
+ }
+
+ public toString(): string {
+ return `mat${this.cols}x${this.rows}<${this.elementType}>`;
+ }
+}
+
+// Maps a string representation of a Matrix type to its MatrixType.
+const matrixTypes = new Map<string, MatrixType>();
+
+export function TypeMat(cols: number, rows: number, elementType: ScalarType): MatrixType {
+ const key = `${elementType.toString()} ${cols} ${rows}`;
+ let ty = matrixTypes.get(key);
+ if (ty !== undefined) {
+ return ty;
+ }
+ ty = new MatrixType(cols, rows, elementType);
+ matrixTypes.set(key, ty);
+ return ty;
+}
+
+/** Type is a ScalarType, VectorType, or MatrixType. */
+export type Type = ScalarType | VectorType | MatrixType;
+
+/** Copy bytes from `buf` at `offset` into the working data, then read it out using `workingDataOut` */
+function valueFromBytes(workingDataOut: TypedArrayBufferView, buf: Uint8Array, offset: number) {
+ for (let i = 0; i < workingDataOut.BYTES_PER_ELEMENT; ++i) {
+ workingDataU8[i] = buf[offset + i];
+ }
+ return workingDataOut[0];
+}
+
+export const TypeI32 = new ScalarType('i32', 4, (buf: Uint8Array, offset: number) =>
+ i32(valueFromBytes(workingDataI32, buf, offset))
+);
+export const TypeU32 = new ScalarType('u32', 4, (buf: Uint8Array, offset: number) =>
+ u32(valueFromBytes(workingDataU32, buf, offset))
+);
+export const TypeAbstractFloat = new ScalarType(
+ 'abstract-float',
+ 8,
+ (buf: Uint8Array, offset: number) => abstractFloat(valueFromBytes(workingDataF64, buf, offset))
+);
+export const TypeF64 = new ScalarType('f64', 8, (buf: Uint8Array, offset: number) =>
+ f64(valueFromBytes(workingDataF64, buf, offset))
+);
+export const TypeF32 = new ScalarType('f32', 4, (buf: Uint8Array, offset: number) =>
+ f32(valueFromBytes(workingDataF32, buf, offset))
+);
+export const TypeI16 = new ScalarType('i16', 2, (buf: Uint8Array, offset: number) =>
+ i16(valueFromBytes(workingDataI16, buf, offset))
+);
+export const TypeU16 = new ScalarType('u16', 2, (buf: Uint8Array, offset: number) =>
+ u16(valueFromBytes(workingDataU16, buf, offset))
+);
+export const TypeF16 = new ScalarType('f16', 2, (buf: Uint8Array, offset: number) =>
+ f16Bits(valueFromBytes(workingDataU16, buf, offset))
+);
+export const TypeI8 = new ScalarType('i8', 1, (buf: Uint8Array, offset: number) =>
+ i8(valueFromBytes(workingDataI8, buf, offset))
+);
+export const TypeU8 = new ScalarType('u8', 1, (buf: Uint8Array, offset: number) =>
+ u8(valueFromBytes(workingDataU8, buf, offset))
+);
+export const TypeBool = new ScalarType('bool', 4, (buf: Uint8Array, offset: number) =>
+ bool(valueFromBytes(workingDataU32, buf, offset) !== 0)
+);
+
+/** @returns the ScalarType from the ScalarKind */
+export function scalarType(kind: ScalarKind): ScalarType {
+ switch (kind) {
+ case 'abstract-float':
+ return TypeAbstractFloat;
+ case 'f64':
+ return TypeF64;
+ case 'f32':
+ return TypeF32;
+ case 'f16':
+ return TypeF16;
+ case 'u32':
+ return TypeU32;
+ case 'u16':
+ return TypeU16;
+ case 'u8':
+ return TypeU8;
+ case 'i32':
+ return TypeI32;
+ case 'i16':
+ return TypeI16;
+ case 'i8':
+ return TypeI8;
+ case 'bool':
+ return TypeBool;
+ }
+}
+
+/** @returns the number of scalar (element) types of the given Type */
+export function numElementsOf(ty: Type): number {
+ if (ty instanceof ScalarType) {
+ return 1;
+ }
+ if (ty instanceof VectorType) {
+ return ty.width;
+ }
+ if (ty instanceof MatrixType) {
+ return ty.cols * ty.rows;
+ }
+ throw new Error(`unhandled type ${ty}`);
+}
+
+/** @returns the scalar elements of the given Value */
+export function elementsOf(value: Value): Scalar[] {
+ if (value instanceof Scalar) {
+ return [value];
+ }
+ if (value instanceof Vector) {
+ return value.elements;
+ }
+ if (value instanceof Matrix) {
+ return value.elements.flat();
+ }
+ throw new Error(`unhandled value ${value}`);
+}
+
+/** @returns the scalar (element) type of the given Type */
+export function scalarTypeOf(ty: Type): ScalarType {
+ if (ty instanceof ScalarType) {
+ return ty;
+ }
+ if (ty instanceof VectorType) {
+ return ty.elementType;
+ }
+ if (ty instanceof MatrixType) {
+ return ty.elementType;
+ }
+ throw new Error(`unhandled type ${ty}`);
+}
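+
+// Usage sketch (illustrative only; not called by the suite): how the helpers
+// above decompose a composite type. TypeVec and TypeF32 are defined earlier in
+// this file.
+function exampleTypeIntrospection(): void {
+  const ty = TypeVec(3, TypeF32); // vec3<f32>
+  assert(numElementsOf(ty) === 3);
+  assert(scalarTypeOf(ty) === TypeF32);
+  assert(ty.size === 12); // 3 elements * 4 bytes each
+}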
+
+/** ScalarValue is the JS type that can be held by a Scalar */
+type ScalarValue = boolean | number;
+
+/** Class that encapsulates a single scalar value of various types. */
+export class Scalar {
+ readonly value: ScalarValue; // The scalar value
+ readonly type: ScalarType; // The type of the scalar
+
+ // The scalar value, packed into one or two 32-bit unsigned integers.
+ // Whether `bits1` is used depends on `this.type.size`.
+ readonly bits1: number;
+ readonly bits0: number;
+
+ public constructor(type: ScalarType, value: ScalarValue, bits1: number, bits0: number) {
+ this.value = value;
+ this.type = type;
+ this.bits1 = bits1;
+ this.bits0 = bits0;
+ }
+
+ /**
+ * Copies the scalar value to the buffer at the provided byte offset.
+ * @param buffer the destination buffer
+ * @param offset the offset in buffer, in units of `buffer`
+ */
+ public copyTo(buffer: TypedArrayBufferView, offset: number) {
+ assert(this.type.kind !== 'f64', `Copying f64 values to/from buffers is not defined`);
+ workingDataU32[1] = this.bits1;
+ workingDataU32[0] = this.bits0;
+ for (let i = 0; i < this.type.size; i++) {
+ buffer[offset + i] = workingDataU8[i];
+ }
+ }
+
+ /**
+ * @returns the WGSL representation of this scalar value
+ */
+ public wgsl(): string {
+ const withPoint = (x: number) => {
+ const str = `${x}`;
+ return str.indexOf('.') > 0 || str.indexOf('e') > 0 ? str : `${str}.0`;
+ };
+ if (isFinite(this.value as number)) {
+ switch (this.type.kind) {
+ case 'abstract-float':
+ return `${withPoint(this.value as number)}`;
+ case 'f64':
+ return `${withPoint(this.value as number)}`;
+ case 'f32':
+ return `${withPoint(this.value as number)}f`;
+ case 'f16':
+ return `${withPoint(this.value as number)}h`;
+ case 'u32':
+ return `${this.value}u`;
+ case 'i32':
+ return `i32(${this.value})`;
+ case 'bool':
+ return `${this.value}`;
+ }
+ }
+ throw new Error(
+ `scalar of value ${this.value} and type ${this.type} has no WGSL representation`
+ );
+ }
+
+ public toString(): string {
+ if (this.type.kind === 'bool') {
+ return Colors.bold(this.value.toString());
+ }
+ switch (this.value) {
+ case Infinity:
+ case -Infinity:
+ return Colors.bold(this.value.toString());
+ default: {
+ workingDataU32[1] = this.bits1;
+ workingDataU32[0] = this.bits0;
+ let hex = '';
+ for (let i = 0; i < this.type.size; ++i) {
+ hex = workingDataU8[i].toString(16).padStart(2, '0') + hex;
+ }
+ const n = this.value as Number;
+ if (n !== null && isFloatValue(this)) {
+ let str = this.value.toString();
+ str = str.indexOf('.') > 0 || str.indexOf('e') > 0 ? str : `${str}.0`;
+ switch (this.type.kind) {
+ case 'abstract-float':
+ return isSubnormalNumberF64(n.valueOf())
+ ? `${Colors.bold(str)} (0x${hex} subnormal)`
+ : `${Colors.bold(str)} (0x${hex})`;
+ case 'f64':
+ return isSubnormalNumberF64(n.valueOf())
+ ? `${Colors.bold(str)} (0x${hex} subnormal)`
+ : `${Colors.bold(str)} (0x${hex})`;
+ case 'f32':
+ return isSubnormalNumberF32(n.valueOf())
+ ? `${Colors.bold(str)} (0x${hex} subnormal)`
+ : `${Colors.bold(str)} (0x${hex})`;
+ case 'f16':
+ return isSubnormalNumberF16(n.valueOf())
+ ? `${Colors.bold(str)} (0x${hex} subnormal)`
+ : `${Colors.bold(str)} (0x${hex})`;
+ default:
+ unreachable(
+ `Printing of floating point kind ${this.type.kind} is not implemented...`
+ );
+ }
+ }
+ return `${Colors.bold(this.value.toString())} (0x${hex})`;
+ }
+ }
+ }
+}
+
+export interface ScalarBuilder {
+ (value: number): Scalar;
+}
+
+/** Create a Scalar of `type` by storing `value` as an element of `workingDataArray` and retrieving it.
+ * The working data array *must* be an alias of `workingData`.
+ */
+function scalarFromValue(
+ type: ScalarType,
+ workingDataArray: TypedArrayBufferView,
+ value: number
+): Scalar {
+ // Clear all bits of the working data since `value` may be smaller; the upper bits should be 0.
+ workingDataU32[1] = 0;
+ workingDataU32[0] = 0;
+ workingDataArray[0] = value;
+ return new Scalar(type, workingDataArray[0], workingDataU32[1], workingDataU32[0]);
+}
+
+/** Create a Scalar of `type` by storing `value` as an element of `workingDataStoreArray` and
+ * reinterpreting it as an element of `workingDataLoadArray`.
+ * Both working data arrays *must* be aliases of `workingData`.
+ */
+function scalarFromBits(
+ type: ScalarType,
+ workingDataStoreArray: TypedArrayBufferView,
+ workingDataLoadArray: TypedArrayBufferView,
+ bits: number
+): Scalar {
+ // Clear all bits of the working data since `bits` may be smaller; the upper bits should be 0.
+ workingDataU32[1] = 0;
+ workingDataU32[0] = 0;
+ workingDataStoreArray[0] = bits;
+ return new Scalar(type, workingDataLoadArray[0], workingDataU32[1], workingDataU32[0]);
+}
+
+/** Create an AbstractFloat from a numeric value, a JS `number`. */
+export const abstractFloat = (value: number): Scalar =>
+ scalarFromValue(TypeAbstractFloat, workingDataF64, value);
+
+/** Create an f64 from a numeric value, a JS `number`. */
+export const f64 = (value: number): Scalar => scalarFromValue(TypeF64, workingDataF64, value);
+
+/** Create an f32 from a numeric value, a JS `number`. */
+export const f32 = (value: number): Scalar => scalarFromValue(TypeF32, workingDataF32, value);
+
+/** Create an f16 from a numeric value, a JS `number`. */
+export const f16 = (value: number): Scalar => scalarFromValue(TypeF16, workingDataF16, value);
+
+/** Create an f32 from a bit representation, a uint32 represented as a JS `number`. */
+export const f32Bits = (bits: number): Scalar =>
+ scalarFromBits(TypeF32, workingDataU32, workingDataF32, bits);
+
+/** Create an f16 from a bit representation, a uint16 represented as a JS `number`. */
+export const f16Bits = (bits: number): Scalar =>
+ scalarFromBits(TypeF16, workingDataU16, workingDataF16, bits);
+
+/** Create an i32 from a numeric value, a JS `number`. */
+export const i32 = (value: number): Scalar => scalarFromValue(TypeI32, workingDataI32, value);
+
+/** Create an i16 from a numeric value, a JS `number`. */
+export const i16 = (value: number): Scalar => scalarFromValue(TypeI16, workingDataI16, value);
+
+/** Create an i8 from a numeric value, a JS `number`. */
+export const i8 = (value: number): Scalar => scalarFromValue(TypeI8, workingDataI8, value);
+
+/** Create an i32 from a bit representation, a uint32 represented as a JS `number`. */
+export const i32Bits = (bits: number): Scalar =>
+ scalarFromBits(TypeI32, workingDataU32, workingDataI32, bits);
+
+/** Create an i16 from a bit representation, a uint16 represented as a JS `number`. */
+export const i16Bits = (bits: number): Scalar =>
+ scalarFromBits(TypeI16, workingDataU16, workingDataI16, bits);
+
+/** Create an i8 from a bit representation, a uint8 represented as a JS `number`. */
+export const i8Bits = (bits: number): Scalar =>
+ scalarFromBits(TypeI8, workingDataU8, workingDataI8, bits);
+
+/** Create a u32 from a numeric value, a JS `number`. */
+export const u32 = (value: number): Scalar => scalarFromValue(TypeU32, workingDataU32, value);
+
+/** Create a u16 from a numeric value, a JS `number`. */
+export const u16 = (value: number): Scalar => scalarFromValue(TypeU16, workingDataU16, value);
+
+/** Create a u8 from a numeric value, a JS `number`. */
+export const u8 = (value: number): Scalar => scalarFromValue(TypeU8, workingDataU8, value);
+
+/** Create an u32 from a bit representation, a uint32 represented as a JS `number`. */
+export const u32Bits = (bits: number): Scalar =>
+ scalarFromBits(TypeU32, workingDataU32, workingDataU32, bits);
+
+/** Create an u16 from a bit representation, a uint16 represented as a JS `number`. */
+export const u16Bits = (bits: number): Scalar =>
+ scalarFromBits(TypeU16, workingDataU16, workingDataU16, bits);
+
+/** Create an u8 from a bit representation, a uint8 represented as a JS `number`. */
+export const u8Bits = (bits: number): Scalar =>
+ scalarFromBits(TypeU8, workingDataU8, workingDataU8, bits);
+
+/** Create a boolean value. */
+export function bool(value: boolean): Scalar {
+ // WGSL does not support using 'bool' types directly in storage / uniform
+ // buffers, so instead we pack booleans in a u32, where 'false' is zero and
+ // 'true' is any non-zero value.
+ workingDataU32[0] = value ? 1 : 0;
+ workingDataU32[1] = 0;
+ return new Scalar(TypeBool, value, workingDataU32[1], workingDataU32[0]);
+}
+
+/** A 'true' literal value */
+export const True = bool(true);
+
+/** A 'false' literal value */
+export const False = bool(false);
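+
+// Usage sketch (illustrative only; not called by the suite): constructing
+// scalars from plain values and from bit patterns, writing one into a byte
+// buffer, and reading it back with the matching ScalarType.
+function exampleScalarRoundTrip(): void {
+  const a = f32(1.5); // from a JS number
+  const b = f32Bits(0x3fc00000); // the same value, from its IEEE-754 bit pattern
+  assert(a.value === b.value);
+
+  const bytes = new Uint8Array(4);
+  a.copyTo(bytes, 0); // raw f32 bytes at offset 0
+  assert(TypeF32.read(bytes, 0).value === 1.5);
+}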
+
+/**
+ * Class that encapsulates a vector value.
+ */
+export class Vector {
+ readonly elements: Array<Scalar>;
+ readonly type: VectorType;
+
+ public constructor(elements: Array<Scalar>) {
+ if (elements.length < 2 || elements.length > 4) {
+ throw new Error(`vector element count must be between 2 and 4, got ${elements.length}`);
+ }
+ for (let i = 1; i < elements.length; i++) {
+ const a = elements[0].type;
+ const b = elements[i].type;
+ if (a !== b) {
+ throw new Error(
+ `cannot mix vector element types. Found elements with types '${a}' and '${b}'`
+ );
+ }
+ }
+ this.elements = elements;
+ this.type = TypeVec(elements.length, elements[0].type);
+ }
+
+ /**
+ * Copies the vector value to the Uint8Array buffer at the provided byte offset.
+ * @param buffer the destination buffer
+ * @param offset the byte offset within buffer
+ */
+ public copyTo(buffer: Uint8Array, offset: number) {
+ for (const element of this.elements) {
+ element.copyTo(buffer, offset);
+ offset += this.type.elementType.size;
+ }
+ }
+
+ /**
+ * @returns the WGSL representation of this vector value
+ */
+ public wgsl(): string {
+ const els = this.elements.map(v => v.wgsl()).join(', ');
+ return `vec${this.type.width}(${els})`;
+ }
+
+ public toString(): string {
+ return `${this.type}(${this.elements.map(e => e.toString()).join(', ')})`;
+ }
+
+ public get x() {
+ assert(0 < this.elements.length);
+ return this.elements[0];
+ }
+
+ public get y() {
+ assert(1 < this.elements.length);
+ return this.elements[1];
+ }
+
+ public get z() {
+ assert(2 < this.elements.length);
+ return this.elements[2];
+ }
+
+ public get w() {
+ assert(3 < this.elements.length);
+ return this.elements[3];
+ }
+}
+
+/** Helper for constructing a new two-element vector with the provided values */
+export function vec2(x: Scalar, y: Scalar) {
+ return new Vector([x, y]);
+}
+
+/** Helper for constructing a new three-element vector with the provided values */
+export function vec3(x: Scalar, y: Scalar, z: Scalar) {
+ return new Vector([x, y, z]);
+}
+
+/** Helper for constructing a new four-element vector with the provided values */
+export function vec4(x: Scalar, y: Scalar, z: Scalar, w: Scalar) {
+ return new Vector([x, y, z, w]);
+}
+
+/**
+ * Helper for constructing Vectors from arrays of numbers
+ *
+ * @param v array of numbers to be converted, must contain 2, 3 or 4 elements
+ * @param op function to convert from number to Scalar, e.g. `f32`
+ */
+export function toVector(v: readonly number[], op: (n: number) => Scalar): Vector {
+ switch (v.length) {
+ case 2:
+ return vec2(op(v[0]), op(v[1]));
+ case 3:
+ return vec3(op(v[0]), op(v[1]), op(v[2]));
+ case 4:
+ return vec4(op(v[0]), op(v[1]), op(v[2]), op(v[3]));
+ }
+ unreachable(`input to 'toVector' must contain 2, 3, or 4 elements`);
+}
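+
+// Usage sketch (illustrative only; not called by the suite): building vectors
+// either element-by-element or from a plain number array, and emitting WGSL.
+function exampleVectors(): void {
+  const v = vec3(f32(1), f32(2), f32(3));
+  const w = toVector([1, 2, 3], f32); // the same vector, built from numbers
+  assert(v.type === w.type); // vec3<f32>; VectorTypes are interned by TypeVec
+  assert(v.wgsl() === 'vec3(1.0f, 2.0f, 3.0f)');
+}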
+
+/**
+ * Class that encapsulates a Matrix value.
+ */
+export class Matrix {
+ readonly elements: Scalar[][];
+ readonly type: MatrixType;
+
+ public constructor(elements: Array<Array<Scalar>>) {
+ const num_cols = elements.length;
+ if (num_cols < 2 || num_cols > 4) {
+ throw new Error(`matrix cols count must be between 2 and 4, got ${num_cols}`);
+ }
+
+ const num_rows = elements[0].length;
+ if (!elements.every(c => c.length === num_rows)) {
+ throw new Error(`cannot mix matrix column lengths`);
+ }
+
+ if (num_rows < 2 || num_rows > 4) {
+ throw new Error(`matrix rows count must be between 2 and 4, got ${num_rows}`);
+ }
+
+ const elem_type = elements[0][0].type;
+ if (!elements.every(c => c.every(r => objectEquals(r.type, elem_type)))) {
+ throw new Error(`cannot mix matrix element types`);
+ }
+
+ this.elements = elements;
+ this.type = TypeMat(num_cols, num_rows, elem_type);
+ }
+
+ /**
+ * Copies the matrix value to the Uint8Array buffer at the provided byte offset.
+ * @param buffer the destination buffer
+ * @param offset the byte offset within buffer
+ */
+ public copyTo(buffer: Uint8Array, offset: number) {
+ for (let i = 0; i < this.type.cols; i++) {
+ for (let j = 0; j < this.type.rows; j++) {
+ this.elements[i][j].copyTo(buffer, offset);
+ offset += this.type.elementType.size;
+ }
+
+ // vec3 columns have one element of padding, which must be skipped when writing matrices.
+ if (this.type.rows === 3) {
+ offset += this.type.elementType.size;
+ }
+ }
+ }
+
+ /**
+ * @returns the WGSL representation of this matrix value
+ */
+ public wgsl(): string {
+ const els = this.elements.flatMap(c => c.map(r => r.wgsl())).join(', ');
+ return `mat${this.type.cols}x${this.type.rows}(${els})`;
+ }
+
+ public toString(): string {
+ return `${this.type}(${this.elements.map(c => c.join(', ')).join(', ')})`;
+ }
+}
+
+/**
+ * Helper for constructing Matrices from arrays of numbers
+ *
+ * @param m array of arrays of numbers to be converted; all inner arrays must
+ * have the same length, and each must have 2, 3, or 4 elements.
+ * @param op function to convert from number to Scalar, e.g. `f32`
+ */
+export function toMatrix(m: ROArrayArray<number>, op: (n: number) => Scalar): Matrix {
+ const cols = m.length;
+ const rows = m[0].length;
+ const elements: Scalar[][] = [...Array<Scalar[]>(cols)].map(_ => [...Array<Scalar>(rows)]);
+ for (let i = 0; i < cols; i++) {
+ for (let j = 0; j < rows; j++) {
+ elements[i][j] = op(m[i][j]);
+ }
+ }
+
+ return new Matrix(elements);
+}
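+
+// Usage sketch (illustrative only; not called by the suite): building a
+// 2-column, 3-row f32 matrix from plain numbers. When copied to a buffer, each
+// three-row f32 column occupies 16 bytes (12 bytes of data plus 4 of padding),
+// matching WGSL's vec3 alignment.
+function exampleMatrix(): void {
+  const m = toMatrix(
+    [
+      [1, 2, 3],
+      [4, 5, 6],
+    ],
+    f32
+  );
+  assert(m.type.toString() === 'mat2x3<f32>');
+  assert(m.wgsl() === 'mat2x3(1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f)');
+}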
+
+/** Value is a Scalar, Vector, or Matrix value. */
+export type Value = Scalar | Vector | Matrix;
+
+export type SerializedValueScalar = {
+ kind: 'scalar';
+ type: ScalarKind;
+ value: boolean | number;
+};
+
+export type SerializedValueVector = {
+ kind: 'vector';
+ type: ScalarKind;
+ value: boolean[] | readonly number[];
+};
+
+export type SerializedValueMatrix = {
+ kind: 'matrix';
+ type: ScalarKind;
+ value: ROArrayArray<number>;
+};
+
+enum SerializedScalarKind {
+ AbstractFloat,
+ F64,
+ F32,
+ F16,
+ U32,
+ U16,
+ U8,
+ I32,
+ I16,
+ I8,
+ Bool,
+}
+
+/** serializeScalarKind() serializes a ScalarKind to a BinaryStream */
+function serializeScalarKind(s: BinaryStream, v: ScalarKind) {
+ switch (v) {
+ case 'abstract-float':
+ s.writeU8(SerializedScalarKind.AbstractFloat);
+ return;
+ case 'f64':
+ s.writeU8(SerializedScalarKind.F64);
+ return;
+ case 'f32':
+ s.writeU8(SerializedScalarKind.F32);
+ return;
+ case 'f16':
+ s.writeU8(SerializedScalarKind.F16);
+ return;
+ case 'u32':
+ s.writeU8(SerializedScalarKind.U32);
+ return;
+ case 'u16':
+ s.writeU8(SerializedScalarKind.U16);
+ return;
+ case 'u8':
+ s.writeU8(SerializedScalarKind.U8);
+ return;
+ case 'i32':
+ s.writeU8(SerializedScalarKind.I32);
+ return;
+ case 'i16':
+ s.writeU8(SerializedScalarKind.I16);
+ return;
+ case 'i8':
+ s.writeU8(SerializedScalarKind.I8);
+ return;
+ case 'bool':
+ s.writeU8(SerializedScalarKind.Bool);
+ return;
+ }
+}
+
+/** deserializeScalarKind() deserializes a ScalarKind from a BinaryStream */
+function deserializeScalarKind(s: BinaryStream): ScalarKind {
+ const kind = s.readU8();
+ switch (kind) {
+ case SerializedScalarKind.AbstractFloat:
+ return 'abstract-float';
+ case SerializedScalarKind.F64:
+ return 'f64';
+ case SerializedScalarKind.F32:
+ return 'f32';
+ case SerializedScalarKind.F16:
+ return 'f16';
+ case SerializedScalarKind.U32:
+ return 'u32';
+ case SerializedScalarKind.U16:
+ return 'u16';
+ case SerializedScalarKind.U8:
+ return 'u8';
+ case SerializedScalarKind.I32:
+ return 'i32';
+ case SerializedScalarKind.I16:
+ return 'i16';
+ case SerializedScalarKind.I8:
+ return 'i8';
+ case SerializedScalarKind.Bool:
+ return 'bool';
+ default:
+ unreachable(`invalid serialized ScalarKind: ${kind}`);
+ }
+}
+
+enum SerializedValueKind {
+ Scalar,
+ Vector,
+ Matrix,
+}
+
+/** serializeValue() serializes a Value to a BinaryStream */
+export function serializeValue(s: BinaryStream, v: Value) {
+ const serializeScalar = (scalar: Scalar, kind: ScalarKind) => {
+ switch (kind) {
+ case 'abstract-float':
+ s.writeF64(scalar.value as number);
+ return;
+ case 'f64':
+ s.writeF64(scalar.value as number);
+ return;
+ case 'f32':
+ s.writeF32(scalar.value as number);
+ return;
+ case 'f16':
+ s.writeF16(scalar.value as number);
+ return;
+ case 'u32':
+ s.writeU32(scalar.value as number);
+ return;
+ case 'u16':
+ s.writeU16(scalar.value as number);
+ return;
+ case 'u8':
+ s.writeU8(scalar.value as number);
+ return;
+ case 'i32':
+ s.writeI32(scalar.value as number);
+ return;
+ case 'i16':
+ s.writeI16(scalar.value as number);
+ return;
+ case 'i8':
+ s.writeI8(scalar.value as number);
+ return;
+ case 'bool':
+ s.writeBool(scalar.value as boolean);
+ return;
+ }
+ };
+
+ if (v instanceof Scalar) {
+ s.writeU8(SerializedValueKind.Scalar);
+ serializeScalarKind(s, v.type.kind);
+ serializeScalar(v, v.type.kind);
+ return;
+ }
+ if (v instanceof Vector) {
+ s.writeU8(SerializedValueKind.Vector);
+ serializeScalarKind(s, v.type.elementType.kind);
+ s.writeU8(v.type.width);
+ for (const element of v.elements) {
+ serializeScalar(element, v.type.elementType.kind);
+ }
+ return;
+ }
+ if (v instanceof Matrix) {
+ s.writeU8(SerializedValueKind.Matrix);
+ serializeScalarKind(s, v.type.elementType.kind);
+ s.writeU8(v.type.cols);
+ s.writeU8(v.type.rows);
+ for (const column of v.elements) {
+ for (const element of column) {
+ serializeScalar(element, v.type.elementType.kind);
+ }
+ }
+ return;
+ }
+
+ unreachable(`unhandled value type: ${v}`);
+}
+
+/** deserializeValue() deserializes a Value from a BinaryStream */
+export function deserializeValue(s: BinaryStream): Value {
+ const deserializeScalar = (kind: ScalarKind) => {
+ switch (kind) {
+ case 'abstract-float':
+ return abstractFloat(s.readF64());
+ case 'f64':
+ return f64(s.readF64());
+ case 'f32':
+ return f32(s.readF32());
+ case 'f16':
+ return f16(s.readF16());
+ case 'u32':
+ return u32(s.readU32());
+ case 'u16':
+ return u16(s.readU16());
+ case 'u8':
+ return u8(s.readU8());
+ case 'i32':
+ return i32(s.readI32());
+ case 'i16':
+ return i16(s.readI16());
+ case 'i8':
+ return i8(s.readI8());
+ case 'bool':
+ return bool(s.readBool());
+ }
+ };
+ const valueKind = s.readU8();
+ const scalarKind = deserializeScalarKind(s);
+ switch (valueKind) {
+ case SerializedValueKind.Scalar:
+ return deserializeScalar(scalarKind);
+ case SerializedValueKind.Vector: {
+ const width = s.readU8();
+ const scalars = new Array<Scalar>(width);
+ for (let i = 0; i < width; i++) {
+ scalars[i] = deserializeScalar(scalarKind);
+ }
+ return new Vector(scalars);
+ }
+ case SerializedValueKind.Matrix: {
+ const numCols = s.readU8();
+ const numRows = s.readU8();
+ const columns = new Array<Scalar[]>(numCols);
+ for (let c = 0; c < numCols; c++) {
+ columns[c] = new Array<Scalar>(numRows);
+ for (let i = 0; i < numRows; i++) {
+ columns[c][i] = deserializeScalar(scalarKind);
+ }
+ }
+ return new Matrix(columns);
+ }
+ default:
+ unreachable(`invalid serialized value kind: ${valueKind}`);
+ }
+}
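+
+// Usage sketch (illustrative only; not called by the suite): a Value written
+// with serializeValue() can be read back with deserializeValue() from a stream
+// positioned over the same bytes. How the BinaryStream instances are created
+// is left to the caller here.
+function exampleValueSerialization(writer: BinaryStream, reader: BinaryStream): Value {
+  serializeValue(writer, vec2(u32(1), u32(2)));
+  // Assuming `reader` is positioned over the bytes written above, this returns
+  // an equal vec2<u32>.
+  return deserializeValue(reader);
+}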
+
+/** @returns if the Value is a float scalar type */
+export function isFloatValue(v: Value): boolean {
+ return isFloatType(v.type);
+}
+
+/**
+ * @returns if `ty` is an abstract numeric type.
+ * @note this does not consider composite types.
+ * Use elementType() if you want to test the element type.
+ */
+export function isAbstractType(ty: Type): boolean {
+ if (ty instanceof ScalarType) {
+ return ty.kind === 'abstract-float';
+ }
+ return false;
+}
+
+/**
+ * @returns if `ty` is a floating point type.
+ * @note this does not consider composite types.
+ * Use elementType() if you want to test the element type.
+ */
+export function isFloatType(ty: Type): boolean {
+ if (ty instanceof ScalarType) {
+ return (
+ ty.kind === 'abstract-float' || ty.kind === 'f64' || ty.kind === 'f32' || ty.kind === 'f16'
+ );
+ }
+ return false;
+}
+
+/// All floating-point scalar types
+export const kAllFloatScalars = [TypeAbstractFloat, TypeF32, TypeF16] as const;
+
+/// All floating-point vec2 types
+export const kAllFloatVector2 = [
+ TypeVec(2, TypeAbstractFloat),
+ TypeVec(2, TypeF32),
+ TypeVec(2, TypeF16),
+] as const;
+
+/// All floating-point vec3 types
+export const kAllFloatVector3 = [
+ TypeVec(3, TypeAbstractFloat),
+ TypeVec(3, TypeF32),
+ TypeVec(3, TypeF16),
+] as const;
+
+/// All floating-point vec4 types
+export const kAllFloatVector4 = [
+ TypeVec(4, TypeAbstractFloat),
+ TypeVec(4, TypeF32),
+ TypeVec(4, TypeF16),
+] as const;
+
+/// All floating-point vector types
+export const kAllFloatVectors = [
+ ...kAllFloatVector2,
+ ...kAllFloatVector3,
+ ...kAllFloatVector4,
+] as const;
+
+/// All floating-point scalar and vector types
+export const kAllFloatScalarsAndVectors = [...kAllFloatScalars, ...kAllFloatVectors] as const;
+
+/// All integer scalar and vector types
+export const kAllIntegerScalarsAndVectors = [
+ TypeI32,
+ TypeVec(2, TypeI32),
+ TypeVec(3, TypeI32),
+ TypeVec(4, TypeI32),
+ TypeU32,
+ TypeVec(2, TypeU32),
+ TypeVec(3, TypeU32),
+ TypeVec(4, TypeU32),
+] as const;
+
+/// All signed integer scalar and vector types
+export const kAllSignedIntegerScalarsAndVectors = [
+ TypeI32,
+ TypeVec(2, TypeI32),
+ TypeVec(3, TypeI32),
+ TypeVec(4, TypeI32),
+] as const;
+
+/// All unsigned integer scalar and vector types
+export const kAllUnsignedIntegerScalarsAndVectors = [
+ TypeU32,
+ TypeVec(2, TypeU32),
+ TypeVec(3, TypeU32),
+ TypeVec(4, TypeU32),
+] as const;
+
+/// All floating-point and integer scalar and vector types
+export const kAllFloatAndIntegerScalarsAndVectors = [
+ ...kAllFloatScalarsAndVectors,
+ ...kAllIntegerScalarsAndVectors,
+] as const;
+
+/// All floating-point and signed integer scalar and vector types
+export const kAllFloatAndSignedIntegerScalarsAndVectors = [
+ ...kAllFloatScalarsAndVectors,
+ ...kAllSignedIntegerScalarsAndVectors,
+] as const;
+
+/** @returns the inner element type of the given type */
+export function elementType(t: ScalarType | VectorType | MatrixType) {
+ if (t instanceof ScalarType) {
+ return t;
+ }
+ return t.elementType;
+}
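+
+// Usage sketch (illustrative only; not called by the suite): the kAll* lists
+// above are intended as parameterization inputs; elementType() and
+// isAbstractType() let a test drop the abstract-float entries when it only
+// targets concrete types.
+function exampleConcreteFloatTypes(): readonly Type[] {
+  return kAllFloatScalarsAndVectors.filter(ty => !isAbstractType(elementType(ty)));
+}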
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/copy_to_texture.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/copy_to_texture.ts
new file mode 100644
index 0000000000..8e0444ffea
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/copy_to_texture.ts
@@ -0,0 +1,192 @@
+import { assert, memcpy } from '../../common/util/util.js';
+import { RegularTextureFormat } from '../format_info.js';
+import { GPUTest, TextureTestMixin } from '../gpu_test.js';
+import { reifyExtent3D, reifyOrigin3D } from '../util/unions.js';
+
+import { makeInPlaceColorConversion } from './color_space_conversion.js';
+import { TexelView } from './texture/texel_view.js';
+import { TexelCompareOptions } from './texture/texture_ok.js';
+
+/**
+ * Predefined copy sub-rect configurations.
+ */
+export const kCopySubrectInfo = [
+ {
+ srcOrigin: { x: 2, y: 2 },
+ dstOrigin: { x: 0, y: 0, z: 0 },
+ srcSize: { width: 16, height: 16 },
+ dstSize: { width: 4, height: 4 },
+ copyExtent: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ },
+ {
+ srcOrigin: { x: 10, y: 2 },
+ dstOrigin: { x: 0, y: 0, z: 0 },
+ srcSize: { width: 16, height: 16 },
+ dstSize: { width: 4, height: 4 },
+ copyExtent: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ },
+ {
+ srcOrigin: { x: 2, y: 10 },
+ dstOrigin: { x: 0, y: 0, z: 0 },
+ srcSize: { width: 16, height: 16 },
+ dstSize: { width: 4, height: 4 },
+ copyExtent: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ },
+ {
+ srcOrigin: { x: 10, y: 10 },
+ dstOrigin: { x: 0, y: 0, z: 0 },
+ srcSize: { width: 16, height: 16 },
+ dstSize: { width: 4, height: 4 },
+ copyExtent: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ },
+ {
+ srcOrigin: { x: 2, y: 2 },
+ dstOrigin: { x: 2, y: 2, z: 0 },
+ srcSize: { width: 16, height: 16 },
+ dstSize: { width: 16, height: 16 },
+ copyExtent: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ },
+ {
+ srcOrigin: { x: 10, y: 2 },
+ dstOrigin: { x: 2, y: 2, z: 0 },
+ srcSize: { width: 16, height: 16 },
+ dstSize: { width: 16, height: 16 },
+ copyExtent: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ },
+] as const;
+
+export class CopyToTextureUtils extends TextureTestMixin(GPUTest) {
+ doFlipY(
+ sourcePixels: Uint8ClampedArray,
+ width: number,
+ height: number,
+ bytesPerPixel: number
+ ): Uint8ClampedArray {
+ const dstPixels = new Uint8ClampedArray(width * height * bytesPerPixel);
+ for (let i = 0; i < height; ++i) {
+ for (let j = 0; j < width; ++j) {
+ const srcPixelPos = i * width + j;
+ // WebGL readPixels returns pixels with a bottom-left origin, while
+ // copyExternalImageToTexture copies from a WebGL canvas with a top-left origin,
+ // so the expected data derived from readPixels must be flipped vertically.
+ const dstPixelPos = (height - i - 1) * width + j;
+
+ memcpy(
+ { src: sourcePixels, start: srcPixelPos * bytesPerPixel, length: bytesPerPixel },
+ { dst: dstPixels, start: dstPixelPos * bytesPerPixel }
+ );
+ }
+ }
+
+ return dstPixels;
+ }
+
+ getExpectedDstPixelsFromSrcPixels({
+ srcPixels,
+ srcOrigin,
+ srcSize,
+ dstOrigin,
+ dstSize,
+ subRectSize,
+ format,
+ flipSrcBeforeCopy,
+ srcDoFlipYDuringCopy,
+ conversion,
+ }: {
+ srcPixels: Uint8ClampedArray;
+ srcOrigin: GPUOrigin2D;
+ srcSize: GPUExtent3D;
+ dstOrigin: GPUOrigin3D;
+ dstSize: GPUExtent3D;
+ subRectSize: GPUExtent3D;
+ format: RegularTextureFormat;
+ flipSrcBeforeCopy: boolean;
+ srcDoFlipYDuringCopy: boolean;
+ conversion: {
+ srcPremultiplied: boolean;
+ dstPremultiplied: boolean;
+ srcColorSpace?: PredefinedColorSpace;
+ dstColorSpace?: PredefinedColorSpace;
+ };
+ }): TexelView {
+ const applyConversion = makeInPlaceColorConversion(conversion);
+
+ const reifySrcOrigin = reifyOrigin3D(srcOrigin);
+ const reifySrcSize = reifyExtent3D(srcSize);
+ const reifyDstOrigin = reifyOrigin3D(dstOrigin);
+ const reifyDstSize = reifyExtent3D(dstSize);
+ const reifySubRectSize = reifyExtent3D(subRectSize);
+
+ assert(
+ reifyDstOrigin.x + reifySubRectSize.width <= reifyDstSize.width &&
+ reifyDstOrigin.y + reifySubRectSize.height <= reifyDstSize.height,
+ 'subrect is out of bounds'
+ );
+
+ const divide = 255.0;
+ return TexelView.fromTexelsAsColors(
+ format,
+ coords => {
+ assert(
+ coords.x >= reifyDstOrigin.x &&
+ coords.y >= reifyDstOrigin.y &&
+ coords.x < reifyDstOrigin.x + reifySubRectSize.width &&
+ coords.y < reifyDstOrigin.y + reifySubRectSize.height &&
+ coords.z === 0,
+ 'out of bounds'
+ );
+ // Map dst coords to get candidate src pixel position in y.
+ let yInSubRect = coords.y - reifyDstOrigin.y;
+
+ // If srcDoFlipYDuringCopy is true, a flipY operation was applied to the source during the copy.
+ // The WebGPU spec requires the origin option to be relative to the top-left corner of the
+ // source image, increasing downward:
+ // https://www.w3.org/TR/webgpu/#dom-gpuimagecopyexternalimage-flipy
+ // The flip applies only to the contents of the copy rect; the source origin stays top-left.
+ // Get the candidate source y by mirroring within the copy sub-rect.
+ if (srcDoFlipYDuringCopy) yInSubRect = reifySubRectSize.height - 1 - yInSubRect;
+
+ let src_y = yInSubRect + reifySrcOrigin.y;
+
+ // The test may have generated a flipped source from srcPixels, e.g. by creating an ImageBitmap
+ // from srcPixels with its orientation set to 'flipY'.
+ // Get the candidate source y by mirroring within the source.
+ if (flipSrcBeforeCopy) src_y = reifySrcSize.height - src_y - 1;
+
+ const pixelPos =
+ src_y * reifySrcSize.width + (coords.x - reifyDstOrigin.x) + reifySrcOrigin.x;
+
+ const rgba = {
+ R: srcPixels[pixelPos * 4] / divide,
+ G: srcPixels[pixelPos * 4 + 1] / divide,
+ B: srcPixels[pixelPos * 4 + 2] / divide,
+ A: srcPixels[pixelPos * 4 + 3] / divide,
+ };
+ applyConversion(rgba);
+ return rgba;
+ },
+ { clampToFormatRange: true }
+ );
+ }
+
+ doTestAndCheckResult(
+ imageCopyExternalImage: GPUImageCopyExternalImage,
+ dstTextureCopyView: GPUImageCopyTextureTagged,
+ expTexelView: TexelView,
+ copySize: Required<GPUExtent3DDict>,
+ texelCompareOptions: TexelCompareOptions
+ ): void {
+ this.device.queue.copyExternalImageToTexture(
+ imageCopyExternalImage,
+ dstTextureCopyView,
+ copySize
+ );
+
+ this.expectTexelViewComparisonIsOkInTexture(
+ { texture: dstTextureCopyView.texture, origin: dstTextureCopyView.origin },
+ expTexelView,
+ copySize,
+ texelCompareOptions
+ );
+ this.trackForCleanup(dstTextureCopyView.texture);
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/create_elements.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/create_elements.ts
new file mode 100644
index 0000000000..71d48ecc07
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/create_elements.ts
@@ -0,0 +1,82 @@
+import { Fixture } from '../../common/framework/fixture.js';
+import { unreachable } from '../../common/util/util.js';
+
+// TESTING_TODO: This should expand to more canvas types (which will enhance a bunch of tests):
+// - canvas element not in dom
+// - canvas element in dom
+// - offscreen canvas from transferControlToOffscreen from canvas not in dom
+// - offscreen canvas from transferControlToOffscreen from canvas in dom
+// - offscreen canvas from new OffscreenCanvas
+export const kAllCanvasTypes = ['onscreen', 'offscreen'] as const;
+export type CanvasType = (typeof kAllCanvasTypes)[number];
+
+type CanvasForCanvasType<T extends CanvasType> = {
+ onscreen: HTMLCanvasElement;
+ offscreen: OffscreenCanvas;
+}[T];
+
+/** Valid contextId for HTMLCanvasElement/OffscreenCanvas,
+ * spec: https://html.spec.whatwg.org/multipage/canvas.html#dom-canvas-getcontext
+ */
+export const kValidCanvasContextIds = [
+ '2d',
+ 'bitmaprenderer',
+ 'webgl',
+ 'webgl2',
+ 'webgpu',
+] as const;
+export type CanvasContext = (typeof kValidCanvasContextIds)[number];
+
+/** Create HTMLCanvas/OffscreenCanvas. */
+export function createCanvas<T extends CanvasType>(
+ test: Fixture,
+ canvasType: T,
+ width: number,
+ height: number
+): CanvasForCanvasType<T> {
+ if (canvasType === 'onscreen') {
+ if (typeof document !== 'undefined') {
+ return createOnscreenCanvas(test, width, height) as CanvasForCanvasType<T>;
+ } else {
+ test.skip('Cannot create HTMLCanvasElement');
+ }
+ } else if (canvasType === 'offscreen') {
+ if (typeof OffscreenCanvas !== 'undefined') {
+ return createOffscreenCanvas(test, width, height) as CanvasForCanvasType<T>;
+ } else {
+ test.skip('Cannot create an OffscreenCanvas');
+ }
+ } else {
+ unreachable();
+ }
+}
+
+/** Create HTMLCanvasElement. */
+export function createOnscreenCanvas(
+ test: Fixture,
+ width: number,
+ height: number
+): HTMLCanvasElement {
+ let canvas: HTMLCanvasElement;
+ if (typeof document !== 'undefined') {
+ canvas = document.createElement('canvas');
+ canvas.width = width;
+ canvas.height = height;
+ } else {
+ test.skip('Cannot create HTMLCanvasElement');
+ }
+ return canvas;
+}
+
+/** Create OffscreenCanvas. */
+export function createOffscreenCanvas(
+ test: Fixture,
+ width: number,
+ height: number
+): OffscreenCanvas {
+ if (typeof OffscreenCanvas === 'undefined') {
+ test.skip('OffscreenCanvas is not supported');
+ }
+
+ return new OffscreenCanvas(width, height);
+}
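+
+// Usage sketch (illustrative only; not called by the suite): the generic
+// parameter of createCanvas narrows the return type to the concrete canvas
+// class, and the result can then be asked for a 'webgpu' context as the canvas
+// tests do. GPUCanvasContext comes from the WebGPU type declarations assumed
+// to be in scope throughout this suite.
+function exampleOnscreenWebGPUContext(test: Fixture): GPUCanvasContext {
+  const canvas = createCanvas(test, 'onscreen', 16, 16); // HTMLCanvasElement
+  const ctx = canvas.getContext('webgpu');
+  if (ctx === null) {
+    test.skip("canvas.getContext('webgpu') returned null");
+  }
+  return ctx;
+}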
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/device_pool.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/device_pool.ts
new file mode 100644
index 0000000000..1e6c0402cb
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/device_pool.ts
@@ -0,0 +1,414 @@
+import { SkipTestCase, TestCaseRecorder } from '../../common/framework/fixture.js';
+import { attemptGarbageCollection } from '../../common/util/collect_garbage.js';
+import { getGPU, getDefaultRequestAdapterOptions } from '../../common/util/navigator_gpu.js';
+import {
+ assert,
+ raceWithRejectOnTimeout,
+ assertReject,
+ unreachable,
+} from '../../common/util/util.js';
+import { getDefaultLimits, kLimits } from '../capability_info.js';
+
+export interface DeviceProvider {
+ readonly device: GPUDevice;
+ expectDeviceLost(reason: GPUDeviceLostReason): void;
+}
+
+class TestFailedButDeviceReusable extends Error {}
+class FeaturesNotSupported extends Error {}
+export class TestOOMedShouldAttemptGC extends Error {}
+
+export class DevicePool {
+ private holders: 'uninitialized' | 'failed' | DescriptorToHolderMap = 'uninitialized';
+
+ /** Acquire a device from the pool and begin the error scopes. */
+ async acquire(
+ recorder: TestCaseRecorder,
+ descriptor?: UncanonicalizedDeviceDescriptor
+ ): Promise<DeviceProvider> {
+ let errorMessage = '';
+ if (this.holders === 'uninitialized') {
+ this.holders = new DescriptorToHolderMap();
+ try {
+ await this.holders.getOrCreate(recorder, undefined);
+ } catch (ex) {
+ this.holders = 'failed';
+ if (ex instanceof Error) {
+ errorMessage = ` with ${ex.name} "${ex.message}"`;
+ }
+ }
+ }
+
+ assert(
+ this.holders !== 'failed',
+ `WebGPU device failed to initialize${errorMessage}; not retrying`
+ );
+
+ const holder = await this.holders.getOrCreate(recorder, descriptor);
+
+ assert(holder.state === 'free', 'Device was in use on DevicePool.acquire');
+ holder.state = 'acquired';
+ holder.beginTestScope();
+ return holder;
+ }
+
+ /**
+ * End the error scopes and check for errors.
+ * Then, if the device seems reusable, release it back into the pool. Otherwise, drop it.
+ */
+ async release(holder: DeviceProvider): Promise<void> {
+ assert(this.holders instanceof DescriptorToHolderMap, 'DevicePool got into a bad state');
+ assert(holder instanceof DeviceHolder, 'DeviceProvider should always be a DeviceHolder');
+
+ assert(holder.state === 'acquired', 'trying to release a device while already released');
+ try {
+ await holder.endTestScope();
+
+ // (Hopefully, if the device was lost, it has been reported by the time endTestScope()
+ // has finished (or timed out). If not, it could cause a finite number of extra test
+ // failures following this one (but should recover eventually).)
+ assert(
+ holder.lostInfo === undefined,
+ `Device was unexpectedly lost. Reason: ${holder.lostInfo?.reason}, Message: ${holder.lostInfo?.message}`
+ );
+ } catch (ex) {
+ // Any error that isn't explicitly TestFailedButDeviceReusable forces a new device to be
+ // created for the next test.
+ if (!(ex instanceof TestFailedButDeviceReusable)) {
+ this.holders.delete(holder);
+ if ('destroy' in holder.device) {
+ holder.device.destroy();
+ }
+
+ // Release the (hopefully only) ref to the GPUDevice.
+ holder.releaseGPUDevice();
+
+ // Try to clean up, in case there are stray GPU resources in need of collection.
+ if (ex instanceof TestOOMedShouldAttemptGC) {
+ await attemptGarbageCollection();
+ }
+ }
+ // The try block above may throw if the device was lost, to force the device to be
+ // reinitialized for the next test. However, if the device loss was expected, suppress
+ // the error. The loss is expected when `holder.expectedLostReason` equals
+ // `holder.lostInfo.reason`.
+ const expectedDeviceLost =
+ holder.expectedLostReason !== undefined &&
+ holder.lostInfo !== undefined &&
+ holder.expectedLostReason === holder.lostInfo.reason;
+ if (!expectedDeviceLost) {
+ throw ex;
+ }
+ } finally {
+ // Mark the holder as free so the device can be reused (if it's still in this.devices).
+ holder.state = 'free';
+ }
+ }
+}
+
+/**
+ * Map from GPUDeviceDescriptor to DeviceHolder.
+ */
+class DescriptorToHolderMap {
+ /** Map keys that are known to be unsupported and can be rejected quickly. */
+ private unsupported: Set<string> = new Set();
+ private holders: Map<string, DeviceHolder> = new Map();
+
+ /** Deletes an item from the map by DeviceHolder value. */
+ delete(holder: DeviceHolder): void {
+ for (const [k, v] of this.holders) {
+ if (v === holder) {
+ this.holders.delete(k);
+ return;
+ }
+ }
+ unreachable("internal error: couldn't find DeviceHolder to delete");
+ }
+
+ /**
+ * Gets a DeviceHolder from the map if it exists; otherwise, calls create() to create one,
+ * inserts it, and returns it.
+ *
+ * If an `uncanonicalizedDescriptor` is provided, it is canonicalized and used as the map key.
+ * If one is not provided, the map key is `""` (empty string).
+ *
+ * Throws SkipTestCase if devices with this descriptor are unsupported.
+ */
+ async getOrCreate(
+ recorder: TestCaseRecorder,
+ uncanonicalizedDescriptor: UncanonicalizedDeviceDescriptor | undefined
+ ): Promise<DeviceHolder> {
+ const [descriptor, key] = canonicalizeDescriptor(uncanonicalizedDescriptor);
+ // Quick-reject descriptors that are known to be unsupported already.
+ if (this.unsupported.has(key)) {
+ throw new SkipTestCase(
+ `GPUDeviceDescriptor previously failed: ${JSON.stringify(descriptor)}`
+ );
+ }
+
+ // Search for an existing device with the same descriptor.
+ {
+ const value = this.holders.get(key);
+ if (value) {
+ // Move it to the end of the Map (most-recently-used).
+ this.holders.delete(key);
+ this.holders.set(key, value);
+ return value;
+ }
+ }
+
+ // No existing item was found; add a new one.
+ let value;
+ try {
+ value = await DeviceHolder.create(recorder, descriptor);
+ } catch (ex) {
+ if (ex instanceof FeaturesNotSupported) {
+ this.unsupported.add(key);
+ throw new SkipTestCase(
+ `GPUDeviceDescriptor not supported: ${JSON.stringify(descriptor)}\n${ex?.message ?? ''}`
+ );
+ }
+
+ throw ex;
+ }
+ this.insertAndCleanUp(key, value);
+ return value;
+ }
+
+ /** Insert an entry, then remove the least-recently-used items if there are too many. */
+ private insertAndCleanUp(key: string, value: DeviceHolder) {
+ this.holders.set(key, value);
+
+ const kMaxEntries = 5;
+ if (this.holders.size > kMaxEntries) {
+ // Delete the first (least recently used) item in the set.
+ for (const [key] of this.holders) {
+ this.holders.delete(key);
+ return;
+ }
+ }
+ }
+}
+
+export type UncanonicalizedDeviceDescriptor = {
+ requiredFeatures?: Iterable<GPUFeatureName>;
+ requiredLimits?: Record<string, GPUSize32>;
+ /** @deprecated this field cannot be used */
+ nonGuaranteedFeatures?: undefined;
+ /** @deprecated this field cannot be used */
+ nonGuaranteedLimits?: undefined;
+ /** @deprecated this field cannot be used */
+ extensions?: undefined;
+ /** @deprecated this field cannot be used */
+ features?: undefined;
+};
+type CanonicalDeviceDescriptor = Omit<
+ Required<GPUDeviceDescriptor>,
+ 'label' | 'nonGuaranteedFeatures' | 'nonGuaranteedLimits'
+>;
+/**
+ * Make a stringified map-key from a GPUDeviceDescriptor.
+ * Tries to make sure all defaults are resolved first, but it's okay if some are missed
+ * (it just means some GPUDevice objects won't get deduplicated).
+ *
+ * This does **not** canonicalize `undefined` (the "default" descriptor) into a fully-qualified
+ * GPUDeviceDescriptor. This is just because `undefined` is a common case and we want to use it
+ * as a sanity check that WebGPU is working.
+ */
+function canonicalizeDescriptor(
+ desc: UncanonicalizedDeviceDescriptor | undefined
+): [CanonicalDeviceDescriptor | undefined, string] {
+ if (desc === undefined) {
+ return [undefined, ''];
+ }
+
+ const featuresCanonicalized = desc.requiredFeatures
+ ? Array.from(new Set(desc.requiredFeatures)).sort()
+ : [];
+
+ /** Canonicalized version of the requested limits: in canonical order, with only values which are
+ * specified _and_ non-default. */
+ const limitsCanonicalized: Record<string, number> = {};
+ // MAINTENANCE_TODO: Remove cast when @webgpu/types includes compatibilityMode
+ const adapterOptions = getDefaultRequestAdapterOptions() as unknown as {
+ compatibilityMode?: boolean;
+ };
+ const featureLevel = adapterOptions?.compatibilityMode ? 'compatibility' : 'core';
+ const defaultLimits = getDefaultLimits(featureLevel);
+ if (desc.requiredLimits) {
+ for (const limit of kLimits) {
+ const requestedValue = desc.requiredLimits[limit];
+ const defaultValue = defaultLimits[limit].default;
+ // Skip adding a limit to limitsCanonicalized if it is the same as the default.
+ if (requestedValue !== undefined && requestedValue !== defaultValue) {
+ limitsCanonicalized[limit] = requestedValue;
+ }
+ }
+ }
+
+ // Type ensures every field is carried through.
+ const descriptorCanonicalized: CanonicalDeviceDescriptor = {
+ requiredFeatures: featuresCanonicalized,
+ requiredLimits: limitsCanonicalized,
+ defaultQueue: {},
+ };
+ return [descriptorCanonicalized, JSON.stringify(descriptorCanonicalized)];
+}
+
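+// Illustrative sketch (not called by the pool): two descriptors that differ
+// only in the order of their required features, and that omit requiredLimits,
+// canonicalize to the same key and therefore share one DeviceHolder.
+function exampleCanonicalization(): boolean {
+  const [, keyA] = canonicalizeDescriptor({
+    requiredFeatures: ['depth-clip-control', 'texture-compression-bc'],
+  });
+  const [, keyB] = canonicalizeDescriptor({
+    requiredFeatures: ['texture-compression-bc', 'depth-clip-control'],
+  });
+  return keyA === keyB; // expected: true
+}
+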
+function supportsFeature(
+ adapter: GPUAdapter,
+ descriptor: CanonicalDeviceDescriptor | undefined
+): boolean {
+ if (descriptor === undefined) {
+ return true;
+ }
+
+ for (const feature of descriptor.requiredFeatures) {
+ if (!adapter.features.has(feature)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * DeviceHolder has two states:
+ * - 'free': Free to be used for a new test.
+ * - 'acquired': In use by a running test.
+ */
+type DeviceHolderState = 'free' | 'acquired';
+
+/**
+ * Holds a GPUDevice and tracks its state (free/acquired) and handles device loss.
+ */
+class DeviceHolder implements DeviceProvider {
+ /** The device. Will be cleared during cleanup if there were unexpected errors. */
+ private _device: GPUDevice | undefined;
+ /** Whether the device is in use by a test or not. */
+ state: DeviceHolderState = 'free';
+ /** initially undefined; becomes set when the device is lost */
+ lostInfo?: GPUDeviceLostInfo;
+ /** Set if the device is expected to be lost. */
+ expectedLostReason?: GPUDeviceLostReason;
+
+ // Gets a device and creates a DeviceHolder.
+ // If the device is lost, DeviceHolder.lost gets set.
+ static async create(
+ recorder: TestCaseRecorder,
+ descriptor: CanonicalDeviceDescriptor | undefined
+ ): Promise<DeviceHolder> {
+ const gpu = getGPU(recorder);
+ const adapter = await gpu.requestAdapter();
+ assert(adapter !== null, 'requestAdapter returned null');
+ if (!supportsFeature(adapter, descriptor)) {
+ throw new FeaturesNotSupported('One or more features are not supported');
+ }
+ const device = await adapter.requestDevice(descriptor);
+ assert(device !== null, 'requestDevice returned null');
+
+ return new DeviceHolder(device);
+ }
+
+ private constructor(device: GPUDevice) {
+ this._device = device;
+ void this._device.lost.then(ev => {
+ this.lostInfo = ev;
+ });
+ }
+
+ get device() {
+ assert(this._device !== undefined);
+ return this._device;
+ }
+
+ /** Push error scopes that surround test execution. */
+ beginTestScope(): void {
+ assert(this.state === 'acquired');
+ this.device.pushErrorScope('validation');
+ this.device.pushErrorScope('internal');
+ this.device.pushErrorScope('out-of-memory');
+ }
+
+ /** Mark the DeviceHolder as expecting a device loss when the test scope ends. */
+ expectDeviceLost(reason: GPUDeviceLostReason) {
+ assert(this.state === 'acquired');
+ this.expectedLostReason = reason;
+ }
+
+ /**
+ * Attempt to end test scopes: Check that there are no extra error scopes, and that no
+ * otherwise-uncaptured errors occurred during the test. Time out if it takes too long.
+ */
+ endTestScope(): Promise<void> {
+ assert(this.state === 'acquired');
+ const kTimeout = 5000;
+
+ // Time out if attemptEndTestScope (popErrorScope or onSubmittedWorkDone) never completes. If
+ // this rejects, the device won't be reused, so it's OK that popErrorScope calls may not have
+ // finished.
+ //
+ // This could happen due to a browser bug - e.g.,
+ // as of this writing, on Chrome GPU process crash, popErrorScope just hangs.
+ return raceWithRejectOnTimeout(this.attemptEndTestScope(), kTimeout, 'endTestScope timed out');
+ }
+
+ private async attemptEndTestScope(): Promise<void> {
+ let gpuValidationError: GPUError | null;
+ let gpuInternalError: GPUError | null;
+ let gpuOutOfMemoryError: GPUError | null;
+
+ // Submit to the queue to attempt to force a GPU flush.
+ this.device.queue.submit([]);
+
+ try {
+ // May reject if the device was lost.
+ [gpuOutOfMemoryError, gpuInternalError, gpuValidationError] = await Promise.all([
+ this.device.popErrorScope(),
+ this.device.popErrorScope(),
+ this.device.popErrorScope(),
+ ]);
+ } catch (ex) {
+ assert(this.lostInfo !== undefined, 'popErrorScope failed; did beginTestScope get missed?');
+ throw ex;
+ }
+
+ // Attempt to wait for the queue to be idle.
+ if (this.device.queue.onSubmittedWorkDone) {
+ await this.device.queue.onSubmittedWorkDone();
+ }
+
+ await assertReject('OperationError', this.device.popErrorScope(), {
+ allowMissingStack: true,
+ message: 'There was an extra error scope on the stack after a test',
+ });
+
+ if (gpuOutOfMemoryError !== null) {
+ assert(gpuOutOfMemoryError instanceof GPUOutOfMemoryError);
+ // Don't allow the device to be reused; unexpected OOM could break the device.
+ throw new TestOOMedShouldAttemptGC('Unexpected out-of-memory error occurred');
+ }
+ if (gpuInternalError !== null) {
+ assert(gpuInternalError instanceof GPUInternalError);
+ // Allow the device to be reused.
+ throw new TestFailedButDeviceReusable(
+ `Unexpected internal error occurred: ${gpuInternalError.message}`
+ );
+ }
+ if (gpuValidationError !== null) {
+ assert(gpuValidationError instanceof GPUValidationError);
+ // Allow the device to be reused.
+ throw new TestFailedButDeviceReusable(
+ `Unexpected validation error occurred: ${gpuValidationError.message}`
+ );
+ }
+ }
+
+ /**
+ * Release the ref to the GPUDevice. This should be the only ref held by the DevicePool or
+ * GPUTest, so in theory it can get garbage collected.
+ */
+ releaseGPUDevice(): void {
+ this._device = undefined;
+ }
+}
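+
+// Usage sketch (illustrative only; error handling is elided): how a test
+// harness is expected to drive the pool. The TestCaseRecorder comes from the
+// surrounding test framework.
+async function examplePoolUsage(pool: DevicePool, recorder: TestCaseRecorder): Promise<void> {
+  const provider = await pool.acquire(recorder, {
+    requiredFeatures: ['texture-compression-bc'],
+  });
+  try {
+    // Run GPU work against provider.device while the error scopes are active.
+    provider.device.queue.submit([]);
+  } finally {
+    // Pops the error scopes, then recycles the holder or drops it on failure.
+    await pool.release(provider);
+  }
+}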
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/floating_point.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/floating_point.ts
new file mode 100644
index 0000000000..e271e7db7a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/floating_point.ts
@@ -0,0 +1,5441 @@
+import { ROArrayArray, ROArrayArrayArray } from '../../common/util/types.js';
+import { assert, unreachable } from '../../common/util/util.js';
+import { Float16Array } from '../../external/petamoriken/float16/float16.js';
+import { Case, IntervalFilter } from '../shader/execution/expression/expression.js';
+
+import BinaryStream from './binary_stream.js';
+import { anyOf } from './compare.js';
+import { kValue } from './constants.js';
+import {
+ abstractFloat,
+ f16,
+ f32,
+ isFloatType,
+ Scalar,
+ ScalarType,
+ toMatrix,
+ toVector,
+ u32,
+} from './conversion.js';
+import {
+ calculatePermutations,
+ cartesianProduct,
+ correctlyRoundedF16,
+ correctlyRoundedF32,
+ correctlyRoundedF64,
+ flatten2DArray,
+ FlushMode,
+ flushSubnormalNumberF16,
+ flushSubnormalNumberF32,
+ flushSubnormalNumberF64,
+ isFiniteF16,
+ isFiniteF32,
+ isSubnormalNumberF16,
+ isSubnormalNumberF32,
+ isSubnormalNumberF64,
+ map2DArray,
+ oneULPF16,
+ oneULPF32,
+ quantizeToF32,
+ quantizeToF16,
+ unflatten2DArray,
+ every2DArray,
+} from './math.js';
+
+/** Indicate the kind of WGSL floating point numbers being operated on */
+export type FPKind = 'f32' | 'f16' | 'abstract';
+
+enum SerializedFPIntervalKind {
+ Abstract,
+ F32,
+ F16,
+}
+
+/** serializeFPKind() serializes a FPKind to a BinaryStream */
+export function serializeFPKind(s: BinaryStream, value: FPKind) {
+ switch (value) {
+ case 'abstract':
+ s.writeU8(SerializedFPIntervalKind.Abstract);
+ break;
+ case 'f16':
+ s.writeU8(SerializedFPIntervalKind.F16);
+ break;
+ case 'f32':
+ s.writeU8(SerializedFPIntervalKind.F32);
+ break;
+ }
+}
+
+/** deserializeFPKind() deserializes a FPKind from a BinaryStream */
+export function deserializeFPKind(s: BinaryStream): FPKind {
+ const kind = s.readU8();
+ switch (kind) {
+ case SerializedFPIntervalKind.Abstract:
+ return 'abstract';
+ case SerializedFPIntervalKind.F16:
+ return 'f16';
+ case SerializedFPIntervalKind.F32:
+ return 'f32';
+ default:
+ unreachable(`invalid deserialized FPKind: ${kind}`);
+ }
+}
+// Containers
+
+/**
+ * Representation of bounds for an interval as an array with either one or two
+ * elements. Single element indicates that the interval is a single point. For
+ * two elements, the first is the lower bound of the interval and the second is
+ * the upper bound.
+ */
+export type IntervalBounds = readonly [number] | readonly [number, number];
+
+/** Represents a closed interval of floating point numbers */
+export class FPInterval {
+ public readonly kind: FPKind;
+ public readonly begin: number;
+ public readonly end: number;
+
+ /**
+ * Constructor
+ *
+ * `FPTraits.toInterval` is the preferred way to create FPIntervals
+ *
+ * @param kind the floating point number type this is an interval for
+ * @param bounds beginning and end of the interval
+ */
+ public constructor(kind: FPKind, ...bounds: IntervalBounds) {
+ this.kind = kind;
+
+ const begin = bounds[0];
+ const end = bounds.length === 2 ? bounds[1] : bounds[0];
+ assert(!Number.isNaN(begin) && !Number.isNaN(end), `bounds need to be non-NaN`);
+ assert(begin <= end, `bounds[0] (${begin}) must be less than or equal to bounds[1] (${end})`);
+
+ this.begin = begin;
+ this.end = end;
+ }
+
+ /** @returns the floating point traits for this interval */
+ public traits(): FPTraits {
+ return FP[this.kind];
+ }
+
+ /** @returns begin and end if non-point interval, otherwise just begin */
+ public bounds(): IntervalBounds {
+ return this.isPoint() ? [this.begin] : [this.begin, this.end];
+ }
+
+ /** @returns if a point or interval is completely contained by this interval */
+ public contains(n: number | FPInterval): boolean {
+ if (Number.isNaN(n)) {
+ // Being the unbounded ('any') interval indicates that accuracy is not defined for
+ // this test; it only checks that this input doesn't cause the implementation to
+ // misbehave, so NaN is accepted.
+ return this.begin === Number.NEGATIVE_INFINITY && this.end === Number.POSITIVE_INFINITY;
+ }
+
+ if (n instanceof FPInterval) {
+ return this.begin <= n.begin && this.end >= n.end;
+ }
+ return this.begin <= n && this.end >= n;
+ }
+
+ /** @returns if any values in the interval may be flushed to zero; this
+ * includes any subnormals and zero itself.
+ */
+ public containsZeroOrSubnormals(): boolean {
+ return !(
+ this.end < this.traits().constants().negative.subnormal.min ||
+ this.begin > this.traits().constants().positive.subnormal.max
+ );
+ }
+
+ /** @returns if this interval contains a single point */
+ public isPoint(): boolean {
+ return this.begin === this.end;
+ }
+
+ /** @returns if this interval only contains finite values */
+ public isFinite(): boolean {
+ return this.traits().isFinite(this.begin) && this.traits().isFinite(this.end);
+ }
+
+ /** @returns a string representation for logging purposes */
+ public toString(): string {
+ return `{ '${this.kind}', [${this.bounds().map(this.traits().scalarBuilder)}] }`;
+ }
+}
+
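+// Usage sketch (illustrative only; not called by the suite): a point interval
+// and a containment check. Tests normally build intervals through the traits
+// (e.g. FP.f32.toInterval), but the raw constructor is enough to show the
+// semantics.
+function exampleIntervals(): void {
+  const unit = new FPInterval('f32', 0, 1); // the closed interval [0, 1]
+  const half = new FPInterval('f32', 0.5); // the point interval { 0.5 }
+  assert(unit.contains(half));
+  assert(half.isPoint() && !unit.isPoint());
+}
+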
+/** serializeFPInterval() serializes a FPInterval to a BinaryStream */
+export function serializeFPInterval(s: BinaryStream, i: FPInterval) {
+ serializeFPKind(s, i.kind);
+ const traits = FP[i.kind];
+ s.writeCond(i !== traits.constants().unboundedInterval, {
+ if_true: () => {
+ // Bounded
+ switch (i.kind) {
+ case 'abstract':
+ s.writeF64(i.begin);
+ s.writeF64(i.end);
+ break;
+ case 'f32':
+ s.writeF32(i.begin);
+ s.writeF32(i.end);
+ break;
+ case 'f16':
+ s.writeF16(i.begin);
+ s.writeF16(i.end);
+ break;
+ default:
+ unreachable(`Unable to serialize FPInterval ${i}`);
+ break;
+ }
+ },
+ if_false: () => {
+ // Unbounded
+ },
+ });
+}
+
+/** deserializeFPInterval() deserializes a FPInterval from a BinaryStream */
+export function deserializeFPInterval(s: BinaryStream): FPInterval {
+ const kind = deserializeFPKind(s);
+ const traits = FP[kind];
+ return s.readCond({
+ if_true: () => {
+ // Bounded
+ switch (kind) {
+ case 'abstract':
+ return new FPInterval(traits.kind, s.readF64(), s.readF64());
+ case 'f32':
+ return new FPInterval(traits.kind, s.readF32(), s.readF32());
+ case 'f16':
+ return new FPInterval(traits.kind, s.readF16(), s.readF16());
+ }
+ unreachable(`Unable to deserialize FPInterval with kind ${kind}`);
+ },
+ if_false: () => {
+ // Unbounded
+ return traits.constants().unboundedInterval;
+ },
+ });
+}
+
+/**
+ * Representation of a vec2/3/4 of floating point intervals as an array of
+ * FPIntervals.
+ */
+export type FPVector =
+ | [FPInterval, FPInterval]
+ | [FPInterval, FPInterval, FPInterval]
+ | [FPInterval, FPInterval, FPInterval, FPInterval];
+
+/** Shorthand for an Array of Arrays that contains a column-major matrix */
+type Array2D<T> = ROArrayArray<T>;
+
+/**
+ * Representation of a matCxR of floating point intervals as an array of arrays
+ * of FPIntervals. This maps onto the WGSL concept of matrix. Internally the
+ * columns are stored as fixed-length readonly tuples, i.e. column-major.
+ */
+export type FPMatrix =
+ | readonly [readonly [FPInterval, FPInterval], readonly [FPInterval, FPInterval]]
+ | readonly [
+ readonly [FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval],
+ ]
+ | readonly [
+ readonly [FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval],
+ ]
+ | readonly [
+ readonly [FPInterval, FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval, FPInterval],
+ ]
+ | readonly [
+ readonly [FPInterval, FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval, FPInterval],
+ ]
+ | readonly [
+ readonly [FPInterval, FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval, FPInterval],
+ ]
+ | readonly [
+ readonly [FPInterval, FPInterval, FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval, FPInterval, FPInterval],
+ ]
+ | readonly [
+ readonly [FPInterval, FPInterval, FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval, FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval, FPInterval, FPInterval],
+ ]
+ | readonly [
+ readonly [FPInterval, FPInterval, FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval, FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval, FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval, FPInterval, FPInterval],
+ ];
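+
+// For illustration (an informal note, not additional API): an FPVector is a
+// fixed-length tuple of FPIntervals, and an FPMatrix is a column-major tuple
+// of such columns. They are normally produced via the FPTraits helpers defined
+// below, e.g. FP.f32.toVector([1, 2, 3]) or FP.f32.toMatrix([[1, 2], [3, 4]]).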
+
+// Utilities
+
+/** @returns input with an appended 0, if the input contains non-zero subnormals */
+// Once the f16 traits are defined, this can be replaced with something like
+// `FP.f16.addFlushedIfNeeded`.
+function addFlushedIfNeededF16(values: readonly number[]): readonly number[] {
+ return values.some(v => v !== 0 && isSubnormalNumberF16(v)) ? values.concat(0) : values;
+}
+
+// Operations
+
+/**
+ * A function that converts a point to an acceptance interval.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface ScalarToInterval {
+ (x: number): FPInterval;
+}
+
+/** Operation used to implement a ScalarToInterval */
+interface ScalarToIntervalOp {
+ /** @returns acceptance interval for a function at point x */
+ impl: ScalarToInterval;
+
+ /**
+ * Calculates where in the domain defined by x the min/max extrema of impl
+ * occur and returns a span of those points to be used as the domain instead.
+ *
+ * Used by this.runScalarToIntervalOp before invoking impl.
+ * If not defined, the bounds of the existing domain are assumed to be the
+ * extrema.
+ *
+ * This is only implemented for operations that meet all the following
+ * criteria:
+ * a) non-monotonic
+ * b) used in inherited accuracy calculations
+ * c) need to take in an interval for b)
+ * i.e. fooInterval takes in x: number | FPInterval, not x: number
+ */
+ extrema?: (x: FPInterval) => FPInterval;
+}
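+
+// A hypothetical sketch of such an op (the accuracy below is invented purely
+// for illustration):
+//
+//   const sketchOp: ScalarToIntervalOp = {
+//     impl: (x: number) => FP.f32.toInterval([x - 1, x + 1]),
+//   };
+//
+// extrema, when supplied, returns the span of the points where impl's min/max
+// occur, and that span is then used as the evaluation domain instead.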
+
+/**
+ * A function that converts a pair of points to an acceptance interval.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface ScalarPairToInterval {
+ (x: number, y: number): FPInterval;
+}
+
+/** Operation used to implement a ScalarPairToInterval */
+interface ScalarPairToIntervalOp {
+ /** @returns acceptance interval for a function at point (x, y) */
+ impl: ScalarPairToInterval;
+ /**
+   * Calculates where in the domain defined by x & y the min/max extrema of impl
+ * occur and returns spans of those points to be used as the domain instead.
+ *
+ * Used by runScalarPairToIntervalOp before invoking impl.
+ * If not defined, the bounds of the existing domain are assumed to be the
+ * extrema.
+ *
+ * This is only implemented for functions that meet all of the following
+ * criteria:
+ * a) non-monotonic
+ * b) used in inherited accuracy calculations
+ * c) need to take in an interval for b)
+ */
+ extrema?: (x: FPInterval, y: FPInterval) => [FPInterval, FPInterval];
+}
+
+/** Domain for a ScalarPairToInterval implementation */
+interface ScalarPairToIntervalDomain {
+ // Arrays to support discrete valid domain intervals
+ x: readonly FPInterval[];
+ y: readonly FPInterval[];
+}
+
+/**
+ * A function that converts a triplet of points to an acceptance interval.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface ScalarTripleToInterval {
+ (x: number, y: number, z: number): FPInterval;
+}
+
+/** Operation used to implement a ScalarTripleToInterval */
+interface ScalarTripleToIntervalOp {
+ // Re-using the *Op interface pattern for symmetry with the other operations.
+ /** @returns acceptance interval for a function at point (x, y, z) */
+ impl: ScalarTripleToInterval;
+}
+
+// Currently ScalarToVector is not integrated with the rest of the floating point
+// framework, because the only builtins that use it are actually
+// u32 -> [f32, f32, f32, f32] functions, so the whole rounding and interval
+// process doesn't get applied to the inputs.
+// They do use the framework internally by invoking divisionInterval on segments
+// of the input.
+/**
+ * A function that converts a point to a vector of acceptance intervals.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface ScalarToVector {
+ (n: number): FPVector;
+}
+
+/**
+ * A function that converts a vector to an acceptance interval.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface VectorToInterval {
+ (x: readonly number[]): FPInterval;
+}
+
+/** Operation used to implement a VectorToInterval */
+interface VectorToIntervalOp {
+ // Re-using the *Op interface pattern for symmetry with the other operations.
+ /** @returns acceptance interval for a function on vector x */
+ impl: VectorToInterval;
+}
+
+/**
+ * A function that converts a pair of vectors to an acceptance interval.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface VectorPairToInterval {
+ (x: readonly number[], y: readonly number[]): FPInterval;
+}
+
+/** Operation used to implement a VectorPairToInterval */
+interface VectorPairToIntervalOp {
+ // Re-using the *Op interface pattern for symmetry with the other operations.
+ /** @returns acceptance interval for a function on vectors (x, y) */
+ impl: VectorPairToInterval;
+}
+
+/**
+ * A function that converts a vector to a vector of acceptance intervals.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface VectorToVector {
+ (x: readonly number[]): FPVector;
+}
+
+/** Operation used to implement a VectorToVector */
+interface VectorToVectorOp {
+ // Re-using the *Op interface pattern for symmetry with the other operations.
+ /** @returns a vector of acceptance intervals for a function on vector x */
+ impl: VectorToVector;
+}
+
+/**
+ * A function that converts a pair of vectors to a vector of acceptance
+ * intervals.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface VectorPairToVector {
+ (x: readonly number[], y: readonly number[]): FPVector;
+}
+
+/** Operation used to implement a VectorPairToVector */
+interface VectorPairToVectorOp {
+ // Re-using the *Op interface pattern for symmetry with the other operations.
+ /** @returns a vector of acceptance intervals for a function on vectors (x, y) */
+ impl: VectorPairToVector;
+}
+
+/**
+ * A function that converts a vector and a scalar to a vector of acceptance
+ * intervals.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface VectorScalarToVector {
+ (x: readonly number[], y: number): FPVector;
+}
+
+/**
+ * A function that converts a scalar and a vector to a vector of acceptance
+ * intervals.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface ScalarVectorToVector {
+ (x: number, y: readonly number[]): FPVector;
+}
+
+/**
+ * A function that converts a matrix to an acceptance interval.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface MatrixToScalar {
+ (m: Array2D<number>): FPInterval;
+}
+
+/** Operation used to implement a MatrixToMatrix */
+interface MatrixToMatrixOp {
+ // Re-using the *Op interface pattern for symmetry with the other operations.
+ /** @returns a matrix of acceptance intervals for a function on matrix x */
+ impl: MatrixToMatrix;
+}
+
+/**
+ * A function that converts a matrix to a matrix of acceptance intervals.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface MatrixToMatrix {
+ (m: Array2D<number>): FPMatrix;
+}
+
+/**
+ * A function that converts a pair of matrices to a matrix of acceptance
+ * intervals.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface MatrixPairToMatrix {
+ (x: Array2D<number>, y: Array2D<number>): FPMatrix;
+}
+
+/**
+ * A function that converts a matrix and a scalar to a matrix of acceptance
+ * intervals.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface MatrixScalarToMatrix {
+ (x: Array2D<number>, y: number): FPMatrix;
+}
+
+/**
+ * A function that converts a scalar and a matrix to a matrix of acceptance
+ * intervals.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface ScalarMatrixToMatrix {
+ (x: number, y: Array2D<number>): FPMatrix;
+}
+
+/**
+ * A function that converts a matrix and a vector to a vector of acceptance
+ * intervals.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface MatrixVectorToVector {
+ (x: Array2D<number>, y: readonly number[]): FPVector;
+}
+
+/**
+ * A function that converts a vector and a matrix to a vector of acceptance
+ * intervals.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface VectorMatrixToVector {
+ (x: readonly number[], y: Array2D<number>): FPVector;
+}
+
+// Traits
+
+/**
+ * Typed structure containing all the limits/constants defined for each
+ * WGSL floating point kind
+ */
+interface FPConstants {
+ positive: {
+ min: number;
+ max: number;
+ infinity: number;
+ nearest_max: number;
+ less_than_one: number;
+ subnormal: {
+ min: number;
+ max: number;
+ };
+ pi: {
+ whole: number;
+ three_quarters: number;
+ half: number;
+ third: number;
+ quarter: number;
+ sixth: number;
+ };
+ e: number;
+ };
+ negative: {
+ min: number;
+ max: number;
+ infinity: number;
+ nearest_min: number;
+ less_than_one: number;
+ subnormal: {
+ min: number;
+ max: number;
+ };
+ pi: {
+ whole: number;
+ three_quarters: number;
+ half: number;
+ third: number;
+ quarter: number;
+ sixth: number;
+ };
+ };
+ unboundedInterval: FPInterval;
+ zeroInterval: FPInterval;
+ negPiToPiInterval: FPInterval;
+ greaterThanZeroInterval: FPInterval;
+ zeroVector: {
+ 2: FPVector;
+ 3: FPVector;
+ 4: FPVector;
+ };
+ unboundedVector: {
+ 2: FPVector;
+ 3: FPVector;
+ 4: FPVector;
+ };
+ unboundedMatrix: {
+ 2: {
+ 2: FPMatrix;
+ 3: FPMatrix;
+ 4: FPMatrix;
+ };
+ 3: {
+ 2: FPMatrix;
+ 3: FPMatrix;
+ 4: FPMatrix;
+ };
+ 4: {
+ 2: FPMatrix;
+ 3: FPMatrix;
+ 4: FPMatrix;
+ };
+ };
+}
+
+/** A representation of an FPInterval for a case param */
+export type FPIntervalParam = {
+ kind: FPKind;
+ interval: number | IntervalBounds;
+};
+
+/** Abstract base class for all floating-point traits */
+export abstract class FPTraits {
+ public readonly kind: FPKind;
+ protected constructor(k: FPKind) {
+ this.kind = k;
+ }
+
+ public abstract constants(): FPConstants;
+
+ // Utilities - Implemented
+
+ /** @returns an interval containing the point or the original interval */
+ public toInterval(n: number | IntervalBounds | FPInterval): FPInterval {
+ if (n instanceof FPInterval) {
+ if (n.kind === this.kind) {
+ return n;
+ }
+
+ // Preserve if the original interval was unbounded or bounded
+ if (!n.isFinite()) {
+ return this.constants().unboundedInterval;
+ }
+
+ return new FPInterval(this.kind, ...n.bounds());
+ }
+
+ if (n instanceof Array) {
+ return new FPInterval(this.kind, ...n);
+ }
+
+ return new FPInterval(this.kind, n, n);
+ }
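+
+  // For example (informal, assuming the FP.f32 traits instance exported by this
+  // file):
+  //   FP.f32.toInterval(1);                            // point interval [1, 1]
+  //   FP.f32.toInterval([0, 1]);                       // bounded interval [0, 1]
+  //   FP.f32.toInterval(new FPInterval('f16', 0, 1));  // re-wrapped as an f32 interval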
+
+ /**
+ * Makes a param that can be turned into an interval
+ */
+ public toParam(n: number | IntervalBounds): FPIntervalParam {
+ return {
+ kind: this.kind,
+ interval: n,
+ };
+ }
+
+ /**
+   * Converts p into an FPInterval if it is an FPIntervalParam
+ */
+ public fromParam(
+ p: number | IntervalBounds | FPIntervalParam
+ ): number | IntervalBounds | FPInterval {
+ const param = p as FPIntervalParam;
+ if (param.interval && param.kind) {
+ assert(param.kind === this.kind);
+ return this.toInterval(param.interval);
+ }
+ return p as number | IntervalBounds;
+ }
+
+ /**
+ * @returns an interval with the tightest bounds that includes all provided
+ * intervals
+ */
+ public spanIntervals(...intervals: readonly FPInterval[]): FPInterval {
+ assert(intervals.length > 0, `span of an empty list of FPIntervals is not allowed`);
+ assert(
+ intervals.every(i => i.kind === this.kind),
+ `span is only defined for intervals with the same kind`
+ );
+ let begin = Number.POSITIVE_INFINITY;
+ let end = Number.NEGATIVE_INFINITY;
+ intervals.forEach(i => {
+ begin = Math.min(i.begin, begin);
+ end = Math.max(i.end, end);
+ });
+ return this.toInterval([begin, end]);
+ }
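+
+  // For example (informal sketch):
+  //   FP.f32.spanIntervals(FP.f32.toInterval([0, 1]), FP.f32.toInterval([5, 6]))
+  //   // yields the f32 interval [0, 6], the tightest interval covering both.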
+
+ /** Narrow an array of values to FPVector if possible */
+ public isVector(v: ReadonlyArray<number | IntervalBounds | FPInterval>): v is FPVector {
+ if (v.every(e => e instanceof FPInterval && e.kind === this.kind)) {
+ return v.length === 2 || v.length === 3 || v.length === 4;
+ }
+ return false;
+ }
+
+ /** @returns an FPVector representation of an array of values if possible */
+ public toVector(v: ReadonlyArray<number | IntervalBounds | FPInterval>): FPVector {
+ if (this.isVector(v) && v.every(e => e.kind === this.kind)) {
+ return v;
+ }
+
+ const f = v.map(e => this.toInterval(e));
+ // The return of the map above is a readonly FPInterval[], which needs to be narrowed
+ // to FPVector, since FPVector is defined as fixed length tuples.
+ if (this.isVector(f)) {
+ return f;
+ }
+ unreachable(`Cannot convert [${v}] to FPVector`);
+ }
+
+ /**
+ * @returns a FPVector where each element is the span for corresponding
+ * elements at the same index in the input vectors
+ */
+ public spanVectors(...vectors: FPVector[]): FPVector {
+ assert(
+ vectors.every(e => this.isVector(e)),
+ 'Vector span is not defined for vectors of differing floating point kinds'
+ );
+
+ const vector_length = vectors[0].length;
+ assert(
+ vectors.every(e => e.length === vector_length),
+ `Vector span is not defined for vectors of differing lengths`
+ );
+
+ const result: FPInterval[] = new Array<FPInterval>(vector_length);
+
+ for (let i = 0; i < vector_length; i++) {
+ result[i] = this.spanIntervals(...vectors.map(v => v[i]));
+ }
+ return this.toVector(result);
+ }
+
+  /** Narrow an array of arrays of values to FPMatrix if possible */
+ public isMatrix(m: Array2D<number | IntervalBounds | FPInterval> | FPVector[]): m is FPMatrix {
+ if (!m.every(c => c.every(e => e instanceof FPInterval && e.kind === this.kind))) {
+ return false;
+ }
+    // At this point m is guaranteed to be a ROArrayArray<FPInterval>, but may be
+    // typed as a FPVector[].
+    // Coercing the type, since FPVector[] is functionally equivalent to
+    // ROArrayArray<FPInterval> for .length and .every, but they are not type
+    // compatible: tuples are not equivalent to arrays, so TS would consider c in
+    // .every below to be unresolvable, even though our usage is safe.
+ m = m as ROArrayArray<FPInterval>;
+
+ if (m.length > 4 || m.length < 2) {
+ return false;
+ }
+
+ const num_rows = m[0].length;
+ if (num_rows > 4 || num_rows < 2) {
+ return false;
+ }
+
+ return m.every(c => c.length === num_rows);
+ }
+
+  /** @returns an FPMatrix representation of an array of arrays of values if possible */
+ public toMatrix(m: Array2D<number | IntervalBounds | FPInterval> | FPVector[]): FPMatrix {
+ if (
+ this.isMatrix(m) &&
+ every2DArray(m, (e: FPInterval) => {
+ return e.kind === this.kind;
+ })
+ ) {
+ return m;
+ }
+
+ const result = map2DArray(m, this.toInterval.bind(this));
+
+ // The return of the map above is a ROArrayArray<FPInterval>, which needs to be
+ // narrowed to FPMatrix, since FPMatrix is defined as fixed length tuples.
+ if (this.isMatrix(result)) {
+ return result;
+ }
+ unreachable(`Cannot convert ${m} to FPMatrix`);
+ }
+
+ /**
+ * @returns a FPMatrix where each element is the span for corresponding
+ * elements at the same index in the input matrices
+ */
+ public spanMatrices(...matrices: FPMatrix[]): FPMatrix {
+ // Coercing the type of matrices, since tuples are not generally compatible
+ // with Arrays, but they are functionally equivalent for the usages in this
+ // function.
+ const ms = matrices as Array2D<FPInterval>[];
+ const num_cols = ms[0].length;
+ const num_rows = ms[0][0].length;
+ assert(
+ ms.every(m => m.length === num_cols && m.every(r => r.length === num_rows)),
+ `Matrix span is not defined for Matrices of differing dimensions`
+ );
+
+ const result: FPInterval[][] = [...Array(num_cols)].map(_ => [...Array(num_rows)]);
+ for (let i = 0; i < num_cols; i++) {
+ for (let j = 0; j < num_rows; j++) {
+ result[i][j] = this.spanIntervals(...ms.map(m => m[i][j]));
+ }
+ }
+
+ return this.toMatrix(result);
+ }
+
+  /** @returns input with an appended 0, if the input contains non-zero subnormals */
+ public addFlushedIfNeeded(values: readonly number[]): readonly number[] {
+ const subnormals = values.filter(this.isSubnormal);
+ const needs_zero = subnormals.length > 0 && subnormals.every(s => s !== 0);
+ return needs_zero ? values.concat(0) : values;
+ }
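+
+  // For example (informal sketch): 2 ** -149 is the minimum positive f32
+  // subnormal, so under the f32 traits
+  //   FP.f32.addFlushedIfNeeded([2 ** -149]) yields [2 ** -149, 0]
+  // while FP.f32.addFlushedIfNeeded([1]) yields [1] unchanged.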
+
+ /**
+ * Restrict the inputs to an ScalarToInterval operation
+ *
+ * Only used for operations that have tighter domain requirements than 'must
+ * be finite'.
+ *
+ * @param domain interval to restrict inputs to
+ * @param impl operation implementation to run if input is within the required domain
+ * @returns a ScalarToInterval that calls impl if domain contains the input,
+ * otherwise it returns an unbounded interval */
+ protected limitScalarToIntervalDomain(
+ domain: FPInterval,
+ impl: ScalarToInterval
+ ): ScalarToInterval {
+ return (n: number): FPInterval => {
+ return domain.contains(n) ? impl(n) : this.constants().unboundedInterval;
+ };
+ }
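+
+  // For example (informal sketch): a builtin only defined for positive inputs
+  // could wrap its implementation as
+  //   this.limitScalarToIntervalDomain(this.constants().greaterThanZeroInterval, impl)
+  // so that out-of-domain inputs map to the unbounded interval rather than a
+  // spuriously tight one.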
+
+ /**
+ * Restrict the inputs to a ScalarPairToInterval
+ *
+ * Only used for operations that have tighter domain requirements than 'must be
+ * finite'.
+ *
+ * @param domain set of intervals to restrict inputs to
+ * @param impl operation implementation to run if input is within the required domain
+ * @returns a ScalarPairToInterval that calls impl if domain contains the input,
+ * otherwise it returns an unbounded interval */
+ protected limitScalarPairToIntervalDomain(
+ domain: ScalarPairToIntervalDomain,
+ impl: ScalarPairToInterval
+ ): ScalarPairToInterval {
+ return (x: number, y: number): FPInterval => {
+ if (!domain.x.some(d => d.contains(x)) || !domain.y.some(d => d.contains(y))) {
+ return this.constants().unboundedInterval;
+ }
+
+ return impl(x, y);
+ };
+ }
+
+ /** Stub for scalar to interval generator */
+ protected unimplementedScalarToInterval(name: string, _x: number | FPInterval): FPInterval {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for scalar pair to interval generator */
+ protected unimplementedScalarPairToInterval(
+ name: string,
+ _x: number | FPInterval,
+ _y: number | FPInterval
+ ): FPInterval {
+    unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for scalar triple to interval generator */
+ protected unimplementedScalarTripleToInterval(
+ name: string,
+ _x: number | FPInterval,
+ _y: number | FPInterval,
+ _z: number | FPInterval
+ ): FPInterval {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for scalar to vector generator */
+ protected unimplementedScalarToVector(name: string, _x: number | FPInterval): FPVector {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for vector to interval generator */
+ protected unimplementedVectorToInterval(name: string, _x: (number | FPInterval)[]): FPInterval {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for vector pair to interval generator */
+ protected unimplementedVectorPairToInterval(
+ name: string,
+ _x: readonly (number | FPInterval)[],
+ _y: readonly (number | FPInterval)[]
+ ): FPInterval {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for vector to vector generator */
+ protected unimplementedVectorToVector(
+ name: string,
+ _x: readonly (number | FPInterval)[]
+ ): FPVector {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for vector pair to vector generator */
+ protected unimplementedVectorPairToVector(
+ name: string,
+ _x: readonly (number | FPInterval)[],
+ _y: readonly (number | FPInterval)[]
+ ): FPVector {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for vector-scalar to vector generator */
+ protected unimplementedVectorScalarToVector(
+ name: string,
+ _x: readonly (number | FPInterval)[],
+ _y: number | FPInterval
+ ): FPVector {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for scalar-vector to vector generator */
+ protected unimplementedScalarVectorToVector(
+ name: string,
+ _x: number | FPInterval,
+ _y: (number | FPInterval)[]
+ ): FPVector {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for matrix to interval generator */
+ protected unimplementedMatrixToInterval(name: string, _x: Array2D<number>): FPInterval {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+  /** Stub for matrix to matrix generator */
+ protected unimplementedMatrixToMatrix(name: string, _x: Array2D<number>): FPMatrix {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for matrix pair to matrix generator */
+ protected unimplementedMatrixPairToMatrix(
+ name: string,
+ _x: Array2D<number>,
+ _y: Array2D<number>
+ ): FPMatrix {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for matrix-scalar to matrix generator */
+ protected unimplementedMatrixScalarToMatrix(
+ name: string,
+ _x: Array2D<number>,
+ _y: number | FPInterval
+ ): FPMatrix {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for scalar-matrix to matrix generator */
+ protected unimplementedScalarMatrixToMatrix(
+ name: string,
+ _x: number | FPInterval,
+ _y: Array2D<number>
+ ): FPMatrix {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for matrix-vector to vector generator */
+ protected unimplementedMatrixVectorToVector(
+ name: string,
+ _x: Array2D<number>,
+ _y: readonly (number | FPInterval)[]
+ ): FPVector {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for vector-matrix to vector generator */
+ protected unimplementedVectorMatrixToVector(
+ name: string,
+ _x: readonly (number | FPInterval)[],
+ _y: Array2D<number>
+ ): FPVector {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for distance generator */
+ protected unimplementedDistance(
+ _x: number | readonly number[],
+ _y: number | readonly number[]
+ ): FPInterval {
+ unreachable(`'distance' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for faceForward */
+ protected unimplementedFaceForward(
+ _x: readonly number[],
+ _y: readonly number[],
+ _z: readonly number[]
+ ): (FPVector | undefined)[] {
+ unreachable(`'faceForward' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for length generator */
+ protected unimplementedLength(
+ _x: number | FPInterval | readonly number[] | FPVector
+ ): FPInterval {
+ unreachable(`'length' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for modf generator */
+ protected unimplementedModf(_x: number): { fract: FPInterval; whole: FPInterval } {
+ unreachable(`'modf' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for refract generator */
+ protected unimplementedRefract(
+ _i: readonly number[],
+ _s: readonly number[],
+ _r: number
+ ): FPVector {
+ unreachable(`'refract' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Version of absoluteErrorInterval that always returns the unboundedInterval */
+ protected unboundedAbsoluteErrorInterval(_n: number, _error_range: number): FPInterval {
+ return this.constants().unboundedInterval;
+ }
+
+ /** Version of ulpInterval that always returns the unboundedInterval */
+ protected unboundedUlpInterval(_n: number, _numULP: number): FPInterval {
+ return this.constants().unboundedInterval;
+ }
+
+ // Utilities - Defined by subclass
+ /**
+ * @returns the nearest precise value to the input. Rounding should be IEEE
+ * 'roundTiesToEven'.
+ */
+ public abstract readonly quantize: (n: number) => number;
+ /** @returns all valid roundings of input */
+ public abstract readonly correctlyRounded: (n: number) => readonly number[];
+ /** @returns true if input is considered finite, otherwise false */
+ public abstract readonly isFinite: (n: number) => boolean;
+ /** @returns true if input is considered subnormal, otherwise false */
+ public abstract readonly isSubnormal: (n: number) => boolean;
+  /** @returns 0 if the provided number is subnormal, otherwise returns the provided number */
+ public abstract readonly flushSubnormal: (n: number) => number;
+  /** @returns the magnitude of 1 ULP at the target value, for the given flush mode */
+ public abstract readonly oneULP: (target: number, mode?: FlushMode) => number;
+ /** @returns a builder for converting numbers to Scalars */
+ public abstract readonly scalarBuilder: (n: number) => Scalar;
+
+ // Framework - Cases
+
+ /**
+ * @returns a Case for the param and the interval generator provided.
+ * The Case will use an interval comparator for matching results.
+ * @param param the param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ private makeScalarToIntervalCase(
+ param: number,
+ filter: IntervalFilter,
+ ...ops: ScalarToInterval[]
+ ): Case | undefined {
+ param = this.quantize(param);
+
+ const intervals = ops.map(o => o(param));
+ if (filter === 'finite' && intervals.some(i => !i.isFinite())) {
+ return undefined;
+ }
+ return { input: [this.scalarBuilder(param)], expected: anyOf(...intervals) };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param params array of inputs to try
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ public generateScalarToIntervalCases(
+ params: readonly number[],
+ filter: IntervalFilter,
+ ...ops: ScalarToInterval[]
+ ): Case[] {
+ return params.reduce((cases, e) => {
+ const c = this.makeScalarToIntervalCase(e, filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ return cases;
+ }, new Array<Case>());
+ }
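+
+  // For example (informal sketch; the identity accuracy below is invented
+  // purely for illustration):
+  //   const cases = FP.f32.generateScalarToIntervalCases(
+  //     [0, 0.5, 1],
+  //     'finite',
+  //     (x: number) => FP.f32.toInterval(x)
+  //   );
+  // produces one Case per input, whose expectation is an interval comparator.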
+
+ /**
+ * @returns a Case for the params and the interval generator provided.
+ * The Case will use an interval comparator for matching results.
+ * @param param0 the first param to pass in
+ * @param param1 the second param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ private makeScalarPairToIntervalCase(
+ param0: number,
+ param1: number,
+ filter: IntervalFilter,
+ ...ops: ScalarPairToInterval[]
+ ): Case | undefined {
+ param0 = this.quantize(param0);
+ param1 = this.quantize(param1);
+
+ const intervals = ops.map(o => o(param0, param1));
+ if (filter === 'finite' && intervals.some(i => !i.isFinite())) {
+ return undefined;
+ }
+ return {
+ input: [this.scalarBuilder(param0), this.scalarBuilder(param1)],
+ expected: anyOf(...intervals),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param param0s array of inputs to try for the first input
+ * @param param1s array of inputs to try for the second input
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ public generateScalarPairToIntervalCases(
+ param0s: readonly number[],
+ param1s: readonly number[],
+ filter: IntervalFilter,
+ ...ops: ScalarPairToInterval[]
+ ): Case[] {
+ return cartesianProduct(param0s, param1s).reduce((cases, e) => {
+ const c = this.makeScalarPairToIntervalCase(e[0], e[1], filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ return cases;
+ }, new Array<Case>());
+ }
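+
+  // For example (informal sketch, with an invented accuracy): every pairing of
+  // the two input lists becomes a Case via cartesianProduct:
+  //   FP.f32.generateScalarPairToIntervalCases(
+  //     [0, 1],
+  //     [2, 3],
+  //     'finite',
+  //     (x: number, y: number) => FP.f32.toInterval([x, x + y])
+  //   );  // 4 cases: (0,2), (0,3), (1,2), (1,3)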
+
+ /**
+ * @returns a Case for the params and the interval generator provided.
+ * The Case will use an interval comparator for matching results.
+ * @param param0 the first param to pass in
+ * @param param1 the second param to pass in
+ * @param param2 the third param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ public makeScalarTripleToIntervalCase(
+ param0: number,
+ param1: number,
+ param2: number,
+ filter: IntervalFilter,
+ ...ops: ScalarTripleToInterval[]
+ ): Case | undefined {
+ param0 = this.quantize(param0);
+ param1 = this.quantize(param1);
+ param2 = this.quantize(param2);
+
+ const intervals = ops.map(o => o(param0, param1, param2));
+ if (filter === 'finite' && intervals.some(i => !i.isFinite())) {
+ return undefined;
+ }
+ return {
+ input: [this.scalarBuilder(param0), this.scalarBuilder(param1), this.scalarBuilder(param2)],
+ expected: anyOf(...intervals),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param param0s array of inputs to try for the first input
+ * @param param1s array of inputs to try for the second input
+ * @param param2s array of inputs to try for the third input
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ public generateScalarTripleToIntervalCases(
+ param0s: readonly number[],
+ param1s: readonly number[],
+ param2s: readonly number[],
+ filter: IntervalFilter,
+ ...ops: ScalarTripleToInterval[]
+ ): Case[] {
+ return cartesianProduct(param0s, param1s, param2s).reduce((cases, e) => {
+ const c = this.makeScalarTripleToIntervalCase(e[0], e[1], e[2], filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ return cases;
+ }, new Array<Case>());
+ }
+
+ /**
+ * @returns a Case for the params and the interval generator provided.
+ * The Case will use an interval comparator for matching results.
+ * @param param the param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ private makeVectorToIntervalCase(
+ param: readonly number[],
+ filter: IntervalFilter,
+ ...ops: VectorToInterval[]
+ ): Case | undefined {
+ param = param.map(this.quantize);
+
+ const intervals = ops.map(o => o(param));
+ if (filter === 'finite' && intervals.some(i => !i.isFinite())) {
+ return undefined;
+ }
+ return {
+ input: [toVector(param, this.scalarBuilder)],
+ expected: anyOf(...intervals),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param params array of inputs to try
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ public generateVectorToIntervalCases(
+ params: ROArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: VectorToInterval[]
+ ): Case[] {
+ return params.reduce((cases, e) => {
+ const c = this.makeVectorToIntervalCase(e, filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ return cases;
+ }, new Array<Case>());
+ }
+
+ /**
+ * @returns a Case for the params and the interval generator provided.
+ * The Case will use an interval comparator for matching results.
+ * @param param0 the first param to pass in
+ * @param param1 the second param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ private makeVectorPairToIntervalCase(
+ param0: readonly number[],
+ param1: readonly number[],
+ filter: IntervalFilter,
+ ...ops: VectorPairToInterval[]
+ ): Case | undefined {
+ param0 = param0.map(this.quantize);
+ param1 = param1.map(this.quantize);
+
+ const intervals = ops.map(o => o(param0, param1));
+ if (filter === 'finite' && intervals.some(i => !i.isFinite())) {
+ return undefined;
+ }
+ return {
+ input: [toVector(param0, this.scalarBuilder), toVector(param1, this.scalarBuilder)],
+ expected: anyOf(...intervals),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param param0s array of inputs to try for the first input
+ * @param param1s array of inputs to try for the second input
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ public generateVectorPairToIntervalCases(
+ param0s: ROArrayArray<number>,
+ param1s: ROArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: VectorPairToInterval[]
+ ): Case[] {
+ return cartesianProduct(param0s, param1s).reduce((cases, e) => {
+ const c = this.makeVectorPairToIntervalCase(e[0], e[1], filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ return cases;
+ }, new Array<Case>());
+ }
+
+ /**
+ * @returns a Case for the param and vector of intervals generator provided
+ * @param param the param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a vector of acceptance
+ * intervals.
+ */
+ private makeVectorToVectorCase(
+ param: readonly number[],
+ filter: IntervalFilter,
+ ...ops: VectorToVector[]
+ ): Case | undefined {
+ param = param.map(this.quantize);
+
+ const vectors = ops.map(o => o(param));
+ if (filter === 'finite' && vectors.some(v => v.some(e => !e.isFinite()))) {
+ return undefined;
+ }
+ return {
+ input: [toVector(param, this.scalarBuilder)],
+ expected: anyOf(...vectors),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param params array of inputs to try
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a vector of acceptance
+ * intervals.
+ */
+ public generateVectorToVectorCases(
+ params: ROArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: VectorToVector[]
+ ): Case[] {
+ return params.reduce((cases, e) => {
+ const c = this.makeVectorToVectorCase(e, filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ return cases;
+ }, new Array<Case>());
+ }
+
+ /**
+ * @returns a Case for the params and the interval vector generator provided.
+ * The Case will use an interval comparator for matching results.
+ * @param scalar the scalar param to pass in
+ * @param vector the vector param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a vector of acceptance intervals
+ */
+ private makeScalarVectorToVectorCase(
+ scalar: number,
+ vector: readonly number[],
+ filter: IntervalFilter,
+ ...ops: ScalarVectorToVector[]
+ ): Case | undefined {
+ scalar = this.quantize(scalar);
+ vector = vector.map(this.quantize);
+
+ const results = ops.map(o => o(scalar, vector));
+ if (filter === 'finite' && results.some(r => r.some(e => !e.isFinite()))) {
+ return undefined;
+ }
+ return {
+ input: [this.scalarBuilder(scalar), toVector(vector, this.scalarBuilder)],
+ expected: anyOf(...results),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param scalars array of scalar inputs to try
+ * @param vectors array of vector inputs to try
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a vector of acceptance intervals
+ */
+ public generateScalarVectorToVectorCases(
+ scalars: readonly number[],
+ vectors: ROArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: ScalarVectorToVector[]
+ ): Case[] {
+ // Cannot use cartesianProduct here, due to heterogeneous types
+ const cases: Case[] = [];
+ scalars.forEach(scalar => {
+ vectors.forEach(vector => {
+ const c = this.makeScalarVectorToVectorCase(scalar, vector, filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ });
+ });
+ return cases;
+ }
+
+ /**
+ * @returns a Case for the params and the interval vector generator provided.
+ * The Case will use an interval comparator for matching results.
+ * @param vector the vector param to pass in
+ * @param scalar the scalar param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a vector of acceptance intervals
+ */
+ private makeVectorScalarToVectorCase(
+ vector: readonly number[],
+ scalar: number,
+ filter: IntervalFilter,
+ ...ops: VectorScalarToVector[]
+ ): Case | undefined {
+ vector = vector.map(this.quantize);
+ scalar = this.quantize(scalar);
+
+ const results = ops.map(o => o(vector, scalar));
+ if (filter === 'finite' && results.some(r => r.some(e => !e.isFinite()))) {
+ return undefined;
+ }
+ return {
+ input: [toVector(vector, this.scalarBuilder), this.scalarBuilder(scalar)],
+ expected: anyOf(...results),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param vectors array of vector inputs to try
+ * @param scalars array of scalar inputs to try
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a vector of acceptance intervals
+ */
+ public generateVectorScalarToVectorCases(
+ vectors: ROArrayArray<number>,
+ scalars: readonly number[],
+ filter: IntervalFilter,
+ ...ops: VectorScalarToVector[]
+ ): Case[] {
+ // Cannot use cartesianProduct here, due to heterogeneous types
+ const cases: Case[] = [];
+ vectors.forEach(vector => {
+ scalars.forEach(scalar => {
+ const c = this.makeVectorScalarToVectorCase(vector, scalar, filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ });
+ });
+ return cases;
+ }
+
+ /**
+ * @returns a Case for the param and vector of intervals generator provided
+ * @param param0 the first param to pass in
+ * @param param1 the second param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a vector of acceptance
+ * intervals.
+ */
+ private makeVectorPairToVectorCase(
+ param0: readonly number[],
+ param1: readonly number[],
+ filter: IntervalFilter,
+ ...ops: VectorPairToVector[]
+ ): Case | undefined {
+ param0 = param0.map(this.quantize);
+ param1 = param1.map(this.quantize);
+ const vectors = ops.map(o => o(param0, param1));
+ if (filter === 'finite' && vectors.some(v => v.some(e => !e.isFinite()))) {
+ return undefined;
+ }
+ return {
+ input: [toVector(param0, this.scalarBuilder), toVector(param1, this.scalarBuilder)],
+ expected: anyOf(...vectors),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param param0s array of inputs to try for the first input
+ * @param param1s array of inputs to try for the second input
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a vector of acceptance
+ * intervals.
+ */
+ public generateVectorPairToVectorCases(
+ param0s: ROArrayArray<number>,
+ param1s: ROArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: VectorPairToVector[]
+ ): Case[] {
+ return cartesianProduct(param0s, param1s).reduce((cases, e) => {
+ const c = this.makeVectorPairToVectorCase(e[0], e[1], filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ return cases;
+ }, new Array<Case>());
+ }
+
+ /**
+ * @returns a Case for the params and the component-wise interval generator provided.
+ * The Case will use an interval comparator for matching results.
+ * @param param0 the first vector param to pass in
+ * @param param1 the second vector param to pass in
+ * @param param2 the scalar param to pass in
+ * @param filter what interval filtering to apply
+ * @param componentWiseOps callbacks that implement generating a component-wise acceptance interval,
+ * one component result at a time.
+ */
+ private makeVectorPairScalarToVectorComponentWiseCase(
+ param0: readonly number[],
+ param1: readonly number[],
+ param2: number,
+ filter: IntervalFilter,
+ ...componentWiseOps: ScalarTripleToInterval[]
+ ): Case | undefined {
+ // Width of input vector
+ const width = param0.length;
+    assert(2 <= width && width <= 4, 'input vector width must be between 2 and 4');
+ assert(param1.length === width, 'two input vectors must have the same width');
+ param0 = param0.map(this.quantize);
+ param1 = param1.map(this.quantize);
+ param2 = this.quantize(param2);
+
+ // Call the component-wise interval generator and build the expectation FPVector
+ const results = componentWiseOps.map(o => {
+ return param0.map((el0, index) => o(el0, param1[index], param2)) as FPVector;
+ });
+ if (filter === 'finite' && results.some(r => r.some(e => !e.isFinite()))) {
+ return undefined;
+ }
+ return {
+ input: [
+ toVector(param0, this.scalarBuilder),
+ toVector(param1, this.scalarBuilder),
+ this.scalarBuilder(param2),
+ ],
+ expected: anyOf(...results),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param param0s array of first vector inputs to try
+ * @param param1s array of second vector inputs to try
+ * @param param2s array of scalar inputs to try
+ * @param filter what interval filtering to apply
+   * @param componentWiseOps callbacks that implement generating a component-wise acceptance interval
+ */
+ public generateVectorPairScalarToVectorComponentWiseCase(
+ param0s: ROArrayArray<number>,
+ param1s: ROArrayArray<number>,
+ param2s: readonly number[],
+ filter: IntervalFilter,
+ ...componentWiseOps: ScalarTripleToInterval[]
+ ): Case[] {
+ // Cannot use cartesianProduct here, due to heterogeneous types
+ const cases: Case[] = [];
+ param0s.forEach(param0 => {
+ param1s.forEach(param1 => {
+ param2s.forEach(param2 => {
+ const c = this.makeVectorPairScalarToVectorComponentWiseCase(
+ param0,
+ param1,
+ param2,
+ filter,
+ ...componentWiseOps
+ );
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ });
+ });
+ });
+ return cases;
+ }
+
+ /**
+ * @returns a Case for the param and an array of interval generators provided
+ * @param param the param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ private makeMatrixToScalarCase(
+ param: ROArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: MatrixToScalar[]
+ ): Case | undefined {
+ param = map2DArray(param, this.quantize);
+
+ const results = ops.map(o => o(param));
+ if (filter === 'finite' && results.some(e => !e.isFinite())) {
+ return undefined;
+ }
+
+ return {
+ input: [toMatrix(param, this.scalarBuilder)],
+ expected: anyOf(...results),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param params array of inputs to try
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ public generateMatrixToScalarCases(
+ params: ROArrayArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: MatrixToScalar[]
+ ): Case[] {
+ return params.reduce((cases, e) => {
+ const c = this.makeMatrixToScalarCase(e, filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ return cases;
+ }, new Array<Case>());
+ }
+
+ /**
+ * @returns a Case for the param and an array of interval generators provided
+ * @param param the param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a matrix of acceptance
+ * intervals
+ */
+ private makeMatrixToMatrixCase(
+ param: ROArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: MatrixToMatrix[]
+ ): Case | undefined {
+ param = map2DArray(param, this.quantize);
+
+ const results = ops.map(o => o(param));
+ if (filter === 'finite' && results.some(m => m.some(c => c.some(r => !r.isFinite())))) {
+ return undefined;
+ }
+
+ return {
+ input: [toMatrix(param, this.scalarBuilder)],
+ expected: anyOf(...results),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param params array of inputs to try
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a matrix of acceptance
+ * intervals
+ */
+ public generateMatrixToMatrixCases(
+ params: ROArrayArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: MatrixToMatrix[]
+ ): Case[] {
+ return params.reduce((cases, e) => {
+ const c = this.makeMatrixToMatrixCase(e, filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ return cases;
+ }, new Array<Case>());
+ }
+
+ /**
+ * @returns a Case for the params and matrix of intervals generator provided
+ * @param param0 the first param to pass in
+ * @param param1 the second param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a matrix of acceptance
+ * intervals
+ */
+ private makeMatrixPairToMatrixCase(
+ param0: ROArrayArray<number>,
+ param1: ROArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: MatrixPairToMatrix[]
+ ): Case | undefined {
+ param0 = map2DArray(param0, this.quantize);
+ param1 = map2DArray(param1, this.quantize);
+
+ const results = ops.map(o => o(param0, param1));
+ if (filter === 'finite' && results.some(m => m.some(c => c.some(r => !r.isFinite())))) {
+ return undefined;
+ }
+ return {
+ input: [toMatrix(param0, this.scalarBuilder), toMatrix(param1, this.scalarBuilder)],
+ expected: anyOf(...results),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param param0s array of inputs to try for the first input
+ * @param param1s array of inputs to try for the second input
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a matrix of acceptance
+ * intervals
+ */
+ public generateMatrixPairToMatrixCases(
+ param0s: ROArrayArrayArray<number>,
+ param1s: ROArrayArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: MatrixPairToMatrix[]
+ ): Case[] {
+ return cartesianProduct(param0s, param1s).reduce((cases, e) => {
+ const c = this.makeMatrixPairToMatrixCase(e[0], e[1], filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ return cases;
+ }, new Array<Case>());
+ }
+
+ /**
+ * @returns a Case for the params and matrix of intervals generator provided
+ * @param mat the matrix param to pass in
+ * @param scalar the scalar to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a matrix of acceptance
+ * intervals
+ */
+ private makeMatrixScalarToMatrixCase(
+ mat: ROArrayArray<number>,
+ scalar: number,
+ filter: IntervalFilter,
+ ...ops: MatrixScalarToMatrix[]
+ ): Case | undefined {
+ mat = map2DArray(mat, this.quantize);
+ scalar = this.quantize(scalar);
+
+ const results = ops.map(o => o(mat, scalar));
+ if (filter === 'finite' && results.some(m => m.some(c => c.some(r => !r.isFinite())))) {
+ return undefined;
+ }
+ return {
+ input: [toMatrix(mat, this.scalarBuilder), this.scalarBuilder(scalar)],
+ expected: anyOf(...results),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param mats array of inputs to try for the matrix input
+ * @param scalars array of inputs to try for the scalar input
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a matrix of acceptance
+ * intervals
+ */
+ public generateMatrixScalarToMatrixCases(
+ mats: ROArrayArrayArray<number>,
+ scalars: readonly number[],
+ filter: IntervalFilter,
+ ...ops: MatrixScalarToMatrix[]
+ ): Case[] {
+ // Cannot use cartesianProduct here, due to heterogeneous types
+ const cases: Case[] = [];
+ mats.forEach(mat => {
+ scalars.forEach(scalar => {
+ const c = this.makeMatrixScalarToMatrixCase(mat, scalar, filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ });
+ });
+ return cases;
+ }
+
+ /**
+ * @returns a Case for the params and matrix of intervals generator provided
+ * @param scalar the scalar to pass in
+ * @param mat the matrix param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a matrix of acceptance
+ * intervals
+ */
+ private makeScalarMatrixToMatrixCase(
+ scalar: number,
+ mat: ROArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: ScalarMatrixToMatrix[]
+ ): Case | undefined {
+ scalar = this.quantize(scalar);
+ mat = map2DArray(mat, this.quantize);
+
+ const results = ops.map(o => o(scalar, mat));
+ if (filter === 'finite' && results.some(m => m.some(c => c.some(r => !r.isFinite())))) {
+ return undefined;
+ }
+ return {
+ input: [this.scalarBuilder(scalar), toMatrix(mat, this.scalarBuilder)],
+ expected: anyOf(...results),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param scalars array of inputs to try for the scalar input
+ * @param mats array of inputs to try for the matrix input
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a matrix of acceptance
+ * intervals
+ */
+ public generateScalarMatrixToMatrixCases(
+ scalars: readonly number[],
+ mats: ROArrayArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: ScalarMatrixToMatrix[]
+ ): Case[] {
+ // Cannot use cartesianProduct here, due to heterogeneous types
+ const cases: Case[] = [];
+ mats.forEach(mat => {
+ scalars.forEach(scalar => {
+ const c = this.makeScalarMatrixToMatrixCase(scalar, mat, filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ });
+ });
+ return cases;
+ }
+
+ /**
+ * @returns a Case for the params and the vector of intervals generator provided
+ * @param mat the matrix param to pass in
+ * @param vec the vector to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a vector of acceptance
+ * intervals
+ */
+ private makeMatrixVectorToVectorCase(
+ mat: ROArrayArray<number>,
+ vec: readonly number[],
+ filter: IntervalFilter,
+ ...ops: MatrixVectorToVector[]
+ ): Case | undefined {
+ mat = map2DArray(mat, this.quantize);
+ vec = vec.map(this.quantize);
+
+ const results = ops.map(o => o(mat, vec));
+ if (filter === 'finite' && results.some(v => v.some(e => !e.isFinite()))) {
+ return undefined;
+ }
+ return {
+ input: [toMatrix(mat, this.scalarBuilder), toVector(vec, this.scalarBuilder)],
+ expected: anyOf(...results),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param mats array of inputs to try for the matrix input
+ * @param vecs array of inputs to try for the vector input
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a vector of acceptance
+ * intervals
+ */
+ public generateMatrixVectorToVectorCases(
+ mats: ROArrayArrayArray<number>,
+ vecs: ROArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: MatrixVectorToVector[]
+ ): Case[] {
+ // Cannot use cartesianProduct here, due to heterogeneous types
+ const cases: Case[] = [];
+ mats.forEach(mat => {
+ vecs.forEach(vec => {
+ const c = this.makeMatrixVectorToVectorCase(mat, vec, filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ });
+ });
+ return cases;
+ }
+
+ /**
+ * @returns a Case for the params and the vector of intervals generator provided
+ * @param vec the vector to pass in
+ * @param mat the matrix param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a vector of acceptance
+ * intervals
+ */
+ private makeVectorMatrixToVectorCase(
+ vec: readonly number[],
+ mat: ROArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: VectorMatrixToVector[]
+ ): Case | undefined {
+ vec = vec.map(this.quantize);
+ mat = map2DArray(mat, this.quantize);
+
+ const results = ops.map(o => o(vec, mat));
+ if (filter === 'finite' && results.some(v => v.some(e => !e.isFinite()))) {
+ return undefined;
+ }
+ return {
+ input: [toVector(vec, this.scalarBuilder), toMatrix(mat, this.scalarBuilder)],
+ expected: anyOf(...results),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param vecs array of inputs to try for the vector input
+ * @param mats array of inputs to try for the matrix input
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a vector of acceptance
+ * intervals
+ */
+ public generateVectorMatrixToVectorCases(
+ vecs: ROArrayArray<number>,
+ mats: ROArrayArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: VectorMatrixToVector[]
+ ): Case[] {
+ // Cannot use cartesianProduct here, due to heterogeneous types
+ const cases: Case[] = [];
+ vecs.forEach(vec => {
+ mats.forEach(mat => {
+ const c = this.makeVectorMatrixToVectorCase(vec, mat, filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ });
+ });
+ return cases;
+ }
+
+ // Framework - Intervals
+
+ /**
+ * Converts a point to an acceptance interval, using a specific function
+ *
+ * This handles correctly rounding and flushing inputs as needed.
+ * Duplicate inputs are pruned before invoking op.impl.
+ * op.extrema is invoked before this point in the call stack.
+ * op.domain is tested before this point in the call stack.
+ *
+ * @param n value to flush & round then invoke op.impl on
+ * @param op operation defining the function being run
+ * @returns a span over all the outputs of op.impl
+ */
+ private roundAndFlushScalarToInterval(n: number, op: ScalarToIntervalOp) {
+ assert(!Number.isNaN(n), `flush not defined for NaN`);
+ const values = this.correctlyRounded(n);
+ const inputs = this.addFlushedIfNeeded(values);
+ const results = new Set<FPInterval>(inputs.map(op.impl));
+ return this.spanIntervals(...results);
+ }
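+
+  // Worked micro-example (informal): for an f32 subnormal input n,
+  // correctlyRounded(n) yields just n (it is exactly representable),
+  // addFlushedIfNeeded appends 0 because n may be flushed, op.impl is invoked
+  // on both n and 0, and the span of the two resulting intervals is returned,
+  // so both flushing and non-flushing implementations are accepted.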
+
+ /**
+ * Converts a pair to an acceptance interval, using a specific function
+ *
+ * This handles correctly rounding and flushing inputs as needed.
+ * Duplicate inputs are pruned before invoking op.impl.
+ * All unique combinations of x & y are run.
+ * op.extrema is invoked before this point in the call stack.
+ * op.domain is tested before this point in the call stack.
+ *
+ * @param x first param to flush & round then invoke op.impl on
+ * @param y second param to flush & round then invoke op.impl on
+ * @param op operation defining the function being run
+ * @returns a span over all the outputs of op.impl
+ */
+ private roundAndFlushScalarPairToInterval(
+ x: number,
+ y: number,
+ op: ScalarPairToIntervalOp
+ ): FPInterval {
+ assert(!Number.isNaN(x), `flush not defined for NaN`);
+ assert(!Number.isNaN(y), `flush not defined for NaN`);
+ const x_values = this.correctlyRounded(x);
+ const y_values = this.correctlyRounded(y);
+ const x_inputs = this.addFlushedIfNeeded(x_values);
+ const y_inputs = this.addFlushedIfNeeded(y_values);
+ const intervals = new Set<FPInterval>();
+ x_inputs.forEach(inner_x => {
+ y_inputs.forEach(inner_y => {
+ intervals.add(op.impl(inner_x, inner_y));
+ });
+ });
+ return this.spanIntervals(...intervals);
+ }
+
+ /**
+ * Converts a triplet to an acceptance interval, using a specific function
+ *
+ * This handles correctly rounding and flushing inputs as needed.
+ * Duplicate inputs are pruned before invoking op.impl.
+ * All unique combinations of x, y & z are run.
+ *
+ * @param x first param to flush & round then invoke op.impl on
+ * @param y second param to flush & round then invoke op.impl on
+ * @param z third param to flush & round then invoke op.impl on
+ * @param op operation defining the function being run
+ * @returns a span over all the outputs of op.impl
+ */
+ private roundAndFlushScalarTripleToInterval(
+ x: number,
+ y: number,
+ z: number,
+ op: ScalarTripleToIntervalOp
+ ): FPInterval {
+ assert(!Number.isNaN(x), `flush not defined for NaN`);
+ assert(!Number.isNaN(y), `flush not defined for NaN`);
+ assert(!Number.isNaN(z), `flush not defined for NaN`);
+ const x_values = this.correctlyRounded(x);
+ const y_values = this.correctlyRounded(y);
+ const z_values = this.correctlyRounded(z);
+ const x_inputs = this.addFlushedIfNeeded(x_values);
+ const y_inputs = this.addFlushedIfNeeded(y_values);
+ const z_inputs = this.addFlushedIfNeeded(z_values);
+ const intervals = new Set<FPInterval>();
+ // prettier-ignore
+ x_inputs.forEach(inner_x => {
+ y_inputs.forEach(inner_y => {
+ z_inputs.forEach(inner_z => {
+ intervals.add(op.impl(inner_x, inner_y, inner_z));
+ });
+ });
+ });
+
+ return this.spanIntervals(...intervals);
+ }
+
+ /**
+ * Converts a vector to an acceptance interval using a specific function
+ *
+ * This handles correctly rounding and flushing inputs as needed.
+ * Duplicate inputs are pruned before invoking op.impl.
+ *
+ * @param x param to flush & round then invoke op.impl on
+ * @param op operation defining the function being run
+ * @returns a span over all the outputs of op.impl
+ */
+ private roundAndFlushVectorToInterval(x: readonly number[], op: VectorToIntervalOp): FPInterval {
+ assert(
+ x.every(e => !Number.isNaN(e)),
+ `flush not defined for NaN`
+ );
+
+ const x_rounded: ROArrayArray<number> = x.map(this.correctlyRounded);
+ const x_flushed: ROArrayArray<number> = x_rounded.map(this.addFlushedIfNeeded.bind(this));
+ const x_inputs = cartesianProduct<number>(...x_flushed);
+
+ const intervals = new Set<FPInterval>();
+ x_inputs.forEach(inner_x => {
+ intervals.add(op.impl(inner_x));
+ });
+ return this.spanIntervals(...intervals);
+ }
+
+ /**
+ * Converts a pair of vectors to an acceptance interval using a specific
+ * function
+ *
+ * This handles correctly rounding and flushing inputs as needed.
+ * Duplicate inputs are pruned before invoking op.impl.
+ * All unique combinations of x & y are run.
+ *
+ * @param x first param to flush & round then invoke op.impl on
+ * @param y second param to flush & round then invoke op.impl on
+ * @param op operation defining the function being run
+ * @returns a span over all the outputs of op.impl
+ */
+ private roundAndFlushVectorPairToInterval(
+ x: readonly number[],
+ y: readonly number[],
+ op: VectorPairToIntervalOp
+ ): FPInterval {
+ assert(
+ x.every(e => !Number.isNaN(e)),
+ `flush not defined for NaN`
+ );
+ assert(
+ y.every(e => !Number.isNaN(e)),
+ `flush not defined for NaN`
+ );
+
+ const x_rounded: ROArrayArray<number> = x.map(this.correctlyRounded);
+ const y_rounded: ROArrayArray<number> = y.map(this.correctlyRounded);
+ const x_flushed: ROArrayArray<number> = x_rounded.map(this.addFlushedIfNeeded.bind(this));
+ const y_flushed: ROArrayArray<number> = y_rounded.map(this.addFlushedIfNeeded.bind(this));
+ const x_inputs = cartesianProduct<number>(...x_flushed);
+ const y_inputs = cartesianProduct<number>(...y_flushed);
+
+ const intervals = new Set<FPInterval>();
+ x_inputs.forEach(inner_x => {
+ y_inputs.forEach(inner_y => {
+ intervals.add(op.impl(inner_x, inner_y));
+ });
+ });
+ return this.spanIntervals(...intervals);
+ }
+
+ /**
+ * Converts a vector to a vector of acceptance intervals using a specific
+ * function
+ *
+ * This handles correctly rounding and flushing inputs as needed.
+ * Duplicate inputs are pruned before invoking op.impl.
+ *
+ * @param x param to flush & round then invoke op.impl on
+ * @param op operation defining the function being run
+   * @returns a vector of spans for each output of op.impl
+ */
+ private roundAndFlushVectorToVector(x: readonly number[], op: VectorToVectorOp): FPVector {
+ assert(
+ x.every(e => !Number.isNaN(e)),
+ `flush not defined for NaN`
+ );
+
+ const x_rounded: ROArrayArray<number> = x.map(this.correctlyRounded);
+ const x_flushed: ROArrayArray<number> = x_rounded.map(this.addFlushedIfNeeded.bind(this));
+ const x_inputs = cartesianProduct<number>(...x_flushed);
+
+ const interval_vectors = new Set<FPVector>();
+ x_inputs.forEach(inner_x => {
+ interval_vectors.add(op.impl(inner_x));
+ });
+
+ return this.spanVectors(...interval_vectors);
+ }
+
+ /**
+ * Converts a pair of vectors to a vector of acceptance intervals using a
+ * specific function
+ *
+ * This handles correctly rounding and flushing inputs as needed.
+ * Duplicate inputs are pruned before invoking op.impl.
+ *
+ * @param x first param to flush & round then invoke op.impl on
+ * @param y second param to flush & round then invoke op.impl on
+ * @param op operation defining the function being run
+ * @returns a vector of spans for each output of op.impl
+ */
+ private roundAndFlushVectorPairToVector(
+ x: readonly number[],
+ y: readonly number[],
+ op: VectorPairToVectorOp
+ ): FPVector {
+ assert(
+ x.every(e => !Number.isNaN(e)),
+ `flush not defined for NaN`
+ );
+ assert(
+ y.every(e => !Number.isNaN(e)),
+ `flush not defined for NaN`
+ );
+
+ const x_rounded: ROArrayArray<number> = x.map(this.correctlyRounded);
+ const y_rounded: ROArrayArray<number> = y.map(this.correctlyRounded);
+ const x_flushed: ROArrayArray<number> = x_rounded.map(this.addFlushedIfNeeded.bind(this));
+ const y_flushed: ROArrayArray<number> = y_rounded.map(this.addFlushedIfNeeded.bind(this));
+ const x_inputs = cartesianProduct<number>(...x_flushed);
+ const y_inputs = cartesianProduct<number>(...y_flushed);
+
+ const interval_vectors = new Set<FPVector>();
+ x_inputs.forEach(inner_x => {
+ y_inputs.forEach(inner_y => {
+ interval_vectors.add(op.impl(inner_x, inner_y));
+ });
+ });
+
+ return this.spanVectors(...interval_vectors);
+ }
+
+ /**
+ * Converts a matrix to a matrix of acceptance intervals using a specific
+ * function
+ *
+ * This handles correctly rounding and flushing inputs as needed.
+ * Duplicate inputs are pruned before invoking op.impl.
+ *
+ * @param m param to flush & round then invoke op.impl on
+ * @param op operation defining the function being run
+   * @returns a matrix of spans for each output of op.impl
+ */
+ private roundAndFlushMatrixToMatrix(m: Array2D<number>, op: MatrixToMatrixOp): FPMatrix {
+ const num_cols = m.length;
+ const num_rows = m[0].length;
+ assert(
+ m.every(c => c.every(r => !Number.isNaN(r))),
+ `flush not defined for NaN`
+ );
+
+ const m_flat = flatten2DArray(m);
+ const m_rounded: ROArrayArray<number> = m_flat.map(this.correctlyRounded);
+ const m_flushed: ROArrayArray<number> = m_rounded.map(this.addFlushedIfNeeded.bind(this));
+ const m_options: ROArrayArray<number> = cartesianProduct<number>(...m_flushed);
+ const m_inputs: ROArrayArrayArray<number> = m_options.map(e =>
+ unflatten2DArray(e, num_cols, num_rows)
+ );
+
+ const interval_matrices = new Set<FPMatrix>();
+ m_inputs.forEach(inner_m => {
+ interval_matrices.add(op.impl(inner_m));
+ });
+
+ return this.spanMatrices(...interval_matrices);
+ }
+
+ /**
+ * Calculate the acceptance interval for a unary function over an interval
+ *
+ * If the interval is actually a point, this just decays to
+ * roundAndFlushScalarToInterval.
+ *
+ * The provided domain interval may be adjusted if the operation defines an
+ * extrema function.
+ *
+ * @param x input domain interval
+ * @param op operation defining the function being run
+ * @returns a span over all the outputs of op.impl
+ */
+ protected runScalarToIntervalOp(x: FPInterval, op: ScalarToIntervalOp): FPInterval {
+ if (!x.isFinite()) {
+ return this.constants().unboundedInterval;
+ }
+
+ if (op.extrema !== undefined) {
+ x = op.extrema(x);
+ }
+
+ const result = this.spanIntervals(
+ ...x.bounds().map(b => this.roundAndFlushScalarToInterval(b, op))
+ );
+ return result.isFinite() ? result : this.constants().unboundedInterval;
+ }
+
+ /**
+ * Calculate the acceptance interval for a binary function over an interval
+ *
+ * The provided domain intervals may be adjusted if the operation defines an
+ * extrema function.
+ *
+ * @param x first input domain interval
+ * @param y second input domain interval
+ * @param op operation defining the function being run
+ * @returns a span over all the outputs of op.impl
+ */
+ protected runScalarPairToIntervalOp(
+ x: FPInterval,
+ y: FPInterval,
+ op: ScalarPairToIntervalOp
+ ): FPInterval {
+ if (!x.isFinite() || !y.isFinite()) {
+ return this.constants().unboundedInterval;
+ }
+
+ if (op.extrema !== undefined) {
+ [x, y] = op.extrema(x, y);
+ }
+
+ const outputs = new Set<FPInterval>();
+ x.bounds().forEach(inner_x => {
+ y.bounds().forEach(inner_y => {
+ outputs.add(this.roundAndFlushScalarPairToInterval(inner_x, inner_y, op));
+ });
+ });
+
+ const result = this.spanIntervals(...outputs);
+ return result.isFinite() ? result : this.constants().unboundedInterval;
+ }
+
+ /**
+ * Calculate the acceptance interval for a ternary function over an interval
+ *
+ * @param x first input domain interval
+ * @param y second input domain interval
+ * @param z third input domain interval
+ * @param op operation defining the function being run
+ * @returns a span over all the outputs of op.impl
+ */
+ protected runScalarTripleToIntervalOp(
+ x: FPInterval,
+ y: FPInterval,
+ z: FPInterval,
+ op: ScalarTripleToIntervalOp
+ ): FPInterval {
+ if (!x.isFinite() || !y.isFinite() || !z.isFinite()) {
+ return this.constants().unboundedInterval;
+ }
+
+ const outputs = new Set<FPInterval>();
+ x.bounds().forEach(inner_x => {
+ y.bounds().forEach(inner_y => {
+ z.bounds().forEach(inner_z => {
+ outputs.add(this.roundAndFlushScalarTripleToInterval(inner_x, inner_y, inner_z, op));
+ });
+ });
+ });
+
+ const result = this.spanIntervals(...outputs);
+ return result.isFinite() ? result : this.constants().unboundedInterval;
+ }
+
+ /**
+ * Calculate the acceptance interval for a vector function over given
+ * intervals
+ *
+ * @param x input domain intervals vector
+ * @param op operation defining the function being run
+ * @returns a span over all the outputs of op.impl
+ */
+ protected runVectorToIntervalOp(x: FPVector, op: VectorToIntervalOp): FPInterval {
+ if (x.some(e => !e.isFinite())) {
+ return this.constants().unboundedInterval;
+ }
+
+ const x_values = cartesianProduct<number>(...x.map(e => e.bounds()));
+
+ const outputs = new Set<FPInterval>();
+ x_values.forEach(inner_x => {
+ outputs.add(this.roundAndFlushVectorToInterval(inner_x, op));
+ });
+
+ const result = this.spanIntervals(...outputs);
+ return result.isFinite() ? result : this.constants().unboundedInterval;
+ }
+
+ /**
+ * Calculate the acceptance interval for a vector pair function over given
+ * intervals
+ *
+ * @param x first input domain intervals vector
+ * @param y second input domain intervals vector
+ * @param op operation defining the function being run
+ * @returns a span over all the outputs of op.impl
+ */
+ protected runVectorPairToIntervalOp(
+ x: FPVector,
+ y: FPVector,
+ op: VectorPairToIntervalOp
+ ): FPInterval {
+ if (x.some(e => !e.isFinite()) || y.some(e => !e.isFinite())) {
+ return this.constants().unboundedInterval;
+ }
+
+ const x_values = cartesianProduct<number>(...x.map(e => e.bounds()));
+ const y_values = cartesianProduct<number>(...y.map(e => e.bounds()));
+
+ const outputs = new Set<FPInterval>();
+ x_values.forEach(inner_x => {
+ y_values.forEach(inner_y => {
+ outputs.add(this.roundAndFlushVectorPairToInterval(inner_x, inner_y, op));
+ });
+ });
+
+ const result = this.spanIntervals(...outputs);
+ return result.isFinite() ? result : this.constants().unboundedInterval;
+ }
+
+ /**
+   * Calculate the vector of acceptance intervals for a vector function over
+   * given intervals
+ *
+ * @param x input domain intervals vector
+ * @param op operation defining the function being run
+ * @returns a vector of spans over all the outputs of op.impl
+ */
+ protected runVectorToVectorOp(x: FPVector, op: VectorToVectorOp): FPVector {
+ if (x.some(e => !e.isFinite())) {
+ return this.constants().unboundedVector[x.length];
+ }
+
+ const x_values = cartesianProduct<number>(...x.map(e => e.bounds()));
+
+ const outputs = new Set<FPVector>();
+ x_values.forEach(inner_x => {
+ outputs.add(this.roundAndFlushVectorToVector(inner_x, op));
+ });
+
+ const result = this.spanVectors(...outputs);
+ return result.every(e => e.isFinite())
+ ? result
+ : this.constants().unboundedVector[result.length];
+ }
+
+ /**
+ * Calculate the vector of acceptance intervals by running a scalar operation
+ * component-wise over a vector.
+ *
+ * This is used for situations where a component-wise operation, like vector
+ * negation, is needed as part of an inherited accuracy, but the top-level
+   * operation tests don't require an explicit vector definition of the
+   * function, because the generated 'vectorize' tests are sufficient.
+ *
+ * @param x input domain intervals vector
+ * @param op scalar operation to be run component-wise
+ * @returns a vector of intervals with the outputs of op.impl
+ */
+ protected runScalarToIntervalOpComponentWise(x: FPVector, op: ScalarToIntervalOp): FPVector {
+ return this.toVector(x.map(e => this.runScalarToIntervalOp(e, op)));
+ }
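+
+  // Illustrative sketch (hypothetical): component-wise negation of a vector of
+  // intervals, e.g.
+  //   this.runScalarToIntervalOpComponentWise(this.toVector([1, -2, 3]), this.NegationIntervalOp)
+  // produces a vector of per-component acceptance intervals around [-1, 2, -3].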
+
+ /**
+   * Calculate the vector of acceptance intervals for a vector pair function
+   * over given intervals
+ *
+ * @param x first input domain intervals vector
+ * @param y second input domain intervals vector
+ * @param op operation defining the function being run
+ * @returns a vector of spans over all the outputs of op.impl
+ */
+ protected runVectorPairToVectorOp(x: FPVector, y: FPVector, op: VectorPairToVectorOp): FPVector {
+ if (x.some(e => !e.isFinite()) || y.some(e => !e.isFinite())) {
+ return this.constants().unboundedVector[x.length];
+ }
+
+ const x_values = cartesianProduct<number>(...x.map(e => e.bounds()));
+ const y_values = cartesianProduct<number>(...y.map(e => e.bounds()));
+
+ const outputs = new Set<FPVector>();
+ x_values.forEach(inner_x => {
+ y_values.forEach(inner_y => {
+ outputs.add(this.roundAndFlushVectorPairToVector(inner_x, inner_y, op));
+ });
+ });
+
+ const result = this.spanVectors(...outputs);
+ return result.every(e => e.isFinite())
+ ? result
+ : this.constants().unboundedVector[result.length];
+ }
+
+ /**
+ * Calculate the vector of acceptance intervals by running a scalar operation
+ * component-wise over a pair of vectors.
+ *
+ * This is used for situations where a component-wise operation, like vector
+ * subtraction, is needed as part of an inherited accuracy, but the top-level
+   * operation tests don't require an explicit vector definition of the
+   * function, because the generated 'vectorize' tests are sufficient.
+ *
+ * @param x first input domain intervals vector
+ * @param y second input domain intervals vector
+ * @param op scalar operation to be run component-wise
+ * @returns a vector of intervals with the outputs of op.impl
+ */
+ protected runScalarPairToIntervalOpVectorComponentWise(
+ x: FPVector,
+ y: FPVector,
+ op: ScalarPairToIntervalOp
+ ): FPVector {
+ assert(
+ x.length === y.length,
+ `runScalarPairToIntervalOpVectorComponentWise requires vectors of the same dimensions`
+ );
+
+ return this.toVector(
+ x.map((i, idx) => {
+ return this.runScalarPairToIntervalOp(i, y[idx], op);
+ })
+ );
+ }
+
+ /**
+   * Calculate the matrix of acceptance intervals for a matrix function over
+ * given intervals
+ *
+ * @param m input domain intervals matrix
+ * @param op operation defining the function being run
+ * @returns a matrix of spans over all the outputs of op.impl
+ */
+ protected runMatrixToMatrixOp(m: FPMatrix, op: MatrixToMatrixOp): FPMatrix {
+ const num_cols = m.length;
+ const num_rows = m[0].length;
+ if (m.some(c => c.some(r => !r.isFinite()))) {
+ return this.constants().unboundedMatrix[num_cols][num_rows];
+ }
+
+ const m_flat: readonly FPInterval[] = flatten2DArray(m);
+ const m_values: ROArrayArray<number> = cartesianProduct<number>(...m_flat.map(e => e.bounds()));
+
+ const outputs = new Set<FPMatrix>();
+ m_values.forEach(inner_m => {
+ const unflat_m = unflatten2DArray(inner_m, num_cols, num_rows);
+ outputs.add(this.roundAndFlushMatrixToMatrix(unflat_m, op));
+ });
+
+ const result = this.spanMatrices(...outputs);
+ const result_cols = result.length;
+ const result_rows = result[0].length;
+
+ // FPMatrix has to be coerced to ROArrayArray<FPInterval> to use .every. This should
+    // always be safe, since FPMatrix is defined as a fixed-length array of
+ // arrays.
+ return (result as ROArrayArray<FPInterval>).every(c => c.every(r => r.isFinite()))
+ ? result
+ : this.constants().unboundedMatrix[result_cols][result_rows];
+ }
+
+ /**
+   * Calculate the matrix of acceptance intervals by running a scalar operation
+ * component-wise over a pair of matrices.
+ *
+ * An example of this is performing matrix addition.
+ *
+ * @param x first input domain intervals matrix
+ * @param y second input domain intervals matrix
+ * @param op scalar operation to be run component-wise
+ * @returns a matrix of intervals with the outputs of op.impl
+ */
+ protected runScalarPairToIntervalOpMatrixComponentWise(
+ x: FPMatrix,
+ y: FPMatrix,
+ op: ScalarPairToIntervalOp
+ ): FPMatrix {
+ assert(
+ x.length === y.length && x[0].length === y[0].length,
+ `runScalarPairToIntervalOpMatrixComponentWise requires matrices of the same dimensions`
+ );
+
+ const cols = x.length;
+ const rows = x[0].length;
+ const flat_x = flatten2DArray(x);
+ const flat_y = flatten2DArray(y);
+
+ return this.toMatrix(
+ unflatten2DArray(
+ flat_x.map((i, idx) => {
+ return this.runScalarPairToIntervalOp(i, flat_y[idx], op);
+ }),
+ cols,
+ rows
+ )
+ );
+ }
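+
+  // Illustrative sketch (hypothetical): component-wise matrix addition, e.g.
+  //   this.runScalarPairToIntervalOpMatrixComponentWise(
+  //     this.toMatrix([[1, 2], [3, 4]]),
+  //     this.toMatrix([[5, 6], [7, 8]]),
+  //     this.AdditionIntervalOp)
+  // yields a 2x2 FPMatrix of intervals around [[6, 8], [10, 12]].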
+
+ // API - Fundamental Error Intervals
+
+ /** @returns a ScalarToIntervalOp for [n - error_range, n + error_range] */
+ private AbsoluteErrorIntervalOp(error_range: number): ScalarToIntervalOp {
+ const op: ScalarToIntervalOp = {
+ impl: (_: number) => {
+ return this.constants().unboundedInterval;
+ },
+ };
+
+ assert(
+ error_range >= 0,
+      `absoluteErrorInterval must have a non-negative error range, got ${error_range}`
+ );
+
+ if (this.isFinite(error_range)) {
+ op.impl = (n: number) => {
+ assert(!Number.isNaN(n), `absolute error not defined for NaN`);
+        // Return the unbounded interval if the given center n is infinite.
+ if (!this.isFinite(n)) {
+ return this.constants().unboundedInterval;
+ }
+ return this.toInterval([n - error_range, n + error_range]);
+ };
+ }
+
+ return op;
+ }
+
+ protected absoluteErrorIntervalImpl(n: number, error_range: number): FPInterval {
+ error_range = Math.abs(error_range);
+ return this.runScalarToIntervalOp(
+ this.toInterval(n),
+ this.AbsoluteErrorIntervalOp(error_range)
+ );
+ }
+
+ /** @returns an interval of the absolute error around the point */
+ public abstract readonly absoluteErrorInterval: (n: number, error_range: number) => FPInterval;
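+
+  // Usage sketch (illustrative; `fp32` stands for an f32 traits instance
+  // obtained from this file's exports):
+  //   fp32.absoluteErrorInterval(1.0, 2 ** -11)
+  //   // -> an acceptance interval containing [1.0 - 2**-11, 1.0 + 2**-11]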
+
+ /**
+ * Defines a ScalarToIntervalOp for an interval of the correctly rounded values
+ * around the point
+ */
+ private readonly CorrectlyRoundedIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number) => {
+      assert(!Number.isNaN(n), `correctlyRounded not defined for NaN`);
+ return this.toInterval(n);
+ },
+ };
+
+ protected correctlyRoundedIntervalImpl(n: number | FPInterval): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.CorrectlyRoundedIntervalOp);
+ }
+
+ /** @returns an interval of the correctly rounded values around the point */
+ public abstract readonly correctlyRoundedInterval: (n: number | FPInterval) => FPInterval;
+
+ protected correctlyRoundedMatrixImpl(m: Array2D<number>): FPMatrix {
+ return this.toMatrix(map2DArray(m, this.correctlyRoundedInterval));
+ }
+
+ /** @returns a matrix of correctly rounded intervals for the provided matrix */
+ public abstract readonly correctlyRoundedMatrix: (m: Array2D<number>) => FPMatrix;
+
+ /** @returns a ScalarToIntervalOp for [n - numULP * ULP(n), n + numULP * ULP(n)] */
+ private ULPIntervalOp(numULP: number): ScalarToIntervalOp {
+ const op: ScalarToIntervalOp = {
+ impl: (_: number) => {
+ return this.constants().unboundedInterval;
+ },
+ };
+
+ if (this.isFinite(numULP)) {
+ op.impl = (n: number) => {
+ assert(!Number.isNaN(n), `ULP error not defined for NaN`);
+
+ const ulp = this.oneULP(n);
+ const begin = n - numULP * ulp;
+ const end = n + numULP * ulp;
+
+ return this.toInterval([
+ Math.min(begin, this.flushSubnormal(begin)),
+ Math.max(end, this.flushSubnormal(end)),
+ ]);
+ };
+ }
+
+ return op;
+ }
+
+ protected ulpIntervalImpl(n: number, numULP: number): FPInterval {
+ numULP = Math.abs(numULP);
+ return this.runScalarToIntervalOp(this.toInterval(n), this.ULPIntervalOp(numULP));
+ }
+
+ /** @returns an interval of N * ULP around the point */
+ public abstract readonly ulpInterval: (n: number, numULP: number) => FPInterval;
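+
+  // Usage sketch (illustrative, assuming an f32 traits instance `fp32`):
+  //   fp32.ulpInterval(1.0, 2)
+  //   // -> an interval covering [1.0 - 2 * ULP(1.0), 1.0 + 2 * ULP(1.0)],
+  //   //    widened to include flushed endpoints when they are subnormal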
+
+ // API - Acceptance Intervals
+
+ private readonly AbsIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number) => {
+ return this.correctlyRoundedInterval(Math.abs(n));
+ },
+ };
+
+ protected absIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.AbsIntervalOp);
+ }
+
+ /** Calculate an acceptance interval for abs(n) */
+ public abstract readonly absInterval: (n: number) => FPInterval;
+
+ // This op is implemented differently for f32 and f16.
+ private readonly AcosIntervalOp: ScalarToIntervalOp = {
+ impl: this.limitScalarToIntervalDomain(this.toInterval([-1.0, 1.0]), (n: number) => {
+ assert(this.kind === 'f32' || this.kind === 'f16');
+ // acos(n) = atan2(sqrt(1.0 - n * n), n) or a polynomial approximation with absolute error
+ const y = this.sqrtInterval(this.subtractionInterval(1, this.multiplicationInterval(n, n)));
+ const approx_abs_error = this.kind === 'f32' ? 6.77e-5 : 3.91e-3;
+ return this.spanIntervals(
+ this.atan2Interval(y, n),
+ this.absoluteErrorInterval(Math.acos(n), approx_abs_error)
+ );
+ }),
+ };
+
+ protected acosIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.AcosIntervalOp);
+ }
+
+ /** Calculate an acceptance interval for acos(n) */
+ public abstract readonly acosInterval: (n: number) => FPInterval;
+
+ private readonly AcoshAlternativeIntervalOp: ScalarToIntervalOp = {
+ impl: (x: number): FPInterval => {
+ // acosh(x) = log(x + sqrt((x + 1.0f) * (x - 1.0)))
+ const inner_value = this.multiplicationInterval(
+ this.additionInterval(x, 1.0),
+ this.subtractionInterval(x, 1.0)
+ );
+ const sqrt_value = this.sqrtInterval(inner_value);
+ return this.logInterval(this.additionInterval(x, sqrt_value));
+ },
+ };
+
+ protected acoshAlternativeIntervalImpl(x: number | FPInterval): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(x), this.AcoshAlternativeIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of acosh(x) using log(x + sqrt((x + 1.0f) * (x - 1.0))) */
+ public abstract readonly acoshAlternativeInterval: (x: number | FPInterval) => FPInterval;
+
+ private readonly AcoshPrimaryIntervalOp: ScalarToIntervalOp = {
+ impl: (x: number): FPInterval => {
+ // acosh(x) = log(x + sqrt(x * x - 1.0))
+ const inner_value = this.subtractionInterval(this.multiplicationInterval(x, x), 1.0);
+ const sqrt_value = this.sqrtInterval(inner_value);
+ return this.logInterval(this.additionInterval(x, sqrt_value));
+ },
+ };
+
+ protected acoshPrimaryIntervalImpl(x: number | FPInterval): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(x), this.AcoshPrimaryIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of acosh(x) using log(x + sqrt(x * x - 1.0)) */
+ protected abstract acoshPrimaryInterval: (x: number | FPInterval) => FPInterval;
+
+ /** All acceptance interval functions for acosh(x) */
+ public abstract readonly acoshIntervals: ScalarToInterval[];
+
+ private readonly AdditionIntervalOp: ScalarPairToIntervalOp = {
+ impl: (x: number, y: number): FPInterval => {
+ return this.correctlyRoundedInterval(x + y);
+ },
+ };
+
+ protected additionIntervalImpl(x: number | FPInterval, y: number | FPInterval): FPInterval {
+ return this.runScalarPairToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.AdditionIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of x + y, when x and y are both scalars */
+ public abstract readonly additionInterval: (
+ x: number | FPInterval,
+ y: number | FPInterval
+ ) => FPInterval;
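+
+  // Usage sketch (illustrative, assuming an f32 traits instance `fp32`):
+  //   fp32.additionInterval(1.0, 2.0)
+  //   // -> the point interval [3.0, 3.0], since all values involved are
+  //   //    exactly representable and no flushing applies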
+
+ protected additionMatrixMatrixIntervalImpl(x: Array2D<number>, y: Array2D<number>): FPMatrix {
+ return this.runScalarPairToIntervalOpMatrixComponentWise(
+ this.toMatrix(x),
+ this.toMatrix(y),
+ this.AdditionIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of x + y, when x and y are matrices */
+ public abstract readonly additionMatrixMatrixInterval: (
+ x: Array2D<number>,
+ y: Array2D<number>
+ ) => FPMatrix;
+
+ // This op is implemented differently for f32 and f16.
+ private readonly AsinIntervalOp: ScalarToIntervalOp = {
+ impl: this.limitScalarToIntervalDomain(this.toInterval([-1.0, 1.0]), (n: number) => {
+ assert(this.kind === 'f32' || this.kind === 'f16');
+ // asin(n) = atan2(n, sqrt(1.0 - n * n)) or a polynomial approximation with absolute error
+ const x = this.sqrtInterval(this.subtractionInterval(1, this.multiplicationInterval(n, n)));
+ const approx_abs_error = this.kind === 'f32' ? 6.77e-5 : 3.91e-3;
+ return this.spanIntervals(
+ this.atan2Interval(n, x),
+ this.absoluteErrorInterval(Math.asin(n), approx_abs_error)
+ );
+ }),
+ };
+
+ /** Calculate an acceptance interval for asin(n) */
+ protected asinIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.AsinIntervalOp);
+ }
+
+ /** Calculate an acceptance interval for asin(n) */
+ public abstract readonly asinInterval: (n: number) => FPInterval;
+
+ private readonly AsinhIntervalOp: ScalarToIntervalOp = {
+ impl: (x: number): FPInterval => {
+ // asinh(x) = log(x + sqrt(x * x + 1.0))
+ const inner_value = this.additionInterval(this.multiplicationInterval(x, x), 1.0);
+ const sqrt_value = this.sqrtInterval(inner_value);
+ return this.logInterval(this.additionInterval(x, sqrt_value));
+ },
+ };
+
+ protected asinhIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.AsinhIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of asinh(x) */
+ public abstract readonly asinhInterval: (n: number) => FPInterval;
+
+ private readonly AtanIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ assert(this.kind === 'f32' || this.kind === 'f16');
+ const ulp_error = this.kind === 'f32' ? 4096 : 5;
+ return this.ulpInterval(Math.atan(n), ulp_error);
+ },
+ };
+
+ /** Calculate an acceptance interval of atan(x) */
+ protected atanIntervalImpl(n: number | FPInterval): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.AtanIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of atan(x) */
+ public abstract readonly atanInterval: (n: number | FPInterval) => FPInterval;
+
+ // This op is implemented differently for f32 and f16.
+ private Atan2IntervalOpBuilder(): ScalarPairToIntervalOp {
+ assert(this.kind === 'f32' || this.kind === 'f16');
+ const constants = this.constants();
+    // For atan2, the params are labelled (y, x), not (x, y), so domain.x is the
+    // first parameter (y), and domain.y is the second parameter (x).
+ // The first param must be finite and normal.
+ const domain_x = [
+ this.toInterval([constants.negative.min, constants.negative.max]),
+ this.toInterval([constants.positive.min, constants.positive.max]),
+ ];
+ // inherited from division
+ const domain_y =
+ this.kind === 'f32'
+ ? [this.toInterval([-(2 ** 126), -(2 ** -126)]), this.toInterval([2 ** -126, 2 ** 126])]
+ : [this.toInterval([-(2 ** 14), -(2 ** -14)]), this.toInterval([2 ** -14, 2 ** 14])];
+ const ulp_error = this.kind === 'f32' ? 4096 : 5;
+ return {
+ impl: this.limitScalarPairToIntervalDomain(
+ {
+ x: domain_x,
+ y: domain_y,
+ },
+ (y: number, x: number): FPInterval => {
+ // Accurate result in f64
+ let atan_yx = Math.atan(y / x);
+ // Offset by +/-pi according to the definition. Use pi value in f64 because we are
+ // handling accurate result.
+ if (x < 0) {
+ // x < 0, y > 0, result is atan(y/x) + π
+ if (y > 0) {
+ atan_yx = atan_yx + kValue.f64.positive.pi.whole;
+ } else {
+ // x < 0, y < 0, result is atan(y/x) - π
+ atan_yx = atan_yx - kValue.f64.positive.pi.whole;
+ }
+ }
+
+ return this.ulpInterval(atan_yx, ulp_error);
+ }
+ ),
+ extrema: (y: FPInterval, x: FPInterval): [FPInterval, FPInterval] => {
+        // There is a discontinuity at y/x = 0, which generates an unbounded
+        // result that will dominate the accuracy.
+ if (y.contains(0)) {
+ if (x.contains(0)) {
+ return [this.toInterval(0), this.toInterval(0)];
+ }
+ return [this.toInterval(0), x];
+ }
+ return [y, x];
+ },
+ };
+ }
+
+ protected atan2IntervalImpl(y: number | FPInterval, x: number | FPInterval): FPInterval {
+ return this.runScalarPairToIntervalOp(
+ this.toInterval(y),
+ this.toInterval(x),
+ this.Atan2IntervalOpBuilder()
+ );
+ }
+
+ /** Calculate an acceptance interval of atan2(y, x) */
+ public abstract readonly atan2Interval: (
+ y: number | FPInterval,
+ x: number | FPInterval
+ ) => FPInterval;
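+
+  // Worked example (illustrative): for y = 1 and x = -1 the accurate f64 value
+  // is atan(1 / -1) + π = -π/4 + π = 3π/4, which is then widened by the ULP
+  // tolerance above (4096 ULP for f32, 5 ULP for f16).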
+
+ private readonly AtanhIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number) => {
+ // atanh(x) = log((1.0 + x) / (1.0 - x)) * 0.5
+ const numerator = this.additionInterval(1.0, n);
+ const denominator = this.subtractionInterval(1.0, n);
+ const log_interval = this.logInterval(this.divisionInterval(numerator, denominator));
+ return this.multiplicationInterval(log_interval, 0.5);
+ },
+ };
+
+ protected atanhIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.AtanhIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of atanh(x) */
+ public abstract readonly atanhInterval: (n: number) => FPInterval;
+
+ private readonly CeilIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ return this.correctlyRoundedInterval(Math.ceil(n));
+ },
+ };
+
+ protected ceilIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.CeilIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of ceil(x) */
+ public abstract readonly ceilInterval: (n: number) => FPInterval;
+
+ private readonly ClampMedianIntervalOp: ScalarTripleToIntervalOp = {
+ impl: (x: number, y: number, z: number): FPInterval => {
+ return this.correctlyRoundedInterval(
+        // The default Array.sort is a string sort, so a numeric comparison has to be implemented.
+ // Cannot use the b-a one-liner, because that assumes no infinities.
+ [x, y, z].sort((a, b) => {
+ if (a < b) {
+ return -1;
+ }
+ if (a > b) {
+ return 1;
+ }
+ return 0;
+ })[1]
+ );
+ },
+ };
+
+ protected clampMedianIntervalImpl(
+ x: number | FPInterval,
+ y: number | FPInterval,
+ z: number | FPInterval
+ ): FPInterval {
+ return this.runScalarTripleToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.toInterval(z),
+ this.ClampMedianIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of clamp(x, y, z) via median(x, y, z) */
+ public abstract readonly clampMedianInterval: (
+ x: number | FPInterval,
+ y: number | FPInterval,
+ z: number | FPInterval
+ ) => FPInterval;
+
+ private readonly ClampMinMaxIntervalOp: ScalarTripleToIntervalOp = {
+ impl: (x: number, low: number, high: number): FPInterval => {
+ return this.minInterval(this.maxInterval(x, low), high);
+ },
+ };
+
+ protected clampMinMaxIntervalImpl(
+ x: number | FPInterval,
+ low: number | FPInterval,
+ high: number | FPInterval
+ ): FPInterval {
+ return this.runScalarTripleToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(low),
+ this.toInterval(high),
+ this.ClampMinMaxIntervalOp
+ );
+ }
+
+  /** Calculate an acceptance interval of clamp(x, low, high) via min(max(x, low), high) */
+ public abstract readonly clampMinMaxInterval: (
+ x: number | FPInterval,
+ low: number | FPInterval,
+ high: number | FPInterval
+ ) => FPInterval;
+
+ /** All acceptance interval functions for clamp(x, y, z) */
+ public abstract readonly clampIntervals: ScalarTripleToInterval[];
+
+ private readonly CosIntervalOp: ScalarToIntervalOp = {
+ impl: this.limitScalarToIntervalDomain(
+ this.constants().negPiToPiInterval,
+ (n: number): FPInterval => {
+ assert(this.kind === 'f32' || this.kind === 'f16');
+ const abs_error = this.kind === 'f32' ? 2 ** -11 : 2 ** -7;
+ return this.absoluteErrorInterval(Math.cos(n), abs_error);
+ }
+ ),
+ };
+
+ protected cosIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.CosIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of cos(x) */
+ public abstract readonly cosInterval: (n: number) => FPInterval;
+
+ private readonly CoshIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ // cosh(x) = (exp(x) + exp(-x)) * 0.5
+ const minus_n = this.negationInterval(n);
+ return this.multiplicationInterval(
+ this.additionInterval(this.expInterval(n), this.expInterval(minus_n)),
+ 0.5
+ );
+ },
+ };
+
+ protected coshIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.CoshIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of cosh(x) */
+ public abstract readonly coshInterval: (n: number) => FPInterval;
+
+ private readonly CrossIntervalOp: VectorPairToVectorOp = {
+ impl: (x: readonly number[], y: readonly number[]): FPVector => {
+ assert(x.length === 3, `CrossIntervalOp received x with ${x.length} instead of 3`);
+ assert(y.length === 3, `CrossIntervalOp received y with ${y.length} instead of 3`);
+
+ // cross(x, y) = r, where
+ // r[0] = x[1] * y[2] - x[2] * y[1]
+ // r[1] = x[2] * y[0] - x[0] * y[2]
+ // r[2] = x[0] * y[1] - x[1] * y[0]
+
+ const r0 = this.subtractionInterval(
+ this.multiplicationInterval(x[1], y[2]),
+ this.multiplicationInterval(x[2], y[1])
+ );
+ const r1 = this.subtractionInterval(
+ this.multiplicationInterval(x[2], y[0]),
+ this.multiplicationInterval(x[0], y[2])
+ );
+ const r2 = this.subtractionInterval(
+ this.multiplicationInterval(x[0], y[1]),
+ this.multiplicationInterval(x[1], y[0])
+ );
+ return [r0, r1, r2];
+ },
+ };
+
+ protected crossIntervalImpl(x: readonly number[], y: readonly number[]): FPVector {
+ assert(x.length === 3, `Cross is only defined for vec3`);
+ assert(y.length === 3, `Cross is only defined for vec3`);
+ return this.runVectorPairToVectorOp(this.toVector(x), this.toVector(y), this.CrossIntervalOp);
+ }
+
+ /** Calculate a vector of acceptance intervals for cross(x, y) */
+ public abstract readonly crossInterval: (x: readonly number[], y: readonly number[]) => FPVector;
+
+ private readonly DegreesIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ return this.multiplicationInterval(n, 57.295779513082322865);
+ },
+ };
+
+ protected degreesIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.DegreesIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of degrees(x) */
+ public abstract readonly degreesInterval: (n: number) => FPInterval;
+
+ /**
+ * Calculate the minor of a NxN matrix.
+ *
+   * The ijth minor of an NxN square matrix is the (N-1)x(N-1) matrix created by
+   * removing the ith column and jth row from the original matrix.
+ */
+ private minorNxN(m: Array2D<number>, col: number, row: number): Array2D<number> {
+ const dim = m.length;
+ assert(m.length === m[0].length, `minorMatrix is only defined for square matrices`);
+    assert(col >= 0 && col < dim, `col ${col} needs to be in [0, # of columns '${dim}')`);
+    assert(row >= 0 && row < dim, `row ${row} needs to be in [0, # of rows '${dim}')`);
+
+ const result: number[][] = [...Array(dim - 1)].map(_ => [...Array(dim - 1)]);
+
+ const col_indices: readonly number[] = [...Array(dim).keys()].filter(e => e !== col);
+ const row_indices: readonly number[] = [...Array(dim).keys()].filter(e => e !== row);
+
+ col_indices.forEach((c, i) => {
+ row_indices.forEach((r, j) => {
+ result[i][j] = m[c][r];
+ });
+ });
+ return result;
+ }
+
+ /** Calculate an acceptance interval for determinant(m), where m is a 2x2 matrix */
+ private determinant2x2Interval(m: Array2D<number>): FPInterval {
+ assert(
+ m.length === m[0].length && m.length === 2,
+ `determinant2x2Interval called on non-2x2 matrix`
+ );
+ return this.subtractionInterval(
+ this.multiplicationInterval(m[0][0], m[1][1]),
+ this.multiplicationInterval(m[0][1], m[1][0])
+ );
+ }
+
+ /** Calculate an acceptance interval for determinant(m), where m is a 3x3 matrix */
+ private determinant3x3Interval(m: Array2D<number>): FPInterval {
+ assert(
+ m.length === m[0].length && m.length === 3,
+ `determinant3x3Interval called on non-3x3 matrix`
+ );
+
+ // M is a 3x3 matrix
+ // det(M) is A + B + C, where A, B, C are three elements in a row/column times
+ // their own co-factor.
+ // (The co-factor is the determinant of the minor of that position with the
+    // appropriate +/- sign.)
+    // For simplicity's sake A, B, C are calculated as the elements of the first
+    // column.
+ const A = this.multiplicationInterval(
+ m[0][0],
+ this.determinant2x2Interval(this.minorNxN(m, 0, 0))
+ );
+ const B = this.multiplicationInterval(
+ -m[0][1],
+ this.determinant2x2Interval(this.minorNxN(m, 0, 1))
+ );
+ const C = this.multiplicationInterval(
+ m[0][2],
+ this.determinant2x2Interval(this.minorNxN(m, 0, 2))
+ );
+
+    // Need to calculate permutations, since fp addition is not associative,
+    // so A + B + C is not guaranteed to equal B + C + A, etc.
+ const permutations: ROArrayArray<FPInterval> = calculatePermutations([A, B, C]);
+ return this.spanIntervals(
+ ...permutations.map(p =>
+ p.reduce((prev: FPInterval, cur: FPInterval) => this.additionInterval(prev, cur))
+ )
+ );
+ }
+
+ /** Calculate an acceptance interval for determinant(m), where m is a 4x4 matrix */
+ private determinant4x4Interval(m: Array2D<number>): FPInterval {
+ assert(
+ m.length === m[0].length && m.length === 4,
+      `determinant4x4Interval called on non-4x4 matrix`
+ );
+
+ // M is a 4x4 matrix
+ // det(M) is A + B + C + D, where A, B, C, D are four elements in a row/column
+ // times their own co-factor.
+ // (The co-factor is the determinant of the minor of that position with the
+    // appropriate +/- sign.)
+    // For simplicity's sake A, B, C, D are calculated as the elements of the
+    // first column.
+ const A = this.multiplicationInterval(
+ m[0][0],
+ this.determinant3x3Interval(this.minorNxN(m, 0, 0))
+ );
+ const B = this.multiplicationInterval(
+ -m[0][1],
+ this.determinant3x3Interval(this.minorNxN(m, 0, 1))
+ );
+ const C = this.multiplicationInterval(
+ m[0][2],
+ this.determinant3x3Interval(this.minorNxN(m, 0, 2))
+ );
+ const D = this.multiplicationInterval(
+ -m[0][3],
+ this.determinant3x3Interval(this.minorNxN(m, 0, 3))
+ );
+
+    // Need to calculate permutations, since fp addition is not associative,
+    // so A + B + C + D is not guaranteed to equal B + C + A + D, etc.
+ const permutations: ROArrayArray<FPInterval> = calculatePermutations([A, B, C, D]);
+ return this.spanIntervals(
+ ...permutations.map(p =>
+ p.reduce((prev: FPInterval, cur: FPInterval) => this.additionInterval(prev, cur))
+ )
+ );
+ }
+
+ /**
+ * This code calculates 3x3 and 4x4 determinants using the textbook co-factor
+ * method, using the first column for the co-factor selection.
+ *
+ * For matrices composed of integer elements, e, with |e|^4 < 2**21, this
+ * should be fine.
+ *
+   * For e that is subnormal, or where 4*(e^4) might not be precisely expressible
+   * as an f32 value, this approach breaks down, because the rule that all
+   * co-factor definitions of the determinant are equal doesn't hold in these cases.
+ *
+ * The general solution for this is to calculate all the permutations of the
+ * operations in the worked out formula for determinant.
+ * For 3x3 this is tractable, but for 4x4 this works out to ~23! permutations
+ * that need to be calculated.
+   * Thus, CTS testing and the spec definition of accuracy are restricted to the
+   * space in which the simple implementation is valid.
+ */
+ protected determinantIntervalImpl(x: Array2D<number>): FPInterval {
+ const dim = x.length;
+ assert(
+ x[0].length === dim && (dim === 2 || dim === 3 || dim === 4),
+ `determinantInterval only defined for 2x2, 3x3 and 4x4 matrices`
+ );
+ switch (dim) {
+ case 2:
+ return this.determinant2x2Interval(x);
+ case 3:
+ return this.determinant3x3Interval(x);
+ case 4:
+ return this.determinant4x4Interval(x);
+ }
+ unreachable(
+ "determinantInterval called on x, where which has an unexpected dimension of '${dim}'"
+ );
+ }
+
+ /** Calculate an acceptance interval for determinant(x) */
+ public abstract readonly determinantInterval: (x: Array2D<number>) => FPInterval;
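+
+  // Usage sketch (illustrative): for the 2x2 matrix m = [[1, 2], [3, 4]]
+  // (column-major), determinantInterval(m) is an acceptance interval around
+  // 1 * 4 - 2 * 3 = -2, built from the multiplication and subtraction interval
+  // ops above.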
+
+ private readonly DistanceIntervalScalarOp: ScalarPairToIntervalOp = {
+ impl: (x: number, y: number): FPInterval => {
+ return this.lengthInterval(this.subtractionInterval(x, y));
+ },
+ };
+
+ private readonly DistanceIntervalVectorOp: VectorPairToIntervalOp = {
+ impl: (x: readonly number[], y: readonly number[]): FPInterval => {
+ return this.lengthInterval(
+ this.runScalarPairToIntervalOpVectorComponentWise(
+ this.toVector(x),
+ this.toVector(y),
+ this.SubtractionIntervalOp
+ )
+ );
+ },
+ };
+
+ protected distanceIntervalImpl(
+ x: number | readonly number[],
+ y: number | readonly number[]
+ ): FPInterval {
+ if (x instanceof Array && y instanceof Array) {
+ assert(
+ x.length === y.length,
+ `distanceInterval requires both params to have the same number of elements`
+ );
+ return this.runVectorPairToIntervalOp(
+ this.toVector(x),
+ this.toVector(y),
+ this.DistanceIntervalVectorOp
+ );
+ } else if (!(x instanceof Array) && !(y instanceof Array)) {
+ return this.runScalarPairToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.DistanceIntervalScalarOp
+ );
+ }
+ unreachable(
+      `distanceInterval requires both params to be the same type, either both scalars or both vectors`
+ );
+ }
+
+ /** Calculate an acceptance interval of distance(x, y) */
+ public abstract readonly distanceInterval: (
+ x: number | readonly number[],
+ y: number | readonly number[]
+ ) => FPInterval;
+
+ // This op is implemented differently for f32 and f16.
+ private DivisionIntervalOpBuilder(): ScalarPairToIntervalOp {
+ const constants = this.constants();
+ const domain_x = [this.toInterval([constants.negative.min, constants.positive.max])];
+ const domain_y =
+ this.kind === 'f32' || this.kind === 'abstract'
+ ? [this.toInterval([-(2 ** 126), -(2 ** -126)]), this.toInterval([2 ** -126, 2 ** 126])]
+ : [this.toInterval([-(2 ** 14), -(2 ** -14)]), this.toInterval([2 ** -14, 2 ** 14])];
+ return {
+ impl: this.limitScalarPairToIntervalDomain(
+ {
+ x: domain_x,
+ y: domain_y,
+ },
+ (x: number, y: number): FPInterval => {
+ if (y === 0) {
+ return constants.unboundedInterval;
+ }
+ return this.ulpInterval(x / y, 2.5);
+ }
+ ),
+ extrema: (x: FPInterval, y: FPInterval): [FPInterval, FPInterval] => {
+ // division has a discontinuity at y = 0.
+ if (y.contains(0)) {
+ y = this.toInterval(0);
+ }
+ return [x, y];
+ },
+ };
+ }
+
+ protected divisionIntervalImpl(x: number | FPInterval, y: number | FPInterval): FPInterval {
+ return this.runScalarPairToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.DivisionIntervalOpBuilder()
+ );
+ }
+
+ /** Calculate an acceptance interval of x / y */
+ public abstract readonly divisionInterval: (
+ x: number | FPInterval,
+ y: number | FPInterval
+ ) => FPInterval;
+
+ private readonly DotIntervalOp: VectorPairToIntervalOp = {
+ impl: (x: readonly number[], y: readonly number[]): FPInterval => {
+ // dot(x, y) = sum of x[i] * y[i]
+ const multiplications = this.runScalarPairToIntervalOpVectorComponentWise(
+ this.toVector(x),
+ this.toVector(y),
+ this.MultiplicationIntervalOp
+ );
+
+ // vec2 doesn't require permutations, since a + b = b + a for floats
+ if (multiplications.length === 2) {
+ return this.additionInterval(multiplications[0], multiplications[1]);
+ }
+
+ // The spec does not state the ordering of summation, so all the
+ // permutations are calculated and their results spanned, since addition
+      // of more than two floats is not associative, i.e. (a + b) + c is not
+      // guaranteed to equal a + (b + c).
+ const permutations: ROArrayArray<FPInterval> = calculatePermutations(multiplications);
+ return this.spanIntervals(
+ ...permutations.map(p => p.reduce((prev, cur) => this.additionInterval(prev, cur)))
+ );
+ },
+ };
+
+ protected dotIntervalImpl(
+ x: readonly number[] | readonly FPInterval[],
+ y: readonly number[] | readonly FPInterval[]
+ ): FPInterval {
+ assert(x.length === y.length, `dot not defined for vectors with different lengths`);
+ return this.runVectorPairToIntervalOp(this.toVector(x), this.toVector(y), this.DotIntervalOp);
+ }
+
+  /** Calculate the acceptance interval for dot(x, y) */
+ public abstract readonly dotInterval: (
+ x: readonly number[] | readonly FPInterval[],
+ y: readonly number[] | readonly FPInterval[]
+ ) => FPInterval;
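+
+  // Worked example (illustrative): for x = [1, 2, 3] and y = [4, 5, 6] the
+  // per-component products are intervals around 4, 10 and 18; since the spec
+  // leaves the summation order unspecified, the result spans every ordering of
+  // adding them, which for these exact inputs collapses to an interval around 32.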
+
+ private readonly ExpIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ assert(this.kind === 'f32' || this.kind === 'f16');
+ const ulp_error = this.kind === 'f32' ? 3 + 2 * Math.abs(n) : 1 + 2 * Math.abs(n);
+ return this.ulpInterval(Math.exp(n), ulp_error);
+ },
+ };
+
+ protected expIntervalImpl(x: number | FPInterval): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(x), this.ExpIntervalOp);
+ }
+
+ /** Calculate an acceptance interval for exp(x) */
+ public abstract readonly expInterval: (x: number | FPInterval) => FPInterval;
+
+ private readonly Exp2IntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ assert(this.kind === 'f32' || this.kind === 'f16');
+ const ulp_error = this.kind === 'f32' ? 3 + 2 * Math.abs(n) : 1 + 2 * Math.abs(n);
+ return this.ulpInterval(Math.pow(2, n), ulp_error);
+ },
+ };
+
+ protected exp2IntervalImpl(x: number | FPInterval): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(x), this.Exp2IntervalOp);
+ }
+
+ /** Calculate an acceptance interval for exp2(x) */
+ public abstract readonly exp2Interval: (x: number | FPInterval) => FPInterval;
+
+ /**
+ * faceForward(x, y, z) = select(-x, x, dot(z, y) < 0.0)
+ *
+ * This builtin selects from two discrete results (delta rounding/flushing),
+ * so the majority of the framework code is not appropriate, since the
+ * framework attempts to span results.
+ *
+ * Thus, a bespoke implementation is used instead of
+ * defining an Op and running that through the framework.
+ */
+ protected faceForwardIntervalsImpl(
+ x: readonly number[],
+ y: readonly number[],
+ z: readonly number[]
+ ): (FPVector | undefined)[] {
+ const x_vec = this.toVector(x);
+ // Running vector through this.runScalarToIntervalOpComponentWise to make
+ // sure that flushing/rounding is handled, since toVector does not perform
+ // those operations.
+ const positive_x = this.runScalarToIntervalOpComponentWise(x_vec, {
+ impl: (i: number): FPInterval => {
+ return this.toInterval(i);
+ },
+ });
+ const negative_x = this.runScalarToIntervalOpComponentWise(x_vec, this.NegationIntervalOp);
+
+ const dot_interval = this.dotInterval(z, y);
+
+ const results: (FPVector | undefined)[] = [];
+
+ if (!dot_interval.isFinite()) {
+ // dot calculation went out of bounds
+ // Inserting undefined in the result, so that the test running framework
+ // is aware of this potential OOB.
+ // For const-eval tests, it means that the test case should be skipped,
+ // since the shader will fail to compile.
+      // For non-const-eval tests, the undefined should be stripped out of the possible
+ // results.
+
+ results.push(undefined);
+ }
+
+ // Because the result of dot can be an interval, it might span across 0, thus
+ // it is possible that both -x and x are valid responses.
+ if (dot_interval.begin < 0 || dot_interval.end < 0) {
+ results.push(positive_x);
+ }
+
+ if (dot_interval.begin >= 0 || dot_interval.end >= 0) {
+ results.push(negative_x);
+ }
+
+ assert(
+ results.length > 0 || results.every(r => r === undefined),
+      `faceForwardInterval selected neither positive x nor negative x for the result, this shouldn't be possible`
+ );
+ return results;
+ }
+
+ /** Calculate the acceptance intervals for faceForward(x, y, z) */
+ public abstract readonly faceForwardIntervals: (
+ x: readonly number[],
+ y: readonly number[],
+ z: readonly number[]
+ ) => (FPVector | undefined)[];
+
+ private readonly FloorIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ return this.correctlyRoundedInterval(Math.floor(n));
+ },
+ };
+
+ protected floorIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.FloorIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of floor(x) */
+ public abstract readonly floorInterval: (n: number) => FPInterval;
+
+ private readonly FmaIntervalOp: ScalarTripleToIntervalOp = {
+ impl: (x: number, y: number, z: number): FPInterval => {
+ return this.additionInterval(this.multiplicationInterval(x, y), z);
+ },
+ };
+
+ protected fmaIntervalImpl(x: number, y: number, z: number): FPInterval {
+ return this.runScalarTripleToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.toInterval(z),
+ this.FmaIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval for fma(x, y, z) */
+ public abstract readonly fmaInterval: (x: number, y: number, z: number) => FPInterval;
+
+ private readonly FractIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ // fract(x) = x - floor(x) is defined in the spec.
+ // For people coming from a non-graphics background this will cause some
+ // unintuitive results. For example,
+ // fract(-1.1) is not 0.1 or -0.1, but instead 0.9.
+ // This is how other shading languages operate and allows for a desirable
+ // wrap around in graphics programming.
+ const result = this.subtractionInterval(n, this.floorInterval(n));
+ assert(
+ // negative.subnormal.min instead of 0, because FTZ can occur
+ // selectively during the calculation
+ this.toInterval([this.constants().negative.subnormal.min, 1.0]).contains(result),
+ `fract(${n}) interval [${result}] unexpectedly extends beyond [~0.0, 1.0]`
+ );
+ if (result.contains(1)) {
+ // Very small negative numbers can lead to catastrophic cancellation,
+ // thus calculating a fract of 1.0, which is technically not a
+        // fractional part, so some implementations clamp the result to the
+        // next nearest number.
+ return this.spanIntervals(result, this.toInterval(this.constants().positive.less_than_one));
+ }
+ return result;
+ },
+ };
+
+ protected fractIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.FractIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of fract(x) */
+ public abstract readonly fractInterval: (n: number) => FPInterval;
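+
+  // Usage sketch (illustrative): fractInterval(1.25) is an acceptance interval
+  // around 0.25, while fractInterval(-1.1) is an interval around 0.9, per the
+  // wrap-around semantics described above.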
+
+ private readonly InverseSqrtIntervalOp: ScalarToIntervalOp = {
+ impl: this.limitScalarToIntervalDomain(
+ this.constants().greaterThanZeroInterval,
+ (n: number): FPInterval => {
+ return this.ulpInterval(1 / Math.sqrt(n), 2);
+ }
+ ),
+ };
+
+ protected inverseSqrtIntervalImpl(n: number | FPInterval): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.InverseSqrtIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of inverseSqrt(x) */
+ public abstract readonly inverseSqrtInterval: (n: number | FPInterval) => FPInterval;
+
+ private readonly LdexpIntervalOp: ScalarPairToIntervalOp = {
+ impl: (e1: number, e2: number) => {
+ assert(this.kind === 'f32' || this.kind === 'f16');
+ assert(Number.isInteger(e2), 'the second param of ldexp must be an integer');
+ const bias = this.kind === 'f32' ? 127 : 15;
+      // The spec explicitly calls this out as an indeterminate value if e2 > bias + 1.
+ if (e2 > bias + 1) {
+ return this.constants().unboundedInterval;
+ }
+ // The spec says the result of ldexp(e1, e2) = e1 * 2 ^ e2, and the accuracy is correctly
+ // rounded to the true value, so the inheritance framework does not need to be invoked to
+ // determine bounds.
+ // Instead, the value at a higher precision is calculated and passed to
+ // correctlyRoundedInterval.
+ const result = e1 * 2 ** e2;
+ if (!Number.isFinite(result)) {
+ // Overflowed TS's number type, so definitely out of bounds for f32/f16
+ return this.constants().unboundedInterval;
+ }
+ // The result may be zero if e2 + bias <= 0, but we can't simply span the interval to 0.0.
+ // For example, for f32 input e1 = 2**120 and e2 = -130, e2 + bias = -3 <= 0, but
+ // e1 * 2 ** e2 = 2**-10, so the valid result is 2**-10 or 0.0, instead of [0.0, 2**-10].
+      // Always return the correctly-rounded interval; special care should be taken when
+ // using the result.
+ return this.correctlyRoundedInterval(result);
+ },
+ };
+
+ protected ldexpIntervalImpl(e1: number, e2: number): FPInterval {
+ // Only round and flush e1, as e2 is of integer type (i32 or abstract integer) and should be
+ // precise.
+ return this.roundAndFlushScalarToInterval(e1, {
+ impl: (e1: number) => this.LdexpIntervalOp.impl(e1, e2),
+ });
+ }
+
+ /**
+ * Calculate an acceptance interval of ldexp(e1, e2), where e2 is integer
+ *
+   * The spec indicates that the result may be zero if e2 + bias <= 0, no matter how
+   * large e1 * 2 ** e2 is, i.e. the actual valid result is correctlyRounded(e1 * 2 ** e2) or 0.0
+   * if e2 + bias <= 0. Such discontinuous flush-to-zero behavior is hard to express using
+   * FPInterval, so when e2 + bias <= 0 the returned interval is just
+   * correctlyRounded(e1 * 2 ** e2), and special care should be taken when using the result.
+ *
+ */
+ public abstract readonly ldexpInterval: (e1: number, e2: number) => FPInterval;
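+
+  // Usage sketch (illustrative, assuming an f32 traits instance `fp32`):
+  //   fp32.ldexpInterval(1.5, 3)   // correctly-rounded interval around 1.5 * 2**3 = 12
+  //   fp32.ldexpInterval(1.0, 200) // unbounded, since e2 > bias + 1 for f32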
+
+ private readonly LengthIntervalScalarOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ return this.sqrtInterval(this.multiplicationInterval(n, n));
+ },
+ };
+
+ private readonly LengthIntervalVectorOp: VectorToIntervalOp = {
+ impl: (n: readonly number[]): FPInterval => {
+ return this.sqrtInterval(this.dotInterval(n, n));
+ },
+ };
+
+ protected lengthIntervalImpl(n: number | FPInterval | readonly number[] | FPVector): FPInterval {
+ if (n instanceof Array) {
+ return this.runVectorToIntervalOp(this.toVector(n), this.LengthIntervalVectorOp);
+ } else {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.LengthIntervalScalarOp);
+ }
+ }
+
+ /** Calculate an acceptance interval of length(x) */
+ public abstract readonly lengthInterval: (
+ n: number | FPInterval | readonly number[] | FPVector
+ ) => FPInterval;
+
+ private readonly LogIntervalOp: ScalarToIntervalOp = {
+ impl: this.limitScalarToIntervalDomain(
+ this.constants().greaterThanZeroInterval,
+ (n: number): FPInterval => {
+ assert(this.kind === 'f32' || this.kind === 'f16');
+ const abs_error = this.kind === 'f32' ? 2 ** -21 : 2 ** -7;
+ if (n >= 0.5 && n <= 2.0) {
+ return this.absoluteErrorInterval(Math.log(n), abs_error);
+ }
+ return this.ulpInterval(Math.log(n), 3);
+ }
+ ),
+ };
+
+ protected logIntervalImpl(x: number | FPInterval): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(x), this.LogIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of log(x) */
+ public abstract readonly logInterval: (x: number | FPInterval) => FPInterval;
+
+ private readonly Log2IntervalOp: ScalarToIntervalOp = {
+ impl: this.limitScalarToIntervalDomain(
+ this.constants().greaterThanZeroInterval,
+ (n: number): FPInterval => {
+ assert(this.kind === 'f32' || this.kind === 'f16');
+ const abs_error = this.kind === 'f32' ? 2 ** -21 : 2 ** -7;
+ if (n >= 0.5 && n <= 2.0) {
+ return this.absoluteErrorInterval(Math.log2(n), abs_error);
+ }
+ return this.ulpInterval(Math.log2(n), 3);
+ }
+ ),
+ };
+
+ protected log2IntervalImpl(x: number | FPInterval): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(x), this.Log2IntervalOp);
+ }
+
+ /** Calculate an acceptance interval of log2(x) */
+ public abstract readonly log2Interval: (x: number | FPInterval) => FPInterval;
+
+ private readonly MaxIntervalOp: ScalarPairToIntervalOp = {
+ impl: (x: number, y: number): FPInterval => {
+ // If both of the inputs are subnormal, then either of the inputs can be returned
+ if (this.isSubnormal(x) && this.isSubnormal(y)) {
+ return this.correctlyRoundedInterval(
+ this.spanIntervals(this.toInterval(x), this.toInterval(y))
+ );
+ }
+
+ return this.correctlyRoundedInterval(Math.max(x, y));
+ },
+ };
+
+ protected maxIntervalImpl(x: number | FPInterval, y: number | FPInterval): FPInterval {
+ return this.runScalarPairToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.MaxIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of max(x, y) */
+ public abstract readonly maxInterval: (
+ x: number | FPInterval,
+ y: number | FPInterval
+ ) => FPInterval;
+
+ private readonly MinIntervalOp: ScalarPairToIntervalOp = {
+ impl: (x: number, y: number): FPInterval => {
+ // If both of the inputs are subnormal, then either of the inputs can be returned
+ if (this.isSubnormal(x) && this.isSubnormal(y)) {
+ return this.correctlyRoundedInterval(
+ this.spanIntervals(this.toInterval(x), this.toInterval(y))
+ );
+ }
+
+ return this.correctlyRoundedInterval(Math.min(x, y));
+ },
+ };
+
+ protected minIntervalImpl(x: number | FPInterval, y: number | FPInterval): FPInterval {
+ return this.runScalarPairToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.MinIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of min(x, y) */
+ public abstract readonly minInterval: (
+ x: number | FPInterval,
+ y: number | FPInterval
+ ) => FPInterval;
+
+ private readonly MixImpreciseIntervalOp: ScalarTripleToIntervalOp = {
+ impl: (x: number, y: number, z: number): FPInterval => {
+ // x + (y - x) * z =
+ // x + t, where t = (y - x) * z
+ const t = this.multiplicationInterval(this.subtractionInterval(y, x), z);
+ return this.additionInterval(x, t);
+ },
+ };
+
+ protected mixImpreciseIntervalImpl(x: number, y: number, z: number): FPInterval {
+ return this.runScalarTripleToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.toInterval(z),
+ this.MixImpreciseIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of mix(x, y, z) using x + (y - x) * z */
+ public abstract readonly mixImpreciseInterval: (x: number, y: number, z: number) => FPInterval;
+
+ private readonly MixPreciseIntervalOp: ScalarTripleToIntervalOp = {
+ impl: (x: number, y: number, z: number): FPInterval => {
+ // x * (1.0 - z) + y * z =
+ // t + s, where t = x * (1.0 - z), s = y * z
+ const t = this.multiplicationInterval(x, this.subtractionInterval(1.0, z));
+ const s = this.multiplicationInterval(y, z);
+ return this.additionInterval(t, s);
+ },
+ };
+
+ protected mixPreciseIntervalImpl(x: number, y: number, z: number): FPInterval {
+ return this.runScalarTripleToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.toInterval(z),
+ this.MixPreciseIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of mix(x, y, z) using x * (1.0 - z) + y * z */
+ public abstract readonly mixPreciseInterval: (x: number, y: number, z: number) => FPInterval;
+
+ /** All acceptance interval functions for mix(x, y, z) */
+ public abstract readonly mixIntervals: ScalarTripleToInterval[];
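+ // Illustrative difference between the two mix forms (a sketch, not spec text): for
+ // x = -(f32 max), y = f32 max, z = 0.5, the precise form x * (1.0 - z) + y * z stays finite
+ // (bracketing 0), while the imprecise form x + (y - x) * z overflows in the (y - x) step and
+ // is expected to go unbounded, which is why tests accept either result via mixIntervals.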
+
+ protected modfIntervalImpl(n: number): { fract: FPInterval; whole: FPInterval } {
+ const fract = this.correctlyRoundedInterval(n % 1.0);
+ const whole = this.correctlyRoundedInterval(n - (n % 1.0));
+ return { fract, whole };
+ }
+
+ /** Calculate an acceptance interval of modf(x) */
+ public abstract readonly modfInterval: (n: number) => { fract: FPInterval; whole: FPInterval };
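+ // e.g. modfInterval(2.25) brackets { fract: 0.25, whole: 2.0 }, and modfInterval(-1.5)
+ // brackets { fract: -0.5, whole: -1.0 }, since JS's n % 1.0 keeps the sign of n.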
+
+ private readonly MultiplicationInnerOp = {
+ impl: (x: number, y: number): FPInterval => {
+ return this.correctlyRoundedInterval(x * y);
+ },
+ };
+
+ private readonly MultiplicationIntervalOp: ScalarPairToIntervalOp = {
+ impl: (x: number, y: number): FPInterval => {
+ return this.roundAndFlushScalarPairToInterval(x, y, this.MultiplicationInnerOp);
+ },
+ };
+
+ protected multiplicationIntervalImpl(x: number | FPInterval, y: number | FPInterval): FPInterval {
+ return this.runScalarPairToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.MultiplicationIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of x * y */
+ public abstract readonly multiplicationInterval: (
+ x: number | FPInterval,
+ y: number | FPInterval
+ ) => FPInterval;
+
+ /**
+ * @returns the vector result of multiplying the given vector by the given
+ * scalar
+ */
+ private multiplyVectorByScalar(v: readonly number[], c: number | FPInterval): FPVector {
+ return this.toVector(v.map(x => this.multiplicationInterval(x, c)));
+ }
+
+ protected multiplicationMatrixScalarIntervalImpl(mat: Array2D<number>, scalar: number): FPMatrix {
+ const cols = mat.length;
+ const rows = mat[0].length;
+ return this.toMatrix(
+ unflatten2DArray(
+ flatten2DArray(mat).map(e => this.multiplicationInterval(e, scalar)),
+ cols,
+ rows
+ )
+ );
+ }
+
+ /** Calculate an acceptance interval of x * y, when x is a matrix and y is a scalar */
+ public abstract readonly multiplicationMatrixScalarInterval: (
+ mat: Array2D<number>,
+ scalar: number
+ ) => FPMatrix;
+
+ protected multiplicationScalarMatrixIntervalImpl(scalar: number, mat: Array2D<number>): FPMatrix {
+ return this.multiplicationMatrixScalarIntervalImpl(mat, scalar);
+ }
+
+ /** Calculate an acceptance interval of x * y, when x is a scalar and y is a matrix */
+ public abstract readonly multiplicationScalarMatrixInterval: (
+ scalar: number,
+ mat: Array2D<number>
+ ) => FPMatrix;
+
+ protected multiplicationMatrixMatrixIntervalImpl(
+ mat_x: Array2D<number>,
+ mat_y: Array2D<number>
+ ): FPMatrix {
+ const x_cols = mat_x.length;
+ const x_rows = mat_x[0].length;
+ const y_cols = mat_y.length;
+ const y_rows = mat_y[0].length;
+ assert(x_cols === y_rows, `'mat${x_cols}x${x_rows} * mat${y_cols}x${y_rows}' is not defined`);
+
+ const x_transposed = this.transposeInterval(mat_x);
+
+ const result: FPInterval[][] = [...Array(y_cols)].map(_ => [...Array(x_rows)]);
+ mat_y.forEach((y, i) => {
+ x_transposed.forEach((x, j) => {
+ result[i][j] = this.dotInterval(x, y);
+ });
+ });
+
+ return result as ROArrayArray<FPInterval> as FPMatrix;
+ }
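+ // Dimension sketch (Array2D is column-major here, outer index = column): a mat3x2
+ // (mat_x.length === 3, mat_x[0].length === 2) times a mat2x3 passes the x_cols === y_rows
+ // check and produces a 2-column, 2-row FPMatrix, matching WGSL's matCxR * matKxC = matKxR rule.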
+
+ /** Calculate an acceptance interval of x * y, when x is a matrix and y is a matrix */
+ public abstract readonly multiplicationMatrixMatrixInterval: (
+ mat_x: Array2D<number>,
+ mat_y: Array2D<number>
+ ) => FPMatrix;
+
+ protected multiplicationMatrixVectorIntervalImpl(
+ x: Array2D<number>,
+ y: readonly number[]
+ ): FPVector {
+ const cols = x.length;
+ const rows = x[0].length;
+ assert(y.length === cols, `'mat${cols}x${rows} * vec${y.length}' is not defined`);
+
+ return this.transposeInterval(x).map(e => this.dotInterval(e, y)) as FPVector;
+ }
+
+ /** Calculate an acceptance interval of x * y, when x is a matrix and y is a vector */
+ public abstract readonly multiplicationMatrixVectorInterval: (
+ x: Array2D<number>,
+ y: readonly number[]
+ ) => FPVector;
+
+ protected multiplicationVectorMatrixIntervalImpl(
+ x: readonly number[],
+ y: Array2D<number>
+ ): FPVector {
+ const cols = y.length;
+ const rows = y[0].length;
+ assert(x.length === rows, `'vec${x.length} * mat${cols}x${rows}' is not defined`);
+
+ return y.map(e => this.dotInterval(x, e)) as FPVector;
+ }
+
+ /** Calculate an acceptance interval of x * y, when x is a vector and y is a matrix */
+ public abstract readonly multiplicationVectorMatrixInterval: (
+ x: readonly number[],
+ y: Array2D<number>
+ ) => FPVector;
+
+ private readonly NegationIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ return this.correctlyRoundedInterval(-n);
+ },
+ };
+
+ protected negationIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.NegationIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of -x */
+ public abstract readonly negationInterval: (n: number) => FPInterval;
+
+ private readonly NormalizeIntervalOp: VectorToVectorOp = {
+ impl: (n: readonly number[]): FPVector => {
+ const length = this.lengthInterval(n);
+ return this.toVector(n.map(e => this.divisionInterval(e, length)));
+ },
+ };
+
+ protected normalizeIntervalImpl(n: readonly number[]): FPVector {
+ return this.runVectorToVectorOp(this.toVector(n), this.NormalizeIntervalOp);
+ }
+
+ /** Calculate an acceptance interval vector of normalize(n) */
+ public abstract readonly normalizeInterval: (n: readonly number[]) => FPVector;
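+ // e.g. normalizeInterval([3, 4]) divides each component by lengthInterval([3, 4]) ≈ 5, so the
+ // acceptance vector brackets [0.6, 0.8], widened by the length and division tolerances.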
+
+ private readonly PowIntervalOp: ScalarPairToIntervalOp = {
+ // pow(x, y) has no explicit domain restrictions, but inherits the x <= 0
+ // domain restriction from log2(x). Invoking log2Interval(x) in impl will
+ // enforce this, so there is no need to wrap the impl call here.
+ impl: (x: number, y: number): FPInterval => {
+ return this.exp2Interval(this.multiplicationInterval(y, this.log2Interval(x)));
+ },
+ };
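+ // Sanity example for PowIntervalOp: powInterval(2, 10) composes
+ // exp2Interval(multiplicationInterval(10, log2Interval(2))) and brackets 1024; an x <= 0 falls
+ // outside log2's domain and is expected to yield an unbounded interval.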
+
+ protected powIntervalImpl(x: number | FPInterval, y: number | FPInterval): FPInterval {
+ return this.runScalarPairToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.PowIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of pow(x, y) */
+ public abstract readonly powInterval: (
+ x: number | FPInterval,
+ y: number | FPInterval
+ ) => FPInterval;
+
+ private readonly RadiansIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ return this.multiplicationInterval(n, 0.017453292519943295474);
+ },
+ };
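+ // The constant 0.017453292519943295474 is pi / 180 as an f64, so e.g. radiansInterval(180)
+ // brackets pi (within the multiplication tolerance).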
+
+ protected radiansIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.RadiansIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of radians(x) */
+ public abstract readonly radiansInterval: (n: number) => FPInterval;
+
+ private readonly ReflectIntervalOp: VectorPairToVectorOp = {
+ impl: (x: readonly number[], y: readonly number[]): FPVector => {
+ assert(
+ x.length === y.length,
+ `ReflectIntervalOp received x (${x}) and y (${y}) with different numbers of elements`
+ );
+
+ // reflect(x, y) = x - 2.0 * dot(x, y) * y
+ // = x - t * y, t = 2.0 * dot(x, y)
+ // x = incident vector
+ // y = normal of reflecting surface
+ const t = this.multiplicationInterval(2.0, this.dotInterval(x, y));
+ const rhs = this.multiplyVectorByScalar(y, t);
+ return this.runScalarPairToIntervalOpVectorComponentWise(
+ this.toVector(x),
+ rhs,
+ this.SubtractionIntervalOp
+ );
+ },
+ };
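+ // Worked example for ReflectIntervalOp: x = [1, -1] (incident), y = [0, 1] (unit normal) gives
+ // dot(x, y) = -1, t = -2, and the result brackets x - t * y = [1, 1].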
+
+ protected reflectIntervalImpl(x: readonly number[], y: readonly number[]): FPVector {
+ assert(
+ x.length === y.length,
+ `reflect is only defined for vectors with the same number of elements`
+ );
+ return this.runVectorPairToVectorOp(this.toVector(x), this.toVector(y), this.ReflectIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of reflect(x, y) */
+ public abstract readonly reflectInterval: (
+ x: readonly number[],
+ y: readonly number[]
+ ) => FPVector;
+
+ /**
+ * refract is unusual in that it is the only builtin that takes
+ * (FPVector, FPVector, F32/F16) and returns an FPVector, and it is essentially
+ * defined in terms of other functions.
+ *
+ * Instead of implementing all the framework code needed to integrate it with
+ * its own operation type, etc., it has a bespoke implementation that is a
+ * composition of other builtins that use the framework.
+ */
+ protected refractIntervalImpl(i: readonly number[], s: readonly number[], r: number): FPVector {
+ assert(
+ i.length === s.length,
+ `refract is only defined for vectors with the same number of elements`
+ );
+
+ const r_squared = this.multiplicationInterval(r, r);
+ const dot = this.dotInterval(s, i);
+ const dot_squared = this.multiplicationInterval(dot, dot);
+ const one_minus_dot_squared = this.subtractionInterval(1, dot_squared);
+ const k = this.subtractionInterval(
+ 1.0,
+ this.multiplicationInterval(r_squared, one_minus_dot_squared)
+ );
+
+ if (!k.isFinite() || k.containsZeroOrSubnormals()) {
+ // There is a discontinuity at k == 0, due to sqrt(k) being calculated, so exiting early
+ return this.constants().unboundedVector[this.toVector(i).length];
+ }
+
+ if (k.end < 0.0) {
+ // if k is negative, then the zero vector is the valid response
+ return this.constants().zeroVector[this.toVector(i).length];
+ }
+
+ const dot_times_r = this.multiplicationInterval(dot, r);
+ const k_sqrt = this.sqrtInterval(k);
+ const t = this.additionInterval(dot_times_r, k_sqrt); // t = r * dot(i, s) + sqrt(k)
+
+ return this.runScalarPairToIntervalOpVectorComponentWise(
+ this.multiplyVectorByScalar(i, r),
+ this.multiplyVectorByScalar(s, t),
+ this.SubtractionIntervalOp
+ ); // (i * r) - (s * t)
+ }
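+ // Worked check for refractIntervalImpl above (exact arithmetic): i = [0, -1], s = [0, 1],
+ // r = 0.5 gives dot(s, i) = -1, k = 1 - 0.25 * (1 - 1) = 1, t = -0.5 + sqrt(1) = 0.5, and the
+ // result brackets r * i - t * s = [0, -1], i.e. the ray passes straight through.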
+
+ /** Calculate acceptance interval vectors of refract(i, s, r) */
+ public abstract readonly refractInterval: (
+ i: readonly number[],
+ s: readonly number[],
+ r: number
+ ) => FPVector;
+
+ private readonly RemainderIntervalOp: ScalarPairToIntervalOp = {
+ impl: (x: number, y: number): FPInterval => {
+ // x % y = x - y * trunc(x/y)
+ return this.subtractionInterval(
+ x,
+ this.multiplicationInterval(y, this.truncInterval(this.divisionInterval(x, y)))
+ );
+ },
+ };
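+ // e.g. remainderInterval(7.5, 2) brackets 7.5 - 2 * trunc(3.75) = 1.5, and
+ // remainderInterval(-7.5, 2) brackets -1.5, since the sign follows x (trunc, not floor).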
+
+ /** Calculate an acceptance interval for x % y */
+ protected remainderIntervalImpl(x: number, y: number): FPInterval {
+ return this.runScalarPairToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.RemainderIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval for x % y */
+ public abstract readonly remainderInterval: (x: number, y: number) => FPInterval;
+
+ private readonly RoundIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ const k = Math.floor(n);
+ const diff_before = n - k;
+ const diff_after = k + 1 - n;
+ if (diff_before < diff_after) {
+ return this.correctlyRoundedInterval(k);
+ } else if (diff_before > diff_after) {
+ return this.correctlyRoundedInterval(k + 1);
+ }
+
+ // n is in the middle of two integers.
+ // The tie breaking rule is 'k if k is even, k + 1 if k is odd'
+ if (k % 2 === 0) {
+ return this.correctlyRoundedInterval(k);
+ }
+ return this.correctlyRoundedInterval(k + 1);
+ },
+ };
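+ // Tie-breaking examples for RoundIntervalOp above: roundInterval(2.5) brackets 2 (k is even),
+ // roundInterval(3.5) brackets 4, and roundInterval(-0.5) brackets 0, since k = -1 is odd so
+ // k + 1 is used.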
+
+ protected roundIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.RoundIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of round(x) */
+ public abstract readonly roundInterval: (n: number) => FPInterval;
+
+ /**
+ * The definition of saturate does not specify which version of clamp to use.
+ * Using min-max here, since it has wider acceptance intervals, which include
+ * all of median's.
+ */
+ protected saturateIntervalImpl(n: number): FPInterval {
+ return this.runScalarTripleToIntervalOp(
+ this.toInterval(n),
+ this.toInterval(0.0),
+ this.toInterval(1.0),
+ this.ClampMinMaxIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of saturate(n) as clamp(n, 0.0, 1.0) */
+ public abstract readonly saturateInterval: (n: number) => FPInterval;
+
+ private readonly SignIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ if (n > 0.0) {
+ return this.correctlyRoundedInterval(1.0);
+ }
+ if (n < 0.0) {
+ return this.correctlyRoundedInterval(-1.0);
+ }
+
+ return this.correctlyRoundedInterval(0.0);
+ },
+ };
+
+ protected signIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.SignIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of sign(x) */
+ public abstract readonly signInterval: (n: number) => FPInterval;
+
+ private readonly SinIntervalOp: ScalarToIntervalOp = {
+ impl: this.limitScalarToIntervalDomain(
+ this.constants().negPiToPiInterval,
+ (n: number): FPInterval => {
+ assert(this.kind === 'f32' || this.kind === 'f16');
+ const abs_error = this.kind === 'f32' ? 2 ** -11 : 2 ** -7;
+ return this.absoluteErrorInterval(Math.sin(n), abs_error);
+ }
+ ),
+ };
+
+ protected sinIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.SinIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of sin(x) */
+ public abstract readonly sinInterval: (n: number) => FPInterval;
+
+ private readonly SinhIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ // sinh(x) = (exp(x) - exp(-x)) * 0.5
+ const minus_n = this.negationInterval(n);
+ return this.multiplicationInterval(
+ this.subtractionInterval(this.expInterval(n), this.expInterval(minus_n)),
+ 0.5
+ );
+ },
+ };
+
+ protected sinhIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.SinhIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of sinh(x) */
+ public abstract readonly sinhInterval: (n: number) => FPInterval;
+
+ private readonly SmoothStepOp: ScalarTripleToIntervalOp = {
+ impl: (low: number, high: number, x: number): FPInterval => {
+ // For clamp(foo, 0.0, 1.0) the different implementations of clamp provide
+ // the same value, so arbitrarily picking the median version to use.
+ // t = clamp((x - low) / (high - low), 0.0, 1.0)
+ // prettier-ignore
+ const t = this.clampMedianInterval(
+ this.divisionInterval(
+ this.subtractionInterval(x, low),
+ this.subtractionInterval(high, low)),
+ 0.0,
+ 1.0);
+ // Inherited from t * t * (3.0 - 2.0 * t)
+ // prettier-ignore
+ return this.multiplicationInterval(
+ t,
+ this.multiplicationInterval(t,
+ this.subtractionInterval(3.0,
+ this.multiplicationInterval(2.0, t))));
+ },
+ };
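+ // e.g. smoothStepInterval(0, 1, 0.5): t brackets 0.5 and the result brackets
+ // 0.5 * 0.5 * (3 - 2 * 0.5) = 0.5; a call with low === high divides by zero and is expected
+ // to produce an unbounded interval.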
+
+ protected smoothStepIntervalImpl(low: number, high: number, x: number): FPInterval {
+ return this.runScalarTripleToIntervalOp(
+ this.toInterval(low),
+ this.toInterval(high),
+ this.toInterval(x),
+ this.SmoothStepOp
+ );
+ }
+
+ /** Calculate an acceptance interval of smoothStep(low, high, x) */
+ public abstract readonly smoothStepInterval: (low: number, high: number, x: number) => FPInterval;
+
+ private readonly SqrtIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ return this.divisionInterval(1.0, this.inverseSqrtInterval(n));
+ },
+ };
+
+ protected sqrtIntervalImpl(n: number | FPInterval): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.SqrtIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of sqrt(x) */
+ public abstract readonly sqrtInterval: (n: number | FPInterval) => FPInterval;
+
+ private readonly StepIntervalOp: ScalarPairToIntervalOp = {
+ impl: (edge: number, x: number): FPInterval => {
+ if (edge <= x) {
+ return this.correctlyRoundedInterval(1.0);
+ }
+ return this.correctlyRoundedInterval(0.0);
+ },
+ };
+
+ protected stepIntervalImpl(edge: number, x: number): FPInterval {
+ return this.runScalarPairToIntervalOp(
+ this.toInterval(edge),
+ this.toInterval(x),
+ this.StepIntervalOp
+ );
+ }
+
+ /**
+ * Calculate an acceptance 'interval' for step(edge, x)
+ *
+ * step only returns two possible values, so its interval requires special
+ * interpretation in CTS tests.
+ * This interval will be one of four values: [0, 0], [0, 1], [1, 1], or [-∞, +∞].
+ * [0, 0] and [1, 1] indicate that the correct answer is the single point they encapsulate.
+ * [0, 1] should not be treated as a span (i.e. 0.1 is not acceptable); it instead
+ * indicates that either 0.0 or 1.0 is an acceptable answer.
+ * [-∞, +∞] is treated as an unbounded interval, since an unbounded or
+ * infinite value was passed in.
+ */
+ public abstract readonly stepInterval: (edge: number, x: number) => FPInterval;
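+ // e.g. stepInterval(1.0, 0.5) is [0, 0] and stepInterval(0.5, 1.0) is [1, 1]; a [0, 1] result
+ // typically comes from subnormal inputs whose flushed and unflushed comparisons disagree, and
+ // means that exactly 0.0 or exactly 1.0 is acceptable, not any value in between.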
+
+ private readonly SubtractionIntervalOp: ScalarPairToIntervalOp = {
+ impl: (x: number, y: number): FPInterval => {
+ return this.correctlyRoundedInterval(x - y);
+ },
+ };
+
+ protected subtractionIntervalImpl(x: number | FPInterval, y: number | FPInterval): FPInterval {
+ return this.runScalarPairToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.SubtractionIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of x - y */
+ public abstract readonly subtractionInterval: (
+ x: number | FPInterval,
+ y: number | FPInterval
+ ) => FPInterval;
+
+ protected subtractionMatrixMatrixIntervalImpl(x: Array2D<number>, y: Array2D<number>): FPMatrix {
+ return this.runScalarPairToIntervalOpMatrixComponentWise(
+ this.toMatrix(x),
+ this.toMatrix(y),
+ this.SubtractionIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of x - y, when x and y are matrices */
+ public abstract readonly subtractionMatrixMatrixInterval: (
+ x: Array2D<number>,
+ y: Array2D<number>
+ ) => FPMatrix;
+
+ private readonly TanIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ return this.divisionInterval(this.sinInterval(n), this.cosInterval(n));
+ },
+ };
+
+ protected tanIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.TanIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of tan(x) */
+ public abstract readonly tanInterval: (n: number) => FPInterval;
+
+ private readonly TanhIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ return this.divisionInterval(this.sinhInterval(n), this.coshInterval(n));
+ },
+ };
+
+ protected tanhIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.TanhIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of tanh(x) */
+ public abstract readonly tanhInterval: (n: number) => FPInterval;
+
+ private readonly TransposeIntervalOp: MatrixToMatrixOp = {
+ impl: (m: Array2D<number>): FPMatrix => {
+ const num_cols = m.length;
+ const num_rows = m[0].length;
+ const result: FPInterval[][] = [...Array(num_rows)].map(_ => [...Array(num_cols)]);
+
+ for (let i = 0; i < num_cols; i++) {
+ for (let j = 0; j < num_rows; j++) {
+ result[j][i] = this.correctlyRoundedInterval(m[i][j]);
+ }
+ }
+ return this.toMatrix(result);
+ },
+ };
+
+ protected transposeIntervalImpl(m: Array2D<number>): FPMatrix {
+ return this.runMatrixToMatrixOp(this.toMatrix(m), this.TransposeIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of transpose(m) */
+ public abstract readonly transposeInterval: (m: Array2D<number>) => FPMatrix;
+
+ private readonly TruncIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ return this.correctlyRoundedInterval(Math.trunc(n));
+ },
+ };
+
+ protected truncIntervalImpl(n: number | FPInterval): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.TruncIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of trunc(x) */
+ public abstract readonly truncInterval: (n: number | FPInterval) => FPInterval;
+}
+
+// Pre-defined values that get used multiple times in _constants' initializers. Cannot use FPTraits members, since this
+// executes before they are defined.
+const kF32UnboundedInterval = new FPInterval(
+ 'f32',
+ Number.NEGATIVE_INFINITY,
+ Number.POSITIVE_INFINITY
+);
+const kF32ZeroInterval = new FPInterval('f32', 0);
+
+class F32Traits extends FPTraits {
+ private static _constants: FPConstants = {
+ positive: {
+ min: kValue.f32.positive.min,
+ max: kValue.f32.positive.max,
+ infinity: kValue.f32.positive.infinity,
+ nearest_max: kValue.f32.positive.nearest_max,
+ less_than_one: kValue.f32.positive.less_than_one,
+ subnormal: {
+ min: kValue.f32.positive.subnormal.min,
+ max: kValue.f32.positive.subnormal.max,
+ },
+ pi: {
+ whole: kValue.f32.positive.pi.whole,
+ three_quarters: kValue.f32.positive.pi.three_quarters,
+ half: kValue.f32.positive.pi.half,
+ third: kValue.f32.positive.pi.third,
+ quarter: kValue.f32.positive.pi.quarter,
+ sixth: kValue.f32.positive.pi.sixth,
+ },
+ e: kValue.f32.positive.e,
+ },
+ negative: {
+ min: kValue.f32.negative.min,
+ max: kValue.f32.negative.max,
+ infinity: kValue.f32.negative.infinity,
+ nearest_min: kValue.f32.negative.nearest_min,
+ less_than_one: kValue.f32.negative.less_than_one,
+ subnormal: {
+ min: kValue.f32.negative.subnormal.min,
+ max: kValue.f32.negative.subnormal.max,
+ },
+ pi: {
+ whole: kValue.f32.negative.pi.whole,
+ three_quarters: kValue.f32.negative.pi.three_quarters,
+ half: kValue.f32.negative.pi.half,
+ third: kValue.f32.negative.pi.third,
+ quarter: kValue.f32.negative.pi.quarter,
+ sixth: kValue.f32.negative.pi.sixth,
+ },
+ },
+ unboundedInterval: kF32UnboundedInterval,
+ zeroInterval: kF32ZeroInterval,
+ // Have to use the constants.ts values here, because values defined in the
+ // initializer cannot be referenced in the initializer
+ negPiToPiInterval: new FPInterval(
+ 'f32',
+ kValue.f32.negative.pi.whole,
+ kValue.f32.positive.pi.whole
+ ),
+ greaterThanZeroInterval: new FPInterval(
+ 'f32',
+ kValue.f32.positive.subnormal.min,
+ kValue.f32.positive.max
+ ),
+ zeroVector: {
+ 2: [kF32ZeroInterval, kF32ZeroInterval],
+ 3: [kF32ZeroInterval, kF32ZeroInterval, kF32ZeroInterval],
+ 4: [kF32ZeroInterval, kF32ZeroInterval, kF32ZeroInterval, kF32ZeroInterval],
+ },
+ unboundedVector: {
+ 2: [kF32UnboundedInterval, kF32UnboundedInterval],
+ 3: [kF32UnboundedInterval, kF32UnboundedInterval, kF32UnboundedInterval],
+ 4: [
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ ],
+ },
+ unboundedMatrix: {
+ 2: {
+ 2: [
+ [kF32UnboundedInterval, kF32UnboundedInterval],
+ [kF32UnboundedInterval, kF32UnboundedInterval],
+ ],
+ 3: [
+ [kF32UnboundedInterval, kF32UnboundedInterval, kF32UnboundedInterval],
+ [kF32UnboundedInterval, kF32UnboundedInterval, kF32UnboundedInterval],
+ ],
+ 4: [
+ [
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ ],
+ [
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ ],
+ ],
+ },
+ 3: {
+ 2: [
+ [kF32UnboundedInterval, kF32UnboundedInterval],
+ [kF32UnboundedInterval, kF32UnboundedInterval],
+ [kF32UnboundedInterval, kF32UnboundedInterval],
+ ],
+ 3: [
+ [kF32UnboundedInterval, kF32UnboundedInterval, kF32UnboundedInterval],
+ [kF32UnboundedInterval, kF32UnboundedInterval, kF32UnboundedInterval],
+ [kF32UnboundedInterval, kF32UnboundedInterval, kF32UnboundedInterval],
+ ],
+ 4: [
+ [
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ ],
+ [
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ ],
+ [
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ ],
+ ],
+ },
+ 4: {
+ 2: [
+ [kF32UnboundedInterval, kF32UnboundedInterval],
+ [kF32UnboundedInterval, kF32UnboundedInterval],
+ [kF32UnboundedInterval, kF32UnboundedInterval],
+ [kF32UnboundedInterval, kF32UnboundedInterval],
+ ],
+ 3: [
+ [kF32UnboundedInterval, kF32UnboundedInterval, kF32UnboundedInterval],
+ [kF32UnboundedInterval, kF32UnboundedInterval, kF32UnboundedInterval],
+ [kF32UnboundedInterval, kF32UnboundedInterval, kF32UnboundedInterval],
+ [kF32UnboundedInterval, kF32UnboundedInterval, kF32UnboundedInterval],
+ ],
+ 4: [
+ [
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ ],
+ [
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ ],
+ [
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ ],
+ [
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ ],
+ ],
+ },
+ },
+ };
+
+ public constructor() {
+ super('f32');
+ }
+
+ public constants(): FPConstants {
+ return F32Traits._constants;
+ }
+
+ // Utilities - Overrides
+ public readonly quantize = quantizeToF32;
+ public readonly correctlyRounded = correctlyRoundedF32;
+ public readonly isFinite = isFiniteF32;
+ public readonly isSubnormal = isSubnormalNumberF32;
+ public readonly flushSubnormal = flushSubnormalNumberF32;
+ public readonly oneULP = oneULPF32;
+ public readonly scalarBuilder = f32;
+
+ // Framework - Fundamental Error Intervals - Overrides
+ public readonly absoluteErrorInterval = this.absoluteErrorIntervalImpl.bind(this);
+ public readonly correctlyRoundedInterval = this.correctlyRoundedIntervalImpl.bind(this);
+ public readonly correctlyRoundedMatrix = this.correctlyRoundedMatrixImpl.bind(this);
+ public readonly ulpInterval = this.ulpIntervalImpl.bind(this);
+
+ // Framework - API - Overrides
+ public readonly absInterval = this.absIntervalImpl.bind(this);
+ public readonly acosInterval = this.acosIntervalImpl.bind(this);
+ public readonly acoshAlternativeInterval = this.acoshAlternativeIntervalImpl.bind(this);
+ public readonly acoshPrimaryInterval = this.acoshPrimaryIntervalImpl.bind(this);
+ public readonly acoshIntervals = [this.acoshAlternativeInterval, this.acoshPrimaryInterval];
+ public readonly additionInterval = this.additionIntervalImpl.bind(this);
+ public readonly additionMatrixMatrixInterval = this.additionMatrixMatrixIntervalImpl.bind(this);
+ public readonly asinInterval = this.asinIntervalImpl.bind(this);
+ public readonly asinhInterval = this.asinhIntervalImpl.bind(this);
+ public readonly atanInterval = this.atanIntervalImpl.bind(this);
+ public readonly atan2Interval = this.atan2IntervalImpl.bind(this);
+ public readonly atanhInterval = this.atanhIntervalImpl.bind(this);
+ public readonly ceilInterval = this.ceilIntervalImpl.bind(this);
+ public readonly clampMedianInterval = this.clampMedianIntervalImpl.bind(this);
+ public readonly clampMinMaxInterval = this.clampMinMaxIntervalImpl.bind(this);
+ public readonly clampIntervals = [this.clampMedianInterval, this.clampMinMaxInterval];
+ public readonly cosInterval = this.cosIntervalImpl.bind(this);
+ public readonly coshInterval = this.coshIntervalImpl.bind(this);
+ public readonly crossInterval = this.crossIntervalImpl.bind(this);
+ public readonly degreesInterval = this.degreesIntervalImpl.bind(this);
+ public readonly determinantInterval = this.determinantIntervalImpl.bind(this);
+ public readonly distanceInterval = this.distanceIntervalImpl.bind(this);
+ public readonly divisionInterval = this.divisionIntervalImpl.bind(this);
+ public readonly dotInterval = this.dotIntervalImpl.bind(this);
+ public readonly expInterval = this.expIntervalImpl.bind(this);
+ public readonly exp2Interval = this.exp2IntervalImpl.bind(this);
+ public readonly faceForwardIntervals = this.faceForwardIntervalsImpl.bind(this);
+ public readonly floorInterval = this.floorIntervalImpl.bind(this);
+ public readonly fmaInterval = this.fmaIntervalImpl.bind(this);
+ public readonly fractInterval = this.fractIntervalImpl.bind(this);
+ public readonly inverseSqrtInterval = this.inverseSqrtIntervalImpl.bind(this);
+ public readonly ldexpInterval = this.ldexpIntervalImpl.bind(this);
+ public readonly lengthInterval = this.lengthIntervalImpl.bind(this);
+ public readonly logInterval = this.logIntervalImpl.bind(this);
+ public readonly log2Interval = this.log2IntervalImpl.bind(this);
+ public readonly maxInterval = this.maxIntervalImpl.bind(this);
+ public readonly minInterval = this.minIntervalImpl.bind(this);
+ public readonly mixImpreciseInterval = this.mixImpreciseIntervalImpl.bind(this);
+ public readonly mixPreciseInterval = this.mixPreciseIntervalImpl.bind(this);
+ public readonly mixIntervals = [this.mixImpreciseInterval, this.mixPreciseInterval];
+ public readonly modfInterval = this.modfIntervalImpl.bind(this);
+ public readonly multiplicationInterval = this.multiplicationIntervalImpl.bind(this);
+ public readonly multiplicationMatrixMatrixInterval =
+ this.multiplicationMatrixMatrixIntervalImpl.bind(this);
+ public readonly multiplicationMatrixScalarInterval =
+ this.multiplicationMatrixScalarIntervalImpl.bind(this);
+ public readonly multiplicationScalarMatrixInterval =
+ this.multiplicationScalarMatrixIntervalImpl.bind(this);
+ public readonly multiplicationMatrixVectorInterval =
+ this.multiplicationMatrixVectorIntervalImpl.bind(this);
+ public readonly multiplicationVectorMatrixInterval =
+ this.multiplicationVectorMatrixIntervalImpl.bind(this);
+ public readonly negationInterval = this.negationIntervalImpl.bind(this);
+ public readonly normalizeInterval = this.normalizeIntervalImpl.bind(this);
+ public readonly powInterval = this.powIntervalImpl.bind(this);
+ public readonly radiansInterval = this.radiansIntervalImpl.bind(this);
+ public readonly reflectInterval = this.reflectIntervalImpl.bind(this);
+ public readonly refractInterval = this.refractIntervalImpl.bind(this);
+ public readonly remainderInterval = this.remainderIntervalImpl.bind(this);
+ public readonly roundInterval = this.roundIntervalImpl.bind(this);
+ public readonly saturateInterval = this.saturateIntervalImpl.bind(this);
+ public readonly signInterval = this.signIntervalImpl.bind(this);
+ public readonly sinInterval = this.sinIntervalImpl.bind(this);
+ public readonly sinhInterval = this.sinhIntervalImpl.bind(this);
+ public readonly smoothStepInterval = this.smoothStepIntervalImpl.bind(this);
+ public readonly sqrtInterval = this.sqrtIntervalImpl.bind(this);
+ public readonly stepInterval = this.stepIntervalImpl.bind(this);
+ public readonly subtractionInterval = this.subtractionIntervalImpl.bind(this);
+ public readonly subtractionMatrixMatrixInterval =
+ this.subtractionMatrixMatrixIntervalImpl.bind(this);
+ public readonly tanInterval = this.tanIntervalImpl.bind(this);
+ public readonly tanhInterval = this.tanhIntervalImpl.bind(this);
+ public readonly transposeInterval = this.transposeIntervalImpl.bind(this);
+ public readonly truncInterval = this.truncIntervalImpl.bind(this);
+
+ // Framework - Cases
+
+ // U32 -> Interval is used for testing f32 specific unpack* functions
+ /**
+ * @returns a Case for the param and the interval generator provided.
+ * The Case will use an interval comparator for matching results.
+ * @param param the param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ private makeU32ToVectorCase(
+ param: number,
+ filter: IntervalFilter,
+ ...ops: ScalarToVector[]
+ ): Case | undefined {
+ param = Math.trunc(param);
+
+ const vectors = ops.map(o => o(param));
+ if (filter === 'finite' && vectors.some(v => !v.every(e => e.isFinite()))) {
+ return undefined;
+ }
+ return {
+ input: u32(param),
+ expected: anyOf(...vectors),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param params array of inputs to try
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ public generateU32ToIntervalCases(
+ params: readonly number[],
+ filter: IntervalFilter,
+ ...ops: ScalarToVector[]
+ ): Case[] {
+ return params.reduce((cases, e) => {
+ const c = this.makeU32ToVectorCase(e, filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ return cases;
+ }, new Array<Case>());
+ }
+
+ // Framework - API
+
+ private readonly QuantizeToF16IntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ const rounded = correctlyRoundedF16(n);
+ const flushed = addFlushedIfNeededF16(rounded);
+ return this.spanIntervals(...flushed.map(f => this.toInterval(f)));
+ },
+ };
+
+ protected quantizeToF16IntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.QuantizeToF16IntervalOp);
+ }
+
+ /** Calculate an acceptance interval of quantizeToF16(x) */
+ public readonly quantizeToF16Interval = this.quantizeToF16IntervalImpl.bind(this);
+
+ /**
+ * Once-allocated ArrayBuffer/views to avoid overhead of allocation when
+ * converting between numeric formats
+ *
+ * unpackData* is shared between all the unpack*Interval functions, so to
+ * avoid re-entrancy problems, they should not call each other or themselves
+ * directly or indirectly.
+ */
+ private readonly unpackData = new ArrayBuffer(4);
+ private readonly unpackDataU32 = new Uint32Array(this.unpackData);
+ private readonly unpackDataU16 = new Uint16Array(this.unpackData);
+ private readonly unpackDataU8 = new Uint8Array(this.unpackData);
+ private readonly unpackDataI16 = new Int16Array(this.unpackData);
+ private readonly unpackDataI8 = new Int8Array(this.unpackData);
+ private readonly unpackDataF16 = new Float16Array(this.unpackData);
+
+ private unpack2x16floatIntervalImpl(n: number): FPVector {
+ assert(
+ n >= kValue.u32.min && n <= kValue.u32.max,
+ 'unpack2x16floatInterval only accepts values within the bounds of u32'
+ );
+ this.unpackDataU32[0] = n;
+ if (this.unpackDataF16.some(f => !isFiniteF16(f))) {
+ return [this.constants().unboundedInterval, this.constants().unboundedInterval];
+ }
+
+ const result: FPVector = [
+ this.quantizeToF16Interval(this.unpackDataF16[0]),
+ this.quantizeToF16Interval(this.unpackDataF16[1]),
+ ];
+
+ if (result.some(r => !r.isFinite())) {
+ return [this.constants().unboundedInterval, this.constants().unboundedInterval];
+ }
+ return result;
+ }
+
+ /** Calculate an acceptance interval vector for unpack2x16float(x) */
+ public readonly unpack2x16floatInterval = this.unpack2x16floatIntervalImpl.bind(this);
+
+ private unpack2x16snormIntervalImpl(n: number): FPVector {
+ assert(
+ n >= kValue.u32.min && n <= kValue.u32.max,
+ 'unpack2x16snormInterval only accepts values within the bounds of u32'
+ );
+ const op = (n: number): FPInterval => {
+ return this.ulpInterval(Math.max(n / 32767, -1), 3);
+ };
+
+ this.unpackDataU32[0] = n;
+ return [op(this.unpackDataI16[0]), op(this.unpackDataI16[1])];
+ }
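+ // e.g. n = 0x7fff7fff unpacks to i16 components 32767 and 32767, so both result intervals
+ // bracket 1.0 within 3 ULP; n = 0x80018001 (components -32767) brackets -1.0.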
+
+ /** Calculate an acceptance interval vector for unpack2x16snorm(x) */
+ public readonly unpack2x16snormInterval = this.unpack2x16snormIntervalImpl.bind(this);
+
+ private unpack2x16unormIntervalImpl(n: number): FPVector {
+ assert(
+ n >= kValue.u32.min && n <= kValue.u32.max,
+ 'unpack2x16unormInterval only accepts values within the bounds of u32'
+ );
+ const op = (n: number): FPInterval => {
+ return this.ulpInterval(n / 65535, 3);
+ };
+
+ this.unpackDataU32[0] = n;
+ return [op(this.unpackDataU16[0]), op(this.unpackDataU16[1])];
+ }
+
+ /** Calculate an acceptance interval vector for unpack2x16unorm(x) */
+ public readonly unpack2x16unormInterval = this.unpack2x16unormIntervalImpl.bind(this);
+
+ private unpack4x8snormIntervalImpl(n: number): FPVector {
+ assert(
+ n >= kValue.u32.min && n <= kValue.u32.max,
+ 'unpack4x8snormInterval only accepts values within the bounds of u32'
+ );
+ const op = (n: number): FPInterval => {
+ return this.ulpInterval(Math.max(n / 127, -1), 3);
+ };
+ this.unpackDataU32[0] = n;
+ return [
+ op(this.unpackDataI8[0]),
+ op(this.unpackDataI8[1]),
+ op(this.unpackDataI8[2]),
+ op(this.unpackDataI8[3]),
+ ];
+ }
+
+ /** Calculate an acceptance interval vector for unpack4x8snorm(x) */
+ public readonly unpack4x8snormInterval = this.unpack4x8snormIntervalImpl.bind(this);
+
+ private unpack4x8unormIntervalImpl(n: number): FPVector {
+ assert(
+ n >= kValue.u32.min && n <= kValue.u32.max,
+ 'unpack4x8unormInterval only accepts values within the bounds of u32'
+ );
+ const op = (n: number): FPInterval => {
+ return this.ulpInterval(n / 255, 3);
+ };
+
+ this.unpackDataU32[0] = n;
+ return [
+ op(this.unpackDataU8[0]),
+ op(this.unpackDataU8[1]),
+ op(this.unpackDataU8[2]),
+ op(this.unpackDataU8[3]),
+ ];
+ }
+
+ /** Calculate an acceptance interval vector for unpack4x8unorm(x) */
+ public readonly unpack4x8unormInterval = this.unpack4x8unormIntervalImpl.bind(this);
+}
+
+// Need to separately allocate f32 traits, so they can be referenced by
+// FPAbstractTraits for forwarding.
+const kF32Traits = new F32Traits();
+
+// Pre-defined values that get used multiple times in _constants' initializers. Cannot use FPTraits members, since this
+// executes before they are defined.
+const kAbstractUnboundedInterval = new FPInterval(
+ 'abstract',
+ Number.NEGATIVE_INFINITY,
+ Number.POSITIVE_INFINITY
+);
+const kAbstractZeroInterval = new FPInterval('abstract', 0);
+
+// This implementation is incomplete
+class FPAbstractTraits extends FPTraits {
+ private static _constants: FPConstants = {
+ positive: {
+ min: kValue.f64.positive.min,
+ max: kValue.f64.positive.max,
+ infinity: kValue.f64.positive.infinity,
+ nearest_max: kValue.f64.positive.nearest_max,
+ less_than_one: kValue.f64.positive.less_than_one,
+ subnormal: {
+ min: kValue.f64.positive.subnormal.min,
+ max: kValue.f64.positive.subnormal.max,
+ },
+ pi: {
+ whole: kValue.f64.positive.pi.whole,
+ three_quarters: kValue.f64.positive.pi.three_quarters,
+ half: kValue.f64.positive.pi.half,
+ third: kValue.f64.positive.pi.third,
+ quarter: kValue.f64.positive.pi.quarter,
+ sixth: kValue.f64.positive.pi.sixth,
+ },
+ e: kValue.f64.positive.e,
+ },
+ negative: {
+ min: kValue.f64.negative.min,
+ max: kValue.f64.negative.max,
+ infinity: kValue.f64.negative.infinity,
+ nearest_min: kValue.f64.negative.nearest_min,
+ less_than_one: kValue.f64.negative.less_than_one,
+ subnormal: {
+ min: kValue.f64.negative.subnormal.min,
+ max: kValue.f64.negative.subnormal.max,
+ },
+ pi: {
+ whole: kValue.f64.negative.pi.whole,
+ three_quarters: kValue.f64.negative.pi.three_quarters,
+ half: kValue.f64.negative.pi.half,
+ third: kValue.f64.negative.pi.third,
+ quarter: kValue.f64.negative.pi.quarter,
+ sixth: kValue.f64.negative.pi.sixth,
+ },
+ },
+ unboundedInterval: kAbstractUnboundedInterval,
+ zeroInterval: kAbstractZeroInterval,
+ // Have to use the constants.ts values here, because values defined in the
+ // initializer cannot be referenced in the initializer
+ negPiToPiInterval: new FPInterval(
+ 'abstract',
+ kValue.f64.negative.pi.whole,
+ kValue.f64.positive.pi.whole
+ ),
+ greaterThanZeroInterval: new FPInterval(
+ 'abstract',
+ kValue.f64.positive.subnormal.min,
+ kValue.f64.positive.max
+ ),
+ zeroVector: {
+ 2: [kAbstractZeroInterval, kAbstractZeroInterval],
+ 3: [kAbstractZeroInterval, kAbstractZeroInterval, kAbstractZeroInterval],
+ 4: [
+ kAbstractZeroInterval,
+ kAbstractZeroInterval,
+ kAbstractZeroInterval,
+ kAbstractZeroInterval,
+ ],
+ },
+ unboundedVector: {
+ 2: [kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ 3: [kAbstractUnboundedInterval, kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ 4: [
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ ],
+ },
+ unboundedMatrix: {
+ 2: {
+ 2: [
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ ],
+ 3: [
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ ],
+ 4: [
+ [
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ ],
+ [
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ ],
+ ],
+ },
+ 3: {
+ 2: [
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ ],
+ 3: [
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ ],
+ 4: [
+ [
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ ],
+ [
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ ],
+ [
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ ],
+ ],
+ },
+ 4: {
+ 2: [
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ ],
+ 3: [
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ ],
+ 4: [
+ [
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ ],
+ [
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ ],
+ [
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ ],
+ [
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ ],
+ ],
+ },
+ },
+ };
+
+ public constructor() {
+ super('abstract');
+ }
+
+ public constants(): FPConstants {
+ return FPAbstractTraits._constants;
+ }
+
+ // Utilities - Overrides
+ // number is represented as a f64 internally, so all number values are already
+ // quantized to f64
+ public readonly quantize = (n: number) => {
+ return n;
+ };
+ public readonly correctlyRounded = correctlyRoundedF64;
+ public readonly isFinite = Number.isFinite;
+ public readonly isSubnormal = isSubnormalNumberF64;
+ public readonly flushSubnormal = flushSubnormalNumberF64;
+ public readonly oneULP = (_target: number, _mode: FlushMode = 'flush'): number => {
+ unreachable(`FPAbstractTraits.oneULP should never be called`);
+ };
+ public readonly scalarBuilder = abstractFloat;
+
+ // Framework - Fundamental Error Intervals - Overrides
+ public readonly absoluteErrorInterval = this.unboundedAbsoluteErrorInterval.bind(this);
+ public readonly correctlyRoundedInterval = this.correctlyRoundedIntervalImpl.bind(this);
+ public readonly correctlyRoundedMatrix = this.correctlyRoundedMatrixImpl.bind(this);
+ public readonly ulpInterval = (n: number, numULP: number): FPInterval => {
+ return this.toInterval(kF32Traits.ulpInterval(n, numULP));
+ };
+
+ // Framework - API - Overrides
+ public readonly absInterval = this.absIntervalImpl.bind(this);
+ public readonly acosInterval = this.unimplementedScalarToInterval.bind(this, 'acosInterval');
+ public readonly acoshAlternativeInterval = this.unimplementedScalarToInterval.bind(
+ this,
+ 'acoshAlternativeInterval'
+ );
+ public readonly acoshPrimaryInterval = this.unimplementedScalarToInterval.bind(
+ this,
+ 'acoshPrimaryInterval'
+ );
+ public readonly acoshIntervals = [this.acoshAlternativeInterval, this.acoshPrimaryInterval];
+ public readonly additionInterval = this.additionIntervalImpl.bind(this);
+ public readonly additionMatrixMatrixInterval = this.additionMatrixMatrixIntervalImpl.bind(this);
+ public readonly asinInterval = this.unimplementedScalarToInterval.bind(this, 'asinInterval');
+ public readonly asinhInterval = this.unimplementedScalarToInterval.bind(this, 'asinhInterval');
+ public readonly atanInterval = this.unimplementedScalarToInterval.bind(this, 'atanInterval');
+ public readonly atan2Interval = this.unimplementedScalarPairToInterval.bind(
+ this,
+ 'atan2Interval'
+ );
+ public readonly atanhInterval = this.unimplementedScalarToInterval.bind(this, 'atanhInterval');
+ public readonly ceilInterval = this.unimplementedScalarToInterval.bind(this, 'ceilInterval');
+ public readonly clampMedianInterval = this.clampMedianIntervalImpl.bind(this);
+ public readonly clampMinMaxInterval = this.clampMinMaxIntervalImpl.bind(this);
+ public readonly clampIntervals = [this.clampMedianInterval, this.clampMinMaxInterval];
+ public readonly cosInterval = this.unimplementedScalarToInterval.bind(this, 'cosInterval');
+ public readonly coshInterval = this.unimplementedScalarToInterval.bind(this, 'coshInterval');
+ public readonly crossInterval = this.crossIntervalImpl.bind(this);
+ public readonly degreesInterval = this.degreesIntervalImpl.bind(this);
+ public readonly determinantInterval = this.unimplementedMatrixToInterval.bind(
+ this,
+ 'determinantInterval'
+ );
+ public readonly distanceInterval = this.unimplementedDistance.bind(this);
+ public readonly divisionInterval = (
+ x: number | FPInterval,
+ y: number | FPInterval
+ ): FPInterval => {
+ return this.toInterval(kF32Traits.divisionInterval(x, y));
+ };
+ public readonly dotInterval = this.unimplementedVectorPairToInterval.bind(this, 'dotInterval');
+ public readonly expInterval = this.unimplementedScalarToInterval.bind(this, 'expInterval');
+ public readonly exp2Interval = this.unimplementedScalarToInterval.bind(this, 'exp2Interval');
+ public readonly faceForwardIntervals = this.unimplementedFaceForward.bind(this);
+ public readonly floorInterval = this.floorIntervalImpl.bind(this);
+ public readonly fmaInterval = this.fmaIntervalImpl.bind(this);
+ public readonly fractInterval = this.unimplementedScalarToInterval.bind(this, 'fractInterval');
+ public readonly inverseSqrtInterval = this.unimplementedScalarToInterval.bind(
+ this,
+ 'inverseSqrtInterval'
+ );
+ public readonly ldexpInterval = this.unimplementedScalarPairToInterval.bind(
+ this,
+ 'ldexpInterval'
+ );
+ public readonly lengthInterval = this.unimplementedLength.bind(this);
+ public readonly logInterval = this.unimplementedScalarToInterval.bind(this, 'logInterval');
+ public readonly log2Interval = this.unimplementedScalarToInterval.bind(this, 'log2Interval');
+ public readonly maxInterval = this.maxIntervalImpl.bind(this);
+ public readonly minInterval = this.minIntervalImpl.bind(this);
+ public readonly mixImpreciseInterval = this.unimplementedScalarTripleToInterval.bind(
+ this,
+ 'mixImpreciseInterval'
+ );
+ public readonly mixPreciseInterval = this.unimplementedScalarTripleToInterval.bind(
+ this,
+ 'mixPreciseInterval'
+ );
+ public readonly mixIntervals = [this.mixImpreciseInterval, this.mixPreciseInterval];
+ public readonly modfInterval = this.modfIntervalImpl.bind(this);
+ public readonly multiplicationInterval = this.multiplicationIntervalImpl.bind(this);
+ public readonly multiplicationMatrixMatrixInterval = this.unimplementedMatrixPairToMatrix.bind(
+ this,
+ 'multiplicationMatrixMatrixInterval'
+ );
+ public readonly multiplicationMatrixScalarInterval = this.unimplementedMatrixScalarToMatrix.bind(
+ this,
+ 'multiplicationMatrixScalarInterval'
+ );
+ public readonly multiplicationScalarMatrixInterval = this.unimplementedScalarMatrixToMatrix.bind(
+ this,
+ 'multiplicationScalarMatrixInterval'
+ );
+ public readonly multiplicationMatrixVectorInterval = this.unimplementedMatrixVectorToVector.bind(
+ this,
+ 'multiplicationMatrixVectorInterval'
+ );
+ public readonly multiplicationVectorMatrixInterval = this.unimplementedVectorMatrixToVector.bind(
+ this,
+ 'multiplicationVectorMatrixInterval'
+ );
+ public readonly negationInterval = this.negationIntervalImpl.bind(this);
+ public readonly normalizeInterval = this.unimplementedVectorToVector.bind(
+ this,
+ 'normalizeInterval'
+ );
+ public readonly powInterval = this.unimplementedScalarPairToInterval.bind(this, 'powInterval');
+ public readonly radiansInterval = this.radiansIntervalImpl.bind(this);
+ public readonly reflectInterval = this.unimplementedVectorPairToVector.bind(
+ this,
+ 'reflectInterval'
+ );
+ public readonly refractInterval = this.unimplementedRefract.bind(this);
+ public readonly remainderInterval = (x: number, y: number): FPInterval => {
+ return this.toInterval(kF32Traits.remainderInterval(x, y));
+ };
+ public readonly roundInterval = this.unimplementedScalarToInterval.bind(this, 'roundInterval');
+ public readonly saturateInterval = this.saturateIntervalImpl.bind(this);
+ public readonly signInterval = this.signIntervalImpl.bind(this);
+ public readonly sinInterval = this.unimplementedScalarToInterval.bind(this, 'sinInterval');
+ public readonly sinhInterval = this.unimplementedScalarToInterval.bind(this, 'sinhInterval');
+ public readonly smoothStepInterval = this.unimplementedScalarTripleToInterval.bind(
+ this,
+ 'smoothStepInterval'
+ );
+ public readonly sqrtInterval = this.unimplementedScalarToInterval.bind(this, 'sqrtInterval');
+ public readonly stepInterval = this.unimplementedScalarPairToInterval.bind(this, 'stepInterval');
+ public readonly subtractionInterval = this.subtractionIntervalImpl.bind(this);
+ public readonly subtractionMatrixMatrixInterval =
+ this.subtractionMatrixMatrixIntervalImpl.bind(this);
+ public readonly tanInterval = this.unimplementedScalarToInterval.bind(this, 'tanInterval');
+ public readonly tanhInterval = this.unimplementedScalarToInterval.bind(this, 'tanhInterval');
+ public readonly transposeInterval = this.transposeIntervalImpl.bind(this);
+ public readonly truncInterval = this.truncIntervalImpl.bind(this);
+}
+
+// Pre-defined values that get used multiple times in _constants' initializers. Cannot use FPTraits members, since this
+// executes before they are defined.
+const kF16UnboundedInterval = new FPInterval(
+ 'f16',
+ Number.NEGATIVE_INFINITY,
+ Number.POSITIVE_INFINITY
+);
+const kF16ZeroInterval = new FPInterval('f16', 0);
+
+// This implementation is incomplete
+class F16Traits extends FPTraits {
+ private static _constants: FPConstants = {
+ positive: {
+ min: kValue.f16.positive.min,
+ max: kValue.f16.positive.max,
+ infinity: kValue.f16.positive.infinity,
+ nearest_max: kValue.f16.positive.nearest_max,
+ less_than_one: kValue.f16.positive.less_than_one,
+ subnormal: {
+ min: kValue.f16.positive.subnormal.min,
+ max: kValue.f16.positive.subnormal.max,
+ },
+ pi: {
+ whole: kValue.f16.positive.pi.whole,
+ three_quarters: kValue.f16.positive.pi.three_quarters,
+ half: kValue.f16.positive.pi.half,
+ third: kValue.f16.positive.pi.third,
+ quarter: kValue.f16.positive.pi.quarter,
+ sixth: kValue.f16.positive.pi.sixth,
+ },
+ e: kValue.f16.positive.e,
+ },
+ negative: {
+ min: kValue.f16.negative.min,
+ max: kValue.f16.negative.max,
+ infinity: kValue.f16.negative.infinity,
+ nearest_min: kValue.f16.negative.nearest_min,
+ less_than_one: kValue.f16.negative.less_than_one,
+ subnormal: {
+ min: kValue.f16.negative.subnormal.min,
+ max: kValue.f16.negative.subnormal.max,
+ },
+ pi: {
+ whole: kValue.f16.negative.pi.whole,
+ three_quarters: kValue.f16.negative.pi.three_quarters,
+ half: kValue.f16.negative.pi.half,
+ third: kValue.f16.negative.pi.third,
+ quarter: kValue.f16.negative.pi.quarter,
+ sixth: kValue.f16.negative.pi.sixth,
+ },
+ },
+ unboundedInterval: kF16UnboundedInterval,
+ zeroInterval: kF16ZeroInterval,
+ // Have to use the constants.ts values here, because values defined in the
+ // initializer cannot be referenced in the initializer
+ negPiToPiInterval: new FPInterval(
+ 'f16',
+ kValue.f16.negative.pi.whole,
+ kValue.f16.positive.pi.whole
+ ),
+ greaterThanZeroInterval: new FPInterval(
+ 'f16',
+ kValue.f16.positive.subnormal.min,
+ kValue.f16.positive.max
+ ),
+ zeroVector: {
+ 2: [kF16ZeroInterval, kF16ZeroInterval],
+ 3: [kF16ZeroInterval, kF16ZeroInterval, kF16ZeroInterval],
+ 4: [kF16ZeroInterval, kF16ZeroInterval, kF16ZeroInterval, kF16ZeroInterval],
+ },
+ unboundedVector: {
+ 2: [kF16UnboundedInterval, kF16UnboundedInterval],
+ 3: [kF16UnboundedInterval, kF16UnboundedInterval, kF16UnboundedInterval],
+ 4: [
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ ],
+ },
+ unboundedMatrix: {
+ 2: {
+ 2: [
+ [kF16UnboundedInterval, kF16UnboundedInterval],
+ [kF16UnboundedInterval, kF16UnboundedInterval],
+ ],
+ 3: [
+ [kF16UnboundedInterval, kF16UnboundedInterval, kF16UnboundedInterval],
+ [kF16UnboundedInterval, kF16UnboundedInterval, kF16UnboundedInterval],
+ ],
+ 4: [
+ [
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ ],
+ [
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ ],
+ ],
+ },
+ 3: {
+ 2: [
+ [kF16UnboundedInterval, kF16UnboundedInterval],
+ [kF16UnboundedInterval, kF16UnboundedInterval],
+ [kF16UnboundedInterval, kF16UnboundedInterval],
+ ],
+ 3: [
+ [kF16UnboundedInterval, kF16UnboundedInterval, kF16UnboundedInterval],
+ [kF16UnboundedInterval, kF16UnboundedInterval, kF16UnboundedInterval],
+ [kF16UnboundedInterval, kF16UnboundedInterval, kF16UnboundedInterval],
+ ],
+ 4: [
+ [
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ ],
+ [
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ ],
+ [
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ ],
+ ],
+ },
+ 4: {
+ 2: [
+ [kF16UnboundedInterval, kF16UnboundedInterval],
+ [kF16UnboundedInterval, kF16UnboundedInterval],
+ [kF16UnboundedInterval, kF16UnboundedInterval],
+ [kF16UnboundedInterval, kF16UnboundedInterval],
+ ],
+ 3: [
+ [kF16UnboundedInterval, kF16UnboundedInterval, kF16UnboundedInterval],
+ [kF16UnboundedInterval, kF16UnboundedInterval, kF16UnboundedInterval],
+ [kF16UnboundedInterval, kF16UnboundedInterval, kF16UnboundedInterval],
+ [kF16UnboundedInterval, kF16UnboundedInterval, kF16UnboundedInterval],
+ ],
+ 4: [
+ [
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ ],
+ [
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ ],
+ [
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ ],
+ [
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ ],
+ ],
+ },
+ },
+ };
+
+ public constructor() {
+ super('f16');
+ }
+
+ public constants(): FPConstants {
+ return F16Traits._constants;
+ }
+
+ // Utilities - Overrides
+ public readonly quantize = quantizeToF16;
+ public readonly correctlyRounded = correctlyRoundedF16;
+ public readonly isFinite = isFiniteF16;
+ public readonly isSubnormal = isSubnormalNumberF16;
+ public readonly flushSubnormal = flushSubnormalNumberF16;
+ public readonly oneULP = oneULPF16;
+ public readonly scalarBuilder = f16;
+
+ // Framework - Fundamental Error Intervals - Overrides
+ public readonly absoluteErrorInterval = this.absoluteErrorIntervalImpl.bind(this);
+ public readonly correctlyRoundedInterval = this.correctlyRoundedIntervalImpl.bind(this);
+ public readonly correctlyRoundedMatrix = this.correctlyRoundedMatrixImpl.bind(this);
+ public readonly ulpInterval = this.ulpIntervalImpl.bind(this);
+
+ // Framework - API - Overrides
+ public readonly absInterval = this.absIntervalImpl.bind(this);
+ public readonly acosInterval = this.acosIntervalImpl.bind(this);
+ public readonly acoshAlternativeInterval = this.acoshAlternativeIntervalImpl.bind(this);
+ public readonly acoshPrimaryInterval = this.acoshPrimaryIntervalImpl.bind(this);
+ public readonly acoshIntervals = [this.acoshAlternativeInterval, this.acoshPrimaryInterval];
+ public readonly additionInterval = this.additionIntervalImpl.bind(this);
+ public readonly additionMatrixMatrixInterval = this.additionMatrixMatrixIntervalImpl.bind(this);
+ public readonly asinInterval = this.asinIntervalImpl.bind(this);
+ public readonly asinhInterval = this.asinhIntervalImpl.bind(this);
+ public readonly atanInterval = this.atanIntervalImpl.bind(this);
+ public readonly atan2Interval = this.atan2IntervalImpl.bind(this);
+ public readonly atanhInterval = this.atanhIntervalImpl.bind(this);
+ public readonly ceilInterval = this.ceilIntervalImpl.bind(this);
+ public readonly clampMedianInterval = this.clampMedianIntervalImpl.bind(this);
+ public readonly clampMinMaxInterval = this.clampMinMaxIntervalImpl.bind(this);
+ public readonly clampIntervals = [this.clampMedianInterval, this.clampMinMaxInterval];
+ public readonly cosInterval = this.cosIntervalImpl.bind(this);
+ public readonly coshInterval = this.coshIntervalImpl.bind(this);
+ public readonly crossInterval = this.crossIntervalImpl.bind(this);
+ public readonly degreesInterval = this.degreesIntervalImpl.bind(this);
+ public readonly determinantInterval = this.determinantIntervalImpl.bind(this);
+ public readonly distanceInterval = this.distanceIntervalImpl.bind(this);
+ public readonly divisionInterval = this.divisionIntervalImpl.bind(this);
+ public readonly dotInterval = this.dotIntervalImpl.bind(this);
+ public readonly expInterval = this.expIntervalImpl.bind(this);
+ public readonly exp2Interval = this.exp2IntervalImpl.bind(this);
+ public readonly faceForwardIntervals = this.faceForwardIntervalsImpl.bind(this);
+ public readonly floorInterval = this.floorIntervalImpl.bind(this);
+ public readonly fmaInterval = this.fmaIntervalImpl.bind(this);
+ public readonly fractInterval = this.fractIntervalImpl.bind(this);
+ public readonly inverseSqrtInterval = this.inverseSqrtIntervalImpl.bind(this);
+ public readonly ldexpInterval = this.ldexpIntervalImpl.bind(this);
+ public readonly lengthInterval = this.lengthIntervalImpl.bind(this);
+ public readonly logInterval = this.logIntervalImpl.bind(this);
+ public readonly log2Interval = this.log2IntervalImpl.bind(this);
+ public readonly maxInterval = this.maxIntervalImpl.bind(this);
+ public readonly minInterval = this.minIntervalImpl.bind(this);
+ public readonly mixImpreciseInterval = this.mixImpreciseIntervalImpl.bind(this);
+ public readonly mixPreciseInterval = this.mixPreciseIntervalImpl.bind(this);
+ public readonly mixIntervals = [this.mixImpreciseInterval, this.mixPreciseInterval];
+ public readonly modfInterval = this.modfIntervalImpl.bind(this);
+ public readonly multiplicationInterval = this.multiplicationIntervalImpl.bind(this);
+ public readonly multiplicationMatrixMatrixInterval =
+ this.multiplicationMatrixMatrixIntervalImpl.bind(this);
+ public readonly multiplicationMatrixScalarInterval =
+ this.multiplicationMatrixScalarIntervalImpl.bind(this);
+ public readonly multiplicationScalarMatrixInterval =
+ this.multiplicationScalarMatrixIntervalImpl.bind(this);
+ public readonly multiplicationMatrixVectorInterval =
+ this.multiplicationMatrixVectorIntervalImpl.bind(this);
+ public readonly multiplicationVectorMatrixInterval =
+ this.multiplicationVectorMatrixIntervalImpl.bind(this);
+ public readonly negationInterval = this.negationIntervalImpl.bind(this);
+ public readonly normalizeInterval = this.normalizeIntervalImpl.bind(this);
+ public readonly powInterval = this.powIntervalImpl.bind(this);
+ public readonly radiansInterval = this.radiansIntervalImpl.bind(this);
+ public readonly reflectInterval = this.reflectIntervalImpl.bind(this);
+ public readonly refractInterval = this.refractIntervalImpl.bind(this);
+ public readonly remainderInterval = this.remainderIntervalImpl.bind(this);
+ public readonly roundInterval = this.roundIntervalImpl.bind(this);
+ public readonly saturateInterval = this.saturateIntervalImpl.bind(this);
+ public readonly signInterval = this.signIntervalImpl.bind(this);
+ public readonly sinInterval = this.sinIntervalImpl.bind(this);
+ public readonly sinhInterval = this.sinhIntervalImpl.bind(this);
+ public readonly smoothStepInterval = this.smoothStepIntervalImpl.bind(this);
+ public readonly sqrtInterval = this.sqrtIntervalImpl.bind(this);
+ public readonly stepInterval = this.stepIntervalImpl.bind(this);
+ public readonly subtractionInterval = this.subtractionIntervalImpl.bind(this);
+ public readonly subtractionMatrixMatrixInterval =
+ this.subtractionMatrixMatrixIntervalImpl.bind(this);
+ public readonly tanInterval = this.tanIntervalImpl.bind(this);
+ public readonly tanhInterval = this.tanhIntervalImpl.bind(this);
+ public readonly transposeInterval = this.transposeIntervalImpl.bind(this);
+ public readonly truncInterval = this.truncIntervalImpl.bind(this);
+}
+
+export const FP = {
+ f32: kF32Traits,
+ f16: new F16Traits(),
+ abstract: new FPAbstractTraits(),
+};
+
+/** @returns the floating-point traits for `type` */
+export function fpTraitsFor(type: ScalarType): FPTraits {
+ switch (type.kind) {
+ case 'abstract-float':
+ return FP.abstract;
+ case 'f32':
+ return FP.f32;
+ case 'f16':
+ return FP.f16;
+ default:
+ unreachable(`unsupported type: ${type}`);
+ }
+}
+
+/** @returns true if the value `value` is representable with `type` */
+export function isRepresentable(value: number, type: ScalarType) {
+ if (!Number.isFinite(value)) {
+ return false;
+ }
+ if (isFloatType(type)) {
+ const constants = fpTraitsFor(type).constants();
+ return value >= constants.negative.min && value <= constants.positive.max;
+ }
+ assert(false, `isRepresentable() is not yet implemented for type ${type}`);
+}
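+
+// Illustrative usage sketch (not part of the upstream CTS source): FP exposes
+// per-format traits, so callers typically write e.g.
+//   FP.f32.correctlyRoundedInterval(0.1);  // smallest f32 interval containing 0.1
+//   FP.f16.constants().positive.max;       // 65504, the largest finite f16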
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/math.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/math.ts
new file mode 100644
index 0000000000..851db40c71
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/math.ts
@@ -0,0 +1,2247 @@
+import { ROArrayArray, ROArrayArrayArray } from '../../common/util/types.js';
+import { assert } from '../../common/util/util.js';
+import {
+ Float16Array,
+ getFloat16,
+ hfround,
+ setFloat16,
+} from '../../external/petamoriken/float16/float16.js';
+
+import { kBit, kValue } from './constants.js';
+import {
+ reinterpretF64AsU64,
+ reinterpretU64AsF64,
+ reinterpretU32AsF32,
+ reinterpretU16AsF16,
+} from './reinterpret.js';
+
+/**
+ * A multiple of 8 guaranteed to be way too large to allocate (just under 8 pebibytes).
+ * This is a "safe" integer (ULP <= 1.0) very close to MAX_SAFE_INTEGER.
+ *
+ * Note: allocations of this size are likely to exceed limitations other than just the system's
+ * physical memory, so test cases are also needed to try to trigger "true" OOM.
+ */
+export const kMaxSafeMultipleOf8 = Number.MAX_SAFE_INTEGER - 7;
+
+/** Round `n` up to the next multiple of `alignment` (inclusive). */
+// MAINTENANCE_TODO: Rename to `roundUp`
+export function align(n: number, alignment: number): number {
+ assert(Number.isInteger(n) && n >= 0, 'n must be a non-negative integer');
+ assert(Number.isInteger(alignment) && alignment > 0, 'alignment must be a positive integer');
+ return Math.ceil(n / alignment) * alignment;
+}
+
+/** Round `n` down to the next multiple of `alignment` (inclusive). */
+export function roundDown(n: number, alignment: number): number {
+ assert(Number.isInteger(n) && n >= 0, 'n must be a non-negative integer');
+ assert(Number.isInteger(alignment) && alignment > 0, 'alignment must be a positive integer');
+ return Math.floor(n / alignment) * alignment;
+}
+
+/** Clamp a number to the provided range. */
+export function clamp(n: number, { min, max }: { min: number; max: number }): number {
+ assert(max >= min);
+ return Math.min(Math.max(n, min), max);
+}
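+
+// Illustrative only (not part of the upstream file): expected behaviour of the
+// alignment/clamping helpers above.
+//   align(10, 4);                    // 12
+//   align(12, 4);                    // 12 (already a multiple)
+//   roundDown(10, 4);                // 8
+//   clamp(42, { min: 0, max: 10 });  // 10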
+
+/** @returns 0 if |val| is a subnormal f64 number, otherwise returns |val| */
+export function flushSubnormalNumberF64(val: number): number {
+ return isSubnormalNumberF64(val) ? 0 : val;
+}
+
+/** @returns if number is within subnormal range of f64 */
+export function isSubnormalNumberF64(n: number): boolean {
+ return n > kValue.f64.negative.max && n < kValue.f64.positive.min;
+}
+
+/** @returns 0 if |val| is a subnormal f32 number, otherwise returns |val| */
+export function flushSubnormalNumberF32(val: number): number {
+ return isSubnormalNumberF32(val) ? 0 : val;
+}
+
+/** @returns if number is within subnormal range of f32 */
+export function isSubnormalNumberF32(n: number): boolean {
+ return n > kValue.f32.negative.max && n < kValue.f32.positive.min;
+}
+
+/** @returns if number is in the finite range of f32 */
+export function isFiniteF32(n: number) {
+ return n >= kValue.f32.negative.min && n <= kValue.f32.positive.max;
+}
+
+/** @returns 0 if |val| is a subnormal f16 number, otherwise returns |val| */
+export function flushSubnormalNumberF16(val: number): number {
+ return isSubnormalNumberF16(val) ? 0 : val;
+}
+
+/** @returns if number is within subnormal range of f16 */
+export function isSubnormalNumberF16(n: number): boolean {
+ return n > kValue.f16.negative.max && n < kValue.f16.positive.min;
+}
+
+/** @returns if number is in the finite range of f16 */
+export function isFiniteF16(n: number) {
+ return n >= kValue.f16.negative.min && n <= kValue.f16.positive.max;
+}
+
+/** Should FTZ occur during calculations or not */
+export type FlushMode = 'flush' | 'no-flush';
+
+/** Should nextAfter calculate towards positive infinity or negative infinity */
+export type NextDirection = 'positive' | 'negative';
+
+/**
+ * Once-allocated ArrayBuffer/views to avoid overhead of allocation when
+ * converting between numeric formats
+ *
+ * Using a once-allocated pattern like this makes nextAfterF64 non-reentrant,
+ * so it cannot call itself directly or indirectly.
+ */
+const nextAfterF64Data = new ArrayBuffer(8);
+const nextAfterF64Int = new BigUint64Array(nextAfterF64Data);
+const nextAfterF64Float = new Float64Array(nextAfterF64Data);
+
+/**
+ * @returns the next f64 value after |val|, towards +inf or -inf as specified by |dir|.
+ *
+ * If |mode| is 'flush', all subnormal values will be flushed to 0 before
+ * processing, and for -/+0 the result will be the closest normal in the
+ * correct direction.
+ *
+ * If |mode| is 'no-flush', the next subnormal will be calculated when
+ * appropriate, and for -/+0 the result will be the closest subnormal in the
+ * correct direction.
+ *
+ * val needs to be in [min f64, max f64]
+ */
+export function nextAfterF64(val: number, dir: NextDirection, mode: FlushMode): number {
+ if (Number.isNaN(val)) {
+ return val;
+ }
+
+ if (val === Number.POSITIVE_INFINITY) {
+ return kValue.f64.positive.infinity;
+ }
+
+ if (val === Number.NEGATIVE_INFINITY) {
+ return kValue.f64.negative.infinity;
+ }
+
+ assert(
+ val <= kValue.f64.positive.max && val >= kValue.f64.negative.min,
+ `${val} is not in the range of f64`
+ );
+
+ val = mode === 'flush' ? flushSubnormalNumberF64(val) : val;
+
+ // -/+0 === 0 returns true
+ if (val === 0) {
+ if (dir === 'positive') {
+ return mode === 'flush' ? kValue.f64.positive.min : kValue.f64.positive.subnormal.min;
+ } else {
+ return mode === 'flush' ? kValue.f64.negative.max : kValue.f64.negative.subnormal.max;
+ }
+ }
+
+ nextAfterF64Float[0] = val;
+ const is_positive = (nextAfterF64Int[0] & 0x8000_0000_0000_0000n) === 0n;
+ if (is_positive === (dir === 'positive')) {
+ nextAfterF64Int[0] += 1n;
+ } else {
+ nextAfterF64Int[0] -= 1n;
+ }
+
+ // Checking for overflow
+ if ((nextAfterF64Int[0] & 0x7ff0_0000_0000_0000n) === 0x7ff0_0000_0000_0000n) {
+ if (dir === 'positive') {
+ return kValue.f64.positive.infinity;
+ } else {
+ return kValue.f64.negative.infinity;
+ }
+ }
+
+ return mode === 'flush' ? flushSubnormalNumberF64(nextAfterF64Float[0]) : nextAfterF64Float[0];
+}
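+
+// Illustrative only (not part of the upstream file): how the flush mode affects
+// nextAfterF64 around zero.
+//   nextAfterF64(0, 'positive', 'no-flush');  // kValue.f64.positive.subnormal.min
+//   nextAfterF64(0, 'positive', 'flush');     // kValue.f64.positive.min (subnormals skipped)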
+
+/**
+ * Once-allocated ArrayBuffer/views to avoid overhead of allocation when
+ * converting between numeric formats
+ *
+ * Using a once-allocated pattern like this makes nextAfterF32 non-reentrant,
+ * so it cannot call itself directly or indirectly.
+ */
+const nextAfterF32Data = new ArrayBuffer(4);
+const nextAfterF32Int = new Uint32Array(nextAfterF32Data);
+const nextAfterF32Float = new Float32Array(nextAfterF32Data);
+
+/**
+ * @returns the next f32 value after |val|, towards +inf or -inf as specified by |dir|.
+ *
+ * If |mode| is 'flush', all subnormal values will be flushed to 0 before
+ * processing, and for -/+0 the result will be the closest normal in the
+ * correct direction.
+ *
+ * If |mode| is 'no-flush', the next subnormal will be calculated when
+ * appropriate, and for -/+0 the result will be the closest subnormal in the
+ * correct direction.
+ *
+ * val needs to be in [min f32, max f32]
+ */
+export function nextAfterF32(val: number, dir: NextDirection, mode: FlushMode): number {
+ if (Number.isNaN(val)) {
+ return val;
+ }
+
+ if (val === Number.POSITIVE_INFINITY) {
+ return kValue.f32.positive.infinity;
+ }
+
+ if (val === Number.NEGATIVE_INFINITY) {
+ return kValue.f32.negative.infinity;
+ }
+
+ assert(
+ val <= kValue.f32.positive.max && val >= kValue.f32.negative.min,
+ `${val} is not in the range of f32`
+ );
+
+ val = mode === 'flush' ? flushSubnormalNumberF32(val) : val;
+
+ // -/+0 === 0 returns true
+ if (val === 0) {
+ if (dir === 'positive') {
+ return mode === 'flush' ? kValue.f32.positive.min : kValue.f32.positive.subnormal.min;
+ } else {
+ return mode === 'flush' ? kValue.f32.negative.max : kValue.f32.negative.subnormal.max;
+ }
+ }
+
+ nextAfterF32Float[0] = val; // This quantizes from number (f64) to f32
+ if (
+ (dir === 'positive' && nextAfterF32Float[0] <= val) ||
+ (dir === 'negative' && nextAfterF32Float[0] >= val)
+ ) {
+ // val is either f32 precise or quantizing rounded in the opposite direction
+ // from what is needed, so need to calculate the value in the correct
+ // direction.
+ const is_positive = (nextAfterF32Int[0] & 0x80000000) === 0;
+ if (is_positive === (dir === 'positive')) {
+ nextAfterF32Int[0] += 1;
+ } else {
+ nextAfterF32Int[0] -= 1;
+ }
+ }
+
+ // Checking for overflow
+ if ((nextAfterF32Int[0] & 0x7f800000) === 0x7f800000) {
+ if (dir === 'positive') {
+ return kValue.f32.positive.infinity;
+ } else {
+ return kValue.f32.negative.infinity;
+ }
+ }
+
+ return mode === 'flush' ? flushSubnormalNumberF32(nextAfterF32Float[0]) : nextAfterF32Float[0];
+}
+
+/**
+ * Once-allocated ArrayBuffer/views to avoid overhead of allocation when
+ * converting between numeric formats
+ *
+ * Using a once-allocated pattern like this makes nextAfterF16 non-reentrant,
+ * so it cannot call itself directly or indirectly.
+ */
+const nextAfterF16Data = new ArrayBuffer(2);
+const nextAfterF16Hex = new Uint16Array(nextAfterF16Data);
+const nextAfterF16Float = new Float16Array(nextAfterF16Data);
+
+/**
+ * @returns the next f16 value after |val|, towards +inf or -inf as specified by |dir|.
+ *
+ * If |mode| is 'flush', all subnormal values will be flushed to 0 before
+ * processing, and for -/+0 the result will be the closest normal in the
+ * correct direction.
+ *
+ * If |mode| is 'no-flush', the next subnormal will be calculated when
+ * appropriate, and for -/+0 the result will be the closest subnormal in the
+ * correct direction.
+ *
+ * val needs to be in [min f16, max f16]
+ */
+export function nextAfterF16(val: number, dir: NextDirection, mode: FlushMode): number {
+ if (Number.isNaN(val)) {
+ return val;
+ }
+
+ if (val === Number.POSITIVE_INFINITY) {
+ return kValue.f16.positive.infinity;
+ }
+
+ if (val === Number.NEGATIVE_INFINITY) {
+ return kValue.f16.negative.infinity;
+ }
+
+ assert(
+ val <= kValue.f16.positive.max && val >= kValue.f16.negative.min,
+ `${val} is not in the range of f16`
+ );
+
+ val = mode === 'flush' ? flushSubnormalNumberF16(val) : val;
+
+ // -/+0 === 0 returns true
+ if (val === 0) {
+ if (dir === 'positive') {
+ return mode === 'flush' ? kValue.f16.positive.min : kValue.f16.positive.subnormal.min;
+ } else {
+ return mode === 'flush' ? kValue.f16.negative.max : kValue.f16.negative.subnormal.max;
+ }
+ }
+
+ nextAfterF16Float[0] = val; // This quantizes from number (f64) to f16
+ if (
+ (dir === 'positive' && nextAfterF16Float[0] <= val) ||
+ (dir === 'negative' && nextAfterF16Float[0] >= val)
+ ) {
+ // val is either f16 precise or quantizing rounded in the opposite direction
+ // from what is needed, so need to calculate the value in the correct
+ // direction.
+ const is_positive = (nextAfterF16Hex[0] & 0x8000) === 0;
+ if (is_positive === (dir === 'positive')) {
+ nextAfterF16Hex[0] += 1;
+ } else {
+ nextAfterF16Hex[0] -= 1;
+ }
+ }
+
+ // Checking for overflow
+ if ((nextAfterF16Hex[0] & 0x7c00) === 0x7c00) {
+ if (dir === 'positive') {
+ return kValue.f16.positive.infinity;
+ } else {
+ return kValue.f16.negative.infinity;
+ }
+ }
+
+ return mode === 'flush' ? flushSubnormalNumberF16(nextAfterF16Float[0]) : nextAfterF16Float[0];
+}
+
+/**
+ * @returns ulp(x), the unit of least precision for a specific number as a 64-bit float
+ *
+ * ulp(x) is the distance between the two floating point numbers nearest x.
+ * This value is also called unit of last place, ULP, and 1 ULP.
+ * See the WGSL spec and http://www.ens-lyon.fr/LIP/Pub/Rapports/RR/RR2005/RR2005-09.pdf
+ * for a more detailed/nuanced discussion of the definition of ulp(x).
+ *
+ * @param target number to calculate ULP for
+ * @param mode whether FTZ should occur during the calculation or not
+ */
+export function oneULPF64(target: number, mode: FlushMode = 'flush'): number {
+ if (Number.isNaN(target)) {
+ return Number.NaN;
+ }
+
+ target = mode === 'flush' ? flushSubnormalNumberF64(target) : target;
+
+ // For values out of bounds for f64 ulp(x) is defined as the
+ // distance between the two nearest f64 representable numbers to the
+ // appropriate edge, which also happens to be the maximum possible ULP.
+ if (
+ target === Number.POSITIVE_INFINITY ||
+ target >= kValue.f64.positive.max ||
+ target === Number.NEGATIVE_INFINITY ||
+ target <= kValue.f64.negative.min
+ ) {
+ return kValue.f64.max_ulp;
+ }
+
+ // ulp(x) is min(after - before), where
+ // before <= x <= after
+ // before =/= after
+ // before and after are f64 representable
+ const before = nextAfterF64(target, 'negative', mode);
+ const after = nextAfterF64(target, 'positive', mode);
+ // Since number is internally a f64, |target| is always f64 representable, so
+ // either before or after will be x
+ return Math.min(target - before, after - target);
+}
+
+/**
+ * @returns ulp(x), the unit of least precision for a specific number as a 32-bit float
+ *
+ * ulp(x) is the distance between the two floating point numbers nearest x.
+ * This value is also called unit of last place, ULP, and 1 ULP.
+ * See the WGSL spec and http://www.ens-lyon.fr/LIP/Pub/Rapports/RR/RR2005/RR2005-09.pdf
+ * for a more detailed/nuanced discussion of the definition of ulp(x).
+ *
+ * @param target number to calculate ULP for
+ * @param mode whether FTZ should occur during the calculation or not
+ */
+export function oneULPF32(target: number, mode: FlushMode = 'flush'): number {
+ if (Number.isNaN(target)) {
+ return Number.NaN;
+ }
+
+ target = mode === 'flush' ? flushSubnormalNumberF32(target) : target;
+
+ // For values out of bounds for f32 ulp(x) is defined as the
+ // distance between the two nearest f32 representable numbers to the
+ // appropriate edge, which also happens to be the maximum possible ULP.
+ if (
+ target === Number.POSITIVE_INFINITY ||
+ target >= kValue.f32.positive.max ||
+ target === Number.NEGATIVE_INFINITY ||
+ target <= kValue.f32.negative.min
+ ) {
+ return kValue.f32.max_ulp;
+ }
+
+ // ulp(x) is min(after - before), where
+ // before <= x <= after
+ // before =/= after
+ // before and after are f32 representable
+ const before = nextAfterF32(target, 'negative', mode);
+ const after = nextAfterF32(target, 'positive', mode);
+ const converted: number = quantizeToF32(target);
+ if (converted === target) {
+ // |target| is f32 representable, so either before or after will be x
+ return Math.min(target - before, after - target);
+ } else {
+ // |target| is not f32 representable so taking distance of neighbouring f32s.
+ return after - before;
+ }
+}
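+
+// Illustrative only (not part of the upstream file): since 1.0 is exactly
+// representable in f32, oneULPF32 takes the min distance to its neighbours.
+//   oneULPF32(1.0);  // 2 ** -24 (~5.96e-8), the gap down to the next f32 below 1.0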
+
+/**
+ * @returns ulp(x), the unit of least precision for a specific number as a 16-bit float
+ *
+ * ulp(x) is the distance between the two floating point numbers nearest x.
+ * This value is also called unit of last place, ULP, and 1 ULP.
+ * See the WGSL spec and http://www.ens-lyon.fr/LIP/Pub/Rapports/RR/RR2005/RR2005-09.pdf
+ * for a more detailed/nuanced discussion of the definition of ulp(x).
+ *
+ * @param target number to calculate ULP for
+ * @param mode whether FTZ should occur during the calculation or not
+ */
+export function oneULPF16(target: number, mode: FlushMode = 'flush'): number {
+ if (Number.isNaN(target)) {
+ return Number.NaN;
+ }
+
+ target = mode === 'flush' ? flushSubnormalNumberF16(target) : target;
+
+ // For values out of bounds for f16 ulp(x) is defined as the
+ // distance between the two nearest f16 representable numbers to the
+ // appropriate edge, which also happens to be the maximum possible ULP.
+ if (
+ target === Number.POSITIVE_INFINITY ||
+ target >= kValue.f16.positive.max ||
+ target === Number.NEGATIVE_INFINITY ||
+ target <= kValue.f16.negative.min
+ ) {
+ return kValue.f16.max_ulp;
+ }
+
+ // ulp(x) is min(after - before), where
+ // before <= x <= after
+ // before =/= after
+ // before and after are f16 representable
+ const before = nextAfterF16(target, 'negative', mode);
+ const after = nextAfterF16(target, 'positive', mode);
+ const converted: number = quantizeToF16(target);
+ if (converted === target) {
+ // |target| is f16 representable, so either before or after will be x
+ return Math.min(target - before, after - target);
+ } else {
+ // |target| is not f16 representable so taking distance of neighbouring f16s.
+ return after - before;
+ }
+}
+
+/**
+ * Calculate the valid roundings when quantizing to 64-bit floats
+ *
+ * TS/JS's number type is internally a f64, so the supplied value will be
+ * quantized by definition. The only corner cases occur if a non-finite value
+ * is provided, since the valid roundings include the appropriate min or max
+ * value.
+ *
+ * @param n number to be quantized
+ * @returns all of the acceptable roundings for quantizing to 64-bits in
+ * ascending order.
+ */
+export function correctlyRoundedF64(n: number): readonly number[] {
+  assert(!Number.isNaN(n), `correctlyRoundedF64 not defined for NaN`);
+ // Above f64 range
+ if (n === Number.POSITIVE_INFINITY) {
+ return [kValue.f64.positive.max, Number.POSITIVE_INFINITY];
+ }
+
+ // Below f64 range
+ if (n === Number.NEGATIVE_INFINITY) {
+ return [Number.NEGATIVE_INFINITY, kValue.f64.negative.min];
+ }
+
+ return [n];
+}
+
+/**
+ * Calculate the valid roundings when quantizing to 32-bit floats
+ *
+ * TS/JS's number type is internally a f64, so quantization needs to occur when
+ * converting to f32 for WGSL. WGSL does not specify a specific rounding mode,
+ * so if a number is not precisely representable in 32 bits, but is within
+ * range, there are two possible valid quantizations. If it is precisely
+ * representable, there is only one valid quantization. This function calculates
+ * the valid roundings and returns them in an array.
+ *
+ * This function does not consider flushing mode, so subnormals are maintained.
+ * The caller is responsible for flushing before and after as appropriate.
+ *
+ * Out of bounds values need to consider how they interact with the overflow
+ * rules.
+ * * If a value is OOB but not too far out, an implementation may choose to round
+ * to nearest finite value or the correct infinity. This boundary is at
+ * 2^(f32.emax + 1) and -(2^(f32.emax + 1)) respectively.
+ * Values that are at or beyond these limits must be rounded towards the
+ * appropriate infinity.
+ *
+ * @param n number to be quantized
+ * @returns all of the acceptable roundings for quantizing to 32-bits in
+ * ascending order.
+ */
+export function correctlyRoundedF32(n: number): readonly number[] {
+ if (Number.isNaN(n)) {
+ return [n];
+ }
+
+  // Greater than or equal to the upper overflow boundary
+ if (n >= 2 ** (kValue.f32.emax + 1)) {
+ return [Number.POSITIVE_INFINITY];
+ }
+
+ // OOB, but less than the upper overflow boundary
+ if (n > kValue.f32.positive.max) {
+ return [kValue.f32.positive.max, Number.POSITIVE_INFINITY];
+ }
+
+ // f32 finite
+ if (n <= kValue.f32.positive.max && n >= kValue.f32.negative.min) {
+ const n_32 = quantizeToF32(n);
+ if (n === n_32) {
+ // n is precisely expressible as a f32, so should not be rounded
+ return [n];
+ }
+
+ if (n_32 > n) {
+ // n_32 rounded towards +inf, so is after n
+ const other = nextAfterF32(n_32, 'negative', 'no-flush');
+ return [other, n_32];
+ } else {
+ // n_32 rounded towards -inf, so is before n
+ const other = nextAfterF32(n_32, 'positive', 'no-flush');
+ return [n_32, other];
+ }
+ }
+
+  // OOB, but greater than the lower overflow boundary
+ if (n > -(2 ** (kValue.f32.emax + 1))) {
+ return [Number.NEGATIVE_INFINITY, kValue.f32.negative.min];
+ }
+
+ // Less than or equal to the lower overflow boundary
+ return [Number.NEGATIVE_INFINITY];
+}
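+
+// Illustrative only (not part of the upstream file): 0.1 is not exactly
+// representable in f32, so correctlyRoundedF32 returns the two adjacent f32
+// values bracketing it, in ascending order; an exact value returns itself.
+//   correctlyRoundedF32(0.1);  // [f32 just below 0.1, f32 just above 0.1]
+//   correctlyRoundedF32(0.5);  // [0.5]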
+
+/**
+ * Calculate the valid roundings when quantizing to 16-bit floats
+ *
+ * TS/JS's number type is internally a f64, so quantization needs to occur when
+ * converting to f16 for WGSL. WGSL does not specify a specific rounding mode,
+ * so if a number is not precisely representable in 16 bits, but is within
+ * range, there are two possible valid quantizations. If it is precisely
+ * representable, there is only one valid quantization. This function calculates
+ * the valid roundings and returns them in an array.
+ *
+ * This function does not consider flushing mode, so subnormals are maintained.
+ * The caller is responsible for flushing before and after as appropriate.
+ *
+ * Out of bounds values need to consider how they interact with the overflow
+ * rules.
+ * * If a value is OOB but not too far out, an implementation may choose to round
+ * to nearest finite value or the correct infinity. This boundary is at
+ * 2^(f16.emax + 1) and -(2^(f16.emax + 1)) respectively.
+ * Values that are at or beyond these limits must be rounded towards the
+ * appropriate infinity.
+ *
+ * @param n number to be quantized
+ * @returns all of the acceptable roundings for quantizing to 16-bits in
+ * ascending order.
+ */
+export function correctlyRoundedF16(n: number): readonly number[] {
+ if (Number.isNaN(n)) {
+ return [n];
+ }
+
+  // Greater than or equal to the upper overflow boundary
+ if (n >= 2 ** (kValue.f16.emax + 1)) {
+ return [Number.POSITIVE_INFINITY];
+ }
+
+ // OOB, but less than the upper overflow boundary
+ if (n > kValue.f16.positive.max) {
+ return [kValue.f16.positive.max, Number.POSITIVE_INFINITY];
+ }
+
+ // f16 finite
+ if (n <= kValue.f16.positive.max && n >= kValue.f16.negative.min) {
+ const n_16 = quantizeToF16(n);
+ if (n === n_16) {
+ // n is precisely expressible as a f16, so should not be rounded
+ return [n];
+ }
+
+ if (n_16 > n) {
+ // n_16 rounded towards +inf, so is after n
+ const other = nextAfterF16(n_16, 'negative', 'no-flush');
+ return [other, n_16];
+ } else {
+ // n_16 rounded towards -inf, so is before n
+ const other = nextAfterF16(n_16, 'positive', 'no-flush');
+ return [n_16, other];
+ }
+ }
+
+  // OOB, but greater than the lower overflow boundary
+ if (n > -(2 ** (kValue.f16.emax + 1))) {
+ return [Number.NEGATIVE_INFINITY, kValue.f16.negative.min];
+ }
+
+ // Less than or equal to the lower overflow boundary
+ return [Number.NEGATIVE_INFINITY];
+}
+
+/**
+ * Calculates WGSL frexp
+ *
+ * Splits val into a fraction and an exponent so that
+ * val = fraction * 2 ^ exponent.
+ * The fraction is 0.0 or its magnitude is in the range [0.5, 1.0).
+ *
+ * @param val the float to split
+ * @param trait the float type, f32 or f16 or f64
+ * @returns the results of splitting val
+ */
+export function frexp(val: number, trait: 'f32' | 'f16' | 'f64'): { fract: number; exp: number } {
+ const buffer = new ArrayBuffer(8);
+ const dataView = new DataView(buffer);
+
+  // expBitCount and fractBitCount are the bit widths of the exponent and fractional parts of the given FP type.
+ // expBias is the bias constant of exponent of the given FP type.
+ // Biased exponent (unsigned integer, i.e. the exponent part of float) = unbiased exponent (signed integer) + expBias.
+ let expBitCount: number, fractBitCount: number, expBias: number;
+ // To handle the exponent bits of given FP types (f16, f32, and f64), considering the highest 16
+ // bits is enough.
+ // expMaskForHigh16Bits indicates the exponent bitfield in the highest 16 bits of the given FP
+ // type, and targetExpBitsForHigh16Bits is the exponent bits that corresponding to unbiased
+ // exponent -1, i.e. the exponent bits when the FP values is in range [0.5, 1.0).
+ let expMaskForHigh16Bits: number, targetExpBitsForHigh16Bits: number;
+  // Helper function that stores the given FP value into the buffer as the given FP type
+ let setFloatToBuffer: (v: number) => void;
+  // Helper function that reads the FP value back from the buffer as the given FP type
+ let getFloatFromBuffer: () => number;
+
+ let isFinite: (v: number) => boolean;
+ let isSubnormal: (v: number) => boolean;
+
+ if (trait === 'f32') {
+ // f32 bit pattern: s_eeeeeeee_fffffff_ffffffffffffffff
+ expBitCount = 8;
+ fractBitCount = 23;
+ expBias = 127;
+ // The exponent bitmask for high 16 bits of f32.
+ expMaskForHigh16Bits = 0x7f80;
+ // The target exponent bits is equal to those for f32 0.5 = 0x3f000000.
+ targetExpBitsForHigh16Bits = 0x3f00;
+ isFinite = isFiniteF32;
+ isSubnormal = isSubnormalNumberF32;
+ // Enforce big-endian so that offset 0 is highest byte.
+ setFloatToBuffer = (v: number) => dataView.setFloat32(0, v, false);
+ getFloatFromBuffer = () => dataView.getFloat32(0, false);
+ } else if (trait === 'f16') {
+ // f16 bit pattern: s_eeeee_ffffffffff
+ expBitCount = 5;
+ fractBitCount = 10;
+ expBias = 15;
+ // The exponent bitmask for 16 bits of f16.
+ expMaskForHigh16Bits = 0x7c00;
+ // The target exponent bits is equal to those for f16 0.5 = 0x3800.
+ targetExpBitsForHigh16Bits = 0x3800;
+ isFinite = isFiniteF16;
+ isSubnormal = isSubnormalNumberF16;
+ // Enforce big-endian so that offset 0 is highest byte.
+ setFloatToBuffer = (v: number) => setFloat16(dataView, 0, v, false);
+ getFloatFromBuffer = () => getFloat16(dataView, 0, false);
+ } else {
+ assert(trait === 'f64');
+ // f64 bit pattern: s_eeeeeeeeeee_ffff_ffffffffffffffffffffffffffffffffffffffffffffffff
+ expBitCount = 11;
+ fractBitCount = 52;
+ expBias = 1023;
+ // The exponent bitmask for 16 bits of f64.
+ expMaskForHigh16Bits = 0x7ff0;
+ // The target exponent bits is equal to those for f64 0.5 = 0x3fe0_0000_0000_0000.
+ targetExpBitsForHigh16Bits = 0x3fe0;
+ isFinite = Number.isFinite;
+ isSubnormal = isSubnormalNumberF64;
+ // Enforce big-endian so that offset 0 is highest byte.
+ setFloatToBuffer = (v: number) => dataView.setFloat64(0, v, false);
+ getFloatFromBuffer = () => dataView.getFloat64(0, false);
+ }
+  // Helper function that extracts the unbiased exponent of the float in the buffer.
+ const extractUnbiasedExpFromNormalFloatInBuffer = () => {
+ // Assert the float in buffer is finite normal float.
+ assert(isFinite(getFloatFromBuffer()) && !isSubnormal(getFloatFromBuffer()));
+    // Get the highest 16 bits of the float as uint16, which contains the whole exponent part for f16, f32, and f64.
+ const high16BitsAsUint16 = dataView.getUint16(0, false);
+ // Return the unbiased exp by masking, shifting and unbiasing.
+ return ((high16BitsAsUint16 & expMaskForHigh16Bits) >> (16 - 1 - expBitCount)) - expBias;
+ };
+  // Helper function that modifies the exponent of the float in the buffer to bring it into the range [0.5, 1.0).
+ // By setting the unbiased exponent to -1, the fp value will be in range 2**-1 * [1.0, 2.0), i.e. [0.5, 1.0).
+ const modifyExpOfNormalFloatInBuffer = () => {
+ // Assert the float in buffer is finite normal float.
+ assert(isFinite(getFloatFromBuffer()) && !isSubnormal(getFloatFromBuffer()));
+    // Get the highest 16 bits of the float as uint16, which contains the whole exponent part for f16, f32, and f64.
+ const high16BitsAsUint16 = dataView.getUint16(0, false);
+ // Modify the exponent bits.
+ const modifiedHigh16Bits =
+ (high16BitsAsUint16 & ~expMaskForHigh16Bits) | targetExpBitsForHigh16Bits;
+ // Set back to buffer
+ dataView.setUint16(0, modifiedHigh16Bits, false);
+ };
+
+ // +/- 0.0
+ if (val === 0) {
+ return { fract: val, exp: 0 };
+ }
+ // NaN and Inf
+ if (!isFinite(val)) {
+ return { fract: val, exp: 0 };
+ }
+
+ setFloatToBuffer(val);
+ // Don't use val below. Use helper functions working with buffer instead.
+
+ let exp = 0;
+  // Normalize the value if it is subnormal. Multiplying a subnormal value by 2**fractBitCount
+  // increases the exponent and results in a finite normal FP value of the given FP type.
+ if (isSubnormal(getFloatFromBuffer())) {
+ setFloatToBuffer(getFloatFromBuffer() * 2 ** fractBitCount);
+ exp = -fractBitCount;
+ }
+ // A normal FP value v is represented as v = ((-1)**s)*(2**(unbiased exponent))*f, where f is in
+ // range [1.0, 2.0). By moving a factor 2 from f to exponent, we have
+ // v = ((-1)**s)*(2**(unbiased exponent + 1))*(f / 2), where (f / 2) is in range [0.5, 1.0), so
+ // the exp = (unbiased exponent + 1) and fract = ((-1)**s)*(f / 2) is what we expect to get from
+  // the frexp function. Note that fract and v only differ in the exponent bitfield as long as v is normal.
+  // Compute the result exp by taking the unbiased float exponent and adding 1.
+ exp += extractUnbiasedExpFromNormalFloatInBuffer() + 1;
+ // Modify the exponent of float in buffer to make it be in range [0.5, 1.0) to get fract.
+ modifyExpOfNormalFloatInBuffer();
+
+ return { fract: getFloatFromBuffer(), exp };
+}
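+
+// Illustrative only (not part of the upstream file): frexp splits a value into
+// fraction * 2 ** exponent with the fraction's magnitude in [0.5, 1.0).
+//   frexp(8.0, 'f32');  // { fract: 0.5, exp: 4 }, since 8 = 0.5 * 2 ** 4
+//   frexp(0.0, 'f32');  // { fract: 0, exp: 0 }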
+
+/**
+ * Calculates the linear interpolation between two values of a given fractional.
+ *
+ * If |t| is 0, |a| is returned, if |t| is 1, |b| is returned, otherwise
+ * interpolation/extrapolation equivalent to a + t(b - a) is performed.
+ *
+ * The numerically stable version is adapted from http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0811r2.html
+ */
+export function lerp(a: number, b: number, t: number): number {
+ if (!Number.isFinite(a) || !Number.isFinite(b)) {
+ return Number.NaN;
+ }
+
+ if ((a <= 0.0 && b >= 0.0) || (a >= 0.0 && b <= 0.0)) {
+ return t * b + (1 - t) * a;
+ }
+
+ if (t === 1.0) {
+ return b;
+ }
+
+ const x = a + t * (b - a);
+ return t > 1.0 === b > a ? Math.max(b, x) : Math.min(b, x);
+}
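+
+// Illustrative only (not part of the upstream file):
+//   lerp(0, 10, 0.5);  // 5
+//   lerp(2, 4, 0);     // 2
+//   lerp(2, 4, 1);     // 4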
+
+/**
+ * Version of lerp that operates on bigint values
+ *
+ * lerp was not made into a generic or to take in (number|bigint), because that
+ * introduces a bunch of complexity overhead related to type differentiation
+ */
+export function lerpBigInt(a: bigint, b: bigint, idx: number, steps: number): bigint {
+ assert(Math.trunc(idx) === idx);
+ assert(Math.trunc(steps) === steps);
+
+ // This constrains t to [0.0, 1.0]
+ assert(idx >= 0);
+ assert(steps > 0);
+ assert(idx < steps);
+
+ if (steps === 1) {
+ return a;
+ }
+ if (idx === 0) {
+ return a;
+ }
+ if (idx === steps - 1) {
+ return b;
+ }
+
+ const min = (x: bigint, y: bigint): bigint => {
+ return x < y ? x : y;
+ };
+ const max = (x: bigint, y: bigint): bigint => {
+ return x > y ? x : y;
+ };
+
+  // For number, the variable t = idx / (steps - 1) would be used, but that is a
+  // fraction on [0, 1], which becomes either 0 or 1 when converted to bigint,
+  // so the expression needs to be expanded out.
+ const big_idx = BigInt(idx);
+ const big_steps = BigInt(steps);
+ if ((a <= 0n && b >= 0n) || (a >= 0n && b <= 0n)) {
+ return (b * big_idx) / (big_steps - 1n) + (a - (a * big_idx) / (big_steps - 1n));
+ }
+
+ const x = a + (b * big_idx) / (big_steps - 1n) - (a * big_idx) / (big_steps - 1n);
+ return !(b > a) ? max(b, x) : min(b, x);
+}
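+
+// Illustrative only (not part of the upstream file): lerpBigInt interpolates by
+// index over a fixed number of steps rather than by a fractional t.
+//   lerpBigInt(0n, 100n, 0, 5);  // 0n
+//   lerpBigInt(0n, 100n, 1, 5);  // 25n
+//   lerpBigInt(0n, 100n, 4, 5);  // 100n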
+
+/** @returns a linearly increasing range of numbers. */
+export function linearRange(a: number, b: number, num_steps: number): readonly number[] {
+ if (num_steps <= 0) {
+ return [];
+ }
+
+ // Avoid division by 0
+ if (num_steps === 1) {
+ return [a];
+ }
+
+ return Array.from(Array(num_steps).keys()).map(i => lerp(a, b, i / (num_steps - 1)));
+}
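+
+// Illustrative only (not part of the upstream file):
+//   linearRange(0, 10, 5);  // [0, 2.5, 5, 7.5, 10]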
+
+/**
+ * Version of linearRange that operates on bigint values
+ *
+ * linearRange was not made into a generic or to take in (number|bigint),
+ * because that introduces a bunch of complexity overhead related to type
+ * differentiation
+ */
+export function linearRangeBigInt(a: bigint, b: bigint, num_steps: number): Array<bigint> {
+ if (num_steps <= 0) {
+ return [];
+ }
+
+ // Avoid division by 0
+ if (num_steps === 1) {
+ return [a];
+ }
+
+ return Array.from(Array(num_steps).keys()).map(i => lerpBigInt(a, b, i, num_steps));
+}
+
+/**
+ * @returns a non-linearly increasing range of numbers, with a bias towards the beginning.
+ *
+ * Generates a linear range on [0,1] with |num_steps|, then squares all the values to make the curve quadratic,
+ * thus biasing towards 0, but remaining on the [0, 1] range.
+ * This biased range is then scaled to the desired range using lerp.
+ * Different curves could be generated by changing c, where greater values of c will bias more towards 0.
+ */
+export function biasedRange(a: number, b: number, num_steps: number): readonly number[] {
+ const c = 2;
+ if (num_steps <= 0) {
+ return [];
+ }
+
+ // Avoid division by 0
+ if (num_steps === 1) {
+ return [a];
+ }
+
+ return Array.from(Array(num_steps).keys()).map(i => lerp(a, b, Math.pow(i / (num_steps - 1), c)));
+}
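+
+// Illustrative only (not part of the upstream file): with c = 2 the steps are
+// quadratically spaced, so values cluster near the start of the range.
+//   biasedRange(0, 10, 5);  // [0, 0.625, 2.5, 5.625, 10]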
+
+/**
+ * @returns an ascending sorted array of numbers spread over the entire range of 32-bit floats
+ *
+ * Numbers are divided into 4 regions: negative normals, negative subnormals, positive subnormals & positive normals.
+ * Zero is included.
+ *
+ * Numbers are generated via taking a linear spread of the bit field representations of the values in each region. This
+ * means that the number of precise f32 values between each returned value in a region should be about the same. This allows
+ * for a wide range of magnitudes to be generated, instead of being extremely biased towards the edges of the f32 range.
+ *
+ * This function is intended to provide dense coverage of the f32 range; for a minimal list of values to use to cover
+ * f32 behaviour, use sparseF32Range instead.
+ *
+ * @param counts structure param with 4 entries indicating the number of entries to be generated for each region, entries
+ * must be 0 or greater.
+ */
+export function fullF32Range(
+ counts: {
+ neg_norm?: number;
+ neg_sub?: number;
+ pos_sub: number;
+ pos_norm: number;
+ } = { pos_sub: 10, pos_norm: 50 }
+): Array<number> {
+ counts.neg_norm = counts.neg_norm === undefined ? counts.pos_norm : counts.neg_norm;
+ counts.neg_sub = counts.neg_sub === undefined ? counts.pos_sub : counts.neg_sub;
+
+ // Generating bit fields first and then converting to f32, so that the spread across the possible f32 values is more
+ // even. Generating against the bounds of f32 values directly results in the values being extremely biased towards the
+ // extremes, since they are so much larger.
+ const bit_fields = [
+ ...linearRange(kBit.f32.negative.min, kBit.f32.negative.max, counts.neg_norm),
+ ...linearRange(
+ kBit.f32.negative.subnormal.min,
+ kBit.f32.negative.subnormal.max,
+ counts.neg_sub
+ ),
+ // -0.0
+ 0x80000000,
+ // +0.0
+ 0,
+ ...linearRange(
+ kBit.f32.positive.subnormal.min,
+ kBit.f32.positive.subnormal.max,
+ counts.pos_sub
+ ),
+ ...linearRange(kBit.f32.positive.min, kBit.f32.positive.max, counts.pos_norm),
+ ].map(Math.trunc);
+ return bit_fields.map(reinterpretU32AsF32);
+}
+
+/**
+ * @returns an ascending sorted array of numbers.
+ *
+ * The numbers returned are based on `fullF32Range` as described above. The difference depends
+ * on the `source` parameter. If the `source` is `const` then the numbers will be restricted to be
+ * in the range `[low, high]`. This allows filtering out a set of `f32` values which are invalid for
+ * const-evaluation but are needed to test the non-const implementation.
+ *
+ * @param source the input source for the test. If the `source` is `const` then the return will be filtered
+ * @param low the lowest f32 value to permit when filtered
+ * @param high the highest f32 value to permit when filtered
+ */
+export function sourceFilteredF32Range(source: String, low: number, high: number): Array<number> {
+ return fullF32Range().filter(x => source !== 'const' || (x >= low && x <= high));
+}
+
+/**
+ * @returns an ascending sorted array of numbers spread over the entire range of 16-bit floats
+ *
+ * Numbers are divided into 4 regions: negative normals, negative subnormals, positive subnormals & positive normals.
+ * Zero is included.
+ *
+ * Numbers are generated via taking a linear spread of the bit field representations of the values in each region. This
+ * means that the number of precise f16 values between each returned value in a region should be about the same. This allows
+ * for a wide range of magnitudes to be generated, instead of being extremely biased towards the edges of the f16 range.
+ *
+ * This function is intended to provide dense coverage of the f16 range; for a minimal list of values to use to cover
+ * f16 behaviour, use sparseF16Range instead.
+ *
+ * @param counts structure param with 4 entries indicating the number of entries to be generated for each region, entries
+ * must be 0 or greater.
+ */
+export function fullF16Range(
+ counts: {
+ neg_norm?: number;
+ neg_sub?: number;
+ pos_sub: number;
+ pos_norm: number;
+ } = { pos_sub: 10, pos_norm: 50 }
+): Array<number> {
+ counts.neg_norm = counts.neg_norm === undefined ? counts.pos_norm : counts.neg_norm;
+ counts.neg_sub = counts.neg_sub === undefined ? counts.pos_sub : counts.neg_sub;
+
+ // Generating bit fields first and then converting to f16, so that the spread across the possible f16 values is more
+ // even. Generating against the bounds of f16 values directly results in the values being extremely biased towards the
+ // extremes, since they are so much larger.
+ const bit_fields = [
+ ...linearRange(kBit.f16.negative.min, kBit.f16.negative.max, counts.neg_norm),
+ ...linearRange(
+ kBit.f16.negative.subnormal.min,
+ kBit.f16.negative.subnormal.max,
+ counts.neg_sub
+ ),
+ // -0.0
+ 0x8000,
+ // +0.0
+ 0,
+ ...linearRange(
+ kBit.f16.positive.subnormal.min,
+ kBit.f16.positive.subnormal.max,
+ counts.pos_sub
+ ),
+ ...linearRange(kBit.f16.positive.min, kBit.f16.positive.max, counts.pos_norm),
+ ].map(Math.trunc);
+ return bit_fields.map(reinterpretU16AsF16);
+}
+
+/**
+ * @returns an ascending sorted array of numbers spread over the entire range of 64-bit floats
+ *
+ * Numbers are divided into 4 regions: negative normals, negative subnormals, positive subnormals & positive normals.
+ * Zero is included.
+ *
+ * Numbers are generated via taking a linear spread of the bit field representations of the values in each region. This
+ * means that the number of precise f64 values between each returned value in a region should be about the same. This allows
+ * for a wide range of magnitudes to be generated, instead of being extremely biased towards the edges of the f64 range.
+ *
+ * This function is intended to provide dense coverage of the f64 range; for a minimal list of values to use to cover
+ * f64 behaviour, use sparseF64Range instead.
+ *
+ * @param counts structure param with 4 entries indicating the number of entries to be generated for each region, entries
+ * must be 0 or greater.
+ */
+export function fullF64Range(
+ counts: {
+ neg_norm?: number;
+ neg_sub?: number;
+ pos_sub: number;
+ pos_norm: number;
+ } = { pos_sub: 10, pos_norm: 50 }
+): Array<number> {
+ counts.neg_norm = counts.neg_norm === undefined ? counts.pos_norm : counts.neg_norm;
+ counts.neg_sub = counts.neg_sub === undefined ? counts.pos_sub : counts.neg_sub;
+
+ // Generating bit fields first and then converting to f64, so that the spread across the possible f64 values is more
+ // even. Generating against the bounds of f64 values directly results in the values being extremely biased towards the
+ // extremes, since they are so much larger.
+ const bit_fields = [
+ ...linearRangeBigInt(kBit.f64.negative.min, kBit.f64.negative.max, counts.neg_norm),
+ ...linearRangeBigInt(
+ kBit.f64.negative.subnormal.min,
+ kBit.f64.negative.subnormal.max,
+ counts.neg_sub
+ ),
+ // -0.0
+ 0x8000_0000_0000_0000n,
+ // +0.0
+ 0n,
+ ...linearRangeBigInt(
+ kBit.f64.positive.subnormal.min,
+ kBit.f64.positive.subnormal.max,
+ counts.pos_sub
+ ),
+ ...linearRangeBigInt(kBit.f64.positive.min, kBit.f64.positive.max, counts.pos_norm),
+ ];
+ return bit_fields.map(reinterpretU64AsF64);
+}
+
+/**
+ * @returns an ascending sorted array of f64 values spread over specific range of f64 normal floats
+ *
+ * Numbers are divided into 4 regions: negative 64-bit normals, negative 64-bit subnormals, positive 64-bit subnormals &
+ * positive 64-bit normals.
+ * Zero is included.
+ *
+ * Numbers are generated via taking a linear spread of the bit field representations of the values in each region. This
+ * means that the number of precise f64 values between each returned value in a region should be about the same. This allows
+ * for a wide range of magnitudes to be generated, instead of being extremely biased towards the edges of the range.
+ *
+ * @param begin a negative f64 normal float value
+ * @param end a positive f64 normal float value
+ * @param counts structure param with 4 entries indicating the number of entries
+ * to be generated for each region, entries must be 0 or greater.
+ */
+export function filteredF64Range(
+ begin: number,
+ end: number,
+ counts: { neg_norm?: number; neg_sub?: number; pos_sub: number; pos_norm: number } = {
+ pos_sub: 10,
+ pos_norm: 50,
+ }
+): Array<number> {
+ assert(
+ begin <= kValue.f64.negative.max,
+ `Beginning of range ${begin} must be negative f64 normal`
+ );
+ assert(end >= kValue.f64.positive.min, `Ending of range ${end} must be positive f64 normal`);
+
+ counts.neg_norm = counts.neg_norm === undefined ? counts.pos_norm : counts.neg_norm;
+ counts.neg_sub = counts.neg_sub === undefined ? counts.pos_sub : counts.neg_sub;
+
+ const u64_begin = reinterpretF64AsU64(begin);
+ const u64_end = reinterpretF64AsU64(end);
+ // Generating bit fields first and then converting to f64, so that the spread across the possible f64 values is more
+ // even. Generating against the bounds of f64 values directly results in the values being extremely biased towards the
+ // extremes, since they are so much larger.
+ const bit_fields = [
+ ...linearRangeBigInt(u64_begin, kBit.f64.negative.max, counts.neg_norm),
+ ...linearRangeBigInt(
+ kBit.f64.negative.subnormal.min,
+ kBit.f64.negative.subnormal.max,
+ counts.neg_sub
+ ),
+ // -0.0
+ 0x8000_0000_0000_0000n,
+ // +0.0
+ 0n,
+ ...linearRangeBigInt(
+ kBit.f64.positive.subnormal.min,
+ kBit.f64.positive.subnormal.max,
+ counts.pos_sub
+ ),
+ ...linearRangeBigInt(kBit.f64.positive.min, u64_end, counts.pos_norm),
+ ];
+ return bit_fields.map(reinterpretU64AsF64);
+}
+
+/** Short list of i32 values of interest to test against */
+const kInterestingI32Values: readonly number[] = [
+ kValue.i32.negative.max,
+ Math.trunc(kValue.i32.negative.max / 2),
+ -256,
+ -10,
+ -1,
+ 0,
+ 1,
+ 10,
+ 256,
+ Math.trunc(kValue.i32.positive.max / 2),
+ kValue.i32.positive.max,
+];
+
+/** @returns minimal i32 values that cover the entire range of i32 behaviours
+ *
+ * This is used instead of fullI32Range when the number of test cases being
+ * generated is a super-linear function of the number of i32 values, which
+ * leads to timeouts.
+ */
+export function sparseI32Range(): readonly number[] {
+ return kInterestingI32Values;
+}
+
+const kVectorI32Values = {
+ 2: kInterestingI32Values.flatMap(f => [
+ [f, 1],
+ [1, f],
+ [f, -1],
+ [-1, f],
+ ]),
+ 3: kInterestingI32Values.flatMap(f => [
+ [f, 1, 2],
+ [1, f, 2],
+ [1, 2, f],
+ [f, -1, -2],
+ [-1, f, -2],
+ [-1, -2, f],
+ ]),
+ 4: kInterestingI32Values.flatMap(f => [
+ [f, 1, 2, 3],
+ [1, f, 2, 3],
+ [1, 2, f, 3],
+ [1, 2, 3, f],
+ [f, -1, -2, -3],
+ [-1, f, -2, -3],
+ [-1, -2, f, -3],
+ [-1, -2, -3, f],
+ ]),
+};
+
+/**
+ * Returns a set of vectors, indexed by dimension, containing interesting i32
+ * values.
+ *
+ * The tests do not do the simple option for coverage of computing the cartesian
+ * product of all of the interesting i32 values N times for vecN tests,
+ * because that creates a huge number of tests for vec3 and vec4, leading to
+ * time outs.
+ *
+ * Instead they insert the interesting i32 values into each location of the
+ * vector to get a spread of testing over the entire range. This reduces the
+ * number of cases being run substantially, but maintains coverage.
+ */
+export function vectorI32Range(dim: number): ROArrayArray<number> {
+ assert(dim === 2 || dim === 3 || dim === 4, 'vectorI32Range only accepts dimensions 2, 3, and 4');
+ return kVectorI32Values[dim];
+}
+
+/**
+ * @returns an ascending sorted array of numbers spread over the entire range of 32-bit signed ints
+ *
+ * Numbers are divided into 2 regions: negatives and positives, with their spreads biased towards 0.
+ * Zero is included in the range.
+ *
+ * @param counts structure param with 2 entries indicating the number of entries to be generated for each region, values must be 0 or greater.
+ */
+export function fullI32Range(
+ counts: {
+ negative?: number;
+ positive: number;
+ } = { positive: 50 }
+): Array<number> {
+ counts.negative = counts.negative === undefined ? counts.positive : counts.negative;
+ return [
+ ...biasedRange(kValue.i32.negative.min, -1, counts.negative),
+ 0,
+ ...biasedRange(1, kValue.i32.positive.max, counts.positive),
+ ].map(Math.trunc);
+}
+
+/** Short list of u32 values of interest to test against */
+const kInterestingU32Values: readonly number[] = [
+ 0,
+ 1,
+ 10,
+ 256,
+ Math.trunc(kValue.u32.max / 2),
+ kValue.u32.max,
+];
+
+/** @returns minimal u32 values that cover the entire range of u32 behaviours
+ *
+ * This is used instead of fullU32Range when the number of test cases being
+ * generated is a super-linear function of the number of u32 values, which
+ * leads to timeouts.
+ */
+export function sparseU32Range(): readonly number[] {
+ return kInterestingU32Values;
+}
+
+const kVectorU32Values = {
+ 2: kInterestingU32Values.flatMap(f => [
+ [f, 1],
+ [1, f],
+ ]),
+ 3: kInterestingU32Values.flatMap(f => [
+ [f, 1, 2],
+ [1, f, 2],
+ [1, 2, f],
+ ]),
+ 4: kInterestingU32Values.flatMap(f => [
+ [f, 1, 2, 3],
+ [1, f, 2, 3],
+ [1, 2, f, 3],
+ [1, 2, 3, f],
+ ]),
+};
+
+/**
+ * Returns a set of vectors, indexed by dimension, containing interesting u32
+ * values.
+ *
+ * The tests do not do the simple option for coverage of computing the cartesian
+ * product of all of the interesting u32 values N times for vecN tests,
+ * because that creates a huge number of tests for vec3 and vec4, leading to
+ * time outs.
+ *
+ * Instead they insert the interesting u32 values into each location of the
+ * vector to get a spread of testing over the entire range. This reduces the
+ * number of cases being run substantially, but maintains coverage.
+ */
+export function vectorU32Range(dim: number): ROArrayArray<number> {
+ assert(dim === 2 || dim === 3 || dim === 4, 'vectorU32Range only accepts dimensions 2, 3, and 4');
+ return kVectorU32Values[dim];
+}
+
+/**
+ * @returns an ascending sorted array of numbers spread over the entire range of 32-bit unsigned ints
+ *
+ * Numbers are biased towards 0, and 0 is included in the range.
+ *
+ * @param count the number of entries to include in the range in addition to 0; must be greater than 0, defaults to 50
+ */
+export function fullU32Range(count: number = 50): Array<number> {
+ return [0, ...biasedRange(1, kValue.u32.max, count)].map(Math.trunc);
+}
+
+/** Short list of f32 values of interest to test against */
+const kInterestingF32Values: readonly number[] = [
+ kValue.f32.negative.min,
+ -10.0,
+ -1.0,
+ -0.125,
+ kValue.f32.negative.max,
+ kValue.f32.negative.subnormal.min,
+ kValue.f32.negative.subnormal.max,
+ -0.0,
+ 0.0,
+ kValue.f32.positive.subnormal.min,
+ kValue.f32.positive.subnormal.max,
+ kValue.f32.positive.min,
+ 0.125,
+ 1.0,
+ 10.0,
+ kValue.f32.positive.max,
+];
+
+/** @returns minimal f32 values that cover the entire range of f32 behaviours
+ *
+ * Has specially selected values that cover edge cases, normals, and subnormals.
+ * This is used instead of fullF32Range when the number of test cases being
+ * generated is a super-linear function of the number of f32 values, which
+ * leads to timeouts.
+ *
+ * These values have been chosen to attempt to test the widest range of f32
+ * behaviours in the lowest number of entries, so may potentially miss function
+ * specific values of interest. If there are known values of interest they
+ * should be appended to this list in the test generation code.
+ */
+export function sparseF32Range(): readonly number[] {
+ return kInterestingF32Values;
+}
+
+const kVectorF32Values = {
+ 2: sparseF32Range().flatMap(f => [
+ [f, 1.0],
+ [1.0, f],
+ [f, -1.0],
+ [-1.0, f],
+ ]),
+ 3: sparseF32Range().flatMap(f => [
+ [f, 1.0, 2.0],
+ [1.0, f, 2.0],
+ [1.0, 2.0, f],
+ [f, -1.0, -2.0],
+ [-1.0, f, -2.0],
+ [-1.0, -2.0, f],
+ ]),
+ 4: sparseF32Range().flatMap(f => [
+ [f, 1.0, 2.0, 3.0],
+ [1.0, f, 2.0, 3.0],
+ [1.0, 2.0, f, 3.0],
+ [1.0, 2.0, 3.0, f],
+ [f, -1.0, -2.0, -3.0],
+ [-1.0, f, -2.0, -3.0],
+ [-1.0, -2.0, f, -3.0],
+ [-1.0, -2.0, -3.0, f],
+ ]),
+};
+
+/**
+ * Returns a set of vectors, indexed by dimension, containing interesting float
+ * values.
+ *
+ * The tests do not do the simple option for coverage of computing the cartesian
+ * product of all of the interesting float values N times for vecN tests,
+ * because that creates a huge number of tests for vec3 and vec4, leading to
+ * time outs.
+ *
+ * Instead they insert the interesting f32 values into each location of the
+ * vector to get a spread of testing over the entire range. This reduces the
+ * number of cases being run substantially, but maintains coverage.
+ */
+export function vectorF32Range(dim: number): ROArrayArray<number> {
+ assert(dim === 2 || dim === 3 || dim === 4, 'vectorF32Range only accepts dimensions 2, 3, and 4');
+ return kVectorF32Values[dim];
+}
+
+const kSparseVectorF32Values = {
+ 2: sparseF32Range().map((f, idx) => [idx % 2 === 0 ? f : idx, idx % 2 === 1 ? f : -idx]),
+ 3: sparseF32Range().map((f, idx) => [
+ idx % 3 === 0 ? f : idx,
+ idx % 3 === 1 ? f : -idx,
+ idx % 3 === 2 ? f : idx,
+ ]),
+ 4: sparseF32Range().map((f, idx) => [
+ idx % 4 === 0 ? f : idx,
+ idx % 4 === 1 ? f : -idx,
+ idx % 4 === 2 ? f : idx,
+ idx % 4 === 3 ? f : -idx,
+ ]),
+};
+
+/**
+ * Minimal set of vectors, indexed by dimension, that contain interesting float
+ * values.
+ *
+ * This is an even more stripped down version of `vectorF32Range` for when
+ * pairs of vectors are being tested.
+ * All of the interesting floats from sparseF32 are guaranteed to be tested, but
+ * not in every position.
+ */
+export function sparseVectorF32Range(dim: number): ROArrayArray<number> {
+ assert(
+ dim === 2 || dim === 3 || dim === 4,
+ 'sparseVectorF32Range only accepts dimensions 2, 3, and 4'
+ );
+ return kSparseVectorF32Values[dim];
+}
+
+const kSparseMatrixF32Values = {
+ 2: {
+ 2: kInterestingF32Values.map((f, idx) => [
+ [idx % 4 === 0 ? f : idx, idx % 4 === 1 ? f : -idx],
+ [idx % 4 === 2 ? f : -idx, idx % 4 === 3 ? f : idx],
+ ]),
+ 3: kInterestingF32Values.map((f, idx) => [
+ [idx % 6 === 0 ? f : idx, idx % 6 === 1 ? f : -idx, idx % 6 === 2 ? f : idx],
+ [idx % 6 === 3 ? f : -idx, idx % 6 === 4 ? f : idx, idx % 6 === 5 ? f : -idx],
+ ]),
+ 4: kInterestingF32Values.map((f, idx) => [
+ [
+ idx % 8 === 0 ? f : idx,
+ idx % 8 === 1 ? f : -idx,
+ idx % 8 === 2 ? f : idx,
+ idx % 8 === 3 ? f : -idx,
+ ],
+ [
+ idx % 8 === 4 ? f : -idx,
+ idx % 8 === 5 ? f : idx,
+ idx % 8 === 6 ? f : -idx,
+ idx % 8 === 7 ? f : idx,
+ ],
+ ]),
+ },
+ 3: {
+ 2: kInterestingF32Values.map((f, idx) => [
+ [idx % 6 === 0 ? f : idx, idx % 6 === 1 ? f : -idx],
+ [idx % 6 === 2 ? f : -idx, idx % 6 === 3 ? f : idx],
+ [idx % 6 === 4 ? f : idx, idx % 6 === 5 ? f : -idx],
+ ]),
+ 3: kInterestingF32Values.map((f, idx) => [
+ [idx % 9 === 0 ? f : idx, idx % 9 === 1 ? f : -idx, idx % 9 === 2 ? f : idx],
+ [idx % 9 === 3 ? f : -idx, idx % 9 === 4 ? f : idx, idx % 9 === 5 ? f : -idx],
+ [idx % 9 === 6 ? f : idx, idx % 9 === 7 ? f : -idx, idx % 9 === 8 ? f : idx],
+ ]),
+ 4: kInterestingF32Values.map((f, idx) => [
+ [
+ idx % 12 === 0 ? f : idx,
+ idx % 12 === 1 ? f : -idx,
+ idx % 12 === 2 ? f : idx,
+ idx % 12 === 3 ? f : -idx,
+ ],
+ [
+ idx % 12 === 4 ? f : -idx,
+ idx % 12 === 5 ? f : idx,
+ idx % 12 === 6 ? f : -idx,
+ idx % 12 === 7 ? f : idx,
+ ],
+ [
+ idx % 12 === 8 ? f : idx,
+ idx % 12 === 9 ? f : -idx,
+ idx % 12 === 10 ? f : idx,
+ idx % 12 === 11 ? f : -idx,
+ ],
+ ]),
+ },
+ 4: {
+ 2: kInterestingF32Values.map((f, idx) => [
+ [idx % 8 === 0 ? f : idx, idx % 8 === 1 ? f : -idx],
+ [idx % 8 === 2 ? f : -idx, idx % 8 === 3 ? f : idx],
+ [idx % 8 === 4 ? f : idx, idx % 8 === 5 ? f : -idx],
+ [idx % 8 === 6 ? f : -idx, idx % 8 === 7 ? f : idx],
+ ]),
+ 3: kInterestingF32Values.map((f, idx) => [
+ [idx % 12 === 0 ? f : idx, idx % 12 === 1 ? f : -idx, idx % 12 === 2 ? f : idx],
+ [idx % 12 === 3 ? f : -idx, idx % 12 === 4 ? f : idx, idx % 12 === 5 ? f : -idx],
+ [idx % 12 === 6 ? f : idx, idx % 12 === 7 ? f : -idx, idx % 12 === 8 ? f : idx],
+ [idx % 12 === 9 ? f : -idx, idx % 12 === 10 ? f : idx, idx % 12 === 11 ? f : -idx],
+ ]),
+ 4: kInterestingF32Values.map((f, idx) => [
+ [
+ idx % 16 === 0 ? f : idx,
+ idx % 16 === 1 ? f : -idx,
+ idx % 16 === 2 ? f : idx,
+ idx % 16 === 3 ? f : -idx,
+ ],
+ [
+ idx % 16 === 4 ? f : -idx,
+ idx % 16 === 5 ? f : idx,
+ idx % 16 === 6 ? f : -idx,
+ idx % 16 === 7 ? f : idx,
+ ],
+ [
+ idx % 16 === 8 ? f : idx,
+ idx % 16 === 9 ? f : -idx,
+ idx % 16 === 10 ? f : idx,
+ idx % 16 === 11 ? f : -idx,
+ ],
+ [
+ idx % 16 === 12 ? f : -idx,
+ idx % 16 === 13 ? f : idx,
+ idx % 16 === 14 ? f : -idx,
+ idx % 16 === 15 ? f : idx,
+ ],
+ ]),
+ },
+};
+
+/**
+ * Returns a minimal set of matrices, indexed by dimension, containing
+ * interesting f32 values.
+ *
+ * This is the matrix analogue of `sparseVectorF32Range`, so it produces a
+ * minimal coverage set of matrices that test all of the interesting f32 values.
+ * There is no more expansive set of matrices, since matrices are even more
+ * expensive than vectors at increasing runtime with coverage.
+ *
+ * All of the interesting floats from sparseF32 are guaranteed to be tested, but
+ * not in every position.
+ */
+export function sparseMatrixF32Range(c: number, r: number): ROArrayArrayArray<number> {
+ assert(
+ c === 2 || c === 3 || c === 4,
+ 'sparseMatrixF32Range only accepts column counts of 2, 3, and 4'
+ );
+ assert(
+ r === 2 || r === 3 || r === 4,
+ 'sparseMatrixF32Range only accepts row counts of 2, 3, and 4'
+ );
+ return kSparseMatrixF32Values[c][r];
+}
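+
+// Illustrative shape sketch: sparseMatrixF32Range(2, 3) yields one matrix per
+// interesting f32 value; each matrix is an array of 2 columns, each a
+// 3-element array, where exactly one element holds the interesting value and
+// the remaining elements are index-derived filler (+/-idx).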
+
+/** Short list of f16 values of interest to test against */
+const kInterestingF16Values: readonly number[] = [
+ kValue.f16.negative.min,
+ -10.0,
+ -1.0,
+ -0.125,
+ kValue.f16.negative.max,
+ kValue.f16.negative.subnormal.min,
+ kValue.f16.negative.subnormal.max,
+ -0.0,
+ 0.0,
+ kValue.f16.positive.subnormal.min,
+ kValue.f16.positive.subnormal.max,
+ kValue.f16.positive.min,
+ 0.125,
+ 1.0,
+ 10.0,
+ kValue.f16.positive.max,
+];
+
+/** @returns minimal f16 values that cover the entire range of f16 behaviours
+ *
+ * Has specially selected values that cover edge cases, normals, and subnormals.
+ * This is used instead of fullF16Range when the number of test cases being
+ * generated is a super-linear function of the length of the f16 list, which
+ * would otherwise lead to timeouts.
+ *
+ * These values have been chosen to attempt to test the widest range of f16
+ * behaviours in the lowest number of entries, so may miss function-specific
+ * values of interest. If there are known values of interest, they should be
+ * appended to this list in the test generation code.
+ */
+export function sparseF16Range(): readonly number[] {
+ return kInterestingF16Values;
+}
+
+const kVectorF16Values = {
+ 2: sparseF16Range().flatMap(f => [
+ [f, 1.0],
+ [1.0, f],
+ [f, -1.0],
+ [-1.0, f],
+ ]),
+ 3: sparseF16Range().flatMap(f => [
+ [f, 1.0, 2.0],
+ [1.0, f, 2.0],
+ [1.0, 2.0, f],
+ [f, -1.0, -2.0],
+ [-1.0, f, -2.0],
+ [-1.0, -2.0, f],
+ ]),
+ 4: sparseF16Range().flatMap(f => [
+ [f, 1.0, 2.0, 3.0],
+ [1.0, f, 2.0, 3.0],
+ [1.0, 2.0, f, 3.0],
+ [1.0, 2.0, 3.0, f],
+ [f, -1.0, -2.0, -3.0],
+ [-1.0, f, -2.0, -3.0],
+ [-1.0, -2.0, f, -3.0],
+ [-1.0, -2.0, -3.0, f],
+ ]),
+};
+
+/**
+ * Returns a set of vectors, indexed by dimension, containing interesting f16
+ * values.
+ *
+ * The tests do not take the simple approach to coverage of computing the
+ * cartesian product of all of the interesting float values N times for vecN
+ * tests, because that creates a huge number of cases for vec3 and vec4,
+ * leading to timeouts.
+ *
+ * Instead they insert the interesting f16 values into each location of the
+ * vector to get a spread of testing over the entire range. This reduces the
+ * number of cases being run substantially, but maintains coverage.
+ */
+export function vectorF16Range(dim: number): ROArrayArray<number> {
+ assert(dim === 2 || dim === 3 || dim === 4, 'vectorF16Range only accepts dimensions 2, 3, and 4');
+ return kVectorF16Values[dim];
+}
+
+const kSparseVectorF16Values = {
+ 2: sparseF16Range().map((f, idx) => [idx % 2 === 0 ? f : idx, idx % 2 === 1 ? f : -idx]),
+ 3: sparseF16Range().map((f, idx) => [
+ idx % 3 === 0 ? f : idx,
+ idx % 3 === 1 ? f : -idx,
+ idx % 3 === 2 ? f : idx,
+ ]),
+ 4: sparseF16Range().map((f, idx) => [
+ idx % 4 === 0 ? f : idx,
+ idx % 4 === 1 ? f : -idx,
+ idx % 4 === 2 ? f : idx,
+ idx % 4 === 3 ? f : -idx,
+ ]),
+};
+
+/**
+ * Minimal set of vectors, indexed by dimension, that contain interesting f16
+ * values.
+ *
+ * This is an even more stripped down version of `vectorF16Range` for when
+ * pairs of vectors are being tested.
+ * All of the interesting floats from sparseF16 are guaranteed to be tested, but
+ * not in every position.
+ */
+export function sparseVectorF16Range(dim: number): ROArrayArray<number> {
+ assert(
+ dim === 2 || dim === 3 || dim === 4,
+ 'sparseVectorF16Range only accepts dimensions 2, 3, and 4'
+ );
+ return kSparseVectorF16Values[dim];
+}
+
+const kSparseMatrixF16Values = {
+ 2: {
+ 2: kInterestingF16Values.map((f, idx) => [
+ [idx % 4 === 0 ? f : idx, idx % 4 === 1 ? f : -idx],
+ [idx % 4 === 2 ? f : -idx, idx % 4 === 3 ? f : idx],
+ ]),
+ 3: kInterestingF16Values.map((f, idx) => [
+ [idx % 6 === 0 ? f : idx, idx % 6 === 1 ? f : -idx, idx % 6 === 2 ? f : idx],
+ [idx % 6 === 3 ? f : -idx, idx % 6 === 4 ? f : idx, idx % 6 === 5 ? f : -idx],
+ ]),
+ 4: kInterestingF16Values.map((f, idx) => [
+ [
+ idx % 8 === 0 ? f : idx,
+ idx % 8 === 1 ? f : -idx,
+ idx % 8 === 2 ? f : idx,
+ idx % 8 === 3 ? f : -idx,
+ ],
+ [
+ idx % 8 === 4 ? f : -idx,
+ idx % 8 === 5 ? f : idx,
+ idx % 8 === 6 ? f : -idx,
+ idx % 8 === 7 ? f : idx,
+ ],
+ ]),
+ },
+ 3: {
+ 2: kInterestingF16Values.map((f, idx) => [
+ [idx % 6 === 0 ? f : idx, idx % 6 === 1 ? f : -idx],
+ [idx % 6 === 2 ? f : -idx, idx % 6 === 3 ? f : idx],
+ [idx % 6 === 4 ? f : idx, idx % 6 === 5 ? f : -idx],
+ ]),
+ 3: kInterestingF16Values.map((f, idx) => [
+ [idx % 9 === 0 ? f : idx, idx % 9 === 1 ? f : -idx, idx % 9 === 2 ? f : idx],
+ [idx % 9 === 3 ? f : -idx, idx % 9 === 4 ? f : idx, idx % 9 === 5 ? f : -idx],
+ [idx % 9 === 6 ? f : idx, idx % 9 === 7 ? f : -idx, idx % 9 === 8 ? f : idx],
+ ]),
+ 4: kInterestingF16Values.map((f, idx) => [
+ [
+ idx % 12 === 0 ? f : idx,
+ idx % 12 === 1 ? f : -idx,
+ idx % 12 === 2 ? f : idx,
+ idx % 12 === 3 ? f : -idx,
+ ],
+ [
+ idx % 12 === 4 ? f : -idx,
+ idx % 12 === 5 ? f : idx,
+ idx % 12 === 6 ? f : -idx,
+ idx % 12 === 7 ? f : idx,
+ ],
+ [
+ idx % 12 === 8 ? f : idx,
+ idx % 12 === 9 ? f : -idx,
+ idx % 12 === 10 ? f : idx,
+ idx % 12 === 11 ? f : -idx,
+ ],
+ ]),
+ },
+ 4: {
+ 2: kInterestingF16Values.map((f, idx) => [
+ [idx % 8 === 0 ? f : idx, idx % 8 === 1 ? f : -idx],
+ [idx % 8 === 2 ? f : -idx, idx % 8 === 3 ? f : idx],
+ [idx % 8 === 4 ? f : idx, idx % 8 === 5 ? f : -idx],
+ [idx % 8 === 6 ? f : -idx, idx % 8 === 7 ? f : idx],
+ ]),
+ 3: kInterestingF16Values.map((f, idx) => [
+ [idx % 12 === 0 ? f : idx, idx % 12 === 1 ? f : -idx, idx % 12 === 2 ? f : idx],
+ [idx % 12 === 3 ? f : -idx, idx % 12 === 4 ? f : idx, idx % 12 === 5 ? f : -idx],
+ [idx % 12 === 6 ? f : idx, idx % 12 === 7 ? f : -idx, idx % 12 === 8 ? f : idx],
+ [idx % 12 === 9 ? f : -idx, idx % 12 === 10 ? f : idx, idx % 12 === 11 ? f : -idx],
+ ]),
+ 4: kInterestingF16Values.map((f, idx) => [
+ [
+ idx % 16 === 0 ? f : idx,
+ idx % 16 === 1 ? f : -idx,
+ idx % 16 === 2 ? f : idx,
+ idx % 16 === 3 ? f : -idx,
+ ],
+ [
+ idx % 16 === 4 ? f : -idx,
+ idx % 16 === 5 ? f : idx,
+ idx % 16 === 6 ? f : -idx,
+ idx % 16 === 7 ? f : idx,
+ ],
+ [
+ idx % 16 === 8 ? f : idx,
+ idx % 16 === 9 ? f : -idx,
+ idx % 16 === 10 ? f : idx,
+ idx % 16 === 11 ? f : -idx,
+ ],
+ [
+ idx % 16 === 12 ? f : -idx,
+ idx % 16 === 13 ? f : idx,
+ idx % 16 === 14 ? f : -idx,
+ idx % 16 === 15 ? f : idx,
+ ],
+ ]),
+ },
+};
+
+/**
+ * Returns a minimal set of matrices, indexed by dimension, containing
+ * interesting f16 values.
+ *
+ * This is the matrix analogue of `sparseVectorF16Range`, so it produces a
+ * minimal coverage set of matrices that test all of the interesting f16 values.
+ * There is no more expansive set of matrices, since matrices are even more
+ * expensive than vectors at increasing runtime with coverage.
+ *
+ * All of the interesting floats from sparseF16 are guaranteed to be tested, but
+ * not in every position.
+ */
+export function sparseMatrixF16Range(c: number, r: number): ROArrayArrayArray<number> {
+ assert(
+ c === 2 || c === 3 || c === 4,
+ 'sparseMatrixF16Range only accepts column counts of 2, 3, and 4'
+ );
+ assert(
+ r === 2 || r === 3 || r === 4,
+ 'sparseMatrixF16Range only accepts row counts of 2, 3, and 4'
+ );
+ return kSparseMatrixF16Values[c][r];
+}
+
+/** Short list of f64 values of interest to test against */
+const kInterestingF64Values: readonly number[] = [
+ kValue.f64.negative.min,
+ -10.0,
+ -1.0,
+ -0.125,
+ kValue.f64.negative.max,
+ kValue.f64.negative.subnormal.min,
+ kValue.f64.negative.subnormal.max,
+ -0.0,
+ 0.0,
+ kValue.f64.positive.subnormal.min,
+ kValue.f64.positive.subnormal.max,
+ kValue.f64.positive.min,
+ 0.125,
+ 1.0,
+ 10.0,
+ kValue.f64.positive.max,
+];
+
+/** @returns minimal f64 values that cover the entire range of f64 behaviours
+ *
+ * Has specially selected values that cover edge cases, normals, and subnormals.
+ * This is used instead of fullF64Range when the number of test cases being
+ * generated is a super-linear function of the length of the f64 list, which
+ * would otherwise lead to timeouts.
+ *
+ * These values have been chosen to attempt to test the widest range of f64
+ * behaviours in the lowest number of entries, so may miss function-specific
+ * values of interest. If there are known values of interest, they should be
+ * appended to this list in the test generation code.
+ */
+export function sparseF64Range(): readonly number[] {
+ return kInterestingF64Values;
+}
+
+const kVectorF64Values = {
+ 2: sparseF64Range().flatMap(f => [
+ [f, 1.0],
+ [1.0, f],
+ [f, -1.0],
+ [-1.0, f],
+ ]),
+ 3: sparseF64Range().flatMap(f => [
+ [f, 1.0, 2.0],
+ [1.0, f, 2.0],
+ [1.0, 2.0, f],
+ [f, -1.0, -2.0],
+ [-1.0, f, -2.0],
+ [-1.0, -2.0, f],
+ ]),
+ 4: sparseF64Range().flatMap(f => [
+ [f, 1.0, 2.0, 3.0],
+ [1.0, f, 2.0, 3.0],
+ [1.0, 2.0, f, 3.0],
+ [1.0, 2.0, 3.0, f],
+ [f, -1.0, -2.0, -3.0],
+ [-1.0, f, -2.0, -3.0],
+ [-1.0, -2.0, f, -3.0],
+ [-1.0, -2.0, -3.0, f],
+ ]),
+};
+
+/**
+ * Returns a set of vectors, indexed by dimension, containing interesting f64
+ * values.
+ *
+ * The tests do not take the simple approach to coverage of computing the
+ * cartesian product of all of the interesting float values N times for vecN
+ * tests, because that creates a huge number of cases for vec3 and vec4,
+ * leading to timeouts.
+ *
+ * Instead they insert the interesting f64 values into each location of the
+ * vector to get a spread of testing over the entire range. This reduces the
+ * number of cases being run substantially, but maintains coverage.
+ */
+export function vectorF64Range(dim: number): ROArrayArray<number> {
+ assert(dim === 2 || dim === 3 || dim === 4, 'vectorF64Range only accepts dimensions 2, 3, and 4');
+ return kVectorF64Values[dim];
+}
+
+const kSparseVectorF64Values = {
+ 2: sparseF64Range().map((f, idx) => [idx % 2 === 0 ? f : idx, idx % 2 === 1 ? f : -idx]),
+ 3: sparseF64Range().map((f, idx) => [
+ idx % 3 === 0 ? f : idx,
+ idx % 3 === 1 ? f : -idx,
+ idx % 3 === 2 ? f : idx,
+ ]),
+ 4: sparseF64Range().map((f, idx) => [
+ idx % 4 === 0 ? f : idx,
+ idx % 4 === 1 ? f : -idx,
+ idx % 4 === 2 ? f : idx,
+ idx % 4 === 3 ? f : -idx,
+ ]),
+};
+
+/**
+ * Minimal set of vectors, indexed by dimension, that contain interesting f64
+ * values.
+ *
+ * This is an even more stripped down version of `vectorF64Range` for when
+ * pairs of vectors are being tested.
+ * All the interesting floats from sparseF64 are guaranteed to be tested, but
+ * not in every position.
+ */
+export function sparseVectorF64Range(dim: number): ROArrayArray<number> {
+ assert(
+ dim === 2 || dim === 3 || dim === 4,
+ 'sparseVectorF64Range only accepts dimensions 2, 3, and 4'
+ );
+ return kSparseVectorF64Values[dim];
+}
+
+const kSparseMatrixF64Values = {
+ 2: {
+ 2: kInterestingF64Values.map((f, idx) => [
+ [idx % 4 === 0 ? f : idx, idx % 4 === 1 ? f : -idx],
+ [idx % 4 === 2 ? f : -idx, idx % 4 === 3 ? f : idx],
+ ]),
+ 3: kInterestingF64Values.map((f, idx) => [
+ [idx % 6 === 0 ? f : idx, idx % 6 === 1 ? f : -idx, idx % 6 === 2 ? f : idx],
+ [idx % 6 === 3 ? f : -idx, idx % 6 === 4 ? f : idx, idx % 6 === 5 ? f : -idx],
+ ]),
+ 4: kInterestingF64Values.map((f, idx) => [
+ [
+ idx % 8 === 0 ? f : idx,
+ idx % 8 === 1 ? f : -idx,
+ idx % 8 === 2 ? f : idx,
+ idx % 8 === 3 ? f : -idx,
+ ],
+ [
+ idx % 8 === 4 ? f : -idx,
+ idx % 8 === 5 ? f : idx,
+ idx % 8 === 6 ? f : -idx,
+ idx % 8 === 7 ? f : idx,
+ ],
+ ]),
+ },
+ 3: {
+ 2: kInterestingF64Values.map((f, idx) => [
+ [idx % 6 === 0 ? f : idx, idx % 6 === 1 ? f : -idx],
+ [idx % 6 === 2 ? f : -idx, idx % 6 === 3 ? f : idx],
+ [idx % 6 === 4 ? f : idx, idx % 6 === 5 ? f : -idx],
+ ]),
+ 3: kInterestingF64Values.map((f, idx) => [
+ [idx % 9 === 0 ? f : idx, idx % 9 === 1 ? f : -idx, idx % 9 === 2 ? f : idx],
+ [idx % 9 === 3 ? f : -idx, idx % 9 === 4 ? f : idx, idx % 9 === 5 ? f : -idx],
+ [idx % 9 === 6 ? f : idx, idx % 9 === 7 ? f : -idx, idx % 9 === 8 ? f : idx],
+ ]),
+ 4: kInterestingF64Values.map((f, idx) => [
+ [
+ idx % 12 === 0 ? f : idx,
+ idx % 12 === 1 ? f : -idx,
+ idx % 12 === 2 ? f : idx,
+ idx % 12 === 3 ? f : -idx,
+ ],
+ [
+ idx % 12 === 4 ? f : -idx,
+ idx % 12 === 5 ? f : idx,
+ idx % 12 === 6 ? f : -idx,
+ idx % 12 === 7 ? f : idx,
+ ],
+ [
+ idx % 12 === 8 ? f : idx,
+ idx % 12 === 9 ? f : -idx,
+ idx % 12 === 10 ? f : idx,
+ idx % 12 === 11 ? f : -idx,
+ ],
+ ]),
+ },
+ 4: {
+ 2: kInterestingF64Values.map((f, idx) => [
+ [idx % 8 === 0 ? f : idx, idx % 8 === 1 ? f : -idx],
+ [idx % 8 === 2 ? f : -idx, idx % 8 === 3 ? f : idx],
+ [idx % 8 === 4 ? f : idx, idx % 8 === 5 ? f : -idx],
+ [idx % 8 === 6 ? f : -idx, idx % 8 === 7 ? f : idx],
+ ]),
+ 3: kInterestingF64Values.map((f, idx) => [
+ [idx % 12 === 0 ? f : idx, idx % 12 === 1 ? f : -idx, idx % 12 === 2 ? f : idx],
+ [idx % 12 === 3 ? f : -idx, idx % 12 === 4 ? f : idx, idx % 12 === 5 ? f : -idx],
+ [idx % 12 === 6 ? f : idx, idx % 12 === 7 ? f : -idx, idx % 12 === 8 ? f : idx],
+ [idx % 12 === 9 ? f : -idx, idx % 12 === 10 ? f : idx, idx % 12 === 11 ? f : -idx],
+ ]),
+ 4: kInterestingF64Values.map((f, idx) => [
+ [
+ idx % 16 === 0 ? f : idx,
+ idx % 16 === 1 ? f : -idx,
+ idx % 16 === 2 ? f : idx,
+ idx % 16 === 3 ? f : -idx,
+ ],
+ [
+ idx % 16 === 4 ? f : -idx,
+ idx % 16 === 5 ? f : idx,
+ idx % 16 === 6 ? f : -idx,
+ idx % 16 === 7 ? f : idx,
+ ],
+ [
+ idx % 16 === 8 ? f : idx,
+ idx % 16 === 9 ? f : -idx,
+ idx % 16 === 10 ? f : idx,
+ idx % 16 === 11 ? f : -idx,
+ ],
+ [
+ idx % 16 === 12 ? f : -idx,
+ idx % 16 === 13 ? f : idx,
+ idx % 16 === 14 ? f : -idx,
+ idx % 16 === 15 ? f : idx,
+ ],
+ ]),
+ },
+};
+
+/**
+ * Returns a minimal set of matrices, indexed by dimension, containing
+ * interesting f64 values.
+ *
+ * This is the matrix analogue of `sparseVectorF64Range`, so it produces a
+ * minimal coverage set of matrices that test all the interesting f64 values.
+ * There is no more expansive set of matrices, since matrices are even more
+ * expensive than vectors at increasing runtime with coverage.
+ *
+ * All the interesting floats from sparseF64 are guaranteed to be tested, but
+ * not in every position.
+ */
+export function sparseMatrixF64Range(c: number, r: number): ROArrayArrayArray<number> {
+ assert(
+ c === 2 || c === 3 || c === 4,
+ 'sparseMatrixF64Range only accepts column counts of 2, 3, and 4'
+ );
+ assert(
+ r === 2 || r === 3 || r === 4,
+ 'sparseMatrixF64Range only accepts row counts of 2, 3, and 4'
+ );
+ return kSparseMatrixF64Values[c][r];
+}
+
+/**
+ * Matrix multiplication. A is m x n and B is n x p.
+ *
+ * @returns the m x p result matrix as an Array<Array<number>>.
+ */
+export function multiplyMatrices(
+ A: Array<Array<number>>,
+ B: Array<Array<number>>
+): Array<Array<number>> {
+ assert(A.length > 0 && B.length > 0 && B[0].length > 0 && A[0].length === B.length);
+ const product = new Array<Array<number>>(A.length);
+ for (let i = 0; i < product.length; ++i) {
+ product[i] = new Array<number>(B[0].length).fill(0);
+ }
+
+ for (let m = 0; m < A.length; ++m) {
+ for (let p = 0; p < B[0].length; ++p) {
+ for (let n = 0; n < B.length; ++n) {
+ product[m][p] += A[m][n] * B[n][p];
+ }
+ }
+ }
+
+ return product;
+}
+
+/** Sign-extend the `bits`-bit number `n` to a 32-bit signed integer. */
+export function signExtend(n: number, bits: number): number {
+ const shift = 32 - bits;
+ return (n << shift) >> shift;
+}
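+
+// Illustrative behaviour, e.g.:
+//   signExtend(0xff, 8)  // -> -1
+//   signExtend(0x7f, 8)  // -> 127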
+
+export interface QuantizeFunc {
+ (num: number): number;
+}
+
+/** @returns the closest 32-bit floating point value to the input */
+export function quantizeToF32(num: number): number {
+ return Math.fround(num);
+}
+
+/** @returns the closest 16-bit floating point value to the input */
+export function quantizeToF16(num: number): number {
+ return hfround(num);
+}
+
+/**
+ * @returns the closest 32-bit signed integer value to the input, clamping to the
+ * i32 range and rounding towards 0 if not already an integer
+ */
+export function quantizeToI32(num: number): number {
+ if (num >= kValue.i32.positive.max) {
+ return kValue.i32.positive.max;
+ }
+ if (num <= kValue.i32.negative.min) {
+ return kValue.i32.negative.min;
+ }
+ return Math.trunc(num);
+}
+
+/**
+ * @returns the closest 32-bit unsigned integer value to the input, clamping to the
+ * u32 range and rounding towards 0 if not already an integer
+ */
+export function quantizeToU32(num: number): number {
+ if (num >= kValue.u32.max) {
+ return kValue.u32.max;
+ }
+ if (num <= 0) {
+ return 0;
+ }
+ return Math.trunc(num);
+}
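+
+// Illustrative behaviour, e.g.:
+//   quantizeToF32(1.1)      // -> Math.fround(1.1), the nearest f32
+//   quantizeToI32(1.9)      // -> 1 (truncated towards 0)
+//   quantizeToI32(2 ** 40)  // -> kValue.i32.positive.max (clamped)
+//   quantizeToU32(-5)       // -> 0 (clamped)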
+
+/** @returns whether the number is an integer and a power of two */
+export function isPowerOfTwo(n: number): boolean {
+ if (!Number.isInteger(n)) {
+ return false;
+ }
+ return n !== 0 && (n & (n - 1)) === 0;
+}
+
+/** @returns the Greatest Common Divisor (GCD) of the inputs */
+export function gcd(a: number, b: number): number {
+ assert(Number.isInteger(a) && a > 0);
+ assert(Number.isInteger(b) && b > 0);
+
+ while (b !== 0) {
+ const bTemp = b;
+ b = a % b;
+ a = bTemp;
+ }
+
+ return a;
+}
+
+/** @returns the Least Common Multiple (LCM) of the inputs */
+export function lcm(a: number, b: number): number {
+ return (a * b) / gcd(a, b);
+}
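+
+// Illustrative behaviour, e.g.:
+//   gcd(12, 18)  // -> 6
+//   lcm(4, 6)    // -> 12
+// Both helpers assert that their inputs are positive integers.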
+
+/** @returns the cross of an array with the intermediate result of cartesianProduct
+ *
+ * @param elements array of values to cross with the intermediate result of
+ * cartesianProduct
+ * @param intermediate arrays of values representing the partial result of
+ * cartesianProduct
+ */
+function cartesianProductImpl<T>(
+ elements: readonly T[],
+ intermediate: ROArrayArray<T>
+): ROArrayArray<T> {
+ const result: T[][] = [];
+ elements.forEach((e: T) => {
+ if (intermediate.length > 0) {
+ intermediate.forEach((i: readonly T[]) => {
+ result.push([...i, e]);
+ });
+ } else {
+ result.push([e]);
+ }
+ });
+ return result;
+}
+
+/** @returns the cartesian product (NxMx...) of a set of arrays
+ *
+ * This is implemented by calculating the cross of a single input against an
+ * intermediate result for each input to build up the final array of arrays.
+ *
+ * There are examples of doing this more succinctly using map & reduce online,
+ * but they are a bit more opaque to read.
+ *
+ * @param inputs arrays of numbers to calculate cartesian product over
+ */
+export function cartesianProduct<T>(...inputs: ROArrayArray<T>): ROArrayArray<T> {
+ let result: ROArrayArray<T> = [];
+ inputs.forEach((i: readonly T[]) => {
+ result = cartesianProductImpl<T>(i, result);
+ });
+
+ return result;
+}
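+
+// Illustrative behaviour, e.g.:
+//   cartesianProduct([0, 1], [10, 20])
+//   // -> [[0, 10], [1, 10], [0, 20], [1, 20]]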
+
+/** @returns all of the permutations of an array
+ *
+ * Recursively calculates all of the permutations; does not cull duplicate
+ * entries.
+ *
+ * Only feasible for inputs of length ~5 or less, since the number of
+ * permutations is (input.length)!, which explodes for longer inputs.
+ *
+ * This code could be made iterative using something like
+ * Steinhaus–Johnson–Trotter and additionally turned into a generator to reduce
+ * the stack size, but there is still a fundamental combinatorial explosion
+ * here that will affect runtime.
+ *
+ * @param input the array to get permutations of
+ */
+export function calculatePermutations<T>(input: readonly T[]): ROArrayArray<T> {
+ if (input.length === 0) {
+ return [];
+ }
+
+ if (input.length === 1) {
+ return [input];
+ }
+
+ if (input.length === 2) {
+ return [input, [input[1], input[0]]];
+ }
+
+ const result: T[][] = [];
+ input.forEach((head, idx) => {
+ const tail = input.slice(0, idx).concat(input.slice(idx + 1));
+ const permutations = calculatePermutations(tail);
+ permutations.forEach(p => {
+ result.push([head, ...p]);
+ });
+ });
+
+ return result;
+}
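+
+// Illustrative behaviour, e.g.:
+//   calculatePermutations([0, 1, 2])
+//   // -> [[0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0], [2, 0, 1], [2, 1, 0]]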
+
+/**
+ * Convert an Array of Arrays to a linear array
+ *
+ * The caller is responsible for retaining the dimensions of the array for later
+ * unflattening
+ *
+ * @param m Matrix to convert
+ */
+export function flatten2DArray<T>(m: ROArrayArray<T>): T[] {
+ const c = m.length;
+ const r = m[0].length;
+ assert(
+ m.every(c => c.length === r),
+ `Unexpectedly received jagged array to flatten`
+ );
+ const result: T[] = Array<T>(c * r);
+ for (let i = 0; i < c; i++) {
+ for (let j = 0; j < r; j++) {
+ result[j + i * r] = m[i][j];
+ }
+ }
+ return result;
+}
+
+/**
+ * Convert a linear array to an Array of Arrays
+ * @param n an array to convert
+ * @param c number of elements in the array containing arrays
+ * @param r number of elements in the arrays that are contained
+ */
+export function unflatten2DArray<T>(n: readonly T[], c: number, r: number): ROArrayArray<T> {
+ assert(
+ c > 0 && Number.isInteger(c) && r > 0 && Number.isInteger(r),
+ `columns (${c}) and rows (${r}) need to be positive integers`
+ );
+ assert(n.length === c * r, `m.length(${n.length}) should equal c * r (${c * r})`);
+ const result: T[][] = [...Array(c)].map(_ => [...Array(r)]);
+ for (let i = 0; i < c; i++) {
+ for (let j = 0; j < r; j++) {
+ result[i][j] = n[j + i * r];
+ }
+ }
+ return result;
+}
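+
+// Illustrative behaviour, e.g.:
+//   flatten2DArray([[1, 2, 3], [4, 5, 6]])      // -> [1, 2, 3, 4, 5, 6]
+//   unflatten2DArray([1, 2, 3, 4, 5, 6], 2, 3)  // -> [[1, 2, 3], [4, 5, 6]]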
+
+/**
+ * Performs a .map over a matrix and returns the result.
+ * The shape of the input and output matrices will be the same.
+ *
+ * @param m input matrix of type T
+ * @param op operation that converts an element of type T to one of type S
+ * @returns a matrix with elements of type S that are calculated by applying op element by element
+ */
+export function map2DArray<T, S>(m: ROArrayArray<T>, op: (input: T) => S): ROArrayArray<S> {
+ const c = m.length;
+ const r = m[0].length;
+ assert(
+ m.every(c => c.length === r),
+ `Unexpectedly received jagged array to map`
+ );
+ const result: S[][] = [...Array(c)].map(_ => [...Array(r)]);
+ for (let i = 0; i < c; i++) {
+ for (let j = 0; j < r; j++) {
+ result[i][j] = op(m[i][j]);
+ }
+ }
+ return result;
+}
+
+/**
+ * Performs a .every over a matrix and returns the result
+ *
+ * @param m input matrix of type T
+ * @param op operation that performs a test on an element
+ * @returns a boolean indicating if the test passed for every element
+ */
+export function every2DArray<T>(m: ROArrayArray<T>, op: (input: T) => boolean): boolean {
+ const r = m[0].length;
+ assert(
+ m.every(c => c.length === r),
+ `Unexpectedly received jagged array to every`
+ );
+ return m.every(col => col.every(el => op(el)));
+}
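+
+// Illustrative behaviour, e.g.:
+//   map2DArray([[1, 2], [3, 4]], x => x * 2)    // -> [[2, 4], [6, 8]]
+//   every2DArray([[1, 2], [3, 4]], x => x > 0)  // -> true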
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/memory.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/memory.ts
new file mode 100644
index 0000000000..bc5c916495
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/memory.ts
@@ -0,0 +1,25 @@
+/**
+ * Helper to exhaust VRAM until there is less than 64 MB of capacity. Returns
+ * an opaque closure which can be called to free the allocated resources later.
+ */
+export async function exhaustVramUntilUnder64MB(device: GPUDevice) {
+ const allocateUntilOom = async (device: GPUDevice, size: number) => {
+ const buffers = [];
+ for (;;) {
+ device.pushErrorScope('out-of-memory');
+ const buffer = device.createBuffer({ size, usage: GPUBufferUsage.STORAGE });
+ if (await device.popErrorScope()) {
+ return buffers;
+ }
+ buffers.push(buffer);
+ }
+ };
+
+ const kLargeChunkSize = 512 * 1024 * 1024;
+ const kSmallChunkSize = 64 * 1024 * 1024;
+ const buffers = await allocateUntilOom(device, kLargeChunkSize);
+ buffers.push(...(await allocateUntilOom(device, kSmallChunkSize)));
+ return () => {
+ buffers.forEach(buffer => buffer.destroy());
+ };
+}
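+
+// Illustrative usage sketch (assuming an async test body with a GPUDevice in
+// scope as `device`):
+//   const free = await exhaustVramUntilUnder64MB(device);
+//   // ... exercise out-of-memory behaviour ...
+//   free();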
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/pretty_diff_tables.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/pretty_diff_tables.ts
new file mode 100644
index 0000000000..af98ab7ecf
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/pretty_diff_tables.ts
@@ -0,0 +1,51 @@
+import { range } from '../../common/util/util.js';
+
+/**
+ * Pretty-prints a "table" of cell values (each being `number | string`), right-aligned.
+ * Each row may be any iterator, including lazily-generated (potentially infinite) rows.
+ *
+ * The first argument is the printing options:
+ * - fillToWidth: Keep printing columns (as long as there is data) until this width is passed.
+ * If there is more data, "..." is appended.
+ * - numberToString: if a cell value is a number, this is used to stringify it.
+ *
+ * The second argument is an array of rows; each element provides one row for the table.
+ */
+export function generatePrettyTable(
+ { fillToWidth, numberToString }: { fillToWidth: number; numberToString: (n: number) => string },
+ rows: ReadonlyArray<Iterable<string | number>>
+): string {
+ const rowStrings = range(rows.length, () => '');
+ let totalTableWidth = 0;
+ const iters = rows.map(row => row[Symbol.iterator]());
+
+ // Loop over columns
+ for (;;) {
+ const cellsForColumn = iters.map(iter => {
+ const r = iter.next(); // Advance the iterator for each row, in lock-step.
+ return r.done ? undefined : typeof r.value === 'number' ? numberToString(r.value) : r.value;
+ });
+ if (cellsForColumn.every(cell => cell === undefined)) break;
+
+ // Maximum width of any cell in this column, plus one for space between columns
+ // (also inserts a space at the left of the first column).
+ const colWidth = Math.max(...cellsForColumn.map(c => (c === undefined ? 0 : c.length))) + 1;
+ for (let row = 0; row < rowStrings.length; ++row) {
+ const cell = cellsForColumn[row];
+ if (cell !== undefined) {
+ rowStrings[row] += cell.padStart(colWidth);
+ }
+ }
+
+ totalTableWidth += colWidth;
+ if (totalTableWidth >= fillToWidth) {
+ for (let row = 0; row < rowStrings.length; ++row) {
+ if (cellsForColumn[row] !== undefined) {
+ rowStrings[row] += ' ...';
+ }
+ }
+ break;
+ }
+ }
+ return rowStrings.join('\n');
+}
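+
+// Illustrative usage sketch:
+//   const table = generatePrettyTable(
+//     { fillToWidth: 80, numberToString: n => n.toFixed(2) },
+//     [
+//       ['expected', 1, 2, 3],
+//       ['actual', 1, 2.5, 3],
+//     ]
+//   );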
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/prng.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/prng.ts
new file mode 100644
index 0000000000..641a937081
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/prng.ts
@@ -0,0 +1,125 @@
+import { assert } from '../../common/util/util.js';
+
+import { kValue } from './constants.js';
+
+/**
+ * Seed-able deterministic pseudo random generator for the WebGPU CTS
+ *
+ * This generator requires setting a seed value and the sequence of values
+ * generated is deterministic based on the seed.
+ *
+ * This generator is intended to be a replacement for Math.random().
+ *
+ * This generator is not cryptographically secure, though nothing in the CTS
+ * should need cryptographic security.
+ *
+ * The current implementation is based on TinyMT
+ * (https://github.com/MersenneTwister-Lab/TinyMT), which is a version of
+ * Mersenne Twister that has reduced the internal state size at the cost of
+ * shortening the period length of the generated sequence. The period is still
+ * 2^127 - 1 entries long, so should be sufficient for use in the CTS, but it is
+ * less costly to create multiple instances of the class.
+ */
+export class PRNG {
+ // Storing variables for temper() as members, so they don't need to be
+ // reallocated per call to temper()
+ private readonly t_vars: Uint32Array;
+
+ // Storing variables for next() as members, so they don't need to be
+ // reallocated per call to next()
+ private readonly n_vars: Uint32Array;
+
+ // Generator internal state
+ private readonly state: Uint32Array;
+
+ // Default tuning parameters for TinyMT.
+ // These are tested to not generate an all zero initial state.
+ private static readonly kMat1: number = 0x8f7011ee;
+ private static readonly kMat2: number = 0xfc78ff1f;
+ private static readonly kTMat: number = 0x3793fdff;
+
+ // TinyMT algorithm internal magic numbers
+ private static readonly kMask = 0x7fffffff;
+ private static readonly kMinLoop = 8;
+ private static readonly kPreLoop = 8;
+ private static readonly kSH0 = 1;
+ private static readonly kSH1 = 10;
+ private static readonly kSH8 = 8;
+
+ // u32.max + 1, used to scale the u32 value from temper() to [0, 1).
+ private static readonly kRandomDivisor = 4294967296.0;
+
+ /**
+ * constructor
+ *
+ * @param seed value used to initialize random number sequence. Results are
+ * guaranteed to be deterministic based on this.
+ * This value must be in the range of unsigned 32-bit integers.
+ * Non-integers will be rounded.
+ */
+ constructor(seed: number) {
+ assert(seed >= 0 && seed <= kValue.u32.max, 'seed to PRNG needs to be a u32');
+
+ this.t_vars = new Uint32Array(2);
+ this.n_vars = new Uint32Array(2);
+
+ this.state = new Uint32Array([Math.round(seed), PRNG.kMat1, PRNG.kMat2, PRNG.kTMat]);
+ for (let i = 1; i < PRNG.kMinLoop; i++) {
+ this.state[i & 3] ^=
+ i + Math.imul(1812433253, this.state[(i - 1) & 3] ^ (this.state[(i - 1) & 3] >>> 30));
+ }
+
+ // Check that the initial state isn't all 0s, since the algorithm assumes
+ // that this never occurs
+ assert(
+ (this.state[0] & PRNG.kMask) !== 0 ||
+ this.state[1] !== 0 ||
+ this.state[2] !== 0 ||
+ this.state[3] !== 0,
+ 'Initialization of PRNG unexpectedly generated all 0s initial state, this means the tuning parameters are bad'
+ );
+
+ for (let i = 0; i < PRNG.kPreLoop; i++) {
+ this.next();
+ }
+ }
+
+ /** Advances the internal state to the next values */
+ private next() {
+ this.n_vars[0] = (this.state[0] & PRNG.kMask) ^ this.state[1] ^ this.state[2];
+ this.n_vars[1] = this.state[3];
+ this.n_vars[0] ^= this.n_vars[0] << PRNG.kSH0;
+ this.n_vars[1] ^= (this.n_vars[1] >>> PRNG.kSH0) ^ this.n_vars[0];
+ this.state[0] = this.state[1];
+ this.state[1] = this.state[2];
+ this.state[2] = this.n_vars[0] ^ (this.n_vars[1] << PRNG.kSH1);
+ this.state[3] = this.n_vars[1];
+ if ((this.n_vars[1] & 1) !== 0) {
+ this.state[1] ^= PRNG.kMat1;
+ this.state[2] ^= PRNG.kMat2;
+ }
+ }
+
+ /** @returns a 32-bit unsigned integer based on the current state */
+ private temper(): number {
+ this.t_vars[0] = this.state[3];
+ this.t_vars[1] = this.state[0] + (this.state[2] >>> PRNG.kSH8);
+ this.t_vars[0] ^= this.t_vars[1];
+ if ((this.t_vars[1] & 1) !== 0) {
+ this.t_vars[0] ^= PRNG.kTMat;
+ }
+ return this.t_vars[0];
+ }
+
+ /** @returns a value on the range of [0, 1) and advances the state */
+ public random(): number {
+ this.next();
+ return this.temper() / PRNG.kRandomDivisor;
+ }
+
+ /** @returns a 32-bit unsigned integer value and advances the state */
+ public randomU32(): number {
+ this.next();
+ return this.temper();
+ }
+}
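+
+// Illustrative usage sketch:
+//   const prng = new PRNG(42);
+//   const x = prng.random();     // deterministic float in [0, 1)
+//   const u = prng.randomU32();  // deterministic 32-bit unsigned integer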
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/reinterpret.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/reinterpret.ts
new file mode 100644
index 0000000000..2ffb24b231
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/reinterpret.ts
@@ -0,0 +1,118 @@
+import { Float16Array } from '../../external/petamoriken/float16/float16.js';
+
+/**
+ * Once-allocated ArrayBuffer/views to avoid overhead of allocation when converting between numeric formats
+ *
+ * workingData* is shared between multiple functions in this file, so, to avoid re-entrancy problems,
+ * functions that use it must not call themselves or other functions that use workingData*.
+ */
+const workingData = new ArrayBuffer(8);
+const workingDataU32 = new Uint32Array(workingData);
+const workingDataU16 = new Uint16Array(workingData);
+const workingDataF32 = new Float32Array(workingData);
+const workingDataF16 = new Float16Array(workingData);
+const workingDataI32 = new Int32Array(workingData);
+const workingDataF64 = new Float64Array(workingData);
+const workingDataU64 = new BigUint64Array(workingData);
+
+/**
+ * @returns a 64-bit float value via interpreting the input as the bit
+ * representation as a 64-bit integer
+ */
+export function reinterpretU64AsF64(input: bigint): number {
+ workingDataU64[0] = input;
+ return workingDataF64[0];
+}
+
+/**
+ * @returns the 64-bit integer bit representation of 64-bit float value
+ */
+export function reinterpretF64AsU64(input: number): bigint {
+ workingDataF64[0] = input;
+ return workingDataU64[0];
+}
+
+// Encoding to u32s, instead of BigInt, for serialization
+export function reinterpretF64AsU32s(f64: number): [number, number] {
+ workingDataF64[0] = f64;
+ return [workingDataU32[0], workingDataU32[1]];
+}
+
+// De-encoding from u32s, instead of BigInt, for serialization
+export function reinterpretU32sAsF64(u32s: [number, number]): number {
+ workingDataU32[0] = u32s[0];
+ workingDataU32[1] = u32s[1];
+ return workingDataF64[0];
+}
+
+/**
+ * @returns a number representing the u32 interpretation
+ * of the bits of a number assumed to be an f32 value.
+ */
+export function reinterpretF32AsU32(f32: number): number {
+ workingDataF32[0] = f32;
+ return workingDataU32[0];
+}
+
+/**
+ * @returns a number representing the i32 interpretation
+ * of the bits of a number assumed to be an f32 value.
+ */
+export function reinterpretF32AsI32(f32: number): number {
+ workingDataF32[0] = f32;
+ return workingDataI32[0];
+}
+
+/**
+ * @returns a number representing the f32 interpretation
+ * of the bits of a number assumed to be a u32 value.
+ */
+export function reinterpretU32AsF32(u32: number): number {
+ workingDataU32[0] = u32;
+ return workingDataF32[0];
+}
+
+/**
+ * @returns a number representing the i32 interpretation
+ * of the bits of a number assumed to be a u32 value.
+ */
+export function reinterpretU32AsI32(u32: number): number {
+ workingDataU32[0] = u32;
+ return workingDataI32[0];
+}
+
+/**
+ * @returns a number representing the u32 interpretation
+ * of the bits of a number assumed to be an i32 value.
+ */
+export function reinterpretI32AsU32(i32: number): number {
+ workingDataI32[0] = i32;
+ return workingDataU32[0];
+}
+
+/**
+ * @returns a number representing the f32 interpretation
+ * of the bits of a number assumed to be an i32 value.
+ */
+export function reinterpretI32AsF32(i32: number): number {
+ workingDataI32[0] = i32;
+ return workingDataF32[0];
+}
+
+/**
+ * @returns a number representing the u16 interpretation
+ * of the bits of a number assumed to be an f16 value.
+ */
+export function reinterpretF16AsU16(f16: number): number {
+ workingDataF16[0] = f16;
+ return workingDataU16[0];
+}
+
+/**
+ * @returns a number representing the f16 interpretation
+ * of the bits of a number assumed to be an u16 value.
+ */
+export function reinterpretU16AsF16(u16: number): number {
+ workingDataU16[0] = u16;
+ return workingDataF16[0];
+}
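+
+// Illustrative behaviour, e.g.:
+//   reinterpretF32AsU32(1.0)         // -> 0x3f800000
+//   reinterpretU32AsF32(0x3f800000)  // -> 1.0
+//   reinterpretF64AsU64(1.0)         // -> 0x3ff0000000000000n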
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/shader.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/shader.ts
new file mode 100644
index 0000000000..2a09061527
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/shader.ts
@@ -0,0 +1,196 @@
+import { unreachable } from '../../common/util/util.js';
+
+export const kDefaultVertexShaderCode = `
+@vertex fn main() -> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+}
+`;
+
+export const kDefaultFragmentShaderCode = `
+@fragment fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(1.0, 1.0, 1.0, 1.0);
+}`;
+
+const kPlainTypeInfo = {
+ i32: {
+ suffix: '',
+ fractionDigits: 0,
+ },
+ u32: {
+ suffix: 'u',
+ fractionDigits: 0,
+ },
+ f32: {
+ suffix: '',
+ fractionDigits: 4,
+ },
+};
+
+/**
+ * @param sampleType the sample type of the texture format
+ * @returns the plain type compatible with the sampleType
+ */
+export function getPlainTypeInfo(sampleType: GPUTextureSampleType): keyof typeof kPlainTypeInfo {
+ switch (sampleType) {
+ case 'sint':
+ return 'i32';
+ case 'uint':
+ return 'u32';
+ case 'float':
+ case 'unfilterable-float':
+ case 'depth':
+ return 'f32';
+ default:
+ unreachable();
+ }
+}
+
+/**
+ * Build a fragment shader based on output value and types
+ * e.g. write to color target 0 a `vec4<f32>(1.0, 0.0, 1.0, 1.0)` and color target 2 a `vec2<u32>(1, 2)`
+ * ```
+ * outputs: [
+ * {
+ * values: [1, 0, 1, 1],
+ * plainType: 'f32',
+ * componentCount: 4,
+ * },
+ * null,
+ * {
+ * values: [1, 2],
+ * plainType: 'u32',
+ * componentCount: 2,
+ * },
+ * ]
+ * ```
+ *
+ * return:
+ * ```
+ * struct Outputs {
+ * @location(0) o1 : vec4<f32>,
+ * @location(2) o3 : vec2<u32>,
+ * }
+ * @fragment fn main() -> Outputs {
+ * return Outputs(vec4<f32>(1.0, 0.0, 1.0, 1.0), vec2<u32>(1, 2));
+ * }
+ * ```
+ *
+ * If fragDepth is given, there will be an extra @builtin(frag_depth) output with the specified value assigned.
+ *
+ * @param outputs the shader outputs for each location attribute
+ * @param fragDepth the shader outputs frag_depth value (optional)
+ * @returns the fragment shader string
+ */
+export function getFragmentShaderCodeWithOutput(
+ outputs: ({
+ values: readonly number[];
+ plainType: 'i32' | 'u32' | 'f32';
+ componentCount: number;
+ } | null)[],
+ fragDepth: { value: number } | null = null
+): string {
+ if (outputs.length === 0) {
+ if (fragDepth) {
+ return `
+ @fragment fn main() -> @builtin(frag_depth) f32 {
+ return ${fragDepth.value.toFixed(kPlainTypeInfo['f32'].fractionDigits)};
+ }`;
+ }
+ return `
+ @fragment fn main() {
+ }`;
+ }
+
+ const resultStrings = [] as string[];
+ let outputStructString = '';
+
+ if (fragDepth) {
+ resultStrings.push(`${fragDepth.value.toFixed(kPlainTypeInfo['f32'].fractionDigits)}`);
+ outputStructString += `@builtin(frag_depth) depth_out: f32,\n`;
+ }
+
+ for (let i = 0; i < outputs.length; i++) {
+ const o = outputs[i];
+ if (o === null) {
+ continue;
+ }
+
+ const plainType = o.plainType;
+ const { suffix, fractionDigits } = kPlainTypeInfo[plainType];
+
+ let outputType;
+ const v = o.values.map(n => n.toFixed(fractionDigits));
+ switch (o.componentCount) {
+ case 1:
+ outputType = plainType;
+ resultStrings.push(`${v[0]}${suffix}`);
+ break;
+ case 2:
+ outputType = `vec2<${plainType}>`;
+ resultStrings.push(`${outputType}(${v[0]}${suffix}, ${v[1]}${suffix})`);
+ break;
+ case 3:
+ outputType = `vec3<${plainType}>`;
+ resultStrings.push(`${outputType}(${v[0]}${suffix}, ${v[1]}${suffix}, ${v[2]}${suffix})`);
+ break;
+ case 4:
+ outputType = `vec4<${plainType}>`;
+ resultStrings.push(
+ `${outputType}(${v[0]}${suffix}, ${v[1]}${suffix}, ${v[2]}${suffix}, ${v[3]}${suffix})`
+ );
+ break;
+ default:
+ unreachable();
+ }
+
+ outputStructString += `@location(${i}) o${i} : ${outputType},\n`;
+ }
+
+ return `
+ struct Outputs {
+ ${outputStructString}
+ }
+
+ @fragment fn main() -> Outputs {
+ return Outputs(${resultStrings.join(',')});
+ }`;
+}
+
+export type TShaderStage = 'compute' | 'vertex' | 'fragment' | 'empty';
+
+/**
+ * Return a trivial placeholder shader of the given stage with the given entry point
+ * @param shaderStage
+ * @param entryPoint
+ * @returns the shader string
+ */
+export function getShaderWithEntryPoint(shaderStage: TShaderStage, entryPoint: string): string {
+ let code;
+ switch (shaderStage) {
+ case 'compute': {
+ code = `@compute @workgroup_size(1) fn ${entryPoint}() {}`;
+ break;
+ }
+ case 'vertex': {
+ code = `
+ @vertex fn ${entryPoint}() -> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ }`;
+ break;
+ }
+ case 'fragment': {
+ code = `
+ @fragment fn ${entryPoint}() -> @location(0) vec4<f32> {
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }`;
+ break;
+ }
+ case 'empty':
+ default: {
+ code = '';
+ break;
+ }
+ }
+ return code;
+}
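+
+// Illustrative usage sketch (assuming a GPUDevice in scope as `device`):
+//   const code = getShaderWithEntryPoint('compute', 'main');
+//   // -> '@compute @workgroup_size(1) fn main() {}'
+//   const module = device.createShaderModule({ code });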
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture.ts
new file mode 100644
index 0000000000..48ff1430b5
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture.ts
@@ -0,0 +1,81 @@
+import { assert } from '../../common/util/util.js';
+
+import { getTextureCopyLayout } from './texture/layout.js';
+import { TexelView } from './texture/texel_view.js';
+import { reifyExtent3D } from './unions.js';
+
+/**
+ * Creates a mipmapped texture where each mipmap level's (`i`) content is
+ * from `texelViews[i]`.
+ */
+export function createTextureFromTexelViews(
+ device: GPUDevice,
+ texelViews: TexelView[],
+ desc: Omit<GPUTextureDescriptor, 'format'>
+): GPUTexture {
+ // All texel views must be the same format for mipmaps.
+ assert(texelViews.length > 0 && texelViews.every(e => e.format === texelViews[0].format));
+ const format = texelViews[0].format;
+ const { width, height, depthOrArrayLayers } = reifyExtent3D(desc.size);
+
+ // Create the texture and then initialize each mipmap level separately.
+ const texture = device.createTexture({
+ ...desc,
+ format: texelViews[0].format,
+ usage: desc.usage | GPUTextureUsage.COPY_DST,
+ mipLevelCount: texelViews.length,
+ });
+
+ // Copy the texel view into each mip level layer.
+ const commandEncoder = device.createCommandEncoder();
+ const stagingBuffers = [];
+ for (let mipLevel = 0; mipLevel < texelViews.length; mipLevel++) {
+ const {
+ bytesPerRow,
+ mipSize: [mipWidth, mipHeight, mipDepthOrArray],
+ } = getTextureCopyLayout(format, desc.dimension ?? '2d', [width, height, depthOrArrayLayers], {
+ mipLevel,
+ });
+
+ // Create a staging buffer to upload the texture mip level contents.
+ const stagingBuffer = device.createBuffer({
+ mappedAtCreation: true,
+ size: bytesPerRow * mipHeight * mipDepthOrArray,
+ usage: GPUBufferUsage.COPY_SRC,
+ });
+ stagingBuffers.push(stagingBuffer);
+
+ // Write the texels into the staging buffer.
+ texelViews[mipLevel].writeTextureData(new Uint8Array(stagingBuffer.getMappedRange()), {
+ bytesPerRow,
+ rowsPerImage: mipHeight,
+ subrectOrigin: [0, 0, 0],
+ subrectSize: [mipWidth, mipHeight, mipDepthOrArray],
+ });
+ stagingBuffer.unmap();
+
+ // Copy from the staging buffer into the texture.
+ commandEncoder.copyBufferToTexture(
+ { buffer: stagingBuffer, bytesPerRow },
+ { texture, mipLevel },
+ [mipWidth, mipHeight, mipDepthOrArray]
+ );
+ }
+ device.queue.submit([commandEncoder.finish()]);
+
+ // Cleanup the staging buffers.
+ stagingBuffers.forEach(value => value.destroy());
+
+ return texture;
+}
+
+/**
+ * Creates a 1 mip level texture with the contents of a TexelView.
+ */
+export function createTextureFromTexelView(
+ device: GPUDevice,
+ texelView: TexelView,
+ desc: Omit<GPUTextureDescriptor, 'format'>
+): GPUTexture {
+ return createTextureFromTexelViews(device, [texelView], desc);
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/base.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/base.ts
new file mode 100644
index 0000000000..67b4fc7156
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/base.ts
@@ -0,0 +1,243 @@
+import { assert, unreachable } from '../../../common/util/util.js';
+import { kTextureFormatInfo } from '../../format_info.js';
+import { align } from '../../util/math.js';
+import { reifyExtent3D } from '../../util/unions.js';
+
+/**
+ * Compute the maximum mip level count allowed for a given texture size and texture dimension.
+ */
+export function maxMipLevelCount({
+ size,
+ dimension = '2d',
+}: {
+ readonly size: Readonly<GPUExtent3DDict> | readonly number[];
+ readonly dimension?: GPUTextureDimension;
+}): number {
+ const sizeDict = reifyExtent3D(size);
+
+ let maxMippedDimension = 0;
+ switch (dimension) {
+ case '1d':
+ maxMippedDimension = 1; // No mipmaps allowed.
+ break;
+ case '2d':
+ maxMippedDimension = Math.max(sizeDict.width, sizeDict.height);
+ break;
+ case '3d':
+ maxMippedDimension = Math.max(sizeDict.width, sizeDict.height, sizeDict.depthOrArrayLayers);
+ break;
+ }
+
+ return Math.floor(Math.log2(maxMippedDimension)) + 1;
+}
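+
+// Illustrative behaviour, e.g.:
+//   maxMipLevelCount({ size: [256, 128, 1] })                  // -> 9
+//   maxMipLevelCount({ size: [16, 16, 16], dimension: '3d' })  // -> 5
+//   maxMipLevelCount({ size: [64], dimension: '1d' })          // -> 1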
+
+/**
+ * Compute the "physical size" of a mip level: the size of the level, rounded up to a
+ * multiple of the texel block size.
+ */
+export function physicalMipSize(
+ baseSize: Required<GPUExtent3DDict>,
+ format: GPUTextureFormat,
+ dimension: GPUTextureDimension,
+ level: number
+): Required<GPUExtent3DDict> {
+ switch (dimension) {
+ case '1d':
+ assert(level === 0, '1d textures cannot be mipmapped');
+ assert(baseSize.height === 1 && baseSize.depthOrArrayLayers === 1, '1d texture not Wx1x1');
+ return { width: baseSize.width, height: 1, depthOrArrayLayers: 1 };
+
+ case '2d': {
+ assert(
+ Math.max(baseSize.width, baseSize.height) >> level > 0,
+ () => `level (${level}) too large for base size (${baseSize.width}x${baseSize.height})`
+ );
+
+ const virtualWidthAtLevel = Math.max(baseSize.width >> level, 1);
+ const virtualHeightAtLevel = Math.max(baseSize.height >> level, 1);
+ const physicalWidthAtLevel = align(
+ virtualWidthAtLevel,
+ kTextureFormatInfo[format].blockWidth
+ );
+ const physicalHeightAtLevel = align(
+ virtualHeightAtLevel,
+ kTextureFormatInfo[format].blockHeight
+ );
+ return {
+ width: physicalWidthAtLevel,
+ height: physicalHeightAtLevel,
+ depthOrArrayLayers: baseSize.depthOrArrayLayers,
+ };
+ }
+
+ case '3d': {
+ assert(
+ Math.max(baseSize.width, baseSize.height, baseSize.depthOrArrayLayers) >> level > 0,
+ () =>
+ `level (${level}) too large for base size (${baseSize.width}x${baseSize.height}x${baseSize.depthOrArrayLayers})`
+ );
+ assert(
+ kTextureFormatInfo[format].blockWidth === 1 && kTextureFormatInfo[format].blockHeight === 1,
+ 'not implemented for 3d block formats'
+ );
+ return {
+ width: Math.max(baseSize.width >> level, 1),
+ height: Math.max(baseSize.height >> level, 1),
+ depthOrArrayLayers: Math.max(baseSize.depthOrArrayLayers >> level, 1),
+ };
+ }
+ }
+}
+
+/**
+ * Compute the "physical size" of a mip level of a texture, returned as a
+ * [width, height, depthOrArrayLayers] tuple.
+ */
+export function physicalMipSizeFromTexture(
+ texture: GPUTexture,
+ mipLevel: number
+): [number, number, number] {
+ const size = physicalMipSize(texture, texture.format, texture.dimension, mipLevel);
+ return [size.width, size.height, size.depthOrArrayLayers];
+}
+
+/**
+ * Compute the "virtual size" of a mip level of a texture (not accounting for texel block rounding).
+ *
+ * MAINTENANCE_TODO: Change input/output to Required<GPUExtent3DDict> for consistency.
+ */
+export function virtualMipSize(
+ dimension: GPUTextureDimension,
+ size: readonly [number, number, number],
+ mipLevel: number
+): [number, number, number] {
+ const shiftMinOne = (n: number) => Math.max(1, n >> mipLevel);
+ switch (dimension) {
+ case '1d':
+ assert(size[2] === 1);
+ return [shiftMinOne(size[0]), size[1], size[2]];
+ case '2d':
+ return [shiftMinOne(size[0]), shiftMinOne(size[1]), size[2]];
+ case '3d':
+ return [shiftMinOne(size[0]), shiftMinOne(size[1]), shiftMinOne(size[2])];
+ default:
+ unreachable();
+ }
+}
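+
+// Illustrative behaviour, e.g.:
+//   virtualMipSize('2d', [256, 128, 1], 3)  // -> [32, 16, 1]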
+
+/**
+ * Get texture dimension from view dimension in order to create a compatible texture for a given
+ * view dimension.
+ */
+export function getTextureDimensionFromView(viewDimension: GPUTextureViewDimension) {
+ switch (viewDimension) {
+ case '1d':
+ return '1d';
+ case '2d':
+ case '2d-array':
+ case 'cube':
+ case 'cube-array':
+ return '2d';
+ case '3d':
+ return '3d';
+ default:
+ unreachable();
+ }
+}
+
+/** Returns the possible valid view dimensions for a given texture dimension. */
+export function viewDimensionsForTextureDimension(textureDimension: GPUTextureDimension) {
+ switch (textureDimension) {
+ case '1d':
+ return ['1d'] as const;
+ case '2d':
+ return ['2d', '2d-array', 'cube', 'cube-array'] as const;
+ case '3d':
+ return ['3d'] as const;
+ }
+}
+
+/** Returns the default view dimension for a given texture descriptor. */
+export function defaultViewDimensionsForTexture(textureDescriptor: Readonly<GPUTextureDescriptor>) {
+ switch (textureDescriptor.dimension) {
+ case '1d':
+ return '1d';
+ case '2d': {
+ const sizeDict = reifyExtent3D(textureDescriptor.size);
+ return sizeDict.depthOrArrayLayers > 1 ? '2d-array' : '2d';
+ }
+ case '3d':
+ return '3d';
+ default:
+ unreachable();
+ }
+}
+
+/** Reifies the optional fields of `GPUTextureDescriptor`.
+ * MAINTENANCE_TODO: viewFormats should not be omitted here, but it seems likely that the
+ * @webgpu/types definition will have to change before we can include it again.
+ */
+export function reifyTextureDescriptor(
+ desc: Readonly<GPUTextureDescriptor>
+): Required<Omit<GPUTextureDescriptor, 'label' | 'viewFormats'>> {
+ return { dimension: '2d' as const, mipLevelCount: 1, sampleCount: 1, ...desc };
+}
+
+/** Reifies the optional fields of `GPUTextureViewDescriptor` (given a `GPUTextureDescriptor`). */
+export function reifyTextureViewDescriptor(
+ textureDescriptor: Readonly<GPUTextureDescriptor>,
+ view: Readonly<GPUTextureViewDescriptor>
+): Required<Omit<GPUTextureViewDescriptor, 'label'>> {
+ const texture = reifyTextureDescriptor(textureDescriptor);
+
+ // IDL defaulting
+
+ const baseMipLevel = view.baseMipLevel ?? 0;
+ const baseArrayLayer = view.baseArrayLayer ?? 0;
+ const aspect = view.aspect ?? 'all';
+
+ // Spec defaulting
+
+ const format = view.format ?? texture.format;
+ const mipLevelCount = view.mipLevelCount ?? texture.mipLevelCount - baseMipLevel;
+ const dimension = view.dimension ?? defaultViewDimensionsForTexture(texture);
+
+ let arrayLayerCount = view.arrayLayerCount;
+ if (arrayLayerCount === undefined) {
+ if (dimension === '2d-array' || dimension === 'cube-array') {
+ arrayLayerCount = reifyExtent3D(texture.size).depthOrArrayLayers - baseArrayLayer;
+ } else if (dimension === 'cube') {
+ arrayLayerCount = 6;
+ } else {
+ arrayLayerCount = 1;
+ }
+ }
+
+ return {
+ format,
+ dimension,
+ aspect,
+ baseMipLevel,
+ mipLevelCount,
+ baseArrayLayer,
+ arrayLayerCount,
+ };
+}
+
+/**
+ * Get generator of all the coordinates in a subrect.
+ * @param subrectOrigin - Subrect origin
+ * @param subrectSize - Subrect size
+ */
+export function* fullSubrectCoordinates(
+ subrectOrigin: Required<GPUOrigin3DDict>,
+ subrectSize: Required<GPUExtent3DDict>
+): Generator<Required<GPUOrigin3DDict>> {
+ for (let z = subrectOrigin.z; z < subrectOrigin.z + subrectSize.depthOrArrayLayers; ++z) {
+ for (let y = subrectOrigin.y; y < subrectOrigin.y + subrectSize.height; ++y) {
+ for (let x = subrectOrigin.x; x < subrectOrigin.x + subrectSize.width; ++x) {
+ yield { x, y, z };
+ }
+ }
+ }
+}
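+
+// Illustrative usage sketch:
+//   for (const { x, y, z } of fullSubrectCoordinates(
+//     { x: 0, y: 0, z: 0 },
+//     { width: 2, height: 2, depthOrArrayLayers: 1 }
+//   )) {
+//     // visits (0,0,0), (1,0,0), (0,1,0), (1,1,0)
+//   }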
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/data_generation.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/data_generation.ts
new file mode 100644
index 0000000000..7ad7d30e08
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/data_generation.ts
@@ -0,0 +1,83 @@
+/**
+ * A helper class that generates ranges of dummy data for buffer or texture operations
+ * efficiently. Tries to minimize allocations and data updates.
+ */
+export class DataArrayGenerator {
+ private dataBuffer = new Uint8Array(256);
+
+ private lastOffset = 0;
+ private lastStart = 0;
+ private lastByteSize = 0;
+
+ /** Find the nearest power of two greater than or equal to the input value. */
+ private nextPowerOfTwo(value: number) {
+ return 1 << (32 - Math.clz32(value - 1));
+ }
+
+ private generateData(byteSize: number, start: number = 0, offset: number = 0) {
+ const prevSize = this.dataBuffer.length;
+
+ if (prevSize < byteSize) {
+ // If the requested data is larger than the allocated buffer, reallocate it to a buffer large
+ // enough to handle the new request.
+ const newData = new Uint8Array(this.nextPowerOfTwo(byteSize));
+
+ if (this.lastOffset === offset && this.lastStart === start && this.lastByteSize) {
+ // Do a fast copy of any previous data that was generated.
+ newData.set(this.dataBuffer);
+ }
+
+ this.dataBuffer = newData;
+ } else if (this.lastOffset < offset) {
+ // Ensure all values up to the offset are zeroed out.
+ this.dataBuffer.fill(0, this.lastOffset, offset);
+ }
+
+ // If the offset or start values have changed, the whole data range needs to be regenerated.
+ if (this.lastOffset !== offset || this.lastStart !== start) {
+ this.lastByteSize = 0;
+ }
+
+ // Generate any new values that are required
+ if (this.lastByteSize < byteSize) {
+ for (let i = this.lastByteSize; i < byteSize - offset; ++i) {
+ this.dataBuffer[i + offset] = ((i ** 3 + i + start) % 251) + 1; // Ensure data is always non-zero
+ }
+
+ this.lastOffset = offset;
+ this.lastStart = start;
+ this.lastByteSize = byteSize;
+ }
+ }
+
+ /**
+ * Returns a new view into the generated data that's the correct length. Because this is a view,
+ * previously returned views from the same generator will have their values overwritten as well.
+ * @param {number} byteSize - Number of bytes the returned view should contain.
+ * @param {number} [start] - The value of the first element generated in the view.
+ * @param {number} [offset] - Offset of the generated data within the view. Preceding values will be 0.
+ * @returns {Uint8Array} A new Uint8Array view into the generated data.
+ */
+ generateView(byteSize: number, start: number = 0, offset: number = 0): Uint8Array {
+ this.generateData(byteSize, start, offset);
+
+ if (this.dataBuffer.length === byteSize) {
+ return this.dataBuffer;
+ }
+ return new Uint8Array(this.dataBuffer.buffer, 0, byteSize);
+ }
+
+ /**
+ * Returns a copy of the generated data. Note that this still changes the underlying buffer, so
+ * any previously generated views will still be overwritten, but the returned copy won't reflect
+ * future generate* calls.
+ * @param {number} byteSize - Number of bytes the returned array should contain.
+ * @param {number} [start] - The value of the first element generated in the view.
+ * @param {number} [offset] - Offset of the generated data within the view. Preceding values will be 0.
+ * @returns {Uint8Array} A new Uint8Array copy of the generated data.
+ */
+ generateAndCopyView(byteSize: number, start: number = 0, offset: number = 0) {
+ this.generateData(byteSize, start, offset);
+ return this.dataBuffer.slice(0, byteSize);
+ }
+}
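+
+// Illustrative usage sketch:
+//   const gen = new DataArrayGenerator();
+//   const view = gen.generateView(64);        // 64 bytes of deterministic, non-zero data
+//   const copy = gen.generateAndCopyView(64); // an independent copy of the same data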
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/layout.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/layout.ts
new file mode 100644
index 0000000000..8e6b564da6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/layout.ts
@@ -0,0 +1,371 @@
+import { assert, memcpy } from '../../../common/util/util.js';
+import {
+ kTextureFormatInfo,
+ resolvePerAspectFormat,
+ SizedTextureFormat,
+ EncodableTextureFormat,
+} from '../../format_info.js';
+import { align } from '../math.js';
+import { reifyExtent3D } from '../unions.js';
+
+import { physicalMipSize, virtualMipSize } from './base.js';
+
+/** The minimum `bytesPerRow` alignment, per spec. */
+export const kBytesPerRowAlignment = 256;
+/** The minimum buffer copy alignment, per spec. */
+export const kBufferCopyAlignment = 4;
+
+/**
+ * Overridable layout options for {@link getTextureCopyLayout}.
+ */
+export interface LayoutOptions {
+ mipLevel: number;
+ bytesPerRow?: number;
+ rowsPerImage?: number;
+ aspect?: GPUTextureAspect;
+}
+
+const kDefaultLayoutOptions = {
+ mipLevel: 0,
+ bytesPerRow: undefined,
+ rowsPerImage: undefined,
+ aspect: 'all' as const,
+};
+
+/** The info returned by {@link getTextureSubCopyLayout}. */
+export interface TextureSubCopyLayout {
+ bytesPerBlock: number;
+ byteLength: number;
+ /** Number of bytes in each row, not accounting for {@link kBytesPerRowAlignment}. */
+ minBytesPerRow: number;
+ /**
+ * Actual value of bytesPerRow, defaulting to `align(minBytesPerRow, kBytesPerRowAlignment)`
+ * if not overridden.
+ */
+ bytesPerRow: number;
+ /** Actual value of rowsPerImage, defaulting to `mipSize[1]` if not overridden. */
+ rowsPerImage: number;
+}
+
+/** The info returned by {@link getTextureCopyLayout}. */
+export interface TextureCopyLayout extends TextureSubCopyLayout {
+ mipSize: [number, number, number];
+}
+
+/**
+ * Computes layout information for a copy of the whole subresource at `mipLevel` of a GPUTexture
+ * of size `baseSize` with the provided `format` and `dimension`.
+ *
+ * Computes default values for `bytesPerRow` and `rowsPerImage` if not specified.
+ *
+ * MAINTENANCE_TODO: Change input/output to Required<GPUExtent3DDict> for consistency.
+ */
+export function getTextureCopyLayout(
+ format: GPUTextureFormat,
+ dimension: GPUTextureDimension,
+ baseSize: readonly [number, number, number],
+ { mipLevel, bytesPerRow, rowsPerImage, aspect }: LayoutOptions = kDefaultLayoutOptions
+): TextureCopyLayout {
+ const mipSize = physicalMipSize(
+ { width: baseSize[0], height: baseSize[1], depthOrArrayLayers: baseSize[2] },
+ format,
+ dimension,
+ mipLevel
+ );
+
+ const layout = getTextureSubCopyLayout(format, mipSize, { bytesPerRow, rowsPerImage, aspect });
+ return { ...layout, mipSize: [mipSize.width, mipSize.height, mipSize.depthOrArrayLayers] };
+}
+
+/**
+ * Computes layout information for a copy of size `copySize` to/from a GPUTexture with the provided
+ * `format`.
+ *
+ * Computes default values for `bytesPerRow` and `rowsPerImage` if not specified.
+ */
+export function getTextureSubCopyLayout(
+ format: GPUTextureFormat,
+ copySize: GPUExtent3D,
+ {
+ bytesPerRow,
+ rowsPerImage,
+ aspect = 'all' as const,
+ }: {
+ readonly bytesPerRow?: number;
+ readonly rowsPerImage?: number;
+ readonly aspect?: GPUTextureAspect;
+ } = {}
+): TextureSubCopyLayout {
+ format = resolvePerAspectFormat(format, aspect);
+ const { blockWidth, blockHeight, bytesPerBlock } = kTextureFormatInfo[format];
+ assert(bytesPerBlock !== undefined);
+
+ const copySize_ = reifyExtent3D(copySize);
+ assert(
+ copySize_.width > 0 && copySize_.height > 0 && copySize_.depthOrArrayLayers > 0,
+ 'not implemented for empty copySize'
+ );
+ assert(
+ copySize_.width % blockWidth === 0 && copySize_.height % blockHeight === 0,
+ () =>
+ `copySize (${copySize_.width},${copySize_.height}) must be a multiple of the block size (${blockWidth},${blockHeight})`
+ );
+ const copySizeBlocks = {
+ width: copySize_.width / blockWidth,
+ height: copySize_.height / blockHeight,
+ depthOrArrayLayers: copySize_.depthOrArrayLayers,
+ };
+
+ const minBytesPerRow = copySizeBlocks.width * bytesPerBlock;
+ const alignedMinBytesPerRow = align(minBytesPerRow, kBytesPerRowAlignment);
+ if (bytesPerRow !== undefined) {
+ assert(bytesPerRow >= alignedMinBytesPerRow);
+ assert(bytesPerRow % kBytesPerRowAlignment === 0);
+ } else {
+ bytesPerRow = alignedMinBytesPerRow;
+ }
+
+ if (rowsPerImage !== undefined) {
+ assert(rowsPerImage >= copySizeBlocks.height);
+ } else {
+ rowsPerImage = copySizeBlocks.height;
+ }
+
+ const bytesPerSlice = bytesPerRow * rowsPerImage;
+ const sliceSize =
+ bytesPerRow * (copySizeBlocks.height - 1) + bytesPerBlock * copySizeBlocks.width;
+ const byteLength = bytesPerSlice * (copySizeBlocks.depthOrArrayLayers - 1) + sliceSize;
+
+ return {
+ bytesPerBlock,
+ byteLength: align(byteLength, kBufferCopyAlignment),
+ minBytesPerRow,
+ bytesPerRow,
+ rowsPerImage,
+ };
+}
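+// Illustrative example: for an 'rgba8unorm' (4 bytes per texel) copy of 7x4x1 texels, the layout
+// below has minBytesPerRow = 28, bytesPerRow = 256 (aligned), rowsPerImage = 4, and
+// byteLength = 256 * 3 + 28 = 796.
+//   const layout = getTextureSubCopyLayout('rgba8unorm', [7, 4, 1]);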
+
+/**
+ * Fill an ArrayBuffer with the linear-memory representation of a solid-color
+ * texture where every texel has the byte value `texelValue`.
+ * Preserves the contents of `outputBuffer` which are in "padding" space between image rows.
+ *
+ * Effectively emulates a copyTextureToBuffer from a solid-color texture to a buffer.
+ */
+export function fillTextureDataWithTexelValue(
+ texelValue: ArrayBuffer,
+ format: EncodableTextureFormat,
+ dimension: GPUTextureDimension,
+ outputBuffer: ArrayBuffer,
+ size: [number, number, number],
+ options: LayoutOptions = kDefaultLayoutOptions
+): void {
+ const { blockWidth, blockHeight, bytesPerBlock } = kTextureFormatInfo[format];
+ // Block formats are not handled correctly below.
+ assert(blockWidth === 1);
+ assert(blockHeight === 1);
+
+ assert(bytesPerBlock === texelValue.byteLength, 'texelValue must be of size bytesPerBlock');
+
+ const { byteLength, rowsPerImage, bytesPerRow } = getTextureCopyLayout(
+ format,
+ dimension,
+ size,
+ options
+ );
+
+ assert(byteLength <= outputBuffer.byteLength);
+
+ const mipSize = virtualMipSize(dimension, size, options.mipLevel);
+
+ const outputTexelValueBytes = new Uint8Array(outputBuffer);
+ for (let slice = 0; slice < mipSize[2]; ++slice) {
+ for (let row = 0; row < mipSize[1]; row += blockHeight) {
+ for (let col = 0; col < mipSize[0]; col += blockWidth) {
+ const byteOffset =
+ slice * rowsPerImage * bytesPerRow + row * bytesPerRow + col * texelValue.byteLength;
+ memcpy({ src: texelValue }, { dst: outputTexelValueBytes, start: byteOffset });
+ }
+ }
+ }
+}
+
+/**
+ * Create a `COPY_SRC` GPUBuffer containing the linear-memory representation of a solid-color
+ * texture where every texel has the byte value `texelValue`.
+ */
+export function createTextureUploadBuffer(
+ texelValue: ArrayBuffer,
+ device: GPUDevice,
+ format: EncodableTextureFormat,
+ dimension: GPUTextureDimension,
+ size: [number, number, number],
+ options: LayoutOptions = kDefaultLayoutOptions
+): {
+ buffer: GPUBuffer;
+ bytesPerRow: number;
+ rowsPerImage: number;
+} {
+ const { byteLength, bytesPerRow, rowsPerImage, bytesPerBlock } = getTextureCopyLayout(
+ format,
+ dimension,
+ size,
+ options
+ );
+
+ const buffer = device.createBuffer({
+ mappedAtCreation: true,
+ size: byteLength,
+ usage: GPUBufferUsage.COPY_SRC,
+ });
+ const mapping = buffer.getMappedRange();
+
+ assert(texelValue.byteLength === bytesPerBlock);
+ fillTextureDataWithTexelValue(texelValue, format, dimension, mapping, size, options);
+ buffer.unmap();
+
+ return {
+ buffer,
+ bytesPerRow,
+ rowsPerImage,
+ };
+}
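+// Illustrative sketch (assumes a valid GPUDevice named `device`): upload buffer for a solid red
+// rgba8unorm texel value.
+//   const texel = new Uint8Array([0xff, 0x00, 0x00, 0xff]).buffer;
+//   const { buffer, bytesPerRow, rowsPerImage } =
+//     createTextureUploadBuffer(texel, device, 'rgba8unorm', '2d', [4, 4, 1]);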
+
+export type ImageCopyType = 'WriteTexture' | 'CopyB2T' | 'CopyT2B';
+export const kImageCopyTypes: readonly ImageCopyType[] = [
+ 'WriteTexture',
+ 'CopyB2T',
+ 'CopyT2B',
+] as const;
+
+/**
+ * Computes `bytesInACompleteRow` (as defined by the WebGPU spec) for image copies (B2T/T2B/writeTexture).
+ */
+export function bytesInACompleteRow(copyWidth: number, format: SizedTextureFormat): number {
+ const info = kTextureFormatInfo[format];
+ assert(copyWidth % info.blockWidth === 0);
+ return (info.bytesPerBlock * copyWidth) / info.blockWidth;
+}
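+// For example, bytesInACompleteRow(16, 'rgba8unorm') === 64 (16 texels * 4 bytes per block).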
+
+function validateBytesPerRow({
+ bytesPerRow,
+ bytesInLastRow,
+ sizeInBlocks,
+}: {
+ bytesPerRow: number | undefined;
+ bytesInLastRow: number;
+ sizeInBlocks: Required<GPUExtent3DDict>;
+}) {
+ // If specified, layout.bytesPerRow must be greater than or equal to bytesInLastRow.
+ if (bytesPerRow !== undefined && bytesPerRow < bytesInLastRow) {
+ return false;
+ }
+ // If heightInBlocks > 1, layout.bytesPerRow must be specified.
+ // If copyExtent.depthOrArrayLayers > 1, layout.bytesPerRow and layout.rowsPerImage must be specified.
+ if (
+ bytesPerRow === undefined &&
+ (sizeInBlocks.height > 1 || sizeInBlocks.depthOrArrayLayers > 1)
+ ) {
+ return false;
+ }
+ return true;
+}
+
+function validateRowsPerImage({
+ rowsPerImage,
+ sizeInBlocks,
+}: {
+ rowsPerImage: number | undefined;
+ sizeInBlocks: Required<GPUExtent3DDict>;
+}) {
+ // If specified, layout.rowsPerImage must be greater than or equal to heightInBlocks.
+ if (rowsPerImage !== undefined && rowsPerImage < sizeInBlocks.height) {
+ return false;
+ }
+ // If copyExtent.depthOrArrayLayers > 1, layout.bytesPerRow and layout.rowsPerImage must be specified.
+ if (rowsPerImage === undefined && sizeInBlocks.depthOrArrayLayers > 1) {
+ return false;
+ }
+ return true;
+}
+
+interface DataBytesForCopyArgs {
+ layout: GPUImageDataLayout;
+ format: SizedTextureFormat;
+ copySize: Readonly<GPUExtent3DDict> | readonly number[];
+ method: ImageCopyType;
+}
+
+/**
+ * Validate a copy and compute the number of bytes it needs. Throws if the copy is invalid.
+ */
+export function dataBytesForCopyOrFail(args: DataBytesForCopyArgs): number {
+ const { minDataSizeOrOverestimate, copyValid } = dataBytesForCopyOrOverestimate(args);
+ assert(copyValid, 'copy was invalid');
+ return minDataSizeOrOverestimate;
+}
+
+/**
+ * Validate a copy and compute the number of bytes it needs. If the copy is invalid, instead
+ * "conservatively guesses" (overestimates) the number of bytes that could be needed for the copy.
+ * This helps avoid "buffer too small" validation errors when the intent is to test other
+ * validation errors.
+ */
+export function dataBytesForCopyOrOverestimate({
+ layout,
+ format,
+ copySize: copySize_,
+ method,
+}: DataBytesForCopyArgs): { minDataSizeOrOverestimate: number; copyValid: boolean } {
+ const copyExtent = reifyExtent3D(copySize_);
+
+ const info = kTextureFormatInfo[format];
+ assert(copyExtent.width % info.blockWidth === 0);
+ assert(copyExtent.height % info.blockHeight === 0);
+ const sizeInBlocks = {
+ width: copyExtent.width / info.blockWidth,
+ height: copyExtent.height / info.blockHeight,
+ depthOrArrayLayers: copyExtent.depthOrArrayLayers,
+ } as const;
+ const bytesInLastRow = sizeInBlocks.width * info.bytesPerBlock;
+
+ let valid = true;
+ const offset = layout.offset ?? 0;
+ if (method !== 'WriteTexture') {
+ if (offset % info.bytesPerBlock !== 0) valid = false;
+ if (layout.bytesPerRow && layout.bytesPerRow % 256 !== 0) valid = false;
+ }
+
+ let requiredBytesInCopy = 0;
+ {
+ let { bytesPerRow, rowsPerImage } = layout;
+
+ // If bytesPerRow or rowsPerImage is invalid, guess a value for the sake of various tests that
+ // don't actually care about the exact value.
+ // (In particular for validation tests that want to test invalid bytesPerRow or rowsPerImage but
+ // need to make sure the total buffer size is still big enough.)
+ if (!validateBytesPerRow({ bytesPerRow, bytesInLastRow, sizeInBlocks })) {
+ bytesPerRow = undefined;
+ valid = false;
+ }
+ if (!validateRowsPerImage({ rowsPerImage, sizeInBlocks })) {
+ rowsPerImage = undefined;
+ valid = false;
+ }
+ // Pick values for cases when (a) bpr/rpi was invalid or (b) they're validly undefined.
+ bytesPerRow ??= align(info.bytesPerBlock * sizeInBlocks.width, 256);
+ rowsPerImage ??= sizeInBlocks.height;
+
+ if (copyExtent.depthOrArrayLayers > 1) {
+ const bytesPerImage = bytesPerRow * rowsPerImage;
+ const bytesBeforeLastImage = bytesPerImage * (copyExtent.depthOrArrayLayers - 1);
+ requiredBytesInCopy += bytesBeforeLastImage;
+ }
+ if (copyExtent.depthOrArrayLayers > 0) {
+ if (sizeInBlocks.height > 1) requiredBytesInCopy += bytesPerRow * (sizeInBlocks.height - 1);
+ if (sizeInBlocks.height > 0) requiredBytesInCopy += bytesInLastRow;
+ }
+ }
+
+ return { minDataSizeOrOverestimate: offset + requiredBytesInCopy, copyValid: valid };
+}
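+// Illustrative example (parameter values assumed): minimum data size for a WriteTexture-style
+// copy of a 4x4x1 'r8unorm' region with bytesPerRow = 256 and rowsPerImage = 4:
+//   dataBytesForCopyOrFail({
+//     layout: { offset: 0, bytesPerRow: 256, rowsPerImage: 4 },
+//     format: 'r8unorm',
+//     copySize: [4, 4, 1],
+//     method: 'WriteTexture',
+//   }); // 256 * 3 + 4 === 772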
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/subresource.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/subresource.ts
new file mode 100644
index 0000000000..b8d6e3eb21
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/subresource.ts
@@ -0,0 +1,68 @@
+/** A range of indices expressed as `{ begin, count }`. */
+export interface BeginCountRange {
+ begin: number;
+ count: number;
+}
+
+/** A range of indices, expressed as `{ begin, end }`. */
+export interface BeginEndRange {
+ begin: number;
+ end: number;
+}
+
+function endOfRange(r: BeginEndRange | BeginCountRange): number {
+ return 'count' in r ? r.begin + r.count : r.end;
+}
+
+function* rangeAsIterator(r: BeginEndRange | BeginCountRange): Generator<number> {
+ for (let i = r.begin; i < endOfRange(r); ++i) {
+ yield i;
+ }
+}
+
+/**
+ * Represents a range of subresources of a single-plane texture:
+ * a min/max mip level and min/max array layer.
+ */
+export class SubresourceRange {
+ readonly mipRange: BeginEndRange;
+ readonly layerRange: BeginEndRange;
+
+ constructor(subresources: {
+ mipRange: BeginEndRange | BeginCountRange;
+ layerRange: BeginEndRange | BeginCountRange;
+ }) {
+ this.mipRange = {
+ begin: subresources.mipRange.begin,
+ end: endOfRange(subresources.mipRange),
+ };
+ this.layerRange = {
+ begin: subresources.layerRange.begin,
+ end: endOfRange(subresources.layerRange),
+ };
+ }
+
+ /**
+ * Iterates over the "rectangle" of `{ level, layer }` pairs represented by the range.
+ */
+ *each(): Generator<{ level: number; layer: number }> {
+ for (let level = this.mipRange.begin; level < this.mipRange.end; ++level) {
+ for (let layer = this.layerRange.begin; layer < this.layerRange.end; ++layer) {
+ yield { level, layer };
+ }
+ }
+ }
+
+ /**
+ * Iterates over the mip levels represented by the range, each level including an iterator
+ * over the array layers at that level.
+ */
+ *mipLevels(): Generator<{ level: number; layers: Generator<number> }> {
+ for (let level = this.mipRange.begin; level < this.mipRange.end; ++level) {
+ yield {
+ level,
+ layers: rangeAsIterator(this.layerRange),
+ };
+ }
+ }
+}
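+// Illustrative usage: iterate mip levels 1..2 across array layers 0..3 (8 subresources total).
+//   const range = new SubresourceRange({
+//     mipRange: { begin: 1, count: 2 },
+//     layerRange: { begin: 0, end: 4 },
+//   });
+//   for (const { level, layer } of range.each()) { /* ... */ }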
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texel_data.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texel_data.spec.ts
new file mode 100644
index 0000000000..20f075e6f2
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texel_data.spec.ts
@@ -0,0 +1,334 @@
+export const description = 'Test helpers for texel data produce the expected data in the shader';
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { assert } from '../../../common/util/util.js';
+import {
+ kEncodableTextureFormats,
+ kTextureFormatInfo,
+ EncodableTextureFormat,
+} from '../../format_info.js';
+import { GPUTest } from '../../gpu_test.js';
+
+import {
+ kTexelRepresentationInfo,
+ getSingleDataType,
+ getComponentReadbackTraits,
+} from './texel_data.js';
+
+export const g = makeTestGroup(GPUTest);
+
+function doTest(
+ t: GPUTest & {
+ params: {
+ format: EncodableTextureFormat;
+ componentData: {
+ R?: number;
+ G?: number;
+ B?: number;
+ A?: number;
+ };
+ };
+ }
+) {
+ const { format } = t.params;
+ const componentData = t.params.componentData;
+
+ const rep = kTexelRepresentationInfo[format];
+ const texelData = rep.pack(componentData);
+ const texture = t.device.createTexture({
+ format,
+ size: [1, 1, 1],
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.TEXTURE_BINDING,
+ });
+
+ t.device.queue.writeTexture(
+ { texture },
+ texelData,
+ {
+ bytesPerRow: texelData.byteLength,
+ },
+ [1]
+ );
+
+ const { ReadbackTypedArray, shaderType } = getComponentReadbackTraits(getSingleDataType(format));
+
+ const shader = `
+ @group(0) @binding(0) var tex : texture_2d<${shaderType}>;
+
+ struct Output {
+ ${rep.componentOrder.map(C => `result${C} : ${shaderType},`).join('\n')}
+ };
+ @group(0) @binding(1) var<storage, read_write> output : Output;
+
+ @compute @workgroup_size(1)
+ fn main() {
+ var texel : vec4<${shaderType}> = textureLoad(tex, vec2<i32>(0, 0), 0);
+ ${rep.componentOrder.map(C => `output.result${C} = texel.${C.toLowerCase()};`).join('\n')}
+ return;
+ }`;
+
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({
+ code: shader,
+ }),
+ entryPoint: 'main',
+ },
+ });
+
+ const outputBuffer = t.device.createBuffer({
+ size: rep.componentOrder.length * 4,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: texture.createView(),
+ },
+ {
+ binding: 1,
+ resource: {
+ buffer: outputBuffer,
+ },
+ },
+ ],
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(1);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+
+ t.expectGPUBufferValuesEqual(
+ outputBuffer,
+ new ReadbackTypedArray(
+ rep.componentOrder.map(c => {
+ const value = rep.decode(componentData)[c];
+ assert(value !== undefined);
+ return value;
+ })
+ )
+ );
+}
+
+// Make a test parameter by mapping a format and each component to a texel component
+// data value.
+function makeParam(
+ format: EncodableTextureFormat,
+ fn: (bitLength: number, index: number) => number
+) {
+ const rep = kTexelRepresentationInfo[format];
+ return {
+ R: rep.componentInfo.R ? fn(rep.componentInfo.R.bitLength, 0) : undefined,
+ G: rep.componentInfo.G ? fn(rep.componentInfo.G.bitLength, 1) : undefined,
+ B: rep.componentInfo.B ? fn(rep.componentInfo.B.bitLength, 2) : undefined,
+ A: rep.componentInfo.A ? fn(rep.componentInfo.A.bitLength, 3) : undefined,
+ };
+}
+
+g.test('unorm_texel_data_in_shader')
+ .params(u =>
+ u
+ .combine('format', kEncodableTextureFormats)
+ .filter(({ format }) => {
+ const info = kTextureFormatInfo[format];
+ return !!info.color && info.color.copyDst && getSingleDataType(format) === 'unorm';
+ })
+ .beginSubcases()
+ .expand('componentData', ({ format }) => {
+ const max = (bitLength: number) => Math.pow(2, bitLength) - 1;
+ return [
+ // Test extrema
+ makeParam(format, () => 0),
+ makeParam(format, bitLength => max(bitLength)),
+
+ // Test a middle value
+ makeParam(format, bitLength => Math.floor(max(bitLength) / 2)),
+
+ // Test mixed values
+ makeParam(format, (bitLength, i) => {
+ const offset = [0.13, 0.63, 0.42, 0.89];
+ return Math.floor(offset[i] * max(bitLength));
+ }),
+ ];
+ })
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ })
+ .fn(doTest);
+
+g.test('snorm_texel_data_in_shader')
+ .params(u =>
+ u
+ .combine('format', kEncodableTextureFormats)
+ .filter(({ format }) => {
+ const info = kTextureFormatInfo[format];
+ return !!info.color && info.color.copyDst && getSingleDataType(format) === 'snorm';
+ })
+ .beginSubcases()
+ .expand('componentData', ({ format }) => {
+ const max = (bitLength: number) => Math.pow(2, bitLength - 1) - 1;
+ return [
+ // Test extrema
+ makeParam(format, () => 0),
+ makeParam(format, bitLength => max(bitLength)),
+ makeParam(format, bitLength => -max(bitLength)),
+ makeParam(format, bitLength => -max(bitLength) - 1),
+
+ // Test a middle value
+ makeParam(format, bitLength => Math.floor(max(bitLength) / 2)),
+
+ // Test mixed values
+ makeParam(format, (bitLength, i) => {
+ const offset = [0.13, 0.63, 0.42, 0.89];
+ const range = 2 * max(bitLength);
+ return -max(bitLength) + Math.floor(offset[i] * range);
+ }),
+ ];
+ })
+ )
+ .fn(doTest);
+
+g.test('uint_texel_data_in_shader')
+ .params(u =>
+ u
+ .combine('format', kEncodableTextureFormats)
+ .filter(({ format }) => {
+ const info = kTextureFormatInfo[format];
+ return !!info.color && info.color.copyDst && getSingleDataType(format) === 'uint';
+ })
+ .beginSubcases()
+ .expand('componentData', ({ format }) => {
+ const max = (bitLength: number) => Math.pow(2, bitLength) - 1;
+ return [
+ // Test extrema
+ makeParam(format, () => 0),
+ makeParam(format, bitLength => max(bitLength)),
+
+ // Test a middle value
+ makeParam(format, bitLength => Math.floor(max(bitLength) / 2)),
+
+ // Test mixed values
+ makeParam(format, (bitLength, i) => {
+ const offset = [0.13, 0.63, 0.42, 0.89];
+ return Math.floor(offset[i] * max(bitLength));
+ }),
+ ];
+ })
+ )
+ .fn(doTest);
+
+g.test('sint_texel_data_in_shader')
+ .params(u =>
+ u
+ .combine('format', kEncodableTextureFormats)
+ .filter(({ format }) => {
+ const info = kTextureFormatInfo[format];
+ return !!info.color && info.color.copyDst && getSingleDataType(format) === 'sint';
+ })
+ .beginSubcases()
+ .expand('componentData', ({ format }) => {
+ const max = (bitLength: number) => Math.pow(2, bitLength - 1) - 1;
+ return [
+ // Test extrema
+ makeParam(format, () => 0),
+ makeParam(format, bitLength => max(bitLength)),
+ makeParam(format, bitLength => -max(bitLength) - 1),
+
+ // Test a middle value
+ makeParam(format, bitLength => Math.floor(max(bitLength) / 2)),
+
+ // Test mixed values
+ makeParam(format, (bitLength, i) => {
+ const offset = [0.13, 0.63, 0.42, 0.89];
+ const range = 2 * max(bitLength);
+ return -max(bitLength) + Math.floor(offset[i] * range);
+ }),
+ ];
+ })
+ )
+ .fn(doTest);
+
+g.test('float_texel_data_in_shader')
+ .desc(
+ `
+TODO: Test NaN, Infinity, -Infinity [1]`
+ )
+ .params(u =>
+ u
+ .combine('format', kEncodableTextureFormats)
+ .filter(({ format }) => {
+ const info = kTextureFormatInfo[format];
+ return !!info.color && info.color.copyDst && getSingleDataType(format) === 'float';
+ })
+ .beginSubcases()
+ .expand('componentData', ({ format }) => {
+ return [
+ // Test extrema
+ makeParam(format, () => 0),
+
+ // [1]: Test NaN, Infinity, -Infinity
+
+ // Test some values
+ makeParam(format, () => 0.1199951171875),
+ makeParam(format, () => 1.4072265625),
+ makeParam(format, () => 24928),
+ makeParam(format, () => -0.1319580078125),
+ makeParam(format, () => -323.25),
+ makeParam(format, () => -7440),
+
+ // Test mixed values
+ makeParam(format, (bitLength, i) => {
+ return [24896, -0.1319580078125, -323.25, -234.375][i];
+ }),
+ ];
+ })
+ )
+ .fn(doTest);
+
+g.test('ufloat_texel_data_in_shader')
+ .desc(
+ `
+TODO: Test NaN, Infinity [1]`
+ )
+ .params(u =>
+ u
+ .combine('format', kEncodableTextureFormats)
+ .filter(({ format }) => {
+ const info = kTextureFormatInfo[format];
+ return !!info.color && info.color.copyDst && getSingleDataType(format) === 'ufloat';
+ })
+ .beginSubcases()
+ .expand('componentData', ({ format }) => {
+ return [
+ // Test extrema
+ makeParam(format, () => 0),
+
+ // [1]: Test NaN, Infinity
+
+ // Test some values
+ makeParam(format, () => 0.119140625),
+ makeParam(format, () => 1.40625),
+ makeParam(format, () => 24896),
+
+ // Test scattered mixed values
+ makeParam(format, (bitLength, i) => {
+ return [24896, 1.40625, 0.119140625, 0.23095703125][i];
+ }),
+
+ // Test mixed values that are close in magnitude.
+ makeParam(format, (bitLength, i) => {
+ return [0.1337890625, 0.17919921875, 0.119140625, 0.125][i];
+ }),
+ ];
+ })
+ )
+ .fn(doTest);
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texel_data.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texel_data.ts
new file mode 100644
index 0000000000..42490d800b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texel_data.ts
@@ -0,0 +1,980 @@
+import { assert, unreachable } from '../../../common/util/util.js';
+import { UncompressedTextureFormat, EncodableTextureFormat } from '../../format_info.js';
+import {
+ assertInIntegerRange,
+ float32ToFloatBits,
+ float32ToFloat16Bits,
+ floatAsNormalizedInteger,
+ gammaCompress,
+ gammaDecompress,
+ normalizedIntegerAsFloat,
+ packRGB9E5UFloat,
+ floatBitsToNumber,
+ float16BitsToFloat32,
+ floatBitsToNormalULPFromZero,
+ kFloat32Format,
+ kFloat16Format,
+ kUFloat9e5Format,
+ numberToFloat32Bits,
+ float32BitsToNumber,
+ numberToFloatBits,
+ ufloatM9E5BitsToNumber,
+} from '../conversion.js';
+import { clamp, signExtend } from '../math.js';
+
+/** A component of a texture format: R, G, B, A, Depth, or Stencil. */
+export const enum TexelComponent {
+ R = 'R',
+ G = 'G',
+ B = 'B',
+ A = 'A',
+ Depth = 'Depth',
+ Stencil = 'Stencil',
+}
+
+/** Arbitrary data, per component of a texel format. */
+export type PerTexelComponent<T> = { [c in TexelComponent]?: T };
+
+/** How a component is encoded in its bit range of a texel format. */
+export type ComponentDataType = 'uint' | 'sint' | 'unorm' | 'snorm' | 'float' | 'ufloat' | null;
+
+/**
+ * Maps component values to component values
+ * @param {PerTexelComponent<number>} components - The input components.
+ * @returns {PerTexelComponent<number>} The new output components.
+ */
+type ComponentMapFn = (components: PerTexelComponent<number>) => PerTexelComponent<number>;
+
+/**
+ * Packs component values as an ArrayBuffer
+ * @param {PerTexelComponent<number>} components - The input components.
+ * @returns {ArrayBuffer} The packed data.
+ */
+type ComponentPackFn = (components: PerTexelComponent<number>) => ArrayBuffer;
+
+/** Unpacks component values from a Uint8Array */
+type ComponentUnpackFn = (data: Uint8Array) => PerTexelComponent<number>;
+
+/**
+ * Create a PerTexelComponent object filled with the same value for all components.
+ * @param {TexelComponent[]} components - The component names.
+ * @param {T} value - The value to assign to each component.
+ * @returns {PerTexelComponent<T>}
+ */
+function makePerTexelComponent<T>(components: TexelComponent[], value: T): PerTexelComponent<T> {
+ const values: PerTexelComponent<T> = {};
+ for (const c of components) {
+ values[c] = value;
+ }
+ return values;
+}
+
+/**
+ * Create a function which clones a `PerTexelComponent<number>` and then applies the
+ * function `fn` to each component of `components`.
+ * @param {(value: number) => number} fn - The mapping function to apply to component values.
+ * @param {TexelComponent[]} components - The component names.
+ * @returns {ComponentMapFn} The map function which clones the input component values, and applies
+ * `fn` to each component of `components`.
+ */
+function applyEach(fn: (value: number) => number, components: TexelComponent[]): ComponentMapFn {
+ return (values: PerTexelComponent<number>) => {
+ values = Object.assign({}, values);
+ for (const c of components) {
+ assert(values[c] !== undefined);
+ values[c] = fn(values[c]!);
+ }
+ return values;
+ };
+}
+
+/**
+ * A `ComponentMapFn` for encoding sRGB.
+ * @param {PerTexelComponent<number>} components - The input component values.
+ * @returns {PerTexelComponent<number>} Gamma-compressed copy of `components`.
+ */
+const encodeSRGB: ComponentMapFn = components => {
+ assert(
+ components.R !== undefined && components.G !== undefined && components.B !== undefined,
+ 'sRGB requires all of R, G, and B components'
+ );
+ return applyEach(gammaCompress, kRGB)(components);
+};
+
+/**
+ * A `ComponentMapFn` for decoding sRGB.
+ * @param {PerTexelComponent<number>} components - The input component values.
+ * @returns {PerTexelComponent<number>} Gamma-decompressed copy of `components`.
+ */
+const decodeSRGB: ComponentMapFn = components => {
+ components = Object.assign({}, components);
+ assert(
+ components.R !== undefined && components.G !== undefined && components.B !== undefined,
+ 'sRGB requires all of R, G, and B components'
+ );
+ return applyEach(gammaDecompress, kRGB)(components);
+};
+
+/**
+ * Makes a `ComponentMapFn` for clamping values to the specified range.
+ */
+export function makeClampToRange(format: EncodableTextureFormat): ComponentMapFn {
+ const repr = kTexelRepresentationInfo[format];
+ assert(repr.numericRange !== null, 'Format has unknown numericRange');
+ return applyEach(x => clamp(x, repr.numericRange!), repr.componentOrder);
+}
+
+// MAINTENANCE_TODO: Look into exposing this map to the test fixture so that it can be GCed at the
+// end of each test group. That would allow for caching of larger buffers (though it's unclear how
+// often larger buffers are used by packComponents).
+const smallComponentDataViews = new Map();
+function getComponentDataView(byteLength: number): DataView {
+ if (byteLength > 32) {
+ const buffer = new ArrayBuffer(byteLength);
+ return new DataView(buffer);
+ }
+ let dataView = smallComponentDataViews.get(byteLength);
+ if (!dataView) {
+ const buffer = new ArrayBuffer(byteLength);
+ dataView = new DataView(buffer);
+ smallComponentDataViews.set(byteLength, dataView);
+ }
+ return dataView;
+}
+
+/**
+ * Helper function to pack components as an ArrayBuffer.
+ * @param {TexelComponent[]} componentOrder - The order of the component data.
+ * @param {PerTexelComponent<number>} components - The input component values.
+ * @param {number | PerTexelComponent<number>} bitLengths - The length in bits of each component.
+ * If a single number, all components are the same length, otherwise this is a dictionary of
+ * per-component bit lengths.
+ * @param {ComponentDataType | PerTexelComponent<ComponentDataType>} componentDataTypes -
+ * The type of the data in `components`. If a single value, all components have the same value.
+ * Otherwise, this is a dictionary of per-component data types.
+ * @returns {ArrayBuffer} The packed component data.
+ */
+function packComponents(
+ componentOrder: TexelComponent[],
+ components: PerTexelComponent<number>,
+ bitLengths: number | PerTexelComponent<number>,
+ componentDataTypes: ComponentDataType | PerTexelComponent<ComponentDataType>
+): ArrayBuffer {
+ let bitLengthMap;
+ let totalBitLength;
+ if (typeof bitLengths === 'number') {
+ bitLengthMap = makePerTexelComponent(componentOrder, bitLengths);
+ totalBitLength = bitLengths * componentOrder.length;
+ } else {
+ bitLengthMap = bitLengths;
+ totalBitLength = Object.entries(bitLengthMap).reduce((acc, [, value]) => {
+ assert(value !== undefined);
+ return acc + value;
+ }, 0);
+ }
+ assert(totalBitLength % 8 === 0);
+
+ const componentDataTypeMap =
+ typeof componentDataTypes === 'string' || componentDataTypes === null
+ ? makePerTexelComponent(componentOrder, componentDataTypes)
+ : componentDataTypes;
+
+ const dataView = getComponentDataView(totalBitLength / 8);
+ let bitOffset = 0;
+ for (const c of componentOrder) {
+ const value = components[c];
+ const type = componentDataTypeMap[c];
+ const bitLength = bitLengthMap[c];
+ assert(value !== undefined);
+ assert(type !== undefined);
+ assert(bitLength !== undefined);
+
+ const byteOffset = Math.floor(bitOffset / 8);
+ const byteLength = Math.ceil(bitLength / 8);
+ switch (type) {
+ case 'uint':
+ case 'unorm':
+ if (byteOffset === bitOffset / 8 && byteLength === bitLength / 8) {
+ switch (byteLength) {
+ case 1:
+ dataView.setUint8(byteOffset, value);
+ break;
+ case 2:
+ dataView.setUint16(byteOffset, value, true);
+ break;
+ case 4:
+ dataView.setUint32(byteOffset, value, true);
+ break;
+ default:
+ unreachable();
+ }
+ } else {
+ // Packed representations are all 32-bit and use Uint as the data type.
+ // ex.) rg10b11float, rgb10a2unorm
+ switch (dataView.byteLength) {
+ case 4: {
+ const currentValue = dataView.getUint32(0, true);
+
+ let mask = 0xffffffff;
+ const bitsToClearRight = bitOffset;
+ const bitsToClearLeft = 32 - (bitLength + bitOffset);
+
+ mask = (mask >>> bitsToClearRight) << bitsToClearRight;
+ mask = (mask << bitsToClearLeft) >>> bitsToClearLeft;
+
+ const newValue = (currentValue & ~mask) | (value << bitOffset);
+
+ dataView.setUint32(0, newValue, true);
+ break;
+ }
+ default:
+ unreachable();
+ }
+ }
+ break;
+ case 'sint':
+ case 'snorm':
+ assert(byteOffset === bitOffset / 8 && byteLength === bitLength / 8);
+ switch (byteLength) {
+ case 1:
+ dataView.setInt8(byteOffset, value);
+ break;
+ case 2:
+ dataView.setInt16(byteOffset, value, true);
+ break;
+ case 4:
+ dataView.setInt32(byteOffset, value, true);
+ break;
+ default:
+ unreachable();
+ }
+ break;
+ case 'float':
+ assert(byteOffset === bitOffset / 8 && byteLength === bitLength / 8);
+ switch (byteLength) {
+ case 4:
+ dataView.setFloat32(byteOffset, value, true);
+ break;
+ default:
+ unreachable();
+ }
+ break;
+ case 'ufloat':
+ case null:
+ unreachable();
+ }
+
+ bitOffset += bitLength;
+ }
+
+ return dataView.buffer;
+}
+
+/**
+ * Unpack substrings of bits from a Uint8Array, e.g. [8,8,8,8] or [9,9,9,5].
+ */
+function unpackComponentsBits(
+ componentOrder: TexelComponent[],
+ byteView: Uint8Array,
+ bitLengths: number | PerTexelComponent<number>
+): PerTexelComponent<number> {
+ const components = makePerTexelComponent(componentOrder, 0);
+
+ let bitLengthMap;
+ let totalBitLength;
+ if (typeof bitLengths === 'number') {
+ let index = 0;
+ // Optimized cases for when the bit lengths are all the same well-aligned value.
+ switch (bitLengths) {
+ case 8:
+ for (const c of componentOrder) {
+ components[c] = byteView[index++];
+ }
+ return components;
+ case 16: {
+ const shortView = new Uint16Array(byteView.buffer, byteView.byteOffset);
+ for (const c of componentOrder) {
+ components[c] = shortView[index++];
+ }
+ return components;
+ }
+ case 32: {
+ const longView = new Uint32Array(byteView.buffer, byteView.byteOffset);
+ for (const c of componentOrder) {
+ components[c] = longView[index++];
+ }
+ return components;
+ }
+ }
+
+ bitLengthMap = makePerTexelComponent(componentOrder, bitLengths);
+ totalBitLength = bitLengths * componentOrder.length;
+ } else {
+ bitLengthMap = bitLengths;
+ totalBitLength = Object.entries(bitLengthMap).reduce((acc, [, value]) => {
+ assert(value !== undefined);
+ return acc + value;
+ }, 0);
+ }
+
+ assert(totalBitLength % 8 === 0);
+
+ const dataView = new DataView(byteView.buffer, byteView.byteOffset, byteView.byteLength);
+ let bitOffset = 0;
+ for (const c of componentOrder) {
+ const bitLength = bitLengthMap[c];
+ assert(bitLength !== undefined);
+
+ let value: number;
+
+ const byteOffset = Math.floor(bitOffset / 8);
+ const byteLength = Math.ceil(bitLength / 8);
+ if (byteOffset === bitOffset / 8 && byteLength === bitLength / 8) {
+ switch (byteLength) {
+ case 1:
+ value = dataView.getUint8(byteOffset);
+ break;
+ case 2:
+ value = dataView.getUint16(byteOffset, true);
+ break;
+ case 4:
+ value = dataView.getUint32(byteOffset, true);
+ break;
+ default:
+ unreachable();
+ }
+ } else {
+ // Packed representations are all 32-bit and use Uint as the data type.
+ // ex.) rg11b10ufloat, rgb10a2unorm
+ assert(dataView.byteLength === 4);
+ const word = dataView.getUint32(0, true);
+ value = (word >>> bitOffset) & ((1 << bitLength) - 1);
+ }
+
+ bitOffset += bitLength;
+ components[c] = value;
+ }
+
+ return components;
+}
+
+/**
+ * Create an entry in `kTexelRepresentationInfo` for normalized integer texel data with constant
+ * bitlength.
+ * @param {TexelComponent[]} componentOrder - The order of the component data.
+ * @param {number} bitLength - The number of bits in each component.
+ * @param {{signed: boolean; sRGB: boolean}} opt - Boolean flags for `signed` and `sRGB`.
+ */
+function makeNormalizedInfo(
+ componentOrder: TexelComponent[],
+ bitLength: number,
+ opt: { signed: boolean; sRGB: boolean }
+): TexelRepresentationInfo {
+ const encodeNonSRGB = applyEach(
+ (n: number) => floatAsNormalizedInteger(n, bitLength, opt.signed),
+ componentOrder
+ );
+ const decodeNonSRGB = applyEach(
+ (n: number) => normalizedIntegerAsFloat(n, bitLength, opt.signed),
+ componentOrder
+ );
+
+ const numberToBitsNonSRGB = applyEach(
+ n => floatAsNormalizedInteger(n, bitLength, opt.signed),
+ componentOrder
+ );
+ let bitsToNumberNonSRGB: ComponentMapFn;
+ if (opt.signed) {
+ bitsToNumberNonSRGB = applyEach(
+ n => normalizedIntegerAsFloat(signExtend(n, bitLength), bitLength, opt.signed),
+ componentOrder
+ );
+ } else {
+ bitsToNumberNonSRGB = applyEach(
+ n => normalizedIntegerAsFloat(n, bitLength, opt.signed),
+ componentOrder
+ );
+ }
+
+ let encode: ComponentMapFn;
+ let decode: ComponentMapFn;
+ let numberToBits: ComponentMapFn;
+ let bitsToNumber: ComponentMapFn;
+ if (opt.sRGB) {
+ encode = components => encodeNonSRGB(encodeSRGB(components));
+ decode = components => decodeSRGB(decodeNonSRGB(components));
+ numberToBits = components => numberToBitsNonSRGB(encodeSRGB(components));
+ bitsToNumber = components => decodeSRGB(bitsToNumberNonSRGB(components));
+ } else {
+ encode = encodeNonSRGB;
+ decode = decodeNonSRGB;
+ numberToBits = numberToBitsNonSRGB;
+ bitsToNumber = bitsToNumberNonSRGB;
+ }
+
+ let bitsToULPFromZero: ComponentMapFn;
+ if (opt.signed) {
+ const maxValue = (1 << (bitLength - 1)) - 1; // e.g. 127 for snorm8
+ bitsToULPFromZero = applyEach(
+ n => Math.max(-maxValue, signExtend(n, bitLength)),
+ componentOrder
+ );
+ } else {
+ bitsToULPFromZero = components => components;
+ }
+
+ const dataType: ComponentDataType = opt.signed ? 'snorm' : 'unorm';
+ return {
+ componentOrder,
+ componentInfo: makePerTexelComponent(componentOrder, {
+ dataType,
+ bitLength,
+ }),
+ encode,
+ decode,
+ pack: (components: PerTexelComponent<number>) =>
+ packComponents(componentOrder, components, bitLength, dataType),
+ unpackBits: (data: Uint8Array) => unpackComponentsBits(componentOrder, data, bitLength),
+ numberToBits,
+ bitsToNumber,
+ bitsToULPFromZero,
+ numericRange: { min: opt.signed ? -1 : 0, max: 1 },
+ };
+}
+
+/**
+ * Create an entry in `kTexelRepresentationInfo` for integer texel data with constant bitlength.
+ * @param {TexelComponent[]} componentOrder - The order of the component data.
+ * @param {number} bitLength - The number of bits in each component.
+ * @param {{signed: boolean}} opt - Boolean flag for `signed`.
+ */
+function makeIntegerInfo(
+ componentOrder: TexelComponent[],
+ bitLength: number,
+ opt: { signed: boolean }
+): TexelRepresentationInfo {
+ assert(bitLength <= 32);
+ const numericRange = opt.signed
+ ? { min: -(2 ** (bitLength - 1)), max: 2 ** (bitLength - 1) - 1 }
+ : { min: 0, max: 2 ** bitLength - 1 };
+ const maxUnsignedValue = 2 ** bitLength;
+ const encode = applyEach(
+ (n: number) => (assertInIntegerRange(n, bitLength, opt.signed), n),
+ componentOrder
+ );
+ const decode = applyEach(
+ (n: number) => (assertInIntegerRange(n, bitLength, opt.signed), n),
+ componentOrder
+ );
+ const bitsToNumber = applyEach((n: number) => {
+ const decodedN = opt.signed ? (n > numericRange.max ? n - maxUnsignedValue : n) : n;
+ assertInIntegerRange(decodedN, bitLength, opt.signed);
+ return decodedN;
+ }, componentOrder);
+
+ let bitsToULPFromZero: ComponentMapFn;
+ if (opt.signed) {
+ bitsToULPFromZero = applyEach(n => signExtend(n, bitLength), componentOrder);
+ } else {
+ bitsToULPFromZero = components => components;
+ }
+
+ const dataType: ComponentDataType = opt.signed ? 'sint' : 'uint';
+ const bitMask = (1 << bitLength) - 1;
+ return {
+ componentOrder,
+ componentInfo: makePerTexelComponent(componentOrder, {
+ dataType,
+ bitLength,
+ }),
+ encode,
+ decode,
+ pack: (components: PerTexelComponent<number>) =>
+ packComponents(componentOrder, components, bitLength, dataType),
+ unpackBits: (data: Uint8Array) => unpackComponentsBits(componentOrder, data, bitLength),
+ numberToBits: applyEach(v => v & bitMask, componentOrder),
+ bitsToNumber,
+ bitsToULPFromZero,
+ numericRange,
+ };
+}
+
+/**
+ * Create an entry in `kTexelRepresentationInfo` for floating point texel data with constant
+ * bitlength.
+ * @param {TexelComponent[]} componentOrder - The order of the component data.
+ * @param {number} bitLength - The number of bits in each component.
+ */
+function makeFloatInfo(
+ componentOrder: TexelComponent[],
+ bitLength: number,
+ { restrictedDepth = false }: { restrictedDepth?: boolean } = {}
+): TexelRepresentationInfo {
+ let encode: ComponentMapFn;
+ let numberToBits;
+ let bitsToNumber;
+ let bitsToULPFromZero;
+ switch (bitLength) {
+ case 32:
+ if (restrictedDepth) {
+ encode = applyEach(v => {
+ assert(v >= 0.0 && v <= 1.0, 'depth out of range');
+ return new Float32Array([v])[0];
+ }, componentOrder);
+ } else {
+ encode = applyEach(v => new Float32Array([v])[0], componentOrder);
+ }
+ numberToBits = applyEach(numberToFloat32Bits, componentOrder);
+ bitsToNumber = applyEach(float32BitsToNumber, componentOrder);
+ bitsToULPFromZero = applyEach(
+ v => floatBitsToNormalULPFromZero(v, kFloat32Format),
+ componentOrder
+ );
+ break;
+ case 16:
+ if (restrictedDepth) {
+ encode = applyEach(v => {
+ assert(v >= 0.0 && v <= 1.0, 'depth out of range');
+ return float16BitsToFloat32(float32ToFloat16Bits(v));
+ }, componentOrder);
+ } else {
+ encode = applyEach(v => float16BitsToFloat32(float32ToFloat16Bits(v)), componentOrder);
+ }
+ numberToBits = applyEach(float32ToFloat16Bits, componentOrder);
+ bitsToNumber = applyEach(float16BitsToFloat32, componentOrder);
+ bitsToULPFromZero = applyEach(
+ v => floatBitsToNormalULPFromZero(v, kFloat16Format),
+ componentOrder
+ );
+ break;
+ default:
+ unreachable();
+ }
+ const decode = applyEach(identity, componentOrder);
+
+ return {
+ componentOrder,
+ componentInfo: makePerTexelComponent(componentOrder, {
+ dataType: 'float' as const,
+ bitLength,
+ }),
+ encode,
+ decode,
+ pack: (components: PerTexelComponent<number>) => {
+ switch (bitLength) {
+ case 16:
+ components = applyEach(float32ToFloat16Bits, componentOrder)(components);
+ return packComponents(componentOrder, components, 16, 'uint');
+ case 32:
+ return packComponents(componentOrder, components, bitLength, 'float');
+ default:
+ unreachable();
+ }
+ },
+ unpackBits: (data: Uint8Array) => unpackComponentsBits(componentOrder, data, bitLength),
+ numberToBits,
+ bitsToNumber,
+ bitsToULPFromZero,
+ numericRange: restrictedDepth
+ ? { min: 0, max: 1 }
+ : { min: Number.NEGATIVE_INFINITY, max: Number.POSITIVE_INFINITY },
+ };
+}
+
+const kR = [TexelComponent.R];
+const kRG = [TexelComponent.R, TexelComponent.G];
+const kRGB = [TexelComponent.R, TexelComponent.G, TexelComponent.B];
+const kRGBA = [TexelComponent.R, TexelComponent.G, TexelComponent.B, TexelComponent.A];
+const kBGRA = [TexelComponent.B, TexelComponent.G, TexelComponent.R, TexelComponent.A];
+
+const identity = (n: number) => n;
+
+const kFloat11Format = { signed: 0, exponentBits: 5, mantissaBits: 6, bias: 15 } as const;
+const kFloat10Format = { signed: 0, exponentBits: 5, mantissaBits: 5, bias: 15 } as const;
+
+export type TexelRepresentationInfo = {
+ /** Order of components in the packed representation. */
+ readonly componentOrder: TexelComponent[];
+ /** Data type and bit length of each component in the format. */
+ readonly componentInfo: PerTexelComponent<{
+ dataType: ComponentDataType;
+ bitLength: number;
+ }>;
+ /** Encode shader values into their data representation. ex.) float 1.0 -> unorm8 255 */
+ // MAINTENANCE_TODO: Replace with numberToBits?
+ readonly encode: ComponentMapFn;
+ /** Decode the data representation into the shader values. ex.) unorm8 255 -> float 1.0 */
+ // MAINTENANCE_TODO: Replace with bitsToNumber?
+ readonly decode: ComponentMapFn;
+ /** Pack texel component values into an ArrayBuffer. ex.) rg8unorm `{r: 0, g: 255}` -> 0xFF00 */
+ // MAINTENANCE_TODO: Replace with packBits?
+ readonly pack: ComponentPackFn;
+
+ /** Convert integer bit representations into numeric values, e.g. unorm8 255 -> numeric 1.0 */
+ readonly bitsToNumber: ComponentMapFn;
+ /** Convert numeric values into integer bit representations, e.g. numeric 1.0 -> unorm8 255 */
+ readonly numberToBits: ComponentMapFn;
+ /** Unpack integer bit representations from an ArrayBuffer, e.g. 0xFF00 -> rg8unorm [0,255] */
+ readonly unpackBits: ComponentUnpackFn;
+ /** Convert integer bit representations into ULPs-from-zero, e.g. unorm8 255 -> 255 ULPs */
+ readonly bitsToULPFromZero: ComponentMapFn;
+ /** The valid range of numeric "color" values, e.g. [0, Infinity] for ufloat. */
+ readonly numericRange: null | { min: number; max: number };
+
+ // Add fields as needed
+};
+export const kTexelRepresentationInfo: {
+ readonly [k in UncompressedTextureFormat]: TexelRepresentationInfo;
+} = {
+ .../* prettier-ignore */ {
+ 'r8unorm': makeNormalizedInfo( kR, 8, { signed: false, sRGB: false }),
+ 'r8snorm': makeNormalizedInfo( kR, 8, { signed: true, sRGB: false }),
+ 'r8uint': makeIntegerInfo( kR, 8, { signed: false }),
+ 'r8sint': makeIntegerInfo( kR, 8, { signed: true }),
+ 'r16uint': makeIntegerInfo( kR, 16, { signed: false }),
+ 'r16sint': makeIntegerInfo( kR, 16, { signed: true }),
+ 'r16float': makeFloatInfo( kR, 16),
+ 'rg8unorm': makeNormalizedInfo( kRG, 8, { signed: false, sRGB: false }),
+ 'rg8snorm': makeNormalizedInfo( kRG, 8, { signed: true, sRGB: false }),
+ 'rg8uint': makeIntegerInfo( kRG, 8, { signed: false }),
+ 'rg8sint': makeIntegerInfo( kRG, 8, { signed: true }),
+ 'r32uint': makeIntegerInfo( kR, 32, { signed: false }),
+ 'r32sint': makeIntegerInfo( kR, 32, { signed: true }),
+ 'r32float': makeFloatInfo( kR, 32),
+ 'rg16uint': makeIntegerInfo( kRG, 16, { signed: false }),
+ 'rg16sint': makeIntegerInfo( kRG, 16, { signed: true }),
+ 'rg16float': makeFloatInfo( kRG, 16),
+ 'rgba8unorm': makeNormalizedInfo(kRGBA, 8, { signed: false, sRGB: false }),
+ 'rgba8unorm-srgb': makeNormalizedInfo(kRGBA, 8, { signed: false, sRGB: true }),
+ 'rgba8snorm': makeNormalizedInfo(kRGBA, 8, { signed: true, sRGB: false }),
+ 'rgba8uint': makeIntegerInfo( kRGBA, 8, { signed: false }),
+ 'rgba8sint': makeIntegerInfo( kRGBA, 8, { signed: true }),
+ 'bgra8unorm': makeNormalizedInfo(kBGRA, 8, { signed: false, sRGB: false }),
+ 'bgra8unorm-srgb': makeNormalizedInfo(kBGRA, 8, { signed: false, sRGB: true }),
+ 'rg32uint': makeIntegerInfo( kRG, 32, { signed: false }),
+ 'rg32sint': makeIntegerInfo( kRG, 32, { signed: true }),
+ 'rg32float': makeFloatInfo( kRG, 32),
+ 'rgba16uint': makeIntegerInfo( kRGBA, 16, { signed: false }),
+ 'rgba16sint': makeIntegerInfo( kRGBA, 16, { signed: true }),
+ 'rgba16float': makeFloatInfo( kRGBA, 16),
+ 'rgba32uint': makeIntegerInfo( kRGBA, 32, { signed: false }),
+ 'rgba32sint': makeIntegerInfo( kRGBA, 32, { signed: true }),
+ 'rgba32float': makeFloatInfo( kRGBA, 32),
+ },
+ ...{
+ rgb10a2uint: {
+ componentOrder: kRGBA,
+ componentInfo: {
+ R: { dataType: 'uint', bitLength: 10 },
+ G: { dataType: 'uint', bitLength: 10 },
+ B: { dataType: 'uint', bitLength: 10 },
+ A: { dataType: 'uint', bitLength: 2 },
+ },
+ encode: components => {
+ assertInIntegerRange(components.R!, 10, false);
+ assertInIntegerRange(components.G!, 10, false);
+ assertInIntegerRange(components.B!, 10, false);
+ assertInIntegerRange(components.A!, 2, false);
+ return components;
+ },
+ decode: components => {
+ assertInIntegerRange(components.R!, 10, false);
+ assertInIntegerRange(components.G!, 10, false);
+ assertInIntegerRange(components.B!, 10, false);
+ assertInIntegerRange(components.A!, 2, false);
+ return components;
+ },
+ pack: components =>
+ packComponents(
+ kRGBA,
+ components,
+ {
+ R: 10,
+ G: 10,
+ B: 10,
+ A: 2,
+ },
+ 'uint'
+ ),
+ unpackBits: (data: Uint8Array) =>
+ unpackComponentsBits(kRGBA, data, { R: 10, G: 10, B: 10, A: 2 }),
+ numberToBits: components => ({
+ R: components.R! & 0x3ff,
+ G: components.G! & 0x3ff,
+ B: components.B! & 0x3ff,
+ A: components.A! & 0x3,
+ }),
+ bitsToNumber: components => {
+ assertInIntegerRange(components.R!, 10, false);
+ assertInIntegerRange(components.G!, 10, false);
+ assertInIntegerRange(components.B!, 10, false);
+ assertInIntegerRange(components.A!, 2, false);
+ return components;
+ },
+ bitsToULPFromZero: components => components,
+ numericRange: null,
+ },
+ rgb10a2unorm: {
+ componentOrder: kRGBA,
+ componentInfo: {
+ R: { dataType: 'unorm', bitLength: 10 },
+ G: { dataType: 'unorm', bitLength: 10 },
+ B: { dataType: 'unorm', bitLength: 10 },
+ A: { dataType: 'unorm', bitLength: 2 },
+ },
+ encode: components => {
+ return {
+ R: floatAsNormalizedInteger(components.R ?? unreachable(), 10, false),
+ G: floatAsNormalizedInteger(components.G ?? unreachable(), 10, false),
+ B: floatAsNormalizedInteger(components.B ?? unreachable(), 10, false),
+ A: floatAsNormalizedInteger(components.A ?? unreachable(), 2, false),
+ };
+ },
+ decode: components => {
+ return {
+ R: normalizedIntegerAsFloat(components.R ?? unreachable(), 10, false),
+ G: normalizedIntegerAsFloat(components.G ?? unreachable(), 10, false),
+ B: normalizedIntegerAsFloat(components.B ?? unreachable(), 10, false),
+ A: normalizedIntegerAsFloat(components.A ?? unreachable(), 2, false),
+ };
+ },
+ pack: components =>
+ packComponents(
+ kRGBA,
+ components,
+ {
+ R: 10,
+ G: 10,
+ B: 10,
+ A: 2,
+ },
+ 'uint'
+ ),
+ unpackBits: (data: Uint8Array) =>
+ unpackComponentsBits(kRGBA, data, { R: 10, G: 10, B: 10, A: 2 }),
+ numberToBits: components => ({
+ R: floatAsNormalizedInteger(components.R ?? unreachable(), 10, false),
+ G: floatAsNormalizedInteger(components.G ?? unreachable(), 10, false),
+ B: floatAsNormalizedInteger(components.B ?? unreachable(), 10, false),
+ A: floatAsNormalizedInteger(components.A ?? unreachable(), 2, false),
+ }),
+ bitsToNumber: components => ({
+ R: normalizedIntegerAsFloat(components.R!, 10, false),
+ G: normalizedIntegerAsFloat(components.G!, 10, false),
+ B: normalizedIntegerAsFloat(components.B!, 10, false),
+ A: normalizedIntegerAsFloat(components.A!, 2, false),
+ }),
+ bitsToULPFromZero: components => components,
+ numericRange: { min: 0, max: 1 },
+ },
+ rg11b10ufloat: {
+ componentOrder: kRGB,
+ encode: applyEach(identity, kRGB),
+ decode: applyEach(identity, kRGB),
+ componentInfo: {
+ R: { dataType: 'ufloat', bitLength: 11 },
+ G: { dataType: 'ufloat', bitLength: 11 },
+ B: { dataType: 'ufloat', bitLength: 10 },
+ },
+ pack: components => {
+ const componentsBits = {
+ R: float32ToFloatBits(components.R ?? unreachable(), 0, 5, 6, 15),
+ G: float32ToFloatBits(components.G ?? unreachable(), 0, 5, 6, 15),
+ B: float32ToFloatBits(components.B ?? unreachable(), 0, 5, 5, 15),
+ };
+ return packComponents(
+ kRGB,
+ componentsBits,
+ {
+ R: 11,
+ G: 11,
+ B: 10,
+ },
+ 'uint'
+ );
+ },
+ unpackBits: (data: Uint8Array) => unpackComponentsBits(kRGB, data, { R: 11, G: 11, B: 10 }),
+ numberToBits: components => ({
+ R: numberToFloatBits(components.R ?? unreachable(), kFloat11Format),
+ G: numberToFloatBits(components.G ?? unreachable(), kFloat11Format),
+ B: numberToFloatBits(components.B ?? unreachable(), kFloat10Format),
+ }),
+ bitsToNumber: components => ({
+ R: floatBitsToNumber(components.R!, kFloat11Format),
+ G: floatBitsToNumber(components.G!, kFloat11Format),
+ B: floatBitsToNumber(components.B!, kFloat10Format),
+ }),
+ bitsToULPFromZero: components => ({
+ R: floatBitsToNormalULPFromZero(components.R!, kFloat11Format),
+ G: floatBitsToNormalULPFromZero(components.G!, kFloat11Format),
+ B: floatBitsToNormalULPFromZero(components.B!, kFloat10Format),
+ }),
+ numericRange: { min: 0, max: Number.POSITIVE_INFINITY },
+ },
+ rgb9e5ufloat: {
+ componentOrder: kRGB,
+ componentInfo: makePerTexelComponent(kRGB, {
+ dataType: 'ufloat',
+ bitLength: -1, // Components don't really have a bitLength since the format is packed.
+ }),
+ encode: applyEach(identity, kRGB),
+ decode: applyEach(identity, kRGB),
+ pack: components =>
+ new Uint32Array([
+ packRGB9E5UFloat(
+ components.R ?? unreachable(),
+ components.G ?? unreachable(),
+ components.B ?? unreachable()
+ ),
+ ]).buffer,
+ unpackBits: (data: Uint8Array) => {
+ const encoded = (data[3] << 24) | (data[2] << 16) | (data[1] << 8) | data[0];
+ const redMantissa = (encoded >>> 0) & 0b111111111;
+ const greenMantissa = (encoded >>> 9) & 0b111111111;
+ const blueMantissa = (encoded >>> 18) & 0b111111111;
+ const exponentSharedBits = ((encoded >>> 27) & 0b11111) << 9;
+ return {
+ R: exponentSharedBits | redMantissa,
+ G: exponentSharedBits | greenMantissa,
+ B: exponentSharedBits | blueMantissa,
+ };
+ },
+ numberToBits: components => ({
+ R: float32ToFloatBits(components.R ?? unreachable(), 0, 5, 9, 15),
+ G: float32ToFloatBits(components.G ?? unreachable(), 0, 5, 9, 15),
+ B: float32ToFloatBits(components.B ?? unreachable(), 0, 5, 9, 15),
+ }),
+ bitsToNumber: components => ({
+ R: ufloatM9E5BitsToNumber(components.R!, kUFloat9e5Format),
+ G: ufloatM9E5BitsToNumber(components.G!, kUFloat9e5Format),
+ B: ufloatM9E5BitsToNumber(components.B!, kUFloat9e5Format),
+ }),
+ bitsToULPFromZero: components => ({
+ R: floatBitsToNormalULPFromZero(components.R!, kUFloat9e5Format),
+ G: floatBitsToNormalULPFromZero(components.G!, kUFloat9e5Format),
+ B: floatBitsToNormalULPFromZero(components.B!, kUFloat9e5Format),
+ }),
+ numericRange: { min: 0, max: Number.POSITIVE_INFINITY },
+ },
+ depth32float: makeFloatInfo([TexelComponent.Depth], 32, { restrictedDepth: true }),
+ depth16unorm: makeNormalizedInfo([TexelComponent.Depth], 16, { signed: false, sRGB: false }),
+ depth24plus: {
+ componentOrder: [TexelComponent.Depth],
+ componentInfo: { Depth: { dataType: null, bitLength: 24 } },
+ encode: applyEach(() => unreachable('depth24plus cannot be encoded'), [TexelComponent.Depth]),
+ decode: applyEach(() => unreachable('depth24plus cannot be decoded'), [TexelComponent.Depth]),
+ pack: () => unreachable('depth24plus data cannot be packed'),
+ unpackBits: () => unreachable('depth24plus data cannot be unpacked'),
+ numberToBits: () => unreachable('depth24plus has no representation'),
+ bitsToNumber: () => unreachable('depth24plus has no representation'),
+ bitsToULPFromZero: () => unreachable('depth24plus has no representation'),
+ numericRange: { min: 0, max: 1 },
+ },
+ stencil8: makeIntegerInfo([TexelComponent.Stencil], 8, { signed: false }),
+ 'depth32float-stencil8': {
+ componentOrder: [TexelComponent.Depth, TexelComponent.Stencil],
+ componentInfo: {
+ Depth: {
+ dataType: 'float',
+ bitLength: 32,
+ },
+ Stencil: {
+ dataType: 'uint',
+ bitLength: 8,
+ },
+ },
+ encode: components => {
+ assert(components.Stencil !== undefined);
+ assertInIntegerRange(components.Stencil, 8, false);
+ return components;
+ },
+ decode: components => {
+ assert(components.Stencil !== undefined);
+ assertInIntegerRange(components.Stencil, 8, false);
+ return components;
+ },
+ pack: () => unreachable('depth32float-stencil8 data cannot be packed'),
+ unpackBits: () => unreachable('depth32float-stencil8 data cannot be unpacked'),
+ numberToBits: () => unreachable('not implemented'),
+ bitsToNumber: () => unreachable('not implemented'),
+ bitsToULPFromZero: () => unreachable('not implemented'),
+ numericRange: null,
+ },
+ 'depth24plus-stencil8': {
+ componentOrder: [TexelComponent.Depth, TexelComponent.Stencil],
+ componentInfo: {
+ Depth: {
+ dataType: null,
+ bitLength: 24,
+ },
+ Stencil: {
+ dataType: 'uint',
+ bitLength: 8,
+ },
+ },
+ encode: components => {
+ assert(components.Depth === undefined, 'depth24plus cannot be encoded');
+ assert(components.Stencil !== undefined);
+ assertInIntegerRange(components.Stencil, 8, false);
+ return components;
+ },
+ decode: components => {
+ assert(components.Depth === undefined, 'depth24plus cannot be decoded');
+ assert(components.Stencil !== undefined);
+ assertInIntegerRange(components.Stencil, 8, false);
+ return components;
+ },
+ pack: () => unreachable('depth24plus-stencil8 data cannot be packed'),
+ unpackBits: () => unreachable('depth24plus-stencil8 data cannot be unpacked'),
+ numberToBits: () => unreachable('depth24plus-stencil8 has no representation'),
+ bitsToNumber: () => unreachable('depth24plus-stencil8 has no representation'),
+ bitsToULPFromZero: () => unreachable('depth24plus-stencil8 has no representation'),
+ numericRange: null,
+ },
+ },
+};
+
+/**
+ * Get the `ComponentDataType` for a format. All components must have the same type.
+ * @param {UncompressedTextureFormat} format - The input format.
+ * @returns {ComponentDataType} The data type of the components.
+ */
+export function getSingleDataType(format: UncompressedTextureFormat): ComponentDataType {
+ const infos = Object.values(kTexelRepresentationInfo[format].componentInfo);
+ assert(infos.length > 0);
+ return infos.reduce((acc, cur) => {
+ assert(cur !== undefined);
+ assert(acc === undefined || acc === cur.dataType);
+ return cur.dataType;
+ }, infos[0]!.dataType);
+}
+
+/**
+ * Get traits for generating code to readback data from a component.
+ * @param {ComponentDataType} dataType - The input component data type.
+ * @returns A dictionary containing the respective `ReadbackTypedArray` and `shaderType`.
+ */
+export function getComponentReadbackTraits(dataType: ComponentDataType) {
+ switch (dataType) {
+ case 'ufloat':
+ case 'float':
+ case 'unorm':
+ case 'snorm':
+ return {
+ ReadbackTypedArray: Float32Array,
+ shaderType: 'f32' as const,
+ };
+ case 'uint':
+ return {
+ ReadbackTypedArray: Uint32Array,
+ shaderType: 'u32' as const,
+ };
+ case 'sint':
+ return {
+ ReadbackTypedArray: Int32Array,
+ shaderType: 'i32' as const,
+ };
+ default:
+ unreachable();
+ }
+}
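+
+// Illustrative usage sketch of the two helpers above (not exercised by any test here; the
+// format and data-type literals are arbitrary examples):
+//
+//   getSingleDataType('rgba8unorm');     // -> 'unorm'
+//   getComponentReadbackTraits('float'); // -> { ReadbackTypedArray: Float32Array, shaderType: 'f32' }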
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texel_view.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texel_view.ts
new file mode 100644
index 0000000000..fea23b674e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texel_view.ts
@@ -0,0 +1,201 @@
+import { assert, memcpy } from '../../../common/util/util.js';
+import { kTextureFormatInfo, EncodableTextureFormat } from '../../format_info.js';
+import { generatePrettyTable } from '../pretty_diff_tables.js';
+import { reifyExtent3D, reifyOrigin3D } from '../unions.js';
+
+import { fullSubrectCoordinates } from './base.js';
+import { kTexelRepresentationInfo, makeClampToRange, PerTexelComponent } from './texel_data.js';
+
+/** Function taking some x,y,z coordinates and returning `Readonly<T>`. */
+export type PerPixelAtLevel<T> = (coords: Required<GPUOrigin3DDict>) => Readonly<T>;
+
+/**
+ * Wrapper to view various representations of texture data in other ways. E.g., can:
+ * - Provide a mapped buffer, containing copied texture data, and read color values.
+ * - Provide a function that generates color values by coordinate, and convert to ULPs-from-zero.
+ *
+ * MAINTENANCE_TODO: Would need some refactoring to support block formats, which could be partially
+ * supported if useful.
+ */
+export class TexelView {
+ /** The GPUTextureFormat of the TexelView. */
+ readonly format: EncodableTextureFormat;
+ /** Generates the bytes for the texel at the given coordinates. */
+ readonly bytes: PerPixelAtLevel<Uint8Array>;
+ /** Generates the ULPs-from-zero for the texel at the given coordinates. */
+ readonly ulpFromZero: PerPixelAtLevel<PerTexelComponent<number>>;
+ /** Generates the color for the texel at the given coordinates. */
+ readonly color: PerPixelAtLevel<PerTexelComponent<number>>;
+
+ private constructor(
+ format: EncodableTextureFormat,
+ {
+ bytes,
+ ulpFromZero,
+ color,
+ }: {
+ bytes: PerPixelAtLevel<Uint8Array>;
+ ulpFromZero: PerPixelAtLevel<PerTexelComponent<number>>;
+ color: PerPixelAtLevel<PerTexelComponent<number>>;
+ }
+ ) {
+ this.format = format;
+ this.bytes = bytes;
+ this.ulpFromZero = ulpFromZero;
+ this.color = color;
+ }
+
+ /**
+ * Produces a TexelView from "linear image data", i.e. the `writeTexture` format. Takes a
+ * reference to the input `subrectData`, so any changes to it will be visible in the TexelView.
+ */
+ static fromTextureDataByReference(
+ format: EncodableTextureFormat,
+ subrectData: Uint8Array | Uint8ClampedArray,
+ {
+ bytesPerRow,
+ rowsPerImage,
+ subrectOrigin,
+ subrectSize,
+ }: {
+ bytesPerRow: number;
+ rowsPerImage: number;
+ subrectOrigin: GPUOrigin3D;
+ subrectSize: GPUExtent3D;
+ }
+ ) {
+ const origin = reifyOrigin3D(subrectOrigin);
+ const size = reifyExtent3D(subrectSize);
+
+ const info = kTextureFormatInfo[format];
+ assert(info.blockWidth === 1 && info.blockHeight === 1, 'unimplemented for block formats');
+
+ return TexelView.fromTexelsAsBytes(format, coords => {
+ assert(
+ coords.x >= origin.x &&
+ coords.y >= origin.y &&
+ coords.z >= origin.z &&
+ coords.x < origin.x + size.width &&
+ coords.y < origin.y + size.height &&
+ coords.z < origin.z + size.depthOrArrayLayers,
+ () => `coordinate (${coords.x},${coords.y},${coords.z}) out of bounds`
+ );
+
+ const imageOffsetInRows = (coords.z - origin.z) * rowsPerImage;
+ const rowOffset = (imageOffsetInRows + (coords.y - origin.y)) * bytesPerRow;
+ const offset = rowOffset + (coords.x - origin.x) * info.bytesPerBlock;
+
+ // MAINTENANCE_TODO: To support block formats, decode the block and then index into the result.
+ return subrectData.subarray(offset, offset + info.bytesPerBlock) as Uint8Array;
+ });
+ }
+
+ /** Produces a TexelView from a generator of bytes for individual texel blocks. */
+ static fromTexelsAsBytes(
+ format: EncodableTextureFormat,
+ generator: PerPixelAtLevel<Uint8Array>
+ ): TexelView {
+ const info = kTextureFormatInfo[format];
+ assert(info.blockWidth === 1 && info.blockHeight === 1, 'unimplemented for block formats');
+
+ const repr = kTexelRepresentationInfo[format];
+ return new TexelView(format, {
+ bytes: generator,
+ ulpFromZero: coords => repr.bitsToULPFromZero(repr.unpackBits(generator(coords))),
+ color: coords => repr.bitsToNumber(repr.unpackBits(generator(coords))),
+ });
+ }
+
+ /** Produces a TexelView from a generator of numeric "color" values for each texel. */
+ static fromTexelsAsColors(
+ format: EncodableTextureFormat,
+ generator: PerPixelAtLevel<PerTexelComponent<number>>,
+ { clampToFormatRange = false }: { clampToFormatRange?: boolean } = {}
+ ): TexelView {
+ const info = kTextureFormatInfo[format];
+ assert(info.blockWidth === 1 && info.blockHeight === 1, 'unimplemented for block formats');
+
+ if (clampToFormatRange) {
+ const applyClamp = makeClampToRange(format);
+ const oldGenerator = generator;
+ generator = coords => applyClamp(oldGenerator(coords));
+ }
+
+ const repr = kTexelRepresentationInfo[format];
+ return new TexelView(format, {
+ bytes: coords => new Uint8Array(repr.pack(repr.encode(generator(coords)))),
+ ulpFromZero: coords => repr.bitsToULPFromZero(repr.numberToBits(generator(coords))),
+ color: generator,
+ });
+ }
+
+ /** Writes the contents of a TexelView as "linear image data", i.e. the `writeTexture` format. */
+ writeTextureData(
+ subrectData: Uint8Array | Uint8ClampedArray,
+ {
+ bytesPerRow,
+ rowsPerImage,
+ subrectOrigin: subrectOrigin_,
+ subrectSize: subrectSize_,
+ }: {
+ bytesPerRow: number;
+ rowsPerImage: number;
+ subrectOrigin: GPUOrigin3D;
+ subrectSize: GPUExtent3D;
+ }
+ ): void {
+ const subrectOrigin = reifyOrigin3D(subrectOrigin_);
+ const subrectSize = reifyExtent3D(subrectSize_);
+
+ const info = kTextureFormatInfo[this.format];
+ assert(info.blockWidth === 1 && info.blockHeight === 1, 'unimplemented for block formats');
+
+ for (let z = subrectOrigin.z; z < subrectOrigin.z + subrectSize.depthOrArrayLayers; ++z) {
+ for (let y = subrectOrigin.y; y < subrectOrigin.y + subrectSize.height; ++y) {
+ for (let x = subrectOrigin.x; x < subrectOrigin.x + subrectSize.width; ++x) {
+ const start = (z * rowsPerImage + y) * bytesPerRow + x * info.bytesPerBlock;
+ memcpy({ src: this.bytes({ x, y, z }) }, { dst: subrectData, start });
+ }
+ }
+ }
+ }
+
+ /** Returns a pretty table string of the given coordinates and their values. */
+ // MAINTENANCE_TODO: Unify some internal helpers with those in texture_ok.ts.
+ toString(subrectOrigin: Required<GPUOrigin3DDict>, subrectSize: Required<GPUExtent3DDict>) {
+ const info = kTextureFormatInfo[this.format];
+ const repr = kTexelRepresentationInfo[this.format];
+
+ const integerSampleType = info.sampleType === 'uint' || info.sampleType === 'sint';
+ const numberToString = integerSampleType
+ ? (n: number) => n.toFixed()
+ : (n: number) => n.toPrecision(6);
+
+ const componentOrderStr = repr.componentOrder.join(',') + ':';
+ const subrectCoords = [...fullSubrectCoordinates(subrectOrigin, subrectSize)];
+
+ const printCoords = (function* () {
+ yield* [' coords', '==', 'X,Y,Z:'];
+ for (const coords of subrectCoords) yield `${coords.x},${coords.y},${coords.z}`;
+ })();
+ const printActualBytes = (function* (t: TexelView) {
+ yield* [' act. texel bytes (little-endian)', '==', '0x:'];
+ for (const coords of subrectCoords) {
+ yield Array.from(t.bytes(coords), b => b.toString(16).padStart(2, '0')).join(' ');
+ }
+ })(this);
+ const printActualColors = (function* (t: TexelView) {
+ yield* [' act. colors', '==', componentOrderStr];
+ for (const coords of subrectCoords) {
+ const pixel = t.color(coords);
+ yield `${repr.componentOrder.map(ch => numberToString(pixel[ch]!)).join(',')}`;
+ }
+ })(this);
+
+ const opts = {
+ fillToWidth: 120,
+ numberToString,
+ };
+ return `${generatePrettyTable(opts, [printCoords, printActualBytes, printActualColors])}`;
+ }
+}
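+
+// Illustrative usage sketch (not exercised here; the format and color values are arbitrary):
+//
+//   const tv = TexelView.fromTexelsAsColors('rgba8unorm', () => ({ R: 1, G: 0, B: 0, A: 1 }));
+//   tv.color({ x: 0, y: 0, z: 0 });       // -> { R: 1, G: 0, B: 0, A: 1 }
+//   tv.bytes({ x: 0, y: 0, z: 0 });       // -> packed texel bytes, Uint8Array [0xff, 0x00, 0x00, 0xff]
+//   tv.ulpFromZero({ x: 0, y: 0, z: 0 }); // -> per-component ULPs-from-zero, for comparisons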
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texture_ok.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texture_ok.spec.ts
new file mode 100644
index 0000000000..ad7635f939
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texture_ok.spec.ts
@@ -0,0 +1,159 @@
+export const description = 'checkPixels helpers behave as expected against real textures';
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { GPUTest } from '../../gpu_test.js';
+
+import { TexelView } from './texel_view.js';
+import { textureContentIsOKByT2B } from './texture_ok.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('float32')
+ .desc(`Basic test that actual/expected must match, for float32.`)
+ .params(u =>
+ u
+ .combineWithParams([
+ { format: 'rgba32float' }, //
+ { format: 'rg32float' },
+ ] as const)
+ .beginSubcases()
+ .combineWithParams([
+ // Expected data is 0.6 in all channels
+ { data: 0.6, opts: { maxFractionalDiff: 0.0000001 }, _ok: true },
+ { data: 0.6, opts: { maxDiffULPsForFloatFormat: 1 }, _ok: true },
+
+ { data: 0.5999, opts: { maxFractionalDiff: 0 }, _ok: false },
+ { data: 0.5999, opts: { maxFractionalDiff: 0.0001001 }, _ok: true },
+
+ { data: 0.6001, opts: { maxFractionalDiff: 0 }, _ok: false },
+ { data: 0.6001, opts: { maxFractionalDiff: 0.0001001 }, _ok: true },
+
+ { data: 0.5999, opts: { maxDiffULPsForFloatFormat: 1677 }, _ok: false },
+ { data: 0.5999, opts: { maxDiffULPsForFloatFormat: 1678 }, _ok: true },
+
+ { data: 0.6001, opts: { maxDiffULPsForFloatFormat: 1676 }, _ok: false },
+ { data: 0.6001, opts: { maxDiffULPsForFloatFormat: 1677 }, _ok: true },
+ ])
+ )
+ .fn(async t => {
+ const { format, data, opts, _ok } = t.params;
+
+ const size = [1, 1];
+ const texture = t.device.createTexture({
+ format,
+ size,
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC,
+ });
+ t.trackForCleanup(texture);
+ t.device.queue.writeTexture({ texture }, new Float32Array([data, data, data, data]), {}, size);
+
+ const expColor = { R: 0.6, G: 0.6, B: 0.6, A: 0.6 };
+ const expTexelView = TexelView.fromTexelsAsColors(format, _coords => expColor);
+
+ const result = await textureContentIsOKByT2B(t, { texture }, size, { expTexelView }, opts);
+ t.expect((result === undefined) === _ok, `expected ${_ok}, got ${result === undefined}`);
+ });
+
+g.test('norm')
+ .desc(`Basic test that actual/expected must match, for unorm/snorm.`)
+ .params(u =>
+ u
+ .combine('mode', ['bytes', 'colors'] as const)
+ .combineWithParams([
+ { format: 'r8unorm', _maxValue: 255 },
+ { format: 'r8snorm', _maxValue: 127 },
+ ] as const)
+ .beginSubcases()
+ .combineWithParams([
+ // Expected data is [10, 10]
+ { data: [10, 10], _ok: true },
+ { data: [10, 11], _ok: false },
+ { data: [11, 10], _ok: false },
+ { data: [11, 11], _ok: false },
+ ])
+ )
+ .fn(async t => {
+ const { mode, format, _maxValue, data, _ok } = t.params;
+
+ const size = [2, 1];
+ const texture = t.device.createTexture({
+ format,
+ size,
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC,
+ });
+ t.trackForCleanup(texture);
+ t.device.queue.writeTexture({ texture }, new Int8Array(data), {}, size);
+
+ let expTexelView;
+ switch (mode) {
+ case 'bytes':
+ expTexelView = TexelView.fromTexelsAsBytes(format, _coords => new Uint8Array([10]));
+ break;
+ case 'colors':
+ expTexelView = TexelView.fromTexelsAsColors(format, _coords => ({ R: 10 / _maxValue }));
+ break;
+ }
+
+ const result = await textureContentIsOKByT2B(
+ t,
+ { texture },
+ size,
+ { expTexelView },
+ { maxDiffULPsForNormFormat: 0 }
+ );
+ t.expect((result === undefined) === _ok, result?.message);
+ });
+
+g.test('snorm_min')
+ .desc(
+ `The minimum snorm value has two possible representations (e.g. -127 and -128). Ensure that
+ actual/expected can mismatch in both directions and pass the test.`
+ )
+ .params(u =>
+ u //
+ .combine('mode', ['bytes', 'colors'] as const)
+ .combineWithParams([
+ //
+ { format: 'r8snorm', _maxValue: 127 },
+ ] as const)
+ )
+ .fn(async t => {
+ const { mode, format, _maxValue } = t.params;
+
+ const data = [-_maxValue, -_maxValue - 1];
+
+ const size = [2, 1];
+ const texture = t.device.createTexture({
+ format,
+ size,
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC,
+ });
+ t.trackForCleanup(texture);
+ t.device.queue.writeTexture({ texture }, new Int8Array(data), {}, size);
+
+ let expTexelView;
+ switch (mode) {
+ case 'bytes':
+ {
+ // Actual value should be [-127,-128], expected value is [-128,-127], both should pass.
+ const exp = [-_maxValue - 1, -_maxValue];
+ expTexelView = TexelView.fromTexelsAsBytes(
+ format,
+ coords => new Uint8Array([exp[coords.x]])
+ );
+ }
+ break;
+ case 'colors':
+ expTexelView = TexelView.fromTexelsAsColors(format, _coords => ({ R: -1 }));
+ break;
+ }
+
+ const result = await textureContentIsOKByT2B(
+ t,
+ { texture },
+ size,
+ { expTexelView },
+ { maxDiffULPsForNormFormat: 0 }
+ );
+ t.expectOK(result);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texture_ok.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texture_ok.ts
new file mode 100644
index 0000000000..7b85489246
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texture_ok.ts
@@ -0,0 +1,348 @@
+import { assert, ErrorWithExtra, unreachable } from '../../../common/util/util.js';
+import { kTextureFormatInfo, EncodableTextureFormat } from '../../format_info.js';
+import { GPUTest } from '../../gpu_test.js';
+import { numbersApproximatelyEqual } from '../conversion.js';
+import { generatePrettyTable } from '../pretty_diff_tables.js';
+import { reifyExtent3D, reifyOrigin3D } from '../unions.js';
+
+import { fullSubrectCoordinates } from './base.js';
+import { getTextureSubCopyLayout } from './layout.js';
+import { kTexelRepresentationInfo, PerTexelComponent, TexelComponent } from './texel_data.js';
+import { TexelView } from './texel_view.js';
+
+type PerPixelAtLevel<T> = (coords: Required<GPUOrigin3DDict>) => T;
+
+/** Threshold options for comparing texels of different formats (norm/float/int). */
+export type TexelCompareOptions = {
+ /** Threshold for integer texture formats. Defaults to 0. */
+ maxIntDiff?: number;
+ /** Threshold for non-integer (norm/float) texture formats, if not overridden. */
+ maxFractionalDiff?: number;
+ /** Threshold in ULPs for unorm/snorm texture formats. Overrides `maxFractionalDiff`. */
+ maxDiffULPsForNormFormat?: number;
+ /** Threshold in ULPs for float/ufloat texture formats. Overrides `maxFractionalDiff`. */
+ maxDiffULPsForFloatFormat?: number;
+};
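+
+// Illustrative threshold choices (arbitrary values), matching how makeTexelViewComparer below
+// selects a comparison strategy:
+//   { maxIntDiff: 0 }                  // integer (uint/sint) formats
+//   { maxDiffULPsForNormFormat: 1 }    // unorm/snorm formats
+//   { maxDiffULPsForFloatFormat: 2 }   // float/ufloat formats
+//   { maxFractionalDiff: 0.01 }        // fallback for any non-integer format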
+
+export type PixelExpectation = PerTexelComponent<number> | Uint8Array;
+
+export type PerPixelComparison<E extends PixelExpectation> = {
+ coord: GPUOrigin3D;
+ exp: E;
+};
+
+type TexelViewComparer = {
+ /** Given coords, returns whether the two texel views are considered matching at that point. */
+ predicate: PerPixelAtLevel<boolean>;
+ /**
+ * Given a list of failed coords, returns table rows for `generatePrettyTable` that
+ * display the actual/expected values and diffs for debugging.
+ */
+ tableRows: (failedCoords: readonly Required<GPUOrigin3DDict>[]) => Iterable<string>[];
+};
+
+function makeTexelViewComparer(
+ format: EncodableTextureFormat,
+ { actTexelView, expTexelView }: { actTexelView: TexelView; expTexelView: TexelView },
+ opts: TexelCompareOptions
+): TexelViewComparer {
+ const {
+ maxIntDiff = 0,
+ maxFractionalDiff,
+ maxDiffULPsForNormFormat,
+ maxDiffULPsForFloatFormat,
+ } = opts;
+
+ assert(maxIntDiff >= 0, 'threshold must be non-negative');
+ if (maxFractionalDiff !== undefined) {
+ assert(maxFractionalDiff >= 0, 'threshold must be non-negative');
+ }
+ if (maxDiffULPsForFloatFormat !== undefined) {
+ assert(maxDiffULPsForFloatFormat >= 0, 'threshold must be non-negative');
+ }
+ if (maxDiffULPsForNormFormat !== undefined) {
+ assert(maxDiffULPsForNormFormat >= 0, 'threshold must be non-negative');
+ }
+
+ const fmtIsInt = format.includes('int');
+ const fmtIsNorm = format.includes('norm');
+ const fmtIsFloat = format.includes('float');
+
+ const tvc = {} as TexelViewComparer;
+ if (fmtIsInt) {
+ tvc.predicate = coords =>
+ comparePerComponent(actTexelView.color(coords), expTexelView.color(coords), maxIntDiff);
+ } else if (fmtIsNorm && maxDiffULPsForNormFormat !== undefined) {
+ tvc.predicate = coords =>
+ comparePerComponent(
+ actTexelView.ulpFromZero(coords),
+ expTexelView.ulpFromZero(coords),
+ maxDiffULPsForNormFormat
+ );
+ } else if (fmtIsFloat && maxDiffULPsForFloatFormat !== undefined) {
+ tvc.predicate = coords =>
+ comparePerComponent(
+ actTexelView.ulpFromZero(coords),
+ expTexelView.ulpFromZero(coords),
+ maxDiffULPsForFloatFormat
+ );
+ } else if (maxFractionalDiff !== undefined) {
+ tvc.predicate = coords =>
+ comparePerComponent(
+ actTexelView.color(coords),
+ expTexelView.color(coords),
+ maxFractionalDiff
+ );
+ } else {
+ if (fmtIsNorm) {
+ unreachable('need maxFractionalDiff or maxDiffULPsForNormFormat to compare norm textures');
+ } else if (fmtIsFloat) {
+ unreachable('need maxFractionalDiff or maxDiffULPsForFloatFormat to compare float textures');
+ } else {
+ unreachable();
+ }
+ }
+
+ const repr = kTexelRepresentationInfo[format];
+ if (fmtIsInt) {
+ tvc.tableRows = failedCoords => [
+ [`tolerance ± ${maxIntDiff}`],
+ (function* () {
+ yield* [` diff (act - exp)`, '==', ''];
+ for (const coords of failedCoords) {
+ const act = actTexelView.color(coords);
+ const exp = expTexelView.color(coords);
+ yield repr.componentOrder.map(ch => act[ch]! - exp[ch]!).join(',');
+ }
+ })(),
+ ];
+ } else if (
+ (fmtIsNorm && maxDiffULPsForNormFormat !== undefined) ||
+ (fmtIsFloat && maxDiffULPsForFloatFormat !== undefined)
+ ) {
+ const toleranceULPs = fmtIsNorm ? maxDiffULPsForNormFormat! : maxDiffULPsForFloatFormat!;
+ tvc.tableRows = failedCoords => [
+ [`tolerance ± ${toleranceULPs} normal-ULPs`],
+ (function* () {
+ yield* [` diff (act - exp) in normal-ULPs`, '==', ''];
+ for (const coords of failedCoords) {
+ const act = actTexelView.ulpFromZero(coords);
+ const exp = expTexelView.ulpFromZero(coords);
+ yield repr.componentOrder.map(ch => act[ch]! - exp[ch]!).join(',');
+ }
+ })(),
+ ];
+ } else {
+ assert(maxFractionalDiff !== undefined);
+ tvc.tableRows = failedCoords => [
+ [`tolerance ± ${maxFractionalDiff}`],
+ (function* () {
+ yield* [` diff (act - exp)`, '==', ''];
+ for (const coords of failedCoords) {
+ const act = actTexelView.color(coords);
+ const exp = expTexelView.color(coords);
+ yield repr.componentOrder.map(ch => (act[ch]! - exp[ch]!).toPrecision(4)).join(',');
+ }
+ })(),
+ ];
+ }
+
+ return tvc;
+}
+
+function comparePerComponent(
+ actual: PerTexelComponent<number>,
+ expected: PerTexelComponent<number>,
+ maxDiff: number
+) {
+ return Object.keys(actual).every(key => {
+ const k = key as TexelComponent;
+ const act = actual[k]!;
+ const exp = expected[k];
+ if (exp === undefined) return false;
+ return numbersApproximatelyEqual(act, exp, maxDiff);
+ });
+}
+
+/** Create a new mappable GPUBuffer, and copy a subrectangle of GPUTexture data into it. */
+function createTextureCopyForMapRead(
+ t: GPUTest,
+ source: GPUImageCopyTexture,
+ copySize: GPUExtent3D,
+ { format }: { format: EncodableTextureFormat }
+): { buffer: GPUBuffer; bytesPerRow: number; rowsPerImage: number } {
+ const { byteLength, bytesPerRow, rowsPerImage } = getTextureSubCopyLayout(format, copySize, {
+ aspect: source.aspect,
+ });
+
+ const buffer = t.device.createBuffer({
+ usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ,
+ size: byteLength,
+ });
+ t.trackForCleanup(buffer);
+
+ const cmd = t.device.createCommandEncoder();
+ cmd.copyTextureToBuffer(source, { buffer, bytesPerRow, rowsPerImage }, copySize);
+ t.device.queue.submit([cmd.finish()]);
+
+ return { buffer, bytesPerRow, rowsPerImage };
+}
+
+export function findFailedPixels(
+ format: EncodableTextureFormat,
+ subrectOrigin: Required<GPUOrigin3DDict>,
+ subrectSize: Required<GPUExtent3DDict>,
+ { actTexelView, expTexelView }: { actTexelView: TexelView; expTexelView: TexelView },
+ texelCompareOptions: TexelCompareOptions,
+ coords?: Generator<Required<GPUOrigin3DDict>>
+) {
+ const comparer = makeTexelViewComparer(
+ format,
+ { actTexelView, expTexelView },
+ texelCompareOptions
+ );
+
+ const lowerCorner = [subrectSize.width, subrectSize.height, subrectSize.depthOrArrayLayers];
+ const upperCorner = [0, 0, 0];
+ const failedPixels: Required<GPUOrigin3DDict>[] = [];
+ for (const coord of coords ?? fullSubrectCoordinates(subrectOrigin, subrectSize)) {
+ const { x, y, z } = coord;
+ if (!comparer.predicate(coord)) {
+ failedPixels.push(coord);
+ lowerCorner[0] = Math.min(lowerCorner[0], x);
+ lowerCorner[1] = Math.min(lowerCorner[1], y);
+ lowerCorner[2] = Math.min(lowerCorner[2], z);
+ upperCorner[0] = Math.max(upperCorner[0], x);
+ upperCorner[1] = Math.max(upperCorner[1], y);
+ upperCorner[2] = Math.max(upperCorner[2], z);
+ }
+ }
+ if (failedPixels.length === 0) {
+ return undefined;
+ }
+
+ const info = kTextureFormatInfo[format];
+ const repr = kTexelRepresentationInfo[format];
+
+ const integerSampleType = info.sampleType === 'uint' || info.sampleType === 'sint';
+ const numberToString = integerSampleType
+ ? (n: number) => n.toFixed()
+ : (n: number) => n.toPrecision(6);
+
+ const componentOrderStr = repr.componentOrder.join(',') + ':';
+
+ const printCoords = (function* () {
+ yield* [' coords', '==', 'X,Y,Z:'];
+ for (const coords of failedPixels) yield `${coords.x},${coords.y},${coords.z}`;
+ })();
+ const printActualBytes = (function* () {
+ yield* [' act. texel bytes (little-endian)', '==', '0x:'];
+ for (const coords of failedPixels) {
+ yield Array.from(actTexelView.bytes(coords), b => b.toString(16).padStart(2, '0')).join(' ');
+ }
+ })();
+ const printActualColors = (function* () {
+ yield* [' act. colors', '==', componentOrderStr];
+ for (const coords of failedPixels) {
+ const pixel = actTexelView.color(coords);
+ yield `${repr.componentOrder.map(ch => numberToString(pixel[ch]!)).join(',')}`;
+ }
+ })();
+ const printExpectedColors = (function* () {
+ yield* [' exp. colors', '==', componentOrderStr];
+ for (const coords of failedPixels) {
+ const pixel = expTexelView.color(coords);
+ yield `${repr.componentOrder.map(ch => numberToString(pixel[ch]!)).join(',')}`;
+ }
+ })();
+ const printActualULPs = (function* () {
+ yield* [' act. normal-ULPs-from-zero', '==', componentOrderStr];
+ for (const coords of failedPixels) {
+ const pixel = actTexelView.ulpFromZero(coords);
+ yield `${repr.componentOrder.map(ch => pixel[ch]).join(',')}`;
+ }
+ })();
+ const printExpectedULPs = (function* () {
+ yield* [` exp. normal-ULPs-from-zero`, '==', componentOrderStr];
+ for (const coords of failedPixels) {
+ const pixel = expTexelView.ulpFromZero(coords);
+ yield `${repr.componentOrder.map(ch => pixel[ch]).join(',')}`;
+ }
+ })();
+
+ const opts = {
+ fillToWidth: 120,
+ numberToString,
+ };
+ return `\
+ between ${lowerCorner} and ${upperCorner} inclusive:
+${generatePrettyTable(opts, [
+ printCoords,
+ printActualBytes,
+ printActualColors,
+ printExpectedColors,
+ printActualULPs,
+ printExpectedULPs,
+ ...comparer.tableRows(failedPixels),
+])}`;
+}
+
+/**
+ * Check the contents of a GPUTexture by reading it back (with copyTextureToBuffer+mapAsync), then
+ * comparing the data with the data in `expTexelView`.
+ *
+ * The actual and expected texture data are both converted to the "NormalULPFromZero" format,
+ * which is a signed number representing how far the number is from zero, in ULPs, skipping
+ * subnormal numbers (where ULP is defined for float, normalized, and integer formats).
+ */
+export async function textureContentIsOKByT2B(
+ t: GPUTest,
+ source: GPUImageCopyTexture,
+ copySize_: GPUExtent3D,
+ { expTexelView }: { expTexelView: TexelView },
+ texelCompareOptions: TexelCompareOptions,
+ coords?: Generator<Required<GPUOrigin3DDict>>
+): Promise<ErrorWithExtra | undefined> {
+ const subrectOrigin = reifyOrigin3D(source.origin ?? [0, 0, 0]);
+ const subrectSize = reifyExtent3D(copySize_);
+ const format = expTexelView.format;
+
+ const { buffer, bytesPerRow, rowsPerImage } = createTextureCopyForMapRead(
+ t,
+ source,
+ subrectSize,
+ { format }
+ );
+
+ await buffer.mapAsync(GPUMapMode.READ);
+ const data = new Uint8Array(buffer.getMappedRange());
+
+ const texelViewConfig = {
+ bytesPerRow,
+ rowsPerImage,
+ subrectOrigin,
+ subrectSize,
+ } as const;
+
+ const actTexelView = TexelView.fromTextureDataByReference(format, data, texelViewConfig);
+
+ const failedPixelsMessage = findFailedPixels(
+ format,
+ subrectOrigin,
+ subrectSize,
+ { actTexelView, expTexelView },
+ texelCompareOptions,
+ coords
+ );
+
+ if (failedPixelsMessage === undefined) {
+ return undefined;
+ }
+
+ const msg = 'Texture level had unexpected contents:\n' + failedPixelsMessage;
+ return new ErrorWithExtra(msg, () => ({
+ expTexelView,
+ // Make a new TexelView with a copy of the data so we can unmap the buffer (debug mode only).
+ actTexelView: TexelView.fromTextureDataByReference(format, data.slice(), texelViewConfig),
+ }));
+}
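+
+// Illustrative usage sketch inside a GPUTest (not exercised here; `texture` and the threshold
+// are arbitrary examples; see texture_ok.spec.ts for real uses):
+//
+//   const expTexelView = TexelView.fromTexelsAsColors('rgba8unorm', () => ({ R: 1, G: 0, B: 0, A: 1 }));
+//   const error = await textureContentIsOKByT2B(
+//     t, { texture }, [texture.width, texture.height], { expTexelView },
+//     { maxDiffULPsForNormFormat: 1 }
+//   );
+//   t.expectOK(error); // undefined means every texel matched within the threshold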
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/unions.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/unions.ts
new file mode 100644
index 0000000000..2f9e8b64d3
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/unions.ts
@@ -0,0 +1,45 @@
+/**
+ * Reifies a `GPUOrigin3D` into a `Required<GPUOrigin3DDict>`.
+ */
+export function reifyOrigin3D(
+ val: Readonly<GPUOrigin3DDict> | Iterable<number>
+): Required<GPUOrigin3DDict> {
+ if (Symbol.iterator in val) {
+ const v = Array.from(val);
+ return {
+ x: (v[0] ?? 0) | 0,
+ y: (v[1] ?? 0) | 0,
+ z: (v[2] ?? 0) | 0,
+ };
+ } else {
+ const v = val;
+ return {
+ x: (v.x ?? 0) | 0,
+ y: (v.y ?? 0) | 0,
+ z: (v.z ?? 0) | 0,
+ };
+ }
+}
+
+/**
+ * Reifies a `GPUExtent3D` into a `Required<GPUExtent3DDict>`.
+ */
+export function reifyExtent3D(
+ val: Readonly<GPUExtent3DDict> | Iterable<number>
+): Required<GPUExtent3DDict> {
+ if (Symbol.iterator in val) {
+ const v = Array.from(val);
+ return {
+ width: (v[0] ?? 1) | 0,
+ height: (v[1] ?? 1) | 0,
+ depthOrArrayLayers: (v[2] ?? 1) | 0,
+ };
+ } else {
+ const v = val;
+ return {
+ width: (v.width ?? 1) | 0,
+ height: (v.height ?? 1) | 0,
+ depthOrArrayLayers: (v.depthOrArrayLayers ?? 1) | 0,
+ };
+ }
+}
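+
+// Illustrative examples: both the array form and the dictionary form reify to the same shape.
+//   reifyOrigin3D([1, 2]);                    // -> { x: 1, y: 2, z: 0 }
+//   reifyOrigin3D({ x: 1 });                  // -> { x: 1, y: 0, z: 0 }
+//   reifyExtent3D([16]);                      // -> { width: 16, height: 1, depthOrArrayLayers: 1 }
+//   reifyExtent3D({ width: 16, height: 8 });  // -> { width: 16, height: 8, depthOrArrayLayers: 1 }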
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/README.txt
new file mode 100644
index 0000000000..802f5b17a2
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/README.txt
@@ -0,0 +1,5 @@
+Tests for Web platform-specific interactions like GPUCanvasContext and canvas, WebXR,
+ImageBitmaps, and video APIs.
+
+TODO(#922): Also hopefully tests for user-initiated readbacks from WebGPU canvases
+(printing, save image as, etc.)
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/canvas/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/canvas/README.txt
new file mode 100644
index 0000000000..83194d5b11
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/canvas/README.txt
@@ -0,0 +1 @@
+Tests for WebGPU <canvas> and OffscreenCanvas presentation.
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/canvas/configure.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/canvas/configure.spec.ts
new file mode 100644
index 0000000000..163930e20e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/canvas/configure.spec.ts
@@ -0,0 +1,426 @@
+export const description = `
+Tests for GPUCanvasContext.configure.
+
+TODO:
+- Test colorSpace
+- Test viewFormats
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { assert } from '../../../common/util/util.js';
+import { kCanvasTextureFormats, kTextureUsages } from '../../capability_info.js';
+import { GPUConst } from '../../constants.js';
+import {
+ kAllTextureFormats,
+ kFeaturesForFormats,
+ kTextureFormats,
+ filterFormatsByFeature,
+ viewCompatible,
+} from '../../format_info.js';
+import { GPUTest } from '../../gpu_test.js';
+import { kAllCanvasTypes, createCanvas } from '../../util/create_elements.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('defaults')
+ .desc(
+ `
+ Ensure that the defaults for GPUCanvasConfiguration are correct.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('canvasType', kAllCanvasTypes)
+ )
+ .fn(t => {
+ const { canvasType } = t.params;
+ const canvas = createCanvas(t, canvasType, 2, 2);
+ const ctx = canvas.getContext('webgpu');
+ assert(ctx instanceof GPUCanvasContext, 'Failed to get WebGPU context from canvas');
+
+ ctx.configure({
+ device: t.device,
+ format: 'rgba8unorm',
+ });
+
+ const currentTexture = ctx.getCurrentTexture();
+ t.expect(currentTexture.format === 'rgba8unorm');
+ t.expect(currentTexture.usage === GPUTextureUsage.RENDER_ATTACHMENT);
+ t.expect(currentTexture.dimension === '2d');
+ t.expect(currentTexture.width === canvas.width);
+ t.expect(currentTexture.height === canvas.height);
+ t.expect(currentTexture.depthOrArrayLayers === 1);
+ t.expect(currentTexture.mipLevelCount === 1);
+ t.expect(currentTexture.sampleCount === 1);
+ });
+
+g.test('device')
+ .desc(
+ `
+ Ensure that configure reacts appropriately to various device states.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('canvasType', kAllCanvasTypes)
+ )
+ .fn(t => {
+ const { canvasType } = t.params;
+ const canvas = createCanvas(t, canvasType, 2, 2);
+ const ctx = canvas.getContext('webgpu');
+ assert(ctx instanceof GPUCanvasContext, 'Failed to get WebGPU context from canvas');
+
+ // Calling configure without a device should throw a TypeError.
+ t.shouldThrow('TypeError', () => {
+ ctx.configure({
+ format: 'rgba8unorm',
+ } as GPUCanvasConfiguration);
+ });
+
+    // The context is not configured with a device, so getCurrentTexture will throw an InvalidStateError.
+ t.shouldThrow('InvalidStateError', () => {
+ ctx.getCurrentTexture();
+ });
+
+ // Calling configure with a device should succeed.
+ ctx.configure({
+ device: t.device,
+ format: 'rgba8unorm',
+ });
+
+ // getCurrentTexture will succeed with a valid device.
+ ctx.getCurrentTexture();
+
+    // Unconfiguring should clear the context's configuration, including the device.
+ ctx.unconfigure();
+ t.shouldThrow('InvalidStateError', () => {
+ ctx.getCurrentTexture();
+ });
+
+ // Should be able to successfully configure again after unconfiguring.
+ ctx.configure({
+ device: t.device,
+ format: 'rgba8unorm',
+ });
+ ctx.getCurrentTexture();
+ });
+
+g.test('format')
+ .desc(
+ `
+ Ensure that only valid texture formats are allowed when calling configure.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('canvasType', kAllCanvasTypes)
+ .combine('format', kAllTextureFormats)
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceForTextureFormatOrSkipTestCase(t.params.format);
+ })
+ .fn(t => {
+ const { canvasType, format } = t.params;
+ const canvas = createCanvas(t, canvasType, 2, 2);
+ const ctx = canvas.getContext('webgpu');
+ assert(ctx instanceof GPUCanvasContext, 'Failed to get WebGPU context from canvas');
+
+ // Would prefer to use kCanvasTextureFormats.includes(format), but that's giving TS errors.
+ let validFormat = false;
+ for (const canvasFormat of kCanvasTextureFormats) {
+ if (format === canvasFormat) {
+ validFormat = true;
+ break;
+ }
+ }
+
+ t.expectValidationError(() => {
+ ctx.configure({
+ device: t.device,
+ format,
+ });
+ }, !validFormat);
+
+ t.expectValidationError(() => {
+ // Should always return a texture, whether the configured format was valid or not.
+ const currentTexture = ctx.getCurrentTexture();
+ t.expect(currentTexture instanceof GPUTexture);
+ }, !validFormat);
+ });
+
+g.test('usage')
+ .desc(
+ `
+ Ensure that getCurrentTexture returns a texture with the configured usages.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('canvasType', kAllCanvasTypes)
+ .beginSubcases()
+ .expand('usage', () => {
+ const usageSet = new Set<number>();
+ for (const usage0 of kTextureUsages) {
+ for (const usage1 of kTextureUsages) {
+ usageSet.add(usage0 | usage1);
+ }
+ }
+ return usageSet;
+ })
+ )
+ .fn(t => {
+ const { canvasType, usage } = t.params;
+ const canvas = createCanvas(t, canvasType, 2, 2);
+ const ctx = canvas.getContext('webgpu');
+ assert(ctx instanceof GPUCanvasContext, 'Failed to get WebGPU context from canvas');
+
+ ctx.configure({
+ device: t.device,
+ format: 'rgba8unorm',
+ usage,
+ });
+
+ const currentTexture = ctx.getCurrentTexture();
+ t.expect(currentTexture instanceof GPUTexture);
+ t.expect(currentTexture.usage === usage);
+
+ // Try to use the texture with the given usage
+
+ if (usage & GPUConst.TextureUsage.RENDER_ATTACHMENT) {
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: currentTexture.createView(),
+ clearValue: [1.0, 0.0, 0.0, 1.0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+ }
+
+ if (usage & GPUConst.TextureUsage.TEXTURE_BINDING) {
+ const bgl = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.FRAGMENT,
+ texture: {},
+ },
+ ],
+ });
+
+ t.device.createBindGroup({
+ layout: bgl,
+ entries: [
+ {
+ binding: 0,
+ resource: currentTexture.createView(),
+ },
+ ],
+ });
+ }
+
+ if (usage & GPUConst.TextureUsage.STORAGE_BINDING) {
+ const bgl = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.FRAGMENT,
+ storageTexture: { access: 'write-only', format: currentTexture.format },
+ },
+ ],
+ });
+
+ t.device.createBindGroup({
+ layout: bgl,
+ entries: [
+ {
+ binding: 0,
+ resource: currentTexture.createView(),
+ },
+ ],
+ });
+ }
+
+ if (usage & GPUConst.TextureUsage.COPY_DST) {
+ const rgbaData = new Uint8Array([255, 0, 0, 255]);
+
+ t.device.queue.writeTexture({ texture: currentTexture }, rgbaData, {}, [1, 1, 1]);
+ }
+
+ if (usage & GPUConst.TextureUsage.COPY_SRC) {
+ const size = [currentTexture.width, currentTexture.height, 1];
+ const dstTexture = t.device.createTexture({
+ format: currentTexture.format,
+ usage: GPUTextureUsage.COPY_DST,
+ size,
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ encoder.copyTextureToTexture({ texture: currentTexture }, { texture: dstTexture }, size);
+ t.device.queue.submit([encoder.finish()]);
+ }
+ });
+
+g.test('alpha_mode')
+ .desc(
+ `
+ Ensure that all valid alphaMode values are allowed when calling configure.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('canvasType', kAllCanvasTypes)
+ .beginSubcases()
+ .combine('alphaMode', ['opaque', 'premultiplied'] as const)
+ )
+ .fn(t => {
+ const { canvasType, alphaMode } = t.params;
+ const canvas = createCanvas(t, canvasType, 2, 2);
+ const ctx = canvas.getContext('webgpu');
+ assert(ctx instanceof GPUCanvasContext, 'Failed to get WebGPU context from canvas');
+
+ ctx.configure({
+ device: t.device,
+ format: 'rgba8unorm',
+ alphaMode,
+ });
+
+ const currentTexture = ctx.getCurrentTexture();
+ t.expect(currentTexture instanceof GPUTexture);
+ });
+
+g.test('size_zero_before_configure')
+ .desc(`Ensure a validation error is raised in configure() if the size of the canvas is zero.`)
+ .params(u =>
+ u //
+ .combine('canvasType', kAllCanvasTypes)
+ .combine('zeroDimension', ['width', 'height'] as const)
+ )
+ .fn(t => {
+ const { canvasType, zeroDimension } = t.params;
+ const canvas = createCanvas(t, canvasType, 1, 1);
+ canvas[zeroDimension] = 0;
+ const ctx = canvas.getContext('webgpu');
+ assert(ctx instanceof GPUCanvasContext, 'Failed to get WebGPU context from canvas');
+
+    // Validation error: the canvas size is 0, which doesn't make a valid GPUTextureDescriptor.
+ t.expectValidationError(() => {
+ ctx.configure({
+ device: t.device,
+ format: 'bgra8unorm',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ });
+
+ canvas[zeroDimension] = 1;
+
+    // An incorrect size doesn't make the configuration invalid. Now that the size is fixed,
+    // getting textures from the canvas should work.
+ const currentTexture = ctx.getCurrentTexture();
+
+ // Try rendering to it even!
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: currentTexture.createView(),
+ clearValue: [1.0, 0.0, 0.0, 1.0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+ });
+
+g.test('size_zero_after_configure')
+ .desc(
+ `Ensure a validation error is raised after configure() if the size of the canvas becomes zero.`
+ )
+ .params(u =>
+ u //
+ .combine('canvasType', kAllCanvasTypes)
+ .combine('zeroDimension', ['width', 'height'] as const)
+ )
+ .fn(t => {
+ const { canvasType, zeroDimension } = t.params;
+ const canvas = createCanvas(t, canvasType, 1, 1);
+ const ctx = canvas.getContext('webgpu');
+ assert(ctx instanceof GPUCanvasContext, 'Failed to get WebGPU context from canvas');
+
+ ctx.configure({
+ device: t.device,
+ format: 'bgra8unorm',
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ canvas[zeroDimension] = 0;
+
+    // The size is incorrect, so we should get an error texture and a validation error.
+ let currentTexture: GPUTexture;
+ t.expectValidationError(() => {
+ currentTexture = ctx.getCurrentTexture();
+ });
+
+ t.expect(currentTexture![zeroDimension] === 0);
+
+ // Using the texture should produce a validation error.
+ t.expectValidationError(() => {
+ currentTexture.createView();
+ });
+ });
+
+g.test('viewFormats')
+ .desc(
+ `Test the validation that viewFormats are compatible with the format (for all canvas format / view formats)`
+ )
+ .params(u =>
+ u
+ .combine('canvasType', kAllCanvasTypes)
+ .combine('format', kCanvasTextureFormats)
+ .combine('viewFormatFeature', kFeaturesForFormats)
+ .beginSubcases()
+ .expand('viewFormat', ({ viewFormatFeature }) =>
+ filterFormatsByFeature(viewFormatFeature, kTextureFormats)
+ )
+ )
+ .beforeAllSubcases(t => {
+ t.selectDeviceOrSkipTestCase([t.params.viewFormatFeature]);
+ })
+ .fn(t => {
+ const { canvasType, format, viewFormat } = t.params;
+
+ t.skipIfTextureFormatNotSupported(viewFormat);
+
+ const canvas = createCanvas(t, canvasType, 1, 1);
+ const ctx = canvas.getContext('webgpu');
+ assert(ctx instanceof GPUCanvasContext, 'Failed to get WebGPU context from canvas');
+
+ const compatible = viewCompatible(format, viewFormat);
+
+ // Test configure() produces an error if the formats aren't compatible.
+ t.expectValidationError(() => {
+ ctx.configure({
+ device: t.device,
+ format,
+ viewFormats: [viewFormat],
+ });
+ }, !compatible);
+
+ // Likewise for getCurrentTexture().
+ let currentTexture: GPUTexture;
+ t.expectValidationError(() => {
+ currentTexture = ctx.getCurrentTexture();
+ }, !compatible);
+
+ // The returned texture is an error texture.
+ t.expectValidationError(() => {
+ currentTexture.createView();
+ }, !compatible);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/canvas/context_creation.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/canvas/context_creation.spec.ts
new file mode 100644
index 0000000000..3f016cffcd
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/canvas/context_creation.spec.ts
@@ -0,0 +1,47 @@
+export const description = `
+Tests for canvas context creation.
+
+Note there are no context creation attributes for WebGPU (as of this writing).
+Options are configured in configure() instead.
+`;
+
+import { Fixture } from '../../../common/framework/fixture.js';
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+
+export const g = makeTestGroup(Fixture);
+
+g.test('return_type')
+ .desc(
+ `Test the return type of getContext for WebGPU.
+
+ TODO: Test OffscreenCanvas made from transferControlToOffscreen.`
+ )
+ .params(u =>
+ u //
+ .combine('offscreen', [false, true])
+ .beginSubcases()
+ .combine('attributes', [undefined, {}])
+ )
+ .fn(t => {
+ let canvas: HTMLCanvasElement | OffscreenCanvas;
+ if (t.params.offscreen) {
+ if (typeof OffscreenCanvas === 'undefined') {
+ // Skip if the current context doesn't have OffscreenCanvas (e.g. Node).
+ t.skip('OffscreenCanvas is not available in this context');
+ }
+
+ canvas = new OffscreenCanvas(10, 10);
+ } else {
+ if (typeof document === 'undefined') {
+ // Skip if there is no document (Workers, Node)
+ t.skip('DOM is not available to create canvas element');
+ }
+
+ canvas = document.createElement('canvas', t.params.attributes);
+ canvas.width = 10;
+ canvas.height = 10;
+ }
+
+ const ctx = canvas.getContext('webgpu');
+ t.expect(ctx instanceof GPUCanvasContext);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/canvas/getCurrentTexture.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/canvas/getCurrentTexture.spec.ts
new file mode 100644
index 0000000000..609dacb907
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/canvas/getCurrentTexture.spec.ts
@@ -0,0 +1,383 @@
+export const description = `
+Tests for GPUCanvasContext.getCurrentTexture.
+`;
+
+import { SkipTestCase } from '../../../common/framework/fixture.js';
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { timeout } from '../../../common/util/timeout.js';
+import { assert, unreachable } from '../../../common/util/util.js';
+import { GPUTest } from '../../gpu_test.js';
+import { kAllCanvasTypes, createCanvas, CanvasType } from '../../util/create_elements.js';
+
+const kFormat = 'bgra8unorm';
+
+class GPUContextTest extends GPUTest {
+ initCanvasContext(canvasType: CanvasType = 'onscreen'): GPUCanvasContext {
+ const canvas = createCanvas(this, canvasType, 2, 2);
+ if (canvasType === 'onscreen') {
+      // Make sure onscreen canvases are visible.
+ const onscreencanvas = canvas as HTMLCanvasElement;
+ onscreencanvas.style.position = 'fixed';
+ onscreencanvas.style.top = '0';
+ onscreencanvas.style.left = '0';
+      // Make it semi-transparent so that if multiple canvases are created, they all remain visible.
+ onscreencanvas.style.opacity = '50%';
+ document.body.appendChild(onscreencanvas);
+ this.trackForCleanup({
+ close() {
+ document.body.removeChild(onscreencanvas);
+ },
+ });
+ }
+ const ctx = canvas.getContext('webgpu');
+ assert(ctx instanceof GPUCanvasContext, 'Failed to get WebGPU context from canvas');
+
+ ctx.configure({
+ device: this.device,
+ format: kFormat,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ });
+
+ return ctx;
+ }
+}
+
+export const g = makeTestGroup(GPUContextTest);
+
+g.test('configured')
+ .desc(
+ `Checks that calling getCurrentTexture requires the context to be configured first, and
+ that each call to configure causes getCurrentTexture to return a new texture.`
+ )
+ .params(u =>
+ u //
+ .combine('canvasType', kAllCanvasTypes)
+ )
+ .fn(t => {
+ const canvas = createCanvas(t, t.params.canvasType, 2, 2);
+ const ctx = canvas.getContext('webgpu');
+ assert(ctx instanceof GPUCanvasContext, 'Failed to get WebGPU context from canvas');
+
+ // Calling getCurrentTexture prior to configuration should throw an InvalidStateError exception.
+ t.shouldThrow('InvalidStateError', () => {
+ ctx.getCurrentTexture();
+ });
+
+ // Once the context has been configured getCurrentTexture can be called.
+ ctx.configure({
+ device: t.device,
+ format: kFormat,
+ });
+
+ let prevTexture = ctx.getCurrentTexture();
+
+ // Calling configure again with different values will change the texture returned.
+ ctx.configure({
+ device: t.device,
+ format: 'bgra8unorm',
+ });
+
+ let currentTexture = ctx.getCurrentTexture();
+ t.expect(prevTexture !== currentTexture);
+ prevTexture = currentTexture;
+
+ // Calling configure again with the same values will still change the texture returned.
+ ctx.configure({
+ device: t.device,
+ format: 'bgra8unorm',
+ });
+
+ currentTexture = ctx.getCurrentTexture();
+ t.expect(prevTexture !== currentTexture);
+ prevTexture = currentTexture;
+
+ // Calling getCurrentTexture after calling unconfigure should throw an InvalidStateError exception.
+ ctx.unconfigure();
+
+ t.shouldThrow('InvalidStateError', () => {
+ ctx.getCurrentTexture();
+ });
+ });
+
+g.test('single_frames')
+ .desc(`Checks that the value of getCurrentTexture is consistent within a single frame.`)
+ .params(u =>
+ u //
+ .combine('canvasType', kAllCanvasTypes)
+ )
+ .fn(t => {
+ const ctx = t.initCanvasContext(t.params.canvasType);
+ const frameTexture = ctx.getCurrentTexture();
+
+ // Calling getCurrentTexture a second time returns the same texture.
+ t.expect(frameTexture === ctx.getCurrentTexture());
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: frameTexture.createView(),
+ clearValue: [1.0, 0.0, 0.0, 1.0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+
+ // Calling getCurrentTexture after performing some work on the texture returns the same texture.
+ t.expect(frameTexture === ctx.getCurrentTexture());
+
+ // Ensure that getCurrentTexture does not clear the texture.
+ t.expectSingleColor(frameTexture, frameTexture.format, {
+ size: [frameTexture.width, frameTexture.height, 1],
+ exp: { R: 1, G: 0, B: 0, A: 1 },
+ });
+
+ frameTexture.destroy();
+
+ // Calling getCurrentTexture after destroying the texture still returns the same texture.
+ t.expect(frameTexture === ctx.getCurrentTexture());
+ });
+
+g.test('multiple_frames')
+ .desc(`Checks that the value of getCurrentTexture differs across multiple frames.`)
+ .params(u =>
+ u //
+ .combine('canvasType', kAllCanvasTypes)
+ .beginSubcases()
+ .combine('clearTexture', [true, false])
+ )
+ .beforeAllSubcases(t => {
+ const { canvasType } = t.params;
+ if (canvasType === 'offscreen' && !('transferToImageBitmap' in OffscreenCanvas.prototype)) {
+ throw new SkipTestCase('transferToImageBitmap not supported');
+ }
+ })
+ .fn(t => {
+ const { canvasType, clearTexture } = t.params;
+
+ return new Promise(resolve => {
+ const ctx = t.initCanvasContext(canvasType);
+ let prevTexture: GPUTexture | undefined;
+ let frameCount = 0;
+
+ function frameCheck() {
+ const currentTexture = ctx.getCurrentTexture();
+
+ if (prevTexture) {
+ // Ensure that each frame a new texture object is returned.
+ t.expect(currentTexture !== prevTexture);
+
+ // Ensure that texture contents are transparent black.
+ t.expectSingleColor(currentTexture, currentTexture.format, {
+ size: [currentTexture.width, currentTexture.height, 1],
+ exp: { R: 0, G: 0, B: 0, A: 0 },
+ });
+ }
+
+ if (clearTexture) {
+ // Clear the texture to test that texture contents don't carry over from frame to frame.
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: currentTexture.createView(),
+ clearValue: [1.0, 0.0, 0.0, 1.0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+ }
+
+ prevTexture = currentTexture;
+
+ if (frameCount++ < 5) {
+ // Which method will be used to begin a new "frame"?
+ switch (canvasType) {
+ case 'onscreen':
+ requestAnimationFrame(frameCheck);
+ break;
+ case 'offscreen': {
+ (ctx.canvas as OffscreenCanvas).transferToImageBitmap();
+ frameCheck();
+ break;
+ }
+ default:
+ unreachable();
+ }
+ } else {
+ resolve();
+ }
+ }
+
+    // Call frameCheck for the first time from requestAnimationFrame, to make sure consecutive
+    // frameChecks run in different frames for onscreen canvases. Offscreen canvases don't care.
+ requestAnimationFrame(frameCheck);
+ });
+ });
+
+g.test('resize')
+ .desc(`Checks the value of getCurrentTexture differs when the canvas is resized.`)
+ .params(u =>
+ u //
+ .combine('canvasType', kAllCanvasTypes)
+ )
+ .fn(t => {
+ const ctx = t.initCanvasContext(t.params.canvasType);
+ let prevTexture = ctx.getCurrentTexture();
+
+ // Trigger a resize by changing the width.
+ ctx.canvas.width = 4;
+
+    // When the canvas is resized, getCurrentTexture should immediately begin returning a new
+    // texture matching the updated dimensions.
+ let currentTexture = ctx.getCurrentTexture();
+ t.expect(prevTexture !== currentTexture);
+ t.expect(currentTexture.width === ctx.canvas.width);
+ t.expect(currentTexture.height === ctx.canvas.height);
+
+ // The width and height of the previous texture should remain unchanged.
+ t.expect(prevTexture.width === 2);
+ t.expect(prevTexture.height === 2);
+ prevTexture = currentTexture;
+
+ // Ensure that texture contents are transparent black.
+ t.expectSingleColor(currentTexture, currentTexture.format, {
+ size: [currentTexture.width, currentTexture.height, 1],
+ exp: { R: 0, G: 0, B: 0, A: 0 },
+ });
+
+ // Trigger a resize by changing the height.
+ ctx.canvas.height = 4;
+
+ // Check to ensure the texture is resized again.
+ currentTexture = ctx.getCurrentTexture();
+ t.expect(prevTexture !== currentTexture);
+ t.expect(currentTexture.width === ctx.canvas.width);
+ t.expect(currentTexture.height === ctx.canvas.height);
+ t.expect(prevTexture.width === 4);
+ t.expect(prevTexture.height === 2);
+ prevTexture = currentTexture;
+
+ // Ensure that texture contents are transparent black.
+ t.expectSingleColor(currentTexture, currentTexture.format, {
+ size: [currentTexture.width, currentTexture.height, 1],
+ exp: { R: 0, G: 0, B: 0, A: 0 },
+ });
+
+ // Simply setting the canvas width and height values to their current values should not trigger
+ // a change in the texture.
+ ctx.canvas.width = 4;
+ ctx.canvas.height = 4;
+
+ currentTexture = ctx.getCurrentTexture();
+ t.expect(prevTexture === currentTexture);
+ });
+
+g.test('expiry')
+ .desc(
+ `
+Test automatic WebGPU canvas texture expiry on all canvas types with the following requirements:
+- getCurrentTexture returns the same texture object until the next task:
+  - after the previous frame's "update the rendering" step
+  - before the current frame's "update the rendering" step
+  - in a microtask off the current frame task
+- getCurrentTexture returns a new texture object, and the old texture object becomes invalid,
+  as soon as possible after the HTML "update the rendering" step.
+
+TODO: test more canvas types, and ways to update the rendering
+- if on a different thread, expiry happens when the worker updates its rendering (worker "rPAF") OR transferToImageBitmap is called
+- [draw, transferControlToOffscreen, then canvas is displayed] on either {main thread, or transferred to worker}
+- [draw, canvas is displayed, then transferControlToOffscreen] on either {main thread, or transferred to worker}
+- reftests for the above 2 (what gets displayed when the canvas is displayed)
+- with canvas element added to DOM or not (applies to other canvas tests as well)
+ - canvas is added to DOM after being rendered
+ - canvas is already in DOM but becomes visible after being rendered
+ `
+ )
+ .params(u =>
+ u //
+ .combine('canvasType', kAllCanvasTypes)
+ .combine('prevFrameCallsite', ['runInNewCanvasFrame', 'requestAnimationFrame'] as const)
+ .combine('getCurrentTextureAgain', [true, false] as const)
+ )
+ .fn(t => {
+ const { canvasType, prevFrameCallsite, getCurrentTextureAgain } = t.params;
+ const ctx = t.initCanvasContext(t.params.canvasType);
+ // Create a bindGroupLayout to test invalid texture view usage later.
+ const bgl = t.device.createBindGroupLayout({
+ entries: [
+ {
+ binding: 0,
+ visibility: GPUShaderStage.COMPUTE,
+ texture: {},
+ },
+ ],
+ });
+
+    // The fn is called immediately after the previous frame's "update the rendering" step.
+    // Polyfill this by calling the callback via setTimeout from within the requestAnimationFrame
+    // callback (for onscreen canvases), or right after transferToImageBitmap (for offscreen canvases).
+ function runInNewCanvasFrame(fn: () => void) {
+ switch (canvasType) {
+ case 'onscreen':
+ requestAnimationFrame(() => timeout(fn));
+ break;
+ case 'offscreen':
+        // For offscreen canvases, calling transferToImageBitmap immediately puts us in a new frame.
+ (ctx.canvas as OffscreenCanvas).transferToImageBitmap();
+ fn();
+ break;
+ default:
+ unreachable();
+ }
+ }
+
+ function checkGetCurrentTexture() {
+    // Call getCurrentTexture in the previous frame.
+ const prevTexture = ctx.getCurrentTexture();
+
+    // Call getCurrentTexture immediately after the frame; the texture object should stay the same.
+ queueMicrotask(() => {
+ if (getCurrentTextureAgain) {
+ t.expect(prevTexture === ctx.getCurrentTexture());
+ }
+
+ // Call getCurrentTexture in a new frame.
+      // By then the previous texture object should have expired and a new texture object should be returned.
+      // Call runInNewCanvasFrame inside the microtask so that, for offscreen canvases, the new frame runs
+      // after the getCurrentTexture call in this microtask.
+ runInNewCanvasFrame(() => {
+ if (getCurrentTextureAgain) {
+ t.expect(prevTexture !== ctx.getCurrentTexture());
+ }
+
+          // Even when prevTexture has expired, createView should still succeed.
+          const prevTextureView = prevTexture.createView();
+          // Using the now-invalid view should fail validation.
+ t.expectValidationError(() => {
+ t.device.createBindGroup({
+ layout: bgl,
+ entries: [{ binding: 0, resource: prevTextureView }],
+ });
+ });
+ });
+ });
+ }
+
+ switch (prevFrameCallsite) {
+ case 'runInNewCanvasFrame':
+ runInNewCanvasFrame(checkGetCurrentTexture);
+ break;
+ case 'requestAnimationFrame':
+ requestAnimationFrame(checkGetCurrentTexture);
+ break;
+ default:
+ break;
+ }
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/canvas/getPreferredCanvasFormat.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/canvas/getPreferredCanvasFormat.spec.ts
new file mode 100644
index 0000000000..cd582b4f3a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/canvas/getPreferredCanvasFormat.spec.ts
@@ -0,0 +1,19 @@
+export const description = `
+Tests for navigator.gpu.getPreferredCanvasFormat.
+`;
+
+import { Fixture } from '../../../common/framework/fixture.js';
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+
+export const g = makeTestGroup(Fixture);
+
+g.test('value')
+ .desc(
+ `
+ Ensure getPreferredCanvasFormat returns one of the valid values.
+ `
+ )
+ .fn(t => {
+ const preferredFormat = navigator.gpu.getPreferredCanvasFormat();
+ t.expect(preferredFormat === 'bgra8unorm' || preferredFormat === 'rgba8unorm');
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/canvas/readbackFromWebGPUCanvas.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/canvas/readbackFromWebGPUCanvas.spec.ts
new file mode 100644
index 0000000000..7fd7142f00
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/canvas/readbackFromWebGPUCanvas.spec.ts
@@ -0,0 +1,481 @@
+export const description = `
+Tests for readback from WebGPU Canvas.
+
+This includes testing that colorSpace is carried through from the WebGPU canvas
+to each form of copy (toDataURL, toBlob, ImageBitmap, drawImage).
+
+The color space support is tested by drawing the readback form of the WebGPU
+canvas into a 2D canvas of a different color space via drawImage (A). Another
+2D canvas is created with the same source data and color space as the WebGPU
+canvas and also drawn into another 2D canvas of a different color space (B).
+The contents of A and B should match.
+
+TODO: implement all canvas types, see TODO on kCanvasTypes.
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { assert, raceWithRejectOnTimeout, unreachable } from '../../../common/util/util.js';
+import {
+ kCanvasAlphaModes,
+ kCanvasColorSpaces,
+ kCanvasTextureFormats,
+} from '../../capability_info.js';
+import { GPUTest } from '../../gpu_test.js';
+import { checkElementsEqual } from '../../util/check_contents.js';
+import {
+ kAllCanvasTypes,
+ CanvasType,
+ createCanvas,
+ createOnscreenCanvas,
+} from '../../util/create_elements.js';
+
+export const g = makeTestGroup(GPUTest);
+
+// We choose 0x66 as the value for each color and alpha channel
+// 0x66 / 0xff = 0.4
+// Given a pixel value of RGBA = (0x66, 0, 0, 0x66) in the source WebGPU canvas,
+// For alphaMode = opaque, the copy output should be RGBA = (0x66, 0, 0, 0xff)
+// For alphaMode = premultiplied, the copy output should be RGBA = (0xff, 0, 0, 0x66)
+const kPixelValue = 0x66;
+const kPixelValueFloat = 0x66 / 0xff; // 0.4
+
+// Use four pixels rectangle for the test:
+// blue: top-left;
+// green: top-right;
+// red: bottom-left;
+// yellow: bottom-right;
+const expect = {
+ /* prettier-ignore */
+ 'opaque': new Uint8ClampedArray([
+ 0x00, 0x00, kPixelValue, 0xff, // blue
+ 0x00, kPixelValue, 0x00, 0xff, // green
+ kPixelValue, 0x00, 0x00, 0xff, // red
+ kPixelValue, kPixelValue, 0x00, 0xff, // yellow
+ ]),
+ /* prettier-ignore */
+ 'premultiplied': new Uint8ClampedArray([
+ 0x00, 0x00, 0xff, kPixelValue, // blue
+ 0x00, 0xff, 0x00, kPixelValue, // green
+ 0xff, 0x00, 0x00, kPixelValue, // red
+ 0xff, 0xff, 0x00, kPixelValue, // yellow
+ ]),
+};
+
+function initWebGPUCanvasContent<T extends CanvasType>(
+ t: GPUTest,
+ format: GPUTextureFormat,
+ alphaMode: GPUCanvasAlphaMode,
+ colorSpace: PredefinedColorSpace,
+ canvasType: T
+) {
+ const canvas = createCanvas(t, canvasType, 2, 2);
+ const ctx = canvas.getContext('webgpu');
+ assert(ctx instanceof GPUCanvasContext, 'Failed to get WebGPU context from canvas');
+
+ ctx.configure({
+ device: t.device,
+ format,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST,
+ alphaMode,
+ colorSpace,
+ });
+
+ const canvasTexture = ctx.getCurrentTexture();
+ const tempTexture = t.device.createTexture({
+ size: { width: 1, height: 1, depthOrArrayLayers: 1 },
+ format,
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+ const tempTextureView = tempTexture.createView();
+ const encoder = t.device.createCommandEncoder();
+
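+  // Clear tempTexture to `color` with a loadOp: 'clear' render pass, then copy that single
+  // pixel into the canvas texture at `origin`.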
+ const clearOnePixel = (origin: GPUOrigin3D, color: GPUColor) => {
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ { view: tempTextureView, clearValue: color, loadOp: 'clear', storeOp: 'store' },
+ ],
+ });
+ pass.end();
+ encoder.copyTextureToTexture(
+ { texture: tempTexture },
+ { texture: canvasTexture, origin },
+ { width: 1, height: 1 }
+ );
+ };
+
+ clearOnePixel([0, 0], [0, 0, kPixelValueFloat, kPixelValueFloat]);
+ clearOnePixel([1, 0], [0, kPixelValueFloat, 0, kPixelValueFloat]);
+ clearOnePixel([0, 1], [kPixelValueFloat, 0, 0, kPixelValueFloat]);
+ clearOnePixel([1, 1], [kPixelValueFloat, kPixelValueFloat, 0, kPixelValueFloat]);
+
+ t.device.queue.submit([encoder.finish()]);
+ tempTexture.destroy();
+
+ return canvas;
+}
+
+function drawImageSourceIntoCanvas(
+ t: GPUTest,
+ image: CanvasImageSource,
+ colorSpace: PredefinedColorSpace
+) {
+ const canvas: HTMLCanvasElement = createOnscreenCanvas(t, 2, 2);
+ const ctx = canvas.getContext('2d', { colorSpace });
+ assert(ctx !== null);
+ ctx.drawImage(image, 0, 0);
+ return ctx;
+}
+
+function checkImageResultWithSameColorSpaceCanvas(
+ t: GPUTest,
+ image: CanvasImageSource,
+ sourceColorSpace: PredefinedColorSpace,
+ expect: Uint8ClampedArray
+) {
+ const ctx = drawImageSourceIntoCanvas(t, image, sourceColorSpace);
+ readPixelsFrom2DCanvasAndCompare(t, ctx, expect);
+}
+
+function checkImageResultWithDifferentColorSpaceCanvas(
+ t: GPUTest,
+ image: CanvasImageSource,
+ sourceColorSpace: PredefinedColorSpace,
+ sourceData: Uint8ClampedArray
+) {
+ const destinationColorSpace = sourceColorSpace === 'srgb' ? 'display-p3' : 'srgb';
+
+ // draw the WebGPU derived data into a canvas
+ const fromWebGPUCtx = drawImageSourceIntoCanvas(t, image, destinationColorSpace);
+
+ // create a 2D canvas with the same source data in the same color space as the WebGPU
+ // canvas
+ const source2DCanvas: HTMLCanvasElement = createOnscreenCanvas(t, 2, 2);
+ const source2DCtx = source2DCanvas.getContext('2d', { colorSpace: sourceColorSpace });
+ assert(source2DCtx !== null);
+ const imgData = source2DCtx.getImageData(0, 0, 2, 2);
+ imgData.data.set(sourceData);
+ source2DCtx.putImageData(imgData, 0, 0);
+
+ // draw the source 2D canvas into another 2D canvas with the destination color space and
+ // then pull out the data. This result should be the same as the WebGPU derived data
+ // written to a 2D canvas of the same destination color space.
+ const from2DCtx = drawImageSourceIntoCanvas(t, source2DCanvas, destinationColorSpace);
+ const expect = from2DCtx.getImageData(0, 0, 2, 2).data;
+
+ readPixelsFrom2DCanvasAndCompare(t, fromWebGPUCtx, expect);
+}
+
+function checkImageResult(
+ t: GPUTest,
+ image: CanvasImageSource,
+ sourceColorSpace: PredefinedColorSpace,
+ expect: Uint8ClampedArray
+) {
+ checkImageResultWithSameColorSpaceCanvas(t, image, sourceColorSpace, expect);
+ checkImageResultWithDifferentColorSpaceCanvas(t, image, sourceColorSpace, expect);
+}
+
+function readPixelsFrom2DCanvasAndCompare(
+ t: GPUTest,
+ ctx: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D,
+ expect: Uint8ClampedArray
+) {
+ const actual = ctx.getImageData(0, 0, ctx.canvas.width, ctx.canvas.height).data;
+
+ t.expectOK(checkElementsEqual(actual, expect));
+}
+
+g.test('onscreenCanvas,snapshot')
+ .desc(
+ `
+ Ensure snapshot of canvas with WebGPU context is correct with
+ - various WebGPU canvas texture formats
+ - WebGPU canvas alpha mode = {"opaque", "premultiplied"}
+ - colorSpace = {"srgb", "display-p3"}
+ - snapshot methods = {convertToBlob, transferToImageBitmap, createImageBitmap}
+
+    TODO: Snapshot canvas to jpeg, webp, and other mime types at different
+    quality settings. Maybe we should test them in reftests.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('format', kCanvasTextureFormats)
+ .combine('alphaMode', kCanvasAlphaModes)
+ .combine('colorSpace', kCanvasColorSpaces)
+ .combine('snapshotType', ['toDataURL', 'toBlob', 'imageBitmap'])
+ )
+ .fn(async t => {
+ const canvas = initWebGPUCanvasContent(
+ t,
+ t.params.format,
+ t.params.alphaMode,
+ t.params.colorSpace,
+ 'onscreen'
+ );
+
+ let snapshot: HTMLImageElement | ImageBitmap;
+ switch (t.params.snapshotType) {
+ case 'toDataURL': {
+ const url = canvas.toDataURL();
+ const img = new Image(canvas.width, canvas.height);
+ img.src = url;
+ await raceWithRejectOnTimeout(img.decode(), 5000, 'load image timeout');
+ snapshot = img;
+ break;
+ }
+ case 'toBlob': {
+ const blobFromCanvas = new Promise(resolve => {
+ canvas.toBlob(blob => resolve(blob));
+ });
+ const blob = (await blobFromCanvas) as Blob;
+ const url = URL.createObjectURL(blob);
+ const img = new Image(canvas.width, canvas.height);
+ img.src = url;
+ await raceWithRejectOnTimeout(img.decode(), 5000, 'load image timeout');
+ snapshot = img;
+ break;
+ }
+ case 'imageBitmap': {
+ snapshot = await createImageBitmap(canvas);
+ break;
+ }
+ default:
+ unreachable();
+ }
+
+ checkImageResult(t, snapshot, t.params.colorSpace, expect[t.params.alphaMode]);
+ });
+
+g.test('offscreenCanvas,snapshot')
+ .desc(
+ `
+ Ensure snapshot of offscreenCanvas with WebGPU context is correct with
+ - various WebGPU canvas texture formats
+ - WebGPU canvas alpha mode = {"opaque", "premultiplied"}
+ - colorSpace = {"srgb", "display-p3"}
+ - snapshot methods = {convertToBlob, transferToImageBitmap, createImageBitmap}
+
+    TODO: Snapshot offscreenCanvas to jpeg, webp, and other mime types at different
+    quality settings. Maybe we should test them in reftests.
+ `
+ )
+ .params(u =>
+ u //
+ .combine('format', kCanvasTextureFormats)
+ .combine('alphaMode', kCanvasAlphaModes)
+ .combine('colorSpace', kCanvasColorSpaces)
+ .combine('snapshotType', ['convertToBlob', 'transferToImageBitmap', 'imageBitmap'])
+ )
+ .fn(async t => {
+ const offscreenCanvas = initWebGPUCanvasContent(
+ t,
+ t.params.format,
+ t.params.alphaMode,
+ t.params.colorSpace,
+ 'offscreen'
+ );
+
+ let snapshot: HTMLImageElement | ImageBitmap;
+ switch (t.params.snapshotType) {
+ case 'convertToBlob': {
+ if (typeof offscreenCanvas.convertToBlob === 'undefined') {
+ t.skip("Browser doesn't support OffscreenCanvas.convertToBlob");
+ return;
+ }
+ const blob = await offscreenCanvas.convertToBlob();
+ const url = URL.createObjectURL(blob);
+ const img = new Image(offscreenCanvas.width, offscreenCanvas.height);
+ img.src = url;
+ await raceWithRejectOnTimeout(img.decode(), 5000, 'load image timeout');
+ snapshot = img;
+ break;
+ }
+ case 'transferToImageBitmap': {
+ if (typeof offscreenCanvas.transferToImageBitmap === 'undefined') {
+ t.skip("Browser doesn't support OffscreenCanvas.transferToImageBitmap");
+ return;
+ }
+ snapshot = offscreenCanvas.transferToImageBitmap();
+ break;
+ }
+ case 'imageBitmap': {
+ snapshot = await createImageBitmap(offscreenCanvas);
+ break;
+ }
+ default:
+ unreachable();
+ }
+
+ checkImageResult(t, snapshot, t.params.colorSpace, expect[t.params.alphaMode]);
+ });
+
+g.test('onscreenCanvas,uploadToWebGL')
+ .desc(
+ `
+ Ensure upload WebGPU context canvas to webgl texture is correct with
+ - various WebGPU canvas texture formats
+ - WebGPU canvas alpha mode = {"opaque", "premultiplied"}
+ - upload methods = {texImage2D, texSubImage2D}
+ `
+ )
+ .params(u =>
+ u //
+ .combine('format', kCanvasTextureFormats)
+ .combine('alphaMode', kCanvasAlphaModes)
+ .combine('webgl', ['webgl', 'webgl2'])
+ .combine('upload', ['texImage2D', 'texSubImage2D'])
+ )
+ .fn(t => {
+ const { format, webgl, upload } = t.params;
+ const canvas = initWebGPUCanvasContent(t, format, t.params.alphaMode, 'srgb', 'onscreen');
+
+ const expectCanvas: HTMLCanvasElement = createOnscreenCanvas(t, canvas.width, canvas.height);
+ const gl = expectCanvas.getContext(webgl) as WebGLRenderingContext | WebGL2RenderingContext;
+ if (gl === null) {
+ return;
+ }
+
+ const texture = gl.createTexture();
+ gl.bindTexture(gl.TEXTURE_2D, texture);
+ switch (upload) {
+ case 'texImage2D': {
+ gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, canvas);
+ break;
+ }
+ case 'texSubImage2D': {
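+        // texImage2D with null data allocates the texture storage first; texSubImage2D then uploads the canvas into it.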
+ gl.texImage2D(
+ gl.TEXTURE_2D,
+ 0,
+ gl.RGBA,
+ canvas.width,
+ canvas.height,
+ 0,
+ gl.RGBA,
+ gl.UNSIGNED_BYTE,
+ null
+ );
+ gl.texSubImage2D(gl.TEXTURE_2D, 0, 0, 0, gl.RGBA, gl.UNSIGNED_BYTE, canvas);
+ break;
+ }
+ default:
+ unreachable();
+ }
+
+ const fb = gl.createFramebuffer();
+
+ gl.bindFramebuffer(gl.FRAMEBUFFER, fb);
+ gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, texture, 0);
+
+ const pixels = new Uint8Array(canvas.width * canvas.height * 4);
+ gl.readPixels(0, 0, 2, 2, gl.RGBA, gl.UNSIGNED_BYTE, pixels);
+ const actual = new Uint8ClampedArray(pixels);
+
+ t.expectOK(checkElementsEqual(actual, expect[t.params.alphaMode]));
+ });
+
+g.test('drawTo2DCanvas')
+ .desc(
+ `
+ Ensure draw WebGPU context canvas to 2d context canvas/offscreenCanvas is correct with
+ - various WebGPU canvas texture formats
+ - WebGPU canvas alpha mode = {"opaque", "premultiplied"}
+ - colorSpace = {"srgb", "display-p3"}
+ - WebGPU canvas type = {"onscreen", "offscreen"}
+ - 2d canvas type = {"onscreen", "offscreen"}
+ `
+ )
+ .params(u =>
+ u //
+ .combine('format', kCanvasTextureFormats)
+ .combine('alphaMode', kCanvasAlphaModes)
+ .combine('colorSpace', kCanvasColorSpaces)
+ .combine('webgpuCanvasType', kAllCanvasTypes)
+ .combine('canvas2DType', kAllCanvasTypes)
+ )
+ .fn(t => {
+ const { format, webgpuCanvasType, alphaMode, colorSpace, canvas2DType } = t.params;
+
+ const canvas = initWebGPUCanvasContent(t, format, alphaMode, colorSpace, webgpuCanvasType);
+
+ const expectCanvas = createCanvas(t, canvas2DType, canvas.width, canvas.height);
+ const ctx = expectCanvas.getContext('2d') as CanvasRenderingContext2D;
+ if (ctx === null) {
+ t.skip(canvas2DType + ' canvas cannot get 2d context');
+ return;
+ }
+
+ ctx.drawImage(canvas, 0, 0);
+ readPixelsFrom2DCanvasAndCompare(t, ctx, expect[t.params.alphaMode]);
+ });
+
+g.test('transferToImageBitmap_unconfigured_nonzero_size')
+ .desc(
+    `Regression test for a crash when calling transferToImageBitmap on an unconfigured canvas. Case where the canvas is not empty.`
+ )
+ .fn(t => {
+ const canvas = createCanvas(t, 'offscreen', 2, 3);
+ canvas.getContext('webgpu');
+
+ // Transferring gives an ImageBitmap of the correct size filled with transparent black.
+ const ib = canvas.transferToImageBitmap();
+ t.expect(ib.width === canvas.width);
+ t.expect(ib.height === canvas.height);
+
+ const readbackCanvas = document.createElement('canvas');
+ readbackCanvas.width = canvas.width;
+ readbackCanvas.height = canvas.height;
+ const readbackContext = readbackCanvas.getContext('2d', {
+ alpha: true,
+ });
+ if (readbackContext === null) {
+ t.skip('Cannot get a 2D canvas context');
+ return;
+ }
+
+ // Since there isn't a configuration we expect the ImageBitmap to have the default alphaMode of "opaque".
+ const expected = new Uint8ClampedArray(canvas.width * canvas.height * 4);
+ for (let i = 0; i < expected.byteLength; i += 4) {
+ expected[i + 0] = 0;
+ expected[i + 1] = 0;
+ expected[i + 2] = 0;
+ expected[i + 3] = 255;
+ }
+
+ readbackContext.drawImage(ib, 0, 0);
+ readPixelsFrom2DCanvasAndCompare(t, readbackContext, expected);
+ });
+
+g.test('transferToImageBitmap_zero_size')
+ .desc(
+    `Regression test for a crash when calling transferToImageBitmap on an unconfigured canvas. Case where the canvas is empty.
+
+ TODO: Spec and expect a particular Exception type here.`
+ )
+ .params(u => u.combine('configure', [true, false]))
+ .fn(t => {
+ const { configure } = t.params;
+ const canvas = createCanvas(t, 'offscreen', 0, 1);
+ const ctx = canvas.getContext('webgpu')!;
+
+ if (configure) {
+ t.expectValidationError(() => ctx.configure({ device: t.device, format: 'bgra8unorm' }));
+ }
+
+ // Transferring would give an empty ImageBitmap which is not possible, so an Exception is thrown.
+ t.shouldThrow(true, () => {
+ canvas.transferToImageBitmap();
+ });
+ });
+
+g.test('transferToImageBitmap_huge_size')
+  .desc(`Regression test for a crash when calling transferToImageBitmap on a HUGE canvas.`)
+ .fn(t => {
+ const canvas = createCanvas(t, 'offscreen', 1000000, 1000000);
+ canvas.getContext('webgpu')!;
+
+ // Transferring to such a HUGE image bitmap would not be possible, so an Exception is thrown.
+ t.shouldThrow(true, () => {
+ canvas.transferToImageBitmap();
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/ImageBitmap.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/ImageBitmap.spec.ts
new file mode 100644
index 0000000000..25b25798d2
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/ImageBitmap.spec.ts
@@ -0,0 +1,543 @@
+export const description = `
+copyExternalImageToTexture from ImageBitmaps created from various sources.
+
+TODO: Test ImageBitmap generated from all possible ImageBitmapSource, relevant ImageBitmapOptions
+ (https://html.spec.whatwg.org/multipage/imagebitmap-and-animations.html#images-2)
+ and various source filetypes and metadata (weird dimensions, EXIF orientations, video rotations
+ and visible/crop rectangles, etc. (In theory these things are handled inside createImageBitmap,
+ but in theory could affect the internal representation of the ImageBitmap.)
+
+TODO: Test zero-sized copies from all sources (just make sure params cover it) (e.g. 0x0, 0x4, 4x0).
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { kTextureFormatInfo, kValidTextureFormatsForCopyE2T } from '../../format_info.js';
+import { CopyToTextureUtils, kCopySubrectInfo } from '../../util/copy_to_texture.js';
+
+import { kTestColorsAll, kTestColorsOpaque, makeTestColorsTexelView } from './util.js';
+
+export const g = makeTestGroup(CopyToTextureUtils);
+
+g.test('from_ImageData')
+ .desc(
+ `
+ Test ImageBitmap generated from ImageData can be copied to WebGPU
+  texture correctly. These ImageBitmaps most likely live in a
+  CPU-backed resource.
+
+ It generates pixels in ImageData one by one based on a color list:
+ [Red, Green, Blue, Black, White, SemitransparentWhite].
+
+ Then call copyExternalImageToTexture() to do a full copy to the 0 mipLevel
+ of dst texture, and read the contents out to compare with the ImageBitmap contents.
+
+  Premultiply alpha during the copy if 'premultipliedAlpha' in 'GPUImageCopyTextureTagged'
+  is set to 'true', and unpremultiply alpha if it is set to 'false'.
+
+ If 'flipY' in 'GPUImageCopyExternalImage' is set to 'true', copy will ensure the result
+ is flipped.
+
+  The test covers:
+ - Valid dstFormat of copyExternalImageToTexture()
+ - Valid source image alphaMode
+ - Valid dest alphaMode
+ - Valid 'flipY' config in 'GPUImageCopyExternalImage' (named 'srcFlipYInCopy' in cases)
+
+  All of these cases are expected to pass.
+ `
+ )
+ .params(u =>
+ u
+ .combine('alpha', ['none', 'premultiply'] as const)
+ .combine('orientation', ['none', 'flipY'] as const)
+ .combine('colorSpaceConversion', ['none', 'default'] as const)
+ .combine('srcFlipYInCopy', [true, false])
+ .combine('dstFormat', kValidTextureFormatsForCopyE2T)
+ .combine('dstPremultiplied', [true, false])
+ .beginSubcases()
+ .combine('width', [1, 2, 4, 15, 255, 256])
+ .combine('height', [1, 2, 4, 15, 255, 256])
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.dstFormat);
+ })
+ .fn(async t => {
+ const {
+ width,
+ height,
+ alpha,
+ orientation,
+ colorSpaceConversion,
+ dstFormat,
+ dstPremultiplied,
+ srcFlipYInCopy,
+ } = t.params;
+
+ const testColors = kTestColorsAll;
+
+ // Generate correct expected values
+ const texelViewSource = makeTestColorsTexelView({
+ testColors,
+ format: 'rgba8unorm', // ImageData is always in rgba8unorm format.
+ width,
+ height,
+ flipY: false,
+ premultiplied: false,
+ });
+ const imageData = new ImageData(width, height);
+ texelViewSource.writeTextureData(imageData.data, {
+ bytesPerRow: width * 4,
+ rowsPerImage: height,
+ subrectOrigin: [0, 0],
+ subrectSize: { width, height },
+ });
+
+ const imageBitmap = await createImageBitmap(imageData, {
+ premultiplyAlpha: alpha,
+ imageOrientation: orientation,
+ colorSpaceConversion,
+ });
+
+ const dst = t.device.createTexture({
+ size: { width, height },
+ format: dstFormat,
+ usage:
+ GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const expFormat = kTextureFormatInfo[dstFormat].baseFormat ?? dstFormat;
+ const flipSrcBeforeCopy = orientation === 'flipY';
+ const texelViewExpected = t.getExpectedDstPixelsFromSrcPixels({
+ srcPixels: imageData.data,
+ srcOrigin: [0, 0],
+ srcSize: [width, height],
+ dstOrigin: [0, 0],
+ dstSize: [width, height],
+ subRectSize: [width, height],
+ format: expFormat,
+ flipSrcBeforeCopy,
+ srcDoFlipYDuringCopy: srcFlipYInCopy,
+ conversion: {
+ srcPremultiplied: false,
+ dstPremultiplied,
+ },
+ });
+
+ t.doTestAndCheckResult(
+ { source: imageBitmap, origin: { x: 0, y: 0 }, flipY: srcFlipYInCopy },
+ {
+ texture: dst,
+ origin: { x: 0, y: 0 },
+ colorSpace: 'srgb',
+ premultipliedAlpha: dstPremultiplied,
+ },
+ texelViewExpected,
+ { width, height, depthOrArrayLayers: 1 },
+ // 1.0 and 0.6 are representable precisely by all formats except rgb10a2unorm, but
+ // allow diffs of 1ULP since that's the generally-appropriate threshold.
+ { maxDiffULPsForFloatFormat: 1, maxDiffULPsForNormFormat: 1 }
+ );
+ });
+
+g.test('from_canvas')
+ .desc(
+ `
+ Test ImageBitmap generated from canvas/offscreenCanvas can be copied to WebGPU
+  texture correctly. These ImageBitmaps most likely live in a GPU-backed resource.
+
+ It generates pixels in ImageData one by one based on a color list:
+ [Red, Green, Blue, Black, White].
+
+ Then call copyExternalImageToTexture() to do a full copy to the 0 mipLevel
+ of dst texture, and read the contents out to compare with the ImageBitmap contents.
+
+  Premultiply alpha during the copy if 'premultipliedAlpha' in 'GPUImageCopyTextureTagged'
+  is set to 'true', and unpremultiply alpha if it is set to 'false'.
+
+ If 'flipY' in 'GPUImageCopyExternalImage' is set to 'true', copy will ensure the result
+ is flipped.
+
+  The test covers:
+ - Valid 2D canvas
+ - Valid dstFormat of copyExternalImageToTexture()
+ - Valid source image alphaMode
+ - Valid dest alphaMode
+ - Valid 'flipY' config in 'GPUImageCopyExternalImage' (named 'srcFlipYInCopy' in cases)
+
+  All of these cases are expected to pass.
+ `
+ )
+ .params(u =>
+ u
+ .combine('orientation', ['none', 'flipY'] as const)
+ .combine('colorSpaceConversion', ['none', 'default'] as const)
+ .combine('srcFlipYInCopy', [true, false])
+ .combine('dstFormat', kValidTextureFormatsForCopyE2T)
+ .combine('dstPremultiplied', [true, false])
+ .beginSubcases()
+ .combine('width', [1, 2, 4, 15, 255, 256])
+ .combine('height', [1, 2, 4, 15, 255, 256])
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.dstFormat);
+ })
+ .fn(async t => {
+ const {
+ width,
+ height,
+ orientation,
+ colorSpaceConversion,
+ dstFormat,
+ dstPremultiplied,
+ srcFlipYInCopy,
+ } = t.params;
+
+ // CTS sometimes runs on worker threads, where document is not available.
+ // In this case, OffscreenCanvas can be used instead of <canvas>.
+ // But some browsers don't support OffscreenCanvas, and some don't
+ // support '2d' contexts on OffscreenCanvas.
+ // In this situation, the case will be skipped.
+ let imageCanvas;
+ if (typeof document !== 'undefined') {
+ imageCanvas = document.createElement('canvas');
+ imageCanvas.width = width;
+ imageCanvas.height = height;
+ } else if (typeof OffscreenCanvas === 'undefined') {
+ t.skip('OffscreenCanvas is not supported');
+ return;
+ } else {
+ imageCanvas = new OffscreenCanvas(width, height);
+ }
+ const imageCanvasContext = imageCanvas.getContext('2d');
+ if (imageCanvasContext === null) {
+ t.skip('OffscreenCanvas "2d" context not available');
+ return;
+ }
+
+    // Generate non-transparent pixel data to avoid differences in browsers'
+    // putImageData() optimization behaviour.
+ const texelViewSource = makeTestColorsTexelView({
+ testColors: kTestColorsOpaque,
+ format: 'rgba8unorm', // ImageData is always in rgba8unorm format.
+ width,
+ height,
+ flipY: false,
+ premultiplied: false,
+ });
+ // Generate correct expected values
+ const imageData = new ImageData(width, height);
+ texelViewSource.writeTextureData(imageData.data, {
+ bytesPerRow: width * 4,
+ rowsPerImage: height,
+ subrectOrigin: [0, 0],
+ subrectSize: { width, height },
+ });
+
+ // Use putImageData to prevent color space conversion.
+ imageCanvasContext.putImageData(imageData, 0, 0);
+
+ // MAINTENANCE_TODO: Workaround for @types/offscreencanvas missing an overload of
+ // `createImageBitmap` that takes `ImageBitmapOptions`.
+ const imageBitmap = await createImageBitmap(imageCanvas as HTMLCanvasElement, {
+ premultiplyAlpha: 'premultiply',
+ imageOrientation: orientation,
+ colorSpaceConversion,
+ });
+
+ const dst = t.device.createTexture({
+ size: { width, height },
+ format: dstFormat,
+ usage:
+ GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const expFormat = kTextureFormatInfo[dstFormat].baseFormat ?? dstFormat;
+ const flipSrcBeforeCopy = orientation === 'flipY';
+ const texelViewExpected = t.getExpectedDstPixelsFromSrcPixels({
+ srcPixels: imageData.data,
+ srcOrigin: [0, 0],
+ srcSize: [width, height],
+ dstOrigin: [0, 0],
+ dstSize: [width, height],
+ subRectSize: [width, height],
+ format: expFormat,
+ flipSrcBeforeCopy,
+ srcDoFlipYDuringCopy: srcFlipYInCopy,
+ conversion: {
+ srcPremultiplied: false,
+ dstPremultiplied,
+ },
+ });
+
+ t.doTestAndCheckResult(
+ { source: imageBitmap, origin: { x: 0, y: 0 }, flipY: srcFlipYInCopy },
+ {
+ texture: dst,
+ origin: { x: 0, y: 0 },
+ colorSpace: 'srgb',
+ premultipliedAlpha: dstPremultiplied,
+ },
+ texelViewExpected,
+ { width, height, depthOrArrayLayers: 1 },
+ // 1.0 and 0.6 are representable precisely by all formats except rgb10a2unorm, but
+ // allow diffs of 1ULP since that's the generally-appropriate threshold.
+ { maxDiffULPsForFloatFormat: 1, maxDiffULPsForNormFormat: 1 }
+ );
+ });
+
+g.test('copy_subrect_from_ImageData')
+ .desc(
+ `
+ Test ImageBitmap generated from ImageData can be copied to WebGPU
+  texture correctly. These ImageBitmaps most likely live in a CPU-backed resource.
+
+ It generates pixels in ImageData one by one based on a color list:
+ [Red, Green, Blue, Black, White].
+
+ Then call copyExternalImageToTexture() to do a subrect copy, based on a predefined copy
+ rect info list, to the 0 mipLevel of dst texture, and read the contents out to compare
+ with the ImageBitmap contents.
+
+  Premultiply alpha during the copy if 'premultipliedAlpha' in 'GPUImageCopyTextureTagged'
+  is set to 'true', and unpremultiply alpha if it is set to 'false'.
+
+ If 'flipY' in 'GPUImageCopyExternalImage' is set to 'true', copy will ensure the result
+  is flipped, and the origin is consistently top-left.
+
+  The test covers:
+ - Source WebGPU Canvas lives in the same GPUDevice or different GPUDevice as test
+ - Valid dstFormat of copyExternalImageToTexture()
+ - Valid source image alphaMode
+ - Valid dest alphaMode
+ - Valid 'flipY' config in 'GPUImageCopyExternalImage' (named 'srcFlipYInCopy' in cases)
+ - Valid subrect copies.
+
+  All of these cases are expected to pass.
+ `
+ )
+ .params(u =>
+ u
+ .combine('alpha', ['none', 'premultiply'] as const)
+ .combine('orientation', ['none', 'flipY'] as const)
+ .combine('colorSpaceConversion', ['none', 'default'] as const)
+ .combine('srcFlipYInCopy', [true, false])
+ .combine('dstPremultiplied', [true, false])
+ .beginSubcases()
+ .combine('copySubRectInfo', kCopySubrectInfo)
+ )
+ .fn(async t => {
+ const {
+ copySubRectInfo,
+ alpha,
+ orientation,
+ colorSpaceConversion,
+ dstPremultiplied,
+ srcFlipYInCopy,
+ } = t.params;
+
+ const testColors = kTestColorsAll;
+ const { srcOrigin, dstOrigin, srcSize, dstSize, copyExtent } = copySubRectInfo;
+ const kColorFormat = 'rgba8unorm';
+
+ // Generate correct expected values
+ const texelViewSource = makeTestColorsTexelView({
+ testColors,
+ format: kColorFormat, // ImageData is always in rgba8unorm format.
+ width: srcSize.width,
+ height: srcSize.height,
+ flipY: false,
+ premultiplied: false,
+ });
+ const imageData = new ImageData(srcSize.width, srcSize.height);
+ texelViewSource.writeTextureData(imageData.data, {
+ bytesPerRow: srcSize.width * 4,
+ rowsPerImage: srcSize.height,
+ subrectOrigin: [0, 0],
+ subrectSize: srcSize,
+ });
+
+ const imageBitmap = await createImageBitmap(imageData, {
+ premultiplyAlpha: alpha,
+ imageOrientation: orientation,
+ colorSpaceConversion,
+ });
+
+ const dst = t.device.createTexture({
+ size: dstSize,
+ format: kColorFormat,
+ usage:
+ GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const flipSrcBeforeCopy = orientation === 'flipY';
+ const texelViewExpected = t.getExpectedDstPixelsFromSrcPixels({
+ srcPixels: imageData.data,
+ srcOrigin,
+ srcSize,
+ dstOrigin,
+ dstSize,
+ subRectSize: copyExtent,
+ format: kColorFormat,
+ flipSrcBeforeCopy,
+ srcDoFlipYDuringCopy: srcFlipYInCopy,
+ conversion: {
+ srcPremultiplied: false,
+ dstPremultiplied,
+ },
+ });
+
+ t.doTestAndCheckResult(
+ { source: imageBitmap, origin: srcOrigin, flipY: srcFlipYInCopy },
+ {
+ texture: dst,
+ origin: dstOrigin,
+ colorSpace: 'srgb',
+ premultipliedAlpha: dstPremultiplied,
+ },
+ texelViewExpected,
+ copyExtent,
+ // 1.0 and 0.6 are representable precisely by all formats except rgb10a2unorm, but
+ // allow diffs of 1ULP since that's the generally-appropriate threshold.
+ { maxDiffULPsForFloatFormat: 1, maxDiffULPsForNormFormat: 1 }
+ );
+ });
+
+g.test('copy_subrect_from_2D_Canvas')
+ .desc(
+ `
+ Test ImageBitmap generated from canvas/offscreenCanvas can be copied to WebGPU
+  texture correctly. These ImageBitmaps most likely live in a GPU-backed resource.
+
+ It generates pixels in ImageData one by one based on a color list:
+ [Red, Green, Blue, Black, White].
+
+ Then call copyExternalImageToTexture() to do a subrect copy, based on a predefined copy
+ rect info list, to the 0 mipLevel of dst texture, and read the contents out to compare
+ with the ImageBitmap contents.
+
+  Premultiply alpha during the copy if 'premultipliedAlpha' in 'GPUImageCopyTextureTagged'
+  is set to 'true', and unpremultiply alpha if it is set to 'false'.
+
+ If 'flipY' in 'GPUImageCopyExternalImage' is set to 'true', copy will ensure the result
+  is flipped, and the origin is consistently top-left.
+
+  The test covers:
+ - Source WebGPU Canvas lives in the same GPUDevice or different GPUDevice as test
+ - Valid dstFormat of copyExternalImageToTexture()
+ - Valid source image alphaMode
+ - Valid dest alphaMode
+ - Valid 'flipY' config in 'GPUImageCopyExternalImage' (named 'srcFlipYInCopy' in cases)
+ - Valid subrect copies.
+
+  All of these cases are expected to pass.
+ `
+ )
+ .params(u =>
+ u
+ .combine('orientation', ['none', 'flipY'] as const)
+ .combine('colorSpaceConversion', ['none', 'default'] as const)
+ .combine('srcFlipYInCopy', [true, false])
+ .combine('dstPremultiplied', [true, false])
+ .beginSubcases()
+ .combine('copySubRectInfo', kCopySubrectInfo)
+ )
+ .fn(async t => {
+ const { copySubRectInfo, orientation, colorSpaceConversion, dstPremultiplied, srcFlipYInCopy } =
+ t.params;
+
+ const { srcOrigin, dstOrigin, srcSize, dstSize, copyExtent } = copySubRectInfo;
+ const kColorFormat = 'rgba8unorm';
+
+ // CTS sometimes runs on worker threads, where document is not available.
+ // In this case, OffscreenCanvas can be used instead of <canvas>.
+ // But some browsers don't support OffscreenCanvas, and some don't
+ // support '2d' contexts on OffscreenCanvas.
+ // In this situation, the case will be skipped.
+ let imageCanvas;
+ if (typeof document !== 'undefined') {
+ imageCanvas = document.createElement('canvas');
+ imageCanvas.width = srcSize.width;
+ imageCanvas.height = srcSize.height;
+ } else if (typeof OffscreenCanvas === 'undefined') {
+ t.skip('OffscreenCanvas is not supported');
+ return;
+ } else {
+ imageCanvas = new OffscreenCanvas(srcSize.width, srcSize.height);
+ }
+ const imageCanvasContext = imageCanvas.getContext('2d') as CanvasRenderingContext2D;
+ if (imageCanvasContext === null) {
+ t.skip('OffscreenCanvas "2d" context not available');
+ return;
+ }
+
+    // Generate non-transparent pixel data to avoid differences in browsers'
+    // putImageData() optimization behaviour.
+ const texelViewSource = makeTestColorsTexelView({
+ testColors: kTestColorsOpaque,
+ format: 'rgba8unorm', // ImageData is always in rgba8unorm format.
+ width: srcSize.width,
+ height: srcSize.height,
+ flipY: false,
+ premultiplied: false,
+ });
+ // Generate correct expected values
+ const imageData = new ImageData(srcSize.width, srcSize.height);
+ texelViewSource.writeTextureData(imageData.data, {
+ bytesPerRow: srcSize.width * 4,
+ rowsPerImage: srcSize.height,
+ subrectOrigin: [0, 0],
+ subrectSize: srcSize,
+ });
+
+ // Use putImageData to prevent color space conversion.
+ imageCanvasContext.putImageData(imageData, 0, 0);
+
+ // MAINTENANCE_TODO: Workaround for @types/offscreencanvas missing an overload of
+ // `createImageBitmap` that takes `ImageBitmapOptions`.
+ const imageBitmap = await createImageBitmap(imageCanvas as HTMLCanvasElement, {
+ premultiplyAlpha: 'premultiply',
+ imageOrientation: orientation,
+ colorSpaceConversion,
+ });
+
+ const dst = t.device.createTexture({
+ size: dstSize,
+ format: kColorFormat,
+ usage:
+ GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const flipSrcBeforeCopy = orientation === 'flipY';
+ const texelViewExpected = t.getExpectedDstPixelsFromSrcPixels({
+ srcPixels: imageData.data,
+ srcOrigin,
+ srcSize,
+ dstOrigin,
+ dstSize,
+ subRectSize: copyExtent,
+ format: kColorFormat,
+ flipSrcBeforeCopy,
+ srcDoFlipYDuringCopy: srcFlipYInCopy,
+ conversion: {
+ srcPremultiplied: false,
+ dstPremultiplied,
+ },
+ });
+
+ t.doTestAndCheckResult(
+ { source: imageBitmap, origin: srcOrigin, flipY: srcFlipYInCopy },
+ {
+ texture: dst,
+ origin: dstOrigin,
+ colorSpace: 'srgb',
+ premultipliedAlpha: dstPremultiplied,
+ },
+ texelViewExpected,
+ copyExtent,
+ // 1.0 and 0.6 are representable precisely by all formats except rgb10a2unorm, but
+ // allow diffs of 1ULP since that's the generally-appropriate threshold.
+ { maxDiffULPsForFloatFormat: 1, maxDiffULPsForNormFormat: 1 }
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/ImageData.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/ImageData.spec.ts
new file mode 100644
index 0000000000..38876f0419
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/ImageData.spec.ts
@@ -0,0 +1,221 @@
+export const description = `
+copyExternalImageToTexture from ImageData source.
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { kTextureFormatInfo, kValidTextureFormatsForCopyE2T } from '../../format_info.js';
+import { CopyToTextureUtils, kCopySubrectInfo } from '../../util/copy_to_texture.js';
+
+import { kTestColorsAll, makeTestColorsTexelView } from './util.js';
+
+export const g = makeTestGroup(CopyToTextureUtils);
+
+g.test('from_ImageData')
+ .desc(
+ `
+ Test ImageData can be copied to WebGPU
+  texture correctly. These ImageData objects most likely live in a
+  CPU-backed resource.
+
+ It generates pixels in ImageData one by one based on a color list:
+ [Red, Green, Blue, Black, White, SemitransparentWhite].
+
+ Then call copyExternalImageToTexture() to do a full copy to the 0 mipLevel
+ of dst texture, and read the contents out to compare with the ImageData contents.
+
+ Expect alpha to get premultiplied in the copy if, and only if, 'premultipliedAlpha'
+ in 'GPUImageCopyTextureTagged' is set to 'true'.
+
+ If 'flipY' in 'GPUImageCopyExternalImage' is set to 'true', copy will ensure the result
+ is flipped.
+
+  The test covers:
+ - Valid dstColorFormat of copyExternalImageToTexture()
+ - Valid dest alphaMode
+ - Valid 'flipY' config in 'GPUImageCopyExternalImage' (named 'srcDoFlipYDuringCopy' in cases)
+
+  All of these cases are expected to pass.
+ `
+ )
+ .params(u =>
+ u
+ .combine('srcDoFlipYDuringCopy', [true, false])
+ .combine('dstColorFormat', kValidTextureFormatsForCopyE2T)
+ .combine('dstPremultiplied', [true, false])
+ .beginSubcases()
+ .combine('width', [1, 2, 4, 15, 255, 256])
+ .combine('height', [1, 2, 4, 15, 255, 256])
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.dstColorFormat);
+ })
+ .fn(t => {
+ const { width, height, dstColorFormat, dstPremultiplied, srcDoFlipYDuringCopy } = t.params;
+
+ const testColors = kTestColorsAll;
+
+ // Generate correct expected values
+ const texelViewSource = makeTestColorsTexelView({
+ testColors,
+ format: 'rgba8unorm', // ImageData is always in rgba8unorm format.
+ width,
+ height,
+ flipY: false,
+ premultiplied: false,
+ });
+ const imageData = new ImageData(width, height);
+ texelViewSource.writeTextureData(imageData.data, {
+ bytesPerRow: width * 4,
+ rowsPerImage: height,
+ subrectOrigin: [0, 0],
+ subrectSize: { width, height },
+ });
+
+ const dst = t.device.createTexture({
+ size: { width, height },
+ format: dstColorFormat,
+ usage:
+ GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const expFormat = kTextureFormatInfo[dstColorFormat].baseFormat ?? dstColorFormat;
+ const flipSrcBeforeCopy = false;
+ const texelViewExpected = t.getExpectedDstPixelsFromSrcPixels({
+ srcPixels: imageData.data,
+ srcOrigin: [0, 0],
+ srcSize: [width, height],
+ dstOrigin: [0, 0],
+ dstSize: [width, height],
+ subRectSize: [width, height],
+ format: expFormat,
+ flipSrcBeforeCopy,
+ srcDoFlipYDuringCopy,
+ conversion: {
+ srcPremultiplied: false,
+ dstPremultiplied,
+ },
+ });
+
+ t.doTestAndCheckResult(
+ {
+ source: imageData,
+ origin: { x: 0, y: 0 },
+ flipY: srcDoFlipYDuringCopy,
+ },
+ {
+ texture: dst,
+ origin: { x: 0, y: 0 },
+ colorSpace: 'srgb',
+ premultipliedAlpha: dstPremultiplied,
+ },
+ texelViewExpected,
+ { width, height, depthOrArrayLayers: 1 },
+ // 1.0 and 0.6 are representable precisely by all formats except rgb10a2unorm, but
+ // allow diffs of 1ULP since that's the generally-appropriate threshold.
+ { maxDiffULPsForFloatFormat: 1, maxDiffULPsForNormFormat: 1 }
+ );
+ });
+
+g.test('copy_subrect_from_ImageData')
+ .desc(
+ `
+ Test ImageData can be copied to WebGPU
+  texture correctly. These ImageData objects most likely live in a CPU-backed resource.
+
+ It generates pixels in ImageData one by one based on a color list:
+ [Red, Green, Blue, Black, White].
+
+ Then call copyExternalImageToTexture() to do a subrect copy, based on a predefined copy
+ rect info list, to the 0 mipLevel of dst texture, and read the contents out to compare
+ with the ImageBitmap contents.
+
+ Expect alpha to get premultiplied in the copy if, and only if, 'premultipliedAlpha'
+ in 'GPUImageCopyTextureTagged' is set to 'true'.
+
+ If 'flipY' in 'GPUImageCopyExternalImage' is set to 'true', copy will ensure the result
+  is flipped, and the origin is consistently top-left.
+
+  The test covers:
+ - Source WebGPU Canvas lives in the same GPUDevice or different GPUDevice as test
+ - Valid dstColorFormat of copyExternalImageToTexture()
+ - Valid dest alphaMode
+ - Valid 'flipY' config in 'GPUImageCopyExternalImage' (named 'srcDoFlipYDuringCopy' in cases)
+ - Valid subrect copies.
+
+  All of these cases are expected to pass.
+ `
+ )
+ .params(u =>
+ u
+ .combine('srcDoFlipYDuringCopy', [true, false])
+ .combine('dstPremultiplied', [true, false])
+ .beginSubcases()
+ .combine('copySubRectInfo', kCopySubrectInfo)
+ )
+ .fn(t => {
+ const { copySubRectInfo, dstPremultiplied, srcDoFlipYDuringCopy } = t.params;
+
+ const testColors = kTestColorsAll;
+ const { srcOrigin, dstOrigin, srcSize, dstSize, copyExtent } = copySubRectInfo;
+ const kColorFormat = 'rgba8unorm';
+
+ // Generate correct expected values
+ const texelViewSource = makeTestColorsTexelView({
+ testColors,
+ format: kColorFormat, // ImageData is always in rgba8unorm format.
+ width: srcSize.width,
+ height: srcSize.height,
+ flipY: false,
+ premultiplied: false,
+ });
+ const imageData = new ImageData(srcSize.width, srcSize.height);
+ texelViewSource.writeTextureData(imageData.data, {
+ bytesPerRow: srcSize.width * 4,
+ rowsPerImage: srcSize.height,
+ subrectOrigin: [0, 0],
+ subrectSize: srcSize,
+ });
+
+ const dst = t.device.createTexture({
+ size: dstSize,
+ format: kColorFormat,
+ usage:
+ GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const flipSrcBeforeCopy = false;
+ const texelViewExpected = t.getExpectedDstPixelsFromSrcPixels({
+ srcPixels: imageData.data,
+ srcOrigin,
+ srcSize,
+ dstOrigin,
+ dstSize,
+ subRectSize: copyExtent,
+ format: kColorFormat,
+ flipSrcBeforeCopy,
+ srcDoFlipYDuringCopy,
+ conversion: {
+ srcPremultiplied: false,
+ dstPremultiplied,
+ },
+ });
+
+ t.doTestAndCheckResult(
+ {
+ source: imageData,
+ origin: srcOrigin,
+ flipY: srcDoFlipYDuringCopy,
+ },
+ {
+ texture: dst,
+ origin: dstOrigin,
+ colorSpace: 'srgb',
+ premultipliedAlpha: dstPremultiplied,
+ },
+ texelViewExpected,
+ copyExtent,
+ // 1.0 and 0.6 are representable precisely by all formats except rgb10a2unorm, but
+ // allow diffs of 1ULP since that's the generally-appropriate threshold.
+ { maxDiffULPsForFloatFormat: 1, maxDiffULPsForNormFormat: 1 }
+ );
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/README.txt
new file mode 100644
index 0000000000..be68b34dd6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/README.txt
@@ -0,0 +1 @@
+Tests for copyToTexture from all possible sources (video, canvas, ImageBitmap, ...)
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/canvas.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/canvas.spec.ts
new file mode 100644
index 0000000000..06c3cd30b2
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/canvas.spec.ts
@@ -0,0 +1,841 @@
+export const description = `
+copyToTexture with HTMLCanvasElement and OffscreenCanvas sources.
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { skipTestCase } from '../../../common/util/util.js';
+import { kCanvasAlphaModes } from '../../capability_info.js';
+import {
+ kTextureFormatInfo,
+ kValidTextureFormatsForCopyE2T,
+ RegularTextureFormat,
+} from '../../format_info.js';
+import { CopyToTextureUtils } from '../../util/copy_to_texture.js';
+import { CanvasType, kAllCanvasTypes, createCanvas } from '../../util/create_elements.js';
+import { TexelCompareOptions } from '../../util/texture/texture_ok.js';
+
+class F extends CopyToTextureUtils {
+ init2DCanvasContentWithColorSpace({
+ width,
+ height,
+ colorSpace,
+ }: {
+ width: number;
+ height: number;
+ colorSpace: 'srgb' | 'display-p3';
+ }): {
+ canvas: HTMLCanvasElement | OffscreenCanvas;
+ expectedSourceData: Uint8ClampedArray;
+ } {
+ const canvas = createCanvas(this, 'onscreen', width, height);
+
+ let canvasContext = null;
+ canvasContext = canvas.getContext('2d', { colorSpace });
+
+ if (canvasContext === null) {
+ this.skip('onscreen canvas 2d context not available');
+ }
+
+ if (
+ typeof canvasContext.getContextAttributes === 'undefined' ||
+ typeof canvasContext.getContextAttributes().colorSpace === 'undefined'
+ ) {
+ this.skip('color space attr is not supported for canvas 2d context');
+ }
+
+ const SOURCE_PIXEL_BYTES = 4;
+ const imagePixels = new Uint8ClampedArray(SOURCE_PIXEL_BYTES * width * height);
+
+ const rectWidth = Math.floor(width / 2);
+ const rectHeight = Math.floor(height / 2);
+
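+    // 153 / 255 = 0.6, matching the 0.6 alpha used by the other canvas initializers in this file.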
+ const alphaValue = 153;
+
+ let pixelStartPos = 0;
+ // Red;
+ for (let i = 0; i < rectHeight; ++i) {
+ for (let j = 0; j < rectWidth; ++j) {
+ pixelStartPos = (i * width + j) * SOURCE_PIXEL_BYTES;
+ imagePixels[pixelStartPos] = 255;
+ imagePixels[pixelStartPos + 1] = 0;
+ imagePixels[pixelStartPos + 2] = 0;
+ imagePixels[pixelStartPos + 3] = alphaValue;
+ }
+ }
+
+ // Lime;
+ for (let i = 0; i < rectHeight; ++i) {
+ for (let j = rectWidth; j < width; ++j) {
+ pixelStartPos = (i * width + j) * SOURCE_PIXEL_BYTES;
+ imagePixels[pixelStartPos] = 0;
+ imagePixels[pixelStartPos + 1] = 255;
+ imagePixels[pixelStartPos + 2] = 0;
+ imagePixels[pixelStartPos + 3] = alphaValue;
+ }
+ }
+
+ // Blue
+ for (let i = rectHeight; i < height; ++i) {
+ for (let j = 0; j < rectWidth; ++j) {
+ pixelStartPos = (i * width + j) * SOURCE_PIXEL_BYTES;
+ imagePixels[pixelStartPos] = 0;
+ imagePixels[pixelStartPos + 1] = 0;
+ imagePixels[pixelStartPos + 2] = 255;
+ imagePixels[pixelStartPos + 3] = alphaValue;
+ }
+ }
+
+ // Fuchsia
+ for (let i = rectHeight; i < height; ++i) {
+ for (let j = rectWidth; j < width; ++j) {
+ pixelStartPos = (i * width + j) * SOURCE_PIXEL_BYTES;
+ imagePixels[pixelStartPos] = 255;
+ imagePixels[pixelStartPos + 1] = 0;
+ imagePixels[pixelStartPos + 2] = 255;
+ imagePixels[pixelStartPos + 3] = alphaValue;
+ }
+ }
+
+ const imageData = new ImageData(imagePixels, width, height, { colorSpace });
+    // MAINTENANCE_TODO: Remove the `as any` cast when tsc supports imageData.colorSpace.
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ if (typeof (imageData as any).colorSpace === 'undefined') {
+ this.skip('color space attr is not supported for ImageData');
+ }
+
+ const ctx = canvasContext as CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D;
+ ctx.putImageData(imageData, 0, 0);
+
+ return {
+ canvas,
+ expectedSourceData: this.getExpectedReadbackFor2DCanvas(canvasContext, width, height),
+ };
+ }
+
+ // MAINTENANCE_TODO: Cache the generated canvas to avoid duplicated initialization.
+ init2DCanvasContent({
+ canvasType,
+ width,
+ height,
+ }: {
+ canvasType: CanvasType;
+ width: number;
+ height: number;
+ }): {
+ canvas: HTMLCanvasElement | OffscreenCanvas;
+ expectedSourceData: Uint8ClampedArray;
+ } {
+ const canvas = createCanvas(this, canvasType, width, height);
+
+ let canvasContext = null;
+ canvasContext = canvas.getContext('2d');
+
+ if (canvasContext === null) {
+ this.skip(canvasType + ' canvas 2d context not available');
+ }
+
+ const ctx = canvasContext;
+ this.paint2DCanvas(ctx, width, height, 0.6);
+
+ return {
+ canvas,
+ expectedSourceData: this.getExpectedReadbackFor2DCanvas(canvasContext, width, height),
+ };
+ }
+
+ private paint2DCanvas(
+ ctx: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D,
+ width: number,
+ height: number,
+ alphaValue: number
+ ) {
+ const rectWidth = Math.floor(width / 2);
+ const rectHeight = Math.floor(height / 2);
+
+ // Red
+ ctx.fillStyle = `rgba(255, 0, 0, ${alphaValue})`;
+ ctx.fillRect(0, 0, rectWidth, rectHeight);
+ // Lime
+ ctx.fillStyle = `rgba(0, 255, 0, ${alphaValue})`;
+ ctx.fillRect(rectWidth, 0, width - rectWidth, rectHeight);
+ // Blue
+ ctx.fillStyle = `rgba(0, 0, 255, ${alphaValue})`;
+ ctx.fillRect(0, rectHeight, rectWidth, height - rectHeight);
+ // Fuchsia
+ ctx.fillStyle = `rgba(255, 0, 255, ${alphaValue})`;
+ ctx.fillRect(rectWidth, rectHeight, width - rectWidth, height - rectHeight);
+ }
+
+ // MAINTENANCE_TODO: Cache the generated canvas to avoid duplicated initialization.
+ initGLCanvasContent({
+ canvasType,
+ contextName,
+ width,
+ height,
+ premultiplied,
+ }: {
+ canvasType: CanvasType;
+ contextName: 'webgl' | 'webgl2';
+ width: number;
+ height: number;
+ premultiplied: boolean;
+ }): {
+ canvas: HTMLCanvasElement | OffscreenCanvas;
+ expectedSourceData: Uint8ClampedArray;
+ } {
+ const canvas = createCanvas(this, canvasType, width, height);
+
+ // MAINTENANCE_TODO: Workaround for @types/offscreencanvas missing an overload of
+ // `OffscreenCanvas.getContext` that takes `string` or a union of context types.
+ const gl = (canvas as HTMLCanvasElement).getContext(contextName, {
+ premultipliedAlpha: premultiplied,
+ }) as WebGLRenderingContext | WebGL2RenderingContext | null;
+
+ if (gl === null) {
+ this.skip(canvasType + ' canvas ' + contextName + ' context not available');
+ }
+ this.trackForCleanup(gl);
+
+ const rectWidth = Math.floor(width / 2);
+ const rectHeight = Math.floor(height / 2);
+
+ const alphaValue = 0.6;
+ const colorValue = premultiplied ? alphaValue : 1.0;
+
+    // For a webgl/webgl2 canvas, the premultipliedAlpha context attribute determines whether
+    // the values in the drawing buffer are treated as premultiplied. So clear with premultiplied
+    // color values when the context was created with premultipliedAlpha: true, and with
+    // unpremultiplied values otherwise.
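+    // For example, with alphaValue = 0.6 the red quadrant is cleared to (0.6, 0.0, 0.0, 0.6) when
+    // premultipliedAlpha is true, and to (1.0, 0.0, 0.0, 0.6) when it is false.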
+ gl.enable(gl.SCISSOR_TEST);
+ gl.scissor(0, 0, rectWidth, rectHeight);
+ gl.clearColor(colorValue, 0.0, 0.0, alphaValue);
+ gl.clear(gl.COLOR_BUFFER_BIT);
+
+ gl.scissor(rectWidth, 0, width - rectWidth, rectHeight);
+ gl.clearColor(0.0, colorValue, 0.0, alphaValue);
+ gl.clear(gl.COLOR_BUFFER_BIT);
+
+ gl.scissor(0, rectHeight, rectWidth, height - rectHeight);
+ gl.clearColor(0.0, 0.0, colorValue, alphaValue);
+ gl.clear(gl.COLOR_BUFFER_BIT);
+
+ gl.scissor(rectWidth, rectHeight, width - rectWidth, height - rectHeight);
+ gl.clearColor(colorValue, colorValue, colorValue, alphaValue);
+ gl.clear(gl.COLOR_BUFFER_BIT);
+
+ return {
+ canvas,
+ expectedSourceData: this.getExpectedReadbackForWebGLCanvas(gl, width, height),
+ };
+ }
+
+ private getDataToInitSourceWebGPUCanvas(
+ width: number,
+ height: number,
+ alphaMode: GPUCanvasAlphaMode
+ ): Uint8ClampedArray {
+ const rectWidth = Math.floor(width / 2);
+ const rectHeight = Math.floor(height / 2);
+
+ const alphaValue = 153;
+ // Always output [153, 153, 153, 153]. When the alphaMode is...
+ // - premultiplied: the readback is CSS `rgba(255, 255, 255, 60%)`.
+ // - opaque: the readback is CSS `rgba(153, 153, 153, 100%)`.
+ // getExpectedReadbackForWebGPUCanvas matches this.
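+    // (153 / 0.6 = 255 and 153 / 255 = 60%, which is where those readback values come from.)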
+ const colorValue = alphaValue;
+
+ // BGRA8Unorm texture
+ const initialData = new Uint8ClampedArray(4 * width * height);
+ const maxRectHeightIndex = width * rectHeight;
+ for (let pixelIndex = 0; pixelIndex < initialData.length / 4; ++pixelIndex) {
+ const index = pixelIndex * 4;
+
+ // Top-half two rectangles
+ if (pixelIndex < maxRectHeightIndex) {
+ if (pixelIndex % width < rectWidth) {
+ // top-left side rectangle
+ initialData[index] = colorValue;
+ initialData[index + 1] = 0;
+ initialData[index + 2] = 0;
+ initialData[index + 3] = alphaValue;
+ } else {
+ // top-right side rectangle
+ initialData[index] = 0;
+ initialData[index + 1] = colorValue;
+ initialData[index + 2] = 0;
+ initialData[index + 3] = alphaValue;
+ }
+ } else {
+ // Bottom-half two rectangles
+ // bottom-left side rectangle
+ if (pixelIndex % width < rectWidth) {
+ initialData[index] = 0;
+ initialData[index + 1] = 0;
+ initialData[index + 2] = colorValue;
+ initialData[index + 3] = alphaValue;
+ } else {
+ // bottom-right side rectangle
+ initialData[index] = colorValue;
+ initialData[index + 1] = colorValue;
+ initialData[index + 2] = colorValue;
+ initialData[index + 3] = alphaValue;
+ }
+ }
+ }
+ return initialData;
+ }
+
+ initSourceWebGPUCanvas({
+ device,
+ canvasType,
+ width,
+ height,
+ alphaMode,
+ }: {
+ device: GPUDevice;
+ canvasType: CanvasType;
+ width: number;
+ height: number;
+ alphaMode: GPUCanvasAlphaMode;
+ }): {
+ canvas: HTMLCanvasElement | OffscreenCanvas;
+ expectedSourceData: Uint8ClampedArray;
+ } {
+ const canvas = createCanvas(this, canvasType, width, height);
+
+ const gpuContext = canvas.getContext('webgpu');
+
+ if (!(gpuContext instanceof GPUCanvasContext)) {
+ this.skip(canvasType + ' canvas webgpu context not available');
+ }
+
+ gpuContext.configure({
+ device,
+ format: 'bgra8unorm',
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC,
+ alphaMode,
+ });
+
+ // BGRA8Unorm texture
+ const initialData = this.getDataToInitSourceWebGPUCanvas(width, height, alphaMode);
+ const canvasTexture = gpuContext.getCurrentTexture();
+ device.queue.writeTexture(
+ { texture: canvasTexture },
+ initialData,
+ {
+ bytesPerRow: width * 4,
+ rowsPerImage: height,
+ },
+ {
+ width,
+ height,
+ depthOrArrayLayers: 1,
+ }
+ );
+
+ return {
+ canvas,
+ expectedSourceData: this.getExpectedReadbackForWebGPUCanvas(width, height, alphaMode),
+ };
+ }
+
+ private getExpectedReadbackFor2DCanvas(
+ context: CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D,
+ width: number,
+ height: number
+ ): Uint8ClampedArray {
+ // Always read back the raw data from canvas
+ return context.getImageData(0, 0, width, height).data;
+ }
+
+ private getExpectedReadbackForWebGLCanvas(
+ gl: WebGLRenderingContext | WebGL2RenderingContext,
+ width: number,
+ height: number
+ ): Uint8ClampedArray {
+ const bytesPerPixel = 4;
+
+ const sourcePixels = new Uint8ClampedArray(width * height * bytesPerPixel);
+ gl.readPixels(0, 0, width, height, gl.RGBA, gl.UNSIGNED_BYTE, sourcePixels);
+
+ return this.doFlipY(sourcePixels, width, height, bytesPerPixel);
+ }
+
+ private getExpectedReadbackForWebGPUCanvas(
+ width: number,
+ height: number,
+ alphaMode: GPUCanvasAlphaMode
+ ): Uint8ClampedArray {
+ const bytesPerPixel = 4;
+
+ const rgbaPixels = this.getDataToInitSourceWebGPUCanvas(width, height, alphaMode);
+
+ // The source canvas has bgra8unorm back resource. We
+ // swizzle the channels to align with 2d/webgl canvas and
+ // clear alpha to 255 (1.0) when context alphaMode
+ // is set to opaque (follow webgpu spec).
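+    // For example, the BGRA pixel [153, 0, 0, 153] written for the top-left rectangle reads back as
+    // RGBA [0, 0, 153, 153], or [0, 0, 153, 255] when alphaMode is 'opaque'.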
+ for (let i = 0; i < height; ++i) {
+ for (let j = 0; j < width; ++j) {
+ const pixelPos = i * width + j;
+ const r = rgbaPixels[pixelPos * bytesPerPixel + 2];
+ if (alphaMode === 'opaque') {
+ rgbaPixels[pixelPos * bytesPerPixel + 3] = 255;
+ }
+
+ rgbaPixels[pixelPos * bytesPerPixel + 2] = rgbaPixels[pixelPos * bytesPerPixel];
+ rgbaPixels[pixelPos * bytesPerPixel] = r;
+ }
+ }
+
+ return rgbaPixels;
+ }
+
+ doCopyContentsTest(
+ source: HTMLCanvasElement | OffscreenCanvas,
+ expectedSourceImage: Uint8ClampedArray,
+ p: {
+ width: number;
+ height: number;
+ dstColorFormat: RegularTextureFormat;
+ srcDoFlipYDuringCopy: boolean;
+ srcPremultiplied: boolean;
+ dstPremultiplied: boolean;
+ }
+ ) {
+ const dst = this.device.createTexture({
+ size: {
+ width: p.width,
+ height: p.height,
+ depthOrArrayLayers: 1,
+ },
+ format: p.dstColorFormat,
+ usage:
+ GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ // Construct expected value for different dst color format
+ const info = kTextureFormatInfo[p.dstColorFormat];
+ const expFormat = info.baseFormat ?? p.dstColorFormat;
+
+ // For 2d canvas, get expected pixels with getImageData(), which returns unpremultiplied
+ // values.
+ const expectedDestinationImage = this.getExpectedDstPixelsFromSrcPixels({
+ srcPixels: expectedSourceImage,
+ srcOrigin: [0, 0],
+ srcSize: [p.width, p.height],
+ dstOrigin: [0, 0],
+ dstSize: [p.width, p.height],
+ subRectSize: [p.width, p.height],
+ format: expFormat,
+ flipSrcBeforeCopy: false,
+ srcDoFlipYDuringCopy: p.srcDoFlipYDuringCopy,
+ conversion: {
+ srcPremultiplied: p.srcPremultiplied,
+ dstPremultiplied: p.dstPremultiplied,
+ },
+ });
+
+ this.doTestAndCheckResult(
+ { source, origin: { x: 0, y: 0 }, flipY: p.srcDoFlipYDuringCopy },
+ {
+ texture: dst,
+ origin: { x: 0, y: 0 },
+ colorSpace: 'srgb',
+ premultipliedAlpha: p.dstPremultiplied,
+ },
+ expectedDestinationImage,
+ { width: p.width, height: p.height, depthOrArrayLayers: 1 },
+ // 1.0 and 0.6 are representable precisely by all formats except rgb10a2unorm, but
+ // allow diffs of 1ULP since that's the generally-appropriate threshold.
+ { maxDiffULPsForNormFormat: 1, maxDiffULPsForFloatFormat: 1 }
+ );
+ }
+}
+
+export const g = makeTestGroup(F);
+
+g.test('copy_contents_from_2d_context_canvas')
+ .desc(
+ `
+ Test HTMLCanvasElement and OffscreenCanvas with 2d context
+ can be copied to WebGPU texture correctly.
+
+ It creates HTMLCanvasElement/OffscreenCanvas with '2d'.
+ Use fillRect(2d context) to render red rect for top-left,
+  green rect for top-right, blue rect for bottom-left and fuchsia for bottom-right.
+
+ Then call copyExternalImageToTexture() to do a full copy to the 0 mipLevel
+ of dst texture, and read the contents out to compare with the canvas contents.
+
+ Provide premultiplied input if 'premultipliedAlpha' in 'GPUImageCopyTextureTagged'
+ is set to 'true' and unpremultiplied input if it is set to 'false'.
+
+ If 'flipY' in 'GPUImageCopyExternalImage' is set to 'true', copy will ensure the result
+ is flipped.
+
+  The test covers:
+ - Valid canvas type
+ - Valid 2d context type
+ - Valid dstColorFormat of copyExternalImageToTexture()
+ - Valid dest alphaMode
+ - Valid 'flipY' config in 'GPUImageCopyExternalImage' (named 'srcDoFlipYDuringCopy' in cases)
+ - TODO(#913): color space tests need to be added
+
+  All of these cases are expected to pass.
+ `
+ )
+ .params(u =>
+ u
+ .combine('canvasType', kAllCanvasTypes)
+ .combine('dstColorFormat', kValidTextureFormatsForCopyE2T)
+ .combine('dstAlphaMode', kCanvasAlphaModes)
+ .combine('srcDoFlipYDuringCopy', [true, false])
+ .beginSubcases()
+ .combine('width', [1, 2, 4, 15])
+ .combine('height', [1, 2, 4, 15])
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.dstColorFormat);
+ })
+ .fn(t => {
+ const { width, height, canvasType, dstAlphaMode } = t.params;
+
+ const { canvas, expectedSourceData } = t.init2DCanvasContent({
+ canvasType,
+ width,
+ height,
+ });
+
+ t.doCopyContentsTest(canvas, expectedSourceData, {
+ srcPremultiplied: false,
+ dstPremultiplied: dstAlphaMode === 'premultiplied',
+ ...t.params,
+ });
+ });
+
+g.test('copy_contents_from_gl_context_canvas')
+ .desc(
+ `
+ Test HTMLCanvasElement and OffscreenCanvas with webgl/webgl2 context
+ can be copied to WebGPU texture correctly.
+
+  It creates HTMLCanvasElement/OffscreenCanvas with 'webgl'/'webgl2'.
+ Use scissor + clear to render red rect for top-left, green rect
+ for top-right, blue rect for bottom-left and white for bottom-right.
+ And do premultiply alpha in advance if the webgl/webgl2 context is created
+ with premultipliedAlpha : true.
+
+ Then call copyExternalImageToTexture() to do a full copy to the 0 mipLevel
+ of dst texture, and read the contents out to compare with the canvas contents.
+
+ Provide premultiplied input if 'premultipliedAlpha' in 'GPUImageCopyTextureTagged'
+ is set to 'true' and unpremultiplied input if it is set to 'false'.
+
+ If 'flipY' in 'GPUImageCopyExternalImage' is set to 'true', copy will ensure the result
+ is flipped.
+
+  The test covers:
+ - Valid canvas type
+ - Valid webgl/webgl2 context type
+ - Valid dstColorFormat of copyExternalImageToTexture()
+ - Valid source image alphaMode
+ - Valid dest alphaMode
+ - Valid 'flipY' config in 'GPUImageCopyExternalImage'(named 'srcDoFlipYDuringCopy' in cases)
+ - TODO: color space tests need to be added
+
+  All of these cases are expected to pass.
+ `
+ )
+ .params(u =>
+ u
+ .combine('canvasType', kAllCanvasTypes)
+ .combine('contextName', ['webgl', 'webgl2'] as const)
+ .combine('dstColorFormat', kValidTextureFormatsForCopyE2T)
+ .combine('srcPremultiplied', [true, false])
+ .combine('dstAlphaMode', kCanvasAlphaModes)
+ .combine('srcDoFlipYDuringCopy', [true, false])
+ .beginSubcases()
+ .combine('width', [1, 2, 4, 15])
+ .combine('height', [1, 2, 4, 15])
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.dstColorFormat);
+ })
+ .fn(t => {
+ const { width, height, canvasType, contextName, srcPremultiplied, dstAlphaMode } = t.params;
+
+ const { canvas, expectedSourceData } = t.initGLCanvasContent({
+ canvasType,
+ contextName,
+ width,
+ height,
+ premultiplied: srcPremultiplied,
+ });
+
+ t.doCopyContentsTest(canvas, expectedSourceData, {
+ dstPremultiplied: dstAlphaMode === 'premultiplied',
+ ...t.params,
+ });
+ });
+
+g.test('copy_contents_from_gpu_context_canvas')
+ .desc(
+ `
+ Test HTMLCanvasElement and OffscreenCanvas with webgpu context
+ can be copied to WebGPU texture correctly.
+
+ It creates HTMLCanvasElement/OffscreenCanvas with 'webgpu'.
+ It uses writeTexture to copy pixels to the back buffer. The results are:
+ red rect for top-left, green rect for top-right, blue rect for bottom-left
+ and white for bottom-right.
+
+ TODO: Actually test alphaMode = opaque.
+ Alpha is premultiplied in advance if the webgpu context is created
+ with alphaMode="premultiplied".
+
+ Then call copyExternalImageToTexture() to do a full copy to the 0 mipLevel
+ of dst texture, and read the contents out to compare with the canvas contents.
+
+ Provide premultiplied input if 'premultipliedAlpha' in 'GPUImageCopyTextureTagged'
+ is set to 'true' and unpremultiplied input if it is set to 'false'.
+
+ If 'flipY' in 'GPUImageCopyExternalImage' is set to 'true', copy will ensure the result
+ is flipped.
+
+ The test covers:
+ - Valid canvas type
+ - Source WebGPU Canvas lives in the same GPUDevice or different GPUDevice as test
+ - Valid dstColorFormat of copyExternalImageToTexture()
+ - TODO: test more source image alphaMode
+ - Valid dest alphaMode
+ - Valid 'flipY' config in 'GPUImageCopyExternalImage' (named 'srcDoFlipYDuringCopy' in cases)
+ - TODO: color space tests need to be added
+
+ And the expected results are all passed.
+ `
+ )
+ .params(u =>
+ u
+ .combine('canvasType', kAllCanvasTypes)
+ .combine('srcAndDstInSameGPUDevice', [true, false])
+ .combine('dstColorFormat', kValidTextureFormatsForCopyE2T)
+ // .combine('srcAlphaMode', kCanvasAlphaModes)
+ .combine('srcAlphaMode', ['premultiplied'] as const)
+ .combine('dstAlphaMode', kCanvasAlphaModes)
+ .combine('srcDoFlipYDuringCopy', [true, false])
+ .beginSubcases()
+ .combine('width', [1, 2, 4, 15])
+ .combine('height', [1, 2, 4, 15])
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.dstColorFormat);
+ t.selectMismatchedDeviceOrSkipTestCase(undefined);
+ })
+ .fn(t => {
+ const { width, height, canvasType, srcAndDstInSameGPUDevice, srcAlphaMode, dstAlphaMode } =
+ t.params;
+
+ const device = srcAndDstInSameGPUDevice ? t.device : t.mismatchedDevice;
+ const { canvas: source, expectedSourceData } = t.initSourceWebGPUCanvas({
+ device,
+ canvasType,
+ width,
+ height,
+ alphaMode: srcAlphaMode,
+ });
+
+ t.doCopyContentsTest(source, expectedSourceData, {
+ srcPremultiplied: srcAlphaMode === 'premultiplied',
+ dstPremultiplied: dstAlphaMode === 'premultiplied',
+ ...t.params,
+ });
+ });
+
+g.test('copy_contents_from_bitmaprenderer_context_canvas')
+ .desc(
+ `
+ Test HTMLCanvasElement and OffscreenCanvas with ImageBitmapRenderingContext
+ can be copied to WebGPU texture correctly.
+
+ It creates HTMLCanvasElement/OffscreenCanvas with 'bitmaprenderer'.
+ First, use fillRect(2d context) to render red rect for top-left,
+ green rect for top-right, blue rect for bottom-left and white for bottom-right on a
+ 2d context canvas and create an imageBitmap from that canvas. Use transferFromImageBitmap()
+ to render the imageBitmap to the source canvas.
+
+ Then call copyExternalImageToTexture() to do a full copy to the 0 mipLevel
+ of dst texture, and read the contents out to compare with the canvas contents.
+
+ Provide premultiplied input if 'premultipliedAlpha' in 'GPUImageCopyTextureTagged'
+ is set to 'true' and unpremultiplied input if it is set to 'false'.
+
+ If 'flipY' in 'GPUImageCopyExternalImage' is set to 'true', copy will ensure the result
+ is flipped.
+
+ The test covers:
+ - Valid canvas type
+ - Valid ImageBitmapRendering context type
+ - Valid dstColorFormat of copyExternalImageToTexture()
+ - Valid dest alphaMode
+ - Valid 'flipY' config in 'GPUImageCopyExternalImage' (named 'srcDoFlipYDuringCopy' in cases)
+ - TODO(#913): color space tests need to be added
+
+ And the expected results are all passed.
+ `
+ )
+ .params(u =>
+ u
+ .combine('canvasType', kAllCanvasTypes)
+ .combine('dstColorFormat', kValidTextureFormatsForCopyE2T)
+ .combine('dstAlphaMode', kCanvasAlphaModes)
+ .combine('srcDoFlipYDuringCopy', [true, false])
+ .beginSubcases()
+ .combine('width', [1, 2, 4, 15])
+ .combine('height', [1, 2, 4, 15])
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.dstColorFormat);
+ })
+ .fn(async t => {
+ const { width, height, canvasType, dstAlphaMode } = t.params;
+
+ const canvas = createCanvas(t, canvasType, width, height);
+
+ const imageBitmapRenderingContext = canvas.getContext('bitmaprenderer');
+
+ if (!(imageBitmapRenderingContext instanceof ImageBitmapRenderingContext)) {
+ skipTestCase(canvasType + ' canvas imageBitmap rendering context not available');
+ }
+
+ const { canvas: sourceContentCanvas, expectedSourceData } = t.init2DCanvasContent({
+ canvasType,
+ width,
+ height,
+ });
+
+ const imageBitmap = await createImageBitmap(sourceContentCanvas, { premultiplyAlpha: 'none' });
+ imageBitmapRenderingContext.transferFromImageBitmap(imageBitmap);
+
+ t.doCopyContentsTest(canvas, expectedSourceData, {
+ srcPremultiplied: false,
+ dstPremultiplied: dstAlphaMode === 'premultiplied',
+ ...t.params,
+ });
+ });
+
+g.test('color_space_conversion')
+ .desc(
+ `
+ Test that an HTMLCanvasElement with a 2d context can be created with the 'colorSpace' attribute.
+ Using copyExternalImageToTexture to copy from such a canvas must perform
+ color space conversion correctly.
+
+ It creates HTMLCanvasElement/OffscreenCanvas with '2d' and 'colorSpace' attributes.
+ Use fillRect(2d context) to render red rect for top-left,
+ green rect for top-right, blue rect for bottom-left and white for bottom-right.
+
+ Then call copyExternalImageToTexture() to do a full copy to the 0 mipLevel
+ of dst texture, and read the contents out to compare with the canvas contents.
+
+ Provide premultiplied input if 'premultipliedAlpha' in 'GPUImageCopyTextureTagged'
+ is set to 'true' and unpremultiplied input if it is set to 'false'.
+
+ If 'flipY' in 'GPUImageCopyExternalImage' is set to 'true', copy will ensure the result
+ is flipped.
+
+ If the color space of the source input differs from the user-defined dstTexture color space, the
+ copy must convert the content to the user-defined color space.
+
+ The test covers:
+ - Valid dstColorFormat of copyExternalImageToTexture()
+ - Valid dest alphaMode
+ - Valid 'flipY' config in 'GPUImageCopyExternalImage' (named 'srcDoFlipYDuringCopy' in cases)
+ - Valid 'colorSpace' config in 'dstColorSpace'
+
+ And the expected results are all passed.
+
+ TODO: Enhance test data with colors that aren't always opaque and fully saturated.
+ TODO: Consider refactoring src data setup with TexelView.writeTextureData.
+ `
+ )
+ .params(u =>
+ u
+ .combine('srcColorSpace', ['srgb', 'display-p3'] as const)
+ .combine('dstColorSpace', ['srgb'] as const)
+ .combine('dstColorFormat', kValidTextureFormatsForCopyE2T)
+ .combine('dstPremultiplied', [true, false])
+ .combine('srcDoFlipYDuringCopy', [true, false])
+ .beginSubcases()
+ .combine('width', [1, 2, 4, 15, 255, 256])
+ .combine('height', [1, 2, 4, 15, 255, 256])
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.dstColorFormat);
+ })
+ .fn(t => {
+ const {
+ width,
+ height,
+ srcColorSpace,
+ dstColorSpace,
+ dstColorFormat,
+ dstPremultiplied,
+ srcDoFlipYDuringCopy,
+ } = t.params;
+ const { canvas, expectedSourceData } = t.init2DCanvasContentWithColorSpace({
+ width,
+ height,
+ colorSpace: srcColorSpace,
+ });
+
+ const dst = t.device.createTexture({
+ size: { width, height },
+ format: dstColorFormat,
+ usage:
+ GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const expectedDestinationImage = t.getExpectedDstPixelsFromSrcPixels({
+ srcPixels: expectedSourceData,
+ srcOrigin: [0, 0],
+ srcSize: [width, height],
+ dstOrigin: [0, 0],
+ dstSize: [width, height],
+ subRectSize: [width, height],
+ // copyExternalImageToTexture does not perform gamma-encoding into `-srgb` formats.
+ format: kTextureFormatInfo[dstColorFormat].baseFormat ?? dstColorFormat,
+ flipSrcBeforeCopy: false,
+ srcDoFlipYDuringCopy,
+ conversion: {
+ srcPremultiplied: false,
+ dstPremultiplied,
+ srcColorSpace,
+ dstColorSpace,
+ },
+ });
+
+ const texelCompareOptions: TexelCompareOptions = {
+ maxFractionalDiff: 0,
+ maxDiffULPsForNormFormat: 1,
+ };
+ if (srcColorSpace !== dstColorSpace) {
+ // Color space conversion seems prone to errors up to about 0.0003 on f32, 0.0007 on f16.
+ texelCompareOptions.maxFractionalDiff = 0.001;
+ } else {
+ texelCompareOptions.maxDiffULPsForFloatFormat = 1;
+ }
+
+ t.doTestAndCheckResult(
+ { source: canvas, origin: { x: 0, y: 0 }, flipY: srcDoFlipYDuringCopy },
+ {
+ texture: dst,
+ origin: { x: 0, y: 0 },
+ colorSpace: dstColorSpace,
+ premultipliedAlpha: dstPremultiplied,
+ },
+ expectedDestinationImage,
+ { width, height, depthOrArrayLayers: 1 },
+ texelCompareOptions
+ );
+ });
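All of the canvas tests above reduce to the same copyExternalImageToTexture call shape. The
following is a minimal standalone sketch of that call, not part of this patch; `device`,
`sourceCanvas`, and `dstTexture` are assumed placeholders.

declare const device: GPUDevice;
declare const sourceCanvas: HTMLCanvasElement;
declare const dstTexture: GPUTexture;

device.queue.copyExternalImageToTexture(
  // GPUImageCopyExternalImage: flipY mirrors the source vertically during the copy.
  { source: sourceCanvas, origin: { x: 0, y: 0 }, flipY: true },
  // GPUImageCopyTextureTagged: premultipliedAlpha and colorSpace describe the destination
  // encoding; the copy converts from the source's alpha mode and color space as needed.
  { texture: dstTexture, origin: { x: 0, y: 0 }, colorSpace: 'srgb', premultipliedAlpha: true },
  { width: sourceCanvas.width, height: sourceCanvas.height, depthOrArrayLayers: 1 }
);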
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/image.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/image.spec.ts
new file mode 100644
index 0000000000..e19f986c0f
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/image.spec.ts
@@ -0,0 +1,271 @@
+export const description = `
+copyExternalImageToTexture from HTMLImageElement source.
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { raceWithRejectOnTimeout } from '../../../common/util/util.js';
+import { kTextureFormatInfo, kValidTextureFormatsForCopyE2T } from '../../format_info.js';
+import { CopyToTextureUtils, kCopySubrectInfo } from '../../util/copy_to_texture.js';
+
+import { kTestColorsOpaque, makeTestColorsTexelView } from './util.js';
+
+async function decodeImageFromCanvas(canvas: HTMLCanvasElement): Promise<HTMLImageElement> {
+ const blobFromCanvas = new Promise(resolve => {
+ canvas.toBlob(blob => resolve(blob));
+ });
+ const blob = (await blobFromCanvas) as Blob;
+ const url = URL.createObjectURL(blob);
+ const image = new Image(canvas.width, canvas.height);
+ image.src = url;
+ await raceWithRejectOnTimeout(image.decode(), 5000, 'decode image timeout');
+ return image;
+}
+
+export const g = makeTestGroup(CopyToTextureUtils);
+
+g.test('from_image')
+ .desc(
+ `
+ Test that an HTMLImageElement can be copied to a WebGPU texture correctly.
+ These images most likely live in GPU-backed resources.
+
+ It generates pixels in ImageData one by one based on a color list:
+ [Red, Green, Blue, Black, White].
+
+ Then call copyExternalImageToTexture() to do a full copy to the 0 mipLevel
+ of dst texture, and read the contents out to compare with the HTMLImageElement contents.
+
+ Premultiply alpha during the copy if 'premultipliedAlpha' in 'GPUImageCopyTextureTagged'
+ is set to 'true', and unpremultiply it if it is set to 'false'.
+
+ If 'flipY' in 'GPUImageCopyExternalImage' is set to 'true', copy will ensure the result
+ is flipped.
+
+ The test covers:
+ - Valid 2D canvas
+ - Valid dstColorFormat of copyExternalImageToTexture()
+ - Valid source image alphaMode
+ - Valid dest alphaMode
+ - Valid 'flipY' config in 'GPUImageCopyExternalImage' (named 'srcDoFlipYDuringCopy' in cases)
+
+ And the expected results are all passed.
+ `
+ )
+ .params(u =>
+ u
+ .combine('srcDoFlipYDuringCopy', [true, false])
+ .combine('dstColorFormat', kValidTextureFormatsForCopyE2T)
+ .combine('dstPremultiplied', [true, false])
+ .beginSubcases()
+ .combine('width', [1, 2, 4, 15, 255, 256])
+ .combine('height', [1, 2, 4, 15, 255, 256])
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.dstColorFormat);
+ if (typeof HTMLImageElement === 'undefined') t.skip('HTMLImageElement not available');
+ })
+ .fn(async t => {
+ const { width, height, dstColorFormat, dstPremultiplied, srcDoFlipYDuringCopy } = t.params;
+
+ const imageCanvas = document.createElement('canvas');
+ imageCanvas.width = width;
+ imageCanvas.height = height;
+
+ // Generate non-transparent pixel data to avoid differences in browsers'
+ // canvas optimization behaviour for putImageData().
+ const texelViewSource = makeTestColorsTexelView({
+ testColors: kTestColorsOpaque,
+ format: 'rgba8unorm', // ImageData is always in rgba8unorm format.
+ width,
+ height,
+ flipY: false,
+ premultiplied: false,
+ });
+ // Generate correct expected values
+ const imageData = new ImageData(width, height);
+ texelViewSource.writeTextureData(imageData.data, {
+ bytesPerRow: width * 4,
+ rowsPerImage: height,
+ subrectOrigin: [0, 0],
+ subrectSize: { width, height },
+ });
+
+ const imageCanvasContext = imageCanvas.getContext('2d') as CanvasRenderingContext2D;
+ if (imageCanvasContext === null) {
+ t.skip('canvas cannot get 2d context');
+ return;
+ }
+ // Use putImageData to prevent color space conversion.
+ imageCanvasContext.putImageData(imageData, 0, 0);
+
+ const image = await decodeImageFromCanvas(imageCanvas);
+
+ const dst = t.device.createTexture({
+ size: { width, height },
+ format: dstColorFormat,
+ usage:
+ GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const expFormat = kTextureFormatInfo[dstColorFormat].baseFormat ?? dstColorFormat;
+ const flipSrcBeforeCopy = false;
+ const texelViewExpected = t.getExpectedDstPixelsFromSrcPixels({
+ srcPixels: imageData.data,
+ srcOrigin: [0, 0],
+ srcSize: [width, height],
+ dstOrigin: [0, 0],
+ dstSize: [width, height],
+ subRectSize: [width, height],
+ format: expFormat,
+ flipSrcBeforeCopy,
+ srcDoFlipYDuringCopy,
+ conversion: {
+ srcPremultiplied: false,
+ dstPremultiplied,
+ },
+ });
+
+ t.doTestAndCheckResult(
+ {
+ source: image,
+ origin: { x: 0, y: 0 },
+ flipY: srcDoFlipYDuringCopy,
+ },
+ {
+ texture: dst,
+ origin: { x: 0, y: 0 },
+ colorSpace: 'srgb',
+ premultipliedAlpha: dstPremultiplied,
+ },
+ texelViewExpected,
+ { width, height, depthOrArrayLayers: 1 },
+ // 1.0 and 0.6 are representable precisely by all formats except rgb10a2unorm, but
+ // allow diffs of 1ULP since that's the generally-appropriate threshold.
+ { maxDiffULPsForFloatFormat: 1, maxDiffULPsForNormFormat: 1 }
+ );
+ });
+
+g.test('copy_subrect_from_2D_Canvas')
+ .desc(
+ `
+ Test that an HTMLImageElement can be copied to a WebGPU texture correctly.
+ These images most likely live in GPU-backed resources.
+
+ It generates pixels in ImageData one by one based on a color list:
+ [Red, Green, Blue, Black, White].
+
+ Then call copyExternalImageToTexture() to do a subrect copy, based on a predefined copy
+ rect info list, to the 0 mipLevel of dst texture, and read the contents out to compare
+ with the HTMLImageElement contents.
+
+ Premultiply alpha during the copy if 'premultipliedAlpha' in 'GPUImageCopyTextureTagged'
+ is set to 'true', and unpremultiply it if it is set to 'false'.
+
+ If 'flipY' in 'GPUImageCopyExternalImage' is set to 'true', copy will ensure the result
+ is flipped, and the origin remains top-left consistently.
+
+ The test covers:
+ - Valid HTMLImageElement source (decoded from a 2D canvas)
+ - Valid dstColorFormat of copyExternalImageToTexture()
+ - Valid source image alphaMode
+ - Valid dest alphaMode
+ - Valid 'flipY' config in 'GPUImageCopyExternalImage' (named 'srcDoFlipYDuringCopy' in cases)
+ - Valid subrect copies.
+
+ And the expected results are all passed.
+ `
+ )
+ .params(u =>
+ u
+ .combine('srcDoFlipYDuringCopy', [true, false])
+ .combine('dstPremultiplied', [true, false])
+ .beginSubcases()
+ .combine('copySubRectInfo', kCopySubrectInfo)
+ )
+ .beforeAllSubcases(t => {
+ if (typeof HTMLImageElement === 'undefined') t.skip('HTMLImageElement not available');
+ })
+ .fn(async t => {
+ const { copySubRectInfo, dstPremultiplied, srcDoFlipYDuringCopy } = t.params;
+
+ const { srcOrigin, dstOrigin, srcSize, dstSize, copyExtent } = copySubRectInfo;
+ const kColorFormat = 'rgba8unorm';
+
+ const imageCanvas = document.createElement('canvas');
+ imageCanvas.width = srcSize.width;
+ imageCanvas.height = srcSize.height;
+
+ // Generate non-transparent pixel data to avoid differences in browsers'
+ // canvas optimization behaviour for putImageData().
+ const texelViewSource = makeTestColorsTexelView({
+ testColors: kTestColorsOpaque,
+ format: 'rgba8unorm', // ImageData is always in rgba8unorm format.
+ width: srcSize.width,
+ height: srcSize.height,
+ flipY: false,
+ premultiplied: false,
+ });
+ // Generate correct expected values
+ const imageData = new ImageData(srcSize.width, srcSize.height);
+ texelViewSource.writeTextureData(imageData.data, {
+ bytesPerRow: srcSize.width * 4,
+ rowsPerImage: srcSize.height,
+ subrectOrigin: [0, 0],
+ subrectSize: srcSize,
+ });
+
+ const imageCanvasContext = imageCanvas.getContext('2d');
+ if (imageCanvasContext === null) {
+ t.skip('canvas cannot get 2d context');
+ return;
+ }
+ // Use putImageData to prevent color space conversion.
+ imageCanvasContext.putImageData(imageData, 0, 0);
+
+ const image = await decodeImageFromCanvas(imageCanvas);
+
+ const dst = t.device.createTexture({
+ size: dstSize,
+ format: kColorFormat,
+ usage:
+ GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const flipSrcBeforeCopy = false;
+ const texelViewExpected = t.getExpectedDstPixelsFromSrcPixels({
+ srcPixels: imageData.data,
+ srcOrigin,
+ srcSize,
+ dstOrigin,
+ dstSize,
+ subRectSize: copyExtent,
+ format: kColorFormat,
+ flipSrcBeforeCopy,
+ srcDoFlipYDuringCopy,
+ conversion: {
+ srcPremultiplied: false,
+ dstPremultiplied,
+ },
+ });
+
+ t.doTestAndCheckResult(
+ {
+ source: image,
+ origin: srcOrigin,
+ flipY: srcDoFlipYDuringCopy,
+ },
+ {
+ texture: dst,
+ origin: dstOrigin,
+ colorSpace: 'srgb',
+ premultipliedAlpha: dstPremultiplied,
+ },
+ texelViewExpected,
+ copyExtent,
+ // 1.0 and 0.6 are representable precisely by all formats except rgb10a2unorm, but
+ // allow diffs of 1ULP since that's the generally-appropriate threshold.
+ { maxDiffULPsForFloatFormat: 1, maxDiffULPsForNormFormat: 1 }
+ );
+ });
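A side note on the decodeImageFromCanvas helper defined near the top of this file: it leaves the
object URL alive after decoding. Below is a sketch of a variant that also revokes it. The helper
name is hypothetical, the behaviour is otherwise the same, and the timeout guard from the original
is omitted for brevity.

async function decodeImageFromCanvasAndRevoke(canvas: HTMLCanvasElement): Promise<HTMLImageElement> {
  const blob = await new Promise<Blob | null>(resolve => canvas.toBlob(resolve));
  const url = URL.createObjectURL(blob!);
  try {
    const image = new Image(canvas.width, canvas.height);
    image.src = url;
    // Once decode() resolves, the decoded bitmap is retained by the element,
    // so the underlying blob URL can be released.
    await image.decode();
    return image;
  } finally {
    URL.revokeObjectURL(url);
  }
}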
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/util.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/util.ts
new file mode 100644
index 0000000000..2ce2fe4295
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/util.ts
@@ -0,0 +1,58 @@
+import { EncodableTextureFormat } from '../../format_info.js';
+import { PerTexelComponent } from '../../util/texture/texel_data.js';
+import { TexelView } from '../../util/texture/texel_view.js';
+
+type TestColor = PerTexelComponent<number>;
+
+// None of the dst texture formats are 'uint' or 'sint', so we can always use float values.
+const kColors = {
+ Red: { R: 1.0, G: 0.0, B: 0.0, A: 1.0 },
+ Green: { R: 0.0, G: 1.0, B: 0.0, A: 1.0 },
+ Blue: { R: 0.0, G: 0.0, B: 1.0, A: 1.0 },
+ Black: { R: 0.0, G: 0.0, B: 0.0, A: 1.0 },
+ White: { R: 1.0, G: 1.0, B: 1.0, A: 1.0 },
+ SemitransparentWhite: { R: 1.0, G: 1.0, B: 1.0, A: 0.6 },
+} as const;
+
+export const kTestColorsOpaque = [
+ kColors.Red,
+ kColors.Green,
+ kColors.Blue,
+ kColors.Black,
+ kColors.White,
+] as const;
+
+export const kTestColorsAll = [...kTestColorsOpaque, kColors.SemitransparentWhite] as const;
+
+export function makeTestColorsTexelView({
+ testColors,
+ format,
+ width,
+ height,
+ premultiplied,
+ flipY,
+}: {
+ testColors: readonly TestColor[];
+ format: EncodableTextureFormat;
+ width: number;
+ height: number;
+ premultiplied: boolean;
+ flipY: boolean;
+}) {
+ return TexelView.fromTexelsAsColors(format, coords => {
+ const y = flipY ? height - coords.y - 1 : coords.y;
+ const pixelPos = y * width + coords.x;
+ const currentPixel = testColors[pixelPos % testColors.length];
+
+ if (premultiplied && currentPixel.A !== 1.0) {
+ return {
+ R: currentPixel.R! * currentPixel.A!,
+ G: currentPixel.G! * currentPixel.A!,
+ B: currentPixel.B! * currentPixel.A!,
+ A: currentPixel.A,
+ };
+ } else {
+ return currentPixel;
+ }
+ });
+}
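A small usage sketch of the helper above (standalone, not part of this patch): with premultiplied
set to true, the only non-opaque entry, SemitransparentWhite, becomes { R: 0.6, G: 0.6, B: 0.6, A: 0.6 },
since each color channel is scaled by A.

// Usage sketch: a 4x4 view cycling through kTestColorsAll in row-major order.
const view = makeTestColorsTexelView({
  testColors: kTestColorsAll, // Red, Green, Blue, Black, White, SemitransparentWhite
  format: 'rgba8unorm',
  width: 4,
  height: 4,
  premultiplied: true, // SemitransparentWhite -> { R: 0.6, G: 0.6, B: 0.6, A: 0.6 }
  flipY: false,        // true would index rows bottom-up instead
});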
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/video.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/video.spec.ts
new file mode 100644
index 0000000000..1888eb7e58
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/copyToTexture/video.spec.ts
@@ -0,0 +1,119 @@
+export const description = `
+copyToTexture with HTMLVideoElement and VideoFrame.
+
+- videos with various encodings/formats (webm vp8, webm vp9, ogg theora, mp4), color spaces
+ (bt.601, bt.709, bt.2020)
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { GPUTest, TextureTestMixin } from '../../gpu_test.js';
+import {
+ startPlayingAndWaitForVideo,
+ getVideoElement,
+ getVideoFrameFromVideoElement,
+ kVideoExpectations,
+} from '../../web_platform/util.js';
+
+const kFormat = 'rgba8unorm';
+
+export const g = makeTestGroup(TextureTestMixin(GPUTest));
+
+g.test('copy_from_video')
+ .desc(
+ `
+Test HTMLVideoElement and VideoFrame can be copied to WebGPU texture correctly.
+
+It creates an HTMLVideoElement with videos under the Resource folder.
+
+ Then call copyExternalImageToTexture() to do a full copy to the 0 mipLevel
+ of dst texture, and read the contents out to compare with the expected video contents.
+
+ If 'flipY' in 'GPUImageCopyExternalImage' is set to 'true', copy will ensure the result
+ is flipped.
+
+ The test covers:
+ - Video comes from different color spaces.
+ - Valid 'flipY' config in 'GPUImageCopyExternalImage' (named 'srcDoFlipYDuringCopy' in cases)
+ - TODO: partial copy tests should be added
+ - TODO: all valid dstColorFormat tests should be added.
+ - TODO: dst color space tests need to be added
+`
+ )
+ .params(u =>
+ u //
+ .combineWithParams(kVideoExpectations)
+ .combine('sourceType', ['VideoElement', 'VideoFrame'] as const)
+ .combine('srcDoFlipYDuringCopy', [true, false])
+ )
+ .fn(async t => {
+ const { videoName, sourceType, srcDoFlipYDuringCopy } = t.params;
+
+ if (sourceType === 'VideoFrame' && typeof VideoFrame === 'undefined') {
+ t.skip('WebCodec is not supported');
+ }
+
+ const videoElement = getVideoElement(t, videoName);
+
+ await startPlayingAndWaitForVideo(videoElement, async () => {
+ let source, width, height;
+ if (sourceType === 'VideoFrame') {
+ source = await getVideoFrameFromVideoElement(t, videoElement);
+ width = source.codedWidth;
+ height = source.codedHeight;
+ } else {
+ source = videoElement;
+ width = source.videoWidth;
+ height = source.videoHeight;
+ }
+
+ const dstTexture = t.device.createTexture({
+ format: kFormat,
+ size: { width, height, depthOrArrayLayers: 1 },
+ usage:
+ GPUTextureUsage.COPY_SRC | GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ t.queue.copyExternalImageToTexture(
+ {
+ source,
+ origin: { x: 0, y: 0 },
+ flipY: srcDoFlipYDuringCopy,
+ },
+ {
+ texture: dstTexture,
+ origin: { x: 0, y: 0 },
+ colorSpace: 'srgb',
+ premultipliedAlpha: true,
+ },
+ { width, height, depthOrArrayLayers: 1 }
+ );
+
+ if (srcDoFlipYDuringCopy) {
+ t.expectSinglePixelComparisonsAreOkInTexture({ texture: dstTexture }, [
+ // Top-left should be blue.
+ { coord: { x: width * 0.25, y: height * 0.25 }, exp: t.params._blueExpectation },
+ // Top-right should be green.
+ { coord: { x: width * 0.75, y: height * 0.25 }, exp: t.params._greenExpectation },
+ // Bottom-left should be yellow.
+ { coord: { x: width * 0.25, y: height * 0.75 }, exp: t.params._yellowExpectation },
+ // Bottom-right should be red.
+ { coord: { x: width * 0.75, y: height * 0.75 }, exp: t.params._redExpectation },
+ ]);
+ } else {
+ t.expectSinglePixelComparisonsAreOkInTexture({ texture: dstTexture }, [
+ // Top-left should be yellow.
+ { coord: { x: width * 0.25, y: height * 0.25 }, exp: t.params._yellowExpectation },
+ // Top-right should be red.
+ { coord: { x: width * 0.75, y: height * 0.25 }, exp: t.params._redExpectation },
+ // Bottom-left should be blue.
+ { coord: { x: width * 0.25, y: height * 0.75 }, exp: t.params._blueExpectation },
+ // Bottom-right should be green.
+ { coord: { x: width * 0.75, y: height * 0.75 }, exp: t.params._greenExpectation },
+ ]);
+ }
+
+ if (source instanceof VideoFrame) {
+ source.close();
+ }
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/external_texture/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/external_texture/README.txt
new file mode 100644
index 0000000000..5deaeb4416
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/external_texture/README.txt
@@ -0,0 +1 @@
+Tests for external textures. \ No newline at end of file
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/external_texture/video.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/external_texture/video.spec.ts
new file mode 100644
index 0000000000..baa2a985d2
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/external_texture/video.spec.ts
@@ -0,0 +1,480 @@
+export const description = `
+Tests for external textures from HTMLVideoElement (and other video-type sources?).
+
+- videos with various encodings/formats (webm vp8, webm vp9, ogg theora, mp4), color spaces
+ (bt.601, bt.709, bt.2020)
+
+TODO: consider whether external_texture and copyToTexture video tests should be in the same file
+`;
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { GPUTest, TextureTestMixin } from '../../gpu_test.js';
+import {
+ startPlayingAndWaitForVideo,
+ getVideoFrameFromVideoElement,
+ getVideoElement,
+ kVideoExpectations,
+ kVideoRotationExpectations,
+} from '../../web_platform/util.js';
+
+const kHeight = 16;
+const kWidth = 16;
+const kFormat = 'rgba8unorm';
+
+export const g = makeTestGroup(TextureTestMixin(GPUTest));
+
+function createExternalTextureSamplingTestPipeline(t: GPUTest): GPURenderPipeline {
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+ @vertex fn main(@builtin(vertex_index) VertexIndex : u32) -> @builtin(position) vec4<f32> {
+ var pos = array<vec4<f32>, 6>(
+ vec4<f32>( 1.0, 1.0, 0.0, 1.0),
+ vec4<f32>( 1.0, -1.0, 0.0, 1.0),
+ vec4<f32>(-1.0, -1.0, 0.0, 1.0),
+ vec4<f32>( 1.0, 1.0, 0.0, 1.0),
+ vec4<f32>(-1.0, -1.0, 0.0, 1.0),
+ vec4<f32>(-1.0, 1.0, 0.0, 1.0)
+ );
+ return pos[VertexIndex];
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `
+ @group(0) @binding(0) var s : sampler;
+ @group(0) @binding(1) var t : texture_external;
+
+ @fragment fn main(@builtin(position) FragCoord : vec4<f32>)
+ -> @location(0) vec4<f32> {
+ return textureSampleBaseClampToEdge(t, s, FragCoord.xy / vec2<f32>(16.0, 16.0));
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ targets: [
+ {
+ format: kFormat,
+ },
+ ],
+ },
+ primitive: { topology: 'triangle-list' },
+ });
+
+ return pipeline;
+}
+
+function createExternalTextureSamplingTestBindGroup(
+ t: GPUTest,
+ checkNonStandardIsZeroCopy: true | undefined,
+ source: HTMLVideoElement | VideoFrame,
+ pipeline: GPURenderPipeline
+): GPUBindGroup {
+ const linearSampler = t.device.createSampler();
+
+ const externalTexture = t.device.importExternalTexture({
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ source: source as any,
+ });
+
+ if (checkNonStandardIsZeroCopy) {
+ expectZeroCopyNonStandard(t, externalTexture);
+ }
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: linearSampler,
+ },
+ {
+ binding: 1,
+ resource: externalTexture,
+ },
+ ],
+ });
+
+ return bindGroup;
+}
+
+/**
+ * Expects that the non-standard `externalTexture.isZeroCopy` attribute is true.
+ */
+function expectZeroCopyNonStandard(t: GPUTest, externalTexture: GPUExternalTexture): void {
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ t.expect((externalTexture as any).isZeroCopy, '0-copy import failed.');
+}
+
+/**
+ * `externalTexture.isZeroCopy` is a non-standard Chrome API for testing only.
+ * It is exposed by enabling chrome://flags/#enable-webgpu-developer-features
+ *
+ * If the API is available, this function adds a parameter `checkNonStandardIsZeroCopy`.
+ * Cases with that parameter set to `true` will fail if `externalTexture.isZeroCopy` is not true.
+ */
+function checkNonStandardIsZeroCopyIfAvailable(): { checkNonStandardIsZeroCopy?: true }[] {
+ if (
+ typeof GPUExternalTexture !== 'undefined' &&
+ // eslint-disable-next-line no-prototype-builtins
+ GPUExternalTexture.prototype.hasOwnProperty('isZeroCopy')
+ ) {
+ return [{}, { checkNonStandardIsZeroCopy: true }];
+ } else {
+ return [{}];
+ }
+}
+
+g.test('importExternalTexture,sample')
+ .desc(
+ `
+Tests that we can import an HTMLVideoElement/VideoFrame into a GPUExternalTexture and sample from it,
+for several combinations of video format and color space.
+`
+ )
+ .params(u =>
+ u //
+ .combineWithParams(checkNonStandardIsZeroCopyIfAvailable())
+ .combine('sourceType', ['VideoElement', 'VideoFrame'] as const)
+ .combineWithParams(kVideoExpectations)
+ )
+ .fn(async t => {
+ const sourceType = t.params.sourceType;
+ if (sourceType === 'VideoFrame' && typeof VideoFrame === 'undefined') {
+ t.skip('WebCodec is not supported');
+ }
+
+ const videoElement = getVideoElement(t, t.params.videoName);
+
+ await startPlayingAndWaitForVideo(videoElement, async () => {
+ const source =
+ sourceType === 'VideoFrame'
+ ? await getVideoFrameFromVideoElement(t, videoElement)
+ : videoElement;
+
+ const colorAttachment = t.device.createTexture({
+ format: kFormat,
+ size: { width: kWidth, height: kHeight, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const pipeline = createExternalTextureSamplingTestPipeline(t);
+ const bindGroup = createExternalTextureSamplingTestBindGroup(
+ t,
+ t.params.checkNonStandardIsZeroCopy,
+ source,
+ pipeline
+ );
+
+ const commandEncoder = t.device.createCommandEncoder();
+ const passEncoder = commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorAttachment.createView(),
+ clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ passEncoder.setPipeline(pipeline);
+ passEncoder.setBindGroup(0, bindGroup);
+ passEncoder.draw(6);
+ passEncoder.end();
+ t.device.queue.submit([commandEncoder.finish()]);
+
+ // For validation, we sample a few pixels away from the edges to avoid compression
+ // artifacts.
+ t.expectSinglePixelComparisonsAreOkInTexture({ texture: colorAttachment }, [
+ // Top-left should be yellow.
+ { coord: { x: kWidth * 0.25, y: kHeight * 0.25 }, exp: t.params._yellowExpectation },
+ // Top-right should be red.
+ { coord: { x: kWidth * 0.75, y: kHeight * 0.25 }, exp: t.params._redExpectation },
+ // Bottom-left should be blue.
+ { coord: { x: kWidth * 0.25, y: kHeight * 0.75 }, exp: t.params._blueExpectation },
+ // Bottom-right should be green.
+ { coord: { x: kWidth * 0.75, y: kHeight * 0.75 }, exp: t.params._greenExpectation },
+ ]);
+
+ if (sourceType === 'VideoFrame') (source as VideoFrame).close();
+ });
+ });
+
+g.test('importExternalTexture,sampleWithRotationMetadata')
+ .desc(
+ `
+Tests that when importing an HTMLVideoElement/VideoFrame into a GPUExternalTexture, sampling from
+it will honor rotation metadata.
+`
+ )
+ .params(u =>
+ u //
+ .combineWithParams(checkNonStandardIsZeroCopyIfAvailable())
+ .combine('sourceType', ['VideoElement', 'VideoFrame'] as const)
+ .combineWithParams(kVideoRotationExpectations)
+ )
+ .fn(async t => {
+ const sourceType = t.params.sourceType;
+ const videoElement = getVideoElement(t, t.params.videoName);
+
+ await startPlayingAndWaitForVideo(videoElement, async () => {
+ const source =
+ sourceType === 'VideoFrame'
+ ? await getVideoFrameFromVideoElement(t, videoElement)
+ : videoElement;
+
+ const colorAttachment = t.device.createTexture({
+ format: kFormat,
+ size: { width: kWidth, height: kHeight, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const pipeline = createExternalTextureSamplingTestPipeline(t);
+ const bindGroup = createExternalTextureSamplingTestBindGroup(
+ t,
+ t.params.checkNonStandardIsZeroCopy,
+ source,
+ pipeline
+ );
+
+ const commandEncoder = t.device.createCommandEncoder();
+ const passEncoder = commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorAttachment.createView(),
+ clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ passEncoder.setPipeline(pipeline);
+ passEncoder.setBindGroup(0, bindGroup);
+ passEncoder.draw(6);
+ passEncoder.end();
+ t.device.queue.submit([commandEncoder.finish()]);
+
+ // For validation, we sample a few pixels away from the edges to avoid compression
+ // artifacts.
+ t.expectSinglePixelComparisonsAreOkInTexture({ texture: colorAttachment }, [
+ { coord: { x: kWidth * 0.25, y: kHeight * 0.25 }, exp: t.params._topLeftExpectation },
+ { coord: { x: kWidth * 0.75, y: kHeight * 0.25 }, exp: t.params._topRightExpectation },
+ { coord: { x: kWidth * 0.25, y: kHeight * 0.75 }, exp: t.params._bottomLeftExpectation },
+ { coord: { x: kWidth * 0.75, y: kHeight * 0.75 }, exp: t.params._bottomRightExpectation },
+ ]);
+
+ if (sourceType === 'VideoFrame') (source as VideoFrame).close();
+ });
+ });
+
+g.test('importExternalTexture,sampleWithVideoFrameWithVisibleRectParam')
+ .desc(
+ `
+Tests that we can import VideoFrames and sample the correct sub-rectangle when visibleRect
+parameters are present.
+`
+ )
+ .params(u =>
+ u //
+ .combineWithParams(checkNonStandardIsZeroCopyIfAvailable())
+ .combineWithParams(kVideoExpectations)
+ )
+ .fn(async t => {
+ const videoElement = getVideoElement(t, t.params.videoName);
+
+ await startPlayingAndWaitForVideo(videoElement, async () => {
+ const source = await getVideoFrameFromVideoElement(t, videoElement);
+
+ // All tested videos are derived from an image showing yellow, red, blue or green in each
+ // quadrant. In this test we crop the video to each quadrant and check that the desired color
+ // is sampled from each corner of the cropped image.
+ const srcVideoHeight = 240;
+ const srcVideoWidth = 320;
+ const cropParams = [
+ // Top left (yellow)
+ {
+ subRect: { x: 0, y: 0, width: srcVideoWidth / 2, height: srcVideoHeight / 2 },
+ color: t.params._yellowExpectation,
+ },
+ // Top right (red)
+ {
+ subRect: {
+ x: srcVideoWidth / 2,
+ y: 0,
+ width: srcVideoWidth / 2,
+ height: srcVideoHeight / 2,
+ },
+ color: t.params._redExpectation,
+ },
+ // Bottom left (blue)
+ {
+ subRect: {
+ x: 0,
+ y: srcVideoHeight / 2,
+ width: srcVideoWidth / 2,
+ height: srcVideoHeight / 2,
+ },
+ color: t.params._blueExpectation,
+ },
+ // Bottom right (green)
+ {
+ subRect: {
+ x: srcVideoWidth / 2,
+ y: srcVideoHeight / 2,
+ width: srcVideoWidth / 2,
+ height: srcVideoHeight / 2,
+ },
+ color: t.params._greenExpectation,
+ },
+ ];
+
+ for (const cropParam of cropParams) {
+ // MAINTENANCE_TODO: remove cast with TypeScript 4.9.6+.
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const subRect = new VideoFrame(source as any, { visibleRect: cropParam.subRect });
+
+ const colorAttachment = t.device.createTexture({
+ format: kFormat,
+ size: { width: kWidth, height: kHeight, depthOrArrayLayers: 1 },
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const pipeline = createExternalTextureSamplingTestPipeline(t);
+ const bindGroup = createExternalTextureSamplingTestBindGroup(
+ t,
+ t.params.checkNonStandardIsZeroCopy,
+ subRect,
+ pipeline
+ );
+
+ const commandEncoder = t.device.createCommandEncoder();
+ const passEncoder = commandEncoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorAttachment.createView(),
+ clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ passEncoder.setPipeline(pipeline);
+ passEncoder.setBindGroup(0, bindGroup);
+ passEncoder.draw(6);
+ passEncoder.end();
+ t.device.queue.submit([commandEncoder.finish()]);
+
+ // For validation, we sample a few pixels away from the edges to avoid compression
+ // artifacts.
+ t.expectSinglePixelComparisonsAreOkInTexture({ texture: colorAttachment }, [
+ { coord: { x: kWidth * 0.1, y: kHeight * 0.1 }, exp: cropParam.color },
+ { coord: { x: kWidth * 0.9, y: kHeight * 0.1 }, exp: cropParam.color },
+ { coord: { x: kWidth * 0.1, y: kHeight * 0.9 }, exp: cropParam.color },
+ { coord: { x: kWidth * 0.9, y: kHeight * 0.9 }, exp: cropParam.color },
+ ]);
+
+ subRect.close();
+ }
+
+ source.close();
+ });
+ });
+
+g.test('importExternalTexture,compute')
+ .desc(
+ `
+Tests that we can import an HTMLVideoElement/VideoFrame into a GPUExternalTexture and use it in a
+compute shader, for several combinations of video format and color space.
+`
+ )
+ .params(u =>
+ u //
+ .combineWithParams(checkNonStandardIsZeroCopyIfAvailable())
+ .combine('sourceType', ['VideoElement', 'VideoFrame'] as const)
+ .combineWithParams(kVideoExpectations)
+ )
+ .fn(async t => {
+ const sourceType = t.params.sourceType;
+ if (sourceType === 'VideoFrame' && typeof VideoFrame === 'undefined') {
+ t.skip('WebCodec is not supported');
+ }
+
+ const videoElement = getVideoElement(t, t.params.videoName);
+
+ await startPlayingAndWaitForVideo(videoElement, async () => {
+ const source =
+ sourceType === 'VideoFrame'
+ ? await getVideoFrameFromVideoElement(t, videoElement)
+ : videoElement;
+ const externalTexture = t.device.importExternalTexture({
+ /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
+ source: source as any,
+ });
+ if (t.params.checkNonStandardIsZeroCopy) {
+ expectZeroCopyNonStandard(t, externalTexture);
+ }
+ const outputTexture = t.device.createTexture({
+ format: 'rgba8unorm',
+ size: [2, 2, 1],
+ usage: GPUTextureUsage.COPY_SRC | GPUTextureUsage.STORAGE_BINDING,
+ });
+
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ // Shader loads 4 pixels near each corner, and then stores them in a storage texture.
+ module: t.device.createShaderModule({
+ code: `
+ @group(0) @binding(0) var t : texture_external;
+ @group(0) @binding(1) var outImage : texture_storage_2d<rgba8unorm, write>;
+
+ @compute @workgroup_size(1) fn main() {
+ var yellow : vec4<f32> = textureLoad(t, vec2<i32>(80, 60));
+ textureStore(outImage, vec2<i32>(0, 0), yellow);
+ var red : vec4<f32> = textureLoad(t, vec2<i32>(240, 60));
+ textureStore(outImage, vec2<i32>(0, 1), red);
+ var blue : vec4<f32> = textureLoad(t, vec2<i32>(80, 180));
+ textureStore(outImage, vec2<i32>(1, 0), blue);
+ var green : vec4<f32> = textureLoad(t, vec2<i32>(240, 180));
+ textureStore(outImage, vec2<i32>(1, 1), green);
+ return;
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ });
+
+ const bg = t.device.createBindGroup({
+ entries: [
+ { binding: 0, resource: externalTexture },
+ { binding: 1, resource: outputTexture.createView() },
+ ],
+ layout: pipeline.getBindGroupLayout(0),
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bg);
+ pass.dispatchWorkgroups(1);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+
+ t.expectSinglePixelComparisonsAreOkInTexture({ texture: outputTexture }, [
+ // Top-left should be yellow.
+ { coord: { x: 0, y: 0 }, exp: t.params._yellowExpectation },
+ // Top-right should be red.
+ { coord: { x: 0, y: 1 }, exp: t.params._redExpectation },
+ // Bottom-left should be blue.
+ { coord: { x: 1, y: 0 }, exp: t.params._blueExpectation },
+ // Bottom-right should be green.
+ { coord: { x: 1, y: 1 }, exp: t.params._greenExpectation },
+ ]);
+
+ if (sourceType === 'VideoFrame') (source as VideoFrame).close();
+ });
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/README.txt b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/README.txt
new file mode 100644
index 0000000000..9f623b1434
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/README.txt
@@ -0,0 +1,17 @@
+Reference tests (reftests) for WebGPU canvas presentation.
+
+These render some content to a canvas using WebGPU, and WPT compares the rendering result with
+the "reference" versions (in `ref/`), which render the same content with a 2D canvas.
+
+This tests things like:
+- The canvas has the correct orientation.
+- The canvas renders with the correct transfer function.
+- The canvas blends and interpolates in the correct color encoding.
+
+TODO(#918): Test all possible color spaces (once we have more than 1)
+TODO(#921): Why is there sometimes a difference of 1 (e.g. 3f vs 40) in canvas_size_different_with_back_buffer_size?
+And why does chromium's image_diff show diffs on other pixels that don't seem to have diffs?
+TODO(#1093): Test rgba16float values which are out of gamut of the canvas but under SDR luminance.
+TODO(#1093): Test rgba16float values which are above SDR luminance.
+TODO(#1116): Test canvas scaling.
+TODO: Test transferControlToOffscreen, used from {the same,another} thread
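For context: the test pages below carry class="reftest-wait" on <html>, which tells the WPT harness
not to take the screenshot yet; a page signals completion by removing that class. A minimal sketch
of that convention follows (independent of the gpu_ref_test.js helper, whose implementation is not
shown in this diff and presumably wraps something like this).

// Generic WPT reftest-wait pattern (sketch only).
async function signalWhenRendered(render: () => Promise<void>): Promise<void> {
  await render();
  // Removing the class lets the harness take the screenshot for comparison.
  document.documentElement.classList.remove('reftest-wait');
}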
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_clear.html.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_clear.html.ts
new file mode 100644
index 0000000000..aabd90a0b4
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_clear.html.ts
@@ -0,0 +1,34 @@
+import { runRefTest } from './gpu_ref_test.js';
+
+runRefTest(t => {
+ function draw(canvasId: string, format: GPUTextureFormat) {
+ const canvas = document.getElementById(canvasId) as HTMLCanvasElement;
+
+ const ctx = canvas.getContext('webgpu') as unknown as GPUCanvasContext;
+ ctx.configure({
+ device: t.device,
+ format,
+ });
+
+ const colorAttachment = ctx.getCurrentTexture();
+ const colorAttachmentView = colorAttachment.createView();
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: colorAttachmentView,
+ clearValue: { r: 0.4, g: 1.0, b: 0.0, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+ }
+
+ draw('cvs0', 'bgra8unorm');
+ draw('cvs1', 'rgba8unorm');
+ draw('cvs2', 'rgba16float');
+});
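The matching reference page (ref/canvas_clear-ref.html, linked from the HTML file that follows) is
not included in this diff. As a rough, hypothetical sketch of what such a 2D reference would draw,
assuming the clear value { r: 0.4, g: 1.0, b: 0.0 } presents as the byte triple (102, 255, 0),
since round(0.4 * 255) = 102:

// Hypothetical 2D-canvas reference fill; not the actual ref/canvas_clear-ref.html.
function drawReference(canvasId: string) {
  const canvas = document.getElementById(canvasId) as HTMLCanvasElement;
  const ctx = canvas.getContext('2d')!;
  ctx.fillStyle = 'rgb(102, 255, 0)';
  ctx.fillRect(0, 0, canvas.width, canvas.height);
}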
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_clear.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_clear.https.html
new file mode 100644
index 0000000000..3639d3ca82
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_clear.https.html
@@ -0,0 +1,12 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_clear</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta name="assert" content="WebGPU cleared canvas should be presented correctly" />
+ <link rel="match" href="./ref/canvas_clear-ref.html" />
+ <canvas id="cvs0" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="cvs1" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="cvs2" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <script type="module" src="canvas_clear.html.js"></script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_colorspace.html.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_colorspace.html.ts
new file mode 100644
index 0000000000..3a763e8c28
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_colorspace.html.ts
@@ -0,0 +1,139 @@
+import { kUnitCaseParamsBuilder } from '../../../common/framework/params_builder.js';
+import { Float16Array } from '../../../external/petamoriken/float16/float16.js';
+import { kCanvasAlphaModes, kCanvasColorSpaces } from '../../capability_info.js';
+
+import { runRefTest } from './gpu_ref_test.js';
+
+function bgra8UnormFromRgba8Unorm(rgba8Unorm: Uint8Array) {
+ // This is used only once. May need to optimize if reused.
+ const bgra8Unorm = rgba8Unorm.slice();
+ for (let i = 0; i < bgra8Unorm.length; i += 4) {
+ [bgra8Unorm[i], bgra8Unorm[i + 2]] = [bgra8Unorm[i + 2], bgra8Unorm[i]];
+ }
+ return bgra8Unorm;
+}
+
+function rgba16floatFromRgba8unorm(rgba8Unorm: Uint8Array) {
+ // This is used only once. May need to optimize if reused.
+ const rgba16Float = new Float16Array(rgba8Unorm.length);
+ for (let i = 0; i < rgba8Unorm.length; ++i) {
+ rgba16Float[i] = rgba8Unorm[i] / 255;
+ }
+ return rgba16Float;
+}
+
+type Transferable = {
+ canvas: HTMLCanvasElement | OffscreenCanvas;
+ textureData: ArrayBuffer;
+ format: GPUTextureFormat;
+ colorSpace: PredefinedColorSpace;
+ alphaMode: GPUCanvasAlphaMode;
+};
+
+function render(
+ device: GPUDevice,
+ { canvas, format, alphaMode, colorSpace, textureData }: Transferable
+) {
+ const context = canvas.getContext('webgpu') as GPUCanvasContext;
+ context.configure({
+ device,
+ format,
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
+ alphaMode,
+ colorSpace,
+ });
+
+ const texture = context.getCurrentTexture();
+ device.queue.writeTexture({ texture }, textureData, {}, { width: 4, height: 1 });
+}
+
+export function runColorSpaceTest(format: GPUTextureFormat) {
+ runRefTest(async t => {
+ // prettier-ignore
+ const kRGBA8UnormData = new Uint8Array([
+ 0, 255, 0, 255,
+ 117, 251, 7, 255,
+ 170, 35, 209, 255,
+ 80, 150, 200, 255,
+ ]);
+ const kBGRA8UnormData = bgra8UnormFromRgba8Unorm(kRGBA8UnormData);
+ const kRGBA16FloatData = rgba16floatFromRgba8unorm(kRGBA8UnormData);
+ const width = kRGBA8UnormData.length / 4;
+
+ const testData: { [id: string]: Uint8Array | Float16Array } = {
+ rgba8unorm: kRGBA8UnormData,
+ bgra8unorm: kBGRA8UnormData,
+ rgba16float: kRGBA16FloatData,
+ };
+ const textureData = testData[format].buffer;
+
+ async function createCanvas(
+ creation: string,
+ alphaMode: GPUCanvasAlphaMode,
+ format: GPUTextureFormat,
+ colorSpace: PredefinedColorSpace
+ ) {
+ const canvas = document.createElement('canvas');
+ canvas.width = width;
+ canvas.height = 1;
+ document.body.appendChild(canvas);
+
+ switch (creation) {
+ case 'canvas':
+ render(t.device, { canvas, format, alphaMode, colorSpace, textureData });
+ break;
+
+ case 'transferControlToOffscreen': {
+ const offscreenCanvas = canvas.transferControlToOffscreen();
+ render(t.device, { canvas: offscreenCanvas, format, alphaMode, colorSpace, textureData });
+ break;
+ }
+
+ case 'transferControlToOffscreenWorker': {
+ const offscreenCanvas = canvas.transferControlToOffscreen();
+ const source = `
+ ${render.toString()}
+
+ onmessage = async (event) => {
+ try {
+ const adapter = await navigator.gpu.requestAdapter();
+ const device = await adapter.requestDevice();
+ render(device, event.data);
+ postMessage(true);
+ } catch (e) {
+ postMessage(false);
+ }
+ };
+ `;
+ const blob = new Blob([source], { type: 'application/javascript' });
+ const url = URL.createObjectURL(blob);
+ const worker = new Worker(url);
+ let resolve: (success: boolean) => void;
+ const promise = new Promise(_resolve => (resolve = _resolve));
+ worker.onmessage = event => {
+ resolve(event.data);
+ };
+ worker.postMessage(
+ { canvas: offscreenCanvas, format, alphaMode, colorSpace, textureData },
+ [offscreenCanvas]
+ );
+ await promise;
+ break;
+ }
+ }
+ }
+
+ const u = kUnitCaseParamsBuilder
+ .combine('alphaMode', kCanvasAlphaModes)
+ .combine('colorSpace', kCanvasColorSpaces)
+ .combine('creation', [
+ 'canvas',
+ 'transferControlToOffscreen',
+ 'transferControlToOffscreenWorker',
+ ]);
+
+ for (const { alphaMode, colorSpace, creation } of u) {
+ await createCanvas(creation, alphaMode, format, colorSpace);
+ }
+ });
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_colorspace_bgra8unorm.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_colorspace_bgra8unorm.https.html
new file mode 100644
index 0000000000..c910c97b1d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_colorspace_bgra8unorm.https.html
@@ -0,0 +1,22 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_colorspace_bgra8unorm</title>
+ <meta charset="utf-8" />
+ <style>
+ canvas {
+ width: 128px;
+ height: 128px;
+ margin-right: 5px;
+ image-rendering: pixelated;
+ image-rendering: crisp-edges;
+ }
+ </style>
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta name="assert" content="WebGPU bgra8norm canvas with colorSpace set should be rendered correctly" />
+ <link rel="match" href="./ref/canvas_colorspace-ref.html" />
+ <script type="module">
+ import { runColorSpaceTest } from './canvas_colorspace.html.js';
+ runColorSpaceTest('bgra8unorm');
+ </script>
+ <body></body>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_colorspace_rgba16float.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_colorspace_rgba16float.https.html
new file mode 100644
index 0000000000..7f57858e49
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_colorspace_rgba16float.https.html
@@ -0,0 +1,23 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_colorspace_rgba16float</title>
+ <meta charset="utf-8" />
+ <style>
+ canvas {
+ width: 128px;
+ height: 128px;
+ margin-right: 5px;
+ image-rendering: pixelated;
+ image-rendering: crisp-edges;
+ }
+ </style>
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta name="assert" content="WebGPU rgba16float canvas with colorSpace set should be rendered correctly" />
+ <link rel="match" href="./ref/canvas_colorspace-ref.html" />
+ <meta name=fuzzy content="maxDifference=1;totalPixels=8192">
+ <script type="module">
+ import { runColorSpaceTest } from './canvas_colorspace.html.js';
+ runColorSpaceTest('rgba16float');
+ </script>
+ <body></body>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_colorspace_rgba8unorm.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_colorspace_rgba8unorm.https.html
new file mode 100644
index 0000000000..e57e04ef5c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_colorspace_rgba8unorm.https.html
@@ -0,0 +1,22 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_colorspace_rgba8unorm</title>
+ <meta charset="utf-8" />
+ <style>
+ canvas {
+ width: 128px;
+ height: 128px;
+ margin-right: 5px;
+ image-rendering: pixelated;
+ image-rendering: crisp-edges;
+ }
+ </style>
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta name="assert" content="WebGPU rgba8unorm canvas with colorSpace set should be rendered correctly" />
+ <link rel="match" href="./ref/canvas_colorspace-ref.html" />
+ <script type="module">
+ import { runColorSpaceTest } from './canvas_colorspace.html.js';
+ runColorSpaceTest('rgba8unorm');
+ </script>
+ <body></body>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex.html.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex.html.ts
new file mode 100644
index 0000000000..2c17be8875
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex.html.ts
@@ -0,0 +1,772 @@
+import { assert, unreachable } from '../../../common/util/util.js';
+import { kTextureFormatInfo, ColorTextureFormat } from '../../format_info.js';
+import { gammaDecompress, float32ToFloat16Bits } from '../../util/conversion.js';
+import { align } from '../../util/math.js';
+
+import { runRefTest } from './gpu_ref_test.js';
+
+type WriteCanvasMethod =
+ | 'copyBufferToTexture'
+ | 'copyTextureToTexture'
+ | 'copyExternalImageToTexture'
+ | 'DrawTextureSample'
+ | 'DrawVertexColor'
+ | 'DrawFragcoord'
+ | 'FragmentTextureStore'
+ | 'ComputeWorkgroup1x1TextureStore'
+ | 'ComputeWorkgroup16x16TextureStore';
+
+export function run(
+ format: ColorTextureFormat,
+ targets: { cvs: HTMLCanvasElement; writeCanvasMethod: WriteCanvasMethod }[]
+) {
+ runRefTest(async t => {
+ let shaderValue: number = 0x66 / 0xff;
+ let isOutputSrgb = false;
+ switch (format) {
+ case 'bgra8unorm':
+ case 'rgba8unorm':
+ case 'rgba16float':
+ break;
+ case 'bgra8unorm-srgb':
+ case 'rgba8unorm-srgb':
+ // NOTE: "-srgb" cases haven't been tested (there aren't any .html files that use them).
+
+ // Reverse gammaCompress to get same value shader output as non-srgb formats:
+ shaderValue = gammaDecompress(shaderValue);
+ isOutputSrgb = true;
+ break;
+ default:
+ unreachable();
+ }
+ const shaderValueStr = shaderValue.toFixed(5);
+
+ function copyBufferToTexture(ctx: GPUCanvasContext) {
+ const rows = ctx.canvas.height;
+ const bytesPerPixel = kTextureFormatInfo[format].color.bytes;
+ if (bytesPerPixel === undefined) {
+ unreachable();
+ }
+ const bytesPerRow = align(bytesPerPixel * ctx.canvas.width, 256);
+ const componentsPerPixel = 4;
+
+ const buffer = t.device.createBuffer({
+ mappedAtCreation: true,
+ size: rows * bytesPerRow,
+ usage: GPUBufferUsage.COPY_SRC,
+ });
+ // These are run only once per test, so there are no wasted reallocations below.
+ let red: Uint8Array | Uint16Array;
+ let green: Uint8Array | Uint16Array;
+ let blue: Uint8Array | Uint16Array;
+ let yellow: Uint8Array | Uint16Array;
+
+ const mapping = buffer.getMappedRange();
+ let data: Uint8Array | Uint16Array;
+ switch (format) {
+ case 'bgra8unorm':
+ case 'bgra8unorm-srgb':
+ {
+ data = new Uint8Array(mapping);
+ red = new Uint8Array([0x00, 0x00, 0x66, 0xff]);
+ green = new Uint8Array([0x00, 0x66, 0x00, 0xff]);
+ blue = new Uint8Array([0x66, 0x00, 0x00, 0xff]);
+ yellow = new Uint8Array([0x00, 0x66, 0x66, 0xff]);
+ }
+ break;
+ case 'rgba8unorm':
+ case 'rgba8unorm-srgb':
+ {
+ data = new Uint8Array(mapping);
+ red = new Uint8Array([0x66, 0x00, 0x00, 0xff]);
+ green = new Uint8Array([0x00, 0x66, 0x00, 0xff]);
+ blue = new Uint8Array([0x00, 0x00, 0x66, 0xff]);
+ yellow = new Uint8Array([0x66, 0x66, 0x00, 0xff]);
+ }
+ break;
+ case 'rgba16float':
+ {
+ data = new Uint16Array(mapping);
+ red = new Uint16Array([
+ float32ToFloat16Bits(0.4),
+ float32ToFloat16Bits(0.0),
+ float32ToFloat16Bits(0.0),
+ float32ToFloat16Bits(1.0),
+ ]);
+ green = new Uint16Array([
+ float32ToFloat16Bits(0.0),
+ float32ToFloat16Bits(0.4),
+ float32ToFloat16Bits(0.0),
+ float32ToFloat16Bits(1.0),
+ ]);
+ blue = new Uint16Array([
+ float32ToFloat16Bits(0.0),
+ float32ToFloat16Bits(0.0),
+ float32ToFloat16Bits(0.4),
+ float32ToFloat16Bits(1.0),
+ ]);
+ yellow = new Uint16Array([
+ float32ToFloat16Bits(0.4),
+ float32ToFloat16Bits(0.4),
+ float32ToFloat16Bits(0.0),
+ float32ToFloat16Bits(1.0),
+ ]);
+ }
+ break;
+ default:
+ unreachable();
+ }
+ for (let i = 0; i < ctx.canvas.width; ++i)
+ for (let j = 0; j < ctx.canvas.height; ++j) {
+ let pixel: Uint8Array | Uint16Array;
+ if (i < ctx.canvas.width / 2) {
+ if (j < ctx.canvas.height / 2) {
+ pixel = red;
+ } else {
+ pixel = blue;
+ }
+ } else {
+ if (j < ctx.canvas.height / 2) {
+ pixel = green;
+ } else {
+ pixel = yellow;
+ }
+ }
+ data.set(pixel, (i + j * (bytesPerRow / bytesPerPixel)) * componentsPerPixel);
+ }
+ buffer.unmap();
+
+ const encoder = t.device.createCommandEncoder();
+ encoder.copyBufferToTexture({ buffer, bytesPerRow }, { texture: ctx.getCurrentTexture() }, [
+ ctx.canvas.width,
+ ctx.canvas.height,
+ 1,
+ ]);
+ t.device.queue.submit([encoder.finish()]);
+ }
+
+ function getImageBitmap(ctx: GPUCanvasContext): Promise<ImageBitmap> {
+ const data = new Uint8ClampedArray(ctx.canvas.width * ctx.canvas.height * 4);
+ for (let i = 0; i < ctx.canvas.width; ++i)
+ for (let j = 0; j < ctx.canvas.height; ++j) {
+ const offset = (i + j * ctx.canvas.width) * 4;
+ if (i < ctx.canvas.width / 2) {
+ if (j < ctx.canvas.height / 2) {
+ data.set([0x66, 0x00, 0x00, 0xff], offset);
+ } else {
+ data.set([0x00, 0x00, 0x66, 0xff], offset);
+ }
+ } else {
+ if (j < ctx.canvas.height / 2) {
+ data.set([0x00, 0x66, 0x00, 0xff], offset);
+ } else {
+ data.set([0x66, 0x66, 0x00, 0xff], offset);
+ }
+ }
+ }
+ const imageData = new ImageData(data, ctx.canvas.width, ctx.canvas.height);
+ return createImageBitmap(imageData);
+ }
+
+ function setupSrcTexture(imageBitmap: ImageBitmap): GPUTexture {
+ const [srcWidth, srcHeight] = [imageBitmap.width, imageBitmap.height];
+ const srcTexture = t.device.createTexture({
+ size: [srcWidth, srcHeight, 1],
+ format,
+ usage:
+ GPUTextureUsage.TEXTURE_BINDING |
+ GPUTextureUsage.RENDER_ATTACHMENT |
+ GPUTextureUsage.COPY_DST |
+ GPUTextureUsage.COPY_SRC,
+ });
+ t.device.queue.copyExternalImageToTexture({ source: imageBitmap }, { texture: srcTexture }, [
+ imageBitmap.width,
+ imageBitmap.height,
+ ]);
+ return srcTexture;
+ }
+
+ async function copyExternalImageToTexture(ctx: GPUCanvasContext) {
+ const imageBitmap = await getImageBitmap(ctx);
+ t.device.queue.copyExternalImageToTexture(
+ { source: imageBitmap },
+ { texture: ctx.getCurrentTexture() },
+ [imageBitmap.width, imageBitmap.height]
+ );
+ }
+
+ async function copyTextureToTexture(ctx: GPUCanvasContext) {
+ const imageBitmap = await getImageBitmap(ctx);
+ const srcTexture = setupSrcTexture(imageBitmap);
+
+ const encoder = t.device.createCommandEncoder();
+ encoder.copyTextureToTexture(
+ { texture: srcTexture, mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
+ { texture: ctx.getCurrentTexture(), mipLevel: 0, origin: { x: 0, y: 0, z: 0 } },
+ [imageBitmap.width, imageBitmap.height, 1]
+ );
+ t.device.queue.submit([encoder.finish()]);
+ }
+
+ async function DrawTextureSample(ctx: GPUCanvasContext) {
+ const imageBitmap = await getImageBitmap(ctx);
+ const srcTexture = setupSrcTexture(imageBitmap);
+
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+struct VertexOutput {
+ @builtin(position) Position : vec4<f32>,
+ @location(0) fragUV : vec2<f32>,
+}
+
+@vertex
+fn main(@builtin(vertex_index) VertexIndex : u32) -> VertexOutput {
+ var pos = array<vec2<f32>, 6>(
+ vec2<f32>( 1.0, 1.0),
+ vec2<f32>( 1.0, -1.0),
+ vec2<f32>(-1.0, -1.0),
+ vec2<f32>( 1.0, 1.0),
+ vec2<f32>(-1.0, -1.0),
+ vec2<f32>(-1.0, 1.0));
+
+ var uv = array<vec2<f32>, 6>(
+ vec2<f32>(1.0, 0.0),
+ vec2<f32>(1.0, 1.0),
+ vec2<f32>(0.0, 1.0),
+ vec2<f32>(1.0, 0.0),
+ vec2<f32>(0.0, 1.0),
+ vec2<f32>(0.0, 0.0));
+
+ var output : VertexOutput;
+ output.Position = vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+ output.fragUV = uv[VertexIndex];
+ return output;
+}
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ // NOTE: "-srgb" cases haven't been tested (there aren't any .html files that use them).
+ code: `
+@group(0) @binding(0) var mySampler: sampler;
+@group(0) @binding(1) var myTexture: texture_2d<f32>;
+
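+// Assumed intent: the exact rational form of the sRGB decode (EOTF): n / 12.92 below the
+// 0.04045 knee and pow((n + 0.055) / 1.055, 2.4) above it.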
+fn gammaDecompress(n: f32) -> f32 {
+ var r = n;
+ if (r <= 0.04045) {
+ r = r * 25.0 / 323.0;
+ } else {
+    r = pow((200.0 * r + 11.0) / 211.0, 12.0 / 5.0);
+ }
+ r = clamp(r, 0.0, 1.0);
+ return r;
+}
+
+@fragment
+fn srgbMain(@location(0) fragUV: vec2<f32>) -> @location(0) vec4<f32> {
+ var result = textureSample(myTexture, mySampler, fragUV);
+ result.r = gammaDecompress(result.r);
+ result.g = gammaDecompress(result.g);
+ result.b = gammaDecompress(result.b);
+ return result;
+}
+
+@fragment
+fn linearMain(@location(0) fragUV: vec2<f32>) -> @location(0) vec4<f32> {
+ return textureSample(myTexture, mySampler, fragUV);
+}
+ `,
+ }),
+ entryPoint: isOutputSrgb ? 'srgbMain' : 'linearMain',
+ targets: [{ format }],
+ },
+ primitive: {
+ topology: 'triangle-list',
+ },
+ });
+
+ const sampler = t.device.createSampler({
+ magFilter: 'nearest',
+ minFilter: 'nearest',
+ });
+
+ const uniformBindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: sampler,
+ },
+ {
+ binding: 1,
+ resource: srcTexture.createView(),
+ },
+ ],
+ });
+
+ const renderPassDescriptor: GPURenderPassDescriptor = {
+ colorAttachments: [
+ {
+ view: ctx.getCurrentTexture().createView(),
+
+ clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ };
+
+ const commandEncoder = t.device.createCommandEncoder();
+ const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
+ passEncoder.setPipeline(pipeline);
+ passEncoder.setBindGroup(0, uniformBindGroup);
+ passEncoder.draw(6, 1, 0, 0);
+ passEncoder.end();
+ t.device.queue.submit([commandEncoder.finish()]);
+ }
+
+ function DrawVertexColor(ctx: GPUCanvasContext) {
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+struct VertexOutput {
+ @builtin(position) Position : vec4<f32>,
+ @location(0) fragColor : vec4<f32>,
+}
+
+@vertex
+fn main(@builtin(vertex_index) VertexIndex : u32) -> VertexOutput {
+ var pos = array<vec2<f32>, 6>(
+ vec2<f32>( 0.5, 0.5),
+ vec2<f32>( 0.5, -0.5),
+ vec2<f32>(-0.5, -0.5),
+ vec2<f32>( 0.5, 0.5),
+ vec2<f32>(-0.5, -0.5),
+ vec2<f32>(-0.5, 0.5));
+
+ var offset = array<vec2<f32>, 4>(
+ vec2<f32>( -0.5, 0.5),
+ vec2<f32>( 0.5, 0.5),
+ vec2<f32>(-0.5, -0.5),
+ vec2<f32>( 0.5, -0.5));
+
+ var color = array<vec4<f32>, 4>(
+ vec4<f32>(${shaderValueStr}, 0.0, 0.0, 1.0),
+ vec4<f32>(0.0, ${shaderValueStr}, 0.0, 1.0),
+ vec4<f32>(0.0, 0.0, ${shaderValueStr}, 1.0),
+ vec4<f32>(${shaderValueStr}, ${shaderValueStr}, 0.0, 1.0));
+
+ var output : VertexOutput;
+ output.Position = vec4<f32>(pos[VertexIndex % 6u] + offset[VertexIndex / 6u], 0.0, 1.0);
+ output.fragColor = color[VertexIndex / 6u];
+ return output;
+}
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `
+@fragment
+fn main(@location(0) fragColor: vec4<f32>) -> @location(0) vec4<f32> {
+ return fragColor;
+}
+ `,
+ }),
+ entryPoint: 'main',
+ targets: [{ format }],
+ },
+ primitive: {
+ topology: 'triangle-list',
+ },
+ });
+
+ const renderPassDescriptor: GPURenderPassDescriptor = {
+ colorAttachments: [
+ {
+ view: ctx.getCurrentTexture().createView(),
+
+ clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ };
+
+ const commandEncoder = t.device.createCommandEncoder();
+ const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
+ passEncoder.setPipeline(pipeline);
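+    // 24 vertices = 6 per quad x 4 quads; VertexIndex / 6 selects each quad's offset and color.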
+ passEncoder.draw(24, 1, 0, 0);
+ passEncoder.end();
+ t.device.queue.submit([commandEncoder.finish()]);
+ }
+
+ function DrawFragcoord(ctx: GPUCanvasContext) {
+ const halfCanvasWidthStr = (ctx.canvas.width / 2).toFixed();
+ const halfCanvasHeightStr = (ctx.canvas.height / 2).toFixed();
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+struct VertexOutput {
+ @builtin(position) Position : vec4<f32>
+}
+
+@vertex
+fn main(@builtin(vertex_index) VertexIndex : u32) -> VertexOutput {
+ var pos = array<vec2<f32>, 6>(
+ vec2<f32>( 1.0, 1.0),
+ vec2<f32>( 1.0, -1.0),
+ vec2<f32>(-1.0, -1.0),
+ vec2<f32>( 1.0, 1.0),
+ vec2<f32>(-1.0, -1.0),
+ vec2<f32>(-1.0, 1.0));
+
+ var output : VertexOutput;
+ output.Position = vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+ return output;
+}
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `
+@group(0) @binding(0) var mySampler: sampler;
+@group(0) @binding(1) var myTexture: texture_2d<f32>;
+
+@fragment
+fn main(@builtin(position) fragcoord: vec4<f32>) -> @location(0) vec4<f32> {
+ var coord = vec2<u32>(floor(fragcoord.xy));
+ var color = vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ if (coord.x < ${halfCanvasWidthStr}u) {
+ if (coord.y < ${halfCanvasHeightStr}u) {
+ color.r = ${shaderValueStr};
+ } else {
+ color.b = ${shaderValueStr};
+ }
+ } else {
+ if (coord.y < ${halfCanvasHeightStr}u) {
+ color.g = ${shaderValueStr};
+ } else {
+ color.r = ${shaderValueStr};
+ color.g = ${shaderValueStr};
+ }
+ }
+ return color;
+}
+ `,
+ }),
+ entryPoint: 'main',
+ targets: [{ format }],
+ },
+ primitive: {
+ topology: 'triangle-list',
+ },
+ });
+
+ const renderPassDescriptor: GPURenderPassDescriptor = {
+ colorAttachments: [
+ {
+ view: ctx.getCurrentTexture().createView(),
+
+ clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ };
+
+ const commandEncoder = t.device.createCommandEncoder();
+ const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
+ passEncoder.setPipeline(pipeline);
+ passEncoder.draw(6, 1, 0, 0);
+ passEncoder.end();
+ t.device.queue.submit([commandEncoder.finish()]);
+ }
+
+ function FragmentTextureStore(ctx: GPUCanvasContext) {
+ const halfCanvasWidthStr = (ctx.canvas.width / 2).toFixed();
+ const halfCanvasHeightStr = (ctx.canvas.height / 2).toFixed();
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module: t.device.createShaderModule({
+ code: `
+struct VertexOutput {
+ @builtin(position) Position : vec4<f32>
+}
+
+@vertex
+fn main(@builtin(vertex_index) VertexIndex : u32) -> VertexOutput {
+ var pos = array<vec2<f32>, 6>(
+ vec2<f32>( 1.0, 1.0),
+ vec2<f32>( 1.0, -1.0),
+ vec2<f32>(-1.0, -1.0),
+ vec2<f32>( 1.0, 1.0),
+ vec2<f32>(-1.0, -1.0),
+ vec2<f32>(-1.0, 1.0));
+
+ var output : VertexOutput;
+ output.Position = vec4<f32>(pos[VertexIndex], 0.0, 1.0);
+ return output;
+}
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ fragment: {
+ module: t.device.createShaderModule({
+ code: `
+@group(0) @binding(0) var outImage : texture_storage_2d<${format}, write>;
+
+@fragment
+fn main(@builtin(position) fragcoord: vec4<f32>) -> @location(0) vec4<f32> {
+ var coord = vec2<u32>(floor(fragcoord.xy));
+ var color = vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ if (coord.x < ${halfCanvasWidthStr}u) {
+ if (coord.y < ${halfCanvasHeightStr}u) {
+ color.r = ${shaderValueStr};
+ } else {
+ color.b = ${shaderValueStr};
+ }
+ } else {
+ if (coord.y < ${halfCanvasHeightStr}u) {
+ color.g = ${shaderValueStr};
+ } else {
+ color.r = ${shaderValueStr};
+ color.g = ${shaderValueStr};
+ }
+ }
+ textureStore(outImage, vec2<i32>(coord), color);
+ return color;
+}
+ `,
+ }),
+ entryPoint: 'main',
+ targets: [{ format }],
+ },
+ primitive: {
+ topology: 'triangle-list',
+ },
+ });
+
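+    // Bind the canvas texture itself as the write-only storage texture target.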
+ const bg = t.device.createBindGroup({
+ entries: [{ binding: 0, resource: ctx.getCurrentTexture().createView() }],
+ layout: pipeline.getBindGroupLayout(0),
+ });
+
+ const outputTexture = t.device.createTexture({
+ format,
+ size: [ctx.canvas.width, ctx.canvas.height, 1],
+ usage: GPUTextureUsage.RENDER_ATTACHMENT,
+ });
+
+ const renderPassDescriptor: GPURenderPassDescriptor = {
+ colorAttachments: [
+ {
+ view: outputTexture.createView(),
+
+ clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ };
+
+ const commandEncoder = t.device.createCommandEncoder();
+ const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
+ passEncoder.setPipeline(pipeline);
+ passEncoder.setBindGroup(0, bg);
+ passEncoder.draw(6, 1, 0, 0);
+ passEncoder.end();
+ t.device.queue.submit([commandEncoder.finish()]);
+ }
+
+ function ComputeWorkgroup1x1TextureStore(ctx: GPUCanvasContext) {
+ const halfCanvasWidthStr = (ctx.canvas.width / 2).toFixed();
+ const halfCanvasHeightStr = (ctx.canvas.height / 2).toFixed();
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({
+ code: `
+@group(0) @binding(0) var outImage : texture_storage_2d<${format}, write>;
+
+@compute @workgroup_size(1, 1, 1)
+fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>) {
+ var color = vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ if (GlobalInvocationID.x < ${halfCanvasWidthStr}u) {
+ if (GlobalInvocationID.y < ${halfCanvasHeightStr}u) {
+ color.r = ${shaderValueStr};
+ } else {
+ color.b = ${shaderValueStr};
+ }
+ } else {
+ if (GlobalInvocationID.y < ${halfCanvasHeightStr}u) {
+ color.g = ${shaderValueStr};
+ } else {
+ color.r = ${shaderValueStr};
+ color.g = ${shaderValueStr};
+ }
+ }
+ textureStore(outImage, vec2<i32>(GlobalInvocationID.xy), color);
+ return;
+}
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ });
+
+ const bg = t.device.createBindGroup({
+ entries: [{ binding: 0, resource: ctx.getCurrentTexture().createView() }],
+ layout: pipeline.getBindGroupLayout(0),
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bg);
+ pass.dispatchWorkgroups(ctx.canvas.width, ctx.canvas.height, 1);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+ }
+
+ function ComputeWorkgroup16x16TextureStore(ctx: GPUCanvasContext) {
+ const canvasWidthStr = ctx.canvas.width.toFixed();
+ const canvasHeightStr = ctx.canvas.height.toFixed();
+ const halfCanvasWidthStr = (ctx.canvas.width / 2).toFixed();
+ const halfCanvasHeightStr = (ctx.canvas.height / 2).toFixed();
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({
+ code: `
+@group(0) @binding(0) var outImage : texture_storage_2d<${format}, write>;
+
+@compute @workgroup_size(16, 16, 1)
+fn main(@builtin(global_invocation_id) GlobalInvocationID : vec3<u32>) {
+ if (GlobalInvocationID.x >= ${canvasWidthStr}u ||
+ GlobalInvocationID.y >= ${canvasHeightStr}u) {
+ return;
+ }
+ var color = vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ if (GlobalInvocationID.x < ${halfCanvasWidthStr}u) {
+ if (GlobalInvocationID.y < ${halfCanvasHeightStr}u) {
+ color.r = ${shaderValueStr};
+ } else {
+ color.b = ${shaderValueStr};
+ }
+ } else {
+ if (GlobalInvocationID.y < ${halfCanvasHeightStr}u) {
+ color.g = ${shaderValueStr};
+ } else {
+ color.r = ${shaderValueStr};
+ color.g = ${shaderValueStr};
+ }
+ }
+ textureStore(outImage, vec2<i32>(GlobalInvocationID.xy), color);
+ return;
+}
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ });
+
+ const bg = t.device.createBindGroup({
+ entries: [{ binding: 0, resource: ctx.getCurrentTexture().createView() }],
+ layout: pipeline.getBindGroupLayout(0),
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bg);
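+    // Each 16x16 workgroup covers a 16x16 pixel tile; round up so the whole canvas is covered
+    // (out-of-range invocations bail out in the shader).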
+ pass.dispatchWorkgroups(
+ align(ctx.canvas.width, 16) / 16,
+ align(ctx.canvas.height, 16) / 16,
+ 1
+ );
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+ }
+
+ for (const { cvs, writeCanvasMethod } of targets) {
+ const ctx = cvs.getContext('webgpu');
+ assert(ctx instanceof GPUCanvasContext, 'Failed to get WebGPU context from canvas');
+
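+    // Configure the canvas texture with only the usage flags the chosen write method needs.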
+ let usage: GPUTextureUsageFlags;
+ switch (writeCanvasMethod) {
+ case 'copyBufferToTexture':
+ case 'copyTextureToTexture':
+ usage = GPUTextureUsage.COPY_DST;
+ break;
+ case 'copyExternalImageToTexture':
+ usage = GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT;
+ break;
+ case 'DrawTextureSample':
+ case 'DrawVertexColor':
+ case 'DrawFragcoord':
+ usage = GPUTextureUsage.RENDER_ATTACHMENT;
+ break;
+ case 'FragmentTextureStore':
+ case 'ComputeWorkgroup1x1TextureStore':
+ case 'ComputeWorkgroup16x16TextureStore':
+ usage = GPUTextureUsage.STORAGE_BINDING;
+ break;
+ default:
+ unreachable();
+ }
+
+ ctx.configure({
+ device: t.device,
+ format,
+ usage,
+ });
+
+ switch (writeCanvasMethod) {
+ case 'copyBufferToTexture':
+ copyBufferToTexture(ctx);
+ break;
+ case 'copyExternalImageToTexture':
+ await copyExternalImageToTexture(ctx);
+ break;
+ case 'copyTextureToTexture':
+ await copyTextureToTexture(ctx);
+ break;
+ case 'DrawTextureSample':
+ await DrawTextureSample(ctx);
+ break;
+ case 'DrawVertexColor':
+ DrawVertexColor(ctx);
+ break;
+ case 'DrawFragcoord':
+ DrawFragcoord(ctx);
+ break;
+ case 'FragmentTextureStore':
+ FragmentTextureStore(ctx);
+ break;
+ case 'ComputeWorkgroup1x1TextureStore':
+ ComputeWorkgroup1x1TextureStore(ctx);
+ break;
+ case 'ComputeWorkgroup16x16TextureStore':
+ ComputeWorkgroup16x16TextureStore(ctx);
+ break;
+ default:
+ unreachable();
+ }
+ }
+ });
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_bgra8unorm_copy.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_bgra8unorm_copy.https.html
new file mode 100644
index 0000000000..d378bdfcf5
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_bgra8unorm_copy.https.html
@@ -0,0 +1,24 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_complex_bgra8unorm_copy</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta
+ name="assert"
+ content="WebGPU canvas should have correct orientation, components, scaling, filtering, color space"
+ />
+ <link rel="match" href="./ref/canvas_complex-ref.html" />
+
+ <canvas id="cvs_copy_buffer_to_texture" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="cvs_copy_texture_to_texture" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="cvs_copy_external_image_to_texture" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+
+ <script type="module">
+ import { run } from './canvas_complex.html.js';
+ run('bgra8unorm', [
+ { cvs: cvs_copy_buffer_to_texture, writeCanvasMethod: 'copyBufferToTexture' },
+ { cvs: cvs_copy_texture_to_texture, writeCanvasMethod: 'copyTextureToTexture' },
+ { cvs: cvs_copy_external_image_to_texture, writeCanvasMethod: 'copyExternalImageToTexture' },
+ ]);
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_bgra8unorm_draw.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_bgra8unorm_draw.https.html
new file mode 100644
index 0000000000..99049e6e32
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_bgra8unorm_draw.https.html
@@ -0,0 +1,24 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_complex_bgra8unorm_draw</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta
+ name="assert"
+ content="WebGPU canvas should have correct orientation, components, scaling, filtering, color space"
+ />
+ <link rel="match" href="./ref/canvas_complex-ref.html" />
+
+ <canvas id="cvs_draw_texture_sample" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="cvs_draw_vertex_color" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="cvs_draw_fragcoord" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+
+ <script type="module">
+ import { run } from './canvas_complex.html.js';
+ run('bgra8unorm', [
+ { cvs: cvs_draw_texture_sample, writeCanvasMethod: 'DrawTextureSample' },
+ { cvs: cvs_draw_vertex_color, writeCanvasMethod: 'DrawVertexColor' },
+ { cvs: cvs_draw_fragcoord, writeCanvasMethod: 'DrawFragcoord' },
+ ]);
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_rgba16float_copy.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_rgba16float_copy.https.html
new file mode 100644
index 0000000000..400afa121b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_rgba16float_copy.https.html
@@ -0,0 +1,24 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_complex_rgba16float_copy</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta
+ name="assert"
+ content="WebGPU canvas should have correct orientation, components, scaling, filtering, color space"
+ />
+ <link rel="match" href="./ref/canvas_complex-ref.html" />
+
+ <canvas id="cvs_copy_buffer_to_texture" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="cvs_copy_texture_to_texture" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="cvs_copy_external_image_to_texture" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+
+ <script type="module">
+ import { run } from './canvas_complex.html.js';
+ run('rgba16float', [
+ { cvs: cvs_copy_buffer_to_texture, writeCanvasMethod: 'copyBufferToTexture' },
+ { cvs: cvs_copy_texture_to_texture, writeCanvasMethod: 'copyTextureToTexture' },
+ { cvs: cvs_copy_external_image_to_texture, writeCanvasMethod: 'copyExternalImageToTexture' },
+ ]);
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_rgba16float_draw.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_rgba16float_draw.https.html
new file mode 100644
index 0000000000..a647fc2956
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_rgba16float_draw.https.html
@@ -0,0 +1,24 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_complex_rgba16float_draw</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta
+ name="assert"
+ content="WebGPU canvas should have correct orientation, components, scaling, filtering, color space"
+ />
+ <link rel="match" href="./ref/canvas_complex-ref.html" />
+
+ <canvas id="cvs_draw_texture_sample" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="cvs_draw_vertex_color" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="cvs_draw_fragcoord" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+
+ <script type="module">
+ import { run } from './canvas_complex.html.js';
+ run('rgba16float', [
+ { cvs: cvs_draw_texture_sample, writeCanvasMethod: 'DrawTextureSample' },
+ { cvs: cvs_draw_vertex_color, writeCanvasMethod: 'DrawVertexColor' },
+ { cvs: cvs_draw_fragcoord, writeCanvasMethod: 'DrawFragcoord' },
+ ]);
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_rgba16float_store.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_rgba16float_store.https.html
new file mode 100644
index 0000000000..b812129b0b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_rgba16float_store.https.html
@@ -0,0 +1,24 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_complex_rgba16float_store</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta
+ name="assert"
+ content="WebGPU canvas should have correct orientation, components, scaling, filtering, color space"
+ />
+ <link rel="match" href="./ref/canvas_complex-ref.html" />
+
+ <canvas id="cvs_fragment_texture_store" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="cvs_compute_texture_store_1" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="cvs_compute_texture_store_2" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+
+ <script type="module">
+ import { run } from './canvas_complex.html.js';
+ run('rgba16float', [
+ { cvs: cvs_fragment_texture_store, writeCanvasMethod: 'FragmentTextureStore' },
+ { cvs: cvs_compute_texture_store_1, writeCanvasMethod: 'ComputeWorkgroup1x1TextureStore' },
+ { cvs: cvs_compute_texture_store_2, writeCanvasMethod: 'ComputeWorkgroup16x16TextureStore' },
+ ]);
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_rgba8unorm_copy.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_rgba8unorm_copy.https.html
new file mode 100644
index 0000000000..d2570a3bdf
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_rgba8unorm_copy.https.html
@@ -0,0 +1,24 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_complex_rgba8unorm_copy</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta
+ name="assert"
+ content="WebGPU canvas should have correct orientation, components, scaling, filtering, color space"
+ />
+ <link rel="match" href="./ref/canvas_complex-ref.html" />
+
+ <canvas id="cvs_copy_buffer_to_texture" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="cvs_copy_texture_to_texture" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="cvs_copy_external_image_to_texture" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+
+ <script type="module">
+ import { run } from './canvas_complex.html.js';
+ run('rgba8unorm', [
+ { cvs: cvs_copy_buffer_to_texture, writeCanvasMethod: 'copyBufferToTexture' },
+ { cvs: cvs_copy_texture_to_texture, writeCanvasMethod: 'copyTextureToTexture' },
+ { cvs: cvs_copy_external_image_to_texture, writeCanvasMethod: 'copyExternalImageToTexture' },
+ ]);
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_rgba8unorm_draw.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_rgba8unorm_draw.https.html
new file mode 100644
index 0000000000..647a829259
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_rgba8unorm_draw.https.html
@@ -0,0 +1,24 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_complex_rgba8unorm_draw</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta
+ name="assert"
+ content="WebGPU canvas should have correct orientation, components, scaling, filtering, color space"
+ />
+ <link rel="match" href="./ref/canvas_complex-ref.html" />
+
+ <canvas id="cvs_draw_texture_sample" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="cvs_draw_vertex_color" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="cvs_draw_fragcoord" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+
+ <script type="module">
+ import { run } from './canvas_complex.html.js';
+ run('rgba8unorm', [
+ { cvs: cvs_draw_texture_sample, writeCanvasMethod: 'DrawTextureSample' },
+ { cvs: cvs_draw_vertex_color, writeCanvasMethod: 'DrawVertexColor' },
+ { cvs: cvs_draw_fragcoord, writeCanvasMethod: 'DrawFragcoord' },
+ ]);
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_rgba8unorm_store.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_rgba8unorm_store.https.html
new file mode 100644
index 0000000000..b82745658e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_complex_rgba8unorm_store.https.html
@@ -0,0 +1,24 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_complex_rgba8unorm_store</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta
+ name="assert"
+ content="WebGPU canvas should have correct orientation, components, scaling, filtering, color space"
+ />
+ <link rel="match" href="./ref/canvas_complex-ref.html" />
+
+ <canvas id="cvs_fragment_texture_store" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="cvs_compute_texture_store_1" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="cvs_compute_texture_store_2" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+
+ <script type="module">
+ import { run } from './canvas_complex.html.js';
+ run('rgba8unorm', [
+ { cvs: cvs_fragment_texture_store, writeCanvasMethod: 'FragmentTextureStore' },
+ { cvs: cvs_compute_texture_store_1, writeCanvasMethod: 'ComputeWorkgroup1x1TextureStore' },
+ { cvs: cvs_compute_texture_store_2, writeCanvasMethod: 'ComputeWorkgroup16x16TextureStore' },
+ ]);
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha.html.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha.html.ts
new file mode 100644
index 0000000000..eba60f5c51
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha.html.ts
@@ -0,0 +1,177 @@
+import { assert, unreachable } from '../../../common/util/util.js';
+
+import { runRefTest } from './gpu_ref_test.js';
+
+type WriteCanvasMethod = 'draw' | 'copy';
+
+export function run(
+ format: GPUTextureFormat,
+ alphaMode: GPUCanvasAlphaMode,
+ writeCanvasMethod: WriteCanvasMethod
+) {
+ runRefTest(t => {
+ const module = t.device.createShaderModule({
+ code: `
+struct VertexOutput {
+  @builtin(position) Position : vec4<f32>,
+  @location(0) fragColor : vec4<f32>,
+}
+
+@vertex
+fn mainVS(@builtin(vertex_index) VertexIndex : u32) -> VertexOutput {
+  var pos = array<vec2<f32>, 6>(
+    vec2<f32>( 0.75, 0.75),
+    vec2<f32>( 0.75, -0.75),
+    vec2<f32>(-0.75, -0.75),
+    vec2<f32>( 0.75, 0.75),
+    vec2<f32>(-0.75, -0.75),
+    vec2<f32>(-0.75, 0.75));
+
+  var offset = array<vec2<f32>, 4>(
+    vec2<f32>(-0.25, 0.25),
+    vec2<f32>( 0.25, 0.25),
+    vec2<f32>(-0.25, -0.25),
+    vec2<f32>( 0.25, -0.25));
+
+  // Alpha channel value is set to 0.5 regardless of the canvas alpha mode.
+  // For 'opaque' mode, it shouldn't affect the end result, as the alpha channel should always get cleared to 1.0.
+  var color = array<vec4<f32>, 4>(
+    vec4<f32>(0.4, 0.0, 0.0, 0.5),
+    vec4<f32>(0.0, 0.4, 0.0, 0.5),
+    vec4<f32>(0.0, 0.0, 0.4, 0.5),
+    vec4<f32>(0.4, 0.4, 0.0, 0.5)); // 0.4 -> 0x66
+
+  var output : VertexOutput;
+  output.Position = vec4<f32>(pos[VertexIndex % 6u] + offset[VertexIndex / 6u], 0.0, 1.0);
+  output.fragColor = color[VertexIndex / 6u];
+  return output;
+}
+
+@fragment
+fn mainFS(@location(0) fragColor: vec4<f32>) -> @location(0) vec4<f32> {
+  return fragColor;
+}
+ `,
+ });
+
+ document.querySelectorAll('canvas').forEach(canvas => {
+ const ctx = canvas.getContext('webgpu');
+ assert(ctx instanceof GPUCanvasContext, 'Failed to get WebGPU context from canvas');
+
+ switch (format) {
+ case 'bgra8unorm':
+ case 'bgra8unorm-srgb':
+ case 'rgba8unorm':
+ case 'rgba8unorm-srgb':
+ case 'rgba16float':
+ break;
+ default:
+ unreachable();
+ }
+
+ let usage = 0;
+ switch (writeCanvasMethod) {
+ case 'draw':
+ usage = GPUTextureUsage.RENDER_ATTACHMENT;
+ break;
+ case 'copy':
+ usage = GPUTextureUsage.COPY_DST;
+ break;
+ }
+ ctx.configure({
+ device: t.device,
+ format,
+ usage,
+ alphaMode,
+ });
+
+      // The blend state mimics the 2d context's behavior of drawing rects in order
+      // (source-over compositing):
+ // https://drafts.fxtf.org/compositing/#porterduffcompositingoperators_srcover
+ const kBlendStateSourceOver = {
+ color: {
+ srcFactor: 'src-alpha',
+ dstFactor: 'one-minus-src-alpha',
+ operation: 'add',
+ },
+ alpha: {
+ srcFactor: 'one',
+ dstFactor: 'one-minus-src-alpha',
+ operation: 'add',
+ },
+ } as const;
+
+ const pipeline = t.device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'mainVS',
+ },
+ fragment: {
+ module,
+ entryPoint: 'mainFS',
+ targets: [
+ {
+ format,
+ blend: { premultiplied: kBlendStateSourceOver, opaque: undefined }[alphaMode],
+ },
+ ],
+ },
+ primitive: {
+ topology: 'triangle-list',
+ },
+ });
+
+ let renderTarget: GPUTexture;
+ switch (writeCanvasMethod) {
+ case 'draw':
+ renderTarget = ctx.getCurrentTexture();
+ break;
+ case 'copy':
+ renderTarget = t.device.createTexture({
+ size: [ctx.canvas.width, ctx.canvas.height],
+ format,
+ usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
+ });
+ break;
+ }
+ const renderPassDescriptor: GPURenderPassDescriptor = {
+ colorAttachments: [
+ {
+ view: renderTarget.createView(),
+ clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 0.0 },
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ };
+
+ const commandEncoder = t.device.createCommandEncoder();
+ const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
+ passEncoder.setPipeline(pipeline);
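+      // Draw the four overlapping quads in separate calls (firstVertex 0, 6, 12, 18) so they
+      // composite in order, like rects drawn one after another in a 2d context.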
+ passEncoder.draw(6, 1, 0, 0);
+ passEncoder.draw(6, 1, 6, 0);
+ passEncoder.draw(6, 1, 12, 0);
+ passEncoder.draw(6, 1, 18, 0);
+ passEncoder.end();
+
+ switch (writeCanvasMethod) {
+ case 'draw':
+ break;
+ case 'copy':
+ commandEncoder.copyTextureToTexture(
+ {
+ texture: renderTarget,
+ },
+ {
+ texture: ctx.getCurrentTexture(),
+ },
+ [ctx.canvas.width, ctx.canvas.height]
+ );
+ break;
+ }
+
+ t.device.queue.submit([commandEncoder.finish()]);
+ });
+ });
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_bgra8unorm_opaque_copy.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_bgra8unorm_opaque_copy.https.html
new file mode 100644
index 0000000000..60e8417c16
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_bgra8unorm_opaque_copy.https.html
@@ -0,0 +1,21 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_composite_alpha_bgra8unorm_opaque</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta
+ name="assert"
+ content="WebGPU canvas should have correct orientation, components, scaling, filtering, color space"
+ />
+ <link rel="match" href="./ref/canvas_composite_alpha_opaque-ref.html" />
+ <style>
+ body { background-color: #F0E68C; }
+ #c-canvas { background-color: #8CF0E6; }
+ </style>
+ <canvas id="c-body" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="c-canvas" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <script type="module">
+ import { run } from './canvas_composite_alpha.html.js';
+ run('bgra8unorm', 'opaque', 'copy');
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_bgra8unorm_opaque_draw.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_bgra8unorm_opaque_draw.https.html
new file mode 100644
index 0000000000..c0280a2a99
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_bgra8unorm_opaque_draw.https.html
@@ -0,0 +1,21 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_composite_alpha_bgra8unorm_opaque</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta
+ name="assert"
+ content="WebGPU canvas should have correct orientation, components, scaling, filtering, color space"
+ />
+ <link rel="match" href="./ref/canvas_composite_alpha_opaque-ref.html" />
+ <style>
+ body { background-color: #F0E68C; }
+ #c-canvas { background-color: #8CF0E6; }
+ </style>
+ <canvas id="c-body" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="c-canvas" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <script type="module">
+ import { run } from './canvas_composite_alpha.html.js';
+ run('bgra8unorm', 'opaque', 'draw');
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_bgra8unorm_premultiplied_copy.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_bgra8unorm_premultiplied_copy.https.html
new file mode 100644
index 0000000000..70920dc0e6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_bgra8unorm_premultiplied_copy.https.html
@@ -0,0 +1,22 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_composite_alpha_bgra8unorm_premultiplied</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta
+ name="assert"
+ content="WebGPU canvas should have correct orientation, components, scaling, filtering, color space"
+ />
+ <link rel="match" href="./ref/canvas_composite_alpha_premultiplied-ref.html" />
+ <meta name=fuzzy content="maxDifference=0-2;totalPixels=0-400">
+ <style>
+ body { background-color: #F0E68C; }
+ #c-canvas { background-color: #8CF0E6; }
+ </style>
+ <canvas id="c-body" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="c-canvas" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <script type="module">
+ import { run } from './canvas_composite_alpha.html.js';
+ run('bgra8unorm', 'premultiplied', 'copy');
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_bgra8unorm_premultiplied_draw.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_bgra8unorm_premultiplied_draw.https.html
new file mode 100644
index 0000000000..d12751fac2
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_bgra8unorm_premultiplied_draw.https.html
@@ -0,0 +1,22 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_composite_alpha_bgra8unorm_premultiplied</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta
+ name="assert"
+ content="WebGPU canvas should have correct orientation, components, scaling, filtering, color space"
+ />
+ <link rel="match" href="./ref/canvas_composite_alpha_premultiplied-ref.html" />
+ <meta name=fuzzy content="maxDifference=0-2;totalPixels=0-400">
+ <style>
+ body { background-color: #F0E68C; }
+ #c-canvas { background-color: #8CF0E6; }
+ </style>
+ <canvas id="c-body" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="c-canvas" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <script type="module">
+ import { run } from './canvas_composite_alpha.html.js';
+ run('bgra8unorm', 'premultiplied', 'draw');
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba16float_opaque_copy.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba16float_opaque_copy.https.html
new file mode 100644
index 0000000000..4471f08480
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba16float_opaque_copy.https.html
@@ -0,0 +1,21 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_composite_alpha_rgba16float_opaque</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta
+ name="assert"
+ content="WebGPU canvas should have correct orientation, components, scaling, filtering, color space"
+ />
+ <link rel="match" href="./ref/canvas_composite_alpha_opaque-ref.html" />
+ <style>
+ body { background-color: #F0E68C; }
+ #c-canvas { background-color: #8CF0E6; }
+ </style>
+ <canvas id="c-body" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="c-canvas" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <script type="module">
+ import { run } from './canvas_composite_alpha.html.js';
+ run('rgba16float', 'opaque', 'copy');
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba16float_opaque_draw.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba16float_opaque_draw.https.html
new file mode 100644
index 0000000000..11f0e73ec2
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba16float_opaque_draw.https.html
@@ -0,0 +1,21 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_composite_alpha_rgba16float_opaque</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta
+ name="assert"
+ content="WebGPU canvas should have correct orientation, components, scaling, filtering, color space"
+ />
+ <link rel="match" href="./ref/canvas_composite_alpha_opaque-ref.html" />
+ <style>
+ body { background-color: #F0E68C; }
+ #c-canvas { background-color: #8CF0E6; }
+ </style>
+ <canvas id="c-body" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="c-canvas" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <script type="module">
+ import { run } from './canvas_composite_alpha.html.js';
+ run('rgba16float', 'opaque', 'draw');
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba16float_premultiplied_copy.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba16float_premultiplied_copy.https.html
new file mode 100644
index 0000000000..ed722013c1
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba16float_premultiplied_copy.https.html
@@ -0,0 +1,22 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_composite_alpha_rgba16float_premultiplied</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta
+ name="assert"
+ content="WebGPU canvas should have correct orientation, components, scaling, filtering, color space"
+ />
+ <link rel="match" href="./ref/canvas_composite_alpha_premultiplied-ref.html" />
+ <meta name=fuzzy content="maxDifference=0-2;totalPixels=0-400">
+ <style>
+ body { background-color: #F0E68C; }
+ #c-canvas { background-color: #8CF0E6; }
+ </style>
+ <canvas id="c-body" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="c-canvas" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <script type="module">
+ import { run } from './canvas_composite_alpha.html.js';
+ run('rgba16float', 'premultiplied', 'copy');
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba16float_premultiplied_draw.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba16float_premultiplied_draw.https.html
new file mode 100644
index 0000000000..8a028b168e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba16float_premultiplied_draw.https.html
@@ -0,0 +1,22 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_composite_alpha_rgba16float_premultiplied</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta
+ name="assert"
+ content="WebGPU canvas should have correct orientation, components, scaling, filtering, color space"
+ />
+ <link rel="match" href="./ref/canvas_composite_alpha_premultiplied-ref.html" />
+ <meta name=fuzzy content="maxDifference=0-2;totalPixels=0-400">
+ <style>
+ body { background-color: #F0E68C; }
+ #c-canvas { background-color: #8CF0E6; }
+ </style>
+ <canvas id="c-body" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="c-canvas" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <script type="module">
+ import { run } from './canvas_composite_alpha.html.js';
+ run('rgba16float', 'premultiplied', 'draw');
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba8unorm_opaque_copy.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba8unorm_opaque_copy.https.html
new file mode 100644
index 0000000000..7147631d19
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba8unorm_opaque_copy.https.html
@@ -0,0 +1,21 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_composite_alpha_rgba8unorm_opaque</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta
+ name="assert"
+ content="WebGPU canvas should have correct orientation, components, scaling, filtering, color space"
+ />
+ <link rel="match" href="./ref/canvas_composite_alpha_opaque-ref.html" />
+ <style>
+ body { background-color: #F0E68C; }
+ #c-canvas { background-color: #8CF0E6; }
+ </style>
+ <canvas id="c-body" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="c-canvas" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <script type="module">
+ import { run } from './canvas_composite_alpha.html.js';
+ run('rgba8unorm', 'opaque', 'copy');
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba8unorm_opaque_draw.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba8unorm_opaque_draw.https.html
new file mode 100644
index 0000000000..ec2bb05ed3
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba8unorm_opaque_draw.https.html
@@ -0,0 +1,21 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_composite_alpha_rgba8unorm_opaque</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta
+ name="assert"
+ content="WebGPU canvas should have correct orientation, components, scaling, filtering, color space"
+ />
+ <link rel="match" href="./ref/canvas_composite_alpha_opaque-ref.html" />
+ <style>
+ body { background-color: #F0E68C; }
+ #c-canvas { background-color: #8CF0E6; }
+ </style>
+ <canvas id="c-body" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="c-canvas" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <script type="module">
+ import { run } from './canvas_composite_alpha.html.js';
+ run('rgba8unorm', 'opaque', 'draw');
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba8unorm_premultiplied_copy.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba8unorm_premultiplied_copy.https.html
new file mode 100644
index 0000000000..fa938aba41
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba8unorm_premultiplied_copy.https.html
@@ -0,0 +1,22 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_composite_alpha_rgba8unorm_premultiplied</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta
+ name="assert"
+ content="WebGPU canvas should have correct orientation, components, scaling, filtering, color space"
+ />
+ <link rel="match" href="./ref/canvas_composite_alpha_premultiplied-ref.html" />
+ <meta name=fuzzy content="maxDifference=0-2;totalPixels=0-400">
+ <style>
+ body { background-color: #F0E68C; }
+ #c-canvas { background-color: #8CF0E6; }
+ </style>
+ <canvas id="c-body" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="c-canvas" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <script type="module">
+ import { run } from './canvas_composite_alpha.html.js';
+ run('rgba8unorm', 'premultiplied', 'copy');
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba8unorm_premultiplied_draw.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba8unorm_premultiplied_draw.https.html
new file mode 100644
index 0000000000..b62e71054c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_composite_alpha_rgba8unorm_premultiplied_draw.https.html
@@ -0,0 +1,22 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_composite_alpha_rgba8unorm_premultiplied</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta
+ name="assert"
+ content="WebGPU canvas should have correct orientation, components, scaling, filtering, color space"
+ />
+ <link rel="match" href="./ref/canvas_composite_alpha_premultiplied-ref.html" />
+ <meta name=fuzzy content="maxDifference=0-2;totalPixels=0-400">
+ <style>
+ body { background-color: #F0E68C; }
+ #c-canvas { background-color: #8CF0E6; }
+ </style>
+ <canvas id="c-body" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="c-canvas" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <script type="module">
+ import { run } from './canvas_composite_alpha.html.js';
+ run('rgba8unorm', 'premultiplied', 'draw');
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_image_rendering.html.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_image_rendering.html.ts
new file mode 100644
index 0000000000..62b76a8add
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_image_rendering.html.ts
@@ -0,0 +1,79 @@
+import { runRefTest } from './gpu_ref_test.js';
+
+runRefTest(t => {
+ const device = t.device;
+ const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
+
+ const module = device.createShaderModule({
+ code: `
+ @vertex fn vs(
+ @builtin(vertex_index) VertexIndex : u32
+ ) -> @builtin(position) vec4<f32> {
+ var pos = array<vec2<f32>, 3>(
+ vec2(-1.0, 3.0),
+ vec2(-1.0,-1.0),
+ vec2( 3.0,-1.0)
+ );
+
+ return vec4(pos[VertexIndex], 0.0, 1.0);
+ }
+
+ @fragment fn fs(
+ @builtin(position) Pos : vec4<f32>
+ ) -> @location(0) vec4<f32> {
+ let black = vec4f(0, 0, 0, 1);
+ let white = vec4f(1, 1, 1, 1);
+ let iPos = vec4u(Pos);
+ let check = (iPos.x + iPos.y) & 1;
+ return mix(black, white, f32(check));
+ }
+ `,
+ });
+
+ const pipeline = device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'vs',
+ },
+ fragment: {
+ module,
+ entryPoint: 'fs',
+ targets: [{ format: presentationFormat }],
+ },
+ });
+
+ function draw(selector: string, alphaMode: GPUCanvasAlphaMode) {
+ const canvas = document.querySelector(selector) as HTMLCanvasElement;
+ const context = canvas.getContext('webgpu') as GPUCanvasContext;
+ context.configure({
+ device,
+ format: presentationFormat,
+ alphaMode,
+ });
+
+ const encoder = device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: context.getCurrentTexture().createView(),
+ clearValue: [0.0, 0.0, 0.0, 0.0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ pass.setPipeline(pipeline);
+ pass.draw(3);
+ pass.end();
+
+ device.queue.submit([encoder.finish()]);
+ }
+
+ draw('#elem1', 'premultiplied');
+ draw('#elem2', 'premultiplied');
+ draw('#elem3', 'premultiplied');
+ draw('#elem4', 'opaque');
+ draw('#elem5', 'opaque');
+ draw('#elem6', 'opaque');
+});
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_image_rendering.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_image_rendering.https.html
new file mode 100644
index 0000000000..f51145645b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/canvas_image_rendering.https.html
@@ -0,0 +1,15 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_image_rendering</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta name="assert" content="WebGPU canvas with image-rendering set should be rendered correctly" />
+ <link rel="match" href="./ref/canvas_image_rendering-ref.html" />
+ <canvas id="elem1" width="64" height="64" style="width: 99px; height: 99px;"></canvas>
+ <canvas id="elem2" width="64" height="64" style="width: 99px; height: 99px; image-rendering: pixelated;"></canvas>
+ <canvas id="elem3" width="64" height="64" style="width: 99px; height: 99px; image-rendering: crisp-edges"></canvas>
+ <canvas id="elem4" width="64" height="64" style="width: 99px; height: 99px;"></canvas>
+ <canvas id="elem5" width="64" height="64" style="width: 99px; height: 99px; image-rendering: pixelated;"></canvas>
+ <canvas id="elem6" width="64" height="64" style="width: 99px; height: 99px; image-rendering: crisp-edges"></canvas>
+ <script type="module" src="canvas_image_rendering.html.js"></script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/create-pattern-data-url.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/create-pattern-data-url.ts
new file mode 100644
index 0000000000..aa96bbd85b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/create-pattern-data-url.ts
@@ -0,0 +1,23 @@
+// creates a 4x4 pattern
+export default function createPatternDataURL() {
+ const patternSize = 4;
+ const ctx = document.createElement('canvas').getContext('2d')!;
+ ctx.canvas.width = patternSize;
+ ctx.canvas.height = patternSize;
+
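+  // b = black, t = transparent, r = red, g = green (RGBA byte quadruples).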
+ const b = [0, 0, 0, 255];
+ const t = [0, 0, 0, 0];
+ const r = [255, 0, 0, 255];
+ const g = [0, 255, 0, 255];
+
+ const imageData = new ImageData(patternSize, patternSize);
+ // prettier-ignore
+ imageData.data.set([
+ b, t, t, r,
+ t, b, g, t,
+ t, r, b, t,
+ g, t, t, b,
+ ].flat());
+ ctx.putImageData(imageData, 0, 0);
+ return { patternSize, imageData, dataURL: ctx.canvas.toDataURL() };
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/gpu_ref_test.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/gpu_ref_test.ts
new file mode 100644
index 0000000000..48161ac33e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/gpu_ref_test.ts
@@ -0,0 +1,26 @@
+import { assert } from '../../../common/util/util.js';
+import { takeScreenshotDelayed } from '../../../common/util/wpt_reftest_wait.js';
+
+interface GPURefTest {
+ readonly device: GPUDevice;
+ readonly queue: GPUQueue;
+}
+
+export function runRefTest(fn: (t: GPURefTest) => Promise<void> | void): void {
+ void (async () => {
+ assert(
+ typeof navigator !== 'undefined' && navigator.gpu !== undefined,
+ 'No WebGPU implementation found'
+ );
+
+ const adapter = await navigator.gpu.requestAdapter();
+ assert(adapter !== null);
+ const device = await adapter.requestDevice();
+ assert(device !== null);
+ const queue = device.queue;
+
+ await fn({ device, queue });
+
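+    // Delay the screenshot briefly, presumably so the final frame is presented before the
+    // reftest snapshot is taken.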
+ takeScreenshotDelayed(50);
+ })();
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_clear-ref.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_clear-ref.html
new file mode 100644
index 0000000000..e37b78c3a6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_clear-ref.html
@@ -0,0 +1,22 @@
+<!DOCTYPE html>
+<html>
+ <title>WebGPU canvas_clear (ref)</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <canvas id="cvs0" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="cvs1" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="cvs2" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <script>
+ function draw(canvas) {
+ var c = document.getElementById(canvas);
+ var ctx = c.getContext('2d');
+ ctx.fillStyle = '#66FF00';
+ ctx.fillRect(0, 0, c.width, c.height);
+ }
+
+ draw('cvs0');
+ draw('cvs1');
+ draw('cvs2');
+
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_colorspace-ref.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_colorspace-ref.html
new file mode 100644
index 0000000000..a6da9f6748
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_colorspace-ref.html
@@ -0,0 +1,17 @@
+<!DOCTYPE html>
+<html>
+ <title>WebGPU canvas_colorspace (ref)</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <style>
+ canvas {
+ width: 128px;
+ height: 128px;
+ margin-right: 5px;
+ image-rendering: pixelated;
+ image-rendering: crisp-edges;
+ }
+ </style>
+ <body></body>
+ <script type="module" src="canvas_colorspace-ref.html.js"></script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_colorspace-ref.html.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_colorspace-ref.html.ts
new file mode 100644
index 0000000000..aa8583311e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_colorspace-ref.html.ts
@@ -0,0 +1,41 @@
+import { kUnitCaseParamsBuilder } from '../../../../common/framework/params_builder.js';
+import { kCanvasAlphaModes, kCanvasColorSpaces } from '../../../capability_info.js';
+
+// prettier-ignore
+const kRGBAData = new Uint8Array([
+ 0, 255, 0, 255,
+ 117, 251, 7, 255,
+ 170, 35, 209, 255,
+ 80, 150, 200, 255,
+]);
+const width = kRGBAData.length / 4;
+
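+// Descriptive note (editor-added): creates a `width`x1 canvas with a 2D context in the
+// given color space, writes kRGBAData into it via putImageData, and appends it to the
+// document. The same byte values are interpreted per color space, which is what this
+// reference relies on.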
+function createCanvas(colorSpace: PredefinedColorSpace) {
+ const canvas = document.createElement('canvas');
+ canvas.width = width;
+ canvas.height = 1;
+ const context = canvas.getContext('2d', {
+ colorSpace,
+ }) as CanvasRenderingContext2D;
+
+ const imgData = context.getImageData(0, 0, width, 1);
+ imgData.data.set(kRGBAData);
+ context.putImageData(imgData, 0, 0);
+
+ document.body.appendChild(canvas);
+}
+
+const u = kUnitCaseParamsBuilder
+ .combine('alphaMode', kCanvasAlphaModes)
+ .combine('colorSpace', kCanvasColorSpaces)
+ .combine('creation', [
+ 'canvas',
+ 'transferControlToOffscreen',
+ 'transferControlToOffscreenWorker',
+ ]);
+
+// Generate reference canvases for all combinations from the test.
+// We only need colorSpace to generate the correct reference.
+for (const { colorSpace } of u) {
+ createCanvas(colorSpace);
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_complex-ref.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_complex-ref.html
new file mode 100644
index 0000000000..b1d46c108a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_complex-ref.html
@@ -0,0 +1,26 @@
+<!DOCTYPE html>
+<html>
+ <title>WebGPU canvas_complex (ref)</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <canvas id="cvs0" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="cvs1" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="cvs2" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <script>
+ function draw(ctx) {
+ ctx.fillStyle = '#660000';
+ ctx.fillRect(0, 0, 10, 10);
+ ctx.fillStyle = '#006600';
+ ctx.fillRect(10, 0, 10, 10);
+ ctx.fillStyle = '#000066';
+ ctx.fillRect(0, 10, 10, 10);
+ ctx.fillStyle = '#666600';
+ ctx.fillRect(10, 10, 10, 10);
+ }
+
+ draw(cvs0.getContext('2d'));
+ draw(cvs1.getContext('2d'));
+ draw(cvs2.getContext('2d'));
+
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_composite_alpha_opaque-ref.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_composite_alpha_opaque-ref.html
new file mode 100644
index 0000000000..94b9486514
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_composite_alpha_opaque-ref.html
@@ -0,0 +1,26 @@
+<!DOCTYPE html>
+<html>
+  <title>WebGPU canvas_composite_alpha_opaque (ref)</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <style>
+ body { background-color: #F0E68C; }
+ #c-canvas { background-color: #8CF0E6; }
+ </style>
+ <canvas id="c-body" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="c-canvas" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <script>
+ document.querySelectorAll('canvas').forEach(canvas => {
+ const ctx = canvas.getContext('2d');
+ ctx.globalAlpha = 1.0;
+ ctx.fillStyle = '#660000';
+ ctx.fillRect(0, 0, 15, 15);
+ ctx.fillStyle = '#006600';
+ ctx.fillRect(5, 0, 15, 15);
+ ctx.fillStyle = '#000066';
+ ctx.fillRect(0, 5, 15, 20);
+ ctx.fillStyle = '#666600';
+ ctx.fillRect(5, 5, 20, 20);
+ });
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_composite_alpha_premultiplied-ref.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_composite_alpha_premultiplied-ref.html
new file mode 100644
index 0000000000..635625ecc7
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_composite_alpha_premultiplied-ref.html
@@ -0,0 +1,26 @@
+<!DOCTYPE html>
+<html>
+ <title>WebGPU canvas_composite_alpha_premultiplied (ref)</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <style>
+ body { background-color: #F0E68C; }
+ #c-canvas { background-color: #8CF0E6; }
+ </style>
+ <canvas id="c-body" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <canvas id="c-canvas" width="20" height="20" style="width: 20px; height: 20px;"></canvas>
+ <script>
+ document.querySelectorAll('canvas').forEach(canvas => {
+ const ctx = canvas.getContext('2d');
+ ctx.globalAlpha = 0.5;
+ ctx.fillStyle = '#660000';
+ ctx.fillRect(0, 0, 15, 15);
+ ctx.fillStyle = '#006600';
+ ctx.fillRect(5, 0, 15, 15);
+ ctx.fillStyle = '#000066';
+ ctx.fillRect(0, 5, 15, 20);
+ ctx.fillStyle = '#666600';
+ ctx.fillRect(5, 5, 20, 20);
+ });
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_image_rendering-ref.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_image_rendering-ref.html
new file mode 100644
index 0000000000..f9eca704e8
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/canvas_image_rendering-ref.html
@@ -0,0 +1,25 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU canvas_image_rendering (ref)</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <img id="elem1" width="64" height="64" style="width: 99px; height: 99px;">
+ <img id="elem2" width="64" height="64" style="width: 99px; height: 99px; image-rendering: pixelated;">
+ <img id="elem3" width="64" height="64" style="width: 99px; height: 99px; image-rendering: crisp-edges">
+ <img id="elem4" width="64" height="64" style="width: 99px; height: 99px;">
+ <img id="elem5" width="64" height="64" style="width: 99px; height: 99px; image-rendering: pixelated;">
+ <img id="elem6" width="64" height="64" style="width: 99px; height: 99px; image-rendering: crisp-edges">
+ <script type="module">
+ import { takeScreenshotDelayed } from '../../../../common/util/wpt_reftest_wait.js';
+
+ (async () => {
+ const dataURL = 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAYAAACqaXHeAAAAAXNSR0IArs4c6QAAAKdJREFUeF7t28kJwDAQA0Cp/6I3hJAqNK/kazDr2atJ7u7S9v3Z+76nnz18m7oBboAYIAZMRv//1fMKcAAHkCAJLuYAXoEv8ZMLcAAHcAAHcAAHjBZEOeBSDuAADuAADuAADtjrCqsIqQh98xAkSIIkSIIkSIIkSIKrYzJ6gyRIgiRIgiRIgiRIgiRoZ2hwYcp8gKqwSVF9AXuD9gbtDdobXGWw7nCbB5+MQQlHipKKAAAAAElFTkSuQmCC';
+ await Promise.all([...document.querySelectorAll('img')].map(img => {
+ img.src = dataURL;
+ return img.decode();
+ }));
+
+ takeScreenshotDelayed(50);
+ })();
+ </script>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/resize_observer-ref.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/resize_observer-ref.html
new file mode 100644
index 0000000000..5259a25c27
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/ref/resize_observer-ref.html
@@ -0,0 +1,90 @@
+<!DOCTYPE html>
+<html>
+ <title>WebGPU ResizeObserver test (ref)</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <style>
+ .outer {
+ display: flex;
+ align-items: center;
+ flex-direction: column;
+ }
+ .outer>* {
+ display: block;
+ height: 100px;
+ }
+ </style>
+ <body>
+ <div id="dpr"></div>
+ <div class="outer"></div>
+ <script type="module">
+ import { takeScreenshotDelayed } from '../../../../common/util/wpt_reftest_wait.js';
+ import createPatternDataURL from '../create-pattern-data-url.js';
+
+ (async () => {
+ const {patternSize, dataURL} = createPatternDataURL();
+
+ document.querySelector('#dpr').textContent = `dpr: ${devicePixelRatio}`;
+
+ /**
+ * Set the pattern's size on this element so that it draws where
+ * 1 pixel in the pattern maps to 1 devicePixel.
+ */
+ function setPattern(elem) {
+ const oneDevicePixel = 1 / devicePixelRatio;
+ const patternPixels = oneDevicePixel * patternSize;
+ elem.style.backgroundImage = `url("${dataURL}")`;
+ elem.style.backgroundSize = `${patternPixels}px ${patternPixels}px`;
+ }
+
+ /*
+ This ref creates elements like this
+ <body>
+ <div class="outer">
+ <div></div>
+ <div></div>
+ <div></div>
+ ...
+ </div>
+ </body>
+ Where the outer div is a flexbox centering the child elements.
+ Each of the child elements is set to a different width in percent.
+ The devicePixelContentBox size of each child element is observed
+ with a ResizeObserver and when changed, a pattern is applied to
+ the element and the pattern's size set so each pixel in the pattern
+ will be one device pixel.
+ A similar process happens in the test HTML using canvases
+ and patterns generated using putImageData.
+ The test and this reference page should then match.
+ */
+
+ const outerElem = document.querySelector('.outer');
+
+ let resolve;
+ const promise = new Promise(_resolve => (resolve = _resolve));
+
+      /**
+       * ResizeObserver callback: apply the pattern to every observed element,
+       * then resolve the promise so the screenshot can be taken.
+       */
+      function setPatterns(entries) {
+        for (const entry of entries) {
+          setPattern(entry.target);
+        }
+        resolve();
+      }
+
+ const observer = new ResizeObserver(setPatterns);
+ for (let percentSize = 7; percentSize < 100; percentSize += 13) {
+ const innerElem = document.createElement('div');
+ innerElem.style.width = `${percentSize}%`;
+ observer.observe(innerElem, {box:"device-pixel-content-box"});
+ outerElem.appendChild(innerElem);
+ }
+
+ await promise;
+ takeScreenshotDelayed(50);
+ })();
+ </script>
+ </body>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/resize_observer.html.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/resize_observer.html.ts
new file mode 100644
index 0000000000..9cb9905a77
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/resize_observer.html.ts
@@ -0,0 +1,150 @@
+import createPatternDataURL from './create-pattern-data-url.js';
+import { runRefTest } from './gpu_ref_test.js';
+
+runRefTest(async t => {
+ const { patternSize, imageData: patternImageData } = createPatternDataURL();
+
+ document.querySelector('#dpr')!.textContent = `dpr: ${devicePixelRatio}`;
+
+ const device = t.device;
+ const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
+
+ const module = device.createShaderModule({
+ code: `
+ @vertex fn vs(
+ @builtin(vertex_index) VertexIndex : u32
+ ) -> @builtin(position) vec4<f32> {
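+      // A single oversized triangle that covers the entire viewport in clip space.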
+ var pos = array<vec2<f32>, 3>(
+ vec2(-1.0, 3.0),
+ vec2(-1.0,-1.0),
+ vec2( 3.0,-1.0)
+ );
+
+ return vec4(pos[VertexIndex], 0.0, 1.0);
+ }
+
+ @group(0) @binding(0) var pattern: texture_2d<f32>;
+
+ @fragment fn fs(
+ @builtin(position) Pos : vec4<f32>
+ ) -> @location(0) vec4<f32> {
+ let patternSize = textureDimensions(pattern, 0);
+ let uPos = vec2u(Pos.xy) % patternSize;
+ return textureLoad(pattern, uPos, 0);
+ }
+ `,
+ });
+
+ const pipeline = device.createRenderPipeline({
+ layout: 'auto',
+ vertex: {
+ module,
+ entryPoint: 'vs',
+ },
+ fragment: {
+ module,
+ entryPoint: 'fs',
+ targets: [{ format: presentationFormat }],
+ },
+ });
+
+ const tex = device.createTexture({
+ size: [patternSize, patternSize, 1],
+ format: 'rgba8unorm',
+ usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_DST,
+ });
+ device.queue.writeTexture(
+ { texture: tex },
+ patternImageData.data,
+ { bytesPerRow: patternSize * 4, rowsPerImage: 4 },
+ { width: patternSize, height: patternSize }
+ );
+
+ const bindGroup = device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: tex.createView() }],
+ });
+
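+  // Descriptive note (editor-added): resizes the canvas backing store to the given
+  // device-pixel size, configures it as a WebGPU canvas, and renders a full-screen
+  // triangle whose fragment shader tiles the pattern texture so that one pattern texel
+  // maps to one device pixel.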
+ function setCanvasPattern(
+ canvas: HTMLCanvasElement,
+ devicePixelWidth: number,
+ devicePixelHeight: number
+ ) {
+ canvas.width = devicePixelWidth;
+ canvas.height = devicePixelHeight;
+
+ const context = canvas.getContext('webgpu') as GPUCanvasContext;
+ context.configure({
+ device,
+ format: presentationFormat,
+ alphaMode: 'premultiplied',
+ });
+
+ const encoder = device.createCommandEncoder();
+ const pass = encoder.beginRenderPass({
+ colorAttachments: [
+ {
+ view: context.getCurrentTexture().createView(),
+ clearValue: [0.0, 0.0, 0.0, 0.0],
+ loadOp: 'clear',
+ storeOp: 'store',
+ },
+ ],
+ });
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.draw(3);
+ pass.end();
+
+ device.queue.submit([encoder.finish()]);
+ }
+
+ /*
+ This test creates elements like this
+ <body>
+ <div class="outer">
+ <canvas></canvas>
+ <canvas></canvas>
+ <canvas></canvas>
+ ...
+ </div>
+ </body>
+ Where the outer div is a flexbox centering the child canvases.
+ Each of the child canvases is set to a different width in percent.
+ The size of each canvas in device pixels is queried with ResizeObserver
+  and then each canvas's resolution is set to that size so that there should
+ be one pixel in each canvas for each device pixel.
+ Each canvas is filled with a pattern using putImageData.
+ In the reference the canvas elements are replaced with divs.
+ For the divs the same pattern is applied with CSS and its size
+ adjusted so the pattern should appear with one pixel in the pattern
+ corresponding to 1 device pixel.
+ The reference and this page should then match.
+ */
+
+ const outerElem = document.querySelector('.outer')!;
+
+ let resolve: (value: unknown) => void;
+ const promise = new Promise(_resolve => (resolve = _resolve));
+
+ function setPatternsUsingSizeInfo(entries: ResizeObserverEntry[]) {
+ for (const entry of entries) {
+ setCanvasPattern(
+ entry.target as HTMLCanvasElement,
+ entry.devicePixelContentBoxSize[0].inlineSize,
+ entry.devicePixelContentBoxSize[0].blockSize
+ );
+ }
+ resolve(true);
+ }
+
+ const observer = new ResizeObserver(setPatternsUsingSizeInfo);
+ for (let percentSize = 7; percentSize < 100; percentSize += 13) {
+ const canvasElem = document.createElement('canvas');
+ canvasElem.style.width = `${percentSize}%`;
+ observer.observe(canvasElem, { box: 'device-pixel-content-box' });
+ outerElem.appendChild(canvasElem);
+ }
+
+ await promise;
+});
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/resize_observer.https.html b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/resize_observer.https.html
new file mode 100644
index 0000000000..2845cc29eb
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/reftests/resize_observer.https.html
@@ -0,0 +1,24 @@
+<!DOCTYPE html>
+<html class="reftest-wait">
+ <title>WebGPU resize_observer</title>
+ <meta charset="utf-8" />
+ <link rel="help" href="https://gpuweb.github.io/gpuweb/" />
+ <meta name="assert" content="WebGPU canvases should return the correct ResizeObserver values" />
+ <link rel="match" href="./ref/resize_observer-ref.html" />
+ <style>
+ .outer {
+ display: flex;
+ align-items: center;
+ flex-direction: column;
+ }
+ .outer>* {
+ display: block;
+ height: 100px;
+ }
+ </style>
+ <body>
+ <div id="dpr"></div>
+ <div class="outer"></div>
+ <script type="module" src="resize_observer.html.js"></script>
+ </body>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/util.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/util.ts
new file mode 100644
index 0000000000..84ac6b31d1
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/util.ts
@@ -0,0 +1,307 @@
+import { Fixture, SkipTestCase } from '../../common/framework/fixture.js';
+import { getResourcePath } from '../../common/framework/resources.js';
+import { makeTable } from '../../common/util/data_tables.js';
+import { timeout } from '../../common/util/timeout.js';
+import { ErrorWithExtra, raceWithRejectOnTimeout } from '../../common/util/util.js';
+import { GPUTest } from '../gpu_test.js';
+
+declare global {
+ interface HTMLMediaElement {
+ // Add captureStream() support for HTMLMediaElement from
+ // https://w3c.github.io/mediacapture-fromelement/#dom-htmlmediaelement-capturestream
+ captureStream(): MediaStream;
+ }
+}
+
+export const kVideoInfo =
+ /* prettier-ignore */ makeTable(
+ ['mimeType' ] as const,
+ [undefined ] as const, {
+ // All video names
+ 'four-colors-vp8-bt601.webm': ['video/webm; codecs=vp8' ],
+ 'four-colors-theora-bt601.ogv': ['video/ogg; codecs=theora' ],
+ 'four-colors-h264-bt601.mp4': ['video/mp4; codecs=avc1.4d400c'],
+ 'four-colors-vp9-bt601.webm': ['video/webm; codecs=vp9' ],
+ 'four-colors-vp9-bt709.webm': ['video/webm; codecs=vp9' ],
+ 'four-colors-vp9-bt2020.webm': ['video/webm; codecs=vp9' ],
+ 'four-colors-h264-bt601-rotate-90.mp4': ['video/mp4; codecs=avc1.4d400c'],
+ 'four-colors-h264-bt601-rotate-180.mp4': ['video/mp4; codecs=avc1.4d400c'],
+ 'four-colors-h264-bt601-rotate-270.mp4': ['video/mp4; codecs=avc1.4d400c'],
+ } as const);
+export type VideoName = keyof typeof kVideoInfo;
+
+// Expectation values about converting video contents to sRGB color space.
+// Source video color space affects expected values.
+// The process to calculate these expected pixel values can be found:
+// https://github.com/gpuweb/cts/pull/2242#issuecomment-1430382811
+// and https://github.com/gpuweb/cts/pull/2242#issuecomment-1463273434
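+// Note (editor-added): some components fall slightly outside [0, 1], presumably because
+// converting BT.601-encoded content into the sRGB color space can produce out-of-gamut
+// colors; see the linked discussions above for the derivation.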
+const kBt601PixelValue = {
+ red: new Float32Array([0.972945567233341, 0.141794376683341, -0.0209589916711088, 1.0]),
+ green: new Float32Array([0.248234279433399, 0.984810378661784, -0.0564701319494314, 1.0]),
+ blue: new Float32Array([0.10159735826538, 0.135451122863674, 1.00262982899724, 1.0]),
+ yellow: new Float32Array([0.995470750775951, 0.992742114518355, -0.0774291236205402, 1.0]),
+};
+
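+// Convert float expectations to unorm8 bytes. The intermediate Uint8ClampedArray clamps
+// any out-of-range values (e.g. the negative BT.601 components above) into [0, 255].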
+function convertToUnorm8(expectation: Float32Array): Uint8Array {
+ const unorm8 = new Uint8ClampedArray(expectation.length);
+
+ for (let i = 0; i < expectation.length; ++i) {
+ unorm8[i] = Math.round(expectation[i] * 255.0);
+ }
+
+ return new Uint8Array(unorm8.buffer);
+}
+
+// kVideoExpectations uses unorm8 results
+const kBt601Red = convertToUnorm8(kBt601PixelValue.red);
+const kBt601Green = convertToUnorm8(kBt601PixelValue.green);
+const kBt601Blue = convertToUnorm8(kBt601PixelValue.blue);
+const kBt601Yellow = convertToUnorm8(kBt601PixelValue.yellow);
+
+export const kVideoExpectations = [
+ {
+ videoName: 'four-colors-vp8-bt601.webm',
+ _redExpectation: kBt601Red,
+ _greenExpectation: kBt601Green,
+ _blueExpectation: kBt601Blue,
+ _yellowExpectation: kBt601Yellow,
+ },
+ {
+ videoName: 'four-colors-theora-bt601.ogv',
+ _redExpectation: kBt601Red,
+ _greenExpectation: kBt601Green,
+ _blueExpectation: kBt601Blue,
+ _yellowExpectation: kBt601Yellow,
+ },
+ {
+ videoName: 'four-colors-h264-bt601.mp4',
+ _redExpectation: kBt601Red,
+ _greenExpectation: kBt601Green,
+ _blueExpectation: kBt601Blue,
+ _yellowExpectation: kBt601Yellow,
+ },
+ {
+ videoName: 'four-colors-vp9-bt601.webm',
+ _redExpectation: kBt601Red,
+ _greenExpectation: kBt601Green,
+ _blueExpectation: kBt601Blue,
+ _yellowExpectation: kBt601Yellow,
+ },
+ {
+ videoName: 'four-colors-vp9-bt709.webm',
+ _redExpectation: new Uint8Array([255, 0, 0, 255]),
+ _greenExpectation: new Uint8Array([0, 255, 0, 255]),
+ _blueExpectation: new Uint8Array([0, 0, 255, 255]),
+ _yellowExpectation: new Uint8Array([255, 255, 0, 255]),
+ },
+] as const;
+
+export const kVideoRotationExpectations = [
+ {
+ videoName: 'four-colors-h264-bt601-rotate-90.mp4',
+ _topLeftExpectation: kBt601Red,
+ _topRightExpectation: kBt601Green,
+ _bottomLeftExpectation: kBt601Yellow,
+ _bottomRightExpectation: kBt601Blue,
+ },
+ {
+ videoName: 'four-colors-h264-bt601-rotate-180.mp4',
+ _topLeftExpectation: kBt601Green,
+ _topRightExpectation: kBt601Blue,
+ _bottomLeftExpectation: kBt601Red,
+ _bottomRightExpectation: kBt601Yellow,
+ },
+ {
+ videoName: 'four-colors-h264-bt601-rotate-270.mp4',
+ _topLeftExpectation: kBt601Blue,
+ _topRightExpectation: kBt601Yellow,
+ _bottomLeftExpectation: kBt601Green,
+ _bottomRightExpectation: kBt601Red,
+ },
+] as const;
+
+/**
+ * Starts playing a video and waits for it to be consumable.
+ * Returns a promise which resolves after `callback` (which may be async) completes.
+ *
+ * @param video An HTML5 Video element.
+ * @param callback Function to call when video is ready.
+ *
+ * Adapted from https://github.com/KhronosGroup/WebGL/blob/main/sdk/tests/js/webgl-test-utils.js
+ */
+export function startPlayingAndWaitForVideo(
+ video: HTMLVideoElement,
+ callback: () => unknown | Promise<unknown>
+): Promise<void> {
+ return raceWithRejectOnTimeout(
+ new Promise((resolve, reject) => {
+ const callbackAndResolve = () =>
+ void (async () => {
+ try {
+ await callback();
+ resolve();
+ } catch (ex) {
+ reject(ex);
+ }
+ })();
+ if (video.error) {
+ reject(
+ new ErrorWithExtra('Video.error: ' + video.error.message, () => ({ error: video.error }))
+ );
+ return;
+ }
+
+ video.addEventListener(
+ 'error',
+ event => reject(new ErrorWithExtra('Video received "error" event', () => ({ event }))),
+ true
+ );
+
+ if (video.requestVideoFrameCallback) {
+ video.requestVideoFrameCallback(() => {
+ callbackAndResolve();
+ });
+ } else {
+ // If requestVideoFrameCallback isn't available, check each frame if the video has advanced.
+ const timeWatcher = () => {
+ if (video.currentTime > 0) {
+ callbackAndResolve();
+ } else {
+ requestAnimationFrame(timeWatcher);
+ }
+ };
+ timeWatcher();
+ }
+
+ video.loop = true;
+ video.muted = true;
+ video.preload = 'auto';
+ video.play().catch(reject);
+ }),
+ 2000,
+ 'Video never became ready'
+ );
+}
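+
+// Example usage (editor-added sketch only; the callback body depends on the specific test):
+//   const videoElement = getVideoElement(t, 'four-colors-vp8-bt601.webm');
+//   await startPlayingAndWaitForVideo(videoElement, async () => {
+//     // e.g. import or copy the current video frame and run the checks here
+//   });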
+
+/**
+ * Fire a `callback` on the next task (via a zero-delay timeout).
+ * Returns a promise which resolves after `callback` (which may be async) completes.
+ */
+export function waitForNextTask(callback: () => unknown | Promise<unknown>): Promise<void> {
+ const { promise, callbackAndResolve } = callbackHelper(callback, 'wait for next task timed out');
+ timeout(() => {
+ callbackAndResolve();
+ }, 0);
+
+ return promise;
+}
+
+/**
+ * Fire a `callback` when the video reaches a new frame.
+ * Returns a promise which resolves after `callback` (which may be async) completes.
+ *
+ * MAINTENANCE_TODO: Find a way to implement this for browsers without requestVideoFrameCallback as
+ * well, similar to the timeWatcher path in startPlayingAndWaitForVideo. If that path is proven to
+ * work well, we can consider getting rid of the requestVideoFrameCallback path.
+ */
+export function waitForNextFrame(
+ video: HTMLVideoElement,
+ callback: () => unknown | Promise<unknown>
+): Promise<void> {
+ const { promise, callbackAndResolve } = callbackHelper(callback, 'waitForNextFrame timed out');
+
+ if ('requestVideoFrameCallback' in video) {
+ video.requestVideoFrameCallback(() => {
+ callbackAndResolve();
+ });
+ } else {
+ throw new SkipTestCase('waitForNextFrame currently requires requestVideoFrameCallback');
+ }
+
+ return promise;
+}
+
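+/**
+ * Descriptive note (editor-added): captures a single VideoFrame from a playing
+ * HTMLVideoElement by routing video.captureStream() through a MediaStreamTrackProcessor;
+ * the first frame seen by the TransformStream resolves the promise and stops the track.
+ * Skips the test if captureStream is unsupported, and rejects if no frame arrives within
+ * 2 seconds.
+ */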
+export async function getVideoFrameFromVideoElement(
+ test: Fixture,
+ video: HTMLVideoElement
+): Promise<VideoFrame> {
+ if (video.captureStream === undefined) {
+ test.skip('HTMLVideoElement.captureStream is not supported');
+ }
+
+ return raceWithRejectOnTimeout(
+ new Promise<VideoFrame>(resolve => {
+ const videoTrack: MediaStreamVideoTrack = video.captureStream().getVideoTracks()[0];
+ const trackProcessor: MediaStreamTrackProcessor<VideoFrame> = new MediaStreamTrackProcessor({
+ track: videoTrack,
+ });
+ const transformer: TransformStream = new TransformStream({
+ transform(videoFrame, _controller) {
+ videoTrack.stop();
+ resolve(videoFrame);
+ },
+ flush(controller) {
+ controller.terminate();
+ },
+ });
+ const trackGenerator: MediaStreamTrackGenerator<VideoFrame> = new MediaStreamTrackGenerator({
+ kind: 'video',
+ });
+ trackProcessor.readable
+ .pipeThrough(transformer)
+ .pipeTo(trackGenerator.writable)
+ .catch(() => {});
+ }),
+ 2000,
+ 'Video never became ready'
+ );
+}
+
+/**
+ * Create an HTMLVideoElement for the given VideoName, and check whether the video is
+ * playable in the current browser environment (the test is skipped if not).
+ * Returns an HTMLVideoElement.
+ *
+ * @param t: GPUTest that requires getting HTMLVideoElement
+ * @param videoName: Required video name
+ *
+ */
+export function getVideoElement(t: GPUTest, videoName: VideoName): HTMLVideoElement {
+ const videoElement = document.createElement('video');
+ const videoInfo = kVideoInfo[videoName];
+
+ if (videoElement.canPlayType(videoInfo.mimeType) === '') {
+ t.skip('Video codec is not supported');
+ }
+
+ const videoUrl = getResourcePath(videoName);
+ videoElement.src = videoUrl;
+
+ return videoElement;
+}
+
+/**
+ * Helper for doing something inside of a (possibly async) callback (directly, not in a following
+ * microtask), and returning a promise when the callback is done.
+ * MAINTENANCE_TODO: Use this in startPlayingAndWaitForVideo (and make sure it works).
+ */
+function callbackHelper(
+ callback: () => unknown | Promise<unknown>,
+ timeoutMessage: string
+): { promise: Promise<void>; callbackAndResolve: () => void } {
+ let callbackAndResolve: () => void;
+
+ const promiseWithoutTimeout = new Promise<void>((resolve, reject) => {
+ callbackAndResolve = () =>
+ void (async () => {
+ try {
+ await callback(); // catches both exceptions and rejections
+ resolve();
+ } catch (ex) {
+ reject(ex);
+ }
+ })();
+ });
+ const promise = raceWithRejectOnTimeout(promiseWithoutTimeout, 2000, timeoutMessage);
+ return { promise, callbackAndResolve: callbackAndResolve! };
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/worker/worker.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/worker/worker.spec.ts
new file mode 100644
index 0000000000..67f9f693be
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/worker/worker.spec.ts
@@ -0,0 +1,35 @@
+export const description = `
+Tests WebGPU is available in a worker.
+
+Note: The CTS test can be run in a worker by passing in worker=1 as
+a query parameter. This test is specifically to check that WebGPU
+is available in a worker.
+`;
+
+import { Fixture } from '../../../common/framework/fixture.js';
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { assert } from '../../../common/util/util.js';
+
+export const g = makeTestGroup(Fixture);
+
+function isNode(): boolean {
+ return typeof process !== 'undefined' && process?.versions?.node !== undefined;
+}
+
+g.test('worker')
+ .desc(`test WebGPU is available in DedicatedWorkers and check for basic functionality`)
+ .fn(async t => {
+ if (isNode()) {
+ t.skip('node does not support 100% compatible workers');
+ return;
+ }
+    // Note: we load worker_launcher dynamically because ts-node support
+    // uses CommonJS, which doesn't support import.meta. Further,
+    // we need to put the url in a string and pass the string to import(),
+    // otherwise TypeScript tries to parse the file, which again fails.
+    // worker_launcher.js is excluded in node.tsconfig.json.
+ const url = './worker_launcher.js';
+ const { launchWorker } = await import(url);
+ const result = await launchWorker();
+ assert(result.error === undefined, `should be no error from worker but was: ${result.error}`);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/worker/worker.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/worker/worker.ts
new file mode 100644
index 0000000000..a3cf8064e2
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/worker/worker.ts
@@ -0,0 +1,83 @@
+import { getGPU, setDefaultRequestAdapterOptions } from '../../../common/util/navigator_gpu.js';
+import { assert, objectEquals, iterRange } from '../../../common/util/util.js';
+
+async function basicTest() {
+ const adapter = await getGPU(null).requestAdapter();
+ assert(adapter !== null, 'Failed to get adapter.');
+
+ const device = await adapter.requestDevice();
+ assert(device !== null, 'Failed to get device.');
+
+ const kOffset = 1230000;
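+  // Minimal compute smoke test (editor-added note): each invocation writes
+  // (id.x + kOffset) into a storage buffer; the result is copied to a mappable
+  // buffer and compared against the expected values below.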
+ const pipeline = device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: device.createShaderModule({
+ code: `
+ struct Buffer { data: array<u32>, };
+
+ @group(0) @binding(0) var<storage, read_write> buffer: Buffer;
+ @compute @workgroup_size(1u) fn main(
+ @builtin(global_invocation_id) id: vec3<u32>) {
+ buffer.data[id.x] = id.x + ${kOffset}u;
+ }
+ `,
+ }),
+ entryPoint: 'main',
+ },
+ });
+
+ const kNumElements = 64;
+ const kBufferSize = kNumElements * 4;
+ const buffer = device.createBuffer({
+ size: kBufferSize,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+
+ const resultBuffer = device.createBuffer({
+ size: kBufferSize,
+ usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
+ });
+
+ const bindGroup = device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [{ binding: 0, resource: { buffer } }],
+ });
+
+ const encoder = device.createCommandEncoder();
+
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(kNumElements);
+ pass.end();
+
+ encoder.copyBufferToBuffer(buffer, 0, resultBuffer, 0, kBufferSize);
+
+ device.queue.submit([encoder.finish()]);
+
+ const expected = new Uint32Array([...iterRange(kNumElements, x => x + kOffset)]);
+
+ await resultBuffer.mapAsync(GPUMapMode.READ);
+ const actual = new Uint32Array(resultBuffer.getMappedRange());
+
+ assert(objectEquals(actual, expected), 'compute pipeline ran');
+
+ resultBuffer.destroy();
+ buffer.destroy();
+ device.destroy();
+}
+
+self.onmessage = async (ev: MessageEvent) => {
+ const defaultRequestAdapterOptions: GPURequestAdapterOptions =
+ ev.data.defaultRequestAdapterOptions;
+ setDefaultRequestAdapterOptions(defaultRequestAdapterOptions);
+
+ let error = undefined;
+ try {
+ await basicTest();
+ } catch (err: unknown) {
+ error = (err as Error).toString();
+ }
+ self.postMessage({ error });
+};
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/worker/worker_launcher.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/worker/worker_launcher.ts
new file mode 100644
index 0000000000..72059eb99f
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/web_platform/worker/worker_launcher.ts
@@ -0,0 +1,18 @@
+import { getDefaultRequestAdapterOptions } from '../../../common/util/navigator_gpu.js';
+
+export type TestResult = {
+  error: string | undefined;
+};
+
+export async function launchWorker() {
+ const selfPath = import.meta.url;
+ const selfPathDir = selfPath.substring(0, selfPath.lastIndexOf('/'));
+ const workerPath = selfPathDir + '/worker.js';
+ const worker = new Worker(workerPath, { type: 'module' });
+
+ const promise = new Promise<TestResult>(resolve => {
+ worker.addEventListener('message', ev => resolve(ev.data as TestResult), { once: true });
+ });
+ worker.postMessage({ defaultRequestAdapterOptions: getDefaultRequestAdapterOptions() });
+ return await promise;
+}
diff --git a/dom/webgpu/tests/cts/checkout/standalone/index.html b/dom/webgpu/tests/cts/checkout/standalone/index.html
new file mode 100644
index 0000000000..85ce1a9e6e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/standalone/index.html
@@ -0,0 +1,453 @@
+<html>
+ <head>
+ <meta charset="UTF-8">
+ <title>WebGPU CTS</title>
+ <link
+ id="favicon"
+ rel="shortcut icon"
+ type="image/png"
+ href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQAgMAAABinRfyAAAACVBMVEUAAAAAAAD///+D3c/SAAAAAXRSTlMAQObYZgAAAEpJREFUCB0FwbERgDAMA0BdSkbJQBSuaPABE0WuaKILmpJ/rNVejPKBUXGhqAC5J0gn9ESg2wvdNua8hUoKJQo8b6HyE6a2QHdbP0CPITh2pewWAAAAAElFTkSuQmCC"
+ />
+ <link rel="preconnect" href="https://fonts.googleapis.com">
+ <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
+ <link href="https://fonts.googleapis.com/css2?family=Poppins&display=swap" rel="stylesheet">
+ <meta name="viewport" content="width=device-width" />
+ <!-- Chrome Origin Trial token for https://gpuweb.github.io (see dev_server.ts for localhost tokens) -->
+ <meta http-equiv="origin-trial" content="AmV1vLgjOQ01SlGnVhpoKXy7gLW+K/plXHwHKnYn4S4US98WaSesKBI+XSUMo95unQARyMGDvW70KsfyeYblZQ0AAABQeyJvcmlnaW4iOiJodHRwczovL2dwdXdlYi5naXRodWIuaW86NDQzIiwiZmVhdHVyZSI6IldlYkdQVSIsImV4cGlyeSI6MTY2MzcxODM5OX0=">
+ <link rel="stylesheet" href="third_party/normalize.min.css" />
+ <script src="third_party/jquery/jquery-3.3.1.min.js"></script>
+ <style>
+ :root {
+ color-scheme: light dark;
+
+ --fg-color: #000;
+ --bg-color: #fff;
+ --border-color: #888;
+ --emphasis-fg-color: #F00;
+
+ --results-fg-color: gray;
+      --node-description-fg-color: gray;
+ --node-hover-bg-color: rgba(0, 0, 0, 0.1);
+
+ --button-bg-color: #eee;
+ --button-hover-bg-color: #ccc;
+ --button-image-filter: none;
+
+ --subtree-border-color: #ddd;
+ --subtree-hover-left-border-color: #000;
+ --multicase-border-color: #55f;
+ --testcase-border-color: #bbf;
+ --testcase-bg-color: #bbb;
+
+ --testcase-data-status-fail-bg-color: #fdd;
+ --testcase-data-status-warn-bg-color: #ffb;
+ --testcase-data-status-pass-bg-color: #cfc;
+ --testcase-data-status-skip-bg-color: #aaf;
+
+      --testcase-logs-bg-color: white;
+ --testcase-log-odd-bg-color: #fff;
+ --testcase-log-even-bg-color: #f8f8f8;
+ --testcase-log-text-fg-color: #666;
+ --testcase-log-text-first-line-fg-color: #000;
+ }
+ @media (prefers-color-scheme: dark) {
+ :root {
+ --fg-color: #fff;
+ --bg-color: #000;
+ --border-color: #888;
+ --emphasis-fg-color: #F44;
+
+ --results-fg-color: #aaa;
+ --node-description-fg-color: #aaa;
+ --node-hover-bg-color: rgba(255, 255, 255, 0.1);
+
+ --button-image-filter: invert(100%);
+ --button-bg-color: #666;
+ --button-hover-bg-color: #888;
+
+ --subtree-border-color: #444;
+ --subtree-hover-left-border-color: #FFF;
+ --multicase-border-color: #338;
+ --testcase-border-color: #55a;
+ --testcase-bg-color: #888;
+
+ --testcase-data-status-fail-bg-color: #400;
+ --testcase-data-status-warn-bg-color: #660;
+ --testcase-data-status-pass-bg-color: #040;
+ --testcase-data-status-skip-bg-color: #446;
+
+        --testcase-logs-bg-color: black;
+ --testcase-log-odd-bg-color: #000;
+ --testcase-log-even-bg-color: #080808;
+ --testcase-log-text-fg-color: #aaa;
+ --testcase-log-text-first-line-fg-color: #fff;
+ }
+ }
+ body {
+ font-family: monospace;
+ min-width: 400px;
+ margin: 0.5em;
+ }
+ * {
+ box-sizing: border-box;
+ }
+ h1 {
+ font-size: 1.5em;
+ font-family: 'Poppins', sans-serif;
+ height: 1.2em;
+ vertical-align: middle;
+ }
+ input[type=button],
+ button {
+ cursor: pointer;
+ background-color: var(--button-bg-color);
+ }
+ input[type=button]:hover,
+ button:hover,
+ a.nodelink:hover {
+ background-color: var(--button-hover-bg-color);
+ }
+ .logo {
+ height: 1.2em;
+ float: left;
+ }
+ .important {
+ font-weight: bold;
+ color: var(--emphasis-fg-color);
+ }
+ #options label {
+ display: flex;
+ }
+ table#options {
+ border-collapse: collapse;
+ width: 100%;
+ }
+ #options td {
+ border: 1px solid var(--subtree-border-color);
+ width: 1px; /* to make the columns as small as possible */
+ }
+ #options tr:hover {
+ background: var(--node-hover-bg-color);
+ }
+ #options td:nth-child(1) {
+ text-align: right;
+ }
+ #options td:nth-child(2),
+ #options td:nth-child(3) {
+ padding-left: 0.5em;
+ }
+ #options td:nth-child(3) {
+ width: 100%; /* to make the last column use the space */
+ }
+ #info {
+ font-family: monospace;
+ }
+ #progress {
+ position: fixed;
+ display: flex;
+ width: 100%;
+ left: 0;
+ top: 0;
+ background-color: #068;
+ color: #fff;
+ align-items: center;
+ }
+ #progress .progress-test-name {
+ flex: 1 1;
+ min-width: 0;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ direction: rtl;
+ white-space: nowrap;
+ }
+ #resultsJSON {
+ font-family: monospace;
+ width: 100%;
+ height: 15em;
+ }
+
+ #resultsVis {
+ border-right: 1px solid var(--results-fg-color);
+ }
+
+ /* PS: this does not disable using the keyboard to click */
+ #resultsVis.disable-run button.leafrun,
+ #resultsVis.disable-run button.subtreerun {
+ pointer-events: none;
+ opacity: 25%;
+ }
+
+ /* tree nodes */
+
+ .nodeheader {
+ display: flex;
+ width: 100%;
+ padding: 0px 2px 0px 1px;
+ }
+ .nodeheader:hover {
+ background: var(--node-hover-bg-color);
+ }
+ .subtreerun,
+ .leafrun,
+ .nodelink,
+ .collapsebtn,
+ .testcaselogbtn,
+ .copybtn {
+ display: inline-flex;
+ flex-shrink: 0;
+ flex-grow: 0;
+ justify-content: center;
+ align-items: center;
+ text-decoration: none;
+ vertical-align: top;
+ color: var(--fg-color);
+ background-color: var(--button-bg-color);
+ background-repeat: no-repeat;
+ background-position: center;
+ border: 1px solid var(--border-color);
+ }
+ .subtreerun::before,
+ .leafrun::before,
+ .nodelink::before,
+ .collapsebtn::before,
+ .testcaselogbtn::before,
+ .copybtn::before {
+ content: "";
+ width: 100%;
+ height: 100%;
+ background-repeat: no-repeat;
+ background-position: center;
+ filter: var(--button-image-filter);
+ }
+ @media (pointer: fine) {
+ .subtreerun,
+ .leafrun,
+ .nodelink,
+ .collapsebtn,
+ .testcaselogbtn,
+ .copybtn {
+ flex-basis: 24px;
+ border-radius: 4px;
+ width: 24px;
+ height: 18px;
+ }
+ }
+ @media (pointer: coarse) {
+ .subtreerun,
+ .leafrun,
+ .nodelink,
+ .collapsebtn,
+ .testcaselogbtn,
+ .copybtn {
+ flex-basis: 36px;
+ border-radius: 6px;
+ width: 36px;
+ height: 36px;
+ }
+ }
+ .subtreerun::before {
+ background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAkAAAAJAQMAAADaX5RTAAAABlBMVEUAAAAAAAClZ7nPAAAAAXRSTlMAQObYZgAAAB5JREFUCNdjOMDAsIGBoYeBoZmBoaEBRPaARQ4wAABTfwX/l/WQvgAAAABJRU5ErkJggg==);
+ }
+ .leafrun::before {
+ background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAkAAAAJCAYAAADgkQYQAAAANklEQVQoU2NkYGD4zwABjFAagwJJwBTBJDEUY1OEoRifIrhiYhSBHYvuJnSHM5LtJry+wxlOAGPTCQmAB/WwAAAAAElFTkSuQmCC);
+ }
+ .nodelink::before {
+ background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAwAAAAMAQMAAABsu86kAAAABlBMVEUAAAAAAAClZ7nPAAAAAXRSTlMAQObYZgAAACRJREFUCNdjYGBg+P+BoUGAYesFhj4BhvsFDPYNDHwMCMTAAACqJwbp3VgbrAAAAABJRU5ErkJggg==);
+ }
+ .copybtn::before {
+ background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAwAAAAMAQMAAABsu86kAAAABlBMVEUAAAAAAAClZ7nPAAAAAXRSTlMAQObYZgAAACVJREFUCNdjYGBgqGdgcGRgcGBg8H/A4KEAQhkKDBxgxP8AKA8AVNUEx41Lj8MAAAAASUVORK5CYII=);
+ }
+ .nodetitle {
+ display: inline;
+ flex: 10 0 4em;
+ }
+ .nodecolumns {
+ position: absolute;
+ left: 220px;
+ }
+ .nodequery {
+ font-weight: bold;
+ background: transparent;
+ border: none;
+ padding: 2px;
+ margin: 0 0.5em;
+ width: calc(100vw - 360px);
+ }
+ .nodedescription {
+ margin: 0 0 0 1em;
+ color: var(--node-description-fg-color);
+ white-space: pre-wrap;
+ font-size: 80%;
+ }
+
+ /* tree nodes which are subtrees */
+
+ .subtree {
+ margin: 3px 0 0 0;
+ padding: 3px 0 0 3px;
+ border-width: 1px 0 0;
+ border-style: solid;
+ border-color: var(--subtree-border-color);
+ }
+ .subtree::before {
+ float: right;
+ margin-right: 3px;
+ }
+ .subtree[data-status='fail'], .subtree[data-status='passfail'] {
+ background: linear-gradient(90deg, var(--testcase-data-status-fail-bg-color), var(--testcase-data-status-fail-bg-color) 16px, var(--bg-color) 16px);
+ }
+ .subtree[data-status='fail']::before {
+ content: "⛔"
+ }
+ .subtree[data-status='pass'] {
+ background: linear-gradient(90deg, var(--testcase-data-status-pass-bg-color), var(--testcase-data-status-pass-bg-color) 16px, var(--bg-color) 16px);
+ }
+ .subtree[data-status='skip'] {
+ background: linear-gradient(90deg, var(--testcase-data-status-skip-bg-color), var(--testcase-data-status-skip-bg-color) 16px, var(--bg-color) 16px);
+ }
+ .subtree[data-status='pass']::before {
+ content: "✔"
+ }
+ .subtree[data-status='skip']::before {
+ content: "○"
+ }
+ .subtree[data-status='passfail']::before {
+ content: "✔/⛔"
+ }
+ .subtree:hover {
+ border-left-color: var(--subtree-hover-left-border-color);
+ }
+ .subtree.multifile > .subtreechildren > .subtree.multitest,
+ .subtree.multifile > .subtreechildren > .subtree.multicase {
+ border-width: 2px 0 0 1px;
+ border-color: var(--multicase-border-color);
+ }
+ .subtree.multitest > .subtreechildren > .subtree.multicase,
+ .subtree.multitest > .subtreechildren > .testcase {
+ border-width: 2px 0 0 1px;
+ border-color: var(--testcase-border-color);
+ }
+ .subtreechildren {
+ margin-left: 9px;
+ }
+
+ /* tree nodes which are test cases */
+
+ .testcase {
+ padding: 3px;
+ border-width: 1px 0 0 0;
+ border-style: solid;
+ border-color: var(--border-color);
+ background: var(--testcase-bg-color);
+ }
+ .testcase:first-child {
+ margin-top: 3px;
+ }
+ .testcase::after {
+ float: right;
+ margin-top: -1.1em;
+ }
+ .testcase[data-status='fail'] {
+ background: var(--testcase-data-status-fail-bg-color);
+ }
+ .testcase[data-status='fail']::after {
+ content: "⛔"
+ }
+ .testcase[data-status='warn'] {
+ background: var(--testcase-data-status-warn-bg-color);
+ }
+ .testcase[data-status='warn']::after {
+ content: "⚠"
+ }
+ .testcase[data-status='pass'] {
+ background: var(--testcase-data-status-pass-bg-color);
+ }
+ .testcase[data-status='pass']::after {
+ content: "✔"
+ }
+ .testcase[data-status='skip'] {
+ background: var(--testcase-data-status-skip-bg-color);
+ }
+ .testcase .nodequery {
+ font-weight: normal;
+ width: calc(100vw - 275px);
+ }
+ .testcasetime {
+ white-space: nowrap;
+ text-align: right;
+ flex: 1 0 5.5em;
+ }
+ .testcaselogs {
+ margin-left: 6px;
+ width: calc(100% - 6px);
+ border-width: 0 0px 0 1px;
+ border-style: solid;
+ border-color: var(--border-color);
+ background: var(--testcase-logs-bg-color);
+ }
+ .testcaselog {
+ display: flex;
+ }
+ .testcaselog:nth-child(odd) {
+ background: var(--testcase-log-odd-bg-color);
+ }
+ .testcaselog:nth-child(even) {
+ background: var(--testcase-log-even-bg-color);
+ }
+ .testcaselogbtn::before {
+ background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAwAAAAMAQMAAABsu86kAAAABlBMVEUAAAAAAAClZ7nPAAAAAXRSTlMAQObYZgAAACRJREFUCNdjYGBg+H+AwUGBwV+BQUGAQX0CiNQQYFABk8ogLgBsYQUt2gNKPwAAAABJRU5ErkJggg==);
+ }
+ .testcaselogtext {
+ flex: 1 0;
+ font-size: 10pt;
+ white-space: pre-wrap;
+ word-break: break-word;
+ margin: 0;
+ color: var(--testcase-log-text-fg-color)
+ }
+ .testcaselogtext::first-line {
+ color: var(--testcase-log-text-first-line-fg-color);
+ }
+
+ @media only screen and (max-width: 600px) {
+ .subtreechildren {
+ margin-left: 2px;
+ }
+ .testcaselogs {
+ margin-left: 2px;
+ width: calc(100% - 2px);
+ }
+ .nodequery {
+ position: relative;
+ left: 0;
+ width: 100%;
+ }
+ }
+ </style>
+ </head>
+ <body>
+ <h1><img class="logo" src="webgpu-logo-notext.svg">WebGPU Conformance Test Suite</h1>
+ <details>
+ <summary>options (requires reload!)</summary>
+ <table id="options">
+ <tbody></tbody>
+ </table>
+ <p class="important">Note: The options above only set the url parameters.
+      You must reload the page for the options to take effect.</p>
+ </details>
+ <p>
+ <input type=button id=expandall value="Expand All (slow!)">
+ <label><input type=checkbox id=autoCloseOnPass> Auto-close each subtree when it passes</label>
+ </p>
+
+ <div id="info"></div>
+ <div id="resultsVis"></div>
+ <div id="progress" style="display: none;"><button type="button">stop</button><div class="progress-test-name"></div></div>
+
+ <p>
+ <input type="button" id="copyResultsJSON" value="Copy results as JSON">
+ </p>
+
+ <script type="module" src="../out/common/runtime/standalone.js"></script>
+ </body>
+</html>
diff --git a/dom/webgpu/tests/cts/checkout/standalone/third_party/jquery/LICENSE.txt b/dom/webgpu/tests/cts/checkout/standalone/third_party/jquery/LICENSE.txt
new file mode 100644
index 0000000000..45ee6cbe38
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/standalone/third_party/jquery/LICENSE.txt
@@ -0,0 +1,9 @@
+The MIT License (MIT)
+
+Copyright (c) <year> <copyright holders>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/dom/webgpu/tests/cts/checkout/standalone/third_party/jquery/jquery-3.3.1.min.js b/dom/webgpu/tests/cts/checkout/standalone/third_party/jquery/jquery-3.3.1.min.js
new file mode 100644
index 0000000000..4d9b3a2587
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/standalone/third_party/jquery/jquery-3.3.1.min.js
@@ -0,0 +1,2 @@
+/*! jQuery v3.3.1 | (c) JS Foundation and other contributors | jquery.org/license */
+!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(e,t){"use strict";var n=[],r=e.document,i=Object.getPrototypeOf,o=n.slice,a=n.concat,s=n.push,u=n.indexOf,l={},c=l.toString,f=l.hasOwnProperty,p=f.toString,d=p.call(Object),h={},g=function e(t){return"function"==typeof t&&"number"!=typeof t.nodeType},y=function e(t){return null!=t&&t===t.window},v={type:!0,src:!0,noModule:!0};function m(e,t,n){var i,o=(t=t||r).createElement("script");if(o.text=e,n)for(i in v)n[i]&&(o[i]=n[i]);t.head.appendChild(o).parentNode.removeChild(o)}function x(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?l[c.call(e)]||"object":typeof e}var b="3.3.1",w=function(e,t){return new w.fn.init(e,t)},T=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g;w.fn=w.prototype={jquery:"3.3.1",constructor:w,length:0,toArray:function(){return o.call(this)},get:function(e){return null==e?o.call(this):e<0?this[e+this.length]:this[e]},pushStack:function(e){var t=w.merge(this.constructor(),e);return t.prevObject=this,t},each:function(e){return w.each(this,e)},map:function(e){return this.pushStack(w.map(this,function(t,n){return e.call(t,n,t)}))},slice:function(){return this.pushStack(o.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(e){var t=this.length,n=+e+(e<0?t:0);return this.pushStack(n>=0&&n<t?[this[n]]:[])},end:function(){return this.prevObject||this.constructor()},push:s,sort:n.sort,splice:n.splice},w.extend=w.fn.extend=function(){var e,t,n,r,i,o,a=arguments[0]||{},s=1,u=arguments.length,l=!1;for("boolean"==typeof a&&(l=a,a=arguments[s]||{},s++),"object"==typeof a||g(a)||(a={}),s===u&&(a=this,s--);s<u;s++)if(null!=(e=arguments[s]))for(t in e)n=a[t],a!==(r=e[t])&&(l&&r&&(w.isPlainObject(r)||(i=Array.isArray(r)))?(i?(i=!1,o=n&&Array.isArray(n)?n:[]):o=n&&w.isPlainObject(n)?n:{},a[t]=w.extend(l,o,r)):void 0!==r&&(a[t]=r));return a},w.extend({expando:"jQuery"+("3.3.1"+Math.random()).replace(/\D/g,""),isReady:!0,error:function(e){throw new Error(e)},noop:function(){},isPlainObject:function(e){var t,n;return!(!e||"[object Object]"!==c.call(e))&&(!(t=i(e))||"function"==typeof(n=f.call(t,"constructor")&&t.constructor)&&p.call(n)===d)},isEmptyObject:function(e){var t;for(t in e)return!1;return!0},globalEval:function(e){m(e)},each:function(e,t){var n,r=0;if(C(e)){for(n=e.length;r<n;r++)if(!1===t.call(e[r],r,e[r]))break}else for(r in e)if(!1===t.call(e[r],r,e[r]))break;return e},trim:function(e){return null==e?"":(e+"").replace(T,"")},makeArray:function(e,t){var n=t||[];return null!=e&&(C(Object(e))?w.merge(n,"string"==typeof e?[e]:e):s.call(n,e)),n},inArray:function(e,t,n){return null==t?-1:u.call(t,e,n)},merge:function(e,t){for(var n=+t.length,r=0,i=e.length;r<n;r++)e[i++]=t[r];return e.length=i,e},grep:function(e,t,n){for(var r,i=[],o=0,a=e.length,s=!n;o<a;o++)(r=!t(e[o],o))!==s&&i.push(e[o]);return i},map:function(e,t,n){var r,i,o=0,s=[];if(C(e))for(r=e.length;o<r;o++)null!=(i=t(e[o],o,n))&&s.push(i);else for(o in e)null!=(i=t(e[o],o,n))&&s.push(i);return a.apply([],s)},guid:1,support:h}),"function"==typeof Symbol&&(w.fn[Symbol.iterator]=n[Symbol.iterator]),w.each("Boolean Number String Function Array Date RegExp Object Error Symbol".split(" "),function(e,t){l["[object "+t+"]"]=t.toLowerCase()});function C(e){var t=!!e&&"length"in 
e&&e.length,n=x(e);return!g(e)&&!y(e)&&("array"===n||0===t||"number"==typeof t&&t>0&&t-1 in e)}var E=function(e){var t,n,r,i,o,a,s,u,l,c,f,p,d,h,g,y,v,m,x,b="sizzle"+1*new Date,w=e.document,T=0,C=0,E=ae(),k=ae(),S=ae(),D=function(e,t){return e===t&&(f=!0),0},N={}.hasOwnProperty,A=[],j=A.pop,q=A.push,L=A.push,H=A.slice,O=function(e,t){for(var n=0,r=e.length;n<r;n++)if(e[n]===t)return n;return-1},P="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",M="[\\x20\\t\\r\\n\\f]",R="(?:\\\\.|[\\w-]|[^\0-\\xa0])+",I="\\["+M+"*("+R+")(?:"+M+"*([*^$|!~]?=)"+M+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+R+"))|)"+M+"*\\]",W=":("+R+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+I+")*)|.*)\\)|)",$=new RegExp(M+"+","g"),B=new RegExp("^"+M+"+|((?:^|[^\\\\])(?:\\\\.)*)"+M+"+$","g"),F=new RegExp("^"+M+"*,"+M+"*"),_=new RegExp("^"+M+"*([>+~]|"+M+")"+M+"*"),z=new RegExp("="+M+"*([^\\]'\"]*?)"+M+"*\\]","g"),X=new RegExp(W),U=new RegExp("^"+R+"$"),V={ID:new RegExp("^#("+R+")"),CLASS:new RegExp("^\\.("+R+")"),TAG:new RegExp("^("+R+"|[*])"),ATTR:new RegExp("^"+I),PSEUDO:new RegExp("^"+W),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+P+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},G=/^(?:input|select|textarea|button)$/i,Y=/^h\d$/i,Q=/^[^{]+\{\s*\[native \w/,J=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,K=/[+~]/,Z=new RegExp("\\\\([\\da-f]{1,6}"+M+"?|("+M+")|.)","ig"),ee=function(e,t,n){var r="0x"+t-65536;return r!==r||n?t:r<0?String.fromCharCode(r+65536):String.fromCharCode(r>>10|55296,1023&r|56320)},te=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ne=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},re=function(){p()},ie=me(function(e){return!0===e.disabled&&("form"in e||"label"in e)},{dir:"parentNode",next:"legend"});try{L.apply(A=H.call(w.childNodes),w.childNodes),A[w.childNodes.length].nodeType}catch(e){L={apply:A.length?function(e,t){q.apply(e,H.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function oe(e,t,r,i){var o,s,l,c,f,h,v,m=t&&t.ownerDocument,T=t?t.nodeType:9;if(r=r||[],"string"!=typeof e||!e||1!==T&&9!==T&&11!==T)return r;if(!i&&((t?t.ownerDocument||t:w)!==d&&p(t),t=t||d,g)){if(11!==T&&(f=J.exec(e)))if(o=f[1]){if(9===T){if(!(l=t.getElementById(o)))return r;if(l.id===o)return r.push(l),r}else if(m&&(l=m.getElementById(o))&&x(t,l)&&l.id===o)return r.push(l),r}else{if(f[2])return L.apply(r,t.getElementsByTagName(e)),r;if((o=f[3])&&n.getElementsByClassName&&t.getElementsByClassName)return L.apply(r,t.getElementsByClassName(o)),r}if(n.qsa&&!S[e+" "]&&(!y||!y.test(e))){if(1!==T)m=t,v=e;else if("object"!==t.nodeName.toLowerCase()){(c=t.getAttribute("id"))?c=c.replace(te,ne):t.setAttribute("id",c=b),s=(h=a(e)).length;while(s--)h[s]="#"+c+" "+ve(h[s]);v=h.join(","),m=K.test(e)&&ge(t.parentNode)||t}if(v)try{return L.apply(r,m.querySelectorAll(v)),r}catch(e){}finally{c===b&&t.removeAttribute("id")}}}return u(e.replace(B,"$1"),t,r,i)}function ae(){var e=[];function t(n,i){return e.push(n+" ")>r.cacheLength&&delete t[e.shift()],t[n+" "]=i}return t}function se(e){return e[b]=!0,e}function ue(e){var 
t=d.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function le(e,t){var n=e.split("|"),i=n.length;while(i--)r.attrHandle[n[i]]=t}function ce(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function fe(e){return function(t){return"input"===t.nodeName.toLowerCase()&&t.type===e}}function pe(e){return function(t){var n=t.nodeName.toLowerCase();return("input"===n||"button"===n)&&t.type===e}}function de(e){return function(t){return"form"in t?t.parentNode&&!1===t.disabled?"label"in t?"label"in t.parentNode?t.parentNode.disabled===e:t.disabled===e:t.isDisabled===e||t.isDisabled!==!e&&ie(t)===e:t.disabled===e:"label"in t&&t.disabled===e}}function he(e){return se(function(t){return t=+t,se(function(n,r){var i,o=e([],n.length,t),a=o.length;while(a--)n[i=o[a]]&&(n[i]=!(r[i]=n[i]))})})}function ge(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}n=oe.support={},o=oe.isXML=function(e){var t=e&&(e.ownerDocument||e).documentElement;return!!t&&"HTML"!==t.nodeName},p=oe.setDocument=function(e){var t,i,a=e?e.ownerDocument||e:w;return a!==d&&9===a.nodeType&&a.documentElement?(d=a,h=d.documentElement,g=!o(d),w!==d&&(i=d.defaultView)&&i.top!==i&&(i.addEventListener?i.addEventListener("unload",re,!1):i.attachEvent&&i.attachEvent("onunload",re)),n.attributes=ue(function(e){return e.className="i",!e.getAttribute("className")}),n.getElementsByTagName=ue(function(e){return e.appendChild(d.createComment("")),!e.getElementsByTagName("*").length}),n.getElementsByClassName=Q.test(d.getElementsByClassName),n.getById=ue(function(e){return h.appendChild(e).id=b,!d.getElementsByName||!d.getElementsByName(b).length}),n.getById?(r.filter.ID=function(e){var t=e.replace(Z,ee);return function(e){return e.getAttribute("id")===t}},r.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&g){var n=t.getElementById(e);return n?[n]:[]}}):(r.filter.ID=function(e){var t=e.replace(Z,ee);return function(e){var n="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return n&&n.value===t}},r.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&g){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),r.find.TAG=n.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):n.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},r.find.CLASS=n.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&g)return t.getElementsByClassName(e)},v=[],y=[],(n.qsa=Q.test(d.querySelectorAll))&&(ue(function(e){h.appendChild(e).innerHTML="<a id='"+b+"'></a><select id='"+b+"-\r\\' msallowcapture=''><option selected=''></option></select>",e.querySelectorAll("[msallowcapture^='']").length&&y.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||y.push("\\["+M+"*(?:value|"+P+")"),e.querySelectorAll("[id~="+b+"-]").length||y.push("~="),e.querySelectorAll(":checked").length||y.push(":checked"),e.querySelectorAll("a#"+b+"+*").length||y.push(".#.+[+~]")}),ue(function(e){e.innerHTML="<a href='' disabled='disabled'></a><select disabled='disabled'><option/></select>";var 
t=d.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&y.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&y.push(":enabled",":disabled"),h.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&y.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),y.push(",.*:")})),(n.matchesSelector=Q.test(m=h.matches||h.webkitMatchesSelector||h.mozMatchesSelector||h.oMatchesSelector||h.msMatchesSelector))&&ue(function(e){n.disconnectedMatch=m.call(e,"*"),m.call(e,"[s!='']:x"),v.push("!=",W)}),y=y.length&&new RegExp(y.join("|")),v=v.length&&new RegExp(v.join("|")),t=Q.test(h.compareDocumentPosition),x=t||Q.test(h.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},D=t?function(e,t){if(e===t)return f=!0,0;var r=!e.compareDocumentPosition-!t.compareDocumentPosition;return r||(1&(r=(e.ownerDocument||e)===(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!n.sortDetached&&t.compareDocumentPosition(e)===r?e===d||e.ownerDocument===w&&x(w,e)?-1:t===d||t.ownerDocument===w&&x(w,t)?1:c?O(c,e)-O(c,t):0:4&r?-1:1)}:function(e,t){if(e===t)return f=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e===d?-1:t===d?1:i?-1:o?1:c?O(c,e)-O(c,t):0;if(i===o)return ce(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?ce(a[r],s[r]):a[r]===w?-1:s[r]===w?1:0},d):d},oe.matches=function(e,t){return oe(e,null,null,t)},oe.matchesSelector=function(e,t){if((e.ownerDocument||e)!==d&&p(e),t=t.replace(z,"='$1']"),n.matchesSelector&&g&&!S[t+" "]&&(!v||!v.test(t))&&(!y||!y.test(t)))try{var r=m.call(e,t);if(r||n.disconnectedMatch||e.document&&11!==e.document.nodeType)return r}catch(e){}return oe(t,d,null,[e]).length>0},oe.contains=function(e,t){return(e.ownerDocument||e)!==d&&p(e),x(e,t)},oe.attr=function(e,t){(e.ownerDocument||e)!==d&&p(e);var i=r.attrHandle[t.toLowerCase()],o=i&&N.call(r.attrHandle,t.toLowerCase())?i(e,t,!g):void 0;return void 0!==o?o:n.attributes||!g?e.getAttribute(t):(o=e.getAttributeNode(t))&&o.specified?o.value:null},oe.escape=function(e){return(e+"").replace(te,ne)},oe.error=function(e){throw new Error("Syntax error, unrecognized expression: "+e)},oe.uniqueSort=function(e){var t,r=[],i=0,o=0;if(f=!n.detectDuplicates,c=!n.sortStable&&e.slice(0),e.sort(D),f){while(t=e[o++])t===e[o]&&(i=r.push(o));while(i--)e.splice(r[i],1)}return c=null,e},i=oe.getText=function(e){var t,n="",r=0,o=e.nodeType;if(o){if(1===o||9===o||11===o){if("string"==typeof e.textContent)return e.textContent;for(e=e.firstChild;e;e=e.nextSibling)n+=i(e)}else if(3===o||4===o)return e.nodeValue}else while(t=e[r++])n+=i(t);return n},(r=oe.selectors={cacheLength:50,createPseudo:se,match:V,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(Z,ee),e[3]=(e[3]||e[4]||e[5]||"").replace(Z,ee),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||oe.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&oe.error(e[0]),e},PSEUDO:function(e){var 
t,n=!e[6]&&e[2];return V.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=a(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(Z,ee).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=E[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&E(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(e,t,n){return function(r){var i=oe.attr(r,e);return null==i?"!="===t:!t||(i+="","="===t?i===n:"!="===t?i!==n:"^="===t?n&&0===i.indexOf(n):"*="===t?n&&i.indexOf(n)>-1:"$="===t?n&&i.slice(-n.length)===n:"~="===t?(" "+i.replace($," ")+" ").indexOf(n)>-1:"|="===t&&(i===n||i.slice(0,n.length+1)===n+"-"))}},CHILD:function(e,t,n,r,i){var o="nth"!==e.slice(0,3),a="last"!==e.slice(-4),s="of-type"===t;return 1===r&&0===i?function(e){return!!e.parentNode}:function(t,n,u){var l,c,f,p,d,h,g=o!==a?"nextSibling":"previousSibling",y=t.parentNode,v=s&&t.nodeName.toLowerCase(),m=!u&&!s,x=!1;if(y){if(o){while(g){p=t;while(p=p[g])if(s?p.nodeName.toLowerCase()===v:1===p.nodeType)return!1;h=g="only"===e&&!h&&"nextSibling"}return!0}if(h=[a?y.firstChild:y.lastChild],a&&m){x=(d=(l=(c=(f=(p=y)[b]||(p[b]={}))[p.uniqueID]||(f[p.uniqueID]={}))[e]||[])[0]===T&&l[1])&&l[2],p=d&&y.childNodes[d];while(p=++d&&p&&p[g]||(x=d=0)||h.pop())if(1===p.nodeType&&++x&&p===t){c[e]=[T,d,x];break}}else if(m&&(x=d=(l=(c=(f=(p=t)[b]||(p[b]={}))[p.uniqueID]||(f[p.uniqueID]={}))[e]||[])[0]===T&&l[1]),!1===x)while(p=++d&&p&&p[g]||(x=d=0)||h.pop())if((s?p.nodeName.toLowerCase()===v:1===p.nodeType)&&++x&&(m&&((c=(f=p[b]||(p[b]={}))[p.uniqueID]||(f[p.uniqueID]={}))[e]=[T,x]),p===t))break;return(x-=i)===r||x%r==0&&x/r>=0}}},PSEUDO:function(e,t){var n,i=r.pseudos[e]||r.setFilters[e.toLowerCase()]||oe.error("unsupported pseudo: "+e);return i[b]?i(t):i.length>1?(n=[e,e,"",t],r.setFilters.hasOwnProperty(e.toLowerCase())?se(function(e,n){var r,o=i(e,t),a=o.length;while(a--)e[r=O(e,o[a])]=!(n[r]=o[a])}):function(e){return i(e,0,n)}):i}},pseudos:{not:se(function(e){var t=[],n=[],r=s(e.replace(B,"$1"));return r[b]?se(function(e,t,n,i){var o,a=r(e,null,i,[]),s=e.length;while(s--)(o=a[s])&&(e[s]=!(t[s]=o))}):function(e,i,o){return t[0]=e,r(t,null,o,n),t[0]=null,!n.pop()}}),has:se(function(e){return function(t){return oe(e,t).length>0}}),contains:se(function(e){return e=e.replace(Z,ee),function(t){return(t.textContent||t.innerText||i(t)).indexOf(e)>-1}}),lang:se(function(e){return U.test(e||"")||oe.error("unsupported lang: "+e),e=e.replace(Z,ee).toLowerCase(),function(t){var n;do{if(n=g?t.lang:t.getAttribute("xml:lang")||t.getAttribute("lang"))return(n=n.toLowerCase())===e||0===n.indexOf(e+"-")}while((t=t.parentNode)&&1===t.nodeType);return!1}}),target:function(t){var n=e.location&&e.location.hash;return n&&n.slice(1)===t.id},root:function(e){return e===h},focus:function(e){return e===d.activeElement&&(!d.hasFocus||d.hasFocus())&&!!(e.type||e.href||~e.tabIndex)},enabled:de(!1),disabled:de(!0),checked:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&!!e.checked||"option"===t&&!!e.selected},selected:function(e){return e.parentNode&&e.parentNode.selectedIndex,!0===e.selected},empty:function(e){for(e=e.firstChild;e;e=e.nextSibling)if(e.nodeType<6)return!1;return!0},parent:function(e){return!r.pseudos.empty(e)},header:function(e){return 
Y.test(e.nodeName)},input:function(e){return G.test(e.nodeName)},button:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&"button"===e.type||"button"===t},text:function(e){var t;return"input"===e.nodeName.toLowerCase()&&"text"===e.type&&(null==(t=e.getAttribute("type"))||"text"===t.toLowerCase())},first:he(function(){return[0]}),last:he(function(e,t){return[t-1]}),eq:he(function(e,t,n){return[n<0?n+t:n]}),even:he(function(e,t){for(var n=0;n<t;n+=2)e.push(n);return e}),odd:he(function(e,t){for(var n=1;n<t;n+=2)e.push(n);return e}),lt:he(function(e,t,n){for(var r=n<0?n+t:n;--r>=0;)e.push(r);return e}),gt:he(function(e,t,n){for(var r=n<0?n+t:n;++r<t;)e.push(r);return e})}}).pseudos.nth=r.pseudos.eq;for(t in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})r.pseudos[t]=fe(t);for(t in{submit:!0,reset:!0})r.pseudos[t]=pe(t);function ye(){}ye.prototype=r.filters=r.pseudos,r.setFilters=new ye,a=oe.tokenize=function(e,t){var n,i,o,a,s,u,l,c=k[e+" "];if(c)return t?0:c.slice(0);s=e,u=[],l=r.preFilter;while(s){n&&!(i=F.exec(s))||(i&&(s=s.slice(i[0].length)||s),u.push(o=[])),n=!1,(i=_.exec(s))&&(n=i.shift(),o.push({value:n,type:i[0].replace(B," ")}),s=s.slice(n.length));for(a in r.filter)!(i=V[a].exec(s))||l[a]&&!(i=l[a](i))||(n=i.shift(),o.push({value:n,type:a,matches:i}),s=s.slice(n.length));if(!n)break}return t?s.length:s?oe.error(e):k(e,u).slice(0)};function ve(e){for(var t=0,n=e.length,r="";t<n;t++)r+=e[t].value;return r}function me(e,t,n){var r=t.dir,i=t.next,o=i||r,a=n&&"parentNode"===o,s=C++;return t.first?function(t,n,i){while(t=t[r])if(1===t.nodeType||a)return e(t,n,i);return!1}:function(t,n,u){var l,c,f,p=[T,s];if(u){while(t=t[r])if((1===t.nodeType||a)&&e(t,n,u))return!0}else while(t=t[r])if(1===t.nodeType||a)if(f=t[b]||(t[b]={}),c=f[t.uniqueID]||(f[t.uniqueID]={}),i&&i===t.nodeName.toLowerCase())t=t[r]||t;else{if((l=c[o])&&l[0]===T&&l[1]===s)return p[2]=l[2];if(c[o]=p,p[2]=e(t,n,u))return!0}return!1}}function xe(e){return e.length>1?function(t,n,r){var i=e.length;while(i--)if(!e[i](t,n,r))return!1;return!0}:e[0]}function be(e,t,n){for(var r=0,i=t.length;r<i;r++)oe(e,t[r],n);return n}function we(e,t,n,r,i){for(var o,a=[],s=0,u=e.length,l=null!=t;s<u;s++)(o=e[s])&&(n&&!n(o,r,i)||(a.push(o),l&&t.push(s)));return a}function Te(e,t,n,r,i,o){return r&&!r[b]&&(r=Te(r)),i&&!i[b]&&(i=Te(i,o)),se(function(o,a,s,u){var l,c,f,p=[],d=[],h=a.length,g=o||be(t||"*",s.nodeType?[s]:s,[]),y=!e||!o&&t?g:we(g,p,e,s,u),v=n?i||(o?e:h||r)?[]:a:y;if(n&&n(y,v,s,u),r){l=we(v,d),r(l,[],s,u),c=l.length;while(c--)(f=l[c])&&(v[d[c]]=!(y[d[c]]=f))}if(o){if(i||e){if(i){l=[],c=v.length;while(c--)(f=v[c])&&l.push(y[c]=f);i(null,v=[],l,u)}c=v.length;while(c--)(f=v[c])&&(l=i?O(o,f):p[c])>-1&&(o[l]=!(a[l]=f))}}else v=we(v===a?v.splice(h,v.length):v),i?i(null,a,v,u):L.apply(a,v)})}function Ce(e){for(var t,n,i,o=e.length,a=r.relative[e[0].type],s=a||r.relative[" "],u=a?1:0,c=me(function(e){return e===t},s,!0),f=me(function(e){return O(t,e)>-1},s,!0),p=[function(e,n,r){var i=!a&&(r||n!==l)||((t=n).nodeType?c(e,n,r):f(e,n,r));return t=null,i}];u<o;u++)if(n=r.relative[e[u].type])p=[me(xe(p),n)];else{if((n=r.filter[e[u].type].apply(null,e[u].matches))[b]){for(i=++u;i<o;i++)if(r.relative[e[i].type])break;return Te(u>1&&xe(p),u>1&&ve(e.slice(0,u-1).concat({value:" "===e[u-2].type?"*":""})).replace(B,"$1"),n,u<i&&Ce(e.slice(u,i)),i<o&&Ce(e=e.slice(i)),i<o&&ve(e))}p.push(n)}return xe(p)}function Ee(e,t){var n=t.length>0,i=e.length>0,o=function(o,a,s,u,c){var 
f,h,y,v=0,m="0",x=o&&[],b=[],w=l,C=o||i&&r.find.TAG("*",c),E=T+=null==w?1:Math.random()||.1,k=C.length;for(c&&(l=a===d||a||c);m!==k&&null!=(f=C[m]);m++){if(i&&f){h=0,a||f.ownerDocument===d||(p(f),s=!g);while(y=e[h++])if(y(f,a||d,s)){u.push(f);break}c&&(T=E)}n&&((f=!y&&f)&&v--,o&&x.push(f))}if(v+=m,n&&m!==v){h=0;while(y=t[h++])y(x,b,a,s);if(o){if(v>0)while(m--)x[m]||b[m]||(b[m]=j.call(u));b=we(b)}L.apply(u,b),c&&!o&&b.length>0&&v+t.length>1&&oe.uniqueSort(u)}return c&&(T=E,l=w),x};return n?se(o):o}return s=oe.compile=function(e,t){var n,r=[],i=[],o=S[e+" "];if(!o){t||(t=a(e)),n=t.length;while(n--)(o=Ce(t[n]))[b]?r.push(o):i.push(o);(o=S(e,Ee(i,r))).selector=e}return o},u=oe.select=function(e,t,n,i){var o,u,l,c,f,p="function"==typeof e&&e,d=!i&&a(e=p.selector||e);if(n=n||[],1===d.length){if((u=d[0]=d[0].slice(0)).length>2&&"ID"===(l=u[0]).type&&9===t.nodeType&&g&&r.relative[u[1].type]){if(!(t=(r.find.ID(l.matches[0].replace(Z,ee),t)||[])[0]))return n;p&&(t=t.parentNode),e=e.slice(u.shift().value.length)}o=V.needsContext.test(e)?0:u.length;while(o--){if(l=u[o],r.relative[c=l.type])break;if((f=r.find[c])&&(i=f(l.matches[0].replace(Z,ee),K.test(u[0].type)&&ge(t.parentNode)||t))){if(u.splice(o,1),!(e=i.length&&ve(u)))return L.apply(n,i),n;break}}}return(p||s(e,d))(i,t,!g,n,!t||K.test(e)&&ge(t.parentNode)||t),n},n.sortStable=b.split("").sort(D).join("")===b,n.detectDuplicates=!!f,p(),n.sortDetached=ue(function(e){return 1&e.compareDocumentPosition(d.createElement("fieldset"))}),ue(function(e){return e.innerHTML="<a href='#'></a>","#"===e.firstChild.getAttribute("href")})||le("type|href|height|width",function(e,t,n){if(!n)return e.getAttribute(t,"type"===t.toLowerCase()?1:2)}),n.attributes&&ue(function(e){return e.innerHTML="<input/>",e.firstChild.setAttribute("value",""),""===e.firstChild.getAttribute("value")})||le("value",function(e,t,n){if(!n&&"input"===e.nodeName.toLowerCase())return e.defaultValue}),ue(function(e){return null==e.getAttribute("disabled")})||le(P,function(e,t,n){var r;if(!n)return!0===e[t]?t.toLowerCase():(r=e.getAttributeNode(t))&&r.specified?r.value:null}),oe}(e);w.find=E,w.expr=E.selectors,w.expr[":"]=w.expr.pseudos,w.uniqueSort=w.unique=E.uniqueSort,w.text=E.getText,w.isXMLDoc=E.isXML,w.contains=E.contains,w.escapeSelector=E.escape;var k=function(e,t,n){var r=[],i=void 0!==n;while((e=e[t])&&9!==e.nodeType)if(1===e.nodeType){if(i&&w(e).is(n))break;r.push(e)}return r},S=function(e,t){for(var n=[];e;e=e.nextSibling)1===e.nodeType&&e!==t&&n.push(e);return n},D=w.expr.match.needsContext;function N(e,t){return e.nodeName&&e.nodeName.toLowerCase()===t.toLowerCase()}var A=/^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,t,n){return g(t)?w.grep(e,function(e,r){return!!t.call(e,r,e)!==n}):t.nodeType?w.grep(e,function(e){return e===t!==n}):"string"!=typeof t?w.grep(e,function(e){return u.call(t,e)>-1!==n}):w.filter(t,e,n)}w.filter=function(e,t,n){var r=t[0];return n&&(e=":not("+e+")"),1===t.length&&1===r.nodeType?w.find.matchesSelector(r,e)?[r]:[]:w.find.matches(e,w.grep(t,function(e){return 1===e.nodeType}))},w.fn.extend({find:function(e){var t,n,r=this.length,i=this;if("string"!=typeof e)return this.pushStack(w(e).filter(function(){for(t=0;t<r;t++)if(w.contains(i[t],this))return!0}));for(n=this.pushStack([]),t=0;t<r;t++)w.find(e,i[t],n);return r>1?w.uniqueSort(n):n},filter:function(e){return this.pushStack(j(this,e||[],!1))},not:function(e){return this.pushStack(j(this,e||[],!0))},is:function(e){return!!j(this,"string"==typeof 
e&&D.test(e)?w(e):e||[],!1).length}});var q,L=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/;(w.fn.init=function(e,t,n){var i,o;if(!e)return this;if(n=n||q,"string"==typeof e){if(!(i="<"===e[0]&&">"===e[e.length-1]&&e.length>=3?[null,e,null]:L.exec(e))||!i[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(i[1]){if(t=t instanceof w?t[0]:t,w.merge(this,w.parseHTML(i[1],t&&t.nodeType?t.ownerDocument||t:r,!0)),A.test(i[1])&&w.isPlainObject(t))for(i in t)g(this[i])?this[i](t[i]):this.attr(i,t[i]);return this}return(o=r.getElementById(i[2]))&&(this[0]=o,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):g(e)?void 0!==n.ready?n.ready(e):e(w):w.makeArray(e,this)}).prototype=w.fn,q=w(r);var H=/^(?:parents|prev(?:Until|All))/,O={children:!0,contents:!0,next:!0,prev:!0};w.fn.extend({has:function(e){var t=w(e,this),n=t.length;return this.filter(function(){for(var e=0;e<n;e++)if(w.contains(this,t[e]))return!0})},closest:function(e,t){var n,r=0,i=this.length,o=[],a="string"!=typeof e&&w(e);if(!D.test(e))for(;r<i;r++)for(n=this[r];n&&n!==t;n=n.parentNode)if(n.nodeType<11&&(a?a.index(n)>-1:1===n.nodeType&&w.find.matchesSelector(n,e))){o.push(n);break}return this.pushStack(o.length>1?w.uniqueSort(o):o)},index:function(e){return e?"string"==typeof e?u.call(w(e),this[0]):u.call(this,e.jquery?e[0]:e):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(e,t){return this.pushStack(w.uniqueSort(w.merge(this.get(),w(e,t))))},addBack:function(e){return this.add(null==e?this.prevObject:this.prevObject.filter(e))}});function P(e,t){while((e=e[t])&&1!==e.nodeType);return e}w.each({parent:function(e){var t=e.parentNode;return t&&11!==t.nodeType?t:null},parents:function(e){return k(e,"parentNode")},parentsUntil:function(e,t,n){return k(e,"parentNode",n)},next:function(e){return P(e,"nextSibling")},prev:function(e){return P(e,"previousSibling")},nextAll:function(e){return k(e,"nextSibling")},prevAll:function(e){return k(e,"previousSibling")},nextUntil:function(e,t,n){return k(e,"nextSibling",n)},prevUntil:function(e,t,n){return k(e,"previousSibling",n)},siblings:function(e){return S((e.parentNode||{}).firstChild,e)},children:function(e){return S(e.firstChild)},contents:function(e){return N(e,"iframe")?e.contentDocument:(N(e,"template")&&(e=e.content||e),w.merge([],e.childNodes))}},function(e,t){w.fn[e]=function(n,r){var i=w.map(this,t,n);return"Until"!==e.slice(-5)&&(r=n),r&&"string"==typeof r&&(i=w.filter(r,i)),this.length>1&&(O[e]||w.uniqueSort(i),H.test(e)&&i.reverse()),this.pushStack(i)}});var M=/[^\x20\t\r\n\f]+/g;function R(e){var t={};return w.each(e.match(M)||[],function(e,n){t[n]=!0}),t}w.Callbacks=function(e){e="string"==typeof e?R(e):w.extend({},e);var t,n,r,i,o=[],a=[],s=-1,u=function(){for(i=i||e.once,r=t=!0;a.length;s=-1){n=a.shift();while(++s<o.length)!1===o[s].apply(n[0],n[1])&&e.stopOnFalse&&(s=o.length,n=!1)}e.memory||(n=!1),t=!1,i&&(o=n?[]:"")},l={add:function(){return o&&(n&&!t&&(s=o.length-1,a.push(n)),function t(n){w.each(n,function(n,r){g(r)?e.unique&&l.has(r)||o.push(r):r&&r.length&&"string"!==x(r)&&t(r)})}(arguments),n&&!t&&u()),this},remove:function(){return w.each(arguments,function(e,t){var n;while((n=w.inArray(t,o,n))>-1)o.splice(n,1),n<=s&&s--}),this},has:function(e){return e?w.inArray(e,o)>-1:o.length>0},empty:function(){return o&&(o=[]),this},disable:function(){return i=a=[],o=n="",this},disabled:function(){return!o},lock:function(){return i=a=[],n||t||(o=n=""),this},locked:function(){return!!i},fireWith:function(e,n){return 
i||(n=[e,(n=n||[]).slice?n.slice():n],a.push(n),t||u()),this},fire:function(){return l.fireWith(this,arguments),this},fired:function(){return!!r}};return l};function I(e){return e}function W(e){throw e}function $(e,t,n,r){var i;try{e&&g(i=e.promise)?i.call(e).done(t).fail(n):e&&g(i=e.then)?i.call(e,t,n):t.apply(void 0,[e].slice(r))}catch(e){n.apply(void 0,[e])}}w.extend({Deferred:function(t){var n=[["notify","progress",w.Callbacks("memory"),w.Callbacks("memory"),2],["resolve","done",w.Callbacks("once memory"),w.Callbacks("once memory"),0,"resolved"],["reject","fail",w.Callbacks("once memory"),w.Callbacks("once memory"),1,"rejected"]],r="pending",i={state:function(){return r},always:function(){return o.done(arguments).fail(arguments),this},"catch":function(e){return i.then(null,e)},pipe:function(){var e=arguments;return w.Deferred(function(t){w.each(n,function(n,r){var i=g(e[r[4]])&&e[r[4]];o[r[1]](function(){var e=i&&i.apply(this,arguments);e&&g(e.promise)?e.promise().progress(t.notify).done(t.resolve).fail(t.reject):t[r[0]+"With"](this,i?[e]:arguments)})}),e=null}).promise()},then:function(t,r,i){var o=0;function a(t,n,r,i){return function(){var s=this,u=arguments,l=function(){var e,l;if(!(t<o)){if((e=r.apply(s,u))===n.promise())throw new TypeError("Thenable self-resolution");l=e&&("object"==typeof e||"function"==typeof e)&&e.then,g(l)?i?l.call(e,a(o,n,I,i),a(o,n,W,i)):(o++,l.call(e,a(o,n,I,i),a(o,n,W,i),a(o,n,I,n.notifyWith))):(r!==I&&(s=void 0,u=[e]),(i||n.resolveWith)(s,u))}},c=i?l:function(){try{l()}catch(e){w.Deferred.exceptionHook&&w.Deferred.exceptionHook(e,c.stackTrace),t+1>=o&&(r!==W&&(s=void 0,u=[e]),n.rejectWith(s,u))}};t?c():(w.Deferred.getStackHook&&(c.stackTrace=w.Deferred.getStackHook()),e.setTimeout(c))}}return w.Deferred(function(e){n[0][3].add(a(0,e,g(i)?i:I,e.notifyWith)),n[1][3].add(a(0,e,g(t)?t:I)),n[2][3].add(a(0,e,g(r)?r:W))}).promise()},promise:function(e){return null!=e?w.extend(e,i):i}},o={};return w.each(n,function(e,t){var a=t[2],s=t[5];i[t[1]]=a.add,s&&a.add(function(){r=s},n[3-e][2].disable,n[3-e][3].disable,n[0][2].lock,n[0][3].lock),a.add(t[3].fire),o[t[0]]=function(){return o[t[0]+"With"](this===o?void 0:this,arguments),this},o[t[0]+"With"]=a.fireWith}),i.promise(o),t&&t.call(o,o),o},when:function(e){var t=arguments.length,n=t,r=Array(n),i=o.call(arguments),a=w.Deferred(),s=function(e){return function(n){r[e]=this,i[e]=arguments.length>1?o.call(arguments):n,--t||a.resolveWith(r,i)}};if(t<=1&&($(e,a.done(s(n)).resolve,a.reject,!t),"pending"===a.state()||g(i[n]&&i[n].then)))return a.then();while(n--)$(i[n],s(n),a.reject);return a.promise()}});var B=/^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/;w.Deferred.exceptionHook=function(t,n){e.console&&e.console.warn&&t&&B.test(t.name)&&e.console.warn("jQuery.Deferred exception: "+t.message,t.stack,n)},w.readyException=function(t){e.setTimeout(function(){throw t})};var F=w.Deferred();w.fn.ready=function(e){return F.then(e)["catch"](function(e){w.readyException(e)}),this},w.extend({isReady:!1,readyWait:1,ready:function(e){(!0===e?--w.readyWait:w.isReady)||(w.isReady=!0,!0!==e&&--w.readyWait>0||F.resolveWith(r,[w]))}}),w.ready.then=F.then;function _(){r.removeEventListener("DOMContentLoaded",_),e.removeEventListener("load",_),w.ready()}"complete"===r.readyState||"loading"!==r.readyState&&!r.documentElement.doScroll?e.setTimeout(w.ready):(r.addEventListener("DOMContentLoaded",_),e.addEventListener("load",_));var z=function(e,t,n,r,i,o,a){var s=0,u=e.length,l=null==n;if("object"===x(n)){i=!0;for(s in 
n)z(e,t,s,n[s],!0,o,a)}else if(void 0!==r&&(i=!0,g(r)||(a=!0),l&&(a?(t.call(e,r),t=null):(l=t,t=function(e,t,n){return l.call(w(e),n)})),t))for(;s<u;s++)t(e[s],n,a?r:r.call(e[s],s,t(e[s],n)));return i?e:l?t.call(e):u?t(e[0],n):o},X=/^-ms-/,U=/-([a-z])/g;function V(e,t){return t.toUpperCase()}function G(e){return e.replace(X,"ms-").replace(U,V)}var Y=function(e){return 1===e.nodeType||9===e.nodeType||!+e.nodeType};function Q(){this.expando=w.expando+Q.uid++}Q.uid=1,Q.prototype={cache:function(e){var t=e[this.expando];return t||(t={},Y(e)&&(e.nodeType?e[this.expando]=t:Object.defineProperty(e,this.expando,{value:t,configurable:!0}))),t},set:function(e,t,n){var r,i=this.cache(e);if("string"==typeof t)i[G(t)]=n;else for(r in t)i[G(r)]=t[r];return i},get:function(e,t){return void 0===t?this.cache(e):e[this.expando]&&e[this.expando][G(t)]},access:function(e,t,n){return void 0===t||t&&"string"==typeof t&&void 0===n?this.get(e,t):(this.set(e,t,n),void 0!==n?n:t)},remove:function(e,t){var n,r=e[this.expando];if(void 0!==r){if(void 0!==t){n=(t=Array.isArray(t)?t.map(G):(t=G(t))in r?[t]:t.match(M)||[]).length;while(n--)delete r[t[n]]}(void 0===t||w.isEmptyObject(r))&&(e.nodeType?e[this.expando]=void 0:delete e[this.expando])}},hasData:function(e){var t=e[this.expando];return void 0!==t&&!w.isEmptyObject(t)}};var J=new Q,K=new Q,Z=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,ee=/[A-Z]/g;function te(e){return"true"===e||"false"!==e&&("null"===e?null:e===+e+""?+e:Z.test(e)?JSON.parse(e):e)}function ne(e,t,n){var r;if(void 0===n&&1===e.nodeType)if(r="data-"+t.replace(ee,"-$&").toLowerCase(),"string"==typeof(n=e.getAttribute(r))){try{n=te(n)}catch(e){}K.set(e,t,n)}else n=void 0;return n}w.extend({hasData:function(e){return K.hasData(e)||J.hasData(e)},data:function(e,t,n){return K.access(e,t,n)},removeData:function(e,t){K.remove(e,t)},_data:function(e,t,n){return J.access(e,t,n)},_removeData:function(e,t){J.remove(e,t)}}),w.fn.extend({data:function(e,t){var n,r,i,o=this[0],a=o&&o.attributes;if(void 0===e){if(this.length&&(i=K.get(o),1===o.nodeType&&!J.get(o,"hasDataAttrs"))){n=a.length;while(n--)a[n]&&0===(r=a[n].name).indexOf("data-")&&(r=G(r.slice(5)),ne(o,r,i[r]));J.set(o,"hasDataAttrs",!0)}return i}return"object"==typeof e?this.each(function(){K.set(this,e)}):z(this,function(t){var n;if(o&&void 0===t){if(void 0!==(n=K.get(o,e)))return n;if(void 0!==(n=ne(o,e)))return n}else this.each(function(){K.set(this,e,t)})},null,t,arguments.length>1,null,!0)},removeData:function(e){return this.each(function(){K.remove(this,e)})}}),w.extend({queue:function(e,t,n){var r;if(e)return t=(t||"fx")+"queue",r=J.get(e,t),n&&(!r||Array.isArray(n)?r=J.access(e,t,w.makeArray(n)):r.push(n)),r||[]},dequeue:function(e,t){t=t||"fx";var n=w.queue(e,t),r=n.length,i=n.shift(),o=w._queueHooks(e,t),a=function(){w.dequeue(e,t)};"inprogress"===i&&(i=n.shift(),r--),i&&("fx"===t&&n.unshift("inprogress"),delete o.stop,i.call(e,a,o)),!r&&o&&o.empty.fire()},_queueHooks:function(e,t){var n=t+"queueHooks";return J.get(e,n)||J.access(e,n,{empty:w.Callbacks("once memory").add(function(){J.remove(e,[t+"queue",n])})})}}),w.fn.extend({queue:function(e,t){var n=2;return"string"!=typeof e&&(t=e,e="fx",n--),arguments.length<n?w.queue(this[0],e):void 0===t?this:this.each(function(){var n=w.queue(this,e,t);w._queueHooks(this,e),"fx"===e&&"inprogress"!==n[0]&&w.dequeue(this,e)})},dequeue:function(e){return this.each(function(){w.dequeue(this,e)})},clearQueue:function(e){return this.queue(e||"fx",[])},promise:function(e,t){var 
n,r=1,i=w.Deferred(),o=this,a=this.length,s=function(){--r||i.resolveWith(o,[o])};"string"!=typeof e&&(t=e,e=void 0),e=e||"fx";while(a--)(n=J.get(o[a],e+"queueHooks"))&&n.empty&&(r++,n.empty.add(s));return s(),i.promise(t)}});var re=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,ie=new RegExp("^(?:([+-])=|)("+re+")([a-z%]*)$","i"),oe=["Top","Right","Bottom","Left"],ae=function(e,t){return"none"===(e=t||e).style.display||""===e.style.display&&w.contains(e.ownerDocument,e)&&"none"===w.css(e,"display")},se=function(e,t,n,r){var i,o,a={};for(o in t)a[o]=e.style[o],e.style[o]=t[o];i=n.apply(e,r||[]);for(o in t)e.style[o]=a[o];return i};function ue(e,t,n,r){var i,o,a=20,s=r?function(){return r.cur()}:function(){return w.css(e,t,"")},u=s(),l=n&&n[3]||(w.cssNumber[t]?"":"px"),c=(w.cssNumber[t]||"px"!==l&&+u)&&ie.exec(w.css(e,t));if(c&&c[3]!==l){u/=2,l=l||c[3],c=+u||1;while(a--)w.style(e,t,c+l),(1-o)*(1-(o=s()/u||.5))<=0&&(a=0),c/=o;c*=2,w.style(e,t,c+l),n=n||[]}return n&&(c=+c||+u||0,i=n[1]?c+(n[1]+1)*n[2]:+n[2],r&&(r.unit=l,r.start=c,r.end=i)),i}var le={};function ce(e){var t,n=e.ownerDocument,r=e.nodeName,i=le[r];return i||(t=n.body.appendChild(n.createElement(r)),i=w.css(t,"display"),t.parentNode.removeChild(t),"none"===i&&(i="block"),le[r]=i,i)}function fe(e,t){for(var n,r,i=[],o=0,a=e.length;o<a;o++)(r=e[o]).style&&(n=r.style.display,t?("none"===n&&(i[o]=J.get(r,"display")||null,i[o]||(r.style.display="")),""===r.style.display&&ae(r)&&(i[o]=ce(r))):"none"!==n&&(i[o]="none",J.set(r,"display",n)));for(o=0;o<a;o++)null!=i[o]&&(e[o].style.display=i[o]);return e}w.fn.extend({show:function(){return fe(this,!0)},hide:function(){return fe(this)},toggle:function(e){return"boolean"==typeof e?e?this.show():this.hide():this.each(function(){ae(this)?w(this).show():w(this).hide()})}});var pe=/^(?:checkbox|radio)$/i,de=/<([a-z][^\/\0>\x20\t\r\n\f]+)/i,he=/^$|^module$|\/(?:java|ecma)script/i,ge={option:[1,"<select multiple='multiple'>","</select>"],thead:[1,"<table>","</table>"],col:[2,"<table><colgroup>","</colgroup></table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:[0,"",""]};ge.optgroup=ge.option,ge.tbody=ge.tfoot=ge.colgroup=ge.caption=ge.thead,ge.th=ge.td;function ye(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&N(e,t)?w.merge([e],n):n}function ve(e,t){for(var n=0,r=e.length;n<r;n++)J.set(e[n],"globalEval",!t||J.get(t[n],"globalEval"))}var me=/<|&#?\w+;/;function xe(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d<h;d++)if((o=e[d])||0===o)if("object"===x(o))w.merge(p,o.nodeType?[o]:o);else if(me.test(o)){a=a||f.appendChild(t.createElement("div")),s=(de.exec(o)||["",""])[1].toLowerCase(),u=ge[s]||ge._default,a.innerHTML=u[1]+w.htmlPrefilter(o)+u[2],c=u[0];while(c--)a=a.lastChild;w.merge(p,a.childNodes),(a=f.firstChild).textContent=""}else p.push(t.createTextNode(o));f.textContent="",d=0;while(o=p[d++])if(r&&w.inArray(o,r)>-1)i&&i.push(o);else if(l=w.contains(o.ownerDocument,o),a=ye(f.appendChild(o),"script"),l&&ve(a),n){c=0;while(o=a[c++])he.test(o.type||"")&&n.push(o)}return f}!function(){var 
e=r.createDocumentFragment().appendChild(r.createElement("div")),t=r.createElement("input");t.setAttribute("type","radio"),t.setAttribute("checked","checked"),t.setAttribute("name","t"),e.appendChild(t),h.checkClone=e.cloneNode(!0).cloneNode(!0).lastChild.checked,e.innerHTML="<textarea>x</textarea>",h.noCloneChecked=!!e.cloneNode(!0).lastChild.defaultValue}();var be=r.documentElement,we=/^key/,Te=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,Ce=/^([^.]*)(?:\.(.+)|)/;function Ee(){return!0}function ke(){return!1}function Se(){try{return r.activeElement}catch(e){}}function De(e,t,n,r,i,o){var a,s;if("object"==typeof t){"string"!=typeof n&&(r=r||n,n=void 0);for(s in t)De(e,s,n,r,t[s],o);return e}if(null==r&&null==i?(i=n,r=n=void 0):null==i&&("string"==typeof n?(i=r,r=void 0):(i=r,r=n,n=void 0)),!1===i)i=ke;else if(!i)return e;return 1===o&&(a=i,(i=function(e){return w().off(e),a.apply(this,arguments)}).guid=a.guid||(a.guid=w.guid++)),e.each(function(){w.event.add(this,t,i,r,n)})}w.event={global:{},add:function(e,t,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,y=J.get(e);if(y){n.handler&&(n=(o=n).handler,i=o.selector),i&&w.find.matchesSelector(be,i),n.guid||(n.guid=w.guid++),(u=y.events)||(u=y.events={}),(a=y.handle)||(a=y.handle=function(t){return"undefined"!=typeof w&&w.event.triggered!==t.type?w.event.dispatch.apply(e,arguments):void 0}),l=(t=(t||"").match(M)||[""]).length;while(l--)d=g=(s=Ce.exec(t[l])||[])[1],h=(s[2]||"").split(".").sort(),d&&(f=w.event.special[d]||{},d=(i?f.delegateType:f.bindType)||d,f=w.event.special[d]||{},c=w.extend({type:d,origType:g,data:r,handler:n,guid:n.guid,selector:i,needsContext:i&&w.expr.match.needsContext.test(i),namespace:h.join(".")},o),(p=u[d])||((p=u[d]=[]).delegateCount=0,f.setup&&!1!==f.setup.call(e,r,h,a)||e.addEventListener&&e.addEventListener(d,a)),f.add&&(f.add.call(e,c),c.handler.guid||(c.handler.guid=n.guid)),i?p.splice(p.delegateCount++,0,c):p.push(c),w.event.global[d]=!0)}},remove:function(e,t,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,y=J.hasData(e)&&J.get(e);if(y&&(u=y.events)){l=(t=(t||"").match(M)||[""]).length;while(l--)if(s=Ce.exec(t[l])||[],d=g=s[1],h=(s[2]||"").split(".").sort(),d){f=w.event.special[d]||{},p=u[d=(r?f.delegateType:f.bindType)||d]||[],s=s[2]&&new RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"),a=o=p.length;while(o--)c=p[o],!i&&g!==c.origType||n&&n.guid!==c.guid||s&&!s.test(c.namespace)||r&&r!==c.selector&&("**"!==r||!c.selector)||(p.splice(o,1),c.selector&&p.delegateCount--,f.remove&&f.remove.call(e,c));a&&!p.length&&(f.teardown&&!1!==f.teardown.call(e,h,y.handle)||w.removeEvent(e,d,y.handle),delete u[d])}else for(d in u)w.event.remove(e,d+t[l],n,r,!0);w.isEmptyObject(u)&&J.remove(e,"handle events")}},dispatch:function(e){var t=w.event.fix(e),n,r,i,o,a,s,u=new Array(arguments.length),l=(J.get(this,"events")||{})[t.type]||[],c=w.event.special[t.type]||{};for(u[0]=t,n=1;n<arguments.length;n++)u[n]=arguments[n];if(t.delegateTarget=this,!c.preDispatch||!1!==c.preDispatch.call(this,t)){s=w.event.handlers.call(this,t,l),n=0;while((o=s[n++])&&!t.isPropagationStopped()){t.currentTarget=o.elem,r=0;while((a=o.handlers[r++])&&!t.isImmediatePropagationStopped())t.rnamespace&&!t.rnamespace.test(a.namespace)||(t.handleObj=a,t.data=a.data,void 0!==(i=((w.event.special[a.origType]||{}).handle||a.handler).apply(o.elem,u))&&!1===(t.result=i)&&(t.preventDefault(),t.stopPropagation()))}return c.postDispatch&&c.postDispatch.call(this,t),t.result}},handlers:function(e,t){var 
n,r,i,o,a,s=[],u=t.delegateCount,l=e.target;if(u&&l.nodeType&&!("click"===e.type&&e.button>=1))for(;l!==this;l=l.parentNode||this)if(1===l.nodeType&&("click"!==e.type||!0!==l.disabled)){for(o=[],a={},n=0;n<u;n++)void 0===a[i=(r=t[n]).selector+" "]&&(a[i]=r.needsContext?w(i,this).index(l)>-1:w.find(i,this,null,[l]).length),a[i]&&o.push(r);o.length&&s.push({elem:l,handlers:o})}return l=this,u<t.length&&s.push({elem:l,handlers:t.slice(u)}),s},addProp:function(e,t){Object.defineProperty(w.Event.prototype,e,{enumerable:!0,configurable:!0,get:g(t)?function(){if(this.originalEvent)return t(this.originalEvent)}:function(){if(this.originalEvent)return this.originalEvent[e]},set:function(t){Object.defineProperty(this,e,{enumerable:!0,configurable:!0,writable:!0,value:t})}})},fix:function(e){return e[w.expando]?e:new w.Event(e)},special:{load:{noBubble:!0},focus:{trigger:function(){if(this!==Se()&&this.focus)return this.focus(),!1},delegateType:"focusin"},blur:{trigger:function(){if(this===Se()&&this.blur)return this.blur(),!1},delegateType:"focusout"},click:{trigger:function(){if("checkbox"===this.type&&this.click&&N(this,"input"))return this.click(),!1},_default:function(e){return N(e.target,"a")}},beforeunload:{postDispatch:function(e){void 0!==e.result&&e.originalEvent&&(e.originalEvent.returnValue=e.result)}}}},w.removeEvent=function(e,t,n){e.removeEventListener&&e.removeEventListener(t,n)},w.Event=function(e,t){if(!(this instanceof w.Event))return new w.Event(e,t);e&&e.type?(this.originalEvent=e,this.type=e.type,this.isDefaultPrevented=e.defaultPrevented||void 0===e.defaultPrevented&&!1===e.returnValue?Ee:ke,this.target=e.target&&3===e.target.nodeType?e.target.parentNode:e.target,this.currentTarget=e.currentTarget,this.relatedTarget=e.relatedTarget):this.type=e,t&&w.extend(this,t),this.timeStamp=e&&e.timeStamp||Date.now(),this[w.expando]=!0},w.Event.prototype={constructor:w.Event,isDefaultPrevented:ke,isPropagationStopped:ke,isImmediatePropagationStopped:ke,isSimulated:!1,preventDefault:function(){var e=this.originalEvent;this.isDefaultPrevented=Ee,e&&!this.isSimulated&&e.preventDefault()},stopPropagation:function(){var e=this.originalEvent;this.isPropagationStopped=Ee,e&&!this.isSimulated&&e.stopPropagation()},stopImmediatePropagation:function(){var e=this.originalEvent;this.isImmediatePropagationStopped=Ee,e&&!this.isSimulated&&e.stopImmediatePropagation(),this.stopPropagation()}},w.each({altKey:!0,bubbles:!0,cancelable:!0,changedTouches:!0,ctrlKey:!0,detail:!0,eventPhase:!0,metaKey:!0,pageX:!0,pageY:!0,shiftKey:!0,view:!0,"char":!0,charCode:!0,key:!0,keyCode:!0,button:!0,buttons:!0,clientX:!0,clientY:!0,offsetX:!0,offsetY:!0,pointerId:!0,pointerType:!0,screenX:!0,screenY:!0,targetTouches:!0,toElement:!0,touches:!0,which:function(e){var t=e.button;return null==e.which&&we.test(e.type)?null!=e.charCode?e.charCode:e.keyCode:!e.which&&void 0!==t&&Te.test(e.type)?1&t?1:2&t?3:4&t?2:0:e.which}},w.event.addProp),w.each({mouseenter:"mouseover",mouseleave:"mouseout",pointerenter:"pointerover",pointerleave:"pointerout"},function(e,t){w.event.special[e]={delegateType:t,bindType:t,handle:function(e){var n,r=this,i=e.relatedTarget,o=e.handleObj;return i&&(i===r||w.contains(r,i))||(e.type=o.origType,n=o.handler.apply(this,arguments),e.type=t),n}}}),w.fn.extend({on:function(e,t,n,r){return De(this,e,t,n,r)},one:function(e,t,n,r){return De(this,e,t,n,r,1)},off:function(e,t,n){var r,i;if(e&&e.preventDefault&&e.handleObj)return 
r=e.handleObj,w(e.delegateTarget).off(r.namespace?r.origType+"."+r.namespace:r.origType,r.selector,r.handler),this;if("object"==typeof e){for(i in e)this.off(i,t,e[i]);return this}return!1!==t&&"function"!=typeof t||(n=t,t=void 0),!1===n&&(n=ke),this.each(function(){w.event.remove(this,e,n,t)})}});var Ne=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi,Ae=/<script|<style|<link/i,je=/checked\s*(?:[^=]|=\s*.checked.)/i,qe=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;function Le(e,t){return N(e,"table")&&N(11!==t.nodeType?t:t.firstChild,"tr")?w(e).children("tbody")[0]||e:e}function He(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function Oe(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Pe(e,t){var n,r,i,o,a,s,u,l;if(1===t.nodeType){if(J.hasData(e)&&(o=J.access(e),a=J.set(t,o),l=o.events)){delete a.handle,a.events={};for(i in l)for(n=0,r=l[i].length;n<r;n++)w.event.add(t,i,l[i][n])}K.hasData(e)&&(s=K.access(e),u=w.extend({},s),K.set(t,u))}}function Me(e,t){var n=t.nodeName.toLowerCase();"input"===n&&pe.test(e.type)?t.checked=e.checked:"input"!==n&&"textarea"!==n||(t.defaultValue=e.defaultValue)}function Re(e,t,n,r){t=a.apply([],t);var i,o,s,u,l,c,f=0,p=e.length,d=p-1,y=t[0],v=g(y);if(v||p>1&&"string"==typeof y&&!h.checkClone&&je.test(y))return e.each(function(i){var o=e.eq(i);v&&(t[0]=y.call(this,i,o.html())),Re(o,t,n,r)});if(p&&(i=xe(t,e[0].ownerDocument,!1,e,r),o=i.firstChild,1===i.childNodes.length&&(i=o),o||r)){for(u=(s=w.map(ye(i,"script"),He)).length;f<p;f++)l=i,f!==d&&(l=w.clone(l,!0,!0),u&&w.merge(s,ye(l,"script"))),n.call(e[f],l,f);if(u)for(c=s[s.length-1].ownerDocument,w.map(s,Oe),f=0;f<u;f++)l=s[f],he.test(l.type||"")&&!J.access(l,"globalEval")&&w.contains(c,l)&&(l.src&&"module"!==(l.type||"").toLowerCase()?w._evalUrl&&w._evalUrl(l.src):m(l.textContent.replace(qe,""),c,l))}return e}function Ie(e,t,n){for(var r,i=t?w.filter(t,e):e,o=0;null!=(r=i[o]);o++)n||1!==r.nodeType||w.cleanData(ye(r)),r.parentNode&&(n&&w.contains(r.ownerDocument,r)&&ve(ye(r,"script")),r.parentNode.removeChild(r));return e}w.extend({htmlPrefilter:function(e){return e.replace(Ne,"<$1></$2>")},clone:function(e,t,n){var r,i,o,a,s=e.cloneNode(!0),u=w.contains(e.ownerDocument,e);if(!(h.noCloneChecked||1!==e.nodeType&&11!==e.nodeType||w.isXMLDoc(e)))for(a=ye(s),r=0,i=(o=ye(e)).length;r<i;r++)Me(o[r],a[r]);if(t)if(n)for(o=o||ye(e),a=a||ye(s),r=0,i=o.length;r<i;r++)Pe(o[r],a[r]);else Pe(e,s);return(a=ye(s,"script")).length>0&&ve(a,!u&&ye(e,"script")),s},cleanData:function(e){for(var t,n,r,i=w.event.special,o=0;void 0!==(n=e[o]);o++)if(Y(n)){if(t=n[J.expando]){if(t.events)for(r in t.events)i[r]?w.event.remove(n,r):w.removeEvent(n,r,t.handle);n[J.expando]=void 0}n[K.expando]&&(n[K.expando]=void 0)}}}),w.fn.extend({detach:function(e){return Ie(this,e,!0)},remove:function(e){return Ie(this,e)},text:function(e){return z(this,function(e){return void 0===e?w.text(this):this.empty().each(function(){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||(this.textContent=e)})},null,e,arguments.length)},append:function(){return Re(this,arguments,function(e){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||Le(this,e).appendChild(e)})},prepend:function(){return Re(this,arguments,function(e){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var t=Le(this,e);t.insertBefore(e,t.firstChild)}})},before:function(){return 
Re(this,arguments,function(e){this.parentNode&&this.parentNode.insertBefore(e,this)})},after:function(){return Re(this,arguments,function(e){this.parentNode&&this.parentNode.insertBefore(e,this.nextSibling)})},empty:function(){for(var e,t=0;null!=(e=this[t]);t++)1===e.nodeType&&(w.cleanData(ye(e,!1)),e.textContent="");return this},clone:function(e,t){return e=null!=e&&e,t=null==t?e:t,this.map(function(){return w.clone(this,e,t)})},html:function(e){return z(this,function(e){var t=this[0]||{},n=0,r=this.length;if(void 0===e&&1===t.nodeType)return t.innerHTML;if("string"==typeof e&&!Ae.test(e)&&!ge[(de.exec(e)||["",""])[1].toLowerCase()]){e=w.htmlPrefilter(e);try{for(;n<r;n++)1===(t=this[n]||{}).nodeType&&(w.cleanData(ye(t,!1)),t.innerHTML=e);t=0}catch(e){}}t&&this.empty().append(e)},null,e,arguments.length)},replaceWith:function(){var e=[];return Re(this,arguments,function(t){var n=this.parentNode;w.inArray(this,e)<0&&(w.cleanData(ye(this)),n&&n.replaceChild(t,this))},e)}}),w.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(e,t){w.fn[e]=function(e){for(var n,r=[],i=w(e),o=i.length-1,a=0;a<=o;a++)n=a===o?this:this.clone(!0),w(i[a])[t](n),s.apply(r,n.get());return this.pushStack(r)}});var We=new RegExp("^("+re+")(?!px)[a-z%]+$","i"),$e=function(t){var n=t.ownerDocument.defaultView;return n&&n.opener||(n=e),n.getComputedStyle(t)},Be=new RegExp(oe.join("|"),"i");!function(){function t(){if(c){l.style.cssText="position:absolute;left:-11111px;width:60px;margin-top:1px;padding:0;border:0",c.style.cssText="position:relative;display:block;box-sizing:border-box;overflow:scroll;margin:auto;border:1px;padding:1px;width:60%;top:1%",be.appendChild(l).appendChild(c);var t=e.getComputedStyle(c);i="1%"!==t.top,u=12===n(t.marginLeft),c.style.right="60%",s=36===n(t.right),o=36===n(t.width),c.style.position="absolute",a=36===c.offsetWidth||"absolute",be.removeChild(l),c=null}}function n(e){return Math.round(parseFloat(e))}var i,o,a,s,u,l=r.createElement("div"),c=r.createElement("div");c.style&&(c.style.backgroundClip="content-box",c.cloneNode(!0).style.backgroundClip="",h.clearCloneStyle="content-box"===c.style.backgroundClip,w.extend(h,{boxSizingReliable:function(){return t(),o},pixelBoxStyles:function(){return t(),s},pixelPosition:function(){return t(),i},reliableMarginLeft:function(){return t(),u},scrollboxSize:function(){return t(),a}}))}();function Fe(e,t,n){var r,i,o,a,s=e.style;return(n=n||$e(e))&&(""!==(a=n.getPropertyValue(t)||n[t])||w.contains(e.ownerDocument,e)||(a=w.style(e,t)),!h.pixelBoxStyles()&&We.test(a)&&Be.test(t)&&(r=s.width,i=s.minWidth,o=s.maxWidth,s.minWidth=s.maxWidth=s.width=a,a=n.width,s.width=r,s.minWidth=i,s.maxWidth=o)),void 0!==a?a+"":a}function _e(e,t){return{get:function(){if(!e())return(this.get=t).apply(this,arguments);delete this.get}}}var ze=/^(none|table(?!-c[ea]).+)/,Xe=/^--/,Ue={position:"absolute",visibility:"hidden",display:"block"},Ve={letterSpacing:"0",fontWeight:"400"},Ge=["Webkit","Moz","ms"],Ye=r.createElement("div").style;function Qe(e){if(e in Ye)return e;var t=e[0].toUpperCase()+e.slice(1),n=Ge.length;while(n--)if((e=Ge[n]+t)in Ye)return e}function Je(e){var t=w.cssProps[e];return t||(t=w.cssProps[e]=Qe(e)||e),t}function Ke(e,t,n){var r=ie.exec(t);return r?Math.max(0,r[2]-(n||0))+(r[3]||"px"):t}function Ze(e,t,n,r,i,o){var a="width"===t?1:0,s=0,u=0;if(n===(r?"border":"content"))return 
0;for(;a<4;a+=2)"margin"===n&&(u+=w.css(e,n+oe[a],!0,i)),r?("content"===n&&(u-=w.css(e,"padding"+oe[a],!0,i)),"margin"!==n&&(u-=w.css(e,"border"+oe[a]+"Width",!0,i))):(u+=w.css(e,"padding"+oe[a],!0,i),"padding"!==n?u+=w.css(e,"border"+oe[a]+"Width",!0,i):s+=w.css(e,"border"+oe[a]+"Width",!0,i));return!r&&o>=0&&(u+=Math.max(0,Math.ceil(e["offset"+t[0].toUpperCase()+t.slice(1)]-o-u-s-.5))),u}function et(e,t,n){var r=$e(e),i=Fe(e,t,r),o="border-box"===w.css(e,"boxSizing",!1,r),a=o;if(We.test(i)){if(!n)return i;i="auto"}return a=a&&(h.boxSizingReliable()||i===e.style[t]),("auto"===i||!parseFloat(i)&&"inline"===w.css(e,"display",!1,r))&&(i=e["offset"+t[0].toUpperCase()+t.slice(1)],a=!0),(i=parseFloat(i)||0)+Ze(e,t,n||(o?"border":"content"),a,r,i)+"px"}w.extend({cssHooks:{opacity:{get:function(e,t){if(t){var n=Fe(e,"opacity");return""===n?"1":n}}}},cssNumber:{animationIterationCount:!0,columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{},style:function(e,t,n,r){if(e&&3!==e.nodeType&&8!==e.nodeType&&e.style){var i,o,a,s=G(t),u=Xe.test(t),l=e.style;if(u||(t=Je(s)),a=w.cssHooks[t]||w.cssHooks[s],void 0===n)return a&&"get"in a&&void 0!==(i=a.get(e,!1,r))?i:l[t];"string"==(o=typeof n)&&(i=ie.exec(n))&&i[1]&&(n=ue(e,t,i),o="number"),null!=n&&n===n&&("number"===o&&(n+=i&&i[3]||(w.cssNumber[s]?"":"px")),h.clearCloneStyle||""!==n||0!==t.indexOf("background")||(l[t]="inherit"),a&&"set"in a&&void 0===(n=a.set(e,n,r))||(u?l.setProperty(t,n):l[t]=n))}},css:function(e,t,n,r){var i,o,a,s=G(t);return Xe.test(t)||(t=Je(s)),(a=w.cssHooks[t]||w.cssHooks[s])&&"get"in a&&(i=a.get(e,!0,n)),void 0===i&&(i=Fe(e,t,r)),"normal"===i&&t in Ve&&(i=Ve[t]),""===n||n?(o=parseFloat(i),!0===n||isFinite(o)?o||0:i):i}}),w.each(["height","width"],function(e,t){w.cssHooks[t]={get:function(e,n,r){if(n)return!ze.test(w.css(e,"display"))||e.getClientRects().length&&e.getBoundingClientRect().width?et(e,t,r):se(e,Ue,function(){return et(e,t,r)})},set:function(e,n,r){var i,o=$e(e),a="border-box"===w.css(e,"boxSizing",!1,o),s=r&&Ze(e,t,r,a,o);return a&&h.scrollboxSize()===o.position&&(s-=Math.ceil(e["offset"+t[0].toUpperCase()+t.slice(1)]-parseFloat(o[t])-Ze(e,t,"border",!1,o)-.5)),s&&(i=ie.exec(n))&&"px"!==(i[3]||"px")&&(e.style[t]=n,n=w.css(e,t)),Ke(e,n,s)}}}),w.cssHooks.marginLeft=_e(h.reliableMarginLeft,function(e,t){if(t)return(parseFloat(Fe(e,"marginLeft"))||e.getBoundingClientRect().left-se(e,{marginLeft:0},function(){return e.getBoundingClientRect().left}))+"px"}),w.each({margin:"",padding:"",border:"Width"},function(e,t){w.cssHooks[e+t]={expand:function(n){for(var r=0,i={},o="string"==typeof n?n.split(" "):[n];r<4;r++)i[e+oe[r]+t]=o[r]||o[r-2]||o[0];return i}},"margin"!==e&&(w.cssHooks[e+t].set=Ke)}),w.fn.extend({css:function(e,t){return z(this,function(e,t,n){var r,i,o={},a=0;if(Array.isArray(t)){for(r=$e(e),i=t.length;a<i;a++)o[t[a]]=w.css(e,t[a],!1,r);return o}return void 0!==n?w.style(e,t,n):w.css(e,t)},e,t,arguments.length>1)}});function tt(e,t,n,r,i){return new tt.prototype.init(e,t,n,r,i)}w.Tween=tt,tt.prototype={constructor:tt,init:function(e,t,n,r,i,o){this.elem=e,this.prop=n,this.easing=i||w.easing._default,this.options=t,this.start=this.now=this.cur(),this.end=r,this.unit=o||(w.cssNumber[n]?"":"px")},cur:function(){var e=tt.propHooks[this.prop];return e&&e.get?e.get(this):tt.propHooks._default.get(this)},run:function(e){var t,n=tt.propHooks[this.prop];return 
this.options.duration?this.pos=t=w.easing[this.easing](e,this.options.duration*e,0,1,this.options.duration):this.pos=t=e,this.now=(this.end-this.start)*t+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),n&&n.set?n.set(this):tt.propHooks._default.set(this),this}},tt.prototype.init.prototype=tt.prototype,tt.propHooks={_default:{get:function(e){var t;return 1!==e.elem.nodeType||null!=e.elem[e.prop]&&null==e.elem.style[e.prop]?e.elem[e.prop]:(t=w.css(e.elem,e.prop,""))&&"auto"!==t?t:0},set:function(e){w.fx.step[e.prop]?w.fx.step[e.prop](e):1!==e.elem.nodeType||null==e.elem.style[w.cssProps[e.prop]]&&!w.cssHooks[e.prop]?e.elem[e.prop]=e.now:w.style(e.elem,e.prop,e.now+e.unit)}}},tt.propHooks.scrollTop=tt.propHooks.scrollLeft={set:function(e){e.elem.nodeType&&e.elem.parentNode&&(e.elem[e.prop]=e.now)}},w.easing={linear:function(e){return e},swing:function(e){return.5-Math.cos(e*Math.PI)/2},_default:"swing"},w.fx=tt.prototype.init,w.fx.step={};var nt,rt,it=/^(?:toggle|show|hide)$/,ot=/queueHooks$/;function at(){rt&&(!1===r.hidden&&e.requestAnimationFrame?e.requestAnimationFrame(at):e.setTimeout(at,w.fx.interval),w.fx.tick())}function st(){return e.setTimeout(function(){nt=void 0}),nt=Date.now()}function ut(e,t){var n,r=0,i={height:e};for(t=t?1:0;r<4;r+=2-t)i["margin"+(n=oe[r])]=i["padding"+n]=e;return t&&(i.opacity=i.width=e),i}function lt(e,t,n){for(var r,i=(pt.tweeners[t]||[]).concat(pt.tweeners["*"]),o=0,a=i.length;o<a;o++)if(r=i[o].call(n,t,e))return r}function ct(e,t,n){var r,i,o,a,s,u,l,c,f="width"in t||"height"in t,p=this,d={},h=e.style,g=e.nodeType&&ae(e),y=J.get(e,"fxshow");n.queue||(null==(a=w._queueHooks(e,"fx")).unqueued&&(a.unqueued=0,s=a.empty.fire,a.empty.fire=function(){a.unqueued||s()}),a.unqueued++,p.always(function(){p.always(function(){a.unqueued--,w.queue(e,"fx").length||a.empty.fire()})}));for(r in t)if(i=t[r],it.test(i)){if(delete t[r],o=o||"toggle"===i,i===(g?"hide":"show")){if("show"!==i||!y||void 0===y[r])continue;g=!0}d[r]=y&&y[r]||w.style(e,r)}if((u=!w.isEmptyObject(t))||!w.isEmptyObject(d)){f&&1===e.nodeType&&(n.overflow=[h.overflow,h.overflowX,h.overflowY],null==(l=y&&y.display)&&(l=J.get(e,"display")),"none"===(c=w.css(e,"display"))&&(l?c=l:(fe([e],!0),l=e.style.display||l,c=w.css(e,"display"),fe([e]))),("inline"===c||"inline-block"===c&&null!=l)&&"none"===w.css(e,"float")&&(u||(p.done(function(){h.display=l}),null==l&&(c=h.display,l="none"===c?"":c)),h.display="inline-block")),n.overflow&&(h.overflow="hidden",p.always(function(){h.overflow=n.overflow[0],h.overflowX=n.overflow[1],h.overflowY=n.overflow[2]})),u=!1;for(r in d)u||(y?"hidden"in y&&(g=y.hidden):y=J.access(e,"fxshow",{display:l}),o&&(y.hidden=!g),g&&fe([e],!0),p.done(function(){g||fe([e]),J.remove(e,"fxshow");for(r in d)w.style(e,r,d[r])})),u=lt(g?y[r]:0,r,p),r in y||(y[r]=u.start,g&&(u.end=u.start,u.start=0))}}function ft(e,t){var n,r,i,o,a;for(n in e)if(r=G(n),i=t[r],o=e[n],Array.isArray(o)&&(i=o[1],o=e[n]=o[0]),n!==r&&(e[r]=o,delete e[n]),(a=w.cssHooks[r])&&"expand"in a){o=a.expand(o),delete e[r];for(n in o)n in e||(e[n]=o[n],t[n]=i)}else t[r]=i}function pt(e,t,n){var r,i,o=0,a=pt.prefilters.length,s=w.Deferred().always(function(){delete u.elem}),u=function(){if(i)return!1;for(var t=nt||st(),n=Math.max(0,l.startTime+l.duration-t),r=1-(n/l.duration||0),o=0,a=l.tweens.length;o<a;o++)l.tweens[o].run(r);return 
s.notifyWith(e,[l,r,n]),r<1&&a?n:(a||s.notifyWith(e,[l,1,0]),s.resolveWith(e,[l]),!1)},l=s.promise({elem:e,props:w.extend({},t),opts:w.extend(!0,{specialEasing:{},easing:w.easing._default},n),originalProperties:t,originalOptions:n,startTime:nt||st(),duration:n.duration,tweens:[],createTween:function(t,n){var r=w.Tween(e,l.opts,t,n,l.opts.specialEasing[t]||l.opts.easing);return l.tweens.push(r),r},stop:function(t){var n=0,r=t?l.tweens.length:0;if(i)return this;for(i=!0;n<r;n++)l.tweens[n].run(1);return t?(s.notifyWith(e,[l,1,0]),s.resolveWith(e,[l,t])):s.rejectWith(e,[l,t]),this}}),c=l.props;for(ft(c,l.opts.specialEasing);o<a;o++)if(r=pt.prefilters[o].call(l,e,c,l.opts))return g(r.stop)&&(w._queueHooks(l.elem,l.opts.queue).stop=r.stop.bind(r)),r;return w.map(c,lt,l),g(l.opts.start)&&l.opts.start.call(e,l),l.progress(l.opts.progress).done(l.opts.done,l.opts.complete).fail(l.opts.fail).always(l.opts.always),w.fx.timer(w.extend(u,{elem:e,anim:l,queue:l.opts.queue})),l}w.Animation=w.extend(pt,{tweeners:{"*":[function(e,t){var n=this.createTween(e,t);return ue(n.elem,e,ie.exec(t),n),n}]},tweener:function(e,t){g(e)?(t=e,e=["*"]):e=e.match(M);for(var n,r=0,i=e.length;r<i;r++)n=e[r],pt.tweeners[n]=pt.tweeners[n]||[],pt.tweeners[n].unshift(t)},prefilters:[ct],prefilter:function(e,t){t?pt.prefilters.unshift(e):pt.prefilters.push(e)}}),w.speed=function(e,t,n){var r=e&&"object"==typeof e?w.extend({},e):{complete:n||!n&&t||g(e)&&e,duration:e,easing:n&&t||t&&!g(t)&&t};return w.fx.off?r.duration=0:"number"!=typeof r.duration&&(r.duration in w.fx.speeds?r.duration=w.fx.speeds[r.duration]:r.duration=w.fx.speeds._default),null!=r.queue&&!0!==r.queue||(r.queue="fx"),r.old=r.complete,r.complete=function(){g(r.old)&&r.old.call(this),r.queue&&w.dequeue(this,r.queue)},r},w.fn.extend({fadeTo:function(e,t,n,r){return this.filter(ae).css("opacity",0).show().end().animate({opacity:t},e,n,r)},animate:function(e,t,n,r){var i=w.isEmptyObject(e),o=w.speed(t,n,r),a=function(){var t=pt(this,w.extend({},e),o);(i||J.get(this,"finish"))&&t.stop(!0)};return a.finish=a,i||!1===o.queue?this.each(a):this.queue(o.queue,a)},stop:function(e,t,n){var r=function(e){var t=e.stop;delete e.stop,t(n)};return"string"!=typeof e&&(n=t,t=e,e=void 0),t&&!1!==e&&this.queue(e||"fx",[]),this.each(function(){var t=!0,i=null!=e&&e+"queueHooks",o=w.timers,a=J.get(this);if(i)a[i]&&a[i].stop&&r(a[i]);else for(i in a)a[i]&&a[i].stop&&ot.test(i)&&r(a[i]);for(i=o.length;i--;)o[i].elem!==this||null!=e&&o[i].queue!==e||(o[i].anim.stop(n),t=!1,o.splice(i,1));!t&&n||w.dequeue(this,e)})},finish:function(e){return!1!==e&&(e=e||"fx"),this.each(function(){var t,n=J.get(this),r=n[e+"queue"],i=n[e+"queueHooks"],o=w.timers,a=r?r.length:0;for(n.finish=!0,w.queue(this,e,[]),i&&i.stop&&i.stop.call(this,!0),t=o.length;t--;)o[t].elem===this&&o[t].queue===e&&(o[t].anim.stop(!0),o.splice(t,1));for(t=0;t<a;t++)r[t]&&r[t].finish&&r[t].finish.call(this);delete n.finish})}}),w.each(["toggle","show","hide"],function(e,t){var n=w.fn[t];w.fn[t]=function(e,r,i){return null==e||"boolean"==typeof e?n.apply(this,arguments):this.animate(ut(t,!0),e,r,i)}}),w.each({slideDown:ut("show"),slideUp:ut("hide"),slideToggle:ut("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(e,t){w.fn[e]=function(e,n,r){return this.animate(t,e,n,r)}}),w.timers=[],w.fx.tick=function(){var e,t=0,n=w.timers;for(nt=Date.now();t<n.length;t++)(e=n[t])()||n[t]!==e||n.splice(t--,1);n.length||w.fx.stop(),nt=void 
0},w.fx.timer=function(e){w.timers.push(e),w.fx.start()},w.fx.interval=13,w.fx.start=function(){rt||(rt=!0,at())},w.fx.stop=function(){rt=null},w.fx.speeds={slow:600,fast:200,_default:400},w.fn.delay=function(t,n){return t=w.fx?w.fx.speeds[t]||t:t,n=n||"fx",this.queue(n,function(n,r){var i=e.setTimeout(n,t);r.stop=function(){e.clearTimeout(i)}})},function(){var e=r.createElement("input"),t=r.createElement("select").appendChild(r.createElement("option"));e.type="checkbox",h.checkOn=""!==e.value,h.optSelected=t.selected,(e=r.createElement("input")).value="t",e.type="radio",h.radioValue="t"===e.value}();var dt,ht=w.expr.attrHandle;w.fn.extend({attr:function(e,t){return z(this,w.attr,e,t,arguments.length>1)},removeAttr:function(e){return this.each(function(){w.removeAttr(this,e)})}}),w.extend({attr:function(e,t,n){var r,i,o=e.nodeType;if(3!==o&&8!==o&&2!==o)return"undefined"==typeof e.getAttribute?w.prop(e,t,n):(1===o&&w.isXMLDoc(e)||(i=w.attrHooks[t.toLowerCase()]||(w.expr.match.bool.test(t)?dt:void 0)),void 0!==n?null===n?void w.removeAttr(e,t):i&&"set"in i&&void 0!==(r=i.set(e,n,t))?r:(e.setAttribute(t,n+""),n):i&&"get"in i&&null!==(r=i.get(e,t))?r:null==(r=w.find.attr(e,t))?void 0:r)},attrHooks:{type:{set:function(e,t){if(!h.radioValue&&"radio"===t&&N(e,"input")){var n=e.value;return e.setAttribute("type",t),n&&(e.value=n),t}}}},removeAttr:function(e,t){var n,r=0,i=t&&t.match(M);if(i&&1===e.nodeType)while(n=i[r++])e.removeAttribute(n)}}),dt={set:function(e,t,n){return!1===t?w.removeAttr(e,n):e.setAttribute(n,n),n}},w.each(w.expr.match.bool.source.match(/\w+/g),function(e,t){var n=ht[t]||w.find.attr;ht[t]=function(e,t,r){var i,o,a=t.toLowerCase();return r||(o=ht[a],ht[a]=i,i=null!=n(e,t,r)?a:null,ht[a]=o),i}});var gt=/^(?:input|select|textarea|button)$/i,yt=/^(?:a|area)$/i;w.fn.extend({prop:function(e,t){return z(this,w.prop,e,t,arguments.length>1)},removeProp:function(e){return this.each(function(){delete this[w.propFix[e]||e]})}}),w.extend({prop:function(e,t,n){var r,i,o=e.nodeType;if(3!==o&&8!==o&&2!==o)return 1===o&&w.isXMLDoc(e)||(t=w.propFix[t]||t,i=w.propHooks[t]),void 0!==n?i&&"set"in i&&void 0!==(r=i.set(e,n,t))?r:e[t]=n:i&&"get"in i&&null!==(r=i.get(e,t))?r:e[t]},propHooks:{tabIndex:{get:function(e){var t=w.find.attr(e,"tabindex");return t?parseInt(t,10):gt.test(e.nodeName)||yt.test(e.nodeName)&&e.href?0:-1}}},propFix:{"for":"htmlFor","class":"className"}}),h.optSelected||(w.propHooks.selected={get:function(e){var t=e.parentNode;return t&&t.parentNode&&t.parentNode.selectedIndex,null},set:function(e){var t=e.parentNode;t&&(t.selectedIndex,t.parentNode&&t.parentNode.selectedIndex)}}),w.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){w.propFix[this.toLowerCase()]=this});function vt(e){return(e.match(M)||[]).join(" ")}function mt(e){return e.getAttribute&&e.getAttribute("class")||""}function xt(e){return Array.isArray(e)?e:"string"==typeof e?e.match(M)||[]:[]}w.fn.extend({addClass:function(e){var t,n,r,i,o,a,s,u=0;if(g(e))return this.each(function(t){w(this).addClass(e.call(this,t,mt(this)))});if((t=xt(e)).length)while(n=this[u++])if(i=mt(n),r=1===n.nodeType&&" "+vt(i)+" "){a=0;while(o=t[a++])r.indexOf(" "+o+" ")<0&&(r+=o+" ");i!==(s=vt(r))&&n.setAttribute("class",s)}return this},removeClass:function(e){var t,n,r,i,o,a,s,u=0;if(g(e))return this.each(function(t){w(this).removeClass(e.call(this,t,mt(this)))});if(!arguments.length)return 
this.attr("class","");if((t=xt(e)).length)while(n=this[u++])if(i=mt(n),r=1===n.nodeType&&" "+vt(i)+" "){a=0;while(o=t[a++])while(r.indexOf(" "+o+" ")>-1)r=r.replace(" "+o+" "," ");i!==(s=vt(r))&&n.setAttribute("class",s)}return this},toggleClass:function(e,t){var n=typeof e,r="string"===n||Array.isArray(e);return"boolean"==typeof t&&r?t?this.addClass(e):this.removeClass(e):g(e)?this.each(function(n){w(this).toggleClass(e.call(this,n,mt(this),t),t)}):this.each(function(){var t,i,o,a;if(r){i=0,o=w(this),a=xt(e);while(t=a[i++])o.hasClass(t)?o.removeClass(t):o.addClass(t)}else void 0!==e&&"boolean"!==n||((t=mt(this))&&J.set(this,"__className__",t),this.setAttribute&&this.setAttribute("class",t||!1===e?"":J.get(this,"__className__")||""))})},hasClass:function(e){var t,n,r=0;t=" "+e+" ";while(n=this[r++])if(1===n.nodeType&&(" "+vt(mt(n))+" ").indexOf(t)>-1)return!0;return!1}});var bt=/\r/g;w.fn.extend({val:function(e){var t,n,r,i=this[0];{if(arguments.length)return r=g(e),this.each(function(n){var i;1===this.nodeType&&(null==(i=r?e.call(this,n,w(this).val()):e)?i="":"number"==typeof i?i+="":Array.isArray(i)&&(i=w.map(i,function(e){return null==e?"":e+""})),(t=w.valHooks[this.type]||w.valHooks[this.nodeName.toLowerCase()])&&"set"in t&&void 0!==t.set(this,i,"value")||(this.value=i))});if(i)return(t=w.valHooks[i.type]||w.valHooks[i.nodeName.toLowerCase()])&&"get"in t&&void 0!==(n=t.get(i,"value"))?n:"string"==typeof(n=i.value)?n.replace(bt,""):null==n?"":n}}}),w.extend({valHooks:{option:{get:function(e){var t=w.find.attr(e,"value");return null!=t?t:vt(w.text(e))}},select:{get:function(e){var t,n,r,i=e.options,o=e.selectedIndex,a="select-one"===e.type,s=a?null:[],u=a?o+1:i.length;for(r=o<0?u:a?o:0;r<u;r++)if(((n=i[r]).selected||r===o)&&!n.disabled&&(!n.parentNode.disabled||!N(n.parentNode,"optgroup"))){if(t=w(n).val(),a)return t;s.push(t)}return s},set:function(e,t){var n,r,i=e.options,o=w.makeArray(t),a=i.length;while(a--)((r=i[a]).selected=w.inArray(w.valHooks.option.get(r),o)>-1)&&(n=!0);return n||(e.selectedIndex=-1),o}}}}),w.each(["radio","checkbox"],function(){w.valHooks[this]={set:function(e,t){if(Array.isArray(t))return e.checked=w.inArray(w(e).val(),t)>-1}},h.checkOn||(w.valHooks[this].get=function(e){return null===e.getAttribute("value")?"on":e.value})}),h.focusin="onfocusin"in e;var wt=/^(?:focusinfocus|focusoutblur)$/,Tt=function(e){e.stopPropagation()};w.extend(w.event,{trigger:function(t,n,i,o){var a,s,u,l,c,p,d,h,v=[i||r],m=f.call(t,"type")?t.type:t,x=f.call(t,"namespace")?t.namespace.split("."):[];if(s=h=u=i=i||r,3!==i.nodeType&&8!==i.nodeType&&!wt.test(m+w.event.triggered)&&(m.indexOf(".")>-1&&(m=(x=m.split(".")).shift(),x.sort()),c=m.indexOf(":")<0&&"on"+m,t=t[w.expando]?t:new w.Event(m,"object"==typeof t&&t),t.isTrigger=o?2:3,t.namespace=x.join("."),t.rnamespace=t.namespace?new RegExp("(^|\\.)"+x.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,t.result=void 0,t.target||(t.target=i),n=null==n?[t]:w.makeArray(n,[t]),d=w.event.special[m]||{},o||!d.trigger||!1!==d.trigger.apply(i,n))){if(!o&&!d.noBubble&&!y(i)){for(l=d.delegateType||m,wt.test(l+m)||(s=s.parentNode);s;s=s.parentNode)v.push(s),u=s;u===(i.ownerDocument||r)&&v.push(u.defaultView||u.parentWindow||e)}a=0;while((s=v[a++])&&!t.isPropagationStopped())h=s,t.type=a>1?l:d.bindType||m,(p=(J.get(s,"events")||{})[t.type]&&J.get(s,"handle"))&&p.apply(s,n),(p=c&&s[c])&&p.apply&&Y(s)&&(t.result=p.apply(s,n),!1===t.result&&t.preventDefault());return 
t.type=m,o||t.isDefaultPrevented()||d._default&&!1!==d._default.apply(v.pop(),n)||!Y(i)||c&&g(i[m])&&!y(i)&&((u=i[c])&&(i[c]=null),w.event.triggered=m,t.isPropagationStopped()&&h.addEventListener(m,Tt),i[m](),t.isPropagationStopped()&&h.removeEventListener(m,Tt),w.event.triggered=void 0,u&&(i[c]=u)),t.result}},simulate:function(e,t,n){var r=w.extend(new w.Event,n,{type:e,isSimulated:!0});w.event.trigger(r,null,t)}}),w.fn.extend({trigger:function(e,t){return this.each(function(){w.event.trigger(e,t,this)})},triggerHandler:function(e,t){var n=this[0];if(n)return w.event.trigger(e,t,n,!0)}}),h.focusin||w.each({focus:"focusin",blur:"focusout"},function(e,t){var n=function(e){w.event.simulate(t,e.target,w.event.fix(e))};w.event.special[t]={setup:function(){var r=this.ownerDocument||this,i=J.access(r,t);i||r.addEventListener(e,n,!0),J.access(r,t,(i||0)+1)},teardown:function(){var r=this.ownerDocument||this,i=J.access(r,t)-1;i?J.access(r,t,i):(r.removeEventListener(e,n,!0),J.remove(r,t))}}});var Ct=e.location,Et=Date.now(),kt=/\?/;w.parseXML=function(t){var n;if(!t||"string"!=typeof t)return null;try{n=(new e.DOMParser).parseFromString(t,"text/xml")}catch(e){n=void 0}return n&&!n.getElementsByTagName("parsererror").length||w.error("Invalid XML: "+t),n};var St=/\[\]$/,Dt=/\r?\n/g,Nt=/^(?:submit|button|image|reset|file)$/i,At=/^(?:input|select|textarea|keygen)/i;function jt(e,t,n,r){var i;if(Array.isArray(t))w.each(t,function(t,i){n||St.test(e)?r(e,i):jt(e+"["+("object"==typeof i&&null!=i?t:"")+"]",i,n,r)});else if(n||"object"!==x(t))r(e,t);else for(i in t)jt(e+"["+i+"]",t[i],n,r)}w.param=function(e,t){var n,r=[],i=function(e,t){var n=g(t)?t():t;r[r.length]=encodeURIComponent(e)+"="+encodeURIComponent(null==n?"":n)};if(Array.isArray(e)||e.jquery&&!w.isPlainObject(e))w.each(e,function(){i(this.name,this.value)});else for(n in e)jt(n,e[n],t,i);return r.join("&")},w.fn.extend({serialize:function(){return w.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var e=w.prop(this,"elements");return e?w.makeArray(e):this}).filter(function(){var e=this.type;return this.name&&!w(this).is(":disabled")&&At.test(this.nodeName)&&!Nt.test(e)&&(this.checked||!pe.test(e))}).map(function(e,t){var n=w(this).val();return null==n?null:Array.isArray(n)?w.map(n,function(e){return{name:t.name,value:e.replace(Dt,"\r\n")}}):{name:t.name,value:n.replace(Dt,"\r\n")}}).get()}});var qt=/%20/g,Lt=/#.*$/,Ht=/([?&])_=[^&]*/,Ot=/^(.*?):[ \t]*([^\r\n]*)$/gm,Pt=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,Mt=/^(?:GET|HEAD)$/,Rt=/^\/\//,It={},Wt={},$t="*/".concat("*"),Bt=r.createElement("a");Bt.href=Ct.href;function Ft(e){return function(t,n){"string"!=typeof t&&(n=t,t="*");var r,i=0,o=t.toLowerCase().match(M)||[];if(g(n))while(r=o[i++])"+"===r[0]?(r=r.slice(1)||"*",(e[r]=e[r]||[]).unshift(n)):(e[r]=e[r]||[]).push(n)}}function _t(e,t,n,r){var i={},o=e===Wt;function a(s){var u;return i[s]=!0,w.each(e[s]||[],function(e,s){var l=s(t,n,r);return"string"!=typeof l||o||i[l]?o?!(u=l):void 0:(t.dataTypes.unshift(l),a(l),!1)}),u}return a(t.dataTypes[0])||!i["*"]&&a("*")}function zt(e,t){var n,r,i=w.ajaxSettings.flatOptions||{};for(n in t)void 0!==t[n]&&((i[n]?e:r||(r={}))[n]=t[n]);return r&&w.extend(!0,e,r),e}function Xt(e,t,n){var r,i,o,a,s=e.contents,u=e.dataTypes;while("*"===u[0])u.shift(),void 0===r&&(r=e.mimeType||t.getResponseHeader("Content-Type"));if(r)for(i in s)if(s[i]&&s[i].test(r)){u.unshift(i);break}if(u[0]in n)o=u[0];else{for(i in n){if(!u[0]||e.converters[i+" 
"+u[0]]){o=i;break}a||(a=i)}o=o||a}if(o)return o!==u[0]&&u.unshift(o),n[o]}function Ut(e,t,n,r){var i,o,a,s,u,l={},c=e.dataTypes.slice();if(c[1])for(a in e.converters)l[a.toLowerCase()]=e.converters[a];o=c.shift();while(o)if(e.responseFields[o]&&(n[e.responseFields[o]]=t),!u&&r&&e.dataFilter&&(t=e.dataFilter(t,e.dataType)),u=o,o=c.shift())if("*"===o)o=u;else if("*"!==u&&u!==o){if(!(a=l[u+" "+o]||l["* "+o]))for(i in l)if((s=i.split(" "))[1]===o&&(a=l[u+" "+s[0]]||l["* "+s[0]])){!0===a?a=l[i]:!0!==l[i]&&(o=s[0],c.unshift(s[1]));break}if(!0!==a)if(a&&e["throws"])t=a(t);else try{t=a(t)}catch(e){return{state:"parsererror",error:a?e:"No conversion from "+u+" to "+o}}}return{state:"success",data:t}}w.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:Ct.href,type:"GET",isLocal:Pt.test(Ct.protocol),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":$t,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/\bxml\b/,html:/\bhtml/,json:/\bjson\b/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":JSON.parse,"text xml":w.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(e,t){return t?zt(zt(e,w.ajaxSettings),t):zt(w.ajaxSettings,e)},ajaxPrefilter:Ft(It),ajaxTransport:Ft(Wt),ajax:function(t,n){"object"==typeof t&&(n=t,t=void 0),n=n||{};var i,o,a,s,u,l,c,f,p,d,h=w.ajaxSetup({},n),g=h.context||h,y=h.context&&(g.nodeType||g.jquery)?w(g):w.event,v=w.Deferred(),m=w.Callbacks("once memory"),x=h.statusCode||{},b={},T={},C="canceled",E={readyState:0,getResponseHeader:function(e){var t;if(c){if(!s){s={};while(t=Ot.exec(a))s[t[1].toLowerCase()]=t[2]}t=s[e.toLowerCase()]}return null==t?null:t},getAllResponseHeaders:function(){return c?a:null},setRequestHeader:function(e,t){return null==c&&(e=T[e.toLowerCase()]=T[e.toLowerCase()]||e,b[e]=t),this},overrideMimeType:function(e){return null==c&&(h.mimeType=e),this},statusCode:function(e){var t;if(e)if(c)E.always(e[E.status]);else for(t in e)x[t]=[x[t],e[t]];return this},abort:function(e){var t=e||C;return i&&i.abort(t),k(0,t),this}};if(v.promise(E),h.url=((t||h.url||Ct.href)+"").replace(Rt,Ct.protocol+"//"),h.type=n.method||n.type||h.method||h.type,h.dataTypes=(h.dataType||"*").toLowerCase().match(M)||[""],null==h.crossDomain){l=r.createElement("a");try{l.href=h.url,l.href=l.href,h.crossDomain=Bt.protocol+"//"+Bt.host!=l.protocol+"//"+l.host}catch(e){h.crossDomain=!0}}if(h.data&&h.processData&&"string"!=typeof h.data&&(h.data=w.param(h.data,h.traditional)),_t(It,h,n,E),c)return E;(f=w.event&&h.global)&&0==w.active++&&w.event.trigger("ajaxStart"),h.type=h.type.toUpperCase(),h.hasContent=!Mt.test(h.type),o=h.url.replace(Lt,""),h.hasContent?h.data&&h.processData&&0===(h.contentType||"").indexOf("application/x-www-form-urlencoded")&&(h.data=h.data.replace(qt,"+")):(d=h.url.slice(o.length),h.data&&(h.processData||"string"==typeof h.data)&&(o+=(kt.test(o)?"&":"?")+h.data,delete h.data),!1===h.cache&&(o=o.replace(Ht,"$1"),d=(kt.test(o)?"&":"?")+"_="+Et+++d),h.url=o+d),h.ifModified&&(w.lastModified[o]&&E.setRequestHeader("If-Modified-Since",w.lastModified[o]),w.etag[o]&&E.setRequestHeader("If-None-Match",w.etag[o])),(h.data&&h.hasContent&&!1!==h.contentType||n.contentType)&&E.setRequestHeader("Content-Type",h.contentType),E.setRequestHeader("Accept",h.dataTypes[0]&&h.accepts[h.dataTypes[0]]?h.accepts[h.dataTypes[0]]+("*"!==h.dataTypes[0]?", "+$t+"; 
q=0.01":""):h.accepts["*"]);for(p in h.headers)E.setRequestHeader(p,h.headers[p]);if(h.beforeSend&&(!1===h.beforeSend.call(g,E,h)||c))return E.abort();if(C="abort",m.add(h.complete),E.done(h.success),E.fail(h.error),i=_t(Wt,h,n,E)){if(E.readyState=1,f&&y.trigger("ajaxSend",[E,h]),c)return E;h.async&&h.timeout>0&&(u=e.setTimeout(function(){E.abort("timeout")},h.timeout));try{c=!1,i.send(b,k)}catch(e){if(c)throw e;k(-1,e)}}else k(-1,"No Transport");function k(t,n,r,s){var l,p,d,b,T,C=n;c||(c=!0,u&&e.clearTimeout(u),i=void 0,a=s||"",E.readyState=t>0?4:0,l=t>=200&&t<300||304===t,r&&(b=Xt(h,E,r)),b=Ut(h,b,E,l),l?(h.ifModified&&((T=E.getResponseHeader("Last-Modified"))&&(w.lastModified[o]=T),(T=E.getResponseHeader("etag"))&&(w.etag[o]=T)),204===t||"HEAD"===h.type?C="nocontent":304===t?C="notmodified":(C=b.state,p=b.data,l=!(d=b.error))):(d=C,!t&&C||(C="error",t<0&&(t=0))),E.status=t,E.statusText=(n||C)+"",l?v.resolveWith(g,[p,C,E]):v.rejectWith(g,[E,C,d]),E.statusCode(x),x=void 0,f&&y.trigger(l?"ajaxSuccess":"ajaxError",[E,h,l?p:d]),m.fireWith(g,[E,C]),f&&(y.trigger("ajaxComplete",[E,h]),--w.active||w.event.trigger("ajaxStop")))}return E},getJSON:function(e,t,n){return w.get(e,t,n,"json")},getScript:function(e,t){return w.get(e,void 0,t,"script")}}),w.each(["get","post"],function(e,t){w[t]=function(e,n,r,i){return g(n)&&(i=i||r,r=n,n=void 0),w.ajax(w.extend({url:e,type:t,dataType:i,data:n,success:r},w.isPlainObject(e)&&e))}}),w._evalUrl=function(e){return w.ajax({url:e,type:"GET",dataType:"script",cache:!0,async:!1,global:!1,"throws":!0})},w.fn.extend({wrapAll:function(e){var t;return this[0]&&(g(e)&&(e=e.call(this[0])),t=w(e,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&t.insertBefore(this[0]),t.map(function(){var e=this;while(e.firstElementChild)e=e.firstElementChild;return e}).append(this)),this},wrapInner:function(e){return g(e)?this.each(function(t){w(this).wrapInner(e.call(this,t))}):this.each(function(){var t=w(this),n=t.contents();n.length?n.wrapAll(e):t.append(e)})},wrap:function(e){var t=g(e);return this.each(function(n){w(this).wrapAll(t?e.call(this,n):e)})},unwrap:function(e){return this.parent(e).not("body").each(function(){w(this).replaceWith(this.childNodes)}),this}}),w.expr.pseudos.hidden=function(e){return!w.expr.pseudos.visible(e)},w.expr.pseudos.visible=function(e){return!!(e.offsetWidth||e.offsetHeight||e.getClientRects().length)},w.ajaxSettings.xhr=function(){try{return new e.XMLHttpRequest}catch(e){}};var Vt={0:200,1223:204},Gt=w.ajaxSettings.xhr();h.cors=!!Gt&&"withCredentials"in Gt,h.ajax=Gt=!!Gt,w.ajaxTransport(function(t){var n,r;if(h.cors||Gt&&!t.crossDomain)return{send:function(i,o){var a,s=t.xhr();if(s.open(t.type,t.url,t.async,t.username,t.password),t.xhrFields)for(a in t.xhrFields)s[a]=t.xhrFields[a];t.mimeType&&s.overrideMimeType&&s.overrideMimeType(t.mimeType),t.crossDomain||i["X-Requested-With"]||(i["X-Requested-With"]="XMLHttpRequest");for(a in i)s.setRequestHeader(a,i[a]);n=function(e){return function(){n&&(n=r=s.onload=s.onerror=s.onabort=s.ontimeout=s.onreadystatechange=null,"abort"===e?s.abort():"error"===e?"number"!=typeof s.status?o(0,"error"):o(s.status,s.statusText):o(Vt[s.status]||s.status,s.statusText,"text"!==(s.responseType||"text")||"string"!=typeof s.responseText?{binary:s.response}:{text:s.responseText},s.getAllResponseHeaders()))}},s.onload=n(),r=s.onerror=s.ontimeout=n("error"),void 
0!==s.onabort?s.onabort=r:s.onreadystatechange=function(){4===s.readyState&&e.setTimeout(function(){n&&r()})},n=n("abort");try{s.send(t.hasContent&&t.data||null)}catch(e){if(n)throw e}},abort:function(){n&&n()}}}),w.ajaxPrefilter(function(e){e.crossDomain&&(e.contents.script=!1)}),w.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/\b(?:java|ecma)script\b/},converters:{"text script":function(e){return w.globalEval(e),e}}}),w.ajaxPrefilter("script",function(e){void 0===e.cache&&(e.cache=!1),e.crossDomain&&(e.type="GET")}),w.ajaxTransport("script",function(e){if(e.crossDomain){var t,n;return{send:function(i,o){t=w("<script>").prop({charset:e.scriptCharset,src:e.url}).on("load error",n=function(e){t.remove(),n=null,e&&o("error"===e.type?404:200,e.type)}),r.head.appendChild(t[0])},abort:function(){n&&n()}}}});var Yt=[],Qt=/(=)\?(?=&|$)|\?\?/;w.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=Yt.pop()||w.expando+"_"+Et++;return this[e]=!0,e}}),w.ajaxPrefilter("json jsonp",function(t,n,r){var i,o,a,s=!1!==t.jsonp&&(Qt.test(t.url)?"url":"string"==typeof t.data&&0===(t.contentType||"").indexOf("application/x-www-form-urlencoded")&&Qt.test(t.data)&&"data");if(s||"jsonp"===t.dataTypes[0])return i=t.jsonpCallback=g(t.jsonpCallback)?t.jsonpCallback():t.jsonpCallback,s?t[s]=t[s].replace(Qt,"$1"+i):!1!==t.jsonp&&(t.url+=(kt.test(t.url)?"&":"?")+t.jsonp+"="+i),t.converters["script json"]=function(){return a||w.error(i+" was not called"),a[0]},t.dataTypes[0]="json",o=e[i],e[i]=function(){a=arguments},r.always(function(){void 0===o?w(e).removeProp(i):e[i]=o,t[i]&&(t.jsonpCallback=n.jsonpCallback,Yt.push(i)),a&&g(o)&&o(a[0]),a=o=void 0}),"script"}),h.createHTMLDocument=function(){var e=r.implementation.createHTMLDocument("").body;return e.innerHTML="<form></form><form></form>",2===e.childNodes.length}(),w.parseHTML=function(e,t,n){if("string"!=typeof e)return[];"boolean"==typeof t&&(n=t,t=!1);var i,o,a;return t||(h.createHTMLDocument?((i=(t=r.implementation.createHTMLDocument("")).createElement("base")).href=r.location.href,t.head.appendChild(i)):t=r),o=A.exec(e),a=!n&&[],o?[t.createElement(o[1])]:(o=xe([e],t,a),a&&a.length&&w(a).remove(),w.merge([],o.childNodes))},w.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return s>-1&&(r=vt(e.slice(s)),e=e.slice(0,s)),g(t)?(n=t,t=void 0):t&&"object"==typeof t&&(i="POST"),a.length>0&&w.ajax({url:e,type:i||"GET",dataType:"html",data:t}).done(function(e){o=arguments,a.html(r?w("<div>").append(w.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},w.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){w.fn[t]=function(e){return this.on(t,e)}}),w.expr.pseudos.animated=function(e){return w.grep(w.timers,function(t){return e===t.elem}).length},w.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l,c=w.css(e,"position"),f=w(e),p={};"static"===c&&(e.style.position="relative"),s=f.offset(),o=w.css(e,"top"),u=w.css(e,"left"),(l=("absolute"===c||"fixed"===c)&&(o+u).indexOf("auto")>-1)?(a=(r=f.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),g(t)&&(t=t.call(e,n,w.extend({},s))),null!=t.top&&(p.top=t.top-s.top+a),null!=t.left&&(p.left=t.left-s.left+i),"using"in t?t.using.call(e,p):f.css(p)}},w.fn.extend({offset:function(e){if(arguments.length)return void 0===e?this:this.each(function(t){w.offset.setOffset(this,e,t)});var t,n,r=this[0];if(r)return 
r.getClientRects().length?(t=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:t.top+n.pageYOffset,left:t.left+n.pageXOffset}):{top:0,left:0}},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===w.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===w.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=w(e).offset()).top+=w.css(e,"borderTopWidth",!0),i.left+=w.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-w.css(r,"marginTop",!0),left:t.left-i.left-w.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===w.css(e,"position"))e=e.offsetParent;return e||be})}}),w.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(e,t){var n="pageYOffset"===t;w.fn[e]=function(r){return z(this,function(e,r,i){var o;if(y(e)?o=e:9===e.nodeType&&(o=e.defaultView),void 0===i)return o?o[t]:e[r];o?o.scrollTo(n?o.pageXOffset:i,n?i:o.pageYOffset):e[r]=i},e,r,arguments.length)}}),w.each(["top","left"],function(e,t){w.cssHooks[t]=_e(h.pixelPosition,function(e,n){if(n)return n=Fe(e,t),We.test(n)?w(e).position()[t]+"px":n})}),w.each({Height:"height",Width:"width"},function(e,t){w.each({padding:"inner"+e,content:t,"":"outer"+e},function(n,r){w.fn[r]=function(i,o){var a=arguments.length&&(n||"boolean"!=typeof i),s=n||(!0===i||!0===o?"margin":"border");return z(this,function(t,n,i){var o;return y(t)?0===r.indexOf("outer")?t["inner"+e]:t.document.documentElement["client"+e]:9===t.nodeType?(o=t.documentElement,Math.max(t.body["scroll"+e],o["scroll"+e],t.body["offset"+e],o["offset"+e],o["client"+e])):void 0===i?w.css(t,n,s):w.style(t,n,i,s)},t,a?i:void 0,a)}})}),w.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,t){w.fn[t]=function(e,n){return arguments.length>0?this.on(t,null,e,n):this.trigger(t)}}),w.fn.extend({hover:function(e,t){return this.mouseenter(e).mouseleave(t||e)}}),w.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)}}),w.proxy=function(e,t){var n,r,i;if("string"==typeof t&&(n=e[t],t=e,e=n),g(e))return r=o.call(arguments,2),i=function(){return e.apply(t||this,r.concat(o.call(arguments)))},i.guid=e.guid=e.guid||w.guid++,i},w.holdReady=function(e){e?w.readyWait++:w.ready(!0)},w.isArray=Array.isArray,w.parseJSON=JSON.parse,w.nodeName=N,w.isFunction=g,w.isWindow=y,w.camelCase=G,w.type=x,w.now=Date.now,w.isNumeric=function(e){var t=w.type(e);return("number"===t||"string"===t)&&!isNaN(e-parseFloat(e))},"function"==typeof define&&define.amd&&define("jquery",[],function(){return w});var Jt=e.jQuery,Kt=e.$;return w.noConflict=function(t){return e.$===w&&(e.$=Kt),t&&e.jQuery===w&&(e.jQuery=Jt),w},t||(e.jQuery=e.$=w),w});
diff --git a/dom/webgpu/tests/cts/checkout/standalone/third_party/normalize.min.css b/dom/webgpu/tests/cts/checkout/standalone/third_party/normalize.min.css
new file mode 100644
index 0000000000..8ba678f608
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/standalone/third_party/normalize.min.css
@@ -0,0 +1 @@
+/*! normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */html{line-height:1.15;-webkit-text-size-adjust:100%}body{margin:0}main{display:block}h1{font-size:2em;margin:.67em 0}hr{box-sizing:content-box;height:0;overflow:visible}pre{font-family:monospace,monospace;font-size:1em}a{background-color:transparent}abbr[title]{border-bottom:none;text-decoration:underline;text-decoration:underline dotted}b,strong{font-weight:bolder}code,kbd,samp{font-family:monospace,monospace;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}img{border-style:none}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;line-height:1.15;margin:0}button,input{overflow:visible}button,select{text-transform:none}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner,button::-moz-focus-inner{border-style:none;padding:0}[type=button]:-moz-focusring,[type=reset]:-moz-focusring,[type=submit]:-moz-focusring,button:-moz-focusring{outline:1px dotted ButtonText}fieldset{padding:.35em .75em .625em}legend{box-sizing:border-box;color:inherit;display:table;max-width:100%;padding:0;white-space:normal}progress{vertical-align:baseline}textarea{overflow:auto}[type=checkbox],[type=radio]{box-sizing:border-box;padding:0}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}details{display:block}summary{display:list-item}template{display:none}[hidden]{display:none}
diff --git a/dom/webgpu/tests/cts/checkout/standalone/webgpu-logo-notext.svg b/dom/webgpu/tests/cts/checkout/standalone/webgpu-logo-notext.svg
new file mode 100644
index 0000000000..8e8c2bf72c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/standalone/webgpu-logo-notext.svg
@@ -0,0 +1,34 @@
+<svg id="Logo" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="768" height="600" viewBox="0 0 768 600">
+ <defs>
+ <style>
+ .cls-1 {
+ fill: #005a9c;
+ }
+
+ .cls-1, .cls-2, .cls-3, .cls-4, .cls-5, .cls-6 {
+ fill-rule: evenodd;
+ }
+
+ .cls-2 {
+ fill: #0066b0;
+ }
+
+ .cls-3 {
+ fill: #0076cc;
+ }
+
+ .cls-4 {
+ fill: #0086e8;
+ }
+
+ .cls-5 {
+ fill: #0093ff;
+ }
+ </style>
+ </defs>
+ <path id="Triangle_1" data-name="Triangle 1" class="cls-1" d="M265.5,504L24.745,87h481.51Z"/>
+ <path id="Triangle_2" data-name="Triangle 2" class="cls-2" d="M506.5,87L386,295H627Z"/>
+ <path id="Triangle_3" data-name="Triangle 3" class="cls-3" d="M506.5,503L386,295H627Z"/>
+ <path id="Triangle_4" data-name="Triangle 4" class="cls-4" d="M626.5,296L566,192H687Z"/>
+ <path id="Triangle_5" data-name="Triangle 5" class="cls-5" d="M626.5,88L566,192H687Z"/>
+</svg>
diff --git a/dom/webgpu/tests/cts/checkout/tools/checklist b/dom/webgpu/tests/cts/checkout/tools/checklist
new file mode 100755
index 0000000000..8aace4f387
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/tools/checklist
@@ -0,0 +1,11 @@
+#!/usr/bin/env node
+
+// Takes a list of queries and checks that:
+// - Every query matches something in the repository
+// - Every case in the repository matches exactly one query
+// This is used to ensure that the tracking spreadsheet is complete (not missing any tests)
+// and every query in it is valid (e.g. renames have been applied, and new tests added
+// to the spreadsheet have also been added to the CTS).
+
+require('../src/common/tools/setup-ts-in-node.js');
+require('../src/common/tools/checklist.ts');
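
The header comment above describes a two-way coverage check between a query list (the tracking spreadsheet) and the cases in the repository. Below is a minimal TypeScript sketch of that invariant, assuming made-up Query/TestCase types and a matchesQuery helper; the actual logic lives in src/common/tools/checklist.ts, which is not shown in this diff.

    // Sketch only: `Query`, `TestCase`, and `matchesQuery` are assumed stand-ins,
    // not the actual types used by src/common/tools/checklist.ts.
    interface Query { raw: string; }
    interface TestCase { name: string; }

    function checkCoverage(
      queries: Query[],
      cases: TestCase[],
      matchesQuery: (q: Query, c: TestCase) => boolean
    ): string[] {
      const problems: string[] = [];
      // Every query must match something in the repository.
      for (const q of queries) {
        if (!cases.some(c => matchesQuery(q, c))) {
          problems.push(`query matches nothing: ${q.raw}`);
        }
      }
      // Every case in the repository must match exactly one query.
      for (const c of cases) {
        const hits = queries.filter(q => matchesQuery(q, c)).length;
        if (hits !== 1) {
          problems.push(`case ${c.name} matched by ${hits} queries (expected 1)`);
        }
      }
      return problems;
    }
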
diff --git a/dom/webgpu/tests/cts/checkout/tools/dev_server b/dom/webgpu/tests/cts/checkout/tools/dev_server
new file mode 100755
index 0000000000..d400d79c19
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/tools/dev_server
@@ -0,0 +1,4 @@
+#!/usr/bin/env node
+
+require('../src/common/tools/setup-ts-in-node.js');
+require('../src/common/tools/dev_server.ts');
diff --git a/dom/webgpu/tests/cts/checkout/tools/eslint-plugin-gpuweb-cts/index.js b/dom/webgpu/tests/cts/checkout/tools/eslint-plugin-gpuweb-cts/index.js
new file mode 100644
index 0000000000..1f5bb211aa
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/tools/eslint-plugin-gpuweb-cts/index.js
@@ -0,0 +1,6 @@
+module.exports = {
+ rules: {
+ 'string-trailing-space': require('./trailing-space-anywhere'),
+ 'string-tabs': require('./tabs-anywhere'),
+ },
+};
diff --git a/dom/webgpu/tests/cts/checkout/tools/eslint-plugin-gpuweb-cts/package.json b/dom/webgpu/tests/cts/checkout/tools/eslint-plugin-gpuweb-cts/package.json
new file mode 100644
index 0000000000..5685ebfa3c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/tools/eslint-plugin-gpuweb-cts/package.json
@@ -0,0 +1,8 @@
+{
+ "name": "eslint-plugin-gpuweb-cts",
+ "version": "0.0.0",
+ "author": "WebGPU CTS Contributors",
+ "private": true,
+ "license": "BSD-3-Clause",
+ "main": "index.js"
+}
diff --git a/dom/webgpu/tests/cts/checkout/tools/eslint-plugin-gpuweb-cts/tabs-anywhere.js b/dom/webgpu/tests/cts/checkout/tools/eslint-plugin-gpuweb-cts/tabs-anywhere.js
new file mode 100644
index 0000000000..82238f8615
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/tools/eslint-plugin-gpuweb-cts/tabs-anywhere.js
@@ -0,0 +1,29 @@
+module.exports = {
+ meta: {
+ type: 'suggestion',
+ docs: {
+ description: 'Indentation tabs are not allowed, even in multiline strings, due to WPT lint rules. This rule simply disallows tabs anywhere.',
+ },
+ schema: [],
+ },
+ create: context => {
+ const sourceCode = context.getSourceCode();
+
+ return {
+ Program: node => {
+ for (let lineIdx = 0; lineIdx < sourceCode.lines.length; ++lineIdx) {
+ const line = sourceCode.lines[lineIdx];
+ const matches = line.matchAll(/\t/g);
+ for (const match of matches) {
+ context.report({
+ node,
+ loc: { line: lineIdx + 1, column: match.index },
+ message: 'Tabs not allowed.',
+ // fixer is hard to implement, so not implemented.
+ });
+ }
+ }
+ },
+ };
+ },
+};
diff --git a/dom/webgpu/tests/cts/checkout/tools/eslint-plugin-gpuweb-cts/trailing-space-anywhere.js b/dom/webgpu/tests/cts/checkout/tools/eslint-plugin-gpuweb-cts/trailing-space-anywhere.js
new file mode 100644
index 0000000000..811b379ff6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/tools/eslint-plugin-gpuweb-cts/trailing-space-anywhere.js
@@ -0,0 +1,29 @@
+module.exports = {
+ meta: {
+ type: 'suggestion',
+ docs: {
+ description: 'Trailing spaces are not allowed, even in multiline strings, due to WPT lint rules.',
+ },
+ schema: [],
+ },
+ create: context => {
+ const sourceCode = context.getSourceCode();
+
+ return {
+ Program: node => {
+ for (let lineIdx = 0; lineIdx < sourceCode.lines.length; ++lineIdx) {
+ const line = sourceCode.lines[lineIdx];
+ const match = /\s+$/.exec(line);
+ if (match) {
+ context.report({
+ node,
+ loc: { line: lineIdx + 1, column: match.index },
+ message: 'Trailing spaces not allowed.',
+ // fixer is hard to implement, so not implemented.
+ });
+ }
+ }
+ },
+ };
+ },
+};
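
The two rule modules above are exported from index.js under the keys 'string-trailing-space' and 'string-tabs', so a consuming ESLint config would reference them with the plugin prefix derived from the package name (eslint-plugin-gpuweb-cts). The repository's real ESLint configuration is not part of this diff; the following is only an assumed example of how the rules could be enabled.

    {
      "plugins": ["gpuweb-cts"],
      "rules": {
        "gpuweb-cts/string-trailing-space": "warn",
        "gpuweb-cts/string-tabs": "warn"
      }
    }
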
diff --git a/dom/webgpu/tests/cts/checkout/tools/gen_cache b/dom/webgpu/tests/cts/checkout/tools/gen_cache
new file mode 100755
index 0000000000..fd7bf52c2f
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/tools/gen_cache
@@ -0,0 +1,4 @@
+#!/usr/bin/env node
+
+require('../src/common/tools/setup-ts-in-node.js');
+require('../src/common/tools/gen_cache.ts');
diff --git a/dom/webgpu/tests/cts/checkout/tools/gen_listings b/dom/webgpu/tests/cts/checkout/tools/gen_listings
new file mode 100755
index 0000000000..6c25622423
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/tools/gen_listings
@@ -0,0 +1,7 @@
+#!/usr/bin/env node
+
+// Crawl a suite directory (e.g. src/webgpu/) to generate a listing.js containing
+// the listing of test files in the suite.
+
+require('../src/common/tools/setup-ts-in-node.js');
+require('../src/common/tools/gen_listings.ts');
diff --git a/dom/webgpu/tests/cts/checkout/tools/gen_version b/dom/webgpu/tests/cts/checkout/tools/gen_version
new file mode 100755
index 0000000000..53128ca2a0
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/tools/gen_version
@@ -0,0 +1,33 @@
+#!/usr/bin/env node
+
+// Get the current git hash, and save (overwrite) it into out/common/internal/version.js
+// so it can be read when running inside the browser.
+
+/* eslint-disable no-console */
+
+require('../src/common/tools/setup-ts-in-node.js');
+const fs = require('fs');
+
+const myself = 'tools/gen_version';
+if (!fs.existsSync(myself)) {
+ console.error('Must be run from repository root');
+ process.exit(1);
+}
+
+const { version } = require('../src/common/tools/version.ts');
+
+fs.mkdirSync('./out/common/internal', { recursive: true });
+// Overwrite the version.js generated by TypeScript compilation.
+fs.writeFileSync(
+ './out/common/internal/version.js',
+ `\
+// AUTO-GENERATED - DO NOT EDIT. See ${myself}.
+
+export const version = '${version}';
+`
+);
+
+// Since the generated version.js was overwritten, its source map is no longer relevant.
+try {
+ fs.unlinkSync('./out/common/internal/version.js.map');
+} catch (ex) { }
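
Because the script writes a plain ES module (export const version = ...), consumers can read the hash with a normal import. Here is a hypothetical consuming snippet, assuming a relative path that resolves to the file written above; the real import site in the framework is not shown in this diff.

    // Hypothetical consumer of the file written by tools/gen_version.
    // The path mirrors the fs.writeFileSync() target above.
    import { version } from '../out/common/internal/version.js';

    console.log(`WebGPU CTS built at revision ${version}`);
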
diff --git a/dom/webgpu/tests/cts/checkout/tools/gen_wpt_cfg_chunked2sec.json b/dom/webgpu/tests/cts/checkout/tools/gen_wpt_cfg_chunked2sec.json
new file mode 100644
index 0000000000..1d13e85c58
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/tools/gen_wpt_cfg_chunked2sec.json
@@ -0,0 +1,6 @@
+{
+ "suite": "webgpu",
+ "out": "../out-wpt/cts-chunked2sec.https.html",
+ "template": "../src/common/templates/cts.https.html",
+ "maxChunkTimeMS": 2000
+}
diff --git a/dom/webgpu/tests/cts/checkout/tools/gen_wpt_cfg_unchunked.json b/dom/webgpu/tests/cts/checkout/tools/gen_wpt_cfg_unchunked.json
new file mode 100644
index 0000000000..ffe06d5633
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/tools/gen_wpt_cfg_unchunked.json
@@ -0,0 +1,5 @@
+{
+ "suite": "webgpu",
+ "out": "../out-wpt/cts.https.html",
+ "template": "../src/common/templates/cts.https.html"
+}
diff --git a/dom/webgpu/tests/cts/checkout/tools/gen_wpt_cts_html b/dom/webgpu/tests/cts/checkout/tools/gen_wpt_cts_html
new file mode 100755
index 0000000000..07f1f465c7
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/tools/gen_wpt_cts_html
@@ -0,0 +1,39 @@
+#!/usr/bin/env node
+
+// Generate a top-level cts.https.html file for WPT.
+//
+// In the default invocation (used by grunt), just generates a cts.https.html with one "variant"
+// per test spec file (.spec.ts).
+//
+// In the advanced invocation, generate a list of variants, which are broken down as much as needed
+// to accommodate a provided list of suppressions, and no further. This reduces the total runtime of
+// the test suite by not generating an entire page load for every single test case.
+// The resulting cts.https.html can be checked in and used to run tests within browser harnesses.
+//
+// For example, for the following 9 cases:
+//
+// webgpu:a/foo:foo1={"x":1}
+// webgpu:a/foo:foo1={"x":2}
+// webgpu:a/foo:foo2={"x":1}
+// webgpu:a/foo:foo2={"x":2}
+// webgpu:a/bar:bar1={"x":1}
+// webgpu:a/bar:bar1={"x":2}
+// webgpu:a/bar:bar1={"x":3}
+// webgpu:a/bar:bar2={"x":1}
+// webgpu:a/bar:bar2={"x":2}
+//
+// and the following suppressions:
+//
+// [ Win ] ?q=webgpu:a/bar:bar1={"x":1} [ Failure ]
+// [ Mac ] ?q=webgpu:a/bar:bar1={"x":3} [ Failure ]
+//
+// the following list of 5 variants gives enough granularity to suppress only the failing cases:
+//
+// ?q=webgpu:a/foo:
+// ?q=webgpu:a/bar:bar1={"x":1} <- [ Win ]
+// ?q=webgpu:a/bar:bar1={"x":2}
+// ?q=webgpu:a/bar:bar1={"x":3} <- [ Mac ]
+// ?q=webgpu:a/bar:bar2~
+
+require('../src/common/tools/setup-ts-in-node.js');
+require('../src/common/tools/gen_wpt_cts_html.ts');
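
The worked example above captures the splitting rule: keep each variant as coarse as possible and only subdivide it while some suppression points at a strictly finer query. Below is a toy TypeScript sketch of that rule, using an assumed expandOneLevel helper and plain string-prefix matching instead of the real structured query handling in gen_wpt_cts_html.ts.

    // Sketch: split a variant only while some suppression needs a finer query.
    // `expandOneLevel` is an assumption for illustration; it must eventually
    // bottom out at single-case queries so the recursion terminates.
    function expandVariants(
      variants: string[],
      suppressions: string[],
      expandOneLevel: (variant: string) => string[]
    ): string[] {
      const out: string[] = [];
      for (const v of variants) {
        const needsSplit = suppressions.some(s => s.startsWith(v) && s !== v);
        if (needsSplit) {
          // Recurse into finer-grained variants until each suppression can be
          // targeted without over-suppressing its sibling cases.
          out.push(...expandVariants(expandOneLevel(v), suppressions, expandOneLevel));
        } else {
          out.push(v);
        }
      }
      return out;
    }
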
diff --git a/dom/webgpu/tests/cts/checkout/tools/merge_listing_times b/dom/webgpu/tests/cts/checkout/tools/merge_listing_times
new file mode 100755
index 0000000000..a9bcd2e71a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/tools/merge_listing_times
@@ -0,0 +1,9 @@
+#!/usr/bin/env node
+
+// See `docs/adding_timing_metadata.md` for an explanation of listing times, and
+// a walkthrough on adding entries for new tests.
+
+require('../src/common/tools/setup-ts-in-node.js');
+
+// See the help message in this file for info on how to use the tool.
+require('../src/common/tools/merge_listing_times.ts');
diff --git a/dom/webgpu/tests/cts/checkout/tools/run_deno b/dom/webgpu/tests/cts/checkout/tools/run_deno
new file mode 100755
index 0000000000..8cc89a475c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/tools/run_deno
@@ -0,0 +1,3 @@
+#!/usr/bin/env -S deno run --unstable --allow-read --allow-write --allow-env --allow-net=deno.land --no-check
+
+import '../out/common/runtime/cmdline.js';
\ No newline at end of file
diff --git a/dom/webgpu/tests/cts/checkout/tools/run_node b/dom/webgpu/tests/cts/checkout/tools/run_node
new file mode 100755
index 0000000000..b71ec9f134
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/tools/run_node
@@ -0,0 +1,6 @@
+#!/usr/bin/env node
+
+// Run test suites under node.
+
+require('../src/common/tools/setup-ts-in-node.js');
+require('../src/common/runtime/cmdline.ts');
diff --git a/dom/webgpu/tests/cts/checkout/tools/run_wpt_ref_tests b/dom/webgpu/tests/cts/checkout/tools/run_wpt_ref_tests
new file mode 100644
index 0000000000..79fd1b1b7c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/tools/run_wpt_ref_tests
@@ -0,0 +1,4 @@
+#!/usr/bin/env node
+
+require('../src/common/tools/setup-ts-in-node.js');
+require('../src/common/tools/run_wpt_ref_tests.ts');
diff --git a/dom/webgpu/tests/cts/checkout/tools/validate b/dom/webgpu/tests/cts/checkout/tools/validate
new file mode 100755
index 0000000000..03b3a61bb1
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/tools/validate
@@ -0,0 +1,6 @@
+#!/usr/bin/env node
+
+// Validate several properties of test files and the tests in them.
+
+require('../src/common/tools/setup-ts-in-node.js');
+require('../src/common/tools/validate.ts');
diff --git a/dom/webgpu/tests/cts/checkout/tools/websocket-logger/.gitignore b/dom/webgpu/tests/cts/checkout/tools/websocket-logger/.gitignore
new file mode 100644
index 0000000000..1c0f45a79c
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/tools/websocket-logger/.gitignore
@@ -0,0 +1 @@
+/wslog-*.txt
diff --git a/dom/webgpu/tests/cts/checkout/tools/websocket-logger/README.md b/dom/webgpu/tests/cts/checkout/tools/websocket-logger/README.md
new file mode 100644
index 0000000000..1328f12e97
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/tools/websocket-logger/README.md
@@ -0,0 +1,9 @@
+This simple utility receives messages via a WebSocket and writes them out to both the command line
+and a file called `wslog-TIMESTAMP.txt` in the working directory.
+
+It can be used to receive logs from CTS in a way that's resistant to test crashes and totally
+independent of which runtime is being used (e.g. standalone, WPT, Node).
+It's used in particular to capture timing results used to predefine the "chunking" of the CTS for WPT.
+
+To set up, use `npm ci`.
+To launch, use `npm start`.
diff --git a/dom/webgpu/tests/cts/checkout/tools/websocket-logger/main.js b/dom/webgpu/tests/cts/checkout/tools/websocket-logger/main.js
new file mode 100755
index 0000000000..4a5a89e762
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/tools/websocket-logger/main.js
@@ -0,0 +1,25 @@
+#!/usr/bin/env node
+
+import fs from 'fs/promises';
+import { WebSocketServer } from 'ws';
+
+const wss = new WebSocketServer({ port: 59497 });
+
+const timestamp = new Date().toISOString().slice(0, 19).replace(/[:]/g, '-')
+const filename = `wslog-${timestamp}.txt`
+const f = await fs.open(filename, 'w');
+console.log(`Writing to ${filename}`);
+console.log('Ctrl-C to stop');
+
+process.on('SIGINT', () => {
+ console.log(`\nWritten to ${filename}`);
+ process.exit();
+});
+
+wss.on('connection', async ws => {
+ ws.on('message', data => {
+ const s = data.toString();
+ f.write(s + '\n');
+ console.log(s);
+ });
+});
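
The server above simply records and echoes whatever text frames arrive on port 59497, so any WebSocket client can feed it. Below is a hypothetical sender using the same 'ws' package the logger depends on; the payload shape is invented for illustration and is not the CTS's actual log format.

    // Hypothetical client for the logger above; connects to the port
    // hard-coded in main.js and sends one line per log record.
    import { WebSocket } from 'ws';

    const ws = new WebSocket('ws://localhost:59497');
    ws.on('open', () => {
      ws.send(JSON.stringify({ test: 'webgpu:a/foo:foo1={"x":1}', ms: 12.5 }));
      ws.close();
    });
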
diff --git a/dom/webgpu/tests/cts/checkout/tools/websocket-logger/package-lock.json b/dom/webgpu/tests/cts/checkout/tools/websocket-logger/package-lock.json
new file mode 100644
index 0000000000..b43ae34804
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/tools/websocket-logger/package-lock.json
@@ -0,0 +1,39 @@
+{
+ "name": "websocket-logger",
+ "version": "0.0.0",
+ "lockfileVersion": 3,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "websocket-logger",
+ "version": "0.0.0",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "ws": "^8.13.0"
+ },
+ "bin": {
+ "websocket-logger": "main.js"
+ }
+ },
+ "node_modules/ws": {
+ "version": "8.13.0",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-8.13.0.tgz",
+ "integrity": "sha512-x9vcZYTrFPC7aSIbj7sRCYo7L/Xb8Iy+pW0ng0wt2vCJv7M9HOMy0UoN3rr+IFC7hb7vXoqS+P9ktyLLLhO+LA==",
+ "engines": {
+ "node": ">=10.0.0"
+ },
+ "peerDependencies": {
+ "bufferutil": "^4.0.1",
+ "utf-8-validate": ">=5.0.2"
+ },
+ "peerDependenciesMeta": {
+ "bufferutil": {
+ "optional": true
+ },
+ "utf-8-validate": {
+ "optional": true
+ }
+ }
+ }
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/tools/websocket-logger/package.json b/dom/webgpu/tests/cts/checkout/tools/websocket-logger/package.json
new file mode 100644
index 0000000000..66585968bd
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/tools/websocket-logger/package.json
@@ -0,0 +1,14 @@
+{
+ "name": "websocket-logger",
+ "version": "0.0.0",
+ "author": "WebGPU CTS Contributors",
+ "private": true,
+ "license": "BSD-3-Clause",
+ "type": "module",
+ "scripts": {
+ "start": "node main.js"
+ },
+ "dependencies": {
+ "ws": "^8.13.0"
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/tsconfig.json b/dom/webgpu/tests/cts/checkout/tsconfig.json
new file mode 100644
index 0000000000..6d3b0d98bf
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/tsconfig.json
@@ -0,0 +1,64 @@
+{
+ "extends": "./node_modules/gts/tsconfig-google.json",
+ "compilerOptions": {
+ "lib": ["dom", "es2020"],
+ "module": "esnext",
+ /* Output options */
+ "noEmit": true,
+ /* Strict type-checking options */
+ "allowJs": true,
+ "checkJs": false,
+ "strict": true,
+ /* tsc lint options */
+ "allowUnusedLabels": false,
+ "noImplicitOverride": true,
+ "noImplicitReturns": true,
+ /* These should be caught by eslint instead */
+ "noFallthroughCasesInSwitch": false,
+ "noUnusedLocals": false,
+ "noUnusedParameters": false,
+ "allowUnreachableCode": true,
+ /* Compiler warnings we intentionally don't use */
+ // - Would be nice, but produces lots of errors that probably aren't worth fixing
+ "noUncheckedIndexedAccess": false,
+ // - We could make our code pass this, but it doesn't seem to provide much value to us
+ "noPropertyAccessFromIndexSignature": false,
+ // - Doesn't work with @webgpu/types right now, also has annoying interactions and limited value
+ "exactOptionalPropertyTypes": false,
+ /* Module Options */
+ "moduleResolution": "node",
+ "esModuleInterop": false,
+ "isolatedModules": true,
+ "skipLibCheck": true,
+ "target": "es2020"
+ // @tsconfig/strictest is a useful reference for new options:
+ // https://github.com/tsconfig/bases/blob/main/bases/strictest.json
+ },
+ "include": [
+ "src/**/*.ts",
+ "src/external/**/*.js",
+ ],
+ "typedocOptions": {
+ "entryPointStrategy": "expand",
+ "entryPoints": [
+ "src/common/framework/",
+ "src/common/util/",
+ "src/webgpu/",
+ ],
+ "exclude": [
+ "**/*.spec.ts",
+ "**/*.html.ts",
+ "src/*/listing.ts",
+ "src/webgpu/util/device_pool.ts",
+ ],
+ "excludeInternal": true,
+ "excludeProtected": true,
+ "excludePrivate": true,
+ "validation": {
+ "invalidLink": true,
+ "notExported": false,
+ },
+ "readme": "./docs/helper_index.txt",
+ "out": "docs/tsdoc/"
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/w3c.json b/dom/webgpu/tests/cts/checkout/w3c.json
new file mode 100644
index 0000000000..2c0b3a7a90
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/w3c.json
@@ -0,0 +1,5 @@
+{
+ "group": [96877, 125519],
+ "contacts": ["tidoust", "Kangz", "grorg"],
+ "repo-type": ["tests"]
+}
\ No newline at end of file
diff --git a/dom/webgpu/tests/cts/checkout_commit.txt b/dom/webgpu/tests/cts/checkout_commit.txt
new file mode 100644
index 0000000000..1f42bc3256
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout_commit.txt
@@ -0,0 +1 @@
+41f89e77b67e6b66cb017be4e00235a0a9429ca7
diff --git a/dom/webgpu/tests/cts/vendor/Cargo.lock b/dom/webgpu/tests/cts/vendor/Cargo.lock
new file mode 100644
index 0000000000..ea51837ca5
--- /dev/null
+++ b/dom/webgpu/tests/cts/vendor/Cargo.lock
@@ -0,0 +1,889 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "addr2line"
+version = "0.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97"
+dependencies = [
+ "gimli",
+]
+
+[[package]]
+name = "adler"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
+
+[[package]]
+name = "ahash"
+version = "0.7.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47"
+dependencies = [
+ "getrandom",
+ "once_cell",
+ "version_check",
+]
+
+[[package]]
+name = "aho-corasick"
+version = "0.7.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "atty"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
+dependencies = [
+ "hermit-abi 0.1.19",
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "autocfg"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+
+[[package]]
+name = "backtrace"
+version = "0.3.67"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca"
+dependencies = [
+ "addr2line",
+ "cc",
+ "cfg-if",
+ "libc",
+ "miniz_oxide",
+ "object",
+ "rustc-demangle",
+]
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "cc"
+version = "1.0.79"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "clap"
+version = "4.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec0b0588d44d4d63a87dbd75c136c166bbfd9a86a31cb89e09906521c7d3f5e3"
+dependencies = [
+ "bitflags",
+ "clap_derive",
+ "clap_lex",
+ "is-terminal",
+ "once_cell",
+ "strsim",
+ "termcolor",
+]
+
+[[package]]
+name = "clap_derive"
+version = "4.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "684a277d672e91966334af371f1a7b5833f9aa00b07c84e92fbce95e00208ce8"
+dependencies = [
+ "heck",
+ "proc-macro-error",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "clap_lex"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "350b9cf31731f9957399229e9b2adc51eeabdfbe9d71d9a0552275fd12710d09"
+dependencies = [
+ "os_str_bytes",
+]
+
+[[package]]
+name = "crossbeam"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c"
+dependencies = [
+ "cfg-if",
+ "crossbeam-channel",
+ "crossbeam-deque",
+ "crossbeam-epoch",
+ "crossbeam-queue",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-channel"
+version = "0.5.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf2b3e8478797446514c91ef04bafcb59faba183e621ad488df88983cc14128c"
+dependencies = [
+ "cfg-if",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-deque"
+version = "0.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef"
+dependencies = [
+ "cfg-if",
+ "crossbeam-epoch",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.9.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695"
+dependencies = [
+ "autocfg",
+ "cfg-if",
+ "crossbeam-utils",
+ "memoffset",
+ "scopeguard",
+]
+
+[[package]]
+name = "crossbeam-queue"
+version = "0.3.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add"
+dependencies = [
+ "cfg-if",
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.8.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "dircpy"
+version = "0.3.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "10b6622b9d0dc20c70e74ff24c56493278d7d9299ac8729deb923703616e5a7e"
+dependencies = [
+ "jwalk",
+ "log",
+ "walkdir",
+]
+
+[[package]]
+name = "dunce"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0bd4b30a6560bbd9b4620f4de34c3f14f60848e58a9b7216801afcb4c7b31c3c"
+
+[[package]]
+name = "either"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7fcaabb2fef8c910e7f4c7ce9f67a1283a1715879a7c230ca9d6d1ae31f16d91"
+
+[[package]]
+name = "env_logger"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0"
+dependencies = [
+ "humantime",
+ "is-terminal",
+ "log",
+ "regex",
+ "termcolor",
+]
+
+[[package]]
+name = "errno"
+version = "0.2.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1"
+dependencies = [
+ "errno-dragonfly",
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "errno-dragonfly"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf"
+dependencies = [
+ "cc",
+ "libc",
+]
+
+[[package]]
+name = "format"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "901e1b63ac63f86d9fb836b1ae8b43e5a9f2338975e9de24f36a1af4acf23ac8"
+dependencies = [
+ "format-core",
+ "format-macro",
+]
+
+[[package]]
+name = "format-core"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e66b70d6700c47044b73e43dd0649e0d6bfef18f87919c23785cdbd1aaa9d3f5"
+
+[[package]]
+name = "format-macro"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f9faac4e57f217563dd1fd58628a0c526aa37a681ffac76ca80d64907370a4c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.2.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "wasi",
+]
+
+[[package]]
+name = "gimli"
+version = "0.27.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4"
+
+[[package]]
+name = "hashbrown"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
+dependencies = [
+ "ahash",
+]
+
+[[package]]
+name = "heck"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "hermit-abi"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "hermit-abi"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286"
+
+[[package]]
+name = "humantime"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
+
+[[package]]
+name = "io-lifetimes"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1abeb7a0dd0f8181267ff8adc397075586500b81b28a73e8a0208b00fc170fb3"
+dependencies = [
+ "libc",
+ "windows-sys",
+]
+
+[[package]]
+name = "is-terminal"
+version = "0.4.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "21b6b32576413a8e69b90e952e4a026476040d81017b80445deda5f2d3921857"
+dependencies = [
+ "hermit-abi 0.3.1",
+ "io-lifetimes",
+ "rustix",
+ "windows-sys",
+]
+
+[[package]]
+name = "is_ci"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "616cde7c720bb2bb5824a224687d8f77bfd38922027f01d825cd7453be5099fb"
+
+[[package]]
+name = "jwalk"
+version = "0.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5dbcda57db8b6dc067e589628b7348639014e793d9e8137d8cf215e8b133a0bd"
+dependencies = [
+ "crossbeam",
+ "rayon",
+]
+
+[[package]]
+name = "lets_find_up"
+version = "0.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b91a14fb0b4300e025486cc8bc096c7173c2c615ce8f9c6da7829a4af3f5afbd"
+
+[[package]]
+name = "libc"
+version = "0.2.139"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79"
+
+[[package]]
+name = "linux-raw-sys"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4"
+
+[[package]]
+name = "log"
+version = "0.4.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "memchr"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
+
+[[package]]
+name = "memoffset"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1"
+dependencies = [
+ "autocfg",
+]
+
+[[package]]
+name = "miette"
+version = "5.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4afd9b301defa984bbdbe112b4763e093ed191750a0d914a78c1106b2d0fe703"
+dependencies = [
+ "atty",
+ "backtrace",
+ "miette-derive",
+ "once_cell",
+ "owo-colors",
+ "supports-color",
+ "supports-hyperlinks",
+ "supports-unicode",
+ "terminal_size",
+ "textwrap",
+ "thiserror",
+ "unicode-width",
+]
+
+[[package]]
+name = "miette-derive"
+version = "5.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "97c2401ab7ac5282ca5c8b518a87635b1a93762b0b90b9990c509888eeccba29"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "miniz_oxide"
+version = "0.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa"
+dependencies = [
+ "adler",
+]
+
+[[package]]
+name = "num_cpus"
+version = "1.15.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b"
+dependencies = [
+ "hermit-abi 0.2.6",
+ "libc",
+]
+
+[[package]]
+name = "object"
+version = "0.30.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ea86265d3d3dcb6a27fc51bd29a4bf387fae9d2986b823079d4986af253eb439"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.17.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3"
+
+[[package]]
+name = "os_str_bytes"
+version = "6.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee"
+
+[[package]]
+name = "owo-colors"
+version = "3.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f"
+
+[[package]]
+name = "proc-macro-error"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
+dependencies = [
+ "proc-macro-error-attr",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "version_check",
+]
+
+[[package]]
+name = "proc-macro-error-attr"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "version_check",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.51"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "rayon"
+version = "1.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7"
+dependencies = [
+ "either",
+ "rayon-core",
+]
+
+[[package]]
+name = "rayon-core"
+version = "1.10.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "356a0625f1954f730c0201cdab48611198dc6ce21f4acff55089b5a78e6e835b"
+dependencies = [
+ "crossbeam-channel",
+ "crossbeam-deque",
+ "crossbeam-utils",
+ "num_cpus",
+]
+
+[[package]]
+name = "regex"
+version = "1.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.6.28"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848"
+
+[[package]]
+name = "rustc-demangle"
+version = "0.1.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342"
+
+[[package]]
+name = "rustix"
+version = "0.36.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f43abb88211988493c1abb44a70efa56ff0ce98f233b7b276146f1f3f7ba9644"
+dependencies = [
+ "bitflags",
+ "errno",
+ "io-lifetimes",
+ "libc",
+ "linux-raw-sys",
+ "windows-sys",
+]
+
+[[package]]
+name = "same-file"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "scopeguard"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
+
+[[package]]
+name = "shell-words"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde"
+
+[[package]]
+name = "smawk"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f67ad224767faa3c7d8b6d91985b78e70a1324408abcb1cfcc2be4c06bc06043"
+
+[[package]]
+name = "strsim"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
+
+[[package]]
+name = "supports-color"
+version = "1.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8ba6faf2ca7ee42fdd458f4347ae0a9bd6bcc445ad7cb57ad82b383f18870d6f"
+dependencies = [
+ "atty",
+ "is_ci",
+]
+
+[[package]]
+name = "supports-hyperlinks"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "590b34f7c5f01ecc9d78dba4b3f445f31df750a67621cf31626f3b7441ce6406"
+dependencies = [
+ "atty",
+]
+
+[[package]]
+name = "supports-unicode"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a8b945e45b417b125a8ec51f1b7df2f8df7920367700d1f98aedd21e5735f8b2"
+dependencies = [
+ "atty",
+]
+
+[[package]]
+name = "syn"
+version = "1.0.109"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "termcolor"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "terminal_size"
+version = "0.1.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "633c1a546cee861a1a6d0dc69ebeca693bf4296661ba7852b9d21d159e0506df"
+dependencies = [
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "textwrap"
+version = "0.15.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b7b3e525a49ec206798b40326a44121291b530c963cfb01018f63e135bac543d"
+dependencies = [
+ "smawk",
+ "unicode-linebreak",
+ "unicode-width",
+]
+
+[[package]]
+name = "thiserror"
+version = "1.0.38"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0"
+dependencies = [
+ "thiserror-impl",
+]
+
+[[package]]
+name = "thiserror-impl"
+version = "1.0.38"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc"
+
+[[package]]
+name = "unicode-linebreak"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c5faade31a542b8b35855fff6e8def199853b2da8da256da52f52f1316ee3137"
+dependencies = [
+ "hashbrown",
+ "regex",
+]
+
+[[package]]
+name = "unicode-width"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b"
+
+[[package]]
+name = "vendor-webgpu-cts"
+version = "0.1.0"
+dependencies = [
+ "clap",
+ "dircpy",
+ "dunce",
+ "env_logger",
+ "format",
+ "lets_find_up",
+ "log",
+ "miette",
+ "regex",
+ "shell-words",
+ "thiserror",
+ "which",
+]
+
+[[package]]
+name = "version_check"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
+
+[[package]]
+name = "walkdir"
+version = "2.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56"
+dependencies = [
+ "same-file",
+ "winapi",
+ "winapi-util",
+]
+
+[[package]]
+name = "wasi"
+version = "0.11.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+
+[[package]]
+name = "which"
+version = "4.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269"
+dependencies = [
+ "either",
+ "libc",
+ "once_cell",
+]
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-util"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+
+[[package]]
+name = "windows-sys"
+version = "0.45.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0"
+dependencies = [
+ "windows-targets",
+]
+
+[[package]]
+name = "windows-targets"
+version = "0.42.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7"
+dependencies = [
+ "windows_aarch64_gnullvm",
+ "windows_aarch64_msvc",
+ "windows_i686_gnu",
+ "windows_i686_msvc",
+ "windows_x86_64_gnu",
+ "windows_x86_64_gnullvm",
+ "windows_x86_64_msvc",
+]
+
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.42.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608"
+
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.42.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7"
+
+[[package]]
+name = "windows_i686_gnu"
+version = "0.42.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640"
+
+[[package]]
+name = "windows_i686_msvc"
+version = "0.42.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605"
+
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.42.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45"
+
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.42.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463"
+
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.42.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd"
diff --git a/dom/webgpu/tests/cts/vendor/Cargo.toml b/dom/webgpu/tests/cts/vendor/Cargo.toml
new file mode 100644
index 0000000000..d7721d1931
--- /dev/null
+++ b/dom/webgpu/tests/cts/vendor/Cargo.toml
@@ -0,0 +1,20 @@
+[package]
+name = "vendor-webgpu-cts"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+clap = { version = "4.1.6", features = ["derive"] }
+dircpy = "0.3.14"
+dunce = "1.0.3"
+env_logger = "0.10.0"
+format = "0.2.4"
+lets_find_up = "0.0.3"
+log = "0.4.17"
+miette = { version = "5.5.0", features = ["fancy"] }
+regex = "1.7.1"
+shell-words = "1.1.0"
+thiserror = "1.0.38"
+which = "4.4.0"
+
+[workspace]
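As a rough usage sketch (assuming the standard Cargo workflow and no extra setup): the empty `[workspace]` table makes this crate its own workspace root, independent of Gecko's top-level one, so the tool would be run from `dom/webgpu/tests/cts/vendor/` as `cargo run -- /path/to/cts/checkout`, where the positional argument is the `cts_checkout_path` accepted by the CLI defined in `main.rs` below.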
diff --git a/dom/webgpu/tests/cts/vendor/src/fs.rs b/dom/webgpu/tests/cts/vendor/src/fs.rs
new file mode 100644
index 0000000000..31697f9758
--- /dev/null
+++ b/dom/webgpu/tests/cts/vendor/src/fs.rs
@@ -0,0 +1,331 @@
+use std::{
+ ffi::OsStr,
+ fmt::{self, Display},
+ fs,
+ ops::Deref,
+ path::{Path, PathBuf, StripPrefixError},
+};
+
+use miette::{ensure, Context, IntoDiagnostic};
+
+#[derive(Debug)]
+pub(crate) struct FileRoot {
+ nickname: &'static str,
+ path: PathBuf,
+}
+
+impl Eq for FileRoot {}
+
+impl PartialEq for FileRoot {
+ fn eq(&self, other: &Self) -> bool {
+ self.path == other.path
+ }
+}
+
+impl Ord for FileRoot {
+ fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+ self.path.cmp(&other.path)
+ }
+}
+
+impl PartialOrd for FileRoot {
+ fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl FileRoot {
+ pub(crate) fn new<P>(nickname: &'static str, path: P) -> miette::Result<Self>
+ where
+ P: AsRef<Path>,
+ {
+ let path = path.as_ref();
+ Ok(Self {
+ nickname,
+ path: dunce::canonicalize(path)
+ .map_err(miette::Report::msg)
+ .wrap_err_with(|| format!("failed to canonicalize {path:?}"))?,
+ })
+ }
+
+ pub(crate) fn nickname(&self) -> &str {
+ self.nickname
+ }
+
+ pub(crate) fn try_child<P>(&self, path: P) -> Result<Child<'_>, StripPrefixError>
+ where
+ P: AsRef<Path>,
+ {
+ let path = path.as_ref();
+ if path.is_absolute() {
+ path.strip_prefix(&self.path)?;
+ }
+ Ok(Child {
+ root: self,
+ path: self.path.join(path),
+ })
+ }
+
+ #[track_caller]
+ pub(crate) fn child<P>(&self, path: P) -> Child<'_>
+ where
+ P: AsRef<Path>,
+ {
+ self.try_child(path)
+ .into_diagnostic()
+ .wrap_err("invariant violation: `path` is absolute and not a child of this file root")
+ .unwrap()
+ }
+
+ fn removed_dir<P>(&self, path: P) -> miette::Result<Child<'_>>
+ where
+ P: AsRef<Path>,
+ {
+ let path = path.as_ref();
+ let child = self.child(path);
+ if child.exists() {
+ log::info!("removing old contents of {child}…",);
+ log::trace!("removing directory {:?}", &*child);
+ fs::remove_dir_all(&*child)
+ .map_err(miette::Report::msg)
+ .wrap_err_with(|| format!("failed to remove old contents of {child}"))?;
+ }
+ Ok(child)
+ }
+
+ fn removed_file<P>(&self, path: P) -> miette::Result<Child<'_>>
+ where
+ P: AsRef<Path>,
+ {
+ let path = path.as_ref();
+ let child = self.child(path);
+ if child.exists() {
+ log::info!("removing old copy of {child}…",);
+ fs::remove_file(&*child)
+ .map_err(miette::Report::msg)
+ .wrap_err_with(|| format!("failed to remove old copy of {child}"))?;
+ }
+ Ok(child)
+ }
+
+ pub(crate) fn regen_dir<P>(
+ &self,
+ path: P,
+ gen: impl FnOnce(&Child<'_>) -> miette::Result<()>,
+ ) -> miette::Result<Child<'_>>
+ where
+ P: AsRef<Path>,
+ {
+ let child = self.removed_dir(path)?;
+ gen(&child)?;
+ ensure!(
+ child.is_dir(),
+ "{} was not regenerated for an unknown reason",
+ child,
+ );
+ Ok(child)
+ }
+
+ pub(crate) fn regen_file<P>(
+ &self,
+ path: P,
+ gen: impl FnOnce(&Child<'_>) -> miette::Result<()>,
+ ) -> miette::Result<Child<'_>>
+ where
+ P: AsRef<Path>,
+ {
+ let child = self.removed_file(path)?;
+ gen(&child)?;
+ ensure!(
+ child.is_file(),
+ "{} was not regenerated for an unknown reason",
+ child,
+ );
+ Ok(child)
+ }
+}
+
+impl Deref for FileRoot {
+ type Target = Path;
+
+ fn deref(&self) -> &Self::Target {
+ &self.path
+ }
+}
+
+impl AsRef<Path> for FileRoot {
+ fn as_ref(&self) -> &Path {
+ &self.path
+ }
+}
+
+impl AsRef<OsStr> for FileRoot {
+ fn as_ref(&self) -> &OsStr {
+ self.path.as_os_str()
+ }
+}
+
+impl Display for FileRoot {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let Self { nickname, path } = self;
+ write!(f, "`{}` (AKA `<{nickname}>`)", path.display())
+ }
+}
+
+#[derive(Debug, Eq, Ord, PartialEq, PartialOrd)]
+pub(crate) struct Child<'a> {
+ root: &'a FileRoot,
+ /// NOTE: This is always an absolute path that is a child of the `root`.
+ path: PathBuf,
+}
+
+impl Child<'_> {
+ pub(crate) fn relative_path(&self) -> &Path {
+ let Self { root, path } = self;
+ path.strip_prefix(root).unwrap()
+ }
+
+ pub(crate) fn try_child<P>(&self, path: P) -> Result<Self, StripPrefixError>
+ where
+ P: AsRef<Path>,
+ {
+ let child_path = path.as_ref();
+ let Self { root, path } = self;
+
+ if child_path.is_absolute() {
+ child_path.strip_prefix(path)?;
+ }
+ Ok(Child {
+ root,
+ path: path.join(child_path),
+ })
+ }
+
+ #[track_caller]
+ pub(crate) fn child<P>(&self, path: P) -> Self
+ where
+ P: AsRef<Path>,
+ {
+ self.try_child(path)
+ .into_diagnostic()
+ .wrap_err("invariant violation: `path` is absolute and not a child of this child")
+ .unwrap()
+ }
+}
+
+impl Deref for Child<'_> {
+ type Target = Path;
+
+ fn deref(&self) -> &Self::Target {
+ &self.path
+ }
+}
+
+impl AsRef<Path> for Child<'_> {
+ fn as_ref(&self) -> &Path {
+ &self.path
+ }
+}
+
+impl AsRef<OsStr> for Child<'_> {
+ fn as_ref(&self) -> &OsStr {
+ self.path.as_os_str()
+ }
+}
+
+impl Display for Child<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "`<{}>{}{}`",
+ self.root.nickname(),
+ std::path::MAIN_SEPARATOR,
+ self.relative_path().display()
+ )
+ }
+}
+
+pub(crate) fn existing_file<P>(path: P) -> P
+where
+ P: AsRef<Path>,
+{
+ let p = path.as_ref();
+ assert!(p.is_file(), "{p:?} does not exist as a file");
+ path
+}
+
+pub(crate) fn copy_dir<P, Q>(source: P, dest: Q) -> miette::Result<()>
+where
+ P: Display + AsRef<Path>,
+ Q: Display + AsRef<Path>,
+{
+ log::debug!(
+ "copy-merging directories from {} into {}",
+ source.as_ref().display(),
+ dest.as_ref().display(),
+ );
+ ::dircpy::copy_dir(&source, &dest)
+ .into_diagnostic()
+ .wrap_err_with(|| format!("failed to copy files from {source} to {dest}"))
+}
+
+pub(crate) fn read_to_string<P>(path: P) -> miette::Result<String>
+where
+ P: AsRef<Path>,
+{
+ fs::read_to_string(&path)
+ .into_diagnostic()
+ .wrap_err_with(|| {
+ format!(
+ "failed to read UTF-8 string from path {}",
+ path.as_ref().display()
+ )
+ })
+}
+
+pub(crate) fn copy<P1, P2>(from: P1, to: P2) -> miette::Result<u64>
+where
+ P1: AsRef<Path>,
+ P2: AsRef<Path>,
+{
+ fs::copy(&from, &to).into_diagnostic().wrap_err_with(|| {
+ format!(
+ "failed to copy {} to {}",
+ from.as_ref().display(),
+ to.as_ref().display()
+ )
+ })
+}
+
+pub(crate) fn create_dir_all<P>(path: P) -> miette::Result<()>
+where
+ P: AsRef<Path>,
+{
+ fs::create_dir_all(&path)
+ .into_diagnostic()
+ .wrap_err_with(|| {
+ format!(
+ "failed to create directories leading up to {}",
+ path.as_ref().display()
+ )
+ })
+}
+
+pub(crate) fn remove_file<P>(path: P) -> miette::Result<()>
+where
+ P: AsRef<Path>,
+{
+ fs::remove_file(&path)
+ .into_diagnostic()
+ .wrap_err_with(|| format!("failed to remove file at path {}", path.as_ref().display()))
+}
+
+pub(crate) fn write<P, C>(path: P, contents: C) -> miette::Result<()>
+where
+ P: AsRef<Path>,
+ C: AsRef<[u8]>,
+{
+ fs::write(&path, &contents)
+ .into_diagnostic()
+ .wrap_err_with(|| format!("failed to write to path {}", path.as_ref().display()))
+}
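A note on the types above: `FileRoot` and `Child` anchor every filesystem path to a named root so that log output reads as `<nickname>/relative/path`, and `regen_dir`/`regen_file` wrap the "delete stale output, regenerate, verify it exists" pattern used repeatedly in `main.rs`. The following is a minimal usage sketch (hypothetical paths; it assumes it lives in this crate so the `pub(crate)` items are in scope):

```rust
use crate::fs::{create_dir_all, FileRoot};

fn sketch() -> miette::Result<()> {
    // Canonicalize a root once and give it a short nickname for log output.
    let gecko = FileRoot::new("gecko", "/home/user/mozilla-unified")?;

    // `child` joins a relative path while remembering the root; its Display
    // impl renders as `<gecko>/dom/webgpu/tests/cts`.
    let cts = gecko.child("dom/webgpu/tests/cts");
    log::info!("CTS dir lives at {cts}");

    // `regen_dir` removes any stale copy, runs the closure to repopulate the
    // directory, and errors if the closure did not actually create it.
    gecko.regen_dir(cts.join("checkout"), |checkout| create_dir_all(checkout))?;

    Ok(())
}
```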
diff --git a/dom/webgpu/tests/cts/vendor/src/main.rs b/dom/webgpu/tests/cts/vendor/src/main.rs
new file mode 100644
index 0000000000..750b65c62e
--- /dev/null
+++ b/dom/webgpu/tests/cts/vendor/src/main.rs
@@ -0,0 +1,565 @@
+use std::{
+ collections::{BTreeMap, BTreeSet},
+ env::{current_dir, set_current_dir},
+ path::{Path, PathBuf},
+ process::ExitCode,
+};
+
+use clap::Parser;
+use lets_find_up::{find_up_with, FindUpKind, FindUpOptions};
+use miette::{bail, ensure, miette, Context, Diagnostic, IntoDiagnostic, Report, SourceSpan};
+use regex::Regex;
+
+use crate::{
+ fs::{copy_dir, create_dir_all, existing_file, remove_file, FileRoot},
+ path::join_path,
+ process::{which, EasyCommand},
+};
+
+mod fs;
+mod path;
+mod process;
+
+/// Vendor WebGPU CTS tests from a local Git checkout of [our `gpuweb/cts` fork].
+///
+/// WPT tests are generated into `testing/web-platform/mozilla/tests/webgpu/`. If the set of tests
+/// changes upstream, make sure that the generated output still matches up with test expectation
+/// metadata in `testing/web-platform/mozilla/meta/webgpu/`.
+///
+/// [our `gpuweb/cts` fork]: https://github.com/mozilla/gpuweb-cts
+#[derive(Debug, Parser)]
+struct CliArgs {
+ /// A path to the top-level directory of your WebGPU CTS checkout.
+ cts_checkout_path: PathBuf,
+}
+
+fn main() -> ExitCode {
+ env_logger::builder()
+ .filter_level(log::LevelFilter::Info)
+ .parse_default_env()
+ .init();
+
+ let args = CliArgs::parse();
+
+ match run(args) {
+ Ok(()) => ExitCode::SUCCESS,
+ Err(e) => {
+ log::error!("{e:?}");
+ ExitCode::FAILURE
+ }
+ }
+}
+
+fn run(args: CliArgs) -> miette::Result<()> {
+ let CliArgs { cts_checkout_path } = args;
+
+ let orig_working_dir = current_dir().unwrap();
+
+ let cts_dir = join_path(["dom", "webgpu", "tests", "cts"]);
+ let cts_vendor_dir = join_path([&*cts_dir, "vendor".as_ref()]);
+ let gecko_ckt = {
+ let find_up_opts = || FindUpOptions {
+ cwd: Path::new("."),
+ kind: FindUpKind::Dir,
+ };
+ let find_up = |root_dir_name| {
+ let err = || {
+ miette!(
+ concat!(
+ "failed to find a Mercurial repository ({:?}) in any of current ",
+ "working directory and its parent directories",
+ ),
+ root_dir_name
+ )
+ };
+ find_up_with(root_dir_name, find_up_opts())
+ .map_err(Report::msg)
+ .wrap_err_with(err)
+ .and_then(|loc_opt| loc_opt.ok_or_else(err))
+ .map(|mut dir| {
+ dir.pop();
+ dir
+ })
+ };
+ let gecko_source_root = find_up(".hg").or_else(|e| match find_up(".git") {
+ Ok(path) => {
+ log::debug!("{e:?}");
+ Ok(path)
+ }
+ Err(e2) => {
+ log::warn!("{e:?}");
+ log::warn!("{e2:?}");
+ bail!("failed to find a Gecko repository root")
+ }
+ })?;
+
+ let root = FileRoot::new("gecko", &gecko_source_root)?;
+ log::info!("detected Gecko repository root at {root}");
+
+ ensure!(
+ root.try_child(&orig_working_dir)
+ .map_or(false, |c| c.relative_path() == cts_vendor_dir),
+ concat!(
+ "It is expected to run this tool from the root of its Cargo project, ",
+ "but this does not appear to have been done. Bailing."
+ )
+ );
+
+ root
+ };
+
+ let cts_vendor_dir = gecko_ckt.child(orig_working_dir.parent().unwrap());
+
+ let wpt_tests_dir = {
+ let child = gecko_ckt.child(join_path(["testing", "web-platform", "mozilla", "tests"]));
+ ensure!(
+ child.is_dir(),
+ "WPT tests dir ({child}) does not appear to exist"
+ );
+ child
+ };
+
+ let (cts_ckt_git_dir, cts_ckt) = {
+ let failed_find_git_err = || {
+ miette!(concat!(
+ "failed to find a Git repository (`.git` directory) in the provided path ",
+ "and all of its parent directories"
+ ))
+ };
+ let git_dir = find_up_with(
+ ".git",
+ FindUpOptions {
+ cwd: &cts_checkout_path,
+ kind: FindUpKind::Dir,
+ },
+ )
+ .map_err(Report::msg)
+ .wrap_err_with(failed_find_git_err)?
+ .ok_or_else(failed_find_git_err)?;
+
+ let ckt = FileRoot::new("cts", git_dir.parent().unwrap())?;
+ log::debug!("detected CTS checkout root at {ckt}");
+ (git_dir, ckt)
+ };
+
+ let git_bin = which("git", "Git binary")?;
+ let npm_bin = which("npm", "NPM binary")?;
+
+ // XXX: It'd be nice to expose separate operations for copying in source and generating WPT
+ // cases from the vendored copy. Checks like these really only matter when updating source.
+ let ensure_no_child = |p1: &FileRoot, p2| {
+ ensure!(
+ p1.try_child(p2).is_err(),
+ "{p1} is a child path of {p2}, which is not supported"
+ );
+ Ok(())
+ };
+ ensure_no_child(&cts_ckt, &gecko_ckt)?;
+ ensure_no_child(&gecko_ckt, &cts_ckt)?;
+
+ log::info!("making a vendored copy of checked-in files from {cts_ckt}…",);
+ gecko_ckt.regen_file(
+ join_path([&*cts_dir, "checkout_commit.txt".as_ref()]),
+ |checkout_commit_file| {
+ let mut git_status_porcelain_cmd = EasyCommand::new(&git_bin, |cmd| {
+ cmd.args(["status", "--porcelain"])
+ .envs([("GIT_DIR", &*cts_ckt_git_dir), ("GIT_WORK_TREE", &*cts_ckt)])
+ });
+ log::info!(
+ " …ensuring the working tree and index are clean with {}…",
+ git_status_porcelain_cmd
+ );
+ let git_status_porcelain_output = git_status_porcelain_cmd.just_stdout_utf8()?;
+ ensure!(
+ git_status_porcelain_output.is_empty(),
+ concat!(
+ "expected a clean CTS working tree and index, ",
+ "but {}'s output was not empty; ",
+ "for reference, it was:\n\n{}",
+ ),
+ git_status_porcelain_cmd,
+ git_status_porcelain_output,
+ );
+
+ gecko_ckt.regen_dir(&cts_vendor_dir.join("checkout"), |vendored_ckt_dir| {
+ log::info!(" …copying files tracked by Git to {vendored_ckt_dir}…");
+ let files_to_vendor = {
+ let mut git_ls_files_cmd = EasyCommand::new(&git_bin, |cmd| {
+ cmd.arg("ls-files").env("GIT_DIR", &cts_ckt_git_dir)
+ });
+ log::debug!(" …getting files to vendor from {git_ls_files_cmd}…");
+ let output = git_ls_files_cmd.just_stdout_utf8()?;
+ let mut files = output
+ .split_terminator('\n')
+ .map(PathBuf::from)
+ .collect::<BTreeSet<_>>();
+ log::trace!(" …files from {git_ls_files_cmd}: {files:#?}");
+
+ log::trace!(" …validating that files from Git repo still exist…");
+ let files_not_found = files
+ .iter()
+ .filter(|p| !cts_ckt.child(p).exists())
+ .collect::<Vec<_>>();
+ ensure!(
+ files_not_found.is_empty(),
+ concat!(
+ "the following files were returned by `git ls-files`, ",
+ "but do not exist on disk: {:#?}",
+ ),
+ files_not_found,
+ );
+
+ log::trace!(" …stripping files we actually don't want to vendor…");
+ let files_to_actually_not_vendor = [
+ // There's no reason to bring this over, and lots of reasons to not bring in
+ // security-sensitive content unless we have to.
+ "deploy_key.enc",
+ ]
+ .map(Path::new);
+ log::trace!(" …files we don't want: {files_to_actually_not_vendor:?}");
+ for path in files_to_actually_not_vendor {
+ ensure!(
+ files.remove(path),
+ concat!(
+ "failed to remove {} from list of files to vendor; ",
+ "does it still exist?"
+ ),
+ cts_ckt.child(path)
+ );
+ }
+ files
+ };
+
+ log::debug!(" …now doing the copying…");
+ for path in files_to_vendor {
+ let vendor_from_path = cts_ckt.child(&path);
+ let vendor_to_path = vendored_ckt_dir.child(&path);
+ if let Some(parent) = vendor_to_path.parent() {
+ create_dir_all(vendored_ckt_dir.child(parent))?;
+ }
+ log::trace!(" …copying {vendor_from_path} to {vendor_to_path}…");
+ fs::copy(&vendor_from_path, &vendor_to_path)?;
+ }
+
+ Ok(())
+ })?;
+
+ log::info!(" …writing commit ref pointed to by `HEAD` to {checkout_commit_file}…");
+ let mut git_rev_parse_head_cmd = EasyCommand::new(&git_bin, |cmd| {
+ cmd.args(["rev-parse", "HEAD"])
+ .env("GIT_DIR", &cts_ckt_git_dir)
+ });
+ log::trace!(" …getting output of {git_rev_parse_head_cmd}…");
+ fs::write(
+ checkout_commit_file,
+ git_rev_parse_head_cmd.just_stdout_utf8()?,
+ )
+ .wrap_err_with(|| format!("failed to write HEAD ref to {checkout_commit_file}"))
+ },
+ )?;
+
+ set_current_dir(&*cts_ckt)
+ .into_diagnostic()
+ .wrap_err("failed to change working directory to CTS checkout")?;
+ log::debug!("changed CWD to {cts_ckt}");
+
+ let mut npm_ci_cmd = EasyCommand::new(&npm_bin, |cmd| cmd.arg("ci"));
+ log::info!(
+ "ensuring a clean {} directory with {npm_ci_cmd}…",
+ cts_ckt.child("node_modules"),
+ );
+ npm_ci_cmd.spawn()?;
+
+ let out_wpt_dir = cts_ckt.regen_dir("out-wpt", |out_wpt_dir| {
+ let mut npm_run_wpt_cmd = EasyCommand::new(&npm_bin, |cmd| cmd.args(["run", "wpt"]));
+ log::info!("generating WPT test cases into {out_wpt_dir} with {npm_run_wpt_cmd}…");
+ npm_run_wpt_cmd.spawn()
+ })?;
+
+ let cts_https_html_path = out_wpt_dir.child("cts.https.html");
+ log::info!("refining the output of {cts_https_html_path} with `npm run gen_wpt_cts_html …`…");
+ EasyCommand::new(&npm_bin, |cmd| {
+ cmd.args(["run", "gen_wpt_cts_html"]).arg(existing_file(
+ &cts_ckt.child("tools/gen_wpt_cfg_unchunked.json"),
+ ))
+ })
+ .spawn()?;
+
+ {
+ let extra_cts_https_html_path = out_wpt_dir.child("cts-chunked2sec.https.html");
+ log::info!("removing extraneous {extra_cts_https_html_path}…");
+ remove_file(&*extra_cts_https_html_path)?;
+ }
+
+ log::info!("analyzing {cts_https_html_path}…");
+ let cts_https_html_content = fs::read_to_string(&*cts_https_html_path)?;
+ let cts_boilerplate_short_timeout;
+ let cts_boilerplate_long_timeout;
+ let cts_cases;
+ {
+ {
+ let (boilerplate, cases_start) = {
+ let cases_start_idx = cts_https_html_content
+ .find("<meta name=variant")
+ .ok_or_else(|| miette!("no test cases found; this is unexpected!"))?;
+ cts_https_html_content.split_at(cases_start_idx)
+ };
+
+ {
+ if !boilerplate.is_empty() {
+ #[derive(Debug, Diagnostic, thiserror::Error)]
+ #[error("last character before test cases was not a newline; bug, or weird?")]
+ #[diagnostic(severity("warning"))]
+ struct Oops {
+ #[label(
+ "this character ({:?}) was expected to be a newline, so that {}",
+ source_code.chars().last().unwrap(),
+ "the test spec. following it is on its own line"
+ )]
+ span: SourceSpan,
+ #[source_code]
+ source_code: String,
+ }
+ ensure!(
+ boilerplate.ends_with('\n'),
+ Oops {
+ span: SourceSpan::from(0..boilerplate.len()),
+ source_code: cts_https_html_content,
+ }
+ );
+ }
+
+ // NOTE: Adding `_mozilla` is necessary because [that's how it's mounted][source].
+ //
+ // [source]: https://searchfox.org/mozilla-central/rev/cd2121e7d83af1b421c95e8c923db70e692dab5f/testing/web-platform/mozilla/README#1-4
+ log::info!(concat!(
+ " …fixing `script` paths in WPT boilerplate ",
+ "so they work as Mozilla-private WPT tests…"
+ ));
+ let expected_wpt_script_tag =
+ "<script type=module src=/webgpu/common/runtime/wpt.js></script>";
+ ensure!(
+ boilerplate.contains(expected_wpt_script_tag),
+ concat!(
+ "failed to find expected `script` tag for `wpt.js` ",
+ "({:?}); did something change upstream?",
+ ),
+ expected_wpt_script_tag
+ );
+ let mut boilerplate = boilerplate.replacen(
+ expected_wpt_script_tag,
+ "<script type=module src=/_mozilla/webgpu/common/runtime/wpt.js></script>",
+ 1,
+ );
+
+ cts_boilerplate_short_timeout = boilerplate.clone();
+
+ let timeout_insert_idx = {
+ let meta_charset_utf8 = "\n<meta charset=utf-8>\n";
+ let meta_charset_utf8_idx =
+ boilerplate.find(meta_charset_utf8).ok_or_else(|| {
+ miette!(
+ "could not find {:?} in document; did something change upstream?",
+ meta_charset_utf8
+ )
+ })?;
+ meta_charset_utf8_idx + meta_charset_utf8.len()
+ };
+ boilerplate.insert_str(
+ timeout_insert_idx,
+ concat!(
+ r#"<meta name="timeout" content="long">"#,
+ " <!-- TODO: narrow to only where it's needed, see ",
+ "https://bugzilla.mozilla.org/show_bug.cgi?id=1850537",
+ " -->\n"
+ ),
+ );
+ cts_boilerplate_long_timeout = boilerplate
+ };
+
+ log::info!(" …parsing test variants in {cts_https_html_path}…");
+ let mut parsing_failed = false;
+ let meta_variant_regex = Regex::new(concat!(
+ "^",
+ "<meta name=variant content='\\?q=([^']*?):\\*'>",
+ "$"
+ ))
+ .unwrap();
+ cts_cases = cases_start
+ .split_terminator('\n')
+ .filter_map(|line| {
+ let path_and_meta = meta_variant_regex
+ .captures(line)
+ .map(|caps| (caps[1].to_owned(), line));
+ if path_and_meta.is_none() {
+ parsing_failed = true;
+ log::error!("line is not a test case: {line:?}");
+ }
+ path_and_meta
+ })
+ .collect::<Vec<_>>();
+ ensure!(
+ !parsing_failed,
+ "one or more test case lines failed to parse, fix it and try again"
+ );
+ };
+ log::trace!("\"original\" HTML boilerplate:\n\n{cts_boilerplate_short_timeout}");
+
+ ensure!(
+ !cts_cases.is_empty(),
+ "no test cases found; this is unexpected!"
+ );
+ log::info!(" …found {} test cases", cts_cases.len());
+ }
+
+ cts_ckt.regen_dir(out_wpt_dir.join("cts"), |cts_tests_dir| {
+ log::info!("re-distributing tests into single file per test path…");
+ let mut failed_writing = false;
+ let mut cts_cases_by_spec_file_dir = BTreeMap::<_, BTreeSet<_>>::new();
+ for (path, meta) in cts_cases {
+ let case_dir = {
+ // Context: We want to mirror CTS upstream's `src/webgpu/**/*.spec.ts` paths as
+ // entire WPT tests, with each subtest being a WPT variant. Here's a diagram of
+ // a CTS path to explain why the logic below is correct:
+ //
+ // ```sh
+ // webgpu:this,is,the,spec.ts,file,path:subtest_in_file:…
+ // \____/ \___________________________/^\_____________/
+ // test `*.spec.ts` file path | |
+ // \__________________________________/| |
+ // | | |
+ // We want this… | …but not this. CTS upstream generates
+ // | this too, but we don't want to divide
+ // second ':' character here---/ here (yet).
+ // ```
+ let subtest_and_later_start_idx =
+ match path.match_indices(':').nth(1).map(|(idx, _s)| idx) {
+ Some(some) => some,
+ None => {
+ failed_writing = true;
+ log::error!(
+ concat!(
+ "failed to split suite and test path segments ",
+ "from CTS path `{}`"
+ ),
+ path
+ );
+ continue;
+ }
+ };
+ let slashed =
+ path[..subtest_and_later_start_idx].replace(|c| matches!(c, ':' | ','), "/");
+ cts_tests_dir.child(slashed)
+ };
+ if !cts_cases_by_spec_file_dir
+ .entry(case_dir)
+ .or_default()
+ .insert(meta)
+ {
+ log::warn!("duplicate entry {meta:?} detected")
+ }
+ }
+
+ struct WptEntry<'a> {
+ cases: BTreeSet<&'a str>,
+ timeout_length: TimeoutLength,
+ }
+ enum TimeoutLength {
+ Short,
+ Long,
+ }
+ let split_cases = {
+ let mut split_cases = BTreeMap::new();
+ fn insert_with_default_name<'a>(
+ split_cases: &mut BTreeMap<fs::Child<'a>, WptEntry<'a>>,
+ spec_file_dir: fs::Child<'a>,
+ cases: WptEntry<'a>,
+ ) {
+ let path = spec_file_dir.child("cts.https.html");
+ assert!(split_cases.insert(path, cases).is_none());
+ }
+ {
+ let dld_path =
+ &cts_tests_dir.child("webgpu/api/validation/state/device_lost/destroy");
+ let (spec_file_dir, cases) = cts_cases_by_spec_file_dir
+ .remove_entry(dld_path)
+ .expect("no `device_lost/destroy` tests found; did they move?");
+ insert_with_default_name(
+ &mut split_cases,
+ spec_file_dir,
+ WptEntry {
+ cases,
+ timeout_length: TimeoutLength::Short,
+ },
+ );
+ }
+ for (spec_file_dir, cases) in cts_cases_by_spec_file_dir {
+ insert_with_default_name(
+ &mut split_cases,
+ spec_file_dir,
+ WptEntry {
+ cases,
+ timeout_length: TimeoutLength::Long,
+ },
+ );
+ }
+ split_cases
+ };
+
+ for (path, entry) in split_cases {
+ let dir = path.parent().expect("no parent found for output file path");
+ match create_dir_all(&dir) {
+ Ok(()) => log::trace!("made directory {}", dir.display()),
+ Err(e) => {
+ failed_writing = true;
+ log::error!("{e:#}");
+ continue;
+ }
+ }
+ let file_contents = {
+ let WptEntry {
+ cases,
+ timeout_length,
+ } = entry;
+ let content = match timeout_length {
+ TimeoutLength::Short => &cts_boilerplate_short_timeout,
+ TimeoutLength::Long => &cts_boilerplate_long_timeout,
+ };
+ let mut content = content.as_bytes().to_vec();
+ for meta in cases {
+ content.extend(meta.as_bytes());
+ content.extend(b"\n");
+ }
+ content
+ };
+ match fs::write(&path, &file_contents)
+ .wrap_err_with(|| miette!("failed to write output to path {path:?}"))
+ {
+ Ok(()) => log::debug!(" …wrote {path}"),
+ Err(e) => {
+ failed_writing = true;
+ log::error!("{e:#}");
+ }
+ }
+ }
+ ensure!(
+ !failed_writing,
+ "failed to write one or more WPT test files; see above output for more details"
+ );
+ log::debug!(" …finished writing new WPT test files!");
+
+ log::info!(" …removing {cts_https_html_path}, now that it's been divided up…");
+ remove_file(&cts_https_html_path)?;
+
+ Ok(())
+ })?;
+
+ gecko_ckt.regen_dir(wpt_tests_dir.join("webgpu"), |wpt_webgpu_tests_dir| {
+ log::info!("copying contents of {out_wpt_dir} to {wpt_webgpu_tests_dir}…");
+ copy_dir(&out_wpt_dir, wpt_webgpu_tests_dir)
+ })?;
+
+ log::info!("All done! Now get your CTS _ON_! :)");
+
+ Ok(())
+}
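One step worth isolating from `run` above is the split documented by the large comment diagram: a CTS case path keeps everything up to its second `:` (the suite name plus the `*.spec.ts` file path), `:` and `,` become `/`, and the remainder (the subtest) stays encoded only in the `<meta name=variant …>` line. A standalone sketch of that rule, with a made-up subtest name:

```rust
/// Sketch of the grouping rule used when re-distributing CTS cases into one
/// WPT file per `*.spec.ts` path (mirrors the `subtest_and_later_start_idx`
/// logic in `run`).
fn spec_file_dir(cts_path: &str) -> Option<String> {
    // Index of the second ':'; everything after it names the subtest.
    let split_at = cts_path.match_indices(':').nth(1).map(|(idx, _)| idx)?;
    // `webgpu:api,validation,…` becomes `webgpu/api/validation/…`.
    Some(cts_path[..split_at].replace(|c| matches!(c, ':' | ','), "/"))
}

#[test]
fn splits_like_the_vendor_tool() {
    // `some_subtest` is a hypothetical subtest name; the directory part matches
    // the `device_lost/destroy` path special-cased above.
    assert_eq!(
        spec_file_dir("webgpu:api,validation,state,device_lost,destroy:some_subtest").as_deref(),
        Some("webgpu/api/validation/state/device_lost/destroy"),
    );
}
```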
diff --git a/dom/webgpu/tests/cts/vendor/src/path.rs b/dom/webgpu/tests/cts/vendor/src/path.rs
new file mode 100644
index 0000000000..aa5bae2e6d
--- /dev/null
+++ b/dom/webgpu/tests/cts/vendor/src/path.rs
@@ -0,0 +1,23 @@
+use std::path::{Path, PathBuf};
+
+/// Construct a [`PathBuf`] from individual [`Path`] components.
+///
+/// This is a simple and legible way to construct `PathBuf`s that use the system's native path
+/// separator character. (It's ugly to see paths mixing `\` and `/`.)
+///
+/// # Examples
+///
+/// ```rust
+/// # use std::path::Path;
+/// # use vendor_webgpu_cts::path::join_path;
+/// assert_eq!(&*join_path(["foo", "bar", "baz"]), Path::new("foo/bar/baz"));
+/// ```
+pub(crate) fn join_path<I, P>(iter: I) -> PathBuf
+where
+ I: IntoIterator<Item = P>,
+ P: AsRef<Path>,
+{
+ let mut path = PathBuf::new();
+ path.extend(iter);
+ path
+}
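Two small notes on `join_path`: because this crate is a binary target and the function is `pub(crate)`, the doc example above is illustrative rather than a compiled doctest (doctests are only collected from library targets), and `PathBuf`'s `Extend<P: AsRef<Path>>` impl pushes each component in turn, so the helper is equivalent to the explicit loop below.

```rust
use std::path::PathBuf;

// Equivalent construction by explicit pushes; `PathBuf`'s `Extend` impl does
// exactly this for each `AsRef<Path>` item handed to `join_path`.
fn join_by_push() -> PathBuf {
    let mut path = PathBuf::new();
    for part in ["dom", "webgpu", "tests", "cts"] {
        path.push(part);
    }
    path
}
```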
diff --git a/dom/webgpu/tests/cts/vendor/src/process.rs b/dom/webgpu/tests/cts/vendor/src/process.rs
new file mode 100644
index 0000000000..b36c3b953d
--- /dev/null
+++ b/dom/webgpu/tests/cts/vendor/src/process.rs
@@ -0,0 +1,85 @@
+use std::{
+ ffi::{OsStr, OsString},
+ fmt::{self, Display},
+ iter::once,
+ process::{Command, Output},
+};
+
+use format::lazy_format;
+use miette::{ensure, Context, IntoDiagnostic};
+
+pub(crate) fn which(name: &'static str, desc: &str) -> miette::Result<OsString> {
+ let found = ::which::which(name)
+ .into_diagnostic()
+ .wrap_err(lazy_format!("failed to find `{name}` executable"))?;
+ log::debug!("using {desc} from {}", found.display());
+ Ok(found.file_name().unwrap().to_owned())
+}
+
+pub(crate) struct EasyCommand {
+ inner: Command,
+}
+
+impl EasyCommand {
+ pub(crate) fn new<C>(cmd: C, f: impl FnOnce(&mut Command) -> &mut Command) -> Self
+ where
+ C: AsRef<OsStr>,
+ {
+ let mut cmd = Command::new(cmd);
+ f(&mut cmd);
+ Self { inner: cmd }
+ }
+
+ pub(crate) fn spawn(&mut self) -> miette::Result<()> {
+ log::debug!("spawning {self}…");
+ let status = self
+ .inner
+ .spawn()
+ .into_diagnostic()
+ .wrap_err_with(|| format!("failed to spawn {self}"))?
+ .wait()
+ .into_diagnostic()
+ .wrap_err_with(|| format!("failed to wait for exit code from {self}"))?;
+ log::debug!("{self} returned {:?}", status.code());
+ ensure!(status.success(), "{self} returned {:?}", status.code());
+ Ok(())
+ }
+
+ fn just_stdout(&mut self) -> miette::Result<Vec<u8>> {
+ log::debug!("getting `stdout` output of {self}");
+ let output = self
+ .inner
+ .output()
+ .into_diagnostic()
+ .wrap_err_with(|| format!("failed to execute `{self}`"))?;
+ let Output {
+ status,
+ stdout: _,
+ stderr,
+ } = &output;
+ log::debug!("{self} returned {:?}", status.code());
+ ensure!(
+ status.success(),
+ "{self} returned {:?}; full output: {output:#?}",
+ status.code(),
+ );
+ assert!(stderr.is_empty());
+ Ok(output.stdout)
+ }
+
+ pub(crate) fn just_stdout_utf8(&mut self) -> miette::Result<String> {
+ String::from_utf8(self.just_stdout()?)
+ .into_diagnostic()
+ .wrap_err_with(|| format!("output of {self} was not UTF-8 (!?)"))
+ }
+}
+
+impl Display for EasyCommand {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let Self { inner } = self;
+ let prog = inner.get_program().to_string_lossy();
+ let args = inner.get_args().map(|a| a.to_string_lossy());
+ let shell_words = ::shell_words::join(once(prog).chain(args));
+ write!(f, "`{shell_words}`")
+ }
+}
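`EasyCommand` exists so that every failure quotes the full command line (via `shell_words::join` in its `Display` impl) rather than just an exit code. A usage sketch mirroring how `run` captures `git rev-parse HEAD` (the binary path and `GIT_DIR` value are assumed to come from the caller):

```rust
use std::{ffi::OsStr, path::Path};

use crate::process::EasyCommand;

/// Sketch: capture the commit that `HEAD` points at, the same way the
/// vendoring tool fills `checkout_commit.txt`.
fn head_commit(git_bin: &OsStr, git_dir: &Path) -> miette::Result<String> {
    let mut cmd = EasyCommand::new(git_bin, |cmd| {
        cmd.args(["rev-parse", "HEAD"]).env("GIT_DIR", git_dir)
    });
    // Runs the command, checks the exit status, and decodes stdout as UTF-8;
    // on failure the error message embeds the quoted command line.
    cmd.just_stdout_utf8()
}
```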