Diffstat (limited to 'dom/webgpu/tests/cts/checkout/src/webgpu/util')
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/binary_stream.ts | 213
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/buffer.ts | 23
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/check_contents.ts | 272
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/color_space_conversion.ts | 265
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/command_buffer_maker.ts | 85
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/compare.ts | 472
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/constants.ts | 487
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/conversion.ts | 1635
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/copy_to_texture.ts | 192
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/create_elements.ts | 82
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/device_pool.ts | 414
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/floating_point.ts | 5441
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/math.ts | 2247
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/memory.ts | 25
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/pretty_diff_tables.ts | 51
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/prng.ts | 125
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/reinterpret.ts | 118
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/shader.ts | 196
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/texture.ts | 81
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/base.ts | 243
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/data_generation.ts | 83
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/layout.ts | 371
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/subresource.ts | 68
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texel_data.spec.ts | 334
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texel_data.ts | 980
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texel_view.ts | 201
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texture_ok.spec.ts | 159
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texture_ok.ts | 348
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/webgpu/util/unions.ts | 45
29 files changed, 15256 insertions, 0 deletions
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/binary_stream.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/binary_stream.ts
new file mode 100644
index 0000000000..a6512020e6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/binary_stream.ts
@@ -0,0 +1,213 @@
+import { assert } from '../../common/util/util.js';
+
+import { float16ToUint16, uint16ToFloat16 } from './conversion.js';
+import { align } from './math.js';
+
+/**
+ * BinaryStream is a utility to efficiently encode and decode numbers to / from a Uint8Array.
+ * BinaryStream reads and writes through a single internal DataView, avoiding per-operation
+ * allocation of small typed arrays.
+ */
+export default class BinaryStream {
+ /**
+ * Constructor
+   * @param buffer the buffer to read from / write to. Its byte length must be a multiple of 8.
+ */
+ constructor(buffer: ArrayBufferLike) {
+ this.offset = 0;
+ this.view = new DataView(buffer);
+ }
+
+ /** buffer() returns the stream's buffer sliced to the 8-byte rounded read or write offset */
+ buffer(): Uint8Array {
+ return new Uint8Array(this.view.buffer, 0, align(this.offset, 8));
+ }
+
+ /** writeBool() writes a boolean as 255 or 0 to the buffer at the next byte offset */
+ writeBool(value: boolean) {
+ this.view.setUint8(this.offset++, value ? 255 : 0);
+ }
+
+ /** readBool() reads a boolean from the buffer at the next byte offset */
+ readBool(): boolean {
+ const val = this.view.getUint8(this.offset++);
+ assert(val === 0 || val === 255);
+ return val !== 0;
+ }
+
+ /** writeU8() writes a uint8 to the buffer at the next byte offset */
+ writeU8(value: number) {
+ this.view.setUint8(this.offset++, value);
+ }
+
+ /** readU8() reads a uint8 from the buffer at the next byte offset */
+ readU8(): number {
+ return this.view.getUint8(this.offset++);
+ }
+
+ /** writeU16() writes a uint16 to the buffer at the next 16-bit aligned offset */
+ writeU16(value: number) {
+ this.view.setUint16(this.alignedOffset(2), value, /* littleEndian */ true);
+ }
+
+ /** readU16() reads a uint16 from the buffer at the next 16-bit aligned offset */
+ readU16(): number {
+ return this.view.getUint16(this.alignedOffset(2), /* littleEndian */ true);
+ }
+
+ /** writeU32() writes a uint32 to the buffer at the next 32-bit aligned offset */
+ writeU32(value: number) {
+ this.view.setUint32(this.alignedOffset(4), value, /* littleEndian */ true);
+ }
+
+ /** readU32() reads a uint32 from the buffer at the next 32-bit aligned offset */
+ readU32(): number {
+ return this.view.getUint32(this.alignedOffset(4), /* littleEndian */ true);
+ }
+
+  /** writeI8() writes an int8 to the buffer at the next byte offset */
+ writeI8(value: number) {
+ this.view.setInt8(this.offset++, value);
+ }
+
+  /** readI8() reads an int8 from the buffer at the next byte offset */
+ readI8(): number {
+ return this.view.getInt8(this.offset++);
+ }
+
+  /** writeI16() writes an int16 to the buffer at the next 16-bit aligned offset */
+ writeI16(value: number) {
+ this.view.setInt16(this.alignedOffset(2), value, /* littleEndian */ true);
+ }
+
+  /** readI16() reads an int16 from the buffer at the next 16-bit aligned offset */
+ readI16(): number {
+ return this.view.getInt16(this.alignedOffset(2), /* littleEndian */ true);
+ }
+
+  /** writeI32() writes an int32 to the buffer at the next 32-bit aligned offset */
+ writeI32(value: number) {
+ this.view.setInt32(this.alignedOffset(4), value, /* littleEndian */ true);
+ }
+
+  /** readI32() reads an int32 from the buffer at the next 32-bit aligned offset */
+ readI32(): number {
+ return this.view.getInt32(this.alignedOffset(4), /* littleEndian */ true);
+ }
+
+ /** writeF16() writes a float16 to the buffer at the next 16-bit aligned offset */
+ writeF16(value: number) {
+ this.writeU16(float16ToUint16(value));
+ }
+
+ /** readF16() reads a float16 from the buffer at the next 16-bit aligned offset */
+ readF16(): number {
+ return uint16ToFloat16(this.readU16());
+ }
+
+ /** writeF32() writes a float32 to the buffer at the next 32-bit aligned offset */
+ writeF32(value: number) {
+ this.view.setFloat32(this.alignedOffset(4), value, /* littleEndian */ true);
+ }
+
+ /** readF32() reads a float32 from the buffer at the next 32-bit aligned offset */
+ readF32(): number {
+ return this.view.getFloat32(this.alignedOffset(4), /* littleEndian */ true);
+ }
+
+ /** writeF64() writes a float64 to the buffer at the next 64-bit aligned offset */
+ writeF64(value: number) {
+ this.view.setFloat64(this.alignedOffset(8), value, /* littleEndian */ true);
+ }
+
+ /** readF64() reads a float64 from the buffer at the next 64-bit aligned offset */
+ readF64(): number {
+ return this.view.getFloat64(this.alignedOffset(8), /* littleEndian */ true);
+ }
+
+ /**
+ * writeString() writes a length-prefixed UTF-16 string to the buffer at the next 32-bit aligned
+ * offset
+ */
+ writeString(value: string) {
+ this.writeU32(value.length);
+ for (let i = 0; i < value.length; i++) {
+ this.writeU16(value.charCodeAt(i));
+ }
+ }
+
+ /**
+   * readString() reads a length-prefixed UTF-16 string from the buffer at the next 32-bit aligned
+ * offset
+ */
+ readString(): string {
+ const len = this.readU32();
+ const codes = new Array<number>(len);
+ for (let i = 0; i < len; i++) {
+ codes[i] = this.readU16();
+ }
+ return String.fromCharCode(...codes);
+ }
+
+ /**
+ * writeArray() writes a length-prefixed array of T elements to the buffer at the next 32-bit
+ * aligned offset, using the provided callback to write the individual elements
+ */
+ writeArray<T>(value: readonly T[], writeElement: (s: BinaryStream, element: T) => void) {
+ this.writeU32(value.length);
+ for (const element of value) {
+ writeElement(this, element);
+ }
+ }
+
+ /**
+ * readArray() reads a length-prefixed array of T elements from the buffer at the next 32-bit
+ * aligned offset, using the provided callback to read the individual elements
+ */
+ readArray<T>(readElement: (s: BinaryStream) => T): T[] {
+ const len = this.readU32();
+ const array = new Array<T>(len);
+ for (let i = 0; i < len; i++) {
+ array[i] = readElement(this);
+ }
+ return array;
+ }
+
+ /**
+ * writeCond() writes the boolean condition `cond` to the buffer, then either calls if_true if
+ * `cond` is true, otherwise if_false
+ */
+ writeCond<T, F>(cond: boolean, fns: { if_true: () => T; if_false: () => F }) {
+ this.writeBool(cond);
+ if (cond) {
+ return fns.if_true();
+ } else {
+ return fns.if_false();
+ }
+ }
+
+ /**
+ * readCond() reads a boolean condition from the buffer, then either calls if_true if
+   * the condition was true, otherwise if_false
+ */
+ readCond<T, F>(fns: { if_true: () => T; if_false: () => F }) {
+ if (this.readBool()) {
+ return fns.if_true();
+ } else {
+ return fns.if_false();
+ }
+ }
+
+ /**
+ * alignedOffset() aligns this.offset to `bytes`, then increments this.offset by `bytes`.
+ * @returns the old offset aligned to the next multiple of `bytes`.
+ */
+ private alignedOffset(bytes: number) {
+ const aligned = align(this.offset, bytes);
+ this.offset = aligned + bytes;
+ return aligned;
+ }
+
+ private offset: number;
+ private view: DataView;
+}
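A minimal round-trip sketch of the BinaryStream API above (a usage illustration, not part of the patch); the 64-byte buffer size is an arbitrary choice that satisfies the multiple-of-8 requirement:

    import BinaryStream from './binary_stream.js';

    const out = new BinaryStream(new ArrayBuffer(64));
    out.writeU32(42);
    out.writeString('hi');
    out.writeArray([1, 2, 3], (s, e) => s.writeF32(e));

    // buffer() is sliced to the 8-byte-aligned write offset; reads mirror the write order.
    const src = new BinaryStream(out.buffer().buffer);
    const n = src.readU32();                         // 42
    const text = src.readString();                   // 'hi'
    const values = src.readArray(s => s.readF32());  // [1, 2, 3]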
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/buffer.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/buffer.ts
new file mode 100644
index 0000000000..a7d154a7e6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/buffer.ts
@@ -0,0 +1,23 @@
+import { memcpy, TypedArrayBufferView } from '../../common/util/util.js';
+
+import { align } from './math.js';
+
+/**
+ * Creates a buffer with the contents of some TypedArray.
+ * The buffer size will always be aligned to 4 as we set mappedAtCreation === true when creating the
+ * buffer.
+ */
+export function makeBufferWithContents(
+ device: GPUDevice,
+ dataArray: TypedArrayBufferView,
+ usage: GPUBufferUsageFlags
+): GPUBuffer {
+ const buffer = device.createBuffer({
+ mappedAtCreation: true,
+ size: align(dataArray.byteLength, 4),
+ usage,
+ });
+ memcpy({ src: dataArray }, { dst: buffer.getMappedRange() });
+ buffer.unmap();
+ return buffer;
+}
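A short usage sketch for makeBufferWithContents() (not part of the patch); `device` stands in for a GPUDevice obtained elsewhere in a test:

    import { makeBufferWithContents } from './buffer.js';

    // Upload four u32 values; the buffer is usable as a storage buffer and as a copy source.
    const buffer = makeBufferWithContents(
      device,
      new Uint32Array([1, 2, 3, 4]),
      GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC
    );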
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/check_contents.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/check_contents.ts
new file mode 100644
index 0000000000..298e7ae4a9
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/check_contents.ts
@@ -0,0 +1,272 @@
+// MAINTENANCE_TODO: The "checkThingTrue" naming is confusing; these must be used with `expectOK`
+// or the result is dropped on the floor. Rename these to things like `typedArrayIsOK`(??) to
+// make it clearer.
+// MAINTENANCE_TODO: Also, audit to make sure we aren't dropping any on the floor. Consider a
+// no-ignored-return lint check if we can find one that we can use.
+
+import {
+ assert,
+ ErrorWithExtra,
+ iterRange,
+ range,
+ TypedArrayBufferView,
+ TypedArrayBufferViewConstructor,
+} from '../../common/util/util.js';
+import { Float16Array } from '../../external/petamoriken/float16/float16.js';
+
+import { generatePrettyTable } from './pretty_diff_tables.js';
+
+/** Generate an expected value at `index`, to test for equality with the actual value. */
+export type CheckElementsGenerator = (index: number) => number;
+/** Check whether the actual `value` at `index` is as expected. */
+export type CheckElementsPredicate = (index: number, value: number) => boolean;
+/**
+ * Provides a pretty-printing implementation for a particular CheckElementsPredicate.
+ * This is an array; each element provides info to print an additional row in the error message.
+ */
+export type CheckElementsSupplementalTableRows = Array<{
+ /** Row header. */
+ leftHeader: string;
+ /**
+ * Get the value for a cell in the table with element index `index`.
+ * May be a string or a number; a number will be formatted according to the TypedArray type used.
+ */
+ getValueForCell: (index: number) => number | string;
+}>;
+
+/**
+ * Check whether two `TypedArray`s have equal contents.
+ * Returns `undefined` if the check passes, or an `Error` if not.
+ */
+export function checkElementsEqual(
+ actual: TypedArrayBufferView,
+ expected: TypedArrayBufferView
+): ErrorWithExtra | undefined {
+ assert(actual.constructor === expected.constructor, 'TypedArray type mismatch');
+ assert(actual.length === expected.length, 'size mismatch');
+
+ let failedElementsFirstMaybe: number | undefined = undefined;
+ /** Sparse array with `true` for elements that failed. */
+ const failedElements: (true | undefined)[] = [];
+ for (let i = 0; i < actual.length; ++i) {
+ if (actual[i] !== expected[i]) {
+ failedElementsFirstMaybe ??= i;
+ failedElements[i] = true;
+ }
+ }
+
+ if (failedElementsFirstMaybe === undefined) {
+ return undefined;
+ }
+
+ const failedElementsFirst = failedElementsFirstMaybe;
+ return failCheckElements({
+ actual,
+ failedElements,
+ failedElementsFirst,
+ predicatePrinter: [{ leftHeader: 'expected ==', getValueForCell: index => expected[index] }],
+ });
+}
+
+/**
+ * Check whether each value in a `TypedArray` is between the two corresponding "expected" values
+ * (either `a(i) <= actual[i] <= b(i)` or `a(i) >= actual[i] >= b(i)`).
+ */
+export function checkElementsBetween(
+ actual: TypedArrayBufferView,
+ expected: readonly [CheckElementsGenerator, CheckElementsGenerator]
+): ErrorWithExtra | undefined {
+ const error = checkElementsPassPredicate(
+ actual,
+ (index, value) =>
+ value >= Math.min(expected[0](index), expected[1](index)) &&
+ value <= Math.max(expected[0](index), expected[1](index)),
+ {
+ predicatePrinter: [
+ { leftHeader: 'between', getValueForCell: index => expected[0](index) },
+ { leftHeader: 'and', getValueForCell: index => expected[1](index) },
+ ],
+ }
+ );
+ // If there was an error, extend it with additional extras.
+ return error ? new ErrorWithExtra(error, () => ({ expected })) : undefined;
+}
+
+/**
+ * Check whether each value in a `TypedArray` is equal to one of the two corresponding "expected"
+ * values (either `actual[i] === a[i]` or `actual[i] === b[i]`)
+ */
+export function checkElementsEqualEither(
+ actual: TypedArrayBufferView,
+ expected: readonly [TypedArrayBufferView, TypedArrayBufferView]
+): ErrorWithExtra | undefined {
+ const error = checkElementsPassPredicate(
+ actual,
+ (index, value) => value === expected[0][index] || value === expected[1][index],
+ {
+ predicatePrinter: [
+ { leftHeader: 'either', getValueForCell: index => expected[0][index] },
+ { leftHeader: 'or', getValueForCell: index => expected[1][index] },
+ ],
+ }
+ );
+ // If there was an error, extend it with additional extras.
+ return error ? new ErrorWithExtra(error, () => ({ expected })) : undefined;
+}
+
+/**
+ * Check whether a `TypedArray`'s contents equal the values produced by a generator function.
+ * Returns `undefined` if the check passes, or an `Error` if not.
+ *
+ * ```text
+ * Array had unexpected contents at indices 2 through 19.
+ * Starting at index 1:
+ * actual == 0x: 00 fe ff 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 00
+ * failed -> xx xx xx xx xx xx xx xx xx xx xx xx xx xx xx xx xx
+ * expected == 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * ```
+ *
+ * ```text
+ * Array had unexpected contents at indices 2 through 29.
+ * Starting at index 1:
+ * actual == 0.000 -2.000e+100 -1.000e+100 0.000 1.000e+100 2.000e+100 3.000e+100 4.000e+100 5.000e+100 6.000e+100 7.000e+100 ...
+ * failed -> xx xx xx xx xx xx xx xx xx ...
+ * expected == 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 ...
+ * ```
+ */
+export function checkElementsEqualGenerated(
+ actual: TypedArrayBufferView,
+ generator: CheckElementsGenerator
+): ErrorWithExtra | undefined {
+ let failedElementsFirstMaybe: number | undefined = undefined;
+ /** Sparse array with `true` for elements that failed. */
+ const failedElements: (true | undefined)[] = [];
+ for (let i = 0; i < actual.length; ++i) {
+ if (actual[i] !== generator(i)) {
+ failedElementsFirstMaybe ??= i;
+ failedElements[i] = true;
+ }
+ }
+
+ if (failedElementsFirstMaybe === undefined) {
+ return undefined;
+ }
+
+ const failedElementsFirst = failedElementsFirstMaybe;
+ const error = failCheckElements({
+ actual,
+ failedElements,
+ failedElementsFirst,
+ predicatePrinter: [{ leftHeader: 'expected ==', getValueForCell: index => generator(index) }],
+ });
+ // Add more extras to the error.
+ return new ErrorWithExtra(error, () => ({ generator }));
+}
+
+/**
+ * Check whether a `TypedArray`'s values pass the provided predicate function.
+ * Returns `undefined` if the check passes, or an `Error` if not.
+ */
+export function checkElementsPassPredicate(
+ actual: TypedArrayBufferView,
+ predicate: CheckElementsPredicate,
+ { predicatePrinter }: { predicatePrinter?: CheckElementsSupplementalTableRows }
+): ErrorWithExtra | undefined {
+ let failedElementsFirstMaybe: number | undefined = undefined;
+ /** Sparse array with `true` for elements that failed. */
+ const failedElements: (true | undefined)[] = [];
+ for (let i = 0; i < actual.length; ++i) {
+ if (!predicate(i, actual[i])) {
+ failedElementsFirstMaybe ??= i;
+ failedElements[i] = true;
+ }
+ }
+
+ if (failedElementsFirstMaybe === undefined) {
+ return undefined;
+ }
+
+ const failedElementsFirst = failedElementsFirstMaybe;
+ return failCheckElements({ actual, failedElements, failedElementsFirst, predicatePrinter });
+}
+
+interface CheckElementsFailOpts {
+ actual: TypedArrayBufferView;
+ failedElements: (true | undefined)[];
+ failedElementsFirst: number;
+ predicatePrinter?: CheckElementsSupplementalTableRows;
+}
+
+/**
+ * Implements the failure case of some checkElementsX helpers above. This allows those functions to
+ * implement their checks directly without too many function indirections in between.
+ *
+ * Note: Separating this into its own function significantly speeds up the non-error case in
+ * Chromium (though this may be V8-specific behavior).
+ */
+function failCheckElements({
+ actual,
+ failedElements,
+ failedElementsFirst,
+ predicatePrinter,
+}: CheckElementsFailOpts): ErrorWithExtra {
+ const size = actual.length;
+ const ctor = actual.constructor as TypedArrayBufferViewConstructor;
+ const printAsFloat = ctor === Float16Array || ctor === Float32Array || ctor === Float64Array;
+
+ const failedElementsLast = failedElements.length - 1;
+
+ // Include one extra non-failed element at the beginning and end (if they exist), for context.
+ const printElementsStart = Math.max(0, failedElementsFirst - 1);
+ const printElementsEnd = Math.min(size, failedElementsLast + 2);
+ const printElementsCount = printElementsEnd - printElementsStart;
+
+ const numberToString = printAsFloat
+ ? (n: number) => n.toPrecision(4)
+ : (n: number) => intToPaddedHex(n, { byteLength: ctor.BYTES_PER_ELEMENT });
+ const numberPrefix = printAsFloat ? '' : '0x:';
+
+ const printActual = actual.subarray(printElementsStart, printElementsEnd);
+ const printExpected: Array<Iterable<string | number>> = [];
+ if (predicatePrinter) {
+ for (const { leftHeader, getValueForCell: cell } of predicatePrinter) {
+ printExpected.push(
+ (function* () {
+ yield* [leftHeader, ''];
+ yield* iterRange(printElementsCount, i => cell(printElementsStart + i));
+ })()
+ );
+ }
+ }
+
+ const printFailedValueMarkers = (function* () {
+ yield* ['failed ->', ''];
+ yield* range(printElementsCount, i => (failedElements[printElementsStart + i] ? 'xx' : ''));
+ })();
+
+ const opts = {
+ fillToWidth: 120,
+ numberToString,
+ };
+ const msg = `Array had unexpected contents at indices ${failedElementsFirst} through ${failedElementsLast}.
+ Starting at index ${printElementsStart}:
+${generatePrettyTable(opts, [
+ ['actual ==', numberPrefix, ...printActual],
+ printFailedValueMarkers,
+ ...printExpected,
+])}`;
+ return new ErrorWithExtra(msg, () => ({
+ actual: actual.slice(),
+ }));
+}
+
+// Helper helpers
+
+/** Convert an integral `number` into a hex string, padded to the specified `byteLength`. */
+function intToPaddedHex(number: number, { byteLength }: { byteLength: number }) {
+ assert(Number.isInteger(number), 'number must be integer');
+ let s = Math.abs(number).toString(16);
+ if (byteLength) s = s.padStart(byteLength * 2, '0');
+ if (number < 0) s = '-' + s;
+ return s;
+}
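A brief sketch of how these checks are typically consumed (not part of the patch), assuming `t` is a GPUTest so that `t.expectOK` — the helper referenced by the MAINTENANCE_TODO above — reports any returned error:

    import { checkElementsEqual, checkElementsEqualGenerated } from './check_contents.js';

    const actual = new Uint32Array([0, 1, 2, 3]);

    // Compare against another TypedArray; a return value of undefined means the check passed.
    t.expectOK(checkElementsEqual(actual, new Uint32Array([0, 1, 2, 3])));

    // Compare against a generator function, avoiding allocation of the expected array.
    t.expectOK(checkElementsEqualGenerated(actual, i => i));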
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/color_space_conversion.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/color_space_conversion.ts
new file mode 100644
index 0000000000..a1de0e48ba
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/color_space_conversion.ts
@@ -0,0 +1,265 @@
+import { assert, unreachable } from '../../common/util/util.js';
+
+import { multiplyMatrices } from './math.js';
+
+// These color space conversion function definitions are copied directly from
+// CSS Color Module Level 4 Sample Code: https://drafts.csswg.org/css-color/#color-conversion-code
+// *EXCEPT* the conversion matrices are replaced with exact rational forms computed here:
+// https://github.com/kainino0x/exact_css_xyz_matrices
+// using this Rust crate: https://crates.io/crates/rgb_derivation
+// as described for sRGB on this page: https://mina86.com/2019/srgb-xyz-matrix/
+// but using the numbers from the CSS spec: https://www.w3.org/TR/css-color-4/#predefined
+
+// Sample code for color conversions
+// Conversion can also be done using ICC profiles and a Color Management System
+// For clarity, a library is used for matrix multiplication (multiply-matrices.js)
+
+// sRGB-related functions
+
+/**
+ * convert an array of sRGB values
+ * where in-gamut values are in the range [0 - 1]
+ * to linear light (un-companded) form.
+ * https://en.wikipedia.org/wiki/SRGB
+ * Extended transfer function:
+ * for negative values, linear portion is extended on reflection of axis,
+ * then reflected power function is used.
+ */
+function lin_sRGB(RGB: Array<number>) {
+ return RGB.map(val => {
+ const sign = val < 0 ? -1 : 1;
+ const abs = Math.abs(val);
+
+ if (abs < 0.04045) {
+ return val / 12.92;
+ }
+
+ return sign * Math.pow((abs + 0.055) / 1.055, 2.4);
+ });
+}
+
+/**
+ * convert an array of linear-light sRGB values in the range 0.0-1.0
+ * to gamma corrected form
+ * https://en.wikipedia.org/wiki/SRGB
+ * Extended transfer function:
+ * For negative values, linear portion extends on reflection
+ * of axis, then uses reflected pow below that
+ */
+function gam_sRGB(RGB: Array<number>) {
+ return RGB.map(val => {
+ const sign = val < 0 ? -1 : 1;
+ const abs = Math.abs(val);
+
+ if (abs > 0.0031308) {
+ return sign * (1.055 * Math.pow(abs, 1 / 2.4) - 0.055);
+ }
+
+ return 12.92 * val;
+ });
+}
+
+/**
+ * convert an array of linear-light sRGB values to CIE XYZ
+ * using sRGB's own white, D65 (no chromatic adaptation)
+ */
+function lin_sRGB_to_XYZ(rgb: Array<Array<number>>) {
+ /* prettier-ignore */
+ const M = [
+ [506752 / 1228815, 87881 / 245763, 12673 / 70218],
+ [ 87098 / 409605, 175762 / 245763, 12673 / 175545],
+ [ 7918 / 409605, 87881 / 737289, 1001167 / 1053270],
+ ];
+ return multiplyMatrices(M, rgb);
+}
+
+/**
+ * convert XYZ to linear-light sRGB
+ * using sRGB's own white, D65 (no chromatic adaptation)
+ */
+function XYZ_to_lin_sRGB(XYZ: Array<Array<number>>) {
+ /* prettier-ignore */
+ const M = [
+ [ 12831 / 3959, -329 / 214, -1974 / 3959],
+ [-851781 / 878810, 1648619 / 878810, 36519 / 878810],
+ [ 705 / 12673, -2585 / 12673, 705 / 667],
+ ];
+
+ return multiplyMatrices(M, XYZ);
+}
+
+// display-p3-related functions
+
+/**
+ * convert an array of display-p3 RGB values in the range 0.0 - 1.0
+ * to linear light (un-companded) form.
+ */
+function lin_P3(RGB: Array<number>) {
+ return lin_sRGB(RGB); // same as sRGB
+}
+
+/**
+ * convert an array of linear-light display-p3 RGB in the range 0.0-1.0
+ * to gamma corrected form
+ */
+function gam_P3(RGB: Array<number>) {
+ return gam_sRGB(RGB); // same as sRGB
+}
+
+/**
+ * convert an array of linear-light display-p3 values to CIE XYZ
+ * using display-p3's D65 (no chromatic adaptation)
+ */
+function lin_P3_to_XYZ(rgb: Array<Array<number>>) {
+ /* prettier-ignore */
+ const M = [
+ [608311 / 1250200, 189793 / 714400, 198249 / 1000160],
+ [ 35783 / 156275, 247089 / 357200, 198249 / 2500400],
+ [ 0 / 1, 32229 / 714400, 5220557 / 5000800],
+ ];
+
+ return multiplyMatrices(M, rgb);
+}
+
+/**
+ * convert XYZ to linear-light P3
+ * using display-p3's own white, D65 (no chromatic adaptation)
+ */
+function XYZ_to_lin_P3(XYZ: Array<Array<number>>) {
+ /* prettier-ignore */
+ const M = [
+ [446124 / 178915, -333277 / 357830, -72051 / 178915],
+ [-14852 / 17905, 63121 / 35810, 423 / 17905],
+ [ 11844 / 330415, -50337 / 660830, 316169 / 330415],
+ ];
+
+ return multiplyMatrices(M, XYZ);
+}
+
+/**
+ * @returns the converted pixels in `{R: number, G: number, B: number, A: number}`.
+ *
+ * Follow conversion steps in CSS Color Module Level 4
+ * https://drafts.csswg.org/css-color/#predefined-to-predefined
+ * display-p3 and sRGB share the same white points.
+ */
+export function displayP3ToSrgb(pixel: { R: number; G: number; B: number; A: number }): {
+ R: number;
+ G: number;
+ B: number;
+ A: number;
+} {
+ assert(
+ pixel.R !== undefined && pixel.G !== undefined && pixel.B !== undefined,
+ 'color space conversion requires all of R, G and B components'
+ );
+
+ let rgbVec = [pixel.R, pixel.G, pixel.B];
+ rgbVec = lin_P3(rgbVec);
+ let rgbMatrix = [[rgbVec[0]], [rgbVec[1]], [rgbVec[2]]];
+ rgbMatrix = XYZ_to_lin_sRGB(lin_P3_to_XYZ(rgbMatrix));
+ rgbVec = [rgbMatrix[0][0], rgbMatrix[1][0], rgbMatrix[2][0]];
+ rgbVec = gam_sRGB(rgbVec);
+
+ pixel.R = rgbVec[0];
+ pixel.G = rgbVec[1];
+ pixel.B = rgbVec[2];
+
+ return pixel;
+}
+/**
+ * @returns the converted pixels in `{R: number, G: number, B: number, A: number}`.
+ *
+ * Follow conversion steps in CSS Color Module Level 4
+ * https://drafts.csswg.org/css-color/#predefined-to-predefined
+ * display-p3 and sRGB share the same white points.
+ */
+export function srgbToDisplayP3(pixel: { R: number; G: number; B: number; A: number }): {
+ R: number;
+ G: number;
+ B: number;
+ A: number;
+} {
+ assert(
+ pixel.R !== undefined && pixel.G !== undefined && pixel.B !== undefined,
+ 'color space conversion requires all of R, G and B components'
+ );
+
+ let rgbVec = [pixel.R, pixel.G, pixel.B];
+ rgbVec = lin_sRGB(rgbVec);
+ let rgbMatrix = [[rgbVec[0]], [rgbVec[1]], [rgbVec[2]]];
+ rgbMatrix = XYZ_to_lin_P3(lin_sRGB_to_XYZ(rgbMatrix));
+ rgbVec = [rgbMatrix[0][0], rgbMatrix[1][0], rgbMatrix[2][0]];
+ rgbVec = gam_P3(rgbVec);
+
+ pixel.R = rgbVec[0];
+ pixel.G = rgbVec[1];
+ pixel.B = rgbVec[2];
+
+ return pixel;
+}
+
+type InPlaceColorConversion = (rgba: {
+ R: number;
+ G: number;
+ B: number;
+ readonly A: number; // Alpha never changes during a conversion.
+}) => void;
+
+/**
+ * Returns a function which applies the specified colorspace/premultiplication conversion.
+ * Does not clamp, so may return values outside of the `dstColorSpace` gamut, due to either
+ * color space conversion or alpha premultiplication.
+ */
+export function makeInPlaceColorConversion({
+ srcPremultiplied,
+ dstPremultiplied,
+ srcColorSpace = 'srgb',
+ dstColorSpace = 'srgb',
+}: {
+ srcPremultiplied: boolean;
+ dstPremultiplied: boolean;
+ srcColorSpace?: PredefinedColorSpace;
+ dstColorSpace?: PredefinedColorSpace;
+}): InPlaceColorConversion {
+ const requireColorSpaceConversion = srcColorSpace !== dstColorSpace;
+ const requireUnpremultiplyAlpha =
+ srcPremultiplied && (requireColorSpaceConversion || srcPremultiplied !== dstPremultiplied);
+ const requirePremultiplyAlpha =
+ dstPremultiplied && (requireColorSpaceConversion || srcPremultiplied !== dstPremultiplied);
+
+ return rgba => {
+ assert(rgba.A >= 0.0 && rgba.A <= 1.0, 'rgba.A out of bounds');
+
+ if (requireUnpremultiplyAlpha) {
+ if (rgba.A !== 0.0) {
+ rgba.R /= rgba.A;
+ rgba.G /= rgba.A;
+ rgba.B /= rgba.A;
+ } else {
+ assert(
+ rgba.R === 0.0 && rgba.G === 0.0 && rgba.B === 0.0 && rgba.A === 0.0,
+          'Unpremultiplying with an alpha value of 0.0 requires that all channels equal 0.0'
+ );
+ }
+ }
+ // It's possible RGB are now > 1.
+ // This technically represents colors outside the src gamut, so no clamping yet.
+
+ if (requireColorSpaceConversion) {
+ // WebGPU currently only supports dstColorSpace = 'srgb'.
+ if (srcColorSpace === 'display-p3' && dstColorSpace === 'srgb') {
+ rgba = displayP3ToSrgb(rgba);
+ } else {
+ unreachable();
+ }
+ }
+ // Now RGB may also be negative if the src gamut is larger than the dst gamut.
+
+ if (requirePremultiplyAlpha) {
+ rgba.R *= rgba.A;
+ rgba.G *= rgba.A;
+ rgba.B *= rgba.A;
+ }
+ };
+}
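A minimal sketch of applying the conversion returned by makeInPlaceColorConversion() (not part of the patch); the pixel values are arbitrary:

    import { makeInPlaceColorConversion } from './color_space_conversion.js';

    const convert = makeInPlaceColorConversion({
      srcPremultiplied: false,
      dstPremultiplied: true,
      srcColorSpace: 'display-p3',
      dstColorSpace: 'srgb',
    });

    const rgba = { R: 1.0, G: 0.5, B: 0.0, A: 0.5 };
    convert(rgba); // rgba now holds premultiplied sRGB values, possibly outside [0, 1].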
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/command_buffer_maker.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/command_buffer_maker.ts
new file mode 100644
index 0000000000..8ac663daf5
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/command_buffer_maker.ts
@@ -0,0 +1,85 @@
+import { ResourceState, GPUTestBase } from '../gpu_test.js';
+
+export const kRenderEncodeTypes = ['render pass', 'render bundle'] as const;
+export type RenderEncodeType = (typeof kRenderEncodeTypes)[number];
+export const kProgrammableEncoderTypes = ['compute pass', ...kRenderEncodeTypes] as const;
+export type ProgrammableEncoderType = (typeof kProgrammableEncoderTypes)[number];
+export const kEncoderTypes = ['non-pass', ...kProgrammableEncoderTypes] as const;
+export type EncoderType = (typeof kEncoderTypes)[number];
+
+// Look up the type of the encoder based on `T`. If `T` is a union, this will be too!
+type EncoderByEncoderType<T extends EncoderType> = {
+ 'non-pass': GPUCommandEncoder;
+ 'compute pass': GPUComputePassEncoder;
+ 'render pass': GPURenderPassEncoder;
+ 'render bundle': GPURenderBundleEncoder;
+}[T];
+
+/** See {@link webgpu/api/validation/validation_test.ValidationTest.createEncoder |
+ * GPUTest.createEncoder()}. */
+export class CommandBufferMaker<T extends EncoderType> {
+ /** `GPU___Encoder` for recording commands into. */
+ // Look up the type of the encoder based on `T`. If `T` is a union, this will be too!
+ readonly encoder: EncoderByEncoderType<T>;
+
+ /**
+ * Finish any passes, finish and record any bundles, and finish/return the command buffer. Any
+ * errors are ignored and the GPUCommandBuffer (which may be an error buffer) is returned.
+ */
+ readonly finish: () => GPUCommandBuffer;
+
+ /**
+ * Finish any passes, finish and record any bundles, and finish/return the command buffer.
+ * Checks for validation errors in (only) the appropriate finish call.
+ */
+ readonly validateFinish: (shouldSucceed: boolean) => GPUCommandBuffer;
+
+ /**
+ * Finish the command buffer and submit it. Checks for validation errors in either the submit or
+ * the appropriate finish call, depending on the state of a resource used in the encoding.
+ */
+ readonly validateFinishAndSubmit: (
+ shouldBeValid: boolean,
+ submitShouldSucceedIfValid: boolean
+ ) => void;
+
+ /**
+ * `validateFinishAndSubmit()` based on the state of a resource in the command encoder.
+ * - `finish()` should fail if the resource is 'invalid'.
+ * - Only `submit()` should fail if the resource is 'destroyed'.
+ */
+ readonly validateFinishAndSubmitGivenState: (resourceState: ResourceState) => void;
+
+ constructor(
+ t: GPUTestBase,
+ encoder: EncoderByEncoderType<EncoderType>,
+ finish: () => GPUCommandBuffer
+ ) {
+ // TypeScript introduces an intersection type here where we don't want one.
+ this.encoder = encoder as EncoderByEncoderType<T>;
+ this.finish = finish;
+
+ // Define extra methods like this, otherwise they get unbound when destructured, e.g.:
+ // const { encoder, validateFinishAndSubmit } = t.createEncoder(type);
+ // Alternatively, do not destructure, and call member functions, e.g.:
+ // const encoder = t.createEncoder(type);
+ // encoder.validateFinish(true);
+ this.validateFinish = (shouldSucceed: boolean) => {
+ return t.expectGPUError('validation', this.finish, !shouldSucceed);
+ };
+
+ this.validateFinishAndSubmit = (
+ shouldBeValid: boolean,
+ submitShouldSucceedIfValid: boolean
+ ) => {
+ const commandBuffer = this.validateFinish(shouldBeValid);
+ if (shouldBeValid) {
+ t.expectValidationError(() => t.queue.submit([commandBuffer]), !submitShouldSucceedIfValid);
+ }
+ };
+
+ this.validateFinishAndSubmitGivenState = (resourceState: ResourceState) => {
+ this.validateFinishAndSubmit(resourceState !== 'invalid', resourceState !== 'destroyed');
+ };
+ }
+}
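The constructor comments above already describe the intended call pattern; restated as a compact sketch (not part of the patch), where `t` is a GPUTest and `pipeline` is a placeholder GPURenderPipeline:

    const { encoder, validateFinish } = t.createEncoder('render pass');
    encoder.setPipeline(pipeline);
    encoder.draw(3);
    validateFinish(true); // expect finish() to succeed with no validation error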
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/compare.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/compare.ts
new file mode 100644
index 0000000000..45599d25f6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/compare.ts
@@ -0,0 +1,472 @@
+import { getIsBuildingDataCache } from '../../common/framework/data_cache.js';
+import { Colors } from '../../common/util/colors.js';
+import { assert, unreachable } from '../../common/util/util.js';
+import {
+ deserializeExpectation,
+ serializeExpectation,
+} from '../shader/execution/expression/case_cache.js';
+import { Expectation, toComparator } from '../shader/execution/expression/expression.js';
+
+import BinaryStream from './binary_stream.js';
+import { isFloatValue, Matrix, Scalar, Value, Vector } from './conversion.js';
+import { FPInterval } from './floating_point.js';
+
+/** Comparison describes the result of a Comparator function. */
+export interface Comparison {
+ matched: boolean; // True if the two values were considered a match
+ got: string; // The string representation of the 'got' value (possibly with markup)
+ expected: string; // The string representation of the 'expected' value (possibly with markup)
+}
+
+// All Comparators must be serializable to be used in the CaseCache.
+// New Comparators should add a new entry to SerializableComparatorKind and
+// define functionality in serialize/deserializeComparator as needed.
+//
+// 'value' and 'packed' are internal framework Comparators that exist, so that
+// the whole Case type hierarchy doesn't need to be split into Serializable vs
+// non-Serializable paths. Passing them into the CaseCache will cause a runtime
+// error.
+// 'value' and 'packed' should never be used in .spec.ts files.
+//
+export type SerializableComparatorKind = 'anyOf' | 'skipUndefined' | 'alwaysPass';
+type InternalComparatorKind = 'value' | 'packed';
+export type ComparatorKind = SerializableComparatorKind | InternalComparatorKind;
+export type ComparatorImpl = (got: Value) => Comparison;
+
+/** Comparator is a function that compares whether the provided value matches an expectation. */
+export interface Comparator {
+ compare: ComparatorImpl;
+ kind: ComparatorKind;
+ data?: Expectation | Expectation[] | string;
+}
+
+/** SerializedComparator is an enum of all the possible serialized comparator types. */
+enum SerializedComparatorKind {
+ AnyOf,
+ SkipUndefined,
+ AlwaysPass,
+}
+
+/** serializeComparatorKind() serializes a ComparatorKind to a BinaryStream */
+function serializeComparatorKind(s: BinaryStream, value: ComparatorKind) {
+ switch (value) {
+ case 'anyOf':
+ return s.writeU8(SerializedComparatorKind.AnyOf);
+ case 'skipUndefined':
+ return s.writeU8(SerializedComparatorKind.SkipUndefined);
+ case 'alwaysPass':
+ return s.writeU8(SerializedComparatorKind.AlwaysPass);
+ }
+}
+
+/** deserializeComparatorKind() deserializes a ComparatorKind from a BinaryStream */
+function deserializeComparatorKind(s: BinaryStream): ComparatorKind {
+ const kind = s.readU8();
+ switch (kind) {
+ case SerializedComparatorKind.AnyOf:
+ return 'anyOf';
+ case SerializedComparatorKind.SkipUndefined:
+ return 'skipUndefined';
+ case SerializedComparatorKind.AlwaysPass:
+ return 'alwaysPass';
+ default:
+ unreachable(`invalid serialized ComparatorKind: ${kind}`);
+ }
+}
+
+/**
+ * compares 'got' Value to 'expected' Value, returning the Comparison information.
+ * @param got the Value obtained from the test
+ * @param expected the expected Value
+ * @returns the comparison results
+ */
+// NOTE: This function does not use objectEquals, since that does not handle FP
+// specific corner cases correctly, i.e. that f64/f32/f16 are all considered
+// the same type for this comparison.
+function compareValue(got: Value, expected: Value): Comparison {
+ {
+ // Check types
+ const gTy = got.type;
+ const eTy = expected.type;
+ const bothFloatTypes = isFloatValue(got) && isFloatValue(expected);
+ if (gTy !== eTy && !bothFloatTypes) {
+ return {
+ matched: false,
+ got: `${Colors.red(gTy.toString())}(${got})`,
+ expected: `${Colors.red(eTy.toString())}(${expected})`,
+ };
+ }
+ }
+
+ if (got instanceof Scalar) {
+ const g = got;
+ const e = expected as Scalar;
+ const isFloat = g.type.kind === 'f64' || g.type.kind === 'f32' || g.type.kind === 'f16';
+ const matched =
+ (isFloat && (g.value as number) === (e.value as number)) || (!isFloat && g.value === e.value);
+ return {
+ matched,
+ got: g.toString(),
+ expected: matched ? Colors.green(e.toString()) : Colors.red(e.toString()),
+ };
+ }
+
+ if (got instanceof Vector) {
+ const e = expected as Vector;
+ const gLen = got.elements.length;
+ const eLen = e.elements.length;
+ let matched = gLen === eLen;
+ if (matched) {
+ // Iterating and calling compare instead of just using objectEquals to use the FP specific logic from above
+ matched = got.elements.every((_, i) => {
+ return compare(got.elements[i], e.elements[i]).matched;
+ });
+ }
+
+ return {
+ matched,
+ got: `${got.toString()}`,
+ expected: matched ? Colors.green(e.toString()) : Colors.red(e.toString()),
+ };
+ }
+
+ if (got instanceof Matrix) {
+ const e = expected as Matrix;
+ const gCols = got.type.cols;
+ const eCols = e.type.cols;
+ const gRows = got.type.rows;
+ const eRows = e.type.rows;
+ let matched = gCols === eCols && gRows === eRows;
+ if (matched) {
+ // Iterating and calling compare instead of just using objectEquals to use the FP specific logic from above
+ matched = got.elements.every((c, i) => {
+ return c.every((_, j) => {
+ return compare(got.elements[i][j], e.elements[i][j]).matched;
+ });
+ });
+ }
+
+ return {
+ matched,
+ got: `${got.toString()}`,
+ expected: matched ? Colors.green(e.toString()) : Colors.red(e.toString()),
+ };
+ }
+
+  throw new Error(`unhandled type '${typeof got}'`);
+}
+
+/**
+ * Tests if a 'got' Value is contained in the 'expected' interval, returning the Comparison information.
+ * @param got the Value obtained from the test
+ * @param expected the expected FPInterval
+ * @returns the comparison results
+ */
+function compareInterval(got: Value, expected: FPInterval): Comparison {
+ {
+ // Check type
+ const gTy = got.type;
+ if (!isFloatValue(got)) {
+ return {
+ matched: false,
+ got: `${Colors.red(gTy.toString())}(${got})`,
+ expected: `floating point value`,
+ };
+ }
+ }
+
+ if (got instanceof Scalar) {
+ const g = got.value as number;
+ const matched = expected.contains(g);
+ return {
+ matched,
+ got: g.toString(),
+ expected: matched ? Colors.green(expected.toString()) : Colors.red(expected.toString()),
+ };
+ }
+
+ // Vector results are currently not handled
+  throw new Error(`unhandled type '${typeof got}'`);
+}
+
+/**
+ * Tests if a 'got' Value is contained in the 'expected' vector, returning the Comparison information.
+ * @param got the Value obtained from the test, is expected to be a Vector
+ * @param expected the expected array of FPIntervals, one for each element of the vector
+ * @returns the comparison results
+ */
+function compareVector(got: Value, expected: FPInterval[]): Comparison {
+ // Check got type
+ if (!(got instanceof Vector)) {
+ return {
+ matched: false,
+ got: `${Colors.red((typeof got).toString())}(${got})`,
+ expected: `Vector`,
+ };
+ }
+
+ // Check element type
+ {
+ const gTy = got.type.elementType;
+ if (!isFloatValue(got.elements[0])) {
+ return {
+ matched: false,
+ got: `${Colors.red(gTy.toString())}(${got})`,
+ expected: `floating point elements`,
+ };
+ }
+ }
+
+ if (got.elements.length !== expected.length) {
+ return {
+ matched: false,
+ got: `Vector of ${got.elements.length} elements`,
+ expected: `${expected.length} elements`,
+ };
+ }
+
+ const results = got.elements.map((_, idx) => {
+ const g = got.elements[idx].value as number;
+ return { match: expected[idx].contains(g), index: idx };
+ });
+
+ const failures = results.filter(v => !v.match).map(v => v.index);
+ if (failures.length !== 0) {
+ const expected_string = expected.map((v, idx) =>
+      failures.includes(idx) ? Colors.red(`[${v}]`) : Colors.green(`[${v}]`)
+ );
+ return {
+ matched: false,
+ got: `[${got.elements}]`,
+ expected: `[${expected_string}]`,
+ };
+ }
+
+ return {
+ matched: true,
+ got: `[${got.elements}]`,
+ expected: `[${Colors.green(expected.toString())}]`,
+ };
+}
+
+// Utility to get around not being able to nest template-literal (backtick) blocks
+function convertArrayToString<T>(m: T[]): string {
+ return `[${m.join(',')}]`;
+}
+
+/**
+ * Tests if a 'got' Value is contained in the 'expected' matrix, returning the Comparison information.
+ * @param got the Value obtained from the test, is expected to be a Matrix
+ * @param expected the expected array of array of FPIntervals, representing a column-major matrix
+ * @returns the comparison results
+ */
+function compareMatrix(got: Value, expected: FPInterval[][]): Comparison {
+ // Check got type
+ if (!(got instanceof Matrix)) {
+ return {
+ matched: false,
+ got: `${Colors.red((typeof got).toString())}(${got})`,
+ expected: `Matrix`,
+ };
+ }
+
+ // Check element type
+ {
+ const gTy = got.type.elementType;
+ if (!isFloatValue(got.elements[0][0])) {
+ return {
+ matched: false,
+ got: `${Colors.red(gTy.toString())}(${got})`,
+ expected: `floating point elements`,
+ };
+ }
+ }
+
+ // Check matrix dimensions
+ {
+ const gCols = got.elements.length;
+ const gRows = got.elements[0].length;
+ const eCols = expected.length;
+ const eRows = expected[0].length;
+
+ if (gCols !== eCols || gRows !== eRows) {
+ assert(false);
+ return {
+ matched: false,
+ got: `Matrix of ${gCols}x${gRows} elements`,
+ expected: `Matrix of ${eCols}x${eRows} elements`,
+ };
+ }
+ }
+
+ // Check that got values fall in expected intervals
+ let matched = true;
+ const expected_strings: string[][] = [...Array(got.elements.length)].map(_ => [
+ ...Array(got.elements[0].length),
+ ]);
+
+ got.elements.forEach((c, i) => {
+ c.forEach((r, j) => {
+ const g = r.value as number;
+ if (expected[i][j].contains(g)) {
+ expected_strings[i][j] = Colors.green(`[${expected[i][j]}]`);
+ } else {
+ matched = false;
+ expected_strings[i][j] = Colors.red(`[${expected[i][j]}]`);
+ }
+ });
+ });
+
+ return {
+ matched,
+ got: convertArrayToString(got.elements.map(convertArrayToString)),
+ expected: convertArrayToString(expected_strings.map(convertArrayToString)),
+ };
+}
+
+/**
+ * compare() compares 'got' to 'expected', returning the Comparison information.
+ * @param got the result obtained from the test
+ * @param expected the expected result
+ * @returns the comparison results
+ */
+export function compare(
+ got: Value,
+ expected: Value | FPInterval | FPInterval[] | FPInterval[][]
+): Comparison {
+ if (expected instanceof Array) {
+ if (expected[0] instanceof Array) {
+ expected = expected as FPInterval[][];
+ return compareMatrix(got, expected);
+ } else {
+ expected = expected as FPInterval[];
+ return compareVector(got, expected);
+ }
+ }
+
+ if (expected instanceof FPInterval) {
+ return compareInterval(got, expected);
+ }
+
+ return compareValue(got, expected);
+}
+
+/** @returns a Comparator that checks whether a test value matches any of the provided options */
+export function anyOf(...expectations: Expectation[]): Comparator {
+ const c: Comparator = {
+ compare: (got: Value) => {
+ const failed = new Set<string>();
+ for (const e of expectations) {
+ const cmp = toComparator(e).compare(got);
+ if (cmp.matched) {
+ return cmp;
+ }
+ failed.add(cmp.expected);
+ }
+ return { matched: false, got: got.toString(), expected: [...failed].join(' or ') };
+ },
+ kind: 'anyOf',
+ };
+
+ if (getIsBuildingDataCache()) {
+ // If there's an active DataCache, and it supports storing, then append the
+ // Expectations to the result, so it can be serialized.
+ c.data = expectations;
+ }
+ return c;
+}
+
+/** @returns a Comparator that skips the test if the expectation is undefined */
+export function skipUndefined(expectation: Expectation | undefined): Comparator {
+ const c: Comparator = {
+ compare: (got: Value) => {
+ if (expectation !== undefined) {
+ return toComparator(expectation).compare(got);
+ }
+ return { matched: true, got: got.toString(), expected: `Treating 'undefined' as Any` };
+ },
+ kind: 'skipUndefined',
+ };
+
+ if (expectation !== undefined && getIsBuildingDataCache()) {
+ // If there's an active DataCache, and it supports storing, then append the
+ // Expectation to the result, so it can be serialized.
+ c.data = expectation;
+ }
+ return c;
+}
+
+/**
+ * @returns a Comparator that always passes, used to test situations where the
+ * result of computation doesn't matter, but the fact it finishes is being
+ * tested.
+ */
+export function alwaysPass(msg: string = 'always pass'): Comparator {
+ const c: Comparator = {
+ compare: (got: Value) => {
+ return { matched: true, got: got.toString(), expected: msg };
+ },
+ kind: 'alwaysPass',
+ };
+
+ if (getIsBuildingDataCache()) {
+ // If there's an active DataCache, and it supports storing, then append the
+ // message string to the result, so it can be serialized.
+ c.data = msg;
+ }
+ return c;
+}
+
+/** serializeComparator() serializes a Comparator to a BinaryStream */
+export function serializeComparator(s: BinaryStream, c: Comparator) {
+ serializeComparatorKind(s, c.kind);
+ switch (c.kind) {
+ case 'anyOf':
+ s.writeArray(c.data as Expectation[], serializeExpectation);
+ return;
+ case 'skipUndefined':
+ s.writeCond(c.data !== undefined, {
+ if_true: () => {
+ // defined data
+ serializeExpectation(s, c.data as Expectation);
+ },
+ if_false: () => {
+ // undefined data
+ },
+ });
+ return;
+ case 'alwaysPass': {
+ s.writeString(c.data as string);
+ return;
+ }
+ case 'value':
+ case 'packed': {
+ unreachable(`Serializing '${c.kind}' comparators is not allowed (${c})`);
+ break;
+ }
+ }
+  unreachable(`Unable to serialize comparator '${c}'`);
+}
+
+/** deserializeComparator() deserializes a Comparator from a BinaryStream */
+export function deserializeComparator(s: BinaryStream): Comparator {
+ const kind = deserializeComparatorKind(s);
+ switch (kind) {
+ case 'anyOf':
+ return anyOf(...s.readArray(deserializeExpectation));
+ case 'skipUndefined':
+ return s.readCond({
+ if_true: () => {
+ // defined data
+ return skipUndefined(deserializeExpectation(s));
+ },
+ if_false: () => {
+ // undefined data
+ return skipUndefined(undefined);
+ },
+ });
+ case 'alwaysPass':
+ return alwaysPass(s.readString());
+ }
+  unreachable(`Unable to deserialize comparator '${s}'`);
+}
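A short sketch of building expectations with these comparators (not part of the patch); `f32` is assumed to be the scalar constructor exported by conversion.ts:

    import { anyOf, skipUndefined } from './compare.js';
    import { f32 } from './conversion.js';

    // Accept either of two scalar results.
    const cmp = anyOf(f32(1.0), f32(2.0));
    const result = cmp.compare(f32(2.0)); // { matched: true, ... }

    // Pass unconditionally when no expectation could be computed for a case.
    const lenient = skipUndefined(undefined);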
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/constants.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/constants.ts
new file mode 100644
index 0000000000..5ee819c64e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/constants.ts
@@ -0,0 +1,487 @@
+import {
+ reinterpretU64AsF64,
+ reinterpretF64AsU64,
+ reinterpretU32AsF32,
+ reinterpretU16AsF16,
+} from './reinterpret.js';
+
+export const kBit = {
+ // Limits of int32
+ i32: {
+ positive: {
+ min: 0x0000_0000, // 0
+ max: 0x7fff_ffff, // 2147483647
+ },
+ negative: {
+ min: 0x8000_0000, // -2147483648
+ max: 0x0000_0000, // 0
+ },
+ },
+
+ // Limits of uint32
+ u32: {
+ min: 0x0000_0000,
+ max: 0xffff_ffff,
+ },
+
+ // Limits of f64
+  // Have to be stored as a BigInt hex value, since a JS number is an f64 internally,
+ // so 64-bit hex values are not guaranteed to be precisely representable.
+ f64: {
+ positive: {
+ min: BigInt(0x0010_0000_0000_0000n),
+ max: BigInt(0x7fef_ffff_ffff_ffffn),
+ zero: BigInt(0x0000_0000_0000_0000n),
+ subnormal: {
+ min: BigInt(0x0000_0000_0000_0001n),
+ max: BigInt(0x000f_ffff_ffff_ffffn),
+ },
+ infinity: BigInt(0x7ff0_0000_0000_0000n),
+ nearest_max: BigInt(0x7fef_ffff_ffff_fffen),
+ less_than_one: BigInt(0x3fef_ffff_ffff_ffffn),
+ pi: {
+ whole: BigInt(0x4009_21fb_5444_2d18n),
+ three_quarters: BigInt(0x4002_d97c_7f33_21d2n),
+ half: BigInt(0x3ff9_21fb_5444_2d18n),
+ third: BigInt(0x3ff0_c152_382d_7365n),
+ quarter: BigInt(0x3fe9_21fb_5444_2d18n),
+ sixth: BigInt(0x3fe0_c152_382d_7365n),
+ },
+ e: BigInt(0x4005_bf0a_8b14_5769n),
+ },
+ negative: {
+ max: BigInt(0x8010_0000_0000_0000n),
+ min: BigInt(0xffef_ffff_ffff_ffffn),
+ zero: BigInt(0x8000_0000_0000_0000n),
+ subnormal: {
+ max: BigInt(0x8000_0000_0000_0001n),
+ min: BigInt(0x800f_ffff_ffff_ffffn),
+ },
+ infinity: BigInt(0xfff0_0000_0000_0000n),
+ nearest_min: BigInt(0xffef_ffff_ffff_fffen),
+ less_than_one: BigInt(0xbfef_ffff_ffff_ffffn),
+ pi: {
+ whole: BigInt(0xc009_21fb_5444_2d18n),
+ three_quarters: BigInt(0xc002_d97c_7f33_21d2n),
+ half: BigInt(0xbff9_21fb_5444_2d18n),
+ third: BigInt(0xbff0_c152_382d_7365n),
+ quarter: BigInt(0xbfe9_21fb_5444_2d18n),
+ sixth: BigInt(0xbfe0_c152_382d_7365n),
+ },
+ },
+ max_ulp: BigInt(0x7ca0_0000_0000_0000n),
+ },
+
+ // Limits of f32
+ f32: {
+ positive: {
+ min: 0x0080_0000,
+ max: 0x7f7f_ffff,
+ zero: 0x0000_0000,
+ subnormal: {
+ min: 0x0000_0001,
+ max: 0x007f_ffff,
+ },
+ infinity: 0x7f80_0000,
+ nearest_max: 0x7f7f_fffe,
+ less_than_one: 0x3f7f_ffff,
+ pi: {
+ whole: 0x4049_0fdb,
+ three_quarters: 0x4016_cbe4,
+ half: 0x3fc9_0fdb,
+ third: 0x3f86_0a92,
+ quarter: 0x3f49_0fdb,
+ sixth: 0x3f06_0a92,
+ },
+ e: 0x402d_f854,
+ },
+ negative: {
+ max: 0x8080_0000,
+ min: 0xff7f_ffff,
+ zero: 0x8000_0000,
+ subnormal: {
+ max: 0x8000_0001,
+ min: 0x807f_ffff,
+ },
+ infinity: 0xff80_0000,
+ nearest_min: 0xff7f_fffe,
+ less_than_one: 0xbf7f_ffff,
+ pi: {
+        whole: 0xc049_0fdb,
+ three_quarters: 0xc016_cbe4,
+ half: 0xbfc9_0fdb,
+ third: 0xbf86_0a92,
+ quarter: 0xbf49_0fdb,
+ sixth: 0xbf06_0a92,
+ },
+ },
+ max_ulp: 0x7380_0000,
+ },
+
+ // Limits of f16
+ f16: {
+ positive: {
+ min: 0x0400,
+ max: 0x7bff,
+ zero: 0x0000,
+ subnormal: {
+ min: 0x0001,
+ max: 0x03ff,
+ },
+ infinity: 0x7c00,
+ nearest_max: 0x7bfe,
+ less_than_one: 0x3bff,
+ pi: {
+ whole: 0x4248,
+ three_quarters: 0x40b6,
+ half: 0x3e48,
+ third: 0x3c30,
+ quarter: 0x3a48,
+ sixth: 0x3830,
+ },
+ e: 0x416f,
+ },
+ negative: {
+ max: 0x8400,
+ min: 0xfbff,
+ zero: 0x8000,
+ subnormal: {
+ max: 0x8001,
+ min: 0x83ff,
+ },
+ infinity: 0xfc00,
+ nearest_min: 0xfbfe,
+ less_than_one: 0xbbff,
+ pi: {
+ whole: 0xc248,
+ three_quarters: 0xc0b6,
+ half: 0xbe48,
+ third: 0xbc30,
+ quarter: 0xba48,
+ sixth: 0xb830,
+ },
+ },
+ max_ulp: 0x5000,
+ },
+
+ // Uint32 representation of power(2, n) n = {0, ..., 31}
+ // Stored as a JS `number`
+ // {to0, ..., to31} ie. {0, ..., 31}
+ powTwo: {
+ to0: 0x0000_0001,
+ to1: 0x0000_0002,
+ to2: 0x0000_0004,
+ to3: 0x0000_0008,
+ to4: 0x0000_0010,
+ to5: 0x0000_0020,
+ to6: 0x0000_0040,
+ to7: 0x0000_0080,
+ to8: 0x0000_0100,
+ to9: 0x0000_0200,
+ to10: 0x0000_0400,
+ to11: 0x0000_0800,
+ to12: 0x0000_1000,
+ to13: 0x0000_2000,
+ to14: 0x0000_4000,
+ to15: 0x0000_8000,
+ to16: 0x0001_0000,
+ to17: 0x0002_0000,
+ to18: 0x0004_0000,
+ to19: 0x0008_0000,
+ to20: 0x0010_0000,
+ to21: 0x0020_0000,
+ to22: 0x0040_0000,
+ to23: 0x0080_0000,
+ to24: 0x0100_0000,
+ to25: 0x0200_0000,
+ to26: 0x0400_0000,
+ to27: 0x0800_0000,
+ to28: 0x1000_0000,
+ to29: 0x2000_0000,
+ to30: 0x4000_0000,
+ to31: 0x8000_0000,
+ },
+
+  // Int32 representation of -1 * power(2, n), n = {0, ..., 31}
+ // Stored as a JS `number`
+ // {to0, ..., to31} ie. {0, ..., 31}
+ negPowTwo: {
+ to0: 0xffff_ffff,
+ to1: 0xffff_fffe,
+ to2: 0xffff_fffc,
+ to3: 0xffff_fff8,
+ to4: 0xffff_fff0,
+ to5: 0xffff_ffe0,
+ to6: 0xffff_ffc0,
+ to7: 0xffff_ff80,
+ to8: 0xffff_ff00,
+ to9: 0xffff_fe00,
+ to10: 0xffff_fc00,
+ to11: 0xffff_f800,
+ to12: 0xffff_f000,
+ to13: 0xffff_e000,
+ to14: 0xffff_c000,
+ to15: 0xffff_8000,
+ to16: 0xffff_0000,
+ to17: 0xfffe_0000,
+ to18: 0xfffc_0000,
+ to19: 0xfff8_0000,
+ to20: 0xfff0_0000,
+ to21: 0xffe0_0000,
+ to22: 0xffc0_0000,
+ to23: 0xff80_0000,
+ to24: 0xff00_0000,
+ to25: 0xfe00_0000,
+ to26: 0xfc00_0000,
+ to27: 0xf800_0000,
+ to28: 0xf000_0000,
+ to29: 0xe000_0000,
+ to30: 0xc000_0000,
+ to31: 0x8000_0000,
+ },
+} as const;
+
+export const kValue = {
+ // Limits of i32
+ i32: {
+ positive: {
+ min: 0,
+ max: 2147483647,
+ },
+ negative: {
+ min: -2147483648,
+ max: 0,
+ },
+ },
+
+ // Limits of u32
+ u32: {
+ min: 0,
+ max: 4294967295,
+ },
+
+ // Limits of f64
+ f64: {
+ positive: {
+ min: reinterpretU64AsF64(kBit.f64.positive.min),
+ max: reinterpretU64AsF64(kBit.f64.positive.max),
+ zero: reinterpretU64AsF64(kBit.f64.positive.zero),
+ subnormal: {
+ min: reinterpretU64AsF64(kBit.f64.positive.subnormal.min),
+ max: reinterpretU64AsF64(kBit.f64.positive.subnormal.max),
+ },
+ infinity: reinterpretU64AsF64(kBit.f64.positive.infinity),
+ nearest_max: reinterpretU64AsF64(kBit.f64.positive.nearest_max),
+ less_than_one: reinterpretU64AsF64(kBit.f64.positive.less_than_one),
+ pi: {
+ whole: reinterpretU64AsF64(kBit.f64.positive.pi.whole),
+ three_quarters: reinterpretU64AsF64(kBit.f64.positive.pi.three_quarters),
+ half: reinterpretU64AsF64(kBit.f64.positive.pi.half),
+ third: reinterpretU64AsF64(kBit.f64.positive.pi.third),
+ quarter: reinterpretU64AsF64(kBit.f64.positive.pi.quarter),
+ sixth: reinterpretU64AsF64(kBit.f64.positive.pi.sixth),
+ },
+ e: reinterpretU64AsF64(kBit.f64.positive.e),
+ },
+ negative: {
+ max: reinterpretU64AsF64(kBit.f64.negative.max),
+ min: reinterpretU64AsF64(kBit.f64.negative.min),
+ zero: reinterpretU64AsF64(kBit.f64.negative.zero),
+ subnormal: {
+ max: reinterpretU64AsF64(kBit.f64.negative.subnormal.max),
+ min: reinterpretU64AsF64(kBit.f64.negative.subnormal.min),
+ },
+ infinity: reinterpretU64AsF64(kBit.f64.negative.infinity),
+ nearest_min: reinterpretU64AsF64(kBit.f64.negative.nearest_min),
+      less_than_one: reinterpretU64AsF64(kBit.f64.negative.less_than_one), // -0.9999999999999999
+ pi: {
+ whole: reinterpretU64AsF64(kBit.f64.negative.pi.whole),
+ three_quarters: reinterpretU64AsF64(kBit.f64.negative.pi.three_quarters),
+ half: reinterpretU64AsF64(kBit.f64.negative.pi.half),
+ third: reinterpretU64AsF64(kBit.f64.negative.pi.third),
+ quarter: reinterpretU64AsF64(kBit.f64.negative.pi.quarter),
+ sixth: reinterpretU64AsF64(kBit.f64.negative.pi.sixth),
+ },
+ },
+ max_ulp: reinterpretU64AsF64(kBit.f64.max_ulp),
+ },
+
+ // Limits of f32
+ f32: {
+ positive: {
+ min: reinterpretU32AsF32(kBit.f32.positive.min),
+ max: reinterpretU32AsF32(kBit.f32.positive.max),
+ zero: reinterpretU32AsF32(kBit.f32.positive.zero),
+ subnormal: {
+ min: reinterpretU32AsF32(kBit.f32.positive.subnormal.min),
+ max: reinterpretU32AsF32(kBit.f32.positive.subnormal.max),
+ },
+ infinity: reinterpretU32AsF32(kBit.f32.positive.infinity),
+
+ nearest_max: reinterpretU32AsF32(kBit.f32.positive.nearest_max),
+ less_than_one: reinterpretU32AsF32(kBit.f32.positive.less_than_one),
+ pi: {
+ whole: reinterpretU32AsF32(kBit.f32.positive.pi.whole),
+ three_quarters: reinterpretU32AsF32(kBit.f32.positive.pi.three_quarters),
+ half: reinterpretU32AsF32(kBit.f32.positive.pi.half),
+ third: reinterpretU32AsF32(kBit.f32.positive.pi.third),
+ quarter: reinterpretU32AsF32(kBit.f32.positive.pi.quarter),
+ sixth: reinterpretU32AsF32(kBit.f32.positive.pi.sixth),
+ },
+ e: reinterpretU32AsF32(kBit.f32.positive.e),
+ // The positive pipeline-overridable constant with the smallest magnitude
+ // which when cast to f32 will produce infinity. This comes from WGSL
+ // conversion rules and the rounding rules of WebIDL.
+ first_non_castable_pipeline_override:
+ reinterpretU32AsF32(kBit.f32.positive.max) / 2 + 2 ** 127,
+ // The positive pipeline-overridable constant with the largest magnitude
+ // which when cast to f32 will not produce infinity. This comes from WGSL
+ // conversion rules and the rounding rules of WebIDL
+ last_castable_pipeline_override: reinterpretU64AsF64(
+ reinterpretF64AsU64(reinterpretU32AsF32(kBit.f32.positive.max) / 2 + 2 ** 127) - BigInt(1)
+ ),
+ },
+ negative: {
+ max: reinterpretU32AsF32(kBit.f32.negative.max),
+ min: reinterpretU32AsF32(kBit.f32.negative.min),
+ zero: reinterpretU32AsF32(kBit.f32.negative.zero),
+ subnormal: {
+ max: reinterpretU32AsF32(kBit.f32.negative.subnormal.max),
+ min: reinterpretU32AsF32(kBit.f32.negative.subnormal.min),
+ },
+ infinity: reinterpretU32AsF32(kBit.f32.negative.infinity),
+ nearest_min: reinterpretU32AsF32(kBit.f32.negative.nearest_min),
+ less_than_one: reinterpretU32AsF32(kBit.f32.negative.less_than_one), // -0.999999940395
+ pi: {
+ whole: reinterpretU32AsF32(kBit.f32.negative.pi.whole),
+ three_quarters: reinterpretU32AsF32(kBit.f32.negative.pi.three_quarters),
+ half: reinterpretU32AsF32(kBit.f32.negative.pi.half),
+ third: reinterpretU32AsF32(kBit.f32.negative.pi.third),
+ quarter: reinterpretU32AsF32(kBit.f32.negative.pi.quarter),
+ sixth: reinterpretU32AsF32(kBit.f32.negative.pi.sixth),
+ },
+ // The negative pipeline-overridable constant with the smallest magnitude
+ // which when cast to f32 will produce infinity. This comes from WGSL
+ // conversion rules and the rounding rules of WebIDL.
+ first_non_castable_pipeline_override: -(
+ reinterpretU32AsF32(kBit.f32.positive.max) / 2 +
+ 2 ** 127
+ ),
+ // The negative pipeline-overridable constant with the largest magnitude
+ // which when cast to f32 will not produce infinity. This comes from WGSL
+ // conversion rules and the rounding rules of WebIDL.
+ last_castable_pipeline_override: -reinterpretU64AsF64(
+ reinterpretF64AsU64(reinterpretU32AsF32(kBit.f32.positive.max) / 2 + 2 ** 127) - BigInt(1)
+ ),
+ },
+ max_ulp: reinterpretU32AsF32(kBit.f32.max_ulp),
+ emax: 127,
+ },
+
+ // Limits of i16
+ i16: {
+ positive: {
+ min: 0,
+ max: 32767,
+ },
+ negative: {
+ min: -32768,
+ max: 0,
+ },
+ },
+
+ // Limits of u16
+ u16: {
+ min: 0,
+ max: 65535,
+ },
+
+ // Limits of f16
+ f16: {
+ positive: {
+ min: reinterpretU16AsF16(kBit.f16.positive.min),
+ max: reinterpretU16AsF16(kBit.f16.positive.max),
+ zero: reinterpretU16AsF16(kBit.f16.positive.zero),
+ subnormal: {
+ min: reinterpretU16AsF16(kBit.f16.positive.subnormal.min),
+ max: reinterpretU16AsF16(kBit.f16.positive.subnormal.max),
+ },
+ infinity: reinterpretU16AsF16(kBit.f16.positive.infinity),
+ nearest_max: reinterpretU16AsF16(kBit.f16.positive.nearest_max),
+ less_than_one: reinterpretU16AsF16(kBit.f16.positive.less_than_one),
+ pi: {
+ whole: reinterpretU16AsF16(kBit.f16.positive.pi.whole),
+ three_quarters: reinterpretU16AsF16(kBit.f16.positive.pi.three_quarters),
+ half: reinterpretU16AsF16(kBit.f16.positive.pi.half),
+ third: reinterpretU16AsF16(kBit.f16.positive.pi.third),
+ quarter: reinterpretU16AsF16(kBit.f16.positive.pi.quarter),
+ sixth: reinterpretU16AsF16(kBit.f16.positive.pi.sixth),
+ },
+ e: reinterpretU16AsF16(kBit.f16.positive.e),
+ // The positive pipeline-overridable constant with the smallest magnitude
+ // which when cast to f16 will produce infinity. This comes from WGSL
+ // conversion rules and the rounding rules of WebIDL.
+ first_non_castable_pipeline_override:
+ reinterpretU16AsF16(kBit.f16.positive.max) / 2 + 2 ** 15,
+ // The positive pipeline-overridable constant with the largest magnitude
+ // which when cast to f16 will not produce infinity. This comes from WGSL
+      // conversion rules and the rounding rules of WebIDL.
+ last_castable_pipeline_override: reinterpretU64AsF64(
+ reinterpretF64AsU64(reinterpretU16AsF16(kBit.f16.positive.max) / 2 + 2 ** 15) - BigInt(1)
+ ),
+ },
+ negative: {
+ max: reinterpretU16AsF16(kBit.f16.negative.max),
+ min: reinterpretU16AsF16(kBit.f16.negative.min),
+ zero: reinterpretU16AsF16(kBit.f16.negative.zero),
+ subnormal: {
+ max: reinterpretU16AsF16(kBit.f16.negative.subnormal.max),
+ min: reinterpretU16AsF16(kBit.f16.negative.subnormal.min),
+ },
+ infinity: reinterpretU16AsF16(kBit.f16.negative.infinity),
+ nearest_min: reinterpretU16AsF16(kBit.f16.negative.nearest_min),
+ less_than_one: reinterpretU16AsF16(kBit.f16.negative.less_than_one), // -0.9996
+ pi: {
+ whole: reinterpretU16AsF16(kBit.f16.negative.pi.whole),
+ three_quarters: reinterpretU16AsF16(kBit.f16.negative.pi.three_quarters),
+ half: reinterpretU16AsF16(kBit.f16.negative.pi.half),
+ third: reinterpretU16AsF16(kBit.f16.negative.pi.third),
+ quarter: reinterpretU16AsF16(kBit.f16.negative.pi.quarter),
+ sixth: reinterpretU16AsF16(kBit.f16.negative.pi.sixth),
+ },
+ // The negative pipeline-overridable constant with the smallest magnitude
+ // which when cast to f16 will produce infinity. This comes from WGSL
+ // conversion rules and the rounding rules of WebIDL.
+ first_non_castable_pipeline_override: -(
+ reinterpretU16AsF16(kBit.f16.positive.max) / 2 +
+ 2 ** 15
+ ),
+ // The negative pipeline-overridable constant with the largest magnitude
+ // which when cast to f16 will not produce infinity. This comes from WGSL
+ // conversion rules and the rounding rules of WebIDL.
+ last_castable_pipeline_override: -reinterpretU64AsF64(
+ reinterpretF64AsU64(reinterpretU16AsF16(kBit.f16.positive.max) / 2 + 2 ** 15) - BigInt(1)
+ ),
+ },
+ max_ulp: reinterpretU16AsF16(kBit.f16.max_ulp),
+ emax: 15,
+ },
+
+ // Limits of i8
+ i8: {
+ positive: {
+ min: 0,
+ max: 127,
+ },
+ negative: {
+ min: -128,
+ max: 0,
+ },
+ },
+
+ // Limits of u8
+ u8: {
+ min: 0,
+ max: 255,
+ },
+} as const;
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/conversion.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/conversion.ts
new file mode 100644
index 0000000000..d98367447d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/conversion.ts
@@ -0,0 +1,1635 @@
+import { Colors } from '../../common/util/colors.js';
+import { ROArrayArray } from '../../common/util/types.js';
+import { assert, objectEquals, TypedArrayBufferView, unreachable } from '../../common/util/util.js';
+import { Float16Array } from '../../external/petamoriken/float16/float16.js';
+
+import BinaryStream from './binary_stream.js';
+import { kBit } from './constants.js';
+import {
+ cartesianProduct,
+ clamp,
+ correctlyRoundedF16,
+ isFiniteF16,
+ isSubnormalNumberF16,
+ isSubnormalNumberF32,
+ isSubnormalNumberF64,
+} from './math.js';
+
+/**
+ * Encodes a JS `number` into a "normalized" (unorm/snorm) integer representation with `bits` bits.
+ * Input must be between -1 and 1 if signed, or 0 and 1 if unsigned.
+ *
+ * MAINTENANCE_TODO: See if performance of texel_data improves if this function is pre-specialized
+ * for a particular `bits`/`signed`.
+ */
+export function floatAsNormalizedInteger(float: number, bits: number, signed: boolean): number {
+ if (signed) {
+ assert(float >= -1 && float <= 1, () => `${float} out of bounds of snorm`);
+ const max = Math.pow(2, bits - 1) - 1;
+ return Math.round(float * max);
+ } else {
+ assert(float >= 0 && float <= 1, () => `${float} out of bounds of unorm`);
+ const max = Math.pow(2, bits) - 1;
+ return Math.round(float * max);
+ }
+}
+
+/**
+ * Decodes a JS `number` from a "normalized" (unorm/snorm) integer representation with `bits` bits.
+ * Input must be an integer in the range of the specified unorm/snorm type.
+ */
+export function normalizedIntegerAsFloat(integer: number, bits: number, signed: boolean): number {
+ assert(Number.isInteger(integer));
+ if (signed) {
+ const max = Math.pow(2, bits - 1) - 1;
+ assert(integer >= -max - 1 && integer <= max);
+ if (integer === -max - 1) {
+ integer = -max;
+ }
+ return integer / max;
+ } else {
+ const max = Math.pow(2, bits) - 1;
+ assert(integer >= 0 && integer <= max);
+ return integer / max;
+ }
+}
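+
+// Illustrative values for floatAsNormalizedInteger / normalizedIntegerAsFloat, assuming an
+// 8-bit unorm/snorm encoding:
+//   floatAsNormalizedInteger(0.5, 8, false);   // => 128 (0.5 * 255 = 127.5, rounds up)
+//   normalizedIntegerAsFloat(128, 8, false);   // => 128 / 255 ~= 0.50196
+//   floatAsNormalizedInteger(-1.0, 8, true);   // => -127 (snorm max magnitude is 2^7 - 1)
+//   normalizedIntegerAsFloat(-128, 8, true);   // => -1 (-128 is first snapped to -127)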
+
+/**
+ * Compares 2 numbers. Returns true if the absolute value of their difference is
+ * less than or equal to maxDiff, or if they are both NaN or the same-signed
+ * infinity.
+ */
+export function numbersApproximatelyEqual(a: number, b: number, maxDiff: number = 0) {
+ return (
+ (Number.isNaN(a) && Number.isNaN(b)) ||
+ (a === Number.POSITIVE_INFINITY && b === Number.POSITIVE_INFINITY) ||
+ (a === Number.NEGATIVE_INFINITY && b === Number.NEGATIVE_INFINITY) ||
+ Math.abs(a - b) <= maxDiff
+ );
+}
+
+/**
+ * Once-allocated ArrayBuffer/views to avoid overhead of allocation when converting between numeric formats
+ *
+ * workingData* is shared between multiple functions in this file, so to avoid re-entrancy problems, make sure in
+ * functions that use it that they don't call themselves or other functions that use workingData*.
+ */
+const workingData = new ArrayBuffer(8);
+const workingDataU32 = new Uint32Array(workingData);
+const workingDataU16 = new Uint16Array(workingData);
+const workingDataU8 = new Uint8Array(workingData);
+const workingDataF32 = new Float32Array(workingData);
+const workingDataF16 = new Float16Array(workingData);
+const workingDataI16 = new Int16Array(workingData);
+const workingDataI32 = new Int32Array(workingData);
+const workingDataI8 = new Int8Array(workingData);
+const workingDataF64 = new Float64Array(workingData);
+const workingDataView = new DataView(workingData);
+
+/**
+ * Encodes a JS `number` into an IEEE754 floating point number with the specified number of
+ * sign, exponent, mantissa bits, and exponent bias.
+ * Returns the result as an integer-valued JS `number`.
+ *
+ * Does not handle clamping, overflow, or denormal inputs.
+ * On underflow (result is subnormal), rounds to (signed) zero.
+ *
+ * MAINTENANCE_TODO: Replace usages of this with numberToFloatBits.
+ */
+export function float32ToFloatBits(
+ n: number,
+ signBits: 0 | 1,
+ exponentBits: number,
+ mantissaBits: number,
+ bias: number
+): number {
+ assert(exponentBits <= 8);
+ assert(mantissaBits <= 23);
+
+ if (Number.isNaN(n)) {
+    // NaN = all exponent bits true, 1 or more mantissa bits true
+ return (((1 << exponentBits) - 1) << mantissaBits) | ((1 << mantissaBits) - 1);
+ }
+
+ workingDataView.setFloat32(0, n, true);
+ const bits = workingDataView.getUint32(0, true);
+ // bits (32): seeeeeeeefffffffffffffffffffffff
+
+ // 0 or 1
+ const sign = (bits >> 31) & signBits;
+
+ if (n === 0) {
+ if (sign === 1) {
+ // Handle negative zero.
+ return 1 << (exponentBits + mantissaBits);
+ }
+ return 0;
+ }
+
+ if (signBits === 0) {
+ assert(n >= 0);
+ }
+
+ if (!Number.isFinite(n)) {
+ // Infinity = all exponent bits true, no mantissa bits true
+ // plus the sign bit.
+ return (
+ (((1 << exponentBits) - 1) << mantissaBits) | (n < 0 ? 2 ** (exponentBits + mantissaBits) : 0)
+ );
+ }
+
+ const mantissaBitsToDiscard = 23 - mantissaBits;
+
+ // >> to remove mantissa, & to remove sign, - 127 to remove bias.
+ const exp = ((bits >> 23) & 0xff) - 127;
+
+ // Convert to the new biased exponent.
+ const newBiasedExp = bias + exp;
+ assert(newBiasedExp < 1 << exponentBits, () => `input number ${n} overflows target type`);
+
+ if (newBiasedExp <= 0) {
+ // Result is subnormal or zero. Round to (signed) zero.
+ return sign << (exponentBits + mantissaBits);
+ } else {
+ // Mask only the mantissa, and discard the lower bits.
+ const newMantissa = (bits & 0x7fffff) >> mantissaBitsToDiscard;
+ return (sign << (exponentBits + mantissaBits)) | (newBiasedExp << mantissaBits) | newMantissa;
+ }
+}
+
+/**
+ * Encodes a JS `number` into an IEEE754 16 bit floating point number.
+ * Returns the result as an integer-valued JS `number`.
+ *
+ * Does not handle clamping, overflow, or denormal inputs.
+ * On underflow (result is subnormal), rounds to (signed) zero.
+ */
+export function float32ToFloat16Bits(n: number) {
+ return float32ToFloatBits(n, 1, 5, 10, 15);
+}
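+
+// Illustrative results of float32ToFloat16Bits (extra mantissa bits are simply truncated):
+//   float32ToFloat16Bits(1.0);  // => 0x3c00
+//   float32ToFloat16Bits(-2.0); // => 0xc000
+//   float32ToFloat16Bits(NaN);  // => 0x7fff (exponent all ones, non-zero mantissa)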
+
+/**
+ * Decodes an IEEE754 16 bit floating point number into a JS `number` and returns.
+ */
+export function float16BitsToFloat32(float16Bits: number): number {
+ return floatBitsToNumber(float16Bits, kFloat16Format);
+}
+
+type FloatFormat = { signed: 0 | 1; exponentBits: number; mantissaBits: number; bias: number };
+
+/** FloatFormat defining IEEE754 32-bit float. */
+export const kFloat32Format = { signed: 1, exponentBits: 8, mantissaBits: 23, bias: 127 } as const;
+/** FloatFormat defining IEEE754 16-bit float. */
+export const kFloat16Format = { signed: 1, exponentBits: 5, mantissaBits: 10, bias: 15 } as const;
+/** FloatFormat for 9 bit mantissa, 5 bit exponent unsigned float */
+export const kUFloat9e5Format = { signed: 0, exponentBits: 5, mantissaBits: 9, bias: 15 } as const;
+
+/** Bitcast u32 (represented as integer Number) to f32 (represented as floating-point Number). */
+export function float32BitsToNumber(bits: number): number {
+ workingDataU32[0] = bits;
+ return workingDataF32[0];
+}
+/** Bitcast f32 (represented as floating-point Number) to u32 (represented as integer Number). */
+export function numberToFloat32Bits(number: number): number {
+ workingDataF32[0] = number;
+ return workingDataU32[0];
+}
+
+/**
+ * Decodes an IEEE754 float with the supplied format specification into a JS number.
+ *
+ * The format MUST be no larger than a 32-bit float.
+ */
+export function floatBitsToNumber(bits: number, fmt: FloatFormat): number {
+ // Pad the provided bits out to f32, then convert to a `number` with the wrong bias.
+ // E.g. for f16 to f32:
+ // - f16: S EEEEE MMMMMMMMMM
+ // ^ 000^^^^^ ^^^^^^^^^^0000000000000
+ // - f32: S eeeEEEEE MMMMMMMMMMmmmmmmmmmmmmm
+
+ const kNonSignBits = fmt.exponentBits + fmt.mantissaBits;
+ const kNonSignBitsMask = (1 << kNonSignBits) - 1;
+ const exponentAndMantissaBits = bits & kNonSignBitsMask;
+ const exponentMask = ((1 << fmt.exponentBits) - 1) << fmt.mantissaBits;
+ const infinityOrNaN = (bits & exponentMask) === exponentMask;
+ if (infinityOrNaN) {
+ const mantissaMask = (1 << fmt.mantissaBits) - 1;
+ const signBit = 2 ** kNonSignBits;
+ const isNegative = (bits & signBit) !== 0;
+ return bits & mantissaMask
+ ? Number.NaN
+ : isNegative
+ ? Number.NEGATIVE_INFINITY
+ : Number.POSITIVE_INFINITY;
+ }
+ let f32BitsWithWrongBias =
+ exponentAndMantissaBits << (kFloat32Format.mantissaBits - fmt.mantissaBits);
+ f32BitsWithWrongBias |= (bits << (31 - kNonSignBits)) & 0x8000_0000;
+ const numberWithWrongBias = float32BitsToNumber(f32BitsWithWrongBias);
+ return numberWithWrongBias * 2 ** (kFloat32Format.bias - fmt.bias);
+}
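+
+// Illustrative results of floatBitsToNumber for the f16 format defined above:
+//   floatBitsToNumber(0x3c00, kFloat16Format); // => 1
+//   floatBitsToNumber(0xc000, kFloat16Format); // => -2
+//   floatBitsToNumber(0x7c00, kFloat16Format); // => Infinity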
+
+/**
+ * Convert ufloat9e5 bits from rgb9e5ufloat to a JS number
+ *
+ * The difference between `floatBitsToNumber` and `ufloatBitsToNumber`
+ * is that the latter doesn't use an implicit leading bit:
+ *
+ * floatBitsToNumber = 2^(exponent - bias) * (1 + mantissa / 2 ^ numMantissaBits)
+ * ufloatM9E5BitsToNumber = 2^(exponent - bias) * (mantissa / 2 ^ numMantissaBits)
+ * = 2^(exponent - bias - numMantissaBits) * mantissa
+ */
+export function ufloatM9E5BitsToNumber(bits: number, fmt: FloatFormat): number {
+ const exponent = bits >> fmt.mantissaBits;
+ const mantissaMask = (1 << fmt.mantissaBits) - 1;
+ const mantissa = bits & mantissaMask;
+ return mantissa * 2 ** (exponent - fmt.bias - fmt.mantissaBits);
+}
+
+/**
+ * Encodes a JS `number` into an IEEE754 floating point number with the specified format.
+ * Returns the result as an integer-valued JS `number`.
+ *
+ * Does not handle clamping, overflow, or denormal inputs.
+ * On underflow (result is subnormal), rounds to (signed) zero.
+ */
+export function numberToFloatBits(number: number, fmt: FloatFormat): number {
+ return float32ToFloatBits(number, fmt.signed, fmt.exponentBits, fmt.mantissaBits, fmt.bias);
+}
+
+/**
+ * Given a floating point number (as an integer representing its bits), computes how many ULPs it is
+ * from zero.
+ *
+ * Subnormal numbers are skipped, so that 0 is one ULP from the minimum normal number.
+ * Subnormal values are flushed to 0.
+ * Positive and negative 0 are both considered to be 0 ULPs from 0.
+ */
+export function floatBitsToNormalULPFromZero(bits: number, fmt: FloatFormat): number {
+ const mask_sign = fmt.signed << (fmt.exponentBits + fmt.mantissaBits);
+ const mask_expt = ((1 << fmt.exponentBits) - 1) << fmt.mantissaBits;
+ const mask_mant = (1 << fmt.mantissaBits) - 1;
+ const mask_rest = mask_expt | mask_mant;
+
+ assert(fmt.exponentBits + fmt.mantissaBits <= 31);
+
+ const sign = bits & mask_sign ? -1 : 1;
+ const rest = bits & mask_rest;
+ const subnormal_or_zero = (bits & mask_expt) === 0;
+ const infinity_or_nan = (bits & mask_expt) === mask_expt;
+ assert(!infinity_or_nan, 'no ulp representation for infinity/nan');
+
+ // The first normal number is mask_mant+1, so subtract mask_mant to make min_normal - zero = 1ULP.
+ const abs_ulp_from_zero = subnormal_or_zero ? 0 : rest - mask_mant;
+ return sign * abs_ulp_from_zero;
+}
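+
+// Illustrative results of floatBitsToNormalULPFromZero for f16 (0x0400 is the smallest
+// positive normal f16):
+//   floatBitsToNormalULPFromZero(0x0400, kFloat16Format); // => 1
+//   floatBitsToNormalULPFromZero(0x0001, kFloat16Format); // => 0 (subnormal, flushed)
+//   floatBitsToNormalULPFromZero(0x8400, kFloat16Format); // => -1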
+
+/**
+ * Encodes three JS `number` values into RGB9E5, returned as an integer-valued JS `number`.
+ *
+ * RGB9E5 represents three partial-precision floating-point numbers encoded into a single 32-bit
+ * value all sharing the same 5-bit exponent.
+ * There is no sign bit, and there is a shared 5-bit biased (15) exponent and a 9-bit
+ * mantissa for each channel. The mantissa does NOT have an implicit leading "1.",
+ * and instead has an implicit leading "0.".
+ *
+ * @see https://registry.khronos.org/OpenGL/extensions/EXT/EXT_texture_shared_exponent.txt
+ */
+export function packRGB9E5UFloat(r: number, g: number, b: number): number {
+ const N = 9; // number of mantissa bits
+ const Emax = 31; // max exponent
+ const B = 15; // exponent bias
+ const sharedexp_max = (((1 << N) - 1) / (1 << N)) * 2 ** (Emax - B);
+ const red_c = clamp(r, { min: 0, max: sharedexp_max });
+ const green_c = clamp(g, { min: 0, max: sharedexp_max });
+ const blue_c = clamp(b, { min: 0, max: sharedexp_max });
+ const max_c = Math.max(red_c, green_c, blue_c);
+ const exp_shared_p = Math.max(-B - 1, Math.floor(Math.log2(max_c))) + 1 + B;
+ const max_s = Math.floor(max_c / 2 ** (exp_shared_p - B - N) + 0.5);
+ const exp_shared = max_s === 1 << N ? exp_shared_p + 1 : exp_shared_p;
+ const scalar = 1 / 2 ** (exp_shared - B - N);
+ const red_s = Math.floor(red_c * scalar + 0.5);
+ const green_s = Math.floor(green_c * scalar + 0.5);
+ const blue_s = Math.floor(blue_c * scalar + 0.5);
+ assert(red_s >= 0 && red_s <= 0b111111111);
+ assert(green_s >= 0 && green_s <= 0b111111111);
+ assert(blue_s >= 0 && blue_s <= 0b111111111);
+ assert(exp_shared >= 0 && exp_shared <= 0b11111);
+ return ((exp_shared << 27) | (blue_s << 18) | (green_s << 9) | red_s) >>> 0;
+}
+
+/**
+ * Decodes a RGB9E5 encoded color.
+ * @see packRGB9E5UFloat
+ */
+export function unpackRGB9E5UFloat(encoded: number): { R: number; G: number; B: number } {
+ const N = 9; // number of mantissa bits
+ const B = 15; // exponent bias
+ const red_s = (encoded >>> 0) & 0b111111111;
+ const green_s = (encoded >>> 9) & 0b111111111;
+ const blue_s = (encoded >>> 18) & 0b111111111;
+ const exp_shared = (encoded >>> 27) & 0b11111;
+ const exp = Math.pow(2, exp_shared - B - N);
+ return {
+ R: exp * red_s,
+ G: exp * green_s,
+ B: exp * blue_s,
+ };
+}
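+
+// Round-trip sketch for the RGB9E5 pack/unpack pair above: 1.0 encodes with shared
+// exponent 16 and a 9-bit mantissa of 256.
+//   packRGB9E5UFloat(1.0, 0.0, 0.0); // => 0x80000100
+//   unpackRGB9E5UFloat(0x80000100);  // => { R: 1, G: 0, B: 0 }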
+
+/**
+ * Quantizes two f32s to f16 and then packs them in a u32
+ *
+ * This should implement the same behaviour as the builtin `pack2x16float` from
+ * WGSL.
+ *
+ * Caller is responsible for ensuring inputs are f32s
+ *
+ * @param x first f32 to be packed
+ * @param y second f32 to be packed
+ * @returns an array of possible results for pack2x16float. Elements are either
+ * a number or undefined.
+ * undefined indicates that any value is valid, since the input went
+ * out of bounds.
+ */
+export function pack2x16float(x: number, y: number): (number | undefined)[] {
+ // Generates all possible valid u16 bit fields for a given f32 to f16 conversion.
+ // Assumes FTZ for both the f32 and f16 value is allowed.
+ const generateU16s = (n: number): readonly number[] => {
+ let contains_subnormals = isSubnormalNumberF32(n);
+ const n_f16s = correctlyRoundedF16(n);
+ contains_subnormals ||= n_f16s.some(isSubnormalNumberF16);
+
+ const n_u16s = n_f16s.map(f16 => {
+ workingDataF16[0] = f16;
+ return workingDataU16[0];
+ });
+
+ const contains_poszero = n_u16s.some(u => u === kBit.f16.positive.zero);
+ const contains_negzero = n_u16s.some(u => u === kBit.f16.negative.zero);
+ if (!contains_negzero && (contains_poszero || contains_subnormals)) {
+ n_u16s.push(kBit.f16.negative.zero);
+ }
+
+ if (!contains_poszero && (contains_negzero || contains_subnormals)) {
+ n_u16s.push(kBit.f16.positive.zero);
+ }
+
+ return n_u16s;
+ };
+
+ if (!isFiniteF16(x) || !isFiniteF16(y)) {
+ // This indicates any value is valid, so it isn't worth bothering
+ // calculating the more restrictive possibilities.
+ return [undefined];
+ }
+
+ const results = new Array<number>();
+ for (const p of cartesianProduct(generateU16s(x), generateU16s(y))) {
+ assert(p.length === 2, 'cartesianProduct of 2 arrays returned an entry with not 2 elements');
+ workingDataU16[0] = p[0];
+ workingDataU16[1] = p[1];
+ results.push(workingDataU32[0]);
+ }
+
+ return results;
+}
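+
+// Illustrative pack2x16float result (assuming the usual little-endian typed-array layout,
+// x lands in the low 16 bits):
+//   pack2x16float(1.0, 2.0);   // => [0x40003c00]
+//   pack2x16float(1.0, 65536); // => [undefined] (65536 is outside the finite f16 range)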
+
+/**
+ * Converts two normalized f32s to i16s and then packs them in a u32
+ *
+ * This should implement the same behaviour as the builtin `pack2x16snorm` from
+ * WGSL.
+ *
+ * Caller is responsible for ensuring inputs are normalized f32s
+ *
+ * @param x first f32 to be packed
+ * @param y second f32 to be packed
+ * @returns a number that is the expected result of pack2x16snorm.
+ */
+export function pack2x16snorm(x: number, y: number): number {
+ // Converts f32 to i16 via the pack2x16snorm formula.
+ // FTZ is not explicitly handled, because all subnormals will produce a value
+ // between 0 and 1, but significantly away from the edges, so floor goes to 0.
+ const generateI16 = (n: number): number => {
+ return Math.floor(0.5 + 32767 * Math.min(1, Math.max(-1, n)));
+ };
+
+ workingDataI16[0] = generateI16(x);
+ workingDataI16[1] = generateI16(y);
+
+ return workingDataU32[0];
+}
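+
+// Illustrative pack2x16snorm result (x in the low 16 bits): 1.0 -> 0x7fff, -1.0 -> 0x8001.
+//   pack2x16snorm(1.0, -1.0); // => 0x80017fff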
+
+/**
+ * Converts two normalized f32s to u16s and then packs them in a u32
+ *
+ * This should implement the same behaviour as the builtin `pack2x16unorm` from
+ * WGSL.
+ *
+ * Caller is responsible for ensuring inputs are normalized f32s
+ *
+ * @param x first f32 to be packed
+ * @param y second f32 to be packed
+ * @returns a number that is the expected result of pack2x16unorm.
+ */
+export function pack2x16unorm(x: number, y: number): number {
+ // Converts f32 to u16 via the pack2x16unorm formula.
+ // FTZ is not explicitly handled, because all subnormals will produce a value
+  // at or just above 0.5 (well below 1), so floor goes to 0.
+ const generateU16 = (n: number): number => {
+ return Math.floor(0.5 + 65535 * Math.min(1, Math.max(0, n)));
+ };
+
+ workingDataU16[0] = generateU16(x);
+ workingDataU16[1] = generateU16(y);
+
+ return workingDataU32[0];
+}
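+
+// Illustrative pack2x16unorm result; inputs are clamped to [0, 1] before conversion:
+//   pack2x16unorm(0.5, -1.0); // => 0x00008000 (0.5 -> 0x8000, -1.0 clamps to 0)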
+
+/**
+ * Converts four normalized f32s to i8s and then packs them in a u32
+ *
+ * This should implement the same behaviour as the builtin `pack4x8snorm` from
+ * WGSL.
+ *
+ * Caller is responsible for ensuring inputs are normalized f32s
+ *
+ * @param vals four f32s to be packed
+ * @returns a number that is the expected result of pack4x8snorm.
+ */
+export function pack4x8snorm(...vals: [number, number, number, number]): number {
+  // Converts f32 to i8 via the pack4x8snorm formula.
+ // FTZ is not explicitly handled, because all subnormals will produce a value
+ // between 0 and 1, so floor goes to 0.
+ const generateI8 = (n: number): number => {
+ return Math.floor(0.5 + 127 * Math.min(1, Math.max(-1, n)));
+ };
+
+ for (const idx in vals) {
+ workingDataI8[idx] = generateI8(vals[idx]);
+ }
+
+ return workingDataU32[0];
+}
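+
+// Illustrative pack4x8snorm result (the first argument lands in the lowest byte):
+//   pack4x8snorm(1.0, -1.0, 0.0, 0.5); // => 0x4000817f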
+
+/**
+ * Converts four normalized f32s to u8s and then packs them in a u32
+ *
+ * This should implement the same behaviour as the builtin `pack4x8unorm` from
+ * WGSL.
+ *
+ * Caller is responsible for ensuring inputs are normalized f32s
+ *
+ * @param vals four f32s to be packed
+ * @returns a number that is the expected result of pack4x8unorm.
+ */
+export function pack4x8unorm(...vals: [number, number, number, number]): number {
+ // Converts f32 to u8 via the pack4x8unorm formula.
+ // FTZ is not explicitly handled, because all subnormals will produce a value
+  // at or just above 0.5 (well below 1), so floor goes to 0.
+ const generateU8 = (n: number): number => {
+ return Math.floor(0.5 + 255 * Math.min(1, Math.max(0, n)));
+ };
+
+ for (const idx in vals) {
+ workingDataU8[idx] = generateU8(vals[idx]);
+ }
+
+ return workingDataU32[0];
+}
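+
+// Illustrative pack4x8unorm result (the first argument lands in the lowest byte):
+//   pack4x8unorm(1.0, 0.0, 0.5, 1.0); // => 0xff8000ff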
+
+/**
+ * Asserts that a number is within the representable range (inclusive) of the integer type with the
+ * specified number of bits and signedness.
+ *
+ * MAINTENANCE_TODO: Assert isInteger? Then this function "asserts that a number is representable"
+ * by the type.
+ */
+export function assertInIntegerRange(n: number, bits: number, signed: boolean): void {
+ if (signed) {
+ const min = -Math.pow(2, bits - 1);
+ const max = Math.pow(2, bits - 1) - 1;
+ assert(n >= min && n <= max);
+ } else {
+ const max = Math.pow(2, bits) - 1;
+ assert(n >= 0 && n <= max);
+ }
+}
+
+/**
+ * Converts a linear value into a "gamma"-encoded value using the sRGB-clamped transfer function.
+ */
+export function gammaCompress(n: number): number {
+ n = n <= 0.0031308 ? (323 * n) / 25 : (211 * Math.pow(n, 5 / 12) - 11) / 200;
+ return clamp(n, { min: 0, max: 1 });
+}
+
+/**
+ * Converts a "gamma"-encoded value into a linear value using the sRGB-clamped transfer function.
+ */
+export function gammaDecompress(n: number): number {
+ n = n <= 0.04045 ? (n * 25) / 323 : Math.pow((200 * n + 11) / 211, 12 / 5);
+ return clamp(n, { min: 0, max: 1 });
+}
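+
+// Approximate values for the sRGB transfer functions above (rounded to 4 decimal places):
+//   gammaCompress(0.5);   // ~0.7354
+//   gammaDecompress(0.5); // ~0.2140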
+
+/** Converts a 32-bit float value to a 32-bit unsigned integer value */
+export function float32ToUint32(f32: number): number {
+ workingDataF32[0] = f32;
+ return workingDataU32[0];
+}
+
+/** Converts a 32-bit unsigned integer value to a 32-bit float value */
+export function uint32ToFloat32(u32: number): number {
+ workingDataU32[0] = u32;
+ return workingDataF32[0];
+}
+
+/** Converts a 32-bit float value to a 32-bit signed integer value */
+export function float32ToInt32(f32: number): number {
+ workingDataF32[0] = f32;
+ return workingDataI32[0];
+}
+
+/** Converts a 32-bit unsigned integer value to a 32-bit signed integer value */
+export function uint32ToInt32(u32: number): number {
+ workingDataU32[0] = u32;
+ return workingDataI32[0];
+}
+
+/** Converts a 16-bit float value to a 16-bit unsigned integer value */
+export function float16ToUint16(f16: number): number {
+ workingDataF16[0] = f16;
+ return workingDataU16[0];
+}
+
+/** Converts a 16-bit unsigned integer value to a 16-bit float value */
+export function uint16ToFloat16(u16: number): number {
+ workingDataU16[0] = u16;
+ return workingDataF16[0];
+}
+
+/** Converts a 16-bit float value to a 16-bit signed integer value */
+export function float16ToInt16(f16: number): number {
+ workingDataF16[0] = f16;
+ return workingDataI16[0];
+}
+
+/** A type of number representable by Scalar. */
+export type ScalarKind =
+ | 'abstract-float'
+ | 'f64'
+ | 'f32'
+ | 'f16'
+ | 'u32'
+ | 'u16'
+ | 'u8'
+ | 'i32'
+ | 'i16'
+ | 'i8'
+ | 'bool';
+
+/** ScalarType describes the type of WGSL Scalar. */
+export class ScalarType {
+ readonly kind: ScalarKind; // The named type
+ readonly _size: number; // In bytes
+ readonly read: (buf: Uint8Array, offset: number) => Scalar; // reads a scalar from a buffer
+
+ constructor(kind: ScalarKind, size: number, read: (buf: Uint8Array, offset: number) => Scalar) {
+ this.kind = kind;
+ this._size = size;
+ this.read = read;
+ }
+
+ public toString(): string {
+ return this.kind;
+ }
+
+ public get size(): number {
+ return this._size;
+ }
+
+ /** Constructs a Scalar of this type with `value` */
+ public create(value: number): Scalar {
+ switch (this.kind) {
+ case 'abstract-float':
+ return abstractFloat(value);
+ case 'f64':
+ return f64(value);
+ case 'f32':
+ return f32(value);
+ case 'f16':
+ return f16(value);
+ case 'u32':
+ return u32(value);
+ case 'u16':
+ return u16(value);
+ case 'u8':
+ return u8(value);
+ case 'i32':
+ return i32(value);
+ case 'i16':
+ return i16(value);
+ case 'i8':
+ return i8(value);
+ case 'bool':
+ return bool(value !== 0);
+ }
+ }
+}
+
+/** VectorType describes the type of WGSL Vector. */
+export class VectorType {
+ readonly width: number; // Number of elements in the vector
+ readonly elementType: ScalarType; // Element type
+
+ constructor(width: number, elementType: ScalarType) {
+ this.width = width;
+ this.elementType = elementType;
+ }
+
+ /**
+ * @returns a vector constructed from the values read from the buffer at the
+ * given byte offset
+ */
+ public read(buf: Uint8Array, offset: number): Vector {
+ const elements: Array<Scalar> = [];
+ for (let i = 0; i < this.width; i++) {
+ elements[i] = this.elementType.read(buf, offset);
+ offset += this.elementType.size;
+ }
+ return new Vector(elements);
+ }
+
+ public toString(): string {
+ return `vec${this.width}<${this.elementType}>`;
+ }
+
+ public get size(): number {
+ return this.elementType.size * this.width;
+ }
+
+ /** Constructs a Vector of this type with the given values */
+ public create(value: number | readonly number[]): Vector {
+ if (value instanceof Array) {
+ assert(value.length === this.width);
+ } else {
+ value = Array(this.width).fill(value);
+ }
+ return new Vector(value.map(v => this.elementType.create(v)));
+ }
+}
+
+// Maps a string representation of a vector type to vector type.
+const vectorTypes = new Map<string, VectorType>();
+
+export function TypeVec(width: number, elementType: ScalarType): VectorType {
+  const key = `${elementType.toString()} ${width}`;
+ let ty = vectorTypes.get(key);
+ if (ty !== undefined) {
+ return ty;
+ }
+ ty = new VectorType(width, elementType);
+ vectorTypes.set(key, ty);
+ return ty;
+}
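+
+// TypeVec memoizes VectorType instances, so repeated lookups return the same object:
+//   TypeVec(3, TypeF32) === TypeVec(3, TypeF32); // => true
+//   TypeVec(3, TypeF32).create([1, 2, 3]);       // => a vec3<f32> Vector holding 1.0, 2.0, 3.0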
+
+/** MatrixType describes the type of WGSL Matrix. */
+export class MatrixType {
+ readonly cols: number; // Number of columns in the Matrix
+ readonly rows: number; // Number of elements per column in the Matrix
+ readonly elementType: ScalarType; // Element type
+
+ constructor(cols: number, rows: number, elementType: ScalarType) {
+ this.cols = cols;
+ this.rows = rows;
+ assert(
+ elementType.kind === 'f32' ||
+ elementType.kind === 'f16' ||
+ elementType.kind === 'abstract-float',
+ "MatrixType can only have elementType of 'f32' or 'f16' or 'abstract-float'"
+ );
+ this.elementType = elementType;
+ }
+
+ /**
+ * @returns a Matrix constructed from the values read from the buffer at the
+ * given byte offset
+ */
+ public read(buf: Uint8Array, offset: number): Matrix {
+ const elements: Scalar[][] = [...Array(this.cols)].map(_ => [...Array(this.rows)]);
+ for (let c = 0; c < this.cols; c++) {
+ for (let r = 0; r < this.rows; r++) {
+ elements[c][r] = this.elementType.read(buf, offset);
+ offset += this.elementType.size;
+ }
+
+      // vec3 columns have one padding element, so we need to skip it in matrices
+ if (this.rows === 3) {
+ offset += this.elementType.size;
+ }
+ }
+ return new Matrix(elements);
+ }
+
+ public toString(): string {
+ return `mat${this.cols}x${this.rows}<${this.elementType}>`;
+ }
+}
+
+// Maps a string representation of a Matrix type to Matrix type.
+const matrixTypes = new Map<string, MatrixType>();
+
+export function TypeMat(cols: number, rows: number, elementType: ScalarType): MatrixType {
+ const key = `${elementType.toString()} ${cols} ${rows}`;
+ let ty = matrixTypes.get(key);
+ if (ty !== undefined) {
+ return ty;
+ }
+ ty = new MatrixType(cols, rows, elementType);
+ matrixTypes.set(key, ty);
+ return ty;
+}
+
+/** Type is a ScalarType, VectorType, or MatrixType. */
+export type Type = ScalarType | VectorType | MatrixType;
+
+/** Copy bytes from `buf` at `offset` into the working data, then read it out using `workingDataOut` */
+function valueFromBytes(workingDataOut: TypedArrayBufferView, buf: Uint8Array, offset: number) {
+ for (let i = 0; i < workingDataOut.BYTES_PER_ELEMENT; ++i) {
+ workingDataU8[i] = buf[offset + i];
+ }
+ return workingDataOut[0];
+}
+
+export const TypeI32 = new ScalarType('i32', 4, (buf: Uint8Array, offset: number) =>
+ i32(valueFromBytes(workingDataI32, buf, offset))
+);
+export const TypeU32 = new ScalarType('u32', 4, (buf: Uint8Array, offset: number) =>
+ u32(valueFromBytes(workingDataU32, buf, offset))
+);
+export const TypeAbstractFloat = new ScalarType(
+ 'abstract-float',
+ 8,
+ (buf: Uint8Array, offset: number) => abstractFloat(valueFromBytes(workingDataF64, buf, offset))
+);
+export const TypeF64 = new ScalarType('f64', 8, (buf: Uint8Array, offset: number) =>
+ f64(valueFromBytes(workingDataF64, buf, offset))
+);
+export const TypeF32 = new ScalarType('f32', 4, (buf: Uint8Array, offset: number) =>
+ f32(valueFromBytes(workingDataF32, buf, offset))
+);
+export const TypeI16 = new ScalarType('i16', 2, (buf: Uint8Array, offset: number) =>
+ i16(valueFromBytes(workingDataI16, buf, offset))
+);
+export const TypeU16 = new ScalarType('u16', 2, (buf: Uint8Array, offset: number) =>
+ u16(valueFromBytes(workingDataU16, buf, offset))
+);
+export const TypeF16 = new ScalarType('f16', 2, (buf: Uint8Array, offset: number) =>
+ f16Bits(valueFromBytes(workingDataU16, buf, offset))
+);
+export const TypeI8 = new ScalarType('i8', 1, (buf: Uint8Array, offset: number) =>
+ i8(valueFromBytes(workingDataI8, buf, offset))
+);
+export const TypeU8 = new ScalarType('u8', 1, (buf: Uint8Array, offset: number) =>
+ u8(valueFromBytes(workingDataU8, buf, offset))
+);
+export const TypeBool = new ScalarType('bool', 4, (buf: Uint8Array, offset: number) =>
+ bool(valueFromBytes(workingDataU32, buf, offset) !== 0)
+);
+
+/** @returns the ScalarType from the ScalarKind */
+export function scalarType(kind: ScalarKind): ScalarType {
+ switch (kind) {
+ case 'abstract-float':
+ return TypeAbstractFloat;
+ case 'f64':
+ return TypeF64;
+ case 'f32':
+ return TypeF32;
+ case 'f16':
+ return TypeF16;
+ case 'u32':
+ return TypeU32;
+ case 'u16':
+ return TypeU16;
+ case 'u8':
+ return TypeU8;
+ case 'i32':
+ return TypeI32;
+ case 'i16':
+ return TypeI16;
+ case 'i8':
+ return TypeI8;
+ case 'bool':
+ return TypeBool;
+ }
+}
+
+/** @returns the number of scalar (element) types of the given Type */
+export function numElementsOf(ty: Type): number {
+ if (ty instanceof ScalarType) {
+ return 1;
+ }
+ if (ty instanceof VectorType) {
+ return ty.width;
+ }
+ if (ty instanceof MatrixType) {
+ return ty.cols * ty.rows;
+ }
+ throw new Error(`unhandled type ${ty}`);
+}
+
+/** @returns the scalar elements of the given Value */
+export function elementsOf(value: Value): Scalar[] {
+ if (value instanceof Scalar) {
+ return [value];
+ }
+ if (value instanceof Vector) {
+ return value.elements;
+ }
+ if (value instanceof Matrix) {
+ return value.elements.flat();
+ }
+ throw new Error(`unhandled value ${value}`);
+}
+
+/** @returns the scalar (element) type of the given Type */
+export function scalarTypeOf(ty: Type): ScalarType {
+ if (ty instanceof ScalarType) {
+ return ty;
+ }
+ if (ty instanceof VectorType) {
+ return ty.elementType;
+ }
+ if (ty instanceof MatrixType) {
+ return ty.elementType;
+ }
+ throw new Error(`unhandled type ${ty}`);
+}
+
+/** ScalarValue is the JS type that can be held by a Scalar */
+type ScalarValue = boolean | number;
+
+/** Class that encapsulates a single scalar value of various types. */
+export class Scalar {
+ readonly value: ScalarValue; // The scalar value
+ readonly type: ScalarType; // The type of the scalar
+
+ // The scalar value, packed in one or two 32-bit unsigned integers.
+  // Whether or not bits1 is used depends on `this.type.size`.
+ readonly bits1: number;
+ readonly bits0: number;
+
+ public constructor(type: ScalarType, value: ScalarValue, bits1: number, bits0: number) {
+ this.value = value;
+ this.type = type;
+ this.bits1 = bits1;
+ this.bits0 = bits0;
+ }
+
+ /**
+ * Copies the scalar value to the buffer at the provided byte offset.
+ * @param buffer the destination buffer
+   * @param offset the offset within `buffer`, in units of `buffer`'s element type
+ */
+ public copyTo(buffer: TypedArrayBufferView, offset: number) {
+ assert(this.type.kind !== 'f64', `Copying f64 values to/from buffers is not defined`);
+ workingDataU32[1] = this.bits1;
+ workingDataU32[0] = this.bits0;
+ for (let i = 0; i < this.type.size; i++) {
+ buffer[offset + i] = workingDataU8[i];
+ }
+ }
+
+ /**
+ * @returns the WGSL representation of this scalar value
+ */
+ public wgsl(): string {
+ const withPoint = (x: number) => {
+ const str = `${x}`;
+ return str.indexOf('.') > 0 || str.indexOf('e') > 0 ? str : `${str}.0`;
+ };
+ if (isFinite(this.value as number)) {
+ switch (this.type.kind) {
+ case 'abstract-float':
+ return `${withPoint(this.value as number)}`;
+ case 'f64':
+ return `${withPoint(this.value as number)}`;
+ case 'f32':
+ return `${withPoint(this.value as number)}f`;
+ case 'f16':
+ return `${withPoint(this.value as number)}h`;
+ case 'u32':
+ return `${this.value}u`;
+ case 'i32':
+ return `i32(${this.value})`;
+ case 'bool':
+ return `${this.value}`;
+ }
+ }
+ throw new Error(
+ `scalar of value ${this.value} and type ${this.type} has no WGSL representation`
+ );
+ }
+
+ public toString(): string {
+ if (this.type.kind === 'bool') {
+ return Colors.bold(this.value.toString());
+ }
+ switch (this.value) {
+ case Infinity:
+ case -Infinity:
+ return Colors.bold(this.value.toString());
+ default: {
+ workingDataU32[1] = this.bits1;
+ workingDataU32[0] = this.bits0;
+ let hex = '';
+ for (let i = 0; i < this.type.size; ++i) {
+ hex = workingDataU8[i].toString(16).padStart(2, '0') + hex;
+ }
+ const n = this.value as Number;
+ if (n !== null && isFloatValue(this)) {
+ let str = this.value.toString();
+ str = str.indexOf('.') > 0 || str.indexOf('e') > 0 ? str : `${str}.0`;
+ switch (this.type.kind) {
+ case 'abstract-float':
+ return isSubnormalNumberF64(n.valueOf())
+ ? `${Colors.bold(str)} (0x${hex} subnormal)`
+ : `${Colors.bold(str)} (0x${hex})`;
+ case 'f64':
+ return isSubnormalNumberF64(n.valueOf())
+ ? `${Colors.bold(str)} (0x${hex} subnormal)`
+ : `${Colors.bold(str)} (0x${hex})`;
+ case 'f32':
+ return isSubnormalNumberF32(n.valueOf())
+ ? `${Colors.bold(str)} (0x${hex} subnormal)`
+ : `${Colors.bold(str)} (0x${hex})`;
+ case 'f16':
+ return isSubnormalNumberF16(n.valueOf())
+ ? `${Colors.bold(str)} (0x${hex} subnormal)`
+ : `${Colors.bold(str)} (0x${hex})`;
+ default:
+ unreachable(
+ `Printing of floating point kind ${this.type.kind} is not implemented...`
+ );
+ }
+ }
+ return `${Colors.bold(this.value.toString())} (0x${hex})`;
+ }
+ }
+ }
+}
+
+export interface ScalarBuilder {
+ (value: number): Scalar;
+}
+
+/** Create a Scalar of `type` by storing `value` as an element of `workingDataArray` and retrieving it.
+ * The working data array *must* be an alias of `workingData`.
+ */
+function scalarFromValue(
+ type: ScalarType,
+ workingDataArray: TypedArrayBufferView,
+ value: number
+): Scalar {
+ // Clear all bits of the working data since `value` may be smaller; the upper bits should be 0.
+ workingDataU32[1] = 0;
+ workingDataU32[0] = 0;
+ workingDataArray[0] = value;
+ return new Scalar(type, workingDataArray[0], workingDataU32[1], workingDataU32[0]);
+}
+
+/** Create a Scalar of `type` by storing `value` as an element of `workingDataStoreArray` and
+ * reinterpreting it as an element of `workingDataLoadArray`.
+ * Both working data arrays *must* be aliases of `workingData`.
+ */
+function scalarFromBits(
+ type: ScalarType,
+ workingDataStoreArray: TypedArrayBufferView,
+ workingDataLoadArray: TypedArrayBufferView,
+ bits: number
+): Scalar {
+  // Clear all bits of the working data since `bits` may be smaller; the upper bits should be 0.
+ workingDataU32[1] = 0;
+ workingDataU32[0] = 0;
+ workingDataStoreArray[0] = bits;
+ return new Scalar(type, workingDataLoadArray[0], workingDataU32[1], workingDataU32[0]);
+}
+
+/** Create an AbstractFloat from a numeric value, a JS `number`. */
+export const abstractFloat = (value: number): Scalar =>
+ scalarFromValue(TypeAbstractFloat, workingDataF64, value);
+
+/** Create an f64 from a numeric value, a JS `number`. */
+export const f64 = (value: number): Scalar => scalarFromValue(TypeF64, workingDataF64, value);
+
+/** Create an f32 from a numeric value, a JS `number`. */
+export const f32 = (value: number): Scalar => scalarFromValue(TypeF32, workingDataF32, value);
+
+/** Create an f16 from a numeric value, a JS `number`. */
+export const f16 = (value: number): Scalar => scalarFromValue(TypeF16, workingDataF16, value);
+
+/** Create an f32 from a bit representation, a uint32 represented as a JS `number`. */
+export const f32Bits = (bits: number): Scalar =>
+ scalarFromBits(TypeF32, workingDataU32, workingDataF32, bits);
+
+/** Create an f16 from a bit representation, a uint16 represented as a JS `number`. */
+export const f16Bits = (bits: number): Scalar =>
+ scalarFromBits(TypeF16, workingDataU16, workingDataF16, bits);
+
+/** Create an i32 from a numeric value, a JS `number`. */
+export const i32 = (value: number): Scalar => scalarFromValue(TypeI32, workingDataI32, value);
+
+/** Create an i16 from a numeric value, a JS `number`. */
+export const i16 = (value: number): Scalar => scalarFromValue(TypeI16, workingDataI16, value);
+
+/** Create an i8 from a numeric value, a JS `number`. */
+export const i8 = (value: number): Scalar => scalarFromValue(TypeI8, workingDataI8, value);
+
+/** Create an i32 from a bit representation, a uint32 represented as a JS `number`. */
+export const i32Bits = (bits: number): Scalar =>
+ scalarFromBits(TypeI32, workingDataU32, workingDataI32, bits);
+
+/** Create an i16 from a bit representation, a uint16 represented as a JS `number`. */
+export const i16Bits = (bits: number): Scalar =>
+ scalarFromBits(TypeI16, workingDataU16, workingDataI16, bits);
+
+/** Create an i8 from a bit representation, a uint8 represented as a JS `number`. */
+export const i8Bits = (bits: number): Scalar =>
+ scalarFromBits(TypeI8, workingDataU8, workingDataI8, bits);
+
+/** Create a u32 from a numeric value, a JS `number`. */
+export const u32 = (value: number): Scalar => scalarFromValue(TypeU32, workingDataU32, value);
+
+/** Create a u16 from a numeric value, a JS `number`. */
+export const u16 = (value: number): Scalar => scalarFromValue(TypeU16, workingDataU16, value);
+
+/** Create a u8 from a numeric value, a JS `number`. */
+export const u8 = (value: number): Scalar => scalarFromValue(TypeU8, workingDataU8, value);
+
+/** Create a u32 from a bit representation, a uint32 represented as a JS `number`. */
+export const u32Bits = (bits: number): Scalar =>
+ scalarFromBits(TypeU32, workingDataU32, workingDataU32, bits);
+
+/** Create a u16 from a bit representation, a uint16 represented as a JS `number`. */
+export const u16Bits = (bits: number): Scalar =>
+ scalarFromBits(TypeU16, workingDataU16, workingDataU16, bits);
+
+/** Create a u8 from a bit representation, a uint8 represented as a JS `number`. */
+export const u8Bits = (bits: number): Scalar =>
+ scalarFromBits(TypeU8, workingDataU8, workingDataU8, bits);
+
+/** Create a boolean value. */
+export function bool(value: boolean): Scalar {
+ // WGSL does not support using 'bool' types directly in storage / uniform
+ // buffers, so instead we pack booleans in a u32, where 'false' is zero and
+ // 'true' is any non-zero value.
+ workingDataU32[0] = value ? 1 : 0;
+ workingDataU32[1] = 0;
+ return new Scalar(TypeBool, value, workingDataU32[1], workingDataU32[0]);
+}
+
+/** A 'true' literal value */
+export const True = bool(true);
+
+/** A 'false' literal value */
+export const False = bool(false);
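+
+// A few illustrative constructions using the scalar builders above:
+//   f32(1.5).wgsl();       // => '1.5f'
+//   f16Bits(0x3c00).value; // => 1 (the u16 bit pattern reinterpreted as an f16)
+//   u32(7).wgsl();         // => '7u'
+//   i32(-3).wgsl();        // => 'i32(-3)'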
+
+/**
+ * Class that encapsulates a vector value.
+ */
+export class Vector {
+ readonly elements: Array<Scalar>;
+ readonly type: VectorType;
+
+ public constructor(elements: Array<Scalar>) {
+ if (elements.length < 2 || elements.length > 4) {
+ throw new Error(`vector element count must be between 2 and 4, got ${elements.length}`);
+ }
+ for (let i = 1; i < elements.length; i++) {
+ const a = elements[0].type;
+ const b = elements[i].type;
+ if (a !== b) {
+ throw new Error(
+ `cannot mix vector element types. Found elements with types '${a}' and '${b}'`
+ );
+ }
+ }
+ this.elements = elements;
+ this.type = TypeVec(elements.length, elements[0].type);
+ }
+
+ /**
+ * Copies the vector value to the Uint8Array buffer at the provided byte offset.
+ * @param buffer the destination buffer
+ * @param offset the byte offset within buffer
+ */
+ public copyTo(buffer: Uint8Array, offset: number) {
+ for (const element of this.elements) {
+ element.copyTo(buffer, offset);
+ offset += this.type.elementType.size;
+ }
+ }
+
+ /**
+ * @returns the WGSL representation of this vector value
+ */
+ public wgsl(): string {
+ const els = this.elements.map(v => v.wgsl()).join(', ');
+ return `vec${this.type.width}(${els})`;
+ }
+
+ public toString(): string {
+ return `${this.type}(${this.elements.map(e => e.toString()).join(', ')})`;
+ }
+
+ public get x() {
+ assert(0 < this.elements.length);
+ return this.elements[0];
+ }
+
+ public get y() {
+ assert(1 < this.elements.length);
+ return this.elements[1];
+ }
+
+ public get z() {
+ assert(2 < this.elements.length);
+ return this.elements[2];
+ }
+
+ public get w() {
+ assert(3 < this.elements.length);
+ return this.elements[3];
+ }
+}
+
+/** Helper for constructing a new two-element vector with the provided values */
+export function vec2(x: Scalar, y: Scalar) {
+ return new Vector([x, y]);
+}
+
+/** Helper for constructing a new three-element vector with the provided values */
+export function vec3(x: Scalar, y: Scalar, z: Scalar) {
+ return new Vector([x, y, z]);
+}
+
+/** Helper for constructing a new four-element vector with the provided values */
+export function vec4(x: Scalar, y: Scalar, z: Scalar, w: Scalar) {
+ return new Vector([x, y, z, w]);
+}
+
+/**
+ * Helper for constructing Vectors from arrays of numbers
+ *
+ * @param v array of numbers to be converted; must contain 2, 3, or 4 elements
+ * @param op function to convert from number to Scalar, e.g. `f32`
+ */
+export function toVector(v: readonly number[], op: (n: number) => Scalar): Vector {
+ switch (v.length) {
+ case 2:
+ return vec2(op(v[0]), op(v[1]));
+ case 3:
+ return vec3(op(v[0]), op(v[1]), op(v[2]));
+ case 4:
+ return vec4(op(v[0]), op(v[1]), op(v[2]), op(v[3]));
+ }
+ unreachable(`input to 'toVector' must contain 2, 3, or 4 elements`);
+}
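+
+// For example, toVector([0.5, 1.5], f32) is equivalent to vec2(f32(0.5), f32(1.5)).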
+
+/**
+ * Class that encapsulates a Matrix value.
+ */
+export class Matrix {
+ readonly elements: Scalar[][];
+ readonly type: MatrixType;
+
+ public constructor(elements: Array<Array<Scalar>>) {
+ const num_cols = elements.length;
+ if (num_cols < 2 || num_cols > 4) {
+ throw new Error(`matrix cols count must be between 2 and 4, got ${num_cols}`);
+ }
+
+ const num_rows = elements[0].length;
+ if (!elements.every(c => c.length === num_rows)) {
+ throw new Error(`cannot mix matrix column lengths`);
+ }
+
+ if (num_rows < 2 || num_rows > 4) {
+ throw new Error(`matrix rows count must be between 2 and 4, got ${num_rows}`);
+ }
+
+ const elem_type = elements[0][0].type;
+ if (!elements.every(c => c.every(r => objectEquals(r.type, elem_type)))) {
+ throw new Error(`cannot mix matrix element types`);
+ }
+
+ this.elements = elements;
+ this.type = TypeMat(num_cols, num_rows, elem_type);
+ }
+
+ /**
+ * Copies the matrix value to the Uint8Array buffer at the provided byte offset.
+ * @param buffer the destination buffer
+ * @param offset the byte offset within buffer
+ */
+ public copyTo(buffer: Uint8Array, offset: number) {
+ for (let i = 0; i < this.type.cols; i++) {
+ for (let j = 0; j < this.type.rows; j++) {
+ this.elements[i][j].copyTo(buffer, offset);
+ offset += this.type.elementType.size;
+ }
+
+      // vec3 columns have one padding element, so we need to skip it in matrices
+ if (this.type.rows === 3) {
+ offset += this.type.elementType.size;
+ }
+ }
+ }
+
+ /**
+ * @returns the WGSL representation of this matrix value
+ */
+ public wgsl(): string {
+ const els = this.elements.flatMap(c => c.map(r => r.wgsl())).join(', ');
+ return `mat${this.type.cols}x${this.type.rows}(${els})`;
+ }
+
+ public toString(): string {
+ return `${this.type}(${this.elements.map(c => c.join(', ')).join(', ')})`;
+ }
+}
+
+/**
+ * Helper for constructing Matrices from arrays of numbers
+ *
+ * @param m array of arrays of numbers to be converted; all inner arrays must have
+ *          the same length, and both the outer and inner arrays must have 2, 3, or
+ *          4 elements
+ * @param op function to convert from number to Scalar, e.g. `f32`
+ */
+export function toMatrix(m: ROArrayArray<number>, op: (n: number) => Scalar): Matrix {
+ const cols = m.length;
+ const rows = m[0].length;
+ const elements: Scalar[][] = [...Array<Scalar[]>(cols)].map(_ => [...Array<Scalar>(rows)]);
+ for (let i = 0; i < cols; i++) {
+ for (let j = 0; j < rows; j++) {
+ elements[i][j] = op(m[i][j]);
+ }
+ }
+
+ return new Matrix(elements);
+}
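+
+// For example, toMatrix([[1, 2], [3, 4]], f32) builds a mat2x2<f32> whose first column is
+// (1.0, 2.0) and whose second column is (3.0, 4.0).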
+
+/** Value is a Scalar, Vector, or Matrix value. */
+export type Value = Scalar | Vector | Matrix;
+
+export type SerializedValueScalar = {
+ kind: 'scalar';
+ type: ScalarKind;
+ value: boolean | number;
+};
+
+export type SerializedValueVector = {
+ kind: 'vector';
+ type: ScalarKind;
+ value: boolean[] | readonly number[];
+};
+
+export type SerializedValueMatrix = {
+ kind: 'matrix';
+ type: ScalarKind;
+ value: ROArrayArray<number>;
+};
+
+enum SerializedScalarKind {
+ AbstractFloat,
+ F64,
+ F32,
+ F16,
+ U32,
+ U16,
+ U8,
+ I32,
+ I16,
+ I8,
+ Bool,
+}
+
+/** serializeScalarKind() serializes a ScalarKind to a BinaryStream */
+function serializeScalarKind(s: BinaryStream, v: ScalarKind) {
+ switch (v) {
+ case 'abstract-float':
+ s.writeU8(SerializedScalarKind.AbstractFloat);
+ return;
+ case 'f64':
+ s.writeU8(SerializedScalarKind.F64);
+ return;
+ case 'f32':
+ s.writeU8(SerializedScalarKind.F32);
+ return;
+ case 'f16':
+ s.writeU8(SerializedScalarKind.F16);
+ return;
+ case 'u32':
+ s.writeU8(SerializedScalarKind.U32);
+ return;
+ case 'u16':
+ s.writeU8(SerializedScalarKind.U16);
+ return;
+ case 'u8':
+ s.writeU8(SerializedScalarKind.U8);
+ return;
+ case 'i32':
+ s.writeU8(SerializedScalarKind.I32);
+ return;
+ case 'i16':
+ s.writeU8(SerializedScalarKind.I16);
+ return;
+ case 'i8':
+ s.writeU8(SerializedScalarKind.I8);
+ return;
+ case 'bool':
+ s.writeU8(SerializedScalarKind.Bool);
+ return;
+ }
+}
+
+/** deserializeScalarKind() deserializes a ScalarKind from a BinaryStream */
+function deserializeScalarKind(s: BinaryStream): ScalarKind {
+ const kind = s.readU8();
+ switch (kind) {
+ case SerializedScalarKind.AbstractFloat:
+ return 'abstract-float';
+ case SerializedScalarKind.F64:
+ return 'f64';
+ case SerializedScalarKind.F32:
+ return 'f32';
+ case SerializedScalarKind.F16:
+ return 'f16';
+ case SerializedScalarKind.U32:
+ return 'u32';
+ case SerializedScalarKind.U16:
+ return 'u16';
+ case SerializedScalarKind.U8:
+ return 'u8';
+ case SerializedScalarKind.I32:
+ return 'i32';
+ case SerializedScalarKind.I16:
+ return 'i16';
+ case SerializedScalarKind.I8:
+ return 'i8';
+ case SerializedScalarKind.Bool:
+ return 'bool';
+ default:
+ unreachable(`invalid serialized ScalarKind: ${kind}`);
+ }
+}
+
+enum SerializedValueKind {
+ Scalar,
+ Vector,
+ Matrix,
+}
+
+/** serializeValue() serializes a Value to a BinaryStream */
+export function serializeValue(s: BinaryStream, v: Value) {
+ const serializeScalar = (scalar: Scalar, kind: ScalarKind) => {
+ switch (kind) {
+ case 'abstract-float':
+ s.writeF64(scalar.value as number);
+ return;
+ case 'f64':
+ s.writeF64(scalar.value as number);
+ return;
+ case 'f32':
+ s.writeF32(scalar.value as number);
+ return;
+ case 'f16':
+ s.writeF16(scalar.value as number);
+ return;
+ case 'u32':
+ s.writeU32(scalar.value as number);
+ return;
+ case 'u16':
+ s.writeU16(scalar.value as number);
+ return;
+ case 'u8':
+ s.writeU8(scalar.value as number);
+ return;
+ case 'i32':
+ s.writeI32(scalar.value as number);
+ return;
+ case 'i16':
+ s.writeI16(scalar.value as number);
+ return;
+ case 'i8':
+ s.writeI8(scalar.value as number);
+ return;
+ case 'bool':
+ s.writeBool(scalar.value as boolean);
+ return;
+ }
+ };
+
+ if (v instanceof Scalar) {
+ s.writeU8(SerializedValueKind.Scalar);
+ serializeScalarKind(s, v.type.kind);
+ serializeScalar(v, v.type.kind);
+ return;
+ }
+ if (v instanceof Vector) {
+ s.writeU8(SerializedValueKind.Vector);
+ serializeScalarKind(s, v.type.elementType.kind);
+ s.writeU8(v.type.width);
+ for (const element of v.elements) {
+ serializeScalar(element, v.type.elementType.kind);
+ }
+ return;
+ }
+ if (v instanceof Matrix) {
+ s.writeU8(SerializedValueKind.Matrix);
+ serializeScalarKind(s, v.type.elementType.kind);
+ s.writeU8(v.type.cols);
+ s.writeU8(v.type.rows);
+ for (const column of v.elements) {
+ for (const element of column) {
+ serializeScalar(element, v.type.elementType.kind);
+ }
+ }
+ return;
+ }
+
+ unreachable(`unhandled value type: ${v}`);
+}
+
+/** deserializeValue() deserializes a Value from a BinaryStream */
+export function deserializeValue(s: BinaryStream): Value {
+ const deserializeScalar = (kind: ScalarKind) => {
+ switch (kind) {
+ case 'abstract-float':
+ return abstractFloat(s.readF64());
+ case 'f64':
+ return f64(s.readF64());
+ case 'f32':
+ return f32(s.readF32());
+ case 'f16':
+ return f16(s.readF16());
+ case 'u32':
+ return u32(s.readU32());
+ case 'u16':
+ return u16(s.readU16());
+ case 'u8':
+ return u8(s.readU8());
+ case 'i32':
+ return i32(s.readI32());
+ case 'i16':
+ return i16(s.readI16());
+ case 'i8':
+ return i8(s.readI8());
+ case 'bool':
+ return bool(s.readBool());
+ }
+ };
+ const valueKind = s.readU8();
+ const scalarKind = deserializeScalarKind(s);
+ switch (valueKind) {
+ case SerializedValueKind.Scalar:
+ return deserializeScalar(scalarKind);
+ case SerializedValueKind.Vector: {
+ const width = s.readU8();
+ const scalars = new Array<Scalar>(width);
+ for (let i = 0; i < width; i++) {
+ scalars[i] = deserializeScalar(scalarKind);
+ }
+ return new Vector(scalars);
+ }
+ case SerializedValueKind.Matrix: {
+ const numCols = s.readU8();
+ const numRows = s.readU8();
+ const columns = new Array<Scalar[]>(numCols);
+ for (let c = 0; c < numCols; c++) {
+ columns[c] = new Array<Scalar>(numRows);
+ for (let i = 0; i < numRows; i++) {
+ columns[c][i] = deserializeScalar(scalarKind);
+ }
+ }
+ return new Matrix(columns);
+ }
+ default:
+ unreachable(`invalid serialized value kind: ${valueKind}`);
+ }
+}
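+
+// Minimal serialization round-trip sketch (assumes a backing buffer large enough for the
+// value being written):
+//   const s = new BinaryStream(new ArrayBuffer(64));
+//   serializeValue(s, vec2(f32(1), f32(2)));
+//   const v = deserializeValue(new BinaryStream(s.buffer().buffer));
+//   // v is a Vector of two f32 scalars holding 1 and 2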
+
+/** @returns if the Value is a float scalar type */
+export function isFloatValue(v: Value): boolean {
+ return isFloatType(v.type);
+}
+
+/**
+ * @returns if `ty` is an abstract numeric type.
+ * @note this does not consider composite types.
+ * Use elementType() if you want to test the element type.
+ */
+export function isAbstractType(ty: Type): boolean {
+ if (ty instanceof ScalarType) {
+ return ty.kind === 'abstract-float';
+ }
+ return false;
+}
+
+/**
+ * @returns if `ty` is a floating point type.
+ * @note this does not consider composite types.
+ * Use elementType() if you want to test the element type.
+ */
+export function isFloatType(ty: Type): boolean {
+ if (ty instanceof ScalarType) {
+ return (
+ ty.kind === 'abstract-float' || ty.kind === 'f64' || ty.kind === 'f32' || ty.kind === 'f16'
+ );
+ }
+ return false;
+}
+
+/// All floating-point scalar types
+export const kAllFloatScalars = [TypeAbstractFloat, TypeF32, TypeF16] as const;
+
+/// All floating-point vec2 types
+export const kAllFloatVector2 = [
+ TypeVec(2, TypeAbstractFloat),
+ TypeVec(2, TypeF32),
+ TypeVec(2, TypeF16),
+] as const;
+
+/// All floating-point vec3 types
+export const kAllFloatVector3 = [
+ TypeVec(3, TypeAbstractFloat),
+ TypeVec(3, TypeF32),
+ TypeVec(3, TypeF16),
+] as const;
+
+/// All floating-point vec4 types
+export const kAllFloatVector4 = [
+ TypeVec(4, TypeAbstractFloat),
+ TypeVec(4, TypeF32),
+ TypeVec(4, TypeF16),
+] as const;
+
+/// All floating-point vector types
+export const kAllFloatVectors = [
+ ...kAllFloatVector2,
+ ...kAllFloatVector3,
+ ...kAllFloatVector4,
+] as const;
+
+/// All floating-point scalar and vector types
+export const kAllFloatScalarsAndVectors = [...kAllFloatScalars, ...kAllFloatVectors] as const;
+
+/// All integer scalar and vector types
+export const kAllIntegerScalarsAndVectors = [
+ TypeI32,
+ TypeVec(2, TypeI32),
+ TypeVec(3, TypeI32),
+ TypeVec(4, TypeI32),
+ TypeU32,
+ TypeVec(2, TypeU32),
+ TypeVec(3, TypeU32),
+ TypeVec(4, TypeU32),
+] as const;
+
+/// All signed integer scalar and vector types
+export const kAllSignedIntegerScalarsAndVectors = [
+ TypeI32,
+ TypeVec(2, TypeI32),
+ TypeVec(3, TypeI32),
+ TypeVec(4, TypeI32),
+] as const;
+
+/// All unsigned integer scalar and vector types
+export const kAllUnsignedIntegerScalarsAndVectors = [
+ TypeU32,
+ TypeVec(2, TypeU32),
+ TypeVec(3, TypeU32),
+ TypeVec(4, TypeU32),
+] as const;
+
+/// All floating-point and integer scalar and vector types
+export const kAllFloatAndIntegerScalarsAndVectors = [
+ ...kAllFloatScalarsAndVectors,
+ ...kAllIntegerScalarsAndVectors,
+] as const;
+
+/// All floating-point and signed integer scalar and vector types
+export const kAllFloatAndSignedIntegerScalarsAndVectors = [
+ ...kAllFloatScalarsAndVectors,
+ ...kAllSignedIntegerScalarsAndVectors,
+] as const;
+
+/** @returns the inner element type of the given type */
+export function elementType(t: ScalarType | VectorType | MatrixType) {
+ if (t instanceof ScalarType) {
+ return t;
+ }
+ return t.elementType;
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/copy_to_texture.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/copy_to_texture.ts
new file mode 100644
index 0000000000..8e0444ffea
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/copy_to_texture.ts
@@ -0,0 +1,192 @@
+import { assert, memcpy } from '../../common/util/util.js';
+import { RegularTextureFormat } from '../format_info.js';
+import { GPUTest, TextureTestMixin } from '../gpu_test.js';
+import { reifyExtent3D, reifyOrigin3D } from '../util/unions.js';
+
+import { makeInPlaceColorConversion } from './color_space_conversion.js';
+import { TexelView } from './texture/texel_view.js';
+import { TexelCompareOptions } from './texture/texture_ok.js';
+
+/**
+ * Predefined copy sub-rect configurations (source/destination origins and sizes, plus the copy extent).
+ */
+export const kCopySubrectInfo = [
+ {
+ srcOrigin: { x: 2, y: 2 },
+ dstOrigin: { x: 0, y: 0, z: 0 },
+ srcSize: { width: 16, height: 16 },
+ dstSize: { width: 4, height: 4 },
+ copyExtent: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ },
+ {
+ srcOrigin: { x: 10, y: 2 },
+ dstOrigin: { x: 0, y: 0, z: 0 },
+ srcSize: { width: 16, height: 16 },
+ dstSize: { width: 4, height: 4 },
+ copyExtent: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ },
+ {
+ srcOrigin: { x: 2, y: 10 },
+ dstOrigin: { x: 0, y: 0, z: 0 },
+ srcSize: { width: 16, height: 16 },
+ dstSize: { width: 4, height: 4 },
+ copyExtent: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ },
+ {
+ srcOrigin: { x: 10, y: 10 },
+ dstOrigin: { x: 0, y: 0, z: 0 },
+ srcSize: { width: 16, height: 16 },
+ dstSize: { width: 4, height: 4 },
+ copyExtent: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ },
+ {
+ srcOrigin: { x: 2, y: 2 },
+ dstOrigin: { x: 2, y: 2, z: 0 },
+ srcSize: { width: 16, height: 16 },
+ dstSize: { width: 16, height: 16 },
+ copyExtent: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ },
+ {
+ srcOrigin: { x: 10, y: 2 },
+ dstOrigin: { x: 2, y: 2, z: 0 },
+ srcSize: { width: 16, height: 16 },
+ dstSize: { width: 16, height: 16 },
+ copyExtent: { width: 4, height: 4, depthOrArrayLayers: 1 },
+ },
+] as const;
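+
+// Illustrative usage (comment-only sketch): tests typically parameterize over these
+// entries, copying `copyExtent` texels from `srcOrigin` of a `srcSize` source to
+// `dstOrigin` of a `dstSize` destination:
+//   for (const info of kCopySubrectInfo) {
+//     // ... issue a copy described by `info`, then check only the copied sub-rect ...
+//   }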
+
+export class CopyToTextureUtils extends TextureTestMixin(GPUTest) {
+ doFlipY(
+ sourcePixels: Uint8ClampedArray,
+ width: number,
+ height: number,
+ bytesPerPixel: number
+ ): Uint8ClampedArray {
+ const dstPixels = new Uint8ClampedArray(width * height * bytesPerPixel);
+ for (let i = 0; i < height; ++i) {
+ for (let j = 0; j < width; ++j) {
+ const srcPixelPos = i * width + j;
+ // WebGL readPixels returns pixels with a bottom-left origin, while copyExternalImageToTexture
+ // copies from a WebGL canvas with a top-left origin, so the expectation derived from
+ // readPixels must be flipped.
+ const dstPixelPos = (height - i - 1) * width + j;
+
+ memcpy(
+ { src: sourcePixels, start: srcPixelPos * bytesPerPixel, length: bytesPerPixel },
+ { dst: dstPixels, start: dstPixelPos * bytesPerPixel }
+ );
+ }
+ }
+
+ return dstPixels;
+ }
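+
+ // Illustrative example (comment-only sketch): for a 1x2 RGBA8 source whose row 0 is red
+ // and row 1 is green, doFlipY returns a buffer with green in row 0 and red in row 1:
+ //   const src = new Uint8ClampedArray([255, 0, 0, 255, 0, 255, 0, 255]);
+ //   const flipped = this.doFlipY(src, /* width */ 1, /* height */ 2, /* bytesPerPixel */ 4);
+ //   // flipped is [0, 255, 0, 255, 255, 0, 0, 255]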
+
+ getExpectedDstPixelsFromSrcPixels({
+ srcPixels,
+ srcOrigin,
+ srcSize,
+ dstOrigin,
+ dstSize,
+ subRectSize,
+ format,
+ flipSrcBeforeCopy,
+ srcDoFlipYDuringCopy,
+ conversion,
+ }: {
+ srcPixels: Uint8ClampedArray;
+ srcOrigin: GPUOrigin2D;
+ srcSize: GPUExtent3D;
+ dstOrigin: GPUOrigin3D;
+ dstSize: GPUExtent3D;
+ subRectSize: GPUExtent3D;
+ format: RegularTextureFormat;
+ flipSrcBeforeCopy: boolean;
+ srcDoFlipYDuringCopy: boolean;
+ conversion: {
+ srcPremultiplied: boolean;
+ dstPremultiplied: boolean;
+ srcColorSpace?: PredefinedColorSpace;
+ dstColorSpace?: PredefinedColorSpace;
+ };
+ }): TexelView {
+ const applyConversion = makeInPlaceColorConversion(conversion);
+
+ const reifySrcOrigin = reifyOrigin3D(srcOrigin);
+ const reifySrcSize = reifyExtent3D(srcSize);
+ const reifyDstOrigin = reifyOrigin3D(dstOrigin);
+ const reifyDstSize = reifyExtent3D(dstSize);
+ const reifySubRectSize = reifyExtent3D(subRectSize);
+
+ assert(
+ reifyDstOrigin.x + reifySubRectSize.width <= reifyDstSize.width &&
+ reifyDstOrigin.y + reifySubRectSize.height <= reifyDstSize.height,
+ 'subrect is out of bounds'
+ );
+
+ const divide = 255.0;
+ return TexelView.fromTexelsAsColors(
+ format,
+ coords => {
+ assert(
+ coords.x >= reifyDstOrigin.x &&
+ coords.y >= reifyDstOrigin.y &&
+ coords.x < reifyDstOrigin.x + reifySubRectSize.width &&
+ coords.y < reifyDstOrigin.y + reifySubRectSize.height &&
+ coords.z === 0,
+ 'out of bounds'
+ );
+ // Map dst coords to get candidate src pixel position in y.
+ let yInSubRect = coords.y - reifyDstOrigin.y;
+
+ // If srcDoFlipYDuringCopy is true, a flipY op was applied to src during the copy.
+ // The WebGPU spec requires the origin option to be relative to the top-left corner of the
+ // source image, increasing downward consistently.
+ // https://www.w3.org/TR/webgpu/#dom-gpuimagecopyexternalimage-flipy
+ // The flip applies only to the contents of the copy rect; the src origin is always top-left.
+ // Get the candidate src pixel position in y by mirroring within the copy sub-rect.
+ if (srcDoFlipYDuringCopy) yInSubRect = reifySubRectSize.height - 1 - yInSubRect;
+
+ let src_y = yInSubRect + reifySrcOrigin.y;
+
+ // The test might generate a flipped source from srcPixels, e.g. by creating an ImageBitmap
+ // from srcPixels with the orientation set to 'flipY'.
+ // Get the candidate src pixel position in y by mirroring within the source.
+ if (flipSrcBeforeCopy) src_y = reifySrcSize.height - src_y - 1;
+
+ const pixelPos =
+ src_y * reifySrcSize.width + (coords.x - reifyDstOrigin.x) + reifySrcOrigin.x;
+
+ const rgba = {
+ R: srcPixels[pixelPos * 4] / divide,
+ G: srcPixels[pixelPos * 4 + 1] / divide,
+ B: srcPixels[pixelPos * 4 + 2] / divide,
+ A: srcPixels[pixelPos * 4 + 3] / divide,
+ };
+ applyConversion(rgba);
+ return rgba;
+ },
+ { clampToFormatRange: true }
+ );
+ }
+
+ doTestAndCheckResult(
+ imageCopyExternalImage: GPUImageCopyExternalImage,
+ dstTextureCopyView: GPUImageCopyTextureTagged,
+ expTexelView: TexelView,
+ copySize: Required<GPUExtent3DDict>,
+ texelCompareOptions: TexelCompareOptions
+ ): void {
+ this.device.queue.copyExternalImageToTexture(
+ imageCopyExternalImage,
+ dstTextureCopyView,
+ copySize
+ );
+
+ this.expectTexelViewComparisonIsOkInTexture(
+ { texture: dstTextureCopyView.texture, origin: dstTextureCopyView.origin },
+ expTexelView,
+ copySize,
+ texelCompareOptions
+ );
+ this.trackForCleanup(dstTextureCopyView.texture);
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/create_elements.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/create_elements.ts
new file mode 100644
index 0000000000..71d48ecc07
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/create_elements.ts
@@ -0,0 +1,82 @@
+import { Fixture } from '../../common/framework/fixture.js';
+import { unreachable } from '../../common/util/util.js';
+
+// TESTING_TODO: This should expand to more canvas types (which will enhance a bunch of tests):
+// - canvas element not in dom
+// - canvas element in dom
+// - offscreen canvas from transferControlToOffscreen from canvas not in dom
+// - offscreen canvas from transferControlToOffscreen from canvas in dom
+// - offscreen canvas from new OffscreenCanvas
+export const kAllCanvasTypes = ['onscreen', 'offscreen'] as const;
+export type CanvasType = (typeof kAllCanvasTypes)[number];
+
+type CanvasForCanvasType<T extends CanvasType> = {
+ onscreen: HTMLCanvasElement;
+ offscreen: OffscreenCanvas;
+}[T];
+
+/** Valid contextId for HTMLCanvasElement/OffscreenCanvas,
+ * spec: https://html.spec.whatwg.org/multipage/canvas.html#dom-canvas-getcontext
+ */
+export const kValidCanvasContextIds = [
+ '2d',
+ 'bitmaprenderer',
+ 'webgl',
+ 'webgl2',
+ 'webgpu',
+] as const;
+export type CanvasContext = (typeof kValidCanvasContextIds)[number];
+
+/** Create HTMLCanvas/OffscreenCanvas. */
+export function createCanvas<T extends CanvasType>(
+ test: Fixture,
+ canvasType: T,
+ width: number,
+ height: number
+): CanvasForCanvasType<T> {
+ if (canvasType === 'onscreen') {
+ if (typeof document !== 'undefined') {
+ return createOnscreenCanvas(test, width, height) as CanvasForCanvasType<T>;
+ } else {
+ test.skip('Cannot create HTMLCanvasElement');
+ }
+ } else if (canvasType === 'offscreen') {
+ if (typeof OffscreenCanvas !== 'undefined') {
+ return createOffscreenCanvas(test, width, height) as CanvasForCanvasType<T>;
+ } else {
+ test.skip('Cannot create an OffscreenCanvas');
+ }
+ } else {
+ unreachable();
+ }
+}
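+
+// Illustrative usage (comment-only sketch; `t` is a placeholder for the current Fixture):
+//   const canvas = createCanvas(t, 'offscreen', 16, 16);
+//   const ctx = canvas.getContext('webgpu');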
+
+/** Create HTMLCanvasElement. */
+export function createOnscreenCanvas(
+ test: Fixture,
+ width: number,
+ height: number
+): HTMLCanvasElement {
+ let canvas: HTMLCanvasElement;
+ if (typeof document !== 'undefined') {
+ canvas = document.createElement('canvas');
+ canvas.width = width;
+ canvas.height = height;
+ } else {
+ test.skip('Cannot create HTMLCanvasElement');
+ }
+ return canvas;
+}
+
+/** Create OffscreenCanvas. */
+export function createOffscreenCanvas(
+ test: Fixture,
+ width: number,
+ height: number
+): OffscreenCanvas {
+ if (typeof OffscreenCanvas === 'undefined') {
+ test.skip('OffscreenCanvas is not supported');
+ }
+
+ return new OffscreenCanvas(width, height);
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/device_pool.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/device_pool.ts
new file mode 100644
index 0000000000..1e6c0402cb
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/device_pool.ts
@@ -0,0 +1,414 @@
+import { SkipTestCase, TestCaseRecorder } from '../../common/framework/fixture.js';
+import { attemptGarbageCollection } from '../../common/util/collect_garbage.js';
+import { getGPU, getDefaultRequestAdapterOptions } from '../../common/util/navigator_gpu.js';
+import {
+ assert,
+ raceWithRejectOnTimeout,
+ assertReject,
+ unreachable,
+} from '../../common/util/util.js';
+import { getDefaultLimits, kLimits } from '../capability_info.js';
+
+export interface DeviceProvider {
+ readonly device: GPUDevice;
+ expectDeviceLost(reason: GPUDeviceLostReason): void;
+}
+
+class TestFailedButDeviceReusable extends Error {}
+class FeaturesNotSupported extends Error {}
+export class TestOOMedShouldAttemptGC extends Error {}
+
+export class DevicePool {
+ private holders: 'uninitialized' | 'failed' | DescriptorToHolderMap = 'uninitialized';
+
+ /** Acquire a device from the pool and begin the error scopes. */
+ async acquire(
+ recorder: TestCaseRecorder,
+ descriptor?: UncanonicalizedDeviceDescriptor
+ ): Promise<DeviceProvider> {
+ let errorMessage = '';
+ if (this.holders === 'uninitialized') {
+ this.holders = new DescriptorToHolderMap();
+ try {
+ await this.holders.getOrCreate(recorder, undefined);
+ } catch (ex) {
+ this.holders = 'failed';
+ if (ex instanceof Error) {
+ errorMessage = ` with ${ex.name} "${ex.message}"`;
+ }
+ }
+ }
+
+ assert(
+ this.holders !== 'failed',
+ `WebGPU device failed to initialize${errorMessage}; not retrying`
+ );
+
+ const holder = await this.holders.getOrCreate(recorder, descriptor);
+
+ assert(holder.state === 'free', 'Device was in use on DevicePool.acquire');
+ holder.state = 'acquired';
+ holder.beginTestScope();
+ return holder;
+ }
+
+ /**
+ * End the error scopes and check for errors.
+ * Then, if the device seems reusable, release it back into the pool. Otherwise, drop it.
+ */
+ async release(holder: DeviceProvider): Promise<void> {
+ assert(this.holders instanceof DescriptorToHolderMap, 'DevicePool got into a bad state');
+ assert(holder instanceof DeviceHolder, 'DeviceProvider should always be a DeviceHolder');
+
+ assert(holder.state === 'acquired', 'trying to release a device while already released');
+ try {
+ await holder.endTestScope();
+
+ // (Hopefully, if the device was lost, it has been reported by the time endTestScope()
+ // has finished (or timed out). If not, it could cause a finite number of extra test
+ // failures following this one (but should recover eventually).)
+ assert(
+ holder.lostInfo === undefined,
+ `Device was unexpectedly lost. Reason: ${holder.lostInfo?.reason}, Message: ${holder.lostInfo?.message}`
+ );
+ } catch (ex) {
+ // Any error that isn't explicitly TestFailedButDeviceReusable forces a new device to be
+ // created for the next test.
+ if (!(ex instanceof TestFailedButDeviceReusable)) {
+ this.holders.delete(holder);
+ if ('destroy' in holder.device) {
+ holder.device.destroy();
+ }
+
+ // Release the (hopefully only) ref to the GPUDevice.
+ holder.releaseGPUDevice();
+
+ // Try to clean up, in case there are stray GPU resources in need of collection.
+ if (ex instanceof TestOOMedShouldAttemptGC) {
+ await attemptGarbageCollection();
+ }
+ }
+ // In the try block, we may throw an error if the device is lost, in order to force device
+ // reinitialization. However, if the device loss was expected, we want to suppress the error.
+ // The loss is expected when `holder.expectedLostReason` is equal to
+ // `holder.lostInfo.reason`.
+ const expectedDeviceLost =
+ holder.expectedLostReason !== undefined &&
+ holder.lostInfo !== undefined &&
+ holder.expectedLostReason === holder.lostInfo.reason;
+ if (!expectedDeviceLost) {
+ throw ex;
+ }
+ } finally {
+ // Mark the holder as free so the device can be reused (if it's still in this.holders).
+ holder.state = 'free';
+ }
+ }
+}
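+
+// Illustrative usage (comment-only sketch; `pool` and `recorder` are placeholders for the
+// DevicePool and TestCaseRecorder owned by the test framework):
+//   const provider = await pool.acquire(recorder);
+//   try {
+//     runTestWith(provider.device); // hypothetical test body
+//   } finally {
+//     await pool.release(provider);
+//   }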
+
+/**
+ * Map from GPUDeviceDescriptor to DeviceHolder.
+ */
+class DescriptorToHolderMap {
+ /** Map keys that are known to be unsupported and can be rejected quickly. */
+ private unsupported: Set<string> = new Set();
+ private holders: Map<string, DeviceHolder> = new Map();
+
+ /** Deletes an item from the map by DeviceHolder value. */
+ delete(holder: DeviceHolder): void {
+ for (const [k, v] of this.holders) {
+ if (v === holder) {
+ this.holders.delete(k);
+ return;
+ }
+ }
+ unreachable("internal error: couldn't find DeviceHolder to delete");
+ }
+
+ /**
+ * Gets a DeviceHolder from the map if it exists; otherwise, calls create() to create one,
+ * inserts it, and returns it.
+ *
+ * If an `uncanonicalizedDescriptor` is provided, it is canonicalized and used as the map key.
+ * If one is not provided, the map key is `""` (empty string).
+ *
+ * Throws SkipTestCase if devices with this descriptor are unsupported.
+ */
+ async getOrCreate(
+ recorder: TestCaseRecorder,
+ uncanonicalizedDescriptor: UncanonicalizedDeviceDescriptor | undefined
+ ): Promise<DeviceHolder> {
+ const [descriptor, key] = canonicalizeDescriptor(uncanonicalizedDescriptor);
+ // Quick-reject descriptors that are known to be unsupported already.
+ if (this.unsupported.has(key)) {
+ throw new SkipTestCase(
+ `GPUDeviceDescriptor previously failed: ${JSON.stringify(descriptor)}`
+ );
+ }
+
+ // Search for an existing device with the same descriptor.
+ {
+ const value = this.holders.get(key);
+ if (value) {
+ // Move it to the end of the Map (most-recently-used).
+ this.holders.delete(key);
+ this.holders.set(key, value);
+ return value;
+ }
+ }
+
+ // No existing item was found; add a new one.
+ let value;
+ try {
+ value = await DeviceHolder.create(recorder, descriptor);
+ } catch (ex) {
+ if (ex instanceof FeaturesNotSupported) {
+ this.unsupported.add(key);
+ throw new SkipTestCase(
+ `GPUDeviceDescriptor not supported: ${JSON.stringify(descriptor)}\n${ex?.message ?? ''}`
+ );
+ }
+
+ throw ex;
+ }
+ this.insertAndCleanUp(key, value);
+ return value;
+ }
+
+ /** Insert an entry, then remove the least-recently-used items if there are too many. */
+ private insertAndCleanUp(key: string, value: DeviceHolder) {
+ this.holders.set(key, value);
+
+ const kMaxEntries = 5;
+ if (this.holders.size > kMaxEntries) {
+ // Delete the first (least recently used) item in the map.
+ for (const [key] of this.holders) {
+ this.holders.delete(key);
+ return;
+ }
+ }
+ }
+}
+
+export type UncanonicalizedDeviceDescriptor = {
+ requiredFeatures?: Iterable<GPUFeatureName>;
+ requiredLimits?: Record<string, GPUSize32>;
+ /** @deprecated this field cannot be used */
+ nonGuaranteedFeatures?: undefined;
+ /** @deprecated this field cannot be used */
+ nonGuaranteedLimits?: undefined;
+ /** @deprecated this field cannot be used */
+ extensions?: undefined;
+ /** @deprecated this field cannot be used */
+ features?: undefined;
+};
+type CanonicalDeviceDescriptor = Omit<
+ Required<GPUDeviceDescriptor>,
+ 'label' | 'nonGuaranteedFeatures' | 'nonGuaranteedLimits'
+>;
+/**
+ * Make a stringified map-key from a GPUDeviceDescriptor.
+ * Tries to make sure all defaults are resolved first, but it's okay if some are missed
+ * (it just means some GPUDevice objects won't get deduplicated).
+ *
+ * This does **not** canonicalize `undefined` (the "default" descriptor) into a fully-qualified
+ * GPUDeviceDescriptor. This is just because `undefined` is a common case and we want to use it
+ * as a sanity check that WebGPU is working.
+ */
+function canonicalizeDescriptor(
+ desc: UncanonicalizedDeviceDescriptor | undefined
+): [CanonicalDeviceDescriptor | undefined, string] {
+ if (desc === undefined) {
+ return [undefined, ''];
+ }
+
+ const featuresCanonicalized = desc.requiredFeatures
+ ? Array.from(new Set(desc.requiredFeatures)).sort()
+ : [];
+
+ /** Canonicalized version of the requested limits: in canonical order, with only values which are
+ * specified _and_ non-default. */
+ const limitsCanonicalized: Record<string, number> = {};
+ // MAINTENANCE_TODO: Remove cast when @webgpu/types includes compatibilityMode
+ const adapterOptions = getDefaultRequestAdapterOptions() as unknown as {
+ compatibilityMode?: boolean;
+ };
+ const featureLevel = adapterOptions?.compatibilityMode ? 'compatibility' : 'core';
+ const defaultLimits = getDefaultLimits(featureLevel);
+ if (desc.requiredLimits) {
+ for (const limit of kLimits) {
+ const requestedValue = desc.requiredLimits[limit];
+ const defaultValue = defaultLimits[limit].default;
+ // Skip adding a limit to limitsCanonicalized if it is the same as the default.
+ if (requestedValue !== undefined && requestedValue !== defaultValue) {
+ limitsCanonicalized[limit] = requestedValue;
+ }
+ }
+ }
+
+ // Type ensures every field is carried through.
+ const descriptorCanonicalized: CanonicalDeviceDescriptor = {
+ requiredFeatures: featuresCanonicalized,
+ requiredLimits: limitsCanonicalized,
+ defaultQueue: {},
+ };
+ return [descriptorCanonicalized, JSON.stringify(descriptorCanonicalized)];
+}
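+
+// Illustrative example (comment-only sketch): descriptors that differ only in feature
+// ordering (or in default-valued limits) canonicalize to the same key, so they share a
+// single GPUDevice:
+//   canonicalizeDescriptor({ requiredFeatures: ['depth-clip-control', 'bgra8unorm-storage'] })[1] ===
+//     canonicalizeDescriptor({ requiredFeatures: ['bgra8unorm-storage', 'depth-clip-control'] })[1]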
+
+function supportsFeature(
+ adapter: GPUAdapter,
+ descriptor: CanonicalDeviceDescriptor | undefined
+): boolean {
+ if (descriptor === undefined) {
+ return true;
+ }
+
+ for (const feature of descriptor.requiredFeatures) {
+ if (!adapter.features.has(feature)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * DeviceHolder has two states:
+ * - 'free': Free to be used for a new test.
+ * - 'acquired': In use by a running test.
+ */
+type DeviceHolderState = 'free' | 'acquired';
+
+/**
+ * Holds a GPUDevice and tracks its state (free/acquired) and handles device loss.
+ */
+class DeviceHolder implements DeviceProvider {
+ /** The device. Will be cleared during cleanup if there were unexpected errors. */
+ private _device: GPUDevice | undefined;
+ /** Whether the device is in use by a test or not. */
+ state: DeviceHolderState = 'free';
+ /** initially undefined; becomes set when the device is lost */
+ lostInfo?: GPUDeviceLostInfo;
+ /** Set if the device is expected to be lost. */
+ expectedLostReason?: GPUDeviceLostReason;
+
+ // Gets a device and creates a DeviceHolder.
+ // If the device is lost, DeviceHolder.lostInfo gets set.
+ static async create(
+ recorder: TestCaseRecorder,
+ descriptor: CanonicalDeviceDescriptor | undefined
+ ): Promise<DeviceHolder> {
+ const gpu = getGPU(recorder);
+ const adapter = await gpu.requestAdapter();
+ assert(adapter !== null, 'requestAdapter returned null');
+ if (!supportsFeature(adapter, descriptor)) {
+ throw new FeaturesNotSupported('One or more features are not supported');
+ }
+ const device = await adapter.requestDevice(descriptor);
+ assert(device !== null, 'requestDevice returned null');
+
+ return new DeviceHolder(device);
+ }
+
+ private constructor(device: GPUDevice) {
+ this._device = device;
+ void this._device.lost.then(ev => {
+ this.lostInfo = ev;
+ });
+ }
+
+ get device() {
+ assert(this._device !== undefined);
+ return this._device;
+ }
+
+ /** Push error scopes that surround test execution. */
+ beginTestScope(): void {
+ assert(this.state === 'acquired');
+ this.device.pushErrorScope('validation');
+ this.device.pushErrorScope('internal');
+ this.device.pushErrorScope('out-of-memory');
+ }
+
+ /** Mark the DeviceHolder as expecting a device loss when the test scope ends. */
+ expectDeviceLost(reason: GPUDeviceLostReason) {
+ assert(this.state === 'acquired');
+ this.expectedLostReason = reason;
+ }
+
+ /**
+ * Attempt to end test scopes: Check that there are no extra error scopes, and that no
+ * otherwise-uncaptured errors occurred during the test. Time out if it takes too long.
+ */
+ endTestScope(): Promise<void> {
+ assert(this.state === 'acquired');
+ const kTimeout = 5000;
+
+ // Time out if attemptEndTestScope (popErrorScope or onSubmittedWorkDone) never completes. If
+ // this rejects, the device won't be reused, so it's OK that popErrorScope calls may not have
+ // finished.
+ //
+ // This could happen due to a browser bug - e.g.,
+ // as of this writing, on Chrome GPU process crash, popErrorScope just hangs.
+ return raceWithRejectOnTimeout(this.attemptEndTestScope(), kTimeout, 'endTestScope timed out');
+ }
+
+ private async attemptEndTestScope(): Promise<void> {
+ let gpuValidationError: GPUError | null;
+ let gpuInternalError: GPUError | null;
+ let gpuOutOfMemoryError: GPUError | null;
+
+ // Submit to the queue to attempt to force a GPU flush.
+ this.device.queue.submit([]);
+
+ try {
+ // May reject if the device was lost.
+ [gpuOutOfMemoryError, gpuInternalError, gpuValidationError] = await Promise.all([
+ this.device.popErrorScope(),
+ this.device.popErrorScope(),
+ this.device.popErrorScope(),
+ ]);
+ } catch (ex) {
+ assert(this.lostInfo !== undefined, 'popErrorScope failed; did beginTestScope get missed?');
+ throw ex;
+ }
+
+ // Attempt to wait for the queue to be idle.
+ if (this.device.queue.onSubmittedWorkDone) {
+ await this.device.queue.onSubmittedWorkDone();
+ }
+
+ await assertReject('OperationError', this.device.popErrorScope(), {
+ allowMissingStack: true,
+ message: 'There was an extra error scope on the stack after a test',
+ });
+
+ if (gpuOutOfMemoryError !== null) {
+ assert(gpuOutOfMemoryError instanceof GPUOutOfMemoryError);
+ // Don't allow the device to be reused; unexpected OOM could break the device.
+ throw new TestOOMedShouldAttemptGC('Unexpected out-of-memory error occurred');
+ }
+ if (gpuInternalError !== null) {
+ assert(gpuInternalError instanceof GPUInternalError);
+ // Allow the device to be reused.
+ throw new TestFailedButDeviceReusable(
+ `Unexpected internal error occurred: ${gpuInternalError.message}`
+ );
+ }
+ if (gpuValidationError !== null) {
+ assert(gpuValidationError instanceof GPUValidationError);
+ // Allow the device to be reused.
+ throw new TestFailedButDeviceReusable(
+ `Unexpected validation error occurred: ${gpuValidationError.message}`
+ );
+ }
+ }
+
+ /**
+ * Release the ref to the GPUDevice. This should be the only ref held by the DevicePool or
+ * GPUTest, so in theory it can get garbage collected.
+ */
+ releaseGPUDevice(): void {
+ this._device = undefined;
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/floating_point.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/floating_point.ts
new file mode 100644
index 0000000000..e271e7db7a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/floating_point.ts
@@ -0,0 +1,5441 @@
+import { ROArrayArray, ROArrayArrayArray } from '../../common/util/types.js';
+import { assert, unreachable } from '../../common/util/util.js';
+import { Float16Array } from '../../external/petamoriken/float16/float16.js';
+import { Case, IntervalFilter } from '../shader/execution/expression/expression.js';
+
+import BinaryStream from './binary_stream.js';
+import { anyOf } from './compare.js';
+import { kValue } from './constants.js';
+import {
+ abstractFloat,
+ f16,
+ f32,
+ isFloatType,
+ Scalar,
+ ScalarType,
+ toMatrix,
+ toVector,
+ u32,
+} from './conversion.js';
+import {
+ calculatePermutations,
+ cartesianProduct,
+ correctlyRoundedF16,
+ correctlyRoundedF32,
+ correctlyRoundedF64,
+ flatten2DArray,
+ FlushMode,
+ flushSubnormalNumberF16,
+ flushSubnormalNumberF32,
+ flushSubnormalNumberF64,
+ isFiniteF16,
+ isFiniteF32,
+ isSubnormalNumberF16,
+ isSubnormalNumberF32,
+ isSubnormalNumberF64,
+ map2DArray,
+ oneULPF16,
+ oneULPF32,
+ quantizeToF32,
+ quantizeToF16,
+ unflatten2DArray,
+ every2DArray,
+} from './math.js';
+
+/** Indicate the kind of WGSL floating point numbers being operated on */
+export type FPKind = 'f32' | 'f16' | 'abstract';
+
+enum SerializedFPIntervalKind {
+ Abstract,
+ F32,
+ F16,
+}
+
+/** serializeFPKind() serializes a FPKind to a BinaryStream */
+export function serializeFPKind(s: BinaryStream, value: FPKind) {
+ switch (value) {
+ case 'abstract':
+ s.writeU8(SerializedFPIntervalKind.Abstract);
+ break;
+ case 'f16':
+ s.writeU8(SerializedFPIntervalKind.F16);
+ break;
+ case 'f32':
+ s.writeU8(SerializedFPIntervalKind.F32);
+ break;
+ }
+}
+
+/** deserializeFPKind() deserializes a FPKind from a BinaryStream */
+export function deserializeFPKind(s: BinaryStream): FPKind {
+ const kind = s.readU8();
+ switch (kind) {
+ case SerializedFPIntervalKind.Abstract:
+ return 'abstract';
+ case SerializedFPIntervalKind.F16:
+ return 'f16';
+ case SerializedFPIntervalKind.F32:
+ return 'f32';
+ default:
+ unreachable(`invalid deserialized FPKind: ${kind}`);
+ }
+}
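+
+// Illustrative round-trip (comment-only sketch; assumes a small 8-byte scratch buffer):
+//   const out = new BinaryStream(new ArrayBuffer(8));
+//   serializeFPKind(out, 'f16');
+//   const kind = deserializeFPKind(new BinaryStream(out.buffer().buffer)); // 'f16'
+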
+// Containers
+
+/**
+ * Representation of bounds for an interval as an array with either one or two
+ * elements. Single element indicates that the interval is a single point. For
+ * two elements, the first is the lower bound of the interval and the second is
+ * the upper bound.
+ */
+export type IntervalBounds = readonly [number] | readonly [number, number];
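+
+// Illustrative examples (comment-only): a point and a proper interval.
+//   const point: IntervalBounds = [3.5];  // just the value 3.5
+//   const range: IntervalBounds = [0, 1]; // all values from 0 to 1, inclusive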
+
+/** Represents a closed interval of floating point numbers */
+export class FPInterval {
+ public readonly kind: FPKind;
+ public readonly begin: number;
+ public readonly end: number;
+
+ /**
+ * Constructor
+ *
+ * `FPTraits.toInterval` is the preferred way to create FPIntervals
+ *
+ * @param kind the floating point number type this is an interval for
+ * @param bounds beginning and end of the interval
+ */
+ public constructor(kind: FPKind, ...bounds: IntervalBounds) {
+ this.kind = kind;
+
+ const begin = bounds[0];
+ const end = bounds.length === 2 ? bounds[1] : bounds[0];
+ assert(!Number.isNaN(begin) && !Number.isNaN(end), `bounds need to be non-NaN`);
+ assert(begin <= end, `bounds[0] (${begin}) must be less than or equal to bounds[1] (${end})`);
+
+ this.begin = begin;
+ this.end = end;
+ }
+
+ /** @returns the floating point traits for this interval */
+ public traits(): FPTraits {
+ return FP[this.kind];
+ }
+
+ /** @returns begin and end if non-point interval, otherwise just begin */
+ public bounds(): IntervalBounds {
+ return this.isPoint() ? [this.begin] : [this.begin, this.end];
+ }
+
+ /** @returns if a point or interval is completely contained by this interval */
+ public contains(n: number | FPInterval): boolean {
+ if (Number.isNaN(n)) {
+ // Being the 'any' interval indicates that accuracy is not defined for this
+ // test, so the test is just checking that this input doesn't cause the
+ // implementation to misbehave, so NaN is accepted.
+ return this.begin === Number.NEGATIVE_INFINITY && this.end === Number.POSITIVE_INFINITY;
+ }
+
+ if (n instanceof FPInterval) {
+ return this.begin <= n.begin && this.end >= n.end;
+ }
+ return this.begin <= n && this.end >= n;
+ }
+
+ /** @returns if any values in the interval may be flushed to zero, this
+ * includes any subnormals and zero itself.
+ */
+ public containsZeroOrSubnormals(): boolean {
+ return !(
+ this.end < this.traits().constants().negative.subnormal.min ||
+ this.begin > this.traits().constants().positive.subnormal.max
+ );
+ }
+
+ /** @returns if this interval contains a single point */
+ public isPoint(): boolean {
+ return this.begin === this.end;
+ }
+
+ /** @returns if this interval only contains finite values */
+ public isFinite(): boolean {
+ return this.traits().isFinite(this.begin) && this.traits().isFinite(this.end);
+ }
+
+ /** @returns a string representation for logging purposes */
+ public toString(): string {
+ return `{ '${this.kind}', [${this.bounds().map(this.traits().scalarBuilder)}] }`;
+ }
+}
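+
+// Illustrative example (comment-only sketch; FPTraits.toInterval is the preferred way to
+// construct intervals in tests):
+//   const i = new FPInterval('f32', 1, 2);
+//   i.contains(1.5);  // true
+//   i.contains(3);    // false
+//   i.isPoint();      // false; new FPInterval('f32', 1) would be a point interval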
+
+/** serializeFPInterval() serializes a FPInterval to a BinaryStream */
+export function serializeFPInterval(s: BinaryStream, i: FPInterval) {
+ serializeFPKind(s, i.kind);
+ const traits = FP[i.kind];
+ s.writeCond(i !== traits.constants().unboundedInterval, {
+ if_true: () => {
+ // Bounded
+ switch (i.kind) {
+ case 'abstract':
+ s.writeF64(i.begin);
+ s.writeF64(i.end);
+ break;
+ case 'f32':
+ s.writeF32(i.begin);
+ s.writeF32(i.end);
+ break;
+ case 'f16':
+ s.writeF16(i.begin);
+ s.writeF16(i.end);
+ break;
+ default:
+ unreachable(`Unable to serialize FPInterval ${i}`);
+ break;
+ }
+ },
+ if_false: () => {
+ // Unbounded
+ },
+ });
+}
+
+/** deserializeFPInterval() deserializes a FPInterval from a BinaryStream */
+export function deserializeFPInterval(s: BinaryStream): FPInterval {
+ const kind = deserializeFPKind(s);
+ const traits = FP[kind];
+ return s.readCond({
+ if_true: () => {
+ // Bounded
+ switch (kind) {
+ case 'abstract':
+ return new FPInterval(traits.kind, s.readF64(), s.readF64());
+ case 'f32':
+ return new FPInterval(traits.kind, s.readF32(), s.readF32());
+ case 'f16':
+ return new FPInterval(traits.kind, s.readF16(), s.readF16());
+ }
+ unreachable(`Unable to deserialize FPInterval with kind ${kind}`);
+ },
+ if_false: () => {
+ // Unbounded
+ return traits.constants().unboundedInterval;
+ },
+ });
+}
+
+/**
+ * Representation of a vec2/3/4 of floating point intervals as an array of
+ * FPIntervals.
+ */
+export type FPVector =
+ | [FPInterval, FPInterval]
+ | [FPInterval, FPInterval, FPInterval]
+ | [FPInterval, FPInterval, FPInterval, FPInterval];
+
+/** Shorthand for an Array of Arrays that contains a column-major matrix */
+type Array2D<T> = ROArrayArray<T>;
+
+/**
+ * Representation of a matCxR of floating point intervals as an array of arrays
+ * of FPIntervals. This maps onto the WGSL concept of matrix. Internally the
+ * data is stored in column-major order, i.e. as an array of column vectors.
+ */
+export type FPMatrix =
+ | readonly [readonly [FPInterval, FPInterval], readonly [FPInterval, FPInterval]]
+ | readonly [
+ readonly [FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval],
+ ]
+ | readonly [
+ readonly [FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval],
+ ]
+ | readonly [
+ readonly [FPInterval, FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval, FPInterval],
+ ]
+ | readonly [
+ readonly [FPInterval, FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval, FPInterval],
+ ]
+ | readonly [
+ readonly [FPInterval, FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval, FPInterval],
+ ]
+ | readonly [
+ readonly [FPInterval, FPInterval, FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval, FPInterval, FPInterval],
+ ]
+ | readonly [
+ readonly [FPInterval, FPInterval, FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval, FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval, FPInterval, FPInterval],
+ ]
+ | readonly [
+ readonly [FPInterval, FPInterval, FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval, FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval, FPInterval, FPInterval],
+ readonly [FPInterval, FPInterval, FPInterval, FPInterval],
+ ];
+
+// Utilities
+
+/** @returns input with an appended 0, if inputs contains non-zero subnormals */
+// Once the f16 traits are usable here, this can be replaced with something like
+// `FP.f16.addFlushedIfNeeded`.
+function addFlushedIfNeededF16(values: readonly number[]): readonly number[] {
+ return values.some(v => v !== 0 && isSubnormalNumberF16(v)) ? values.concat(0) : values;
+}
+
+// Operations
+
+/**
+ * A function that converts a point to an acceptance interval.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface ScalarToInterval {
+ (x: number): FPInterval;
+}
+
+/** Operation used to implement a ScalarToInterval */
+interface ScalarToIntervalOp {
+ /** @returns acceptance interval for a function at point x */
+ impl: ScalarToInterval;
+
+ /**
+ * Calculates where in the domain defined by x the min/max extrema of impl
+ * occur and returns a span of those points to be used as the domain instead.
+ *
+ * Used by this.runScalarToIntervalOp before invoking impl.
+ * If not defined, the bounds of the existing domain are assumed to be the
+ * extrema.
+ *
+ * This is only implemented for operations that meet all the following
+ * criteria:
+ * a) non-monotonic
+ * b) used in inherited accuracy calculations
+ * c) need to take in an interval for b)
+ * i.e. fooInterval takes in x: number | FPInterval, not x: number
+ */
+ extrema?: (x: FPInterval) => FPInterval;
+}
+
+/**
+ * A function that converts a pair of points to an acceptance interval.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface ScalarPairToInterval {
+ (x: number, y: number): FPInterval;
+}
+
+/** Operation used to implement a ScalarPairToInterval */
+interface ScalarPairToIntervalOp {
+ /** @returns acceptance interval for a function at point (x, y) */
+ impl: ScalarPairToInterval;
+ /**
+ * Calculates where in domain defined by x & y the min/max extrema of impl
+ * occur and returns spans of those points to be used as the domain instead.
+ *
+ * Used by runScalarPairToIntervalOp before invoking impl.
+ * If not defined, the bounds of the existing domain are assumed to be the
+ * extrema.
+ *
+ * This is only implemented for functions that meet all of the following
+ * criteria:
+ * a) non-monotonic
+ * b) used in inherited accuracy calculations
+ * c) need to take in an interval for b)
+ */
+ extrema?: (x: FPInterval, y: FPInterval) => [FPInterval, FPInterval];
+}
+
+/** Domain for a ScalarPairToInterval implementation */
+interface ScalarPairToIntervalDomain {
+ // Arrays to support discrete valid domain intervals
+ x: readonly FPInterval[];
+ y: readonly FPInterval[];
+}
+
+/**
+ * A function that converts a triplet of points to an acceptance interval.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface ScalarTripleToInterval {
+ (x: number, y: number, z: number): FPInterval;
+}
+
+/** Operation used to implement a ScalarTripleToInterval */
+interface ScalarTripleToIntervalOp {
+ // Re-using the *Op interface pattern for symmetry with the other operations.
+ /** @returns acceptance interval for a function at point (x, y, z) */
+ impl: ScalarTripleToInterval;
+}
+
+// Currently ScalarToVector is not integrated with the rest of the floating point
+// framework, because the only builtins that use it are actually
+// u32 -> [f32, f32, f32, f32] functions, so the whole rounding and interval
+// process doesn't get applied to the inputs.
+// They do use the framework internally by invoking divisionInterval on segments
+// of the input.
+/**
+ * A function that converts a point to a vector of acceptance intervals.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface ScalarToVector {
+ (n: number): FPVector;
+}
+
+/**
+ * A function that converts a vector to an acceptance interval.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface VectorToInterval {
+ (x: readonly number[]): FPInterval;
+}
+
+/** Operation used to implement a VectorToInterval */
+interface VectorToIntervalOp {
+ // Re-using the *Op interface pattern for symmetry with the other operations.
+ /** @returns acceptance interval for a function on vector x */
+ impl: VectorToInterval;
+}
+
+/**
+ * A function that converts a pair of vectors to an acceptance interval.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface VectorPairToInterval {
+ (x: readonly number[], y: readonly number[]): FPInterval;
+}
+
+/** Operation used to implement a VectorPairToInterval */
+interface VectorPairToIntervalOp {
+ // Re-using the *Op interface pattern for symmetry with the other operations.
+ /** @returns acceptance interval for a function on vectors (x, y) */
+ impl: VectorPairToInterval;
+}
+
+/**
+ * A function that converts a vector to a vector of acceptance intervals.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface VectorToVector {
+ (x: readonly number[]): FPVector;
+}
+
+/** Operation used to implement a VectorToVector */
+interface VectorToVectorOp {
+ // Re-using the *Op interface pattern for symmetry with the other operations.
+ /** @returns a vector of acceptance intervals for a function on vector x */
+ impl: VectorToVector;
+}
+
+/**
+ * A function that converts a pair of vectors to a vector of acceptance
+ * intervals.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface VectorPairToVector {
+ (x: readonly number[], y: readonly number[]): FPVector;
+}
+
+/** Operation used to implement a VectorPairToVector */
+interface VectorPairToVectorOp {
+ // Re-using the *Op interface pattern for symmetry with the other operations.
+ /** @returns a vector of acceptance intervals for a function on vectors (x, y) */
+ impl: VectorPairToVector;
+}
+
+/**
+ * A function that converts a vector and a scalar to a vector of acceptance
+ * intervals.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface VectorScalarToVector {
+ (x: readonly number[], y: number): FPVector;
+}
+
+/**
+ * A function that converts a scalar and a vector to a vector of acceptance
+ * intervals.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface ScalarVectorToVector {
+ (x: number, y: readonly number[]): FPVector;
+}
+
+/**
+ * A function that converts a matrix to an acceptance interval.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface MatrixToScalar {
+ (m: Array2D<number>): FPInterval;
+}
+
+/** Operation used to implement a MatrixToMatrix */
+interface MatrixToMatrixOp {
+ // Re-using the *Op interface pattern for symmetry with the other operations.
+ /** @returns a matrix of acceptance intervals for a function on matrix x */
+ impl: MatrixToMatrix;
+}
+
+/**
+ * A function that converts a matrix to a matrix of acceptance intervals.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface MatrixToMatrix {
+ (m: Array2D<number>): FPMatrix;
+}
+
+/**
+ * A function that converts a pair of matrices to a matrix of acceptance
+ * intervals.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface MatrixPairToMatrix {
+ (x: Array2D<number>, y: Array2D<number>): FPMatrix;
+}
+
+/**
+ * A function that converts a matrix and a scalar to a matrix of acceptance
+ * intervals.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface MatrixScalarToMatrix {
+ (x: Array2D<number>, y: number): FPMatrix;
+}
+
+/**
+ * A function that converts a scalar and a matrix to a matrix of acceptance
+ * intervals.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface ScalarMatrixToMatrix {
+ (x: number, y: Array2D<number>): FPMatrix;
+}
+
+/**
+ * A function that converts a matrix and a vector to a vector of acceptance
+ * intervals.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface MatrixVectorToVector {
+ (x: Array2D<number>, y: readonly number[]): FPVector;
+}
+
+/**
+ * A function that converts a vector and a matrix to a vector of acceptance
+ * intervals.
+ * This is the public facing API for builtin implementations that is called
+ * from tests.
+ */
+export interface VectorMatrixToVector {
+ (x: readonly number[], y: Array2D<number>): FPVector;
+}
+
+// Traits
+
+/**
+ * Typed structure containing all the limits/constants defined for each
+ * WGSL floating point kind
+ */
+interface FPConstants {
+ positive: {
+ min: number;
+ max: number;
+ infinity: number;
+ nearest_max: number;
+ less_than_one: number;
+ subnormal: {
+ min: number;
+ max: number;
+ };
+ pi: {
+ whole: number;
+ three_quarters: number;
+ half: number;
+ third: number;
+ quarter: number;
+ sixth: number;
+ };
+ e: number;
+ };
+ negative: {
+ min: number;
+ max: number;
+ infinity: number;
+ nearest_min: number;
+ less_than_one: number;
+ subnormal: {
+ min: number;
+ max: number;
+ };
+ pi: {
+ whole: number;
+ three_quarters: number;
+ half: number;
+ third: number;
+ quarter: number;
+ sixth: number;
+ };
+ };
+ unboundedInterval: FPInterval;
+ zeroInterval: FPInterval;
+ negPiToPiInterval: FPInterval;
+ greaterThanZeroInterval: FPInterval;
+ zeroVector: {
+ 2: FPVector;
+ 3: FPVector;
+ 4: FPVector;
+ };
+ unboundedVector: {
+ 2: FPVector;
+ 3: FPVector;
+ 4: FPVector;
+ };
+ unboundedMatrix: {
+ 2: {
+ 2: FPMatrix;
+ 3: FPMatrix;
+ 4: FPMatrix;
+ };
+ 3: {
+ 2: FPMatrix;
+ 3: FPMatrix;
+ 4: FPMatrix;
+ };
+ 4: {
+ 2: FPMatrix;
+ 3: FPMatrix;
+ 4: FPMatrix;
+ };
+ };
+}
+
+/** A representation of an FPInterval for a case param */
+export type FPIntervalParam = {
+ kind: FPKind;
+ interval: number | IntervalBounds;
+};
+
+/** Abstract base class for all floating-point traits */
+export abstract class FPTraits {
+ public readonly kind: FPKind;
+ protected constructor(k: FPKind) {
+ this.kind = k;
+ }
+
+ public abstract constants(): FPConstants;
+
+ // Utilities - Implemented
+
+ /** @returns an interval containing the point or the original interval */
+ public toInterval(n: number | IntervalBounds | FPInterval): FPInterval {
+ if (n instanceof FPInterval) {
+ if (n.kind === this.kind) {
+ return n;
+ }
+
+ // Preserve whether the original interval was bounded or unbounded
+ if (!n.isFinite()) {
+ return this.constants().unboundedInterval;
+ }
+
+ return new FPInterval(this.kind, ...n.bounds());
+ }
+
+ if (n instanceof Array) {
+ return new FPInterval(this.kind, ...n);
+ }
+
+ return new FPInterval(this.kind, n, n);
+ }
+
+ /**
+ * Makes a param that can be turned into an interval
+ */
+ public toParam(n: number | IntervalBounds): FPIntervalParam {
+ return {
+ kind: this.kind,
+ interval: n,
+ };
+ }
+
+ /**
+ * Converts p into an FPInterval if it is an FPIntervalParam
+ */
+ public fromParam(
+ p: number | IntervalBounds | FPIntervalParam
+ ): number | IntervalBounds | FPInterval {
+ const param = p as FPIntervalParam;
+ if (param.interval && param.kind) {
+ assert(param.kind === this.kind);
+ return this.toInterval(param.interval);
+ }
+ return p as number | IntervalBounds;
+ }
+
+ /**
+ * @returns an interval with the tightest bounds that includes all provided
+ * intervals
+ */
+ public spanIntervals(...intervals: readonly FPInterval[]): FPInterval {
+ assert(intervals.length > 0, `span of an empty list of FPIntervals is not allowed`);
+ assert(
+ intervals.every(i => i.kind === this.kind),
+ `span is only defined for intervals with the same kind`
+ );
+ let begin = Number.POSITIVE_INFINITY;
+ let end = Number.NEGATIVE_INFINITY;
+ intervals.forEach(i => {
+ begin = Math.min(i.begin, begin);
+ end = Math.max(i.end, end);
+ });
+ return this.toInterval([begin, end]);
+ }
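+
+ // Illustrative example (comment-only sketch, using the f32 traits): spanning disjoint
+ // intervals yields the tightest interval that covers all of them:
+ //   FP.f32.spanIntervals(FP.f32.toInterval([0, 1]), FP.f32.toInterval([4, 5]));
+ //   // -> the f32 interval [0, 5]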
+
+ /** Narrow an array of values to FPVector if possible */
+ public isVector(v: ReadonlyArray<number | IntervalBounds | FPInterval>): v is FPVector {
+ if (v.every(e => e instanceof FPInterval && e.kind === this.kind)) {
+ return v.length === 2 || v.length === 3 || v.length === 4;
+ }
+ return false;
+ }
+
+ /** @returns an FPVector representation of an array of values if possible */
+ public toVector(v: ReadonlyArray<number | IntervalBounds | FPInterval>): FPVector {
+ if (this.isVector(v) && v.every(e => e.kind === this.kind)) {
+ return v;
+ }
+
+ const f = v.map(e => this.toInterval(e));
+ // The return of the map above is a readonly FPInterval[], which needs to be narrowed
+ // to FPVector, since FPVector is defined as fixed length tuples.
+ if (this.isVector(f)) {
+ return f;
+ }
+ unreachable(`Cannot convert [${v}] to FPVector`);
+ }
+
+ /**
+ * @returns a FPVector where each element is the span for corresponding
+ * elements at the same index in the input vectors
+ */
+ public spanVectors(...vectors: FPVector[]): FPVector {
+ assert(
+ vectors.every(e => this.isVector(e)),
+ 'Vector span is not defined for vectors of differing floating point kinds'
+ );
+
+ const vector_length = vectors[0].length;
+ assert(
+ vectors.every(e => e.length === vector_length),
+ `Vector span is not defined for vectors of differing lengths`
+ );
+
+ const result: FPInterval[] = new Array<FPInterval>(vector_length);
+
+ for (let i = 0; i < vector_length; i++) {
+ result[i] = this.spanIntervals(...vectors.map(v => v[i]));
+ }
+ return this.toVector(result);
+ }
+
+ /** Narrow an array of an array of values to FPMatrix if possible */
+ public isMatrix(m: Array2D<number | IntervalBounds | FPInterval> | FPVector[]): m is FPMatrix {
+ if (!m.every(c => c.every(e => e instanceof FPInterval && e.kind === this.kind))) {
+ return false;
+ }
+ // At this point m is guaranteed to be a ROArrayArray<FPInterval>, but may be typed as a
+ // FPVector[].
+ // Coercing the type since FPVector[] is functionally equivalent to
+ // ROArrayArray<FPInterval> for .length and .every, but they are not type compatible,
+ // since tuples are not equivalent to arrays, so TS considers c in .every to
+ // be unresolvable below, even though our usage is safe.
+ m = m as ROArrayArray<FPInterval>;
+
+ if (m.length > 4 || m.length < 2) {
+ return false;
+ }
+
+ const num_rows = m[0].length;
+ if (num_rows > 4 || num_rows < 2) {
+ return false;
+ }
+
+ return m.every(c => c.length === num_rows);
+ }
+
+ /** @returns an FPMatrix representation of an array of an array of values if possible */
+ public toMatrix(m: Array2D<number | IntervalBounds | FPInterval> | FPVector[]): FPMatrix {
+ if (
+ this.isMatrix(m) &&
+ every2DArray(m, (e: FPInterval) => {
+ return e.kind === this.kind;
+ })
+ ) {
+ return m;
+ }
+
+ const result = map2DArray(m, this.toInterval.bind(this));
+
+ // The return of the map above is a ROArrayArray<FPInterval>, which needs to be
+ // narrowed to FPMatrix, since FPMatrix is defined as fixed length tuples.
+ if (this.isMatrix(result)) {
+ return result;
+ }
+ unreachable(`Cannot convert ${m} to FPMatrix`);
+ }
+
+ /**
+ * @returns a FPMatrix where each element is the span for corresponding
+ * elements at the same index in the input matrices
+ */
+ public spanMatrices(...matrices: FPMatrix[]): FPMatrix {
+ // Coercing the type of matrices, since tuples are not generally compatible
+ // with Arrays, but they are functionally equivalent for the usages in this
+ // function.
+ const ms = matrices as Array2D<FPInterval>[];
+ const num_cols = ms[0].length;
+ const num_rows = ms[0][0].length;
+ assert(
+ ms.every(m => m.length === num_cols && m.every(r => r.length === num_rows)),
+ `Matrix span is not defined for Matrices of differing dimensions`
+ );
+
+ const result: FPInterval[][] = [...Array(num_cols)].map(_ => [...Array(num_rows)]);
+ for (let i = 0; i < num_cols; i++) {
+ for (let j = 0; j < num_rows; j++) {
+ result[i][j] = this.spanIntervals(...ms.map(m => m[i][j]));
+ }
+ }
+
+ return this.toMatrix(result);
+ }
+
+ /** @returns input with an appended 0, if inputs contains non-zero subnormals */
+ public addFlushedIfNeeded(values: readonly number[]): readonly number[] {
+ const subnormals = values.filter(this.isSubnormal);
+ const needs_zero = subnormals.length > 0 && subnormals.every(s => s !== 0);
+ return needs_zero ? values.concat(0) : values;
+ }
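+
+ // Illustrative example (comment-only sketch, using the f32 traits): 2 ** -140 is a
+ // non-zero f32 subnormal, so a 0 is appended to also cover flush-to-zero behaviour:
+ //   FP.f32.addFlushedIfNeeded([1, 2 ** -140]); // -> [1, 2 ** -140, 0]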
+
+ /**
+ * Restrict the inputs to an ScalarToInterval operation
+ *
+ * Only used for operations that have tighter domain requirements than 'must
+ * be finite'.
+ *
+ * @param domain interval to restrict inputs to
+ * @param impl operation implementation to run if input is within the required domain
+ * @returns a ScalarToInterval that calls impl if domain contains the input,
+ * otherwise it returns an unbounded interval */
+ protected limitScalarToIntervalDomain(
+ domain: FPInterval,
+ impl: ScalarToInterval
+ ): ScalarToInterval {
+ return (n: number): FPInterval => {
+ return domain.contains(n) ? impl(n) : this.constants().unboundedInterval;
+ };
+ }
+
+ /**
+ * Restrict the inputs to a ScalarPairToInterval
+ *
+ * Only used for operations that have tighter domain requirements than 'must be
+ * finite'.
+ *
+ * @param domain set of intervals to restrict inputs to
+ * @param impl operation implementation to run if input is within the required domain
+ * @returns a ScalarPairToInterval that calls impl if domain contains the input,
+ * otherwise it returns an unbounded interval */
+ protected limitScalarPairToIntervalDomain(
+ domain: ScalarPairToIntervalDomain,
+ impl: ScalarPairToInterval
+ ): ScalarPairToInterval {
+ return (x: number, y: number): FPInterval => {
+ if (!domain.x.some(d => d.contains(x)) || !domain.y.some(d => d.contains(y))) {
+ return this.constants().unboundedInterval;
+ }
+
+ return impl(x, y);
+ };
+ }
+
+ /** Stub for scalar to interval generator */
+ protected unimplementedScalarToInterval(name: string, _x: number | FPInterval): FPInterval {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for scalar pair to interval generator */
+ protected unimplementedScalarPairToInterval(
+ name: string,
+ _x: number | FPInterval,
+ _y: number | FPInterval
+ ): FPInterval {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for scalar triple to interval generator */
+ protected unimplementedScalarTripleToInterval(
+ name: string,
+ _x: number | FPInterval,
+ _y: number | FPInterval,
+ _z: number | FPInterval
+ ): FPInterval {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for scalar to vector generator */
+ protected unimplementedScalarToVector(name: string, _x: number | FPInterval): FPVector {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for vector to interval generator */
+ protected unimplementedVectorToInterval(name: string, _x: (number | FPInterval)[]): FPInterval {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for vector pair to interval generator */
+ protected unimplementedVectorPairToInterval(
+ name: string,
+ _x: readonly (number | FPInterval)[],
+ _y: readonly (number | FPInterval)[]
+ ): FPInterval {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for vector to vector generator */
+ protected unimplementedVectorToVector(
+ name: string,
+ _x: readonly (number | FPInterval)[]
+ ): FPVector {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for vector pair to vector generator */
+ protected unimplementedVectorPairToVector(
+ name: string,
+ _x: readonly (number | FPInterval)[],
+ _y: readonly (number | FPInterval)[]
+ ): FPVector {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for vector-scalar to vector generator */
+ protected unimplementedVectorScalarToVector(
+ name: string,
+ _x: readonly (number | FPInterval)[],
+ _y: number | FPInterval
+ ): FPVector {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for scalar-vector to vector generator */
+ protected unimplementedScalarVectorToVector(
+ name: string,
+ _x: number | FPInterval,
+ _y: (number | FPInterval)[]
+ ): FPVector {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for matrix to interval generator */
+ protected unimplementedMatrixToInterval(name: string, _x: Array2D<number>): FPInterval {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for matrix to matrix generator */
+ protected unimplementedMatrixToMatrix(name: string, _x: Array2D<number>): FPMatrix {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for matrix pair to matrix generator */
+ protected unimplementedMatrixPairToMatrix(
+ name: string,
+ _x: Array2D<number>,
+ _y: Array2D<number>
+ ): FPMatrix {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for matrix-scalar to matrix generator */
+ protected unimplementedMatrixScalarToMatrix(
+ name: string,
+ _x: Array2D<number>,
+ _y: number | FPInterval
+ ): FPMatrix {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for scalar-matrix to matrix generator */
+ protected unimplementedScalarMatrixToMatrix(
+ name: string,
+ _x: number | FPInterval,
+ _y: Array2D<number>
+ ): FPMatrix {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for matrix-vector to vector generator */
+ protected unimplementedMatrixVectorToVector(
+ name: string,
+ _x: Array2D<number>,
+ _y: readonly (number | FPInterval)[]
+ ): FPVector {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for vector-matrix to vector generator */
+ protected unimplementedVectorMatrixToVector(
+ name: string,
+ _x: readonly (number | FPInterval)[],
+ _y: Array2D<number>
+ ): FPVector {
+ unreachable(`'${name}' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for distance generator */
+ protected unimplementedDistance(
+ _x: number | readonly number[],
+ _y: number | readonly number[]
+ ): FPInterval {
+ unreachable(`'distance' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for faceForward */
+ protected unimplementedFaceForward(
+ _x: readonly number[],
+ _y: readonly number[],
+ _z: readonly number[]
+ ): (FPVector | undefined)[] {
+ unreachable(`'faceForward' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for length generator */
+ protected unimplementedLength(
+ _x: number | FPInterval | readonly number[] | FPVector
+ ): FPInterval {
+ unreachable(`'length' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for modf generator */
+ protected unimplementedModf(_x: number): { fract: FPInterval; whole: FPInterval } {
+ unreachable(`'modf' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Stub for refract generator */
+ protected unimplementedRefract(
+ _i: readonly number[],
+ _s: readonly number[],
+ _r: number
+ ): FPVector {
+ unreachable(`'refract' is not yet implemented for '${this.kind}'`);
+ }
+
+ /** Version of absoluteErrorInterval that always returns the unboundedInterval */
+ protected unboundedAbsoluteErrorInterval(_n: number, _error_range: number): FPInterval {
+ return this.constants().unboundedInterval;
+ }
+
+ /** Version of ulpInterval that always returns the unboundedInterval */
+ protected unboundedUlpInterval(_n: number, _numULP: number): FPInterval {
+ return this.constants().unboundedInterval;
+ }
+
+ // Utilities - Defined by subclass
+ /**
+ * @returns the nearest precise value to the input. Rounding should be IEEE
+ * 'roundTiesToEven'.
+ */
+ public abstract readonly quantize: (n: number) => number;
+ /** @returns all valid roundings of input */
+ public abstract readonly correctlyRounded: (n: number) => readonly number[];
+ /** @returns true if input is considered finite, otherwise false */
+ public abstract readonly isFinite: (n: number) => boolean;
+ /** @returns true if input is considered subnormal, otherwise false */
+ public abstract readonly isSubnormal: (n: number) => boolean;
+ /** @returns 0 if the provided number is subnormal, otherwise returns the provided number */
+ public abstract readonly flushSubnormal: (n: number) => number;
+ /** @returns the magnitude of 1 ULP at the target value (optionally under the given flush mode) */
+ public abstract readonly oneULP: (target: number, mode?: FlushMode) => number;
+ /** @returns a builder for converting numbers to Scalars */
+ public abstract readonly scalarBuilder: (n: number) => Scalar;
+
+ // Framework - Cases
+
+ /**
+ * @returns a Case for the param and the interval generator provided.
+ * The Case will use an interval comparator for matching results.
+ * @param param the param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ private makeScalarToIntervalCase(
+ param: number,
+ filter: IntervalFilter,
+ ...ops: ScalarToInterval[]
+ ): Case | undefined {
+ param = this.quantize(param);
+
+ const intervals = ops.map(o => o(param));
+ if (filter === 'finite' && intervals.some(i => !i.isFinite())) {
+ return undefined;
+ }
+ return { input: [this.scalarBuilder(param)], expected: anyOf(...intervals) };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param params array of inputs to try
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ public generateScalarToIntervalCases(
+ params: readonly number[],
+ filter: IntervalFilter,
+ ...ops: ScalarToInterval[]
+ ): Case[] {
+ return params.reduce((cases, e) => {
+ const c = this.makeScalarToIntervalCase(e, filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ return cases;
+ }, new Array<Case>());
+ }
+
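+  // Illustrative usage sketch (names are hypothetical): a concrete traits
+  // instance `fp` could build unary-builtin cases like so, dropping any case
+  // whose acceptance interval is not finite:
+  //
+  //   const cases = fp.generateScalarToIntervalCases(
+  //     [0, 0.5, 1.0],          // inputs to try
+  //     'finite',               // interval filter
+  //     n => fp.absInterval(n)  // acceptance-interval generator
+  //   );
+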
+ /**
+ * @returns a Case for the params and the interval generator provided.
+ * The Case will use an interval comparator for matching results.
+ * @param param0 the first param to pass in
+ * @param param1 the second param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ private makeScalarPairToIntervalCase(
+ param0: number,
+ param1: number,
+ filter: IntervalFilter,
+ ...ops: ScalarPairToInterval[]
+ ): Case | undefined {
+ param0 = this.quantize(param0);
+ param1 = this.quantize(param1);
+
+ const intervals = ops.map(o => o(param0, param1));
+ if (filter === 'finite' && intervals.some(i => !i.isFinite())) {
+ return undefined;
+ }
+ return {
+ input: [this.scalarBuilder(param0), this.scalarBuilder(param1)],
+ expected: anyOf(...intervals),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param param0s array of inputs to try for the first input
+ * @param param1s array of inputs to try for the second input
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ public generateScalarPairToIntervalCases(
+ param0s: readonly number[],
+ param1s: readonly number[],
+ filter: IntervalFilter,
+ ...ops: ScalarPairToInterval[]
+ ): Case[] {
+ return cartesianProduct(param0s, param1s).reduce((cases, e) => {
+ const c = this.makeScalarPairToIntervalCase(e[0], e[1], filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ return cases;
+ }, new Array<Case>());
+ }
+
+ /**
+ * @returns a Case for the params and the interval generator provided.
+ * The Case will use an interval comparator for matching results.
+ * @param param0 the first param to pass in
+ * @param param1 the second param to pass in
+ * @param param2 the third param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ public makeScalarTripleToIntervalCase(
+ param0: number,
+ param1: number,
+ param2: number,
+ filter: IntervalFilter,
+ ...ops: ScalarTripleToInterval[]
+ ): Case | undefined {
+ param0 = this.quantize(param0);
+ param1 = this.quantize(param1);
+ param2 = this.quantize(param2);
+
+ const intervals = ops.map(o => o(param0, param1, param2));
+ if (filter === 'finite' && intervals.some(i => !i.isFinite())) {
+ return undefined;
+ }
+ return {
+ input: [this.scalarBuilder(param0), this.scalarBuilder(param1), this.scalarBuilder(param2)],
+ expected: anyOf(...intervals),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param param0s array of inputs to try for the first input
+ * @param param1s array of inputs to try for the second input
+ * @param param2s array of inputs to try for the third input
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ public generateScalarTripleToIntervalCases(
+ param0s: readonly number[],
+ param1s: readonly number[],
+ param2s: readonly number[],
+ filter: IntervalFilter,
+ ...ops: ScalarTripleToInterval[]
+ ): Case[] {
+ return cartesianProduct(param0s, param1s, param2s).reduce((cases, e) => {
+ const c = this.makeScalarTripleToIntervalCase(e[0], e[1], e[2], filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ return cases;
+ }, new Array<Case>());
+ }
+
+ /**
+ * @returns a Case for the params and the interval generator provided.
+ * The Case will use an interval comparator for matching results.
+ * @param param the param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ private makeVectorToIntervalCase(
+ param: readonly number[],
+ filter: IntervalFilter,
+ ...ops: VectorToInterval[]
+ ): Case | undefined {
+ param = param.map(this.quantize);
+
+ const intervals = ops.map(o => o(param));
+ if (filter === 'finite' && intervals.some(i => !i.isFinite())) {
+ return undefined;
+ }
+ return {
+ input: [toVector(param, this.scalarBuilder)],
+ expected: anyOf(...intervals),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param params array of inputs to try
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ public generateVectorToIntervalCases(
+ params: ROArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: VectorToInterval[]
+ ): Case[] {
+ return params.reduce((cases, e) => {
+ const c = this.makeVectorToIntervalCase(e, filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ return cases;
+ }, new Array<Case>());
+ }
+
+ /**
+ * @returns a Case for the params and the interval generator provided.
+ * The Case will use an interval comparator for matching results.
+ * @param param0 the first param to pass in
+ * @param param1 the second param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ private makeVectorPairToIntervalCase(
+ param0: readonly number[],
+ param1: readonly number[],
+ filter: IntervalFilter,
+ ...ops: VectorPairToInterval[]
+ ): Case | undefined {
+ param0 = param0.map(this.quantize);
+ param1 = param1.map(this.quantize);
+
+ const intervals = ops.map(o => o(param0, param1));
+ if (filter === 'finite' && intervals.some(i => !i.isFinite())) {
+ return undefined;
+ }
+ return {
+ input: [toVector(param0, this.scalarBuilder), toVector(param1, this.scalarBuilder)],
+ expected: anyOf(...intervals),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param param0s array of inputs to try for the first input
+ * @param param1s array of inputs to try for the second input
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ public generateVectorPairToIntervalCases(
+ param0s: ROArrayArray<number>,
+ param1s: ROArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: VectorPairToInterval[]
+ ): Case[] {
+ return cartesianProduct(param0s, param1s).reduce((cases, e) => {
+ const c = this.makeVectorPairToIntervalCase(e[0], e[1], filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ return cases;
+ }, new Array<Case>());
+ }
+
+ /**
+ * @returns a Case for the param and vector of intervals generator provided
+ * @param param the param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a vector of acceptance
+ * intervals.
+ */
+ private makeVectorToVectorCase(
+ param: readonly number[],
+ filter: IntervalFilter,
+ ...ops: VectorToVector[]
+ ): Case | undefined {
+ param = param.map(this.quantize);
+
+ const vectors = ops.map(o => o(param));
+ if (filter === 'finite' && vectors.some(v => v.some(e => !e.isFinite()))) {
+ return undefined;
+ }
+ return {
+ input: [toVector(param, this.scalarBuilder)],
+ expected: anyOf(...vectors),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param params array of inputs to try
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a vector of acceptance
+ * intervals.
+ */
+ public generateVectorToVectorCases(
+ params: ROArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: VectorToVector[]
+ ): Case[] {
+ return params.reduce((cases, e) => {
+ const c = this.makeVectorToVectorCase(e, filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ return cases;
+ }, new Array<Case>());
+ }
+
+ /**
+ * @returns a Case for the params and the interval vector generator provided.
+ * The Case will use an interval comparator for matching results.
+ * @param scalar the scalar param to pass in
+ * @param vector the vector param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a vector of acceptance intervals
+ */
+ private makeScalarVectorToVectorCase(
+ scalar: number,
+ vector: readonly number[],
+ filter: IntervalFilter,
+ ...ops: ScalarVectorToVector[]
+ ): Case | undefined {
+ scalar = this.quantize(scalar);
+ vector = vector.map(this.quantize);
+
+ const results = ops.map(o => o(scalar, vector));
+ if (filter === 'finite' && results.some(r => r.some(e => !e.isFinite()))) {
+ return undefined;
+ }
+ return {
+ input: [this.scalarBuilder(scalar), toVector(vector, this.scalarBuilder)],
+ expected: anyOf(...results),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param scalars array of scalar inputs to try
+ * @param vectors array of vector inputs to try
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a vector of acceptance intervals
+ */
+ public generateScalarVectorToVectorCases(
+ scalars: readonly number[],
+ vectors: ROArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: ScalarVectorToVector[]
+ ): Case[] {
+ // Cannot use cartesianProduct here, due to heterogeneous types
+ const cases: Case[] = [];
+ scalars.forEach(scalar => {
+ vectors.forEach(vector => {
+ const c = this.makeScalarVectorToVectorCase(scalar, vector, filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ });
+ });
+ return cases;
+ }
+
+ /**
+ * @returns a Case for the params and the interval vector generator provided.
+ * The Case will use an interval comparator for matching results.
+ * @param vector the vector param to pass in
+ * @param scalar the scalar param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a vector of acceptance intervals
+ */
+ private makeVectorScalarToVectorCase(
+ vector: readonly number[],
+ scalar: number,
+ filter: IntervalFilter,
+ ...ops: VectorScalarToVector[]
+ ): Case | undefined {
+ vector = vector.map(this.quantize);
+ scalar = this.quantize(scalar);
+
+ const results = ops.map(o => o(vector, scalar));
+ if (filter === 'finite' && results.some(r => r.some(e => !e.isFinite()))) {
+ return undefined;
+ }
+ return {
+ input: [toVector(vector, this.scalarBuilder), this.scalarBuilder(scalar)],
+ expected: anyOf(...results),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param vectors array of vector inputs to try
+ * @param scalars array of scalar inputs to try
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a vector of acceptance intervals
+ */
+ public generateVectorScalarToVectorCases(
+ vectors: ROArrayArray<number>,
+ scalars: readonly number[],
+ filter: IntervalFilter,
+ ...ops: VectorScalarToVector[]
+ ): Case[] {
+ // Cannot use cartesianProduct here, due to heterogeneous types
+ const cases: Case[] = [];
+ vectors.forEach(vector => {
+ scalars.forEach(scalar => {
+ const c = this.makeVectorScalarToVectorCase(vector, scalar, filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ });
+ });
+ return cases;
+ }
+
+ /**
+ * @returns a Case for the param and vector of intervals generator provided
+ * @param param0 the first param to pass in
+ * @param param1 the second param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a vector of acceptance
+ * intervals.
+ */
+ private makeVectorPairToVectorCase(
+ param0: readonly number[],
+ param1: readonly number[],
+ filter: IntervalFilter,
+ ...ops: VectorPairToVector[]
+ ): Case | undefined {
+ param0 = param0.map(this.quantize);
+ param1 = param1.map(this.quantize);
+ const vectors = ops.map(o => o(param0, param1));
+ if (filter === 'finite' && vectors.some(v => v.some(e => !e.isFinite()))) {
+ return undefined;
+ }
+ return {
+ input: [toVector(param0, this.scalarBuilder), toVector(param1, this.scalarBuilder)],
+ expected: anyOf(...vectors),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param param0s array of inputs to try for the first input
+ * @param param1s array of inputs to try for the second input
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a vector of acceptance
+ * intervals.
+ */
+ public generateVectorPairToVectorCases(
+ param0s: ROArrayArray<number>,
+ param1s: ROArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: VectorPairToVector[]
+ ): Case[] {
+ return cartesianProduct(param0s, param1s).reduce((cases, e) => {
+ const c = this.makeVectorPairToVectorCase(e[0], e[1], filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ return cases;
+ }, new Array<Case>());
+ }
+
+ /**
+ * @returns a Case for the params and the component-wise interval generator provided.
+ * The Case will use an interval comparator for matching results.
+ * @param param0 the first vector param to pass in
+ * @param param1 the second vector param to pass in
+ * @param param2 the scalar param to pass in
+ * @param filter what interval filtering to apply
+ * @param componentWiseOps callbacks that implement generating a component-wise acceptance interval,
+ * one component result at a time.
+ */
+ private makeVectorPairScalarToVectorComponentWiseCase(
+ param0: readonly number[],
+ param1: readonly number[],
+ param2: number,
+ filter: IntervalFilter,
+ ...componentWiseOps: ScalarTripleToInterval[]
+ ): Case | undefined {
+ // Width of input vector
+ const width = param0.length;
+    assert(2 <= width && width <= 4, 'input vector width must be between 2 and 4');
+ assert(param1.length === width, 'two input vectors must have the same width');
+ param0 = param0.map(this.quantize);
+ param1 = param1.map(this.quantize);
+ param2 = this.quantize(param2);
+
+ // Call the component-wise interval generator and build the expectation FPVector
+ const results = componentWiseOps.map(o => {
+ return param0.map((el0, index) => o(el0, param1[index], param2)) as FPVector;
+ });
+ if (filter === 'finite' && results.some(r => r.some(e => !e.isFinite()))) {
+ return undefined;
+ }
+ return {
+ input: [
+ toVector(param0, this.scalarBuilder),
+ toVector(param1, this.scalarBuilder),
+ this.scalarBuilder(param2),
+ ],
+ expected: anyOf(...results),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param param0s array of first vector inputs to try
+ * @param param1s array of second vector inputs to try
+ * @param param2s array of scalar inputs to try
+ * @param filter what interval filtering to apply
+   * @param componentWiseOps callbacks that implement generating a component-wise acceptance interval
+ */
+ public generateVectorPairScalarToVectorComponentWiseCase(
+ param0s: ROArrayArray<number>,
+ param1s: ROArrayArray<number>,
+ param2s: readonly number[],
+ filter: IntervalFilter,
+ ...componentWiseOps: ScalarTripleToInterval[]
+ ): Case[] {
+ // Cannot use cartesianProduct here, due to heterogeneous types
+ const cases: Case[] = [];
+ param0s.forEach(param0 => {
+ param1s.forEach(param1 => {
+ param2s.forEach(param2 => {
+ const c = this.makeVectorPairScalarToVectorComponentWiseCase(
+ param0,
+ param1,
+ param2,
+ filter,
+ ...componentWiseOps
+ );
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ });
+ });
+ });
+ return cases;
+ }
+
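+  // Illustrative usage sketch: a clamp()- or mix()-style builtin taking
+  // (vecN, vecN, scalar) can reuse a scalar ternary interval generator,
+  // applied per component by the helper above, e.g. (assuming a concrete
+  // traits instance `fp` and its scalar clamp interval accessor):
+  //
+  //   const cases = fp.generateVectorPairScalarToVectorComponentWiseCase(
+  //     [[0, 1], [2, 3]],   // first vector inputs
+  //     [[4, 5], [6, 7]],   // second vector inputs
+  //     [0.5],              // scalar inputs
+  //     'finite',
+  //     (x, y, z) => fp.clampMedianInterval(x, y, z)
+  //   );
+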
+ /**
+ * @returns a Case for the param and an array of interval generators provided
+ * @param param the param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ private makeMatrixToScalarCase(
+ param: ROArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: MatrixToScalar[]
+ ): Case | undefined {
+ param = map2DArray(param, this.quantize);
+
+ const results = ops.map(o => o(param));
+ if (filter === 'finite' && results.some(e => !e.isFinite())) {
+ return undefined;
+ }
+
+ return {
+ input: [toMatrix(param, this.scalarBuilder)],
+ expected: anyOf(...results),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param params array of inputs to try
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ public generateMatrixToScalarCases(
+ params: ROArrayArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: MatrixToScalar[]
+ ): Case[] {
+ return params.reduce((cases, e) => {
+ const c = this.makeMatrixToScalarCase(e, filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ return cases;
+ }, new Array<Case>());
+ }
+
+ /**
+ * @returns a Case for the param and an array of interval generators provided
+ * @param param the param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a matrix of acceptance
+ * intervals
+ */
+ private makeMatrixToMatrixCase(
+ param: ROArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: MatrixToMatrix[]
+ ): Case | undefined {
+ param = map2DArray(param, this.quantize);
+
+ const results = ops.map(o => o(param));
+ if (filter === 'finite' && results.some(m => m.some(c => c.some(r => !r.isFinite())))) {
+ return undefined;
+ }
+
+ return {
+ input: [toMatrix(param, this.scalarBuilder)],
+ expected: anyOf(...results),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param params array of inputs to try
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a matrix of acceptance
+ * intervals
+ */
+ public generateMatrixToMatrixCases(
+ params: ROArrayArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: MatrixToMatrix[]
+ ): Case[] {
+ return params.reduce((cases, e) => {
+ const c = this.makeMatrixToMatrixCase(e, filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ return cases;
+ }, new Array<Case>());
+ }
+
+ /**
+ * @returns a Case for the params and matrix of intervals generator provided
+ * @param param0 the first param to pass in
+ * @param param1 the second param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a matrix of acceptance
+ * intervals
+ */
+ private makeMatrixPairToMatrixCase(
+ param0: ROArrayArray<number>,
+ param1: ROArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: MatrixPairToMatrix[]
+ ): Case | undefined {
+ param0 = map2DArray(param0, this.quantize);
+ param1 = map2DArray(param1, this.quantize);
+
+ const results = ops.map(o => o(param0, param1));
+ if (filter === 'finite' && results.some(m => m.some(c => c.some(r => !r.isFinite())))) {
+ return undefined;
+ }
+ return {
+ input: [toMatrix(param0, this.scalarBuilder), toMatrix(param1, this.scalarBuilder)],
+ expected: anyOf(...results),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param param0s array of inputs to try for the first input
+ * @param param1s array of inputs to try for the second input
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a matrix of acceptance
+ * intervals
+ */
+ public generateMatrixPairToMatrixCases(
+ param0s: ROArrayArrayArray<number>,
+ param1s: ROArrayArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: MatrixPairToMatrix[]
+ ): Case[] {
+ return cartesianProduct(param0s, param1s).reduce((cases, e) => {
+ const c = this.makeMatrixPairToMatrixCase(e[0], e[1], filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ return cases;
+ }, new Array<Case>());
+ }
+
+ /**
+ * @returns a Case for the params and matrix of intervals generator provided
+ * @param mat the matrix param to pass in
+ * @param scalar the scalar to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a matrix of acceptance
+ * intervals
+ */
+ private makeMatrixScalarToMatrixCase(
+ mat: ROArrayArray<number>,
+ scalar: number,
+ filter: IntervalFilter,
+ ...ops: MatrixScalarToMatrix[]
+ ): Case | undefined {
+ mat = map2DArray(mat, this.quantize);
+ scalar = this.quantize(scalar);
+
+ const results = ops.map(o => o(mat, scalar));
+ if (filter === 'finite' && results.some(m => m.some(c => c.some(r => !r.isFinite())))) {
+ return undefined;
+ }
+ return {
+ input: [toMatrix(mat, this.scalarBuilder), this.scalarBuilder(scalar)],
+ expected: anyOf(...results),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param mats array of inputs to try for the matrix input
+ * @param scalars array of inputs to try for the scalar input
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a matrix of acceptance
+ * intervals
+ */
+ public generateMatrixScalarToMatrixCases(
+ mats: ROArrayArrayArray<number>,
+ scalars: readonly number[],
+ filter: IntervalFilter,
+ ...ops: MatrixScalarToMatrix[]
+ ): Case[] {
+ // Cannot use cartesianProduct here, due to heterogeneous types
+ const cases: Case[] = [];
+ mats.forEach(mat => {
+ scalars.forEach(scalar => {
+ const c = this.makeMatrixScalarToMatrixCase(mat, scalar, filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ });
+ });
+ return cases;
+ }
+
+ /**
+ * @returns a Case for the params and matrix of intervals generator provided
+ * @param scalar the scalar to pass in
+ * @param mat the matrix param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a matrix of acceptance
+ * intervals
+ */
+ private makeScalarMatrixToMatrixCase(
+ scalar: number,
+ mat: ROArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: ScalarMatrixToMatrix[]
+ ): Case | undefined {
+ scalar = this.quantize(scalar);
+ mat = map2DArray(mat, this.quantize);
+
+ const results = ops.map(o => o(scalar, mat));
+ if (filter === 'finite' && results.some(m => m.some(c => c.some(r => !r.isFinite())))) {
+ return undefined;
+ }
+ return {
+ input: [this.scalarBuilder(scalar), toMatrix(mat, this.scalarBuilder)],
+ expected: anyOf(...results),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param scalars array of inputs to try for the scalar input
+ * @param mats array of inputs to try for the matrix input
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a matrix of acceptance
+ * intervals
+ */
+ public generateScalarMatrixToMatrixCases(
+ scalars: readonly number[],
+ mats: ROArrayArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: ScalarMatrixToMatrix[]
+ ): Case[] {
+ // Cannot use cartesianProduct here, due to heterogeneous types
+ const cases: Case[] = [];
+ mats.forEach(mat => {
+ scalars.forEach(scalar => {
+ const c = this.makeScalarMatrixToMatrixCase(scalar, mat, filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ });
+ });
+ return cases;
+ }
+
+ /**
+ * @returns a Case for the params and the vector of intervals generator provided
+ * @param mat the matrix param to pass in
+ * @param vec the vector to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a vector of acceptance
+ * intervals
+ */
+ private makeMatrixVectorToVectorCase(
+ mat: ROArrayArray<number>,
+ vec: readonly number[],
+ filter: IntervalFilter,
+ ...ops: MatrixVectorToVector[]
+ ): Case | undefined {
+ mat = map2DArray(mat, this.quantize);
+ vec = vec.map(this.quantize);
+
+ const results = ops.map(o => o(mat, vec));
+ if (filter === 'finite' && results.some(v => v.some(e => !e.isFinite()))) {
+ return undefined;
+ }
+ return {
+ input: [toMatrix(mat, this.scalarBuilder), toVector(vec, this.scalarBuilder)],
+ expected: anyOf(...results),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param mats array of inputs to try for the matrix input
+ * @param vecs array of inputs to try for the vector input
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a vector of acceptance
+ * intervals
+ */
+ public generateMatrixVectorToVectorCases(
+ mats: ROArrayArrayArray<number>,
+ vecs: ROArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: MatrixVectorToVector[]
+ ): Case[] {
+ // Cannot use cartesianProduct here, due to heterogeneous types
+ const cases: Case[] = [];
+ mats.forEach(mat => {
+ vecs.forEach(vec => {
+ const c = this.makeMatrixVectorToVectorCase(mat, vec, filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ });
+ });
+ return cases;
+ }
+
+ /**
+ * @returns a Case for the params and the vector of intervals generator provided
+ * @param vec the vector to pass in
+ * @param mat the matrix param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a vector of acceptance
+ * intervals
+ */
+ private makeVectorMatrixToVectorCase(
+ vec: readonly number[],
+ mat: ROArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: VectorMatrixToVector[]
+ ): Case | undefined {
+ vec = vec.map(this.quantize);
+ mat = map2DArray(mat, this.quantize);
+
+ const results = ops.map(o => o(vec, mat));
+ if (filter === 'finite' && results.some(v => v.some(e => !e.isFinite()))) {
+ return undefined;
+ }
+ return {
+ input: [toVector(vec, this.scalarBuilder), toMatrix(mat, this.scalarBuilder)],
+ expected: anyOf(...results),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param vecs array of inputs to try for the vector input
+ * @param mats array of inputs to try for the matrix input
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating a vector of acceptance
+ * intervals
+ */
+ public generateVectorMatrixToVectorCases(
+ vecs: ROArrayArray<number>,
+ mats: ROArrayArrayArray<number>,
+ filter: IntervalFilter,
+ ...ops: VectorMatrixToVector[]
+ ): Case[] {
+ // Cannot use cartesianProduct here, due to heterogeneous types
+ const cases: Case[] = [];
+ vecs.forEach(vec => {
+ mats.forEach(mat => {
+ const c = this.makeVectorMatrixToVectorCase(vec, mat, filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ });
+ });
+ return cases;
+ }
+
+ // Framework - Intervals
+
+ /**
+ * Converts a point to an acceptance interval, using a specific function
+ *
+ * This handles correctly rounding and flushing inputs as needed.
+ * Duplicate inputs are pruned before invoking op.impl.
+ * op.extrema is invoked before this point in the call stack.
+ * op.domain is tested before this point in the call stack.
+ *
+ * @param n value to flush & round then invoke op.impl on
+ * @param op operation defining the function being run
+ * @returns a span over all the outputs of op.impl
+ */
+ private roundAndFlushScalarToInterval(n: number, op: ScalarToIntervalOp) {
+ assert(!Number.isNaN(n), `flush not defined for NaN`);
+ const values = this.correctlyRounded(n);
+ const inputs = this.addFlushedIfNeeded(values);
+ const results = new Set<FPInterval>(inputs.map(op.impl));
+ return this.spanIntervals(...results);
+ }
+
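+  // Worked illustration (approximate, depends on the concrete trait): for an
+  // f32-like trait and n = 0.1, correctlyRounded(0.1) yields the two f32
+  // neighbours of 0.1; neither is subnormal, so no flushed variants are added,
+  // op.impl runs once per candidate, and the resulting intervals are spanned.
+  // For a subnormal n, addFlushedIfNeeded also appends 0, so op.impl is
+  // evaluated at the flushed input as well.
+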
+ /**
+ * Converts a pair to an acceptance interval, using a specific function
+ *
+ * This handles correctly rounding and flushing inputs as needed.
+ * Duplicate inputs are pruned before invoking op.impl.
+ * All unique combinations of x & y are run.
+ * op.extrema is invoked before this point in the call stack.
+ * op.domain is tested before this point in the call stack.
+ *
+ * @param x first param to flush & round then invoke op.impl on
+ * @param y second param to flush & round then invoke op.impl on
+ * @param op operation defining the function being run
+ * @returns a span over all the outputs of op.impl
+ */
+ private roundAndFlushScalarPairToInterval(
+ x: number,
+ y: number,
+ op: ScalarPairToIntervalOp
+ ): FPInterval {
+ assert(!Number.isNaN(x), `flush not defined for NaN`);
+ assert(!Number.isNaN(y), `flush not defined for NaN`);
+ const x_values = this.correctlyRounded(x);
+ const y_values = this.correctlyRounded(y);
+ const x_inputs = this.addFlushedIfNeeded(x_values);
+ const y_inputs = this.addFlushedIfNeeded(y_values);
+ const intervals = new Set<FPInterval>();
+ x_inputs.forEach(inner_x => {
+ y_inputs.forEach(inner_y => {
+ intervals.add(op.impl(inner_x, inner_y));
+ });
+ });
+ return this.spanIntervals(...intervals);
+ }
+
+ /**
+ * Converts a triplet to an acceptance interval, using a specific function
+ *
+ * This handles correctly rounding and flushing inputs as needed.
+ * Duplicate inputs are pruned before invoking op.impl.
+ * All unique combinations of x, y & z are run.
+ *
+ * @param x first param to flush & round then invoke op.impl on
+ * @param y second param to flush & round then invoke op.impl on
+ * @param z third param to flush & round then invoke op.impl on
+ * @param op operation defining the function being run
+ * @returns a span over all the outputs of op.impl
+ */
+ private roundAndFlushScalarTripleToInterval(
+ x: number,
+ y: number,
+ z: number,
+ op: ScalarTripleToIntervalOp
+ ): FPInterval {
+ assert(!Number.isNaN(x), `flush not defined for NaN`);
+ assert(!Number.isNaN(y), `flush not defined for NaN`);
+ assert(!Number.isNaN(z), `flush not defined for NaN`);
+ const x_values = this.correctlyRounded(x);
+ const y_values = this.correctlyRounded(y);
+ const z_values = this.correctlyRounded(z);
+ const x_inputs = this.addFlushedIfNeeded(x_values);
+ const y_inputs = this.addFlushedIfNeeded(y_values);
+ const z_inputs = this.addFlushedIfNeeded(z_values);
+ const intervals = new Set<FPInterval>();
+ // prettier-ignore
+ x_inputs.forEach(inner_x => {
+ y_inputs.forEach(inner_y => {
+ z_inputs.forEach(inner_z => {
+ intervals.add(op.impl(inner_x, inner_y, inner_z));
+ });
+ });
+ });
+
+ return this.spanIntervals(...intervals);
+ }
+
+ /**
+ * Converts a vector to an acceptance interval using a specific function
+ *
+ * This handles correctly rounding and flushing inputs as needed.
+ * Duplicate inputs are pruned before invoking op.impl.
+ *
+ * @param x param to flush & round then invoke op.impl on
+ * @param op operation defining the function being run
+ * @returns a span over all the outputs of op.impl
+ */
+ private roundAndFlushVectorToInterval(x: readonly number[], op: VectorToIntervalOp): FPInterval {
+ assert(
+ x.every(e => !Number.isNaN(e)),
+ `flush not defined for NaN`
+ );
+
+ const x_rounded: ROArrayArray<number> = x.map(this.correctlyRounded);
+ const x_flushed: ROArrayArray<number> = x_rounded.map(this.addFlushedIfNeeded.bind(this));
+ const x_inputs = cartesianProduct<number>(...x_flushed);
+
+ const intervals = new Set<FPInterval>();
+ x_inputs.forEach(inner_x => {
+ intervals.add(op.impl(inner_x));
+ });
+ return this.spanIntervals(...intervals);
+ }
+
+ /**
+ * Converts a pair of vectors to an acceptance interval using a specific
+ * function
+ *
+ * This handles correctly rounding and flushing inputs as needed.
+ * Duplicate inputs are pruned before invoking op.impl.
+ * All unique combinations of x & y are run.
+ *
+ * @param x first param to flush & round then invoke op.impl on
+ * @param y second param to flush & round then invoke op.impl on
+ * @param op operation defining the function being run
+ * @returns a span over all the outputs of op.impl
+ */
+ private roundAndFlushVectorPairToInterval(
+ x: readonly number[],
+ y: readonly number[],
+ op: VectorPairToIntervalOp
+ ): FPInterval {
+ assert(
+ x.every(e => !Number.isNaN(e)),
+ `flush not defined for NaN`
+ );
+ assert(
+ y.every(e => !Number.isNaN(e)),
+ `flush not defined for NaN`
+ );
+
+ const x_rounded: ROArrayArray<number> = x.map(this.correctlyRounded);
+ const y_rounded: ROArrayArray<number> = y.map(this.correctlyRounded);
+ const x_flushed: ROArrayArray<number> = x_rounded.map(this.addFlushedIfNeeded.bind(this));
+ const y_flushed: ROArrayArray<number> = y_rounded.map(this.addFlushedIfNeeded.bind(this));
+ const x_inputs = cartesianProduct<number>(...x_flushed);
+ const y_inputs = cartesianProduct<number>(...y_flushed);
+
+ const intervals = new Set<FPInterval>();
+ x_inputs.forEach(inner_x => {
+ y_inputs.forEach(inner_y => {
+ intervals.add(op.impl(inner_x, inner_y));
+ });
+ });
+ return this.spanIntervals(...intervals);
+ }
+
+ /**
+ * Converts a vector to a vector of acceptance intervals using a specific
+ * function
+ *
+ * This handles correctly rounding and flushing inputs as needed.
+ * Duplicate inputs are pruned before invoking op.impl.
+ *
+ * @param x param to flush & round then invoke op.impl on
+ * @param op operation defining the function being run
+   * @returns a vector of spans for each output of op.impl
+ */
+ private roundAndFlushVectorToVector(x: readonly number[], op: VectorToVectorOp): FPVector {
+ assert(
+ x.every(e => !Number.isNaN(e)),
+ `flush not defined for NaN`
+ );
+
+ const x_rounded: ROArrayArray<number> = x.map(this.correctlyRounded);
+ const x_flushed: ROArrayArray<number> = x_rounded.map(this.addFlushedIfNeeded.bind(this));
+ const x_inputs = cartesianProduct<number>(...x_flushed);
+
+ const interval_vectors = new Set<FPVector>();
+ x_inputs.forEach(inner_x => {
+ interval_vectors.add(op.impl(inner_x));
+ });
+
+ return this.spanVectors(...interval_vectors);
+ }
+
+ /**
+ * Converts a pair of vectors to a vector of acceptance intervals using a
+ * specific function
+ *
+ * This handles correctly rounding and flushing inputs as needed.
+ * Duplicate inputs are pruned before invoking op.impl.
+ *
+ * @param x first param to flush & round then invoke op.impl on
+ * @param y second param to flush & round then invoke op.impl on
+ * @param op operation defining the function being run
+ * @returns a vector of spans for each output of op.impl
+ */
+ private roundAndFlushVectorPairToVector(
+ x: readonly number[],
+ y: readonly number[],
+ op: VectorPairToVectorOp
+ ): FPVector {
+ assert(
+ x.every(e => !Number.isNaN(e)),
+ `flush not defined for NaN`
+ );
+ assert(
+ y.every(e => !Number.isNaN(e)),
+ `flush not defined for NaN`
+ );
+
+ const x_rounded: ROArrayArray<number> = x.map(this.correctlyRounded);
+ const y_rounded: ROArrayArray<number> = y.map(this.correctlyRounded);
+ const x_flushed: ROArrayArray<number> = x_rounded.map(this.addFlushedIfNeeded.bind(this));
+ const y_flushed: ROArrayArray<number> = y_rounded.map(this.addFlushedIfNeeded.bind(this));
+ const x_inputs = cartesianProduct<number>(...x_flushed);
+ const y_inputs = cartesianProduct<number>(...y_flushed);
+
+ const interval_vectors = new Set<FPVector>();
+ x_inputs.forEach(inner_x => {
+ y_inputs.forEach(inner_y => {
+ interval_vectors.add(op.impl(inner_x, inner_y));
+ });
+ });
+
+ return this.spanVectors(...interval_vectors);
+ }
+
+ /**
+ * Converts a matrix to a matrix of acceptance intervals using a specific
+ * function
+ *
+ * This handles correctly rounding and flushing inputs as needed.
+ * Duplicate inputs are pruned before invoking op.impl.
+ *
+ * @param m param to flush & round then invoke op.impl on
+ * @param op operation defining the function being run
+   * @returns a matrix of spans for each output of op.impl
+ */
+ private roundAndFlushMatrixToMatrix(m: Array2D<number>, op: MatrixToMatrixOp): FPMatrix {
+ const num_cols = m.length;
+ const num_rows = m[0].length;
+ assert(
+ m.every(c => c.every(r => !Number.isNaN(r))),
+ `flush not defined for NaN`
+ );
+
+ const m_flat = flatten2DArray(m);
+ const m_rounded: ROArrayArray<number> = m_flat.map(this.correctlyRounded);
+ const m_flushed: ROArrayArray<number> = m_rounded.map(this.addFlushedIfNeeded.bind(this));
+ const m_options: ROArrayArray<number> = cartesianProduct<number>(...m_flushed);
+ const m_inputs: ROArrayArrayArray<number> = m_options.map(e =>
+ unflatten2DArray(e, num_cols, num_rows)
+ );
+
+ const interval_matrices = new Set<FPMatrix>();
+ m_inputs.forEach(inner_m => {
+ interval_matrices.add(op.impl(inner_m));
+ });
+
+ return this.spanMatrices(...interval_matrices);
+ }
+
+ /**
+ * Calculate the acceptance interval for a unary function over an interval
+ *
+ * If the interval is actually a point, this just decays to
+ * roundAndFlushScalarToInterval.
+ *
+ * The provided domain interval may be adjusted if the operation defines an
+ * extrema function.
+ *
+ * @param x input domain interval
+ * @param op operation defining the function being run
+ * @returns a span over all the outputs of op.impl
+ */
+ protected runScalarToIntervalOp(x: FPInterval, op: ScalarToIntervalOp): FPInterval {
+ if (!x.isFinite()) {
+ return this.constants().unboundedInterval;
+ }
+
+ if (op.extrema !== undefined) {
+ x = op.extrema(x);
+ }
+
+ const result = this.spanIntervals(
+ ...x.bounds().map(b => this.roundAndFlushScalarToInterval(b, op))
+ );
+ return result.isFinite() ? result : this.constants().unboundedInterval;
+ }
+
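+  // Note (illustrative): only the bounds of the (possibly extrema-adjusted)
+  // input interval are fed to op.impl, so operations whose result is not
+  // monotonic over the domain are expected to supply an extrema() that remaps
+  // the interval so interior extrema are captured at a bound.
+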
+ /**
+ * Calculate the acceptance interval for a binary function over an interval
+ *
+ * The provided domain intervals may be adjusted if the operation defines an
+ * extrema function.
+ *
+ * @param x first input domain interval
+ * @param y second input domain interval
+ * @param op operation defining the function being run
+ * @returns a span over all the outputs of op.impl
+ */
+ protected runScalarPairToIntervalOp(
+ x: FPInterval,
+ y: FPInterval,
+ op: ScalarPairToIntervalOp
+ ): FPInterval {
+ if (!x.isFinite() || !y.isFinite()) {
+ return this.constants().unboundedInterval;
+ }
+
+ if (op.extrema !== undefined) {
+ [x, y] = op.extrema(x, y);
+ }
+
+ const outputs = new Set<FPInterval>();
+ x.bounds().forEach(inner_x => {
+ y.bounds().forEach(inner_y => {
+ outputs.add(this.roundAndFlushScalarPairToInterval(inner_x, inner_y, op));
+ });
+ });
+
+ const result = this.spanIntervals(...outputs);
+ return result.isFinite() ? result : this.constants().unboundedInterval;
+ }
+
+ /**
+ * Calculate the acceptance interval for a ternary function over an interval
+ *
+ * @param x first input domain interval
+ * @param y second input domain interval
+ * @param z third input domain interval
+ * @param op operation defining the function being run
+ * @returns a span over all the outputs of op.impl
+ */
+ protected runScalarTripleToIntervalOp(
+ x: FPInterval,
+ y: FPInterval,
+ z: FPInterval,
+ op: ScalarTripleToIntervalOp
+ ): FPInterval {
+ if (!x.isFinite() || !y.isFinite() || !z.isFinite()) {
+ return this.constants().unboundedInterval;
+ }
+
+ const outputs = new Set<FPInterval>();
+ x.bounds().forEach(inner_x => {
+ y.bounds().forEach(inner_y => {
+ z.bounds().forEach(inner_z => {
+ outputs.add(this.roundAndFlushScalarTripleToInterval(inner_x, inner_y, inner_z, op));
+ });
+ });
+ });
+
+ const result = this.spanIntervals(...outputs);
+ return result.isFinite() ? result : this.constants().unboundedInterval;
+ }
+
+ /**
+ * Calculate the acceptance interval for a vector function over given
+ * intervals
+ *
+ * @param x input domain intervals vector
+ * @param op operation defining the function being run
+ * @returns a span over all the outputs of op.impl
+ */
+ protected runVectorToIntervalOp(x: FPVector, op: VectorToIntervalOp): FPInterval {
+ if (x.some(e => !e.isFinite())) {
+ return this.constants().unboundedInterval;
+ }
+
+ const x_values = cartesianProduct<number>(...x.map(e => e.bounds()));
+
+ const outputs = new Set<FPInterval>();
+ x_values.forEach(inner_x => {
+ outputs.add(this.roundAndFlushVectorToInterval(inner_x, op));
+ });
+
+ const result = this.spanIntervals(...outputs);
+ return result.isFinite() ? result : this.constants().unboundedInterval;
+ }
+
+ /**
+ * Calculate the acceptance interval for a vector pair function over given
+ * intervals
+ *
+ * @param x first input domain intervals vector
+ * @param y second input domain intervals vector
+ * @param op operation defining the function being run
+ * @returns a span over all the outputs of op.impl
+ */
+ protected runVectorPairToIntervalOp(
+ x: FPVector,
+ y: FPVector,
+ op: VectorPairToIntervalOp
+ ): FPInterval {
+ if (x.some(e => !e.isFinite()) || y.some(e => !e.isFinite())) {
+ return this.constants().unboundedInterval;
+ }
+
+ const x_values = cartesianProduct<number>(...x.map(e => e.bounds()));
+ const y_values = cartesianProduct<number>(...y.map(e => e.bounds()));
+
+ const outputs = new Set<FPInterval>();
+ x_values.forEach(inner_x => {
+ y_values.forEach(inner_y => {
+ outputs.add(this.roundAndFlushVectorPairToInterval(inner_x, inner_y, op));
+ });
+ });
+
+ const result = this.spanIntervals(...outputs);
+ return result.isFinite() ? result : this.constants().unboundedInterval;
+ }
+
+ /**
+   * Calculate the vector of acceptance intervals for a vector function
+ * over given intervals
+ *
+ * @param x input domain intervals vector
+ * @param op operation defining the function being run
+ * @returns a vector of spans over all the outputs of op.impl
+ */
+ protected runVectorToVectorOp(x: FPVector, op: VectorToVectorOp): FPVector {
+ if (x.some(e => !e.isFinite())) {
+ return this.constants().unboundedVector[x.length];
+ }
+
+ const x_values = cartesianProduct<number>(...x.map(e => e.bounds()));
+
+ const outputs = new Set<FPVector>();
+ x_values.forEach(inner_x => {
+ outputs.add(this.roundAndFlushVectorToVector(inner_x, op));
+ });
+
+ const result = this.spanVectors(...outputs);
+ return result.every(e => e.isFinite())
+ ? result
+ : this.constants().unboundedVector[result.length];
+ }
+
+ /**
+ * Calculate the vector of acceptance intervals by running a scalar operation
+ * component-wise over a vector.
+ *
+ * This is used for situations where a component-wise operation, like vector
+ * negation, is needed as part of an inherited accuracy, but the top-level
+   * operation tests don't require an explicit vector definition of the function,
+ * due to the generated 'vectorize' tests being sufficient.
+ *
+ * @param x input domain intervals vector
+ * @param op scalar operation to be run component-wise
+ * @returns a vector of intervals with the outputs of op.impl
+ */
+ protected runScalarToIntervalOpComponentWise(x: FPVector, op: ScalarToIntervalOp): FPVector {
+ return this.toVector(x.map(e => this.runScalarToIntervalOp(e, op)));
+ }
+
+ /**
+   * Calculate the vector of acceptance intervals for a vector pair function over
+ * given intervals
+ *
+ * @param x first input domain intervals vector
+ * @param y second input domain intervals vector
+ * @param op operation defining the function being run
+ * @returns a vector of spans over all the outputs of op.impl
+ */
+ protected runVectorPairToVectorOp(x: FPVector, y: FPVector, op: VectorPairToVectorOp): FPVector {
+ if (x.some(e => !e.isFinite()) || y.some(e => !e.isFinite())) {
+ return this.constants().unboundedVector[x.length];
+ }
+
+ const x_values = cartesianProduct<number>(...x.map(e => e.bounds()));
+ const y_values = cartesianProduct<number>(...y.map(e => e.bounds()));
+
+ const outputs = new Set<FPVector>();
+ x_values.forEach(inner_x => {
+ y_values.forEach(inner_y => {
+ outputs.add(this.roundAndFlushVectorPairToVector(inner_x, inner_y, op));
+ });
+ });
+
+ const result = this.spanVectors(...outputs);
+ return result.every(e => e.isFinite())
+ ? result
+ : this.constants().unboundedVector[result.length];
+ }
+
+ /**
+ * Calculate the vector of acceptance intervals by running a scalar operation
+ * component-wise over a pair of vectors.
+ *
+ * This is used for situations where a component-wise operation, like vector
+ * subtraction, is needed as part of an inherited accuracy, but the top-level
+   * operation tests don't require an explicit vector definition of the function,
+ * due to the generated 'vectorize' tests being sufficient.
+ *
+ * @param x first input domain intervals vector
+ * @param y second input domain intervals vector
+ * @param op scalar operation to be run component-wise
+ * @returns a vector of intervals with the outputs of op.impl
+ */
+ protected runScalarPairToIntervalOpVectorComponentWise(
+ x: FPVector,
+ y: FPVector,
+ op: ScalarPairToIntervalOp
+ ): FPVector {
+ assert(
+ x.length === y.length,
+ `runScalarPairToIntervalOpVectorComponentWise requires vectors of the same dimensions`
+ );
+
+ return this.toVector(
+ x.map((i, idx) => {
+ return this.runScalarPairToIntervalOp(i, y[idx], op);
+ })
+ );
+ }
+
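+  // Illustrative sketch: component-wise vector subtraction can inherit the
+  // scalar subtraction accuracy via this helper, e.g.
+  //
+  //   this.runScalarPairToIntervalOpVectorComponentWise(
+  //     this.toVector(x), this.toVector(y), this.SubtractionIntervalOp
+  //   );
+  //
+  // assuming a SubtractionIntervalOp analogous to the AdditionIntervalOp
+  // defined further below.
+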
+ /**
+   * Calculate the matrix of acceptance intervals for a matrix function over
+ * given intervals
+ *
+ * @param m input domain intervals matrix
+ * @param op operation defining the function being run
+ * @returns a matrix of spans over all the outputs of op.impl
+ */
+ protected runMatrixToMatrixOp(m: FPMatrix, op: MatrixToMatrixOp): FPMatrix {
+ const num_cols = m.length;
+ const num_rows = m[0].length;
+ if (m.some(c => c.some(r => !r.isFinite()))) {
+ return this.constants().unboundedMatrix[num_cols][num_rows];
+ }
+
+ const m_flat: readonly FPInterval[] = flatten2DArray(m);
+ const m_values: ROArrayArray<number> = cartesianProduct<number>(...m_flat.map(e => e.bounds()));
+
+ const outputs = new Set<FPMatrix>();
+ m_values.forEach(inner_m => {
+ const unflat_m = unflatten2DArray(inner_m, num_cols, num_rows);
+ outputs.add(this.roundAndFlushMatrixToMatrix(unflat_m, op));
+ });
+
+ const result = this.spanMatrices(...outputs);
+ const result_cols = result.length;
+ const result_rows = result[0].length;
+
+ // FPMatrix has to be coerced to ROArrayArray<FPInterval> to use .every. This should
+    // always be safe, since FPMatrix is defined as a fixed-length array of
+    // arrays.
+ return (result as ROArrayArray<FPInterval>).every(c => c.every(r => r.isFinite()))
+ ? result
+ : this.constants().unboundedMatrix[result_cols][result_rows];
+ }
+
+ /**
+ * Calculate the Matrix of acceptance intervals by running a scalar operation
+ * component-wise over a pair of matrices.
+ *
+ * An example of this is performing matrix addition.
+ *
+ * @param x first input domain intervals matrix
+ * @param y second input domain intervals matrix
+ * @param op scalar operation to be run component-wise
+ * @returns a matrix of intervals with the outputs of op.impl
+ */
+ protected runScalarPairToIntervalOpMatrixComponentWise(
+ x: FPMatrix,
+ y: FPMatrix,
+ op: ScalarPairToIntervalOp
+ ): FPMatrix {
+ assert(
+ x.length === y.length && x[0].length === y[0].length,
+ `runScalarPairToIntervalOpMatrixComponentWise requires matrices of the same dimensions`
+ );
+
+ const cols = x.length;
+ const rows = x[0].length;
+ const flat_x = flatten2DArray(x);
+ const flat_y = flatten2DArray(y);
+
+ return this.toMatrix(
+ unflatten2DArray(
+ flat_x.map((i, idx) => {
+ return this.runScalarPairToIntervalOp(i, flat_y[idx], op);
+ }),
+ cols,
+ rows
+ )
+ );
+ }
+
+ // API - Fundamental Error Intervals
+
+ /** @returns a ScalarToIntervalOp for [n - error_range, n + error_range] */
+ private AbsoluteErrorIntervalOp(error_range: number): ScalarToIntervalOp {
+ const op: ScalarToIntervalOp = {
+ impl: (_: number) => {
+ return this.constants().unboundedInterval;
+ },
+ };
+
+ assert(
+ error_range >= 0,
+      `absoluteErrorInterval must have non-negative error range, got ${error_range}`
+ );
+
+ if (this.isFinite(error_range)) {
+ op.impl = (n: number) => {
+ assert(!Number.isNaN(n), `absolute error not defined for NaN`);
+        // Return the unbounded interval if the given center n is not finite.
+ if (!this.isFinite(n)) {
+ return this.constants().unboundedInterval;
+ }
+ return this.toInterval([n - error_range, n + error_range]);
+ };
+ }
+
+ return op;
+ }
+
+ protected absoluteErrorIntervalImpl(n: number, error_range: number): FPInterval {
+ error_range = Math.abs(error_range);
+ return this.runScalarToIntervalOp(
+ this.toInterval(n),
+ this.AbsoluteErrorIntervalOp(error_range)
+ );
+ }
+
+ /** @returns an interval of the absolute error around the point */
+ public abstract readonly absoluteErrorInterval: (n: number, error_range: number) => FPInterval;
+
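+  // Illustrative sketch (approximate): for an f32-like trait,
+  // fp.absoluteErrorInterval(2.0, 0.1) yields an interval covering roughly
+  // [1.9, 2.1], while a non-finite centre or error range collapses to the
+  // unbounded interval, as implemented by AbsoluteErrorIntervalOp above.
+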
+ /**
+ * Defines a ScalarToIntervalOp for an interval of the correctly rounded values
+ * around the point
+ */
+ private readonly CorrectlyRoundedIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number) => {
+      assert(!Number.isNaN(n), `correctly rounded interval not defined for NaN`);
+ return this.toInterval(n);
+ },
+ };
+
+ protected correctlyRoundedIntervalImpl(n: number | FPInterval): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.CorrectlyRoundedIntervalOp);
+ }
+
+ /** @returns an interval of the correctly rounded values around the point */
+ public abstract readonly correctlyRoundedInterval: (n: number | FPInterval) => FPInterval;
+
+ protected correctlyRoundedMatrixImpl(m: Array2D<number>): FPMatrix {
+ return this.toMatrix(map2DArray(m, this.correctlyRoundedInterval));
+ }
+
+ /** @returns a matrix of correctly rounded intervals for the provided matrix */
+ public abstract readonly correctlyRoundedMatrix: (m: Array2D<number>) => FPMatrix;
+
+ /** @returns a ScalarToIntervalOp for [n - numULP * ULP(n), n + numULP * ULP(n)] */
+ private ULPIntervalOp(numULP: number): ScalarToIntervalOp {
+ const op: ScalarToIntervalOp = {
+ impl: (_: number) => {
+ return this.constants().unboundedInterval;
+ },
+ };
+
+ if (this.isFinite(numULP)) {
+ op.impl = (n: number) => {
+ assert(!Number.isNaN(n), `ULP error not defined for NaN`);
+
+ const ulp = this.oneULP(n);
+ const begin = n - numULP * ulp;
+ const end = n + numULP * ulp;
+
+ return this.toInterval([
+ Math.min(begin, this.flushSubnormal(begin)),
+ Math.max(end, this.flushSubnormal(end)),
+ ]);
+ };
+ }
+
+ return op;
+ }
+
+ protected ulpIntervalImpl(n: number, numULP: number): FPInterval {
+ numULP = Math.abs(numULP);
+ return this.runScalarToIntervalOp(this.toInterval(n), this.ULPIntervalOp(numULP));
+ }
+
+ /** @returns an interval of N * ULP around the point */
+ public abstract readonly ulpInterval: (n: number, numULP: number) => FPInterval;
+
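+  // Illustrative sketch: fp.ulpInterval(x, 3) accepts any value within
+  // 3 * oneULP(x) of x, additionally widened so that the flushed (subnormal)
+  // versions of the endpoints are also accepted, as implemented by
+  // ULPIntervalOp above.
+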
+ // API - Acceptance Intervals
+
+ private readonly AbsIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number) => {
+ return this.correctlyRoundedInterval(Math.abs(n));
+ },
+ };
+
+ protected absIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.AbsIntervalOp);
+ }
+
+ /** Calculate an acceptance interval for abs(n) */
+ public abstract readonly absInterval: (n: number) => FPInterval;
+
+ // This op is implemented differently for f32 and f16.
+ private readonly AcosIntervalOp: ScalarToIntervalOp = {
+ impl: this.limitScalarToIntervalDomain(this.toInterval([-1.0, 1.0]), (n: number) => {
+ assert(this.kind === 'f32' || this.kind === 'f16');
+ // acos(n) = atan2(sqrt(1.0 - n * n), n) or a polynomial approximation with absolute error
+ const y = this.sqrtInterval(this.subtractionInterval(1, this.multiplicationInterval(n, n)));
+ const approx_abs_error = this.kind === 'f32' ? 6.77e-5 : 3.91e-3;
+ return this.spanIntervals(
+ this.atan2Interval(y, n),
+ this.absoluteErrorInterval(Math.acos(n), approx_abs_error)
+ );
+ }),
+ };
+
+ protected acosIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.AcosIntervalOp);
+ }
+
+ /** Calculate an acceptance interval for acos(n) */
+ public abstract readonly acosInterval: (n: number) => FPInterval;
+
+ private readonly AcoshAlternativeIntervalOp: ScalarToIntervalOp = {
+ impl: (x: number): FPInterval => {
+ // acosh(x) = log(x + sqrt((x + 1.0f) * (x - 1.0)))
+ const inner_value = this.multiplicationInterval(
+ this.additionInterval(x, 1.0),
+ this.subtractionInterval(x, 1.0)
+ );
+ const sqrt_value = this.sqrtInterval(inner_value);
+ return this.logInterval(this.additionInterval(x, sqrt_value));
+ },
+ };
+
+ protected acoshAlternativeIntervalImpl(x: number | FPInterval): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(x), this.AcoshAlternativeIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of acosh(x) using log(x + sqrt((x + 1.0f) * (x - 1.0))) */
+ public abstract readonly acoshAlternativeInterval: (x: number | FPInterval) => FPInterval;
+
+ private readonly AcoshPrimaryIntervalOp: ScalarToIntervalOp = {
+ impl: (x: number): FPInterval => {
+ // acosh(x) = log(x + sqrt(x * x - 1.0))
+ const inner_value = this.subtractionInterval(this.multiplicationInterval(x, x), 1.0);
+ const sqrt_value = this.sqrtInterval(inner_value);
+ return this.logInterval(this.additionInterval(x, sqrt_value));
+ },
+ };
+
+ protected acoshPrimaryIntervalImpl(x: number | FPInterval): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(x), this.AcoshPrimaryIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of acosh(x) using log(x + sqrt(x * x - 1.0)) */
+ protected abstract acoshPrimaryInterval: (x: number | FPInterval) => FPInterval;
+
+ /** All acceptance interval functions for acosh(x) */
+ public abstract readonly acoshIntervals: ScalarToInterval[];
+
+ private readonly AdditionIntervalOp: ScalarPairToIntervalOp = {
+ impl: (x: number, y: number): FPInterval => {
+ return this.correctlyRoundedInterval(x + y);
+ },
+ };
+
+ protected additionIntervalImpl(x: number | FPInterval, y: number | FPInterval): FPInterval {
+ return this.runScalarPairToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.AdditionIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of x + y, when x and y are both scalars */
+ public abstract readonly additionInterval: (
+ x: number | FPInterval,
+ y: number | FPInterval
+ ) => FPInterval;
+
+ protected additionMatrixMatrixIntervalImpl(x: Array2D<number>, y: Array2D<number>): FPMatrix {
+ return this.runScalarPairToIntervalOpMatrixComponentWise(
+ this.toMatrix(x),
+ this.toMatrix(y),
+ this.AdditionIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of x + y, when x and y are matrices */
+ public abstract readonly additionMatrixMatrixInterval: (
+ x: Array2D<number>,
+ y: Array2D<number>
+ ) => FPMatrix;
+
+ // This op is implemented differently for f32 and f16.
+ private readonly AsinIntervalOp: ScalarToIntervalOp = {
+ impl: this.limitScalarToIntervalDomain(this.toInterval([-1.0, 1.0]), (n: number) => {
+ assert(this.kind === 'f32' || this.kind === 'f16');
+ // asin(n) = atan2(n, sqrt(1.0 - n * n)) or a polynomial approximation with absolute error
+ const x = this.sqrtInterval(this.subtractionInterval(1, this.multiplicationInterval(n, n)));
+ const approx_abs_error = this.kind === 'f32' ? 6.77e-5 : 3.91e-3;
+ return this.spanIntervals(
+ this.atan2Interval(n, x),
+ this.absoluteErrorInterval(Math.asin(n), approx_abs_error)
+ );
+ }),
+ };
+
+ /** Calculate an acceptance interval for asin(n) */
+ protected asinIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.AsinIntervalOp);
+ }
+
+ /** Calculate an acceptance interval for asin(n) */
+ public abstract readonly asinInterval: (n: number) => FPInterval;
+
+ private readonly AsinhIntervalOp: ScalarToIntervalOp = {
+ impl: (x: number): FPInterval => {
+ // asinh(x) = log(x + sqrt(x * x + 1.0))
+ const inner_value = this.additionInterval(this.multiplicationInterval(x, x), 1.0);
+ const sqrt_value = this.sqrtInterval(inner_value);
+ return this.logInterval(this.additionInterval(x, sqrt_value));
+ },
+ };
+
+ protected asinhIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.AsinhIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of asinh(x) */
+ public abstract readonly asinhInterval: (n: number) => FPInterval;
+
+ private readonly AtanIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ assert(this.kind === 'f32' || this.kind === 'f16');
+ const ulp_error = this.kind === 'f32' ? 4096 : 5;
+ return this.ulpInterval(Math.atan(n), ulp_error);
+ },
+ };
+
+ /** Calculate an acceptance interval of atan(x) */
+ protected atanIntervalImpl(n: number | FPInterval): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.AtanIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of atan(x) */
+ public abstract readonly atanInterval: (n: number | FPInterval) => FPInterval;
+
+ // This op is implemented differently for f32 and f16.
+ private Atan2IntervalOpBuilder(): ScalarPairToIntervalOp {
+ assert(this.kind === 'f32' || this.kind === 'f16');
+ const constants = this.constants();
+ // For atan2, the params are labelled (y, x), not (x, y), so domain.x is the first parameter (y),
+ // and domain.y is the second parameter (x).
+ // The first param must be finite and normal.
+ const domain_x = [
+ this.toInterval([constants.negative.min, constants.negative.max]),
+ this.toInterval([constants.positive.min, constants.positive.max]),
+ ];
+ // inherited from division
+ const domain_y =
+ this.kind === 'f32'
+ ? [this.toInterval([-(2 ** 126), -(2 ** -126)]), this.toInterval([2 ** -126, 2 ** 126])]
+ : [this.toInterval([-(2 ** 14), -(2 ** -14)]), this.toInterval([2 ** -14, 2 ** 14])];
+ const ulp_error = this.kind === 'f32' ? 4096 : 5;
+ return {
+ impl: this.limitScalarPairToIntervalDomain(
+ {
+ x: domain_x,
+ y: domain_y,
+ },
+ (y: number, x: number): FPInterval => {
+ // Accurate result in f64
+ let atan_yx = Math.atan(y / x);
+ // Offset by +/-pi according to the definition. Use pi value in f64 because we are
+ // handling the accurate result.
+ if (x < 0) {
+ // x < 0, y > 0, result is atan(y/x) + π
+ if (y > 0) {
+ atan_yx = atan_yx + kValue.f64.positive.pi.whole;
+ } else {
+ // x < 0, y < 0, result is atan(y/x) - π
+ atan_yx = atan_yx - kValue.f64.positive.pi.whole;
+ }
+ }
+
+ return this.ulpInterval(atan_yx, ulp_error);
+ }
+ ),
+ extrema: (y: FPInterval, x: FPInterval): [FPInterval, FPInterval] => {
+ // There is a discontinuity at y/x = 0, which generates an unbounded result that will dominate the accuracy
+ if (y.contains(0)) {
+ if (x.contains(0)) {
+ return [this.toInterval(0), this.toInterval(0)];
+ }
+ return [this.toInterval(0), x];
+ }
+ return [y, x];
+ },
+ };
+ }
+
+ protected atan2IntervalImpl(y: number | FPInterval, x: number | FPInterval): FPInterval {
+ return this.runScalarPairToIntervalOp(
+ this.toInterval(y),
+ this.toInterval(x),
+ this.Atan2IntervalOpBuilder()
+ );
+ }
+
+ /** Calculate an acceptance interval of atan2(y, x) */
+ public abstract readonly atan2Interval: (
+ y: number | FPInterval,
+ x: number | FPInterval
+ ) => FPInterval;
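+ // Example (illustrative values, not from the spec): for f32, atan2Interval(1, -1)
+ // takes atan(-1) = -π/4 and, since x < 0 and y > 0, offsets by +π to 3π/4,
+ // returned as a ±4096 ULP acceptance interval (±5 ULP for f16).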
+
+ private readonly AtanhIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number) => {
+ // atanh(x) = log((1.0 + x) / (1.0 - x)) * 0.5
+ const numerator = this.additionInterval(1.0, n);
+ const denominator = this.subtractionInterval(1.0, n);
+ const log_interval = this.logInterval(this.divisionInterval(numerator, denominator));
+ return this.multiplicationInterval(log_interval, 0.5);
+ },
+ };
+
+ protected atanhIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.AtanhIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of atanh(x) */
+ public abstract readonly atanhInterval: (n: number) => FPInterval;
+
+ private readonly CeilIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ return this.correctlyRoundedInterval(Math.ceil(n));
+ },
+ };
+
+ protected ceilIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.CeilIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of ceil(x) */
+ public abstract readonly ceilInterval: (n: number) => FPInterval;
+
+ private readonly ClampMedianIntervalOp: ScalarTripleToIntervalOp = {
+ impl: (x: number, y: number, z: number): FPInterval => {
+ return this.correctlyRoundedInterval(
+ // Default sort is string sort, so have to implement numeric comparison.
+ // Cannot use the b-a one-liner, because that assumes no infinities.
+ [x, y, z].sort((a, b) => {
+ if (a < b) {
+ return -1;
+ }
+ if (a > b) {
+ return 1;
+ }
+ return 0;
+ })[1]
+ );
+ },
+ };
+
+ protected clampMedianIntervalImpl(
+ x: number | FPInterval,
+ y: number | FPInterval,
+ z: number | FPInterval
+ ): FPInterval {
+ return this.runScalarTripleToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.toInterval(z),
+ this.ClampMedianIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of clamp(x, y, z) via median(x, y, z) */
+ public abstract readonly clampMedianInterval: (
+ x: number | FPInterval,
+ y: number | FPInterval,
+ z: number | FPInterval
+ ) => FPInterval;
+
+ private readonly ClampMinMaxIntervalOp: ScalarTripleToIntervalOp = {
+ impl: (x: number, low: number, high: number): FPInterval => {
+ return this.minInterval(this.maxInterval(x, low), high);
+ },
+ };
+
+ protected clampMinMaxIntervalImpl(
+ x: number | FPInterval,
+ low: number | FPInterval,
+ high: number | FPInterval
+ ): FPInterval {
+ return this.runScalarTripleToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(low),
+ this.toInterval(high),
+ this.ClampMinMaxIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of clamp(x, low, high) via min(max(x, low), high) */
+ public abstract readonly clampMinMaxInterval: (
+ x: number | FPInterval,
+ low: number | FPInterval,
+ high: number | FPInterval
+ ) => FPInterval;
+
+ /** All acceptance interval functions for clamp(x, y, z) */
+ public abstract readonly clampIntervals: ScalarTripleToInterval[];
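+ // Example (illustrative): clampMedianInterval(5, 0, 1) takes the median of
+ // [0, 1, 5], i.e. 1, while clampMinMaxInterval(5, 0, 1) computes
+ // min(max(5, 0), 1) = 1; a result may be checked against either entry of
+ // clampIntervals.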
+
+ private readonly CosIntervalOp: ScalarToIntervalOp = {
+ impl: this.limitScalarToIntervalDomain(
+ this.constants().negPiToPiInterval,
+ (n: number): FPInterval => {
+ assert(this.kind === 'f32' || this.kind === 'f16');
+ const abs_error = this.kind === 'f32' ? 2 ** -11 : 2 ** -7;
+ return this.absoluteErrorInterval(Math.cos(n), abs_error);
+ }
+ ),
+ };
+
+ protected cosIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.CosIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of cos(x) */
+ public abstract readonly cosInterval: (n: number) => FPInterval;
+
+ private readonly CoshIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ // cosh(x) = (exp(x) + exp(-x)) * 0.5
+ const minus_n = this.negationInterval(n);
+ return this.multiplicationInterval(
+ this.additionInterval(this.expInterval(n), this.expInterval(minus_n)),
+ 0.5
+ );
+ },
+ };
+
+ protected coshIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.CoshIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of cosh(x) */
+ public abstract readonly coshInterval: (n: number) => FPInterval;
+
+ private readonly CrossIntervalOp: VectorPairToVectorOp = {
+ impl: (x: readonly number[], y: readonly number[]): FPVector => {
+ assert(x.length === 3, `CrossIntervalOp received x with ${x.length} instead of 3`);
+ assert(y.length === 3, `CrossIntervalOp received y with ${y.length} instead of 3`);
+
+ // cross(x, y) = r, where
+ // r[0] = x[1] * y[2] - x[2] * y[1]
+ // r[1] = x[2] * y[0] - x[0] * y[2]
+ // r[2] = x[0] * y[1] - x[1] * y[0]
+
+ const r0 = this.subtractionInterval(
+ this.multiplicationInterval(x[1], y[2]),
+ this.multiplicationInterval(x[2], y[1])
+ );
+ const r1 = this.subtractionInterval(
+ this.multiplicationInterval(x[2], y[0]),
+ this.multiplicationInterval(x[0], y[2])
+ );
+ const r2 = this.subtractionInterval(
+ this.multiplicationInterval(x[0], y[1]),
+ this.multiplicationInterval(x[1], y[0])
+ );
+ return [r0, r1, r2];
+ },
+ };
+
+ protected crossIntervalImpl(x: readonly number[], y: readonly number[]): FPVector {
+ assert(x.length === 3, `Cross is only defined for vec3`);
+ assert(y.length === 3, `Cross is only defined for vec3`);
+ return this.runVectorPairToVectorOp(this.toVector(x), this.toVector(y), this.CrossIntervalOp);
+ }
+
+ /** Calculate a vector of acceptance intervals for cross(x, y) */
+ public abstract readonly crossInterval: (x: readonly number[], y: readonly number[]) => FPVector;
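+ // Example (illustrative): crossInterval([1, 0, 0], [0, 1, 0]) yields interval
+ // components around [0, 0, 1], each built from correctly rounded products and
+ // differences.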
+
+ private readonly DegreesIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ return this.multiplicationInterval(n, 57.295779513082322865);
+ },
+ };
+
+ protected degreesIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.DegreesIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of degrees(x) */
+ public abstract readonly degreesInterval: (n: number) => FPInterval;
+
+ /**
+ * Calculate the minor of a NxN matrix.
+ *
+ * The ijth minor of an NxN square matrix is the (N-1)x(N-1) matrix created by
+ * removing the ith column and jth row from the original matrix.
+ */
+ private minorNxN(m: Array2D<number>, col: number, row: number): Array2D<number> {
+ const dim = m.length;
+ assert(m.length === m[0].length, `minorNxN is only defined for square matrices`);
+ assert(col >= 0 && col < dim, `col ${col} needs to be in [0, # of columns '${dim}')`);
+ assert(row >= 0 && row < dim, `row ${row} needs to be in [0, # of rows '${dim}')`);
+
+ const result: number[][] = [...Array(dim - 1)].map(_ => [...Array(dim - 1)]);
+
+ const col_indices: readonly number[] = [...Array(dim).keys()].filter(e => e !== col);
+ const row_indices: readonly number[] = [...Array(dim).keys()].filter(e => e !== row);
+
+ col_indices.forEach((c, i) => {
+ row_indices.forEach((r, j) => {
+ result[i][j] = m[c][r];
+ });
+ });
+ return result;
+ }
+
+ /** Calculate an acceptance interval for determinant(m), where m is a 2x2 matrix */
+ private determinant2x2Interval(m: Array2D<number>): FPInterval {
+ assert(
+ m.length === m[0].length && m.length === 2,
+ `determinant2x2Interval called on non-2x2 matrix`
+ );
+ return this.subtractionInterval(
+ this.multiplicationInterval(m[0][0], m[1][1]),
+ this.multiplicationInterval(m[0][1], m[1][0])
+ );
+ }
+
+ /** Calculate an acceptance interval for determinant(m), where m is a 3x3 matrix */
+ private determinant3x3Interval(m: Array2D<number>): FPInterval {
+ assert(
+ m.length === m[0].length && m.length === 3,
+ `determinant3x3Interval called on non-3x3 matrix`
+ );
+
+ // M is a 3x3 matrix
+ // det(M) is A + B + C, where A, B, C are three elements in a row/column times
+ // their own co-factor.
+ // (The co-factor is the determinant of the minor of that position with the
+ // appropriate +/-)
+ // For simplicity's sake, A, B, C are calculated from the elements of the
+ // first column
+ const A = this.multiplicationInterval(
+ m[0][0],
+ this.determinant2x2Interval(this.minorNxN(m, 0, 0))
+ );
+ const B = this.multiplicationInterval(
+ -m[0][1],
+ this.determinant2x2Interval(this.minorNxN(m, 0, 1))
+ );
+ const C = this.multiplicationInterval(
+ m[0][2],
+ this.determinant2x2Interval(this.minorNxN(m, 0, 2))
+ );
+
+ // Need to calculate permutations, since fp addition is not associative,
+ // so A + B + C is not guaranteed to equal B + C + A, etc.
+ const permutations: ROArrayArray<FPInterval> = calculatePermutations([A, B, C]);
+ return this.spanIntervals(
+ ...permutations.map(p =>
+ p.reduce((prev: FPInterval, cur: FPInterval) => this.additionInterval(prev, cur))
+ )
+ );
+ }
+
+ /** Calculate an acceptance interval for determinant(m), where m is a 4x4 matrix */
+ private determinant4x4Interval(m: Array2D<number>): FPInterval {
+ assert(
+ m.length === m[0].length && m.length === 4,
+ `determinant4x4Interval called on non-4x4 matrix`
+ );
+
+ // M is a 4x4 matrix
+ // det(M) is A + B + C + D, where A, B, C, D are four elements in a row/column
+ // times their own co-factor.
+ // (The co-factor is the determinant of the minor of that position with the
+ // appropriate +/-)
+ // For simplicity's sake, A, B, C, D are calculated from the elements of the
+ // first column
+ const A = this.multiplicationInterval(
+ m[0][0],
+ this.determinant3x3Interval(this.minorNxN(m, 0, 0))
+ );
+ const B = this.multiplicationInterval(
+ -m[0][1],
+ this.determinant3x3Interval(this.minorNxN(m, 0, 1))
+ );
+ const C = this.multiplicationInterval(
+ m[0][2],
+ this.determinant3x3Interval(this.minorNxN(m, 0, 2))
+ );
+ const D = this.multiplicationInterval(
+ -m[0][3],
+ this.determinant3x3Interval(this.minorNxN(m, 0, 3))
+ );
+
+ // Need to calculate permutations, since fp addition is not associative,
+ // so A + B + C + D is not guaranteed to equal B + C + A + D, etc.
+ const permutations: ROArrayArray<FPInterval> = calculatePermutations([A, B, C, D]);
+ return this.spanIntervals(
+ ...permutations.map(p =>
+ p.reduce((prev: FPInterval, cur: FPInterval) => this.additionInterval(prev, cur))
+ )
+ );
+ }
+
+ /**
+ * This code calculates 3x3 and 4x4 determinants using the textbook co-factor
+ * method, using the first column for the co-factor selection.
+ *
+ * For matrices composed of integer elements, e, with |e|^4 < 2**21, this
+ * should be fine.
+ *
+ * For e, where e is subnormal or 4*(e^4) might not be precisely expressible as
+ * an f32 value, this approach breaks down, because the rule that all co-factor
+ * definitions of determinant are equal doesn't hold in these cases.
+ *
+ * The general solution for this is to calculate all the permutations of the
+ * operations in the worked out formula for determinant.
+ * For 3x3 this is tractable, but for 4x4 this works out to ~23! permutations
+ * that need to be calculated.
+ * Thus, CTS testing and the spec definition of accuracy are restricted to the
+ * space in which the simple implementation is valid.
+ */
+ protected determinantIntervalImpl(x: Array2D<number>): FPInterval {
+ const dim = x.length;
+ assert(
+ x[0].length === dim && (dim === 2 || dim === 3 || dim === 4),
+ `determinantInterval only defined for 2x2, 3x3 and 4x4 matrices`
+ );
+ switch (dim) {
+ case 2:
+ return this.determinant2x2Interval(x);
+ case 3:
+ return this.determinant3x3Interval(x);
+ case 4:
+ return this.determinant4x4Interval(x);
+ }
+ unreachable(
+ `determinantInterval called on x, which has an unexpected dimension of '${dim}'`
+ );
+ }
+
+ /** Calculate an acceptance interval for determinant(x) */
+ public abstract readonly determinantInterval: (x: Array2D<number>) => FPInterval;
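+ // Example (illustrative): determinantInterval([[2, 1], [3, 4]]) is the interval
+ // around 2 * 4 - 1 * 3 = 5; for 3x3 and 4x4 inputs the co-factor expansions are
+ // additionally spanned over the summation orderings described above.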
+
+ private readonly DistanceIntervalScalarOp: ScalarPairToIntervalOp = {
+ impl: (x: number, y: number): FPInterval => {
+ return this.lengthInterval(this.subtractionInterval(x, y));
+ },
+ };
+
+ private readonly DistanceIntervalVectorOp: VectorPairToIntervalOp = {
+ impl: (x: readonly number[], y: readonly number[]): FPInterval => {
+ return this.lengthInterval(
+ this.runScalarPairToIntervalOpVectorComponentWise(
+ this.toVector(x),
+ this.toVector(y),
+ this.SubtractionIntervalOp
+ )
+ );
+ },
+ };
+
+ protected distanceIntervalImpl(
+ x: number | readonly number[],
+ y: number | readonly number[]
+ ): FPInterval {
+ if (x instanceof Array && y instanceof Array) {
+ assert(
+ x.length === y.length,
+ `distanceInterval requires both params to have the same number of elements`
+ );
+ return this.runVectorPairToIntervalOp(
+ this.toVector(x),
+ this.toVector(y),
+ this.DistanceIntervalVectorOp
+ );
+ } else if (!(x instanceof Array) && !(y instanceof Array)) {
+ return this.runScalarPairToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.DistanceIntervalScalarOp
+ );
+ }
+ unreachable(
+ `distanceInterval requires both params to be of the same type, either scalars or vectors`
+ );
+ }
+
+ /** Calculate an acceptance interval of distance(x, y) */
+ public abstract readonly distanceInterval: (
+ x: number | readonly number[],
+ y: number | readonly number[]
+ ) => FPInterval;
+
+ // This op is implemented differently for f32 and f16.
+ private DivisionIntervalOpBuilder(): ScalarPairToIntervalOp {
+ const constants = this.constants();
+ const domain_x = [this.toInterval([constants.negative.min, constants.positive.max])];
+ const domain_y =
+ this.kind === 'f32' || this.kind === 'abstract'
+ ? [this.toInterval([-(2 ** 126), -(2 ** -126)]), this.toInterval([2 ** -126, 2 ** 126])]
+ : [this.toInterval([-(2 ** 14), -(2 ** -14)]), this.toInterval([2 ** -14, 2 ** 14])];
+ return {
+ impl: this.limitScalarPairToIntervalDomain(
+ {
+ x: domain_x,
+ y: domain_y,
+ },
+ (x: number, y: number): FPInterval => {
+ if (y === 0) {
+ return constants.unboundedInterval;
+ }
+ return this.ulpInterval(x / y, 2.5);
+ }
+ ),
+ extrema: (x: FPInterval, y: FPInterval): [FPInterval, FPInterval] => {
+ // division has a discontinuity at y = 0.
+ if (y.contains(0)) {
+ y = this.toInterval(0);
+ }
+ return [x, y];
+ },
+ };
+ }
+
+ protected divisionIntervalImpl(x: number | FPInterval, y: number | FPInterval): FPInterval {
+ return this.runScalarPairToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.DivisionIntervalOpBuilder()
+ );
+ }
+
+ /** Calculate an acceptance interval of x / y */
+ public abstract readonly divisionInterval: (
+ x: number | FPInterval,
+ y: number | FPInterval
+ ) => FPInterval;
+
+ private readonly DotIntervalOp: VectorPairToIntervalOp = {
+ impl: (x: readonly number[], y: readonly number[]): FPInterval => {
+ // dot(x, y) = sum of x[i] * y[i]
+ const multiplications = this.runScalarPairToIntervalOpVectorComponentWise(
+ this.toVector(x),
+ this.toVector(y),
+ this.MultiplicationIntervalOp
+ );
+
+ // vec2 doesn't require permutations, since a + b = b + a for floats
+ if (multiplications.length === 2) {
+ return this.additionInterval(multiplications[0], multiplications[1]);
+ }
+
+ // The spec does not state the ordering of summation, so all the
+ // permutations are calculated and their results spanned, since addition
+ // of more than two floats is not associative, i.e. a + b + c is not
+ // guaranteed to equal b + a + c
+ const permutations: ROArrayArray<FPInterval> = calculatePermutations(multiplications);
+ return this.spanIntervals(
+ ...permutations.map(p => p.reduce((prev, cur) => this.additionInterval(prev, cur)))
+ );
+ },
+ };
+
+ protected dotIntervalImpl(
+ x: readonly number[] | readonly FPInterval[],
+ y: readonly number[] | readonly FPInterval[]
+ ): FPInterval {
+ assert(x.length === y.length, `dot not defined for vectors with different lengths`);
+ return this.runVectorPairToIntervalOp(this.toVector(x), this.toVector(y), this.DotIntervalOp);
+ }
+
+ /** Calculate the acceptance interval for dot(x, y) */
+ public abstract readonly dotInterval: (
+ x: readonly number[] | readonly FPInterval[],
+ y: readonly number[] | readonly FPInterval[]
+ ) => FPInterval;
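+ // Example (illustrative): dotInterval([1, 2, 3], [4, 5, 6]) spans every summation
+ // order of the products 4, 10 and 18, since the orderings can round differently;
+ // here all orderings land on 32 exactly, so the span collapses to one interval.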
+
+ private readonly ExpIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ assert(this.kind === 'f32' || this.kind === 'f16');
+ const ulp_error = this.kind === 'f32' ? 3 + 2 * Math.abs(n) : 1 + 2 * Math.abs(n);
+ return this.ulpInterval(Math.exp(n), ulp_error);
+ },
+ };
+
+ protected expIntervalImpl(x: number | FPInterval): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(x), this.ExpIntervalOp);
+ }
+
+ /** Calculate an acceptance interval for exp(x) */
+ public abstract readonly expInterval: (x: number | FPInterval) => FPInterval;
+
+ private readonly Exp2IntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ assert(this.kind === 'f32' || this.kind === 'f16');
+ const ulp_error = this.kind === 'f32' ? 3 + 2 * Math.abs(n) : 1 + 2 * Math.abs(n);
+ return this.ulpInterval(Math.pow(2, n), ulp_error);
+ },
+ };
+
+ protected exp2IntervalImpl(x: number | FPInterval): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(x), this.Exp2IntervalOp);
+ }
+
+ /** Calculate an acceptance interval for exp2(x) */
+ public abstract readonly exp2Interval: (x: number | FPInterval) => FPInterval;
+
+ /**
+ * faceForward(x, y, z) = select(-x, x, dot(z, y) < 0.0)
+ *
+ * This builtin selects from two discrete results (up to rounding/flushing),
+ * so the majority of the framework code is not appropriate, since the
+ * framework attempts to span results.
+ *
+ * Thus, a bespoke implementation is used instead of
+ * defining an Op and running that through the framework.
+ */
+ protected faceForwardIntervalsImpl(
+ x: readonly number[],
+ y: readonly number[],
+ z: readonly number[]
+ ): (FPVector | undefined)[] {
+ const x_vec = this.toVector(x);
+ // Running vector through this.runScalarToIntervalOpComponentWise to make
+ // sure that flushing/rounding is handled, since toVector does not perform
+ // those operations.
+ const positive_x = this.runScalarToIntervalOpComponentWise(x_vec, {
+ impl: (i: number): FPInterval => {
+ return this.toInterval(i);
+ },
+ });
+ const negative_x = this.runScalarToIntervalOpComponentWise(x_vec, this.NegationIntervalOp);
+
+ const dot_interval = this.dotInterval(z, y);
+
+ const results: (FPVector | undefined)[] = [];
+
+ if (!dot_interval.isFinite()) {
+ // dot calculation went out of bounds
+ // Inserting undefined in the result, so that the test running framework
+ // is aware of this potential OOB.
+ // For const-eval tests, it means that the test case should be skipped,
+ // since the shader will fail to compile.
+ // For non-const-eval the undefined should be stripped out of the possible
+ // results.
+
+ results.push(undefined);
+ }
+
+ // Because the result of dot can be an interval, it might span across 0, thus
+ // it is possible that both -x and x are valid responses.
+ if (dot_interval.begin < 0 || dot_interval.end < 0) {
+ results.push(positive_x);
+ }
+
+ if (dot_interval.begin >= 0 || dot_interval.end >= 0) {
+ results.push(negative_x);
+ }
+
+ assert(
+ results.length > 0 || results.every(r => r === undefined),
+ `faceForwardInterval selected neither positive x nor negative x for the result, this shouldn't be possible`
+ );
+ return results;
+ }
+
+ /** Calculate the acceptance intervals for faceForward(x, y, z) */
+ public abstract readonly faceForwardIntervals: (
+ x: readonly number[],
+ y: readonly number[],
+ z: readonly number[]
+ ) => (FPVector | undefined)[];
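+ // Example (illustrative): faceForwardIntervals([1, 0, 0], [0, 1, 0], [0, -1, 0])
+ // has dot(z, y) strictly negative, so the only candidate returned is the interval
+ // vector around x itself.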
+
+ private readonly FloorIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ return this.correctlyRoundedInterval(Math.floor(n));
+ },
+ };
+
+ protected floorIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.FloorIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of floor(x) */
+ public abstract readonly floorInterval: (n: number) => FPInterval;
+
+ private readonly FmaIntervalOp: ScalarTripleToIntervalOp = {
+ impl: (x: number, y: number, z: number): FPInterval => {
+ return this.additionInterval(this.multiplicationInterval(x, y), z);
+ },
+ };
+
+ protected fmaIntervalImpl(x: number, y: number, z: number): FPInterval {
+ return this.runScalarTripleToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.toInterval(z),
+ this.FmaIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval for fma(x, y, z) */
+ public abstract readonly fmaInterval: (x: number, y: number, z: number) => FPInterval;
+
+ private readonly FractIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ // fract(x) = x - floor(x) is defined in the spec.
+ // For people coming from a non-graphics background this will cause some
+ // unintuitive results. For example,
+ // fract(-1.1) is not 0.1 or -0.1, but instead 0.9.
+ // This is how other shading languages operate and allows for a desirable
+ // wrap around in graphics programming.
+ const result = this.subtractionInterval(n, this.floorInterval(n));
+ assert(
+ // negative.subnormal.min instead of 0, because FTZ can occur
+ // selectively during the calculation
+ this.toInterval([this.constants().negative.subnormal.min, 1.0]).contains(result),
+ `fract(${n}) interval [${result}] unexpectedly extends beyond [~0.0, 1.0]`
+ );
+ if (result.contains(1)) {
+ // Very small negative numbers can lead to catastrophic cancellation,
+ // thus calculating a fract of 1.0, which is technically not a
+ // fractional part, so some implementations clamp the result to the next
+ // nearest number below 1.0.
+ return this.spanIntervals(result, this.toInterval(this.constants().positive.less_than_one));
+ }
+ return result;
+ },
+ };
+
+ protected fractIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.FractIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of fract(x) */
+ public abstract readonly fractInterval: (n: number) => FPInterval;
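+ // Example (illustrative): fractInterval(1.25) is the correctly rounded interval
+ // around 0.25, while fractInterval(-1.1) wraps around to an interval near 0.9,
+ // matching the x - floor(x) definition above.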
+
+ private readonly InverseSqrtIntervalOp: ScalarToIntervalOp = {
+ impl: this.limitScalarToIntervalDomain(
+ this.constants().greaterThanZeroInterval,
+ (n: number): FPInterval => {
+ return this.ulpInterval(1 / Math.sqrt(n), 2);
+ }
+ ),
+ };
+
+ protected inverseSqrtIntervalImpl(n: number | FPInterval): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.InverseSqrtIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of inverseSqrt(x) */
+ public abstract readonly inverseSqrtInterval: (n: number | FPInterval) => FPInterval;
+
+ private readonly LdexpIntervalOp: ScalarPairToIntervalOp = {
+ impl: (e1: number, e2: number) => {
+ assert(this.kind === 'f32' || this.kind === 'f16');
+ assert(Number.isInteger(e2), 'the second param of ldexp must be an integer');
+ const bias = this.kind === 'f32' ? 127 : 15;
+ // The spec explicitly calls out an indeterminate value if e2 > bias + 1
+ if (e2 > bias + 1) {
+ return this.constants().unboundedInterval;
+ }
+ // The spec says the result of ldexp(e1, e2) = e1 * 2 ^ e2, and the accuracy is correctly
+ // rounded to the true value, so the inheritance framework does not need to be invoked to
+ // determine bounds.
+ // Instead, the value at a higher precision is calculated and passed to
+ // correctlyRoundedInterval.
+ const result = e1 * 2 ** e2;
+ if (!Number.isFinite(result)) {
+ // Overflowed TS's number type, so definitely out of bounds for f32/f16
+ return this.constants().unboundedInterval;
+ }
+ // The result may be zero if e2 + bias <= 0, but we can't simply span the interval to 0.0.
+ // For example, for f32 input e1 = 2**120 and e2 = -130, e2 + bias = -3 <= 0, but
+ // e1 * 2 ** e2 = 2**-10, so the valid result is 2**-10 or 0.0, instead of [0.0, 2**-10].
+ // Always return the correctly-rounded interval, and special examination should be taken when
+ // using the result.
+ return this.correctlyRoundedInterval(result);
+ },
+ };
+
+ protected ldexpIntervalImpl(e1: number, e2: number): FPInterval {
+ // Only round and flush e1, as e2 is of integer type (i32 or abstract integer) and should be
+ // precise.
+ return this.roundAndFlushScalarToInterval(e1, {
+ impl: (e1: number) => this.LdexpIntervalOp.impl(e1, e2),
+ });
+ }
+
+ /**
+ * Calculate an acceptance interval of ldexp(e1, e2), where e2 is an integer
+ *
+ * The spec indicates that the result may be zero if e2 + bias <= 0, no matter how large
+ * e1 * 2 ** e2 is, i.e. the actual valid result is correctlyRounded(e1 * 2 ** e2) or 0.0 if
+ * e2 + bias <= 0. Such discontinuous flush-to-zero behavior is hard to express using
+ * FPInterval, so when e2 + bias <= 0 the returned interval is just
+ * correctlyRounded(e1 * 2 ** e2), and special examination should be taken when using the result.
+ */
+ public abstract readonly ldexpInterval: (e1: number, e2: number) => FPInterval;
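+ // Example (illustrative): for f32, ldexpInterval(1.5, 4) is the correctly rounded
+ // interval around 1.5 * 2 ** 4 = 24, while ldexpInterval(1, 200) exceeds
+ // bias + 1 = 128 and therefore yields the unbounded interval.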
+
+ private readonly LengthIntervalScalarOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ return this.sqrtInterval(this.multiplicationInterval(n, n));
+ },
+ };
+
+ private readonly LengthIntervalVectorOp: VectorToIntervalOp = {
+ impl: (n: readonly number[]): FPInterval => {
+ return this.sqrtInterval(this.dotInterval(n, n));
+ },
+ };
+
+ protected lengthIntervalImpl(n: number | FPInterval | readonly number[] | FPVector): FPInterval {
+ if (n instanceof Array) {
+ return this.runVectorToIntervalOp(this.toVector(n), this.LengthIntervalVectorOp);
+ } else {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.LengthIntervalScalarOp);
+ }
+ }
+
+ /** Calculate an acceptance interval of length(x) */
+ public abstract readonly lengthInterval: (
+ n: number | FPInterval | readonly number[] | FPVector
+ ) => FPInterval;
+
+ private readonly LogIntervalOp: ScalarToIntervalOp = {
+ impl: this.limitScalarToIntervalDomain(
+ this.constants().greaterThanZeroInterval,
+ (n: number): FPInterval => {
+ assert(this.kind === 'f32' || this.kind === 'f16');
+ const abs_error = this.kind === 'f32' ? 2 ** -21 : 2 ** -7;
+ if (n >= 0.5 && n <= 2.0) {
+ return this.absoluteErrorInterval(Math.log(n), abs_error);
+ }
+ return this.ulpInterval(Math.log(n), 3);
+ }
+ ),
+ };
+
+ protected logIntervalImpl(x: number | FPInterval): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(x), this.LogIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of log(x) */
+ public abstract readonly logInterval: (x: number | FPInterval) => FPInterval;
+
+ private readonly Log2IntervalOp: ScalarToIntervalOp = {
+ impl: this.limitScalarToIntervalDomain(
+ this.constants().greaterThanZeroInterval,
+ (n: number): FPInterval => {
+ assert(this.kind === 'f32' || this.kind === 'f16');
+ const abs_error = this.kind === 'f32' ? 2 ** -21 : 2 ** -7;
+ if (n >= 0.5 && n <= 2.0) {
+ return this.absoluteErrorInterval(Math.log2(n), abs_error);
+ }
+ return this.ulpInterval(Math.log2(n), 3);
+ }
+ ),
+ };
+
+ protected log2IntervalImpl(x: number | FPInterval): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(x), this.Log2IntervalOp);
+ }
+
+ /** Calculate an acceptance interval of log2(x) */
+ public abstract readonly log2Interval: (x: number | FPInterval) => FPInterval;
+
+ private readonly MaxIntervalOp: ScalarPairToIntervalOp = {
+ impl: (x: number, y: number): FPInterval => {
+ // If both of the inputs are subnormal, then either of the inputs can be returned
+ if (this.isSubnormal(x) && this.isSubnormal(y)) {
+ return this.correctlyRoundedInterval(
+ this.spanIntervals(this.toInterval(x), this.toInterval(y))
+ );
+ }
+
+ return this.correctlyRoundedInterval(Math.max(x, y));
+ },
+ };
+
+ protected maxIntervalImpl(x: number | FPInterval, y: number | FPInterval): FPInterval {
+ return this.runScalarPairToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.MaxIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of max(x, y) */
+ public abstract readonly maxInterval: (
+ x: number | FPInterval,
+ y: number | FPInterval
+ ) => FPInterval;
+
+ private readonly MinIntervalOp: ScalarPairToIntervalOp = {
+ impl: (x: number, y: number): FPInterval => {
+ // If both of the inputs are subnormal, then either of the inputs can be returned
+ if (this.isSubnormal(x) && this.isSubnormal(y)) {
+ return this.correctlyRoundedInterval(
+ this.spanIntervals(this.toInterval(x), this.toInterval(y))
+ );
+ }
+
+ return this.correctlyRoundedInterval(Math.min(x, y));
+ },
+ };
+
+ protected minIntervalImpl(x: number | FPInterval, y: number | FPInterval): FPInterval {
+ return this.runScalarPairToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.MinIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of min(x, y) */
+ public abstract readonly minInterval: (
+ x: number | FPInterval,
+ y: number | FPInterval
+ ) => FPInterval;
+
+ private readonly MixImpreciseIntervalOp: ScalarTripleToIntervalOp = {
+ impl: (x: number, y: number, z: number): FPInterval => {
+ // x + (y - x) * z =
+ // x + t, where t = (y - x) * z
+ const t = this.multiplicationInterval(this.subtractionInterval(y, x), z);
+ return this.additionInterval(x, t);
+ },
+ };
+
+ protected mixImpreciseIntervalImpl(x: number, y: number, z: number): FPInterval {
+ return this.runScalarTripleToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.toInterval(z),
+ this.MixImpreciseIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of mix(x, y, z) using x + (y - x) * z */
+ public abstract readonly mixImpreciseInterval: (x: number, y: number, z: number) => FPInterval;
+
+ private readonly MixPreciseIntervalOp: ScalarTripleToIntervalOp = {
+ impl: (x: number, y: number, z: number): FPInterval => {
+ // x * (1.0 - z) + y * z =
+ // t + s, where t = x * (1.0 - z), s = y * z
+ const t = this.multiplicationInterval(x, this.subtractionInterval(1.0, z));
+ const s = this.multiplicationInterval(y, z);
+ return this.additionInterval(t, s);
+ },
+ };
+
+ protected mixPreciseIntervalImpl(x: number, y: number, z: number): FPInterval {
+ return this.runScalarTripleToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.toInterval(z),
+ this.MixPreciseIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of mix(x, y, z) using x * (1.0 - z) + y * z */
+ public abstract readonly mixPreciseInterval: (x: number, y: number, z: number) => FPInterval;
+
+ /** All acceptance interval functions for mix(x, y, z) */
+ public abstract readonly mixIntervals: ScalarTripleToInterval[];
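+ // Example (illustrative): both formulations agree that mix(2.0, 4.0, 0.5) is an
+ // interval around 3.0; a result is typically checked against each entry of
+ // mixIntervals and accepted if it satisfies either one.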
+
+ protected modfIntervalImpl(n: number): { fract: FPInterval; whole: FPInterval } {
+ const fract = this.correctlyRoundedInterval(n % 1.0);
+ const whole = this.correctlyRoundedInterval(n - (n % 1.0));
+ return { fract, whole };
+ }
+
+ /** Calculate an acceptance interval of modf(x) */
+ public abstract readonly modfInterval: (n: number) => { fract: FPInterval; whole: FPInterval };
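+ // Example (illustrative): modfInterval(2.75) produces a fract interval around 0.75
+ // and a whole interval around 2.0, since 2.75 % 1.0 === 0.75.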
+
+ private readonly MultiplicationInnerOp = {
+ impl: (x: number, y: number): FPInterval => {
+ return this.correctlyRoundedInterval(x * y);
+ },
+ };
+
+ private readonly MultiplicationIntervalOp: ScalarPairToIntervalOp = {
+ impl: (x: number, y: number): FPInterval => {
+ return this.roundAndFlushScalarPairToInterval(x, y, this.MultiplicationInnerOp);
+ },
+ };
+
+ protected multiplicationIntervalImpl(x: number | FPInterval, y: number | FPInterval): FPInterval {
+ return this.runScalarPairToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.MultiplicationIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of x * y */
+ public abstract readonly multiplicationInterval: (
+ x: number | FPInterval,
+ y: number | FPInterval
+ ) => FPInterval;
+
+ /**
+ * @returns the vector result of multiplying the given vector by the given
+ * scalar
+ */
+ private multiplyVectorByScalar(v: readonly number[], c: number | FPInterval): FPVector {
+ return this.toVector(v.map(x => this.multiplicationInterval(x, c)));
+ }
+
+ protected multiplicationMatrixScalarIntervalImpl(mat: Array2D<number>, scalar: number): FPMatrix {
+ const cols = mat.length;
+ const rows = mat[0].length;
+ return this.toMatrix(
+ unflatten2DArray(
+ flatten2DArray(mat).map(e => this.multiplicationInterval(e, scalar)),
+ cols,
+ rows
+ )
+ );
+ }
+
+ /** Calculate an acceptance interval of x * y, when x is a matrix and y is a scalar */
+ public abstract readonly multiplicationMatrixScalarInterval: (
+ mat: Array2D<number>,
+ scalar: number
+ ) => FPMatrix;
+
+ protected multiplicationScalarMatrixIntervalImpl(scalar: number, mat: Array2D<number>): FPMatrix {
+ return this.multiplicationMatrixScalarIntervalImpl(mat, scalar);
+ }
+
+ /** Calculate an acceptance interval of x * y, when x is a scalar and y is a matrix */
+ public abstract readonly multiplicationScalarMatrixInterval: (
+ scalar: number,
+ mat: Array2D<number>
+ ) => FPMatrix;
+
+ protected multiplicationMatrixMatrixIntervalImpl(
+ mat_x: Array2D<number>,
+ mat_y: Array2D<number>
+ ): FPMatrix {
+ const x_cols = mat_x.length;
+ const x_rows = mat_x[0].length;
+ const y_cols = mat_y.length;
+ const y_rows = mat_y[0].length;
+ assert(x_cols === y_rows, `'mat${x_cols}x${x_rows} * mat${y_cols}x${y_rows}' is not defined`);
+
+ const x_transposed = this.transposeInterval(mat_x);
+
+ const result: FPInterval[][] = [...Array(y_cols)].map(_ => [...Array(x_rows)]);
+ mat_y.forEach((y, i) => {
+ x_transposed.forEach((x, j) => {
+ result[i][j] = this.dotInterval(x, y);
+ });
+ });
+
+ return result as ROArrayArray<FPInterval> as FPMatrix;
+ }
+
+ /** Calculate an acceptance interval of x * y, when x is a matrix and y is a matrix */
+ public abstract readonly multiplicationMatrixMatrixInterval: (
+ mat_x: Array2D<number>,
+ mat_y: Array2D<number>
+ ) => FPMatrix;
+
+ protected multiplicationMatrixVectorIntervalImpl(
+ x: Array2D<number>,
+ y: readonly number[]
+ ): FPVector {
+ const cols = x.length;
+ const rows = x[0].length;
+ assert(y.length === cols, `'mat${cols}x${rows} * vec${y.length}' is not defined`);
+
+ return this.transposeInterval(x).map(e => this.dotInterval(e, y)) as FPVector;
+ }
+
+ /** Calculate an acceptance interval of x * y, when x is a matrix and y is a vector */
+ public abstract readonly multiplicationMatrixVectorInterval: (
+ x: Array2D<number>,
+ y: readonly number[]
+ ) => FPVector;
+
+ protected multiplicationVectorMatrixIntervalImpl(
+ x: readonly number[],
+ y: Array2D<number>
+ ): FPVector {
+ const cols = y.length;
+ const rows = y[0].length;
+ assert(x.length === rows, `'vec${x.length} * mat${cols}x${rows}' is not defined`);
+
+ return y.map(e => this.dotInterval(x, e)) as FPVector;
+ }
+
+ /** Calculate an acceptance interval of x * y, when x is a vector and y is a matrix */
+ public abstract readonly multiplicationVectorMatrixInterval: (
+ x: readonly number[],
+ y: Array2D<number>
+ ) => FPVector;
+
+ private readonly NegationIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ return this.correctlyRoundedInterval(-n);
+ },
+ };
+
+ protected negationIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.NegationIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of -x */
+ public abstract readonly negationInterval: (n: number) => FPInterval;
+
+ private readonly NormalizeIntervalOp: VectorToVectorOp = {
+ impl: (n: readonly number[]): FPVector => {
+ const length = this.lengthInterval(n);
+ return this.toVector(n.map(e => this.divisionInterval(e, length)));
+ },
+ };
+
+ protected normalizeIntervalImpl(n: readonly number[]): FPVector {
+ return this.runVectorToVectorOp(this.toVector(n), this.NormalizeIntervalOp);
+ }
+
+ /** Calculate a vector of acceptance intervals for normalize(n) */
+ public abstract readonly normalizeInterval: (n: readonly number[]) => FPVector;
+
+ private readonly PowIntervalOp: ScalarPairToIntervalOp = {
+ // pow(x, y) has no explicit domain restrictions, but inherits the x <= 0
+ // domain restriction from log2(x). Invoking log2Interval(x) in impl will
+ // enforce this, so there is no need to wrap the impl call here.
+ impl: (x: number, y: number): FPInterval => {
+ return this.exp2Interval(this.multiplicationInterval(y, this.log2Interval(x)));
+ },
+ };
+
+ protected powIntervalImpl(x: number | FPInterval, y: number | FPInterval): FPInterval {
+ return this.runScalarPairToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.PowIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of pow(x, y) */
+ public abstract readonly powInterval: (
+ x: number | FPInterval,
+ y: number | FPInterval
+ ) => FPInterval;
+
+ private readonly RadiansIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ return this.multiplicationInterval(n, 0.017453292519943295474);
+ },
+ };
+
+ protected radiansIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.RadiansIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of radians(x) */
+ public abstract readonly radiansInterval: (n: number) => FPInterval;
+
+ private readonly ReflectIntervalOp: VectorPairToVectorOp = {
+ impl: (x: readonly number[], y: readonly number[]): FPVector => {
+ assert(
+ x.length === y.length,
+ `ReflectIntervalOp received x (${x}) and y (${y}) with different numbers of elements`
+ );
+
+ // reflect(x, y) = x - 2.0 * dot(x, y) * y
+ // = x - t * y, t = 2.0 * dot(x, y)
+ // x = incident vector
+ // y = normal of reflecting surface
+ const t = this.multiplicationInterval(2.0, this.dotInterval(x, y));
+ const rhs = this.multiplyVectorByScalar(y, t);
+ return this.runScalarPairToIntervalOpVectorComponentWise(
+ this.toVector(x),
+ rhs,
+ this.SubtractionIntervalOp
+ );
+ },
+ };
+
+ protected reflectIntervalImpl(x: readonly number[], y: readonly number[]): FPVector {
+ assert(
+ x.length === y.length,
+ `reflect is only defined for vectors with the same number of elements`
+ );
+ return this.runVectorPairToVectorOp(this.toVector(x), this.toVector(y), this.ReflectIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of reflect(x, y) */
+ public abstract readonly reflectInterval: (
+ x: readonly number[],
+ y: readonly number[]
+ ) => FPVector;
+
+ /**
+ * refract is a singular function in the sense that it is the only builtin that
+ * takes in (FPVector, FPVector, F32/F16), returns FPVector, and is basically
+ * defined in terms of other functions.
+ *
+ * Instead of implementing all the framework code to integrate it with its
+ * own operation type, etc., it has a bespoke implementation that is a
+ * composition of other builtin functions that use the framework.
+ */
+ protected refractIntervalImpl(i: readonly number[], s: readonly number[], r: number): FPVector {
+ assert(
+ i.length === s.length,
+ `refract is only defined for vectors with the same number of elements`
+ );
+
+ const r_squared = this.multiplicationInterval(r, r);
+ const dot = this.dotInterval(s, i);
+ const dot_squared = this.multiplicationInterval(dot, dot);
+ const one_minus_dot_squared = this.subtractionInterval(1, dot_squared);
+ const k = this.subtractionInterval(
+ 1.0,
+ this.multiplicationInterval(r_squared, one_minus_dot_squared)
+ );
+
+ if (!k.isFinite() || k.containsZeroOrSubnormals()) {
+ // There is a discontinuity at k == 0, due to sqrt(k) being calculated, so exiting early
+ return this.constants().unboundedVector[this.toVector(i).length];
+ }
+
+ if (k.end < 0.0) {
+ // if k is negative, then the zero vector is the valid response
+ return this.constants().zeroVector[this.toVector(i).length];
+ }
+
+ const dot_times_r = this.multiplicationInterval(dot, r);
+ const k_sqrt = this.sqrtInterval(k);
+ const t = this.additionInterval(dot_times_r, k_sqrt); // t = r * dot(i, s) + sqrt(k)
+
+ return this.runScalarPairToIntervalOpVectorComponentWise(
+ this.multiplyVectorByScalar(i, r),
+ this.multiplyVectorByScalar(s, t),
+ this.SubtractionIntervalOp
+ ); // (i * r) - (s * t)
+ }
+
+ /** Calculate acceptance interval vectors of refract(i, s, r) */
+ public abstract readonly refractInterval: (
+ i: readonly number[],
+ s: readonly number[],
+ r: number
+ ) => FPVector;
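+ // Example (illustrative): refractInterval([1, 0], [0, 1], 10) computes
+ // k = 1 - 100 * (1 - 0) = -99 < 0, so the zero vector of width 2 is returned,
+ // matching the total-internal-reflection case described by the k < 0 branch above.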
+
+ private readonly RemainderIntervalOp: ScalarPairToIntervalOp = {
+ impl: (x: number, y: number): FPInterval => {
+ // x % y = x - y * trunc(x/y)
+ return this.subtractionInterval(
+ x,
+ this.multiplicationInterval(y, this.truncInterval(this.divisionInterval(x, y)))
+ );
+ },
+ };
+
+ /** Calculate an acceptance interval for x % y */
+ protected remainderIntervalImpl(x: number, y: number): FPInterval {
+ return this.runScalarPairToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.RemainderIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval for x % y */
+ public abstract readonly remainderInterval: (x: number, y: number) => FPInterval;
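+ // Example (illustrative): remainderInterval(5, 3) evaluates x - y * trunc(x / y),
+ // i.e. 5 - 3 * trunc(5 / 3) = 2, via the interval versions of each operation above.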
+
+ private readonly RoundIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ const k = Math.floor(n);
+ const diff_before = n - k;
+ const diff_after = k + 1 - n;
+ if (diff_before < diff_after) {
+ return this.correctlyRoundedInterval(k);
+ } else if (diff_before > diff_after) {
+ return this.correctlyRoundedInterval(k + 1);
+ }
+
+ // n is in the middle of two integers.
+ // The tie breaking rule is 'k if k is even, k + 1 if k is odd'
+ if (k % 2 === 0) {
+ return this.correctlyRoundedInterval(k);
+ }
+ return this.correctlyRoundedInterval(k + 1);
+ },
+ };
+
+ protected roundIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.RoundIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of round(x) */
+ public abstract readonly roundInterval: (n: number) => FPInterval;
+
+ /**
+ * The definition of saturate does not specify which version of clamp to use.
+ * Using min-max here, since it has wider acceptance intervals that include
+ * all of median's.
+ */
+ protected saturateIntervalImpl(n: number): FPInterval {
+ return this.runScalarTripleToIntervalOp(
+ this.toInterval(n),
+ this.toInterval(0.0),
+ this.toInterval(1.0),
+ this.ClampMinMaxIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of saturate(n) as clamp(n, 0.0, 1.0) */
+ public abstract readonly saturateInterval: (n: number) => FPInterval;
+
+ private readonly SignIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ if (n > 0.0) {
+ return this.correctlyRoundedInterval(1.0);
+ }
+ if (n < 0.0) {
+ return this.correctlyRoundedInterval(-1.0);
+ }
+
+ return this.correctlyRoundedInterval(0.0);
+ },
+ };
+
+ protected signIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.SignIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of sign(x) */
+ public abstract readonly signInterval: (n: number) => FPInterval;
+
+ private readonly SinIntervalOp: ScalarToIntervalOp = {
+ impl: this.limitScalarToIntervalDomain(
+ this.constants().negPiToPiInterval,
+ (n: number): FPInterval => {
+ assert(this.kind === 'f32' || this.kind === 'f16');
+ const abs_error = this.kind === 'f32' ? 2 ** -11 : 2 ** -7;
+ return this.absoluteErrorInterval(Math.sin(n), abs_error);
+ }
+ ),
+ };
+
+ protected sinIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.SinIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of sin(x) */
+ public abstract readonly sinInterval: (n: number) => FPInterval;
+
+ private readonly SinhIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ // sinh(x) = (exp(x) - exp(-x)) * 0.5
+ const minus_n = this.negationInterval(n);
+ return this.multiplicationInterval(
+ this.subtractionInterval(this.expInterval(n), this.expInterval(minus_n)),
+ 0.5
+ );
+ },
+ };
+
+ protected sinhIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.SinhIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of sinh(x) */
+ public abstract readonly sinhInterval: (n: number) => FPInterval;
+
+ private readonly SmoothStepOp: ScalarTripleToIntervalOp = {
+ impl: (low: number, high: number, x: number): FPInterval => {
+ // For clamp(foo, 0.0, 1.0) the different implementations of clamp provide
+ // the same value, so arbitrarily picking the median version to use.
+ // t = clamp((x - low) / (high - low), 0.0, 1.0)
+ // prettier-ignore
+ const t = this.clampMedianInterval(
+ this.divisionInterval(
+ this.subtractionInterval(x, low),
+ this.subtractionInterval(high, low)),
+ 0.0,
+ 1.0);
+ // Inherited from t * t * (3.0 - 2.0 * t)
+ // prettier-ignore
+ return this.multiplicationInterval(
+ t,
+ this.multiplicationInterval(t,
+ this.subtractionInterval(3.0,
+ this.multiplicationInterval(2.0, t))));
+ },
+ };
+
+ protected smoothStepIntervalImpl(low: number, high: number, x: number): FPInterval {
+ return this.runScalarTripleToIntervalOp(
+ this.toInterval(low),
+ this.toInterval(high),
+ this.toInterval(x),
+ this.SmoothStepOp
+ );
+ }
+
+ /** Calculate an acceptance interval of smoothStep(low, high, x) */
+ public abstract readonly smoothStepInterval: (low: number, high: number, x: number) => FPInterval;
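+ // Example (illustrative): smoothStepInterval(0.0, 1.0, 0.5) clamps t to 0.5 and
+ // evaluates t * t * (3.0 - 2.0 * t), giving an interval around 0.5.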
+
+ private readonly SqrtIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ return this.divisionInterval(1.0, this.inverseSqrtInterval(n));
+ },
+ };
+
+ protected sqrtIntervalImpl(n: number | FPInterval): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.SqrtIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of sqrt(x) */
+ public abstract readonly sqrtInterval: (n: number | FPInterval) => FPInterval;
+
+ private readonly StepIntervalOp: ScalarPairToIntervalOp = {
+ impl: (edge: number, x: number): FPInterval => {
+ if (edge <= x) {
+ return this.correctlyRoundedInterval(1.0);
+ }
+ return this.correctlyRoundedInterval(0.0);
+ },
+ };
+
+ protected stepIntervalImpl(edge: number, x: number): FPInterval {
+ return this.runScalarPairToIntervalOp(
+ this.toInterval(edge),
+ this.toInterval(x),
+ this.StepIntervalOp
+ );
+ }
+
+ /**
+ * Calculate an acceptance 'interval' for step(edge, x)
+ *
+ * step only returns two possible values, so its interval requires special
+ * interpretation in CTS tests.
+ * This interval will be one of four values: [0, 0], [0, 1], [1, 1] & [-∞, +∞].
+ * [0, 0] and [1, 1] indicate that the correct answer is the point they encapsulate.
+ * [0, 1] should not be treated as a span (i.e. 0.1 is not an acceptable answer);
+ * instead it indicates that either 0.0 or 1.0 is an acceptable answer.
+ * [-∞, +∞] is treated as an unbounded interval, since an unbounded or
+ * infinite value was passed in.
+ */
+ public abstract readonly stepInterval: (edge: number, x: number) => FPInterval;
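+ // Example (illustrative): stepInterval(1.0, 2.0) is [1, 1] since edge <= x,
+ // stepInterval(2.0, 1.0) is [0, 0], and inputs whose rounded/flushed values
+ // straddle the edge can produce the special [0, 1] result described above.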
+
+ private readonly SubtractionIntervalOp: ScalarPairToIntervalOp = {
+ impl: (x: number, y: number): FPInterval => {
+ return this.correctlyRoundedInterval(x - y);
+ },
+ };
+
+ protected subtractionIntervalImpl(x: number | FPInterval, y: number | FPInterval): FPInterval {
+ return this.runScalarPairToIntervalOp(
+ this.toInterval(x),
+ this.toInterval(y),
+ this.SubtractionIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of x - y */
+ public abstract readonly subtractionInterval: (
+ x: number | FPInterval,
+ y: number | FPInterval
+ ) => FPInterval;
+
+ protected subtractionMatrixMatrixIntervalImpl(x: Array2D<number>, y: Array2D<number>): FPMatrix {
+ return this.runScalarPairToIntervalOpMatrixComponentWise(
+ this.toMatrix(x),
+ this.toMatrix(y),
+ this.SubtractionIntervalOp
+ );
+ }
+
+ /** Calculate an acceptance interval of x - y, when x and y are matrices */
+ public abstract readonly subtractionMatrixMatrixInterval: (
+ x: Array2D<number>,
+ y: Array2D<number>
+ ) => FPMatrix;
+
+ private readonly TanIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ return this.divisionInterval(this.sinInterval(n), this.cosInterval(n));
+ },
+ };
+
+ protected tanIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.TanIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of tan(x) */
+ public abstract readonly tanInterval: (n: number) => FPInterval;
+
+ private readonly TanhIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ return this.divisionInterval(this.sinhInterval(n), this.coshInterval(n));
+ },
+ };
+
+ protected tanhIntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.TanhIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of tanh(x) */
+ public abstract readonly tanhInterval: (n: number) => FPInterval;
+
+ private readonly TransposeIntervalOp: MatrixToMatrixOp = {
+ impl: (m: Array2D<number>): FPMatrix => {
+ const num_cols = m.length;
+ const num_rows = m[0].length;
+ const result: FPInterval[][] = [...Array(num_rows)].map(_ => [...Array(num_cols)]);
+
+ for (let i = 0; i < num_cols; i++) {
+ for (let j = 0; j < num_rows; j++) {
+ result[j][i] = this.correctlyRoundedInterval(m[i][j]);
+ }
+ }
+ return this.toMatrix(result);
+ },
+ };
+
+ protected transposeIntervalImpl(m: Array2D<number>): FPMatrix {
+ return this.runMatrixToMatrixOp(this.toMatrix(m), this.TransposeIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of transpose(m) */
+ public abstract readonly transposeInterval: (m: Array2D<number>) => FPMatrix;
+
+ private readonly TruncIntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ return this.correctlyRoundedInterval(Math.trunc(n));
+ },
+ };
+
+ protected truncIntervalImpl(n: number | FPInterval): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.TruncIntervalOp);
+ }
+
+ /** Calculate an acceptance interval of trunc(x) */
+ public abstract readonly truncInterval: (n: number | FPInterval) => FPInterval;
+}
+
+// Pre-defined values that get used multiple times in _constants' initializers. Cannot use FPTraits members, since this
+// executes before they are defined.
+const kF32UnboundedInterval = new FPInterval(
+ 'f32',
+ Number.NEGATIVE_INFINITY,
+ Number.POSITIVE_INFINITY
+);
+const kF32ZeroInterval = new FPInterval('f32', 0);
+
+class F32Traits extends FPTraits {
+ private static _constants: FPConstants = {
+ positive: {
+ min: kValue.f32.positive.min,
+ max: kValue.f32.positive.max,
+ infinity: kValue.f32.positive.infinity,
+ nearest_max: kValue.f32.positive.nearest_max,
+ less_than_one: kValue.f32.positive.less_than_one,
+ subnormal: {
+ min: kValue.f32.positive.subnormal.min,
+ max: kValue.f32.positive.subnormal.max,
+ },
+ pi: {
+ whole: kValue.f32.positive.pi.whole,
+ three_quarters: kValue.f32.positive.pi.three_quarters,
+ half: kValue.f32.positive.pi.half,
+ third: kValue.f32.positive.pi.third,
+ quarter: kValue.f32.positive.pi.quarter,
+ sixth: kValue.f32.positive.pi.sixth,
+ },
+ e: kValue.f32.positive.e,
+ },
+ negative: {
+ min: kValue.f32.negative.min,
+ max: kValue.f32.negative.max,
+ infinity: kValue.f32.negative.infinity,
+ nearest_min: kValue.f32.negative.nearest_min,
+ less_than_one: kValue.f32.negative.less_than_one,
+ subnormal: {
+ min: kValue.f32.negative.subnormal.min,
+ max: kValue.f32.negative.subnormal.max,
+ },
+ pi: {
+ whole: kValue.f32.negative.pi.whole,
+ three_quarters: kValue.f32.negative.pi.three_quarters,
+ half: kValue.f32.negative.pi.half,
+ third: kValue.f32.negative.pi.third,
+ quarter: kValue.f32.negative.pi.quarter,
+ sixth: kValue.f32.negative.pi.sixth,
+ },
+ },
+ unboundedInterval: kF32UnboundedInterval,
+ zeroInterval: kF32ZeroInterval,
+ // Have to use the constants.ts values here, because values defined in the
+ // initializer cannot be referenced in the initializer
+ negPiToPiInterval: new FPInterval(
+ 'f32',
+ kValue.f32.negative.pi.whole,
+ kValue.f32.positive.pi.whole
+ ),
+ greaterThanZeroInterval: new FPInterval(
+ 'f32',
+ kValue.f32.positive.subnormal.min,
+ kValue.f32.positive.max
+ ),
+ zeroVector: {
+ 2: [kF32ZeroInterval, kF32ZeroInterval],
+ 3: [kF32ZeroInterval, kF32ZeroInterval, kF32ZeroInterval],
+ 4: [kF32ZeroInterval, kF32ZeroInterval, kF32ZeroInterval, kF32ZeroInterval],
+ },
+ unboundedVector: {
+ 2: [kF32UnboundedInterval, kF32UnboundedInterval],
+ 3: [kF32UnboundedInterval, kF32UnboundedInterval, kF32UnboundedInterval],
+ 4: [
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ ],
+ },
+ unboundedMatrix: {
+ 2: {
+ 2: [
+ [kF32UnboundedInterval, kF32UnboundedInterval],
+ [kF32UnboundedInterval, kF32UnboundedInterval],
+ ],
+ 3: [
+ [kF32UnboundedInterval, kF32UnboundedInterval, kF32UnboundedInterval],
+ [kF32UnboundedInterval, kF32UnboundedInterval, kF32UnboundedInterval],
+ ],
+ 4: [
+ [
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ ],
+ [
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ ],
+ ],
+ },
+ 3: {
+ 2: [
+ [kF32UnboundedInterval, kF32UnboundedInterval],
+ [kF32UnboundedInterval, kF32UnboundedInterval],
+ [kF32UnboundedInterval, kF32UnboundedInterval],
+ ],
+ 3: [
+ [kF32UnboundedInterval, kF32UnboundedInterval, kF32UnboundedInterval],
+ [kF32UnboundedInterval, kF32UnboundedInterval, kF32UnboundedInterval],
+ [kF32UnboundedInterval, kF32UnboundedInterval, kF32UnboundedInterval],
+ ],
+ 4: [
+ [
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ ],
+ [
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ ],
+ [
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ ],
+ ],
+ },
+ 4: {
+ 2: [
+ [kF32UnboundedInterval, kF32UnboundedInterval],
+ [kF32UnboundedInterval, kF32UnboundedInterval],
+ [kF32UnboundedInterval, kF32UnboundedInterval],
+ [kF32UnboundedInterval, kF32UnboundedInterval],
+ ],
+ 3: [
+ [kF32UnboundedInterval, kF32UnboundedInterval, kF32UnboundedInterval],
+ [kF32UnboundedInterval, kF32UnboundedInterval, kF32UnboundedInterval],
+ [kF32UnboundedInterval, kF32UnboundedInterval, kF32UnboundedInterval],
+ [kF32UnboundedInterval, kF32UnboundedInterval, kF32UnboundedInterval],
+ ],
+ 4: [
+ [
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ ],
+ [
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ ],
+ [
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ ],
+ [
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ kF32UnboundedInterval,
+ ],
+ ],
+ },
+ },
+ };
+
+ public constructor() {
+ super('f32');
+ }
+
+ public constants(): FPConstants {
+ return F32Traits._constants;
+ }
+
+ // Utilities - Overrides
+ public readonly quantize = quantizeToF32;
+ public readonly correctlyRounded = correctlyRoundedF32;
+ public readonly isFinite = isFiniteF32;
+ public readonly isSubnormal = isSubnormalNumberF32;
+ public readonly flushSubnormal = flushSubnormalNumberF32;
+ public readonly oneULP = oneULPF32;
+ public readonly scalarBuilder = f32;
+
+ // Framework - Fundamental Error Intervals - Overrides
+ public readonly absoluteErrorInterval = this.absoluteErrorIntervalImpl.bind(this);
+ public readonly correctlyRoundedInterval = this.correctlyRoundedIntervalImpl.bind(this);
+ public readonly correctlyRoundedMatrix = this.correctlyRoundedMatrixImpl.bind(this);
+ public readonly ulpInterval = this.ulpIntervalImpl.bind(this);
+
+ // Framework - API - Overrides
+ public readonly absInterval = this.absIntervalImpl.bind(this);
+ public readonly acosInterval = this.acosIntervalImpl.bind(this);
+ public readonly acoshAlternativeInterval = this.acoshAlternativeIntervalImpl.bind(this);
+ public readonly acoshPrimaryInterval = this.acoshPrimaryIntervalImpl.bind(this);
+ public readonly acoshIntervals = [this.acoshAlternativeInterval, this.acoshPrimaryInterval];
+ public readonly additionInterval = this.additionIntervalImpl.bind(this);
+ public readonly additionMatrixMatrixInterval = this.additionMatrixMatrixIntervalImpl.bind(this);
+ public readonly asinInterval = this.asinIntervalImpl.bind(this);
+ public readonly asinhInterval = this.asinhIntervalImpl.bind(this);
+ public readonly atanInterval = this.atanIntervalImpl.bind(this);
+ public readonly atan2Interval = this.atan2IntervalImpl.bind(this);
+ public readonly atanhInterval = this.atanhIntervalImpl.bind(this);
+ public readonly ceilInterval = this.ceilIntervalImpl.bind(this);
+ public readonly clampMedianInterval = this.clampMedianIntervalImpl.bind(this);
+ public readonly clampMinMaxInterval = this.clampMinMaxIntervalImpl.bind(this);
+ public readonly clampIntervals = [this.clampMedianInterval, this.clampMinMaxInterval];
+ public readonly cosInterval = this.cosIntervalImpl.bind(this);
+ public readonly coshInterval = this.coshIntervalImpl.bind(this);
+ public readonly crossInterval = this.crossIntervalImpl.bind(this);
+ public readonly degreesInterval = this.degreesIntervalImpl.bind(this);
+ public readonly determinantInterval = this.determinantIntervalImpl.bind(this);
+ public readonly distanceInterval = this.distanceIntervalImpl.bind(this);
+ public readonly divisionInterval = this.divisionIntervalImpl.bind(this);
+ public readonly dotInterval = this.dotIntervalImpl.bind(this);
+ public readonly expInterval = this.expIntervalImpl.bind(this);
+ public readonly exp2Interval = this.exp2IntervalImpl.bind(this);
+ public readonly faceForwardIntervals = this.faceForwardIntervalsImpl.bind(this);
+ public readonly floorInterval = this.floorIntervalImpl.bind(this);
+ public readonly fmaInterval = this.fmaIntervalImpl.bind(this);
+ public readonly fractInterval = this.fractIntervalImpl.bind(this);
+ public readonly inverseSqrtInterval = this.inverseSqrtIntervalImpl.bind(this);
+ public readonly ldexpInterval = this.ldexpIntervalImpl.bind(this);
+ public readonly lengthInterval = this.lengthIntervalImpl.bind(this);
+ public readonly logInterval = this.logIntervalImpl.bind(this);
+ public readonly log2Interval = this.log2IntervalImpl.bind(this);
+ public readonly maxInterval = this.maxIntervalImpl.bind(this);
+ public readonly minInterval = this.minIntervalImpl.bind(this);
+ public readonly mixImpreciseInterval = this.mixImpreciseIntervalImpl.bind(this);
+ public readonly mixPreciseInterval = this.mixPreciseIntervalImpl.bind(this);
+ public readonly mixIntervals = [this.mixImpreciseInterval, this.mixPreciseInterval];
+ public readonly modfInterval = this.modfIntervalImpl.bind(this);
+ public readonly multiplicationInterval = this.multiplicationIntervalImpl.bind(this);
+ public readonly multiplicationMatrixMatrixInterval =
+ this.multiplicationMatrixMatrixIntervalImpl.bind(this);
+ public readonly multiplicationMatrixScalarInterval =
+ this.multiplicationMatrixScalarIntervalImpl.bind(this);
+ public readonly multiplicationScalarMatrixInterval =
+ this.multiplicationScalarMatrixIntervalImpl.bind(this);
+ public readonly multiplicationMatrixVectorInterval =
+ this.multiplicationMatrixVectorIntervalImpl.bind(this);
+ public readonly multiplicationVectorMatrixInterval =
+ this.multiplicationVectorMatrixIntervalImpl.bind(this);
+ public readonly negationInterval = this.negationIntervalImpl.bind(this);
+ public readonly normalizeInterval = this.normalizeIntervalImpl.bind(this);
+ public readonly powInterval = this.powIntervalImpl.bind(this);
+ public readonly radiansInterval = this.radiansIntervalImpl.bind(this);
+ public readonly reflectInterval = this.reflectIntervalImpl.bind(this);
+ public readonly refractInterval = this.refractIntervalImpl.bind(this);
+ public readonly remainderInterval = this.remainderIntervalImpl.bind(this);
+ public readonly roundInterval = this.roundIntervalImpl.bind(this);
+ public readonly saturateInterval = this.saturateIntervalImpl.bind(this);
+ public readonly signInterval = this.signIntervalImpl.bind(this);
+ public readonly sinInterval = this.sinIntervalImpl.bind(this);
+ public readonly sinhInterval = this.sinhIntervalImpl.bind(this);
+ public readonly smoothStepInterval = this.smoothStepIntervalImpl.bind(this);
+ public readonly sqrtInterval = this.sqrtIntervalImpl.bind(this);
+ public readonly stepInterval = this.stepIntervalImpl.bind(this);
+ public readonly subtractionInterval = this.subtractionIntervalImpl.bind(this);
+ public readonly subtractionMatrixMatrixInterval =
+ this.subtractionMatrixMatrixIntervalImpl.bind(this);
+ public readonly tanInterval = this.tanIntervalImpl.bind(this);
+ public readonly tanhInterval = this.tanhIntervalImpl.bind(this);
+ public readonly transposeInterval = this.transposeIntervalImpl.bind(this);
+ public readonly truncInterval = this.truncIntervalImpl.bind(this);
+
+ // Framework - Cases
+
+ // U32 -> Interval is used for testing f32 specific unpack* functions
+ /**
+ * @returns a Case for the param and the interval generator provided.
+ * The Case will use an interval comparator for matching results.
+ * @param param the param to pass in
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ private makeU32ToVectorCase(
+ param: number,
+ filter: IntervalFilter,
+ ...ops: ScalarToVector[]
+ ): Case | undefined {
+ param = Math.trunc(param);
+
+ const vectors = ops.map(o => o(param));
+ if (filter === 'finite' && vectors.some(v => !v.every(e => e.isFinite()))) {
+ return undefined;
+ }
+ return {
+ input: u32(param),
+ expected: anyOf(...vectors),
+ };
+ }
+
+ /**
+ * @returns an array of Cases for operations over a range of inputs
+ * @param params array of inputs to try
+ * @param filter what interval filtering to apply
+ * @param ops callbacks that implement generating an acceptance interval
+ */
+ public generateU32ToIntervalCases(
+ params: readonly number[],
+ filter: IntervalFilter,
+ ...ops: ScalarToVector[]
+ ): Case[] {
+ return params.reduce((cases, e) => {
+ const c = this.makeU32ToVectorCase(e, filter, ...ops);
+ if (c !== undefined) {
+ cases.push(c);
+ }
+ return cases;
+ }, new Array<Case>());
+ }
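+
+ // Usage sketch (comment only; the inputs below are illustrative and not taken
+ // from any test): generateU32ToIntervalCases pairs each u32 input with an
+ // `anyOf` of the acceptance vectors produced by the supplied ops, dropping a
+ // case when the filter is 'finite' and any vector is non-finite.
+ //
+ //   const cases = FP.f32.generateU32ToIntervalCases(
+ //     [0x0000_0000, 0x3c00_3c00, 0xffff_ffff],
+ //     'finite',
+ //     FP.f32.unpack2x16floatInterval
+ //   );
+ //   // 0xffff_ffff unpacks to f16 NaNs, so that case is filtered out.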
+
+ // Framework - API
+
+ private readonly QuantizeToF16IntervalOp: ScalarToIntervalOp = {
+ impl: (n: number): FPInterval => {
+ const rounded = correctlyRoundedF16(n);
+ const flushed = addFlushedIfNeededF16(rounded);
+ return this.spanIntervals(...flushed.map(f => this.toInterval(f)));
+ },
+ };
+
+ protected quantizeToF16IntervalImpl(n: number): FPInterval {
+ return this.runScalarToIntervalOp(this.toInterval(n), this.QuantizeToF16IntervalOp);
+ }
+
+ /** Calculate an acceptance interval of quantizeToF16(x) */
+ public readonly quantizeToF16Interval = this.quantizeToF16IntervalImpl.bind(this);
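+
+ // For example (a rough sketch): 0.1 is not exactly representable in f16, so
+ // quantizeToF16Interval(0.1) spans both correctly rounded f16 neighbours,
+ // approximately [0.0999755859375, 0.10003662109375].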
+
+ /**
+ * Once-allocated ArrayBuffer/views to avoid overhead of allocation when
+ * converting between numeric formats
+ *
+ * unpackData* is shared between all the unpack*Interval functions, so to
+ * avoid re-entrancy problems, they should not call each other or themselves
+ * directly or indirectly.
+ */
+ private readonly unpackData = new ArrayBuffer(4);
+ private readonly unpackDataU32 = new Uint32Array(this.unpackData);
+ private readonly unpackDataU16 = new Uint16Array(this.unpackData);
+ private readonly unpackDataU8 = new Uint8Array(this.unpackData);
+ private readonly unpackDataI16 = new Int16Array(this.unpackData);
+ private readonly unpackDataI8 = new Int8Array(this.unpackData);
+ private readonly unpackDataF16 = new Float16Array(this.unpackData);
+
+ private unpack2x16floatIntervalImpl(n: number): FPVector {
+ assert(
+ n >= kValue.u32.min && n <= kValue.u32.max,
+ 'unpack2x16floatInterval only accepts values within the bounds of u32'
+ );
+ this.unpackDataU32[0] = n;
+ if (this.unpackDataF16.some(f => !isFiniteF16(f))) {
+ return [this.constants().unboundedInterval, this.constants().unboundedInterval];
+ }
+
+ const result: FPVector = [
+ this.quantizeToF16Interval(this.unpackDataF16[0]),
+ this.quantizeToF16Interval(this.unpackDataF16[1]),
+ ];
+
+ if (result.some(r => !r.isFinite())) {
+ return [this.constants().unboundedInterval, this.constants().unboundedInterval];
+ }
+ return result;
+ }
+
+ /** Calculate an acceptance interval vector for unpack2x16float(x) */
+ public readonly unpack2x16floatInterval = this.unpack2x16floatIntervalImpl.bind(this);
+
+ private unpack2x16snormIntervalImpl(n: number): FPVector {
+ assert(
+ n >= kValue.u32.min && n <= kValue.u32.max,
+ 'unpack2x16snormInterval only accepts values within the bounds of u32'
+ );
+ const op = (n: number): FPInterval => {
+ return this.ulpInterval(Math.max(n / 32767, -1), 3);
+ };
+
+ this.unpackDataU32[0] = n;
+ return [op(this.unpackDataI16[0]), op(this.unpackDataI16[1])];
+ }
+
+ /** Calculate an acceptance interval vector for unpack2x16snorm(x) */
+ public readonly unpack2x16snormInterval = this.unpack2x16snormIntervalImpl.bind(this);
+
+ private unpack2x16unormIntervalImpl(n: number): FPVector {
+ assert(
+ n >= kValue.u32.min && n <= kValue.u32.max,
+ 'unpack2x16unormInterval only accepts values within the bounds of u32'
+ );
+ const op = (n: number): FPInterval => {
+ return this.ulpInterval(n / 65535, 3);
+ };
+
+ this.unpackDataU32[0] = n;
+ return [op(this.unpackDataU16[0]), op(this.unpackDataU16[1])];
+ }
+
+ /** Calculate an acceptance interval vector for unpack2x16unorm(x) */
+ public readonly unpack2x16unormInterval = this.unpack2x16unormIntervalImpl.bind(this);
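+
+ // Worked example (assuming the usual little-endian typed-array layout): for
+ // n = 0xffff_0000 the low u16 is 0x0000 and the high u16 is 0xffff, so the
+ // components are ulpInterval(0 / 65535, 3) and ulpInterval(65535 / 65535, 3),
+ // i.e. 3-ULP intervals around 0.0 and 1.0 respectively.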
+
+ private unpack4x8snormIntervalImpl(n: number): FPVector {
+ assert(
+ n >= kValue.u32.min && n <= kValue.u32.max,
+ 'unpack4x8snormInterval only accepts values within the bounds of u32'
+ );
+ const op = (n: number): FPInterval => {
+ return this.ulpInterval(Math.max(n / 127, -1), 3);
+ };
+ this.unpackDataU32[0] = n;
+ return [
+ op(this.unpackDataI8[0]),
+ op(this.unpackDataI8[1]),
+ op(this.unpackDataI8[2]),
+ op(this.unpackDataI8[3]),
+ ];
+ }
+
+ /** Calculate an acceptance interval vector for unpack4x8snorm(x) */
+ public readonly unpack4x8snormInterval = this.unpack4x8snormIntervalImpl.bind(this);
+
+ private unpack4x8unormIntervalImpl(n: number): FPVector {
+ assert(
+ n >= kValue.u32.min && n <= kValue.u32.max,
+ 'unpack4x8unormInterval only accepts values within the bounds of u32'
+ );
+ const op = (n: number): FPInterval => {
+ return this.ulpInterval(n / 255, 3);
+ };
+
+ this.unpackDataU32[0] = n;
+ return [
+ op(this.unpackDataU8[0]),
+ op(this.unpackDataU8[1]),
+ op(this.unpackDataU8[2]),
+ op(this.unpackDataU8[3]),
+ ];
+ }
+
+ /** Calculate an acceptance interval vector for unpack4x8unorm(x) */
+ public readonly unpack4x8unormInterval = this.unpack4x8unormIntervalImpl.bind(this);
+}
+
+// Need to separately allocate f32 traits, so they can be referenced by
+// FPAbstractTraits for forwarding.
+const kF32Traits = new F32Traits();
+
+// Pre-defined values that get used multiple times in _constants' initializers. Cannot use FPTraits members, since this
+// executes before they are defined.
+const kAbstractUnboundedInterval = new FPInterval(
+ 'abstract',
+ Number.NEGATIVE_INFINITY,
+ Number.POSITIVE_INFINITY
+);
+const kAbstractZeroInterval = new FPInterval('abstract', 0);
+
+// This implementation is incomplete
+class FPAbstractTraits extends FPTraits {
+ private static _constants: FPConstants = {
+ positive: {
+ min: kValue.f64.positive.min,
+ max: kValue.f64.positive.max,
+ infinity: kValue.f64.positive.infinity,
+ nearest_max: kValue.f64.positive.nearest_max,
+ less_than_one: kValue.f64.positive.less_than_one,
+ subnormal: {
+ min: kValue.f64.positive.subnormal.min,
+ max: kValue.f64.positive.subnormal.max,
+ },
+ pi: {
+ whole: kValue.f64.positive.pi.whole,
+ three_quarters: kValue.f64.positive.pi.three_quarters,
+ half: kValue.f64.positive.pi.half,
+ third: kValue.f64.positive.pi.third,
+ quarter: kValue.f64.positive.pi.quarter,
+ sixth: kValue.f64.positive.pi.sixth,
+ },
+ e: kValue.f64.positive.e,
+ },
+ negative: {
+ min: kValue.f64.negative.min,
+ max: kValue.f64.negative.max,
+ infinity: kValue.f64.negative.infinity,
+ nearest_min: kValue.f64.negative.nearest_min,
+ less_than_one: kValue.f64.negative.less_than_one,
+ subnormal: {
+ min: kValue.f64.negative.subnormal.min,
+ max: kValue.f64.negative.subnormal.max,
+ },
+ pi: {
+ whole: kValue.f64.negative.pi.whole,
+ three_quarters: kValue.f64.negative.pi.three_quarters,
+ half: kValue.f64.negative.pi.half,
+ third: kValue.f64.negative.pi.third,
+ quarter: kValue.f64.negative.pi.quarter,
+ sixth: kValue.f64.negative.pi.sixth,
+ },
+ },
+ unboundedInterval: kAbstractUnboundedInterval,
+ zeroInterval: kAbstractZeroInterval,
+ // Have to use the constants.ts values here, because values defined in the
+ // initializer cannot be referenced in the initializer
+ negPiToPiInterval: new FPInterval(
+ 'abstract',
+ kValue.f64.negative.pi.whole,
+ kValue.f64.positive.pi.whole
+ ),
+ greaterThanZeroInterval: new FPInterval(
+ 'abstract',
+ kValue.f64.positive.subnormal.min,
+ kValue.f64.positive.max
+ ),
+ zeroVector: {
+ 2: [kAbstractZeroInterval, kAbstractZeroInterval],
+ 3: [kAbstractZeroInterval, kAbstractZeroInterval, kAbstractZeroInterval],
+ 4: [
+ kAbstractZeroInterval,
+ kAbstractZeroInterval,
+ kAbstractZeroInterval,
+ kAbstractZeroInterval,
+ ],
+ },
+ unboundedVector: {
+ 2: [kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ 3: [kAbstractUnboundedInterval, kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ 4: [
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ ],
+ },
+ unboundedMatrix: {
+ 2: {
+ 2: [
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ ],
+ 3: [
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ ],
+ 4: [
+ [
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ ],
+ [
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ ],
+ ],
+ },
+ 3: {
+ 2: [
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ ],
+ 3: [
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ ],
+ 4: [
+ [
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ ],
+ [
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ ],
+ [
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ ],
+ ],
+ },
+ 4: {
+ 2: [
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ ],
+ 3: [
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ [kAbstractUnboundedInterval, kAbstractUnboundedInterval, kAbstractUnboundedInterval],
+ ],
+ 4: [
+ [
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ ],
+ [
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ ],
+ [
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ ],
+ [
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ kAbstractUnboundedInterval,
+ ],
+ ],
+ },
+ },
+ };
+
+ public constructor() {
+ super('abstract');
+ }
+
+ public constants(): FPConstants {
+ return FPAbstractTraits._constants;
+ }
+
+ // Utilities - Overrides
+ // number is represented as a f64 internally, so all number values are already
+ // quantized to f64
+ public readonly quantize = (n: number) => {
+ return n;
+ };
+ public readonly correctlyRounded = correctlyRoundedF64;
+ public readonly isFinite = Number.isFinite;
+ public readonly isSubnormal = isSubnormalNumberF64;
+ public readonly flushSubnormal = flushSubnormalNumberF64;
+ public readonly oneULP = (_target: number, _mode: FlushMode = 'flush'): number => {
+ unreachable(`FPAbstractTraits.oneULP should never be called`);
+ };
+ public readonly scalarBuilder = abstractFloat;
+
+ // Framework - Fundamental Error Intervals - Overrides
+ public readonly absoluteErrorInterval = this.unboundedAbsoluteErrorInterval.bind(this);
+ public readonly correctlyRoundedInterval = this.correctlyRoundedIntervalImpl.bind(this);
+ public readonly correctlyRoundedMatrix = this.correctlyRoundedMatrixImpl.bind(this);
+ public readonly ulpInterval = (n: number, numULP: number): FPInterval => {
+ return this.toInterval(kF32Traits.ulpInterval(n, numULP));
+ };
+
+ // Framework - API - Overrides
+ public readonly absInterval = this.absIntervalImpl.bind(this);
+ public readonly acosInterval = this.unimplementedScalarToInterval.bind(this, 'acosInterval');
+ public readonly acoshAlternativeInterval = this.unimplementedScalarToInterval.bind(
+ this,
+ 'acoshAlternativeInterval'
+ );
+ public readonly acoshPrimaryInterval = this.unimplementedScalarToInterval.bind(
+ this,
+ 'acoshPrimaryInterval'
+ );
+ public readonly acoshIntervals = [this.acoshAlternativeInterval, this.acoshPrimaryInterval];
+ public readonly additionInterval = this.additionIntervalImpl.bind(this);
+ public readonly additionMatrixMatrixInterval = this.additionMatrixMatrixIntervalImpl.bind(this);
+ public readonly asinInterval = this.unimplementedScalarToInterval.bind(this, 'asinInterval');
+ public readonly asinhInterval = this.unimplementedScalarToInterval.bind(this, 'asinhInterval');
+ public readonly atanInterval = this.unimplementedScalarToInterval.bind(this, 'atanInterval');
+ public readonly atan2Interval = this.unimplementedScalarPairToInterval.bind(
+ this,
+ 'atan2Interval'
+ );
+ public readonly atanhInterval = this.unimplementedScalarToInterval.bind(this, 'atanhInterval');
+ public readonly ceilInterval = this.unimplementedScalarToInterval.bind(this, 'ceilInterval');
+ public readonly clampMedianInterval = this.clampMedianIntervalImpl.bind(this);
+ public readonly clampMinMaxInterval = this.clampMinMaxIntervalImpl.bind(this);
+ public readonly clampIntervals = [this.clampMedianInterval, this.clampMinMaxInterval];
+ public readonly cosInterval = this.unimplementedScalarToInterval.bind(this, 'cosInterval');
+ public readonly coshInterval = this.unimplementedScalarToInterval.bind(this, 'coshInterval');
+ public readonly crossInterval = this.crossIntervalImpl.bind(this);
+ public readonly degreesInterval = this.degreesIntervalImpl.bind(this);
+ public readonly determinantInterval = this.unimplementedMatrixToInterval.bind(
+ this,
+ 'determinantInterval'
+ );
+ public readonly distanceInterval = this.unimplementedDistance.bind(this);
+ public readonly divisionInterval = (
+ x: number | FPInterval,
+ y: number | FPInterval
+ ): FPInterval => {
+ return this.toInterval(kF32Traits.divisionInterval(x, y));
+ };
+ public readonly dotInterval = this.unimplementedVectorPairToInterval.bind(this, 'dotInterval');
+ public readonly expInterval = this.unimplementedScalarToInterval.bind(this, 'expInterval');
+ public readonly exp2Interval = this.unimplementedScalarToInterval.bind(this, 'exp2Interval');
+ public readonly faceForwardIntervals = this.unimplementedFaceForward.bind(this);
+ public readonly floorInterval = this.floorIntervalImpl.bind(this);
+ public readonly fmaInterval = this.fmaIntervalImpl.bind(this);
+ public readonly fractInterval = this.unimplementedScalarToInterval.bind(this, 'fractInterval');
+ public readonly inverseSqrtInterval = this.unimplementedScalarToInterval.bind(
+ this,
+ 'inverseSqrtInterval'
+ );
+ public readonly ldexpInterval = this.unimplementedScalarPairToInterval.bind(
+ this,
+ 'ldexpInterval'
+ );
+ public readonly lengthInterval = this.unimplementedLength.bind(this);
+ public readonly logInterval = this.unimplementedScalarToInterval.bind(this, 'logInterval');
+ public readonly log2Interval = this.unimplementedScalarToInterval.bind(this, 'log2Interval');
+ public readonly maxInterval = this.maxIntervalImpl.bind(this);
+ public readonly minInterval = this.minIntervalImpl.bind(this);
+ public readonly mixImpreciseInterval = this.unimplementedScalarTripleToInterval.bind(
+ this,
+ 'mixImpreciseInterval'
+ );
+ public readonly mixPreciseInterval = this.unimplementedScalarTripleToInterval.bind(
+ this,
+ 'mixPreciseInterval'
+ );
+ public readonly mixIntervals = [this.mixImpreciseInterval, this.mixPreciseInterval];
+ public readonly modfInterval = this.modfIntervalImpl.bind(this);
+ public readonly multiplicationInterval = this.multiplicationIntervalImpl.bind(this);
+ public readonly multiplicationMatrixMatrixInterval = this.unimplementedMatrixPairToMatrix.bind(
+ this,
+ 'multiplicationMatrixMatrixInterval'
+ );
+ public readonly multiplicationMatrixScalarInterval = this.unimplementedMatrixScalarToMatrix.bind(
+ this,
+ 'multiplicationMatrixScalarInterval'
+ );
+ public readonly multiplicationScalarMatrixInterval = this.unimplementedScalarMatrixToMatrix.bind(
+ this,
+ 'multiplicationScalarMatrixInterval'
+ );
+ public readonly multiplicationMatrixVectorInterval = this.unimplementedMatrixVectorToVector.bind(
+ this,
+ 'multiplicationMatrixVectorInterval'
+ );
+ public readonly multiplicationVectorMatrixInterval = this.unimplementedVectorMatrixToVector.bind(
+ this,
+ 'multiplicationVectorMatrixInterval'
+ );
+ public readonly negationInterval = this.negationIntervalImpl.bind(this);
+ public readonly normalizeInterval = this.unimplementedVectorToVector.bind(
+ this,
+ 'normalizeInterval'
+ );
+ public readonly powInterval = this.unimplementedScalarPairToInterval.bind(this, 'powInterval');
+ public readonly radiansInterval = this.radiansIntervalImpl.bind(this);
+ public readonly reflectInterval = this.unimplementedVectorPairToVector.bind(
+ this,
+ 'reflectInterval'
+ );
+ public readonly refractInterval = this.unimplementedRefract.bind(this);
+ public readonly remainderInterval = (x: number, y: number): FPInterval => {
+ return this.toInterval(kF32Traits.remainderInterval(x, y));
+ };
+ public readonly roundInterval = this.unimplementedScalarToInterval.bind(this, 'roundInterval');
+ public readonly saturateInterval = this.saturateIntervalImpl.bind(this);
+ public readonly signInterval = this.signIntervalImpl.bind(this);
+ public readonly sinInterval = this.unimplementedScalarToInterval.bind(this, 'sinInterval');
+ public readonly sinhInterval = this.unimplementedScalarToInterval.bind(this, 'sinhInterval');
+ public readonly smoothStepInterval = this.unimplementedScalarTripleToInterval.bind(
+ this,
+ 'smoothStepInterval'
+ );
+ public readonly sqrtInterval = this.unimplementedScalarToInterval.bind(this, 'sqrtInterval');
+ public readonly stepInterval = this.unimplementedScalarPairToInterval.bind(this, 'stepInterval');
+ public readonly subtractionInterval = this.subtractionIntervalImpl.bind(this);
+ public readonly subtractionMatrixMatrixInterval =
+ this.subtractionMatrixMatrixIntervalImpl.bind(this);
+ public readonly tanInterval = this.unimplementedScalarToInterval.bind(this, 'tanInterval');
+ public readonly tanhInterval = this.unimplementedScalarToInterval.bind(this, 'tanhInterval');
+ public readonly transposeInterval = this.transposeIntervalImpl.bind(this);
+ public readonly truncInterval = this.truncIntervalImpl.bind(this);
+}
+
+// Pre-defined values that get used multiple times in _constants' initializers. Cannot use FPTraits members, since this
+// executes before they are defined.
+const kF16UnboundedInterval = new FPInterval(
+ 'f16',
+ Number.NEGATIVE_INFINITY,
+ Number.POSITIVE_INFINITY
+);
+const kF16ZeroInterval = new FPInterval('f16', 0);
+
+// This implementation is incomplete
+class F16Traits extends FPTraits {
+ private static _constants: FPConstants = {
+ positive: {
+ min: kValue.f16.positive.min,
+ max: kValue.f16.positive.max,
+ infinity: kValue.f16.positive.infinity,
+ nearest_max: kValue.f16.positive.nearest_max,
+ less_than_one: kValue.f16.positive.less_than_one,
+ subnormal: {
+ min: kValue.f16.positive.subnormal.min,
+ max: kValue.f16.positive.subnormal.max,
+ },
+ pi: {
+ whole: kValue.f16.positive.pi.whole,
+ three_quarters: kValue.f16.positive.pi.three_quarters,
+ half: kValue.f16.positive.pi.half,
+ third: kValue.f16.positive.pi.third,
+ quarter: kValue.f16.positive.pi.quarter,
+ sixth: kValue.f16.positive.pi.sixth,
+ },
+ e: kValue.f16.positive.e,
+ },
+ negative: {
+ min: kValue.f16.negative.min,
+ max: kValue.f16.negative.max,
+ infinity: kValue.f16.negative.infinity,
+ nearest_min: kValue.f16.negative.nearest_min,
+ less_than_one: kValue.f16.negative.less_than_one,
+ subnormal: {
+ min: kValue.f16.negative.subnormal.min,
+ max: kValue.f16.negative.subnormal.max,
+ },
+ pi: {
+ whole: kValue.f16.negative.pi.whole,
+ three_quarters: kValue.f16.negative.pi.three_quarters,
+ half: kValue.f16.negative.pi.half,
+ third: kValue.f16.negative.pi.third,
+ quarter: kValue.f16.negative.pi.quarter,
+ sixth: kValue.f16.negative.pi.sixth,
+ },
+ },
+ unboundedInterval: kF16UnboundedInterval,
+ zeroInterval: kF16ZeroInterval,
+ // Have to use the constants.ts values here, because values defined in the
+ // initializer cannot be referenced in the initializer
+ negPiToPiInterval: new FPInterval(
+ 'f16',
+ kValue.f16.negative.pi.whole,
+ kValue.f16.positive.pi.whole
+ ),
+ greaterThanZeroInterval: new FPInterval(
+ 'f16',
+ kValue.f16.positive.subnormal.min,
+ kValue.f16.positive.max
+ ),
+ zeroVector: {
+ 2: [kF16ZeroInterval, kF16ZeroInterval],
+ 3: [kF16ZeroInterval, kF16ZeroInterval, kF16ZeroInterval],
+ 4: [kF16ZeroInterval, kF16ZeroInterval, kF16ZeroInterval, kF16ZeroInterval],
+ },
+ unboundedVector: {
+ 2: [kF16UnboundedInterval, kF16UnboundedInterval],
+ 3: [kF16UnboundedInterval, kF16UnboundedInterval, kF16UnboundedInterval],
+ 4: [
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ ],
+ },
+ unboundedMatrix: {
+ 2: {
+ 2: [
+ [kF16UnboundedInterval, kF16UnboundedInterval],
+ [kF16UnboundedInterval, kF16UnboundedInterval],
+ ],
+ 3: [
+ [kF16UnboundedInterval, kF16UnboundedInterval, kF16UnboundedInterval],
+ [kF16UnboundedInterval, kF16UnboundedInterval, kF16UnboundedInterval],
+ ],
+ 4: [
+ [
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ ],
+ [
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ ],
+ ],
+ },
+ 3: {
+ 2: [
+ [kF16UnboundedInterval, kF16UnboundedInterval],
+ [kF16UnboundedInterval, kF16UnboundedInterval],
+ [kF16UnboundedInterval, kF16UnboundedInterval],
+ ],
+ 3: [
+ [kF16UnboundedInterval, kF16UnboundedInterval, kF16UnboundedInterval],
+ [kF16UnboundedInterval, kF16UnboundedInterval, kF16UnboundedInterval],
+ [kF16UnboundedInterval, kF16UnboundedInterval, kF16UnboundedInterval],
+ ],
+ 4: [
+ [
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ ],
+ [
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ ],
+ [
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ ],
+ ],
+ },
+ 4: {
+ 2: [
+ [kF16UnboundedInterval, kF16UnboundedInterval],
+ [kF16UnboundedInterval, kF16UnboundedInterval],
+ [kF16UnboundedInterval, kF16UnboundedInterval],
+ [kF16UnboundedInterval, kF16UnboundedInterval],
+ ],
+ 3: [
+ [kF16UnboundedInterval, kF16UnboundedInterval, kF16UnboundedInterval],
+ [kF16UnboundedInterval, kF16UnboundedInterval, kF16UnboundedInterval],
+ [kF16UnboundedInterval, kF16UnboundedInterval, kF16UnboundedInterval],
+ [kF16UnboundedInterval, kF16UnboundedInterval, kF16UnboundedInterval],
+ ],
+ 4: [
+ [
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ ],
+ [
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ ],
+ [
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ ],
+ [
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ kF16UnboundedInterval,
+ ],
+ ],
+ },
+ },
+ };
+
+ public constructor() {
+ super('f16');
+ }
+
+ public constants(): FPConstants {
+ return F16Traits._constants;
+ }
+
+ // Utilities - Overrides
+ public readonly quantize = quantizeToF16;
+ public readonly correctlyRounded = correctlyRoundedF16;
+ public readonly isFinite = isFiniteF16;
+ public readonly isSubnormal = isSubnormalNumberF16;
+ public readonly flushSubnormal = flushSubnormalNumberF16;
+ public readonly oneULP = oneULPF16;
+ public readonly scalarBuilder = f16;
+
+ // Framework - Fundamental Error Intervals - Overrides
+ public readonly absoluteErrorInterval = this.absoluteErrorIntervalImpl.bind(this);
+ public readonly correctlyRoundedInterval = this.correctlyRoundedIntervalImpl.bind(this);
+ public readonly correctlyRoundedMatrix = this.correctlyRoundedMatrixImpl.bind(this);
+ public readonly ulpInterval = this.ulpIntervalImpl.bind(this);
+
+ // Framework - API - Overrides
+ public readonly absInterval = this.absIntervalImpl.bind(this);
+ public readonly acosInterval = this.acosIntervalImpl.bind(this);
+ public readonly acoshAlternativeInterval = this.acoshAlternativeIntervalImpl.bind(this);
+ public readonly acoshPrimaryInterval = this.acoshPrimaryIntervalImpl.bind(this);
+ public readonly acoshIntervals = [this.acoshAlternativeInterval, this.acoshPrimaryInterval];
+ public readonly additionInterval = this.additionIntervalImpl.bind(this);
+ public readonly additionMatrixMatrixInterval = this.additionMatrixMatrixIntervalImpl.bind(this);
+ public readonly asinInterval = this.asinIntervalImpl.bind(this);
+ public readonly asinhInterval = this.asinhIntervalImpl.bind(this);
+ public readonly atanInterval = this.atanIntervalImpl.bind(this);
+ public readonly atan2Interval = this.atan2IntervalImpl.bind(this);
+ public readonly atanhInterval = this.atanhIntervalImpl.bind(this);
+ public readonly ceilInterval = this.ceilIntervalImpl.bind(this);
+ public readonly clampMedianInterval = this.clampMedianIntervalImpl.bind(this);
+ public readonly clampMinMaxInterval = this.clampMinMaxIntervalImpl.bind(this);
+ public readonly clampIntervals = [this.clampMedianInterval, this.clampMinMaxInterval];
+ public readonly cosInterval = this.cosIntervalImpl.bind(this);
+ public readonly coshInterval = this.coshIntervalImpl.bind(this);
+ public readonly crossInterval = this.crossIntervalImpl.bind(this);
+ public readonly degreesInterval = this.degreesIntervalImpl.bind(this);
+ public readonly determinantInterval = this.determinantIntervalImpl.bind(this);
+ public readonly distanceInterval = this.distanceIntervalImpl.bind(this);
+ public readonly divisionInterval = this.divisionIntervalImpl.bind(this);
+ public readonly dotInterval = this.dotIntervalImpl.bind(this);
+ public readonly expInterval = this.expIntervalImpl.bind(this);
+ public readonly exp2Interval = this.exp2IntervalImpl.bind(this);
+ public readonly faceForwardIntervals = this.faceForwardIntervalsImpl.bind(this);
+ public readonly floorInterval = this.floorIntervalImpl.bind(this);
+ public readonly fmaInterval = this.fmaIntervalImpl.bind(this);
+ public readonly fractInterval = this.fractIntervalImpl.bind(this);
+ public readonly inverseSqrtInterval = this.inverseSqrtIntervalImpl.bind(this);
+ public readonly ldexpInterval = this.ldexpIntervalImpl.bind(this);
+ public readonly lengthInterval = this.lengthIntervalImpl.bind(this);
+ public readonly logInterval = this.logIntervalImpl.bind(this);
+ public readonly log2Interval = this.log2IntervalImpl.bind(this);
+ public readonly maxInterval = this.maxIntervalImpl.bind(this);
+ public readonly minInterval = this.minIntervalImpl.bind(this);
+ public readonly mixImpreciseInterval = this.mixImpreciseIntervalImpl.bind(this);
+ public readonly mixPreciseInterval = this.mixPreciseIntervalImpl.bind(this);
+ public readonly mixIntervals = [this.mixImpreciseInterval, this.mixPreciseInterval];
+ public readonly modfInterval = this.modfIntervalImpl.bind(this);
+ public readonly multiplicationInterval = this.multiplicationIntervalImpl.bind(this);
+ public readonly multiplicationMatrixMatrixInterval =
+ this.multiplicationMatrixMatrixIntervalImpl.bind(this);
+ public readonly multiplicationMatrixScalarInterval =
+ this.multiplicationMatrixScalarIntervalImpl.bind(this);
+ public readonly multiplicationScalarMatrixInterval =
+ this.multiplicationScalarMatrixIntervalImpl.bind(this);
+ public readonly multiplicationMatrixVectorInterval =
+ this.multiplicationMatrixVectorIntervalImpl.bind(this);
+ public readonly multiplicationVectorMatrixInterval =
+ this.multiplicationVectorMatrixIntervalImpl.bind(this);
+ public readonly negationInterval = this.negationIntervalImpl.bind(this);
+ public readonly normalizeInterval = this.normalizeIntervalImpl.bind(this);
+ public readonly powInterval = this.powIntervalImpl.bind(this);
+ public readonly radiansInterval = this.radiansIntervalImpl.bind(this);
+ public readonly reflectInterval = this.reflectIntervalImpl.bind(this);
+ public readonly refractInterval = this.refractIntervalImpl.bind(this);
+ public readonly remainderInterval = this.remainderIntervalImpl.bind(this);
+ public readonly roundInterval = this.roundIntervalImpl.bind(this);
+ public readonly saturateInterval = this.saturateIntervalImpl.bind(this);
+ public readonly signInterval = this.signIntervalImpl.bind(this);
+ public readonly sinInterval = this.sinIntervalImpl.bind(this);
+ public readonly sinhInterval = this.sinhIntervalImpl.bind(this);
+ public readonly smoothStepInterval = this.smoothStepIntervalImpl.bind(this);
+ public readonly sqrtInterval = this.sqrtIntervalImpl.bind(this);
+ public readonly stepInterval = this.stepIntervalImpl.bind(this);
+ public readonly subtractionInterval = this.subtractionIntervalImpl.bind(this);
+ public readonly subtractionMatrixMatrixInterval =
+ this.subtractionMatrixMatrixIntervalImpl.bind(this);
+ public readonly tanInterval = this.tanIntervalImpl.bind(this);
+ public readonly tanhInterval = this.tanhIntervalImpl.bind(this);
+ public readonly transposeInterval = this.transposeIntervalImpl.bind(this);
+ public readonly truncInterval = this.truncIntervalImpl.bind(this);
+}
+
+export const FP = {
+ f32: kF32Traits,
+ f16: new F16Traits(),
+ abstract: new FPAbstractTraits(),
+};
+
+/** @returns the floating-point traits for `type` */
+export function fpTraitsFor(type: ScalarType): FPTraits {
+ switch (type.kind) {
+ case 'abstract-float':
+ return FP.abstract;
+ case 'f32':
+ return FP.f32;
+ case 'f16':
+ return FP.f16;
+ default:
+ unreachable(`unsupported type: ${type}`);
+ }
+}
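+
+// Usage sketch (comment only; `someF32Type` stands for any ScalarType with
+// kind 'f32' and is not defined in this file):
+//
+//   FP.f32.absInterval(-1.5);                     // acceptance interval for abs()
+//   FP.f16.correctlyRoundedInterval(0.1);         // f16 rounding interval for 0.1
+//   fpTraitsFor(someF32Type).constants().positive.max;
+//
+// All three traits objects share the FPTraits interface, so test generators can
+// be parameterized over FP.f32, FP.f16, and FP.abstract.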
+
+/** @returns true if the value `value` is representable with `type` */
+export function isRepresentable(value: number, type: ScalarType) {
+ if (!Number.isFinite(value)) {
+ return false;
+ }
+ if (isFloatType(type)) {
+ const constants = fpTraitsFor(type).constants();
+ return value >= constants.negative.min && value <= constants.positive.max;
+ }
+ assert(false, `isRepresentable() is not yet implemented for type ${type}`);
+}
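+
+// For example (sketch; `someF32Type` and `someAbstractType` are assumed to be
+// ScalarTypes with kinds 'f32' and 'abstract-float'):
+//
+//   isRepresentable(3.5e38, someF32Type);      // false, above f32 positive.max
+//   isRepresentable(3.5e38, someAbstractType); // true, well within the f64 range
+//   isRepresentable(Infinity, someF32Type);    // false, non-finite values are rejected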
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/math.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/math.ts
new file mode 100644
index 0000000000..851db40c71
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/math.ts
@@ -0,0 +1,2247 @@
+import { ROArrayArray, ROArrayArrayArray } from '../../common/util/types.js';
+import { assert } from '../../common/util/util.js';
+import {
+ Float16Array,
+ getFloat16,
+ hfround,
+ setFloat16,
+} from '../../external/petamoriken/float16/float16.js';
+
+import { kBit, kValue } from './constants.js';
+import {
+ reinterpretF64AsU64,
+ reinterpretU64AsF64,
+ reinterpretU32AsF32,
+ reinterpretU16AsF16,
+} from './reinterpret.js';
+
+/**
+ * A multiple of 8 guaranteed to be way too large to allocate (just under 8 pebibytes).
+ * This is a "safe" integer (ULP <= 1.0) very close to MAX_SAFE_INTEGER.
+ *
+ * Note: allocations of this size are likely to exceed limitations other than just the system's
+ * physical memory, so test cases are also needed to try to trigger "true" OOM.
+ */
+export const kMaxSafeMultipleOf8 = Number.MAX_SAFE_INTEGER - 7;
+
+/** Round `n` up to the next multiple of `alignment` (inclusive). */
+// MAINTENANCE_TODO: Rename to `roundUp`
+export function align(n: number, alignment: number): number {
+ assert(Number.isInteger(n) && n >= 0, 'n must be a non-negative integer');
+ assert(Number.isInteger(alignment) && alignment > 0, 'alignment must be a positive integer');
+ return Math.ceil(n / alignment) * alignment;
+}
+
+/** Round `n` down to the next multiple of `alignment` (inclusive). */
+export function roundDown(n: number, alignment: number): number {
+ assert(Number.isInteger(n) && n >= 0, 'n must be a non-negative integer');
+ assert(Number.isInteger(alignment) && alignment > 0, 'alignment must be a positive integer');
+ return Math.floor(n / alignment) * alignment;
+}
+
+/** Clamp a number to the provided range. */
+export function clamp(n: number, { min, max }: { min: number; max: number }): number {
+ assert(max >= min);
+ return Math.min(Math.max(n, min), max);
+}
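+
+// For example:
+//   align(10, 8)                 === 16
+//   roundDown(10, 8)             === 8
+//   clamp(5, { min: 0, max: 3 }) === 3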
+
+/** @returns 0 if |val| is a subnormal f64 number, otherwise returns |val| */
+export function flushSubnormalNumberF64(val: number): number {
+ return isSubnormalNumberF64(val) ? 0 : val;
+}
+
+/** @returns if number is within subnormal range of f64 */
+export function isSubnormalNumberF64(n: number): boolean {
+ return n > kValue.f64.negative.max && n < kValue.f64.positive.min;
+}
+
+/** @returns 0 if |val| is a subnormal f32 number, otherwise returns |val| */
+export function flushSubnormalNumberF32(val: number): number {
+ return isSubnormalNumberF32(val) ? 0 : val;
+}
+
+/** @returns if number is within subnormal range of f32 */
+export function isSubnormalNumberF32(n: number): boolean {
+ return n > kValue.f32.negative.max && n < kValue.f32.positive.min;
+}
+
+/** @returns if number is in the finite range of f32 */
+export function isFiniteF32(n: number) {
+ return n >= kValue.f32.negative.min && n <= kValue.f32.positive.max;
+}
+
+/** @returns 0 if |val| is a subnormal f16 number, otherwise returns |val| */
+export function flushSubnormalNumberF16(val: number): number {
+ return isSubnormalNumberF16(val) ? 0 : val;
+}
+
+/** @returns if number is within subnormal range of f16 */
+export function isSubnormalNumberF16(n: number): boolean {
+ return n > kValue.f16.negative.max && n < kValue.f16.positive.min;
+}
+
+/** @returns if number is in the finite range of f16 */
+export function isFiniteF16(n: number) {
+ return n >= kValue.f16.negative.min && n <= kValue.f16.positive.max;
+}
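+
+// For example, 1e-40 lies between the smallest positive f32 subnormal (~1.4e-45)
+// and the smallest positive normal f32 (~1.18e-38):
+//   isSubnormalNumberF32(1e-40)    === true
+//   flushSubnormalNumberF32(1e-40) === 0
+//   isFiniteF32(3.5e38)            === false   // above f32 positive.max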
+
+/** Should FTZ occur during calculations or not */
+export type FlushMode = 'flush' | 'no-flush';
+
+/** Should nextAfter calculate towards positive infinity or negative infinity */
+export type NextDirection = 'positive' | 'negative';
+
+/**
+ * Once-allocated ArrayBuffer/views to avoid overhead of allocation when
+ * converting between numeric formats
+ *
+ * Usage of a once-allocated pattern like this makes nextAfterF64 non-reentrant,
+ * so it cannot call itself directly or indirectly.
+ */
+const nextAfterF64Data = new ArrayBuffer(8);
+const nextAfterF64Int = new BigUint64Array(nextAfterF64Data);
+const nextAfterF64Float = new Float64Array(nextAfterF64Data);
+
+/**
+ * @returns the next f64 value after |val|, towards +inf or -inf as specified by |dir|.
+
+ * If |mode| is 'flush', all subnormal values will be flushed to 0,
+ * before processing and for -/+0 the nextAfterF64 will be the closest normal in
+ * the correct direction.
+
+ * If |mode| is 'no-flush', the next subnormal will be calculated when appropriate,
+ * and for -/+0 the nextAfterF64 will be the closest subnormal in the correct
+ * direction.
+ *
+ * val needs to be in [min f64, max f64]
+ */
+export function nextAfterF64(val: number, dir: NextDirection, mode: FlushMode): number {
+ if (Number.isNaN(val)) {
+ return val;
+ }
+
+ if (val === Number.POSITIVE_INFINITY) {
+ return kValue.f64.positive.infinity;
+ }
+
+ if (val === Number.NEGATIVE_INFINITY) {
+ return kValue.f64.negative.infinity;
+ }
+
+ assert(
+ val <= kValue.f64.positive.max && val >= kValue.f64.negative.min,
+ `${val} is not in the range of f64`
+ );
+
+ val = mode === 'flush' ? flushSubnormalNumberF64(val) : val;
+
+ // -/+0 === 0 returns true
+ if (val === 0) {
+ if (dir === 'positive') {
+ return mode === 'flush' ? kValue.f64.positive.min : kValue.f64.positive.subnormal.min;
+ } else {
+ return mode === 'flush' ? kValue.f64.negative.max : kValue.f64.negative.subnormal.max;
+ }
+ }
+
+ nextAfterF64Float[0] = val;
+ const is_positive = (nextAfterF64Int[0] & 0x8000_0000_0000_0000n) === 0n;
+ if (is_positive === (dir === 'positive')) {
+ nextAfterF64Int[0] += 1n;
+ } else {
+ nextAfterF64Int[0] -= 1n;
+ }
+
+ // Checking for overflow
+ if ((nextAfterF64Int[0] & 0x7ff0_0000_0000_0000n) === 0x7ff0_0000_0000_0000n) {
+ if (dir === 'positive') {
+ return kValue.f64.positive.infinity;
+ } else {
+ return kValue.f64.negative.infinity;
+ }
+ }
+
+ return mode === 'flush' ? flushSubnormalNumberF64(nextAfterF64Float[0]) : nextAfterF64Float[0];
+}
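+
+// For example, starting from zero (behaviour read directly from the branches above):
+//   nextAfterF64(0, 'positive', 'no-flush') === kValue.f64.positive.subnormal.min
+//   nextAfterF64(0, 'positive', 'flush')    === kValue.f64.positive.min
+//   nextAfterF64(0, 'negative', 'no-flush') === kValue.f64.negative.subnormal.max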
+
+/**
+ * Once-allocated ArrayBuffer/views to avoid overhead of allocation when
+ * converting between numeric formats
+ *
+ * Usage of a once-allocated pattern like this makes nextAfterF32 non-reentrant,
+ * so it cannot call itself directly or indirectly.
+ */
+const nextAfterF32Data = new ArrayBuffer(4);
+const nextAfterF32Int = new Uint32Array(nextAfterF32Data);
+const nextAfterF32Float = new Float32Array(nextAfterF32Data);
+
+/**
+ * @returns the next f32 value after |val|, towards +inf or -inf as specified by |dir|.
+
+ * If |mode| is 'flush', all subnormal values will be flushed to 0,
+ * before processing and for -/+0 the nextAfterF32 will be the closest normal in
+ * the correct direction.
+
+ * If |mode| is 'no-flush', the next subnormal will be calculated when appropriate,
+ * and for -/+0 the nextAfterF32 will be the closest subnormal in the correct
+ * direction.
+ *
+ * val needs to be in [min f32, max f32]
+ */
+export function nextAfterF32(val: number, dir: NextDirection, mode: FlushMode): number {
+ if (Number.isNaN(val)) {
+ return val;
+ }
+
+ if (val === Number.POSITIVE_INFINITY) {
+ return kValue.f32.positive.infinity;
+ }
+
+ if (val === Number.NEGATIVE_INFINITY) {
+ return kValue.f32.negative.infinity;
+ }
+
+ assert(
+ val <= kValue.f32.positive.max && val >= kValue.f32.negative.min,
+ `${val} is not in the range of f32`
+ );
+
+ val = mode === 'flush' ? flushSubnormalNumberF32(val) : val;
+
+ // -/+0 === 0 returns true
+ if (val === 0) {
+ if (dir === 'positive') {
+ return mode === 'flush' ? kValue.f32.positive.min : kValue.f32.positive.subnormal.min;
+ } else {
+ return mode === 'flush' ? kValue.f32.negative.max : kValue.f32.negative.subnormal.max;
+ }
+ }
+
+ nextAfterF32Float[0] = val; // This quantizes from number (f64) to f32
+ if (
+ (dir === 'positive' && nextAfterF32Float[0] <= val) ||
+ (dir === 'negative' && nextAfterF32Float[0] >= val)
+ ) {
+ // val is either f32 precise or quantizing rounded in the opposite direction
+ // from what is needed, so need to calculate the value in the correct
+ // direction.
+ const is_positive = (nextAfterF32Int[0] & 0x80000000) === 0;
+ if (is_positive === (dir === 'positive')) {
+ nextAfterF32Int[0] += 1;
+ } else {
+ nextAfterF32Int[0] -= 1;
+ }
+ }
+
+ // Checking for overflow
+ if ((nextAfterF32Int[0] & 0x7f800000) === 0x7f800000) {
+ if (dir === 'positive') {
+ return kValue.f32.positive.infinity;
+ } else {
+ return kValue.f32.negative.infinity;
+ }
+ }
+
+ return mode === 'flush' ? flushSubnormalNumberF32(nextAfterF32Float[0]) : nextAfterF32Float[0];
+}
+
+/**
+ * Once-allocated ArrayBuffer/views to avoid overhead of allocation when
+ * converting between numeric formats
+ *
+ * Usage of a once-allocated pattern like this makes nextAfterF16 non-reentrant,
+ * so it cannot call itself directly or indirectly.
+ */
+const nextAfterF16Data = new ArrayBuffer(2);
+const nextAfterF16Hex = new Uint16Array(nextAfterF16Data);
+const nextAfterF16Float = new Float16Array(nextAfterF16Data);
+
+/**
+ * @returns the next f16 value after |val|, towards +inf or -inf as specified by |dir|.
+
+ * If |mode| is 'flush', all subnormal values will be flushed to 0,
+ * before processing and for -/+0 the nextAfterF16 will be the closest normal in
+ * the correct direction.
+
+ * If |mode| is 'no-flush', the next subnormal will be calculated when appropriate,
+ * and for -/+0 the nextAfterF16 will be the closest subnormal in the correct
+ * direction.
+ *
+ * val needs to be in [min f16, max f16]
+ */
+export function nextAfterF16(val: number, dir: NextDirection, mode: FlushMode): number {
+ if (Number.isNaN(val)) {
+ return val;
+ }
+
+ if (val === Number.POSITIVE_INFINITY) {
+ return kValue.f16.positive.infinity;
+ }
+
+ if (val === Number.NEGATIVE_INFINITY) {
+ return kValue.f16.negative.infinity;
+ }
+
+ assert(
+ val <= kValue.f16.positive.max && val >= kValue.f16.negative.min,
+ `${val} is not in the range of f16`
+ );
+
+ val = mode === 'flush' ? flushSubnormalNumberF16(val) : val;
+
+ // -/+0 === 0 returns true
+ if (val === 0) {
+ if (dir === 'positive') {
+ return mode === 'flush' ? kValue.f16.positive.min : kValue.f16.positive.subnormal.min;
+ } else {
+ return mode === 'flush' ? kValue.f16.negative.max : kValue.f16.negative.subnormal.max;
+ }
+ }
+
+ nextAfterF16Float[0] = val; // This quantizes from number (f64) to f16
+ if (
+ (dir === 'positive' && nextAfterF16Float[0] <= val) ||
+ (dir === 'negative' && nextAfterF16Float[0] >= val)
+ ) {
+ // val is either f16 precise or quantizing rounded in the opposite direction
+ // from what is needed, so need to calculate the value in the correct
+ // direction.
+ const is_positive = (nextAfterF16Hex[0] & 0x8000) === 0;
+ if (is_positive === (dir === 'positive')) {
+ nextAfterF16Hex[0] += 1;
+ } else {
+ nextAfterF16Hex[0] -= 1;
+ }
+ }
+
+ // Checking for overflow
+ if ((nextAfterF16Hex[0] & 0x7c00) === 0x7c00) {
+ if (dir === 'positive') {
+ return kValue.f16.positive.infinity;
+ } else {
+ return kValue.f16.negative.infinity;
+ }
+ }
+
+ return mode === 'flush' ? flushSubnormalNumberF16(nextAfterF16Float[0]) : nextAfterF16Float[0];
+}
+
+/**
+ * @returns ulp(x), the unit of least precision for a specific number as a 64-bit float
+ *
+ * ulp(x) is the distance between the two floating point numbers nearest x.
+ * This value is also called unit of last place, ULP, and 1 ULP.
+ * See the WGSL spec and http://www.ens-lyon.fr/LIP/Pub/Rapports/RR/RR2005/RR2005-09.pdf
+ * for a more detailed/nuanced discussion of the definition of ulp(x).
+ *
+ * @param target number to calculate ULP for
+ * @param mode should FTZ occur during calculation or not
+ */
+export function oneULPF64(target: number, mode: FlushMode = 'flush'): number {
+ if (Number.isNaN(target)) {
+ return Number.NaN;
+ }
+
+ target = mode === 'flush' ? flushSubnormalNumberF64(target) : target;
+
+ // For values out of bounds for f64 ulp(x) is defined as the
+ // distance between the two nearest f64 representable numbers to the
+ // appropriate edge, which also happens to be the maximum possible ULP.
+ if (
+ target === Number.POSITIVE_INFINITY ||
+ target >= kValue.f64.positive.max ||
+ target === Number.NEGATIVE_INFINITY ||
+ target <= kValue.f64.negative.min
+ ) {
+ return kValue.f64.max_ulp;
+ }
+
+ // ulp(x) is min(after - before), where
+ // before <= x <= after
+ // before =/= after
+ // before and after are f64 representable
+ const before = nextAfterF64(target, 'negative', mode);
+ const after = nextAfterF64(target, 'positive', mode);
+ // Since number is internally a f64, |target| is always f64 representable, so
+ // either before or after will be x
+ return Math.min(target - before, after - target);
+}
+
+/**
+ * @returns ulp(x), the unit of least precision for a specific number as a 32-bit float
+ *
+ * ulp(x) is the distance between the two floating point numbers nearest x.
+ * This value is also called unit of last place, ULP, and 1 ULP.
+ * See the WGSL spec and http://www.ens-lyon.fr/LIP/Pub/Rapports/RR/RR2005/RR2005-09.pdf
+ * for a more detailed/nuanced discussion of the definition of ulp(x).
+ *
+ * @param target number to calculate ULP for
+ * @param mode should FTZ occur during calculation or not
+ */
+export function oneULPF32(target: number, mode: FlushMode = 'flush'): number {
+ if (Number.isNaN(target)) {
+ return Number.NaN;
+ }
+
+ target = mode === 'flush' ? flushSubnormalNumberF32(target) : target;
+
+ // For values out of bounds for f32 ulp(x) is defined as the
+ // distance between the two nearest f32 representable numbers to the
+ // appropriate edge, which also happens to be the maximum possible ULP.
+ if (
+ target === Number.POSITIVE_INFINITY ||
+ target >= kValue.f32.positive.max ||
+ target === Number.NEGATIVE_INFINITY ||
+ target <= kValue.f32.negative.min
+ ) {
+ return kValue.f32.max_ulp;
+ }
+
+ // ulp(x) is min(after - before), where
+ // before <= x <= after
+ // before =/= after
+ // before and after are f32 representable
+ const before = nextAfterF32(target, 'negative', mode);
+ const after = nextAfterF32(target, 'positive', mode);
+ const converted: number = quantizeToF32(target);
+ if (converted === target) {
+ // |target| is f32 representable, so either before or after will be x
+ return Math.min(target - before, after - target);
+ } else {
+ // |target| is not f32 representable, so take the distance between the neighbouring f32s.
+ return after - before;
+ }
+}
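+
+// For example, 1.5 is exactly representable in f32 and sits in the binade [1, 2),
+// where the spacing between adjacent f32 values is 2^-23:
+//   oneULPF32(1.5) === 2 ** -23   // ~1.19e-7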
+
+/**
+ * @returns ulp(x), the unit of least precision for a specific number as a 16-bit float
+ *
+ * ulp(x) is the distance between the two floating point numbers nearest x.
+ * This value is also called unit of last place, ULP, and 1 ULP.
+ * See the WGSL spec and http://www.ens-lyon.fr/LIP/Pub/Rapports/RR/RR2005/RR2005-09.pdf
+ * for a more detailed/nuanced discussion of the definition of ulp(x).
+ *
+ * @param target number to calculate ULP for
+ * @param mode should FTZ occur during calculation or not
+ */
+export function oneULPF16(target: number, mode: FlushMode = 'flush'): number {
+ if (Number.isNaN(target)) {
+ return Number.NaN;
+ }
+
+ target = mode === 'flush' ? flushSubnormalNumberF16(target) : target;
+
+ // For values out of bounds for f16 ulp(x) is defined as the
+ // distance between the two nearest f16 representable numbers to the
+ // appropriate edge, which also happens to be the maximum possible ULP.
+ if (
+ target === Number.POSITIVE_INFINITY ||
+ target >= kValue.f16.positive.max ||
+ target === Number.NEGATIVE_INFINITY ||
+ target <= kValue.f16.negative.min
+ ) {
+ return kValue.f16.max_ulp;
+ }
+
+ // ulp(x) is min(after - before), where
+ // before <= x <= after
+ // before =/= after
+ // before and after are f16 representable
+ const before = nextAfterF16(target, 'negative', mode);
+ const after = nextAfterF16(target, 'positive', mode);
+ const converted: number = quantizeToF16(target);
+ if (converted === target) {
+ // |target| is f16 representable, so either before or after will be x
+ return Math.min(target - before, after - target);
+ } else {
+ // |target| is not f16 representable, so take the distance between the neighbouring f16s.
+ return after - before;
+ }
+}
+
+/**
+ * Calculate the valid roundings when quantizing to 64-bit floats
+ *
+ * TS/JS's number type is internally a f64, so the supplied value will be
+ * quantized by definition. The only corner cases occur if a non-finite value
+ * is provided, since the valid roundings include the appropriate min or max
+ * value.
+ *
+ * @param n number to be quantized
+ * @returns all of the acceptable roundings for quantizing to 64-bits in
+ * ascending order.
+ */
+export function correctlyRoundedF64(n: number): readonly number[] {
+ assert(!Number.isNaN(n), `correctlyRoundedF64 not defined for NaN`);
+ // Above f64 range
+ if (n === Number.POSITIVE_INFINITY) {
+ return [kValue.f64.positive.max, Number.POSITIVE_INFINITY];
+ }
+
+ // Below f64 range
+ if (n === Number.NEGATIVE_INFINITY) {
+ return [Number.NEGATIVE_INFINITY, kValue.f64.negative.min];
+ }
+
+ return [n];
+}
+
+/**
+ * Calculate the valid roundings when quantizing to 32-bit floats
+ *
+ * TS/JS's number type is internally a f64, so quantization needs to occur when
+ * converting to f32 for WGSL. WGSL does not specify a specific rounding mode,
+ * so if a number is not precisely representable in 32 bits, but is within the
+ * range, there are two possible valid quantizations. If it is precisely
+ * representable, there is only one valid quantization. This function calculates
+ * the valid roundings and returns them in an array.
+ *
+ * This function does not consider flushing mode, so subnormals are maintained.
+ * The caller is responsible for flushing before and after as appropriate.
+ *
+ * Out of bounds values need to consider how they interact with the overflow
+ * rules.
+ * * If a value is OOB but not too far out, an implementation may choose to round
+ * to nearest finite value or the correct infinity. This boundary is at
+ * 2^(f32.emax + 1) and -(2^(f32.emax + 1)) respectively.
+ * Values that are at or beyond these limits must be rounded towards the
+ * appropriate infinity.
+ *
+ * @param n number to be quantized
+ * @returns all of the acceptable roundings for quantizing to 32-bits in
+ * ascending order.
+ */
+export function correctlyRoundedF32(n: number): readonly number[] {
+ if (Number.isNaN(n)) {
+ return [n];
+ }
+
+ // Greater than or equal to the upper overflow boundary
+ if (n >= 2 ** (kValue.f32.emax + 1)) {
+ return [Number.POSITIVE_INFINITY];
+ }
+
+ // OOB, but less than the upper overflow boundary
+ if (n > kValue.f32.positive.max) {
+ return [kValue.f32.positive.max, Number.POSITIVE_INFINITY];
+ }
+
+ // f32 finite
+ if (n <= kValue.f32.positive.max && n >= kValue.f32.negative.min) {
+ const n_32 = quantizeToF32(n);
+ if (n === n_32) {
+ // n is precisely expressible as a f32, so should not be rounded
+ return [n];
+ }
+
+ if (n_32 > n) {
+ // n_32 rounded towards +inf, so is after n
+ const other = nextAfterF32(n_32, 'negative', 'no-flush');
+ return [other, n_32];
+ } else {
+ // n_32 rounded towards -inf, so is before n
+ const other = nextAfterF32(n_32, 'positive', 'no-flush');
+ return [n_32, other];
+ }
+ }
+
+ // OOB, but greater than the lower overflow boundary
+ if (n > -(2 ** (kValue.f32.emax + 1))) {
+ return [Number.NEGATIVE_INFINITY, kValue.f32.negative.min];
+ }
+
+ // Less than or equal to the lower overflow boundary
+ return [Number.NEGATIVE_INFINITY];
+}
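+
+// For example, 0.1 has no exact f32 representation, so both neighbouring f32
+// values are acceptable roundings, while 0.25 is exact:
+//   correctlyRoundedF32(0.1)  // ~[0.099999994, 0.100000001], ascending
+//   correctlyRoundedF32(0.25) // [0.25]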
+
+/**
+ * Calculate the valid roundings when quantizing to 16-bit floats
+ *
+ * TS/JS's number type is internally a f64, so quantization needs to occur when
+ * converting to f16 for WGSL. WGSL does not specify a specific rounding mode,
+ * so if a number is not precisely representable in 16 bits, but is within the
+ * range, there are two possible valid quantizations. If it is precisely
+ * representable, there is only one valid quantization. This function calculates
+ * the valid roundings and returns them in an array.
+ *
+ * This function does not consider flushing mode, so subnormals are maintained.
+ * The caller is responsible for flushing before and after as appropriate.
+ *
+ * Out of bounds values need to consider how they interact with the overflow
+ * rules.
+ * * If a value is OOB but not too far out, an implementation may choose to round
+ * to the nearest finite value or the correct infinity. This boundary is at
+ * 2^(f16.emax + 1) and -(2^(f16.emax + 1)) respectively.
+ * Values that are at or beyond these limits must be rounded towards the
+ * appropriate infinity.
+ *
+ * @param n number to be quantized
+ * @returns all of the acceptable roundings for quantizing to 16-bits in
+ * ascending order.
+ */
+export function correctlyRoundedF16(n: number): readonly number[] {
+ if (Number.isNaN(n)) {
+ return [n];
+ }
+
+ // Greater than or equal to the upper overflow boundary
+ if (n >= 2 ** (kValue.f16.emax + 1)) {
+ return [Number.POSITIVE_INFINITY];
+ }
+
+ // OOB, but less than the upper overflow boundary
+ if (n > kValue.f16.positive.max) {
+ return [kValue.f16.positive.max, Number.POSITIVE_INFINITY];
+ }
+
+ // f16 finite
+ if (n <= kValue.f16.positive.max && n >= kValue.f16.negative.min) {
+ const n_16 = quantizeToF16(n);
+ if (n === n_16) {
+ // n is precisely expressible as a f16, so should not be rounded
+ return [n];
+ }
+
+ if (n_16 > n) {
+ // n_16 rounded towards +inf, so is after n
+ const other = nextAfterF16(n_16, 'negative', 'no-flush');
+ return [other, n_16];
+ } else {
+ // n_16 rounded towards -inf, so is before n
+ const other = nextAfterF16(n_16, 'positive', 'no-flush');
+ return [n_16, other];
+ }
+ }
+
+ // OOB, but greater than the lower overflow boundary
+ if (n > -(2 ** (kValue.f16.emax + 1))) {
+ return [Number.NEGATIVE_INFINITY, kValue.f16.negative.min];
+ }
+
+ // Less than or equal to the lower overflow boundary
+ return [Number.NEGATIVE_INFINITY];
+}
+
+/**
+ * Calculates WGSL frexp
+ *
+ * Splits val into a fraction and an exponent so that
+ * val = fraction * 2 ^ exponent.
+ * The fraction is 0.0 or its magnitude is in the range [0.5, 1.0).
+ *
+ * @param val the float to split
+ * @param trait the float type, f32 or f16 or f64
+ * @returns the results of splitting val
+ */
+export function frexp(val: number, trait: 'f32' | 'f16' | 'f64'): { fract: number; exp: number } {
+ const buffer = new ArrayBuffer(8);
+ const dataView = new DataView(buffer);
+
+ // expBitCount and fractBitCount are the bit widths of the exponent and fractional parts of the given FP type.
+ // expBias is the bias constant of exponent of the given FP type.
+ // Biased exponent (unsigned integer, i.e. the exponent part of float) = unbiased exponent (signed integer) + expBias.
+ let expBitCount: number, fractBitCount: number, expBias: number;
+ // To handle the exponent bits of given FP types (f16, f32, and f64), considering the highest 16
+ // bits is enough.
+ // expMaskForHigh16Bits indicates the exponent bitfield in the highest 16 bits of the given FP
+ // type, and targetExpBitsForHigh16Bits holds the exponent bits corresponding to unbiased
+ // exponent -1, i.e. the exponent bits when the FP value is in the range [0.5, 1.0).
+ let expMaskForHigh16Bits: number, targetExpBitsForHigh16Bits: number;
+ // Helper function that stores the given FP value into the buffer as the given FP type
+ let setFloatToBuffer: (v: number) => void;
+ // Helper function that reads back the FP value from the buffer as the given FP type
+ let getFloatFromBuffer: () => number;
+
+ let isFinite: (v: number) => boolean;
+ let isSubnormal: (v: number) => boolean;
+
+ if (trait === 'f32') {
+ // f32 bit pattern: s_eeeeeeee_fffffff_ffffffffffffffff
+ expBitCount = 8;
+ fractBitCount = 23;
+ expBias = 127;
+ // The exponent bitmask for high 16 bits of f32.
+ expMaskForHigh16Bits = 0x7f80;
+ // The target exponent bits is equal to those for f32 0.5 = 0x3f000000.
+ targetExpBitsForHigh16Bits = 0x3f00;
+ isFinite = isFiniteF32;
+ isSubnormal = isSubnormalNumberF32;
+ // Enforce big-endian so that offset 0 is highest byte.
+ setFloatToBuffer = (v: number) => dataView.setFloat32(0, v, false);
+ getFloatFromBuffer = () => dataView.getFloat32(0, false);
+ } else if (trait === 'f16') {
+ // f16 bit pattern: s_eeeee_ffffffffff
+ expBitCount = 5;
+ fractBitCount = 10;
+ expBias = 15;
+ // The exponent bitmask for 16 bits of f16.
+ expMaskForHigh16Bits = 0x7c00;
+ // The target exponent bits is equal to those for f16 0.5 = 0x3800.
+ targetExpBitsForHigh16Bits = 0x3800;
+ isFinite = isFiniteF16;
+ isSubnormal = isSubnormalNumberF16;
+ // Enforce big-endian so that offset 0 is highest byte.
+ setFloatToBuffer = (v: number) => setFloat16(dataView, 0, v, false);
+ getFloatFromBuffer = () => getFloat16(dataView, 0, false);
+ } else {
+ assert(trait === 'f64');
+ // f64 bit pattern: s_eeeeeeeeeee_ffff_ffffffffffffffffffffffffffffffffffffffffffffffff
+ expBitCount = 11;
+ fractBitCount = 52;
+ expBias = 1023;
+ // The exponent bitmask for 16 bits of f64.
+ expMaskForHigh16Bits = 0x7ff0;
+ // The target exponent bits is equal to those for f64 0.5 = 0x3fe0_0000_0000_0000.
+ targetExpBitsForHigh16Bits = 0x3fe0;
+ isFinite = Number.isFinite;
+ isSubnormal = isSubnormalNumberF64;
+ // Enforce big-endian so that offset 0 is highest byte.
+ setFloatToBuffer = (v: number) => dataView.setFloat64(0, v, false);
+ getFloatFromBuffer = () => dataView.getFloat64(0, false);
+ }
+ // Helper function that extracts the unbiased exponent of the float in the buffer.
+ const extractUnbiasedExpFromNormalFloatInBuffer = () => {
+ // Assert the float in buffer is finite normal float.
+ assert(isFinite(getFloatFromBuffer()) && !isSubnormal(getFloatFromBuffer()));
+ // Get the highest 16 bits of the float as a uint16, which contains the whole exponent part for f16, f32, and f64.
+ const high16BitsAsUint16 = dataView.getUint16(0, false);
+ // Return the unbiased exp by masking, shifting and unbiasing.
+ return ((high16BitsAsUint16 & expMaskForHigh16Bits) >> (16 - 1 - expBitCount)) - expBias;
+ };
+ // Helper function that modifies the exponent of the float in the buffer to put it in the range [0.5, 1.0).
+ // By setting the unbiased exponent to -1, the fp value will be in range 2**-1 * [1.0, 2.0), i.e. [0.5, 1.0).
+ const modifyExpOfNormalFloatInBuffer = () => {
+ // Assert the float in buffer is finite normal float.
+ assert(isFinite(getFloatFromBuffer()) && !isSubnormal(getFloatFromBuffer()));
+ // Get the highest 16 bits of the float as a uint16, which contains the whole exponent part for f16, f32, and f64.
+ const high16BitsAsUint16 = dataView.getUint16(0, false);
+ // Modify the exponent bits.
+ const modifiedHigh16Bits =
+ (high16BitsAsUint16 & ~expMaskForHigh16Bits) | targetExpBitsForHigh16Bits;
+ // Set back to buffer
+ dataView.setUint16(0, modifiedHigh16Bits, false);
+ };
+
+ // +/- 0.0
+ if (val === 0) {
+ return { fract: val, exp: 0 };
+ }
+ // NaN and Inf
+ if (!isFinite(val)) {
+ return { fract: val, exp: 0 };
+ }
+
+ setFloatToBuffer(val);
+ // Don't use val below. Use helper functions working with buffer instead.
+
+ let exp = 0;
+ // Normalize the value if it is subnormal. Multiplying a subnormal value by 2**fractBitCount
+ // increases the exponent and results in a finite normal FP value of the given FP type.
+ if (isSubnormal(getFloatFromBuffer())) {
+ setFloatToBuffer(getFloatFromBuffer() * 2 ** fractBitCount);
+ exp = -fractBitCount;
+ }
+ // A normal FP value v is represented as v = ((-1)**s)*(2**(unbiased exponent))*f, where f is in
+ // range [1.0, 2.0). By moving a factor 2 from f to exponent, we have
+ // v = ((-1)**s)*(2**(unbiased exponent + 1))*(f / 2), where (f / 2) is in range [0.5, 1.0), so
+ // the exp = (unbiased exponent + 1) and fract = ((-1)**s)*(f / 2) is what we expect to get from
+ // frexp function. Note that fract and v only differ in the exponent bitfield as long as v is normal.
+ // Calculate the result exp by getting the unbiased float exponent and adding 1.
+ exp += extractUnbiasedExpFromNormalFloatInBuffer() + 1;
+ // Modify the exponent of the float in the buffer to put it in the range [0.5, 1.0) to get fract.
+ modifyExpOfNormalFloatInBuffer();
+
+ return { fract: getFloatFromBuffer(), exp };
+}
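+
+// Illustrative sketch (not part of the original utility): frexp splits a value into a
+// fraction in [0.5, 1.0) (or zero) and an exponent such that fract * 2 ** exp reconstructs
+// the input.
+//
+//   frexp(8, 'f32');     // { fract: 0.5, exp: 4 }   since 8 === 0.5 * 2 ** 4
+//   frexp(-1.5, 'f16');  // { fract: -0.75, exp: 1 } since -1.5 === -0.75 * 2 ** 1
+//   frexp(0, 'f64');     // { fract: 0, exp: 0 }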
+
+/**
+ * Calculates the linear interpolation between two values for a given fraction.
+ *
+ * If |t| is 0, |a| is returned; if |t| is 1, |b| is returned; otherwise
+ * interpolation/extrapolation equivalent to a + t(b - a) is performed.
+ *
+ * Numerically stable version is adapted from http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0811r2.html
+ */
+export function lerp(a: number, b: number, t: number): number {
+ if (!Number.isFinite(a) || !Number.isFinite(b)) {
+ return Number.NaN;
+ }
+
+ if ((a <= 0.0 && b >= 0.0) || (a >= 0.0 && b <= 0.0)) {
+ return t * b + (1 - t) * a;
+ }
+
+ if (t === 1.0) {
+ return b;
+ }
+
+ const x = a + t * (b - a);
+ return t > 1.0 === b > a ? Math.max(b, x) : Math.min(b, x);
+}
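+
+// Illustrative examples (not part of the original utility): lerp returns the endpoints
+// exactly at t === 0 and t === 1, and interpolates in between.
+//
+//   lerp(2, 10, 0);    // 2
+//   lerp(2, 10, 1);    // 10
+//   lerp(2, 10, 0.5);  // 6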
+
+/**
+ * Version of lerp that operates on bigint values
+ *
+ * lerp was not made into a generic or to take in (number|bigint), because that
+ * introduces a bunch of complexity overhead related to type differentiation
+ */
+export function lerpBigInt(a: bigint, b: bigint, idx: number, steps: number): bigint {
+ assert(Math.trunc(idx) === idx);
+ assert(Math.trunc(steps) === steps);
+
+ // This constrains t to [0.0, 1.0]
+ assert(idx >= 0);
+ assert(steps > 0);
+ assert(idx < steps);
+
+ if (steps === 1) {
+ return a;
+ }
+ if (idx === 0) {
+ return a;
+ }
+ if (idx === steps - 1) {
+ return b;
+ }
+
+ const min = (x: bigint, y: bigint): bigint => {
+ return x < y ? x : y;
+ };
+ const max = (x: bigint, y: bigint): bigint => {
+ return x > y ? x : y;
+ };
+
+ // For numbers the variable t is used, where t = idx / (steps - 1),
+ // but that is a fraction in [0, 1], so it becomes either 0 or 1 when converted
+ // to bigint; the expression needs to be expanded out instead.
+ const big_idx = BigInt(idx);
+ const big_steps = BigInt(steps);
+ if ((a <= 0n && b >= 0n) || (a >= 0n && b <= 0n)) {
+ return (b * big_idx) / (big_steps - 1n) + (a - (a * big_idx) / (big_steps - 1n));
+ }
+
+ const x = a + (b * big_idx) / (big_steps - 1n) - (a * big_idx) / (big_steps - 1n);
+ return !(b > a) ? max(b, x) : min(b, x);
+}
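+
+// Illustrative example (not part of the original utility): lerpBigInt takes a step index
+// and step count instead of a fraction t.
+//
+//   lerpBigInt(0n, 100n, 5, 11);   // 50n - the midpoint of an 11-step range
+//   lerpBigInt(0n, 100n, 10, 11);  // 100n - the final step returns the endpoint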
+
+/** @returns a linearly increasing range of numbers. */
+export function linearRange(a: number, b: number, num_steps: number): readonly number[] {
+ if (num_steps <= 0) {
+ return [];
+ }
+
+ // Avoid division by 0
+ if (num_steps === 1) {
+ return [a];
+ }
+
+ return Array.from(Array(num_steps).keys()).map(i => lerp(a, b, i / (num_steps - 1)));
+}
+
+/**
+ * Version of linearRange that operates on bigint values
+ *
+ * linearRange was not made into a generic or to take in (number|bigint),
+ * because that introduces a bunch of complexity overhead related to type
+ * differentiation
+ */
+export function linearRangeBigInt(a: bigint, b: bigint, num_steps: number): Array<bigint> {
+ if (num_steps <= 0) {
+ return [];
+ }
+
+ // Avoid division by 0
+ if (num_steps === 1) {
+ return [a];
+ }
+
+ return Array.from(Array(num_steps).keys()).map(i => lerpBigInt(a, b, i, num_steps));
+}
+
+/**
+ * @returns a non-linear increasing range of numbers, with a bias towards the beginning.
+ *
+ * Generates a linear range on [0,1] with |num_steps|, then squares all the values to make the curve quadratic,
+ * thus biasing towards 0, but remaining on the [0, 1] range.
+ * This biased range is then scaled to the desired range using lerp.
+ * Different curves could be generated by changing c, where greater values of c will bias more towards 0.
+ */
+export function biasedRange(a: number, b: number, num_steps: number): readonly number[] {
+ const c = 2;
+ if (num_steps <= 0) {
+ return [];
+ }
+
+ // Avoid division by 0
+ if (num_steps === 1) {
+ return [a];
+ }
+
+ return Array.from(Array(num_steps).keys()).map(i => lerp(a, b, Math.pow(i / (num_steps - 1), c)));
+}
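+
+// Illustrative examples (not part of the original utility): contrasting the even spacing
+// of linearRange with the quadratic bias of biasedRange.
+//
+//   linearRange(0, 10, 5);  // [0, 2.5, 5, 7.5, 10]
+//   biasedRange(0, 10, 5);  // [0, 0.625, 2.5, 5.625, 10] - clustered towards 0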
+
+/**
+ * @returns an ascending sorted array of numbers spread over the entire range of 32-bit floats
+ *
+ * Numbers are divided into 4 regions: negative normals, negative subnormals, positive subnormals & positive normals.
+ * Zero is included.
+ *
+ * Numbers are generated via taking a linear spread of the bit field representations of the values in each region. This
+ * means that the number of precise f32 values between successive returned values in a region should be about the same. This allows
+ * for a wide range of magnitudes to be generated, instead of being extremely biased towards the edges of the f32 range.
+ *
+ * This function is intended to provide dense coverage of the f32 range. For a minimal list of values to use to cover
+ * f32 behaviour, use sparseF32Range instead.
+ *
+ * @param counts structure param with 4 entries indicating the number of entries to be generated for each region; entries
+ * must be 0 or greater.
+ */
+export function fullF32Range(
+ counts: {
+ neg_norm?: number;
+ neg_sub?: number;
+ pos_sub: number;
+ pos_norm: number;
+ } = { pos_sub: 10, pos_norm: 50 }
+): Array<number> {
+ counts.neg_norm = counts.neg_norm === undefined ? counts.pos_norm : counts.neg_norm;
+ counts.neg_sub = counts.neg_sub === undefined ? counts.pos_sub : counts.neg_sub;
+
+ // Generating bit fields first and then converting to f32, so that the spread across the possible f32 values is more
+ // even. Generating against the bounds of f32 values directly results in the values being extremely biased towards the
+ // extremes, since they are so much larger.
+ const bit_fields = [
+ ...linearRange(kBit.f32.negative.min, kBit.f32.negative.max, counts.neg_norm),
+ ...linearRange(
+ kBit.f32.negative.subnormal.min,
+ kBit.f32.negative.subnormal.max,
+ counts.neg_sub
+ ),
+ // -0.0
+ 0x80000000,
+ // +0.0
+ 0,
+ ...linearRange(
+ kBit.f32.positive.subnormal.min,
+ kBit.f32.positive.subnormal.max,
+ counts.pos_sub
+ ),
+ ...linearRange(kBit.f32.positive.min, kBit.f32.positive.max, counts.pos_norm),
+ ].map(Math.trunc);
+ return bit_fields.map(reinterpretU32AsF32);
+}
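+
+// Illustrative example (not part of the original utility): the returned array contains
+// neg_norm + neg_sub + pos_sub + pos_norm entries plus -0.0 and +0.0.
+//
+//   fullF32Range({ neg_norm: 2, neg_sub: 1, pos_sub: 1, pos_norm: 2 }).length;  // 8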
+
+/**
+ * @returns an ascending sorted array of numbers.
+ *
+ * The numbers returned are based on `fullF32Range` as described above. The difference depends
+ * on the `source` parameter. If the `source` is `const` then the numbers will be restricted to be
+ * in the range `[low, high]`. This allows filtering out a set of `f32` values which are invalid for
+ * const-evaluation but are needed to test the non-const implementation.
+ *
+ * @param source the input source for the test. If the `source` is `const` then the return will be filtered
+ * @param low the lowest f32 value to permit when filtered
+ * @param high the highest f32 value to permit when filtered
+ */
+export function sourceFilteredF32Range(source: string, low: number, high: number): Array<number> {
+ return fullF32Range().filter(x => source !== 'const' || (x >= low && x <= high));
+}
+
+/**
+ * @returns an ascending sorted array of numbers spread over the entire range of 16-bit floats
+ *
+ * Numbers are divided into 4 regions: negative normals, negative subnormals, positive subnormals & positive normals.
+ * Zero is included.
+ *
+ * Numbers are generated via taking a linear spread of the bit field representations of the values in each region. This
+ * means that the number of precise f16 values between successive returned values in a region should be about the same. This allows
+ * for a wide range of magnitudes to be generated, instead of being extremely biased towards the edges of the f16 range.
+ *
+ * This function is intended to provide dense coverage of the f16 range. For a minimal list of values to use to cover
+ * f16 behaviour, use sparseF16Range instead.
+ *
+ * @param counts structure param with 4 entries indicating the number of entries to be generated for each region; entries
+ * must be 0 or greater.
+ */
+export function fullF16Range(
+ counts: {
+ neg_norm?: number;
+ neg_sub?: number;
+ pos_sub: number;
+ pos_norm: number;
+ } = { pos_sub: 10, pos_norm: 50 }
+): Array<number> {
+ counts.neg_norm = counts.neg_norm === undefined ? counts.pos_norm : counts.neg_norm;
+ counts.neg_sub = counts.neg_sub === undefined ? counts.pos_sub : counts.neg_sub;
+
+ // Generating bit fields first and then converting to f16, so that the spread across the possible f16 values is more
+ // even. Generating against the bounds of f16 values directly results in the values being extremely biased towards the
+ // extremes, since they are so much larger.
+ const bit_fields = [
+ ...linearRange(kBit.f16.negative.min, kBit.f16.negative.max, counts.neg_norm),
+ ...linearRange(
+ kBit.f16.negative.subnormal.min,
+ kBit.f16.negative.subnormal.max,
+ counts.neg_sub
+ ),
+ // -0.0
+ 0x8000,
+ // +0.0
+ 0,
+ ...linearRange(
+ kBit.f16.positive.subnormal.min,
+ kBit.f16.positive.subnormal.max,
+ counts.pos_sub
+ ),
+ ...linearRange(kBit.f16.positive.min, kBit.f16.positive.max, counts.pos_norm),
+ ].map(Math.trunc);
+ return bit_fields.map(reinterpretU16AsF16);
+}
+
+/**
+ * @returns an ascending sorted array of numbers spread over the entire range of 64-bit floats
+ *
+ * Numbers are divided into 4 regions: negative normals, negative subnormals, positive subnormals & positive normals.
+ * Zero is included.
+ *
+ * Numbers are generated via taking a linear spread of the bit field representations of the values in each region. This
+ * means that the number of precise f64 values between successive returned values in a region should be about the same. This allows
+ * for a wide range of magnitudes to be generated, instead of being extremely biased towards the edges of the f64 range.
+ *
+ * This function is intended to provide dense coverage of the f64 range. For a minimal list of values to use to cover
+ * f64 behaviour, use sparseF64Range instead.
+ *
+ * @param counts structure param with 4 entries indicating the number of entries to be generated for each region; entries
+ * must be 0 or greater.
+ */
+export function fullF64Range(
+ counts: {
+ neg_norm?: number;
+ neg_sub?: number;
+ pos_sub: number;
+ pos_norm: number;
+ } = { pos_sub: 10, pos_norm: 50 }
+): Array<number> {
+ counts.neg_norm = counts.neg_norm === undefined ? counts.pos_norm : counts.neg_norm;
+ counts.neg_sub = counts.neg_sub === undefined ? counts.pos_sub : counts.neg_sub;
+
+ // Generating bit fields first and then converting to f64, so that the spread across the possible f64 values is more
+ // even. Generating against the bounds of f64 values directly results in the values being extremely biased towards the
+ // extremes, since they are so much larger.
+ const bit_fields = [
+ ...linearRangeBigInt(kBit.f64.negative.min, kBit.f64.negative.max, counts.neg_norm),
+ ...linearRangeBigInt(
+ kBit.f64.negative.subnormal.min,
+ kBit.f64.negative.subnormal.max,
+ counts.neg_sub
+ ),
+ // -0.0
+ 0x8000_0000_0000_0000n,
+ // +0.0
+ 0n,
+ ...linearRangeBigInt(
+ kBit.f64.positive.subnormal.min,
+ kBit.f64.positive.subnormal.max,
+ counts.pos_sub
+ ),
+ ...linearRangeBigInt(kBit.f64.positive.min, kBit.f64.positive.max, counts.pos_norm),
+ ];
+ return bit_fields.map(reinterpretU64AsF64);
+}
+
+/**
+ * @returns an ascending sorted array of f64 values spread over specific range of f64 normal floats
+ *
+ * Numbers are divided into 4 regions: negative 64-bit normals, negative 64-bit subnormals, positive 64-bit subnormals &
+ * positive 64-bit normals.
+ * Zero is included.
+ *
+ * Numbers are generated via taking a linear spread of the bit field representations of the values in each region. This
+ * means that the number of precise f64 values between successive returned values in a region should be about the same. This allows
+ * for a wide range of magnitudes to be generated, instead of being extremely biased towards the edges of the range.
+ *
+ * @param begin a negative f64 normal float value
+ * @param end a positive f64 normal float value
+ * @param counts structure param with 4 entries indicating the number of entries
+ * to be generated for each region; entries must be 0 or greater.
+ */
+export function filteredF64Range(
+ begin: number,
+ end: number,
+ counts: { neg_norm?: number; neg_sub?: number; pos_sub: number; pos_norm: number } = {
+ pos_sub: 10,
+ pos_norm: 50,
+ }
+): Array<number> {
+ assert(
+ begin <= kValue.f64.negative.max,
+ `Beginning of range ${begin} must be negative f64 normal`
+ );
+ assert(end >= kValue.f64.positive.min, `Ending of range ${end} must be positive f64 normal`);
+
+ counts.neg_norm = counts.neg_norm === undefined ? counts.pos_norm : counts.neg_norm;
+ counts.neg_sub = counts.neg_sub === undefined ? counts.pos_sub : counts.neg_sub;
+
+ const u64_begin = reinterpretF64AsU64(begin);
+ const u64_end = reinterpretF64AsU64(end);
+ // Generating bit fields first and then converting to f64, so that the spread across the possible f64 values is more
+ // even. Generating against the bounds of f64 values directly results in the values being extremely biased towards the
+ // extremes, since they are so much larger.
+ const bit_fields = [
+ ...linearRangeBigInt(u64_begin, kBit.f64.negative.max, counts.neg_norm),
+ ...linearRangeBigInt(
+ kBit.f64.negative.subnormal.min,
+ kBit.f64.negative.subnormal.max,
+ counts.neg_sub
+ ),
+ // -0.0
+ 0x8000_0000_0000_0000n,
+ // +0.0
+ 0n,
+ ...linearRangeBigInt(
+ kBit.f64.positive.subnormal.min,
+ kBit.f64.positive.subnormal.max,
+ counts.pos_sub
+ ),
+ ...linearRangeBigInt(kBit.f64.positive.min, u64_end, counts.pos_norm),
+ ];
+ return bit_fields.map(reinterpretU64AsF64);
+}
+
+/** Short list of i32 values of interest to test against */
+const kInterestingI32Values: readonly number[] = [
+ kValue.i32.negative.max,
+ Math.trunc(kValue.i32.negative.max / 2),
+ -256,
+ -10,
+ -1,
+ 0,
+ 1,
+ 10,
+ 256,
+ Math.trunc(kValue.i32.positive.max / 2),
+ kValue.i32.positive.max,
+];
+
+/** @returns minimal i32 values that cover the entire range of i32 behaviours
+ *
+ * This is used instead of fullI32Range when the number of test cases being
+ * generated is a superlinear function of the number of i32 values, which
+ * leads to timeouts.
+ */
+export function sparseI32Range(): readonly number[] {
+ return kInterestingI32Values;
+}
+
+const kVectorI32Values = {
+ 2: kInterestingI32Values.flatMap(f => [
+ [f, 1],
+ [1, f],
+ [f, -1],
+ [-1, f],
+ ]),
+ 3: kInterestingI32Values.flatMap(f => [
+ [f, 1, 2],
+ [1, f, 2],
+ [1, 2, f],
+ [f, -1, -2],
+ [-1, f, -2],
+ [-1, -2, f],
+ ]),
+ 4: kInterestingI32Values.flatMap(f => [
+ [f, 1, 2, 3],
+ [1, f, 2, 3],
+ [1, 2, f, 3],
+ [1, 2, 3, f],
+ [f, -1, -2, -3],
+ [-1, f, -2, -3],
+ [-1, -2, f, -3],
+ [-1, -2, -3, f],
+ ]),
+};
+
+/**
+ * Returns a set of vectors, indexed by dimension, containing interesting i32
+ * values.
+ *
+ * The tests do not do the simple option for coverage of computing the cartesian
+ * product of all of the interesting i32 values N times for vecN tests,
+ * because that creates a huge number of tests for vec3 and vec4, leading to
+ * time outs.
+ *
+ * Instead they insert the interesting i32 values into each location of the
+ * vector to get a spread of testing over the entire range. This reduces the
+ * number of cases being run substantially, but maintains coverage.
+ */
+export function vectorI32Range(dim: number): ROArrayArray<number> {
+ assert(dim === 2 || dim === 3 || dim === 4, 'vectorI32Range only accepts dimensions 2, 3, and 4');
+ return kVectorI32Values[dim];
+}
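+
+// Illustrative example (not part of the original utility): each interesting i32 value v is
+// placed in every lane of the vector, padded with small constants in the remaining lanes.
+//
+//   vectorI32Range(2);  // [[v, 1], [1, v], [v, -1], [-1, v], ...] for each interesting value v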
+
+/**
+ * @returns an ascending sorted array of numbers spread over the entire range of 32-bit signed ints
+ *
+ * Numbers are divided into 2 regions: negatives, and positives, with their spreads biased towards 0
+ * Zero is included in range.
+ *
+ * @param counts structure param with 2 entries indicating the number of entries to be generated each region, values must be 0 or greater.
+ */
+export function fullI32Range(
+ counts: {
+ negative?: number;
+ positive: number;
+ } = { positive: 50 }
+): Array<number> {
+ counts.negative = counts.negative === undefined ? counts.positive : counts.negative;
+ return [
+ ...biasedRange(kValue.i32.negative.min, -1, counts.negative),
+ 0,
+ ...biasedRange(1, kValue.i32.positive.max, counts.positive),
+ ].map(Math.trunc);
+}
+
+/** Short list of u32 values of interest to test against */
+const kInterestingU32Values: readonly number[] = [
+ 0,
+ 1,
+ 10,
+ 256,
+ Math.trunc(kValue.u32.max / 2),
+ kValue.u32.max,
+];
+
+/** @returns minimal u32 values that cover the entire range of u32 behaviours
+ *
+ * This is used instead of fullU32Range when the number of test cases being
+ * generated is a superlinear function of the number of u32 values, which
+ * leads to timeouts.
+ */
+export function sparseU32Range(): readonly number[] {
+ return kInterestingU32Values;
+}
+
+const kVectorU32Values = {
+ 2: kInterestingU32Values.flatMap(f => [
+ [f, 1],
+ [1, f],
+ ]),
+ 3: kInterestingU32Values.flatMap(f => [
+ [f, 1, 2],
+ [1, f, 2],
+ [1, 2, f],
+ ]),
+ 4: kInterestingU32Values.flatMap(f => [
+ [f, 1, 2, 3],
+ [1, f, 2, 3],
+ [1, 2, f, 3],
+ [1, 2, 3, f],
+ ]),
+};
+
+/**
+ * Returns set of vectors, indexed by dimension containing interesting u32
+ * values.
+ *
+ * The tests do not do the simple option for coverage of computing the cartesian
+ * product of all of the interesting u32 values N times for vecN tests,
+ * because that creates a huge number of tests for vec3 and vec4, leading to
+ * time outs.
+ *
+ * Instead they insert the interesting u32 values into each location of the
+ * vector to get a spread of testing over the entire range. This reduces the
+ * number of cases being run substantially, but maintains coverage.
+ */
+export function vectorU32Range(dim: number): ROArrayArray<number> {
+ assert(dim === 2 || dim === 3 || dim === 4, 'vectorU32Range only accepts dimensions 2, 3, and 4');
+ return kVectorU32Values[dim];
+}
+
+/**
+ * @returns an ascending sorted array of numbers spread over the entire range of 32-bit unsigned ints
+ *
+ * Numbers are biased towards 0, and 0 is included in the range.
+ *
+ * @param count number of entries to include in the range, in addition to 0, must be greater than 0, defaults to 50
+ */
+export function fullU32Range(count: number = 50): Array<number> {
+ return [0, ...biasedRange(1, kValue.u32.max, count)].map(Math.trunc);
+}
+
+/** Short list of f32 values of interest to test against */
+const kInterestingF32Values: readonly number[] = [
+ kValue.f32.negative.min,
+ -10.0,
+ -1.0,
+ -0.125,
+ kValue.f32.negative.max,
+ kValue.f32.negative.subnormal.min,
+ kValue.f32.negative.subnormal.max,
+ -0.0,
+ 0.0,
+ kValue.f32.positive.subnormal.min,
+ kValue.f32.positive.subnormal.max,
+ kValue.f32.positive.min,
+ 0.125,
+ 1.0,
+ 10.0,
+ kValue.f32.positive.max,
+];
+
+/** @returns minimal f32 values that cover the entire range of f32 behaviours
+ *
+ * Has specially selected values that cover edge cases, normals, and subnormals.
+ * This is used instead of fullF32Range when the number of test cases being
+ * generated is a superlinear function of the number of f32 values, which
+ * leads to timeouts.
+ *
+ * These values have been chosen to attempt to test the widest range of f32
+ * behaviours in the lowest number of entries, so may potentially miss function
+ * specific values of interest. If there are known values of interest they
+ * should be appended to this list in the test generation code.
+ */
+export function sparseF32Range(): readonly number[] {
+ return kInterestingF32Values;
+}
+
+const kVectorF32Values = {
+ 2: sparseF32Range().flatMap(f => [
+ [f, 1.0],
+ [1.0, f],
+ [f, -1.0],
+ [-1.0, f],
+ ]),
+ 3: sparseF32Range().flatMap(f => [
+ [f, 1.0, 2.0],
+ [1.0, f, 2.0],
+ [1.0, 2.0, f],
+ [f, -1.0, -2.0],
+ [-1.0, f, -2.0],
+ [-1.0, -2.0, f],
+ ]),
+ 4: sparseF32Range().flatMap(f => [
+ [f, 1.0, 2.0, 3.0],
+ [1.0, f, 2.0, 3.0],
+ [1.0, 2.0, f, 3.0],
+ [1.0, 2.0, 3.0, f],
+ [f, -1.0, -2.0, -3.0],
+ [-1.0, f, -2.0, -3.0],
+ [-1.0, -2.0, f, -3.0],
+ [-1.0, -2.0, -3.0, f],
+ ]),
+};
+
+/**
+ * Returns set of vectors, indexed by dimension containing interesting float
+ * values.
+ *
+ * The tests do not do the simple option for coverage of computing the cartesian
+ * product of all of the interesting float values N times for vecN tests,
+ * because that creates a huge number of tests for vec3 and vec4, leading to
+ * time outs.
+ *
+ * Instead they insert the interesting f32 values into each location of the
+ * vector to get a spread of testing over the entire range. This reduces the
+ * number of cases being run substantially, but maintains coverage.
+ */
+export function vectorF32Range(dim: number): ROArrayArray<number> {
+ assert(dim === 2 || dim === 3 || dim === 4, 'vectorF32Range only accepts dimensions 2, 3, and 4');
+ return kVectorF32Values[dim];
+}
+
+const kSparseVectorF32Values = {
+ 2: sparseF32Range().map((f, idx) => [idx % 2 === 0 ? f : idx, idx % 2 === 1 ? f : -idx]),
+ 3: sparseF32Range().map((f, idx) => [
+ idx % 3 === 0 ? f : idx,
+ idx % 3 === 1 ? f : -idx,
+ idx % 3 === 2 ? f : idx,
+ ]),
+ 4: sparseF32Range().map((f, idx) => [
+ idx % 4 === 0 ? f : idx,
+ idx % 4 === 1 ? f : -idx,
+ idx % 4 === 2 ? f : idx,
+ idx % 4 === 3 ? f : -idx,
+ ]),
+};
+
+/**
+ * Minimal set of vectors, indexed by dimension, that contain interesting float
+ * values.
+ *
+ * This is an even more stripped down version of `vectorF32Range` for when
+ * pairs of vectors are being tested.
+ * All of the interesting floats from sparseF32 are guaranteed to be tested, but
+ * not in every position.
+ */
+export function sparseVectorF32Range(dim: number): ROArrayArray<number> {
+ assert(
+ dim === 2 || dim === 3 || dim === 4,
+ 'sparseVectorF32Range only accepts dimensions 2, 3, and 4'
+ );
+ return kSparseVectorF32Values[dim];
+}
+
+const kSparseMatrixF32Values = {
+ 2: {
+ 2: kInterestingF32Values.map((f, idx) => [
+ [idx % 4 === 0 ? f : idx, idx % 4 === 1 ? f : -idx],
+ [idx % 4 === 2 ? f : -idx, idx % 4 === 3 ? f : idx],
+ ]),
+ 3: kInterestingF32Values.map((f, idx) => [
+ [idx % 6 === 0 ? f : idx, idx % 6 === 1 ? f : -idx, idx % 6 === 2 ? f : idx],
+ [idx % 6 === 3 ? f : -idx, idx % 6 === 4 ? f : idx, idx % 6 === 5 ? f : -idx],
+ ]),
+ 4: kInterestingF32Values.map((f, idx) => [
+ [
+ idx % 8 === 0 ? f : idx,
+ idx % 8 === 1 ? f : -idx,
+ idx % 8 === 2 ? f : idx,
+ idx % 8 === 3 ? f : -idx,
+ ],
+ [
+ idx % 8 === 4 ? f : -idx,
+ idx % 8 === 5 ? f : idx,
+ idx % 8 === 6 ? f : -idx,
+ idx % 8 === 7 ? f : idx,
+ ],
+ ]),
+ },
+ 3: {
+ 2: kInterestingF32Values.map((f, idx) => [
+ [idx % 6 === 0 ? f : idx, idx % 6 === 1 ? f : -idx],
+ [idx % 6 === 2 ? f : -idx, idx % 6 === 3 ? f : idx],
+ [idx % 6 === 4 ? f : idx, idx % 6 === 5 ? f : -idx],
+ ]),
+ 3: kInterestingF32Values.map((f, idx) => [
+ [idx % 9 === 0 ? f : idx, idx % 9 === 1 ? f : -idx, idx % 9 === 2 ? f : idx],
+ [idx % 9 === 3 ? f : -idx, idx % 9 === 4 ? f : idx, idx % 9 === 5 ? f : -idx],
+ [idx % 9 === 6 ? f : idx, idx % 9 === 7 ? f : -idx, idx % 9 === 8 ? f : idx],
+ ]),
+ 4: kInterestingF32Values.map((f, idx) => [
+ [
+ idx % 12 === 0 ? f : idx,
+ idx % 12 === 1 ? f : -idx,
+ idx % 12 === 2 ? f : idx,
+ idx % 12 === 3 ? f : -idx,
+ ],
+ [
+ idx % 12 === 4 ? f : -idx,
+ idx % 12 === 5 ? f : idx,
+ idx % 12 === 6 ? f : -idx,
+ idx % 12 === 7 ? f : idx,
+ ],
+ [
+ idx % 12 === 8 ? f : idx,
+ idx % 12 === 9 ? f : -idx,
+ idx % 12 === 10 ? f : idx,
+ idx % 12 === 11 ? f : -idx,
+ ],
+ ]),
+ },
+ 4: {
+ 2: kInterestingF32Values.map((f, idx) => [
+ [idx % 8 === 0 ? f : idx, idx % 8 === 1 ? f : -idx],
+ [idx % 8 === 2 ? f : -idx, idx % 8 === 3 ? f : idx],
+ [idx % 8 === 4 ? f : idx, idx % 8 === 5 ? f : -idx],
+ [idx % 8 === 6 ? f : -idx, idx % 8 === 7 ? f : idx],
+ ]),
+ 3: kInterestingF32Values.map((f, idx) => [
+ [idx % 12 === 0 ? f : idx, idx % 12 === 1 ? f : -idx, idx % 12 === 2 ? f : idx],
+ [idx % 12 === 3 ? f : -idx, idx % 12 === 4 ? f : idx, idx % 12 === 5 ? f : -idx],
+ [idx % 12 === 6 ? f : idx, idx % 12 === 7 ? f : -idx, idx % 12 === 8 ? f : idx],
+ [idx % 12 === 9 ? f : -idx, idx % 12 === 10 ? f : idx, idx % 12 === 11 ? f : -idx],
+ ]),
+ 4: kInterestingF32Values.map((f, idx) => [
+ [
+ idx % 16 === 0 ? f : idx,
+ idx % 16 === 1 ? f : -idx,
+ idx % 16 === 2 ? f : idx,
+ idx % 16 === 3 ? f : -idx,
+ ],
+ [
+ idx % 16 === 4 ? f : -idx,
+ idx % 16 === 5 ? f : idx,
+ idx % 16 === 6 ? f : -idx,
+ idx % 16 === 7 ? f : idx,
+ ],
+ [
+ idx % 16 === 8 ? f : idx,
+ idx % 16 === 9 ? f : -idx,
+ idx % 16 === 10 ? f : idx,
+ idx % 16 === 11 ? f : -idx,
+ ],
+ [
+ idx % 16 === 12 ? f : -idx,
+ idx % 16 === 13 ? f : idx,
+ idx % 16 === 14 ? f : -idx,
+ idx % 16 === 15 ? f : idx,
+ ],
+ ]),
+ },
+};
+
+/**
+ * Returns a minimal set of matrices, indexed by dimension containing interesting
+ * float values.
+ *
+ * This is the matrix analogue of `sparseVectorF32Range`, so it is producing a
+ * minimal coverage set of matrices that test all of the interesting f32 values.
+ * There is not a more expansive set of matrices, since matrices are even more
+ * expensive than vectors for increasing runtime with coverage.
+ *
+ * All of the interesting floats from sparseF32 are guaranteed to be tested, but
+ * not in every position.
+ */
+export function sparseMatrixF32Range(c: number, r: number): ROArrayArrayArray<number> {
+ assert(
+ c === 2 || c === 3 || c === 4,
+ 'sparseMatrixF32Range only accepts column counts of 2, 3, and 4'
+ );
+ assert(
+ r === 2 || r === 3 || r === 4,
+ 'sparseMatrixF32Range only accepts row counts of 2, 3, and 4'
+ );
+ return kSparseMatrixF32Values[c][r];
+}
+
+/** Short list of f16 values of interest to test against */
+const kInterestingF16Values: readonly number[] = [
+ kValue.f16.negative.min,
+ -10.0,
+ -1.0,
+ -0.125,
+ kValue.f16.negative.max,
+ kValue.f16.negative.subnormal.min,
+ kValue.f16.negative.subnormal.max,
+ -0.0,
+ 0.0,
+ kValue.f16.positive.subnormal.min,
+ kValue.f16.positive.subnormal.max,
+ kValue.f16.positive.min,
+ 0.125,
+ 1.0,
+ 10.0,
+ kValue.f16.positive.max,
+];
+
+/** @returns minimal f16 values that cover the entire range of f16 behaviours
+ *
+ * Has specially selected values that cover edge cases, normals, and subnormals.
+ * This is used instead of fullF16Range when the number of test cases being
+ * generated is a superlinear function of the number of f16 values, which
+ * leads to timeouts.
+ *
+ * These values have been chosen to attempt to test the widest range of f16
+ * behaviours in the lowest number of entries, so may potentially miss function
+ * specific values of interest. If there are known values of interest they
+ * should be appended to this list in the test generation code.
+ */
+export function sparseF16Range(): readonly number[] {
+ return kInterestingF16Values;
+}
+
+const kVectorF16Values = {
+ 2: sparseF16Range().flatMap(f => [
+ [f, 1.0],
+ [1.0, f],
+ [f, -1.0],
+ [-1.0, f],
+ ]),
+ 3: sparseF16Range().flatMap(f => [
+ [f, 1.0, 2.0],
+ [1.0, f, 2.0],
+ [1.0, 2.0, f],
+ [f, -1.0, -2.0],
+ [-1.0, f, -2.0],
+ [-1.0, -2.0, f],
+ ]),
+ 4: sparseF16Range().flatMap(f => [
+ [f, 1.0, 2.0, 3.0],
+ [1.0, f, 2.0, 3.0],
+ [1.0, 2.0, f, 3.0],
+ [1.0, 2.0, 3.0, f],
+ [f, -1.0, -2.0, -3.0],
+ [-1.0, f, -2.0, -3.0],
+ [-1.0, -2.0, f, -3.0],
+ [-1.0, -2.0, -3.0, f],
+ ]),
+};
+
+/**
+ * Returns set of vectors, indexed by dimension containing interesting f16
+ * values.
+ *
+ * The tests do not do the simple option for coverage of computing the cartesian
+ * product of all of the interesting float values N times for vecN tests,
+ * because that creates a huge number of tests for vec3 and vec4, leading to
+ * time outs.
+ *
+ * Instead they insert the interesting f16 values into each location of the
+ * vector to get a spread of testing over the entire range. This reduces the
+ * number of cases being run substantially, but maintains coverage.
+ */
+export function vectorF16Range(dim: number): ROArrayArray<number> {
+ assert(dim === 2 || dim === 3 || dim === 4, 'vectorF16Range only accepts dimensions 2, 3, and 4');
+ return kVectorF16Values[dim];
+}
+
+const kSparseVectorF16Values = {
+ 2: sparseF16Range().map((f, idx) => [idx % 2 === 0 ? f : idx, idx % 2 === 1 ? f : -idx]),
+ 3: sparseF16Range().map((f, idx) => [
+ idx % 3 === 0 ? f : idx,
+ idx % 3 === 1 ? f : -idx,
+ idx % 3 === 2 ? f : idx,
+ ]),
+ 4: sparseF16Range().map((f, idx) => [
+ idx % 4 === 0 ? f : idx,
+ idx % 4 === 1 ? f : -idx,
+ idx % 4 === 2 ? f : idx,
+ idx % 4 === 3 ? f : -idx,
+ ]),
+};
+
+/**
+ * Minimal set of vectors, indexed by dimension, that contain interesting f16
+ * values.
+ *
+ * This is an even more stripped down version of `vectorF16Range` for when
+ * pairs of vectors are being tested.
+ * All of the interesting floats from sparseF16 are guaranteed to be tested, but
+ * not in every position.
+ */
+export function sparseVectorF16Range(dim: number): ROArrayArray<number> {
+ assert(
+ dim === 2 || dim === 3 || dim === 4,
+ 'sparseVectorF16Range only accepts dimensions 2, 3, and 4'
+ );
+ return kSparseVectorF16Values[dim];
+}
+
+const kSparseMatrixF16Values = {
+ 2: {
+ 2: kInterestingF16Values.map((f, idx) => [
+ [idx % 4 === 0 ? f : idx, idx % 4 === 1 ? f : -idx],
+ [idx % 4 === 2 ? f : -idx, idx % 4 === 3 ? f : idx],
+ ]),
+ 3: kInterestingF16Values.map((f, idx) => [
+ [idx % 6 === 0 ? f : idx, idx % 6 === 1 ? f : -idx, idx % 6 === 2 ? f : idx],
+ [idx % 6 === 3 ? f : -idx, idx % 6 === 4 ? f : idx, idx % 6 === 5 ? f : -idx],
+ ]),
+ 4: kInterestingF16Values.map((f, idx) => [
+ [
+ idx % 8 === 0 ? f : idx,
+ idx % 8 === 1 ? f : -idx,
+ idx % 8 === 2 ? f : idx,
+ idx % 8 === 3 ? f : -idx,
+ ],
+ [
+ idx % 8 === 4 ? f : -idx,
+ idx % 8 === 5 ? f : idx,
+ idx % 8 === 6 ? f : -idx,
+ idx % 8 === 7 ? f : idx,
+ ],
+ ]),
+ },
+ 3: {
+ 2: kInterestingF16Values.map((f, idx) => [
+ [idx % 6 === 0 ? f : idx, idx % 6 === 1 ? f : -idx],
+ [idx % 6 === 2 ? f : -idx, idx % 6 === 3 ? f : idx],
+ [idx % 6 === 4 ? f : idx, idx % 6 === 5 ? f : -idx],
+ ]),
+ 3: kInterestingF16Values.map((f, idx) => [
+ [idx % 9 === 0 ? f : idx, idx % 9 === 1 ? f : -idx, idx % 9 === 2 ? f : idx],
+ [idx % 9 === 3 ? f : -idx, idx % 9 === 4 ? f : idx, idx % 9 === 5 ? f : -idx],
+ [idx % 9 === 6 ? f : idx, idx % 9 === 7 ? f : -idx, idx % 9 === 8 ? f : idx],
+ ]),
+ 4: kInterestingF16Values.map((f, idx) => [
+ [
+ idx % 12 === 0 ? f : idx,
+ idx % 12 === 1 ? f : -idx,
+ idx % 12 === 2 ? f : idx,
+ idx % 12 === 3 ? f : -idx,
+ ],
+ [
+ idx % 12 === 4 ? f : -idx,
+ idx % 12 === 5 ? f : idx,
+ idx % 12 === 6 ? f : -idx,
+ idx % 12 === 7 ? f : idx,
+ ],
+ [
+ idx % 12 === 8 ? f : idx,
+ idx % 12 === 9 ? f : -idx,
+ idx % 12 === 10 ? f : idx,
+ idx % 12 === 11 ? f : -idx,
+ ],
+ ]),
+ },
+ 4: {
+ 2: kInterestingF16Values.map((f, idx) => [
+ [idx % 8 === 0 ? f : idx, idx % 8 === 1 ? f : -idx],
+ [idx % 8 === 2 ? f : -idx, idx % 8 === 3 ? f : idx],
+ [idx % 8 === 4 ? f : idx, idx % 8 === 5 ? f : -idx],
+ [idx % 8 === 6 ? f : -idx, idx % 8 === 7 ? f : idx],
+ ]),
+ 3: kInterestingF16Values.map((f, idx) => [
+ [idx % 12 === 0 ? f : idx, idx % 12 === 1 ? f : -idx, idx % 12 === 2 ? f : idx],
+ [idx % 12 === 3 ? f : -idx, idx % 12 === 4 ? f : idx, idx % 12 === 5 ? f : -idx],
+ [idx % 12 === 6 ? f : idx, idx % 12 === 7 ? f : -idx, idx % 12 === 8 ? f : idx],
+ [idx % 12 === 9 ? f : -idx, idx % 12 === 10 ? f : idx, idx % 12 === 11 ? f : -idx],
+ ]),
+ 4: kInterestingF16Values.map((f, idx) => [
+ [
+ idx % 16 === 0 ? f : idx,
+ idx % 16 === 1 ? f : -idx,
+ idx % 16 === 2 ? f : idx,
+ idx % 16 === 3 ? f : -idx,
+ ],
+ [
+ idx % 16 === 4 ? f : -idx,
+ idx % 16 === 5 ? f : idx,
+ idx % 16 === 6 ? f : -idx,
+ idx % 16 === 7 ? f : idx,
+ ],
+ [
+ idx % 16 === 8 ? f : idx,
+ idx % 16 === 9 ? f : -idx,
+ idx % 16 === 10 ? f : idx,
+ idx % 16 === 11 ? f : -idx,
+ ],
+ [
+ idx % 16 === 12 ? f : -idx,
+ idx % 16 === 13 ? f : idx,
+ idx % 16 === 14 ? f : -idx,
+ idx % 16 === 15 ? f : idx,
+ ],
+ ]),
+ },
+};
+
+/**
+ * Returns a minimal set of matrices, indexed by dimension containing interesting
+ * f16 values.
+ *
+ * This is the matrix analogue of `sparseVectorF16Range`, so it is producing a
+ * minimal coverage set of matrices that test all of the interesting f16 values.
+ * There is not a more expansive set of matrices, since matrices are even more
+ * expensive than vectors for increasing runtime with coverage.
+ *
+ * All of the interesting floats from sparseF16 are guaranteed to be tested, but
+ * not in every position.
+ */
+export function sparseMatrixF16Range(c: number, r: number): ROArrayArray<number>[] {
+ assert(
+ c === 2 || c === 3 || c === 4,
+ 'sparseMatrixF16Range only accepts column counts of 2, 3, and 4'
+ );
+ assert(
+ r === 2 || r === 3 || r === 4,
+ 'sparseMatrixF16Range only accepts row counts of 2, 3, and 4'
+ );
+ return kSparseMatrixF16Values[c][r];
+}
+
+/** Short list of f64 values of interest to test against */
+const kInterestingF64Values: readonly number[] = [
+ kValue.f64.negative.min,
+ -10.0,
+ -1.0,
+ -0.125,
+ kValue.f64.negative.max,
+ kValue.f64.negative.subnormal.min,
+ kValue.f64.negative.subnormal.max,
+ -0.0,
+ 0.0,
+ kValue.f64.positive.subnormal.min,
+ kValue.f64.positive.subnormal.max,
+ kValue.f64.positive.min,
+ 0.125,
+ 1.0,
+ 10.0,
+ kValue.f64.positive.max,
+];
+
+/** @returns minimal F64 values that cover the entire range of F64 behaviours
+ *
+ * Has specially selected values that cover edge cases, normals, and subnormals.
+ * This is used instead of fullF64Range when the number of test cases being
+ * generated is a superlinear function of the number of F64 values, which
+ * leads to timeouts.
+ *
+ * These values have been chosen to attempt to test the widest range of F64
+ * behaviours in the lowest number of entries, so may potentially miss function
+ * specific values of interest. If there are known values of interest they
+ * should be appended to this list in the test generation code.
+ */
+export function sparseF64Range(): readonly number[] {
+ return kInterestingF64Values;
+}
+
+const kVectorF64Values = {
+ 2: sparseF64Range().flatMap(f => [
+ [f, 1.0],
+ [1.0, f],
+ [f, -1.0],
+ [-1.0, f],
+ ]),
+ 3: sparseF64Range().flatMap(f => [
+ [f, 1.0, 2.0],
+ [1.0, f, 2.0],
+ [1.0, 2.0, f],
+ [f, -1.0, -2.0],
+ [-1.0, f, -2.0],
+ [-1.0, -2.0, f],
+ ]),
+ 4: sparseF64Range().flatMap(f => [
+ [f, 1.0, 2.0, 3.0],
+ [1.0, f, 2.0, 3.0],
+ [1.0, 2.0, f, 3.0],
+ [1.0, 2.0, 3.0, f],
+ [f, -1.0, -2.0, -3.0],
+ [-1.0, f, -2.0, -3.0],
+ [-1.0, -2.0, f, -3.0],
+ [-1.0, -2.0, -3.0, f],
+ ]),
+};
+
+/**
+ * Returns set of vectors, indexed by dimension containing interesting float
+ * values.
+ *
+ * The tests do not do the simple option for coverage of computing the cartesian
+ * product of all of the interesting float values N times for vecN tests,
+ * because that creates a huge number of tests for vec3 and vec4, leading to
+ * time outs.
+ *
+ * Instead they insert the interesting F64 values into each location of the
+ * vector to get a spread of testing over the entire range. This reduces the
+ * number of cases being run substantially, but maintains coverage.
+ */
+export function vectorF64Range(dim: number): ROArrayArray<number> {
+ assert(dim === 2 || dim === 3 || dim === 4, 'vectorF64Range only accepts dimensions 2, 3, and 4');
+ return kVectorF64Values[dim];
+}
+
+const kSparseVectorF64Values = {
+ 2: sparseF64Range().map((f, idx) => [idx % 2 === 0 ? f : idx, idx % 2 === 1 ? f : -idx]),
+ 3: sparseF64Range().map((f, idx) => [
+ idx % 3 === 0 ? f : idx,
+ idx % 3 === 1 ? f : -idx,
+ idx % 3 === 2 ? f : idx,
+ ]),
+ 4: sparseF64Range().map((f, idx) => [
+ idx % 4 === 0 ? f : idx,
+ idx % 4 === 1 ? f : -idx,
+ idx % 4 === 2 ? f : idx,
+ idx % 4 === 3 ? f : -idx,
+ ]),
+};
+
+/**
+ * Minimal set of vectors, indexed by dimension, that contain interesting f64
+ * values.
+ *
+ * This is an even more stripped down version of `vectorF64Range` for when
+ * pairs of vectors are being tested.
+ * All the interesting floats from sparseF64 are guaranteed to be tested, but
+ * not in every position.
+ */
+export function sparseVectorF64Range(dim: number): ROArrayArray<number> {
+ assert(
+ dim === 2 || dim === 3 || dim === 4,
+ 'sparseVectorF64Range only accepts dimensions 2, 3, and 4'
+ );
+ return kSparseVectorF64Values[dim];
+}
+
+const kSparseMatrixF64Values = {
+ 2: {
+ 2: kInterestingF64Values.map((f, idx) => [
+ [idx % 4 === 0 ? f : idx, idx % 4 === 1 ? f : -idx],
+ [idx % 4 === 2 ? f : -idx, idx % 4 === 3 ? f : idx],
+ ]),
+ 3: kInterestingF64Values.map((f, idx) => [
+ [idx % 6 === 0 ? f : idx, idx % 6 === 1 ? f : -idx, idx % 6 === 2 ? f : idx],
+ [idx % 6 === 3 ? f : -idx, idx % 6 === 4 ? f : idx, idx % 6 === 5 ? f : -idx],
+ ]),
+ 4: kInterestingF64Values.map((f, idx) => [
+ [
+ idx % 8 === 0 ? f : idx,
+ idx % 8 === 1 ? f : -idx,
+ idx % 8 === 2 ? f : idx,
+ idx % 8 === 3 ? f : -idx,
+ ],
+ [
+ idx % 8 === 4 ? f : -idx,
+ idx % 8 === 5 ? f : idx,
+ idx % 8 === 6 ? f : -idx,
+ idx % 8 === 7 ? f : idx,
+ ],
+ ]),
+ },
+ 3: {
+ 2: kInterestingF64Values.map((f, idx) => [
+ [idx % 6 === 0 ? f : idx, idx % 6 === 1 ? f : -idx],
+ [idx % 6 === 2 ? f : -idx, idx % 6 === 3 ? f : idx],
+ [idx % 6 === 4 ? f : idx, idx % 6 === 5 ? f : -idx],
+ ]),
+ 3: kInterestingF64Values.map((f, idx) => [
+ [idx % 9 === 0 ? f : idx, idx % 9 === 1 ? f : -idx, idx % 9 === 2 ? f : idx],
+ [idx % 9 === 3 ? f : -idx, idx % 9 === 4 ? f : idx, idx % 9 === 5 ? f : -idx],
+ [idx % 9 === 6 ? f : idx, idx % 9 === 7 ? f : -idx, idx % 9 === 8 ? f : idx],
+ ]),
+ 4: kInterestingF64Values.map((f, idx) => [
+ [
+ idx % 12 === 0 ? f : idx,
+ idx % 12 === 1 ? f : -idx,
+ idx % 12 === 2 ? f : idx,
+ idx % 12 === 3 ? f : -idx,
+ ],
+ [
+ idx % 12 === 4 ? f : -idx,
+ idx % 12 === 5 ? f : idx,
+ idx % 12 === 6 ? f : -idx,
+ idx % 12 === 7 ? f : idx,
+ ],
+ [
+ idx % 12 === 8 ? f : idx,
+ idx % 12 === 9 ? f : -idx,
+ idx % 12 === 10 ? f : idx,
+ idx % 12 === 11 ? f : -idx,
+ ],
+ ]),
+ },
+ 4: {
+ 2: kInterestingF64Values.map((f, idx) => [
+ [idx % 8 === 0 ? f : idx, idx % 8 === 1 ? f : -idx],
+ [idx % 8 === 2 ? f : -idx, idx % 8 === 3 ? f : idx],
+ [idx % 8 === 4 ? f : idx, idx % 8 === 5 ? f : -idx],
+ [idx % 8 === 6 ? f : -idx, idx % 8 === 7 ? f : idx],
+ ]),
+ 3: kInterestingF64Values.map((f, idx) => [
+ [idx % 12 === 0 ? f : idx, idx % 12 === 1 ? f : -idx, idx % 12 === 2 ? f : idx],
+ [idx % 12 === 3 ? f : -idx, idx % 12 === 4 ? f : idx, idx % 12 === 5 ? f : -idx],
+ [idx % 12 === 6 ? f : idx, idx % 12 === 7 ? f : -idx, idx % 12 === 8 ? f : idx],
+ [idx % 12 === 9 ? f : -idx, idx % 12 === 10 ? f : idx, idx % 12 === 11 ? f : -idx],
+ ]),
+ 4: kInterestingF64Values.map((f, idx) => [
+ [
+ idx % 16 === 0 ? f : idx,
+ idx % 16 === 1 ? f : -idx,
+ idx % 16 === 2 ? f : idx,
+ idx % 16 === 3 ? f : -idx,
+ ],
+ [
+ idx % 16 === 4 ? f : -idx,
+ idx % 16 === 5 ? f : idx,
+ idx % 16 === 6 ? f : -idx,
+ idx % 16 === 7 ? f : idx,
+ ],
+ [
+ idx % 16 === 8 ? f : idx,
+ idx % 16 === 9 ? f : -idx,
+ idx % 16 === 10 ? f : idx,
+ idx % 16 === 11 ? f : -idx,
+ ],
+ [
+ idx % 16 === 12 ? f : -idx,
+ idx % 16 === 13 ? f : idx,
+ idx % 16 === 14 ? f : -idx,
+ idx % 16 === 15 ? f : idx,
+ ],
+ ]),
+ },
+};
+
+/**
+ * Returns a minimal set of matrices, indexed by dimension containing interesting
+ * float values.
+ *
+ * This is the matrix analogue of `sparseVectorF64Range`, so it is producing a
+ * minimal coverage set of matrices that test all the interesting f64 values.
+ * There is not a more expansive set of matrices, since matrices are even more
+ * expensive than vectors for increasing runtime with coverage.
+ *
+ * All the interesting floats from sparseF64 are guaranteed to be tested, but
+ * not in every position.
+ */
+export function sparseMatrixF64Range(c: number, r: number): ROArrayArray<number>[] {
+ assert(
+ c === 2 || c === 3 || c === 4,
+ 'sparseMatrixF64Range only accepts column counts of 2, 3, and 4'
+ );
+ assert(
+ r === 2 || r === 3 || r === 4,
+ 'sparseMatrixF64Range only accepts row counts of 2, 3, and 4'
+ );
+ return kSparseMatrixF64Values[c][r];
+}
+
+/**
+ * @returns the result matrix in Array<Array<number>> type.
+ *
+ * Matrix multiplication. A is m x n and B is n x p. Returns
+ * m x p result.
+ */
+// A is m x n. B is n x p. product is m x p.
+export function multiplyMatrices(
+ A: Array<Array<number>>,
+ B: Array<Array<number>>
+): Array<Array<number>> {
+ assert(A.length > 0 && B.length > 0 && B[0].length > 0 && A[0].length === B.length);
+ const product = new Array<Array<number>>(A.length);
+ for (let i = 0; i < product.length; ++i) {
+ product[i] = new Array<number>(B[0].length).fill(0);
+ }
+
+ for (let m = 0; m < A.length; ++m) {
+ for (let p = 0; p < B[0].length; ++p) {
+ for (let n = 0; n < B.length; ++n) {
+ product[m][p] += A[m][n] * B[n][p];
+ }
+ }
+ }
+
+ return product;
+}
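+
+// Illustrative example (not part of the original utility): with the outer index of A taken
+// as m and the outer index of B taken as n, per the assertions above.
+//
+//   multiplyMatrices([[1, 2], [3, 4]], [[5, 6], [7, 8]]);  // [[19, 22], [43, 50]]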
+
+/** Sign-extend the `bits`-bit number `n` to a 32-bit signed integer. */
+export function signExtend(n: number, bits: number): number {
+ const shift = 32 - bits;
+ return (n << shift) >> shift;
+}
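+
+// Illustrative examples (not part of the original utility): sign extension of a 4-bit
+// two's complement value to a 32-bit signed integer.
+//
+//   signExtend(0b0111, 4);  // 7
+//   signExtend(0b1111, 4);  // -1
+//   signExtend(0b1000, 4);  // -8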
+
+export interface QuantizeFunc {
+ (num: number): number;
+}
+
+/** @returns the closest 32-bit floating point value to the input */
+export function quantizeToF32(num: number): number {
+ return Math.fround(num);
+}
+
+/** @returns the closest 16-bit floating point value to the input */
+export function quantizeToF16(num: number): number {
+ return hfround(num);
+}
+
+/**
+ * @returns the closest 32-bit signed integer value to the input, rounding
+ * towards 0, if not already an integer
+ */
+export function quantizeToI32(num: number): number {
+ if (num >= kValue.i32.positive.max) {
+ return kValue.i32.positive.max;
+ }
+ if (num <= kValue.i32.negative.min) {
+ return kValue.i32.negative.min;
+ }
+ return Math.trunc(num);
+}
+
+/**
+ * @returns the closest 32-bit unsigned integer value to the input, rounding
+ * towards 0, if not already an integer
+ */
+export function quantizeToU32(num: number): number {
+ if (num >= kValue.u32.max) {
+ return kValue.u32.max;
+ }
+ if (num <= 0) {
+ return 0;
+ }
+ return Math.trunc(num);
+}
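+
+// Illustrative examples (not part of the original utility): the integer quantizers
+// truncate towards zero and clamp to the target type's range.
+//
+//   quantizeToI32(3.9);      // 3
+//   quantizeToI32(-3.9);     // -3
+//   quantizeToU32(-5);       // 0
+//   quantizeToU32(2 ** 40);  // kValue.u32.max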
+
+/** @returns whether the number is an integer and a power of two */
+export function isPowerOfTwo(n: number): boolean {
+ if (!Number.isInteger(n)) {
+ return false;
+ }
+ return n !== 0 && (n & (n - 1)) === 0;
+}
+
+/** @returns the Greatest Common Divisor (GCD) of the inputs */
+export function gcd(a: number, b: number): number {
+ assert(Number.isInteger(a) && a > 0);
+ assert(Number.isInteger(b) && b > 0);
+
+ while (b !== 0) {
+ const bTemp = b;
+ b = a % b;
+ a = bTemp;
+ }
+
+ return a;
+}
+
+/** @returns the Least Common Multiple (LCM) of the inputs */
+export function lcm(a: number, b: number): number {
+ return (a * b) / gcd(a, b);
+}
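+
+// Illustrative examples (not part of the original utility): gcd and lcm on small positive integers.
+//
+//   gcd(12, 18);  // 6
+//   lcm(4, 6);    // 12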
+
+/** @returns the cross of an array with the intermediate result of cartesianProduct
+ *
+ * @param elements array of values to cross with the intermediate result of
+ * cartesianProduct
+ * @param intermediate arrays of values representing the partial result of
+ * cartesianProduct
+ */
+function cartesianProductImpl<T>(
+ elements: readonly T[],
+ intermediate: ROArrayArray<T>
+): ROArrayArray<T> {
+ const result: T[][] = [];
+ elements.forEach((e: T) => {
+ if (intermediate.length > 0) {
+ intermediate.forEach((i: readonly T[]) => {
+ result.push([...i, e]);
+ });
+ } else {
+ result.push([e]);
+ }
+ });
+ return result;
+}
+
+/** @returns the cartesian product (NxMx...) of a set of arrays
+ *
+ * This is implemented by calculating the cross of a single input against an
+ * intermediate result for each input to build up the final array of arrays.
+ *
+ * There are examples of doing this more succinctly using map & reduce online,
+ * but they are a bit more opaque to read.
+ *
+ * @param inputs arrays of numbers to calculate cartesian product over
+ */
+export function cartesianProduct<T>(...inputs: ROArrayArray<T>): ROArrayArray<T> {
+ let result: ROArrayArray<T> = [];
+ inputs.forEach((i: readonly T[]) => {
+ result = cartesianProductImpl<T>(i, result);
+ });
+
+ return result;
+}
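+
+// Illustrative example (not part of the original utility): the product contains every
+// combination with one element drawn from each input array.
+//
+//   cartesianProduct([1, 2], [3, 4]);  // [[1, 3], [2, 3], [1, 4], [2, 4]]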
+
+/** @returns all of the permutations of an array
+ *
+ * Recursively calculates all of the permutations; it does not cull duplicate
+ * entries.
+ *
+ * Only feasible for inputs of length 5 or so, since the number of permutations
+ * is (input.length)!, which will cause the stack to explode for longer inputs.
+ *
+ * This code could be made iterative using something like
+ * Steinhaus–Johnson–Trotter and additionally turned into a generator to reduce
+ * the stack size, but there is still a fundamental combinatorial explosion
+ * here that will affect runtime.
+ *
+ * @param input the array to get permutations of
+ */
+export function calculatePermutations<T>(input: readonly T[]): ROArrayArray<T> {
+ if (input.length === 0) {
+ return [];
+ }
+
+ if (input.length === 1) {
+ return [input];
+ }
+
+ if (input.length === 2) {
+ return [input, [input[1], input[0]]];
+ }
+
+ const result: T[][] = [];
+ input.forEach((head, idx) => {
+ const tail = input.slice(0, idx).concat(input.slice(idx + 1));
+ const permutations = calculatePermutations(tail);
+ permutations.forEach(p => {
+ result.push([head, ...p]);
+ });
+ });
+
+ return result;
+}
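+
+// Illustrative example (not part of the original utility): 3 elements yield 3! = 6 orderings.
+//
+//   calculatePermutations([1, 2, 3]);
+//   // [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]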
+
+/**
+ * Convert an Array of Arrays to linear array
+ *
+ * The caller is responsible for retaining the dimensions of the array for later
+ * unflattening.
+ *
+ * @param m Matrix to convert
+ */
+export function flatten2DArray<T>(m: ROArrayArray<T>): T[] {
+ const c = m.length;
+ const r = m[0].length;
+ assert(
+ m.every(c => c.length === r),
+ `Unexpectedly received jagged array to flatten`
+ );
+ const result: T[] = Array<T>(c * r);
+ for (let i = 0; i < c; i++) {
+ for (let j = 0; j < r; j++) {
+ result[j + i * r] = m[i][j];
+ }
+ }
+ return result;
+}
+
+/**
+ * Convert linear array to an Array of Arrays
+ * @param n an array to convert
+ * @param c number of elements in the array containing arrays
+ * @param r number of elements in the arrays that are contained
+ */
+export function unflatten2DArray<T>(n: readonly T[], c: number, r: number): ROArrayArray<T> {
+ assert(
+ c > 0 && Number.isInteger(c) && r > 0 && Number.isInteger(r),
+ `columns (${c}) and rows (${r}) need to be positive integers`
+ );
+ assert(n.length === c * r, `n.length (${n.length}) should equal c * r (${c * r})`);
+ const result: T[][] = [...Array(c)].map(_ => [...Array(r)]);
+ for (let i = 0; i < c; i++) {
+ for (let j = 0; j < r; j++) {
+ result[i][j] = n[j + i * r];
+ }
+ }
+ return result;
+}
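+
+// Illustrative example (not part of the original utility): flatten2DArray and
+// unflatten2DArray are inverses when the dimensions are preserved.
+//
+//   flatten2DArray([[1, 2, 3], [4, 5, 6]]);      // [1, 2, 3, 4, 5, 6]
+//   unflatten2DArray([1, 2, 3, 4, 5, 6], 2, 3);  // [[1, 2, 3], [4, 5, 6]]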
+
+/**
+ * Performs a .map over a matrix and returns the result.
+ * The shape of the input and output matrices will be the same.
+ *
+ * @param m input matrix of type T
+ * @param op operation that converts an element of type T to one of type S
+ * @returns a matrix with elements of type S that are calculated by applying op element by element
+ */
+export function map2DArray<T, S>(m: ROArrayArray<T>, op: (input: T) => S): ROArrayArray<S> {
+ const c = m.length;
+ const r = m[0].length;
+ assert(
+ m.every(c => c.length === r),
+ `Unexpectedly received jagged array to map`
+ );
+ const result: S[][] = [...Array(c)].map(_ => [...Array(r)]);
+ for (let i = 0; i < c; i++) {
+ for (let j = 0; j < r; j++) {
+ result[i][j] = op(m[i][j]);
+ }
+ }
+ return result;
+}
+
+/**
+ * Performs a .every over a matrix and returns the result.
+ *
+ * @param m input matrix of type T
+ * @param op operation that performs a test on an element
+ * @returns a boolean indicating if the test passed for every element
+ */
+export function every2DArray<T>(m: ROArrayArray<T>, op: (input: T) => boolean): boolean {
+ const r = m[0].length;
+ assert(
+ m.every(c => c.length === r),
+ `Unexpectedly received jagged array in every2DArray`
+ );
+ return m.every(col => col.every(el => op(el)));
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/memory.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/memory.ts
new file mode 100644
index 0000000000..bc5c916495
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/memory.ts
@@ -0,0 +1,25 @@
+/**
+ * Helper to exhaust VRAM until there is less than 64 MB of capacity. Returns
+ * an opaque closure which can be called to free the allocated resources later.
+ */
+export async function exhaustVramUntilUnder64MB(device: GPUDevice) {
+ const allocateUntilOom = async (device: GPUDevice, size: number) => {
+ const buffers = [];
+ for (;;) {
+ device.pushErrorScope('out-of-memory');
+ const buffer = device.createBuffer({ size, usage: GPUBufferUsage.STORAGE });
+ if (await device.popErrorScope()) {
+ return buffers;
+ }
+ buffers.push(buffer);
+ }
+ };
+
+ const kLargeChunkSize = 512 * 1024 * 1024;
+ const kSmallChunkSize = 64 * 1024 * 1024;
+ const buffers = await allocateUntilOom(device, kLargeChunkSize);
+ buffers.push(...(await allocateUntilOom(device, kSmallChunkSize)));
+ return () => {
+ buffers.forEach(buffer => buffer.destroy());
+ };
+}
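+
+// Illustrative usage sketch (not part of the original utility): allocate until near-OOM,
+// run the test body, then release everything via the returned closure.
+//
+//   const release = await exhaustVramUntilUnder64MB(device);
+//   try {
+//     // ... exercise out-of-memory behaviour here ...
+//   } finally {
+//     release();
+//   }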
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/pretty_diff_tables.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/pretty_diff_tables.ts
new file mode 100644
index 0000000000..af98ab7ecf
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/pretty_diff_tables.ts
@@ -0,0 +1,51 @@
+import { range } from '../../common/util/util.js';
+
+/**
+ * Pretty-prints a "table" of cell values (each being `number | string`), right-aligned.
+ * Each row may be any iterator, including lazily-generated (potentially infinite) rows.
+ *
+ * The first argument is the printing options:
+ * - fillToWidth: Keep printing columns (as long as there is data) until this width is passed.
+ * If there is more data, "..." is appended.
+ * - numberToString: if a cell value is a number, this is used to stringify it.
+ *
+ * The second argument is an array of rows; each entry provides one row of the table.
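+ *
+ * Example (illustrative; output traced by hand, shown approximately):
+ * ```
+ * generatePrettyTable({ fillToWidth: 60, numberToString: n => n.toFixed(2) }, [
+ *   ['expected', 1, 2, 3],
+ *   ['actual', 1, 2.5, 3],
+ * ]);
+ * // ' expected 1.00 2.00 3.00'
+ * // '   actual 1.00 2.50 3.00'
+ * ```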
+ */
+export function generatePrettyTable(
+ { fillToWidth, numberToString }: { fillToWidth: number; numberToString: (n: number) => string },
+ rows: ReadonlyArray<Iterable<string | number>>
+): string {
+ const rowStrings = range(rows.length, () => '');
+ let totalTableWidth = 0;
+ const iters = rows.map(row => row[Symbol.iterator]());
+
+ // Loop over columns
+ for (;;) {
+ const cellsForColumn = iters.map(iter => {
+ const r = iter.next(); // Advance the iterator for each row, in lock-step.
+ return r.done ? undefined : typeof r.value === 'number' ? numberToString(r.value) : r.value;
+ });
+ if (cellsForColumn.every(cell => cell === undefined)) break;
+
+ // Maximum width of any cell in this column, plus one for space between columns
+ // (also inserts a space at the left of the first column).
+ const colWidth = Math.max(...cellsForColumn.map(c => (c === undefined ? 0 : c.length))) + 1;
+ for (let row = 0; row < rowStrings.length; ++row) {
+ const cell = cellsForColumn[row];
+ if (cell !== undefined) {
+ rowStrings[row] += cell.padStart(colWidth);
+ }
+ }
+
+ totalTableWidth += colWidth;
+ if (totalTableWidth >= fillToWidth) {
+ for (let row = 0; row < rowStrings.length; ++row) {
+ if (cellsForColumn[row] !== undefined) {
+ rowStrings[row] += ' ...';
+ }
+ }
+ break;
+ }
+ }
+ return rowStrings.join('\n');
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/prng.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/prng.ts
new file mode 100644
index 0000000000..641a937081
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/prng.ts
@@ -0,0 +1,125 @@
+import { assert } from '../../common/util/util.js';
+
+import { kValue } from './constants.js';
+
+/**
+ * Seed-able deterministic pseudo random generator for the WebGPU CTS
+ *
+ * This generator requires setting a seed value and the sequence of values
+ * generated is deterministic based on the seed.
+ *
+ * This generator is intended to be a replacement for Math.random().
+ *
+ * This generator is not cryptographically secure, though nothing in the CTS
+ * should need cryptographic security.
+ *
+ * The current implementation is based on TinyMT
+ * (https://github.com/MersenneTwister-Lab/TinyMT), which is a version of
+ * Mersenne Twister that has reduced the internal state size at the cost of
+ * shortening the period length of the generated sequence. The period is still
+ * 2^127 - 1 entries long, which should be sufficient for use in the CTS, and the
+ * smaller state makes it less costly to create multiple instances of the class.
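+ *
+ * Example (illustrative):
+ * ```
+ * const prng = new PRNG(42);
+ * const f = prng.random();    // deterministic float in [0, 1)
+ * const u = prng.randomU32(); // deterministic unsigned 32-bit integer
+ * ```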
+ */
+export class PRNG {
+ // Storing variables for temper() as members, so they don't need to be
+ // reallocated per call to temper()
+ private readonly t_vars: Uint32Array;
+
+ // Storing variables for next() as members, so they don't need to be
+ // reallocated per call to next()
+ private readonly n_vars: Uint32Array;
+
+ // Generator internal state
+ private readonly state: Uint32Array;
+
+ // Default tuning parameters for TinyMT.
+ // These are tested to not generate an all zero initial state.
+ private static readonly kMat1: number = 0x8f7011ee;
+ private static readonly kMat2: number = 0xfc78ff1f;
+ private static readonly kTMat: number = 0x3793fdff;
+
+ // TinyMT algorithm internal magic numbers
+ private static readonly kMask = 0x7fffffff;
+ private static readonly kMinLoop = 8;
+ private static readonly kPreLoop = 8;
+ private static readonly kSH0 = 1;
+ private static readonly kSH1 = 10;
+ private static readonly kSH8 = 8;
+
+ // u32.max + 1, used to scale the u32 value from temper() to [0, 1).
+ private static readonly kRandomDivisor = 4294967296.0;
+
+ /**
+ * constructor
+ *
+ * @param seed value used to initialize random number sequence. Results are
+ * guaranteed to be deterministic based on this.
+ * This value must be in the range of unsigned 32-bit integers.
+ * Non-integers will be rounded.
+ */
+ constructor(seed: number) {
+ assert(seed >= 0 && seed <= kValue.u32.max, 'seed to PRNG needs to be a u32');
+
+ this.t_vars = new Uint32Array(2);
+ this.n_vars = new Uint32Array(2);
+
+ this.state = new Uint32Array([Math.round(seed), PRNG.kMat1, PRNG.kMat2, PRNG.kTMat]);
+ for (let i = 1; i < PRNG.kMinLoop; i++) {
+ this.state[i & 3] ^=
+ i + Math.imul(1812433253, this.state[(i - 1) & 3] ^ (this.state[(i - 1) & 3] >>> 30));
+ }
+
+ // Check that the initial state isn't all 0s, since the algorithm assumes
+ // that this never occurs
+ assert(
+ (this.state[0] & PRNG.kMask) !== 0 ||
+ this.state[1] !== 0 ||
+ this.state[2] !== 0 ||
+ this.state[3] !== 0,
+ 'Initialization of PRNG unexpectedly generated all 0s initial state, this means the tuning parameters are bad'
+ );
+
+ for (let i = 0; i < PRNG.kPreLoop; i++) {
+ this.next();
+ }
+ }
+
+ /** Advances the internal state to the next values */
+ private next() {
+ this.n_vars[0] = (this.state[0] & PRNG.kMask) ^ this.state[1] ^ this.state[2];
+ this.n_vars[1] = this.state[3];
+ this.n_vars[0] ^= this.n_vars[0] << PRNG.kSH0;
+ this.n_vars[1] ^= (this.n_vars[1] >>> PRNG.kSH0) ^ this.n_vars[0];
+ this.state[0] = this.state[1];
+ this.state[1] = this.state[2];
+ this.state[2] = this.n_vars[0] ^ (this.n_vars[1] << PRNG.kSH1);
+ this.state[3] = this.n_vars[1];
+ if ((this.n_vars[1] & 1) !== 0) {
+ this.state[1] ^= PRNG.kMat1;
+ this.state[2] ^= PRNG.kMat2;
+ }
+ }
+
+ /** @returns a 32-bit unsigned integer based on the current state */
+ private temper(): number {
+ this.t_vars[0] = this.state[3];
+ this.t_vars[1] = this.state[0] + (this.state[2] >>> PRNG.kSH8);
+ this.t_vars[0] ^= this.t_vars[1];
+ if ((this.t_vars[1] & 1) !== 0) {
+ this.t_vars[0] ^= PRNG.kTMat;
+ }
+ return this.t_vars[0];
+ }
+
+ /** @returns a value on the range of [0, 1) and advances the state */
+ public random(): number {
+ this.next();
+ return this.temper() / PRNG.kRandomDivisor;
+ }
+
+ /** @returns a 32-bit unsigned integer value and advances the state */
+ public randomU32(): number {
+ this.next();
+ return this.temper();
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/reinterpret.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/reinterpret.ts
new file mode 100644
index 0000000000..2ffb24b231
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/reinterpret.ts
@@ -0,0 +1,118 @@
+import { Float16Array } from '../../external/petamoriken/float16/float16.js';
+
+/**
+ * Once-allocated ArrayBuffer/views to avoid overhead of allocation when converting between numeric formats
+ *
+ * workingData* is shared between multiple functions in this file, so to avoid re-entrancy problems, functions
+ * that use it must not call themselves or other functions that also use workingData*.
+ */
+const workingData = new ArrayBuffer(8);
+const workingDataU32 = new Uint32Array(workingData);
+const workingDataU16 = new Uint16Array(workingData);
+const workingDataF32 = new Float32Array(workingData);
+const workingDataF16 = new Float16Array(workingData);
+const workingDataI32 = new Int32Array(workingData);
+const workingDataF64 = new Float64Array(workingData);
+const workingDataU64 = new BigUint64Array(workingData);
+
+/**
+ * @returns a 64-bit float value via interpreting the input as the bit
+ * representation as a 64-bit integer
+ */
+export function reinterpretU64AsF64(input: bigint): number {
+ workingDataU64[0] = input;
+ return workingDataF64[0];
+}
+
+/**
+ * @returns the 64-bit integer bit representation of 64-bit float value
+ */
+export function reinterpretF64AsU64(input: number): bigint {
+ workingDataF64[0] = input;
+ return workingDataU64[0];
+}
+
+// Encoding to u32s, instead of BigInt, for serialization
+export function reinterpretF64AsU32s(f64: number): [number, number] {
+ workingDataF64[0] = f64;
+ return [workingDataU32[0], workingDataU32[1]];
+}
+
+// Decoding from u32s, instead of BigInt, for serialization
+export function reinterpretU32sAsF64(u32s: [number, number]): number {
+ workingDataU32[0] = u32s[0];
+ workingDataU32[1] = u32s[1];
+ return workingDataF64[0];
+}
+
+/**
+ * @returns a number representing the u32 interpretation
+ * of the bits of a number assumed to be an f32 value.
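+ *
+ * e.g. reinterpretF32AsU32(1.0) === 0x3f800000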
+ */
+export function reinterpretF32AsU32(f32: number): number {
+ workingDataF32[0] = f32;
+ return workingDataU32[0];
+}
+
+/**
+ * @returns a number representing the i32 interpretation
+ * of the bits of a number assumed to be an f32 value.
+ */
+export function reinterpretF32AsI32(f32: number): number {
+ workingDataF32[0] = f32;
+ return workingDataI32[0];
+}
+
+/**
+ * @returns a number representing the f32 interpretation
+ * of the bits of a number assumed to be a u32 value.
+ */
+export function reinterpretU32AsF32(u32: number): number {
+ workingDataU32[0] = u32;
+ return workingDataF32[0];
+}
+
+/**
+ * @returns a number representing the i32 interpretation
+ * of the bits of a number assumed to be a u32 value.
+ */
+export function reinterpretU32AsI32(u32: number): number {
+ workingDataU32[0] = u32;
+ return workingDataI32[0];
+}
+
+/**
+ * @returns a number representing the u32 interpretation
+ * of the bits of a number assumed to be an i32 value.
+ */
+export function reinterpretI32AsU32(i32: number): number {
+ workingDataI32[0] = i32;
+ return workingDataU32[0];
+}
+
+/**
+ * @returns a number representing the f32 interpretation
+ * of the bits of a number assumed to be an i32 value.
+ */
+export function reinterpretI32AsF32(i32: number): number {
+ workingDataI32[0] = i32;
+ return workingDataF32[0];
+}
+
+/**
+ * @returns a number representing the u16 interpretation
+ * of the bits of a number assumed to be an f16 value.
+ */
+export function reinterpretF16AsU16(f16: number): number {
+ workingDataF16[0] = f16;
+ return workingDataU16[0];
+}
+
+/**
+ * @returns a number representing the f16 interpretation
+ * of the bits of a number assumed to be a u16 value.
+ */
+export function reinterpretU16AsF16(u16: number): number {
+ workingDataU16[0] = u16;
+ return workingDataF16[0];
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/shader.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/shader.ts
new file mode 100644
index 0000000000..2a09061527
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/shader.ts
@@ -0,0 +1,196 @@
+import { unreachable } from '../../common/util/util.js';
+
+export const kDefaultVertexShaderCode = `
+@vertex fn main() -> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+}
+`;
+
+export const kDefaultFragmentShaderCode = `
+@fragment fn main() -> @location(0) vec4<f32> {
+ return vec4<f32>(1.0, 1.0, 1.0, 1.0);
+}`;
+
+const kPlainTypeInfo = {
+ i32: {
+ suffix: '',
+ fractionDigits: 0,
+ },
+ u32: {
+ suffix: 'u',
+ fractionDigits: 0,
+ },
+ f32: {
+ suffix: '',
+ fractionDigits: 4,
+ },
+};
+
+/**
+ * @param sampleType the sample type of a texture format
+ * @returns the WGSL plain scalar type compatible with that sample type
+ */
+export function getPlainTypeInfo(sampleType: GPUTextureSampleType): keyof typeof kPlainTypeInfo {
+ switch (sampleType) {
+ case 'sint':
+ return 'i32';
+ case 'uint':
+ return 'u32';
+ case 'float':
+ case 'unfilterable-float':
+ case 'depth':
+ return 'f32';
+ default:
+ unreachable();
+ }
+}
+
+/**
+ * Build a fragment shader based on output values and types
+ * e.g. write to color target 0 a `vec4<f32>(1.0, 0.0, 1.0, 1.0)` and color target 2 a `vec2<u32>(1, 2)`
+ * ```
+ * outputs: [
+ * {
+ * values: [1, 0, 1, 1],
+ * plainType: 'f32',
+ * componentCount: 4,
+ * },
+ * null,
+ * {
+ * values: [1, 2],
+ * plainType: 'u32',
+ * componentCount: 2,
+ * },
+ * ]
+ * ```
+ *
+ * return:
+ * ```
+ * struct Outputs {
+ * @location(0) o0 : vec4<f32>,
+ * @location(2) o2 : vec2<u32>,
+ * }
+ * @fragment fn main() -> Outputs {
+ * return Outputs(vec4<f32>(1.0, 0.0, 1.0, 1.0), vec2<u32>(1u, 2u));
+ * }
+ * ```
+ *
+ * If fragDepth is given, there will be an extra @builtin(frag_depth) output with the specified value assigned.
+ *
+ * @param outputs the shader outputs for each location attribute
+ * @param fragDepth the shader outputs frag_depth value (optional)
+ * @returns the fragment shader string
+ */
+export function getFragmentShaderCodeWithOutput(
+ outputs: ({
+ values: readonly number[];
+ plainType: 'i32' | 'u32' | 'f32';
+ componentCount: number;
+ } | null)[],
+ fragDepth: { value: number } | null = null
+): string {
+ if (outputs.length === 0) {
+ if (fragDepth) {
+ return `
+ @fragment fn main() -> @builtin(frag_depth) f32 {
+ return ${fragDepth.value.toFixed(kPlainTypeInfo['f32'].fractionDigits)};
+ }`;
+ }
+ return `
+ @fragment fn main() {
+ }`;
+ }
+
+ const resultStrings = [] as string[];
+ let outputStructString = '';
+
+ if (fragDepth) {
+ resultStrings.push(`${fragDepth.value.toFixed(kPlainTypeInfo['f32'].fractionDigits)}`);
+ outputStructString += `@builtin(frag_depth) depth_out: f32,\n`;
+ }
+
+ for (let i = 0; i < outputs.length; i++) {
+ const o = outputs[i];
+ if (o === null) {
+ continue;
+ }
+
+ const plainType = o.plainType;
+ const { suffix, fractionDigits } = kPlainTypeInfo[plainType];
+
+ let outputType;
+ const v = o.values.map(n => n.toFixed(fractionDigits));
+ switch (o.componentCount) {
+ case 1:
+ outputType = plainType;
+ resultStrings.push(`${v[0]}${suffix}`);
+ break;
+ case 2:
+ outputType = `vec2<${plainType}>`;
+ resultStrings.push(`${outputType}(${v[0]}${suffix}, ${v[1]}${suffix})`);
+ break;
+ case 3:
+ outputType = `vec3<${plainType}>`;
+ resultStrings.push(`${outputType}(${v[0]}${suffix}, ${v[1]}${suffix}, ${v[2]}${suffix})`);
+ break;
+ case 4:
+ outputType = `vec4<${plainType}>`;
+ resultStrings.push(
+ `${outputType}(${v[0]}${suffix}, ${v[1]}${suffix}, ${v[2]}${suffix}, ${v[3]}${suffix})`
+ );
+ break;
+ default:
+ unreachable();
+ }
+
+ outputStructString += `@location(${i}) o${i} : ${outputType},\n`;
+ }
+
+ return `
+ struct Outputs {
+ ${outputStructString}
+ }
+
+ @fragment fn main() -> Outputs {
+ return Outputs(${resultStrings.join(',')});
+ }`;
+}
+
+export type TShaderStage = 'compute' | 'vertex' | 'fragment' | 'empty';
+
+/**
+ * Return a trivial shader of the given stage with the given entry point
+ * @param shaderStage the shader stage ('compute', 'vertex', 'fragment', or 'empty')
+ * @param entryPoint the entry point name to use
+ * @returns the shader source string
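+ *
+ * e.g. getShaderWithEntryPoint('compute', 'main') returns
+ * '@compute @workgroup_size(1) fn main() {}'.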
+ */
+export function getShaderWithEntryPoint(shaderStage: TShaderStage, entryPoint: string): string {
+ let code;
+ switch (shaderStage) {
+ case 'compute': {
+ code = `@compute @workgroup_size(1) fn ${entryPoint}() {}`;
+ break;
+ }
+ case 'vertex': {
+ code = `
+ @vertex fn ${entryPoint}() -> @builtin(position) vec4<f32> {
+ return vec4<f32>(0.0, 0.0, 0.0, 1.0);
+ }`;
+ break;
+ }
+ case 'fragment': {
+ code = `
+ @fragment fn ${entryPoint}() -> @location(0) vec4<f32> {
+ return vec4<f32>(0.0, 1.0, 0.0, 1.0);
+ }`;
+ break;
+ }
+ case 'empty':
+ default: {
+ code = '';
+ break;
+ }
+ }
+ return code;
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture.ts
new file mode 100644
index 0000000000..48ff1430b5
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture.ts
@@ -0,0 +1,81 @@
+import { assert } from '../../common/util/util.js';
+
+import { getTextureCopyLayout } from './texture/layout.js';
+import { TexelView } from './texture/texel_view.js';
+import { reifyExtent3D } from './unions.js';
+
+/**
+ * Creates a mipmapped texture where the content of each mip level `i` comes
+ * from `texelViews[i]`.
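+ *
+ * Usage sketch (illustrative; `mip0View` and `mip1View` are assumed to be
+ * previously constructed TexelViews of the same format):
+ * ```
+ * const texture = createTextureFromTexelViews(device, [mip0View, mip1View], {
+ *   size: [2, 2, 1],
+ *   usage: GPUTextureUsage.TEXTURE_BINDING,
+ * });
+ * ```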
+ */
+export function createTextureFromTexelViews(
+ device: GPUDevice,
+ texelViews: TexelView[],
+ desc: Omit<GPUTextureDescriptor, 'format'>
+): GPUTexture {
+ // All texel views must be the same format for mipmaps.
+ assert(texelViews.length > 0 && texelViews.every(e => e.format === texelViews[0].format));
+ const format = texelViews[0].format;
+ const { width, height, depthOrArrayLayers } = reifyExtent3D(desc.size);
+
+ // Create the texture and then initialize each mipmap level separately.
+ const texture = device.createTexture({
+ ...desc,
+ format: texelViews[0].format,
+ usage: desc.usage | GPUTextureUsage.COPY_DST,
+ mipLevelCount: texelViews.length,
+ });
+
+ // Copy the texel view into each mip level layer.
+ const commandEncoder = device.createCommandEncoder();
+ const stagingBuffers = [];
+ for (let mipLevel = 0; mipLevel < texelViews.length; mipLevel++) {
+ const {
+ bytesPerRow,
+ mipSize: [mipWidth, mipHeight, mipDepthOrArray],
+ } = getTextureCopyLayout(format, desc.dimension ?? '2d', [width, height, depthOrArrayLayers], {
+ mipLevel,
+ });
+
+ // Create a staging buffer to upload the texture mip level contents.
+ const stagingBuffer = device.createBuffer({
+ mappedAtCreation: true,
+ size: bytesPerRow * mipHeight * mipDepthOrArray,
+ usage: GPUBufferUsage.COPY_SRC,
+ });
+ stagingBuffers.push(stagingBuffer);
+
+ // Write the texels into the staging buffer.
+ texelViews[mipLevel].writeTextureData(new Uint8Array(stagingBuffer.getMappedRange()), {
+ bytesPerRow,
+ rowsPerImage: mipHeight,
+ subrectOrigin: [0, 0, 0],
+ subrectSize: [mipWidth, mipHeight, mipDepthOrArray],
+ });
+ stagingBuffer.unmap();
+
+ // Copy from the staging buffer into the texture.
+ commandEncoder.copyBufferToTexture(
+ { buffer: stagingBuffer, bytesPerRow },
+ { texture, mipLevel },
+ [mipWidth, mipHeight, mipDepthOrArray]
+ );
+ }
+ device.queue.submit([commandEncoder.finish()]);
+
+ // Clean up the staging buffers.
+ stagingBuffers.forEach(value => value.destroy());
+
+ return texture;
+}
+
+/**
+ * Creates a single-mip-level texture with the contents of a TexelView.
+ */
+export function createTextureFromTexelView(
+ device: GPUDevice,
+ texelView: TexelView,
+ desc: Omit<GPUTextureDescriptor, 'format'>
+): GPUTexture {
+ return createTextureFromTexelViews(device, [texelView], desc);
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/base.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/base.ts
new file mode 100644
index 0000000000..67b4fc7156
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/base.ts
@@ -0,0 +1,243 @@
+import { assert, unreachable } from '../../../common/util/util.js';
+import { kTextureFormatInfo } from '../../format_info.js';
+import { align } from '../../util/math.js';
+import { reifyExtent3D } from '../../util/unions.js';
+
+/**
+ * Compute the maximum mip level count allowed for a given texture size and texture dimension.
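+ *
+ * e.g. maxMipLevelCount({ size: [16, 4, 1] }) === 5, i.e. floor(log2(16)) + 1.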
+ */
+export function maxMipLevelCount({
+ size,
+ dimension = '2d',
+}: {
+ readonly size: Readonly<GPUExtent3DDict> | readonly number[];
+ readonly dimension?: GPUTextureDimension;
+}): number {
+ const sizeDict = reifyExtent3D(size);
+
+ let maxMippedDimension = 0;
+ switch (dimension) {
+ case '1d':
+ maxMippedDimension = 1; // No mipmaps allowed.
+ break;
+ case '2d':
+ maxMippedDimension = Math.max(sizeDict.width, sizeDict.height);
+ break;
+ case '3d':
+ maxMippedDimension = Math.max(sizeDict.width, sizeDict.height, sizeDict.depthOrArrayLayers);
+ break;
+ }
+
+ return Math.floor(Math.log2(maxMippedDimension)) + 1;
+}
+
+/**
+ * Compute the "physical size" of a mip level: the size of the level, rounded up to a
+ * multiple of the texel block size.
+ */
+export function physicalMipSize(
+ baseSize: Required<GPUExtent3DDict>,
+ format: GPUTextureFormat,
+ dimension: GPUTextureDimension,
+ level: number
+): Required<GPUExtent3DDict> {
+ switch (dimension) {
+ case '1d':
+ assert(level === 0, '1d textures cannot be mipmapped');
+ assert(baseSize.height === 1 && baseSize.depthOrArrayLayers === 1, '1d texture not Wx1x1');
+ return { width: baseSize.width, height: 1, depthOrArrayLayers: 1 };
+
+ case '2d': {
+ assert(
+ Math.max(baseSize.width, baseSize.height) >> level > 0,
+ () => `level (${level}) too large for base size (${baseSize.width}x${baseSize.height})`
+ );
+
+ const virtualWidthAtLevel = Math.max(baseSize.width >> level, 1);
+ const virtualHeightAtLevel = Math.max(baseSize.height >> level, 1);
+ const physicalWidthAtLevel = align(
+ virtualWidthAtLevel,
+ kTextureFormatInfo[format].blockWidth
+ );
+ const physicalHeightAtLevel = align(
+ virtualHeightAtLevel,
+ kTextureFormatInfo[format].blockHeight
+ );
+ return {
+ width: physicalWidthAtLevel,
+ height: physicalHeightAtLevel,
+ depthOrArrayLayers: baseSize.depthOrArrayLayers,
+ };
+ }
+
+ case '3d': {
+ assert(
+ Math.max(baseSize.width, baseSize.height, baseSize.depthOrArrayLayers) >> level > 0,
+ () =>
+ `level (${level}) too large for base size (${baseSize.width}x${baseSize.height}x${baseSize.depthOrArrayLayers})`
+ );
+ assert(
+ kTextureFormatInfo[format].blockWidth === 1 && kTextureFormatInfo[format].blockHeight === 1,
+ 'not implemented for 3d block formats'
+ );
+ return {
+ width: Math.max(baseSize.width >> level, 1),
+ height: Math.max(baseSize.height >> level, 1),
+ depthOrArrayLayers: Math.max(baseSize.depthOrArrayLayers >> level, 1),
+ };
+ }
+ }
+}
+
+/**
+ * Compute the "physical size" of a mip level: the size of the level, rounded up to a
+ * multiple of the texel block size.
+ */
+export function physicalMipSizeFromTexture(
+ texture: GPUTexture,
+ mipLevel: number
+): [number, number, number] {
+ const size = physicalMipSize(texture, texture.format, texture.dimension, mipLevel);
+ return [size.width, size.height, size.depthOrArrayLayers];
+}
+
+/**
+ * Compute the "virtual size" of a mip level of a texture (not accounting for texel block rounding).
+ *
+ * MAINTENANCE_TODO: Change input/output to Required<GPUExtent3DDict> for consistency.
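+ *
+ * e.g. virtualMipSize('2d', [8, 8, 1], 2) yields [2, 2, 1].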
+ */
+export function virtualMipSize(
+ dimension: GPUTextureDimension,
+ size: readonly [number, number, number],
+ mipLevel: number
+): [number, number, number] {
+ const shiftMinOne = (n: number) => Math.max(1, n >> mipLevel);
+ switch (dimension) {
+ case '1d':
+ assert(size[2] === 1);
+ return [shiftMinOne(size[0]), size[1], size[2]];
+ case '2d':
+ return [shiftMinOne(size[0]), shiftMinOne(size[1]), size[2]];
+ case '3d':
+ return [shiftMinOne(size[0]), shiftMinOne(size[1]), shiftMinOne(size[2])];
+ default:
+ unreachable();
+ }
+}
+
+/**
+ * Get the texture dimension from a view dimension, in order to create a compatible texture for a given
+ * view dimension.
+ */
+export function getTextureDimensionFromView(viewDimension: GPUTextureViewDimension) {
+ switch (viewDimension) {
+ case '1d':
+ return '1d';
+ case '2d':
+ case '2d-array':
+ case 'cube':
+ case 'cube-array':
+ return '2d';
+ case '3d':
+ return '3d';
+ default:
+ unreachable();
+ }
+}
+
+/** Returns the possible valid view dimensions for a given texture dimension. */
+export function viewDimensionsForTextureDimension(textureDimension: GPUTextureDimension) {
+ switch (textureDimension) {
+ case '1d':
+ return ['1d'] as const;
+ case '2d':
+ return ['2d', '2d-array', 'cube', 'cube-array'] as const;
+ case '3d':
+ return ['3d'] as const;
+ }
+}
+
+/** Returns the default view dimension for a given texture descriptor. */
+export function defaultViewDimensionsForTexture(textureDescriptor: Readonly<GPUTextureDescriptor>) {
+ switch (textureDescriptor.dimension) {
+ case '1d':
+ return '1d';
+ case '2d': {
+ const sizeDict = reifyExtent3D(textureDescriptor.size);
+ return sizeDict.depthOrArrayLayers > 1 ? '2d-array' : '2d';
+ }
+ case '3d':
+ return '3d';
+ default:
+ unreachable();
+ }
+}
+
+/** Reifies the optional fields of `GPUTextureDescriptor`.
+ * MAINTENANCE_TODO: viewFormats should not be omitted here, but it seems likely that the
+ * @webgpu/types definition will have to change before we can include it again.
+ */
+export function reifyTextureDescriptor(
+ desc: Readonly<GPUTextureDescriptor>
+): Required<Omit<GPUTextureDescriptor, 'label' | 'viewFormats'>> {
+ return { dimension: '2d' as const, mipLevelCount: 1, sampleCount: 1, ...desc };
+}
+
+/** Reifies the optional fields of `GPUTextureViewDescriptor` (given a `GPUTextureDescriptor`). */
+export function reifyTextureViewDescriptor(
+ textureDescriptor: Readonly<GPUTextureDescriptor>,
+ view: Readonly<GPUTextureViewDescriptor>
+): Required<Omit<GPUTextureViewDescriptor, 'label'>> {
+ const texture = reifyTextureDescriptor(textureDescriptor);
+
+ // IDL defaulting
+
+ const baseMipLevel = view.baseMipLevel ?? 0;
+ const baseArrayLayer = view.baseArrayLayer ?? 0;
+ const aspect = view.aspect ?? 'all';
+
+ // Spec defaulting
+
+ const format = view.format ?? texture.format;
+ const mipLevelCount = view.mipLevelCount ?? texture.mipLevelCount - baseMipLevel;
+ const dimension = view.dimension ?? defaultViewDimensionsForTexture(texture);
+
+ let arrayLayerCount = view.arrayLayerCount;
+ if (arrayLayerCount === undefined) {
+ if (dimension === '2d-array' || dimension === 'cube-array') {
+ arrayLayerCount = reifyExtent3D(texture.size).depthOrArrayLayers - baseArrayLayer;
+ } else if (dimension === 'cube') {
+ arrayLayerCount = 6;
+ } else {
+ arrayLayerCount = 1;
+ }
+ }
+
+ return {
+ format,
+ dimension,
+ aspect,
+ baseMipLevel,
+ mipLevelCount,
+ baseArrayLayer,
+ arrayLayerCount,
+ };
+}
+
+/**
+ * Get generator of all the coordinates in a subrect.
+ * @param subrectOrigin - Subrect origin
+ * @param subrectSize - Subrect size
+ */
+export function* fullSubrectCoordinates(
+ subrectOrigin: Required<GPUOrigin3DDict>,
+ subrectSize: Required<GPUExtent3DDict>
+): Generator<Required<GPUOrigin3DDict>> {
+ for (let z = subrectOrigin.z; z < subrectOrigin.z + subrectSize.depthOrArrayLayers; ++z) {
+ for (let y = subrectOrigin.y; y < subrectOrigin.y + subrectSize.height; ++y) {
+ for (let x = subrectOrigin.x; x < subrectOrigin.x + subrectSize.width; ++x) {
+ yield { x, y, z };
+ }
+ }
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/data_generation.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/data_generation.ts
new file mode 100644
index 0000000000..7ad7d30e08
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/data_generation.ts
@@ -0,0 +1,83 @@
+/**
+ * A helper class that generates ranges of dummy data for buffer or texture operations
+ * efficiently. Tries to minimize allocations and data updates.
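+ *
+ * Example (illustrative):
+ * ```
+ * const gen = new DataArrayGenerator();
+ * const view = gen.generateView(16);           // 16 bytes of deterministic, non-zero data
+ * const copy = gen.generateAndCopyView(16, 8); // same pattern, with a different start value
+ * ```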
+ */
+export class DataArrayGenerator {
+ private dataBuffer = new Uint8Array(256);
+
+ private lastOffset = 0;
+ private lastStart = 0;
+ private lastByteSize = 0;
+
+ /** Find the nearest power of two greater than or equal to the input value. */
+ private nextPowerOfTwo(value: number) {
+ return 1 << (32 - Math.clz32(value - 1));
+ }
+
+ private generateData(byteSize: number, start: number = 0, offset: number = 0) {
+ const prevSize = this.dataBuffer.length;
+
+ if (prevSize < byteSize) {
+ // If the requested data is larger than the allocated buffer, reallocate it to a buffer large
+ // enough to handle the new request.
+ const newData = new Uint8Array(this.nextPowerOfTwo(byteSize));
+
+ if (this.lastOffset === offset && this.lastStart === start && this.lastByteSize) {
+ // Do a fast copy of any previous data that was generated.
+ newData.set(this.dataBuffer);
+ }
+
+ this.dataBuffer = newData;
+ } else if (this.lastOffset < offset) {
+ // Ensure all values up to the offset are zeroed out.
+ this.dataBuffer.fill(0, this.lastOffset, offset);
+ }
+
+ // If the offset or start values have changed, the whole data range needs to be regenerated.
+ if (this.lastOffset !== offset || this.lastStart !== start) {
+ this.lastByteSize = 0;
+ }
+
+ // Generate any new values that are required
+ if (this.lastByteSize < byteSize) {
+ for (let i = this.lastByteSize; i < byteSize - offset; ++i) {
+ this.dataBuffer[i + offset] = ((i ** 3 + i + start) % 251) + 1; // Ensure data is always non-zero
+ }
+
+ this.lastOffset = offset;
+ this.lastStart = start;
+ this.lastByteSize = byteSize;
+ }
+ }
+
+ /**
+ * Returns a new view into the generated data that's the correct length. Because this is a view,
+ * previously returned views from the same generator will have their values overwritten as well.
+ * @param {number} byteSize - Number of bytes the returned view should contain.
+ * @param {number} [start] - The value of the first element generated in the view.
+ * @param {number} [offset] - Offset of the generated data within the view. Preceding values will be 0.
+ * @returns {Uint8Array} A new Uint8Array view into the generated data.
+ */
+ generateView(byteSize: number, start: number = 0, offset: number = 0): Uint8Array {
+ this.generateData(byteSize, start, offset);
+
+ if (this.dataBuffer.length === byteSize) {
+ return this.dataBuffer;
+ }
+ return new Uint8Array(this.dataBuffer.buffer, 0, byteSize);
+ }
+
+ /**
+ * Returns a copy of the generated data. Note that this still changes the underlying buffer, so
+ * any previously generated views will still be overwritten, but the returned copy won't reflect
+ * future generate* calls.
+ * @param {number} byteSize - Number of bytes the returned array should contain.
+ * @param {number} [start] - The value of the first element generated in the view.
+ * @param {number} [offset] - Offset of the generated data within the view. Preceding values will be 0.
+ * @returns {Uint8Array} A new Uint8Array copy of the generated data.
+ */
+ generateAndCopyView(byteSize: number, start: number = 0, offset: number = 0) {
+ this.generateData(byteSize, start, offset);
+ return this.dataBuffer.slice(0, byteSize);
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/layout.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/layout.ts
new file mode 100644
index 0000000000..8e6b564da6
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/layout.ts
@@ -0,0 +1,371 @@
+import { assert, memcpy } from '../../../common/util/util.js';
+import {
+ kTextureFormatInfo,
+ resolvePerAspectFormat,
+ SizedTextureFormat,
+ EncodableTextureFormat,
+} from '../../format_info.js';
+import { align } from '../math.js';
+import { reifyExtent3D } from '../unions.js';
+
+import { physicalMipSize, virtualMipSize } from './base.js';
+
+/** The minimum `bytesPerRow` alignment, per spec. */
+export const kBytesPerRowAlignment = 256;
+/** The minimum buffer copy alignment, per spec. */
+export const kBufferCopyAlignment = 4;
+
+/**
+ * Overridable layout options for {@link getTextureCopyLayout}.
+ */
+export interface LayoutOptions {
+ mipLevel: number;
+ bytesPerRow?: number;
+ rowsPerImage?: number;
+ aspect?: GPUTextureAspect;
+}
+
+const kDefaultLayoutOptions = {
+ mipLevel: 0,
+ bytesPerRow: undefined,
+ rowsPerImage: undefined,
+ aspect: 'all' as const,
+};
+
+/** The info returned by {@link getTextureSubCopyLayout}. */
+export interface TextureSubCopyLayout {
+ bytesPerBlock: number;
+ byteLength: number;
+ /** Number of bytes in each row, not accounting for {@link kBytesPerRowAlignment}. */
+ minBytesPerRow: number;
+ /**
+ * Actual value of bytesPerRow, defaulting to `align(minBytesPerRow, kBytesPerRowAlignment}`
+ * if not overridden.
+ */
+ bytesPerRow: number;
+ /** Actual value of rowsPerImage, defaulting to `mipSize[1]` if not overridden. */
+ rowsPerImage: number;
+}
+
+/** The info returned by {@link getTextureCopyLayout}. */
+export interface TextureCopyLayout extends TextureSubCopyLayout {
+ mipSize: [number, number, number];
+}
+
+/**
+ * Computes layout information for a copy of the whole subresource at `mipLevel` of a GPUTexture
+ * of size `baseSize` with the provided `format` and `dimension`.
+ *
+ * Computes default values for `bytesPerRow` and `rowsPerImage` if not specified.
+ *
+ * MAINTENANCE_TODO: Change input/output to Required<GPUExtent3DDict> for consistency.
+ */
+export function getTextureCopyLayout(
+ format: GPUTextureFormat,
+ dimension: GPUTextureDimension,
+ baseSize: readonly [number, number, number],
+ { mipLevel, bytesPerRow, rowsPerImage, aspect }: LayoutOptions = kDefaultLayoutOptions
+): TextureCopyLayout {
+ const mipSize = physicalMipSize(
+ { width: baseSize[0], height: baseSize[1], depthOrArrayLayers: baseSize[2] },
+ format,
+ dimension,
+ mipLevel
+ );
+
+ const layout = getTextureSubCopyLayout(format, mipSize, { bytesPerRow, rowsPerImage, aspect });
+ return { ...layout, mipSize: [mipSize.width, mipSize.height, mipSize.depthOrArrayLayers] };
+}
+
+/**
+ * Computes layout information for a copy of size `copySize` to/from a GPUTexture with the provided
+ * `format`.
+ *
+ * Computes default values for `bytesPerRow` and `rowsPerImage` if not specified.
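+ *
+ * Example (illustrative, traced by hand): for 'rgba8unorm' and copySize [4, 4, 1] this yields
+ * ```
+ * { bytesPerBlock: 4, minBytesPerRow: 16, bytesPerRow: 256, rowsPerImage: 4, byteLength: 784 }
+ * ```
+ * where byteLength = 256 * 3 + 16, since the last row is not padded out to bytesPerRow.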
+ */
+export function getTextureSubCopyLayout(
+ format: GPUTextureFormat,
+ copySize: GPUExtent3D,
+ {
+ bytesPerRow,
+ rowsPerImage,
+ aspect = 'all' as const,
+ }: {
+ readonly bytesPerRow?: number;
+ readonly rowsPerImage?: number;
+ readonly aspect?: GPUTextureAspect;
+ } = {}
+): TextureSubCopyLayout {
+ format = resolvePerAspectFormat(format, aspect);
+ const { blockWidth, blockHeight, bytesPerBlock } = kTextureFormatInfo[format];
+ assert(bytesPerBlock !== undefined);
+
+ const copySize_ = reifyExtent3D(copySize);
+ assert(
+ copySize_.width > 0 && copySize_.height > 0 && copySize_.depthOrArrayLayers > 0,
+ 'not implemented for empty copySize'
+ );
+ assert(
+ copySize_.width % blockWidth === 0 && copySize_.height % blockHeight === 0,
+ () =>
+ `copySize (${copySize_.width},${copySize_.height}) must be a multiple of the block size (${blockWidth},${blockHeight})`
+ );
+ const copySizeBlocks = {
+ width: copySize_.width / blockWidth,
+ height: copySize_.height / blockHeight,
+ depthOrArrayLayers: copySize_.depthOrArrayLayers,
+ };
+
+ const minBytesPerRow = copySizeBlocks.width * bytesPerBlock;
+ const alignedMinBytesPerRow = align(minBytesPerRow, kBytesPerRowAlignment);
+ if (bytesPerRow !== undefined) {
+ assert(bytesPerRow >= alignedMinBytesPerRow);
+ assert(bytesPerRow % kBytesPerRowAlignment === 0);
+ } else {
+ bytesPerRow = alignedMinBytesPerRow;
+ }
+
+ if (rowsPerImage !== undefined) {
+ assert(rowsPerImage >= copySizeBlocks.height);
+ } else {
+ rowsPerImage = copySizeBlocks.height;
+ }
+
+ const bytesPerSlice = bytesPerRow * rowsPerImage;
+ const sliceSize =
+ bytesPerRow * (copySizeBlocks.height - 1) + bytesPerBlock * copySizeBlocks.width;
+ const byteLength = bytesPerSlice * (copySizeBlocks.depthOrArrayLayers - 1) + sliceSize;
+
+ return {
+ bytesPerBlock,
+ byteLength: align(byteLength, kBufferCopyAlignment),
+ minBytesPerRow,
+ bytesPerRow,
+ rowsPerImage,
+ };
+}
+
+/**
+ * Fill an ArrayBuffer with the linear-memory representation of a solid-color
+ * texture where every texel is the byte sequence `texelValue`.
+ * Preserves the contents of `outputBuffer` that fall in the "padding" space between image rows.
+ *
+ * Effectively emulates a copyTextureToBuffer from a solid-color texture to a buffer.
+ */
+export function fillTextureDataWithTexelValue(
+ texelValue: ArrayBuffer,
+ format: EncodableTextureFormat,
+ dimension: GPUTextureDimension,
+ outputBuffer: ArrayBuffer,
+ size: [number, number, number],
+ options: LayoutOptions = kDefaultLayoutOptions
+): void {
+ const { blockWidth, blockHeight, bytesPerBlock } = kTextureFormatInfo[format];
+ // Block formats are not handled correctly below.
+ assert(blockWidth === 1);
+ assert(blockHeight === 1);
+
+ assert(bytesPerBlock === texelValue.byteLength, 'texelValue must be of size bytesPerBlock');
+
+ const { byteLength, rowsPerImage, bytesPerRow } = getTextureCopyLayout(
+ format,
+ dimension,
+ size,
+ options
+ );
+
+ assert(byteLength <= outputBuffer.byteLength);
+
+ const mipSize = virtualMipSize(dimension, size, options.mipLevel);
+
+ const outputTexelValueBytes = new Uint8Array(outputBuffer);
+ for (let slice = 0; slice < mipSize[2]; ++slice) {
+ for (let row = 0; row < mipSize[1]; row += blockHeight) {
+ for (let col = 0; col < mipSize[0]; col += blockWidth) {
+ const byteOffset =
+ slice * rowsPerImage * bytesPerRow + row * bytesPerRow + col * texelValue.byteLength;
+ memcpy({ src: texelValue }, { dst: outputTexelValueBytes, start: byteOffset });
+ }
+ }
+ }
+}
+
+/**
+ * Create a `COPY_SRC` GPUBuffer containing the linear-memory representation of a solid-color
+ * texture where every texel is the byte sequence `texelValue`.
+ */
+export function createTextureUploadBuffer(
+ texelValue: ArrayBuffer,
+ device: GPUDevice,
+ format: EncodableTextureFormat,
+ dimension: GPUTextureDimension,
+ size: [number, number, number],
+ options: LayoutOptions = kDefaultLayoutOptions
+): {
+ buffer: GPUBuffer;
+ bytesPerRow: number;
+ rowsPerImage: number;
+} {
+ const { byteLength, bytesPerRow, rowsPerImage, bytesPerBlock } = getTextureCopyLayout(
+ format,
+ dimension,
+ size,
+ options
+ );
+
+ const buffer = device.createBuffer({
+ mappedAtCreation: true,
+ size: byteLength,
+ usage: GPUBufferUsage.COPY_SRC,
+ });
+ const mapping = buffer.getMappedRange();
+
+ assert(texelValue.byteLength === bytesPerBlock);
+ fillTextureDataWithTexelValue(texelValue, format, dimension, mapping, size, options);
+ buffer.unmap();
+
+ return {
+ buffer,
+ bytesPerRow,
+ rowsPerImage,
+ };
+}
+
+export type ImageCopyType = 'WriteTexture' | 'CopyB2T' | 'CopyT2B';
+export const kImageCopyTypes: readonly ImageCopyType[] = [
+ 'WriteTexture',
+ 'CopyB2T',
+ 'CopyT2B',
+] as const;
+
+/**
+ * Computes `bytesInACompleteRow` (as defined by the WebGPU spec) for image copies (B2T/T2B/writeTexture).
+ */
+export function bytesInACompleteRow(copyWidth: number, format: SizedTextureFormat): number {
+ const info = kTextureFormatInfo[format];
+ assert(copyWidth % info.blockWidth === 0);
+ return (info.bytesPerBlock * copyWidth) / info.blockWidth;
+}
+
+function validateBytesPerRow({
+ bytesPerRow,
+ bytesInLastRow,
+ sizeInBlocks,
+}: {
+ bytesPerRow: number | undefined;
+ bytesInLastRow: number;
+ sizeInBlocks: Required<GPUExtent3DDict>;
+}) {
+ // If specified, layout.bytesPerRow must be greater than or equal to bytesInLastRow.
+ if (bytesPerRow !== undefined && bytesPerRow < bytesInLastRow) {
+ return false;
+ }
+ // If heightInBlocks > 1, layout.bytesPerRow must be specified.
+ // If copyExtent.depthOrArrayLayers > 1, layout.bytesPerRow and layout.rowsPerImage must be specified.
+ if (
+ bytesPerRow === undefined &&
+ (sizeInBlocks.height > 1 || sizeInBlocks.depthOrArrayLayers > 1)
+ ) {
+ return false;
+ }
+ return true;
+}
+
+function validateRowsPerImage({
+ rowsPerImage,
+ sizeInBlocks,
+}: {
+ rowsPerImage: number | undefined;
+ sizeInBlocks: Required<GPUExtent3DDict>;
+}) {
+ // If specified, layout.rowsPerImage must be greater than or equal to heightInBlocks.
+ if (rowsPerImage !== undefined && rowsPerImage < sizeInBlocks.height) {
+ return false;
+ }
+ // If copyExtent.depthOrArrayLayers > 1, layout.bytesPerRow and layout.rowsPerImage must be specified.
+ if (rowsPerImage === undefined && sizeInBlocks.depthOrArrayLayers > 1) {
+ return false;
+ }
+ return true;
+}
+
+interface DataBytesForCopyArgs {
+ layout: GPUImageDataLayout;
+ format: SizedTextureFormat;
+ copySize: Readonly<GPUExtent3DDict> | readonly number[];
+ method: ImageCopyType;
+}
+
+/**
+ * Validate a copy and compute the number of bytes it needs. Throws if the copy is invalid.
+ */
+export function dataBytesForCopyOrFail(args: DataBytesForCopyArgs): number {
+ const { minDataSizeOrOverestimate, copyValid } = dataBytesForCopyOrOverestimate(args);
+ assert(copyValid, 'copy was invalid');
+ return minDataSizeOrOverestimate;
+}
+
+/**
+ * Validate a copy and compute the number of bytes it needs. If the copy is invalid, conservatively
+ * overestimates the number of bytes that could be needed instead, so that validation tests targeting
+ * other errors do not also trip "buffer too small" validation errors.
+ */
+export function dataBytesForCopyOrOverestimate({
+ layout,
+ format,
+ copySize: copySize_,
+ method,
+}: DataBytesForCopyArgs): { minDataSizeOrOverestimate: number; copyValid: boolean } {
+ const copyExtent = reifyExtent3D(copySize_);
+
+ const info = kTextureFormatInfo[format];
+ assert(copyExtent.width % info.blockWidth === 0);
+ assert(copyExtent.height % info.blockHeight === 0);
+ const sizeInBlocks = {
+ width: copyExtent.width / info.blockWidth,
+ height: copyExtent.height / info.blockHeight,
+ depthOrArrayLayers: copyExtent.depthOrArrayLayers,
+ } as const;
+ const bytesInLastRow = sizeInBlocks.width * info.bytesPerBlock;
+
+ let valid = true;
+ const offset = layout.offset ?? 0;
+ if (method !== 'WriteTexture') {
+ if (offset % info.bytesPerBlock !== 0) valid = false;
+ if (layout.bytesPerRow && layout.bytesPerRow % 256 !== 0) valid = false;
+ }
+
+ let requiredBytesInCopy = 0;
+ {
+ let { bytesPerRow, rowsPerImage } = layout;
+
+ // If bytesPerRow or rowsPerImage is invalid, guess a value for the sake of various tests that
+ // don't actually care about the exact value.
+ // (In particular for validation tests that want to test invalid bytesPerRow or rowsPerImage but
+ // need to make sure the total buffer size is still big enough.)
+ if (!validateBytesPerRow({ bytesPerRow, bytesInLastRow, sizeInBlocks })) {
+ bytesPerRow = undefined;
+ valid = false;
+ }
+ if (!validateRowsPerImage({ rowsPerImage, sizeInBlocks })) {
+ rowsPerImage = undefined;
+ valid = false;
+ }
+ // Pick values for cases when (a) bpr/rpi was invalid or (b) they're validly undefined.
+ bytesPerRow ??= align(info.bytesPerBlock * sizeInBlocks.width, 256);
+ rowsPerImage ??= sizeInBlocks.height;
+
+ if (copyExtent.depthOrArrayLayers > 1) {
+ const bytesPerImage = bytesPerRow * rowsPerImage;
+ const bytesBeforeLastImage = bytesPerImage * (copyExtent.depthOrArrayLayers - 1);
+ requiredBytesInCopy += bytesBeforeLastImage;
+ }
+ if (copyExtent.depthOrArrayLayers > 0) {
+ if (sizeInBlocks.height > 1) requiredBytesInCopy += bytesPerRow * (sizeInBlocks.height - 1);
+ if (sizeInBlocks.height > 0) requiredBytesInCopy += bytesInLastRow;
+ }
+ }
+
+ return { minDataSizeOrOverestimate: offset + requiredBytesInCopy, copyValid: valid };
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/subresource.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/subresource.ts
new file mode 100644
index 0000000000..b8d6e3eb21
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/subresource.ts
@@ -0,0 +1,68 @@
+/** A range of indices expressed as `{ begin, count }`. */
+export interface BeginCountRange {
+ begin: number;
+ count: number;
+}
+
+/** A range of indices, expressed as `{ begin, end }`. */
+export interface BeginEndRange {
+ begin: number;
+ end: number;
+}
+
+function endOfRange(r: BeginEndRange | BeginCountRange): number {
+ return 'count' in r ? r.begin + r.count : r.end;
+}
+
+function* rangeAsIterator(r: BeginEndRange | BeginCountRange): Generator<number> {
+ for (let i = r.begin; i < endOfRange(r); ++i) {
+ yield i;
+ }
+}
+
+/**
+ * Represents a range of subresources of a single-plane texture:
+ * a min/max mip level and min/max array layer.
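+ *
+ * Example (illustrative):
+ * ```
+ * const range = new SubresourceRange({
+ *   mipRange: { begin: 1, count: 2 },
+ *   layerRange: { begin: 0, end: 3 },
+ * });
+ * for (const { level, layer } of range.each()) {
+ *   // visits mip levels 1..2 crossed with array layers 0..2
+ * }
+ * ```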
+ */
+export class SubresourceRange {
+ readonly mipRange: BeginEndRange;
+ readonly layerRange: BeginEndRange;
+
+ constructor(subresources: {
+ mipRange: BeginEndRange | BeginCountRange;
+ layerRange: BeginEndRange | BeginCountRange;
+ }) {
+ this.mipRange = {
+ begin: subresources.mipRange.begin,
+ end: endOfRange(subresources.mipRange),
+ };
+ this.layerRange = {
+ begin: subresources.layerRange.begin,
+ end: endOfRange(subresources.layerRange),
+ };
+ }
+
+ /**
+ * Iterates over the "rectangle" of `{ level, layer }` pairs represented by the range.
+ */
+ *each(): Generator<{ level: number; layer: number }> {
+ for (let level = this.mipRange.begin; level < this.mipRange.end; ++level) {
+ for (let layer = this.layerRange.begin; layer < this.layerRange.end; ++layer) {
+ yield { level, layer };
+ }
+ }
+ }
+
+ /**
+ * Iterates over the mip levels represented by the range, each level including an iterator
+ * over the array layers at that level.
+ */
+ *mipLevels(): Generator<{ level: number; layers: Generator<number> }> {
+ for (let level = this.mipRange.begin; level < this.mipRange.end; ++level) {
+ yield {
+ level,
+ layers: rangeAsIterator(this.layerRange),
+ };
+ }
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texel_data.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texel_data.spec.ts
new file mode 100644
index 0000000000..20f075e6f2
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texel_data.spec.ts
@@ -0,0 +1,334 @@
+export const description = 'Test helpers for texel data produce the expected data in the shader';
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { assert } from '../../../common/util/util.js';
+import {
+ kEncodableTextureFormats,
+ kTextureFormatInfo,
+ EncodableTextureFormat,
+} from '../../format_info.js';
+import { GPUTest } from '../../gpu_test.js';
+
+import {
+ kTexelRepresentationInfo,
+ getSingleDataType,
+ getComponentReadbackTraits,
+} from './texel_data.js';
+
+export const g = makeTestGroup(GPUTest);
+
+function doTest(
+ t: GPUTest & {
+ params: {
+ format: EncodableTextureFormat;
+ componentData: {
+ R?: number;
+ G?: number;
+ B?: number;
+ A?: number;
+ };
+ };
+ }
+) {
+ const { format } = t.params;
+ const componentData = t.params.componentData;
+
+ const rep = kTexelRepresentationInfo[format];
+ const texelData = rep.pack(componentData);
+ const texture = t.device.createTexture({
+ format,
+ size: [1, 1, 1],
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.TEXTURE_BINDING,
+ });
+
+ t.device.queue.writeTexture(
+ { texture },
+ texelData,
+ {
+ bytesPerRow: texelData.byteLength,
+ },
+ [1]
+ );
+
+ const { ReadbackTypedArray, shaderType } = getComponentReadbackTraits(getSingleDataType(format));
+
+ const shader = `
+ @group(0) @binding(0) var tex : texture_2d<${shaderType}>;
+
+ struct Output {
+ ${rep.componentOrder.map(C => `result${C} : ${shaderType},`).join('\n')}
+ };
+ @group(0) @binding(1) var<storage, read_write> output : Output;
+
+ @compute @workgroup_size(1)
+ fn main() {
+ var texel : vec4<${shaderType}> = textureLoad(tex, vec2<i32>(0, 0), 0);
+ ${rep.componentOrder.map(C => `output.result${C} = texel.${C.toLowerCase()};`).join('\n')}
+ return;
+ }`;
+
+ const pipeline = t.device.createComputePipeline({
+ layout: 'auto',
+ compute: {
+ module: t.device.createShaderModule({
+ code: shader,
+ }),
+ entryPoint: 'main',
+ },
+ });
+
+ const outputBuffer = t.device.createBuffer({
+ size: rep.componentOrder.length * 4,
+ usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
+ });
+
+ const bindGroup = t.device.createBindGroup({
+ layout: pipeline.getBindGroupLayout(0),
+ entries: [
+ {
+ binding: 0,
+ resource: texture.createView(),
+ },
+ {
+ binding: 1,
+ resource: {
+ buffer: outputBuffer,
+ },
+ },
+ ],
+ });
+
+ const encoder = t.device.createCommandEncoder();
+ const pass = encoder.beginComputePass();
+ pass.setPipeline(pipeline);
+ pass.setBindGroup(0, bindGroup);
+ pass.dispatchWorkgroups(1);
+ pass.end();
+ t.device.queue.submit([encoder.finish()]);
+
+ t.expectGPUBufferValuesEqual(
+ outputBuffer,
+ new ReadbackTypedArray(
+ rep.componentOrder.map(c => {
+ const value = rep.decode(componentData)[c];
+ assert(value !== undefined);
+ return value;
+ })
+ )
+ );
+}
+
+// Make a test parameter by mapping a format and each component to a texel component
+// data value.
+function makeParam(
+ format: EncodableTextureFormat,
+ fn: (bitLength: number, index: number) => number
+) {
+ const rep = kTexelRepresentationInfo[format];
+ return {
+ R: rep.componentInfo.R ? fn(rep.componentInfo.R.bitLength, 0) : undefined,
+ G: rep.componentInfo.G ? fn(rep.componentInfo.G.bitLength, 1) : undefined,
+ B: rep.componentInfo.B ? fn(rep.componentInfo.B.bitLength, 2) : undefined,
+ A: rep.componentInfo.A ? fn(rep.componentInfo.A.bitLength, 3) : undefined,
+ };
+}
+
+g.test('unorm_texel_data_in_shader')
+ .params(u =>
+ u
+ .combine('format', kEncodableTextureFormats)
+ .filter(({ format }) => {
+ const info = kTextureFormatInfo[format];
+ return !!info.color && info.color.copyDst && getSingleDataType(format) === 'unorm';
+ })
+ .beginSubcases()
+ .expand('componentData', ({ format }) => {
+ const max = (bitLength: number) => Math.pow(2, bitLength) - 1;
+ return [
+ // Test extrema
+ makeParam(format, () => 0),
+ makeParam(format, bitLength => max(bitLength)),
+
+ // Test a middle value
+ makeParam(format, bitLength => Math.floor(max(bitLength) / 2)),
+
+ // Test mixed values
+ makeParam(format, (bitLength, i) => {
+ const offset = [0.13, 0.63, 0.42, 0.89];
+ return Math.floor(offset[i] * max(bitLength));
+ }),
+ ];
+ })
+ )
+ .beforeAllSubcases(t => {
+ t.skipIfTextureFormatNotSupported(t.params.format);
+ })
+ .fn(doTest);
+
+g.test('snorm_texel_data_in_shader')
+ .params(u =>
+ u
+ .combine('format', kEncodableTextureFormats)
+ .filter(({ format }) => {
+ const info = kTextureFormatInfo[format];
+ return !!info.color && info.color.copyDst && getSingleDataType(format) === 'snorm';
+ })
+ .beginSubcases()
+ .expand('componentData', ({ format }) => {
+ const max = (bitLength: number) => Math.pow(2, bitLength - 1) - 1;
+ return [
+ // Test extrema
+ makeParam(format, () => 0),
+ makeParam(format, bitLength => max(bitLength)),
+ makeParam(format, bitLength => -max(bitLength)),
+ makeParam(format, bitLength => -max(bitLength) - 1),
+
+ // Test a middle value
+ makeParam(format, bitLength => Math.floor(max(bitLength) / 2)),
+
+ // Test mixed values
+ makeParam(format, (bitLength, i) => {
+ const offset = [0.13, 0.63, 0.42, 0.89];
+ const range = 2 * max(bitLength);
+ return -max(bitLength) + Math.floor(offset[i] * range);
+ }),
+ ];
+ })
+ )
+ .fn(doTest);
+
+g.test('uint_texel_data_in_shader')
+ .params(u =>
+ u
+ .combine('format', kEncodableTextureFormats)
+ .filter(({ format }) => {
+ const info = kTextureFormatInfo[format];
+ return !!info.color && info.color.copyDst && getSingleDataType(format) === 'uint';
+ })
+ .beginSubcases()
+ .expand('componentData', ({ format }) => {
+ const max = (bitLength: number) => Math.pow(2, bitLength) - 1;
+ return [
+ // Test extrema
+ makeParam(format, () => 0),
+ makeParam(format, bitLength => max(bitLength)),
+
+ // Test a middle value
+ makeParam(format, bitLength => Math.floor(max(bitLength) / 2)),
+
+ // Test mixed values
+ makeParam(format, (bitLength, i) => {
+ const offset = [0.13, 0.63, 0.42, 0.89];
+ return Math.floor(offset[i] * max(bitLength));
+ }),
+ ];
+ })
+ )
+ .fn(doTest);
+
+g.test('sint_texel_data_in_shader')
+ .params(u =>
+ u
+ .combine('format', kEncodableTextureFormats)
+ .filter(({ format }) => {
+ const info = kTextureFormatInfo[format];
+ return !!info.color && info.color.copyDst && getSingleDataType(format) === 'sint';
+ })
+ .beginSubcases()
+ .expand('componentData', ({ format }) => {
+ const max = (bitLength: number) => Math.pow(2, bitLength - 1) - 1;
+ return [
+ // Test extrema
+ makeParam(format, () => 0),
+ makeParam(format, bitLength => max(bitLength)),
+ makeParam(format, bitLength => -max(bitLength) - 1),
+
+ // Test a middle value
+ makeParam(format, bitLength => Math.floor(max(bitLength) / 2)),
+
+ // Test mixed values
+ makeParam(format, (bitLength, i) => {
+ const offset = [0.13, 0.63, 0.42, 0.89];
+ const range = 2 * max(bitLength);
+ return -max(bitLength) + Math.floor(offset[i] * range);
+ }),
+ ];
+ })
+ )
+ .fn(doTest);
+
+g.test('float_texel_data_in_shader')
+ .desc(
+ `
+TODO: Test NaN, Infinity, -Infinity [1]`
+ )
+ .params(u =>
+ u
+ .combine('format', kEncodableTextureFormats)
+ .filter(({ format }) => {
+ const info = kTextureFormatInfo[format];
+ return !!info.color && info.color.copyDst && getSingleDataType(format) === 'float';
+ })
+ .beginSubcases()
+ .expand('componentData', ({ format }) => {
+ return [
+ // Test extrema
+ makeParam(format, () => 0),
+
+ // [1]: Test NaN, Infinity, -Infinity
+
+ // Test some values
+ makeParam(format, () => 0.1199951171875),
+ makeParam(format, () => 1.4072265625),
+ makeParam(format, () => 24928),
+ makeParam(format, () => -0.1319580078125),
+ makeParam(format, () => -323.25),
+ makeParam(format, () => -7440),
+
+ // Test mixed values
+ makeParam(format, (bitLength, i) => {
+ return [24896, -0.1319580078125, -323.25, -234.375][i];
+ }),
+ ];
+ })
+ )
+ .fn(doTest);
+
+g.test('ufloat_texel_data_in_shader')
+ .desc(
+ `
+TODO: Test NaN, Infinity [1]`
+ )
+ .params(u =>
+ u
+ .combine('format', kEncodableTextureFormats)
+ .filter(({ format }) => {
+ const info = kTextureFormatInfo[format];
+ return !!info.color && info.color.copyDst && getSingleDataType(format) === 'ufloat';
+ })
+ .beginSubcases()
+ .expand('componentData', ({ format }) => {
+ return [
+ // Test extrema
+ makeParam(format, () => 0),
+
+ // [1]: Test NaN, Infinity
+
+ // Test some values
+ makeParam(format, () => 0.119140625),
+ makeParam(format, () => 1.40625),
+ makeParam(format, () => 24896),
+
+ // Test scattered mixed values
+ makeParam(format, (bitLength, i) => {
+ return [24896, 1.40625, 0.119140625, 0.23095703125][i];
+ }),
+
+ // Test mixed values that are close in magnitude.
+ makeParam(format, (bitLength, i) => {
+ return [0.1337890625, 0.17919921875, 0.119140625, 0.125][i];
+ }),
+ ];
+ })
+ )
+ .fn(doTest);
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texel_data.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texel_data.ts
new file mode 100644
index 0000000000..42490d800b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texel_data.ts
@@ -0,0 +1,980 @@
+import { assert, unreachable } from '../../../common/util/util.js';
+import { UncompressedTextureFormat, EncodableTextureFormat } from '../../format_info.js';
+import {
+ assertInIntegerRange,
+ float32ToFloatBits,
+ float32ToFloat16Bits,
+ floatAsNormalizedInteger,
+ gammaCompress,
+ gammaDecompress,
+ normalizedIntegerAsFloat,
+ packRGB9E5UFloat,
+ floatBitsToNumber,
+ float16BitsToFloat32,
+ floatBitsToNormalULPFromZero,
+ kFloat32Format,
+ kFloat16Format,
+ kUFloat9e5Format,
+ numberToFloat32Bits,
+ float32BitsToNumber,
+ numberToFloatBits,
+ ufloatM9E5BitsToNumber,
+} from '../conversion.js';
+import { clamp, signExtend } from '../math.js';
+
+/** A component of a texture format: R, G, B, A, Depth, or Stencil. */
+export const enum TexelComponent {
+ R = 'R',
+ G = 'G',
+ B = 'B',
+ A = 'A',
+ Depth = 'Depth',
+ Stencil = 'Stencil',
+}
+
+/** Arbitrary data, per component of a texel format. */
+export type PerTexelComponent<T> = { [c in TexelComponent]?: T };
+
+/** How a component is encoded in its bit range of a texel format. */
+export type ComponentDataType = 'uint' | 'sint' | 'unorm' | 'snorm' | 'float' | 'ufloat' | null;
+
+/**
+ * Maps a set of texel component values to a new set of component values
+ * @param {PerTexelComponent<number>} components - The input components.
+ * @returns {PerTexelComponent<number>} The new output components.
+ */
+type ComponentMapFn = (components: PerTexelComponent<number>) => PerTexelComponent<number>;
+
+/**
+ * Packs component values as an ArrayBuffer
+ * @param {PerTexelComponent<number>} components - The input components.
+ * @returns {ArrayBuffer} The packed data.
+ */
+type ComponentPackFn = (components: PerTexelComponent<number>) => ArrayBuffer;
+
+/** Unpacks component values from a Uint8Array */
+type ComponentUnpackFn = (data: Uint8Array) => PerTexelComponent<number>;
+
+/**
+ * Create a PerTexelComponent object filled with the same value for all components.
+ * @param {TexelComponent[]} components - The component names.
+ * @param {T} value - The value to assign to each component.
+ * @returns {PerTexelComponent<T>}
+ */
+function makePerTexelComponent<T>(components: TexelComponent[], value: T): PerTexelComponent<T> {
+ const values: PerTexelComponent<T> = {};
+ for (const c of components) {
+ values[c] = value;
+ }
+ return values;
+}
+
+/**
+ * Create a function which clones a `PerTexelComponent<number>` and then applies the
+ * function `fn` to each component of `components`.
+ * @param {(value: number) => number} fn - The mapping function to apply to component values.
+ * @param {TexelComponent[]} components - The component names.
+ * @returns {ComponentMapFn} The map function which clones the input component values, and applies
+ * `fn` to each of component of `components`.
+ */
+function applyEach(fn: (value: number) => number, components: TexelComponent[]): ComponentMapFn {
+ return (values: PerTexelComponent<number>) => {
+ values = Object.assign({}, values);
+ for (const c of components) {
+ assert(values[c] !== undefined);
+ values[c] = fn(values[c]!);
+ }
+ return values;
+ };
+}
+
+/**
+ * A `ComponentMapFn` for encoding sRGB.
+ * @param {PerTexelComponent<number>} components - The input component values.
+ * @returns {PerTexelComponent<number>} Gamma-compressed copy of `components`.
+ */
+const encodeSRGB: ComponentMapFn = components => {
+ assert(
+ components.R !== undefined && components.G !== undefined && components.B !== undefined,
+ 'sRGB requires all of R, G, and B components'
+ );
+ return applyEach(gammaCompress, kRGB)(components);
+};
+
+/**
+ * A `ComponentMapFn` for decoding sRGB.
+ * @param {PerTexelComponent<number>} components - The input component values.
+ * @returns {PerTexelComponent<number>} Gamma-decompressed copy of `components`.
+ */
+const decodeSRGB: ComponentMapFn = components => {
+ components = Object.assign({}, components);
+ assert(
+ components.R !== undefined && components.G !== undefined && components.B !== undefined,
+ 'sRGB requires all of R, G, and B components'
+ );
+ return applyEach(gammaDecompress, kRGB)(components);
+};
+
+/**
+ * Makes a `ComponentMapFn` for clamping values to the specified range.
+ */
+export function makeClampToRange(format: EncodableTextureFormat): ComponentMapFn {
+ const repr = kTexelRepresentationInfo[format];
+ assert(repr.numericRange !== null, 'Format has unknown numericRange');
+ return applyEach(x => clamp(x, repr.numericRange!), repr.componentOrder);
+}
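+
+// Example (illustrative sketch): for 'r8unorm' the numeric range is [0, 1], so out-of-range
+// color values are clamped component-wise:
+//
+//   const clampR8 = makeClampToRange('r8unorm');
+//   clampR8({ R: 1.5 });   // -> { R: 1 }
+//   clampR8({ R: -0.25 }); // -> { R: 0 }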
+
+// MAINTENANCE_TODO: Look into exposing this map to the test fixture so that it can be GCed at the
+// end of each test group. That would allow for caching of larger buffers (though it's unclear how
+// often larger buffers are used by packComponents).
+const smallComponentDataViews = new Map();
+function getComponentDataView(byteLength: number): DataView {
+ if (byteLength > 32) {
+ const buffer = new ArrayBuffer(byteLength);
+ return new DataView(buffer);
+ }
+ let dataView = smallComponentDataViews.get(byteLength);
+ if (!dataView) {
+ const buffer = new ArrayBuffer(byteLength);
+ dataView = new DataView(buffer);
+ smallComponentDataViews.set(byteLength, dataView);
+ }
+ return dataView;
+}
+
+/**
+ * Helper function to pack components as an ArrayBuffer.
+ * @param {TexelComponent[]} componentOrder - The order of the component data.
+ * @param {PerTexelComponent<number>} components - The input component values.
+ * @param {number | PerTexelComponent<number>} bitLengths - The length in bits of each component.
+ * If a single number, all components are the same length, otherwise this is a dictionary of
+ * per-component bit lengths.
+ * @param {ComponentDataType | PerTexelComponent<ComponentDataType>} componentDataTypes -
+ * The type of the data in `components`. If a single value, all components have the same value.
+ * Otherwise, this is a dictionary of per-component data types.
+ * @returns {ArrayBuffer} The packed component data.
+ */
+function packComponents(
+ componentOrder: TexelComponent[],
+ components: PerTexelComponent<number>,
+ bitLengths: number | PerTexelComponent<number>,
+ componentDataTypes: ComponentDataType | PerTexelComponent<ComponentDataType>
+): ArrayBuffer {
+ let bitLengthMap;
+ let totalBitLength;
+ if (typeof bitLengths === 'number') {
+ bitLengthMap = makePerTexelComponent(componentOrder, bitLengths);
+ totalBitLength = bitLengths * componentOrder.length;
+ } else {
+ bitLengthMap = bitLengths;
+ totalBitLength = Object.entries(bitLengthMap).reduce((acc, [, value]) => {
+ assert(value !== undefined);
+ return acc + value;
+ }, 0);
+ }
+ assert(totalBitLength % 8 === 0);
+
+ const componentDataTypeMap =
+ typeof componentDataTypes === 'string' || componentDataTypes === null
+ ? makePerTexelComponent(componentOrder, componentDataTypes)
+ : componentDataTypes;
+
+ const dataView = getComponentDataView(totalBitLength / 8);
+ let bitOffset = 0;
+ for (const c of componentOrder) {
+ const value = components[c];
+ const type = componentDataTypeMap[c];
+ const bitLength = bitLengthMap[c];
+ assert(value !== undefined);
+ assert(type !== undefined);
+ assert(bitLength !== undefined);
+
+ const byteOffset = Math.floor(bitOffset / 8);
+ const byteLength = Math.ceil(bitLength / 8);
+ switch (type) {
+ case 'uint':
+ case 'unorm':
+ if (byteOffset === bitOffset / 8 && byteLength === bitLength / 8) {
+ switch (byteLength) {
+ case 1:
+ dataView.setUint8(byteOffset, value);
+ break;
+ case 2:
+ dataView.setUint16(byteOffset, value, true);
+ break;
+ case 4:
+ dataView.setUint32(byteOffset, value, true);
+ break;
+ default:
+ unreachable();
+ }
+ } else {
+ // Packed representations are all 32-bit and use Uint as the data type.
+        // ex.) rg11b10ufloat, rgb10a2unorm
+ switch (dataView.byteLength) {
+ case 4: {
+ const currentValue = dataView.getUint32(0, true);
+
+ let mask = 0xffffffff;
+ const bitsToClearRight = bitOffset;
+ const bitsToClearLeft = 32 - (bitLength + bitOffset);
+
+ mask = (mask >>> bitsToClearRight) << bitsToClearRight;
+ mask = (mask << bitsToClearLeft) >>> bitsToClearLeft;
+
+ const newValue = (currentValue & ~mask) | (value << bitOffset);
+
+ dataView.setUint32(0, newValue, true);
+ break;
+ }
+ default:
+ unreachable();
+ }
+ }
+ break;
+ case 'sint':
+ case 'snorm':
+ assert(byteOffset === bitOffset / 8 && byteLength === bitLength / 8);
+ switch (byteLength) {
+ case 1:
+ dataView.setInt8(byteOffset, value);
+ break;
+ case 2:
+ dataView.setInt16(byteOffset, value, true);
+ break;
+ case 4:
+ dataView.setInt32(byteOffset, value, true);
+ break;
+ default:
+ unreachable();
+ }
+ break;
+ case 'float':
+ assert(byteOffset === bitOffset / 8 && byteLength === bitLength / 8);
+ switch (byteLength) {
+ case 4:
+ dataView.setFloat32(byteOffset, value, true);
+ break;
+ default:
+ unreachable();
+ }
+ break;
+ case 'ufloat':
+ case null:
+ unreachable();
+ }
+
+ bitOffset += bitLength;
+ }
+
+ return dataView.buffer;
+}
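+
+// Worked example (illustrative sketch): packing rgb10a2-style fields as 'uint' produces a single
+// little-endian 32-bit word with R in bits 0..9, G in 10..19, B in 20..29, and A in 30..31:
+//
+//   const buf = packComponents(kRGBA, { R: 1023, G: 0, B: 0, A: 3 },
+//                              { R: 10, G: 10, B: 10, A: 2 }, 'uint');
+//   new Uint32Array(buf)[0].toString(16); // -> 'c00003ff' (reading on a little-endian host)
+//
+// Note that for small sizes the returned buffer belongs to a cached DataView (see
+// getComponentDataView above), so copy it if it must outlive the next pack call.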
+
+/**
+ * Unpack substrings of bits from a Uint8Array, e.g. [8,8,8,8] or [9,9,9,5].
+ */
+function unpackComponentsBits(
+ componentOrder: TexelComponent[],
+ byteView: Uint8Array,
+ bitLengths: number | PerTexelComponent<number>
+): PerTexelComponent<number> {
+ const components = makePerTexelComponent(componentOrder, 0);
+
+ let bitLengthMap;
+ let totalBitLength;
+ if (typeof bitLengths === 'number') {
+ let index = 0;
+ // Optimized cases for when the bit lengths are all a well aligned value.
+ switch (bitLengths) {
+ case 8:
+ for (const c of componentOrder) {
+ components[c] = byteView[index++];
+ }
+ return components;
+ case 16: {
+ const shortView = new Uint16Array(byteView.buffer, byteView.byteOffset);
+ for (const c of componentOrder) {
+ components[c] = shortView[index++];
+ }
+ return components;
+ }
+ case 32: {
+ const longView = new Uint32Array(byteView.buffer, byteView.byteOffset);
+ for (const c of componentOrder) {
+ components[c] = longView[index++];
+ }
+ return components;
+ }
+ }
+
+ bitLengthMap = makePerTexelComponent(componentOrder, bitLengths);
+ totalBitLength = bitLengths * componentOrder.length;
+ } else {
+ bitLengthMap = bitLengths;
+ totalBitLength = Object.entries(bitLengthMap).reduce((acc, [, value]) => {
+ assert(value !== undefined);
+ return acc + value;
+ }, 0);
+ }
+
+ assert(totalBitLength % 8 === 0);
+
+ const dataView = new DataView(byteView.buffer, byteView.byteOffset, byteView.byteLength);
+ let bitOffset = 0;
+ for (const c of componentOrder) {
+ const bitLength = bitLengthMap[c];
+ assert(bitLength !== undefined);
+
+ let value: number;
+
+ const byteOffset = Math.floor(bitOffset / 8);
+ const byteLength = Math.ceil(bitLength / 8);
+ if (byteOffset === bitOffset / 8 && byteLength === bitLength / 8) {
+ switch (byteLength) {
+ case 1:
+ value = dataView.getUint8(byteOffset);
+ break;
+ case 2:
+ value = dataView.getUint16(byteOffset, true);
+ break;
+ case 4:
+ value = dataView.getUint32(byteOffset, true);
+ break;
+ default:
+ unreachable();
+ }
+ } else {
+ // Packed representations are all 32-bit and use Uint as the data type.
+      // ex.) rg11b10ufloat, rgb10a2unorm
+ assert(dataView.byteLength === 4);
+ const word = dataView.getUint32(0, true);
+ value = (word >>> bitOffset) & ((1 << bitLength) - 1);
+ }
+
+ bitOffset += bitLength;
+ components[c] = value;
+ }
+
+ return components;
+}
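+
+// Example (illustrative sketch): unpacking the rgb10a2-style word from the packing example above:
+//
+//   const bytes = new Uint8Array([0xff, 0x03, 0x00, 0xc0]); // 0xC00003FF, little-endian
+//   unpackComponentsBits(kRGBA, bytes, { R: 10, G: 10, B: 10, A: 2 });
+//   // -> { R: 1023, G: 0, B: 0, A: 3 }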
+
+/**
+ * Create an entry in `kTexelRepresentationInfo` for normalized integer texel data with constant
+ * bitlength.
+ * @param {TexelComponent[]} componentOrder - The order of the component data.
+ * @param {number} bitLength - The number of bits in each component.
+ * @param {{signed: boolean; sRGB: boolean}} opt - Boolean flags for `signed` and `sRGB`.
+ */
+function makeNormalizedInfo(
+ componentOrder: TexelComponent[],
+ bitLength: number,
+ opt: { signed: boolean; sRGB: boolean }
+): TexelRepresentationInfo {
+ const encodeNonSRGB = applyEach(
+ (n: number) => floatAsNormalizedInteger(n, bitLength, opt.signed),
+ componentOrder
+ );
+ const decodeNonSRGB = applyEach(
+ (n: number) => normalizedIntegerAsFloat(n, bitLength, opt.signed),
+ componentOrder
+ );
+
+ const numberToBitsNonSRGB = applyEach(
+ n => floatAsNormalizedInteger(n, bitLength, opt.signed),
+ componentOrder
+ );
+ let bitsToNumberNonSRGB: ComponentMapFn;
+ if (opt.signed) {
+ bitsToNumberNonSRGB = applyEach(
+ n => normalizedIntegerAsFloat(signExtend(n, bitLength), bitLength, opt.signed),
+ componentOrder
+ );
+ } else {
+ bitsToNumberNonSRGB = applyEach(
+ n => normalizedIntegerAsFloat(n, bitLength, opt.signed),
+ componentOrder
+ );
+ }
+
+ let encode: ComponentMapFn;
+ let decode: ComponentMapFn;
+ let numberToBits: ComponentMapFn;
+ let bitsToNumber: ComponentMapFn;
+ if (opt.sRGB) {
+ encode = components => encodeNonSRGB(encodeSRGB(components));
+ decode = components => decodeSRGB(decodeNonSRGB(components));
+ numberToBits = components => numberToBitsNonSRGB(encodeSRGB(components));
+ bitsToNumber = components => decodeSRGB(bitsToNumberNonSRGB(components));
+ } else {
+ encode = encodeNonSRGB;
+ decode = decodeNonSRGB;
+ numberToBits = numberToBitsNonSRGB;
+ bitsToNumber = bitsToNumberNonSRGB;
+ }
+
+ let bitsToULPFromZero: ComponentMapFn;
+ if (opt.signed) {
+ const maxValue = (1 << (bitLength - 1)) - 1; // e.g. 127 for snorm8
+ bitsToULPFromZero = applyEach(
+ n => Math.max(-maxValue, signExtend(n, bitLength)),
+ componentOrder
+ );
+ } else {
+ bitsToULPFromZero = components => components;
+ }
+
+ const dataType: ComponentDataType = opt.signed ? 'snorm' : 'unorm';
+ return {
+ componentOrder,
+ componentInfo: makePerTexelComponent(componentOrder, {
+ dataType,
+ bitLength,
+ }),
+ encode,
+ decode,
+ pack: (components: PerTexelComponent<number>) =>
+ packComponents(componentOrder, components, bitLength, dataType),
+ unpackBits: (data: Uint8Array) => unpackComponentsBits(componentOrder, data, bitLength),
+ numberToBits,
+ bitsToNumber,
+ bitsToULPFromZero,
+ numericRange: { min: opt.signed ? -1 : 0, max: 1 },
+ };
+}
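+
+// Note (illustrative): for signed (snorm) formats the minimum value has two bit patterns; e.g.
+// for r8snorm both 0x80 (-128) and 0x81 (-127) decode to -1.0. bitsToULPFromZero clamps to
+// -(2^(bitLength-1) - 1) so both patterns compare equal in ULPs-from-zero:
+//
+//   kTexelRepresentationInfo['r8snorm'].bitsToULPFromZero({ R: 0x80 }); // -> { R: -127 }
+//   kTexelRepresentationInfo['r8snorm'].bitsToULPFromZero({ R: 0x81 }); // -> { R: -127 }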
+
+/**
+ * Create an entry in `kTexelRepresentationInfo` for integer texel data with constant bitlength.
+ * @param {TexelComponent[]} componentOrder - The order of the component data.
+ * @param {number} bitLength - The number of bits in each component.
+ * @param {{signed: boolean}} opt - Boolean flag for `signed`.
+ */
+function makeIntegerInfo(
+ componentOrder: TexelComponent[],
+ bitLength: number,
+ opt: { signed: boolean }
+): TexelRepresentationInfo {
+ assert(bitLength <= 32);
+ const numericRange = opt.signed
+ ? { min: -(2 ** (bitLength - 1)), max: 2 ** (bitLength - 1) - 1 }
+ : { min: 0, max: 2 ** bitLength - 1 };
+ const maxUnsignedValue = 2 ** bitLength;
+ const encode = applyEach(
+ (n: number) => (assertInIntegerRange(n, bitLength, opt.signed), n),
+ componentOrder
+ );
+ const decode = applyEach(
+ (n: number) => (assertInIntegerRange(n, bitLength, opt.signed), n),
+ componentOrder
+ );
+ const bitsToNumber = applyEach((n: number) => {
+ const decodedN = opt.signed ? (n > numericRange.max ? n - maxUnsignedValue : n) : n;
+ assertInIntegerRange(decodedN, bitLength, opt.signed);
+ return decodedN;
+ }, componentOrder);
+
+ let bitsToULPFromZero: ComponentMapFn;
+ if (opt.signed) {
+ bitsToULPFromZero = applyEach(n => signExtend(n, bitLength), componentOrder);
+ } else {
+ bitsToULPFromZero = components => components;
+ }
+
+ const dataType: ComponentDataType = opt.signed ? 'sint' : 'uint';
+ const bitMask = (1 << bitLength) - 1;
+ return {
+ componentOrder,
+ componentInfo: makePerTexelComponent(componentOrder, {
+ dataType,
+ bitLength,
+ }),
+ encode,
+ decode,
+ pack: (components: PerTexelComponent<number>) =>
+ packComponents(componentOrder, components, bitLength, dataType),
+ unpackBits: (data: Uint8Array) => unpackComponentsBits(componentOrder, data, bitLength),
+ numberToBits: applyEach(v => v & bitMask, componentOrder),
+ bitsToNumber,
+ bitsToULPFromZero,
+ numericRange,
+ };
+}
+
+/**
+ * Create an entry in `kTexelRepresentationInfo` for floating point texel data with constant
+ * bitlength.
+ * @param {TexelComponent[]} componentOrder - The order of the component data.
+ * @param {number} bitLength - The number of bits in each component.
+ */
+function makeFloatInfo(
+ componentOrder: TexelComponent[],
+ bitLength: number,
+ { restrictedDepth = false }: { restrictedDepth?: boolean } = {}
+): TexelRepresentationInfo {
+ let encode: ComponentMapFn;
+ let numberToBits;
+ let bitsToNumber;
+ let bitsToULPFromZero;
+ switch (bitLength) {
+ case 32:
+ if (restrictedDepth) {
+ encode = applyEach(v => {
+ assert(v >= 0.0 && v <= 1.0, 'depth out of range');
+ return new Float32Array([v])[0];
+ }, componentOrder);
+ } else {
+ encode = applyEach(v => new Float32Array([v])[0], componentOrder);
+ }
+ numberToBits = applyEach(numberToFloat32Bits, componentOrder);
+ bitsToNumber = applyEach(float32BitsToNumber, componentOrder);
+ bitsToULPFromZero = applyEach(
+ v => floatBitsToNormalULPFromZero(v, kFloat32Format),
+ componentOrder
+ );
+ break;
+ case 16:
+ if (restrictedDepth) {
+ encode = applyEach(v => {
+ assert(v >= 0.0 && v <= 1.0, 'depth out of range');
+ return float16BitsToFloat32(float32ToFloat16Bits(v));
+ }, componentOrder);
+ } else {
+ encode = applyEach(v => float16BitsToFloat32(float32ToFloat16Bits(v)), componentOrder);
+ }
+ numberToBits = applyEach(float32ToFloat16Bits, componentOrder);
+ bitsToNumber = applyEach(float16BitsToFloat32, componentOrder);
+ bitsToULPFromZero = applyEach(
+ v => floatBitsToNormalULPFromZero(v, kFloat16Format),
+ componentOrder
+ );
+ break;
+ default:
+ unreachable();
+ }
+ const decode = applyEach(identity, componentOrder);
+
+ return {
+ componentOrder,
+ componentInfo: makePerTexelComponent(componentOrder, {
+ dataType: 'float' as const,
+ bitLength,
+ }),
+ encode,
+ decode,
+ pack: (components: PerTexelComponent<number>) => {
+ switch (bitLength) {
+ case 16:
+ components = applyEach(float32ToFloat16Bits, componentOrder)(components);
+ return packComponents(componentOrder, components, 16, 'uint');
+ case 32:
+ return packComponents(componentOrder, components, bitLength, 'float');
+ default:
+ unreachable();
+ }
+ },
+ unpackBits: (data: Uint8Array) => unpackComponentsBits(componentOrder, data, bitLength),
+ numberToBits,
+ bitsToNumber,
+ bitsToULPFromZero,
+ numericRange: restrictedDepth
+ ? { min: 0, max: 1 }
+ : { min: Number.NEGATIVE_INFINITY, max: Number.POSITIVE_INFINITY },
+ };
+}
+
+const kR = [TexelComponent.R];
+const kRG = [TexelComponent.R, TexelComponent.G];
+const kRGB = [TexelComponent.R, TexelComponent.G, TexelComponent.B];
+const kRGBA = [TexelComponent.R, TexelComponent.G, TexelComponent.B, TexelComponent.A];
+const kBGRA = [TexelComponent.B, TexelComponent.G, TexelComponent.R, TexelComponent.A];
+
+const identity = (n: number) => n;
+
+const kFloat11Format = { signed: 0, exponentBits: 5, mantissaBits: 6, bias: 15 } as const;
+const kFloat10Format = { signed: 0, exponentBits: 5, mantissaBits: 5, bias: 15 } as const;
+
+export type TexelRepresentationInfo = {
+ /** Order of components in the packed representation. */
+ readonly componentOrder: TexelComponent[];
+ /** Data type and bit length of each component in the format. */
+ readonly componentInfo: PerTexelComponent<{
+ dataType: ComponentDataType;
+ bitLength: number;
+ }>;
+ /** Encode shader values into their data representation. ex.) float 1.0 -> unorm8 255 */
+ // MAINTENANCE_TODO: Replace with numberToBits?
+ readonly encode: ComponentMapFn;
+ /** Decode the data representation into the shader values. ex.) unorm8 255 -> float 1.0 */
+ // MAINTENANCE_TODO: Replace with bitsToNumber?
+ readonly decode: ComponentMapFn;
+ /** Pack texel component values into an ArrayBuffer. ex.) rg8unorm `{r: 0, g: 255}` -> 0xFF00 */
+ // MAINTENANCE_TODO: Replace with packBits?
+ readonly pack: ComponentPackFn;
+
+ /** Convert integer bit representations into numeric values, e.g. unorm8 255 -> numeric 1.0 */
+ readonly bitsToNumber: ComponentMapFn;
+ /** Convert numeric values into integer bit representations, e.g. numeric 1.0 -> unorm8 255 */
+ readonly numberToBits: ComponentMapFn;
+ /** Unpack integer bit representations from an ArrayBuffer, e.g. 0xFF00 -> rg8unorm [0,255] */
+ readonly unpackBits: ComponentUnpackFn;
+ /** Convert integer bit representations into ULPs-from-zero, e.g. unorm8 255 -> 255 ULPs */
+ readonly bitsToULPFromZero: ComponentMapFn;
+ /** The valid range of numeric "color" values, e.g. [0, Infinity] for ufloat. */
+ readonly numericRange: null | { min: number; max: number };
+
+ // Add fields as needed
+};
+export const kTexelRepresentationInfo: {
+ readonly [k in UncompressedTextureFormat]: TexelRepresentationInfo;
+} = {
+ .../* prettier-ignore */ {
+ 'r8unorm': makeNormalizedInfo( kR, 8, { signed: false, sRGB: false }),
+ 'r8snorm': makeNormalizedInfo( kR, 8, { signed: true, sRGB: false }),
+ 'r8uint': makeIntegerInfo( kR, 8, { signed: false }),
+ 'r8sint': makeIntegerInfo( kR, 8, { signed: true }),
+ 'r16uint': makeIntegerInfo( kR, 16, { signed: false }),
+ 'r16sint': makeIntegerInfo( kR, 16, { signed: true }),
+ 'r16float': makeFloatInfo( kR, 16),
+ 'rg8unorm': makeNormalizedInfo( kRG, 8, { signed: false, sRGB: false }),
+ 'rg8snorm': makeNormalizedInfo( kRG, 8, { signed: true, sRGB: false }),
+ 'rg8uint': makeIntegerInfo( kRG, 8, { signed: false }),
+ 'rg8sint': makeIntegerInfo( kRG, 8, { signed: true }),
+ 'r32uint': makeIntegerInfo( kR, 32, { signed: false }),
+ 'r32sint': makeIntegerInfo( kR, 32, { signed: true }),
+ 'r32float': makeFloatInfo( kR, 32),
+ 'rg16uint': makeIntegerInfo( kRG, 16, { signed: false }),
+ 'rg16sint': makeIntegerInfo( kRG, 16, { signed: true }),
+ 'rg16float': makeFloatInfo( kRG, 16),
+ 'rgba8unorm': makeNormalizedInfo(kRGBA, 8, { signed: false, sRGB: false }),
+ 'rgba8unorm-srgb': makeNormalizedInfo(kRGBA, 8, { signed: false, sRGB: true }),
+ 'rgba8snorm': makeNormalizedInfo(kRGBA, 8, { signed: true, sRGB: false }),
+ 'rgba8uint': makeIntegerInfo( kRGBA, 8, { signed: false }),
+ 'rgba8sint': makeIntegerInfo( kRGBA, 8, { signed: true }),
+ 'bgra8unorm': makeNormalizedInfo(kBGRA, 8, { signed: false, sRGB: false }),
+ 'bgra8unorm-srgb': makeNormalizedInfo(kBGRA, 8, { signed: false, sRGB: true }),
+ 'rg32uint': makeIntegerInfo( kRG, 32, { signed: false }),
+ 'rg32sint': makeIntegerInfo( kRG, 32, { signed: true }),
+ 'rg32float': makeFloatInfo( kRG, 32),
+ 'rgba16uint': makeIntegerInfo( kRGBA, 16, { signed: false }),
+ 'rgba16sint': makeIntegerInfo( kRGBA, 16, { signed: true }),
+ 'rgba16float': makeFloatInfo( kRGBA, 16),
+ 'rgba32uint': makeIntegerInfo( kRGBA, 32, { signed: false }),
+ 'rgba32sint': makeIntegerInfo( kRGBA, 32, { signed: true }),
+ 'rgba32float': makeFloatInfo( kRGBA, 32),
+ },
+ ...{
+ rgb10a2uint: {
+ componentOrder: kRGBA,
+ componentInfo: {
+ R: { dataType: 'uint', bitLength: 10 },
+ G: { dataType: 'uint', bitLength: 10 },
+ B: { dataType: 'uint', bitLength: 10 },
+ A: { dataType: 'uint', bitLength: 2 },
+ },
+ encode: components => {
+ assertInIntegerRange(components.R!, 10, false);
+ assertInIntegerRange(components.G!, 10, false);
+ assertInIntegerRange(components.B!, 10, false);
+ assertInIntegerRange(components.A!, 2, false);
+ return components;
+ },
+ decode: components => {
+ assertInIntegerRange(components.R!, 10, false);
+ assertInIntegerRange(components.G!, 10, false);
+ assertInIntegerRange(components.B!, 10, false);
+ assertInIntegerRange(components.A!, 2, false);
+ return components;
+ },
+ pack: components =>
+ packComponents(
+ kRGBA,
+ components,
+ {
+ R: 10,
+ G: 10,
+ B: 10,
+ A: 2,
+ },
+ 'uint'
+ ),
+ unpackBits: (data: Uint8Array) =>
+ unpackComponentsBits(kRGBA, data, { R: 10, G: 10, B: 10, A: 2 }),
+ numberToBits: components => ({
+ R: components.R! & 0x3ff,
+ G: components.G! & 0x3ff,
+ B: components.B! & 0x3ff,
+ A: components.A! & 0x3,
+ }),
+ bitsToNumber: components => {
+ assertInIntegerRange(components.R!, 10, false);
+ assertInIntegerRange(components.G!, 10, false);
+ assertInIntegerRange(components.B!, 10, false);
+ assertInIntegerRange(components.A!, 2, false);
+ return components;
+ },
+ bitsToULPFromZero: components => components,
+ numericRange: null,
+ },
+ rgb10a2unorm: {
+ componentOrder: kRGBA,
+ componentInfo: {
+ R: { dataType: 'unorm', bitLength: 10 },
+ G: { dataType: 'unorm', bitLength: 10 },
+ B: { dataType: 'unorm', bitLength: 10 },
+ A: { dataType: 'unorm', bitLength: 2 },
+ },
+ encode: components => {
+ return {
+ R: floatAsNormalizedInteger(components.R ?? unreachable(), 10, false),
+ G: floatAsNormalizedInteger(components.G ?? unreachable(), 10, false),
+ B: floatAsNormalizedInteger(components.B ?? unreachable(), 10, false),
+ A: floatAsNormalizedInteger(components.A ?? unreachable(), 2, false),
+ };
+ },
+ decode: components => {
+ return {
+ R: normalizedIntegerAsFloat(components.R ?? unreachable(), 10, false),
+ G: normalizedIntegerAsFloat(components.G ?? unreachable(), 10, false),
+ B: normalizedIntegerAsFloat(components.B ?? unreachable(), 10, false),
+ A: normalizedIntegerAsFloat(components.A ?? unreachable(), 2, false),
+ };
+ },
+ pack: components =>
+ packComponents(
+ kRGBA,
+ components,
+ {
+ R: 10,
+ G: 10,
+ B: 10,
+ A: 2,
+ },
+ 'uint'
+ ),
+ unpackBits: (data: Uint8Array) =>
+ unpackComponentsBits(kRGBA, data, { R: 10, G: 10, B: 10, A: 2 }),
+ numberToBits: components => ({
+ R: floatAsNormalizedInteger(components.R ?? unreachable(), 10, false),
+ G: floatAsNormalizedInteger(components.G ?? unreachable(), 10, false),
+ B: floatAsNormalizedInteger(components.B ?? unreachable(), 10, false),
+ A: floatAsNormalizedInteger(components.A ?? unreachable(), 2, false),
+ }),
+ bitsToNumber: components => ({
+ R: normalizedIntegerAsFloat(components.R!, 10, false),
+ G: normalizedIntegerAsFloat(components.G!, 10, false),
+ B: normalizedIntegerAsFloat(components.B!, 10, false),
+ A: normalizedIntegerAsFloat(components.A!, 2, false),
+ }),
+ bitsToULPFromZero: components => components,
+ numericRange: { min: 0, max: 1 },
+ },
+ rg11b10ufloat: {
+ componentOrder: kRGB,
+ encode: applyEach(identity, kRGB),
+ decode: applyEach(identity, kRGB),
+ componentInfo: {
+ R: { dataType: 'ufloat', bitLength: 11 },
+ G: { dataType: 'ufloat', bitLength: 11 },
+ B: { dataType: 'ufloat', bitLength: 10 },
+ },
+ pack: components => {
+ const componentsBits = {
+ R: float32ToFloatBits(components.R ?? unreachable(), 0, 5, 6, 15),
+ G: float32ToFloatBits(components.G ?? unreachable(), 0, 5, 6, 15),
+ B: float32ToFloatBits(components.B ?? unreachable(), 0, 5, 5, 15),
+ };
+ return packComponents(
+ kRGB,
+ componentsBits,
+ {
+ R: 11,
+ G: 11,
+ B: 10,
+ },
+ 'uint'
+ );
+ },
+ unpackBits: (data: Uint8Array) => unpackComponentsBits(kRGB, data, { R: 11, G: 11, B: 10 }),
+ numberToBits: components => ({
+ R: numberToFloatBits(components.R ?? unreachable(), kFloat11Format),
+ G: numberToFloatBits(components.G ?? unreachable(), kFloat11Format),
+ B: numberToFloatBits(components.B ?? unreachable(), kFloat10Format),
+ }),
+ bitsToNumber: components => ({
+ R: floatBitsToNumber(components.R!, kFloat11Format),
+ G: floatBitsToNumber(components.G!, kFloat11Format),
+ B: floatBitsToNumber(components.B!, kFloat10Format),
+ }),
+ bitsToULPFromZero: components => ({
+ R: floatBitsToNormalULPFromZero(components.R!, kFloat11Format),
+ G: floatBitsToNormalULPFromZero(components.G!, kFloat11Format),
+ B: floatBitsToNormalULPFromZero(components.B!, kFloat10Format),
+ }),
+ numericRange: { min: 0, max: Number.POSITIVE_INFINITY },
+ },
+ rgb9e5ufloat: {
+ componentOrder: kRGB,
+ componentInfo: makePerTexelComponent(kRGB, {
+ dataType: 'ufloat',
+ bitLength: -1, // Components don't really have a bitLength since the format is packed.
+ }),
+ encode: applyEach(identity, kRGB),
+ decode: applyEach(identity, kRGB),
+ pack: components =>
+ new Uint32Array([
+ packRGB9E5UFloat(
+ components.R ?? unreachable(),
+ components.G ?? unreachable(),
+ components.B ?? unreachable()
+ ),
+ ]).buffer,
+ unpackBits: (data: Uint8Array) => {
+ const encoded = (data[3] << 24) | (data[2] << 16) | (data[1] << 8) | data[0];
+ const redMantissa = (encoded >>> 0) & 0b111111111;
+ const greenMantissa = (encoded >>> 9) & 0b111111111;
+ const blueMantissa = (encoded >>> 18) & 0b111111111;
+ const exponentSharedBits = ((encoded >>> 27) & 0b11111) << 9;
+ return {
+ R: exponentSharedBits | redMantissa,
+ G: exponentSharedBits | greenMantissa,
+ B: exponentSharedBits | blueMantissa,
+ };
+ },
+ numberToBits: components => ({
+ R: float32ToFloatBits(components.R ?? unreachable(), 0, 5, 9, 15),
+ G: float32ToFloatBits(components.G ?? unreachable(), 0, 5, 9, 15),
+ B: float32ToFloatBits(components.B ?? unreachable(), 0, 5, 9, 15),
+ }),
+ bitsToNumber: components => ({
+ R: ufloatM9E5BitsToNumber(components.R!, kUFloat9e5Format),
+ G: ufloatM9E5BitsToNumber(components.G!, kUFloat9e5Format),
+ B: ufloatM9E5BitsToNumber(components.B!, kUFloat9e5Format),
+ }),
+ bitsToULPFromZero: components => ({
+ R: floatBitsToNormalULPFromZero(components.R!, kUFloat9e5Format),
+ G: floatBitsToNormalULPFromZero(components.G!, kUFloat9e5Format),
+ B: floatBitsToNormalULPFromZero(components.B!, kUFloat9e5Format),
+ }),
+ numericRange: { min: 0, max: Number.POSITIVE_INFINITY },
+ },
+ depth32float: makeFloatInfo([TexelComponent.Depth], 32, { restrictedDepth: true }),
+ depth16unorm: makeNormalizedInfo([TexelComponent.Depth], 16, { signed: false, sRGB: false }),
+ depth24plus: {
+ componentOrder: [TexelComponent.Depth],
+ componentInfo: { Depth: { dataType: null, bitLength: 24 } },
+ encode: applyEach(() => unreachable('depth24plus cannot be encoded'), [TexelComponent.Depth]),
+ decode: applyEach(() => unreachable('depth24plus cannot be decoded'), [TexelComponent.Depth]),
+ pack: () => unreachable('depth24plus data cannot be packed'),
+ unpackBits: () => unreachable('depth24plus data cannot be unpacked'),
+ numberToBits: () => unreachable('depth24plus has no representation'),
+ bitsToNumber: () => unreachable('depth24plus has no representation'),
+ bitsToULPFromZero: () => unreachable('depth24plus has no representation'),
+ numericRange: { min: 0, max: 1 },
+ },
+ stencil8: makeIntegerInfo([TexelComponent.Stencil], 8, { signed: false }),
+ 'depth32float-stencil8': {
+ componentOrder: [TexelComponent.Depth, TexelComponent.Stencil],
+ componentInfo: {
+ Depth: {
+ dataType: 'float',
+ bitLength: 32,
+ },
+ Stencil: {
+ dataType: 'uint',
+ bitLength: 8,
+ },
+ },
+ encode: components => {
+ assert(components.Stencil !== undefined);
+ assertInIntegerRange(components.Stencil, 8, false);
+ return components;
+ },
+ decode: components => {
+ assert(components.Stencil !== undefined);
+ assertInIntegerRange(components.Stencil, 8, false);
+ return components;
+ },
+ pack: () => unreachable('depth32float-stencil8 data cannot be packed'),
+ unpackBits: () => unreachable('depth32float-stencil8 data cannot be unpacked'),
+ numberToBits: () => unreachable('not implemented'),
+ bitsToNumber: () => unreachable('not implemented'),
+ bitsToULPFromZero: () => unreachable('not implemented'),
+ numericRange: null,
+ },
+ 'depth24plus-stencil8': {
+ componentOrder: [TexelComponent.Depth, TexelComponent.Stencil],
+ componentInfo: {
+ Depth: {
+ dataType: null,
+ bitLength: 24,
+ },
+ Stencil: {
+ dataType: 'uint',
+ bitLength: 8,
+ },
+ },
+ encode: components => {
+ assert(components.Depth === undefined, 'depth24plus cannot be encoded');
+ assert(components.Stencil !== undefined);
+ assertInIntegerRange(components.Stencil, 8, false);
+ return components;
+ },
+ decode: components => {
+ assert(components.Depth === undefined, 'depth24plus cannot be decoded');
+ assert(components.Stencil !== undefined);
+ assertInIntegerRange(components.Stencil, 8, false);
+ return components;
+ },
+ pack: () => unreachable('depth24plus-stencil8 data cannot be packed'),
+ unpackBits: () => unreachable('depth24plus-stencil8 data cannot be unpacked'),
+ numberToBits: () => unreachable('depth24plus-stencil8 has no representation'),
+ bitsToNumber: () => unreachable('depth24plus-stencil8 has no representation'),
+ bitsToULPFromZero: () => unreachable('depth24plus-stencil8 has no representation'),
+ numericRange: null,
+ },
+ },
+};
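+
+// Example (illustrative sketch): a full encode/pack/unpack/decode round trip for rg8unorm:
+//
+//   const repr = kTexelRepresentationInfo['rg8unorm'];
+//   const bytes = new Uint8Array(repr.pack(repr.encode({ R: 0.0, G: 1.0 }))); // [0x00, 0xff]
+//   repr.bitsToNumber(repr.unpackBits(bytes));                                // { R: 0, G: 1 }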
+
+/**
+ * Get the `ComponentDataType` for a format. All components must have the same type.
+ * @param {UncompressedTextureFormat} format - The input format.
+ * @returns {ComponentDataType} The data type of the components.
+ */
+export function getSingleDataType(format: UncompressedTextureFormat): ComponentDataType {
+ const infos = Object.values(kTexelRepresentationInfo[format].componentInfo);
+ assert(infos.length > 0);
+ return infos.reduce((acc, cur) => {
+ assert(cur !== undefined);
+ assert(acc === undefined || acc === cur.dataType);
+ return cur.dataType;
+ }, infos[0]!.dataType);
+}
+
+/**
+ * Get traits for generating code to read back data from a component.
+ * @param {ComponentDataType} dataType - The input component data type.
+ * @returns A dictionary containing the respective `ReadbackTypedArray` and `shaderType`.
+ */
+export function getComponentReadbackTraits(dataType: ComponentDataType) {
+ switch (dataType) {
+ case 'ufloat':
+ case 'float':
+ case 'unorm':
+ case 'snorm':
+ return {
+ ReadbackTypedArray: Float32Array,
+ shaderType: 'f32' as const,
+ };
+ case 'uint':
+ return {
+ ReadbackTypedArray: Uint32Array,
+ shaderType: 'u32' as const,
+ };
+ case 'sint':
+ return {
+ ReadbackTypedArray: Int32Array,
+ shaderType: 'i32' as const,
+ };
+ default:
+ unreachable();
+ }
+}
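+
+// Example (illustrative sketch): all components of 'rgba8unorm' share the 'unorm' data type,
+// which is read back through f32 in shaders:
+//
+//   getSingleDataType('rgba8unorm');     // -> 'unorm'
+//   getComponentReadbackTraits('unorm'); // -> { ReadbackTypedArray: Float32Array, shaderType: 'f32' }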
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texel_view.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texel_view.ts
new file mode 100644
index 0000000000..fea23b674e
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texel_view.ts
@@ -0,0 +1,201 @@
+import { assert, memcpy } from '../../../common/util/util.js';
+import { kTextureFormatInfo, EncodableTextureFormat } from '../../format_info.js';
+import { generatePrettyTable } from '../pretty_diff_tables.js';
+import { reifyExtent3D, reifyOrigin3D } from '../unions.js';
+
+import { fullSubrectCoordinates } from './base.js';
+import { kTexelRepresentationInfo, makeClampToRange, PerTexelComponent } from './texel_data.js';
+
+/** Function taking some x,y,z coordinates and returning `Readonly<T>`. */
+export type PerPixelAtLevel<T> = (coords: Required<GPUOrigin3DDict>) => Readonly<T>;
+
+/**
+ * Wrapper to view various representations of texture data in other ways. E.g., can:
+ * - Provide a mapped buffer, containing copied texture data, and read color values.
+ * - Provide a function that generates color values by coordinate, and convert to ULPs-from-zero.
+ *
+ * MAINTENANCE_TODO: Would need some refactoring to support block formats, which could be partially
+ * supported if useful.
+ */
+export class TexelView {
+ /** The GPUTextureFormat of the TexelView. */
+ readonly format: EncodableTextureFormat;
+ /** Generates the bytes for the texel at the given coordinates. */
+ readonly bytes: PerPixelAtLevel<Uint8Array>;
+ /** Generates the ULPs-from-zero for the texel at the given coordinates. */
+ readonly ulpFromZero: PerPixelAtLevel<PerTexelComponent<number>>;
+ /** Generates the color for the texel at the given coordinates. */
+ readonly color: PerPixelAtLevel<PerTexelComponent<number>>;
+
+ private constructor(
+ format: EncodableTextureFormat,
+ {
+ bytes,
+ ulpFromZero,
+ color,
+ }: {
+ bytes: PerPixelAtLevel<Uint8Array>;
+ ulpFromZero: PerPixelAtLevel<PerTexelComponent<number>>;
+ color: PerPixelAtLevel<PerTexelComponent<number>>;
+ }
+ ) {
+ this.format = format;
+ this.bytes = bytes;
+ this.ulpFromZero = ulpFromZero;
+ this.color = color;
+ }
+
+ /**
+ * Produces a TexelView from "linear image data", i.e. the `writeTexture` format. Takes a
+ * reference to the input `subrectData`, so any changes to it will be visible in the TexelView.
+ */
+ static fromTextureDataByReference(
+ format: EncodableTextureFormat,
+ subrectData: Uint8Array | Uint8ClampedArray,
+ {
+ bytesPerRow,
+ rowsPerImage,
+ subrectOrigin,
+ subrectSize,
+ }: {
+ bytesPerRow: number;
+ rowsPerImage: number;
+ subrectOrigin: GPUOrigin3D;
+ subrectSize: GPUExtent3D;
+ }
+ ) {
+ const origin = reifyOrigin3D(subrectOrigin);
+ const size = reifyExtent3D(subrectSize);
+
+ const info = kTextureFormatInfo[format];
+ assert(info.blockWidth === 1 && info.blockHeight === 1, 'unimplemented for block formats');
+
+ return TexelView.fromTexelsAsBytes(format, coords => {
+ assert(
+ coords.x >= origin.x &&
+ coords.y >= origin.y &&
+ coords.z >= origin.z &&
+ coords.x < origin.x + size.width &&
+ coords.y < origin.y + size.height &&
+ coords.z < origin.z + size.depthOrArrayLayers,
+ () => `coordinate (${coords.x},${coords.y},${coords.z}) out of bounds`
+ );
+
+ const imageOffsetInRows = (coords.z - origin.z) * rowsPerImage;
+ const rowOffset = (imageOffsetInRows + (coords.y - origin.y)) * bytesPerRow;
+ const offset = rowOffset + (coords.x - origin.x) * info.bytesPerBlock;
+
+ // MAINTENANCE_TODO: To support block formats, decode the block and then index into the result.
+ return subrectData.subarray(offset, offset + info.bytesPerBlock) as Uint8Array;
+ });
+ }
+
+ /** Produces a TexelView from a generator of bytes for individual texel blocks. */
+ static fromTexelsAsBytes(
+ format: EncodableTextureFormat,
+ generator: PerPixelAtLevel<Uint8Array>
+ ): TexelView {
+ const info = kTextureFormatInfo[format];
+ assert(info.blockWidth === 1 && info.blockHeight === 1, 'unimplemented for block formats');
+
+ const repr = kTexelRepresentationInfo[format];
+ return new TexelView(format, {
+ bytes: generator,
+ ulpFromZero: coords => repr.bitsToULPFromZero(repr.unpackBits(generator(coords))),
+ color: coords => repr.bitsToNumber(repr.unpackBits(generator(coords))),
+ });
+ }
+
+ /** Produces a TexelView from a generator of numeric "color" values for each texel. */
+ static fromTexelsAsColors(
+ format: EncodableTextureFormat,
+ generator: PerPixelAtLevel<PerTexelComponent<number>>,
+ { clampToFormatRange = false }: { clampToFormatRange?: boolean } = {}
+ ): TexelView {
+ const info = kTextureFormatInfo[format];
+ assert(info.blockWidth === 1 && info.blockHeight === 1, 'unimplemented for block formats');
+
+ if (clampToFormatRange) {
+ const applyClamp = makeClampToRange(format);
+ const oldGenerator = generator;
+ generator = coords => applyClamp(oldGenerator(coords));
+ }
+
+ const repr = kTexelRepresentationInfo[format];
+ return new TexelView(format, {
+ bytes: coords => new Uint8Array(repr.pack(repr.encode(generator(coords)))),
+ ulpFromZero: coords => repr.bitsToULPFromZero(repr.numberToBits(generator(coords))),
+ color: generator,
+ });
+ }
+
+ /** Writes the contents of a TexelView as "linear image data", i.e. the `writeTexture` format. */
+ writeTextureData(
+ subrectData: Uint8Array | Uint8ClampedArray,
+ {
+ bytesPerRow,
+ rowsPerImage,
+ subrectOrigin: subrectOrigin_,
+ subrectSize: subrectSize_,
+ }: {
+ bytesPerRow: number;
+ rowsPerImage: number;
+ subrectOrigin: GPUOrigin3D;
+ subrectSize: GPUExtent3D;
+ }
+ ): void {
+ const subrectOrigin = reifyOrigin3D(subrectOrigin_);
+ const subrectSize = reifyExtent3D(subrectSize_);
+
+ const info = kTextureFormatInfo[this.format];
+ assert(info.blockWidth === 1 && info.blockHeight === 1, 'unimplemented for block formats');
+
+ for (let z = subrectOrigin.z; z < subrectOrigin.z + subrectSize.depthOrArrayLayers; ++z) {
+ for (let y = subrectOrigin.y; y < subrectOrigin.y + subrectSize.height; ++y) {
+ for (let x = subrectOrigin.x; x < subrectOrigin.x + subrectSize.width; ++x) {
+ const start = (z * rowsPerImage + y) * bytesPerRow + x * info.bytesPerBlock;
+ memcpy({ src: this.bytes({ x, y, z }) }, { dst: subrectData, start });
+ }
+ }
+ }
+ }
+
+ /** Returns a pretty table string of the given coordinates and their values. */
+ // MAINTENANCE_TODO: Unify some internal helpers with those in texture_ok.ts.
+ toString(subrectOrigin: Required<GPUOrigin3DDict>, subrectSize: Required<GPUExtent3DDict>) {
+ const info = kTextureFormatInfo[this.format];
+ const repr = kTexelRepresentationInfo[this.format];
+
+ const integerSampleType = info.sampleType === 'uint' || info.sampleType === 'sint';
+ const numberToString = integerSampleType
+ ? (n: number) => n.toFixed()
+ : (n: number) => n.toPrecision(6);
+
+ const componentOrderStr = repr.componentOrder.join(',') + ':';
+ const subrectCoords = [...fullSubrectCoordinates(subrectOrigin, subrectSize)];
+
+ const printCoords = (function* () {
+ yield* [' coords', '==', 'X,Y,Z:'];
+ for (const coords of subrectCoords) yield `${coords.x},${coords.y},${coords.z}`;
+ })();
+ const printActualBytes = (function* (t: TexelView) {
+ yield* [' act. texel bytes (little-endian)', '==', '0x:'];
+ for (const coords of subrectCoords) {
+ yield Array.from(t.bytes(coords), b => b.toString(16).padStart(2, '0')).join(' ');
+ }
+ })(this);
+ const printActualColors = (function* (t: TexelView) {
+ yield* [' act. colors', '==', componentOrderStr];
+ for (const coords of subrectCoords) {
+ const pixel = t.color(coords);
+ yield `${repr.componentOrder.map(ch => numberToString(pixel[ch]!)).join(',')}`;
+ }
+ })(this);
+
+ const opts = {
+ fillToWidth: 120,
+ numberToString,
+ };
+ return `${generatePrettyTable(opts, [printCoords, printActualBytes, printActualColors])}`;
+ }
+}
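+
+// Example (illustrative sketch): a solid-red rgba8unorm view; bytes/color/ulpFromZero are all
+// derived on demand from the same per-texel generator:
+//
+//   const view = TexelView.fromTexelsAsColors('rgba8unorm', () => ({ R: 1, G: 0, B: 0, A: 1 }));
+//   view.bytes({ x: 0, y: 0, z: 0 }); // -> Uint8Array [0xff, 0x00, 0x00, 0xff]
+//   view.color({ x: 0, y: 0, z: 0 }); // -> { R: 1, G: 0, B: 0, A: 1 }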
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texture_ok.spec.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texture_ok.spec.ts
new file mode 100644
index 0000000000..ad7635f939
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texture_ok.spec.ts
@@ -0,0 +1,159 @@
+export const description = 'checkPixels helpers behave as expected against real textures';
+
+import { makeTestGroup } from '../../../common/framework/test_group.js';
+import { GPUTest } from '../../gpu_test.js';
+
+import { TexelView } from './texel_view.js';
+import { textureContentIsOKByT2B } from './texture_ok.js';
+
+export const g = makeTestGroup(GPUTest);
+
+g.test('float32')
+ .desc(`Basic test that actual/expected must match, for float32.`)
+ .params(u =>
+ u
+ .combineWithParams([
+ { format: 'rgba32float' }, //
+ { format: 'rg32float' },
+ ] as const)
+ .beginSubcases()
+ .combineWithParams([
+ // Expected data is 0.6 in all channels
+ { data: 0.6, opts: { maxFractionalDiff: 0.0000001 }, _ok: true },
+ { data: 0.6, opts: { maxDiffULPsForFloatFormat: 1 }, _ok: true },
+
+ { data: 0.5999, opts: { maxFractionalDiff: 0 }, _ok: false },
+ { data: 0.5999, opts: { maxFractionalDiff: 0.0001001 }, _ok: true },
+
+ { data: 0.6001, opts: { maxFractionalDiff: 0 }, _ok: false },
+ { data: 0.6001, opts: { maxFractionalDiff: 0.0001001 }, _ok: true },
+
+ { data: 0.5999, opts: { maxDiffULPsForFloatFormat: 1677 }, _ok: false },
+ { data: 0.5999, opts: { maxDiffULPsForFloatFormat: 1678 }, _ok: true },
+
+ { data: 0.6001, opts: { maxDiffULPsForFloatFormat: 1676 }, _ok: false },
+ { data: 0.6001, opts: { maxDiffULPsForFloatFormat: 1677 }, _ok: true },
+ ])
+ )
+ .fn(async t => {
+ const { format, data, opts, _ok } = t.params;
+
+ const size = [1, 1];
+ const texture = t.device.createTexture({
+ format,
+ size,
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC,
+ });
+ t.trackForCleanup(texture);
+ t.device.queue.writeTexture({ texture }, new Float32Array([data, data, data, data]), {}, size);
+
+ const expColor = { R: 0.6, G: 0.6, B: 0.6, A: 0.6 };
+ const expTexelView = TexelView.fromTexelsAsColors(format, _coords => expColor);
+
+ const result = await textureContentIsOKByT2B(t, { texture }, size, { expTexelView }, opts);
+ t.expect((result === undefined) === _ok, `expected ${_ok}, got ${result === undefined}`);
+ });
+
+g.test('norm')
+ .desc(`Basic test that actual/expected must match, for unorm/snorm.`)
+ .params(u =>
+ u
+ .combine('mode', ['bytes', 'colors'] as const)
+ .combineWithParams([
+ { format: 'r8unorm', _maxValue: 255 },
+ { format: 'r8snorm', _maxValue: 127 },
+ ] as const)
+ .beginSubcases()
+ .combineWithParams([
+ // Expected data is [10, 10]
+ { data: [10, 10], _ok: true },
+ { data: [10, 11], _ok: false },
+ { data: [11, 10], _ok: false },
+ { data: [11, 11], _ok: false },
+ ])
+ )
+ .fn(async t => {
+ const { mode, format, _maxValue, data, _ok } = t.params;
+
+ const size = [2, 1];
+ const texture = t.device.createTexture({
+ format,
+ size,
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC,
+ });
+ t.trackForCleanup(texture);
+ t.device.queue.writeTexture({ texture }, new Int8Array(data), {}, size);
+
+ let expTexelView;
+ switch (mode) {
+ case 'bytes':
+ expTexelView = TexelView.fromTexelsAsBytes(format, _coords => new Uint8Array([10]));
+ break;
+ case 'colors':
+ expTexelView = TexelView.fromTexelsAsColors(format, _coords => ({ R: 10 / _maxValue }));
+ break;
+ }
+
+ const result = await textureContentIsOKByT2B(
+ t,
+ { texture },
+ size,
+ { expTexelView },
+ { maxDiffULPsForNormFormat: 0 }
+ );
+ t.expect((result === undefined) === _ok, result?.message);
+ });
+
+g.test('snorm_min')
+ .desc(
+ `The minimum snorm value has two possible representations (e.g. -127 and -128). Ensure that
+ actual/expected can mismatch in both directions and pass the test.`
+ )
+ .params(u =>
+ u //
+ .combine('mode', ['bytes', 'colors'] as const)
+ .combineWithParams([
+ //
+ { format: 'r8snorm', _maxValue: 127 },
+ ] as const)
+ )
+ .fn(async t => {
+ const { mode, format, _maxValue } = t.params;
+
+ const data = [-_maxValue, -_maxValue - 1];
+
+ const size = [2, 1];
+ const texture = t.device.createTexture({
+ format,
+ size,
+ usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC,
+ });
+ t.trackForCleanup(texture);
+ t.device.queue.writeTexture({ texture }, new Int8Array(data), {}, size);
+
+ let expTexelView;
+ switch (mode) {
+ case 'bytes':
+ {
+ // Actual value should be [-127,-128], expected value is [-128,-127], both should pass.
+ const exp = [-_maxValue - 1, -_maxValue];
+ expTexelView = TexelView.fromTexelsAsBytes(
+ format,
+ coords => new Uint8Array([exp[coords.x]])
+ );
+ }
+ break;
+ case 'colors':
+ expTexelView = TexelView.fromTexelsAsColors(format, _coords => ({ R: -1 }));
+ break;
+ }
+
+ const result = await textureContentIsOKByT2B(
+ t,
+ { texture },
+ size,
+ { expTexelView },
+ { maxDiffULPsForNormFormat: 0 }
+ );
+ t.expectOK(result);
+ });
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texture_ok.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texture_ok.ts
new file mode 100644
index 0000000000..7b85489246
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/texture/texture_ok.ts
@@ -0,0 +1,348 @@
+import { assert, ErrorWithExtra, unreachable } from '../../../common/util/util.js';
+import { kTextureFormatInfo, EncodableTextureFormat } from '../../format_info.js';
+import { GPUTest } from '../../gpu_test.js';
+import { numbersApproximatelyEqual } from '../conversion.js';
+import { generatePrettyTable } from '../pretty_diff_tables.js';
+import { reifyExtent3D, reifyOrigin3D } from '../unions.js';
+
+import { fullSubrectCoordinates } from './base.js';
+import { getTextureSubCopyLayout } from './layout.js';
+import { kTexelRepresentationInfo, PerTexelComponent, TexelComponent } from './texel_data.js';
+import { TexelView } from './texel_view.js';
+
+type PerPixelAtLevel<T> = (coords: Required<GPUOrigin3DDict>) => T;
+
+/** Threshold options for comparing texels of different formats (norm/float/int). */
+export type TexelCompareOptions = {
+ /** Threshold for integer texture formats. Defaults to 0. */
+ maxIntDiff?: number;
+ /** Threshold for non-integer (norm/float) texture formats, if not overridden. */
+ maxFractionalDiff?: number;
+ /** Threshold in ULPs for unorm/snorm texture formats. Overrides `maxFractionalDiff`. */
+ maxDiffULPsForNormFormat?: number;
+ /** Threshold in ULPs for float/ufloat texture formats. Overrides `maxFractionalDiff`. */
+ maxDiffULPsForFloatFormat?: number;
+};
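+
+// Example (illustrative sketch): typical choices, mirroring the tests in texture_ok.spec.ts:
+//
+//   { maxDiffULPsForFloatFormat: 1 } // float formats, compared in normal-ULPs-from-zero
+//   { maxDiffULPsForNormFormat: 0 }  // unorm/snorm formats, exact match in ULPs
+//   { maxFractionalDiff: 1e-4 }      // absolute per-component numeric tolerance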
+
+export type PixelExpectation = PerTexelComponent<number> | Uint8Array;
+
+export type PerPixelComparison<E extends PixelExpectation> = {
+ coord: GPUOrigin3D;
+ exp: E;
+};
+
+type TexelViewComparer = {
+ /** Given coords, returns whether the two texel views are considered matching at that point. */
+ predicate: PerPixelAtLevel<boolean>;
+ /**
+ * Given a list of failed coords, returns table rows for `generatePrettyTable` that
+ * display the actual/expected values and diffs for debugging.
+ */
+ tableRows: (failedCoords: readonly Required<GPUOrigin3DDict>[]) => Iterable<string>[];
+};
+
+function makeTexelViewComparer(
+ format: EncodableTextureFormat,
+ { actTexelView, expTexelView }: { actTexelView: TexelView; expTexelView: TexelView },
+ opts: TexelCompareOptions
+): TexelViewComparer {
+ const {
+ maxIntDiff = 0,
+ maxFractionalDiff,
+ maxDiffULPsForNormFormat,
+ maxDiffULPsForFloatFormat,
+ } = opts;
+
+ assert(maxIntDiff >= 0, 'threshold must be non-negative');
+ if (maxFractionalDiff !== undefined) {
+ assert(maxFractionalDiff >= 0, 'threshold must be non-negative');
+ }
+ if (maxDiffULPsForFloatFormat !== undefined) {
+ assert(maxDiffULPsForFloatFormat >= 0, 'threshold must be non-negative');
+ }
+ if (maxDiffULPsForNormFormat !== undefined) {
+ assert(maxDiffULPsForNormFormat >= 0, 'threshold must be non-negative');
+ }
+
+ const fmtIsInt = format.includes('int');
+ const fmtIsNorm = format.includes('norm');
+ const fmtIsFloat = format.includes('float');
+
+ const tvc = {} as TexelViewComparer;
+ if (fmtIsInt) {
+ tvc.predicate = coords =>
+ comparePerComponent(actTexelView.color(coords), expTexelView.color(coords), maxIntDiff);
+ } else if (fmtIsNorm && maxDiffULPsForNormFormat !== undefined) {
+ tvc.predicate = coords =>
+ comparePerComponent(
+ actTexelView.ulpFromZero(coords),
+ expTexelView.ulpFromZero(coords),
+ maxDiffULPsForNormFormat
+ );
+ } else if (fmtIsFloat && maxDiffULPsForFloatFormat !== undefined) {
+ tvc.predicate = coords =>
+ comparePerComponent(
+ actTexelView.ulpFromZero(coords),
+ expTexelView.ulpFromZero(coords),
+ maxDiffULPsForFloatFormat
+ );
+ } else if (maxFractionalDiff !== undefined) {
+ tvc.predicate = coords =>
+ comparePerComponent(
+ actTexelView.color(coords),
+ expTexelView.color(coords),
+ maxFractionalDiff
+ );
+ } else {
+ if (fmtIsNorm) {
+ unreachable('need maxFractionalDiff or maxDiffULPsForNormFormat to compare norm textures');
+ } else if (fmtIsFloat) {
+ unreachable('need maxFractionalDiff or maxDiffULPsForFloatFormat to compare float textures');
+ } else {
+ unreachable();
+ }
+ }
+
+ const repr = kTexelRepresentationInfo[format];
+ if (fmtIsInt) {
+ tvc.tableRows = failedCoords => [
+ [`tolerance ± ${maxIntDiff}`],
+ (function* () {
+ yield* [` diff (act - exp)`, '==', ''];
+ for (const coords of failedCoords) {
+ const act = actTexelView.color(coords);
+ const exp = expTexelView.color(coords);
+ yield repr.componentOrder.map(ch => act[ch]! - exp[ch]!).join(',');
+ }
+ })(),
+ ];
+ } else if (
+ (fmtIsNorm && maxDiffULPsForNormFormat !== undefined) ||
+ (fmtIsFloat && maxDiffULPsForFloatFormat !== undefined)
+ ) {
+ const toleranceULPs = fmtIsNorm ? maxDiffULPsForNormFormat! : maxDiffULPsForFloatFormat!;
+ tvc.tableRows = failedCoords => [
+ [`tolerance ± ${toleranceULPs} normal-ULPs`],
+ (function* () {
+ yield* [` diff (act - exp) in normal-ULPs`, '==', ''];
+ for (const coords of failedCoords) {
+ const act = actTexelView.ulpFromZero(coords);
+ const exp = expTexelView.ulpFromZero(coords);
+ yield repr.componentOrder.map(ch => act[ch]! - exp[ch]!).join(',');
+ }
+ })(),
+ ];
+ } else {
+ assert(maxFractionalDiff !== undefined);
+ tvc.tableRows = failedCoords => [
+ [`tolerance ± ${maxFractionalDiff}`],
+ (function* () {
+ yield* [` diff (act - exp)`, '==', ''];
+ for (const coords of failedCoords) {
+ const act = actTexelView.color(coords);
+ const exp = expTexelView.color(coords);
+ yield repr.componentOrder.map(ch => (act[ch]! - exp[ch]!).toPrecision(4)).join(',');
+ }
+ })(),
+ ];
+ }
+
+ return tvc;
+}
+
+function comparePerComponent(
+ actual: PerTexelComponent<number>,
+ expected: PerTexelComponent<number>,
+ maxDiff: number
+) {
+ return Object.keys(actual).every(key => {
+ const k = key as TexelComponent;
+ const act = actual[k]!;
+ const exp = expected[k];
+ if (exp === undefined) return false;
+ return numbersApproximatelyEqual(act, exp, maxDiff);
+ });
+}
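+
+// Example (illustrative sketch, assuming numbersApproximatelyEqual applies an absolute-difference
+// threshold): every component present in `actual` must also be in `expected` and within maxDiff:
+//
+//   comparePerComponent({ R: 0.50, G: 0.25 }, { R: 0.50, G: 0.26 }, 0.02); // -> true
+//   comparePerComponent({ R: 0.50, G: 0.25 }, { R: 0.50, G: 0.30 }, 0.02); // -> false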
+
+/** Create a new mappable GPUBuffer, and copy a subrectangle of GPUTexture data into it. */
+function createTextureCopyForMapRead(
+ t: GPUTest,
+ source: GPUImageCopyTexture,
+ copySize: GPUExtent3D,
+ { format }: { format: EncodableTextureFormat }
+): { buffer: GPUBuffer; bytesPerRow: number; rowsPerImage: number } {
+ const { byteLength, bytesPerRow, rowsPerImage } = getTextureSubCopyLayout(format, copySize, {
+ aspect: source.aspect,
+ });
+
+ const buffer = t.device.createBuffer({
+ usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ,
+ size: byteLength,
+ });
+ t.trackForCleanup(buffer);
+
+ const cmd = t.device.createCommandEncoder();
+ cmd.copyTextureToBuffer(source, { buffer, bytesPerRow, rowsPerImage }, copySize);
+ t.device.queue.submit([cmd.finish()]);
+
+ return { buffer, bytesPerRow, rowsPerImage };
+}
+
+export function findFailedPixels(
+ format: EncodableTextureFormat,
+ subrectOrigin: Required<GPUOrigin3DDict>,
+ subrectSize: Required<GPUExtent3DDict>,
+ { actTexelView, expTexelView }: { actTexelView: TexelView; expTexelView: TexelView },
+ texelCompareOptions: TexelCompareOptions,
+ coords?: Generator<Required<GPUOrigin3DDict>>
+) {
+ const comparer = makeTexelViewComparer(
+ format,
+ { actTexelView, expTexelView },
+ texelCompareOptions
+ );
+
+ const lowerCorner = [subrectSize.width, subrectSize.height, subrectSize.depthOrArrayLayers];
+ const upperCorner = [0, 0, 0];
+ const failedPixels: Required<GPUOrigin3DDict>[] = [];
+ for (const coord of coords ?? fullSubrectCoordinates(subrectOrigin, subrectSize)) {
+ const { x, y, z } = coord;
+ if (!comparer.predicate(coord)) {
+ failedPixels.push(coord);
+ lowerCorner[0] = Math.min(lowerCorner[0], x);
+ lowerCorner[1] = Math.min(lowerCorner[1], y);
+ lowerCorner[2] = Math.min(lowerCorner[2], z);
+ upperCorner[0] = Math.max(upperCorner[0], x);
+ upperCorner[1] = Math.max(upperCorner[1], y);
+ upperCorner[2] = Math.max(upperCorner[2], z);
+ }
+ }
+ if (failedPixels.length === 0) {
+ return undefined;
+ }
+
+ const info = kTextureFormatInfo[format];
+ const repr = kTexelRepresentationInfo[format];
+
+ const integerSampleType = info.sampleType === 'uint' || info.sampleType === 'sint';
+ const numberToString = integerSampleType
+ ? (n: number) => n.toFixed()
+ : (n: number) => n.toPrecision(6);
+
+ const componentOrderStr = repr.componentOrder.join(',') + ':';
+
+ const printCoords = (function* () {
+ yield* [' coords', '==', 'X,Y,Z:'];
+ for (const coords of failedPixels) yield `${coords.x},${coords.y},${coords.z}`;
+ })();
+ const printActualBytes = (function* () {
+ yield* [' act. texel bytes (little-endian)', '==', '0x:'];
+ for (const coords of failedPixels) {
+ yield Array.from(actTexelView.bytes(coords), b => b.toString(16).padStart(2, '0')).join(' ');
+ }
+ })();
+ const printActualColors = (function* () {
+ yield* [' act. colors', '==', componentOrderStr];
+ for (const coords of failedPixels) {
+ const pixel = actTexelView.color(coords);
+ yield `${repr.componentOrder.map(ch => numberToString(pixel[ch]!)).join(',')}`;
+ }
+ })();
+ const printExpectedColors = (function* () {
+ yield* [' exp. colors', '==', componentOrderStr];
+ for (const coords of failedPixels) {
+ const pixel = expTexelView.color(coords);
+ yield `${repr.componentOrder.map(ch => numberToString(pixel[ch]!)).join(',')}`;
+ }
+ })();
+ const printActualULPs = (function* () {
+ yield* [' act. normal-ULPs-from-zero', '==', componentOrderStr];
+ for (const coords of failedPixels) {
+ const pixel = actTexelView.ulpFromZero(coords);
+ yield `${repr.componentOrder.map(ch => pixel[ch]).join(',')}`;
+ }
+ })();
+ const printExpectedULPs = (function* () {
+ yield* [` exp. normal-ULPs-from-zero`, '==', componentOrderStr];
+ for (const coords of failedPixels) {
+ const pixel = expTexelView.ulpFromZero(coords);
+ yield `${repr.componentOrder.map(ch => pixel[ch]).join(',')}`;
+ }
+ })();
+
+ const opts = {
+ fillToWidth: 120,
+ numberToString,
+ };
+ return `\
+ between ${lowerCorner} and ${upperCorner} inclusive:
+${generatePrettyTable(opts, [
+ printCoords,
+ printActualBytes,
+ printActualColors,
+ printExpectedColors,
+ printActualULPs,
+ printExpectedULPs,
+ ...comparer.tableRows(failedPixels),
+])}`;
+}
+
+/**
+ * Check the contents of a GPUTexture by reading it back (with copyTextureToBuffer+mapAsync), then
+ * comparing the data with the data in `expTexelView`.
+ *
+ * The actual and expected texture data are both converted to the "NormalULPFromZero" format,
+ * which is a signed number representing how far the number is from zero, in ULPs, skipping
+ * subnormal numbers (where ULP is defined for float, normalized, and integer formats).
+ */
+export async function textureContentIsOKByT2B(
+ t: GPUTest,
+ source: GPUImageCopyTexture,
+ copySize_: GPUExtent3D,
+ { expTexelView }: { expTexelView: TexelView },
+ texelCompareOptions: TexelCompareOptions,
+ coords?: Generator<Required<GPUOrigin3DDict>>
+): Promise<ErrorWithExtra | undefined> {
+ const subrectOrigin = reifyOrigin3D(source.origin ?? [0, 0, 0]);
+ const subrectSize = reifyExtent3D(copySize_);
+ const format = expTexelView.format;
+
+ const { buffer, bytesPerRow, rowsPerImage } = createTextureCopyForMapRead(
+ t,
+ source,
+ subrectSize,
+ { format }
+ );
+
+ await buffer.mapAsync(GPUMapMode.READ);
+ const data = new Uint8Array(buffer.getMappedRange());
+
+ const texelViewConfig = {
+ bytesPerRow,
+ rowsPerImage,
+ subrectOrigin,
+ subrectSize,
+ } as const;
+
+ const actTexelView = TexelView.fromTextureDataByReference(format, data, texelViewConfig);
+
+ const failedPixelsMessage = findFailedPixels(
+ format,
+ subrectOrigin,
+ subrectSize,
+ { actTexelView, expTexelView },
+ texelCompareOptions,
+ coords
+ );
+
+ if (failedPixelsMessage === undefined) {
+ return undefined;
+ }
+
+ const msg = 'Texture level had unexpected contents:\n' + failedPixelsMessage;
+ return new ErrorWithExtra(msg, () => ({
+ expTexelView,
+ // Make a new TexelView with a copy of the data so we can unmap the buffer (debug mode only).
+ actTexelView: TexelView.fromTextureDataByReference(format, data.slice(), texelViewConfig),
+ }));
+}
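+
+// Usage sketch (illustrative; `texture`, `width`, and `height` are placeholders; see
+// texture_ok.spec.ts for complete cases run inside a GPUTest):
+//
+//   const expTexelView = TexelView.fromTexelsAsColors('rgba8unorm', () => ({ R: 1, G: 0, B: 0, A: 1 }));
+//   t.expectOK(await textureContentIsOKByT2B(t, { texture }, [width, height], { expTexelView },
+//                                            { maxDiffULPsForNormFormat: 1 }));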
diff --git a/dom/webgpu/tests/cts/checkout/src/webgpu/util/unions.ts b/dom/webgpu/tests/cts/checkout/src/webgpu/util/unions.ts
new file mode 100644
index 0000000000..2f9e8b64d3
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/webgpu/util/unions.ts
@@ -0,0 +1,45 @@
+/**
+ * Reifies a `GPUOrigin3D` into a `Required<GPUOrigin3DDict>`.
+ */
+export function reifyOrigin3D(
+ val: Readonly<GPUOrigin3DDict> | Iterable<number>
+): Required<GPUOrigin3DDict> {
+ if (Symbol.iterator in val) {
+ const v = Array.from(val);
+ return {
+ x: (v[0] ?? 0) | 0,
+ y: (v[1] ?? 0) | 0,
+ z: (v[2] ?? 0) | 0,
+ };
+ } else {
+ const v = val;
+ return {
+ x: (v.x ?? 0) | 0,
+ y: (v.y ?? 0) | 0,
+ z: (v.z ?? 0) | 0,
+ };
+ }
+}
+
+/**
+ * Reifies a `GPUExtent3D` into a `Required<GPUExtent3DDict>`.
+ */
+export function reifyExtent3D(
+ val: Readonly<GPUExtent3DDict> | Iterable<number>
+): Required<GPUExtent3DDict> {
+ if (Symbol.iterator in val) {
+ const v = Array.from(val);
+ return {
+ width: (v[0] ?? 1) | 0,
+ height: (v[1] ?? 1) | 0,
+ depthOrArrayLayers: (v[2] ?? 1) | 0,
+ };
+ } else {
+ const v = val;
+ return {
+ width: (v.width ?? 1) | 0,
+ height: (v.height ?? 1) | 0,
+ depthOrArrayLayers: (v.depthOrArrayLayers ?? 1) | 0,
+ };
+ }
+}
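+
+// Examples (illustrative sketch): missing origin components default to 0; missing extent
+// components default to 1:
+//
+//   reifyOrigin3D([1, 2]);        // -> { x: 1, y: 2, z: 0 }
+//   reifyExtent3D([16, 16]);      // -> { width: 16, height: 16, depthOrArrayLayers: 1 }
+//   reifyExtent3D({ width: 16 }); // -> { width: 16, height: 1, depthOrArrayLayers: 1 }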