Diffstat (limited to 'dom/webgpu/tests/cts/checkout/src/common/tools')
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/common/tools/.eslintrc.json         |  11
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/common/tools/checklist.ts           | 136
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/common/tools/crawl.ts               | 167
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/common/tools/dev_server.ts          | 214
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/common/tools/gen_cache.ts           | 198
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/common/tools/gen_listings.ts        |  63
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/common/tools/gen_wpt_cts_html.ts    | 252
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/common/tools/image_utils.ts         |  58
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/common/tools/merge_listing_times.ts | 177
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/common/tools/run_wpt_ref_tests.ts   | 446
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/common/tools/setup-ts-in-node.js    |  51
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/common/tools/validate.ts            |  36
-rw-r--r--  dom/webgpu/tests/cts/checkout/src/common/tools/version.ts              |   4
13 files changed, 1813 insertions, 0 deletions
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/.eslintrc.json b/dom/webgpu/tests/cts/checkout/src/common/tools/.eslintrc.json
new file mode 100644
index 0000000000..e589f291bb
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/.eslintrc.json
@@ -0,0 +1,11 @@
+{
+ "parser": "@typescript-eslint/parser",
+ "parserOptions": { "project": "./tsconfig.json" },
+ "rules": {
+ "no-console": "off",
+ "no-process-exit": "off",
+ "node/no-unpublished-import": "off",
+ "node/no-unpublished-require": "off",
+ "@typescript-eslint/no-var-requires": "off"
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/checklist.ts b/dom/webgpu/tests/cts/checkout/src/common/tools/checklist.ts
new file mode 100644
index 0000000000..e301cfb2c8
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/checklist.ts
@@ -0,0 +1,136 @@
+import * as fs from 'fs';
+import * as process from 'process';
+
+import { DefaultTestFileLoader } from '../internal/file_loader.js';
+import { Ordering, compareQueries } from '../internal/query/compare.js';
+import { parseQuery } from '../internal/query/parseQuery.js';
+import { TestQuery, TestQueryMultiFile } from '../internal/query/query.js';
+import { loadTreeForQuery, TestTree } from '../internal/tree.js';
+import { StacklessError } from '../internal/util.js';
+import { assert } from '../util/util.js';
+
+function usage(rc: number): void {
+ console.error('Usage:');
+ console.error(' tools/checklist FILE');
+ console.error(' tools/checklist my/list.txt');
+ process.exit(rc);
+}
+
+if (process.argv.length === 2) usage(0);
+if (process.argv.length !== 3) usage(1);
+
+type QueryInSuite = { readonly query: TestQuery; readonly done: boolean };
+type QueriesInSuite = QueryInSuite[];
+type QueriesBySuite = Map<string, QueriesInSuite>;
+async function loadQueryListFromTextFile(filename: string): Promise<QueriesBySuite> {
+ const lines = (await fs.promises.readFile(filename, 'utf8')).split(/\r?\n/);
+ const allQueries = lines
+ .filter(l => l)
+ .map(l => {
+ const [doneStr, q] = l.split(/\s+/);
+ assert(doneStr === 'DONE' || doneStr === 'TODO', 'first column must be DONE or TODO');
+ return { query: parseQuery(q), done: doneStr === 'DONE' } as const;
+ });
+
+ const queriesBySuite: QueriesBySuite = new Map();
+ for (const q of allQueries) {
+ let suiteQueries = queriesBySuite.get(q.query.suite);
+ if (suiteQueries === undefined) {
+ suiteQueries = [];
+ queriesBySuite.set(q.query.suite, suiteQueries);
+ }
+
+ suiteQueries.push(q);
+ }
+
+ return queriesBySuite;
+}
+
+function checkForOverlappingQueries(queries: QueriesInSuite): void {
+ for (let i1 = 0; i1 < queries.length; ++i1) {
+ for (let i2 = i1 + 1; i2 < queries.length; ++i2) {
+ const q1 = queries[i1].query;
+ const q2 = queries[i2].query;
+ if (compareQueries(q1, q2) !== Ordering.Unordered) {
+ console.log(` FYI, the following checklist items overlap:\n ${q1}\n ${q2}`);
+ }
+ }
+ }
+}
+
+function checkForUnmatchedSubtreesAndDoneness(
+ tree: TestTree,
+ matchQueries: QueriesInSuite
+): number {
+ let subtreeCount = 0;
+ const unmatchedSubtrees: TestQuery[] = [];
+ const overbroadMatches: [TestQuery, TestQuery][] = [];
+ const donenessMismatches: QueryInSuite[] = [];
+ const alwaysExpandThroughLevel = 1; // expand to, at minimum, every file.
+ for (const subtree of tree.iterateCollapsedNodes({
+ includeIntermediateNodes: true,
+ includeEmptySubtrees: true,
+ alwaysExpandThroughLevel,
+ })) {
+ subtreeCount++;
+ const subtreeDone = !subtree.subtreeCounts?.nodesWithTODO;
+
+ let subtreeMatched = false;
+ for (const q of matchQueries) {
+ const comparison = compareQueries(q.query, subtree.query);
+ if (comparison !== Ordering.Unordered) subtreeMatched = true;
+ if (comparison === Ordering.StrictSubset) continue;
+ if (comparison === Ordering.StrictSuperset) overbroadMatches.push([q.query, subtree.query]);
+ if (comparison === Ordering.Equal && q.done !== subtreeDone) donenessMismatches.push(q);
+ }
+ if (!subtreeMatched) unmatchedSubtrees.push(subtree.query);
+ }
+
+ if (overbroadMatches.length) {
+ // (note, this doesn't show ALL multi-test queries - just ones that actually match any .spec.ts)
+ console.log(` FYI, the following checklist items were broader than one file:`);
+ for (const [q, collapsedSubtree] of overbroadMatches) {
+ console.log(` ${q} > ${collapsedSubtree}`);
+ }
+ }
+
+ if (unmatchedSubtrees.length) {
+ throw new StacklessError(`Found unmatched tests:\n ${unmatchedSubtrees.join('\n ')}`);
+ }
+
+ if (donenessMismatches.length) {
+ throw new StacklessError(
+ 'Found done/todo mismatches:\n ' +
+ donenessMismatches
+ .map(q => `marked ${q.done ? 'DONE, but is TODO' : 'TODO, but is DONE'}: ${q.query}`)
+ .join('\n ')
+ );
+ }
+
+ return subtreeCount;
+}
+
+(async () => {
+ console.log('Loading queries...');
+ const queriesBySuite = await loadQueryListFromTextFile(process.argv[2]);
+ console.log(' Found suites: ' + Array.from(queriesBySuite.keys()).join(' '));
+
+ const loader = new DefaultTestFileLoader();
+ for (const [suite, queriesInSuite] of queriesBySuite.entries()) {
+ console.log(`Suite "${suite}":`);
+ console.log(` Checking overlaps between ${queriesInSuite.length} checklist items...`);
+ checkForOverlappingQueries(queriesInSuite);
+ const suiteQuery = new TestQueryMultiFile(suite, []);
+ console.log(` Loading tree ${suiteQuery}...`);
+ const tree = await loadTreeForQuery(loader, suiteQuery, {
+ subqueriesToExpand: queriesInSuite.map(q => q.query),
+ });
+ console.log(' Found no invalid queries in the checklist. Checking for unmatched tests...');
+ const subtreeCount = checkForUnmatchedSubtreesAndDoneness(tree, queriesInSuite);
+ console.log(` No unmatched tests or done/todo mismatches among ${subtreeCount} subtrees!`);
+ }
+ console.log(`Checklist looks good!`);
+})().catch(ex => {
+ console.log(ex.stack ?? ex.toString());
+ process.exit(1);
+});
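
For illustration, tools/checklist expects each non-empty line of FILE to start with DONE or
TODO, followed by a single CTS test query; lines are grouped by suite before matching. A
minimal sketch of such a file (the query names are illustrative, not real CTS paths):

    DONE webgpu:api,validation,buffer,create:*
    TODO webgpu:shader,execution,builtin,abs:*
    DONE unittests:loaders_and_trees:*
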
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/crawl.ts b/dom/webgpu/tests/cts/checkout/src/common/tools/crawl.ts
new file mode 100644
index 0000000000..50340dd68b
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/crawl.ts
@@ -0,0 +1,167 @@
+// Node can look at the filesystem, but JS in the browser can't.
+// This crawls the file tree under src/suites/${suite} to generate a (non-hierarchical) static
+// listing file that can then be used in the browser to load the modules containing the tests.
+
+import * as fs from 'fs';
+import * as path from 'path';
+
+import { loadMetadataForSuite } from '../framework/metadata.js';
+import { SpecFile } from '../internal/file_loader.js';
+import { TestQueryMultiCase, TestQueryMultiFile } from '../internal/query/query.js';
+import { validQueryPart } from '../internal/query/validQueryPart.js';
+import { TestSuiteListingEntry, TestSuiteListing } from '../internal/test_suite_listing.js';
+import { assert, unreachable } from '../util/util.js';
+
+const specFileSuffix = __filename.endsWith('.ts') ? '.spec.ts' : '.spec.js';
+
+async function crawlFilesRecursively(dir: string): Promise<string[]> {
+ const subpathInfo = await Promise.all(
+ (await fs.promises.readdir(dir)).map(async d => {
+ const p = path.join(dir, d);
+ const stats = await fs.promises.stat(p);
+ return {
+ path: p,
+ isDirectory: stats.isDirectory(),
+ isFile: stats.isFile(),
+ };
+ })
+ );
+
+ const files = subpathInfo
+ .filter(
+ i =>
+ i.isFile &&
+ (i.path.endsWith(specFileSuffix) ||
+ i.path.endsWith(`${path.sep}README.txt`) ||
+ i.path === 'README.txt')
+ )
+ .map(i => i.path);
+
+ return files.concat(
+ await subpathInfo
+ .filter(i => i.isDirectory)
+ .map(i => crawlFilesRecursively(i.path))
+ .reduce(async (a, b) => (await a).concat(await b), Promise.resolve([]))
+ );
+}
+
+export async function crawl(suiteDir: string, validate: boolean): Promise<TestSuiteListingEntry[]> {
+ if (!fs.existsSync(suiteDir)) {
+ throw new Error(`Could not find suite: ${suiteDir}`);
+ }
+
+ let validateTimingsEntries;
+ if (validate) {
+ const metadata = loadMetadataForSuite(suiteDir);
+ if (metadata) {
+ validateTimingsEntries = {
+ metadata,
+ testsFoundInFiles: new Set<string>(),
+ };
+ }
+ }
+
+ // Crawl files and convert paths to be POSIX-style, relative to suiteDir.
+ const filesToEnumerate = (await crawlFilesRecursively(suiteDir))
+ .map(f => path.relative(suiteDir, f).replace(/\\/g, '/'))
+ .sort();
+
+ const entries: TestSuiteListingEntry[] = [];
+ for (const file of filesToEnumerate) {
+ // |file| is the suite-relative file path.
+ if (file.endsWith(specFileSuffix)) {
+ const filepathWithoutExtension = file.substring(0, file.length - specFileSuffix.length);
+ const pathSegments = filepathWithoutExtension.split('/');
+
+ const suite = path.basename(suiteDir);
+
+ if (validate) {
+ const filename = `../../${suite}/${filepathWithoutExtension}.spec.js`;
+
+ assert(!process.env.STANDALONE_DEV_SERVER);
+ const mod = (await import(filename)) as SpecFile;
+ assert(mod.description !== undefined, 'Test spec file missing description: ' + filename);
+ assert(mod.g !== undefined, 'Test spec file missing TestGroup definition: ' + filename);
+
+ mod.g.validate(new TestQueryMultiFile(suite, pathSegments));
+
+ for (const { testPath } of mod.g.collectNonEmptyTests()) {
+ const testQuery = new TestQueryMultiCase(suite, pathSegments, testPath, {}).toString();
+ if (validateTimingsEntries) {
+ validateTimingsEntries.testsFoundInFiles.add(testQuery);
+ }
+ }
+ }
+
+ for (const p of pathSegments) {
+ assert(validQueryPart.test(p), `Invalid directory name ${p}; must match ${validQueryPart}`);
+ }
+ entries.push({ file: pathSegments });
+ } else if (path.basename(file) === 'README.txt') {
+ const dirname = path.dirname(file);
+ const readme = fs.readFileSync(path.join(suiteDir, file), 'utf8').trim();
+
+ const pathSegments = dirname !== '.' ? dirname.split('/') : [];
+ entries.push({ file: pathSegments, readme });
+ } else {
+ unreachable(`Matched an unrecognized filename ${file}`);
+ }
+ }
+
+ if (validateTimingsEntries) {
+ let failed = false;
+
+ const zeroEntries = [];
+ const staleEntries = [];
+ for (const [metadataKey, metadataValue] of Object.entries(validateTimingsEntries.metadata)) {
+ if (metadataKey.startsWith('_')) {
+ // Ignore json "_comments".
+ continue;
+ }
+ if (metadataValue.subcaseMS <= 0) {
+ zeroEntries.push(metadataKey);
+ }
+ if (!validateTimingsEntries.testsFoundInFiles.has(metadataKey)) {
+ staleEntries.push(metadataKey);
+ }
+ }
+ if (zeroEntries.length) {
+ console.warn('WARNING: subcaseMS≤0 found in listing_meta.json (allowed, but try to avoid):');
+ for (const metadataKey of zeroEntries) {
+ console.warn(` ${metadataKey}`);
+ }
+ }
+ if (staleEntries.length) {
+ console.error('ERROR: Non-existent tests found in listing_meta.json:');
+ for (const metadataKey of staleEntries) {
+ console.error(` ${metadataKey}`);
+ }
+ failed = true;
+ }
+
+ const missingEntries = [];
+ for (const metadataKey of validateTimingsEntries.testsFoundInFiles) {
+ if (!(metadataKey in validateTimingsEntries.metadata)) {
+ missingEntries.push(metadataKey);
+ }
+ }
+ if (missingEntries.length) {
+ console.error(
+ 'ERROR: Tests missing from listing_meta.json. Please add the new tests (See docs/adding_timing_metadata.md):'
+ );
+ for (const metadataKey of missingEntries) {
+ console.error(` ${metadataKey}`);
+ failed = true;
+ }
+ }
+ assert(!failed);
+ }
+
+ return entries;
+}
+
+export function makeListing(filename: string): Promise<TestSuiteListing> {
+ // Don't validate. This path is only used for the dev server and running tests with Node.
+ // Validation is done for listing generation and presubmit.
+ return crawl(path.dirname(filename), false);
+}
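
Based on the entries pushed above ({ file } for each .spec.ts file, { file, readme } for each
README.txt), the listing that crawl() produces and that gen_listings.ts and the dev server
serialize plausibly looks like the following sketch (paths illustrative):

    export const listing = [
      { "file": [], "readme": "WebGPU conformance test suite." },
      { "file": ["api", "operation", "buffers", "map"] },
      { "file": ["api", "validation"], "readme": "Validation tests." }
    ];
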
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/dev_server.ts b/dom/webgpu/tests/cts/checkout/src/common/tools/dev_server.ts
new file mode 100644
index 0000000000..57cb6a7ea4
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/dev_server.ts
@@ -0,0 +1,214 @@
+import * as fs from 'fs';
+import * as os from 'os';
+import * as path from 'path';
+
+import * as babel from '@babel/core';
+import * as chokidar from 'chokidar';
+import * as express from 'express';
+import * as morgan from 'morgan';
+import * as portfinder from 'portfinder';
+import * as serveIndex from 'serve-index';
+
+import { makeListing } from './crawl.js';
+
+// Make sure that makeListing doesn't cache imported spec files. See crawl().
+process.env.STANDALONE_DEV_SERVER = '1';
+
+function usage(rc: number): void {
+ console.error(`\
+Usage:
+ tools/dev_server
+ tools/dev_server 0.0.0.0
+ npm start
+ npm start 0.0.0.0
+
+By default, serves on localhost only. If the argument 0.0.0.0 is passed, serves on all interfaces.
+`);
+ process.exit(rc);
+}
+
+const srcDir = path.resolve(__dirname, '../../');
+
+// Import the project's babel.config.js. We'll use the same config for the runtime compiler.
+const babelConfig = {
+ ...require(path.resolve(srcDir, '../babel.config.js'))({
+ cache: () => {
+ /* not used */
+ },
+ }),
+ sourceMaps: 'inline',
+};
+
+// Caches for the generated listing file and compiled TS sources to speed up reloads.
+// Keyed by suite name
+const listingCache = new Map<string, string>();
+// Keyed by the path to the .ts file, without src/
+const compileCache = new Map<string, string>();
+
+console.log('Watching changes in', srcDir);
+const watcher = chokidar.watch(srcDir, {
+ persistent: true,
+});
+
+/**
+ * Handler to dirty the compile cache for changed .ts files.
+ */
+function dirtyCompileCache(absPath: string, stats?: fs.Stats) {
+ const relPath = path.relative(srcDir, absPath);
+ if ((stats === undefined || stats.isFile()) && relPath.endsWith('.ts')) {
+ const tsUrl = relPath;
+ if (compileCache.has(tsUrl)) {
+ console.debug('Dirtying compile cache', tsUrl);
+ }
+ compileCache.delete(tsUrl);
+ }
+}
+
+/**
+ * Handler to dirty the listing cache for:
+ * - Directory changes
+ * - .spec.ts changes
+ * - README.txt changes
+ * Also dirties the compile cache for changed files.
+ */
+function dirtyListingAndCompileCache(absPath: string, stats?: fs.Stats) {
+ const relPath = path.relative(srcDir, absPath);
+
+ const segments = relPath.split(path.sep);
+ // The listing changes if the directories change, or if a .spec.ts file is added/removed.
+ const listingChange =
+ // A directory or a file with no extension that we can't stat.
+ // (stat doesn't work for deletions)
+ ((path.extname(relPath) === '' && (stats === undefined || !stats.isFile())) ||
+ // A spec file
+ relPath.endsWith('.spec.ts') ||
+ // A README.txt
+ path.basename(relPath, 'txt') === 'README') &&
+ segments.length > 0;
+ if (listingChange) {
+ const suite = segments[0];
+ if (listingCache.has(suite)) {
+ console.debug('Dirtying listing cache', suite);
+ }
+ listingCache.delete(suite);
+ }
+
+ dirtyCompileCache(absPath, stats);
+}
+
+watcher.on('add', dirtyListingAndCompileCache);
+watcher.on('unlink', dirtyListingAndCompileCache);
+watcher.on('addDir', dirtyListingAndCompileCache);
+watcher.on('unlinkDir', dirtyListingAndCompileCache);
+watcher.on('change', dirtyCompileCache);
+
+const app = express();
+
+// Send Chrome Origin Trial tokens
+app.use((_req, res, next) => {
+ res.header('Origin-Trial', [
+ // Token for http://localhost:8080
+ 'AvyDIV+RJoYs8fn3W6kIrBhWw0te0klraoz04mw/nPb8VTus3w5HCdy+vXqsSzomIH745CT6B5j1naHgWqt/tw8AAABJeyJvcmlnaW4iOiJodHRwOi8vbG9jYWxob3N0OjgwODAiLCJmZWF0dXJlIjoiV2ViR1BVIiwiZXhwaXJ5IjoxNjYzNzE4Mzk5fQ==',
+ ]);
+ next();
+});
+
+// Set up logging
+app.use(morgan('dev'));
+
+// Serve the standalone runner directory
+app.use('/standalone', express.static(path.resolve(srcDir, '../standalone')));
+// Add out-wpt/ build dir for convenience
+app.use('/out-wpt', express.static(path.resolve(srcDir, '../out-wpt')));
+app.use('/docs/tsdoc', express.static(path.resolve(srcDir, '../docs/tsdoc')));
+
+// Serve a suite's listing.js file by crawling the filesystem for all tests.
+app.get('/out/:suite([a-zA-Z0-9_-]+)/listing.js', async (req, res, next) => {
+ const suite = req.params['suite'];
+
+ if (listingCache.has(suite)) {
+ res.setHeader('Content-Type', 'application/javascript');
+ res.send(listingCache.get(suite));
+ return;
+ }
+
+ try {
+ const listing = await makeListing(path.resolve(srcDir, suite, 'listing.ts'));
+ const result = `export const listing = ${JSON.stringify(listing, undefined, 2)}`;
+
+ listingCache.set(suite, result);
+ res.setHeader('Content-Type', 'application/javascript');
+ res.send(result);
+ } catch (err) {
+ next(err);
+ }
+});
+
+// Serve all other .js files by fetching the source .ts file and compiling it.
+app.get('/out/**/*.js', async (req, res, next) => {
+ const jsUrl = path.relative('/out', req.url);
+ const tsUrl = jsUrl.replace(/\.js$/, '.ts');
+ if (compileCache.has(tsUrl)) {
+ res.setHeader('Content-Type', 'application/javascript');
+ res.send(compileCache.get(tsUrl));
+ return;
+ }
+
+ let absPath = path.join(srcDir, tsUrl);
+ if (!fs.existsSync(absPath)) {
+ // The .ts file doesn't exist. Try .js file in case this is a .js/.d.ts pair.
+ absPath = path.join(srcDir, jsUrl);
+ }
+
+ try {
+ const result = await babel.transformFileAsync(absPath, babelConfig);
+ if (result && result.code) {
+ compileCache.set(tsUrl, result.code);
+
+ res.setHeader('Content-Type', 'application/javascript');
+ res.send(result.code);
+ } else {
+ throw new Error(`Failed to compile ${tsUrl}.`);
+ }
+ } catch (err) {
+ next(err);
+ }
+});
+
+// Serve everything else (not .js) as static, and directories as directory listings.
+app.use('/out', serveIndex(path.resolve(srcDir, '../src')));
+app.use('/out', express.static(path.resolve(srcDir, '../src')));
+
+void (async () => {
+ let host = '127.0.0.1';
+ if (process.argv.length >= 3) {
+ if (process.argv.length !== 3) usage(1);
+ if (process.argv[2] === '0.0.0.0') {
+ host = '0.0.0.0';
+ } else {
+ usage(1);
+ }
+ }
+
+ console.log(`Finding an available port on ${host}...`);
+ const kPortFinderStart = 8080;
+ const port = await portfinder.getPortPromise({ host, port: kPortFinderStart });
+
+ watcher.on('ready', () => {
+ // Listen on the available port.
+ app.listen(port, host, () => {
+ console.log('Standalone test runner running at:');
+ if (host === '0.0.0.0') {
+ for (const iface of Object.values(os.networkInterfaces())) {
+ for (const details of iface || []) {
+ if (details.family === 'IPv4') {
+ console.log(` http://${details.address}:${port}/standalone/`);
+ }
+ }
+ }
+ } else {
+ console.log(` http://${host}:${port}/standalone/`);
+ }
+ });
+ });
+})();
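
To make the routing above concrete, a rough sketch of how requests map to handlers (URLs are
examples based on the routes registered above, not an exhaustive contract):

    GET /out/webgpu/listing.js                  -> generated by makeListing(), cached per suite
    GET /out/webgpu/api/operation/buffers/map.spec.js
                                                -> src/webgpu/api/operation/buffers/map.spec.ts
                                                   compiled with Babel, cached per file
    GET /standalone/                            -> static standalone runner files
    GET /out-wpt/...                            -> static out-wpt/ build output
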
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/gen_cache.ts b/dom/webgpu/tests/cts/checkout/src/common/tools/gen_cache.ts
new file mode 100644
index 0000000000..ce0854aa20
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/gen_cache.ts
@@ -0,0 +1,198 @@
+import * as fs from 'fs';
+import * as path from 'path';
+import * as process from 'process';
+
+import { Cacheable, dataCache, setIsBuildingDataCache } from '../framework/data_cache.js';
+
+function usage(rc: number): void {
+ console.error(`Usage: tools/gen_cache [options] [OUT_DIR] [SUITE_DIRS...]
+
+For each suite in SUITE_DIRS, pre-compute data that is expensive to generate
+at runtime and store it under OUT_DIR. If the data file is found then the
+DataCache will load this instead of building the expensive data at CTS runtime.
+
+Options:
+ --help Print this message and exit.
+ --list Print the list of output files without writing them.
+ --nth i/n Only process every file where (file_index % n == i)
+ --validate Check that cache should build (Tests for collisions).
+ --verbose Print each action taken.
+`);
+ process.exit(rc);
+}
+
+let mode: 'emit' | 'list' | 'validate' = 'emit';
+let nth = { i: 0, n: 1 };
+let verbose = false;
+
+const nonFlagsArgs: string[] = [];
+
+for (let i = 0; i < process.argv.length; i++) {
+ const arg = process.argv[i];
+ if (arg.startsWith('-')) {
+ switch (arg) {
+ case '--list': {
+ mode = 'list';
+ break;
+ }
+ case '--help': {
+ usage(0);
+ break;
+ }
+ case '--verbose': {
+ verbose = true;
+ break;
+ }
+ case '--validate': {
+ mode = 'validate';
+ break;
+ }
+ case '--nth': {
+ const err = () => {
+ console.error(
+ `--nth requires a value of the form 'i/n', where i and n are positive integers and i < n`
+ );
+ process.exit(1);
+ };
+ i++;
+ if (i >= process.argv.length) {
+ err();
+ }
+ const value = process.argv[i];
+ const parts = value.split('/');
+ if (parts.length !== 2) {
+ err();
+ }
+ nth = { i: parseInt(parts[0]), n: parseInt(parts[1]) };
+ if (nth.i < 0 || nth.n < 1 || nth.i > nth.n) {
+ err();
+ }
+ break;
+ }
+ default: {
+ console.log('unrecognized flag: ', arg);
+ usage(1);
+ }
+ }
+ } else {
+ nonFlagsArgs.push(arg);
+ }
+}
+
+if (nonFlagsArgs.length < 4) {
+ usage(0);
+}
+
+const outRootDir = nonFlagsArgs[2];
+
+dataCache.setStore({
+ load: (path: string) => {
+ return new Promise<Uint8Array>((resolve, reject) => {
+ fs.readFile(`data/${path}`, (err, data) => {
+ if (err !== null) {
+ reject(err.message);
+ } else {
+ resolve(data);
+ }
+ });
+ });
+ },
+});
+setIsBuildingDataCache();
+
+void (async () => {
+ for (const suiteDir of nonFlagsArgs.slice(3)) {
+ await build(suiteDir);
+ }
+})();
+
+const specFileSuffix = __filename.endsWith('.ts') ? '.spec.ts' : '.spec.js';
+
+async function crawlFilesRecursively(dir: string): Promise<string[]> {
+ const subpathInfo = await Promise.all(
+ (await fs.promises.readdir(dir)).map(async d => {
+ const p = path.join(dir, d);
+ const stats = await fs.promises.stat(p);
+ return {
+ path: p,
+ isDirectory: stats.isDirectory(),
+ isFile: stats.isFile(),
+ };
+ })
+ );
+
+ const files = subpathInfo
+ .filter(i => i.isFile && i.path.endsWith(specFileSuffix))
+ .map(i => i.path);
+
+ return files.concat(
+ await subpathInfo
+ .filter(i => i.isDirectory)
+ .map(i => crawlFilesRecursively(i.path))
+ .reduce(async (a, b) => (await a).concat(await b), Promise.resolve([]))
+ );
+}
+
+async function build(suiteDir: string) {
+ if (!fs.existsSync(suiteDir)) {
+ console.error(`Could not find ${suiteDir}`);
+ process.exit(1);
+ }
+
+ // Crawl files and convert paths to be POSIX-style, relative to suiteDir.
+ let filesToEnumerate = (await crawlFilesRecursively(suiteDir)).sort();
+
+ // Filter out non-spec files
+ filesToEnumerate = filesToEnumerate.filter(f => f.endsWith(specFileSuffix));
+
+ const cacheablePathToTS = new Map<string, string>();
+
+ let fileIndex = 0;
+ for (const file of filesToEnumerate) {
+ const pathWithoutExtension = file.substring(0, file.length - specFileSuffix.length);
+ const mod = await import(`../../../${pathWithoutExtension}.spec.js`);
+ if (mod.d?.serialize !== undefined) {
+ const cacheable = mod.d as Cacheable<unknown>;
+
+ {
+ // Check for collisions
+ const existing = cacheablePathToTS.get(cacheable.path);
+ if (existing !== undefined) {
+ console.error(
+ `error: Cacheable '${cacheable.path}' is emitted by both:
+ '${existing}'
+and
+ '${file}'`
+ );
+ process.exit(1);
+ }
+ cacheablePathToTS.set(cacheable.path, file);
+ }
+
+ const outPath = `${outRootDir}/data/${cacheable.path}`;
+
+ if (fileIndex++ % nth.n === nth.i) {
+ switch (mode) {
+ case 'emit': {
+ if (verbose) {
+ console.log(`building '${outPath}'`);
+ }
+ const data = await cacheable.build();
+ const serialized = cacheable.serialize(data);
+ fs.mkdirSync(path.dirname(outPath), { recursive: true });
+ fs.writeFileSync(outPath, serialized, 'binary');
+ break;
+ }
+ case 'list': {
+ console.log(outPath);
+ break;
+ }
+ case 'validate': {
+ // Only check currently performed is the collision detection above
+ break;
+ }
+ }
+ }
+ }
+ }
+}
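
The --nth i/n option shards the expensive 'emit' work: a given invocation only builds the
cacheables whose spec-file index satisfies file_index % n == i, so the full cache can be built
by n parallel jobs. A sketch of splitting one suite across four jobs (paths illustrative):

    tools/gen_cache --nth 0/4 out src/webgpu
    tools/gen_cache --nth 1/4 out src/webgpu
    tools/gen_cache --nth 2/4 out src/webgpu
    tools/gen_cache --nth 3/4 out src/webgpu
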
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/gen_listings.ts b/dom/webgpu/tests/cts/checkout/src/common/tools/gen_listings.ts
new file mode 100644
index 0000000000..fc5e1f3cde
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/gen_listings.ts
@@ -0,0 +1,63 @@
+import * as fs from 'fs';
+import * as path from 'path';
+import * as process from 'process';
+
+import { crawl } from './crawl.js';
+
+function usage(rc: number): void {
+ console.error(`Usage: tools/gen_listings [options] [OUT_DIR] [SUITE_DIRS...]
+
+For each suite in SUITE_DIRS, generate listings and write each listing.js
+into OUT_DIR/{suite}/listing.js. Example:
+ tools/gen_listings out/ src/unittests/ src/webgpu/
+
+Options:
+ --help Print this message and exit.
+`);
+ process.exit(rc);
+}
+
+const argv = process.argv;
+if (argv.indexOf('--help') !== -1) {
+ usage(0);
+}
+
+{
+ // Ignore old argument that is now the default
+ const i = argv.indexOf('--no-validate');
+ if (i !== -1) {
+ argv.splice(i, 1);
+ }
+}
+
+if (argv.length < 4) {
+ usage(0);
+}
+
+const myself = 'src/common/tools/gen_listings.ts';
+
+const outDir = argv[2];
+
+for (const suiteDir of argv.slice(3)) {
+ // Run concurrently for each suite (might be a tiny bit more efficient)
+ void crawl(suiteDir, false).then(listing => {
+ const suite = path.basename(suiteDir);
+ const outFile = path.normalize(path.join(outDir, `${suite}/listing.js`));
+ fs.mkdirSync(path.join(outDir, suite), { recursive: true });
+ fs.writeFileSync(
+ outFile,
+ `\
+// AUTO-GENERATED - DO NOT EDIT. See ${myself}.
+
+export const listing = ${JSON.stringify(listing, undefined, 2)};
+`
+ );
+
+ // If there was a sourcemap for the file we just replaced, delete it.
+ try {
+ fs.unlinkSync(outFile + '.map');
+ } catch (ex) {
+ // ignore if file didn't exist
+ }
+ });
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/gen_wpt_cts_html.ts b/dom/webgpu/tests/cts/checkout/src/common/tools/gen_wpt_cts_html.ts
new file mode 100644
index 0000000000..e8161304e9
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/gen_wpt_cts_html.ts
@@ -0,0 +1,252 @@
+import { promises as fs } from 'fs';
+import * as path from 'path';
+
+import { DefaultTestFileLoader } from '../internal/file_loader.js';
+import {
+ TestQueryMultiCase,
+ TestQueryMultiFile,
+ TestQueryMultiTest,
+} from '../internal/query/query.js';
+import { assert } from '../util/util.js';
+
+function printUsageAndExit(rc: number): never {
+ console.error(`\
+Usage (simple, for webgpu:* suite only):
+ tools/gen_wpt_cts_html OUTPUT_FILE TEMPLATE_FILE
+ tools/gen_wpt_cts_html out-wpt/cts.https.html templates/cts.https.html
+
+Usage (config file):
+ tools/gen_wpt_cts_html CONFIG_JSON_FILE
+
+where CONFIG_JSON_FILE is a JSON file in the format documented in the code of
+gen_wpt_cts_html.ts. Example:
+ {
+ "suite": "webgpu",
+ "out": "path/to/output/cts.https.html",
+ "template": "path/to/template/cts.https.html",
+ "maxChunkTimeMS": 2000
+ }
+
+Usage (advanced) (deprecated, use config file):
+ tools/gen_wpt_cts_html OUTPUT_FILE TEMPLATE_FILE ARGUMENTS_PREFIXES_FILE EXPECTATIONS_FILE EXPECTATIONS_PREFIX [SUITE]
+ tools/gen_wpt_cts_html my/path/to/cts.https.html templates/cts.https.html arguments.txt myexpectations.txt 'path/to/cts.https.html' cts
+
+where arguments.txt is a file containing a list of arguments prefixes to both generate and expect
+in the expectations. The entire variant list generation runs *once per prefix*, so this
+multiplies the size of the variant list.
+
+ ?worker=0&q=
+ ?worker=1&q=
+
+and myexpectations.txt is a file containing a list of WPT paths to suppress, e.g.:
+
+ path/to/cts.https.html?worker=0&q=webgpu:a/foo:bar={"x":1}
+ path/to/cts.https.html?worker=1&q=webgpu:a/foo:bar={"x":1}
+
+ path/to/cts.https.html?worker=1&q=webgpu:a/foo:bar={"x":3}
+`);
+ process.exit(rc);
+}
+
+interface ConfigJSON {
+ /** Test suite to generate from. */
+ suite: string;
+ /** Output filename, relative to JSON file. */
+ out: string;
+ /** Input template filename, relative to JSON file. */
+ template: string;
+ /**
+ * Maximum time for a single WPT "variant" chunk, in milliseconds. Defaults to infinity.
+ *
+ * This data is typically captured by developers on higher-end computers, so typical test
+ * machines might execute more slowly. For this reason, use a time much less than 5 seconds
+ * (a typical default time limit in WPT test executors).
+ */
+ maxChunkTimeMS?: number;
+ /** List of argument prefixes (what comes before the test query). Defaults to `['?q=']`. */
+ argumentsPrefixes?: string[];
+ expectations?: {
+ /** File containing a list of WPT paths to suppress. */
+ file: string;
+ /** The prefix to trim from every line of the expectations_file. */
+ prefix: string;
+ };
+}
+
+interface Config {
+ suite: string;
+ out: string;
+ template: string;
+ maxChunkTimeMS: number;
+ argumentsPrefixes: string[];
+ expectations?: {
+ file: string;
+ prefix: string;
+ };
+}
+
+let config: Config;
+
+(async () => {
+ // Load the config
+ switch (process.argv.length) {
+ case 3: {
+ const configFile = process.argv[2];
+ const configJSON: ConfigJSON = JSON.parse(await fs.readFile(configFile, 'utf8'));
+ const jsonFileDir = path.dirname(configFile);
+
+ config = {
+ suite: configJSON.suite,
+ out: path.resolve(jsonFileDir, configJSON.out),
+ template: path.resolve(jsonFileDir, configJSON.template),
+ maxChunkTimeMS: configJSON.maxChunkTimeMS ?? Infinity,
+ argumentsPrefixes: configJSON.argumentsPrefixes ?? ['?q='],
+ };
+ if (configJSON.expectations) {
+ config.expectations = {
+ file: path.resolve(jsonFileDir, configJSON.expectations.file),
+ prefix: configJSON.expectations.prefix,
+ };
+ }
+ break;
+ }
+ case 4:
+ case 7:
+ case 8: {
+ const [
+ _nodeBinary,
+ _thisScript,
+ outFile,
+ templateFile,
+ argsPrefixesFile,
+ expectationsFile,
+ expectationsPrefix,
+ suite = 'webgpu',
+ ] = process.argv;
+
+ config = {
+ out: outFile,
+ template: templateFile,
+ suite,
+ maxChunkTimeMS: Infinity,
+ argumentsPrefixes: ['?q='],
+ };
+ if (process.argv.length >= 7) {
+ config.argumentsPrefixes = (await fs.readFile(argsPrefixesFile, 'utf8'))
+ .split(/\r?\n/)
+ .filter(a => a.length);
+ config.expectations = {
+ file: expectationsFile,
+ prefix: expectationsPrefix,
+ };
+ }
+ break;
+ }
+ default:
+ console.error('incorrect number of arguments!');
+ printUsageAndExit(1);
+ }
+
+ const useChunking = Number.isFinite(config.maxChunkTimeMS);
+
+ // Sort prefixes from longest to shortest
+ config.argumentsPrefixes.sort((a, b) => b.length - a.length);
+
+ // Load expectations (if any)
+ let expectationLines = new Set<string>();
+ if (config.expectations) {
+ expectationLines = new Set(
+ (await fs.readFile(config.expectations.file, 'utf8')).split(/\r?\n/).filter(l => l.length)
+ );
+ }
+
+ const expectations: Map<string, string[]> = new Map();
+ for (const prefix of config.argumentsPrefixes) {
+ expectations.set(prefix, []);
+ }
+
+ expLoop: for (const exp of expectationLines) {
+ // Take each expectation for the longest prefix it matches.
+ for (const argsPrefix of config.argumentsPrefixes) {
+ const prefix = config.expectations!.prefix + argsPrefix;
+ if (exp.startsWith(prefix)) {
+ expectations.get(argsPrefix)!.push(exp.substring(prefix.length));
+ continue expLoop;
+ }
+ }
+ console.log('note: ignored expectation: ' + exp);
+ }
+
+ const loader = new DefaultTestFileLoader();
+ const lines = [];
+ for (const prefix of config.argumentsPrefixes) {
+ const rootQuery = new TestQueryMultiFile(config.suite, []);
+ const tree = await loader.loadTree(rootQuery, {
+ subqueriesToExpand: expectations.get(prefix),
+ maxChunkTime: config.maxChunkTimeMS,
+ });
+
+ lines.push(undefined); // output blank line between prefixes
+ const prefixComment = { comment: `Prefix: "${prefix}"` }; // contents will be updated later
+ if (useChunking) lines.push(prefixComment);
+
+ const filesSeen = new Set<string>();
+ const testsSeen = new Set<string>();
+ let variantCount = 0;
+
+ const alwaysExpandThroughLevel = 2; // expand to, at minimum, every test.
+ for (const { query, subtreeCounts } of tree.iterateCollapsedNodes({
+ alwaysExpandThroughLevel,
+ })) {
+ assert(query instanceof TestQueryMultiCase);
+ const queryString = query.toString();
+ // Check for a safe-ish path length limit. Filename must be <= 255, and on Windows the whole
+ // path must be <= 259. Leave room for e.g.:
+ // 'c:\b\s\w\xxxxxxxx\layout-test-results\external\wpt\webgpu\cts_worker=0_q=...-actual.txt'
+ assert(
+ queryString.length < 185,
+ `Generated test variant would produce too-long -actual.txt filename. Possible solutions:
+- Reduce the length of the parts of the test query
+- Reduce the parameterization of the test
+- Make the test function faster and regenerate the listing_meta entry
+- Reduce the specificity of test expectations (if you're using them)
+${queryString}`
+ );
+
+ lines.push({
+ urlQueryString: prefix + query.toString(), // "?worker=0&q=..."
+ comment: useChunking ? `estimated: ${subtreeCounts?.totalTimeMS.toFixed(3)} ms` : undefined,
+ });
+
+ variantCount++;
+ filesSeen.add(new TestQueryMultiTest(query.suite, query.filePathParts, []).toString());
+ testsSeen.add(
+ new TestQueryMultiCase(query.suite, query.filePathParts, query.testPathParts, {}).toString()
+ );
+ }
+ prefixComment.comment += `; ${variantCount} variants generated from ${testsSeen.size} tests in ${filesSeen.size} files`;
+ }
+ await generateFile(lines);
+})().catch(ex => {
+ console.log(ex.stack ?? ex.toString());
+ process.exit(1);
+});
+
+async function generateFile(
+ lines: Array<{ urlQueryString?: string; comment?: string } | undefined>
+): Promise<void> {
+ let result = '';
+ result += '<!-- AUTO-GENERATED - DO NOT EDIT. See WebGPU CTS: tools/gen_wpt_cts_html. -->\n';
+
+ result += await fs.readFile(config.template, 'utf8');
+
+ for (const line of lines) {
+ if (line !== undefined) {
+ if (line.urlQueryString) result += `<meta name=variant content='${line.urlQueryString}'>`;
+ if (line.comment) result += `<!-- ${line.comment} -->`;
+ }
+ result += '\n';
+ }
+
+ await fs.writeFile(config.out, result);
+}
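
generateFile() writes one <meta name=variant> element per collapsed query, with the prefix
comment and per-variant time estimates included only when maxChunkTimeMS is finite. A sketch
of the tail of the generated cts.https.html (queries and timings are illustrative):

    <!-- AUTO-GENERATED - DO NOT EDIT. See WebGPU CTS: tools/gen_wpt_cts_html. -->
    ...template contents...

    <!-- Prefix: "?q="; 2 variants generated from 2 tests in 1 files -->
    <meta name=variant content='?q=webgpu:api,operation,buffers,map:mapAsync:*'><!-- estimated: 1523.000 ms -->
    <meta name=variant content='?q=webgpu:api,operation,buffers,map:remapped:*'><!-- estimated: 987.500 ms -->
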
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/image_utils.ts b/dom/webgpu/tests/cts/checkout/src/common/tools/image_utils.ts
new file mode 100644
index 0000000000..84cf9adfa8
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/image_utils.ts
@@ -0,0 +1,58 @@
+import * as fs from 'fs';
+
+import { Page } from 'playwright-core';
+import { PNG } from 'pngjs';
+import { screenshot, WindowInfo } from 'screenshot-ftw';
+
+// eslint-disable-next-line ban/ban
+const waitMS = (ms: number) => new Promise(resolve => setTimeout(resolve, ms));
+
+export function readPng(filename: string) {
+ const data = fs.readFileSync(filename);
+ return PNG.sync.read(data);
+}
+
+export function writePng(filename: string, width: number, height: number, data: Buffer) {
+ const png = new PNG({ colorType: 6, width, height });
+ for (let i = 0; i < data.byteLength; ++i) {
+ png.data[i] = data[i];
+ }
+ const buffer = PNG.sync.write(png);
+ fs.writeFileSync(filename, buffer);
+}
+
+export class ScreenshotManager {
+ window?: WindowInfo;
+
+ async init(page: Page) {
+ // set the title to some random number so we can find the window by title
+ const title: string = await page.evaluate(() => {
+ const title = `t-${Math.random()}`;
+ document.title = title;
+ return title;
+ });
+
+ // wait for the window to show up
+ let window;
+ for (let i = 0; !window && i < 100; ++i) {
+ await waitMS(50);
+ const windows = await screenshot.getWindows();
+ window = windows.find(window => window.title.includes(title));
+ }
+ if (!window) {
+ throw Error(`could not find window: ${title}`);
+ }
+ this.window = window;
+ }
+
+ async takeScreenshot(page: Page, screenshotName: string) {
+ // await page.screenshot({ path: screenshotName });
+
+ // we need to set the url and title since the screenshot will include the chrome
+ await page.evaluate(() => {
+ document.title = 'screenshot';
+ window.history.replaceState({}, '', '/screenshot');
+ });
+ await screenshot.captureWindowById(screenshotName, this.window!.id);
+ }
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/merge_listing_times.ts b/dom/webgpu/tests/cts/checkout/src/common/tools/merge_listing_times.ts
new file mode 100644
index 0000000000..fb33ae20fb
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/merge_listing_times.ts
@@ -0,0 +1,177 @@
+import * as fs from 'fs';
+import * as process from 'process';
+import * as readline from 'readline';
+
+import { TestMetadataListing } from '../framework/metadata.js';
+import { parseQuery } from '../internal/query/parseQuery.js';
+import { TestQueryMultiCase, TestQuerySingleCase } from '../internal/query/query.js';
+import { CaseTimingLogLine } from '../internal/test_group.js';
+import { assert } from '../util/util.js';
+
+// For information on listing_meta.json file maintenance, please read
+// tools/merge_listing_times first.
+
+function usage(rc: number): never {
+ console.error(`Usage: tools/merge_listing_times [options] SUITES... -- [TIMING_LOG_FILES...]
+
+Options:
+ --help Print this message and exit.
+
+Reads raw case timing data for each suite in SUITES, from all TIMING_LOG_FILES
+(see below), and merges it into the src/*/listing_meta.json files checked into
+the repository. The timing data in the listing_meta.json files is updated with
+the newly-observed timing data *if the new timing is slower*. That is, it will
+only increase the values in the listing_meta.json file, and will only cause WPT
+chunks to become smaller.
+
+If there are no TIMING_LOG_FILES, this just regenerates (reformats) the file
+using the data already present.
+
+In more detail:
+
+- Reads per-case timing data in any of the SUITES, from all TIMING_LOG_FILES
+ (ignoring skipped cases), and averages it over the number of subcases.
+ In the case of cases that have run multiple times, takes the max of each.
+- Compiles the average time-per-subcase for each test seen.
+- For each suite seen, loads its listing_meta.json, takes the max of the old and
+ new data, and writes it back out.
+
+How to generate TIMING_LOG_FILES files:
+
+- Launch the 'websocket-logger' tool (see its README.md), which listens for
+ log messages on localhost:59497.
+- Run the tests you want to capture data for, on the same system. Since
+ logging is done through the websocket side-channel, you can run the tests
+ under any runtime (standalone, WPT, etc.) as long as WebSocket support is
+ available (always true in browsers).
+- Run \`tools/merge_listing_times webgpu -- tools/websocket-logger/wslog-*.txt\`
+`);
+ process.exit(rc);
+}
+
+const kHeader = `{
+ "_comment": "SEMI AUTO-GENERATED: Please read docs/adding_timing_metadata.md.",
+`;
+const kFooter = `\
+ "_end": ""
+}
+`;
+
+const argv = process.argv;
+if (argv.some(v => v.startsWith('-') && v !== '--') || argv.every(v => v !== '--')) {
+ usage(0);
+}
+const suites = [];
+const timingLogFilenames = [];
+let seenDashDash = false;
+for (const arg of argv.slice(2)) {
+ if (arg === '--') {
+ seenDashDash = true;
+ continue;
+ } else if (arg.startsWith('-')) {
+ usage(0);
+ }
+
+ if (seenDashDash) {
+ timingLogFilenames.push(arg);
+ } else {
+ suites.push(arg);
+ }
+}
+if (!seenDashDash) {
+ usage(0);
+}
+
+void (async () => {
+ // Read the log files to find the log line for each *case* query. If a case
+ // ran multiple times, take the one with the largest average subcase time.
+ const caseTimes = new Map<string, CaseTimingLogLine>();
+ for (const timingLogFilename of timingLogFilenames) {
+ const rl = readline.createInterface({
+ input: fs.createReadStream(timingLogFilename),
+ crlfDelay: Infinity,
+ });
+
+ for await (const line of rl) {
+ const parsed: CaseTimingLogLine = JSON.parse(line);
+
+ const prev = caseTimes.get(parsed.q);
+ if (prev !== undefined) {
+ const timePerSubcase = parsed.timems / Math.max(1, parsed.nonskippedSubcaseCount);
+ const prevTimePerSubcase = prev.timems / Math.max(1, prev.nonskippedSubcaseCount);
+
+ if (timePerSubcase > prevTimePerSubcase) {
+ caseTimes.set(parsed.q, parsed);
+ }
+ } else {
+ caseTimes.set(parsed.q, parsed);
+ }
+ }
+ }
+
+ // Accumulate total times per test. Map of suite -> query -> {totalTimeMS, caseCount}.
+ const testTimes = new Map<string, Map<string, { totalTimeMS: number; subcaseCount: number }>>();
+ for (const suite of suites) {
+ testTimes.set(suite, new Map());
+ }
+ for (const [caseQString, caseTime] of caseTimes) {
+ const caseQ = parseQuery(caseQString);
+ assert(caseQ instanceof TestQuerySingleCase);
+ const suite = caseQ.suite;
+ const suiteTestTimes = testTimes.get(suite);
+ if (suiteTestTimes === undefined) {
+ continue;
+ }
+
+ const testQ = new TestQueryMultiCase(suite, caseQ.filePathParts, caseQ.testPathParts, {});
+ const testQString = testQ.toString();
+
+ const prev = suiteTestTimes.get(testQString);
+ if (prev !== undefined) {
+ prev.totalTimeMS += caseTime.timems;
+ prev.subcaseCount += caseTime.nonskippedSubcaseCount;
+ } else {
+ suiteTestTimes.set(testQString, {
+ totalTimeMS: caseTime.timems,
+ subcaseCount: caseTime.nonskippedSubcaseCount,
+ });
+ }
+ }
+
+ for (const suite of suites) {
+ const currentMetadata: TestMetadataListing = JSON.parse(
+ fs.readFileSync(`./src/${suite}/listing_meta.json`, 'utf8')
+ );
+
+ const metadata = { ...currentMetadata };
+ for (const [testQString, { totalTimeMS, subcaseCount }] of testTimes.get(suite)!) {
+ const avgTime = totalTimeMS / Math.max(1, subcaseCount);
+ if (testQString in metadata) {
+ metadata[testQString].subcaseMS = Math.max(metadata[testQString].subcaseMS, avgTime);
+ } else {
+ metadata[testQString] = { subcaseMS: avgTime };
+ }
+ }
+
+ writeListings(suite, metadata);
+ }
+})();
+
+function writeListings(suite: string, metadata: TestMetadataListing) {
+ const output = fs.createWriteStream(`./src/${suite}/listing_meta.json`);
+ try {
+ output.write(kHeader);
+ const keys = Object.keys(metadata).sort();
+ for (const k of keys) {
+ if (k.startsWith('_')) {
+ // Ignore json "_comments".
+ continue;
+ }
+ assert(k.indexOf('"') === -1);
+ output.write(` "${k}": { "subcaseMS": ${metadata[k].subcaseMS.toFixed(3)} },\n`);
+ }
+ output.write(kFooter);
+ } finally {
+ output.close();
+ }
+}
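
For context, every line of a TIMING_LOG_FILES file is a JSON object carrying at least the
fields read above (q: a single-case query, timems, nonskippedSubcaseCount), and writeListings()
emits one listing_meta.json entry per test query. The values in this sketch are illustrative:

    one timing log line:
      { "q": "webgpu:api,operation,buffers,map:mapAsync:<case params>", "timems": 12.4, "nonskippedSubcaseCount": 4 }

    resulting entry in src/webgpu/listing_meta.json:
      "webgpu:api,operation,buffers,map:mapAsync:*": { "subcaseMS": 3.100 },
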
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/run_wpt_ref_tests.ts b/dom/webgpu/tests/cts/checkout/src/common/tools/run_wpt_ref_tests.ts
new file mode 100644
index 0000000000..9f8661b9c4
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/run_wpt_ref_tests.ts
@@ -0,0 +1,446 @@
+import * as fs from 'fs';
+import * as path from 'path';
+
+import { chromium, firefox, webkit, Page, Browser } from 'playwright-core';
+
+import { ScreenshotManager, readPng, writePng } from './image_utils.js';
+
+declare function wptRefTestPageReady(): boolean;
+declare function wptRefTestGetTimeout(): boolean;
+
+const verbose = !!process.env.VERBOSE;
+const kRefTestsBaseURL = 'http://localhost:8080/out/webgpu/web_platform/reftests';
+const kRefTestsPath = 'src/webgpu/web_platform/reftests';
+const kScreenshotPath = 'out-wpt-reftest-screenshots';
+
+// note: technically we should use an HTML parser to find this, to deal with whitespace,
+// attribute order, quotes, entities, etc., but since we control the test source we can just
+// make sure they match
+const kRefLinkRE = /<link\s+rel="match"\s+href="(.*?)"/;
+const kRefWaitClassRE = /class="reftest-wait"/;
+const kFuzzy = /<meta\s+name="?fuzzy"?\s+content="(.*?)">/;
+
+function printUsage() {
+ console.log(`
+run_wpt_ref_tests path-to-browser-executable [ref-test-name]
+
+where ref-test-name is just a simple check for the test including the given string.
+If not passed all ref tests are run
+
+MacOS Chrome Example:
+ node tools/run_wpt_ref_tests /Applications/Google\\ Chrome\\ Canary.app/Contents/MacOS/Google\\ Chrome\\ Canary
+
+`);
+}
+
+// Get all of filenames that end with '.html'
+function getRefTestNames(refTestPath: string) {
+ return fs.readdirSync(refTestPath).filter(name => name.endsWith('.html'));
+}
+
+// Given a regex with one capture, return it or the empty string if no match.
+function getRegexMatchCapture(re: RegExp, content: string) {
+ const m = re.exec(content);
+ return m ? m[1] : '';
+}
+
+type FileInfo = {
+ content: string;
+ refLink: string;
+ refWait: boolean;
+ fuzzy: string;
+};
+
+function readHTMLFile(filename: string): FileInfo {
+ const content = fs.readFileSync(filename, { encoding: 'utf8' });
+ return {
+ content,
+ refLink: getRegexMatchCapture(kRefLinkRE, content),
+ refWait: kRefWaitClassRE.test(content),
+ fuzzy: getRegexMatchCapture(kFuzzy, content),
+ };
+}
+
+/**
+ * This is workaround for a bug in Chrome. The bug is when in emulation mode
+ * Chrome lets you set a devicePixelRatio but Chrome still renders in the
+ * actual devicePixelRatio, at least on MacOS.
+ * So, we compute the ratio and then use that.
+ */
+async function getComputedDevicePixelRatio(browser: Browser): Promise<number> {
+ const context = await browser.newContext();
+ const page = await context.newPage();
+ await page.goto('data:text/html,<html></html>');
+ await page.waitForLoadState('networkidle');
+ const devicePixelRatio = await page.evaluate(() => {
+ let resolve: (v: number) => void;
+ const promise = new Promise(_resolve => (resolve = _resolve));
+ const observer = new ResizeObserver(entries => {
+ const devicePixelWidth = entries[0].devicePixelContentBoxSize[0].inlineSize;
+ const clientWidth = entries[0].target.clientWidth;
+ const devicePixelRatio = devicePixelWidth / clientWidth;
+ resolve(devicePixelRatio);
+ });
+ observer.observe(document.documentElement);
+ return promise;
+ });
+ await page.close();
+ await context.close();
+ return devicePixelRatio as number;
+}
+
+// Note: If possible, rather than start adding command line options to this tool,
+// see if you can just make it work based on the path.
+async function getBrowserInterface(executablePath: string) {
+ const lc = executablePath.toLowerCase();
+ if (lc.includes('chrom')) {
+ const browser = await chromium.launch({
+ executablePath,
+ headless: false,
+ args: ['--enable-unsafe-webgpu'],
+ });
+ const devicePixelRatio = await getComputedDevicePixelRatio(browser);
+ const context = await browser.newContext({
+ deviceScaleFactor: devicePixelRatio,
+ });
+ return { browser, context };
+ } else if (lc.includes('firefox')) {
+ const browser = await firefox.launch({
+ executablePath,
+ headless: false,
+ });
+ const context = await browser.newContext();
+ return { browser, context };
+ } else if (lc.includes('safari') || lc.includes('webkit')) {
+ const browser = await webkit.launch({
+ executablePath,
+ headless: false,
+ });
+ const context = await browser.newContext();
+ return { browser, context };
+ } else {
+ throw new Error(`could not guess browser from executable path: ${executablePath}`);
+ }
+}
+
+// Parses a fuzzy spec as defined here
+// https://web-platform-tests.org/writing-tests/reftests.html#fuzzy-matching
+// Note: This is not robust but the tests will eventually be run in the real wpt.
+function parseFuzzy(fuzzy: string) {
+ if (!fuzzy) {
+ return { maxDifference: [0, 0], totalPixels: [0, 0] };
+ } else {
+ const parts = fuzzy.split(';');
+ if (parts.length !== 2) {
+ throw Error(`unhandled fuzzy format: ${fuzzy}`);
+ }
+ const ranges = parts.map(part => {
+ const range = part
+ .replace(/[a-zA-Z=]/g, '')
+ .split('-')
+ .map(v => parseInt(v));
+ return range.length === 1 ? [0, range[0]] : range;
+ });
+ return {
+ maxDifference: ranges[0],
+ totalPixels: ranges[1],
+ };
+ }
+}
+
+// Compares two images using the algorithm described in the web platform tests
+// https://web-platform-tests.org/writing-tests/reftests.html#fuzzy-matching
+// If they are different will write out a diff mask.
+function compareImages(
+ filename1: string,
+ filename2: string,
+ fuzzy: string,
+ diffName: string,
+ startingRow: number = 0
+) {
+ const img1 = readPng(filename1);
+ const img2 = readPng(filename2);
+ const { width, height } = img1;
+ if (img2.width !== width || img2.height !== height) {
+ console.error('images are not the same size:', filename1, filename2);
+ return;
+ }
+
+ const { maxDifference, totalPixels } = parseFuzzy(fuzzy);
+
+ const diffData = Buffer.alloc(width * height * 4);
+ const diffPixels = new Uint32Array(diffData.buffer);
+ const kRed = 0xff0000ff;
+ const kWhite = 0xffffffff;
+ const kYellow = 0xff00ffff;
+
+ let numPixelsDifferent = 0;
+ let anyPixelsOutOfRange = false;
+ for (let y = startingRow; y < height; ++y) {
+ for (let x = 0; x < width; ++x) {
+ const offset = y * width + x;
+ let isDifferent = false;
+ let outOfRange = false;
+ for (let c = 0; c < 4 && !outOfRange; ++c) {
+ const off = offset * 4 + c;
+ const v0 = img1.data[off];
+ const v1 = img2.data[off];
+ const channelDiff = Math.abs(v0 - v1);
+ outOfRange ||= channelDiff < maxDifference[0] || channelDiff > maxDifference[1];
+ isDifferent ||= channelDiff > 0;
+ }
+ numPixelsDifferent += isDifferent ? 1 : 0;
+ anyPixelsOutOfRange ||= outOfRange;
+ diffPixels[offset] = outOfRange ? kRed : isDifferent ? kYellow : kWhite;
+ }
+ }
+
+ const pass =
+ !anyPixelsOutOfRange &&
+ numPixelsDifferent >= totalPixels[0] &&
+ numPixelsDifferent <= totalPixels[1];
+ if (!pass) {
+ writePng(diffName, width, height, diffData);
+ console.error(
+ `FAIL: too many differences in: ${filename1} vs ${filename2}
+ ${numPixelsDifferent} differences, expected: ${totalPixels[0]}-${totalPixels[1]} with range: ${maxDifference[0]}-${maxDifference[1]}
+ wrote difference to: ${diffName};
+ `
+ );
+ } else {
+ console.log(`PASS`);
+ }
+ return pass;
+}
+
+function exists(filename: string) {
+ try {
+ fs.accessSync(filename);
+ return true;
+ } catch (e) {
+ return false;
+ }
+}
+
+async function waitForPageRender(page: Page) {
+ await page.evaluate(() => {
+ return new Promise(resolve => requestAnimationFrame(resolve));
+ });
+}
+
+// returns true if the page timed out.
+async function runPage(page: Page, url: string, refWait: boolean) {
+ console.log(' loading:', url);
+ // we need to load about:blank to force the browser to re-render
+ // else the previous page may still be visible if the page we are loading fails
+ await page.goto('about:blank');
+ await page.waitForLoadState('domcontentloaded');
+ await waitForPageRender(page);
+
+ await page.goto(url);
+ await page.waitForLoadState('domcontentloaded');
+ await waitForPageRender(page);
+
+ if (refWait) {
+ await page.waitForFunction(() => wptRefTestPageReady());
+ const timeout = await page.evaluate(() => wptRefTestGetTimeout());
+ if (timeout) {
+ return true;
+ }
+ }
+ return false;
+}
+
+async function main() {
+ const args = process.argv.slice(2);
+ if (args.length < 1 || args.length > 2) {
+ printUsage();
+ return;
+ }
+
+ const [executablePath, refTestName] = args;
+
+ if (!exists(executablePath)) {
+ console.error(executablePath, 'does not exist');
+ return;
+ }
+
+ const testNames = getRefTestNames(kRefTestsPath).filter(name =>
+ refTestName ? name.includes(refTestName) : true
+ );
+
+ if (!exists(kScreenshotPath)) {
+ fs.mkdirSync(kScreenshotPath, { recursive: true });
+ }
+
+ if (testNames.length === 0) {
+ console.error(`no tests include "${refTestName}"`);
+ return;
+ }
+
+ const { browser, context } = await getBrowserInterface(executablePath);
+ const page = await context.newPage();
+
+ const screenshotManager = new ScreenshotManager();
+ await screenshotManager.init(page);
+
+ if (verbose) {
+ page.on('console', async msg => {
+ const { url, lineNumber, columnNumber } = msg.location();
+ const values = await Promise.all(msg.args().map(a => a.jsonValue()));
+ console.log(`${url}:${lineNumber}:${columnNumber}:`, ...values);
+ });
+ }
+
+ await page.addInitScript({
+ content: `
+ (() => {
+ let timeout = false;
+ setTimeout(() => timeout = true, 5000);
+
+ window.wptRefTestPageReady = function() {
+ return timeout || !document.documentElement.classList.contains('reftest-wait');
+ };
+
+ window.wptRefTestGetTimeout = function() {
+ return timeout;
+ };
+ })();
+ `,
+ });
+
+ type Result = {
+ status: string;
+ testName: string;
+ refName: string;
+ testScreenshotName: string;
+ refScreenshotName: string;
+ diffName: string;
+ };
+ const results: Result[] = [];
+ const addResult = (
+ status: string,
+ testName: string,
+ refName: string,
+ testScreenshotName: string = '',
+ refScreenshotName: string = '',
+ diffName: string = ''
+ ) => {
+ results.push({ status, testName, refName, testScreenshotName, refScreenshotName, diffName });
+ };
+
+ for (const testName of testNames) {
+ console.log('processing:', testName);
+ const { refLink, refWait, fuzzy } = readHTMLFile(path.join(kRefTestsPath, testName));
+ if (!refLink) {
+ throw new Error(`could not find ref link in: ${testName}`);
+ }
+ const testURL = `${kRefTestsBaseURL}/${testName}`;
+ const refURL = `${kRefTestsBaseURL}/${refLink}`;
+
+ // Technically this is not correct but it fits the existing tests.
+ // It assumes refLink is relative to the refTestsPath but it's actually
+ // supposed to be relative to the test. It might also be an absolute
+ // path. Neither of those cases exist at the time of writing this.
+ const refFileInfo = readHTMLFile(path.join(kRefTestsPath, refLink));
+ const testScreenshotName = path.join(kScreenshotPath, `${testName}-actual.png`);
+ const refScreenshotName = path.join(kScreenshotPath, `${testName}-expected.png`);
+ const diffName = path.join(kScreenshotPath, `${testName}-diff.png`);
+
+ const timeoutTest = await runPage(page, testURL, refWait);
+ if (timeoutTest) {
+ addResult('TIMEOUT', testName, refLink);
+ continue;
+ }
+ await screenshotManager.takeScreenshot(page, testScreenshotName);
+
+ const timeoutRef = await runPage(page, refURL, refFileInfo.refWait);
+ if (timeoutRef) {
+ addResult('TIMEOUT', testName, refLink);
+ continue;
+ }
+ await screenshotManager.takeScreenshot(page, refScreenshotName);
+
+ const pass = compareImages(testScreenshotName, refScreenshotName, fuzzy, diffName);
+ addResult(
+ pass ? 'PASS' : 'FAILURE',
+ testName,
+ refLink,
+ testScreenshotName,
+ refScreenshotName,
+ diffName
+ );
+ }
+
+ console.log(
+ `----results----\n${results
+ .map(({ status, testName }) => `[ ${status.padEnd(7)} ] ${testName}`)
+ .join('\n')}`
+ );
+
+ const imgLink = (filename: string, title: string) => {
+ const name = path.basename(filename);
+ return `
+ <div class="screenshot">
+ ${title}
+ <a href="${name}" title="${name}">
+ <img src="${name}" width="256"/>
+ </a>
+ </div>`;
+ };
+
+ const indexName = path.join(kScreenshotPath, 'index.html');
+ fs.writeFileSync(
+ indexName,
+ `<!DOCTYPE html>
+<html>
+ <head>
+ <style>
+ .screenshot {
+ display: inline-block;
+ background: #CCC;
+ margin-right: 5px;
+ padding: 5px;
+ }
+ .screenshot a {
+ display: block;
+ }
+ .screenshot
+ </style>
+ </head>
+ <body>
+ ${results
+ .map(({ status, testName, refName, testScreenshotName, refScreenshotName, diffName }) => {
+ return `
+ <div>
+ <div>[ ${status} ]: ${testName} ref: ${refName}</div>
+ ${
+ status === 'FAILURE'
+ ? `${imgLink(testScreenshotName, 'actual')}
+ ${imgLink(refScreenshotName, 'ref')}
+ ${imgLink(diffName, 'diff')}`
+ : ``
+ }
+ </div>
+ <hr>
+ `;
+ })
+ .join('\n')}
+ </body>
+</html>
+ `
+ );
+
+ // the file:// with an absolute path makes it clickable in some terminals
+ console.log(`\nsee: file://${path.resolve(indexName)}\n`);
+
+ await page.close();
+ await context.close();
+ // I have no idea why it's taking ~30 seconds for playwright to close.
+ console.log('-- [ done: waiting for browser to close ] --');
+ await browser.close();
+}
+
+main().catch(e => {
+ throw e;
+});
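
The fuzzy handling above follows the WPT reftest convention: a test may declare a tolerated
per-channel difference range and a range for the count of differing pixels. A sketch of the
inputs parseFuzzy() accepts and the values it produces (numbers illustrative):

    <meta name="fuzzy" content="maxDifference=0-2;totalPixels=0-300">
      -> { maxDifference: [0, 2], totalPixels: [0, 300] }
    <meta name="fuzzy" content="maxDifference=5;totalPixels=100">
      -> { maxDifference: [0, 5], totalPixels: [0, 100] }
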
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/setup-ts-in-node.js b/dom/webgpu/tests/cts/checkout/src/common/tools/setup-ts-in-node.js
new file mode 100644
index 0000000000..89e91e8c9d
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/setup-ts-in-node.js
@@ -0,0 +1,51 @@
+const path = require('path');
+
+// Automatically transpile .ts imports
+require('ts-node').register({
+ // Specify the project file so ts-node doesn't try to find it itself based on the CWD.
+ project: path.resolve(__dirname, '../../../tsconfig.json'),
+ compilerOptions: {
+ module: 'commonjs',
+ },
+ transpileOnly: true,
+});
+const Module = require('module');
+
+// Redirect imports of .js files to .ts files
+const resolveFilename = Module._resolveFilename;
+Module._resolveFilename = (request, parentModule, isMain) => {
+ do {
+ if (request.startsWith('.') && parentModule.filename.endsWith('.ts')) {
+ // Required for browser (because it needs the actual correct file path and
+ // can't do any kind of file resolution).
+ if (request.endsWith('/index.js')) {
+ throw new Error(
+ "Avoid the name `index.js`; we don't have Node-style path resolution: " + request
+ );
+ }
+
+ // Import of Node addon modules are valid and should pass through.
+ if (request.endsWith('.node')) {
+ break;
+ }
+
+ if (!request.endsWith('.js')) {
+ throw new Error('All relative imports must end in .js: ' + request);
+ }
+
+ try {
+ const tsRequest = request.substring(0, request.length - '.js'.length) + '.ts';
+ return resolveFilename.call(this, tsRequest, parentModule, isMain);
+ } catch (ex) {
+ // If the .ts file doesn't exist, try .js instead.
+ break;
+ }
+ }
+ } while (0);
+
+ return resolveFilename.call(this, request, parentModule, isMain);
+};
+
+process.on('unhandledRejection', ex => {
+ throw ex;
+});
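
In practice this hook lets Node-side tools keep browser-style .js specifiers in their imports
while loading the .ts sources. A minimal sketch of the behaviour, assuming a relative import
inside a .ts file loaded through this shim (paths illustrative):

    // in some .ts file:
    import { assert } from '../util/util.js';
    // _resolveFilename first tries '../util/util.ts'; only if that file does not exist
    // (e.g. a hand-written .js/.d.ts pair) does it fall back to '../util/util.js'.
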
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/validate.ts b/dom/webgpu/tests/cts/checkout/src/common/tools/validate.ts
new file mode 100644
index 0000000000..164ee3259a
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/validate.ts
@@ -0,0 +1,36 @@
+import * as process from 'process';
+
+import { crawl } from './crawl.js';
+
+function usage(rc: number): void {
+ console.error(`Usage: tools/validate [options] [SUITE_DIRS...]
+
+For each suite in SUITE_DIRS, validate some properties about the file:
+- It has a .description and .g
+- That each test:
+ - Has a test function (or is marked unimplemented)
+ - Has no duplicate cases
+ - Configures batching correctly, if used
+- That each case query is not too long
+
+Example:
+ tools/validate src/unittests/ src/webgpu/
+
+Options:
+ --help Print this message and exit.
+`);
+ process.exit(rc);
+}
+
+const args = process.argv.slice(2);
+if (args.indexOf('--help') !== -1) {
+ usage(0);
+}
+
+if (args.length < 1) {
+ usage(0);
+}
+
+for (const suiteDir of args) {
+ void crawl(suiteDir, true);
+}
diff --git a/dom/webgpu/tests/cts/checkout/src/common/tools/version.ts b/dom/webgpu/tests/cts/checkout/src/common/tools/version.ts
new file mode 100644
index 0000000000..2b51700b12
--- /dev/null
+++ b/dom/webgpu/tests/cts/checkout/src/common/tools/version.ts
@@ -0,0 +1,4 @@
+export const version = require('child_process')
+ .execSync('git describe --always --abbrev=0 --dirty')
+ .toString()
+ .trim();