path: root/testing/web-platform/tests/tools/wptrunner
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
commit    26a029d407be480d791972afb5975cf62c9360a6 (patch)
tree      f435a8308119effd964b339f76abb83a57c29483 /testing/web-platform/tests/tools/wptrunner
parent    Initial commit. (diff)
Adding upstream version 124.0.1. (upstream/124.0.1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'testing/web-platform/tests/tools/wptrunner')
-rw-r--r--testing/web-platform/tests/tools/wptrunner/.gitignore8
-rw-r--r--testing/web-platform/tests/tools/wptrunner/MANIFEST.in6
-rw-r--r--testing/web-platform/tests/tools/wptrunner/README.rst14
-rw-r--r--testing/web-platform/tests/tools/wptrunner/docs/architecture.svg1
-rw-r--r--testing/web-platform/tests/tools/wptrunner/docs/commands.rst79
-rw-r--r--testing/web-platform/tests/tools/wptrunner/docs/design.rst108
-rw-r--r--testing/web-platform/tests/tools/wptrunner/docs/expectation.rst366
-rw-r--r--testing/web-platform/tests/tools/wptrunner/docs/internals.rst23
-rw-r--r--testing/web-platform/tests/tools/wptrunner/requirements.txt11
-rw-r--r--testing/web-platform/tests/tools/wptrunner/requirements_chromium.txt1
-rw-r--r--testing/web-platform/tests/tools/wptrunner/requirements_firefox.txt10
-rw-r--r--testing/web-platform/tests/tools/wptrunner/requirements_opera.txt2
-rw-r--r--testing/web-platform/tests/tools/wptrunner/requirements_safari.txt1
-rw-r--r--testing/web-platform/tests/tools/wptrunner/requirements_sauce.txt2
-rw-r--r--testing/web-platform/tests/tools/wptrunner/setup.py66
-rw-r--r--testing/web-platform/tests/tools/wptrunner/tox.ini23
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner.default.ini11
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/__init__.py0
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/__init__.py44
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/android_weblayer.py105
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/android_webview.py103
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/base.py424
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chrome.py181
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chrome_android.py244
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chrome_ios.py58
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chrome_spki_certs.py13
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chromium.py57
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/content_shell.py299
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/edgechromium.py70
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/epiphany.py75
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/firefox.py1039
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/firefox_android.py407
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/ladybird.py56
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/opera.py70
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/safari.py218
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/sauce.py245
-rwxr-xr-xtesting/web-platform/tests/tools/wptrunner/wptrunner/browsers/sauce_setup/safari-prerun.sh3
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/servo.py119
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/servodriver.py184
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/webkit.py83
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/webkitgtk_minibrowser.py84
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/wktr.py239
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/config.py67
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/environment.py364
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/executors/__init__.py5
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/executors/actions.py480
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/executors/base.py810
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorchrome.py273
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorcontentshell.py328
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executoredge.py101
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executormarionette.py1371
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorselenium.py463
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorservo.py333
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorservodriver.py303
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorwebdriver.py762
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorwktr.py268
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/executors/process.py22
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/executors/protocol.py804
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/executors/pytestrunner/__init__.py1
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/executors/pytestrunner/runner.py179
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/executors/reftest.js1
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/executors/runner.js1
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/executors/test-wait.js55
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/executors/testharness_servodriver.js2
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/executors/testharness_webdriver_resume.js5
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/executors/window-loaded.js9
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/expected.py16
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/expectedtree.py132
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/font.py144
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/formatters/__init__.py0
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/formatters/chromium.py338
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/formatters/tests/__init__.py0
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/formatters/tests/test_chromium.py828
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/formatters/wptreport.py144
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/formatters/wptscreenshot.py49
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/instruments.py121
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/manifestexpected.py529
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/manifestinclude.py156
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/manifestupdate.py992
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/metadata.py859
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/mpcontext.py13
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/print_pdf_runner.html33
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/products.py60
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/stability.py417
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/testdriver-extra.js330
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/testdriver-vendor.js1
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/testharness_runner.html6
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/testharnessreport-content-shell.js45
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/testharnessreport-servo.js17
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/testharnessreport-servodriver.js23
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/testharnessreport-wktr.js23
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/testharnessreport.js88
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/testloader.py708
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/testrunner.py1049
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/tests/__init__.py9
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/tests/base.py62
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/tests/browsers/__init__.py0
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/tests/browsers/test_base.py52
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/tests/browsers/test_sauce.py177
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/tests/browsers/test_webkitgtk.py77
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_executors.py17
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_expectedtree.py120
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_formatters.py152
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_manifestexpected.py35
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_manifestupdate.py58
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_metadata.py47
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_products.py60
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_stability.py186
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_testloader.py349
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_update.py1893
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_wptrunner.py79
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_wpttest.py234
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/update/__init__.py47
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/update/base.py69
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/update/metadata.py62
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/update/state.py159
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/update/sync.py150
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/update/tree.py407
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/update/update.py191
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/vcs.py67
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/wptcommandline.py831
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/wptlogging.py109
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/__init__.py5
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/backends/__init__.py0
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/backends/base.py221
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/backends/conditional.py402
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/backends/static.py102
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/node.py173
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/parser.py873
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/serializer.py160
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/__init__.py0
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_conditional.py143
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_parser.py155
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_serializer.py356
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_static.py98
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_tokenizer.py385
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/wptrunner.py581
-rw-r--r--testing/web-platform/tests/tools/wptrunner/wptrunner/wpttest.py740
138 files changed, 29373 insertions, 0 deletions
diff --git a/testing/web-platform/tests/tools/wptrunner/.gitignore b/testing/web-platform/tests/tools/wptrunner/.gitignore
new file mode 100644
index 0000000000..495616ef1d
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/.gitignore
@@ -0,0 +1,8 @@
+*.py[co]
+*~
+*#
+\#*
+_virtualenv
+test/test.cfg
+test/metadata/MANIFEST.json
+wptrunner.egg-info
diff --git a/testing/web-platform/tests/tools/wptrunner/MANIFEST.in b/testing/web-platform/tests/tools/wptrunner/MANIFEST.in
new file mode 100644
index 0000000000..d36344f966
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/MANIFEST.in
@@ -0,0 +1,6 @@
+exclude MANIFEST.in
+include requirements.txt
+include wptrunner.default.ini
+include wptrunner/testharness_runner.html
+include wptrunner/*.js
+include wptrunner/executors/*.js
diff --git a/testing/web-platform/tests/tools/wptrunner/README.rst b/testing/web-platform/tests/tools/wptrunner/README.rst
new file mode 100644
index 0000000000..dae7d6ade7
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/README.rst
@@ -0,0 +1,14 @@
+wptrunner: A web-platform-tests harness
+=======================================
+
+wptrunner is a harness for running the W3C `web-platform-tests testsuite`_.
+
+.. toctree::
+ :maxdepth: 2
+
+ docs/expectation
+ docs/commands
+ docs/design
+ docs/internals
+
+.. _`web-platform-tests testsuite`: https://github.com/web-platform-tests/wpt
diff --git a/testing/web-platform/tests/tools/wptrunner/docs/architecture.svg b/testing/web-platform/tests/tools/wptrunner/docs/architecture.svg
new file mode 100644
index 0000000000..b8d5aa21c1
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/docs/architecture.svg
@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="780px" height="1087px" version="1.1"><defs><linearGradient x1="0%" y1="0%" x2="0%" y2="100%" id="mx-gradient-a9c4eb-1-a9c4eb-1-s-0"><stop offset="0%" style="stop-color:#A9C4EB"/><stop offset="100%" style="stop-color:#A9C4EB"/></linearGradient></defs><g transform="translate(0.5,0.5)"><rect x="498" y="498" width="120" height="60" fill="#e6d0de" stroke="#000000" pointer-events="none"/><g transform="translate(500,521)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">TestRunner</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="338" y="778" width="120" height="60" fill="#f19c99" stroke="#000000" pointer-events="none"/><g transform="translate(340,801)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">Product under test</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="338" y="388" width="120" height="60" fill="#e6d0de" stroke="#000000" pointer-events="none"/><g transform="translate(340,411)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">TestRunnerManager</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="338" y="228" width="120" height="60" fill="#e6d0de" stroke="#000000" pointer-events="none"/><g transform="translate(340,251)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">ManagerGroup</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="658" y="608" width="120" height="60" fill="#ffce9f" stroke="#000000" pointer-events="none"/><g transform="translate(660,631)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; 
white-space: normal; text-align: center;">Executor</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="338" y="498" width="120" height="60" fill="url(#mx-gradient-a9c4eb-1-a9c4eb-1-s-0)" stroke="#000000" pointer-events="none"/><g transform="translate(340,521)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">Browser</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path d="M 398 288 L 398 382" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 398 387 L 395 380 L 398 382 L 402 380 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 398 448 L 398 492" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 398 497 L 395 490 L 398 492 L 402 490 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 618 528 L 684 603" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 687 607 L 680 604 L 684 603 L 685 600 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><rect x="498" y="608" width="120" height="60" fill="#a9c4eb" stroke="#000000" pointer-events="none"/><g transform="translate(500,631)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">ExecutorBrowser</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path d="M 624 638 L 658 638" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 619 638 L 626 635 L 624 638 L 626 642 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 428 448 L 552 496" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><path d="M 557 498 L 549 498 L 552 496 L 552 492 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 398 558 L 398 772" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><path d="M 398 777 L 395 770 L 398 772 L 402 770 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><rect x="338" y="48" width="120" height="60" fill="#e6d0de" stroke="#000000" pointer-events="none"/><g transform="translate(340,71)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: 
center;">run_tests</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path d="M 458 78 L 652 78" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 657 78 L 650 82 L 652 78 L 650 75 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><rect x="658" y="48" width="120" height="60" fill="#e6d0de" stroke="#000000" pointer-events="none"/><g transform="translate(660,71)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">TestLoader</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="71" y="48" width="120" height="60" fill="#e6d0de" stroke="#000000" pointer-events="none"/><g transform="translate(73,71)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">TestEnvironment</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="151" y="618" width="120" height="60" fill="#b9e0a5" stroke="#000000" pointer-events="none"/><g transform="translate(153,641)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">wptserve</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="1" y="618" width="120" height="60" fill="#b9e0a5" stroke="#000000" pointer-events="none"/><g transform="translate(3,641)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">pywebsocket</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path d="M 338 78 L 197 78" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 192 78 L 199 75 L 197 78 L 199 82 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 101 308 L 62 612" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><path d="M 61 617 L 59 610 L 62 612 L 66 610 Z" fill="#000000" stroke="#000000" 
stroke-miterlimit="10" pointer-events="none"/><path d="M 161 308 L 204 612" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><path d="M 204 617 L 200 610 L 204 612 L 207 609 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 338 823 L 61 678" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><path d="M 211 678 L 338 793" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><path d="M 398 108 L 398 222" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 398 227 L 395 220 L 398 222 L 402 220 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 706 288 L 618 513" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><rect x="658" y="388" width="70" height="40" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="693" y="412">Queue.get</text></g><path d="M 458 808 L 718 668" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><rect x="71" y="248" width="120" height="60" fill="#b9e0a5" stroke="#000000" pointer-events="none"/><g transform="translate(73,271)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">serve.py</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path d="M 131 108 L 131 242" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><path d="M 131 247 L 128 240 L 131 242 L 135 240 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 88 973 L 132 973" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 137 973 L 130 977 L 132 973 L 130 970 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><rect x="138" y="1018" width="180" height="30" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="228" y="1037">Communication (cross process)</text></g><path d="M 88 1002 L 132 1002" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><path d="M 137 1002 L 130 1006 L 132 1002 L 130 999 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><rect x="138" y="958" width="180" height="30" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="228" y="977">Ownership (same process)</text></g><path d="M 88 1033 L 138 1033" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><rect x="143" y="988" width="180" height="30" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="233" y="1007">Ownership (cross process)</text></g><rect x="428" y="966" 
width="50" height="15" fill="#e6d0de" stroke="#000000" pointer-events="none"/><rect x="428" y="990" width="50" height="15" fill="#a9c4eb" stroke="#000000" pointer-events="none"/><rect x="428" y="1015" width="50" height="15" fill="#ffce9f" stroke="#000000" pointer-events="none"/><rect x="428" y="1063" width="50" height="15" fill="#f19c99" stroke="#000000" pointer-events="none"/><rect x="428" y="1038" width="50" height="15" fill="#b9e0a5" stroke="#000000" pointer-events="none"/><rect x="485" y="958" width="90" height="30" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="530" y="977">wptrunner class</text></g><rect x="486" y="983" width="150" height="30" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="561" y="1002">Per-product wptrunner class</text></g><rect x="486" y="1008" width="150" height="30" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="561" y="1027">Per-protocol wptrunner class</text></g><rect x="491" y="1031" width="150" height="30" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="566" y="1050">Web-platform-tests component</text></g><rect x="486" y="1055" width="90" height="30" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="531" y="1074">Browser process</text></g><path d="M 398 8 L 398 42" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 398 47 L 395 40 L 398 42 L 402 40 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><rect x="478" y="388" width="120" height="60" fill-opacity="0.5" fill="#e6d0de" stroke="#000000" stroke-opacity="0.5" pointer-events="none"/><g transform="translate(480,411)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">TestRunnerManager</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path d="M 398 288 L 533 384" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 537 387 L 529 386 L 533 384 L 533 380 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><rect x="198" y="388" width="120" height="60" fill-opacity="0.5" fill="#e6d0de" stroke="#000000" stroke-opacity="0.5" pointer-events="none"/><g transform="translate(200,411)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">TestRunnerManager</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path 
d="M 398 288 L 263 384" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 259 387 L 263 380 L 263 384 L 267 386 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><rect x="575" y="748" width="110" height="40" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="630" y="758">Browser control</text><text x="630" y="772">protocol</text><text x="630" y="786">(e.g. WebDriver)</text></g><rect x="258" y="708" width="80" height="40" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="298" y="732">HTTP</text></g><rect x="111" y="728" width="80" height="40" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="151" y="752">websockets</text></g><rect x="658" y="228" width="120" height="60" fill="#e6d0de" stroke="#000000" pointer-events="none"/><g transform="translate(660,251)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">Tests Queue</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path d="M 718 108 L 718 222" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 718 227 L 715 220 L 718 222 L 722 220 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 428 970 L 428 970" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/></g></svg>
diff --git a/testing/web-platform/tests/tools/wptrunner/docs/commands.rst b/testing/web-platform/tests/tools/wptrunner/docs/commands.rst
new file mode 100644
index 0000000000..02147a7129
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/docs/commands.rst
@@ -0,0 +1,79 @@
+commands.json
+=============
+
+:code:`commands.json` files define how subcommands are executed by the
+:code:`./wpt` command. :code:`wpt` searches all commands.json files under the top
+directory and sets up subcommands from these JSON files. A typical commands.json
+would look like the following::
+
+  {
+    "foo": {
+      "path": "foo.py",
+      "script": "run",
+      "parser": "get_parser",
+      "help": "Run foo"
+    },
+    "bar": {
+      "path": "bar.py",
+      "script": "run",
+      "virtualenv": true,
+      "requirements": [
+        "requirements.txt"
+      ]
+    }
+  }
+
+Each key of the top-level object defines the name of a subcommand, and its value
+(a properties object) specifies how the subcommand is executed. Each properties
+object must contain the :code:`path` and :code:`script` fields and may contain
+additional fields. All paths are relative to the commands.json file.
+
+:code:`path`
+ The path to a Python script that implements the subcommand.
+
+:code:`script`
+ The name of a function that is used as the entry point of the subcommand.
+
+:code:`parser`
+ The name of a function that creates an argparse parser for the subcommand.
+
+:code:`parse_known`
+ When True, `parse_known_args() <https://docs.python.org/3/library/argparse.html#argparse.ArgumentParser.parse_known_args>`_
+ is used instead of parse_args() for the subcommand. Defaults to False.
+
+:code:`help`
+ Brief description of the subcommand.
+
+:code:`virtualenv`
+ When True, the subcommand is executed within a virtualenv environment. Defaults
+ to True.
+
+:code:`requirements`
+ A list of paths where each path specifies a requirements.txt. All requirements
+ listed in these files are installed into the virtualenv environment before
+ running the subcommand. :code:`virtualenv` must be true when this field is
+ set.
+
+:code:`conditional_requirements`
+ A key-value object. Each key represents a condition, and its value represents
+ additional requirements when the condition is met. The requirements have the
+ same format as :code:`requirements`. Currently "commandline_flag" is the only
+ supported key. "commandline_flag" is used to specify requirements needed for a
+ certain command line flag of the subcommand. For example, given the following
+ commands.json::
+
+ "baz": {
+ "path": "baz.py",
+ "script": "run",
+ "virtualenv": true,
+ "conditional_requirements": {
+ "commandline_flag": {
+ "enable_feature1": [
+ "requirements_feature1.txt"
+ ]
+ }
+ }
+ }
+
+ Requirements in :code:`requirements_feature1.txt` are installed only when
+ :code:`--enable-feature1` is passed to :code:`./wpt baz`.
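+
+As an illustration, a hypothetical :code:`foo.py` matching the "foo" entry in
+the first example might define its entry points roughly as follows (the exact
+calling convention used by :code:`./wpt` is not described here, so the
+signatures below are only a sketch)::
+
+  import argparse
+
+  def get_parser():
+      # Named by the "parser" field: builds the argument parser for ./wpt foo.
+      parser = argparse.ArgumentParser("foo")
+      parser.add_argument("--quick", action="store_true",
+                          help="Run a reduced set of checks")
+      return parser
+
+  def run(**kwargs):
+      # Named by the "script" field: the function invoked to run the
+      # subcommand with the parsed arguments.
+      print("Running foo (quick=%s)" % kwargs.get("quick"))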
diff --git a/testing/web-platform/tests/tools/wptrunner/docs/design.rst b/testing/web-platform/tests/tools/wptrunner/docs/design.rst
new file mode 100644
index 0000000000..30f82711a5
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/docs/design.rst
@@ -0,0 +1,108 @@
+wptrunner Design
+================
+
+The design of wptrunner is intended to meet the following
+requirements:
+
+ * Possible to run tests from W3C web-platform-tests.
+
+ * Tests should be run as fast as possible. In particular it should
+ not be necessary to restart the browser between tests, or similar.
+
+ * As far as possible, the tests should run in a "normal" browser and
+ browsing context. In particular many tests assume that they are
+ running in a top-level browsing context, so we must avoid the use
+ of an ``iframe`` test container.
+
+ * It must be possible to deal with all kinds of behaviour of the
+ browser under test, for example, crashing, hanging, etc.
+
+ * It should be possible to add support for new platforms and browsers
+ with minimal code changes.
+
+ * It must be possible to run tests in parallel to further improve
+ performance.
+
+ * Test output must be in a machine readable form.
+
+Architecture
+------------
+
+In order to meet the above requirements, wptrunner is designed to
+push as much of the test scheduling as possible into the harness. This
+allows the harness to monitor the state of the browser and take
+appropriate action if it gets into an unwanted state, e.g. killing the
+browser if it appears to be hung.
+
+The harness will typically communicate with the browser via some remote
+control protocol such as WebDriver. However for browsers where no such
+protocol is supported, other implementation strategies are possible,
+typically at the expense of speed.
+
+The overall architecture of wptrunner is shown in the diagram below:
+
+.. image:: architecture.svg
+
+.. currentmodule:: wptrunner
+
+The main entry point to the code is :py:func:`~wptrunner.run_tests` in
+``wptrunner.py``. This is responsible for setting up the test
+environment, loading the list of tests to be executed, and invoking
+the remainder of the code to actually execute some tests.
+
+The test environment is encapsulated in the
+:py:class:`~environment.TestEnvironment` class. This defers to code in
+``web-platform-tests`` which actually starts the required servers to
+run the tests.
+
+The set of tests to run is defined by the
+:py:class:`~testloader.TestLoader`. This is constructed with a
+:py:class:`~testloader.TestFilter` (not shown), which takes any filter arguments
+from the command line to restrict the set of tests that will be
+run. The :py:class:`~testloader.TestLoader` reads both the ``web-platform-tests``
+JSON manifest and the expectation data stored in ini files and
+produces a :py:class:`multiprocessing.Queue` of tests to run, and
+their expected results.
+
+Actually running the tests happens through the
+:py:class:`~testrunner.ManagerGroup` object. This takes the :py:class:`~multiprocessing.Queue` of
+tests to be run and starts a :py:class:`~testrunner.TestRunnerManager` for each
+instance of the browser under test that will be started. These
+:py:class:`~testrunner.TestRunnerManager` instances are each started in their own
+thread.
+
+A :py:class:`~testrunner.TestRunnerManager` coordinates starting the product under
+test, and outputting results from the test. In the case that the test
+has timed out or the browser has crashed, it has to restart the
+browser to ensure the test run can continue. The functionality for
+initialising the browser under test, and probing its state
+(e.g. whether the process is still alive) is implemented through a
+:py:class:`~browsers.base.Browser` object. An implementation of this class must be
+provided for each product that is supported.
+
+The functionality for actually running the tests is provided by a
+:py:class:`~testrunner.TestRunner` object. :py:class:`~testrunner.TestRunner` instances are
+run in their own child process created with the
+:py:mod:`multiprocessing` module. This allows them to run concurrently
+and to be killed and restarted as required. Communication between the
+:py:class:`~testrunner.TestRunnerManager` and the :py:class:`~testrunner.TestRunner` is
+provided by a pair of queues, one for sending messages in each
+direction. In particular test results are sent from the
+:py:class:`~testrunner.TestRunner` to the :py:class:`~testrunner.TestRunnerManager` using one
+of these queues.
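+
+The following is a minimal sketch of that queue pattern (illustrative only,
+not wptrunner's actual classes): a child process consumes commands from one
+queue and reports results on another::
+
+  from multiprocessing import Process, Queue
+
+  def runner(command_queue, result_queue):
+      # Child process: pull commands until told to quit, push back results.
+      while True:
+          command, data = command_queue.get()
+          if command == "quit":
+              break
+          result_queue.put(("result", "%s done" % data))
+
+  if __name__ == "__main__":
+      command_queue, result_queue = Queue(), Queue()
+      proc = Process(target=runner, args=(command_queue, result_queue))
+      proc.start()
+      command_queue.put(("run_test", "example-test.html"))
+      print(result_queue.get())  # ("result", "example-test.html done")
+      command_queue.put(("quit", None))
+      proc.join()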
+
+The :py:class:`~testrunner.TestRunner` object is generic in that the same
+:py:class:`~testrunner.TestRunner` is used regardless of the product under
+test. However the details of how to run the test may vary greatly with
+the product since different products support different remote control
+protocols (or none at all). These protocol-specific parts are placed
+in the :py:class:`~executors.base.TestExecutor` object. There is typically a different
+:py:class:`~executors.base.TestExecutor` class for each combination of control protocol
+and test type. The :py:class:`~testrunner.TestRunner` is responsible for pulling
+each test off the :py:class:`multiprocessing.Queue` of tests and passing it down to
+the :py:class:`~executors.base.TestExecutor`.
+
+The executor often requires access to details of the particular
+browser instance that it is testing so that it knows e.g. which port
+to connect to to send commands to the browser. These details are
+encapsulated in the :py:class:`~browsers.base.ExecutorBrowser` class.
diff --git a/testing/web-platform/tests/tools/wptrunner/docs/expectation.rst b/testing/web-platform/tests/tools/wptrunner/docs/expectation.rst
new file mode 100644
index 0000000000..fea676565b
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/docs/expectation.rst
@@ -0,0 +1,366 @@
+Test Metadata
+=============
+
+Directory Layout
+----------------
+
+Metadata files must be stored under the ``metadata`` directory passed
+to the test runner. The directory layout follows that of
+web-platform-tests with each test source path having a corresponding
+metadata file. Because the metadata path is based on the source file
+path, files that generate multiple URLs e.g. tests with multiple
+variants, or multi-global tests generated from an ``any.js`` input
+file, share the same metadata file for all their corresponding
+tests. The metadata path under the ``metadata`` directory is the same
+as the source path under the ``tests`` directory, with an additional
+``.ini`` suffix.
+
+For example a test with URL::
+
+ /spec/section/file.html?query=param
+
+generated from a source file with path::
+
+ <tests root>/spec/section/file.html
+
+would have a metadata file ::
+
+ <metadata root>/spec/section/file.html.ini
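+
+The mapping itself is purely mechanical; a sketch of it in Python (for
+illustration only, wptrunner performs this internally) is::
+
+  def metadata_path(metadata_root, rel_source_path):
+      # The metadata file mirrors the source path, with ".ini" appended.
+      return "%s/%s.ini" % (metadata_root, rel_source_path)
+
+  assert (metadata_path("metadata", "spec/section/file.html") ==
+          "metadata/spec/section/file.html.ini")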
+
+As an optimisation, files which produce only default results
+(i.e. ``PASS`` or ``OK``), and which don't have any other associated
+metadata, don't require a corresponding metadata file.
+
+Directory Metadata
+~~~~~~~~~~~~~~~~~~
+
+In addition to per-test metadata, default metadata can be applied to
+all the tests in a given source location, using a ``__dir__.ini``
+metadata file. For example, to apply metadata to all tests under
+``<tests root>/spec/``, add the metadata in ``<tests
+root>/spec/__dir__.ini``.
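+
+For instance, a hypothetical ``__dir__.ini`` disabling every test in the
+directory (using the ``disabled`` key described below) might contain::
+
+  disabled: https://bugs.example.org/12345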
+
+Metadata Format
+---------------
+The format of the metadata files is based on the ini format. Files are
+divided into sections, each (apart from the root section) having a
+heading enclosed in square brackets. Within each section are key-value
+pairs. There are several notable differences from standard .ini files,
+however:
+
+ * Sections may be hierarchically nested, with significant whitespace
+ indicating nesting depth.
+
+ * Only ``:`` is valid as a key/value separator
+
+A simple example of a metadata file is::
+
+  root_key: root_value
+
+  [section]
+    section_key: section_value
+
+    [subsection]
+      subsection_key: subsection_value
+
+  [another_section]
+    another_key: [list, value]
+
+Conditional Values
+~~~~~~~~~~~~~~~~~~
+
+In order to support values that depend on some external data, the
+right hand side of a key/value pair can take a set of conditionals
+rather than a plain value. These values are placed on a new line
+following the key, with significant indentation. Conditional values
+are prefixed with ``if`` and terminated with a colon, for example::
+
+  key:
+    if cond1: value1
+    if cond2: value2
+    value3
+
+In this example, the value associated with ``key`` is determined by
+first evaluating ``cond1`` against external data. If that is true,
+``key`` is assigned the value ``value1``, otherwise ``cond2`` is
+evaluated in the same way. If both ``cond1`` and ``cond2`` are false,
+the unconditional ``value3`` is used.
+
+Conditions themselves use a Python-like expression syntax. Operands
+can either be variables, corresponding to data passed in, numbers
+(integer or floating point; exponential notation is not supported) or
+quote-delimited strings. Equality is tested using ``==`` and
+inequality by ``!=``. The operators ``and``, ``or`` and ``not`` are
+used in the expected way. Parentheses can also be used for
+grouping. For example::
+
+  key:
+    if (a == 2 or a == 3) and b == "abc": value1
+    if a == 1 or b != "abc": value2
+    value3
+
+Here ``a`` and ``b`` are variables, the value of which will be
+supplied when the metadata is used.
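+
+The semantics can be modelled as trying each condition in turn against the
+supplied data and falling back to the unconditional value. A small Python
+sketch of this model (not wptrunner's actual parser) is::
+
+  def resolve(conditionals, default, run_info):
+      # Return the value of the first condition that matches, else the default.
+      for predicate, value in conditionals:
+          if predicate(run_info):
+              return value
+      return default
+
+  conditionals = [
+      (lambda ri: ri["a"] in (2, 3) and ri["b"] == "abc", "value1"),
+      (lambda ri: ri["a"] == 1 or ri["b"] != "abc", "value2"),
+  ]
+
+  assert resolve(conditionals, "value3", {"a": 2, "b": "abc"}) == "value1"
+  assert resolve(conditionals, "value3", {"a": 1, "b": "abc"}) == "value2"
+  assert resolve(conditionals, "value3", {"a": 4, "b": "abc"}) == "value3"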
+
+Web-Platform-Tests Metadata
+---------------------------
+
+When used for expectation data, metadata files have the following format:
+
+ * A section per test URL provided by the corresponding source file,
+ with the section heading being the part of the test URL following
+ the last ``/`` in the path (this allows multiple tests in a single
+ metadata file with the same path part of the URL, but different
+ query parts). This may be omitted if there's no non-default
+ metadata for the test.
+
+ * A subsection per subtest, with the heading being the title of the
+ subtest. This may be omitted if there's no non-default metadata for
+ the subtest.
+
+ * The following known keys:
+
+ :expected:
+ The expectation value or values of each (sub)test. In
+ the case this value is a list, the first value represents the
+ typical expected test outcome, and subsequent values indicate
+ known intermittent outcomes e.g. ``expected: [PASS, ERROR]``
+ would indicate a test that usually passes but has a known-flaky
+ ``ERROR`` outcome.
+
+ :disabled:
+ Any value apart from the special value ``@False``
+ indicates that the (sub)test is disabled and should either not be
+ run (for tests) or have its results ignored (for subtests).
+
+ :restart-after:
+ Any value apart from the special value ``@False``
+ indicates that the runner should restart the browser after running
+ this test (e.g. to clear out unwanted state).
+
+ :fuzzy:
+ Used for reftests. This is interpreted as a list containing
+ entries like the ``<meta name=fuzzy>`` content value, which consists of
+ an optional reference identifier followed by a colon, then a range
+ indicating the maximum permitted pixel difference per channel, then a
+ semicolon, then a range indicating the maximum permitted total
+ number of differing pixels. The reference identifier is either a
+ single relative URL, resolved against the base test URL, in which
+ case the fuzziness applies to any comparison with that URL, or
+ takes the form lhs URL, comparison, rhs URL, in which case the
+ fuzziness only applies for any comparison involving that specific
+ pair of URLs. Some illustrative examples are given below.
+
+ :implementation-status:
+ One of the values ``implementing``,
+ ``not-implementing`` or ``default``. This is used in conjunction
+ with the ``--skip-implementation-status`` command line argument to
+ ``wptrunner`` to ignore certain features where running the test is
+ low value.
+
+ :tags:
+ A list of labels associated with a given test that can be
+ used in conjunction with the ``--tag`` command line argument to
+ ``wptrunner`` for test selection.
+
+ In addition there are extra arguments which are currently tied to
+ specific implementations. For example Gecko-based browsers support
+ ``min-asserts``, ``max-asserts``, ``prefs``, ``lsan-disabled``,
+ ``lsan-allowed``, ``lsan-max-stack-depth``, ``leak-allowed``, and
+ ``leak-threshold`` properties.
+
+ * Variables taken from the ``RunInfo`` data which describe the
+ configuration of the test run. Common properties include:
+
+ :product: A string giving the name of the browser under test
+ :browser_channel: A string giving the release channel of the browser under test
+ :debug: A Boolean indicating whether the build is a debug build
+ :os: A string indicating the operating system
+ :version: A string indicating the particular version of that operating system
+ :processor: A string indicating the processor architecture.
+
+ This information is typically provided by :py:mod:`mozinfo`, but
+ different environments may add additional information, and not all
+ the properties above are guaranteed to be present in all
+ environments. The definitive list of available properties for a
+ specific run may be determined by looking at the ``run_info`` key
+ in the ``wptreport.json`` output for the run.
+
+ * Top level keys are taken as defaults for the whole file. So, for
+ example, a top level key with ``expected: FAIL`` would indicate
+ that all tests and subtests in the file are expected to fail,
+ unless they have an ``expected`` key of their own.
+
+A simple example metadata file might look like::
+
+  [test.html?variant=basic]
+    type: testharness
+
+    [Test something unsupported]
+      expected: FAIL
+
+    [Test with intermittent statuses]
+      expected: [PASS, TIMEOUT]
+
+  [test.html?variant=broken]
+    expected: ERROR
+
+  [test.html?variant=unstable]
+    disabled: http://test.bugs.example.org/bugs/12345
+
+A more complex metadata file with conditional properties might be::
+
+  [canvas_test.html]
+    expected:
+      if os == "mac": FAIL
+      if os == "windows" and version == "XP": FAIL
+      PASS
+
+Note that ``PASS`` in the above works, but is unnecessary since it's
+the default expected result.
+
+A metadata file with fuzzy reftest values might be::
+
+  [reftest.html]
+    fuzzy: [10;200, ref1.html:20;200-300, subtest1.html==ref2.html:10-15;20]
+
+In this case the default fuzziness for any comparison would be to
+require a maximum difference per channel of less than or equal to 10
+and less than or equal to 200 total pixels different. For any
+comparison involving ref1.html on the right hand side, the limits
+would instead be a difference per channel not more than 20 and a total
+difference count of not less than 200 and not more than 300. For the
+specific comparison ``subtest1.html == ref2.html`` (both resolved against
+the test URL) these limits would instead be 10 to 15 and 0 to 20,
+respectively.
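+
+For illustration, the syntax of a single fuzzy entry as described above could
+be parsed roughly as follows (a sketch only, not wptrunner's implementation;
+a bare number is treated as a 0-N range, as in the examples)::
+
+  def parse_fuzzy(entry):
+      # Split off an optional reference identifier before the last colon.
+      ref = None
+      if ":" in entry:
+          ref, _, entry = entry.rpartition(":")
+
+      def parse_range(part):
+          if "-" in part:
+              low, high = part.split("-")
+              return int(low), int(high)
+          return 0, int(part)
+
+      channel, _, pixels = entry.partition(";")
+      return ref, parse_range(channel), parse_range(pixels)
+
+  assert parse_fuzzy("10;200") == (None, (0, 10), (0, 200))
+  assert parse_fuzzy("ref1.html:20;200-300") == ("ref1.html", (0, 20), (200, 300))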
+
+Generating Expectation Files
+----------------------------
+
+wpt provides the ``wpt update-expectations`` command to generate
+expectation files from the results of a set of test runs. The basic
+syntax for this is::
+
+ ./wpt update-expectations [options] [logfile]...
+
+Each ``logfile`` is a wptreport log file from a previous run. These
+can be generated from wptrunner using the ``--log-wptreport`` option
+e.g. ``--log-wptreport=wptreport.json``.
+
+``update-expectations`` takes several options:
+
+--full                  Overwrite all the expectation data for any tests that
+                        have a result in the passed log files, not just data
+                        for the same run configuration.
+
+--disable-intermittent  When updating test results, disable tests that have
+                        inconsistent results across many runs. The option may
+                        be followed by a message giving a reason why the test
+                        is disabled. If no message is provided, ``unstable``
+                        is used as the default text.
+
+--update-intermittent   When this option is used, the ``expected`` key stores
+                        expected intermittent statuses in addition to the
+                        primary expected status. If there is more than one
+                        status, it appears as a list. The default behaviour of
+                        this option is to retain any existing intermittent
+                        statuses in the list unless ``--remove-intermittent``
+                        is specified.
+
+--remove-intermittent   This option is used in conjunction with
+                        ``--update-intermittent``. When the ``expected``
+                        statuses are updated, any obsolete intermittent
+                        statuses that did not occur in the specified log files
+                        are removed from the list.
+
+Property Configuration
+~~~~~~~~~~~~~~~~~~~~~~
+
+In cases where the expectation depends on the run configuration, ``wpt
+update-expectations`` is able to generate conditional values. Because
+the relevant variables depend on the range of configurations that need
+to be covered, it's necessary to specify the list of configuration
+variables that should be used. This is done using a ``json`` format
+file that can be specified with the ``--properties-file`` command line
+argument to ``wpt update-expectations``. When this isn't supplied the
+defaults from ``<metadata root>/update_properties.json`` are used, if
+present.
+
+Properties File Format
+++++++++++++++++++++++
+
+The file is JSON formatted with two top-level keys:
+
+:``properties``:
+ A list of property names to consider for conditionals
+ e.g. ``["product", "os"]``.
+
+:``dependents``:
+ An optional dictionary containing properties that
+ should only be used as "tie-breakers" when differentiating based on a
+ specific top-level property has failed. This is useful when the
+ dependent property is always more specific than the top-level
+ property, but less understandable when used directly. For example the
+ ``version`` property covering different OS versions is typically
+ unique amongst different operating systems, but using it when the
+ ``os`` property would do instead is likely to produce metadata that's
+ too specific to the current configuration and more difficult to
+ read. But where there are multiple versions of the same operating
+ system with different results, it can be necessary. So specifying
+ ``{"os": ["version"]}`` as a dependent property means that the
+ ``version`` property will only be used if the condition already
+ contains the ``os`` property and further conditions are required to
+ separate the observed results.
+
+So an example ``update_properties.json`` file might look like::
+
+  {
+    "properties": ["product", "os"],
+    "dependents": {"product": ["browser_channel"], "os": ["version"]}
+  }
+
+Examples
+~~~~~~~~
+
+Update all the expectations from a set of cross-platform test runs::
+
+ wpt update-expectations --full osx.log linux.log windows.log
+
+Add expectation data for some new tests that are expected to be
+platform-independent::
+
+ wpt update-expectations tests.log
+
+Why a Custom Format?
+--------------------
+
+Introduction
+~~~~~~~~~~~~
+
+Given the use of the metadata files in CI systems, it was desirable to
+have something with the following properties:
+
+ * Human readable
+
+ * Human editable
+
+ * Machine readable / writable
+
+ * Capable of storing key-value pairs
+
+ * Suitable for storing in a version control system (i.e. text-based)
+
+The need for different results per platform means either having
+multiple expectation files for each platform, or having a way to
+express conditional values within a single file. The former would be
+rather cumbersome for humans updating the expectation files, so the
+latter approach has been adopted, leading to the requirement:
+
+ * Capable of storing result values that are conditional on the platform.
+
+There are few extant formats that clearly meet these requirements. In
+particular although conditional properties could be expressed in many
+existing formats, the representation would likely be cumbersome and
+error-prone for hand authoring. Therefore it was decided that a custom
+format offered the best tradeoffs given the requirements.
diff --git a/testing/web-platform/tests/tools/wptrunner/docs/internals.rst b/testing/web-platform/tests/tools/wptrunner/docs/internals.rst
new file mode 100644
index 0000000000..780df872ed
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/docs/internals.rst
@@ -0,0 +1,23 @@
+wptrunner Internals
+===================
+
+.. These modules are intentionally referenced as submodules from the parent
+ directory. This ensures that Sphinx interprets them as packages.
+
+.. automodule:: wptrunner.browsers.base
+ :members:
+
+.. automodule:: wptrunner.environment
+ :members:
+
+.. automodule:: wptrunner.executors.base
+ :members:
+
+.. automodule:: wptrunner.wptrunner
+ :members:
+
+.. automodule:: wptrunner.testloader
+ :members:
+
+.. automodule:: wptrunner.testrunner
+ :members:
diff --git a/testing/web-platform/tests/tools/wptrunner/requirements.txt b/testing/web-platform/tests/tools/wptrunner/requirements.txt
new file mode 100644
index 0000000000..a7face3bd0
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/requirements.txt
@@ -0,0 +1,11 @@
+html5lib==1.1
+mozdebug==0.3.0
+mozinfo==1.2.3 # https://bugzilla.mozilla.org/show_bug.cgi?id=1621226
+mozlog==8.0.0
+mozprocess==1.3.1
+packaging==23.1
+pillow==9.5.0
+requests==2.31.0
+six==1.16.0
+urllib3==2.0.7
+aioquic==0.9.19
diff --git a/testing/web-platform/tests/tools/wptrunner/requirements_chromium.txt b/testing/web-platform/tests/tools/wptrunner/requirements_chromium.txt
new file mode 100644
index 0000000000..883b061589
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/requirements_chromium.txt
@@ -0,0 +1 @@
+aioquic==0.9.21
diff --git a/testing/web-platform/tests/tools/wptrunner/requirements_firefox.txt b/testing/web-platform/tests/tools/wptrunner/requirements_firefox.txt
new file mode 100644
index 0000000000..3ba4731494
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/requirements_firefox.txt
@@ -0,0 +1,10 @@
+marionette_driver==3.4.0
+mozcrash==2.2.0
+mozdevice==4.1.1
+mozinstall==2.1.0
+mozleak==0.2
+mozprofile==2.6.1
+mozrunner==8.3.0
+mozversion==2.4.0
+psutil==5.9.5
+redo==2.0.4
diff --git a/testing/web-platform/tests/tools/wptrunner/requirements_opera.txt b/testing/web-platform/tests/tools/wptrunner/requirements_opera.txt
new file mode 100644
index 0000000000..4ff0fedd32
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/requirements_opera.txt
@@ -0,0 +1,2 @@
+mozprocess==1.3.1
+selenium==4.14.0
diff --git a/testing/web-platform/tests/tools/wptrunner/requirements_safari.txt b/testing/web-platform/tests/tools/wptrunner/requirements_safari.txt
new file mode 100644
index 0000000000..bcce11aed8
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/requirements_safari.txt
@@ -0,0 +1 @@
+psutil==5.9.5
diff --git a/testing/web-platform/tests/tools/wptrunner/requirements_sauce.txt b/testing/web-platform/tests/tools/wptrunner/requirements_sauce.txt
new file mode 100644
index 0000000000..5538fb0672
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/requirements_sauce.txt
@@ -0,0 +1,2 @@
+selenium==4.14.0
+requests==2.31.0
diff --git a/testing/web-platform/tests/tools/wptrunner/setup.py b/testing/web-platform/tests/tools/wptrunner/setup.py
new file mode 100644
index 0000000000..3a0c1a1f73
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/setup.py
@@ -0,0 +1,66 @@
+import glob
+import os
+import sys
+import textwrap
+
+from setuptools import setup, find_packages
+
+here = os.path.dirname(__file__)
+
+PACKAGE_NAME = 'wptrunner'
+PACKAGE_VERSION = '1.14'
+
+# Dependencies
+with open(os.path.join(here, "requirements.txt")) as f:
+ deps = f.read().splitlines()
+
+# Browser-specific requirements
+requirements_files = glob.glob("requirements_*.txt")
+
+profile_dest = None
+dest_exists = False
+
+setup(name=PACKAGE_NAME,
+ version=PACKAGE_VERSION,
+ description="Harness for running the W3C web-platform-tests against various products",
+ author='Mozilla Automation and Testing Team',
+ author_email='tools@lists.mozilla.org',
+ license='MPL 2.0',
+ packages=find_packages(exclude=["tests", "metadata", "prefs"]),
+ entry_points={
+ 'console_scripts': [
+ 'wptrunner = wptrunner.wptrunner:main',
+ 'wptupdate = wptrunner.update:main',
+ ]
+ },
+ zip_safe=False,
+ platforms=['Any'],
+ classifiers=['Development Status :: 4 - Beta',
+ 'Environment :: Console',
+ 'Intended Audience :: Developers',
+ 'License :: OSI Approved :: BSD License',
+ 'Operating System :: OS Independent'],
+ package_data={"wptrunner": ["executors/testharness_marionette.js",
+ "executors/testharness_webdriver.js",
+ "executors/reftest.js",
+ "executors/reftest-wait.js",
+ "testharnessreport.js",
+ "testharness_runner.html",
+ "wptrunner.default.ini",
+ "browsers/sauce_setup/*",
+ "prefs/*"]},
+ include_package_data=True,
+ data_files=[("requirements", requirements_files)],
+ )
+
+if "install" in sys.argv:
+ path = os.path.relpath(os.path.join(sys.prefix, "requirements"), os.curdir)
+    print(textwrap.fill("""In order to use wptrunner with one of the built-in
+browser products, you will need to install the extra dependencies. These are
+provided as requirements_[name].txt in the %s directory and can be installed
+using e.g.""" % path, 80))
+
+ print("""
+
+pip install -r %s/requirements_firefox.txt
+""" % path)
diff --git a/testing/web-platform/tests/tools/wptrunner/tox.ini b/testing/web-platform/tests/tools/wptrunner/tox.ini
new file mode 100644
index 0000000000..82d3ac6f55
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/tox.ini
@@ -0,0 +1,23 @@
+[pytest]
+xfail_strict=true
+
+[tox]
+envlist = py311-{base,chrome,firefox,opera,safari,sauce,servo,webkit,webkitgtk_minibrowser,epiphany},{py37,py38,py39,py310}-base
+skip_missing_interpreters = False
+
+[testenv]
+deps =
+ -r{toxinidir}/../requirements_pytest.txt
+ -r{toxinidir}/requirements.txt
+ chrome: -r{toxinidir}/requirements_chromium.txt
+ firefox: -r{toxinidir}/requirements_firefox.txt
+ opera: -r{toxinidir}/requirements_opera.txt
+ safari: -r{toxinidir}/requirements_safari.txt
+ sauce: -r{toxinidir}/requirements_sauce.txt
+
+commands = pytest -c{toxinidir}/../pytest.ini --rootdir={toxinidir} {posargs}
+
+setenv = CURRENT_TOX_ENV = {envname}
+
+passenv =
+ TASKCLUSTER_ROOT_URL
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner.default.ini b/testing/web-platform/tests/tools/wptrunner/wptrunner.default.ini
new file mode 100644
index 0000000000..19462bc317
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner.default.ini
@@ -0,0 +1,11 @@
+[products]
+
+[web-platform-tests]
+remote_url = https://github.com/web-platform-tests/wpt.git
+branch = master
+sync_path = %(pwd)s/sync
+
+[manifest:default]
+tests = %(pwd)s/tests
+metadata = %(pwd)s/meta
+url_base = /
\ No newline at end of file
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/__init__.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/__init__.py
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/__init__.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/__init__.py
new file mode 100644
index 0000000000..81dc549d73
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/__init__.py
@@ -0,0 +1,44 @@
+"""Subpackage where each product is defined. Each product is created by adding
+a .py file containing a __wptrunner__ variable in the global scope. This must be
+a dictionary with the fields
+
+"product": Name of the product, assumed to be unique.
+"browser": String indicating the Browser implementation used to launch that
+ product.
+"executor": Dictionary with keys as supported test types and values as the name
+ of the Executor implementation that will be used to run that test
+ type.
+"browser_kwargs": String naming a function that takes product, binary,
+ prefs_root and the wptrunner.run_tests kwargs dict as arguments
+ and returns a dictionary of kwargs to use when creating the
+ Browser class.
+"executor_kwargs": String naming a function that takes http server url and
+ timeout multiplier and returns kwargs to use when creating
+ the executor class.
+"env_options": String naming a function of no arguments that returns the
+ arguments passed to the TestEnvironment.
+
+All classes and functions named in the above dict must be imported into the
+module global scope.
+"""
+
+product_list = ["android_weblayer",
+ "android_webview",
+ "chrome",
+ "chrome_android",
+ "chrome_ios",
+ "chromium",
+ "content_shell",
+ "edgechromium",
+ "firefox",
+ "firefox_android",
+ "safari",
+ "sauce",
+ "servo",
+ "servodriver",
+ "opera",
+ "webkit",
+ "webkitgtk_minibrowser",
+ "wktr",
+ "epiphany",
+ "ladybird"]
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/android_weblayer.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/android_weblayer.py
new file mode 100644
index 0000000000..db23b64793
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/android_weblayer.py
@@ -0,0 +1,105 @@
+# mypy: allow-untyped-defs
+
+from .base import NullBrowser # noqa: F401
+from .base import require_arg
+from .base import get_timeout_multiplier # noqa: F401
+from .chrome import executor_kwargs as chrome_executor_kwargs
+from .chrome_android import ChromeAndroidBrowserBase
+from ..executors.base import WdspecExecutor # noqa: F401
+from ..executors.executorchrome import ChromeDriverPrintRefTestExecutor # noqa: F401
+from ..executors.executorwebdriver import (WebDriverCrashtestExecutor, # noqa: F401
+ WebDriverTestharnessExecutor, # noqa: F401
+ WebDriverRefTestExecutor) # noqa: F401
+
+
+__wptrunner__ = {"product": "android_weblayer",
+ "check_args": "check_args",
+ "browser": {None: "WeblayerShell",
+ "wdspec": "NullBrowser"},
+ "executor": {"testharness": "WebDriverTestharnessExecutor",
+ "reftest": "WebDriverRefTestExecutor",
+ "print-reftest": "ChromeDriverPrintRefTestExecutor",
+ "wdspec": "WdspecExecutor",
+ "crashtest": "WebDriverCrashtestExecutor"},
+ "browser_kwargs": "browser_kwargs",
+ "executor_kwargs": "executor_kwargs",
+ "env_extras": "env_extras",
+ "env_options": "env_options",
+ "timeout_multiplier": "get_timeout_multiplier"}
+
+_wptserve_ports = set()
+
+
+def check_args(**kwargs):
+ require_arg(kwargs, "webdriver_binary")
+
+
+def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
+ return {"binary": kwargs["binary"],
+ "adb_binary": kwargs["adb_binary"],
+ "device_serial": kwargs["device_serial"],
+ "webdriver_binary": kwargs["webdriver_binary"],
+ "webdriver_args": kwargs.get("webdriver_args"),
+ "stackwalk_binary": kwargs.get("stackwalk_binary"),
+ "symbols_path": kwargs.get("symbols_path")}
+
+
+def executor_kwargs(logger, test_type, test_environment, run_info_data,
+ **kwargs):
+ # Use update() to modify the global list in place.
+ _wptserve_ports.update(set(
+ test_environment.config['ports']['http'] + test_environment.config['ports']['https'] +
+ test_environment.config['ports']['ws'] + test_environment.config['ports']['wss']
+ ))
+
+ executor_kwargs = chrome_executor_kwargs(logger, test_type, test_environment, run_info_data,
+ **kwargs)
+ del executor_kwargs["capabilities"]["goog:chromeOptions"]["prefs"]
+ capabilities = executor_kwargs["capabilities"]
+ # Note that for WebLayer, we launch a test shell and have the test shell use
+ # WebLayer.
+ # https://cs.chromium.org/chromium/src/weblayer/shell/android/shell_apk/
+ capabilities["goog:chromeOptions"]["androidPackage"] = \
+ "org.chromium.weblayer.shell"
+ capabilities["goog:chromeOptions"]["androidActivity"] = ".WebLayerShellActivity"
+ capabilities["goog:chromeOptions"]["androidKeepAppDataDir"] = \
+ kwargs.get("keep_app_data_directory")
+
+ # Workaround: driver.quit() cannot quit WeblayerShell.
+ executor_kwargs["pause_after_test"] = False
+ # Workaround: driver.close() is not supported.
+ executor_kwargs["restart_after_test"] = True
+ executor_kwargs["close_after_done"] = False
+ return executor_kwargs
+
+
+def env_extras(**kwargs):
+ return []
+
+
+def env_options():
+ # allow the use of host-resolver-rules in lieu of modifying /etc/hosts file
+ return {"server_host": "127.0.0.1"}
+
+
+class WeblayerShell(ChromeAndroidBrowserBase):
+    """The WebLayer shell is backed by chromedriver, which is supplied
+    through ``wptrunner.webdriver.ChromeDriverServer``.
+ """
+
+ def __init__(self, logger, binary,
+ webdriver_binary="chromedriver",
+ adb_binary=None,
+ remote_queue=None,
+ device_serial=None,
+ webdriver_args=None,
+ stackwalk_binary=None,
+ symbols_path=None):
+        """Creates a new representation of the WebLayer shell. The `binary`
+        argument gives the browser binary to use for testing."""
+ super().__init__(logger,
+ webdriver_binary, adb_binary, remote_queue,
+ device_serial, webdriver_args, stackwalk_binary,
+ symbols_path)
+ self.binary = binary
+ self.wptserver_ports = _wptserve_ports
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/android_webview.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/android_webview.py
new file mode 100644
index 0000000000..4ad7066178
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/android_webview.py
@@ -0,0 +1,103 @@
+# mypy: allow-untyped-defs
+
+from .base import NullBrowser # noqa: F401
+from .base import require_arg
+from .base import get_timeout_multiplier # noqa: F401
+from .chrome import executor_kwargs as chrome_executor_kwargs
+from .chrome_android import ChromeAndroidBrowserBase
+from ..executors.base import WdspecExecutor # noqa: F401
+from ..executors.executorchrome import ChromeDriverPrintRefTestExecutor # noqa: F401
+from ..executors.executorwebdriver import (WebDriverCrashtestExecutor, # noqa: F401
+ WebDriverTestharnessExecutor, # noqa: F401
+ WebDriverRefTestExecutor) # noqa: F401
+
+
+__wptrunner__ = {"product": "android_webview",
+ "check_args": "check_args",
+ "browser": "SystemWebViewShell",
+ "executor": {"testharness": "WebDriverTestharnessExecutor",
+ "reftest": "WebDriverRefTestExecutor",
+ "print-reftest": "ChromeDriverPrintRefTestExecutor",
+ "wdspec": "WdspecExecutor",
+ "crashtest": "WebDriverCrashtestExecutor"},
+ "browser_kwargs": "browser_kwargs",
+ "executor_kwargs": "executor_kwargs",
+ "env_extras": "env_extras",
+ "env_options": "env_options",
+ "timeout_multiplier": "get_timeout_multiplier"}
+
+_wptserve_ports = set()
+
+
+def check_args(**kwargs):
+ require_arg(kwargs, "webdriver_binary")
+
+
+def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
+ return {"binary": kwargs["binary"],
+ "adb_binary": kwargs["adb_binary"],
+ "device_serial": kwargs["device_serial"],
+ "webdriver_binary": kwargs["webdriver_binary"],
+ "webdriver_args": kwargs.get("webdriver_args"),
+ "stackwalk_binary": kwargs.get("stackwalk_binary"),
+ "symbols_path": kwargs.get("symbols_path")}
+
+
+def executor_kwargs(logger, test_type, test_environment, run_info_data,
+ **kwargs):
+ # Use update() to modify the global list in place.
+ _wptserve_ports.update(set(
+ test_environment.config['ports']['http'] + test_environment.config['ports']['https'] +
+ test_environment.config['ports']['ws'] + test_environment.config['ports']['wss']
+ ))
+
+ executor_kwargs = chrome_executor_kwargs(logger, test_type, test_environment, run_info_data,
+ **kwargs)
+ del executor_kwargs["capabilities"]["goog:chromeOptions"]["prefs"]
+ capabilities = executor_kwargs["capabilities"]
+ # Note that for WebView, we launch a test shell and have the test shell use WebView.
+ # https://chromium.googlesource.com/chromium/src/+/HEAD/android_webview/docs/webview-shell.md
+ capabilities["goog:chromeOptions"]["androidPackage"] = \
+ kwargs.get("package_name", "org.chromium.webview_shell")
+ capabilities["goog:chromeOptions"]["androidActivity"] = \
+ "org.chromium.webview_shell.WebPlatformTestsActivity"
+ capabilities["goog:chromeOptions"]["androidKeepAppDataDir"] = \
+ kwargs.get("keep_app_data_directory")
+
+ # Workaround: driver.quit() cannot quit SystemWebViewShell.
+ executor_kwargs["pause_after_test"] = False
+ # Workaround: driver.close() is not supported.
+ executor_kwargs["restart_after_test"] = True
+ executor_kwargs["close_after_done"] = False
+ return executor_kwargs
+
+
+def env_extras(**kwargs):
+ return []
+
+
+def env_options():
+ # allow the use of host-resolver-rules in lieu of modifying /etc/hosts file
+ return {"server_host": "127.0.0.1"}
+
+
+class SystemWebViewShell(ChromeAndroidBrowserBase):
+    """The System WebView shell is backed by chromedriver, which is supplied
+    through ``wptrunner.webdriver.ChromeDriverServer``.
+ """
+
+ def __init__(self, logger, binary, webdriver_binary="chromedriver",
+ adb_binary=None,
+ remote_queue=None,
+ device_serial=None,
+ webdriver_args=None,
+ stackwalk_binary=None,
+ symbols_path=None):
+        """Creates a new representation of the WebView shell. The `binary`
+        argument gives the browser binary to use for testing."""
+ super().__init__(logger,
+ webdriver_binary, adb_binary, remote_queue,
+ device_serial, webdriver_args, stackwalk_binary,
+ symbols_path)
+ self.binary = binary
+ self.wptserver_ports = _wptserve_ports
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/base.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/base.py
new file mode 100644
index 0000000000..180e3fb959
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/base.py
@@ -0,0 +1,424 @@
+# mypy: allow-untyped-defs
+
+import enum
+import errno
+import os
+import platform
+import socket
+import time
+import traceback
+from abc import ABCMeta, abstractmethod
+
+import mozprocess
+
+from ..environment import wait_for_service
+from ..wptcommandline import require_arg # noqa: F401
+
+here = os.path.dirname(__file__)
+
+
+def cmd_arg(name, value=None):
+ prefix = "-" if platform.system() == "Windows" else "--"
+ rv = prefix + name
+ if value is not None:
+ rv += "=" + value
+ return rv
+
+
+def maybe_add_args(required_args, current_args):
+ for required_arg in required_args:
+ # If the arg is in the form of "variable=value", only add it if
+ # no arg with another value for "variable" is already there.
+ if "=" in required_arg:
+ required_arg_prefix = "%s=" % required_arg.split("=")[0]
+ if not any(item.startswith(required_arg_prefix) for item in current_args):
+ current_args.append(required_arg)
+ else:
+ if required_arg not in current_args:
+ current_args.append(required_arg)
+ return current_args
+
+
+def certificate_domain_list(list_of_domains, certificate_file):
+ """Build a list of domains where certificate_file should be used"""
+ cert_list = []
+ for domain in list_of_domains:
+ cert_list.append({"host": domain, "certificateFile": certificate_file})
+ return cert_list
+
+
+def get_free_port():
+ """Get a random unbound port"""
+ while True:
+ s = socket.socket()
+ try:
+ s.bind(("127.0.0.1", 0))
+ except OSError:
+ continue
+ else:
+ return s.getsockname()[1]
+ finally:
+ s.close()
+
+
+def get_timeout_multiplier(test_type, run_info_data, **kwargs):
+ if kwargs["timeout_multiplier"] is not None:
+ return kwargs["timeout_multiplier"]
+ return 1
+
+
+def browser_command(binary, args, debug_info):
+ if debug_info:
+ if debug_info.requiresEscapedArgs:
+ args = [item.replace("&", "\\&") for item in args]
+ debug_args = [debug_info.path] + debug_info.args
+ else:
+ debug_args = []
+
+ command = [binary] + args
+
+ return debug_args, command
+
+
+class BrowserError(Exception):
+ pass
+
+
+class Browser:
+ """Abstract class serving as the basis for Browser implementations.
+
+ The Browser is used in the TestRunnerManager to start and stop the browser
+ process, and to check the state of that process.
+
+ :param logger: Structured logger to use for output.
+ """
+ __metaclass__ = ABCMeta
+
+ process_cls = None
+ init_timeout = 30
+
+ def __init__(self, logger):
+ self.logger = logger
+
+ def setup(self):
+ """Used for browser-specific setup that happens at the start of a test run"""
+ pass
+
+ def settings(self, test):
+ """Dictionary of metadata that is constant for a specific launch of a browser.
+
+ This is used to determine when the browser instance configuration changes, requiring
+ a relaunch of the browser. The test runner calls this method for each test, and if the
+ returned value differs from that for the previous test, the browser is relaunched.
+ """
+ return {}
+
+ @abstractmethod
+ def start(self, group_metadata, **kwargs):
+        """Launch the browser object and get it into a state where it is ready to run tests"""
+ pass
+
+ @abstractmethod
+ def stop(self, force=False):
+ """Stop the running browser process."""
+ pass
+
+ @abstractmethod
+ def pid(self):
+ """pid of the browser process or None if there is no pid"""
+ pass
+
+ @abstractmethod
+ def is_alive(self):
+ """Boolean indicating whether the browser process is still running"""
+ pass
+
+ def cleanup(self):
+ """Browser-specific cleanup that is run after the testrun is finished"""
+ pass
+
+ def executor_browser(self):
+ """Returns the ExecutorBrowser subclass for this Browser subclass and the keyword arguments
+ with which it should be instantiated"""
+ return ExecutorBrowser, {}
+
+ def maybe_parse_tombstone(self):
+ """Possibly parse tombstones on Android device for Android target"""
+ pass
+
+ def check_crash(self, process, test):
+        """Check if a crash occurred and output any useful information to the
+        log. Returns a boolean indicating whether a crash occurred."""
+ return False
+
+ @property
+ def pac(self):
+ return None
+
+class NullBrowser(Browser):
+ def __init__(self, logger, **kwargs):
+ super().__init__(logger)
+
+ def start(self, **kwargs):
+ """No-op browser to use in scenarios where the TestRunnerManager shouldn't
+ actually own the browser process (e.g. Servo where we start one browser
+ per test)"""
+ pass
+
+ def stop(self, force=False):
+ pass
+
+ def pid(self):
+ return None
+
+ def is_alive(self):
+ return True
+
+
+class ExecutorBrowser:
+ """View of the Browser used by the Executor object.
+ This is needed because the Executor runs in a child process and
+ we can't ship Browser instances between processes on Windows.
+
+ Typically this will have a few product-specific properties set,
+ but in some cases it may have more elaborate methods for setting
+ up the browser from the runner process.
+ """
+ def __init__(self, **kwargs):
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+
+@enum.unique
+class OutputHandlerState(enum.IntEnum):
+ BEFORE_PROCESS_START = 1
+ AFTER_PROCESS_START = 2
+ AFTER_HANDLER_START = 3
+ AFTER_PROCESS_STOP = 4
+
+
+class OutputHandler:
+ """Class for handling output from a browser process.
+
+ This class is responsible for consuming the logging from a browser process
+ and passing it into the relevant logger. A class instance is designed to
+ be passed as the processOutputLine argument to mozprocess.ProcessHandler.
+
+ The setup of this class is complex for various reasons:
+
+ * We need to create an instance of the class before starting the process
+ * We want access to data about the running process e.g. the pid
+ * We want to launch the process and later setup additional log handling
+      which is retrospectively applied to any existing output (this supports
+ prelaunching browsers for performance, but having log output depend on the
+ tests that are run e.g. for leak suppression).
+
+ Therefore the lifecycle is as follows::
+
+ output_handler = OutputHandler(logger, command, **output_handler_kwargs)
+ proc = ProcessHandler(command, ..., processOutputLine=output_handler)
+ output_handler.after_process_start(proc.pid)
+ [...]
+ # All logging to this point was buffered in-memory, but after start()
+ # it's actually sent to the logger.
+ output_handler.start(**output_logger_start_kwargs)
+ [...]
+ proc.wait()
+ output_handler.after_process_stop()
+
+ Since the process lifetime and the output handler lifetime are coupled (it doesn't
+ work to reuse an output handler for multiple processes), it might make sense to have
+ a single class that owns the process and the output processing for the process.
+ This is complicated by the fact that we don't always run the process directly,
+ but sometimes use a wrapper e.g. mozrunner.
+ """
+
+ def __init__(self, logger, command, **kwargs):
+ self.logger = logger
+ self.command = command
+ self.pid = None
+ self.state = OutputHandlerState.BEFORE_PROCESS_START
+ self.line_buffer = []
+
+ def after_process_start(self, pid):
+ assert self.state == OutputHandlerState.BEFORE_PROCESS_START
+ self.logger.debug("OutputHandler.after_process_start")
+ self.pid = pid
+ self.state = OutputHandlerState.AFTER_PROCESS_START
+
+ def start(self, **kwargs):
+ assert self.state == OutputHandlerState.AFTER_PROCESS_START
+ self.logger.debug("OutputHandler.start")
+ # Need to change the state here before we try to empty the buffer
+ # or we'll just re-buffer the existing output.
+ self.state = OutputHandlerState.AFTER_HANDLER_START
+ for item in self.line_buffer:
+ self(item)
+ self.line_buffer = None
+
+ def after_process_stop(self, clean_shutdown=True):
+ # If we didn't get as far as configure, just
+ # dump all logs with no configuration
+ self.logger.debug("OutputHandler.after_process_stop")
+ if self.state < OutputHandlerState.AFTER_HANDLER_START:
+ self.start()
+ self.state = OutputHandlerState.AFTER_PROCESS_STOP
+
+ def __call__(self, line):
+ if self.state < OutputHandlerState.AFTER_HANDLER_START:
+ self.line_buffer.append(line)
+ return
+
+ # Could assert that there's no output handled once we're in the
+ # after_process_stop phase, although technically there's a race condition
+ # here because we don't know the logging thread has finished draining the
+ # logs. The solution might be to move this into mozprocess itself.
+
+ self.logger.process_output(self.pid,
+ line.decode("utf8", "replace"),
+ command=" ".join(self.command) if self.command else "")
+
+
+class WebDriverBrowser(Browser):
+ __metaclass__ = ABCMeta
+
+ def __init__(self, logger, binary=None, webdriver_binary=None,
+ webdriver_args=None, host="127.0.0.1", port=None, base_path="/",
+ env=None, supports_pac=True, **kwargs):
+ super().__init__(logger)
+
+ if webdriver_binary is None:
+ raise ValueError("WebDriver server binary must be given "
+ "to --webdriver-binary argument")
+
+ self.logger = logger
+ self.binary = binary
+ self.webdriver_binary = webdriver_binary
+
+ self.host = host
+ self._port = port
+ self._supports_pac = supports_pac
+
+ self.base_path = base_path
+ self.env = os.environ.copy() if env is None else env
+ self.webdriver_args = webdriver_args if webdriver_args is not None else []
+
+ self.init_deadline = None
+ self._output_handler = None
+ self._cmd = None
+ self._proc = None
+ self._pac = None
+
+ def make_command(self):
+ """Returns the full command for starting the server process as a list."""
+ return [self.webdriver_binary] + self.webdriver_args
+
+ def start(self, group_metadata, **kwargs):
+ self.init_deadline = time.time() + self.init_timeout
+ try:
+ self._run_server(group_metadata, **kwargs)
+ except KeyboardInterrupt:
+ self.stop()
+ raise
+
+ def create_output_handler(self, cmd):
+ """Return an instance of the class used to handle application output.
+
+ This can be overridden by subclasses which have particular requirements
+ for parsing, or otherwise using, the output."""
+ return OutputHandler(self.logger, cmd)
+
+ def _run_server(self, group_metadata, **kwargs):
+ cmd = self.make_command()
+ self._output_handler = self.create_output_handler(cmd)
+
+ self._proc = mozprocess.ProcessHandler(
+ cmd,
+ processOutputLine=self._output_handler,
+ env=self.env,
+ storeOutput=False)
+
+ self.logger.debug("Starting WebDriver: %s" % ' '.join(cmd))
+ try:
+ self._proc.run()
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ raise OSError(
+ "WebDriver executable not found: %s" % self.webdriver_binary) from e
+ raise
+ self._output_handler.after_process_start(self._proc.pid)
+
+ try:
+ wait_for_service(
+ self.logger,
+ self.host,
+ self.port,
+ timeout=self.init_deadline - time.time(),
+ server_process=self._proc,
+ )
+ except Exception:
+ self.logger.error(
+ "WebDriver was not accessible "
+ f"within the timeout:\n{traceback.format_exc()}")
+ raise
+ finally:
+ self._output_handler.start(group_metadata=group_metadata, **kwargs)
+ self.logger.debug("_run complete")
+
+ def stop(self, force=False):
+ self.logger.debug("Stopping WebDriver")
+ clean = True
+ if self.is_alive():
+ # Pass a timeout value to mozprocess Processhandler.kill()
+ # to ensure it always returns within it.
+ # See https://bugzilla.mozilla.org/show_bug.cgi?id=1760080
+ kill_result = self._proc.kill(timeout=5)
+ if force and kill_result != 0:
+ clean = False
+ self._proc.kill(9, timeout=5)
+ success = not self.is_alive()
+ if success and self._output_handler is not None:
+ # Only try to do output post-processing if we managed to shut down
+ self._output_handler.after_process_stop(clean)
+ self._output_handler = None
+ return success
+
+ def is_alive(self):
+ return hasattr(self._proc, "proc") and self._proc.poll() is None
+
+ @property
+ def url(self):
+ if self.port is not None:
+ return f"http://{self.host}:{self.port}{self.base_path}"
+ raise ValueError("Can't get WebDriver URL before port is assigned")
+
+ @property
+ def pid(self):
+ if self._proc is not None:
+ return self._proc.pid
+
+ @property
+ def port(self):
+ # If no port is supplied, we'll get a free port right before we use it.
+ # Nothing guarantees an absence of race conditions here.
+ if self._port is None:
+ self._port = get_free_port()
+ return self._port
+
+ def cleanup(self):
+ self.stop()
+
+ def executor_browser(self):
+ return ExecutorBrowser, {"webdriver_url": self.url,
+ "host": self.host,
+ "port": self.port,
+ "pac": self.pac,
+ "env": self.env}
+
+ def settings(self, test):
+ self._pac = test.environment.get("pac", None) if self._supports_pac else None
+ return {"pac": self._pac}
+
+ @property
+ def pac(self):
+ return self._pac
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chrome.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chrome.py
new file mode 100644
index 0000000000..05f81461e2
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chrome.py
@@ -0,0 +1,181 @@
+# mypy: allow-untyped-defs
+
+from . import chrome_spki_certs
+from .base import WebDriverBrowser, require_arg
+from .base import NullBrowser # noqa: F401
+from .base import get_timeout_multiplier # noqa: F401
+from .base import cmd_arg
+from ..executors import executor_kwargs as base_executor_kwargs
+from ..executors.executorwebdriver import WebDriverCrashtestExecutor # noqa: F401
+from ..executors.base import WdspecExecutor # noqa: F401
+from ..executors.executorchrome import ( # noqa: F401
+ ChromeDriverPrintRefTestExecutor,
+ ChromeDriverRefTestExecutor,
+ ChromeDriverTestharnessExecutor,
+)
+
+
+__wptrunner__ = {"product": "chrome",
+ "check_args": "check_args",
+ "browser": "ChromeBrowser",
+ "executor": {"testharness": "ChromeDriverTestharnessExecutor",
+ "reftest": "ChromeDriverRefTestExecutor",
+ "print-reftest": "ChromeDriverPrintRefTestExecutor",
+ "wdspec": "WdspecExecutor",
+ "crashtest": "WebDriverCrashtestExecutor"},
+ "browser_kwargs": "browser_kwargs",
+ "executor_kwargs": "executor_kwargs",
+ "env_extras": "env_extras",
+ "env_options": "env_options",
+ "update_properties": "update_properties",
+ "timeout_multiplier": "get_timeout_multiplier",}
+
+
+def debug_args(debug_info):
+ if debug_info.interactive:
+ # Keep in sync with:
+ # https://chromium.googlesource.com/chromium/src/+/main/third_party/blink/tools/debug_renderer
+ return [
+ "--no-sandbox",
+ "--disable-hang-monitor",
+ "--wait-for-debugger-on-navigation",
+ ]
+ return []
+
+
+def check_args(**kwargs):
+ require_arg(kwargs, "webdriver_binary")
+
+
+def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
+ return {"binary": kwargs["binary"],
+ "webdriver_binary": kwargs["webdriver_binary"],
+ "webdriver_args": kwargs.get("webdriver_args")}
+
+
+def executor_kwargs(logger, test_type, test_environment, run_info_data,
+ **kwargs):
+ sanitizer_enabled = kwargs.get("sanitizer_enabled")
+ if sanitizer_enabled:
+ test_type = "crashtest"
+ executor_kwargs = base_executor_kwargs(test_type, test_environment, run_info_data,
+ **kwargs)
+ executor_kwargs["close_after_done"] = True
+ executor_kwargs["sanitizer_enabled"] = sanitizer_enabled
+ executor_kwargs["reuse_window"] = kwargs.get("reuse_window", False)
+
+ capabilities = {
+ "goog:chromeOptions": {
+ "prefs": {
+ "profile": {
+ "default_content_setting_values": {
+ "popups": 1
+ }
+ }
+ },
+ "excludeSwitches": ["enable-automation"],
+ "w3c": True,
+ }
+ }
+
+ chrome_options = capabilities["goog:chromeOptions"]
+ if kwargs["binary"] is not None:
+ chrome_options["binary"] = kwargs["binary"]
+
+ # Here we set a few Chrome flags that are always passed.
+ # ChromeDriver's "acceptInsecureCerts" capability only controls the current
+ # browsing context, whereas the CLI flag works for workers, too.
+ chrome_options["args"] = []
+
+ chrome_options["args"].append("--ignore-certificate-errors-spki-list=%s" %
+ ','.join(chrome_spki_certs.IGNORE_CERTIFICATE_ERRORS_SPKI_LIST))
+
+ # Allow audio autoplay without a user gesture.
+ chrome_options["args"].append("--autoplay-policy=no-user-gesture-required")
+ # Allow WebRTC tests to call getUserMedia and getDisplayMedia.
+ chrome_options["args"].append("--use-fake-device-for-media-stream")
+ chrome_options["args"].append("--use-fake-ui-for-media-stream")
+ # Use a fake UI for FedCM to allow testing it.
+ chrome_options["args"].append("--use-fake-ui-for-fedcm")
+ # Shorten delay for Reporting <https://w3c.github.io/reporting/>.
+ chrome_options["args"].append("--short-reporting-delay")
+ # Point all .test domains to localhost for Chrome
+ chrome_options["args"].append("--host-resolver-rules=MAP nonexistent.*.test ^NOTFOUND, MAP *.test 127.0.0.1, MAP *.test. 127.0.0.1")
+ # Enable Secure Payment Confirmation for Chrome. This is normally disabled
+ # on Linux as it hasn't shipped there yet, but in WPT we enable virtual
+ # authenticator devices anyway for testing and so SPC works.
+ chrome_options["args"].append("--enable-features=SecurePaymentConfirmationBrowser")
+ # For WebTransport tests.
+ chrome_options["args"].append("--webtransport-developer-mode")
+ # The GenericSensorExtraClasses flag enables the browser-side
+ # implementation of sensors such as Ambient Light Sensor.
+ chrome_options["args"].append("--enable-features=GenericSensorExtraClasses")
+
+ # Classify `http-private`, `http-public` and https variants in the
+ # appropriate IP address spaces.
+ # For more details, see: https://github.com/web-platform-tests/rfcs/blob/master/rfcs/address_space_overrides.md
+ address_space_overrides_ports = [
+ ("http-private", "private"),
+ ("http-public", "public"),
+ ("https-private", "private"),
+ ("https-public", "public"),
+ ]
+ address_space_overrides_arg = ",".join(
+ f"127.0.0.1:{port_number}={address_space}"
+ for port_name, address_space in address_space_overrides_ports
+ for port_number in test_environment.config.ports.get(port_name, [])
+ )
+ if address_space_overrides_arg:
+ chrome_options["args"].append(
+ "--ip-address-space-overrides=" + address_space_overrides_arg)
+
+ if kwargs["enable_mojojs"]:
+ chrome_options["args"].append("--enable-blink-features=MojoJS,MojoJSTest")
+
+ if kwargs["enable_swiftshader"]:
+ # https://chromium.googlesource.com/chromium/src/+/HEAD/docs/gpu/swiftshader.md
+ chrome_options["args"].extend(["--use-gl=angle", "--use-angle=swiftshader"])
+
+ if kwargs["enable_experimental"]:
+ chrome_options["args"].extend(["--enable-experimental-web-platform-features"])
+
+ # Copy over any other flags that were passed in via `--binary-arg`
+ for arg in kwargs.get("binary_args", []):
+ if arg not in chrome_options["args"]:
+ chrome_options["args"].append(arg)
+
+ # Pass the --headless=new flag to Chrome if WPT's own --headless flag was
+ # set. '--headless' should always mean the new headless mode, as the old
+ # headless mode is not used anyway.
+ if kwargs["headless"] and ("--headless=new" not in chrome_options["args"] and
+ "--headless" not in chrome_options["args"]):
+ chrome_options["args"].append("--headless=new")
+
+ if test_type == "wdspec":
+ executor_kwargs["binary_args"] = chrome_options["args"]
+
+ executor_kwargs["capabilities"] = capabilities
+
+ return executor_kwargs
+
+
+def env_extras(**kwargs):
+ return []
+
+
+def env_options():
+ # TODO(crbug.com/1440021): Support text-based debuggers for `chrome` through
+ # `chromedriver`.
+ return {"server_host": "127.0.0.1"}
+
+
+def update_properties():
+ return (["debug", "os", "processor"], {"os": ["version"], "processor": ["bits"]})
+
+
+class ChromeBrowser(WebDriverBrowser):
+ def make_command(self):
+ return [self.webdriver_binary,
+ cmd_arg("port", str(self.port)),
+ cmd_arg("url-base", self.base_path),
+ cmd_arg("enable-chrome-logs")] + self.webdriver_args
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chrome_android.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chrome_android.py
new file mode 100644
index 0000000000..820323e615
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chrome_android.py
@@ -0,0 +1,244 @@
+# mypy: allow-untyped-defs
+
+import mozprocess
+import subprocess
+
+from .base import cmd_arg, require_arg
+from .base import get_timeout_multiplier # noqa: F401
+from .base import WebDriverBrowser # noqa: F401
+from .chrome import executor_kwargs as chrome_executor_kwargs
+from ..executors.base import WdspecExecutor # noqa: F401
+from ..executors.executorchrome import ChromeDriverPrintRefTestExecutor # noqa: F401
+from ..executors.executorwebdriver import (WebDriverCrashtestExecutor, # noqa: F401
+ WebDriverTestharnessExecutor, # noqa: F401
+ WebDriverRefTestExecutor) # noqa: F401
+
+
+__wptrunner__ = {"product": "chrome_android",
+ "check_args": "check_args",
+ "browser": "ChromeAndroidBrowser",
+ "executor": {"testharness": "WebDriverTestharnessExecutor",
+ "reftest": "WebDriverRefTestExecutor",
+ "print-reftest": "ChromeDriverPrintRefTestExecutor",
+ "wdspec": "WdspecExecutor",
+ "crashtest": "WebDriverCrashtestExecutor"},
+ "browser_kwargs": "browser_kwargs",
+ "executor_kwargs": "executor_kwargs",
+ "env_extras": "env_extras",
+ "env_options": "env_options",
+ "timeout_multiplier": "get_timeout_multiplier"}
+
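+# wptserve ports for the current test environment. These are recorded by
+# executor_kwargs() below and read by the browser instance (via its
+# wptserver_ports attribute) when setting up "adb reverse" port forwarding.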
+_wptserve_ports = set()
+
+
+def check_args(**kwargs):
+ require_arg(kwargs, "package_name")
+ require_arg(kwargs, "webdriver_binary")
+
+
+def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
+ return {"package_name": kwargs["package_name"],
+ "adb_binary": kwargs["adb_binary"],
+ "device_serial": kwargs["device_serial"],
+ "webdriver_binary": kwargs["webdriver_binary"],
+ "webdriver_args": kwargs.get("webdriver_args"),
+ "stackwalk_binary": kwargs.get("stackwalk_binary"),
+ "symbols_path": kwargs.get("symbols_path")}
+
+
+def executor_kwargs(logger, test_type, test_environment, run_info_data,
+ **kwargs):
+ # Use update() to modify the global list in place.
+ _wptserve_ports.update(set(
+ test_environment.config['ports']['http'] + test_environment.config['ports']['https'] +
+ test_environment.config['ports']['ws'] + test_environment.config['ports']['wss']
+ ))
+
+ executor_kwargs = chrome_executor_kwargs(logger, test_type, test_environment, run_info_data,
+ **kwargs)
+ # Remove unsupported options on mobile.
+ del executor_kwargs["capabilities"]["goog:chromeOptions"]["prefs"]
+
+ assert kwargs["package_name"], "missing --package-name"
+ capabilities = executor_kwargs["capabilities"]
+ capabilities["goog:chromeOptions"]["androidPackage"] = \
+ kwargs["package_name"]
+ capabilities["goog:chromeOptions"]["androidKeepAppDataDir"] = \
+ kwargs.get("keep_app_data_directory")
+
+ return executor_kwargs
+
+
+def env_extras(**kwargs):
+ return []
+
+
+def env_options():
+ # allow the use of host-resolver-rules in lieu of modifying /etc/hosts file
+ return {"server_host": "127.0.0.1"}
+
+
+class LogcatRunner:
+ def __init__(self, logger, browser, remote_queue):
+ self.logger = logger
+ self.browser = browser
+ self.remote_queue = remote_queue
+
+ def start(self):
+ try:
+ self._run()
+ except KeyboardInterrupt:
+ self.stop()
+
+ def _run(self):
+ try:
+            # TODO: adb logcat -c fails randomly with message
+ # "failed to clear the 'main' log"
+ self.browser.clear_log()
+ except subprocess.CalledProcessError:
+ self.logger.error("Failed to clear logcat buffer")
+
+ self._cmd = self.browser.logcat_cmd()
+ self._proc = mozprocess.ProcessHandler(
+ self._cmd,
+ processOutputLine=self.on_output,
+ storeOutput=False)
+ self._proc.run()
+
+ def _send_message(self, command, *args):
+ try:
+ self.remote_queue.put((command, args))
+ except AssertionError:
+            self.logger.warning("Error when sending to remote queue")
+
+ def stop(self, force=False):
+ if self.is_alive():
+ kill_result = self._proc.kill()
+ if force and kill_result != 0:
+ self._proc.kill(9)
+
+ def is_alive(self):
+ return hasattr(self._proc, "proc") and self._proc.poll() is None
+
+ def on_output(self, line):
+ data = {
+ "action": "process_output",
+ "process": "LOGCAT",
+ "command": "logcat",
+ "data": line
+ }
+ self._send_message("log", data)
+
+
+class ChromeAndroidBrowserBase(WebDriverBrowser):
+ def __init__(self,
+ logger,
+ webdriver_binary="chromedriver",
+ adb_binary=None,
+ remote_queue=None,
+ device_serial=None,
+ webdriver_args=None,
+ stackwalk_binary=None,
+ symbols_path=None):
+ super().__init__(logger,
+ binary=None,
+ webdriver_binary=webdriver_binary,
+ webdriver_args=webdriver_args,)
+ self.adb_binary = adb_binary or "adb"
+ self.device_serial = device_serial
+ self.stackwalk_binary = stackwalk_binary
+ self.symbols_path = symbols_path
+ self.remote_queue = remote_queue
+
+ if self.remote_queue is not None:
+ self.logcat_runner = LogcatRunner(self.logger, self, self.remote_queue)
+
+ def setup(self):
+ self.setup_adb_reverse()
+ if self.remote_queue is not None:
+ self.logcat_runner.start()
+
+ def _adb_run(self, args):
+ cmd = [self.adb_binary]
+ if self.device_serial:
+ cmd.extend(['-s', self.device_serial])
+ cmd.extend(args)
+ self.logger.info(' '.join(cmd))
+ subprocess.check_call(cmd)
+
+ def make_command(self):
+ return [self.webdriver_binary,
+ cmd_arg("port", str(self.port)),
+ cmd_arg("url-base", self.base_path),
+ cmd_arg("enable-chrome-logs")] + self.webdriver_args
+
+ def cleanup(self):
+ super().cleanup()
+ self._adb_run(['forward', '--remove-all'])
+ self._adb_run(['reverse', '--remove-all'])
+ if self.remote_queue is not None:
+ self.logcat_runner.stop(force=True)
+
+ def executor_browser(self):
+ cls, kwargs = super().executor_browser()
+ kwargs["capabilities"] = {
+ "goog:chromeOptions": {
+ "androidDeviceSerial": self.device_serial
+ }
+ }
+ return cls, kwargs
+
+ def clear_log(self):
+ self._adb_run(['logcat', '-c'])
+
+ def logcat_cmd(self):
+ cmd = [self.adb_binary]
+ if self.device_serial:
+ cmd.extend(['-s', self.device_serial])
+ cmd.extend(['logcat', '*:D'])
+ return cmd
+
+ def check_crash(self, process, test):
+ self.maybe_parse_tombstone()
+        # Existence of a tombstone does not necessarily mean the test target
+        # has crashed. Always return False so we don't change the test results.
+ return False
+
+ def maybe_parse_tombstone(self):
+ if self.stackwalk_binary:
+ cmd = [self.stackwalk_binary, "-a", "-w"]
+ if self.device_serial:
+ cmd.extend(["--device", self.device_serial])
+ cmd.extend(["--output-directory", self.symbols_path])
+ raw_output = subprocess.check_output(cmd)
+ for line in raw_output.splitlines():
+ self.logger.process_output("TRACE", line, "logcat")
+
+ def setup_adb_reverse(self):
+ self._adb_run(['wait-for-device'])
+ self._adb_run(['forward', '--remove-all'])
+ self._adb_run(['reverse', '--remove-all'])
+ # "adb reverse" forwards network connection from device to host.
+ for port in self.wptserver_ports:
+ self._adb_run(['reverse', 'tcp:%d' % port, 'tcp:%d' % port])
+
+
+class ChromeAndroidBrowser(ChromeAndroidBrowserBase):
+ """Chrome is backed by chromedriver, which is supplied through
+ ``wptrunner.webdriver.ChromeDriverServer``.
+ """
+
+ def __init__(self, logger, package_name,
+ webdriver_binary="chromedriver",
+ adb_binary=None,
+ remote_queue = None,
+ device_serial=None,
+ webdriver_args=None,
+ stackwalk_binary=None,
+ symbols_path=None):
+ super().__init__(logger,
+ webdriver_binary, adb_binary, remote_queue,
+ device_serial, webdriver_args, stackwalk_binary,
+ symbols_path)
+ self.package_name = package_name
+ self.wptserver_ports = _wptserve_ports
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chrome_ios.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chrome_ios.py
new file mode 100644
index 0000000000..85c98f2994
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chrome_ios.py
@@ -0,0 +1,58 @@
+# mypy: allow-untyped-defs
+
+from .base import WebDriverBrowser, require_arg
+from .base import get_timeout_multiplier # noqa: F401
+from ..executors import executor_kwargs as base_executor_kwargs
+from ..executors.base import WdspecExecutor # noqa: F401
+from ..executors.executorwebdriver import (WebDriverTestharnessExecutor, # noqa: F401
+ WebDriverRefTestExecutor) # noqa: F401
+
+
+__wptrunner__ = {"product": "chrome_ios",
+ "check_args": "check_args",
+ "browser": "ChromeiOSBrowser",
+ "executor": {"testharness": "WebDriverTestharnessExecutor",
+ "reftest": "WebDriverRefTestExecutor"},
+ "browser_kwargs": "browser_kwargs",
+ "executor_kwargs": "executor_kwargs",
+ "env_extras": "env_extras",
+ "env_options": "env_options",
+ "timeout_multiplier": "get_timeout_multiplier"}
+
+def check_args(**kwargs):
+ require_arg(kwargs, "webdriver_binary")
+
+
+def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
+ return {"webdriver_binary": kwargs["webdriver_binary"],
+ "webdriver_args": kwargs.get("webdriver_args")}
+
+
+def executor_kwargs(logger, test_type, test_environment, run_info_data,
+ **kwargs):
+ executor_kwargs = base_executor_kwargs(test_type, test_environment, run_info_data,
+ **kwargs)
+ executor_kwargs["close_after_done"] = True
+ executor_kwargs["capabilities"] = {}
+ return executor_kwargs
+
+
+def env_extras(**kwargs):
+ return []
+
+
+def env_options():
+ # allow the use of host-resolver-rules in lieu of modifying /etc/hosts file
+ return {"server_host": "127.0.0.1"}
+
+
+class ChromeiOSBrowser(WebDriverBrowser):
+ """ChromeiOS is backed by CWTChromeDriver, which is supplied through
+ ``wptrunner.webdriver.CWTChromeDriverServer``.
+ """
+
+ init_timeout = 120
+
+ def make_command(self):
+ return ([self.webdriver_binary, f"--port={self.port}"] +
+ self.webdriver_args)
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chrome_spki_certs.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chrome_spki_certs.py
new file mode 100644
index 0000000000..0f7825c80f
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chrome_spki_certs.py
@@ -0,0 +1,13 @@
+# This file is automatically generated by 'wpt regen-certs'
+# DO NOT EDIT MANUALLY.
+
+# tools/certs/web-platform.test.pem
+WPT_FINGERPRINT = 'sCJ8962Wxqgz44IKoPQLcDT7YRRAxO2w1iYIqpMYHhg='
+
+# signed-exchange/resources/127.0.0.1.sxg.pem
+SXG_WPT_FINGERPRINT = '0Rt4mT6SJXojEMHTnKnlJ/hBKMBcI4kteBlhR1eTTdk='
+
+IGNORE_CERTIFICATE_ERRORS_SPKI_LIST = [
+ WPT_FINGERPRINT,
+ SXG_WPT_FINGERPRINT
+]
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chromium.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chromium.py
new file mode 100644
index 0000000000..13cb49aed2
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/chromium.py
@@ -0,0 +1,57 @@
+# mypy: allow-untyped-defs
+
+from . import chrome
+from .base import NullBrowser # noqa: F401
+from .base import get_timeout_multiplier # noqa: F401
+from ..executors.executorwebdriver import (WebDriverTestharnessExecutor, # noqa: F401
+ WebDriverRefTestExecutor, # noqa: F401
+ WebDriverCrashtestExecutor) # noqa: F401
+from ..executors.base import WdspecExecutor # noqa: F401
+from ..executors.executorchrome import ChromeDriverPrintRefTestExecutor # noqa: F401
+
+
+__wptrunner__ = {"product": "chromium",
+ "check_args": "check_args",
+ "browser": "ChromiumBrowser",
+ "executor": {"testharness": "WebDriverTestharnessExecutor",
+ "reftest": "WebDriverRefTestExecutor",
+ "print-reftest": "ChromeDriverPrintRefTestExecutor",
+ "wdspec": "WdspecExecutor",
+ "crashtest": "WebDriverCrashtestExecutor"},
+ "browser_kwargs": "browser_kwargs",
+ "executor_kwargs": "executor_kwargs",
+ "env_extras": "env_extras",
+ "env_options": "env_options",
+ "update_properties": "update_properties",
+ "timeout_multiplier": "get_timeout_multiplier"}
+
+
+# Chromium will rarely need a product definition that is different from Chrome.
+# If any wptrunner options need to differ from Chrome, they can be added as
+# an additional step after the execution of Chrome's functions.
+def check_args(**kwargs):
+ chrome.check_args(**kwargs)
+
+
+def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
+ return chrome.browser_kwargs(logger, test_type, run_info_data, config, **kwargs)
+
+
+def executor_kwargs(logger, test_type, test_environment, run_info_data, **kwargs):
+ return chrome.executor_kwargs(logger, test_type, test_environment, run_info_data, **kwargs)
+
+
+def env_extras(**kwargs):
+ return chrome.env_extras(**kwargs)
+
+
+def env_options():
+ return chrome.env_options()
+
+
+def update_properties():
+ return chrome.update_properties()
+
+
+class ChromiumBrowser(chrome.ChromeBrowser):
+ pass
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/content_shell.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/content_shell.py
new file mode 100644
index 0000000000..23f4e99da6
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/content_shell.py
@@ -0,0 +1,299 @@
+# mypy: allow-untyped-defs
+
+import contextlib
+import os
+import subprocess
+from multiprocessing import Queue, Event
+from threading import Thread
+from urllib.parse import urljoin
+
+from . import chrome_spki_certs
+from .base import (
+ Browser,
+ ExecutorBrowser,
+ OutputHandler,
+ browser_command,
+)
+from .base import get_timeout_multiplier # noqa: F401
+from .chrome import debug_args
+from ..executors import executor_kwargs as base_executor_kwargs
+from ..executors.base import server_url
+from ..executors.executorcontentshell import ( # noqa: F401
+ ContentShellCrashtestExecutor,
+ ContentShellPrintRefTestExecutor,
+ ContentShellRefTestExecutor,
+ ContentShellTestharnessExecutor,
+)
+
+ENABLE_THREADED_COMPOSITING_FLAG = '--enable-threaded-compositing'
+DISABLE_THREADED_COMPOSITING_FLAG = '--disable-threaded-compositing'
+DISABLE_THREADED_ANIMATION_FLAG = '--disable-threaded-animation'
+
+
+__wptrunner__ = {"product": "content_shell",
+ "check_args": "check_args",
+ "browser": "ContentShellBrowser",
+ "executor": {
+ "crashtest": "ContentShellCrashtestExecutor",
+ "print-reftest": "ContentShellPrintRefTestExecutor",
+ "reftest": "ContentShellRefTestExecutor",
+ "testharness": "ContentShellTestharnessExecutor",
+ },
+ "browser_kwargs": "browser_kwargs",
+ "executor_kwargs": "executor_kwargs",
+ "env_extras": "env_extras",
+ "env_options": "env_options",
+ "update_properties": "update_properties",
+ "timeout_multiplier": "get_timeout_multiplier",}
+
+
+def check_args(**kwargs):
+ pass
+
+
+def browser_kwargs(logger, test_type, run_info_data, config, subsuite, **kwargs):
+ args = []
+ args.append("--ignore-certificate-errors-spki-list=%s" %
+ ','.join(chrome_spki_certs.IGNORE_CERTIFICATE_ERRORS_SPKI_LIST))
+ # For WebTransport tests.
+ args.append("--webtransport-developer-mode")
+
+ if not kwargs["headless"]:
+ args.append("--disable-headless-mode")
+
+ if kwargs["debug_info"]:
+ args.extend(debug_args(kwargs["debug_info"]))
+
+ # `--run-web-tests -` are specific to content_shell - they activate web
+ # test protocol mode.
+ args.append("--run-web-tests")
+ for arg in kwargs.get("binary_args", []):
+ if arg not in args:
+ args.append(arg)
+
+ # Temporary workaround to align with RWT behavior. Unless a vts explicitly
+ # enables threaded compositing, we should use single threaded compositing
+ if ENABLE_THREADED_COMPOSITING_FLAG not in subsuite.config.get("binary_args", []):
+ args.extend([DISABLE_THREADED_COMPOSITING_FLAG,
+ DISABLE_THREADED_ANIMATION_FLAG])
+
+ for arg in subsuite.config.get("binary_args", []):
+ if arg not in args:
+ args.append(arg)
+ args.append("-")
+
+ return {"binary": kwargs["binary"],
+ "binary_args": args,
+ "debug_info": kwargs["debug_info"],
+ "pac_origin": server_url(config, "http")}
+
+
+def executor_kwargs(logger, test_type, test_environment, run_info_data,
+ **kwargs):
+ sanitizer_enabled = kwargs.get("sanitizer_enabled")
+ if sanitizer_enabled:
+ test_type = "crashtest"
+ executor_kwargs = base_executor_kwargs(test_type, test_environment, run_info_data,
+ **kwargs)
+ executor_kwargs["sanitizer_enabled"] = sanitizer_enabled
+ return executor_kwargs
+
+
+def env_extras(**kwargs):
+ return []
+
+
+def env_options():
+ return {"server_host": "127.0.0.1",
+ "testharnessreport": "testharnessreport-content-shell.js",
+ "supports_debugger": True}
+
+
+def update_properties():
+ return (["debug", "os", "processor"], {"os": ["version"], "processor": ["bits"]})
+
+
+class ContentShellBrowser(Browser):
+ """Class that represents an instance of content_shell.
+
+ Upon startup, the stdout, stderr, and stdin pipes of the underlying content_shell
+ process are connected to multiprocessing Queues so that the runner process can
+ interact with content_shell through its protocol mode.
+
+ See Also:
+ Protocol Mode: https://chromium.googlesource.com/chromium/src.git/+/HEAD/content/web_test/browser/test_info_extractor.h
+ """
+ # Seconds to wait for the process to stop after it was sent a `QUIT`
+ # command, after which `SIGTERM` or `TerminateProcess()` forces termination.
+ # The timeout is ported from:
+ # https://chromium.googlesource.com/chromium/src/+/b175d48d3ea4ea66eea35c88c11aa80d233f3bee/third_party/blink/tools/blinkpy/web_tests/port/base.py#476
+ termination_timeout: float = 3
+
+ def __init__(self, logger, binary="content_shell", binary_args=None,
+ debug_info=None, pac_origin=None, **kwargs):
+ super().__init__(logger)
+ self._debug_cmd_prefix, self._browser_cmd = browser_command(
+ binary, binary_args or [], debug_info)
+ self._output_handler = None
+ self._proc = None
+ self._pac_origin = pac_origin
+ self._pac = None
+
+ def start(self, group_metadata, **settings):
+ browser_cmd, pac = list(self._browser_cmd), settings.get("pac")
+ if pac:
+ browser_cmd.insert(1, f"--proxy-pac-url={pac}")
+ self.logger.debug(f"Starting content shell: {browser_cmd[0]}...")
+ args = [*self._debug_cmd_prefix, *browser_cmd]
+ self._output_handler = OutputHandler(self.logger, args)
+ if os.name == "posix":
+ close_fds, preexec_fn = True, lambda: os.setpgid(0, 0)
+ else:
+ close_fds, preexec_fn = False, None
+ self._proc = subprocess.Popen(args,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ close_fds=close_fds,
+ preexec_fn=preexec_fn)
+ self._output_handler.after_process_start(self._proc.pid)
+
+ self._stdout_queue = Queue()
+ self._stderr_queue = Queue()
+ self._stdin_queue = Queue()
+ self._io_stopped = Event()
+
+ self._stdout_reader = self._create_reader_thread("stdout-reader",
+ self._proc.stdout,
+ self._stdout_queue,
+ prefix=b"OUT: ")
+ self._stderr_reader = self._create_reader_thread("stderr-reader",
+ self._proc.stderr,
+ self._stderr_queue,
+ prefix=b"ERR: ")
+ self._stdin_writer = self._create_writer_thread("stdin-writer",
+ self._proc.stdin,
+ self._stdin_queue)
+
+ # Content shell is likely still in the process of initializing. The actual waiting
+ # for the startup to finish is done in the ContentShellProtocol.
+ self.logger.debug("Content shell has been started.")
+ self._output_handler.start(group_metadata=group_metadata, **settings)
+
+ def stop(self, force=False):
+ self.logger.debug("Stopping content shell...")
+
+ clean_shutdown = stopped = True
+ if self.is_alive():
+ clean_shutdown = self._terminate_process(force=force)
+
+ # Close these queues cleanly to avoid broken pipe error spam in the logs.
+ self._stdin_queue.put(None)
+ for thread in [self._stdout_reader, self._stderr_reader, self._stdin_writer]:
+ thread.join(2)
+ if thread.is_alive():
+ self.logger.warning(f"Content shell IO thread {thread.name} did not shut down gracefully.")
+ stopped = False
+
+ if not self.is_alive():
+ self.logger.debug(
+ "Content shell has been stopped "
+ f"(PID: {self._proc.pid}, exit code: {self._proc.returncode})")
+ else:
+ stopped = False
+ self.logger.warning(f"Content shell failed to stop (PID: {self._proc.pid})")
+ if stopped and self._output_handler is not None:
+ self._output_handler.after_process_stop(clean_shutdown)
+ self._output_handler = None
+ return stopped
+
+    def _terminate_process(self, force: bool = False) -> bool:
+        """Ask content shell to quit, escalating if it does not exit in time.
+
+        Returns whether the process responded to the `QUIT` command, i.e.,
+        shut down cleanly.
+        """
+        self._stdin_queue.put(b"QUIT\n")
+        with contextlib.suppress(subprocess.TimeoutExpired):
+            self._proc.wait(timeout=self.termination_timeout)
+            return True
+        self.logger.warning(
+            "Content shell failed to respond to QUIT command "
+            f"(PID: {self._proc.pid}, timeout: {self.termination_timeout}s)")
+        # Skip `terminate()` on Windows, where it is an alias for `kill()` [1],
+        # and only `kill()` when `force=True`.
+        #
+        # [1]: https://docs.python.org/3/library/subprocess.html#subprocess.Popen.kill
+        if os.name == "posix":
+            self._proc.terminate()
+            with contextlib.suppress(subprocess.TimeoutExpired):
+                self._proc.wait(timeout=1)
+            return False
+        if force:
+            self._proc.kill()
+        return False
+
+ def is_alive(self):
+ return self._proc is not None and self._proc.poll() is None
+
+ def pid(self):
+ return self._proc.pid if self._proc else None
+
+ def executor_browser(self):
+ """This function returns the `ExecutorBrowser` object that is used by other
+ processes to interact with content_shell. In our case, this consists of the three
+ multiprocessing Queues as well as an `io_stopped` event to signal when the
+ underlying pipes have reached EOF.
+ """
+ return ExecutorBrowser, {"stdout_queue": self._stdout_queue,
+ "stderr_queue": self._stderr_queue,
+ "stdin_queue": self._stdin_queue,
+ "io_stopped": self._io_stopped}
+
+ def check_crash(self, process, test):
+ return not self.is_alive()
+
+ def settings(self, test):
+ pac_path = test.environment.get("pac")
+ if self._pac_origin and pac_path:
+ self._pac = urljoin(self._pac_origin, pac_path)
+ return {"pac": self._pac}
+ return {}
+
+ def _create_reader_thread(self, name, stream, queue, prefix=b""):
+ """This creates (and starts) a background thread which reads lines from `stream` and
+ puts them into `queue` until `stream` reports EOF.
+ """
+ def reader_thread(stream, queue, stop_event):
+ while True:
+ line = stream.readline()
+ if not line:
+ break
+ self._output_handler(prefix + line.rstrip())
+ queue.put(line)
+
+ stop_event.set()
+ queue.close()
+ queue.join_thread()
+
+ result = Thread(name=name,
+ target=reader_thread,
+ args=(stream, queue, self._io_stopped),
+ daemon=True)
+ result.start()
+ return result
+
+ def _create_writer_thread(self, name, stream, queue):
+ """This creates (and starts) a background thread which gets items from `queue` and
+ writes them into `stream` until it encounters a None item in the queue.
+ """
+ def writer_thread(stream, queue):
+ while True:
+ line = queue.get()
+ if not line:
+ break
+
+ stream.write(line)
+ stream.flush()
+
+ result = Thread(name=name,
+ target=writer_thread,
+ args=(stream, queue),
+ daemon=True)
+ result.start()
+ return result
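+
+# Illustrative sketch (not part of the classes above): an executor obtains the
+# queues from ContentShellBrowser.executor_browser() and drives content shell's
+# protocol mode by writing command lines to stdin and reading result lines from
+# stdout, roughly:
+#
+#     cls, browser_kwargs = browser.executor_browser()
+#     browser_kwargs["stdin_queue"].put(b"http://web-platform.test:8000/x.html\n")
+#     while not browser_kwargs["io_stopped"].is_set():
+#         line = browser_kwargs["stdout_queue"].get()
+#         ...  # accumulate output until the protocol's end-of-test marker
+#
+# The exact command syntax and output framing are defined by content shell's
+# protocol mode (see the test_info_extractor.h link in the class docstring).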
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/edgechromium.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/edgechromium.py
new file mode 100644
index 0000000000..4f5bffa06c
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/edgechromium.py
@@ -0,0 +1,70 @@
+# mypy: allow-untyped-defs
+from .base import WebDriverBrowser, require_arg
+from .base import get_timeout_multiplier # noqa: F401
+from .base import cmd_arg
+from .chrome import executor_kwargs as chrome_executor_kwargs
+from ..executors.executorwebdriver import WebDriverCrashtestExecutor # noqa: F401
+from ..executors.base import WdspecExecutor # noqa: F401
+from ..executors.executoredge import ( # noqa: F401
+ EdgeChromiumDriverPrintRefTestExecutor,
+ EdgeChromiumDriverRefTestExecutor,
+ EdgeChromiumDriverTestharnessExecutor,
+)
+
+
+__wptrunner__ = {"product": "edgechromium",
+ "check_args": "check_args",
+ "browser": "EdgeChromiumBrowser",
+ "executor": {"testharness": "EdgeChromiumDriverTestharnessExecutor",
+ "reftest": "EdgeChromiumDriverRefTestExecutor",
+ "print-reftest": "EdgeChromiumDriverPrintRefTestExecutor",
+ "wdspec": "WdspecExecutor",
+ "crashtest": "WebDriverCrashtestExecutor"},
+ "browser_kwargs": "browser_kwargs",
+ "executor_kwargs": "executor_kwargs",
+ "env_extras": "env_extras",
+ "env_options": "env_options",
+ "update_properties": "update_properties",
+                 "timeout_multiplier": "get_timeout_multiplier"}
+
+
+def check_args(**kwargs):
+ require_arg(kwargs, "webdriver_binary")
+
+
+def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
+ return {"binary": kwargs["binary"],
+ "webdriver_binary": kwargs["webdriver_binary"],
+ "webdriver_args": kwargs.get("webdriver_args")}
+
+
+def executor_kwargs(logger, test_type, test_environment, run_info_data,
+ **kwargs):
+ executor_kwargs = chrome_executor_kwargs(logger, test_type, test_environment, run_info_data, **kwargs)
+ capabilities = executor_kwargs["capabilities"]
+ capabilities["ms:edgeOptions"] = capabilities.pop("goog:chromeOptions")
+ return executor_kwargs
+
+
+def env_extras(**kwargs):
+ return []
+
+
+def env_options():
+ return {"server_host": "127.0.0.1"}
+
+
+def update_properties():
+ return (["debug", "os", "processor"], {"os": ["version"], "processor": ["bits"]})
+
+
+class EdgeChromiumBrowser(WebDriverBrowser):
+    """Microsoft Edge is backed by MSEdgeDriver, which is supplied through
+    ``wptrunner.webdriver.EdgeChromiumDriverServer``.
+    """
+
+ def make_command(self):
+ return [self.webdriver_binary,
+ cmd_arg("port", str(self.port)),
+ cmd_arg("url-base", self.base_path),
+ cmd_arg("enable-edge-logs")] + self.webdriver_args
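+
+    # For illustration, with webdriver_binary="msedgedriver", port=4444, and
+    # base_path="/" (example values only), make_command() yields roughly:
+    #
+    #     ["msedgedriver", "--port=4444", "--url-base=/", "--enable-edge-logs"]
+    #
+    # assuming cmd_arg() renders switches in the "--name=value" form on this
+    # platform.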
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/epiphany.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/epiphany.py
new file mode 100644
index 0000000000..562b2dce2c
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/epiphany.py
@@ -0,0 +1,75 @@
+# mypy: allow-untyped-defs
+
+from .base import (NullBrowser, # noqa: F401
+ certificate_domain_list,
+ get_timeout_multiplier, # noqa: F401
+ maybe_add_args)
+from .webkit import WebKitBrowser # noqa: F401
+from ..executors import executor_kwargs as base_executor_kwargs
+from ..executors.base import WdspecExecutor # noqa: F401
+from ..executors.executorwebdriver import (WebDriverTestharnessExecutor, # noqa: F401
+ WebDriverRefTestExecutor, # noqa: F401
+ WebDriverCrashtestExecutor) # noqa: F401
+
+__wptrunner__ = {"product": "epiphany",
+ "check_args": "check_args",
+ "browser": {None: "WebKitBrowser",
+ "wdspec": "NullBrowser"},
+ "browser_kwargs": "browser_kwargs",
+ "executor": {"testharness": "WebDriverTestharnessExecutor",
+ "reftest": "WebDriverRefTestExecutor",
+ "wdspec": "WdspecExecutor",
+ "crashtest": "WebDriverCrashtestExecutor"},
+ "executor_kwargs": "executor_kwargs",
+ "env_extras": "env_extras",
+ "env_options": "env_options",
+ "run_info_extras": "run_info_extras",
+ "timeout_multiplier": "get_timeout_multiplier"}
+
+
+def check_args(**kwargs):
+ pass
+
+
+def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
+ # Workaround for https://gitlab.gnome.org/GNOME/libsoup/issues/172
+ webdriver_required_args = ["--host=127.0.0.1"]
+ webdriver_args = maybe_add_args(webdriver_required_args, kwargs.get("webdriver_args"))
+ return {"binary": kwargs["binary"],
+ "webdriver_binary": kwargs["webdriver_binary"],
+ "webdriver_args": webdriver_args}
+
+
+def capabilities(server_config, **kwargs):
+ args = kwargs.get("binary_args", [])
+ if "--automation-mode" not in args:
+ args.append("--automation-mode")
+
+ return {
+ "browserName": "Epiphany",
+ "browserVersion": "3.31.4", # First version to support automation
+ "platformName": "ANY",
+ "webkitgtk:browserOptions": {
+ "binary": kwargs["binary"],
+ "args": args,
+ "certificates": certificate_domain_list(server_config.domains_set, kwargs["host_cert_path"])}}
+
+
+def executor_kwargs(logger, test_type, test_environment, run_info_data,
+ **kwargs):
+ executor_kwargs = base_executor_kwargs(test_type, test_environment, run_info_data, **kwargs)
+ executor_kwargs["close_after_done"] = True
+ executor_kwargs["capabilities"] = capabilities(test_environment.config, **kwargs)
+ return executor_kwargs
+
+
+def env_extras(**kwargs):
+ return []
+
+
+def env_options():
+ return {}
+
+
+def run_info_extras(logger, **kwargs):
+ return {"webkit_port": "gtk"}
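+
+# For illustration, capabilities() above produces a payload along these lines
+# (the binary path is an example; the certificate list comes from
+# certificate_domain_list()):
+#
+#     {"browserName": "Epiphany",
+#      "browserVersion": "3.31.4",
+#      "platformName": "ANY",
+#      "webkitgtk:browserOptions": {
+#          "binary": "/usr/bin/epiphany",
+#          "args": ["--automation-mode"],
+#          "certificates": [...]}}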
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/firefox.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/firefox.py
new file mode 100644
index 0000000000..6bcbef7c47
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/firefox.py
@@ -0,0 +1,1039 @@
+# mypy: allow-untyped-defs
+
+import json
+import os
+import re
+import signal
+import subprocess
+import sys
+import tempfile
+import time
+from abc import ABCMeta, abstractmethod
+from http.client import HTTPConnection
+
+import mozinfo
+import mozleak
+import mozversion
+from mozprocess import ProcessHandler
+from mozprofile import FirefoxProfile, Preferences
+from mozrunner import FirefoxRunner
+from mozrunner.utils import test_environment, get_stack_fixer_function
+from mozcrash import mozcrash
+
+from .base import (Browser,
+ ExecutorBrowser,
+ WebDriverBrowser,
+ OutputHandler,
+ OutputHandlerState,
+ browser_command,
+ cmd_arg,
+ get_free_port,
+ require_arg)
+from ..executors import executor_kwargs as base_executor_kwargs
+from ..executors.executormarionette import (MarionetteTestharnessExecutor, # noqa: F401
+ MarionetteRefTestExecutor, # noqa: F401
+ MarionettePrintRefTestExecutor, # noqa: F401
+ MarionetteWdspecExecutor, # noqa: F401
+ MarionetteCrashtestExecutor) # noqa: F401
+
+
+__wptrunner__ = {"product": "firefox",
+ "check_args": "check_args",
+ "browser": {None: "FirefoxBrowser",
+ "wdspec": "FirefoxWdSpecBrowser"},
+ "executor": {"crashtest": "MarionetteCrashtestExecutor",
+ "testharness": "MarionetteTestharnessExecutor",
+ "reftest": "MarionetteRefTestExecutor",
+ "print-reftest": "MarionettePrintRefTestExecutor",
+ "wdspec": "MarionetteWdspecExecutor"},
+ "browser_kwargs": "browser_kwargs",
+ "executor_kwargs": "executor_kwargs",
+ "env_extras": "env_extras",
+ "env_options": "env_options",
+ "run_info_extras": "run_info_extras",
+ "update_properties": "update_properties",
+ "timeout_multiplier": "get_timeout_multiplier"}
+
+
+def get_timeout_multiplier(test_type, run_info_data, **kwargs):
+ if kwargs["timeout_multiplier"] is not None:
+ return kwargs["timeout_multiplier"]
+
+ multiplier = 1
+ if run_info_data["verify"]:
+ if kwargs.get("chaos_mode_flags", None) is not None:
+ multiplier = 2
+
+ if test_type == "reftest":
+ if (run_info_data["debug"] or
+ run_info_data.get("asan") or
+ run_info_data.get("tsan")):
+ return 4 * multiplier
+ else:
+ return 2 * multiplier
+ elif test_type == "wdspec":
+ if (run_info_data.get("asan") or
+ run_info_data.get("ccov") or
+ run_info_data.get("debug")):
+ return 4 * multiplier
+ elif run_info_data.get("tsan"):
+ return 8 * multiplier
+
+ if run_info_data["os"] == "android":
+ return 4 * multiplier
+ return 1 * multiplier
+ elif (run_info_data["debug"] or
+ run_info_data.get("asan") or
+ run_info_data.get("tsan")):
+ if run_info_data.get("ccov"):
+ return 4 * multiplier
+ else:
+ return 3 * multiplier
+ elif run_info_data["os"] == "android":
+ return 4 * multiplier
+ # https://bugzilla.mozilla.org/show_bug.cgi?id=1538725
+ elif run_info_data["os"] == "win" and run_info_data["processor"] == "aarch64":
+ return 4 * multiplier
+ elif run_info_data.get("ccov"):
+ return 2 * multiplier
+ return 1 * multiplier
+
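+# Worked examples of the branches above (assuming no chaos-mode/verify
+# doubling): a debug reftest run gets 4x, a non-debug reftest gets 2x, a
+# tsan-only wdspec run gets 8x, and a plain opt desktop run gets 1x; an
+# explicit --timeout-multiplier always takes precedence.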
+
+def check_args(**kwargs):
+ require_arg(kwargs, "binary")
+
+
+def browser_kwargs(logger, test_type, run_info_data, config, subsuite, **kwargs):
+ browser_kwargs = {"binary": kwargs["binary"],
+ "package_name": None,
+ "webdriver_binary": kwargs["webdriver_binary"],
+ "webdriver_args": kwargs["webdriver_args"].copy(),
+ "prefs_root": kwargs["prefs_root"],
+ "extra_prefs": kwargs["extra_prefs"].copy(),
+ "test_type": test_type,
+ "debug_info": kwargs["debug_info"],
+ "symbols_path": kwargs["symbols_path"],
+ "stackwalk_binary": kwargs["stackwalk_binary"],
+ "certutil_binary": kwargs["certutil_binary"],
+ "ca_certificate_path": config.ssl_config["ca_cert_path"],
+ "e10s": kwargs["gecko_e10s"],
+ "disable_fission": kwargs["disable_fission"],
+ "stackfix_dir": kwargs["stackfix_dir"],
+ "binary_args": kwargs["binary_args"].copy(),
+ "timeout_multiplier": get_timeout_multiplier(test_type,
+ run_info_data,
+ **kwargs),
+ "leak_check": run_info_data["debug"] and (kwargs["leak_check"] is not False),
+ "asan": run_info_data.get("asan"),
+ "chaos_mode_flags": kwargs["chaos_mode_flags"],
+ "config": config,
+ "browser_channel": kwargs["browser_channel"],
+ "headless": kwargs["headless"],
+                      "preload_browser": (kwargs["preload_browser"] and
+                                          not kwargs["pause_after_test"] and
+                                          kwargs["num_test_groups"] != 1),
+ "specialpowers_path": kwargs["specialpowers_path"],
+ "debug_test": kwargs["debug_test"]}
+ if test_type == "wdspec" and kwargs["binary"]:
+ browser_kwargs["webdriver_args"].extend(["--binary", kwargs["binary"]])
+ browser_kwargs["binary_args"].extend(subsuite.config.get("binary_args", []))
+ browser_kwargs["extra_prefs"].extend(subsuite.config.get("prefs", []))
+ return browser_kwargs
+
+
+def executor_kwargs(logger, test_type, test_environment, run_info_data,
+ **kwargs):
+ executor_kwargs = base_executor_kwargs(test_type, test_environment, run_info_data,
+ **kwargs)
+ executor_kwargs["close_after_done"] = test_type != "reftest"
+ executor_kwargs["timeout_multiplier"] = get_timeout_multiplier(test_type,
+ run_info_data,
+ **kwargs)
+ executor_kwargs["e10s"] = run_info_data["e10s"]
+ capabilities = {}
+ if test_type == "testharness":
+ capabilities["pageLoadStrategy"] = "eager"
+ if test_type in ("reftest", "print-reftest"):
+ executor_kwargs["reftest_internal"] = kwargs["reftest_internal"]
+ if test_type == "wdspec":
+ options = {"args": []}
+ if kwargs["binary"]:
+ executor_kwargs["webdriver_args"].extend(["--binary", kwargs["binary"]])
+ if kwargs["binary_args"]:
+ options["args"] = kwargs["binary_args"]
+
+ if not kwargs["binary"] and kwargs["headless"] and "--headless" not in options["args"]:
+ options["args"].append("--headless")
+
+ executor_kwargs["binary_args"] = options["args"]
+ capabilities["moz:firefoxOptions"] = options
+
+ if kwargs["certutil_binary"] is None:
+ capabilities["acceptInsecureCerts"] = True
+ if capabilities:
+ executor_kwargs["capabilities"] = capabilities
+ executor_kwargs["debug"] = run_info_data["debug"]
+ executor_kwargs["ccov"] = run_info_data.get("ccov", False)
+ executor_kwargs["browser_version"] = run_info_data.get("browser_version")
+ executor_kwargs["debug_test"] = kwargs["debug_test"]
+ executor_kwargs["disable_fission"] = kwargs["disable_fission"]
+ return executor_kwargs
+
+
+def env_extras(**kwargs):
+ return []
+
+
+def env_options():
+ # The server host is set to 127.0.0.1 as Firefox is configured (through the
+ # network.dns.localDomains preference set below) to resolve the test
+ # domains to localhost without relying on the network stack.
+ #
+ # https://github.com/web-platform-tests/wpt/pull/9480
+ return {"server_host": "127.0.0.1",
+ "supports_debugger": True}
+
+
+def run_info_extras(logger, **kwargs):
+
+ def get_bool_pref_if_exists(pref):
+ for key, value in kwargs.get('extra_prefs', []):
+ if pref == key:
+ return value.lower() in ('true', '1')
+ return None
+
+ def get_bool_pref(pref):
+ pref_value = get_bool_pref_if_exists(pref)
+ return pref_value if pref_value is not None else False
+
+ # Default fission to on, unless we get --disable-fission
+ rv = {"e10s": kwargs["gecko_e10s"],
+ "wasm": kwargs.get("wasm", True),
+ "verify": kwargs["verify"],
+ "headless": kwargs.get("headless", False) or "MOZ_HEADLESS" in os.environ,
+ "fission": not kwargs.get("disable_fission"),
+ "sessionHistoryInParent": (not kwargs.get("disable_fission") or
+ not get_bool_pref("fission.disableSessionHistoryInParent")),
+ "swgl": get_bool_pref("gfx.webrender.software"),
+ "privateBrowsing": (kwargs["tags"] is not None and ("privatebrowsing" in kwargs["tags"]))}
+
+ rv.update(run_info_browser_version(**kwargs))
+
+ return rv
+
+
+def run_info_browser_version(**kwargs):
+ try:
+ version_info = mozversion.get_version(kwargs["binary"])
+ except mozversion.errors.VersionError:
+ version_info = None
+ if version_info:
+ rv = {"browser_build_id": version_info.get("application_buildid", None),
+ "browser_changeset": version_info.get("application_changeset", None)}
+ if "browser_version" not in kwargs:
+ rv["browser_version"] = version_info.get("application_version")
+ return rv
+ return {}
+
+
+def update_properties():
+ return ([
+ "os",
+ "debug",
+ "fission",
+ "processor",
+ "swgl",
+ "asan",
+ "tsan",
+ "subsuite"], {
+ "os": ["version"],
+ "processor": ["bits"]})
+
+
+def log_gecko_crashes(logger, process, test, profile_dir, symbols_path, stackwalk_binary):
+ dump_dir = os.path.join(profile_dir, "minidumps")
+
+ try:
+ return bool(mozcrash.log_crashes(logger,
+ dump_dir,
+ symbols_path=symbols_path,
+ stackwalk_binary=stackwalk_binary,
+ process=process,
+ test=test))
+ except OSError:
+ logger.warning("Looking for crash dump files failed")
+ return False
+
+
+def get_environ(logger, binary, debug_info, headless, chaos_mode_flags=None, e10s=True):
+ # Hack: test_environment expects a bin_suffix key in mozinfo that in gecko infrastructure
+ # is set in the build system. Set it manually here.
+ if "bin_suffix" not in mozinfo.info:
+ mozinfo.info["bin_suffix"] = (".exe" if sys.platform in ["win32", "msys", "cygwin"]
+ else "")
+
+ # test_environment has started returning None values for some environment variables
+ # that are only set in a gecko checkout
+ env = {key: value for key, value in
+ test_environment(xrePath=os.path.abspath(os.path.dirname(binary)),
+ debugger=debug_info is not None,
+ useLSan=True,
+ log=logger).items()
+ if value is not None}
+
+ # Disable window occlusion. Bug 1733955
+ env["MOZ_WINDOW_OCCLUSION"] = "0"
+ if chaos_mode_flags is not None:
+ env["MOZ_CHAOSMODE"] = hex(chaos_mode_flags)
+ if headless:
+ env["MOZ_HEADLESS"] = "1"
+ if not e10s:
+ env["MOZ_FORCE_DISABLE_E10S"] = "1"
+ return env
+
+
+def setup_leak_report(leak_check, profile, env):
+ leak_report_file = None
+ if leak_check:
+ filename = "runtests_leaks_%s.log" % os.getpid()
+ if profile is not None:
+ leak_report_file = os.path.join(profile.profile, filename)
+ else:
+ leak_report_file = os.path.join(tempfile.gettempdir(), filename)
+ if os.path.exists(leak_report_file):
+ os.remove(leak_report_file)
+ env["XPCOM_MEM_BLOAT_LOG"] = leak_report_file
+
+ return leak_report_file
+
+
+class FirefoxInstanceManager(metaclass=ABCMeta):
+
+ def __init__(self, logger, binary, binary_args, profile_creator, debug_info,
+ chaos_mode_flags, headless,
+ leak_check, stackfix_dir, symbols_path, asan, e10s):
+ """Object that manages starting and stopping instances of Firefox."""
+ self.logger = logger
+ self.binary = binary
+ self.binary_args = binary_args
+ self.base_profile = profile_creator.create()
+ self.debug_info = debug_info
+ self.chaos_mode_flags = chaos_mode_flags
+ self.headless = headless
+ self.leak_check = leak_check
+ self.stackfix_dir = stackfix_dir
+ self.symbols_path = symbols_path
+ self.asan = asan
+ self.e10s = e10s
+
+ self.previous = None
+ self.current = None
+
+ @abstractmethod
+ def teardown(self, force=False):
+ pass
+
+ @abstractmethod
+ def get(self):
+ """Get a BrowserInstance for a running Firefox.
+
+ This can only be called once per instance, and between calls stop_current()
+ must be called."""
+ pass
+
+ def stop_current(self, force=False):
+ """Shutdown the current instance of Firefox.
+
+ The BrowserInstance remains available through self.previous, since some
+ operations happen after shutdown."""
+ if not self.current:
+ return
+
+ self.current.stop(force)
+ self.previous = self.current
+ self.current = None
+
+ def start(self):
+ """Start an instance of Firefox, returning a BrowserInstance handle"""
+ profile = self.base_profile.clone(self.base_profile.profile)
+
+ marionette_port = get_free_port()
+ profile.set_preferences({"marionette.port": marionette_port})
+
+ env = get_environ(self.logger, self.binary, self.debug_info,
+ self.headless, self.chaos_mode_flags, self.e10s)
+
+ args = self.binary_args[:] if self.binary_args else []
+ args += [cmd_arg("marionette"), "about:blank"]
+
+ debug_args, cmd = browser_command(self.binary,
+ args,
+ self.debug_info)
+
+ leak_report_file = setup_leak_report(self.leak_check, profile, env)
+ output_handler = FirefoxOutputHandler(self.logger,
+ cmd,
+ stackfix_dir=self.stackfix_dir,
+ symbols_path=self.symbols_path,
+ asan=self.asan,
+ leak_report_file=leak_report_file)
+ runner = FirefoxRunner(profile=profile,
+ binary=cmd[0],
+ cmdargs=cmd[1:],
+ env=env,
+ process_class=ProcessHandler,
+ process_args={"processOutputLine": [output_handler]})
+ instance = BrowserInstance(self.logger, runner, marionette_port,
+ output_handler, leak_report_file)
+
+ self.logger.debug("Starting Firefox")
+ runner.start(debug_args=debug_args,
+ interactive=self.debug_info and self.debug_info.interactive)
+ output_handler.after_process_start(runner.process_handler.pid)
+ self.logger.debug("Firefox Started")
+
+ return instance
+
+
+class SingleInstanceManager(FirefoxInstanceManager):
+ """FirefoxInstanceManager that manages a single Firefox instance"""
+ def get(self):
+ assert not self.current, ("Tried to call get() on InstanceManager that has "
+ "an existing instance")
+ if self.previous:
+ self.previous.cleanup()
+ self.previous = None
+ self.current = self.start()
+ return self.current
+
+ def teardown(self, force=False):
+ for instance in [self.previous, self.current]:
+ if instance:
+ instance.stop(force)
+ instance.cleanup()
+ self.base_profile.cleanup()
+
+
+class PreloadInstanceManager(FirefoxInstanceManager):
+ def __init__(self, *args, **kwargs):
+        """FirefoxInstanceManager that keeps one Firefox instance preloaded
+        to allow rapid resumption after an instance shuts down."""
+ super().__init__(*args, **kwargs)
+ self.pending = None
+
+ def get(self):
+ assert not self.current, ("Tried to call get() on InstanceManager that has "
+ "an existing instance")
+ if self.previous:
+ self.previous.cleanup()
+ self.previous = None
+ if not self.pending:
+ self.pending = self.start()
+ self.current = self.pending
+ self.pending = self.start()
+ return self.current
+
+ def teardown(self, force=False):
+ for instance, unused in [(self.previous, False),
+ (self.current, False),
+ (self.pending, True)]:
+ if instance:
+ instance.stop(force, unused)
+ instance.cleanup()
+ self.base_profile.cleanup()
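+
+# Sketch of the intended lifecycle for either manager above (names as defined
+# in this module):
+#
+#     manager = SingleInstanceManager(logger, binary, ...)  # or PreloadInstanceManager
+#     instance = manager.get()      # starts (or hands over a preloaded) Firefox
+#     ...                           # run a test group against `instance`
+#     manager.stop_current()        # instance stays in manager.previous for post-shutdown work
+#     instance = manager.get()      # next group; the previous instance is cleaned up here
+#     manager.teardown()            # end of run: stop everything, remove the base profile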
+
+
+class BrowserInstance:
+ shutdown_timeout = 70
+
+ def __init__(self, logger, runner, marionette_port, output_handler, leak_report_file):
+ """Handle to a running Firefox instance"""
+ self.logger = logger
+ self.runner = runner
+ self.marionette_port = marionette_port
+ self.output_handler = output_handler
+ self.leak_report_file = leak_report_file
+
+ def stop(self, force=False, unused=False):
+ """Stop Firefox
+
+        :param force: Signal the Firefox process without waiting for a clean shutdown
+ :param unused: This instance was not used for running tests and so
+ doesn't have an active marionette session and doesn't require
+ output postprocessing.
+ """
+ is_running = self.runner is not None and self.runner.is_running()
+ if is_running:
+ self.logger.debug("Stopping Firefox %s" % self.pid())
+ shutdown_methods = [(True, lambda: self.runner.wait(self.shutdown_timeout)),
+ (False, lambda: self.runner.stop(signal.SIGTERM,
+ self.shutdown_timeout))]
+ if hasattr(signal, "SIGKILL"):
+ shutdown_methods.append((False, lambda: self.runner.stop(signal.SIGKILL,
+ self.shutdown_timeout)))
+ if unused or force:
+ # Don't wait for the instance to close itself
+ shutdown_methods = shutdown_methods[1:]
+ try:
+ # For Firefox we assume that stopping the runner prompts the
+ # browser to shut down. This allows the leak log to be written
+ for i, (clean, stop_f) in enumerate(shutdown_methods):
+ self.logger.debug("Shutting down attempt %i/%i" % (i + 1, len(shutdown_methods)))
+ retcode = stop_f()
+ if retcode is not None:
+ self.logger.info("Browser exited with return code %s" % retcode)
+ break
+ except OSError:
+ # This can happen on Windows if the process is already dead
+ pass
+ elif self.runner:
+ # The browser was already stopped, which we assume was a crash
+ # TODO: Should we check the exit code here?
+ clean = False
+ if not unused:
+ self.output_handler.after_process_stop(clean_shutdown=clean)
+
+ def pid(self):
+ if self.runner.process_handler is None:
+ return None
+
+ try:
+ return self.runner.process_handler.pid
+ except AttributeError:
+ return None
+
+ def is_alive(self):
+ if self.runner:
+ return self.runner.is_running()
+ return False
+
+ def cleanup(self):
+ self.runner.cleanup()
+ self.runner = None
+
+
+class FirefoxOutputHandler(OutputHandler):
+ def __init__(self, logger, command, symbols_path=None, stackfix_dir=None, asan=False,
+ leak_report_file=None):
+ """Filter for handling Firefox process output.
+
+ This receives Firefox process output in the __call__ function, does
+ any additional processing that's required, and decides whether to log
+ the output. Because the Firefox process can be started before we know
+ which filters are going to be required, we buffer all output until
+ setup() is called. This is responsible for doing the final configuration
+ of the output handlers.
+ """
+
+ super().__init__(logger, command)
+
+ self.symbols_path = symbols_path
+ if stackfix_dir:
+ # We hide errors because they cause disconcerting `CRITICAL`
+ # warnings in web platform test output.
+ self.stack_fixer = get_stack_fixer_function(stackfix_dir,
+ self.symbols_path,
+ hideErrors=True)
+ else:
+ self.stack_fixer = None
+ self.asan = asan
+ self.leak_report_file = leak_report_file
+
+ # These are filled in after configure_handlers() is called
+ self.lsan_handler = None
+ self.mozleak_allowed = None
+ self.mozleak_thresholds = None
+ self.group_metadata = {}
+
+ def start(self, group_metadata=None, lsan_disabled=False, lsan_allowed=None,
+ lsan_max_stack_depth=None, mozleak_allowed=None, mozleak_thresholds=None,
+ **kwargs):
+ """Configure the output handler"""
+ if group_metadata is None:
+ group_metadata = {}
+ self.group_metadata = group_metadata
+
+ self.mozleak_allowed = mozleak_allowed
+ self.mozleak_thresholds = mozleak_thresholds
+
+ if self.asan:
+ self.lsan_handler = mozleak.LSANLeaks(self.logger,
+ scope=group_metadata.get("scope", "/"),
+ allowed=lsan_allowed,
+ maxNumRecordedFrames=lsan_max_stack_depth,
+ allowAll=lsan_disabled)
+ else:
+ self.lsan_handler = None
+ super().start()
+
+ def after_process_stop(self, clean_shutdown=True):
+ super().after_process_stop(clean_shutdown)
+ if self.lsan_handler:
+ self.lsan_handler.process()
+ if self.leak_report_file is not None:
+ processed_files = None
+ if not clean_shutdown:
+ # If we didn't get a clean shutdown there probably isn't a leak report file
+ self.logger.warning("Firefox didn't exit cleanly, not processing leak logs")
+ else:
+ # We have to ignore missing leaks in the tab because it can happen that the
+ # content process crashed and in that case we don't want the test to fail.
+ # Ideally we would record which content process crashed and just skip those.
+ self.logger.info("PROCESS LEAKS %s" % self.leak_report_file)
+ processed_files = mozleak.process_leak_log(
+ self.leak_report_file,
+ leak_thresholds=self.mozleak_thresholds,
+ ignore_missing_leaks=["tab", "gmplugin"],
+ log=self.logger,
+ stack_fixer=self.stack_fixer,
+ scope=self.group_metadata.get("scope"),
+ allowed=self.mozleak_allowed)
+ if processed_files:
+ for path in processed_files:
+ if os.path.exists(path):
+ os.unlink(path)
+ # Fallback for older versions of mozleak, or if we didn't shutdown cleanly
+ if os.path.exists(self.leak_report_file):
+ os.unlink(self.leak_report_file)
+
+ def __call__(self, line):
+ """Write a line of output from the firefox process to the log"""
+ if b"GLib-GObject-CRITICAL" in line:
+ return
+ if line:
+ if self.state < OutputHandlerState.AFTER_HANDLER_START:
+ self.line_buffer.append(line)
+ return
+ data = line.decode("utf8", "replace")
+ if self.stack_fixer:
+ data = self.stack_fixer(data)
+ if self.lsan_handler:
+ data = self.lsan_handler.log(data)
+ if data is not None:
+ self.logger.process_output(self.pid,
+ data,
+ command=" ".join(self.command))
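+
+    # Rough order of operations for this handler (a sketch of how the pieces
+    # above fit together): lines received before start() are buffered by the
+    # base OutputHandler; start() installs the LSAN handler for ASAN builds and
+    # records the mozleak settings; __call__ then stack-fixes and LSAN-filters
+    # each line before logging it; after_process_stop() finally emits the LSAN
+    # report and processes (then removes) the XPCOM leak log.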
+
+
+class GeckodriverOutputHandler(FirefoxOutputHandler):
+ PORT_RE = re.compile(rb".*Listening on [^ :]*:(\d+)")
+
+ def __init__(self, logger, command, symbols_path=None, stackfix_dir=None, asan=False,
+ leak_report_file=None, init_deadline=None):
+ super().__init__(logger, command, symbols_path=symbols_path, stackfix_dir=stackfix_dir, asan=asan,
+ leak_report_file=leak_report_file)
+ self.port = None
+        self.init_deadline = init_deadline
+
+ def after_process_start(self, pid):
+ super().after_process_start(pid)
+ while self.port is None:
+ time.sleep(0.1)
+ if self.init_deadline is not None and time.time() > self.init_deadline:
+ raise TimeoutError("Failed to get geckodriver port within the timeout")
+
+ def __call__(self, line):
+ if self.port is None:
+ m = self.PORT_RE.match(line)
+ if m is not None:
+ self.port = int(m.groups()[0])
+ self.logger.debug(f"Got geckodriver port {self.port}")
+ super().__call__(line)
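+
+    # Example (exact log format illustrative): a geckodriver startup line such
+    # as b"geckodriver INFO Listening on 127.0.0.1:41625" matches PORT_RE above
+    # and sets self.port = 41625, which unblocks after_process_start().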
+
+
+class ProfileCreator:
+ def __init__(self, logger, prefs_root, config, test_type, extra_prefs,
+ disable_fission, debug_test, browser_channel, binary,
+ package_name, certutil_binary, ca_certificate_path):
+ self.logger = logger
+ self.prefs_root = prefs_root
+ self.config = config
+ self.test_type = test_type
+ self.extra_prefs = extra_prefs
+ self.disable_fission = disable_fission
+ self.debug_test = debug_test
+ self.browser_channel = browser_channel
+ self.ca_certificate_path = ca_certificate_path
+ self.binary = binary
+ self.package_name = package_name
+ self.certutil_binary = certutil_binary
+ self.ca_certificate_path = ca_certificate_path
+
+ def create(self, **kwargs):
+ """Create a Firefox profile and return the mozprofile Profile object pointing at that
+ profile
+
+ :param kwargs: Additional arguments to pass into the profile constructor
+ """
+ preferences = self._load_prefs()
+
+ profile = FirefoxProfile(preferences=preferences,
+ restore=False,
+ **kwargs)
+ self._set_required_prefs(profile)
+ if self.ca_certificate_path is not None:
+ self._setup_ssl(profile)
+
+ return profile
+
+ def _load_prefs(self):
+ prefs = Preferences()
+
+ pref_paths = []
+
+ profiles = os.path.join(self.prefs_root, 'profiles.json')
+ if os.path.isfile(profiles):
+ with open(profiles) as fh:
+ for name in json.load(fh)['web-platform-tests']:
+ if self.browser_channel in (None, 'nightly'):
+ pref_paths.append(os.path.join(self.prefs_root, name, 'user.js'))
+ elif name != 'unittest-features':
+ pref_paths.append(os.path.join(self.prefs_root, name, 'user.js'))
+ else:
+ self.logger.warning(f"Failed to load profiles from {profiles}")
+
+ for path in pref_paths:
+ if os.path.exists(path):
+ prefs.add(Preferences.read_prefs(path))
+ else:
+ self.logger.warning(f"Failed to find prefs file in {path}")
+
+ # Add any custom preferences
+ prefs.add(self.extra_prefs, cast=True)
+
+ return prefs()
+
+ def _set_required_prefs(self, profile):
+ """Set preferences required for wptrunner to function.
+
+ Note that this doesn't set the marionette port, since we don't always
+        know that at profile creation time. So the caller is responsible for
+ setting that once it's available."""
+ profile.set_preferences({
+ "network.dns.localDomains": ",".join(self.config.domains_set),
+ "dom.file.createInChild": True,
+ # TODO: Remove preferences once Firefox 64 is stable (Bug 905404)
+ "network.proxy.type": 0,
+ "places.history.enabled": False,
+ })
+
+ profile.set_preferences({"fission.autostart": True})
+ if self.disable_fission:
+ profile.set_preferences({"fission.autostart": False})
+
+ if self.test_type in ("reftest", "print-reftest"):
+ profile.set_preferences({"layout.interruptible-reflow.enabled": False})
+
+ if self.test_type == "print-reftest":
+ profile.set_preferences({"print.always_print_silent": True})
+
+ if self.test_type == "wdspec":
+ profile.set_preferences({"remote.prefs.recommended": True})
+
+ if self.debug_test:
+ profile.set_preferences({"devtools.console.stdout.content": True})
+
+ def _setup_ssl(self, profile):
+ """Create a certificate database to use in the test profile. This is configured
+ to trust the CA Certificate that has signed the web-platform.test server
+ certificate."""
+ if self.certutil_binary is None:
+ self.logger.info("--certutil-binary not supplied; Firefox will not check certificates")
+ return
+
+ self.logger.info("Setting up ssl")
+
+ # Make sure the certutil libraries from the source tree are loaded when using a
+ # local copy of certutil
+ # TODO: Maybe only set this if certutil won't launch?
+ env = os.environ.copy()
+ certutil_dir = os.path.dirname(self.binary or self.certutil_binary)
+ if mozinfo.isMac:
+ env_var = "DYLD_LIBRARY_PATH"
+ elif mozinfo.isUnix:
+ env_var = "LD_LIBRARY_PATH"
+ else:
+ env_var = "PATH"
+
+ env[env_var] = (os.path.pathsep.join([certutil_dir, env[env_var]])
+ if env_var in env else certutil_dir)
+
+ def certutil(*args):
+ cmd = [self.certutil_binary] + list(args)
+ self.logger.process_output("certutil",
+ subprocess.check_output(cmd,
+ env=env,
+ stderr=subprocess.STDOUT),
+ " ".join(cmd))
+
+ pw_path = os.path.join(profile.profile, ".crtdbpw")
+ with open(pw_path, "w") as f:
+ # Use empty password for certificate db
+ f.write("\n")
+
+ cert_db_path = profile.profile
+
+ # Create a new certificate db
+ certutil("-N", "-d", cert_db_path, "-f", pw_path)
+
+ # Add the CA certificate to the database and mark as trusted to issue server certs
+ certutil("-A", "-d", cert_db_path, "-f", pw_path, "-t", "CT,,",
+ "-n", "web-platform-tests", "-i", self.ca_certificate_path)
+
+ # List all certs in the database
+ certutil("-L", "-d", cert_db_path)
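+
+    # The certutil() calls above are equivalent to running roughly the
+    # following commands against the profile directory (paths illustrative):
+    #
+    #     certutil -N -d $PROFILE -f $PROFILE/.crtdbpw
+    #     certutil -A -d $PROFILE -f $PROFILE/.crtdbpw -t CT,, \
+    #              -n web-platform-tests -i $CA_CERT_PATH
+    #     certutil -L -d $PROFILE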
+
+
+class FirefoxBrowser(Browser):
+ init_timeout = 70
+
+ def __init__(self, logger, binary, package_name, prefs_root, test_type,
+ extra_prefs=None, debug_info=None,
+ symbols_path=None, stackwalk_binary=None, certutil_binary=None,
+ ca_certificate_path=None, e10s=False, disable_fission=False,
+ stackfix_dir=None, binary_args=None, timeout_multiplier=None, leak_check=False,
+ asan=False, chaos_mode_flags=None, config=None,
+ browser_channel="nightly", headless=None, preload_browser=False,
+ specialpowers_path=None, debug_test=False, **kwargs):
+ Browser.__init__(self, logger)
+
+ self.logger = logger
+
+ if timeout_multiplier:
+ self.init_timeout = self.init_timeout * timeout_multiplier
+
+ self.instance = None
+ self._settings = None
+
+ self.stackfix_dir = stackfix_dir
+ self.symbols_path = symbols_path
+ self.stackwalk_binary = stackwalk_binary
+
+ self.asan = asan
+ self.leak_check = leak_check
+
+ self.specialpowers_path = specialpowers_path
+
+ profile_creator = ProfileCreator(logger,
+ prefs_root,
+ config,
+ test_type,
+ extra_prefs,
+ disable_fission,
+ debug_test,
+ browser_channel,
+ binary,
+ package_name,
+ certutil_binary,
+ ca_certificate_path)
+
+ if preload_browser:
+ instance_manager_cls = PreloadInstanceManager
+ else:
+ instance_manager_cls = SingleInstanceManager
+ self.instance_manager = instance_manager_cls(logger,
+ binary,
+ binary_args,
+ profile_creator,
+ debug_info,
+ chaos_mode_flags,
+ headless,
+ leak_check,
+ stackfix_dir,
+ symbols_path,
+ asan,
+ e10s)
+
+ def settings(self, test):
+ self._settings = {"check_leaks": self.leak_check and not test.leaks,
+ "lsan_disabled": test.lsan_disabled,
+ "lsan_allowed": test.lsan_allowed,
+ "lsan_max_stack_depth": test.lsan_max_stack_depth,
+ "mozleak_allowed": self.leak_check and test.mozleak_allowed,
+ "mozleak_thresholds": self.leak_check and test.mozleak_threshold,
+ "special_powers": self.specialpowers_path and test.url_base == "/_mozilla/"}
+ return self._settings
+
+ def start(self, group_metadata=None, **kwargs):
+ self.instance = self.instance_manager.get()
+ self.instance.output_handler.start(group_metadata,
+ **kwargs)
+
+ def stop(self, force=False):
+ self.instance_manager.stop_current(force)
+ self.logger.debug("stopped")
+
+ def pid(self):
+ return self.instance.pid()
+
+ def is_alive(self):
+ return self.instance and self.instance.is_alive()
+
+ def cleanup(self, force=False):
+ self.instance_manager.teardown(force)
+
+ def executor_browser(self):
+ assert self.instance is not None
+ extensions = []
+ if self._settings.get("special_powers", False):
+ extensions.append(self.specialpowers_path)
+ return ExecutorBrowser, {"marionette_port": self.instance.marionette_port,
+ "extensions": extensions,
+ "supports_devtools": True}
+
+ def check_crash(self, process, test):
+ return log_gecko_crashes(self.logger,
+ process,
+ test,
+ self.instance.runner.profile.profile,
+ self.symbols_path,
+ self.stackwalk_binary)
+
+
+class FirefoxWdSpecBrowser(WebDriverBrowser):
+ def __init__(self, logger, binary, package_name, prefs_root, webdriver_binary, webdriver_args,
+ extra_prefs=None, debug_info=None, symbols_path=None, stackwalk_binary=None,
+ certutil_binary=None, ca_certificate_path=None, e10s=False,
+ disable_fission=False, stackfix_dir=None, leak_check=False,
+ asan=False, chaos_mode_flags=None, config=None, browser_channel="nightly",
+ headless=None, debug_test=False, profile_creator_cls=ProfileCreator,
+ **kwargs):
+
+ super().__init__(logger, binary, webdriver_binary, webdriver_args)
+ self.binary = binary
+ self.package_name = package_name
+ self.webdriver_binary = webdriver_binary
+
+ self.stackfix_dir = stackfix_dir
+ self.symbols_path = symbols_path
+ self.stackwalk_binary = stackwalk_binary
+
+ self.asan = asan
+ self.leak_check = leak_check
+ self.leak_report_file = None
+
+ self.env = self.get_env(binary, debug_info, headless, chaos_mode_flags, e10s)
+
+ profile_creator = profile_creator_cls(logger,
+ prefs_root,
+ config,
+ "wdspec",
+ extra_prefs,
+ disable_fission,
+ debug_test,
+ browser_channel,
+ binary,
+ package_name,
+ certutil_binary,
+ ca_certificate_path)
+
+ self.profile = profile_creator.create()
+ self.marionette_port = None
+
+ def get_env(self, binary, debug_info, headless, chaos_mode_flags, e10s):
+ env = get_environ(self.logger,
+ binary,
+ debug_info,
+ headless,
+ chaos_mode_flags, e10s)
+ env["RUST_BACKTRACE"] = "1"
+ return env
+
+ def create_output_handler(self, cmd):
+ return GeckodriverOutputHandler(self.logger,
+ cmd,
+ stackfix_dir=self.stackfix_dir,
+ symbols_path=self.symbols_path,
+ asan=self.asan,
+ leak_report_file=self.leak_report_file,
+ init_deadline=self.init_deadline)
+
+ def start(self, group_metadata, **kwargs):
+ self.leak_report_file = setup_leak_report(self.leak_check, self.profile, self.env)
+ super().start(group_metadata, **kwargs)
+
+ def stop(self, force=False):
+        # Initially wait for any WebDriver session to shut down cleanly if the
+        # process doesn't have to be force-stopped.
+        # When this is called the executor is usually sending an End Session
+        # command to the browser. We don't have a synchronisation mechanism
+        # that tells us when that command has completed, so poll the status
+        # endpoint until no session remains before killing the driver.
+ if self.is_alive() and not force:
+ end_time = time.time() + BrowserInstance.shutdown_timeout
+ while time.time() < end_time:
+ self.logger.debug("Waiting for WebDriver session to end")
+ try:
+ self.logger.debug(f"Connecting to http://{self.host}:{self.port}/status")
+ conn = HTTPConnection(self.host, self.port)
+ conn.request("GET", "/status")
+ res = conn.getresponse()
+ self.logger.debug(f"Got response from http://{self.host}:{self.port}/status")
+ except Exception:
+ self.logger.debug(
+ f"Connecting to http://{self.host}:{self.port}/status failed")
+ break
+ if res.status != 200:
+ self.logger.debug(f"Connecting to http://{self.host}:{self.port}/status "
+ f"gave status {res.status}")
+ break
+ data = res.read()
+ try:
+ msg = json.loads(data)
+ except ValueError:
+ self.logger.debug("/status response was not valid JSON")
+ break
+ if msg.get("value", {}).get("ready") is True:
+ self.logger.debug("Got ready status")
+ break
+ self.logger.debug(f"Got status response {data}")
+ time.sleep(1)
+ else:
+ self.logger.debug("WebDriver session didn't end")
+ try:
+ super().stop(force=force)
+ finally:
+ if self._output_handler is not None:
+ self._output_handler.port = None
+ self._port = None
+
+ def cleanup(self):
+ super().cleanup()
+ self.profile.cleanup()
+
+ def settings(self, test):
+ return {"check_leaks": self.leak_check and not test.leaks,
+ "lsan_disabled": test.lsan_disabled,
+ "lsan_allowed": test.lsan_allowed,
+ "lsan_max_stack_depth": test.lsan_max_stack_depth,
+ "mozleak_allowed": self.leak_check and test.mozleak_allowed,
+ "mozleak_thresholds": self.leak_check and test.mozleak_threshold}
+
+ @property
+ def port(self):
+ # We read the port from geckodriver on startup
+ if self._port is None:
+ if self._output_handler is None or self._output_handler.port is None:
+ raise ValueError("Can't get geckodriver port before it's started")
+ self._port = self._output_handler.port
+ return self._port
+
+ def make_command(self):
+ return [self.webdriver_binary,
+ "--host", self.host,
+ "--port", "0"] + self.webdriver_args
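+
+    # For illustration (the host value depends on configuration), make_command()
+    # yields something like ["geckodriver", "--host", "127.0.0.1", "--port", "0"];
+    # passing port 0 lets geckodriver pick a free port, which the `port` property
+    # above then reads back from the GeckodriverOutputHandler.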
+
+ def executor_browser(self):
+ cls, args = super().executor_browser()
+ args["supports_devtools"] = False
+ args["profile"] = self.profile.profile
+ return cls, args
+
+ def check_crash(self, process, test):
+ return log_gecko_crashes(self.logger,
+ process,
+ test,
+ self.profile.profile,
+ self.symbols_path,
+ self.stackwalk_binary)
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/firefox_android.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/firefox_android.py
new file mode 100644
index 0000000000..3ce3b11d1f
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/firefox_android.py
@@ -0,0 +1,407 @@
+# mypy: allow-untyped-defs
+
+import os
+import subprocess
+import re
+
+from mozrunner import FennecEmulatorRunner, get_app_context
+
+from .base import (get_free_port,
+ cmd_arg,
+ browser_command)
+from ..executors.executormarionette import (MarionetteTestharnessExecutor, # noqa: F401
+ MarionetteRefTestExecutor, # noqa: F401
+ MarionetteCrashtestExecutor, # noqa: F401
+ MarionetteWdspecExecutor) # noqa: F401
+from .base import (Browser,
+ ExecutorBrowser)
+from .firefox import (get_timeout_multiplier, # noqa: F401
+ run_info_browser_version,
+ run_info_extras as fx_run_info_extras,
+ update_properties, # noqa: F401
+ executor_kwargs as fx_executor_kwargs, # noqa: F401
+ FirefoxWdSpecBrowser,
+ ProfileCreator as FirefoxProfileCreator)
+
+
+__wptrunner__ = {"product": "firefox_android",
+ "check_args": "check_args",
+ "browser": {None: "FirefoxAndroidBrowser",
+ "wdspec": "FirefoxAndroidWdSpecBrowser"},
+ "executor": {"testharness": "MarionetteTestharnessExecutor",
+ "reftest": "MarionetteRefTestExecutor",
+ "crashtest": "MarionetteCrashtestExecutor",
+ "wdspec": "MarionetteWdspecExecutor"},
+ "browser_kwargs": "browser_kwargs",
+ "executor_kwargs": "executor_kwargs",
+ "env_extras": "env_extras",
+ "env_options": "env_options",
+ "run_info_extras": "run_info_extras",
+ "update_properties": "update_properties",
+ "timeout_multiplier": "get_timeout_multiplier"}
+
+
+def check_args(**kwargs):
+ pass
+
+
+def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
+ return {"adb_binary": kwargs["adb_binary"],
+ "webdriver_binary": kwargs["webdriver_binary"],
+ "webdriver_args": kwargs["webdriver_args"].copy(),
+ "binary": None,
+ "package_name": kwargs["package_name"],
+ "device_serial": kwargs["device_serial"],
+ "prefs_root": kwargs["prefs_root"],
+ "extra_prefs": kwargs["extra_prefs"].copy(),
+ "test_type": test_type,
+ "debug_info": kwargs["debug_info"],
+ "symbols_path": kwargs["symbols_path"],
+ "stackwalk_binary": kwargs["stackwalk_binary"],
+ "certutil_binary": kwargs["certutil_binary"],
+ "ca_certificate_path": config.ssl_config["ca_cert_path"],
+ "stackfix_dir": kwargs["stackfix_dir"],
+ "binary_args": kwargs["binary_args"].copy(),
+ "timeout_multiplier": get_timeout_multiplier(test_type,
+ run_info_data,
+ **kwargs),
+ "disable_fission": kwargs["disable_fission"],
+ # desktop only
+ "leak_check": False,
+ "chaos_mode_flags": kwargs["chaos_mode_flags"],
+ "config": config,
+ "install_fonts": kwargs["install_fonts"],
+ "tests_root": config.doc_root,
+ "specialpowers_path": kwargs["specialpowers_path"],
+ "debug_test": kwargs["debug_test"],
+ "env_extras": dict([x.split('=') for x in kwargs.get("env", [])])}
+
+
+def executor_kwargs(logger, test_type, test_environment, run_info_data,
+ **kwargs):
+ rv = fx_executor_kwargs(logger, test_type, test_environment, run_info_data,
+ **kwargs)
+ if test_type == "wdspec":
+ rv["capabilities"]["moz:firefoxOptions"]["androidPackage"] = kwargs["package_name"]
+ return rv
+
+
+def env_extras(**kwargs):
+ return []
+
+
+def run_info_extras(logger, **kwargs):
+ rv = fx_run_info_extras(logger, **kwargs)
+ rv.update({"headless": False})
+
+ if kwargs["browser_version"] is None:
+ rv.update(run_info_browser_version(**kwargs))
+
+ if rv.get("browser_version") is None:
+ # If we didn't get the browser version from the apk, try to get it from adb dumpsys
+ rv["browser_version"] = get_package_browser_version(logger,
+ kwargs["adb_binary"],
+ kwargs["package_name"])
+
+ return rv
+
+
+def get_package_browser_version(logger, adb_binary, package_name):
+ if adb_binary is None:
+ logger.warning("Couldn't run adb to get Firefox Android version number")
+ return None
+ try:
+ completed = subprocess.run([adb_binary, "shell", "dumpsys", "package", package_name],
+ check=True,
+ capture_output=True,
+ encoding="utf8")
+ except subprocess.CalledProcessError as e:
+ logger.warning(f"adb failed with return code {e.returncode}\nCaptured stderr:\n{e.stderr}")
+ return None
+
+ version_name_re = re.compile(r"^\s+versionName=(.*)")
+ for line in completed.stdout.splitlines():
+ m = version_name_re.match(line)
+ if m is not None:
+ return m.group(1)
+ logger.warning("Failed to find versionName property in dumpsys output")
+
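+# Example line from `adb shell dumpsys package <pkg>` that version_name_re in
+# get_package_browser_version() matches (version value illustrative):
+#
+#         versionName=124.0.1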
+
+def env_options():
+ return {"server_host": "127.0.0.1",
+ "supports_debugger": True}
+
+
+def get_environ(chaos_mode_flags, env_extras=None):
+ env = {}
+ if env_extras is not None:
+ env.update(env_extras)
+ env["MOZ_CRASHREPORTER"] = "1"
+ env["MOZ_CRASHREPORTER_SHUTDOWN"] = "1"
+ env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"
+ if chaos_mode_flags is not None:
+ env["MOZ_CHAOSMODE"] = hex(chaos_mode_flags)
+ return env
+
+
+class ProfileCreator(FirefoxProfileCreator):
+ def __init__(self, logger, prefs_root, config, test_type, extra_prefs,
+ disable_fission, debug_test, browser_channel, binary,
+ package_name, certutil_binary, ca_certificate_path):
+
+ super().__init__(logger, prefs_root, config, test_type, extra_prefs,
+ disable_fission, debug_test, browser_channel, None,
+ package_name, certutil_binary, ca_certificate_path)
+
+ def _set_required_prefs(self, profile):
+ profile.set_preferences({
+ "network.dns.localDomains": ",".join(self.config.domains_set),
+ "dom.disable_open_during_load": False,
+ "places.history.enabled": False,
+ "dom.send_after_paint_to_content": True,
+ })
+
+ if self.package_name == "org.mozilla.geckoview.test_runner":
+ # Bug 1879324: The TestRunner doesn't support "beforeunload" prompts yet
+ profile.set_preferences({"dom.disable_beforeunload": True})
+
+ if self.test_type == "reftest":
+ self.logger.info("Setting android reftest preferences")
+ profile.set_preferences({
+ "browser.viewport.desktopWidth": 800,
+ # Disable high DPI
+ "layout.css.devPixelsPerPx": "1.0",
+ # Ensure that the full browser element
+ # appears in the screenshot
+ "apz.allow_zooming": False,
+ "android.widget_paints_background": False,
+ # Ensure that scrollbars are always painted
+ "layout.testing.overlay-scrollbars.always-visible": True,
+ })
+
+ if self.test_type == "wdspec":
+ profile.set_preferences({"remote.prefs.recommended": True})
+
+ profile.set_preferences({"fission.autostart": True})
+ if self.disable_fission:
+ profile.set_preferences({"fission.autostart": False})
+
+
+class FirefoxAndroidBrowser(Browser):
+ init_timeout = 300
+ shutdown_timeout = 60
+
+ def __init__(self, logger, prefs_root, test_type, package_name="org.mozilla.geckoview.test_runner",
+ device_serial=None, extra_prefs=None, debug_info=None,
+ symbols_path=None, stackwalk_binary=None, certutil_binary=None,
+ ca_certificate_path=None, stackfix_dir=None,
+ binary_args=None, timeout_multiplier=None, leak_check=False, asan=False,
+ chaos_mode_flags=None, config=None, browser_channel="nightly",
+ install_fonts=False, tests_root=None, specialpowers_path=None, adb_binary=None,
+ debug_test=False, disable_fission=False, **kwargs):
+
+ super().__init__(logger)
+ self.prefs_root = prefs_root
+ self.test_type = test_type
+ self.package_name = package_name
+ self.device_serial = device_serial
+ self.debug_info = debug_info
+ self.symbols_path = symbols_path
+ self.stackwalk_binary = stackwalk_binary
+ self.certutil_binary = certutil_binary
+ self.ca_certificate_path = ca_certificate_path
+ self.stackfix_dir = stackfix_dir
+ self.binary_args = binary_args
+ self.timeout_multiplier = timeout_multiplier
+ self.leak_check = leak_check
+ self.asan = asan
+ self.chaos_mode_flags = chaos_mode_flags
+ self.config = config
+ self.browser_channel = browser_channel
+ self.install_fonts = install_fonts
+ self.tests_root = tests_root
+ self.specialpowers_path = specialpowers_path
+ self.adb_binary = adb_binary
+ self.disable_fission = disable_fission
+
+ self.profile_creator = ProfileCreator(logger,
+ prefs_root,
+ config,
+ test_type,
+ extra_prefs,
+ disable_fission,
+ debug_test,
+ browser_channel,
+ None,
+ package_name,
+ certutil_binary,
+ ca_certificate_path)
+
+ self.marionette_port = None
+ self.profile = None
+ self.runner = None
+ self.env_extras = kwargs["env_extras"]
+ self._settings = {}
+
+ def settings(self, test):
+ self._settings = {"check_leaks": self.leak_check and not test.leaks,
+ "lsan_allowed": test.lsan_allowed,
+ "lsan_max_stack_depth": test.lsan_max_stack_depth,
+ "mozleak_allowed": self.leak_check and test.mozleak_allowed,
+ "mozleak_thresholds": self.leak_check and test.mozleak_threshold,
+ "special_powers": self.specialpowers_path and test.url_base == "/_mozilla/"}
+ return self._settings
+
+ def start(self, **kwargs):
+ if self.marionette_port is None:
+ self.marionette_port = get_free_port()
+
+ addons = [self.specialpowers_path] if self._settings.get("special_powers") else None
+ self.profile = self.profile_creator.create(addons=addons)
+ self.profile.set_preferences({"marionette.port": self.marionette_port})
+
+ if self.install_fonts:
+ self.logger.debug("Copying Ahem font to profile")
+ font_dir = os.path.join(self.profile.profile, "fonts")
+ if not os.path.exists(font_dir):
+ os.makedirs(font_dir)
+ with open(os.path.join(self.tests_root, "fonts", "Ahem.ttf"), "rb") as src:
+ with open(os.path.join(font_dir, "Ahem.ttf"), "wb") as dest:
+ dest.write(src.read())
+
+ self.leak_report_file = None
+
+        debug_args, cmd = browser_command(self.package_name,
+                                          (self.binary_args if self.binary_args else []) +
+                                          [cmd_arg("marionette"), "about:blank"],
+                                          self.debug_info)
+
+ env = get_environ(self.chaos_mode_flags, self.env_extras)
+
+ self.runner = FennecEmulatorRunner(app=self.package_name,
+ profile=self.profile,
+ cmdargs=cmd[1:],
+ env=env,
+ symbols_path=self.symbols_path,
+ serial=self.device_serial,
+ # TODO - choose appropriate log dir
+ logdir=os.getcwd(),
+ adb_path=self.adb_binary,
+ explicit_cleanup=True)
+
+ self.logger.debug("Starting %s" % self.package_name)
+ # connect to a running emulator
+ self.runner.device.connect()
+
+ self.runner.stop()
+ self.runner.start(debug_args=debug_args,
+ interactive=self.debug_info and self.debug_info.interactive)
+
+ self.runner.device.device.forward(
+ local=f"tcp:{self.marionette_port}",
+ remote=f"tcp:{self.marionette_port}")
+
+ for ports in self.config.ports.values():
+ for port in ports:
+ self.runner.device.device.reverse(
+ local=f"tcp:{port}",
+ remote=f"tcp:{port}")
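+
+        # The forward/reverse calls above are the mozdevice equivalents of
+        # running, per port (illustrative):
+        #
+        #     adb forward tcp:<marionette_port> tcp:<marionette_port>
+        #     adb reverse tcp:<server_port> tcp:<server_port>
+        #
+        # so Marionette on the device is reachable from the host, and the wpt
+        # servers on the host are reachable from the device.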
+
+ self.logger.debug("%s Started" % self.package_name)
+
+ def stop(self, force=False):
+ if self.runner is not None:
+ if self.runner.device.connected:
+ try:
+ self.runner.device.device.remove_forwards()
+ self.runner.device.device.remove_reverses()
+ except Exception as e:
+ self.logger.warning("Failed to remove forwarded or reversed ports: %s" % e)
+ # We assume that stopping the runner prompts the
+ # browser to shut down.
+ self.runner.cleanup()
+ self.logger.debug("stopped")
+
+ def pid(self):
+ if self.runner.process_handler is None:
+ return None
+
+ try:
+ return self.runner.process_handler.pid
+ except AttributeError:
+ return None
+
+ def is_alive(self):
+ if self.runner:
+ return self.runner.is_running()
+ return False
+
+ def cleanup(self, force=False):
+ self.stop(force)
+
+ def executor_browser(self):
+ return ExecutorBrowser, {"marionette_port": self.marionette_port,
+ # We never want marionette to install extensions because
+ # that doesn't work on Android; instead they are in the profile
+ "extensions": [],
+ "supports_devtools": False}
+
+ def check_crash(self, process, test):
+ if not os.environ.get("MINIDUMP_STACKWALK", "") and self.stackwalk_binary:
+ os.environ["MINIDUMP_STACKWALK"] = self.stackwalk_binary
+ return bool(self.runner.check_for_crashes(test_name=test))
+
+
+class FirefoxAndroidWdSpecBrowser(FirefoxWdSpecBrowser):
+ def __init__(self, logger, prefs_root, webdriver_binary, webdriver_args,
+ extra_prefs=None, debug_info=None, symbols_path=None, stackwalk_binary=None,
+ certutil_binary=None, ca_certificate_path=None,
+ disable_fission=False, stackfix_dir=None, leak_check=False,
+ asan=False, chaos_mode_flags=None, config=None,
+ browser_channel="nightly", headless=None, debug_test=None,
+ binary=None, package_name="org.mozilla.geckoview.test_runner", device_serial=None,
+ adb_binary=None, profile_creator_cls=ProfileCreator, **kwargs):
+
+ super().__init__(logger, None, package_name, prefs_root, webdriver_binary, webdriver_args,
+ extra_prefs=extra_prefs, debug_info=debug_info, symbols_path=symbols_path,
+ stackwalk_binary=stackwalk_binary, certutil_binary=certutil_binary,
+ ca_certificate_path=ca_certificate_path,
+ disable_fission=disable_fission, stackfix_dir=stackfix_dir,
+ leak_check=leak_check, asan=asan,
+ chaos_mode_flags=chaos_mode_flags, config=config,
+ browser_channel=browser_channel, headless=headless,
+ debug_test=debug_test, profile_creator_cls=profile_creator_cls, **kwargs)
+
+ self.config = config
+ self.device_serial = device_serial
+ # This is just to support the same adb lookup as for other test types
+ context = get_app_context("fennec")(adb_path=adb_binary, device_serial=device_serial)
+ self.device = context.get_device(context.adb, self.device_serial)
+
+ def start(self, group_metadata, **kwargs):
+ for ports in self.config.ports.values():
+ for port in ports:
+ self.device.reverse(
+ local=f"tcp:{port}",
+ remote=f"tcp:{port}")
+ super().start(group_metadata, **kwargs)
+
+ def stop(self, force=False):
+ try:
+ self.device.remove_reverses()
+ except Exception as e:
+ self.logger.warning("Failed to remove forwarded or reversed ports: %s" % e)
+ super().stop(force=force)
+
+ def get_env(self, binary, debug_info, headless, chaos_mode_flags, e10s):
+ env = get_environ(chaos_mode_flags)
+ env["RUST_BACKTRACE"] = "1"
+ return env
+
+ def executor_browser(self):
+ cls, args = super().executor_browser()
+ args["androidPackage"] = self.package_name
+ args["androidDeviceSerial"] = self.device_serial
+ args["env"] = self.env
+ args["supports_devtools"] = False
+ return cls, args
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/ladybird.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/ladybird.py
new file mode 100644
index 0000000000..4738a71f19
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/ladybird.py
@@ -0,0 +1,56 @@
+# mypy: allow-untyped-defs
+
+from .base import (WebDriverBrowser, # noqa: F401
+ get_timeout_multiplier, # noqa: F401
+ require_arg)
+from ..executors import executor_kwargs as base_executor_kwargs
+from ..executors.base import WdspecExecutor # noqa: F401
+from ..executors.executorwebdriver import (WebDriverTestharnessExecutor, # noqa: F401
+ WebDriverRefTestExecutor, # noqa: F401
+ WebDriverCrashtestExecutor) # noqa: F401
+
+__wptrunner__ = {
+ "product": "ladybird",
+ "check_args": "check_args",
+ "browser": "LadybirdBrowser",
+ "browser_kwargs": "browser_kwargs",
+ "executor_kwargs": "executor_kwargs",
+ "env_options": "env_options",
+ "env_extras": "env_extras",
+ "timeout_multiplier": "get_timeout_multiplier",
+ "executor": {
+ "testharness": "WebDriverTestharnessExecutor",
+ "reftest": "WebDriverRefTestExecutor",
+ "wdspec": "WdspecExecutor",
+ "crashtest": "WebDriverCrashtestExecutor"
+ }
+}
+
+def check_args(**kwargs):
+ require_arg(kwargs, "webdriver_binary")
+
+
+def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
+ return {"binary": kwargs["binary"],
+ "webdriver_binary": kwargs["webdriver_binary"],
+ "webdriver_args": kwargs.get("webdriver_args")}
+
+
+def executor_kwargs(logger, test_type, test_environment, run_info_data,
+ **kwargs):
+ executor_kwargs = base_executor_kwargs(test_type, test_environment, run_info_data, **kwargs)
+ executor_kwargs["capabilities"] = {}
+ return executor_kwargs
+
+
+def env_options():
+ return {}
+
+
+def env_extras(**kwargs):
+ return []
+
+
+class LadybirdBrowser(WebDriverBrowser):
+ def make_command(self):
+ return [self.webdriver_binary, "--port", str(self.port)] + self.webdriver_args
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/opera.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/opera.py
new file mode 100644
index 0000000000..a2448f4a90
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/opera.py
@@ -0,0 +1,70 @@
+# mypy: allow-untyped-defs
+
+from .base import require_arg
+from .base import get_timeout_multiplier # noqa: F401
+from .chrome import ChromeBrowser
+from ..executors import executor_kwargs as base_executor_kwargs
+from ..executors.base import WdspecExecutor # noqa: F401
+from ..executors.executorselenium import (SeleniumTestharnessExecutor, # noqa: F401
+ SeleniumRefTestExecutor) # noqa: F401
+
+
+__wptrunner__ = {"product": "opera",
+ "check_args": "check_args",
+ "browser": "OperaBrowser",
+ "executor": {"testharness": "SeleniumTestharnessExecutor",
+ "reftest": "SeleniumRefTestExecutor",
+ "wdspec": "WdspecExecutor"},
+ "browser_kwargs": "browser_kwargs",
+ "executor_kwargs": "executor_kwargs",
+ "env_extras": "env_extras",
+ "env_options": "env_options",
+ "timeout_multiplier": "get_timeout_multiplier"}
+
+
+def check_args(**kwargs):
+ require_arg(kwargs, "webdriver_binary")
+
+
+def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
+ return {"binary": kwargs["binary"],
+ "webdriver_binary": kwargs["webdriver_binary"],
+ "webdriver_args": kwargs.get("webdriver_args")}
+
+
+def executor_kwargs(logger, test_type, test_environment, run_info_data,
+ **kwargs):
+ from selenium.webdriver import DesiredCapabilities
+
+ executor_kwargs = base_executor_kwargs(test_type, test_environment, run_info_data, **kwargs)
+ executor_kwargs["close_after_done"] = True
+ capabilities = dict(DesiredCapabilities.OPERA.items())
+ capabilities.setdefault("operaOptions", {})["prefs"] = {
+ "profile": {
+ "default_content_setting_values": {
+ "popups": 1
+ }
+ }
+ }
+ for (kwarg, capability) in [("binary", "binary"), ("binary_args", "args")]:
+ if kwargs[kwarg] is not None:
+ capabilities["operaOptions"][capability] = kwargs[kwarg]
+ if test_type == "testharness":
+ capabilities["operaOptions"]["useAutomationExtension"] = False
+ capabilities["operaOptions"]["excludeSwitches"] = ["enable-automation"]
+ if test_type == "wdspec":
+ capabilities["operaOptions"]["w3c"] = True
+ executor_kwargs["capabilities"] = capabilities
+ return executor_kwargs
+
+
+def env_extras(**kwargs):
+ return []
+
+
+def env_options():
+ return {}
+
+
+class OperaBrowser(ChromeBrowser):
+ pass
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/safari.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/safari.py
new file mode 100644
index 0000000000..44d289c7e3
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/safari.py
@@ -0,0 +1,218 @@
+# mypy: allow-untyped-defs
+
+import os
+import plistlib
+from packaging.version import Version
+from shutil import which
+
+import psutil
+
+from .base import WebDriverBrowser, require_arg
+from .base import get_timeout_multiplier # noqa: F401
+from ..executors import executor_kwargs as base_executor_kwargs
+from ..executors.base import WdspecExecutor # noqa: F401
+from ..executors.executorwebdriver import (WebDriverTestharnessExecutor, # noqa: F401
+ WebDriverRefTestExecutor, # noqa: F401
+ WebDriverCrashtestExecutor) # noqa: F401
+
+
+__wptrunner__ = {"product": "safari",
+ "check_args": "check_args",
+ "browser": "SafariBrowser",
+ "executor": {"testharness": "WebDriverTestharnessExecutor",
+ "reftest": "WebDriverRefTestExecutor",
+ "wdspec": "WdspecExecutor",
+ "crashtest": "WebDriverCrashtestExecutor"},
+ "browser_kwargs": "browser_kwargs",
+ "executor_kwargs": "executor_kwargs",
+ "env_extras": "env_extras",
+ "env_options": "env_options",
+ "run_info_extras": "run_info_extras",
+ "timeout_multiplier": "get_timeout_multiplier"}
+
+
+def check_args(**kwargs):
+ require_arg(kwargs, "webdriver_binary")
+
+
+def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
+ return {"webdriver_binary": kwargs["webdriver_binary"],
+ "webdriver_args": kwargs.get("webdriver_args"),
+ "kill_safari": kwargs.get("kill_safari", False)}
+
+
+def executor_kwargs(logger, test_type, test_environment, run_info_data, **kwargs):
+ executor_kwargs = base_executor_kwargs(test_type, test_environment, run_info_data, **kwargs)
+ executor_kwargs["close_after_done"] = True
+ executor_kwargs["capabilities"] = {}
+ if test_type == "testharness":
+ executor_kwargs["capabilities"]["pageLoadStrategy"] = "eager"
+ if kwargs["binary"] is not None:
+ raise ValueError("Safari doesn't support setting executable location")
+
+ browser_bundle_version = run_info_data["browser_bundle_version"]
+ if (browser_bundle_version is not None and
+ Version(browser_bundle_version[2:]) >= Version("613.1.7.1")):
+ logger.debug("using acceptInsecureCerts=True")
+ executor_kwargs["capabilities"]["acceptInsecureCerts"] = True
+ else:
+ logger.warning("not using acceptInsecureCerts, Safari will require certificates to be trusted")
+
+ return executor_kwargs
+
+
+def env_extras(**kwargs):
+ return []
+
+
+def env_options():
+ return {}
+
+
+def run_info_extras(logger, **kwargs):
+ webdriver_binary = kwargs["webdriver_binary"]
+ rv = {}
+
+ safari_bundle, safari_info = get_safari_info(webdriver_binary)
+
+ if safari_info is not None:
+        assert safari_bundle is not None  # when safari_info is not None, safari_bundle is always set too
+ _, webkit_info = get_webkit_info(safari_bundle)
+ if webkit_info is None:
+ webkit_info = {}
+ else:
+ safari_info = {}
+ webkit_info = {}
+
+ rv["browser_marketing_version"] = safari_info.get("CFBundleShortVersionString")
+ rv["browser_bundle_version"] = safari_info.get("CFBundleVersion")
+ rv["browser_webkit_bundle_version"] = webkit_info.get("CFBundleVersion")
+
+ with open("/System/Library/CoreServices/SystemVersion.plist", "rb") as fp:
+ system_version = plistlib.load(fp)
+
+ rv["os_build"] = system_version["ProductBuildVersion"]
+
+ return rv
+
+
+def get_safari_info(wd_path):
+ bundle_paths = [
+ os.path.join(os.path.dirname(wd_path), "..", ".."), # bundled Safari (e.g. STP)
+ os.path.join(os.path.dirname(wd_path), "Safari.app"), # local Safari build
+ "/Applications/Safari.app", # system Safari
+ ]
+
+ for bundle_path in bundle_paths:
+ info_path = os.path.join(bundle_path, "Contents", "Info.plist")
+ if not os.path.isfile(info_path):
+ continue
+
+ with open(info_path, "rb") as fp:
+ info = plistlib.load(fp)
+
+ # check we have a Safari family bundle
+ ident = info.get("CFBundleIdentifier")
+ if not isinstance(ident, str) or not ident.startswith("com.apple.Safari"):
+ continue
+
+ return (bundle_path, info)
+
+ return (None, None)
+
+
+def get_webkit_info(safari_bundle_path):
+ framework_paths = [
+ os.path.join(os.path.dirname(safari_bundle_path), "Contents", "Frameworks"), # bundled Safari (e.g. STP)
+ os.path.join(os.path.dirname(safari_bundle_path), ".."), # local Safari build
+ "/System/Library/PrivateFrameworks",
+ "/Library/Frameworks",
+ "/System/Library/Frameworks",
+ ]
+
+ for framework_path in framework_paths:
+ info_path = os.path.join(framework_path, "WebKit.framework", "Versions", "Current", "Resources", "Info.plist")
+ if not os.path.isfile(info_path):
+ continue
+
+ with open(info_path, "rb") as fp:
+ info = plistlib.load(fp)
+ return (framework_path, info)
+
+ return (None, None)
+
+
+class SafariBrowser(WebDriverBrowser):
+ """Safari is backed by safaridriver, which is supplied through
+ ``wptrunner.webdriver.SafariDriverServer``.
+ """
+ def __init__(self, logger, binary=None, webdriver_binary=None, webdriver_args=None,
+ port=None, env=None, kill_safari=False, **kwargs):
+ """Creates a new representation of Safari. The `webdriver_binary`
+ argument gives the WebDriver binary to use for testing. (The browser
+ binary location cannot be specified, as Safari and SafariDriver are
+ coupled.) If `kill_safari` is True, then `Browser.stop` will stop Safari."""
+ super().__init__(logger,
+ binary,
+ webdriver_binary,
+ webdriver_args=webdriver_args,
+ port=None,
+ supports_pac=False,
+ env=env)
+
+ if "/" not in webdriver_binary:
+ wd_path = which(webdriver_binary)
+ else:
+ wd_path = webdriver_binary
+ self.safari_path = self._find_safari_executable(wd_path)
+
+ logger.debug("WebDriver executable path: %s" % wd_path)
+ logger.debug("Safari executable path: %s" % self.safari_path)
+
+ self.kill_safari = kill_safari
+
+ def _find_safari_executable(self, wd_path):
+        bundle_path, info = get_safari_info(wd_path)
+        if info is None:
+            # No Safari bundle was found alongside the driver or in /Applications.
+            return None
+
+        exe = info.get("CFBundleExecutable")
+ if not isinstance(exe, str):
+ return None
+
+ exe_path = os.path.join(bundle_path, "Contents", "MacOS", exe)
+ if not os.path.isfile(exe_path):
+ return None
+
+ return exe_path
+
+ def make_command(self):
+ return [self.webdriver_binary, f"--port={self.port}"] + self.webdriver_args
+
+ def stop(self, force=False):
+ super().stop(force)
+
+ if self.kill_safari:
+ self.logger.debug("Going to stop Safari")
+ for proc in psutil.process_iter(attrs=["exe"]):
+ if proc.info["exe"] is None:
+ continue
+
+ try:
+ if not os.path.samefile(proc.info["exe"], self.safari_path):
+ continue
+ except OSError:
+ continue
+
+ self.logger.debug("Stopping Safari %s" % proc.pid)
+ try:
+ proc.terminate()
+ try:
+ proc.wait(10)
+ except psutil.TimeoutExpired:
+ proc.kill()
+ proc.wait(10)
+ except psutil.NoSuchProcess:
+ pass
+ except Exception:
+ # Safari is a singleton, so treat failure here as a critical error.
+ self.logger.critical("Failed to stop Safari")
+ raise
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/sauce.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/sauce.py
new file mode 100644
index 0000000000..0f21afd38f
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/sauce.py
@@ -0,0 +1,245 @@
+# mypy: allow-untyped-defs
+
+import glob
+import os
+import shutil
+import subprocess
+import tarfile
+import tempfile
+import time
+
+import requests
+
+from io import BytesIO
+
+from .base import Browser, ExecutorBrowser, require_arg
+from .base import get_timeout_multiplier # noqa: F401
+from ..executors import executor_kwargs as base_executor_kwargs
+from ..executors.executorselenium import (SeleniumTestharnessExecutor, # noqa: F401
+ SeleniumRefTestExecutor) # noqa: F401
+
+here = os.path.dirname(__file__)
+# Number of seconds to wait between polling operations when detecting status of
+# Sauce Connect sub-process.
+sc_poll_period = 1
+
+
+__wptrunner__ = {"product": "sauce",
+ "check_args": "check_args",
+ "browser": "SauceBrowser",
+ "executor": {"testharness": "SeleniumTestharnessExecutor",
+ "reftest": "SeleniumRefTestExecutor"},
+ "browser_kwargs": "browser_kwargs",
+ "executor_kwargs": "executor_kwargs",
+ "env_extras": "env_extras",
+ "env_options": "env_options",
+ "timeout_multiplier": "get_timeout_multiplier"}
+
+
+def get_capabilities(**kwargs):
+ browser_name = kwargs["sauce_browser"]
+ platform = kwargs["sauce_platform"]
+ version = kwargs["sauce_version"]
+ build = kwargs["sauce_build"]
+ tags = kwargs["sauce_tags"]
+ tunnel_id = kwargs["sauce_tunnel_id"]
+ prerun_script = {
+ "safari": {
+ "executable": "sauce-storage:safari-prerun.sh",
+ "background": False,
+ }
+ }
+ capabilities = {
+ "browserName": browser_name,
+ "build": build,
+ "disablePopupHandler": True,
+ "name": f"{browser_name} {version} on {platform}",
+ "platform": platform,
+ "public": "public",
+ "selenium-version": "3.3.1",
+ "tags": tags,
+ "tunnel-identifier": tunnel_id,
+ "version": version,
+ "prerun": prerun_script.get(browser_name)
+ }
+
+ return capabilities
+
+
+def get_sauce_config(**kwargs):
+ browser_name = kwargs["sauce_browser"]
+ sauce_user = kwargs["sauce_user"]
+ sauce_key = kwargs["sauce_key"]
+
+ hub_url = f"{sauce_user}:{sauce_key}@localhost:4445"
+ data = {
+ "url": "http://%s/wd/hub" % hub_url,
+ "browserName": browser_name,
+ "capabilities": get_capabilities(**kwargs)
+ }
+
+ return data
+
+
+def check_args(**kwargs):
+ require_arg(kwargs, "sauce_browser")
+ require_arg(kwargs, "sauce_platform")
+ require_arg(kwargs, "sauce_version")
+ require_arg(kwargs, "sauce_user")
+ require_arg(kwargs, "sauce_key")
+
+
+def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
+ sauce_config = get_sauce_config(**kwargs)
+
+ return {"sauce_config": sauce_config}
+
+
+def executor_kwargs(logger, test_type, test_environment, run_info_data,
+ **kwargs):
+ executor_kwargs = base_executor_kwargs(test_type, test_environment, run_info_data, **kwargs)
+
+ executor_kwargs["capabilities"] = get_capabilities(**kwargs)
+
+ return executor_kwargs
+
+
+def env_extras(**kwargs):
+ return [SauceConnect(**kwargs)]
+
+
+def env_options():
+ return {"supports_debugger": False}
+
+
+def get_tar(url, dest):
+ resp = requests.get(url, stream=True)
+ resp.raise_for_status()
+    with tarfile.open(fileobj=BytesIO(resp.raw.read())) as f:
+ f.extractall(path=dest)
+
+
+class SauceConnect():
+
+ def __init__(self, **kwargs):
+ self.sauce_user = kwargs["sauce_user"]
+ self.sauce_key = kwargs["sauce_key"]
+ self.sauce_tunnel_id = kwargs["sauce_tunnel_id"]
+ self.sauce_connect_binary = kwargs.get("sauce_connect_binary")
+ self.sauce_connect_args = kwargs.get("sauce_connect_args")
+ self.sauce_init_timeout = kwargs.get("sauce_init_timeout")
+ self.sc_process = None
+ self.temp_dir = None
+ self.env_config = None
+
+ def __call__(self, env_options, env_config):
+ self.env_config = env_config
+
+ return self
+
+ def __enter__(self):
+        # This class is both callable and a context manager: calling an
+        # instance supplies data (the environment config) that is not
+        # available at construction time. The call must happen before the
+        # instance is used in a `with` statement, even though that ordering
+        # is not enforced by the context manager protocol itself.
+        assert self.env_config is not None, 'The instance must be invoked before use as a context manager.'
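+        # A minimal usage sketch of that call-then-enter pattern (the names
+        # below are illustrative only, not part of wptrunner's API):
+        #
+        #     tunnel = SauceConnect(**kwargs)
+        #     tunnel = tunnel(env_options, env_config)  # returns the same instance
+        #     with tunnel:
+        #         run_tests()  # hypothetical test-running step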
+
+ if not self.sauce_connect_binary:
+ self.temp_dir = tempfile.mkdtemp()
+ get_tar("https://saucelabs.com/downloads/sc-4.4.9-linux.tar.gz", self.temp_dir)
+ self.sauce_connect_binary = glob.glob(os.path.join(self.temp_dir, "sc-*-linux/bin/sc"))[0]
+
+ self.upload_prerun_exec('edge-prerun.bat')
+ self.upload_prerun_exec('safari-prerun.sh')
+
+ self.sc_process = subprocess.Popen([
+ self.sauce_connect_binary,
+ "--user=%s" % self.sauce_user,
+ "--api-key=%s" % self.sauce_key,
+ "--no-remove-colliding-tunnels",
+ "--tunnel-identifier=%s" % self.sauce_tunnel_id,
+ "--metrics-address=0.0.0.0:9876",
+ "--readyfile=./sauce_is_ready",
+ "--tunnel-domains",
+ ",".join(self.env_config.domains_set)
+ ] + self.sauce_connect_args)
+
+ tot_wait = 0
+ while not os.path.exists('./sauce_is_ready') and self.sc_process.poll() is None:
+ if not self.sauce_init_timeout or (tot_wait >= self.sauce_init_timeout):
+ self.quit()
+
+ raise SauceException("Sauce Connect Proxy was not ready after %d seconds" % tot_wait)
+
+ time.sleep(sc_poll_period)
+ tot_wait += sc_poll_period
+
+ if self.sc_process.returncode is not None:
+            raise SauceException("Unable to start Sauce Connect Proxy. Process exited with code %s" % self.sc_process.returncode)
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.env_config = None
+ self.quit()
+ if self.temp_dir and os.path.exists(self.temp_dir):
+ try:
+ shutil.rmtree(self.temp_dir)
+ except OSError:
+ pass
+
+ def upload_prerun_exec(self, file_name):
+ auth = (self.sauce_user, self.sauce_key)
+ url = f"https://saucelabs.com/rest/v1/storage/{self.sauce_user}/{file_name}?overwrite=true"
+
+ with open(os.path.join(here, 'sauce_setup', file_name), 'rb') as f:
+ requests.post(url, data=f, auth=auth)
+
+ def quit(self):
+ """The Sauce Connect process may be managing an active "tunnel" to the
+ Sauce Labs service. Issue a request to the process to close any tunnels
+ and exit. If this does not occur within 5 seconds, force the process to
+ close."""
+ kill_wait = 5
+ tot_wait = 0
+ self.sc_process.terminate()
+
+ while self.sc_process.poll() is None:
+ time.sleep(sc_poll_period)
+ tot_wait += sc_poll_period
+
+ if tot_wait >= kill_wait:
+ self.sc_process.kill()
+ break
+
+
+class SauceException(Exception):
+ pass
+
+
+class SauceBrowser(Browser):
+ init_timeout = 300
+
+ def __init__(self, logger, sauce_config, **kwargs):
+ Browser.__init__(self, logger)
+ self.sauce_config = sauce_config
+
+ def start(self, **kwargs):
+ pass
+
+ def stop(self, force=False):
+ pass
+
+ def pid(self):
+ return None
+
+ def is_alive(self):
+ # TODO: Should this check something about the connection?
+ return True
+
+ def cleanup(self):
+ pass
+
+ def executor_browser(self):
+ return ExecutorBrowser, {"webdriver_url": self.sauce_config["url"]}
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/sauce_setup/safari-prerun.sh b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/sauce_setup/safari-prerun.sh
new file mode 100755
index 0000000000..39390e618f
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/sauce_setup/safari-prerun.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+curl https://raw.githubusercontent.com/web-platform-tests/wpt/master/fonts/Ahem.ttf > ~/Library/Fonts/Ahem.ttf
+defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2JavaScriptCanOpenWindowsAutomatically -bool true
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/servo.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/servo.py
new file mode 100644
index 0000000000..d0bf0a38ea
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/servo.py
@@ -0,0 +1,119 @@
+# mypy: allow-untyped-defs
+
+import os
+
+from .base import ExecutorBrowser, NullBrowser, WebDriverBrowser, require_arg
+from .base import get_timeout_multiplier # noqa: F401
+from ..executors import executor_kwargs as base_executor_kwargs
+from ..executors.base import WdspecExecutor # noqa: F401
+from ..executors.executorservo import (ServoCrashtestExecutor, # noqa: F401
+ ServoTestharnessExecutor, # noqa: F401
+ ServoRefTestExecutor) # noqa: F401
+
+
+here = os.path.dirname(__file__)
+
+__wptrunner__ = {
+ "product": "servo",
+ "check_args": "check_args",
+ "browser": {None: "ServoBrowser",
+ "wdspec": "ServoWdspecBrowser"},
+ "executor": {
+ "crashtest": "ServoCrashtestExecutor",
+ "testharness": "ServoTestharnessExecutor",
+ "reftest": "ServoRefTestExecutor",
+ "wdspec": "WdspecExecutor",
+ },
+ "browser_kwargs": "browser_kwargs",
+ "executor_kwargs": "executor_kwargs",
+ "env_extras": "env_extras",
+ "env_options": "env_options",
+ "timeout_multiplier": "get_timeout_multiplier",
+ "update_properties": "update_properties",
+}
+
+
+def check_args(**kwargs):
+ require_arg(kwargs, "binary")
+
+
+def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
+ return {
+ "binary": kwargs["binary"],
+ "debug_info": kwargs["debug_info"],
+ "binary_args": kwargs["binary_args"],
+ "user_stylesheets": kwargs.get("user_stylesheets"),
+ "ca_certificate_path": config.ssl_config["ca_cert_path"],
+ }
+
+
+def executor_kwargs(logger, test_type, test_environment, run_info_data,
+ **kwargs):
+ rv = base_executor_kwargs(test_type, test_environment, run_info_data, **kwargs)
+ rv["pause_after_test"] = kwargs["pause_after_test"]
+ if test_type == "wdspec":
+ rv["capabilities"] = {}
+ return rv
+
+
+def env_extras(**kwargs):
+ return []
+
+
+def env_options():
+ return {"server_host": "127.0.0.1",
+ "bind_address": False,
+ "testharnessreport": "testharnessreport-servo.js",
+ "supports_debugger": True}
+
+
+def update_properties():
+ return ["debug", "os", "processor"], {"os": ["version"], "processor": ["bits"]}
+
+
+class ServoBrowser(NullBrowser):
+ def __init__(self, logger, binary, debug_info=None, binary_args=None,
+ user_stylesheets=None, ca_certificate_path=None, **kwargs):
+ NullBrowser.__init__(self, logger)
+ self.binary = binary
+ self.debug_info = debug_info
+ self.binary_args = binary_args or []
+ self.user_stylesheets = user_stylesheets or []
+ self.ca_certificate_path = ca_certificate_path
+
+ def executor_browser(self):
+ return ExecutorBrowser, {
+ "binary": self.binary,
+ "debug_info": self.debug_info,
+ "binary_args": self.binary_args,
+ "user_stylesheets": self.user_stylesheets,
+ "ca_certificate_path": self.ca_certificate_path,
+ }
+
+
+class ServoWdspecBrowser(WebDriverBrowser):
+    # TODO: could share an implementation with servodriver.py, perhaps
+ def __init__(self, logger, binary="servo", webdriver_binary="servo",
+ binary_args=None, webdriver_args=None, env=None, port=None,
+ **kwargs):
+
+ env = os.environ.copy() if env is None else env
+ env["RUST_BACKTRACE"] = "1"
+
+ super().__init__(logger,
+ binary=binary,
+ webdriver_binary=webdriver_binary,
+ webdriver_args=webdriver_args,
+ port=port,
+ env=env,
+ **kwargs)
+ self.binary_args = binary_args
+
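+    # With the default binary and no extra arguments, make_command() below
+    # yields roughly the following (the port is assigned at runtime, so 4444
+    # is illustrative only):
+    #
+    #     servo --webdriver=4444 --hard-fail --headless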
+ def make_command(self):
+ command = [self.binary,
+ f"--webdriver={self.port}",
+ "--hard-fail",
+ "--headless"] + self.webdriver_args
+ if self.binary_args:
+ command += self.binary_args
+ return command
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/servodriver.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/servodriver.py
new file mode 100644
index 0000000000..5195fa6442
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/servodriver.py
@@ -0,0 +1,184 @@
+# mypy: allow-untyped-defs
+
+import os
+import subprocess
+import tempfile
+
+from mozprocess import ProcessHandler
+
+from tools.serve.serve import make_hosts_file
+
+from .base import (Browser,
+ ExecutorBrowser,
+ OutputHandler,
+ require_arg,
+ get_free_port,
+ browser_command)
+from .base import get_timeout_multiplier # noqa: F401
+from ..executors import executor_kwargs as base_executor_kwargs
+from ..executors.executorservodriver import (ServoWebDriverTestharnessExecutor, # noqa: F401
+ ServoWebDriverRefTestExecutor) # noqa: F401
+
+here = os.path.dirname(__file__)
+
+__wptrunner__ = {
+ "product": "servodriver",
+ "check_args": "check_args",
+ "browser": "ServoWebDriverBrowser",
+ "executor": {
+ "testharness": "ServoWebDriverTestharnessExecutor",
+ "reftest": "ServoWebDriverRefTestExecutor",
+ },
+ "browser_kwargs": "browser_kwargs",
+ "executor_kwargs": "executor_kwargs",
+ "env_extras": "env_extras",
+ "env_options": "env_options",
+ "timeout_multiplier": "get_timeout_multiplier",
+ "update_properties": "update_properties",
+}
+
+
+def check_args(**kwargs):
+ require_arg(kwargs, "binary")
+
+
+def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
+ return {
+ "binary": kwargs["binary"],
+ "binary_args": kwargs["binary_args"],
+ "debug_info": kwargs["debug_info"],
+ "server_config": config,
+ "user_stylesheets": kwargs.get("user_stylesheets"),
+ "headless": kwargs.get("headless"),
+ }
+
+
+def executor_kwargs(logger, test_type, test_environment, run_info_data, **kwargs):
+ rv = base_executor_kwargs(test_type, test_environment, run_info_data, **kwargs)
+ return rv
+
+
+def env_extras(**kwargs):
+ return []
+
+
+def env_options():
+ return {"server_host": "127.0.0.1",
+ "testharnessreport": "testharnessreport-servodriver.js",
+ "supports_debugger": True}
+
+
+def update_properties():
+ return (["debug", "os", "processor"], {"os": ["version"], "processor": ["bits"]})
+
+
+def write_hosts_file(config):
+ hosts_fd, hosts_path = tempfile.mkstemp()
+ with os.fdopen(hosts_fd, "w") as f:
+ f.write(make_hosts_file(config, "127.0.0.1"))
+ return hosts_path
+
+
+class ServoWebDriverBrowser(Browser):
+ init_timeout = 300 # Large timeout for cases where we're booting an Android emulator
+
+ def __init__(self, logger, binary, debug_info=None, webdriver_host="127.0.0.1",
+ server_config=None, binary_args=None,
+ user_stylesheets=None, headless=None, **kwargs):
+ Browser.__init__(self, logger)
+ self.binary = binary
+ self.binary_args = binary_args or []
+ self.webdriver_host = webdriver_host
+ self.webdriver_port = None
+ self.proc = None
+ self.debug_info = debug_info
+ self.hosts_path = write_hosts_file(server_config)
+ self.server_ports = server_config.ports if server_config else {}
+ self.command = None
+ self.user_stylesheets = user_stylesheets if user_stylesheets else []
+ self.headless = headless if headless else False
+ self.ca_certificate_path = server_config.ssl_config["ca_cert_path"]
+ self.output_handler = None
+
+ def start(self, **kwargs):
+ self.webdriver_port = get_free_port()
+
+ env = os.environ.copy()
+ env["HOST_FILE"] = self.hosts_path
+ env["RUST_BACKTRACE"] = "1"
+ env["EMULATOR_REVERSE_FORWARD_PORTS"] = ",".join(
+ str(port)
+ for _protocol, ports in self.server_ports.items()
+ for port in ports
+ if port
+ )
+
+ debug_args, command = browser_command(
+ self.binary,
+ self.binary_args + [
+ "--hard-fail",
+ "--webdriver=%s" % self.webdriver_port,
+ "about:blank",
+ ],
+ self.debug_info
+ )
+
+ if self.headless:
+ command += ["--headless"]
+
+ if self.ca_certificate_path:
+ command += ["--certificate-path", self.ca_certificate_path]
+
+ for stylesheet in self.user_stylesheets:
+ command += ["--user-stylesheet", stylesheet]
+
+ self.command = command
+
+ self.command = debug_args + self.command
+
+ if not self.debug_info or not self.debug_info.interactive:
+ self.output_handler = OutputHandler(self.logger, self.command)
+ self.proc = ProcessHandler(self.command,
+ processOutputLine=[self.on_output],
+ env=env,
+ storeOutput=False)
+ self.proc.run()
+ self.output_handler.after_process_start(self.proc.pid)
+ self.output_handler.start()
+ else:
+ self.proc = subprocess.Popen(self.command, env=env)
+
+ self.logger.debug("Servo Started")
+
+ def stop(self, force=False):
+ self.logger.debug("Stopping browser")
+ if self.proc is not None:
+ try:
+ self.proc.kill()
+ except OSError:
+ # This can happen on Windows if the process is already dead
+ pass
+ if self.output_handler is not None:
+ self.output_handler.after_process_stop()
+
+ def pid(self):
+ if self.proc is None:
+ return None
+
+ try:
+ return self.proc.pid
+ except AttributeError:
+ return None
+
+ def is_alive(self):
+ return self.proc.poll() is None
+
+ def cleanup(self):
+ self.stop()
+ os.remove(self.hosts_path)
+
+ def executor_browser(self):
+ assert self.webdriver_port is not None
+ return ExecutorBrowser, {"webdriver_host": self.webdriver_host,
+ "webdriver_port": self.webdriver_port,
+ "init_timeout": self.init_timeout}
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/webkit.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/webkit.py
new file mode 100644
index 0000000000..a3e8d1361c
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/webkit.py
@@ -0,0 +1,83 @@
+# mypy: allow-untyped-defs
+
+from .base import WebDriverBrowser, require_arg
+from .base import get_timeout_multiplier, certificate_domain_list # noqa: F401
+from ..executors import executor_kwargs as base_executor_kwargs
+from ..executors.base import WdspecExecutor # noqa: F401
+from ..executors.executorwebdriver import (WebDriverTestharnessExecutor, # noqa: F401
+ WebDriverRefTestExecutor, # noqa: F401
+ WebDriverCrashtestExecutor) # noqa: F401
+
+
+__wptrunner__ = {"product": "webkit",
+ "check_args": "check_args",
+ "browser": "WebKitBrowser",
+ "browser_kwargs": "browser_kwargs",
+ "executor": {"testharness": "WebDriverTestharnessExecutor",
+ "reftest": "WebDriverRefTestExecutor",
+ "wdspec": "WdspecExecutor",
+ "crashtest": "WebDriverCrashtestExecutor"},
+ "executor_kwargs": "executor_kwargs",
+ "env_extras": "env_extras",
+ "env_options": "env_options",
+ "run_info_extras": "run_info_extras",
+ "timeout_multiplier": "get_timeout_multiplier"}
+
+
+def check_args(**kwargs):
+ require_arg(kwargs, "binary")
+ require_arg(kwargs, "webdriver_binary")
+ require_arg(kwargs, "webkit_port")
+
+
+def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
+ return {"binary": kwargs["binary"],
+ "webdriver_binary": kwargs["webdriver_binary"],
+ "webdriver_args": kwargs.get("webdriver_args")}
+
+
+def capabilities_for_port(server_config, **kwargs):
+ port_name = kwargs["webkit_port"]
+ if port_name in ["gtk", "wpe"]:
+ port_key_map = {"gtk": "webkitgtk"}
+ browser_options_port = port_key_map.get(port_name, port_name)
+ browser_options_key = "%s:browserOptions" % browser_options_port
+
+ return {
+ "browserName": "MiniBrowser",
+ "browserVersion": "2.20",
+ "platformName": "ANY",
+ browser_options_key: {
+ "binary": kwargs["binary"],
+ "args": kwargs.get("binary_args", []),
+ "certificates": certificate_domain_list(server_config.domains_set, kwargs["host_cert_path"])}}
+
+ return {}
+
+
+def executor_kwargs(logger, test_type, test_environment, run_info_data,
+ **kwargs):
+ executor_kwargs = base_executor_kwargs(test_type, test_environment, run_info_data, **kwargs)
+ executor_kwargs["close_after_done"] = True
+ executor_kwargs["capabilities"] = capabilities_for_port(test_environment.config,
+ **kwargs)
+ return executor_kwargs
+
+
+def env_extras(**kwargs):
+ return []
+
+
+def env_options():
+ return {}
+
+
+def run_info_extras(logger, **kwargs):
+ return {"webkit_port": kwargs["webkit_port"]}
+
+
+class WebKitBrowser(WebDriverBrowser):
+ """Generic WebKit browser is backed by WebKit's WebDriver implementation"""
+
+ def make_command(self):
+ return [self.webdriver_binary, f"--port={self.port}"] + self.webdriver_args
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/webkitgtk_minibrowser.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/webkitgtk_minibrowser.py
new file mode 100644
index 0000000000..29a9563887
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/webkitgtk_minibrowser.py
@@ -0,0 +1,84 @@
+# mypy: allow-untyped-defs
+
+from .base import (NullBrowser, # noqa: F401
+ certificate_domain_list,
+ get_timeout_multiplier, # noqa: F401
+ maybe_add_args)
+from .webkit import WebKitBrowser
+from ..executors import executor_kwargs as base_executor_kwargs
+from ..executors.base import WdspecExecutor # noqa: F401
+from ..executors.executorwebdriver import (WebDriverTestharnessExecutor, # noqa: F401
+ WebDriverRefTestExecutor, # noqa: F401
+ WebDriverCrashtestExecutor) # noqa: F401
+
+__wptrunner__ = {"product": "webkitgtk_minibrowser",
+ "check_args": "check_args",
+ "browser": "WebKitGTKMiniBrowser",
+ "browser_kwargs": "browser_kwargs",
+ "executor": {"testharness": "WebDriverTestharnessExecutor",
+ "reftest": "WebDriverRefTestExecutor",
+ "wdspec": "WdspecExecutor",
+ "crashtest": "WebDriverCrashtestExecutor"},
+ "executor_kwargs": "executor_kwargs",
+ "env_extras": "env_extras",
+ "env_options": "env_options",
+ "run_info_extras": "run_info_extras",
+ "timeout_multiplier": "get_timeout_multiplier"}
+
+
+def check_args(**kwargs):
+ pass
+
+
+def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
+ # Workaround for https://gitlab.gnome.org/GNOME/libsoup/issues/172
+ webdriver_required_args = ["--host=127.0.0.1"]
+ webdriver_args = maybe_add_args(webdriver_required_args, kwargs.get("webdriver_args"))
+ return {"binary": kwargs["binary"],
+ "webdriver_binary": kwargs["webdriver_binary"],
+ "webdriver_args": webdriver_args}
+
+
+def capabilities(server_config, **kwargs):
+ browser_required_args = ["--automation",
+ "--javascript-can-open-windows-automatically=true",
+ "--enable-xss-auditor=false",
+ "--enable-media-capabilities=true",
+ "--enable-encrypted-media=true",
+ "--enable-media-stream=true",
+ "--enable-mock-capture-devices=true",
+ "--enable-webaudio=true"]
+ args = kwargs.get("binary_args", [])
+ args = maybe_add_args(browser_required_args, args)
+ return {
+ "browserName": "MiniBrowser",
+ "webkitgtk:browserOptions": {
+ "binary": kwargs["binary"],
+ "args": args,
+ "certificates": certificate_domain_list(server_config.domains_set, kwargs["host_cert_path"])}}
+
+
+def executor_kwargs(logger, test_type, test_environment, run_info_data,
+ **kwargs):
+ executor_kwargs = base_executor_kwargs(test_type, test_environment, run_info_data, **kwargs)
+ executor_kwargs["close_after_done"] = True
+ executor_kwargs["capabilities"] = capabilities(test_environment.config, **kwargs)
+ if test_type == "wdspec":
+ executor_kwargs["binary_args"] = executor_kwargs["capabilities"]["webkitgtk:browserOptions"]["args"]
+ return executor_kwargs
+
+
+def env_extras(**kwargs):
+ return []
+
+
+def env_options():
+ return {}
+
+
+def run_info_extras(logger, **kwargs):
+ return {"webkit_port": "gtk"}
+
+
+class WebKitGTKMiniBrowser(WebKitBrowser):
+ pass
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/wktr.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/wktr.py
new file mode 100644
index 0000000000..8d429f357b
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/wktr.py
@@ -0,0 +1,239 @@
+# mypy: allow-untyped-defs
+
+import gc
+import os
+import sys
+from multiprocessing import Queue
+from subprocess import PIPE
+from threading import Thread
+from mozprocess import ProcessHandlerMixin
+
+from .base import Browser, ExecutorBrowser
+from .base import get_timeout_multiplier # noqa: F401
+from ..executors import executor_kwargs as base_executor_kwargs
+from ..executors.executorwktr import ( # noqa: F401
+ WKTRCrashtestExecutor,
+ WKTRRefTestExecutor,
+ WKTRTestharnessExecutor,
+)
+
+
+__wptrunner__ = {"product": "WebKitTestRunner",
+ "check_args": "check_args",
+ "browser": "WKTRBrowser",
+ "executor": {
+ "crashtest": "WKTRCrashtestExecutor",
+ "reftest": "WKTRRefTestExecutor",
+ "testharness": "WKTRTestharnessExecutor",
+ },
+ "browser_kwargs": "browser_kwargs",
+ "executor_kwargs": "executor_kwargs",
+ "env_extras": "env_extras",
+ "env_options": "env_options",
+ "update_properties": "update_properties",
+ "timeout_multiplier": "get_timeout_multiplier",}
+
+
+def check_args(**kwargs):
+ pass
+
+
+def browser_kwargs(logger, test_type, run_info_data, config, **kwargs):
+ args = list(kwargs["binary_args"])
+
+ args.append("--allow-any-certificate-for-allowed-hosts")
+
+ for host in config.domains_set:
+ args.append('--allowed-host')
+ args.append(host)
+
+ args.append("-")
+
+ return {"binary": kwargs["binary"],
+ "binary_args": args}
+
+
+def executor_kwargs(logger, test_type, test_environment, run_info_data,
+ **kwargs):
+ executor_kwargs = base_executor_kwargs(test_type, test_environment, run_info_data,
+ **kwargs)
+ return executor_kwargs
+
+
+def env_extras(**kwargs):
+ return []
+
+
+def env_options():
+ return {"server_host": "127.0.0.1",
+ "testharnessreport": "testharnessreport-wktr.js"}
+
+
+def update_properties():
+ return (["debug", "os", "processor"], {"os": ["version"], "processor": ["bits"]})
+
+
+class WKTRBrowser(Browser):
+ """Class that represents an instance of WebKitTestRunner.
+
+ Upon startup, the stdout, stderr, and stdin pipes of the underlying WebKitTestRunner
+ process are connected to multiprocessing Queues so that the runner process can
+ interact with WebKitTestRunner through its protocol mode.
+ """
+
+    def __init__(self, logger, binary="WebKitTestRunner", binary_args=None, **kwargs):
+        super().__init__(logger)
+
+        self._args = [binary] + (binary_args or [])
+ self._proc = None
+
+ build_root_path = os.path.dirname(binary)
+
+ def append_env(var, item, separator):
+ raw = os.environ.get(var)
+ old = raw.split(separator) if raw is not None else []
+ return separator.join(old + [item])
+
+ env = {}
+
+ if sys.platform.startswith("darwin"):
+ env["CA_DISABLE_GENERIC_SHADERS"] = "1"
+ env["__XPC_CA_DISABLE_GENERIC_SHADERS"] = "1"
+
+ env["DYLD_LIBRARY_PATH"] = append_env(
+ "DYLD_LIBRARY_PATH", build_root_path, ":"
+ )
+ env["__XPC_DYLD_LIBRARY_PATH"] = append_env(
+ "__XPC_DYLD_LIBRARY_PATH", build_root_path, ":"
+ )
+ env["DYLD_FRAMEWORK_PATH"] = append_env(
+ "DYLD_FRAMEWORK_PATH", build_root_path, ":"
+ )
+ env["__XPC_DYLD_FRAMEWORK_PATH"] = append_env(
+ "__XPC_DYLD_FRAMEWORK_PATH", build_root_path, ":"
+ )
+
+ env["SQLITE_EXEMPT_PATH_FROM_VNODE_GUARDS"] = "/"
+ env["__XPC_SQLITE_EXEMPT_PATH_FROM_VNODE_GUARDS"] = "/"
+
+ self._extra_env = env
+
+ def start(self, group_metadata, **kwargs):
+ self.logger.debug("Starting WebKitTestRunner: %s..." % self._args[0])
+ #self.logger.debug(repr(self._args))
+
+        env = os.environ.copy()
+        env.update(self._extra_env)
+
+ # Unfortunately we need to use the Process class directly because we do not
+ # want mozprocess to do any output handling at all.
+ self._proc = ProcessHandlerMixin.Process(
+ self._args, stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env
+ )
+ if os.name == "posix":
+ self._proc.pgid = ProcessHandlerMixin._getpgid(self._proc.pid)
+ self._proc.detached_pid = None
+
+ self._stdout_queue = Queue()
+ self._stderr_queue = Queue()
+ self._stdin_queue = Queue()
+
+ self._stdout_reader = self._create_reader_thread(self._proc.stdout, self._stdout_queue)
+ self._stderr_reader = self._create_reader_thread(self._proc.stderr, self._stderr_queue)
+ self._stdin_writer = self._create_writer_thread(self._proc.stdin, self._stdin_queue)
+
+ # WebKitTestRunner is likely still in the process of initializing. The actual waiting
+ # for the startup to finish is done in the WKTRProtocol.
+ self.logger.debug("WebKitTestRunner has been started.")
+
+ def stop(self, force=False):
+ self.logger.debug("Stopping WebKitTestRunner...")
+
+ if self.is_alive():
+ kill_result = self._proc.kill(timeout=5)
+ # This makes sure any left-over child processes get killed.
+ # See http://bugzilla.mozilla.org/show_bug.cgi?id=1760080
+ if force and kill_result != 0:
+ self._proc.kill(9, timeout=5)
+
+ # We need to shut down these queues cleanly to avoid broken pipe error spam in the logs.
+ self._stdout_reader.join(2)
+ self._stderr_reader.join(2)
+
+ self._stdin_queue.put(None)
+ self._stdin_writer.join(2)
+
+ for thread in [self._stdout_reader, self._stderr_reader, self._stdin_writer]:
+ if thread.is_alive():
+ self.logger.warning("WebKitTestRunner IO threads did not shut down gracefully.")
+ return False
+
+ stopped = not self.is_alive()
+ if stopped:
+ self._proc = None
+ self._stdout_queue.close()
+ self.logger.debug("WebKitTestRunner has been stopped.")
+
+            # Circular references to process-related objects can keep too many
+            # of them alive, eventually exhausting OS file handles, so manually
+            # trigger a GC while stopping.
+ gc.collect()
+ else:
+ self.logger.warning("WebKitTestRunner failed to stop.")
+
+ return stopped
+
+ def is_alive(self):
+ return self._proc is not None and self._proc.poll() is None
+
+ def pid(self):
+ return self._proc.pid if self._proc else None
+
+ def executor_browser(self):
+ """This function returns the `ExecutorBrowser` object that is used by other
+ processes to interact with WebKitTestRunner. In our case, this consists of the three
+ multiprocessing Queues as well as an `io_stopped` event to signal when the
+ underlying pipes have reached EOF.
+ """
+ return ExecutorBrowser, {"stdout_queue": self._stdout_queue,
+ "stderr_queue": self._stderr_queue,
+ "stdin_queue": self._stdin_queue}
+
+ def check_crash(self, process, test):
+ return not self.is_alive()
+
+ def _create_reader_thread(self, stream, queue):
+ """This creates (and starts) a background thread which reads lines from `stream` and
+ puts them into `queue` until `stream` reports EOF.
+ """
+ def reader_thread(stream, queue):
+ while True:
+ line = stream.readline()
+ if not line:
+ break
+
+ queue.put(line)
+
+ queue.close()
+ queue.join_thread()
+
+ result = Thread(target=reader_thread, args=(stream, queue), daemon=True)
+ result.start()
+ return result
+
+ def _create_writer_thread(self, stream, queue):
+ """This creates (and starts) a background thread which gets items from `queue` and
+ writes them into `stream` until it encounters a None item in the queue.
+ """
+ def writer_thread(stream, queue):
+ while True:
+ line = queue.get()
+ if not line:
+ break
+
+ stream.write(line)
+ stream.flush()
+
+ result = Thread(target=writer_thread, args=(stream, queue), daemon=True)
+ result.start()
+ return result
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/config.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/config.py
new file mode 100644
index 0000000000..d78efbfd2d
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/config.py
@@ -0,0 +1,67 @@
+# mypy: allow-untyped-defs
+
+from configparser import ConfigParser
+import os
+import sys
+from collections import OrderedDict
+from typing import Dict, Mapping, Optional
+
+here = os.path.dirname(__file__)
+
+
+class ConfigDict(Dict[str, str]):
+ def __init__(self, base_path: str, *args: str, **kwargs: str):
+ self.base_path = base_path
+ dict.__init__(self, *args, **kwargs)
+
+    def get_path(self, key: str, default: Optional[str] = None) -> Optional[str]:
+        if key not in self:
+            return default
+        path = self[key]
+        path = os.path.expanduser(path)
+ return os.path.abspath(os.path.join(self.base_path, path))
+
+
+def read(config_path: str) -> Mapping[str, ConfigDict]:
+ config_path = os.path.abspath(config_path)
+ config_root = os.path.dirname(config_path)
+ parser = ConfigParser()
+ success = parser.read(config_path)
+ assert config_path in success, success
+
+ subns = {"pwd": os.path.abspath(os.path.curdir)}
+
+ rv = OrderedDict()
+ for section in parser.sections():
+ rv[section] = ConfigDict(config_root)
+ for key in parser.options(section):
+ rv[section][key] = parser.get(section, key, raw=False, vars=subns)
+
+ return rv
+
+
+def path(argv=None):
+ if argv is None:
+ argv = []
+ path = None
+
+ for i, arg in enumerate(argv):
+ if arg == "--config":
+ if i + 1 < len(argv):
+ path = argv[i + 1]
+ elif arg.startswith("--config="):
+ path = arg.split("=", 1)[1]
+ if path is not None:
+ break
+
+ if path is None:
+ if os.path.exists("wptrunner.ini"):
+ path = os.path.abspath("wptrunner.ini")
+ else:
+ path = os.path.join(here, "..", "wptrunner.default.ini")
+
+ return os.path.abspath(path)
+
+
+def load():
+ return read(path(sys.argv))
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/environment.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/environment.py
new file mode 100644
index 0000000000..e206e42754
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/environment.py
@@ -0,0 +1,364 @@
+# mypy: allow-untyped-defs
+
+import errno
+import json
+import os
+import signal
+import socket
+import sys
+import time
+
+from mozlog import get_default_logger, handlers
+
+from . import mpcontext
+from .wptlogging import LogLevelRewriter, QueueHandler, LogQueueThread
+
+here = os.path.dirname(__file__)
+repo_root = os.path.abspath(os.path.join(here, os.pardir, os.pardir, os.pardir))
+
+sys.path.insert(0, repo_root)
+from tools import localpaths # noqa: F401
+
+from wptserve.handlers import StringHandler
+
+serve = None
+
+
+def do_delayed_imports(logger, test_paths):
+ global serve
+
+ serve_root = serve_path(test_paths)
+ sys.path.insert(0, serve_root)
+
+ failed = []
+
+ try:
+ from tools.serve import serve
+ except ImportError:
+ failed.append("serve")
+
+ if failed:
+ logger.critical(
+ "Failed to import %s. Ensure that tests path %s contains web-platform-tests" %
+ (", ".join(failed), serve_root))
+ sys.exit(1)
+
+
+def serve_path(test_paths):
+ return test_paths["/"].tests_path
+
+
+def webtranport_h3_server_is_running(host, port, timeout):
+ # TODO(bashi): Move the following import to the beginning of this file
+ # once WebTransportH3Server is enabled by default.
+ from webtransport.h3.webtransport_h3_server import server_is_running # type: ignore
+ return server_is_running(host, port, timeout)
+
+
+class TestEnvironmentError(Exception):
+ pass
+
+
+def get_server_logger():
+ logger = get_default_logger(component="wptserve")
+ log_filter = handlers.LogLevelFilter(lambda x: x, "info")
+ # Downgrade errors to warnings for the server
+ log_filter = LogLevelRewriter(log_filter, ["error"], "warning")
+ logger.component_filter = log_filter
+ return logger
+
+
+class ProxyLoggingContext:
+ """Context manager object that handles setup and teardown of a log queue
+ for handling logging messages from wptserve."""
+
+ def __init__(self, logger):
+ mp_context = mpcontext.get_context()
+ self.log_queue = mp_context.Queue()
+ self.logging_thread = LogQueueThread(self.log_queue, logger)
+ self.logger_handler = QueueHandler(self.log_queue)
+
+ def __enter__(self):
+ self.logging_thread.start()
+ return self.logger_handler
+
+ def __exit__(self, *args):
+ self.log_queue.put(None)
+ # Wait for thread to shut down but not for too long since it's a daemon
+ self.logging_thread.join(1)
+
+
+class TestEnvironment:
+ """Context manager that owns the test environment i.e. the http and
+ websockets servers"""
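+    # Illustrative usage sketch (constructor arguments elided here; see
+    # __init__ below for the full signature):
+    #
+    #     with TestEnvironment(test_paths, ..., env_extras=[]) as env:
+    #         env.ensure_started()  # block until every server is reachable
+    #         ...                   # run tests against env.config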
+ def __init__(self, test_paths, testharness_timeout_multipler,
+ pause_after_test, debug_test, debug_info, options, ssl_config, env_extras,
+ enable_webtransport=False, mojojs_path=None, inject_script=None,
+ suppress_handler_traceback=None):
+
+ self.test_paths = test_paths
+ self.server = None
+ self.config_ctx = None
+ self.config = None
+ self.server_logger = get_server_logger()
+ self.server_logging_ctx = ProxyLoggingContext(self.server_logger)
+ self.testharness_timeout_multipler = testharness_timeout_multipler
+ self.pause_after_test = pause_after_test
+ self.debug_test = debug_test
+ self.test_server_port = options.pop("test_server_port", True)
+ self.debug_info = debug_info
+ self.options = options if options is not None else {}
+
+ mp_context = mpcontext.get_context()
+ self.cache_manager = mp_context.Manager()
+ self.stash = serve.stash.StashServer(mp_context=mp_context)
+ self.env_extras = env_extras
+ self.env_extras_cms = None
+ self.ssl_config = ssl_config
+ self.enable_webtransport = enable_webtransport
+ self.mojojs_path = mojojs_path
+ self.inject_script = inject_script
+ self.suppress_handler_traceback = suppress_handler_traceback
+
+ def __enter__(self):
+ server_log_handler = self.server_logging_ctx.__enter__()
+ self.config_ctx = self.build_config()
+
+ self.config = self.config_ctx.__enter__()
+
+ self.stash.__enter__()
+ self.cache_manager.__enter__()
+
+ assert self.env_extras_cms is None, (
+ "A TestEnvironment object cannot be nested")
+
+ self.env_extras_cms = []
+
+ for env in self.env_extras:
+ cm = env(self.options, self.config)
+ cm.__enter__()
+ self.env_extras_cms.append(cm)
+
+ self.servers = serve.start(self.server_logger,
+ self.config,
+ self.get_routes(),
+ mp_context=mpcontext.get_context(),
+ log_handlers=[server_log_handler],
+ webtransport_h3=self.enable_webtransport)
+
+ if self.options.get("supports_debugger") and self.debug_info and self.debug_info.interactive:
+ self.ignore_interrupts()
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.process_interrupts()
+
+ for servers in self.servers.values():
+ for _, server in servers:
+ server.request_shutdown()
+ for servers in self.servers.values():
+ for _, server in servers:
+ server.wait()
+ for cm in self.env_extras_cms:
+ cm.__exit__(exc_type, exc_val, exc_tb)
+
+ self.env_extras_cms = None
+
+ self.cache_manager.__exit__(exc_type, exc_val, exc_tb)
+ self.stash.__exit__()
+ self.config_ctx.__exit__(exc_type, exc_val, exc_tb)
+ self.server_logging_ctx.__exit__(exc_type, exc_val, exc_tb)
+
+ def ignore_interrupts(self):
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+ def process_interrupts(self):
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+
+ def build_config(self):
+ override_path = os.path.join(serve_path(self.test_paths), "config.json")
+
+ config = serve.ConfigBuilder(self.server_logger)
+
+ ports = {
+ "http": [8000, 8001],
+ "http-private": [8002],
+ "http-public": [8003],
+ "https": [8443, 8444],
+ "https-private": [8445],
+ "https-public": [8446],
+ "ws": [8888],
+ "wss": [8889],
+ "h2": [9000],
+ "webtransport-h3": [11000],
+ }
+ config.ports = ports
+
+ if os.path.exists(override_path):
+ with open(override_path) as f:
+ override_obj = json.load(f)
+ config.update(override_obj)
+
+ config.check_subdomains = False
+
+ ssl_config = self.ssl_config.copy()
+ ssl_config["encrypt_after_connect"] = self.options.get("encrypt_after_connect", False)
+ config.ssl = ssl_config
+
+ if "browser_host" in self.options:
+ config.browser_host = self.options["browser_host"]
+
+ if "bind_address" in self.options:
+ config.bind_address = self.options["bind_address"]
+
+ config.server_host = self.options.get("server_host", None)
+ config.doc_root = serve_path(self.test_paths)
+ config.inject_script = self.inject_script
+
+ if self.suppress_handler_traceback is not None:
+ config.logging["suppress_handler_traceback"] = self.suppress_handler_traceback
+
+ return config
+
+ def get_routes(self):
+ route_builder = serve.get_route_builder(
+ self.server_logger,
+ self.config.aliases,
+ self.config)
+
+ for path, format_args, content_type, route in [
+ ("testharness_runner.html", {}, "text/html", "/testharness_runner.html"),
+ ("print_pdf_runner.html", {}, "text/html", "/print_pdf_runner.html"),
+ (os.path.join(here, "..", "..", "third_party", "pdf_js", "pdf.js"), None,
+ "text/javascript", "/_pdf_js/pdf.js"),
+ (os.path.join(here, "..", "..", "third_party", "pdf_js", "pdf.worker.js"), None,
+ "text/javascript", "/_pdf_js/pdf.worker.js"),
+ (self.options.get("testharnessreport", "testharnessreport.js"),
+ {"output": self.pause_after_test,
+ "timeout_multiplier": self.testharness_timeout_multipler,
+ "explicit_timeout": "true" if self.debug_info is not None else "false",
+ "debug": "true" if self.debug_test else "false"},
+ "text/javascript;charset=utf8",
+ "/resources/testharnessreport.js")]:
+ path = os.path.normpath(os.path.join(here, path))
+ # Note that .headers. files don't apply to static routes, so we need to
+            # re-add any static headers here.
+ headers = {"Cache-Control": "max-age=3600"}
+ route_builder.add_static(path, format_args, content_type, route,
+ headers=headers)
+
+ route_builder.add_handler("GET", "/resources/testdriver.js", TestdriverLoader())
+
+ for url_base, test_root in self.test_paths.items():
+ if url_base == "/":
+ continue
+ route_builder.add_mount_point(url_base, test_root.tests_path)
+
+ if "/" not in self.test_paths:
+ del route_builder.mountpoint_routes["/"]
+
+ if self.mojojs_path:
+ route_builder.add_mount_point("/gen/", self.mojojs_path)
+
+ return route_builder.get_routes()
+
+ def ensure_started(self):
+ # Pause for a while to ensure that the server has a chance to start
+ total_sleep_secs = 60
+ each_sleep_secs = 0.5
+ end_time = time.time() + total_sleep_secs
+ while time.time() < end_time:
+ failed, pending = self.test_servers()
+ if failed:
+ break
+ if not pending:
+ return
+ time.sleep(each_sleep_secs)
+ if failed:
+ failures = ", ".join(f"{scheme}:{port}" for scheme, port in failed)
+ msg = f"Servers failed to start: {failures}"
+ else:
+ pending = ", ".join(f"{scheme}:{port}" for scheme, port in pending)
+ msg = f"Timed out wait for servers to start: {pending}"
+ raise OSError(msg)
+
+ def test_servers(self):
+ failed = []
+ pending = []
+ host = self.config["server_host"]
+ for scheme, servers in self.servers.items():
+ for port, server in servers:
+ if not server.is_alive():
+ failed.append((scheme, port))
+
+ if not failed and self.test_server_port:
+ for scheme, servers in self.servers.items():
+ for port, server in servers:
+ if scheme == "webtransport-h3":
+ if not webtranport_h3_server_is_running(host, port, timeout=5.0):
+                            pending.append((scheme, port))
+ continue
+ s = socket.socket()
+ s.settimeout(0.1)
+ try:
+ s.connect((host, port))
+ except OSError:
+ pending.append((scheme, port))
+ finally:
+ s.close()
+
+ return failed, pending
+
+
+class TestdriverLoader:
+ """A special static handler for serving `/resources/testdriver.js`.
+
+ This handler lazily reads `testdriver{,-extra}.js` so that wptrunner doesn't
+ need to pass the entire file contents to child `wptserve` processes, which
+ can slow `wptserve` startup by several seconds (crbug.com/1479850).
+ """
+ def __init__(self):
+ self._handler = None
+
+ def __call__(self, request, response):
+ if not self._handler:
+ data = b""
+ with open(os.path.join(repo_root, "resources", "testdriver.js"), "rb") as fp:
+ data += fp.read()
+ with open(os.path.join(here, "testdriver-extra.js"), "rb") as fp:
+ data += fp.read()
+ self._handler = StringHandler(data, "text/javascript")
+ return self._handler(request, response)
+
+
+def wait_for_service(logger, host, port, timeout=60, server_process=None):
+ """Waits until network service given as a tuple of (host, port) becomes
+ available, `timeout` duration is reached, or the `server_process` exits at
+ which point ``socket.error`` is raised."""
+ addr = (host, port)
+ logger.debug(f"Trying to connect to {host}:{port}")
+ end = time.time() + timeout
+ while end > time.time():
+ if server_process is not None and server_process.poll() is not None:
+ returncode = server_process.poll()
+ logger.debug(
+ f"Server process {server_process.pid} exited with "
+ f"{returncode}, giving up trying to connect"
+ )
+ break
+
+ so = socket.socket()
+ try:
+ so.connect(addr)
+ except socket.timeout:
+ pass
+ except OSError as e:
+ if e.errno != errno.ECONNREFUSED:
+ raise
+ else:
+ logger.debug(f"Connected to {host}:{port}")
+ return True
+ finally:
+ so.close()
+ time.sleep(0.5)
+ raise OSError("Service is unavailable: %s:%i" % addr)
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/__init__.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/__init__.py
new file mode 100644
index 0000000000..bf829d93e9
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/__init__.py
@@ -0,0 +1,5 @@
+# flake8: noqa (not ideal, but nicer than adding noqa: F401 to every line!)
+from .base import (executor_kwargs,
+ testharness_result_converter,
+ reftest_result_converter,
+ TestExecutor)
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/actions.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/actions.py
new file mode 100644
index 0000000000..6e671f4cfd
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/actions.py
@@ -0,0 +1,480 @@
+# mypy: allow-untyped-defs
+
+class ClickAction:
+ name = "click"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ selector = payload["selector"]
+ element = self.protocol.select.element_by_selector(selector)
+ self.logger.debug("Clicking element: %s" % selector)
+ self.protocol.click.element(element)
+
+
+class DeleteAllCookiesAction:
+ name = "delete_all_cookies"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ self.logger.debug("Deleting all cookies")
+ self.protocol.cookies.delete_all_cookies()
+
+
+class GetAllCookiesAction:
+ name = "get_all_cookies"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ self.logger.debug("Getting all cookies")
+ return self.protocol.cookies.get_all_cookies()
+
+
+class GetComputedLabelAction:
+ name = "get_computed_label"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ selector = payload["selector"]
+ element = self.protocol.select.element_by_selector(selector)
+ self.logger.debug("Getting computed label for element: %s" % element)
+ return self.protocol.accessibility.get_computed_label(element)
+
+
+class GetComputedRoleAction:
+ name = "get_computed_role"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ selector = payload["selector"]
+ element = self.protocol.select.element_by_selector(selector)
+ self.logger.debug("Getting computed role for element: %s" % element)
+ return self.protocol.accessibility.get_computed_role(element)
+
+
+class GetNamedCookieAction:
+ name = "get_named_cookie"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ name = payload["name"]
+ self.logger.debug("Getting cookie named %s" % name)
+ return self.protocol.cookies.get_named_cookie(name)
+
+
+class SendKeysAction:
+ name = "send_keys"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ selector = payload["selector"]
+ keys = payload["keys"]
+ element = self.protocol.select.element_by_selector(selector)
+ self.logger.debug("Sending keys to element: %s" % selector)
+ self.protocol.send_keys.send_keys(element, keys)
+
+
+class MinimizeWindowAction:
+ name = "minimize_window"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ return self.protocol.window.minimize()
+
+
+class SetWindowRectAction:
+ name = "set_window_rect"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ rect = payload["rect"]
+ self.protocol.window.set_rect(rect)
+
+class GetWindowRectAction:
+ name = "get_window_rect"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ return self.protocol.window.get_rect()
+
+class ActionSequenceAction:
+ name = "action_sequence"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+ self.requires_state_reset = False
+
+ def __call__(self, payload):
+ # TODO: some sort of shallow error checking
+ if self.requires_state_reset:
+ self.reset()
+ self.requires_state_reset = True
+ actions = payload["actions"]
+ for actionSequence in actions:
+ if actionSequence["type"] == "pointer":
+ for action in actionSequence["actions"]:
+ if (action["type"] == "pointerMove" and
+ isinstance(action["origin"], dict)):
+ action["origin"] = self.get_element(action["origin"]["selector"])
+ self.protocol.action_sequence.send_actions({"actions": actions})
+
+ def get_element(self, element_selector):
+ return self.protocol.select.element_by_selector(element_selector)
+
+ def reset(self):
+ self.protocol.action_sequence.release()
+ self.requires_state_reset = False
+
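+# Example payload shape handled by ActionSequenceAction above (values are
+# illustrative; the format mirrors the WebDriver "Perform Actions" input):
+#   {"actions": [{"type": "pointer", "id": "pointer1", "actions": [
+#       {"type": "pointerMove", "x": 0, "y": 0, "origin": {"selector": "#target"}},
+#       {"type": "pointerDown", "button": 0},
+#       {"type": "pointerUp", "button": 0}]}]}
+# Element origins given as {"selector": ...} are resolved to elements before the
+# sequence is forwarded to the protocol.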
+
+class GenerateTestReportAction:
+ name = "generate_test_report"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ message = payload["message"]
+ self.logger.debug("Generating test report: %s" % message)
+ self.protocol.generate_test_report.generate_test_report(message)
+
+class SetPermissionAction:
+ name = "set_permission"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ permission_params = payload["permission_params"]
+ descriptor = permission_params["descriptor"]
+ name = descriptor["name"]
+ state = permission_params["state"]
+ self.logger.debug("Setting permission %s to %s" % (name, state))
+ self.protocol.set_permission.set_permission(descriptor, state)
+
+class AddVirtualAuthenticatorAction:
+ name = "add_virtual_authenticator"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ self.logger.debug("Adding virtual authenticator")
+ config = payload["config"]
+ authenticator_id = self.protocol.virtual_authenticator.add_virtual_authenticator(config)
+ self.logger.debug("Authenticator created with ID %s" % authenticator_id)
+ return authenticator_id
+
+class RemoveVirtualAuthenticatorAction:
+ name = "remove_virtual_authenticator"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ authenticator_id = payload["authenticator_id"]
+ self.logger.debug("Removing virtual authenticator %s" % authenticator_id)
+ return self.protocol.virtual_authenticator.remove_virtual_authenticator(authenticator_id)
+
+
+class AddCredentialAction:
+ name = "add_credential"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ authenticator_id = payload["authenticator_id"]
+ credential = payload["credential"]
+ self.logger.debug("Adding credential to virtual authenticator %s " % authenticator_id)
+ return self.protocol.virtual_authenticator.add_credential(authenticator_id, credential)
+
+class GetCredentialsAction:
+ name = "get_credentials"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ authenticator_id = payload["authenticator_id"]
+ self.logger.debug("Getting credentials from virtual authenticator %s " % authenticator_id)
+ return self.protocol.virtual_authenticator.get_credentials(authenticator_id)
+
+class RemoveCredentialAction:
+ name = "remove_credential"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ authenticator_id = payload["authenticator_id"]
+ credential_id = payload["credential_id"]
+ self.logger.debug("Removing credential %s from authenticator %s" % (credential_id, authenticator_id))
+ return self.protocol.virtual_authenticator.remove_credential(authenticator_id, credential_id)
+
+class RemoveAllCredentialsAction:
+ name = "remove_all_credentials"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ authenticator_id = payload["authenticator_id"]
+ self.logger.debug("Removing all credentials from authenticator %s" % authenticator_id)
+ return self.protocol.virtual_authenticator.remove_all_credentials(authenticator_id)
+
+class SetUserVerifiedAction:
+ name = "set_user_verified"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ authenticator_id = payload["authenticator_id"]
+ uv = payload["uv"]
+ self.logger.debug(
+ "Setting user verified flag on authenticator %s to %s" % (authenticator_id, uv["isUserVerified"]))
+ return self.protocol.virtual_authenticator.set_user_verified(authenticator_id, uv)
+
+class SetSPCTransactionModeAction:
+ name = "set_spc_transaction_mode"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ mode = payload["mode"]
+ self.logger.debug("Setting SPC transaction mode to %s" % mode)
+ return self.protocol.spc_transactions.set_spc_transaction_mode(mode)
+
+class SetRPHRegistrationModeAction:
+ name = "set_rph_registration_mode"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ mode = payload["mode"]
+ self.logger.debug("Setting RPH registration mode to %s" % mode)
+ return self.protocol.rph_registrations.set_rph_registration_mode(mode)
+
+class CancelFedCMDialogAction:
+ name = "cancel_fedcm_dialog"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ self.logger.debug("Canceling FedCM dialog")
+ return self.protocol.fedcm.cancel_fedcm_dialog()
+
+class ClickFedCMDialogButtonAction:
+ name = "click_fedcm_dialog_button"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ dialog_button = payload["dialog_button"]
+ self.logger.debug(f"Clicking FedCM dialog button: {dialog_button}")
+ return self.protocol.fedcm.click_fedcm_dialog_button()
+
+class SelectFedCMAccountAction:
+ name = "select_fedcm_account"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ account_index = payload["account_index"]
+ self.logger.debug(f"Selecting FedCM account of index: {account_index}")
+ return self.protocol.fedcm.select_fedcm_account(account_index)
+
+class GetFedCMAccountListAction:
+ name = "get_fedcm_account_list"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ self.logger.debug("Getting FedCM account list")
+ return self.protocol.fedcm.get_fedcm_account_list()
+
+class GetFedCMDialogTitleAction:
+ name = "get_fedcm_dialog_title"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ self.logger.debug("Getting FedCM dialog title")
+ return self.protocol.fedcm.get_fedcm_dialog_title()
+
+class GetFedCMDialogTypeAction:
+ name = "get_fedcm_dialog_type"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ self.logger.debug("Getting FedCM dialog type")
+ return self.protocol.fedcm.get_fedcm_dialog_type()
+
+class SetFedCMDelayEnabledAction:
+ name = "set_fedcm_delay_enabled"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ enabled = payload["enabled"]
+ self.logger.debug("Setting FedCM delay enabled as %s" % enabled)
+ return self.protocol.fedcm.set_fedcm_delay_enabled(enabled)
+
+class ResetFedCMCooldownAction:
+ name = "reset_fedcm_cooldown"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ self.logger.debug("Resetting FedCM cooldown")
+ return self.protocol.fedcm.reset_fedcm_cooldown()
+
+
+class CreateVirtualSensorAction:
+ name = "create_virtual_sensor"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ sensor_type = payload["sensor_type"]
+ sensor_params = payload["sensor_params"]
+ self.logger.debug("Creating %s sensor with %s values" % (sensor_type, sensor_params))
+ return self.protocol.virtual_sensor.create_virtual_sensor(sensor_type, sensor_params)
+
+
+class UpdateVirtualSensorAction:
+ name = "update_virtual_sensor"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ sensor_type = payload["sensor_type"]
+ reading = payload["reading"]
+ self.logger.debug("Updating %s sensor with new readings: %s" % (sensor_type, reading))
+ return self.protocol.virtual_sensor.update_virtual_sensor(sensor_type, reading)
+
+
+class RemoveVirtualSensorAction:
+ name = "remove_virtual_sensor"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ sensor_type = payload["sensor_type"]
+ self.logger.debug("Removing %s sensor" % sensor_type)
+ return self.protocol.virtual_sensor.remove_virtual_sensor(sensor_type)
+
+
+class GetVirtualSensorInformationAction:
+ name = "get_virtual_sensor_information"
+
+ def __init__(self, logger, protocol):
+ self.logger = logger
+ self.protocol = protocol
+
+ def __call__(self, payload):
+ sensor_type = payload["sensor_type"]
+ self.logger.debug("Requesting information from %s sensor" % sensor_type)
+ return self.protocol.virtual_sensor.get_virtual_sensor_information(sensor_type)
+
+
+actions = [ClickAction,
+ DeleteAllCookiesAction,
+ GetAllCookiesAction,
+ GetNamedCookieAction,
+ GetComputedLabelAction,
+ GetComputedRoleAction,
+ SendKeysAction,
+ MinimizeWindowAction,
+ SetWindowRectAction,
+ GetWindowRectAction,
+ ActionSequenceAction,
+ GenerateTestReportAction,
+ SetPermissionAction,
+ AddVirtualAuthenticatorAction,
+ RemoveVirtualAuthenticatorAction,
+ AddCredentialAction,
+ GetCredentialsAction,
+ RemoveCredentialAction,
+ RemoveAllCredentialsAction,
+ SetUserVerifiedAction,
+ SetSPCTransactionModeAction,
+ SetRPHRegistrationModeAction,
+ CancelFedCMDialogAction,
+ ClickFedCMDialogButtonAction,
+ SelectFedCMAccountAction,
+ GetFedCMAccountListAction,
+ GetFedCMDialogTitleAction,
+ GetFedCMDialogTypeAction,
+ SetFedCMDelayEnabledAction,
+ ResetFedCMCooldownAction,
+ CreateVirtualSensorAction,
+ UpdateVirtualSensorAction,
+ RemoveVirtualSensorAction,
+ GetVirtualSensorInformationAction]
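+# Hedged note on how this registry is consumed (see CallbackHandler in base.py):
+# each class is instantiated once per executor and dispatched by its `name`,
+# roughly:
+#   handlers = {cls.name: cls(logger, protocol) for cls in actions}
+#   handlers["click"]({"selector": "#button"})  # selector value is illustrative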
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/base.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/base.py
new file mode 100644
index 0000000000..763b6fcb19
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/base.py
@@ -0,0 +1,810 @@
+# mypy: allow-untyped-defs
+
+import base64
+import hashlib
+import io
+import json
+import os
+import threading
+import traceback
+import socket
+import sys
+from abc import ABCMeta, abstractmethod
+from typing import Any, Callable, ClassVar, Tuple, Type
+from urllib.parse import urljoin, urlsplit, urlunsplit
+
+from . import pytestrunner
+from .actions import actions
+from .protocol import Protocol, WdspecProtocol
+
+
+here = os.path.dirname(__file__)
+
+
+def executor_kwargs(test_type, test_environment, run_info_data, subsuite, **kwargs):
+ timeout_multiplier = kwargs["timeout_multiplier"]
+ if timeout_multiplier is None:
+ timeout_multiplier = 1
+
+ executor_kwargs = {"server_config": test_environment.config,
+ "timeout_multiplier": timeout_multiplier,
+ "debug_info": kwargs["debug_info"],
+ "subsuite": subsuite.name}
+
+ if test_type in ("reftest", "print-reftest"):
+ executor_kwargs["screenshot_cache"] = test_environment.cache_manager.dict()
+ executor_kwargs["reftest_screenshot"] = kwargs["reftest_screenshot"]
+
+ if test_type == "wdspec":
+ executor_kwargs["binary"] = kwargs["binary"]
+ executor_kwargs["binary_args"] = kwargs["binary_args"].copy()
+ executor_kwargs["webdriver_binary"] = kwargs["webdriver_binary"]
+ executor_kwargs["webdriver_args"] = kwargs["webdriver_args"].copy()
+
+ # By default the executor may try to clean up windows after a test (so that
+ # any problems are attributed to the test that caused them). If the user might
+ # want to view the results, however, the executor has to skip that cleanup.
+ if kwargs["pause_after_test"] or kwargs["pause_on_unexpected"]:
+ executor_kwargs["cleanup_after_test"] = False
+ executor_kwargs["debug_test"] = kwargs["debug_test"]
+ return executor_kwargs
+
+
+def strip_server(url):
+ """Remove the scheme and netloc from a url, leaving only the path and any query
+ or fragment.
+
+ url - the url to strip
+
+ e.g. http://example.org:8000/tests?id=1#2 becomes /tests?id=1#2"""
+
+ url_parts = list(urlsplit(url))
+ url_parts[0] = ""
+ url_parts[1] = ""
+ return urlunsplit(url_parts)
+
+
+def server_url(server_config, protocol, subdomain=False):
+ scheme = "https" if protocol == "h2" else protocol
+ host = server_config["browser_host"]
+ if subdomain:
+ # The only supported subdomain filename flag is "www".
+ host = "{subdomain}.{host}".format(subdomain="www", host=host)
+ return "{scheme}://{host}:{port}".format(scheme=scheme, host=host,
+ port=server_config["ports"][protocol][0])
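+# Worked example (hosts and ports are illustrative): with a config of
+#   {"browser_host": "web-platform.test", "ports": {"http": [8000]}}
+# server_url(config, "http") returns "http://web-platform.test:8000", and
+# server_url(config, "http", subdomain=True) returns
+# "http://www.web-platform.test:8000".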
+
+
+class TestharnessResultConverter:
+ harness_codes = {0: "OK",
+ 1: "ERROR",
+ 2: "TIMEOUT",
+ 3: "PRECONDITION_FAILED"}
+
+ test_codes = {0: "PASS",
+ 1: "FAIL",
+ 2: "TIMEOUT",
+ 3: "NOTRUN",
+ 4: "PRECONDITION_FAILED"}
+
+ def __call__(self, test, result, extra=None):
+ """Convert a JSON result into a (TestResult, [SubtestResult]) tuple"""
+ result_url, status, message, stack, subtest_results = result
+ assert result_url == test.url, (f"Got results from {result_url}, expected {test.url}")
+ harness_result = test.make_result(self.harness_codes[status], message, extra=extra, stack=stack)
+ return (harness_result,
+ [test.make_subtest_result(st_name, self.test_codes[st_status], st_message, st_stack)
+ for st_name, st_status, st_message, st_stack in subtest_results])
+
+
+testharness_result_converter = TestharnessResultConverter()
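+# Hedged example of the conversion (names are illustrative): a raw result like
+#   [test.url, 0, None, None, [["subtest name", 0, None, None]]]
+# becomes an "OK" harness result with a single "PASS" subtest result.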
+
+
+def hash_screenshots(screenshots):
+ """Computes the sha1 checksum of a list of base64-encoded screenshots."""
+ return [hashlib.sha1(base64.b64decode(screenshot)).hexdigest()
+ for screenshot in screenshots]
+
+
+def _ensure_hash_in_reftest_screenshots(extra):
+ """Make sure reftest_screenshots have hashes.
+
+ Marionette internal reftest runner does not produce hashes.
+ """
+ log_data = extra.get("reftest_screenshots")
+ if not log_data:
+ return
+ for item in log_data:
+ if not isinstance(item, dict):
+ # Skip relation strings.
+ continue
+ if "hash" not in item:
+ item["hash"] = hash_screenshots([item["screenshot"]])[0]
+
+
+def get_pages(ranges_value, total_pages):
+ """Get a set of page numbers to include in a print reftest.
+
+ :param ranges_value: Parsed page ranges as a list e.g. [[1,2], [4], [6,None]]
+ :param total_pages: Integer total number of pages in the paginated output.
+ :retval: Set containing integer page numbers to include in the comparison e.g.
+ for the example ranges value and 10 total pages this would be
+ {1,2,4,6,7,8,9,10}"""
+ if not ranges_value:
+ return set(range(1, total_pages + 1))
+
+ rv = set()
+
+ for range_limits in ranges_value:
+ if len(range_limits) == 1:
+ range_limits = [range_limits[0], range_limits[0]]
+
+ if range_limits[0] is None:
+ range_limits[0] = 1
+ if range_limits[1] is None:
+ range_limits[1] = total_pages
+
+ if range_limits[0] > total_pages:
+ continue
+ rv |= set(range(range_limits[0], range_limits[1] + 1))
+ return rv
+
+
+def reftest_result_converter(self, test, result):
+ extra = result.get("extra", {})
+ _ensure_hash_in_reftest_screenshots(extra)
+ return (test.make_result(
+ result["status"],
+ result["message"],
+ extra=extra,
+ stack=result.get("stack")), [])
+
+
+def pytest_result_converter(self, test, data):
+ harness_data, subtest_data = data
+
+ if subtest_data is None:
+ subtest_data = []
+
+ harness_result = test.make_result(*harness_data)
+ subtest_results = [test.make_subtest_result(*item) for item in subtest_data]
+
+ return (harness_result, subtest_results)
+
+
+def crashtest_result_converter(self, test, result):
+ return test.make_result(**result), []
+
+
+class ExecutorException(Exception):
+ def __init__(self, status, message):
+ self.status = status
+ self.message = message
+
+
+class TimedRunner:
+ def __init__(self, logger, func, protocol, url, timeout, extra_timeout):
+ self.func = func
+ self.logger = logger
+ self.result = None
+ self.protocol = protocol
+ self.url = url
+ self.timeout = timeout
+ self.extra_timeout = extra_timeout
+ self.result_flag = threading.Event()
+
+ def run(self):
+ for setup_fn in [self.set_timeout, self.before_run]:
+ err = setup_fn()
+ if err:
+ self.result = (False, err)
+ return self.result
+
+ executor = threading.Thread(target=self.run_func)
+ executor.start()
+
+ # Add twice the extra timeout since the called function is expected to
+ # wait at least self.timeout + self.extra_timeout and this gives some leeway
+ timeout = self.timeout + 2 * self.extra_timeout if self.timeout else None
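+ # e.g. timeout=10 with extra_timeout=5 waits up to 10 + 2 * 5 = 20 seconds
+ # before the run is treated as hung (illustrative values).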
+ finished = self.result_flag.wait(timeout)
+ if self.result is None:
+ if finished:
+ # flag is True unless we timeout; this *shouldn't* happen, but
+ # it can if self.run_func fails to set self.result due to raising
+ self.result = False, ("INTERNAL-ERROR", "%s.run_func didn't set a result" %
+ self.__class__.__name__)
+ else:
+ if self.protocol.is_alive():
+ message = "Executor hit external timeout (this may indicate a hang)\n"
+ # get a traceback for the current stack of the executor thread
+ message += "".join(traceback.format_stack(sys._current_frames()[executor.ident]))
+ self.result = False, ("EXTERNAL-TIMEOUT", message)
+ else:
+ self.logger.info("Browser not responding, setting status to CRASH")
+ self.result = False, ("CRASH", None)
+ elif self.result[1] is None:
+ # We didn't get any data back from the test, so check if the
+ # browser is still responsive
+ if self.protocol.is_alive():
+ self.result = False, ("INTERNAL-ERROR", None)
+ else:
+ self.logger.info("Browser not responding, setting status to CRASH")
+ self.result = False, ("CRASH", None)
+
+ return self.result
+
+ def set_timeout(self):
+ raise NotImplementedError
+
+ def before_run(self):
+ pass
+
+ def run_func(self):
+ raise NotImplementedError
+
+
+class TestExecutor:
+ """Abstract Base class for object that actually executes the tests in a
+ specific browser. Typically there will be a different TestExecutor
+ subclass for each test type and method of executing tests.
+
+ :param browser: ExecutorBrowser instance providing properties of the
+ browser that will be tested.
+ :param server_config: Dictionary of wptserve server configuration of the
+ form stored in TestEnvironment.config
+ :param timeout_multiplier: Multiplier relative to base timeout to use
+ when setting test timeout.
+ """
+ __metaclass__ = ABCMeta
+
+ test_type: ClassVar[str]
+ # convert_result is a class variable set to a callable converter
+ # (e.g. reftest_result_converter) converting from an instance of
+ # URLManifestItem (e.g. RefTest) + type-dependent results object +
+ # type-dependent extra data, returning a tuple of Result and list of
+ # SubtestResult. For now, any callable is accepted. TODO: Make this type
+ # stricter when more of the surrounding code is annotated.
+ convert_result: ClassVar[Callable[..., Any]]
+ supports_testdriver = False
+ supports_jsshell = False
+ # Extra timeout to use after internal test timeout at which the harness
+ # should force a timeout
+ extra_timeout = 5 # seconds
+
+
+ def __init__(self, logger, browser, server_config, timeout_multiplier=1,
+ debug_info=None, subsuite=None, **kwargs):
+ self.logger = logger
+ self.runner = None
+ self.browser = browser
+ self.server_config = server_config
+ self.timeout_multiplier = timeout_multiplier
+ self.debug_info = debug_info
+ self.subsuite = subsuite
+ self.last_environment = {"protocol": "http",
+ "prefs": {}}
+ self.protocol = None # This must be set in subclasses
+
+ def setup(self, runner):
+ """Run steps needed before tests can be started e.g. connecting to
+ browser instance
+
+ :param runner: TestRunner instance that is going to run the tests"""
+ self.runner = runner
+ if self.protocol is not None:
+ self.protocol.setup(runner)
+
+ def teardown(self):
+ """Run cleanup steps after tests have finished"""
+ if self.protocol is not None:
+ self.protocol.teardown()
+
+ def reset(self):
+ """Re-initialize internal state to facilitate repeated test execution
+ as implemented by the `--rerun` command-line argument."""
+ pass
+
+ def run_test(self, test):
+ """Run a particular test.
+
+ :param test: The test to run"""
+ try:
+ if test.environment != self.last_environment:
+ self.on_environment_change(test.environment)
+ result = self.do_test(test)
+ except Exception as e:
+ exception_string = traceback.format_exc()
+ message = f"Exception in TestExecutor.run_test:\n{exception_string}"
+ self.logger.warning(message)
+ result = self.result_from_exception(test, e, exception_string)
+
+ # log result of parent test
+ if result[0].status == "ERROR":
+ self.logger.debug(result[0].message)
+
+ self.last_environment = test.environment
+
+ self.runner.send_message("test_ended", test, result)
+
+ def server_url(self, protocol, subdomain=False):
+ return server_url(self.server_config, protocol, subdomain)
+
+ def test_url(self, test):
+ return urljoin(self.server_url(test.environment["protocol"],
+ test.subdomain), test.url)
+
+ @abstractmethod
+ def do_test(self, test):
+ """Test-type and protocol specific implementation of running a
+ specific test.
+
+ :param test: The test to run."""
+ pass
+
+ def on_environment_change(self, new_environment):
+ pass
+
+ def result_from_exception(self, test, e, exception_string):
+ if hasattr(e, "status") and e.status in test.result_cls.statuses:
+ status = e.status
+ else:
+ status = "INTERNAL-ERROR"
+ message = str(getattr(e, "message", ""))
+ if message:
+ message += "\n"
+ message += exception_string
+ return test.make_result(status, message), []
+
+ def wait(self):
+ return self.protocol.base.wait()
+
+
+class TestharnessExecutor(TestExecutor):
+ convert_result = testharness_result_converter
+
+
+class RefTestExecutor(TestExecutor):
+ convert_result = reftest_result_converter
+ is_print = False
+
+ def __init__(self, logger, browser, server_config, timeout_multiplier=1, screenshot_cache=None,
+ debug_info=None, reftest_screenshot="unexpected", **kwargs):
+ TestExecutor.__init__(self, logger, browser, server_config,
+ timeout_multiplier=timeout_multiplier,
+ debug_info=debug_info)
+
+ self.screenshot_cache = screenshot_cache
+ self.reftest_screenshot = reftest_screenshot
+
+
+class CrashtestExecutor(TestExecutor):
+ convert_result = crashtest_result_converter
+
+
+class PrintRefTestExecutor(TestExecutor):
+ convert_result = reftest_result_converter
+ is_print = True
+
+
+class RefTestImplementation:
+ def __init__(self, executor):
+ self.timeout_multiplier = executor.timeout_multiplier
+ self.executor = executor
+ self.subsuite = executor.subsuite
+ # Cache of url:(screenshot hash, screenshot). Typically the
+ # screenshot is None, but we set this value if a test fails
+ # and the screenshot was taken from the cache so that we may
+ # retrieve the screenshot from the cache directly in the future
+ self.screenshot_cache = self.executor.screenshot_cache
+ self.message = None
+ self.reftest_screenshot = executor.reftest_screenshot
+
+ def setup(self):
+ pass
+
+ def teardown(self):
+ pass
+
+ @property
+ def logger(self):
+ return self.executor.logger
+
+ def get_hash(self, test, viewport_size, dpi, page_ranges):
+ key = (self.subsuite, test.url, viewport_size, dpi)
+
+ if key not in self.screenshot_cache:
+ success, data = self.get_screenshot_list(test, viewport_size, dpi, page_ranges)
+
+ if not success:
+ return False, data
+
+ screenshots = data
+ hash_values = hash_screenshots(data)
+ self.screenshot_cache[key] = (hash_values, screenshots)
+
+ rv = (hash_values, screenshots)
+ else:
+ rv = self.screenshot_cache[key]
+
+ self.message.append(f"{test.url} {rv[0]}")
+ return True, rv
+
+ def reset(self):
+ self.screenshot_cache.clear()
+
+ def check_pass(self, hashes, screenshots, urls, relation, fuzzy):
+ """Check if a test passes, and return a tuple of (pass, page_idx),
+ where page_idx is the zero-based index of the first page on which a
+ difference occurs if any, or None if there are no differences"""
+
+ assert relation in ("==", "!=")
+ lhs_hashes, rhs_hashes = hashes
+ lhs_screenshots, rhs_screenshots = screenshots
+
+ if len(lhs_hashes) != len(rhs_hashes):
+ self.logger.info("Got different number of pages")
+ return relation == "!=", -1
+
+ assert len(lhs_screenshots) == len(lhs_hashes) == len(rhs_screenshots) == len(rhs_hashes)
+
+ for (page_idx, (lhs_hash,
+ rhs_hash,
+ lhs_screenshot,
+ rhs_screenshot)) in enumerate(zip(lhs_hashes,
+ rhs_hashes,
+ lhs_screenshots,
+ rhs_screenshots)):
+ comparison_screenshots = (lhs_screenshot, rhs_screenshot)
+ if not fuzzy or fuzzy == ((0, 0), (0, 0)):
+ equal = lhs_hash == rhs_hash
+ # sometimes images can have different hashes, but pixels can be identical.
+ if not equal:
+ self.logger.info("Image hashes didn't match%s, checking pixel differences" %
+ ("" if len(hashes) == 1 else " on page %i" % (page_idx + 1)))
+ max_per_channel, pixels_different = self.get_differences(comparison_screenshots,
+ urls)
+ equal = pixels_different == 0 and max_per_channel == 0
+ else:
+ max_per_channel, pixels_different = self.get_differences(comparison_screenshots,
+ urls,
+ page_idx if len(hashes) > 1 else None)
+ allowed_per_channel, allowed_different = fuzzy
+ self.logger.info("Allowed %s pixels different, maximum difference per channel %s" %
+ ("-".join(str(item) for item in allowed_different),
+ "-".join(str(item) for item in allowed_per_channel)))
+ equal = ((pixels_different == 0 and allowed_different[0] == 0) or
+ (max_per_channel == 0 and allowed_per_channel[0] == 0) or
+ (allowed_per_channel[0] <= max_per_channel <= allowed_per_channel[1] and
+ allowed_different[0] <= pixels_different <= allowed_different[1]))
+ if not equal:
+ return (False if relation == "==" else True, page_idx)
+ # All screenshots were equal within the fuzziness
+ return (True if relation == "==" else False, -1)
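+ # Worked example of the fuzzy check above (values are illustrative): with
+ # fuzzy = ((0, 2), (0, 10)) a page counts as equal when at most 10 pixels
+ # differ and no channel differs by more than 2; with no fuzziness, equality
+ # requires matching hashes or a pixel-for-pixel identical image.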
+
+ def get_differences(self, screenshots, urls, page_idx=None):
+ from PIL import Image, ImageChops, ImageStat
+
+ lhs = Image.open(io.BytesIO(base64.b64decode(screenshots[0]))).convert("RGB")
+ rhs = Image.open(io.BytesIO(base64.b64decode(screenshots[1]))).convert("RGB")
+ self.check_if_solid_color(lhs, urls[0])
+ self.check_if_solid_color(rhs, urls[1])
+ diff = ImageChops.difference(lhs, rhs)
+ minimal_diff = diff.crop(diff.getbbox())
+ mask = minimal_diff.convert("L", dither=None)
+ stat = ImageStat.Stat(minimal_diff, mask)
+ per_channel = max(item[1] for item in stat.extrema)
+ count = stat.count[0]
+ self.logger.info("Found %s pixels different, maximum difference per channel %s%s" %
+ (count,
+ per_channel,
+ "" if page_idx is None else " on page %i" % (page_idx + 1)))
+ return per_channel, count
+
+ def check_if_solid_color(self, image, url):
+ extrema = image.getextrema()
+ if all(channel_min == channel_max for channel_min, channel_max in extrema):
+ color = ''.join('%02X' % value for value, _ in extrema)
+ self.message.append(f"Screenshot is solid color 0x{color} for {url}\n")
+
+ def run_test(self, test):
+ viewport_size = test.viewport_size
+ dpi = test.dpi
+ page_ranges = test.page_ranges
+ self.message = []
+
+ # Depth-first search of the reference tree, with the goal
+ # of reaching a leaf node with only pass results.
+
+ stack = list(((test, item[0]), item[1]) for item in reversed(test.references))
+
+ while stack:
+ hashes = [None, None]
+ screenshots = [None, None]
+ urls = [None, None]
+
+ nodes, relation = stack.pop()
+ fuzzy = self.get_fuzzy(test, nodes, relation)
+
+ for i, node in enumerate(nodes):
+ success, data = self.get_hash(node, viewport_size, dpi, page_ranges)
+ if success is False:
+ return {"status": data[0], "message": data[1]}
+
+ hashes[i], screenshots[i] = data
+ urls[i] = node.url
+
+ is_pass, page_idx = self.check_pass(hashes, screenshots, urls, relation, fuzzy)
+ log_data = [
+ {"url": urls[0], "screenshot": screenshots[0][page_idx],
+ "hash": hashes[0][page_idx]},
+ relation,
+ {"url": urls[1], "screenshot": screenshots[1][page_idx],
+ "hash": hashes[1][page_idx]}
+ ]
+
+ if is_pass:
+ fuzzy = self.get_fuzzy(test, nodes, relation)
+ if nodes[1].references:
+ stack.extend(list(((nodes[1], item[0]), item[1])
+ for item in reversed(nodes[1].references)))
+ else:
+ test_result = {"status": "PASS", "message": None}
+ if (self.reftest_screenshot == "always" or
+ self.reftest_screenshot == "unexpected" and
+ test.expected() != "PASS"):
+ test_result["extra"] = {"reftest_screenshots": log_data}
+ # We passed
+ return test_result
+
+ # We failed, so construct a failure message
+
+ for i, (node, screenshot) in enumerate(zip(nodes, screenshots)):
+ if screenshot is None:
+ success, screenshot = self.retake_screenshot(node, viewport_size, dpi, page_ranges)
+ if success:
+ screenshots[i] = screenshot
+
+ test_result = {"status": "FAIL",
+ "message": "\n".join(self.message)}
+ if (self.reftest_screenshot in ("always", "fail") or
+ self.reftest_screenshot == "unexpected" and
+ test.expected() != "FAIL"):
+ test_result["extra"] = {"reftest_screenshots": log_data}
+ return test_result
+
+ def get_fuzzy(self, root_test, test_nodes, relation):
+ full_key = tuple([item.url for item in test_nodes] + [relation])
+ ref_only_key = test_nodes[1].url
+
+ fuzzy_override = root_test.fuzzy_override
+ fuzzy = test_nodes[0].fuzzy
+
+ sources = [fuzzy_override, fuzzy]
+ keys = [full_key, ref_only_key, None]
+ value = None
+ for source in sources:
+ for key in keys:
+ if key in source:
+ value = source[key]
+ break
+ if value:
+ break
+ return value
+
+ def retake_screenshot(self, node, viewport_size, dpi, page_ranges):
+ success, data = self.get_screenshot_list(node,
+ viewport_size,
+ dpi,
+ page_ranges)
+ if not success:
+ return False, data
+
+ # This key must match the one used by get_hash above (including the subsuite).
+ key = (self.subsuite, node.url, viewport_size, dpi)
+ hash_val, _ = self.screenshot_cache[key]
+ self.screenshot_cache[key] = hash_val, data
+ return True, data
+
+ def get_screenshot_list(self, node, viewport_size, dpi, page_ranges):
+ success, data = self.executor.screenshot(node, viewport_size, dpi, page_ranges)
+ if success and not isinstance(data, list):
+ return success, [data]
+ return success, data
+
+
+class WdspecExecutor(TestExecutor):
+ convert_result = pytest_result_converter
+ protocol_cls: ClassVar[Type[Protocol]] = WdspecProtocol
+
+ def __init__(self, logger, browser, server_config, webdriver_binary,
+ webdriver_args, timeout_multiplier=1, capabilities=None,
+ debug_info=None, binary=None, binary_args=None, **kwargs):
+ super().__init__(logger, browser, server_config,
+ timeout_multiplier=timeout_multiplier,
+ debug_info=debug_info)
+ self.webdriver_binary = webdriver_binary
+ self.webdriver_args = webdriver_args
+ self.timeout_multiplier = timeout_multiplier
+ self.capabilities = capabilities
+ self.binary = binary
+ self.binary_args = binary_args
+
+ def setup(self, runner):
+ self.protocol = self.protocol_cls(self, self.browser)
+ super().setup(runner)
+
+ def is_alive(self):
+ return self.protocol.is_alive()
+
+ def on_environment_change(self, new_environment):
+ pass
+
+ def do_test(self, test):
+ timeout = test.timeout * self.timeout_multiplier + self.extra_timeout
+
+ success, data = WdspecRun(self.do_wdspec,
+ test.abs_path,
+ timeout).run()
+
+ if success:
+ return self.convert_result(test, data)
+
+ return (test.make_result(*data), [])
+
+ def do_wdspec(self, path, timeout):
+ session_config = {"host": self.browser.host,
+ "port": self.browser.port,
+ "capabilities": self.capabilities,
+ "timeout_multiplier": self.timeout_multiplier,
+ "browser": {
+ "binary": self.binary,
+ "args": self.binary_args,
+ "env": self.browser.env,
+ },
+ "webdriver": {
+ "binary": self.webdriver_binary,
+ "args": self.webdriver_args
+ }}
+
+ return pytestrunner.run(path,
+ self.server_config,
+ session_config,
+ timeout=timeout)
+
+
+class WdspecRun:
+ def __init__(self, func, path, timeout):
+ self.func = func
+ self.result = (None, None)
+ self.path = path
+ self.timeout = timeout
+ self.result_flag = threading.Event()
+
+ def run(self):
+ """Runs function in a thread and interrupts it if it exceeds the
+ given timeout. Returns (True, (Result, [SubtestResult ...])) in
+ case of success, or (False, (status, extra information)) in the
+ event of failure.
+ """
+
+ executor = threading.Thread(target=self._run)
+ executor.start()
+
+ self.result_flag.wait(self.timeout)
+ if self.result[1] is None:
+ self.result = False, ("EXTERNAL-TIMEOUT", None)
+
+ return self.result
+
+ def _run(self):
+ try:
+ self.result = True, self.func(self.path, self.timeout)
+ except (socket.timeout, OSError):
+ self.result = False, ("CRASH", None)
+ except Exception as e:
+ message = getattr(e, "message", "")
+ if message:
+ message += "\n"
+ message += traceback.format_exc()
+ self.result = False, ("INTERNAL-ERROR", message)
+ finally:
+ self.result_flag.set()
+
+
+class CallbackHandler:
+ """Handle callbacks from testdriver-using tests.
+
+ The default implementation here makes sense for things that are roughly like
+ WebDriver. Things that are more different to WebDriver may need to create a
+ fully custom implementation."""
+
+ unimplemented_exc: ClassVar[Tuple[Type[Exception], ...]] = (NotImplementedError,)
+ expected_exc: ClassVar[Tuple[Type[Exception], ...]] = ()
+
+ def __init__(self, logger, protocol, test_window):
+ self.protocol = protocol
+ self.test_window = test_window
+ self.logger = logger
+ self.callbacks = {
+ "action": self.process_action,
+ "complete": self.process_complete
+ }
+
+ self.actions = {cls.name: cls(self.logger, self.protocol) for cls in actions}
+
+ def __call__(self, result):
+ url, command, payload = result
+ self.logger.debug("Got async callback: %s" % result[1])
+ try:
+ callback = self.callbacks[command]
+ except KeyError as e:
+ raise ValueError("Unknown callback type %r" % result[1]) from e
+ return callback(url, payload)
+
+ def process_complete(self, url, payload):
+ rv = [strip_server(url)] + payload
+ return True, rv
+
+ def process_action(self, url, payload):
+ action = payload["action"]
+ cmd_id = payload["id"]
+ self.logger.debug(f"Got action: {action}")
+ try:
+ action_handler = self.actions[action]
+ except KeyError as e:
+ raise ValueError(f"Unknown action {action}") from e
+ try:
+ with ActionContext(self.logger, self.protocol, payload.get("context")):
+ try:
+ result = action_handler(payload)
+ except AttributeError as e:
+ # If we fail to get an attribute from the protocol presumably that's a
+ # ProtocolPart we don't implement
+ # AttributeError got an obj property in Python 3.10, for older versions we
+ # fall back to looking at the error message.
+ if ((hasattr(e, "obj") and getattr(e, "obj") == self.protocol) or
+ f"'{self.protocol.__class__.__name__}' object has no attribute" in str(e)):
+ raise NotImplementedError from e
+ raise
+ except self.unimplemented_exc:
+ self.logger.warning("Action %s not implemented" % action)
+ self._send_message(cmd_id, "complete", "error", f"Action {action} not implemented")
+ except self.expected_exc:
+ self.logger.debug(f"Action {action} failed with an expected exception")
+ self._send_message(cmd_id, "complete", "error", f"Action {action} failed")
+ except Exception:
+ self.logger.warning(f"Action {action} failed")
+ self._send_message(cmd_id, "complete", "error")
+ raise
+ else:
+ self.logger.debug(f"Action {action} completed with result {result}")
+ return_message = {"result": result}
+ self._send_message(cmd_id, "complete", "success", json.dumps(return_message))
+
+ return False, None
+
+ def _send_message(self, cmd_id, message_type, status, message=None):
+ self.protocol.testdriver.send_message(cmd_id, message_type, status, message=message)
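+ # Hedged sketch of the round trip (payload values are illustrative):
+ # testdriver-extra.js posts {"action": "click", "id": 7, "selector": "#go"};
+ # process_action runs the matching handler and replies with
+ # _send_message(7, "complete", "success", '{"result": null}') so the test page
+ # can resolve its pending promise.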
+
+
+class ActionContext:
+ def __init__(self, logger, protocol, context):
+ self.logger = logger
+ self.protocol = protocol
+ self.context = context
+ self.initial_window = None
+
+ def __enter__(self):
+ if self.context is None:
+ return
+
+ self.initial_window = self.protocol.base.current_window
+ self.logger.debug("Switching to window %s" % self.context)
+ self.protocol.testdriver.switch_to_window(self.context, self.initial_window)
+
+ def __exit__(self, *args):
+ if self.context is None:
+ return
+
+ self.logger.debug("Switching back to initial window")
+ self.protocol.base.set_window(self.initial_window)
+ self.initial_window = None
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorchrome.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorchrome.py
new file mode 100644
index 0000000000..f564147156
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorchrome.py
@@ -0,0 +1,273 @@
+# mypy: allow-untyped-defs
+
+import os
+import time
+import traceback
+from typing import Type
+from urllib.parse import urljoin
+
+from webdriver import error
+
+from .base import (
+ CrashtestExecutor,
+ TestharnessExecutor,
+ get_pages,
+)
+from .executorwebdriver import (
+ WebDriverCrashtestExecutor,
+ WebDriverFedCMProtocolPart,
+ WebDriverProtocol,
+ WebDriverRefTestExecutor,
+ WebDriverRun,
+ WebDriverTestharnessExecutor,
+ WebDriverTestharnessProtocolPart,
+)
+from .protocol import PrintProtocolPart
+
+here = os.path.dirname(__file__)
+
+
+def make_sanitizer_mixin(crashtest_executor_cls: Type[CrashtestExecutor]): # type: ignore[no-untyped-def]
+ class SanitizerMixin:
+ def __new__(cls, logger, browser, **kwargs):
+ # Overriding `__new__` is the least worst way we can force tests to run
+ # as crashtests at runtime while still supporting:
+ # * Class attributes (e.g., `extra_timeout`)
+ # * Pickleability for `multiprocessing` transport
+ # * The `__wptrunner__` product interface
+ #
+ # These requirements rule out approaches with `functools.partial(...)`
+ # or global variables.
+ if kwargs.get("sanitizer_enabled"):
+ executor = crashtest_executor_cls(logger, browser, **kwargs)
+
+ def convert_from_crashtest_result(test, result):
+ if issubclass(cls, TestharnessExecutor):
+ status = result["status"]
+ if status == "PASS":
+ status = "OK"
+ harness_result = test.make_result(status, result["message"])
+ # Don't report subtests.
+ return harness_result, []
+ # `crashtest` statuses are a subset of `(print-)reftest`
+ # ones, so no extra conversion necessary.
+ return cls.convert_result(executor, test, result)
+
+ executor.convert_result = convert_from_crashtest_result
+ return executor
+ return super().__new__(cls)
+ return SanitizerMixin
+
+
+_SanitizerMixin = make_sanitizer_mixin(WebDriverCrashtestExecutor)
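+# Hedged usage note: executor classes opt in via multiple inheritance (as done
+# later in this file), e.g.
+#   class ChromeDriverRefTestExecutor(WebDriverRefTestExecutor, _SanitizerMixin):
+#       ...
+# so that constructing them with sanitizer_enabled=True transparently swaps in
+# the crashtest executor defined above.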
+
+
+class ChromeDriverTestharnessProtocolPart(WebDriverTestharnessProtocolPart):
+ """Implementation of `testharness.js` tests controlled by ChromeDriver.
+
+ The main difference from the default WebDriver testharness implementation is
+ that the test window can be reused between tests for better performance.
+ """
+
+ def setup(self):
+ super().setup()
+ # Handle (an alphanumeric string) that may be set if window reuse is
+ # enabled. This state allows the protocol to distinguish the test
+ # window from other windows a test itself may create that the "Get
+ # Window Handles" command also returns.
+ #
+ # Because test window persistence is a Chrome-only feature, it's not
+ # exposed to the base WebDriver testharness executor.
+ self.test_window = None
+ self.reuse_window = self.parent.reuse_window
+ # Company prefix to apply to vendor-specific WebDriver extension commands.
+ self.cdp_company_prefix = "goog"
+
+ def close_test_window(self):
+ if self.test_window:
+ self._close_window(self.test_window)
+ self.test_window = None
+
+ def close_old_windows(self):
+ self.webdriver.actions.release()
+ for handle in self.webdriver.handles:
+ if handle not in {self.runner_handle, self.test_window}:
+ self._close_window(handle)
+ if not self.reuse_window:
+ self.close_test_window()
+ self.webdriver.window_handle = self.runner_handle
+ return self.runner_handle
+
+ def open_test_window(self, window_id):
+ if self.test_window:
+ # Try to reuse the existing test window by emulating the `about:blank`
+ # page with no history you would get with a new window.
+ try:
+ self.webdriver.window_handle = self.test_window
+ # Reset navigation history with Chrome DevTools Protocol:
+ # https://chromedevtools.github.io/devtools-protocol/tot/Page/#method-resetNavigationHistory
+ body = {
+ "cmd": "Page.resetNavigationHistory",
+ "params": {},
+ }
+ self.webdriver.send_session_command("POST",
+ self.cdp_company_prefix + "/cdp/execute",
+ body=body)
+ self.webdriver.url = "about:blank"
+ return
+ except error.NoSuchWindowException:
+ self.test_window = None
+ super().open_test_window(window_id)
+
+ def get_test_window(self, window_id, parent, timeout=5):
+ if self.test_window:
+ return self.test_window
+ # Poll the handles endpoint for the test window like the base WebDriver
+ # protocol part, but don't bother checking for the serialized
+ # WindowProxy (not supported by Chrome currently).
+ deadline = time.time() + timeout
+ while time.time() < deadline:
+ self.test_window = self._poll_handles_for_test_window(parent)
+ if self.test_window is not None:
+ assert self.test_window != parent
+ return self.test_window
+ time.sleep(0.03)
+ raise Exception("unable to find test window")
+
+
+class ChromeDriverPrintProtocolPart(PrintProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+ self.runner_handle = None
+ # Company prefix to apply to vendor-specific WebDriver extension commands.
+ self.cdp_company_prefix = "goog"
+
+ def load_runner(self):
+ url = urljoin(self.parent.executor.server_url("http"), "/print_pdf_runner.html")
+ self.logger.debug("Loading %s" % url)
+ try:
+ self.webdriver.url = url
+ except Exception as e:
+ self.logger.critical(
+ "Loading initial page %s failed. Ensure that there "
+ "are no other programs bound to this port and "
+ "that your firewall rules or network setup does not "
+ "prevent access.\n%s" % (url, traceback.format_exc()))
+ raise
+ self.runner_handle = self.webdriver.window_handle
+
+ def render_as_pdf(self, width, height):
+ margin = 0.5
+ body = {
+ "cmd": "Page.printToPDF",
+ "params": {
+ # Chrome accepts dimensions in inches; we are using cm
+ "paperWidth": width / 2.54,
+ "paperHeight": height / 2.54,
+ "marginLeft": margin,
+ "marginRight": margin,
+ "marginTop": margin,
+ "marginBottom": margin,
+ "shrinkToFit": False,
+ "printBackground": True,
+ }
+ }
+ return self.webdriver.send_session_command("POST",
+ self.cdp_company_prefix + "/cdp/execute",
+ body=body)["data"]
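+ # e.g. render_as_pdf(21.0, 29.7) requests an approximately A4-sized PDF
+ # (21.0 / 2.54 ~ 8.27 in by 29.7 / 2.54 ~ 11.69 in) with 0.5 inch margins
+ # (dimensions are illustrative).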
+
+ def pdf_to_png(self, pdf_base64, ranges):
+ handle = self.webdriver.window_handle
+ self.webdriver.window_handle = self.runner_handle
+ try:
+ rv = self.webdriver.execute_async_script("""
+let callback = arguments[arguments.length - 1];
+render('%s').then(result => callback(result))""" % pdf_base64)
+ page_numbers = get_pages(ranges, len(rv))
+ rv = [item for i, item in enumerate(rv) if i + 1 in page_numbers]
+ return rv
+ finally:
+ self.webdriver.window_handle = handle
+
+
+class ChromeDriverFedCMProtocolPart(WebDriverFedCMProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+ # Company prefix to apply to vendor-specific WebDriver extension commands.
+ self.fedcm_company_prefix = "goog"
+
+
+ def confirm_idp_login(self):
+ return self.webdriver.send_session_command("POST",
+ self.fedcm_company_prefix + "/fedcm/confirmidplogin")
+
+
+class ChromeDriverProtocol(WebDriverProtocol):
+ implements = [
+ ChromeDriverFedCMProtocolPart,
+ ChromeDriverPrintProtocolPart,
+ ChromeDriverTestharnessProtocolPart,
+ *(part for part in WebDriverProtocol.implements
+ if part.name != ChromeDriverTestharnessProtocolPart.name and
+ part.name != ChromeDriverFedCMProtocolPart.name)
+ ]
+ reuse_window = False
+
+
+class ChromeDriverRefTestExecutor(WebDriverRefTestExecutor, _SanitizerMixin): # type: ignore
+ protocol_cls = ChromeDriverProtocol
+
+
+class ChromeDriverTestharnessExecutor(WebDriverTestharnessExecutor, _SanitizerMixin): # type: ignore
+ protocol_cls = ChromeDriverProtocol
+
+ def __init__(self, *args, reuse_window=False, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.protocol.reuse_window = reuse_window
+
+
+class ChromeDriverPrintRefTestExecutor(ChromeDriverRefTestExecutor):
+ protocol_cls = ChromeDriverProtocol
+
+ def setup(self, runner):
+ super().setup(runner)
+ self.protocol.pdf_print.load_runner()
+ self.has_window = False
+ with open(os.path.join(here, "reftest.js")) as f:
+ self.script = f.read()
+
+ def screenshot(self, test, viewport_size, dpi, page_ranges):
+ # https://github.com/web-platform-tests/wpt/issues/7140
+ assert dpi is None
+
+ if not self.has_window:
+ self.protocol.base.execute_script(self.script)
+ self.protocol.base.set_window(self.protocol.webdriver.handles[-1])
+ self.has_window = True
+
+ self.viewport_size = viewport_size
+ self.page_ranges = page_ranges.get(test.url)
+ timeout = self.timeout_multiplier * test.timeout if self.debug_info is None else None
+
+ test_url = self.test_url(test)
+
+ return WebDriverRun(self.logger,
+ self._render,
+ self.protocol,
+ test_url,
+ timeout,
+ self.extra_timeout).run()
+
+ def _render(self, protocol, url, timeout):
+ protocol.webdriver.url = url
+
+ protocol.base.execute_script(self.wait_script, asynchronous=True)
+
+ pdf = protocol.pdf_print.render_as_pdf(*self.viewport_size)
+ screenshots = protocol.pdf_print.pdf_to_png(pdf, self.page_ranges)
+ for i, screenshot in enumerate(screenshots):
+ # strip off the "data:image/png;base64," prefix of the data URL
+ if screenshot.startswith("data:image/png;base64,"):
+ screenshots[i] = screenshot.split(",", 1)[1]
+
+ return screenshots
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorcontentshell.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorcontentshell.py
new file mode 100644
index 0000000000..82a6aebcdb
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorcontentshell.py
@@ -0,0 +1,328 @@
+# mypy: allow-untyped-defs
+
+from .base import RefTestExecutor, RefTestImplementation, CrashtestExecutor, TestharnessExecutor
+from .executorchrome import make_sanitizer_mixin
+from .protocol import Protocol, ProtocolPart
+from time import time
+from queue import Empty
+from base64 import b64encode
+import json
+
+
+class CrashError(BaseException):
+ pass
+
+class LeakError(BaseException):
+ pass
+
+def _read_line(io_queue, deadline=None, encoding=None, errors="strict", raise_crash_leak=True):
+ """Reads a single line from the io queue. The read must succeed before `deadline` or
+ a TimeoutError is raised. The line is returned as a bytestring or optionally with the
+ specified `encoding`. If `raise_crash_leak` is set, a CrashError is raised if the line
+ happens to be a crash message, or a LeakError is raised if the line happens to be a
+ leak message.
+ """
+ current_time = time()
+
+ if deadline and current_time > deadline:
+ raise TimeoutError()
+
+ try:
+ line = io_queue.get(True, deadline - current_time if deadline else None)
+ if raise_crash_leak and line.startswith(b"#CRASHED"):
+ raise CrashError()
+ if raise_crash_leak and line.startswith(b"#LEAK"):
+ raise LeakError()
+ except Empty as e:
+ raise TimeoutError() from e
+
+ return line.decode(encoding, errors) if encoding else line
+
+
+class ContentShellTestPart(ProtocolPart):
+ """This protocol part is responsible for running tests via content_shell's protocol mode.
+
+ For more details, see:
+ https://chromium.googlesource.com/chromium/src.git/+/HEAD/content/web_test/browser/test_info_extractor.h
+ """
+ name = "content_shell_test"
+ eof_marker = '#EOF\n' # Marker sent by content_shell after blocks.
+
+ def __init__(self, parent):
+ super().__init__(parent)
+ self.stdout_queue = parent.browser.stdout_queue
+ self.stdin_queue = parent.browser.stdin_queue
+
+ def do_test(self, command, timeout=None):
+ """Send a command to content_shell and return the resulting outputs.
+
+ A command consists of a URL to navigate to, followed by an optional
+ expected image hash and 'print' mode specifier. The syntax looks like:
+ http://web-platform.test:8000/test.html['<hash>['print]]
+ """
+ self._send_command(command)
+
+ deadline = time() + timeout if timeout else None
+ # The first block can also contain audio data but not in WPT.
+ text = self._read_block(deadline)
+ image = self._read_block(deadline)
+
+ return text, image
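+ # Illustrative command strings (hashes omitted): a plain testharness or
+ # reftest run sends just the URL, while a print reftest with no expected
+ # image hash sends "<url>''print" (see ContentShellRefTestExecutor.screenshot
+ # below).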
+
+ def _send_command(self, command):
+ """Sends a single `command`, i.e. a URL to open, to content_shell.
+ """
+ self.stdin_queue.put((command + "\n").encode("utf-8"))
+
+ def _read_block(self, deadline=None):
+ """Tries to read a single block of content from stdout before the `deadline`.
+ """
+ while True:
+ line = _read_line(self.stdout_queue, deadline, "latin-1").rstrip()
+
+ if line == "Content-Type: text/plain":
+ return self._read_text_block(deadline)
+
+ if line == "Content-Type: image/png":
+ return self._read_image_block(deadline)
+
+ if line == "#EOF":
+ return None
+
+ def _read_text_block(self, deadline=None):
+ """Tries to read a plain text block in utf-8 encoding before the `deadline`.
+ """
+ result = ""
+
+ while True:
+ line = _read_line(self.stdout_queue, deadline, "utf-8", "replace", False)
+
+ if line.endswith(self.eof_marker):
+ result += line[:-len(self.eof_marker)]
+ break
+ elif line.endswith('#EOF\r\n'):
+ result += line[:-len('#EOF\r\n')]
+ self.logger.warning('Got a CRLF-terminated #EOF - this is a driver bug.')
+ break
+
+ result += line
+
+ return result
+
+ def _read_image_block(self, deadline=None):
+ """Tries to read an image block (as a binary png) before the `deadline`.
+ """
+ content_length_line = _read_line(self.stdout_queue, deadline, "utf-8")
+ assert content_length_line.startswith("Content-Length:")
+ content_length = int(content_length_line[15:])
+
+ result = bytearray()
+
+ while True:
+ line = _read_line(self.stdout_queue, deadline, raise_crash_leak=False)
+ excess = len(line) + len(result) - content_length
+
+ if excess > 0:
+ # This is the line that contains the EOF marker.
+ assert excess == len(self.eof_marker)
+ result += line[:-excess]
+ break
+
+ result += line
+
+ return result
+
+
+class ContentShellErrorsPart(ProtocolPart):
+ """This protocol part is responsible for collecting the errors reported by content_shell.
+ """
+ name = "content_shell_errors"
+
+ def __init__(self, parent):
+ super().__init__(parent)
+ self.stderr_queue = parent.browser.stderr_queue
+
+ def read_errors(self):
+ """Reads the entire content of the stderr queue as is available right now (no blocking).
+ """
+ result = ""
+
+ while not self.stderr_queue.empty():
+ # There is no potential for race conditions here because this is the only place
+ # where we read from the stderr queue.
+ result += _read_line(self.stderr_queue, None, "utf-8", "replace", False)
+
+ return result
+
+
+class ContentShellBasePart(ProtocolPart):
+ """This protocol part provides functionality common to all executors.
+
+ In particular, this protocol part implements `wait()`, which test runners
+ block on (when `--pause-after-test` is enabled) until the next test
+ should run.
+ """
+ name = "base"
+
+ def __init__(self, parent):
+ super().__init__(parent)
+ self.io_stopped = parent.browser.io_stopped
+
+ def wait(self):
+ # This worker is unpaused when the browser window is closed, which this
+ # `multiprocessing.Event` signals.
+ self.io_stopped.wait()
+ # Never rerun the test.
+ return False
+
+
+class ContentShellProtocol(Protocol):
+ implements = [
+ ContentShellBasePart,
+ ContentShellTestPart,
+ ContentShellErrorsPart,
+ ]
+ init_timeout = 10 # Timeout (seconds) to wait for #READY message.
+
+ def connect(self):
+ """Waits for content_shell to emit its "#READY" message which signals that it is fully
+ initialized. We wait for a maximum of self.init_timeout seconds.
+ """
+ deadline = time() + self.init_timeout
+
+ while True:
+ if _read_line(self.browser.stdout_queue, deadline).rstrip() == b"#READY":
+ break
+
+ def after_connect(self):
+ pass
+
+ def teardown(self):
+ # Close the queue properly to avoid broken pipe spam in the log.
+ self.browser.stdin_queue.close()
+ self.browser.stdin_queue.join_thread()
+
+ def is_alive(self):
+ """Checks if content_shell is alive by determining if the IO pipes are still
+ open. This does not guarantee that the process is responsive.
+ """
+ # io_stopped is set once the browser's IO pipes have closed.
+ return not self.browser.io_stopped.is_set()
+
+
+def _convert_exception(test, exception, errors):
+ """Converts our TimeoutError and CrashError exceptions into test results.
+ """
+ if isinstance(exception, TimeoutError):
+ return (test.make_result("EXTERNAL-TIMEOUT", errors), [])
+ if isinstance(exception, CrashError):
+ return (test.make_result("CRASH", errors), [])
+ if isinstance(exception, LeakError):
+ # TODO: the internal error is to force a restart, but it doesn't correctly
+ # describe what the issue is. Need to find a way to return a "FAIL",
+ # and restart the content_shell after the test run.
+ return (test.make_result("INTERNAL-ERROR", errors), [])
+ raise exception
+
+
+def timeout_for_test(executor, test):
+ if executor.debug_info and executor.debug_info.interactive:
+ return None
+ return test.timeout * executor.timeout_multiplier
+
+
+class ContentShellCrashtestExecutor(CrashtestExecutor):
+ def __init__(self, logger, browser, server_config, timeout_multiplier=1, debug_info=None,
+ **kwargs):
+ super().__init__(logger, browser, server_config, timeout_multiplier, debug_info, **kwargs)
+ self.protocol = ContentShellProtocol(self, browser)
+
+ def do_test(self, test):
+ try:
+ _ = self.protocol.content_shell_test.do_test(self.test_url(test),
+ timeout_for_test(self, test))
+ self.protocol.content_shell_errors.read_errors()
+ return self.convert_result(test, {"status": "PASS", "message": None})
+ except BaseException as exception:
+ return _convert_exception(test, exception, self.protocol.content_shell_errors.read_errors())
+
+
+_SanitizerMixin = make_sanitizer_mixin(ContentShellCrashtestExecutor)
+
+
+class ContentShellRefTestExecutor(RefTestExecutor, _SanitizerMixin): # type: ignore
+ def __init__(self, logger, browser, server_config, timeout_multiplier=1, screenshot_cache=None,
+ debug_info=None, reftest_screenshot="unexpected", **kwargs):
+ super().__init__(logger, browser, server_config, timeout_multiplier, screenshot_cache,
+ debug_info, reftest_screenshot, **kwargs)
+ self.implementation = RefTestImplementation(self)
+ self.protocol = ContentShellProtocol(self, browser)
+
+ def reset(self):
+ self.implementation.reset()
+
+ def do_test(self, test):
+ try:
+ result = self.implementation.run_test(test)
+ self.protocol.content_shell_errors.read_errors()
+ return self.convert_result(test, result)
+ except BaseException as exception:
+ return _convert_exception(test, exception, self.protocol.content_shell_errors.read_errors())
+
+ def screenshot(self, test, viewport_size, dpi, page_ranges):
+ # Currently, the page size and DPI are hardcoded for print-reftests:
+ # https://chromium.googlesource.com/chromium/src/+/4e1b7bc33d42b401d7d9ad1dcba72883add3e2af/content/web_test/renderer/test_runner.cc#100
+ # Content shell has an internal `window.testRunner.setPrintingSize(...)`
+ # API, but it's not callable with protocol mode.
+ assert dpi is None
+ command = self.test_url(test)
+ if self.is_print:
+ # Currently, `content_shell` uses the expected image hash to avoid
+ # dumping a matching image as an optimization. In Chromium, the
+ # hash can be computed from an expected screenshot checked into the
+ # source tree (i.e., without looking at a reference). This is not
+ # possible in `wpt`, so pass an empty hash here to force a dump.
+ command += "''print"
+
+ _, image = self.protocol.content_shell_test.do_test(command,
+ timeout_for_test(self, test))
+ if not image:
+ return False, ("ERROR", self.protocol.content_shell_errors.read_errors())
+ return True, b64encode(image).decode()
+
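Based on the comment in `screenshot()` above, the protocol-mode command sent for a print reftest is the test URL followed by an (empty) expected pixel hash and the literal "print" mode, delimited by apostrophes. A small illustrative helper (not used by the executor) that builds the same string:

    def build_print_command(test_url, expected_hash=""):
        # An empty hash forces content_shell to dump the rendered image.
        return f"{test_url}'{expected_hash}'print"

    assert build_print_command("/print/example.html") == "/print/example.html''print"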
+
+class ContentShellPrintRefTestExecutor(ContentShellRefTestExecutor):
+ is_print = True
+
+
+class ContentShellTestharnessExecutor(TestharnessExecutor, _SanitizerMixin): # type: ignore
+ # Chromium's `testdriver-vendor.js` partially implements testdriver support
+ # with internal APIs [1].
+ #
+ # [1]: https://chromium.googlesource.com/chromium/src/+/HEAD/docs/testing/writing_web_tests.md#Relying-on-Blink_Specific-Testing-APIs
+ supports_testdriver = True
+
+ def __init__(self, logger, browser, server_config, timeout_multiplier=1, debug_info=None,
+ **kwargs):
+ super().__init__(logger, browser, server_config, timeout_multiplier, debug_info, **kwargs)
+ self.protocol = ContentShellProtocol(self, browser)
+
+ def do_test(self, test):
+ try:
+ text, _ = self.protocol.content_shell_test.do_test(self.test_url(test),
+ timeout_for_test(self, test))
+ errors = self.protocol.content_shell_errors.read_errors()
+ if not text:
+ return (test.make_result("ERROR", errors), [])
+
+ result_url, status, message, stack, subtest_results = json.loads(text)
+ if result_url != test.url:
+ # Suppress `convert_result`'s URL validation.
+ # See `testharnessreport-content-shell.js` for details.
+ self.logger.warning('Got results from %s, expected %s' % (result_url, test.url))
+ self.logger.warning('URL mismatch may be a false positive '
+ 'if the test navigates')
+ result_url = test.url
+ raw_result = result_url, status, message, stack, subtest_results
+ return self.convert_result(test, raw_result)
+ except BaseException as exception:
+ return _convert_exception(test, exception, self.protocol.content_shell_errors.read_errors())
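For reference, the testharness result parsed in `do_test` above is a JSON array of `[result_url, status, message, stack, subtest_results]`. The concrete values below are made up and the per-subtest fields are an assumption; only the five-element top-level shape is taken from the code:

    import json

    example_text = json.dumps([
        "/dom/example.html",                    # result_url
        "OK",                                   # harness status
        None,                                   # harness message
        None,                                   # stack
        [["subtest 1", "PASS", None, None]],    # subtest_results (fields assumed)
    ])
    result_url, status, message, stack, subtest_results = json.loads(example_text)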
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executoredge.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executoredge.py
new file mode 100644
index 0000000000..cbe5eadf9a
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executoredge.py
@@ -0,0 +1,101 @@
+# mypy: allow-untyped-defs
+
+import os
+
+from .executorwebdriver import (
+ WebDriverCrashtestExecutor,
+ WebDriverProtocol,
+ WebDriverRefTestExecutor,
+ WebDriverRun,
+ WebDriverTestharnessExecutor,
+)
+
+from .executorchrome import (
+ ChromeDriverPrintProtocolPart,
+ ChromeDriverTestharnessProtocolPart,
+ make_sanitizer_mixin,
+)
+
+here = os.path.dirname(__file__)
+
+_SanitizerMixin = make_sanitizer_mixin(WebDriverCrashtestExecutor)
+
+class EdgeChromiumDriverTestharnessProtocolPart(ChromeDriverTestharnessProtocolPart):
+ def setup(self):
+ super().setup()
+ self.cdp_company_prefix = "ms"
+
+
+class EdgeChromiumDriverPrintProtocolPart(ChromeDriverPrintProtocolPart):
+ def setup(self):
+ super().setup()
+ self.cdp_company_prefix = "ms"
+
+
+class EdgeChromiumDriverProtocol(WebDriverProtocol):
+ implements = [
+ EdgeChromiumDriverPrintProtocolPart,
+ EdgeChromiumDriverTestharnessProtocolPart,
+ *(part for part in WebDriverProtocol.implements
+ if part.name != EdgeChromiumDriverTestharnessProtocolPart.name)
+ ]
+ reuse_window = False
+
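The `implements` list above swaps in the Edge-specific testharness part by filtering the base protocol's parts on their `name` attribute. A self-contained sketch of that pattern, using hypothetical class names:

    class BasePart:
        name = "testharness"

    class VendorPart(BasePart):
        pass  # same name, vendor-specific behaviour

    base_implements = [BasePart]
    implements = [VendorPart,
                  *(part for part in base_implements if part.name != VendorPart.name)]
    assert implements == [VendorPart]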
+
+class EdgeChromiumDriverRefTestExecutor(WebDriverRefTestExecutor, _SanitizerMixin): # type: ignore
+ protocol_cls = EdgeChromiumDriverProtocol
+
+
+class EdgeChromiumDriverTestharnessExecutor(WebDriverTestharnessExecutor, _SanitizerMixin): # type: ignore
+ protocol_cls = EdgeChromiumDriverProtocol
+
+ def __init__(self, *args, reuse_window=False, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.protocol.reuse_window = reuse_window
+
+
+class EdgeChromiumDriverPrintRefTestExecutor(EdgeChromiumDriverRefTestExecutor):
+ protocol_cls = EdgeChromiumDriverProtocol
+
+ def setup(self, runner):
+ super().setup(runner)
+ self.protocol.pdf_print.load_runner()
+ self.has_window = False
+ with open(os.path.join(here, "reftest.js")) as f:
+ self.script = f.read()
+
+ def screenshot(self, test, viewport_size, dpi, page_ranges):
+ # https://github.com/web-platform-tests/wpt/issues/7140
+ assert dpi is None
+
+ if not self.has_window:
+ self.protocol.base.execute_script(self.script)
+ self.protocol.base.set_window(self.protocol.webdriver.handles[-1])
+ self.has_window = True
+
+ self.viewport_size = viewport_size
+ self.page_ranges = page_ranges.get(test.url)
+ timeout = self.timeout_multiplier * test.timeout if self.debug_info is None else None
+
+ test_url = self.test_url(test)
+
+ return WebDriverRun(self.logger,
+ self._render,
+ self.protocol,
+ test_url,
+ timeout,
+ self.extra_timeout).run()
+
+ def _render(self, protocol, url, timeout):
+ protocol.webdriver.url = url
+
+ protocol.base.execute_script(self.wait_script, asynchronous=True)
+
+ pdf = protocol.pdf_print.render_as_pdf(*self.viewport_size)
+ screenshots = protocol.pdf_print.pdf_to_png(pdf, self.page_ranges)
+ for i, screenshot in enumerate(screenshots):
+ # Strip off the "data:image/png;base64," prefix of the data URL.
+ if screenshot.startswith("data:image/png;base64,"):
+ screenshots[i] = screenshot.split(",", 1)[1]
+
+ return screenshots
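The loop at the end of `_render` above strips the data-URL prefix from each rendered page. An equivalent standalone helper, for illustration only:

    def strip_png_data_url(screenshot):
        prefix = "data:image/png;base64,"
        return screenshot[len(prefix):] if screenshot.startswith(prefix) else screenshot

    assert strip_png_data_url("data:image/png;base64,iVBORw0KGgo=") == "iVBORw0KGgo="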
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executormarionette.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executormarionette.py
new file mode 100644
index 0000000000..a5bf61d405
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executormarionette.py
@@ -0,0 +1,1371 @@
+# mypy: allow-untyped-defs
+
+import json
+import os
+import threading
+import time
+import traceback
+import uuid
+
+from urllib.parse import urljoin
+
+errors = None
+marionette = None
+pytestrunner = None
+
+here = os.path.dirname(__file__)
+
+from .base import (CallbackHandler,
+ CrashtestExecutor,
+ RefTestExecutor,
+ RefTestImplementation,
+ TestharnessExecutor,
+ TimedRunner,
+ WdspecExecutor,
+ get_pages,
+ strip_server)
+from .protocol import (AccessibilityProtocolPart,
+ ActionSequenceProtocolPart,
+ AssertsProtocolPart,
+ BaseProtocolPart,
+ TestharnessProtocolPart,
+ PrefsProtocolPart,
+ Protocol,
+ StorageProtocolPart,
+ SelectorProtocolPart,
+ ClickProtocolPart,
+ CookiesProtocolPart,
+ SendKeysProtocolPart,
+ TestDriverProtocolPart,
+ CoverageProtocolPart,
+ GenerateTestReportProtocolPart,
+ VirtualAuthenticatorProtocolPart,
+ WindowProtocolPart,
+ SetPermissionProtocolPart,
+ PrintProtocolPart,
+ DebugProtocolPart,
+ VirtualSensorProtocolPart,
+ merge_dicts)
+
+
+def do_delayed_imports():
+ global errors, marionette, Addons, WebAuthn
+
+ from marionette_driver import marionette, errors
+ from marionette_driver.addons import Addons
+ from marionette_driver.webauthn import WebAuthn
+
+
+def _switch_to_window(marionette, handle):
+ """Switch to the specified window; subsequent commands will be
+ directed at the new window.
+
+ This is a workaround for issue 24924[0]; marionettedriver 3.1.0 dropped the
+ 'name' parameter from its switch_to_window command, but it is still needed
+ for at least Firefox 79.
+
+ [0]: https://github.com/web-platform-tests/wpt/issues/24924
+
+ :param marionette: The Marionette instance
+ :param handle: The id of the window to switch to.
+ """
+ marionette._send_message("WebDriver:SwitchToWindow",
+ {"handle": handle, "name": handle, "focus": True})
+ marionette.window = handle
+
+
+class MarionetteCallbackHandler(CallbackHandler):
+ def __init__(self, logger, protocol, test_window):
+ MarionetteCallbackHandler.expected_exc = (errors.MarionetteException,)
+ super().__init__(logger, protocol, test_window)
+
+
+class MarionetteBaseProtocolPart(BaseProtocolPart):
+ def __init__(self, parent):
+ super().__init__(parent)
+ self.timeout = None
+
+ def setup(self):
+ self.marionette = self.parent.marionette
+
+ def execute_script(self, script, asynchronous=False, args=None):
+ method = self.marionette.execute_async_script if asynchronous else self.marionette.execute_script
+ script_args = args if args is not None else []
+ return method(script, script_args=script_args, new_sandbox=False, sandbox=None)
+
+ def set_timeout(self, timeout):
+ """Set the Marionette script timeout.
+
+ :param timeout: Script timeout in seconds
+
+ """
+ if timeout != self.timeout:
+ self.marionette.timeout.script = timeout
+ self.timeout = timeout
+
+ @property
+ def current_window(self):
+ return self.marionette.current_window_handle
+
+ def set_window(self, handle):
+ _switch_to_window(self.marionette, handle)
+
+ def window_handles(self):
+ return self.marionette.window_handles
+
+ def load(self, url):
+ self.marionette.navigate(url)
+
+ def wait(self):
+ try:
+ socket_timeout = self.marionette.client.socket_timeout
+ except AttributeError:
+ # This can happen if there was a crash
+ return
+ if socket_timeout:
+ try:
+ self.marionette.timeout.script = socket_timeout / 2
+ except OSError:
+ self.logger.debug("Socket closed")
+ return
+
+ while True:
+ try:
+ rv = self.marionette.execute_async_script("""let callback = arguments[arguments.length - 1];
+addEventListener("__test_restart", e => {e.preventDefault(); callback(true)})""")
+ # None can be returned if we try to run the script again before we've completed a navigation.
+ # In that case, keep retrying
+ if rv is not None:
+ return rv
+ except errors.NoSuchWindowException:
+ # The window closed
+ break
+ except errors.ScriptTimeoutException:
+ self.logger.debug("Script timed out")
+ pass
+ except errors.JavascriptException as e:
+ # This can happen if we navigate, but just keep going
+ self.logger.debug(e)
+ pass
+ except OSError:
+ self.logger.debug("Socket closed")
+ break
+ except Exception:
+ self.logger.warning(traceback.format_exc())
+ break
+ return False
+
+
+class MarionetteTestharnessProtocolPart(TestharnessProtocolPart):
+ def __init__(self, parent):
+ super().__init__(parent)
+ self.runner_handle = None
+ with open(os.path.join(here, "runner.js")) as f:
+ self.runner_script = f.read()
+ with open(os.path.join(here, "window-loaded.js")) as f:
+ self.window_loaded_script = f.read()
+
+ def setup(self):
+ self.marionette = self.parent.marionette
+
+ def load_runner(self, url_protocol):
+ # Check whether we previously had a test window open and, if so, make sure it's closed.
+ if self.runner_handle:
+ self._close_windows()
+ url = urljoin(self.parent.executor.server_url(url_protocol), "/testharness_runner.html")
+ self.logger.debug("Loading %s" % url)
+ try:
+ self.dismiss_alert(lambda: self.marionette.navigate(url))
+ except Exception:
+ self.logger.critical(
+ "Loading initial page %s failed. Ensure that the "
+ "there are no other programs bound to this port and "
+ "that your firewall rules or network setup does not "
+ "prevent access.\n%s" % (url, traceback.format_exc()))
+ raise
+ self.runner_handle = self.marionette.current_window_handle
+ format_map = {"title": threading.current_thread().name.replace("'", '"')}
+ self.parent.base.execute_script(self.runner_script % format_map)
+
+ def _close_windows(self):
+ handles = self.marionette.window_handles
+ runner_handle = None
+ try:
+ handles.remove(self.runner_handle)
+ runner_handle = self.runner_handle
+ except ValueError:
+ # The runner window probably changed id but we can restore it
+ # This isn't supposed to happen, but marionette ids are not yet stable
+ # We assume that the first handle returned corresponds to the runner,
+ # but it hopefully doesn't matter too much if that assumption is
+ # wrong since we reload the runner in that tab anyway.
+ runner_handle = handles.pop(0)
+ self.logger.info("Changing harness_window to %s" % runner_handle)
+
+ for handle in handles:
+ try:
+ self.logger.info("Closing window %s" % handle)
+ _switch_to_window(self.marionette, handle)
+ self.dismiss_alert(lambda: self.marionette.close())
+ except errors.NoSuchWindowException:
+ # We might have raced with the previous test to close this
+ # window, skip it.
+ pass
+ _switch_to_window(self.marionette, runner_handle)
+ return runner_handle
+
+ def close_old_windows(self, url_protocol):
+ runner_handle = self._close_windows()
+ if runner_handle != self.runner_handle:
+ self.load_runner(url_protocol)
+ return self.runner_handle
+
+ def dismiss_alert(self, f):
+ while True:
+ try:
+ f()
+ except errors.UnexpectedAlertOpen:
+ alert = self.marionette.switch_to_alert()
+ try:
+ alert.dismiss()
+ except errors.NoAlertPresentException:
+ pass
+ else:
+ break
+
+ def get_test_window(self, window_id, parent, timeout=5):
+ """Find the test window amongst all the open windows.
+ This is assumed to be either the named window or the one after the parent in the list of
+ window handles
+
+ :param window_id: The DOM name of the Window
+ :param parent: The handle of the runner window
+ :param timeout: The time in seconds to wait for the window to appear. This is because in
+ some implementations there's a race between calling window.open and the
+ window being added to the list of WebDriver accessible windows."""
+ test_window = None
+ end_time = time.time() + timeout
+ while time.time() < end_time:
+ if window_id:
+ try:
+ # Try this, it's in Level 1 but nothing supports it yet
+ win_s = self.parent.base.execute_script("return window['%s'];" % window_id)
+ win_obj = json.loads(win_s)
+ test_window = win_obj["window-fcc6-11e5-b4f8-330a88ab9d7f"]
+ except Exception:
+ pass
+
+ if test_window is None:
+ handles = self.marionette.window_handles
+ if len(handles) == 2:
+ test_window = next(iter(set(handles) - {parent}))
+ elif len(handles) > 2 and handles[0] == parent:
+ # Hope the first one here is the test window
+ test_window = handles[1]
+
+ if test_window is not None:
+ assert test_window != parent
+ return test_window
+
+ time.sleep(0.1)
+
+ raise Exception("unable to find test window")
+
+ def test_window_loaded(self):
+ """Wait until the page in the new window has been loaded.
+
+ Ignore JavaScript exceptions that are thrown when the document
+ has been unloaded due to a process change.
+ """
+ while True:
+ try:
+ self.parent.base.execute_script(self.window_loaded_script, asynchronous=True)
+ break
+ except errors.JavascriptException:
+ pass
+
+
+class MarionettePrefsProtocolPart(PrefsProtocolPart):
+ def setup(self):
+ self.marionette = self.parent.marionette
+
+ def set(self, name, value):
+ if not isinstance(value, str):
+ value = str(value)
+
+ if value.lower() not in ("true", "false"):
+ try:
+ int(value)
+ except ValueError:
+ value = f"'{value}'"
+ else:
+ value = value.lower()
+
+ self.logger.info(f"Setting pref {name} to {value}")
+
+ script = """
+ let prefInterface = Components.classes["@mozilla.org/preferences-service;1"]
+ .getService(Components.interfaces.nsIPrefBranch);
+ let pref = '%s';
+ let type = prefInterface.getPrefType(pref);
+ let value = %s;
+ switch(type) {
+ case prefInterface.PREF_STRING:
+ prefInterface.setCharPref(pref, value);
+ break;
+ case prefInterface.PREF_BOOL:
+ prefInterface.setBoolPref(pref, value);
+ break;
+ case prefInterface.PREF_INT:
+ prefInterface.setIntPref(pref, value);
+ break;
+ case prefInterface.PREF_INVALID:
+ // Pref doesn't seem to be defined already; guess at the
+ // right way to set it based on the type of value we have.
+ switch (typeof value) {
+ case "boolean":
+ prefInterface.setBoolPref(pref, value);
+ break;
+ case "string":
+ prefInterface.setCharPref(pref, value);
+ break;
+ case "number":
+ prefInterface.setIntPref(pref, value);
+ break;
+ default:
+ throw new Error("Unknown pref value type: " + (typeof value));
+ }
+ break;
+ default:
+ throw new Error("Unknown pref type " + type);
+ }
+ """ % (name, value)
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ self.marionette.execute_script(script)
+
+ def clear(self, name):
+ self.logger.info(f"Clearing pref {name}")
+ script = """
+ let prefInterface = Components.classes["@mozilla.org/preferences-service;1"]
+ .getService(Components.interfaces.nsIPrefBranch);
+ let pref = '%s';
+ prefInterface.clearUserPref(pref);
+ """ % name
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ self.marionette.execute_script(script)
+
+ def get(self, name):
+ script = """
+ let prefInterface = Components.classes["@mozilla.org/preferences-service;1"]
+ .getService(Components.interfaces.nsIPrefBranch);
+ let pref = '%s';
+ let type = prefInterface.getPrefType(pref);
+ switch(type) {
+ case prefInterface.PREF_STRING:
+ return prefInterface.getCharPref(pref);
+ case prefInterface.PREF_BOOL:
+ return prefInterface.getBoolPref(pref);
+ case prefInterface.PREF_INT:
+ return prefInterface.getIntPref(pref);
+ case prefInterface.PREF_INVALID:
+ return null;
+ }
+ """ % name
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ rv = self.marionette.execute_script(script)
+ self.logger.debug(f"Got pref {name} with value {rv}")
+ return rv
+
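The `set()` method above converts a Python value into a JavaScript literal before splicing it into the chrome-scope script: booleans stay as lowercase literals, integers stay unquoted, and everything else is quoted as a string. A standalone reimplementation of just that conversion, for illustration:

    def to_js_literal(value):
        value = str(value)
        if value.lower() in ("true", "false"):
            return value.lower()    # JS boolean literal
        try:
            int(value)
            return value            # JS number literal
        except ValueError:
            return f"'{value}'"     # JS string literal

    assert to_js_literal(True) == "true"
    assert to_js_literal(42) == "42"
    assert to_js_literal("en-US") == "'en-US'"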
+
+class MarionetteStorageProtocolPart(StorageProtocolPart):
+ def setup(self):
+ self.marionette = self.parent.marionette
+
+ def clear_origin(self, url):
+ self.logger.info("Clearing origin %s" % (url))
+ script = """
+ let url = '%s';
+ let uri = Components.classes["@mozilla.org/network/io-service;1"]
+ .getService(Ci.nsIIOService)
+ .newURI(url);
+ let ssm = Components.classes["@mozilla.org/scriptsecuritymanager;1"]
+ .getService(Ci.nsIScriptSecurityManager);
+ let principal = ssm.createContentPrincipal(uri, {});
+ let qms = Components.classes["@mozilla.org/dom/quota-manager-service;1"]
+ .getService(Components.interfaces.nsIQuotaManagerService);
+ qms.clearStoragesForOriginPrefix(principal, "default");
+ """ % url
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ self.marionette.execute_script(script)
+
+
+class MarionetteAssertsProtocolPart(AssertsProtocolPart):
+ def setup(self):
+ self.assert_count = {"chrome": 0, "content": 0}
+ self.chrome_assert_count = 0
+ self.marionette = self.parent.marionette
+
+ def get(self):
+ script = """
+ debug = Cc["@mozilla.org/xpcom/debug;1"].getService(Ci.nsIDebug2);
+ if (debug.isDebugBuild) {
+ return debug.assertionCount;
+ }
+ return 0;
+ """
+
+ def get_count(context, **kwargs):
+ try:
+ context_count = self.marionette.execute_script(script, **kwargs)
+ if context_count:
+ self.parent.logger.info("Got %s assert count %s" % (context, context_count))
+ test_count = context_count - self.assert_count[context]
+ self.assert_count[context] = context_count
+ return test_count
+ except errors.NoSuchWindowException:
+ # If the window was already closed
+ self.parent.logger.warning("Failed to get assertion count; window was closed")
+ except (errors.MarionetteException, OSError):
+ # This usually happens if the process crashed
+ pass
+
+ counts = []
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ counts.append(get_count("chrome"))
+ if self.parent.e10s:
+ counts.append(get_count("content", sandbox="system"))
+
+ counts = [item for item in counts if item is not None]
+
+ if not counts:
+ return None
+
+ return sum(counts)
+
+
+class MarionetteSelectorProtocolPart(SelectorProtocolPart):
+ def setup(self):
+ self.marionette = self.parent.marionette
+
+ def elements_by_selector(self, selector):
+ return self.marionette.find_elements("css selector", selector)
+
+
+class MarionetteClickProtocolPart(ClickProtocolPart):
+ def setup(self):
+ self.marionette = self.parent.marionette
+
+ def element(self, element):
+ return element.click()
+
+
+class MarionetteCookiesProtocolPart(CookiesProtocolPart):
+ def setup(self):
+ self.marionette = self.parent.marionette
+
+ def delete_all_cookies(self):
+ self.logger.info("Deleting all cookies")
+ return self.marionette.delete_all_cookies()
+
+ def get_all_cookies(self):
+ self.logger.info("Getting all cookies")
+ return self.marionette.get_cookies()
+
+ def get_named_cookie(self, name):
+ self.logger.info("Getting cookie named %s" % name)
+ try:
+ return self.marionette.get_cookie(name)
+ # When errors.NoSuchCookieException is supported,
+ # that should be used here instead.
+ except Exception:
+ return None
+
+
+class MarionetteSendKeysProtocolPart(SendKeysProtocolPart):
+ def setup(self):
+ self.marionette = self.parent.marionette
+
+ def send_keys(self, element, keys):
+ return element.send_keys(keys)
+
+class MarionetteWindowProtocolPart(WindowProtocolPart):
+ def setup(self):
+ self.marionette = self.parent.marionette
+
+ def minimize(self):
+ return self.marionette.minimize_window()
+
+ def set_rect(self, rect):
+ self.marionette.set_window_rect(rect["x"], rect["y"], rect["height"], rect["width"])
+
+ def get_rect(self):
+ return self.marionette.window_rect
+
+class MarionetteActionSequenceProtocolPart(ActionSequenceProtocolPart):
+ def setup(self):
+ self.marionette = self.parent.marionette
+
+ def send_actions(self, actions):
+ actions = self.marionette._to_json(actions)
+ self.logger.info(actions)
+ self.marionette._send_message("WebDriver:PerformActions", actions)
+
+ def release(self):
+ self.marionette._send_message("WebDriver:ReleaseActions", {})
+
+
+class MarionetteTestDriverProtocolPart(TestDriverProtocolPart):
+ def setup(self):
+ self.marionette = self.parent.marionette
+
+ def send_message(self, cmd_id, message_type, status, message=None):
+ obj = {
+ "cmd_id": cmd_id,
+ "type": "testdriver-%s" % str(message_type),
+ "status": str(status)
+ }
+ if message:
+ obj["message"] = str(message)
+ self.parent.base.execute_script("window.postMessage(%s, '*')" % json.dumps(obj))
+
+ def _switch_to_frame(self, index_or_elem):
+ try:
+ self.marionette.switch_to_frame(index_or_elem)
+ except (errors.NoSuchFrameException,
+ errors.StaleElementException) as e:
+ raise ValueError from e
+
+ def _switch_to_parent_frame(self):
+ self.marionette.switch_to_parent_frame()
+
+
+class MarionetteCoverageProtocolPart(CoverageProtocolPart):
+ def setup(self):
+ self.marionette = self.parent.marionette
+
+ if not self.parent.ccov:
+ self.is_enabled = False
+ return
+
+ script = """
+ const {PerTestCoverageUtils} = ChromeUtils.import("chrome://remote/content/marionette/PerTestCoverageUtils.jsm");
+ return PerTestCoverageUtils.enabled;
+ """
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ self.is_enabled = self.marionette.execute_script(script)
+
+ def reset(self):
+ script = """
+ var callback = arguments[arguments.length - 1];
+
+ const {PerTestCoverageUtils} = ChromeUtils.import("chrome://remote/content/marionette/PerTestCoverageUtils.jsm");
+ PerTestCoverageUtils.beforeTest().then(callback, callback);
+ """
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ try:
+ error = self.marionette.execute_async_script(script)
+ if error is not None:
+ raise Exception('Failure while resetting counters: %s' % json.dumps(error))
+ except (errors.MarionetteException, OSError):
+ # This usually happens if the process crashed
+ pass
+
+ def dump(self):
+ if len(self.marionette.window_handles):
+ handle = self.marionette.window_handles[0]
+ _switch_to_window(self.marionette, handle)
+
+ script = """
+ var callback = arguments[arguments.length - 1];
+
+ const {PerTestCoverageUtils} = ChromeUtils.import("chrome://remote/content/marionette/PerTestCoverageUtils.jsm");
+ PerTestCoverageUtils.afterTest().then(callback, callback);
+ """
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ try:
+ error = self.marionette.execute_async_script(script)
+ if error is not None:
+ raise Exception('Failure while dumping counters: %s' % json.dumps(error))
+ except (errors.MarionetteException, OSError):
+ # This usually happens if the process crashed
+ pass
+
+class MarionetteGenerateTestReportProtocolPart(GenerateTestReportProtocolPart):
+ def setup(self):
+ self.marionette = self.parent.marionette
+
+ def generate_test_report(self, config):
+ raise NotImplementedError("generate_test_report not yet implemented")
+
+class MarionetteVirtualAuthenticatorProtocolPart(VirtualAuthenticatorProtocolPart):
+ def setup(self):
+ self.webauthn = WebAuthn(self.parent.marionette)
+
+ def add_virtual_authenticator(self, config):
+ return self.webauthn.add_virtual_authenticator(config)
+
+ def remove_virtual_authenticator(self, authenticator_id):
+ self.webauthn.remove_virtual_authenticator(authenticator_id)
+
+ def add_credential(self, authenticator_id, credential):
+ self.webauthn.add_credential(authenticator_id, credential)
+
+ def get_credentials(self, authenticator_id):
+ return self.webauthn.get_credentials(authenticator_id)
+
+ def remove_credential(self, authenticator_id, credential_id):
+ self.webauthn.remove_credential(authenticator_id, credential_id)
+
+ def remove_all_credentials(self, authenticator_id):
+ self.webauthn.remove_all_credentials(authenticator_id)
+
+ def set_user_verified(self, authenticator_id, uv):
+ self.webauthn.set_user_verified(authenticator_id, uv)
+
+
+class MarionetteSetPermissionProtocolPart(SetPermissionProtocolPart):
+ def setup(self):
+ self.marionette = self.parent.marionette
+
+ def set_permission(self, descriptor, state):
+ body = {
+ "descriptor": descriptor,
+ "state": state,
+ }
+ try:
+ self.marionette._send_message("WebDriver:SetPermission", body)
+ except errors.UnsupportedOperationException as e:
+ raise NotImplementedError("set_permission not yet implemented") from e
+
+
+class MarionettePrintProtocolPart(PrintProtocolPart):
+ def setup(self):
+ self.marionette = self.parent.marionette
+ self.runner_handle = None
+
+ def load_runner(self):
+ url = urljoin(self.parent.executor.server_url("http"), "/print_pdf_runner.html")
+ self.logger.debug("Loading %s" % url)
+ try:
+ self.marionette.navigate(url)
+ except Exception:
+ self.logger.critical(
+ "Loading initial page %s failed. Ensure that "
+ "there are no other programs bound to this port and "
+ "that your firewall rules or network setup does not "
+ "prevent access.\n%s" % (url, traceback.format_exc()))
+ raise
+ self.runner_handle = self.marionette.current_window_handle
+
+ def render_as_pdf(self, width, height):
+ margin = 0.5 * 2.54  # Half an inch, expressed in centimeters.
+ body = {
+ "page": {
+ "width": width,
+ "height": height
+ },
+ "margin": {
+ "left": margin,
+ "right": margin,
+ "top": margin,
+ "bottom": margin,
+ },
+ "shrinkToFit": False,
+ "background": True,
+ }
+ return self.marionette._send_message("WebDriver:Print", body, key="value")
+
+ def pdf_to_png(self, pdf_base64, page_ranges):
+ handle = self.marionette.current_window_handle
+ _switch_to_window(self.marionette, self.runner_handle)
+ try:
+ rv = self.marionette.execute_async_script("""
+let callback = arguments[arguments.length - 1];
+render('%s').then(result => callback(result))""" % pdf_base64, new_sandbox=False, sandbox=None)
+ page_numbers = get_pages(page_ranges, len(rv))
+ rv = [item for i, item in enumerate(rv) if i + 1 in page_numbers]
+ return rv
+ finally:
+ _switch_to_window(self.marionette, handle)
+
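`pdf_to_png` above keeps only the rendered pages whose 1-based index is in the set returned by `get_pages` (imported from `.base`). The set below is a hypothetical stand-in for that return value; only the filtering step is taken from the code:

    rendered = ["png-page-1", "png-page-2", "png-page-3"]
    page_numbers = {1, 3}   # hypothetical result of get_pages(page_ranges, len(rendered))
    kept = [item for i, item in enumerate(rendered) if i + 1 in page_numbers]
    assert kept == ["png-page-1", "png-page-3"]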
+
+class MarionetteDebugProtocolPart(DebugProtocolPart):
+ def setup(self):
+ self.marionette = self.parent.marionette
+
+ def load_devtools(self):
+ with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+ # Once ESR 107 is released, we can replace the ChromeUtils.import(DevToolsShim.jsm)
+ # with ChromeUtils.importESModule(DevToolsShim.sys.mjs) in this snippet:
+ self.parent.base.execute_script("""
+const { DevToolsShim } = ChromeUtils.import(
+ "chrome://devtools-startup/content/DevToolsShim.jsm"
+);
+
+const callback = arguments[arguments.length - 1];
+
+async function loadDevTools() {
+ const tab = window.gBrowser.selectedTab;
+ await DevToolsShim.showToolboxForTab(tab, {
+ toolId: "webconsole",
+ hostType: "window"
+ });
+}
+
+loadDevTools().catch((e) => console.error("Devtools failed to load", e))
+ .then(callback);
+""", asynchronous=True)
+
+
+class MarionetteAccessibilityProtocolPart(AccessibilityProtocolPart):
+ def setup(self):
+ self.marionette = self.parent.marionette
+
+ def get_computed_label(self, element):
+ return element.computed_label
+
+ def get_computed_role(self, element):
+ return element.computed_role
+
+
+class MarionetteVirtualSensorProtocolPart(VirtualSensorProtocolPart):
+ def setup(self):
+ self.marionette = self.parent.marionette
+
+ def create_virtual_sensor(self, sensor_type, sensor_params):
+ raise NotImplementedError("create_virtual_sensor not yet implemented")
+
+ def update_virtual_sensor(self, sensor_type, reading):
+ raise NotImplementedError("update_virtual_sensor not yet implemented")
+
+ def remove_virtual_sensor(self, remove_parameters):
+ raise NotImplementedError("remove_virtual_sensor not yet implemented")
+
+ def get_virtual_sensor_information(self, information_parameters):
+ raise NotImplementedError("get_virtual_sensor_information not yet implemented")
+
+
+class MarionetteProtocol(Protocol):
+ implements = [MarionetteBaseProtocolPart,
+ MarionetteTestharnessProtocolPart,
+ MarionettePrefsProtocolPart,
+ MarionetteStorageProtocolPart,
+ MarionetteSelectorProtocolPart,
+ MarionetteClickProtocolPart,
+ MarionetteCookiesProtocolPart,
+ MarionetteSendKeysProtocolPart,
+ MarionetteWindowProtocolPart,
+ MarionetteActionSequenceProtocolPart,
+ MarionetteTestDriverProtocolPart,
+ MarionetteAssertsProtocolPart,
+ MarionetteCoverageProtocolPart,
+ MarionetteGenerateTestReportProtocolPart,
+ MarionetteVirtualAuthenticatorProtocolPart,
+ MarionetteSetPermissionProtocolPart,
+ MarionettePrintProtocolPart,
+ MarionetteDebugProtocolPart,
+ MarionetteAccessibilityProtocolPart,
+ MarionetteVirtualSensorProtocolPart]
+
+ def __init__(self, executor, browser, capabilities=None, timeout_multiplier=1, e10s=True, ccov=False):
+ do_delayed_imports()
+
+ super().__init__(executor, browser)
+ self.marionette = None
+ self.marionette_port = browser.marionette_port
+ self.capabilities = capabilities
+ if hasattr(browser, "capabilities"):
+ if self.capabilities is None:
+ self.capabilities = browser.capabilities
+ else:
+ merge_dicts(self.capabilities, browser.capabilities)
+ self.timeout_multiplier = timeout_multiplier
+ self.runner_handle = None
+ self.e10s = e10s
+ self.ccov = ccov
+
+ def connect(self):
+ self.logger.debug("Connecting to Marionette on port %i" % self.marionette_port)
+ startup_timeout = marionette.Marionette.DEFAULT_STARTUP_TIMEOUT * self.timeout_multiplier
+ self.marionette = marionette.Marionette(host='127.0.0.1',
+ port=self.marionette_port,
+ socket_timeout=None,
+ startup_timeout=startup_timeout)
+
+ self.logger.debug("Waiting for Marionette connection")
+ while True:
+ try:
+ self.marionette.raise_for_port()
+ break
+ except OSError:
+ # When running in a debugger wait indefinitely for Firefox to start
+ if self.executor.debug_info is None:
+ raise
+
+ self.logger.debug("Starting Marionette session")
+ self.marionette.start_session(self.capabilities)
+ self.logger.debug("Marionette session started")
+
+ def after_connect(self):
+ pass
+
+ def teardown(self):
+ if self.marionette and self.marionette.session_id:
+ try:
+ self.marionette._request_in_app_shutdown()
+ self.marionette.delete_session(send_request=False)
+ self.marionette.cleanup()
+ except Exception:
+ # This is typically because the session never started
+ pass
+ if self.marionette is not None:
+ self.marionette = None
+ super().teardown()
+
+ def is_alive(self):
+ try:
+ self.marionette.current_window_handle
+ except Exception:
+ return False
+ return True
+
+ def on_environment_change(self, old_environment, new_environment):
+ # Unset all the old prefs
+ for name in old_environment.get("prefs", {}).keys():
+ value = self.executor.original_pref_values[name]
+ if value is None:
+ self.prefs.clear(name)
+ else:
+ self.prefs.set(name, value)
+
+ for name, value in new_environment.get("prefs", {}).items():
+ self.executor.original_pref_values[name] = self.prefs.get(name)
+ self.prefs.set(name, value)
+
+ pac = new_environment.get("pac", None)
+
+ if pac != old_environment.get("pac", None):
+ if pac is None:
+ self.prefs.clear("network.proxy.type")
+ self.prefs.clear("network.proxy.autoconfig_url")
+ else:
+ self.prefs.set("network.proxy.type", 2)
+ self.prefs.set("network.proxy.autoconfig_url",
+ urljoin(self.executor.server_url("http"), pac))
+
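`on_environment_change` above records the previous value of every pref it overrides in `executor.original_pref_values` so the old environment can be undone later. A minimal dict-based sketch of that bookkeeping, with the Marionette calls replaced by plain dictionary operations:

    prefs = {"existing.pref": 1}   # stand-in for the live pref store
    original = {}                  # stand-in for executor.original_pref_values

    def apply_environment(new_prefs):
        for name, value in new_prefs.items():
            original[name] = prefs.get(name)   # remember what to restore
            prefs[name] = value

    def restore_environment(old_prefs):
        for name in old_prefs:
            if original[name] is None:
                prefs.pop(name, None)          # pref was unset before: clear it
            else:
                prefs[name] = original[name]

    apply_environment({"existing.pref": 2, "new.pref": True})
    restore_environment({"existing.pref": 2, "new.pref": True})
    assert prefs == {"existing.pref": 1}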
+class ExecuteAsyncScriptRun(TimedRunner):
+ def set_timeout(self):
+ timeout = self.timeout
+
+ try:
+ if timeout is not None:
+ self.protocol.base.set_timeout(timeout + self.extra_timeout)
+ else:
+ # We just want it to never time out, really, but marionette doesn't
+ # make that possible. It also seems to time out immediately if the
+ # timeout is set too high. This works at least.
+ self.protocol.base.set_timeout(2**28 - 1)
+ except OSError:
+ msg = "Lost marionette connection before starting test"
+ self.logger.error(msg)
+ return ("INTERNAL-ERROR", msg)
+
+ def before_run(self):
+ index = self.url.rfind("/storage/")
+ if index != -1:
+ # Clear storage
+ self.protocol.storage.clear_origin(self.url)
+
+ def run_func(self):
+ try:
+ self.result = True, self.func(self.protocol, self.url, self.timeout)
+ except errors.ScriptTimeoutException:
+ self.logger.debug("Got a marionette timeout")
+ self.result = False, ("EXTERNAL-TIMEOUT", None)
+ except OSError:
+ # This can happen on a crash
+ # We should also check after the test whether the Firefox process is
+ # still running and, if not, ignore any other result and report a crash
+ self.logger.info("IOError on command, setting status to CRASH")
+ self.result = False, ("CRASH", None)
+ except errors.NoSuchWindowException:
+ self.logger.info("NoSuchWindowException on command, setting status to CRASH")
+ self.result = False, ("CRASH", None)
+ except Exception as e:
+ if isinstance(e, errors.JavascriptException) and str(e).startswith("Document was unloaded"):
+ message = "Document unloaded; maybe test navigated the top-level-browsing context?"
+ else:
+ message = getattr(e, "message", "")
+ if message:
+ message += "\n"
+ message += traceback.format_exc()
+ self.logger.warning(traceback.format_exc())
+ self.result = False, ("INTERNAL-ERROR", message)
+ finally:
+ self.result_flag.set()
+
+
+class MarionetteTestharnessExecutor(TestharnessExecutor):
+ supports_testdriver = True
+
+ def __init__(self, logger, browser, server_config, timeout_multiplier=1,
+ close_after_done=True, debug_info=None, capabilities=None,
+ debug=False, ccov=False, debug_test=False, **kwargs):
+ """Marionette-based executor for testharness.js tests"""
+ TestharnessExecutor.__init__(self, logger, browser, server_config,
+ timeout_multiplier=timeout_multiplier,
+ debug_info=debug_info)
+ self.protocol = MarionetteProtocol(self,
+ browser,
+ capabilities,
+ timeout_multiplier,
+ kwargs["e10s"],
+ ccov)
+ with open(os.path.join(here, "testharness_webdriver_resume.js")) as f:
+ self.script_resume = f.read()
+ self.close_after_done = close_after_done
+ self.window_id = str(uuid.uuid4())
+ self.debug = debug
+ self.debug_test = debug_test
+
+ self.install_extensions = browser.extensions
+
+ self.original_pref_values = {}
+
+ if marionette is None:
+ do_delayed_imports()
+
+ def setup(self, runner):
+ super().setup(runner)
+ for extension_path in self.install_extensions:
+ self.logger.info("Installing extension from %s" % extension_path)
+ addons = Addons(self.protocol.marionette)
+ addons.install(extension_path)
+
+ self.protocol.testharness.load_runner(self.last_environment["protocol"])
+
+ def is_alive(self):
+ return self.protocol.is_alive()
+
+ def on_environment_change(self, new_environment):
+ self.protocol.on_environment_change(self.last_environment, new_environment)
+
+ if new_environment["protocol"] != self.last_environment["protocol"]:
+ self.protocol.testharness.load_runner(new_environment["protocol"])
+
+ def do_test(self, test):
+ timeout = (test.timeout * self.timeout_multiplier if self.debug_info is None
+ else None)
+
+ success, data = ExecuteAsyncScriptRun(self.logger,
+ self.do_testharness,
+ self.protocol,
+ self.test_url(test),
+ timeout,
+ self.extra_timeout).run()
+ # The format of data depends on whether the test ran to completion.
+ # For assertion counting we only care that, if it didn't complete,
+ # the status is in the first field.
+ status = None
+ if not success:
+ status = data[0]
+
+ extra = None
+ if self.debug and (success or status not in ("CRASH", "INTERNAL-ERROR")):
+ assertion_count = self.protocol.asserts.get()
+ if assertion_count is not None:
+ extra = {"assertion_count": assertion_count}
+
+ if success:
+ return self.convert_result(test, data, extra=extra)
+
+ return (test.make_result(extra=extra, *data), [])
+
+ def do_testharness(self, protocol, url, timeout):
+ parent_window = protocol.testharness.close_old_windows(self.last_environment["protocol"])
+
+ if self.protocol.coverage.is_enabled:
+ self.protocol.coverage.reset()
+
+ protocol.base.execute_script("window.open('about:blank', '%s', 'noopener')" % self.window_id)
+ test_window = protocol.testharness.get_test_window(self.window_id, parent_window,
+ timeout=10 * self.timeout_multiplier)
+ self.protocol.base.set_window(test_window)
+ protocol.testharness.test_window_loaded()
+
+ if self.debug_test and self.browser.supports_devtools:
+ self.protocol.debug.load_devtools()
+
+ handler = MarionetteCallbackHandler(self.logger, protocol, test_window)
+ protocol.marionette.navigate(url)
+ while True:
+ result = protocol.base.execute_script(
+ self.script_resume, args=[strip_server(url)], asynchronous=True)
+ if result is None:
+ # This can happen if we get a content process crash
+ return None
+ done, rv = handler(result)
+ if done:
+ break
+
+ if self.protocol.coverage.is_enabled:
+ self.protocol.coverage.dump()
+
+ return rv
+
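The while-loop at the end of `do_testharness` above is the executor's resume protocol: run the resume script, treat `None` as a crash, and otherwise let the callback handler either finish or service a testdriver action and loop again. A stripped-down sketch of that control flow with the Marionette calls abstracted into plain callables:

    def resume_loop(execute_resume, handler):
        # execute_resume() stands in for running script_resume asynchronously;
        # handler(result) stands in for MarionetteCallbackHandler.__call__.
        while True:
            result = execute_resume()
            if result is None:
                return None      # e.g. a content process crash
            done, rv = handler(result)
            if done:
                return rv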
+
+class MarionetteRefTestExecutor(RefTestExecutor):
+ is_print = False
+
+ def __init__(self, logger, browser, server_config, timeout_multiplier=1,
+ screenshot_cache=None, close_after_done=True,
+ debug_info=None, reftest_internal=False,
+ reftest_screenshot="unexpected", ccov=False,
+ group_metadata=None, capabilities=None, debug=False,
+ browser_version=None, debug_test=False, **kwargs):
+ """Marionette-based executor for reftests"""
+ RefTestExecutor.__init__(self,
+ logger,
+ browser,
+ server_config,
+ screenshot_cache=screenshot_cache,
+ timeout_multiplier=timeout_multiplier,
+ debug_info=debug_info)
+ self.protocol = MarionetteProtocol(self, browser, capabilities,
+ timeout_multiplier, kwargs["e10s"],
+ ccov)
+ self.implementation = self.get_implementation(reftest_internal)
+ self.implementation_kwargs = {}
+ if reftest_internal:
+ self.implementation_kwargs["screenshot"] = reftest_screenshot
+ self.implementation_kwargs["chrome_scope"] = False
+ # Older versions of Gecko require switching to chrome scope to run reftests
+ if browser_version is not None:
+ try:
+ major_version = int(browser_version.split(".")[0])
+ self.implementation_kwargs["chrome_scope"] = major_version < 82
+ except ValueError:
+ pass
+ self.close_after_done = close_after_done
+ self.has_window = False
+ self.original_pref_values = {}
+ self.group_metadata = group_metadata
+ self.debug = debug
+ self.debug_test = debug_test
+
+ self.install_extensions = browser.extensions
+
+ with open(os.path.join(here, "reftest.js")) as f:
+ self.script = f.read()
+ with open(os.path.join(here, "test-wait.js")) as f:
+ self.wait_script = f.read() % {"classname": "reftest-wait"}
+
+ def get_implementation(self, reftest_internal):
+ return (InternalRefTestImplementation if reftest_internal
+ else RefTestImplementation)(self)
+
+ def setup(self, runner):
+ super().setup(runner)
+ for extension_path in self.install_extensions:
+ self.logger.info("Installing extension from %s" % extension_path)
+ addons = Addons(self.protocol.marionette)
+ addons.install(extension_path)
+
+ self.implementation.setup(**self.implementation_kwargs)
+
+ def teardown(self):
+ try:
+ self.implementation.teardown()
+ if self.protocol.marionette and self.protocol.marionette.session_id:
+ handles = self.protocol.marionette.window_handles
+ if handles:
+ _switch_to_window(self.protocol.marionette, handles[0])
+ super().teardown()
+ except Exception:
+ # Ignore errors during teardown
+ self.logger.warning("Exception during reftest teardown:\n%s" %
+ traceback.format_exc())
+
+ def reset(self):
+ self.implementation.reset(**self.implementation_kwargs)
+
+ def is_alive(self):
+ return self.protocol.is_alive()
+
+ def on_environment_change(self, new_environment):
+ self.protocol.on_environment_change(self.last_environment, new_environment)
+
+ def do_test(self, test):
+ if not isinstance(self.implementation, InternalRefTestImplementation):
+ if self.close_after_done and self.has_window:
+ self.protocol.marionette.close()
+ _switch_to_window(self.protocol.marionette,
+ self.protocol.marionette.window_handles[-1])
+ self.has_window = False
+
+ if not self.has_window:
+ self.protocol.base.execute_script(self.script)
+ self.protocol.base.set_window(self.protocol.marionette.window_handles[-1])
+ self.has_window = True
+ self.protocol.testharness.test_window_loaded()
+
+ if self.protocol.coverage.is_enabled:
+ self.protocol.coverage.reset()
+
+ result = self.implementation.run_test(test)
+
+ if self.protocol.coverage.is_enabled:
+ self.protocol.coverage.dump()
+
+ if self.debug:
+ assertion_count = self.protocol.asserts.get()
+ if "extra" not in result:
+ result["extra"] = {}
+ if assertion_count is not None:
+ result["extra"]["assertion_count"] = assertion_count
+
+ if self.debug_test and result["status"] in ["PASS", "FAIL", "ERROR"] and "extra" in result:
+ self.protocol.base.set_window(self.protocol.base.window_handles()[0])
+ self.protocol.debug.load_reftest_analyzer(test, result)
+
+ return self.convert_result(test, result)
+
+ def screenshot(self, test, viewport_size, dpi, page_ranges):
+ # https://github.com/web-platform-tests/wpt/issues/7135
+ assert viewport_size is None
+ assert dpi is None
+
+ timeout = self.timeout_multiplier * test.timeout if self.debug_info is None else None
+
+ test_url = self.test_url(test)
+
+ return ExecuteAsyncScriptRun(self.logger,
+ self._screenshot,
+ self.protocol,
+ test_url,
+ timeout,
+ self.extra_timeout).run()
+
+ def _screenshot(self, protocol, url, timeout):
+ protocol.marionette.navigate(url)
+
+ protocol.base.execute_script(self.wait_script, asynchronous=True)
+
+ screenshot = protocol.marionette.screenshot(full=False)
+ # Strip off the "data:image/png;base64," prefix of the data URL.
+ if screenshot.startswith("data:image/png;base64,"):
+ screenshot = screenshot.split(",", 1)[1]
+
+ return screenshot
+
+
+class InternalRefTestImplementation(RefTestImplementation):
+ def __init__(self, executor):
+ self.timeout_multiplier = executor.timeout_multiplier
+ self.executor = executor
+ self.chrome_scope = False
+
+ @property
+ def logger(self):
+ return self.executor.logger
+
+ def setup(self, screenshot="unexpected", chrome_scope=False):
+ data = {"screenshot": screenshot, "isPrint": self.executor.is_print}
+ if self.executor.group_metadata is not None:
+ data["urlCount"] = {urljoin(self.executor.server_url(key[0]), key[1]):value
+ for key, value in self.executor.group_metadata.get("url_count", {}).items()
+ if value > 1}
+ self.chrome_scope = chrome_scope
+ if chrome_scope:
+ self.logger.debug("Using marionette Chrome scope for reftests")
+ self.executor.protocol.marionette.set_context(self.executor.protocol.marionette.CONTEXT_CHROME)
+ self.executor.protocol.marionette._send_message("reftest:setup", data)
+
+ def reset(self, **kwargs):
+ # This is obviously wrong; it shouldn't be a no-op.
+ # See https://github.com/web-platform-tests/wpt/issues/15604
+ pass
+
+ def run_test(self, test):
+ references = self.get_references(test, test)
+ timeout = (test.timeout * 1000) * self.timeout_multiplier
+ rv = self.executor.protocol.marionette._send_message("reftest:run",
+ {"test": self.executor.test_url(test),
+ "references": references,
+ "expected": test.expected(),
+ "timeout": timeout,
+ "width": 800,
+ "height": 600,
+ "pageRanges": test.page_ranges})["value"]
+ return rv
+
+ def get_references(self, root_test, node):
+ rv = []
+ for item, relation in node.references:
+ rv.append([self.executor.test_url(item), self.get_references(root_test, item), relation,
+ {"fuzzy": self.get_fuzzy(root_test, [node, item], relation)}])
+ return rv
+
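`get_references` above builds a nested list in which each entry is `[url, nested_references, relation, {"fuzzy": ...}]`. An illustrative value for a test whose reference B itself has a reference C (URLs and fuzzy values are made up):

    example_references = [
        ["/reftest/B.html",
         [["/reftest/C.html", [], "==", {"fuzzy": None}]],
         "==",
         {"fuzzy": None}],
    ]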
+ def teardown(self):
+ try:
+ if self.executor.protocol.marionette and self.executor.protocol.marionette.session_id:
+ self.executor.protocol.marionette._send_message("reftest:teardown", {})
+ if self.chrome_scope:
+ self.executor.protocol.marionette.set_context(
+ self.executor.protocol.marionette.CONTEXT_CONTENT)
+ # The reftest runner opens and closes a window with focus, so, as
+ # after closing any window, we need to give a new window
+ # focus.
+ handles = self.executor.protocol.marionette.window_handles
+ if handles:
+ _switch_to_window(self.executor.protocol.marionette, handles[0])
+ except Exception:
+ # Ignore errors during teardown
+ self.logger.warning(traceback.format_exc())
+
+
+class MarionetteCrashtestExecutor(CrashtestExecutor):
+ def __init__(self, logger, browser, server_config, timeout_multiplier=1,
+ debug_info=None, capabilities=None, debug=False,
+ ccov=False, **kwargs):
+ """Marionette-based executor for testharness.js tests"""
+ CrashtestExecutor.__init__(self, logger, browser, server_config,
+ timeout_multiplier=timeout_multiplier,
+ debug_info=debug_info)
+ self.protocol = MarionetteProtocol(self,
+ browser,
+ capabilities,
+ timeout_multiplier,
+ kwargs["e10s"],
+ ccov)
+
+ self.original_pref_values = {}
+ self.debug = debug
+
+ with open(os.path.join(here, "test-wait.js")) as f:
+ self.wait_script = f.read() % {"classname": "test-wait"}
+
+ if marionette is None:
+ do_delayed_imports()
+
+ def is_alive(self):
+ return self.protocol.is_alive()
+
+ def on_environment_change(self, new_environment):
+ self.protocol.on_environment_change(self.last_environment, new_environment)
+
+ def do_test(self, test):
+ timeout = (test.timeout * self.timeout_multiplier if self.debug_info is None
+ else None)
+
+ success, data = ExecuteAsyncScriptRun(self.logger,
+ self.do_crashtest,
+ self.protocol,
+ self.test_url(test),
+ timeout,
+ self.extra_timeout).run()
+ status = None
+ if not success:
+ status = data[0]
+
+ extra = None
+ if self.debug and (success or status not in ("CRASH", "INTERNAL-ERROR")):
+ assertion_count = self.protocol.asserts.get()
+ if assertion_count is not None:
+ extra = {"assertion_count": assertion_count}
+
+ if success:
+ return self.convert_result(test, data)
+
+ return (test.make_result(extra=extra, *data), [])
+
+ def do_crashtest(self, protocol, url, timeout):
+ if self.protocol.coverage.is_enabled:
+ self.protocol.coverage.reset()
+
+ protocol.base.load(url)
+ protocol.base.execute_script(self.wait_script, asynchronous=True)
+
+ if self.protocol.coverage.is_enabled:
+ self.protocol.coverage.dump()
+
+ return {"status": "PASS",
+ "message": None}
+
+
+class MarionettePrintRefTestExecutor(MarionetteRefTestExecutor):
+ is_print = True
+
+ def __init__(self, logger, browser, server_config, timeout_multiplier=1,
+ screenshot_cache=None, close_after_done=True,
+ debug_info=None, reftest_screenshot="unexpected", ccov=False,
+ group_metadata=None, capabilities=None, debug=False,
+ reftest_internal=False, **kwargs):
+ """Marionette-based executor for reftests"""
+ MarionetteRefTestExecutor.__init__(self,
+ logger,
+ browser,
+ server_config,
+ timeout_multiplier=timeout_multiplier,
+ screenshot_cache=screenshot_cache,
+ close_after_done=close_after_done,
+ debug_info=debug_info,
+ reftest_screenshot=reftest_screenshot,
+ reftest_internal=reftest_internal,
+ ccov=ccov,
+ group_metadata=group_metadata,
+ capabilities=capabilities,
+ debug=debug,
+ **kwargs)
+
+ def setup(self, runner):
+ super().setup(runner)
+ if not isinstance(self.implementation, InternalRefTestImplementation):
+ self.protocol.pdf_print.load_runner()
+
+ def get_implementation(self, reftest_internal):
+ return (InternalRefTestImplementation if reftest_internal
+ else RefTestImplementation)(self)
+
+ def screenshot(self, test, viewport_size, dpi, page_ranges):
+ # https://github.com/web-platform-tests/wpt/issues/7140
+ assert dpi is None
+
+ self.viewport_size = viewport_size
+ timeout = self.timeout_multiplier * test.timeout if self.debug_info is None else None
+
+ test_url = self.test_url(test)
+ self.page_ranges = page_ranges.get(test.url)
+
+ return ExecuteAsyncScriptRun(self.logger,
+ self._render,
+ self.protocol,
+ test_url,
+ timeout,
+ self.extra_timeout).run()
+
+ def _render(self, protocol, url, timeout):
+ protocol.marionette.navigate(url)
+
+ protocol.base.execute_script(self.wait_script, asynchronous=True)
+
+ pdf = protocol.pdf_print.render_as_pdf(*self.viewport_size)
+ screenshots = protocol.pdf_print.pdf_to_png(pdf, self.page_ranges)
+ for i, screenshot in enumerate(screenshots):
+ # Strip off the "data:image/png;base64," prefix of the data URL.
+ if screenshot.startswith("data:image/png;base64,"):
+ screenshots[i] = screenshot.split(",", 1)[1]
+
+ return screenshots
+
+
+class MarionetteWdspecExecutor(WdspecExecutor):
+ def __init__(self, logger, browser, *args, **kwargs):
+ super().__init__(logger, browser, *args, **kwargs)
+
+ args = self.capabilities["moz:firefoxOptions"].setdefault("args", [])
+ args.extend(["--profile", self.browser.profile])
+
+ for option in ["androidPackage", "androidDeviceSerial", "env"]:
+ if hasattr(browser, option):
+ self.capabilities["moz:firefoxOptions"][option] = getattr(browser, option)
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorselenium.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorselenium.py
new file mode 100644
index 0000000000..cf5ac2a22f
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorselenium.py
@@ -0,0 +1,463 @@
+# mypy: allow-untyped-defs
+
+import json
+import os
+import socket
+import threading
+import time
+import traceback
+import uuid
+from urllib.parse import urljoin
+
+from .base import (CallbackHandler,
+ RefTestExecutor,
+ RefTestImplementation,
+ TestharnessExecutor,
+ TimedRunner,
+ strip_server)
+from .protocol import (BaseProtocolPart,
+ TestharnessProtocolPart,
+ Protocol,
+ SelectorProtocolPart,
+ ClickProtocolPart,
+ CookiesProtocolPart,
+ SendKeysProtocolPart,
+ WindowProtocolPart,
+ ActionSequenceProtocolPart,
+ TestDriverProtocolPart)
+
+here = os.path.dirname(__file__)
+
+webdriver = None
+exceptions = None
+RemoteConnection = None
+Command = None
+
+
+def do_delayed_imports():
+ global webdriver
+ global exceptions
+ global RemoteConnection
+ global Command
+ from selenium import webdriver
+ from selenium.common import exceptions
+ from selenium.webdriver.remote.remote_connection import RemoteConnection
+ from selenium.webdriver.remote.command import Command
+
+
+class SeleniumBaseProtocolPart(BaseProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+
+ def execute_script(self, script, asynchronous=False):
+ method = self.webdriver.execute_async_script if asynchronous else self.webdriver.execute_script
+ return method(script)
+
+ def set_timeout(self, timeout):
+ self.webdriver.set_script_timeout(timeout * 1000)
+
+ @property
+ def current_window(self):
+ return self.webdriver.current_window_handle
+
+ def set_window(self, handle):
+ self.webdriver.switch_to_window(handle)
+
+ def window_handles(self):
+ return self.webdriver.window_handles
+
+ def load(self, url):
+ self.webdriver.get(url)
+
+ def wait(self):
+ while True:
+ try:
+ return self.webdriver.execute_async_script("""let callback = arguments[arguments.length - 1];
+addEventListener("__test_restart", e => {e.preventDefault(); callback(true)})""")
+ except exceptions.TimeoutException:
+ pass
+ except (socket.timeout, exceptions.NoSuchWindowException, exceptions.ErrorInResponseException, OSError):
+ break
+ except Exception:
+ self.logger.error(traceback.format_exc())
+ break
+ return False
+
+
+class SeleniumTestharnessProtocolPart(TestharnessProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+ self.runner_handle = None
+ with open(os.path.join(here, "runner.js")) as f:
+ self.runner_script = f.read()
+ with open(os.path.join(here, "window-loaded.js")) as f:
+ self.window_loaded_script = f.read()
+
+ def load_runner(self, url_protocol):
+ if self.runner_handle:
+ self.webdriver.switch_to_window(self.runner_handle)
+ url = urljoin(self.parent.executor.server_url(url_protocol),
+ "/testharness_runner.html")
+ self.logger.debug("Loading %s" % url)
+ self.webdriver.get(url)
+ self.runner_handle = self.webdriver.current_window_handle
+ format_map = {"title": threading.current_thread().name.replace("'", '"')}
+ self.parent.base.execute_script(self.runner_script % format_map)
+
+ def close_old_windows(self):
+ handles = [item for item in self.webdriver.window_handles if item != self.runner_handle]
+ for handle in handles:
+ try:
+ self.webdriver.switch_to_window(handle)
+ self.webdriver.close()
+ except exceptions.NoSuchWindowException:
+ pass
+ self.webdriver.switch_to_window(self.runner_handle)
+ return self.runner_handle
+
+ def get_test_window(self, window_id, parent, timeout=5):
+ """Find the test window amongst all the open windows.
+ This is assumed to be either the named window or the one after the parent in the list of
+ window handles
+
+ :param window_id: The DOM name of the Window
+ :param parent: The handle of the runner window
+ :param timeout: The time in seconds to wait for the window to appear. This is because in
+ some implementations there's a race between calling window.open and the
+ window being added to the list of WebDriver accessible windows."""
+ test_window = None
+ end_time = time.time() + timeout
+ while time.time() < end_time:
+ try:
+ # Try using the JSON serialization of the WindowProxy object,
+ # it's in Level 1 but nothing supports it yet
+ win_s = self.webdriver.execute_script("return window['%s'];" % window_id)
+ win_obj = json.loads(win_s)
+ test_window = win_obj["window-fcc6-11e5-b4f8-330a88ab9d7f"]
+ except Exception:
+ pass
+
+ if test_window is None:
+ after = self.webdriver.window_handles
+ if len(after) == 2:
+ test_window = next(iter(set(after) - {parent}))
+ elif after[0] == parent and len(after) > 2:
+ # Hope the first one here is the test window
+ test_window = after[1]
+
+ if test_window is not None:
+ assert test_window != parent
+ return test_window
+
+ time.sleep(0.1)
+
+ raise Exception("unable to find test window")
+
+ def test_window_loaded(self):
+ """Wait until the page in the new window has been loaded.
+
+ Ignore JavaScript exceptions that are thrown when the document
+ has been unloaded due to a process change.
+ """
+ while True:
+ try:
+ self.webdriver.execute_async_script(self.window_loaded_script)
+ break
+ except exceptions.JavascriptException:
+ pass
+
+
+class SeleniumSelectorProtocolPart(SelectorProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+
+ def elements_by_selector(self, selector):
+ return self.webdriver.find_elements_by_css_selector(selector)
+
+ def elements_by_selector_and_frame(self, element_selector, frame):
+ return self.webdriver.find_elements_by_css_selector(element_selector)
+
+
+class SeleniumClickProtocolPart(ClickProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+
+ def element(self, element):
+ return element.click()
+
+
+class SeleniumCookiesProtocolPart(CookiesProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+
+ def delete_all_cookies(self):
+ self.logger.info("Deleting all cookies")
+ return self.webdriver.delete_all_cookies()
+
+ def get_all_cookies(self):
+ self.logger.info("Getting all cookies")
+ return self.webdriver.get_all_cookies()
+
+ def get_named_cookie(self, name):
+ self.logger.info("Getting cookie named %s" % name)
+ try:
+ return self.webdriver.get_named_cookie(name)
+ except exceptions.NoSuchCookieException:
+ return None
+
+
+class SeleniumWindowProtocolPart(WindowProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+
+ def minimize(self):
+ self.previous_rect = self.webdriver.window.rect
+ self.logger.info("Minimizing")
+ return self.webdriver.minimize()
+
+ def set_rect(self, rect):
+ self.logger.info("Setting window rect")
+ self.webdriver.window.rect = rect
+
+ def get_rect(self):
+ self.logger.info("Getting window rect")
+ return self.webdriver.window.rect
+
+
+class SeleniumSendKeysProtocolPart(SendKeysProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+
+ def send_keys(self, element, keys):
+ return element.send_keys(keys)
+
+
+class SeleniumActionSequenceProtocolPart(ActionSequenceProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+
+ def send_actions(self, actions):
+ self.webdriver.execute(Command.W3C_ACTIONS, {"actions": actions})
+
+ def release(self):
+ self.webdriver.execute(Command.W3C_CLEAR_ACTIONS, {})
+
+
+class SeleniumTestDriverProtocolPart(TestDriverProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+
+ def send_message(self, cmd_id, message_type, status, message=None):
+ obj = {
+ "cmd_id": cmd_id,
+ "type": "testdriver-%s" % str(message_type),
+ "status": str(status)
+ }
+ if message:
+ obj["message"] = str(message)
+ self.webdriver.execute_script("window.postMessage(%s, '*')" % json.dumps(obj))
+
+
+class SeleniumProtocol(Protocol):
+ implements = [SeleniumBaseProtocolPart,
+ SeleniumTestharnessProtocolPart,
+ SeleniumSelectorProtocolPart,
+ SeleniumClickProtocolPart,
+ SeleniumCookiesProtocolPart,
+ SeleniumSendKeysProtocolPart,
+ SeleniumTestDriverProtocolPart,
+ SeleniumWindowProtocolPart,
+ SeleniumActionSequenceProtocolPart]
+
+ def __init__(self, executor, browser, capabilities, **kwargs):
+ do_delayed_imports()
+
+ super().__init__(executor, browser)
+ self.capabilities = capabilities
+ self.url = browser.webdriver_url
+ self.webdriver = None
+
+ def connect(self):
+ """Connect to browser via Selenium's WebDriver implementation."""
+ self.logger.debug("Connecting to Selenium on URL: %s" % self.url)
+
+ self.webdriver = webdriver.Remote(command_executor=RemoteConnection(self.url.strip("/"),
+ resolve_ip=False),
+ desired_capabilities=self.capabilities)
+
+ def teardown(self):
+ self.logger.debug("Hanging up on Selenium session")
+ try:
+ self.webdriver.quit()
+ except Exception:
+ pass
+ del self.webdriver
+
+ def is_alive(self):
+ try:
+ # Get a simple property over the connection
+ self.webdriver.current_window_handle
+ # TODO what exception?
+ except (socket.timeout, exceptions.ErrorInResponseException):
+ return False
+ return True
+
+ def after_connect(self):
+ self.testharness.load_runner(self.executor.last_environment["protocol"])
+
+
+class SeleniumRun(TimedRunner):
+ def set_timeout(self):
+ timeout = self.timeout
+
+ try:
+ self.protocol.base.set_timeout(timeout + self.extra_timeout)
+ except exceptions.ErrorInResponseException:
+ msg = "Lost WebDriver connection"
+ self.logger.error(msg)
+ return ("INTERNAL-ERROR", msg)
+
+ def run_func(self):
+ try:
+ self.result = True, self.func(self.protocol, self.url, self.timeout)
+ except exceptions.TimeoutException:
+ self.result = False, ("EXTERNAL-TIMEOUT", None)
+ except (socket.timeout, exceptions.ErrorInResponseException):
+ self.result = False, ("CRASH", None)
+ except Exception as e:
+ message = str(getattr(e, "message", ""))
+ if message:
+ message += "\n"
+ message += traceback.format_exc()
+ self.result = False, ("INTERNAL-ERROR", message)
+ finally:
+ self.result_flag.set()
+
+
+class SeleniumTestharnessExecutor(TestharnessExecutor):
+ supports_testdriver = True
+
+ def __init__(self, logger, browser, server_config, timeout_multiplier=1,
+ close_after_done=True, capabilities=None, debug_info=None,
+ **kwargs):
+ """Selenium-based executor for testharness.js tests"""
+ TestharnessExecutor.__init__(self, logger, browser, server_config,
+ timeout_multiplier=timeout_multiplier,
+ debug_info=debug_info)
+ self.protocol = SeleniumProtocol(self, browser, capabilities)
+ with open(os.path.join(here, "testharness_webdriver_resume.js")) as f:
+ self.script_resume = f.read()
+ self.close_after_done = close_after_done
+ self.window_id = str(uuid.uuid4())
+
+ def is_alive(self):
+ return self.protocol.is_alive()
+
+ def on_environment_change(self, new_environment):
+ if new_environment["protocol"] != self.last_environment["protocol"]:
+ self.protocol.testharness.load_runner(new_environment["protocol"])
+
+ def do_test(self, test):
+ url = self.test_url(test)
+
+ success, data = SeleniumRun(self.logger,
+ self.do_testharness,
+ self.protocol,
+ url,
+ test.timeout * self.timeout_multiplier,
+ self.extra_timeout).run()
+
+ if success:
+ return self.convert_result(test, data)
+
+ return (test.make_result(*data), [])
+
+ def do_testharness(self, protocol, url, timeout):
+ format_map = {"url": strip_server(url)}
+
+ parent_window = protocol.testharness.close_old_windows()
+ # Now start the test harness
+ protocol.base.execute_script("window.open('about:blank', '%s', 'noopener')" % self.window_id)
+ test_window = protocol.testharness.get_test_window(self.window_id,
+ parent_window,
+ timeout=5*self.timeout_multiplier)
+ self.protocol.base.set_window(test_window)
+ protocol.testharness.test_window_loaded()
+
+ protocol.base.load(url)
+
+ handler = CallbackHandler(self.logger, protocol, test_window)
+ while True:
+ result = protocol.base.execute_script(
+ self.script_resume % format_map, asynchronous=True)
+ done, rv = handler(result)
+ if done:
+ break
+ return rv
+
+
+class SeleniumRefTestExecutor(RefTestExecutor):
+ def __init__(self, logger, browser, server_config, timeout_multiplier=1,
+ screenshot_cache=None, close_after_done=True,
+ debug_info=None, capabilities=None, **kwargs):
+ """Selenium WebDriver-based executor for reftests"""
+ RefTestExecutor.__init__(self,
+ logger,
+ browser,
+ server_config,
+ screenshot_cache=screenshot_cache,
+ timeout_multiplier=timeout_multiplier,
+ debug_info=debug_info)
+ self.protocol = SeleniumProtocol(self, browser,
+ capabilities=capabilities)
+ self.implementation = RefTestImplementation(self)
+ self.close_after_done = close_after_done
+ self.has_window = False
+
+ with open(os.path.join(here, "test-wait.js")) as f:
+ self.wait_script = f.read() % {"classname": "reftest-wait"}
+
+ def reset(self):
+ self.implementation.reset()
+
+ def is_alive(self):
+ return self.protocol.is_alive()
+
+ def do_test(self, test):
+ self.logger.info("Test requires OS-level window focus")
+
+ width_offset, height_offset = self.protocol.webdriver.execute_script(
+ """return [window.outerWidth - window.innerWidth,
+ window.outerHeight - window.innerHeight];"""
+ )
+ self.protocol.webdriver.set_window_position(0, 0)
+ self.protocol.webdriver.set_window_size(800 + width_offset, 600 + height_offset)
+
+ result = self.implementation.run_test(test)
+
+ return self.convert_result(test, result)
+
+ def screenshot(self, test, viewport_size, dpi, page_ranges):
+ # https://github.com/web-platform-tests/wpt/issues/7135
+ assert viewport_size is None
+ assert dpi is None
+
+ return SeleniumRun(self.logger,
+ self._screenshot,
+ self.protocol,
+ self.test_url(test),
+ test.timeout,
+ self.extra_timeout).run()
+
+ def _screenshot(self, protocol, url, timeout):
+ webdriver = protocol.webdriver
+ webdriver.get(url)
+
+ webdriver.execute_async_script(self.wait_script)
+
+ screenshot = webdriver.get_screenshot_as_base64()
+
+ # strip off the "data:image/png;base64," prefix if present
+ if screenshot.startswith("data:image/png;base64,"):
+ screenshot = screenshot.split(",", 1)[1]
+
+ return screenshot
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorservo.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorservo.py
new file mode 100644
index 0000000000..9c938b6e75
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorservo.py
@@ -0,0 +1,333 @@
+# mypy: allow-untyped-defs
+
+import base64
+import json
+import os
+import subprocess
+import tempfile
+import threading
+import traceback
+import uuid
+
+from mozprocess import ProcessHandler
+
+from tools.serve.serve import make_hosts_file
+
+from .base import (RefTestImplementation,
+ crashtest_result_converter,
+ testharness_result_converter,
+ reftest_result_converter,
+ TimedRunner)
+from .process import ProcessTestExecutor
+from .protocol import ConnectionlessProtocol
+from ..browsers.base import browser_command
+
+
+pytestrunner = None
+webdriver = None
+
+
+class ServoExecutor(ProcessTestExecutor):
+ def __init__(self, logger, browser, server_config, timeout_multiplier, debug_info,
+ pause_after_test, reftest_screenshot="unexpected"):
+ ProcessTestExecutor.__init__(self, logger, browser, server_config,
+ timeout_multiplier=timeout_multiplier,
+ debug_info=debug_info,
+ reftest_screenshot=reftest_screenshot)
+ self.pause_after_test = pause_after_test
+ self.environment = {}
+ self.protocol = ConnectionlessProtocol(self, browser)
+
+ hosts_fd, self.hosts_path = tempfile.mkstemp()
+ with os.fdopen(hosts_fd, "w") as f:
+ f.write(make_hosts_file(server_config, "127.0.0.1"))
+
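+ # Point Servo at the generated hosts file (via HOST_FILE) so that the
+ # web-platform.test domains resolve to 127.0.0.1, and enable Rust
+ # backtraces so crashes produce useful logs.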
+ self.env_for_tests = os.environ.copy()
+ self.env_for_tests["HOST_FILE"] = self.hosts_path
+ self.env_for_tests["RUST_BACKTRACE"] = "1"
+
+ def teardown(self):
+ try:
+ os.unlink(self.hosts_path)
+ except OSError:
+ pass
+ ProcessTestExecutor.teardown(self)
+
+ def on_environment_change(self, new_environment):
+ self.environment = new_environment
+ return super().on_environment_change(new_environment)
+
+ def on_output(self, line):
+ line = line.decode("utf8", "replace")
+ if self.interactive:
+ print(line)
+ else:
+ self.logger.process_output(self.proc.pid, line, " ".join(self.command), self.test.url)
+
+ def build_servo_command(self, test, extra_args=None, debug_opts="replace-surrogates"):
+ args = [
+ "--hard-fail", "-u", "Servo/wptrunner",
+ # See https://github.com/servo/servo/issues/30080.
+ # For some reason rustls does not like the certificate generated by the WPT tooling.
+ "--ignore-certificate-errors",
+ "-z", self.test_url(test),
+ ]
+ if debug_opts:
+ args += ["-Z", debug_opts]
+ for stylesheet in self.browser.user_stylesheets:
+ args += ["--user-stylesheet", stylesheet]
+ for pref, value in self.environment.get('prefs', {}).items():
+ args += ["--pref", f"{pref}={value}"]
+ if self.browser.ca_certificate_path:
+ args += ["--certificate-path", self.browser.ca_certificate_path]
+ if extra_args:
+ args += extra_args
+ args += self.browser.binary_args
+ debug_args, command = browser_command(self.binary, args, self.debug_info)
+ if self.pause_after_test:
+ command.remove("-z")
+ return debug_args + command
+
+
+class ServoTestharnessExecutor(ServoExecutor):
+ convert_result = testharness_result_converter
+
+ def __init__(self, logger, browser, server_config, timeout_multiplier=1, debug_info=None,
+ pause_after_test=False, **kwargs):
+ ServoExecutor.__init__(self, logger, browser, server_config,
+ timeout_multiplier=timeout_multiplier,
+ debug_info=debug_info,
+ pause_after_test=pause_after_test)
+ self.result_data = None
+ self.result_flag = None
+
+ def do_test(self, test):
+ self.test = test
+ self.result_data = None
+ self.result_flag = threading.Event()
+
+ self.command = self.build_servo_command(test)
+
+ if not self.interactive:
+ self.proc = ProcessHandler(self.command,
+ processOutputLine=[self.on_output],
+ onFinish=self.on_finish,
+ env=self.env_for_tests,
+ storeOutput=False)
+ self.proc.run()
+ else:
+ self.proc = subprocess.Popen(self.command, env=self.env_for_tests)
+
+ try:
+ timeout = test.timeout * self.timeout_multiplier
+
+ # Now wait to get the output we expect, or until we reach the timeout
+ if not self.interactive and not self.pause_after_test:
+ wait_timeout = timeout + 5
+ self.result_flag.wait(wait_timeout)
+ else:
+ wait_timeout = None
+ self.proc.wait()
+
+ proc_is_running = True
+
+ if self.result_flag.is_set():
+ if self.result_data is not None:
+ result = self.convert_result(test, self.result_data)
+ else:
+ self.proc.wait()
+ result = (test.make_result("CRASH", None), [])
+ proc_is_running = False
+ else:
+ result = (test.make_result("TIMEOUT", None), [])
+
+ if proc_is_running:
+ if self.pause_after_test:
+ self.logger.info("Pausing until the browser exits")
+ self.proc.wait()
+ else:
+ self.proc.kill()
+ except: # noqa
+ self.proc.kill()
+ raise
+
+ return result
+
+ def on_output(self, line):
+ prefix = "ALERT: RESULT: "
+ decoded_line = line.decode("utf8", "replace")
+ if decoded_line.startswith(prefix):
+ self.result_data = json.loads(decoded_line[len(prefix):])
+ self.result_flag.set()
+ else:
+ ServoExecutor.on_output(self, line)
+
+ def on_finish(self):
+ self.result_flag.set()
+
+
+class TempFilename:
+ def __init__(self, directory):
+ self.directory = directory
+ self.path = None
+
+ def __enter__(self):
+ self.path = os.path.join(self.directory, str(uuid.uuid4()))
+ return self.path
+
+ def __exit__(self, *args, **kwargs):
+ try:
+ os.unlink(self.path)
+ except OSError:
+ pass
+
+
+class ServoRefTestExecutor(ServoExecutor):
+ convert_result = reftest_result_converter
+
+ def __init__(self, logger, browser, server_config, binary=None, timeout_multiplier=1,
+ screenshot_cache=None, debug_info=None, pause_after_test=False,
+ reftest_screenshot="unexpected", **kwargs):
+ ServoExecutor.__init__(self,
+ logger,
+ browser,
+ server_config,
+ timeout_multiplier=timeout_multiplier,
+ debug_info=debug_info,
+ reftest_screenshot=reftest_screenshot,
+ pause_after_test=pause_after_test)
+
+ self.screenshot_cache = screenshot_cache
+ self.reftest_screenshot = reftest_screenshot
+ self.implementation = RefTestImplementation(self)
+ self.tempdir = tempfile.mkdtemp()
+
+ def reset(self):
+ self.implementation.reset()
+
+ def teardown(self):
+ os.rmdir(self.tempdir)
+ ServoExecutor.teardown(self)
+
+ def screenshot(self, test, viewport_size, dpi, page_ranges):
+ with TempFilename(self.tempdir) as output_path:
+ extra_args = ["--exit",
+ "--output=%s" % output_path,
+ "--resolution", viewport_size or "800x600"]
+ debug_opts = "disable-text-aa,load-webfonts-synchronously,replace-surrogates"
+
+ if dpi:
+ extra_args += ["--device-pixel-ratio", dpi]
+
+ self.command = self.build_servo_command(test, extra_args, debug_opts)
+
+ if not self.interactive:
+ self.proc = ProcessHandler(self.command,
+ processOutputLine=[self.on_output],
+ env=self.env_for_tests)
+
+ try:
+ self.proc.run()
+ timeout = test.timeout * self.timeout_multiplier + 5
+ rv = self.proc.wait(timeout=timeout)
+ except KeyboardInterrupt:
+ self.proc.kill()
+ raise
+ else:
+ self.proc = subprocess.Popen(self.command, env=self.env_for_tests)
+ try:
+ rv = self.proc.wait()
+ except KeyboardInterrupt:
+ self.proc.kill()
+ raise
+
+ if rv is None:
+ self.proc.kill()
+ return False, ("EXTERNAL-TIMEOUT", None)
+
+ if rv != 0 or not os.path.exists(output_path):
+ return False, ("CRASH", None)
+
+ with open(output_path, "rb") as f:
+ # Might need to strip variable headers or something here
+ data = f.read()
+ # Returning the screenshot as a string could potentially be avoided,
+ # see https://github.com/web-platform-tests/wpt/issues/28929.
+ return True, [base64.b64encode(data).decode()]
+
+ def do_test(self, test):
+ self.test = test
+ result = self.implementation.run_test(test)
+
+ return self.convert_result(test, result)
+
+
+class ServoTimedRunner(TimedRunner):
+ def run_func(self):
+ try:
+ self.result = (True, self.func(self.protocol, self.url, self.timeout))
+ except Exception as e:
+ message = str(getattr(e, "message", ""))
+ if message:
+ message += "\n"
+ message += traceback.format_exc()
+ self.result = False, ("INTERNAL-ERROR", message)
+ finally:
+ self.result_flag.set()
+
+ def set_timeout(self):
+ pass
+
+
+class ServoCrashtestExecutor(ServoExecutor):
+ convert_result = crashtest_result_converter
+
+ def __init__(self, logger, browser, server_config, binary=None, timeout_multiplier=1,
+ screenshot_cache=None, debug_info=None, pause_after_test=False,
+ **kwargs):
+ ServoExecutor.__init__(self,
+ logger,
+ browser,
+ server_config,
+ timeout_multiplier=timeout_multiplier,
+ debug_info=debug_info,
+ pause_after_test=pause_after_test)
+
+ self.pause_after_test = pause_after_test
+ self.protocol = ConnectionlessProtocol(self, browser)
+
+ def do_test(self, test):
+ timeout = (test.timeout * self.timeout_multiplier if self.debug_info is None
+ else None)
+
+ test_url = self.test_url(test)
+ # We want to pass the full test object into build_servo_command,
+ # so stash it in the class
+ self.test = test
+ success, data = ServoTimedRunner(self.logger, self.do_crashtest, self.protocol,
+ test_url, timeout, self.extra_timeout).run()
+ # Ensure that no processes hang around if they time out.
+ self.proc.kill()
+
+ if success:
+ return self.convert_result(test, data)
+
+ return (test.make_result(*data), [])
+
+ def do_crashtest(self, protocol, url, timeout):
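+ # -x makes Servo exit once the page has loaded, so a clean (non-negative)
+ # exit status below is reported as the crashtest passing.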
+ self.command = self.build_servo_command(self.test, extra_args=["-x"])
+
+ if not self.interactive:
+ self.proc = ProcessHandler(self.command,
+ env=self.env_for_tests,
+ processOutputLine=[self.on_output],
+ storeOutput=False)
+ self.proc.run()
+ else:
+ self.proc = subprocess.Popen(self.command, env=self.env_for_tests)
+
+ self.proc.wait()
+
+ if self.proc.poll() >= 0:
+ return {"status": "PASS", "message": None}
+ return {"status": "CRASH", "message": None}
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorservodriver.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorservodriver.py
new file mode 100644
index 0000000000..5d7d55f30b
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorservodriver.py
@@ -0,0 +1,303 @@
+# mypy: allow-untyped-defs
+
+import json
+import os
+import socket
+import traceback
+
+from .base import (Protocol,
+ RefTestExecutor,
+ RefTestImplementation,
+ TestharnessExecutor,
+ TimedRunner,
+ strip_server)
+from .protocol import BaseProtocolPart
+from ..environment import wait_for_service
+
+webdriver = None
+ServoCommandExtensions = None
+
+here = os.path.dirname(__file__)
+
+
+def do_delayed_imports():
+ global webdriver
+ import webdriver
+
+ global ServoCommandExtensions
+
+ class ServoCommandExtensions:
+ def __init__(self, session):
+ self.session = session
+
+ @webdriver.client.command
+ def get_prefs(self, *prefs):
+ body = {"prefs": list(prefs)}
+ return self.session.send_session_command("POST", "servo/prefs/get", body)
+
+ @webdriver.client.command
+ def set_prefs(self, prefs):
+ body = {"prefs": prefs}
+ return self.session.send_session_command("POST", "servo/prefs/set", body)
+
+ @webdriver.client.command
+ def reset_prefs(self, *prefs):
+ body = {"prefs": list(prefs)}
+ return self.session.send_session_command("POST", "servo/prefs/reset", body)
+
+ def change_prefs(self, old_prefs, new_prefs):
+ # Servo interprets reset with an empty list as reset everything
+ if old_prefs:
+ self.reset_prefs(*old_prefs.keys())
+ self.set_prefs({k: parse_pref_value(v) for k, v in new_prefs.items()})
+
+
+# See parse_pref_from_command_line() in components/config/opts.rs
+def parse_pref_value(value):
+ if value == "true":
+ return True
+ if value == "false":
+ return False
+ try:
+ return float(value)
+ except ValueError:
+ return value
+
+
+class ServoBaseProtocolPart(BaseProtocolPart):
+ def execute_script(self, script, asynchronous=False):
+ pass
+
+ def set_timeout(self, timeout):
+ pass
+
+ def wait(self):
+ return False
+
+ def set_window(self, handle):
+ pass
+
+ def window_handles(self):
+ return []
+
+ def load(self, url):
+ pass
+
+
+class ServoWebDriverProtocol(Protocol):
+ implements = [ServoBaseProtocolPart]
+
+ def __init__(self, executor, browser, capabilities, **kwargs):
+ do_delayed_imports()
+ Protocol.__init__(self, executor, browser)
+ self.capabilities = capabilities
+ self.host = browser.webdriver_host
+ self.port = browser.webdriver_port
+ self.init_timeout = browser.init_timeout
+ self.session = None
+
+ def connect(self):
+ """Connect to browser via WebDriver."""
+ wait_for_service(self.logger, self.host, self.port, timeout=self.init_timeout)
+
+ self.session = webdriver.Session(self.host, self.port, extension=ServoCommandExtensions)
+ self.session.start()
+
+ def after_connect(self):
+ pass
+
+ def teardown(self):
+ self.logger.debug("Hanging up on WebDriver session")
+ try:
+ self.session.end()
+ except Exception:
+ pass
+
+ def is_alive(self):
+ try:
+ # Get a simple property over the connection
+ self.session.window_handle
+ # TODO what exception?
+ except Exception:
+ return False
+ return True
+
+ def wait(self):
+ while True:
+ try:
+ return self.session.execute_async_script("""let callback = arguments[arguments.length - 1];
+addEventListener("__test_restart", e => {e.preventDefault(); callback(true)})""")
+ except webdriver.TimeoutException:
+ pass
+ except (socket.timeout, OSError):
+ break
+ except Exception:
+ self.logger.error(traceback.format_exc())
+ break
+ return False
+
+
+class ServoWebDriverRun(TimedRunner):
+ def set_timeout(self):
+ pass
+
+ def run_func(self):
+ try:
+ self.result = True, self.func(self.protocol.session, self.url, self.timeout)
+ except webdriver.TimeoutException:
+ self.result = False, ("EXTERNAL-TIMEOUT", None)
+ except (socket.timeout, OSError):
+ self.result = False, ("CRASH", None)
+ except Exception as e:
+ message = str(getattr(e, "message", ""))
+ if message:
+ message += "\n"
+ message += traceback.format_exc()
+ self.result = False, ("INTERNAL-ERROR", message)
+ finally:
+ self.result_flag.set()
+
+
+class ServoWebDriverTestharnessExecutor(TestharnessExecutor):
+ supports_testdriver = True
+
+ def __init__(self, logger, browser, server_config, timeout_multiplier=1,
+ close_after_done=True, capabilities=None, debug_info=None,
+ **kwargs):
+ TestharnessExecutor.__init__(self, logger, browser, server_config,
+ timeout_multiplier=timeout_multiplier,
+ debug_info=debug_info)
+ self.protocol = ServoWebDriverProtocol(self, browser, capabilities=capabilities)
+ with open(os.path.join(here, "testharness_servodriver.js")) as f:
+ self.script = f.read()
+ self.timeout = None
+
+ def on_protocol_change(self, new_protocol):
+ pass
+
+ def is_alive(self):
+ return self.protocol.is_alive()
+
+ def do_test(self, test):
+ url = self.test_url(test)
+
+ timeout = test.timeout * self.timeout_multiplier + self.extra_timeout
+
+ if timeout != self.timeout:
+ try:
+ self.protocol.session.timeouts.script = timeout
+ self.timeout = timeout
+ except OSError:
+ msg = "Lost WebDriver connection"
+ self.logger.error(msg)
+ return ("INTERNAL-ERROR", msg)
+
+ success, data = ServoWebDriverRun(self.logger,
+ self.do_testharness,
+ self.protocol,
+ url,
+ timeout,
+ self.extra_timeout).run()
+
+ if success:
+ return self.convert_result(test, data)
+
+ return (test.make_result(*data), [])
+
+ def do_testharness(self, session, url, timeout):
+ session.url = url
+ result = json.loads(
+ session.execute_async_script(
+ self.script % {"abs_url": url,
+ "url": strip_server(url),
+ "timeout_multiplier": self.timeout_multiplier,
+ "timeout": timeout * 1000}))
+ # Prevent leaking every page in history until Servo develops a more sane
+ # page cache
+ session.back()
+ return result
+
+ def on_environment_change(self, new_environment):
+ self.protocol.session.extension.change_prefs(
+ self.last_environment.get("prefs", {}),
+ new_environment.get("prefs", {})
+ )
+
+
+class TimeoutError(Exception):
+ pass
+
+
+class ServoWebDriverRefTestExecutor(RefTestExecutor):
+ def __init__(self, logger, browser, server_config, timeout_multiplier=1,
+ screenshot_cache=None, capabilities=None, debug_info=None,
+ **kwargs):
+ """Selenium WebDriver-based executor for reftests"""
+ RefTestExecutor.__init__(self,
+ logger,
+ browser,
+ server_config,
+ screenshot_cache=screenshot_cache,
+ timeout_multiplier=timeout_multiplier,
+ debug_info=debug_info)
+ self.protocol = ServoWebDriverProtocol(self, browser,
+ capabilities=capabilities)
+ self.implementation = RefTestImplementation(self)
+ self.timeout = None
+ with open(os.path.join(here, "test-wait.js")) as f:
+ self.wait_script = f.read() % {"classname": "reftest-wait"}
+
+ def reset(self):
+ self.implementation.reset()
+
+ def is_alive(self):
+ return self.protocol.is_alive()
+
+ def do_test(self, test):
+ try:
+ result = self.implementation.run_test(test)
+ return self.convert_result(test, result)
+ except OSError:
+ return test.make_result("CRASH", None), []
+ except TimeoutError:
+ return test.make_result("TIMEOUT", None), []
+ except Exception as e:
+ message = getattr(e, "message", "")
+ if message:
+ message += "\n"
+ message += traceback.format_exc()
+ return test.make_result("INTERNAL-ERROR", message), []
+
+ def screenshot(self, test, viewport_size, dpi, page_ranges):
+ # https://github.com/web-platform-tests/wpt/issues/7135
+ assert viewport_size is None
+ assert dpi is None
+
+ timeout = (test.timeout * self.timeout_multiplier + self.extra_timeout
+ if self.debug_info is None else None)
+
+ if self.timeout != timeout:
+ try:
+ self.protocol.session.timeouts.script = timeout
+ self.timeout = timeout
+ except OSError:
+ msg = "Lost webdriver connection"
+ self.logger.error(msg)
+ return ("INTERNAL-ERROR", msg)
+
+ return ServoWebDriverRun(self.logger,
+ self._screenshot,
+ self.protocol,
+ self.test_url(test),
+ timeout,
+ self.extra_timeout).run()
+
+ def _screenshot(self, session, url, timeout):
+ session.url = url
+ session.execute_async_script(self.wait_script)
+ return session.screenshot()
+
+ def on_environment_change(self, new_environment):
+ self.protocol.session.extension.change_prefs(
+ self.last_environment.get("prefs", {}),
+ new_environment.get("prefs", {})
+ )
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorwebdriver.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorwebdriver.py
new file mode 100644
index 0000000000..b49b9e2b57
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorwebdriver.py
@@ -0,0 +1,762 @@
+# mypy: allow-untyped-defs
+
+import json
+import os
+import socket
+import threading
+import time
+import traceback
+import uuid
+from urllib.parse import urljoin
+
+from .base import (CallbackHandler,
+ CrashtestExecutor,
+ RefTestExecutor,
+ RefTestImplementation,
+ TestharnessExecutor,
+ TimedRunner,
+ strip_server)
+from .protocol import (BaseProtocolPart,
+ TestharnessProtocolPart,
+ Protocol,
+ SelectorProtocolPart,
+ AccessibilityProtocolPart,
+ ClickProtocolPart,
+ CookiesProtocolPart,
+ SendKeysProtocolPart,
+ ActionSequenceProtocolPart,
+ TestDriverProtocolPart,
+ GenerateTestReportProtocolPart,
+ SetPermissionProtocolPart,
+ VirtualAuthenticatorProtocolPart,
+ WindowProtocolPart,
+ DebugProtocolPart,
+ SPCTransactionsProtocolPart,
+ RPHRegistrationsProtocolPart,
+ FedCMProtocolPart,
+ VirtualSensorProtocolPart,
+ merge_dicts)
+
+from webdriver.client import Session
+from webdriver import error
+
+here = os.path.dirname(__file__)
+
+
+class WebDriverCallbackHandler(CallbackHandler):
+ unimplemented_exc = (NotImplementedError, error.UnknownCommandException)
+ expected_exc = (error.WebDriverException,)
+
+
+class WebDriverBaseProtocolPart(BaseProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+
+ def execute_script(self, script, asynchronous=False, args=None):
+ method = self.webdriver.execute_async_script if asynchronous else self.webdriver.execute_script
+ return method(script, args=args)
+
+ def set_timeout(self, timeout):
+ try:
+ self.webdriver.timeouts.script = timeout
+ except error.WebDriverException:
+ # workaround https://bugs.chromium.org/p/chromedriver/issues/detail?id=2057
+ body = {"type": "script", "ms": timeout * 1000}
+ self.webdriver.send_session_command("POST", "timeouts", body)
+
+ @property
+ def current_window(self):
+ return self.webdriver.window_handle
+
+ def set_window(self, handle):
+ self.webdriver.window_handle = handle
+
+ def window_handles(self):
+ return self.webdriver.handles
+
+ def load(self, url):
+ self.webdriver.url = url
+
+ def wait(self):
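+ # Loop here until the window or session goes away; the injected listener
+ # allows the page to signal a rerun by dispatching a __test_restart
+ # event while the runner is paused.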
+ while True:
+ try:
+ self.webdriver.execute_async_script("""let callback = arguments[arguments.length - 1];
+addEventListener("__test_restart", e => {e.preventDefault(); callback(true)})""")
+ self.webdriver.execute_async_script("")
+ except (error.TimeoutException,
+ error.ScriptTimeoutException,
+ error.JavascriptErrorException):
+ # A JavascriptErrorException will happen when we navigate;
+ # ignoring it makes it possible to reload the test while the
+ # harness remains paused
+ pass
+ except (socket.timeout, error.NoSuchWindowException, error.UnknownErrorException, OSError):
+ break
+ except Exception:
+ message = "Uncaught exception in WebDriverBaseProtocolPart.wait:\n"
+ message += traceback.format_exc()
+ self.logger.error(message)
+ break
+ return False
+
+
+class WebDriverTestharnessProtocolPart(TestharnessProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+ self.runner_handle = None
+ with open(os.path.join(here, "runner.js")) as f:
+ self.runner_script = f.read()
+ with open(os.path.join(here, "window-loaded.js")) as f:
+ self.window_loaded_script = f.read()
+
+ def load_runner(self, url_protocol):
+ if self.runner_handle:
+ self.webdriver.window_handle = self.runner_handle
+ url = urljoin(self.parent.executor.server_url(url_protocol),
+ "/testharness_runner.html")
+ self.logger.debug("Loading %s" % url)
+
+ self.webdriver.url = url
+ self.runner_handle = self.webdriver.window_handle
+ format_map = {"title": threading.current_thread().name.replace("'", '"')}
+ self.parent.base.execute_script(self.runner_script % format_map)
+
+ def close_old_windows(self):
+ self.webdriver.actions.release()
+ handles = [item for item in self.webdriver.handles if item != self.runner_handle]
+ for handle in handles:
+ self._close_window(handle)
+ self.webdriver.window_handle = self.runner_handle
+ return self.runner_handle
+
+ def _close_window(self, window_handle):
+ try:
+ self.webdriver.window_handle = window_handle
+ self.webdriver.window.close()
+ except error.NoSuchWindowException:
+ pass
+
+ def open_test_window(self, window_id):
+ self.webdriver.execute_script(
+ "window.open('about:blank', '%s', 'noopener')" % window_id)
+
+ def get_test_window(self, window_id, parent, timeout=5):
+ """Find the test window amongst all the open windows.
+ This is assumed to be either the named window or the one after the parent in the list of
+ window handles.
+
+ :param window_id: The DOM name of the Window
+ :param parent: The handle of the runner window
+ :param timeout: The time in seconds to wait for the window to appear. This is because in
+ some implementations there's a race between calling window.open and the
+ window being added to the list of WebDriver accessible windows."""
+ test_window = None
+ end_time = time.time() + timeout
+ while time.time() < end_time:
+ try:
+ # Try using the JSON serialization of the WindowProxy object,
+ # it's in Level 1 but nothing supports it yet
+ win_s = self.webdriver.execute_script("return window['%s'];" % window_id)
+ win_obj = json.loads(win_s)
+ test_window = win_obj["window-fcc6-11e5-b4f8-330a88ab9d7f"]
+ except Exception:
+ pass
+
+ if test_window is None:
+ test_window = self._poll_handles_for_test_window(parent)
+
+ if test_window is not None:
+ assert test_window != parent
+ return test_window
+
+ time.sleep(0.1)
+
+ raise Exception("unable to find test window")
+
+ def _poll_handles_for_test_window(self, parent):
+ test_window = None
+ after = self.webdriver.handles
+ if len(after) == 2:
+ test_window = next(iter(set(after) - {parent}))
+ elif after[0] == parent and len(after) > 2:
+ # Hope the first one here is the test window
+ test_window = after[1]
+ return test_window
+
+ def test_window_loaded(self):
+ """Wait until the page in the new window has been loaded.
+
+ Ignore JavaScript exceptions that are thrown when the document has
+ been unloaded due to a process change.
+ """
+ while True:
+ try:
+ self.webdriver.execute_script(self.window_loaded_script, asynchronous=True)
+ break
+ except error.JavascriptErrorException:
+ pass
+
+
+class WebDriverSelectorProtocolPart(SelectorProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+
+ def elements_by_selector(self, selector):
+ return self.webdriver.find.css(selector)
+
+
+class WebDriverAccessibilityProtocolPart(AccessibilityProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+
+ def get_computed_label(self, element):
+ return element.get_computed_label()
+
+ def get_computed_role(self, element):
+ return element.get_computed_role()
+
+
+class WebDriverClickProtocolPart(ClickProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+
+ def element(self, element):
+ self.logger.info("click " + repr(element))
+ return element.click()
+
+
+class WebDriverCookiesProtocolPart(CookiesProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+
+ def delete_all_cookies(self):
+ self.logger.info("Deleting all cookies")
+ return self.webdriver.send_session_command("DELETE", "cookie")
+
+ def get_all_cookies(self):
+ self.logger.info("Getting all cookies")
+ return self.webdriver.send_session_command("GET", "cookie")
+
+ def get_named_cookie(self, name):
+ self.logger.info("Getting cookie named %s" % name)
+ try:
+ return self.webdriver.send_session_command("GET", "cookie/%s" % name)
+ except error.NoSuchCookieException:
+ return None
+
+
+class WebDriverWindowProtocolPart(WindowProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+
+ def minimize(self):
+ self.logger.info("Minimizing")
+ return self.webdriver.window.minimize()
+
+ def set_rect(self, rect):
+ self.logger.info("Restoring")
+ self.webdriver.window.rect = rect
+
+ def get_rect(self):
+ self.logger.info("Getting rect")
+ return self.webdriver.window.rect
+
+
+class WebDriverSendKeysProtocolPart(SendKeysProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+
+ def send_keys(self, element, keys):
+ try:
+ return element.send_keys(keys)
+ except error.UnknownErrorException as e:
+ # workaround https://bugs.chromium.org/p/chromedriver/issues/detail?id=1999
+ if (e.http_status != 500 or
+ e.status_code != "unknown error"):
+ raise
+ return element.send_element_command("POST", "value", {"value": list(keys)})
+
+
+class WebDriverActionSequenceProtocolPart(ActionSequenceProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+
+ def send_actions(self, actions):
+ self.webdriver.actions.perform(actions['actions'])
+
+ def release(self):
+ self.webdriver.actions.release()
+
+
+class WebDriverTestDriverProtocolPart(TestDriverProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+
+ def send_message(self, cmd_id, message_type, status, message=None):
+ obj = {
+ "cmd_id": cmd_id,
+ "type": "testdriver-%s" % str(message_type),
+ "status": str(status)
+ }
+ if message:
+ obj["message"] = str(message)
+ self.webdriver.execute_script("window.postMessage(%s, '*')" % json.dumps(obj))
+
+ def _switch_to_frame(self, index_or_elem):
+ try:
+ self.webdriver.switch_frame(index_or_elem)
+ except (error.StaleElementReferenceException,
+ error.NoSuchFrameException) as e:
+ raise ValueError from e
+
+ def _switch_to_parent_frame(self):
+ self.webdriver.switch_frame("parent")
+
+
+class WebDriverGenerateTestReportProtocolPart(GenerateTestReportProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+
+ def generate_test_report(self, message):
+ json_message = {"message": message}
+ self.webdriver.send_session_command("POST", "reporting/generate_test_report", json_message)
+
+
+class WebDriverSetPermissionProtocolPart(SetPermissionProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+
+ def set_permission(self, descriptor, state):
+ permission_params_dict = {
+ "descriptor": descriptor,
+ "state": state,
+ }
+ self.webdriver.send_session_command("POST", "permissions", permission_params_dict)
+
+
+class WebDriverVirtualAuthenticatorProtocolPart(VirtualAuthenticatorProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+
+ def add_virtual_authenticator(self, config):
+ return self.webdriver.send_session_command("POST", "webauthn/authenticator", config)
+
+ def remove_virtual_authenticator(self, authenticator_id):
+ return self.webdriver.send_session_command("DELETE", "webauthn/authenticator/%s" % authenticator_id)
+
+ def add_credential(self, authenticator_id, credential):
+ return self.webdriver.send_session_command("POST", "webauthn/authenticator/%s/credential" % authenticator_id, credential)
+
+ def get_credentials(self, authenticator_id):
+ return self.webdriver.send_session_command("GET", "webauthn/authenticator/%s/credentials" % authenticator_id)
+
+ def remove_credential(self, authenticator_id, credential_id):
+ return self.webdriver.send_session_command("DELETE", f"webauthn/authenticator/{authenticator_id}/credentials/{credential_id}")
+
+ def remove_all_credentials(self, authenticator_id):
+ return self.webdriver.send_session_command("DELETE", "webauthn/authenticator/%s/credentials" % authenticator_id)
+
+ def set_user_verified(self, authenticator_id, uv):
+ return self.webdriver.send_session_command("POST", "webauthn/authenticator/%s/uv" % authenticator_id, uv)
+
+
+class WebDriverSPCTransactionsProtocolPart(SPCTransactionsProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+
+ def set_spc_transaction_mode(self, mode):
+ body = {"mode": mode}
+ return self.webdriver.send_session_command("POST", "secure-payment-confirmation/set-mode", body)
+
+
+class WebDriverRPHRegistrationsProtocolPart(RPHRegistrationsProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+
+ def set_rph_registration_mode(self, mode):
+ body = {"mode": mode}
+ return self.webdriver.send_session_command("POST", "custom-handlers/set-mode", body)
+
+
+class WebDriverFedCMProtocolPart(FedCMProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+
+ def cancel_fedcm_dialog(self):
+ return self.webdriver.send_session_command("POST", "fedcm/canceldialog")
+
+ def click_fedcm_dialog_button(self, dialog_button):
+ body = {"dialogButton": dialog_button}
+ return self.webdriver.send_session_command("POST", "fedcm/clickdialogbutton", body)
+
+ def select_fedcm_account(self, account_index):
+ body = {"accountIndex": account_index}
+ return self.webdriver.send_session_command("POST", "fedcm/selectaccount", body)
+
+ def get_fedcm_account_list(self):
+ return self.webdriver.send_session_command("GET", "fedcm/accountlist")
+
+ def get_fedcm_dialog_title(self):
+ return self.webdriver.send_session_command("GET", "fedcm/gettitle")
+
+ def get_fedcm_dialog_type(self):
+ return self.webdriver.send_session_command("GET", "fedcm/getdialogtype")
+
+ def set_fedcm_delay_enabled(self, enabled):
+ body = {"enabled": enabled}
+ return self.webdriver.send_session_command("POST", "fedcm/setdelayenabled", body)
+
+ def reset_fedcm_cooldown(self):
+ return self.webdriver.send_session_command("POST", "fedcm/resetcooldown")
+
+
+class WebDriverDebugProtocolPart(DebugProtocolPart):
+ def load_devtools(self):
+ raise NotImplementedError()
+
+
+class WebDriverVirtualSensorPart(VirtualSensorProtocolPart):
+ def setup(self):
+ self.webdriver = self.parent.webdriver
+
+ def create_virtual_sensor(self, sensor_type, sensor_params):
+ body = {"type": sensor_type}
+ body.update(sensor_params)
+ return self.webdriver.send_session_command("POST", "sensor", body)
+
+ def update_virtual_sensor(self, sensor_type, reading):
+ body = {"reading": reading}
+ return self.webdriver.send_session_command("POST", "sensor/%s" % sensor_type, body)
+
+ def remove_virtual_sensor(self, sensor_type):
+ return self.webdriver.send_session_command("DELETE", "sensor/%s" % sensor_type)
+
+ def get_virtual_sensor_information(self, sensor_type):
+ return self.webdriver.send_session_command("GET", "sensor/%s" % sensor_type)
+
+
+class WebDriverProtocol(Protocol):
+ implements = [WebDriverBaseProtocolPart,
+ WebDriverTestharnessProtocolPart,
+ WebDriverSelectorProtocolPart,
+ WebDriverAccessibilityProtocolPart,
+ WebDriverClickProtocolPart,
+ WebDriverCookiesProtocolPart,
+ WebDriverSendKeysProtocolPart,
+ WebDriverWindowProtocolPart,
+ WebDriverActionSequenceProtocolPart,
+ WebDriverTestDriverProtocolPart,
+ WebDriverGenerateTestReportProtocolPart,
+ WebDriverSetPermissionProtocolPart,
+ WebDriverVirtualAuthenticatorProtocolPart,
+ WebDriverSPCTransactionsProtocolPart,
+ WebDriverRPHRegistrationsProtocolPart,
+ WebDriverFedCMProtocolPart,
+ WebDriverDebugProtocolPart,
+ WebDriverVirtualSensorPart]
+
+ def __init__(self, executor, browser, capabilities, **kwargs):
+ super().__init__(executor, browser)
+ self.capabilities = capabilities
+ if hasattr(browser, "capabilities"):
+ if self.capabilities is None:
+ self.capabilities = browser.capabilities
+ else:
+ merge_dicts(self.capabilities, browser.capabilities)
+
+ pac = browser.pac
+ if pac is not None:
+ if self.capabilities is None:
+ self.capabilities = {}
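+ # Expose the PAC file served by the test server through the standard
+ # W3C "proxy" capability (proxyType "pac" plus proxyAutoconfigUrl).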
+ merge_dicts(self.capabilities, {"proxy":
+ {
+ "proxyType": "pac",
+ "proxyAutoconfigUrl": urljoin(executor.server_url("http"), pac)
+ }
+ })
+
+ self.url = browser.webdriver_url
+ self.webdriver = None
+
+ def connect(self):
+ """Connect to browser via WebDriver."""
+ self.logger.debug("Connecting to WebDriver on URL: %s" % self.url)
+
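+ # webdriver_url is assumed to look like "scheme://host:port/", so the
+ # second ":"-separated field is "//host" and the last one is the port.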
+ host, port = self.url.split(":")[1].strip("/"), self.url.split(':')[-1].strip("/")
+
+ capabilities = {"alwaysMatch": self.capabilities}
+ self.webdriver = Session(host, port, capabilities=capabilities)
+ self.webdriver.start()
+
+ def teardown(self):
+ self.logger.debug("Hanging up on WebDriver session")
+ try:
+ self.webdriver.end()
+ except Exception as e:
+ message = str(getattr(e, "message", ""))
+ if message:
+ message += "\n"
+ message += traceback.format_exc()
+ self.logger.debug(message)
+ self.webdriver = None
+
+ def is_alive(self):
+ try:
+ # Get a simple property over the connection, with a 2 second timeout.
+ # That should be more than enough to check whether the WebDriver session
+ # is still alive, and it keeps the check well within the 5 seconds of
+ # extra_timeout the testrunner allows for ending the test before its
+ # external timeout triggers.
+ self.webdriver.send_session_command("GET", "window", timeout=2)
+ except (socket.timeout, error.UnknownErrorException, error.InvalidSessionIdException):
+ return False
+ return True
+
+ def after_connect(self):
+ self.testharness.load_runner(self.executor.last_environment["protocol"])
+
+
+class WebDriverRun(TimedRunner):
+ def set_timeout(self):
+ try:
+ self.protocol.base.set_timeout(self.timeout + self.extra_timeout)
+ except error.UnknownErrorException:
+ msg = "Lost WebDriver connection"
+ self.logger.error(msg)
+ return ("INTERNAL-ERROR", msg)
+
+ def run_func(self):
+ try:
+ self.result = True, self.func(self.protocol, self.url, self.timeout)
+ except (error.TimeoutException, error.ScriptTimeoutException):
+ self.result = False, ("EXTERNAL-TIMEOUT", None)
+ except (socket.timeout, error.UnknownErrorException):
+ self.result = False, ("CRASH", None)
+ except Exception as e:
+ if (isinstance(e, error.WebDriverException) and
+ e.http_status == 408 and
+ e.status_code == "asynchronous script timeout"):
+ # workaround for https://bugs.chromium.org/p/chromedriver/issues/detail?id=2001
+ self.result = False, ("EXTERNAL-TIMEOUT", None)
+ else:
+ message = str(getattr(e, "message", ""))
+ if message:
+ message += "\n"
+ message += traceback.format_exc()
+ self.result = False, ("INTERNAL-ERROR", message)
+ finally:
+ self.result_flag.set()
+
+
+class WebDriverTestharnessExecutor(TestharnessExecutor):
+ supports_testdriver = True
+ protocol_cls = WebDriverProtocol
+
+ def __init__(self, logger, browser, server_config, timeout_multiplier=1,
+ close_after_done=True, capabilities=None, debug_info=None,
+ cleanup_after_test=True, **kwargs):
+ """WebDriver-based executor for testharness.js tests"""
+ TestharnessExecutor.__init__(self, logger, browser, server_config,
+ timeout_multiplier=timeout_multiplier,
+ debug_info=debug_info)
+ self.protocol = self.protocol_cls(self, browser, capabilities)
+ with open(os.path.join(here, "testharness_webdriver_resume.js")) as f:
+ self.script_resume = f.read()
+ with open(os.path.join(here, "window-loaded.js")) as f:
+ self.window_loaded_script = f.read()
+
+ self.close_after_done = close_after_done
+ self.window_id = str(uuid.uuid4())
+ self.cleanup_after_test = cleanup_after_test
+
+ def is_alive(self):
+ return self.protocol.is_alive()
+
+ def on_environment_change(self, new_environment):
+ if new_environment["protocol"] != self.last_environment["protocol"]:
+ self.protocol.testharness.load_runner(new_environment["protocol"])
+
+ def do_test(self, test):
+ url = self.test_url(test)
+
+ success, data = WebDriverRun(self.logger,
+ self.do_testharness,
+ self.protocol,
+ url,
+ test.timeout * self.timeout_multiplier,
+ self.extra_timeout).run()
+
+ if success:
+ return self.convert_result(test, data)
+
+ return (test.make_result(*data), [])
+
+ def do_testharness(self, protocol, url, timeout):
+ # The previous test may not have closed its old windows (if something
+ # went wrong or if cleanup_after_test was False), so clean up here.
+ parent_window = protocol.testharness.close_old_windows()
+
+ # Now start the test harness
+ protocol.testharness.open_test_window(self.window_id)
+ test_window = protocol.testharness.get_test_window(self.window_id,
+ parent_window,
+ timeout=5*self.timeout_multiplier)
+ self.protocol.base.set_window(test_window)
+
+ # Wait until about:blank has been loaded
+ protocol.base.execute_script(self.window_loaded_script, asynchronous=True)
+
+ handler = WebDriverCallbackHandler(self.logger, protocol, test_window)
+ protocol.webdriver.url = url
+
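+ # testharness_webdriver_resume.js blocks in the test window until the
+ # harness either finishes or requests a testdriver action; each returned
+ # payload is passed to the callback handler, which performs the action,
+ # and the loop continues until the handler reports the test as done.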
+ while True:
+ result = protocol.base.execute_script(
+ self.script_resume, asynchronous=True, args=[strip_server(url)])
+
+ # As of 2019-03-29, WebDriver does not define expected behavior for
+ # cases where the browser crashes during script execution:
+ #
+ # https://github.com/w3c/webdriver/issues/1308
+ if not isinstance(result, list) or len(result) != 2:
+ try:
+ is_alive = self.is_alive()
+ except error.WebDriverException:
+ is_alive = False
+
+ if not is_alive:
+ raise Exception("Browser crashed during script execution.")
+
+ done, rv = handler(result)
+ if done:
+ break
+
+ # Attempt to cleanup any leftover windows, if allowed. This is
+ # preferable as it will blame the correct test if something goes wrong
+ # closing windows, but if the user wants to see the test results we
+ # have to leave the window(s) open.
+ if self.cleanup_after_test:
+ protocol.testharness.close_old_windows()
+
+ return rv
+
+
+class WebDriverRefTestExecutor(RefTestExecutor):
+ protocol_cls = WebDriverProtocol
+
+ def __init__(self, logger, browser, server_config, timeout_multiplier=1,
+ screenshot_cache=None, close_after_done=True,
+ debug_info=None, capabilities=None, debug_test=False,
+ reftest_screenshot="unexpected", **kwargs):
+ """WebDriver-based executor for reftests"""
+ RefTestExecutor.__init__(self,
+ logger,
+ browser,
+ server_config,
+ screenshot_cache=screenshot_cache,
+ timeout_multiplier=timeout_multiplier,
+ debug_info=debug_info,
+ reftest_screenshot=reftest_screenshot)
+ self.protocol = self.protocol_cls(self,
+ browser,
+ capabilities=capabilities)
+ self.implementation = RefTestImplementation(self)
+ self.close_after_done = close_after_done
+ self.has_window = False
+ self.debug_test = debug_test
+
+ with open(os.path.join(here, "test-wait.js")) as f:
+ self.wait_script = f.read() % {"classname": "reftest-wait"}
+
+ def reset(self):
+ self.implementation.reset()
+
+ def is_alive(self):
+ return self.protocol.is_alive()
+
+ def do_test(self, test):
+ width_offset, height_offset = self.protocol.webdriver.execute_script(
+ """return [window.outerWidth - window.innerWidth,
+ window.outerHeight - window.innerHeight];"""
+ )
+ try:
+ self.protocol.webdriver.window.position = (0, 0)
+ except error.InvalidArgumentException:
+ # Safari 12 throws with 0 or 1, treating them as bools; fixed in STP
+ self.protocol.webdriver.window.position = (2, 2)
+ self.protocol.webdriver.window.size = (800 + width_offset, 600 + height_offset)
+
+ result = self.implementation.run_test(test)
+
+ if self.debug_test and result["status"] in ["PASS", "FAIL", "ERROR"] and "extra" in result:
+ self.protocol.debug.load_reftest_analyzer(test, result)
+
+ return self.convert_result(test, result)
+
+ def screenshot(self, test, viewport_size, dpi, page_ranges):
+ # https://github.com/web-platform-tests/wpt/issues/7135
+ assert viewport_size is None
+ assert dpi is None
+
+ return WebDriverRun(self.logger,
+ self._screenshot,
+ self.protocol,
+ self.test_url(test),
+ test.timeout,
+ self.extra_timeout).run()
+
+ def _screenshot(self, protocol, url, timeout):
+ self.protocol.base.load(url)
+
+ self.protocol.base.execute_script(self.wait_script, True)
+
+ screenshot = self.protocol.webdriver.screenshot()
+ if screenshot is None:
+ raise ValueError('screenshot is None')
+
+ # strip off the "data:image/png;base64," prefix if present
+ if screenshot.startswith("data:image/png;base64,"):
+ screenshot = screenshot.split(",", 1)[1]
+
+ return screenshot
+
+
+class WebDriverCrashtestExecutor(CrashtestExecutor):
+ protocol_cls = WebDriverProtocol
+
+ def __init__(self, logger, browser, server_config, timeout_multiplier=1,
+ screenshot_cache=None, close_after_done=True,
+ debug_info=None, capabilities=None, **kwargs):
+ """WebDriver-based executor for crashtests"""
+ CrashtestExecutor.__init__(self,
+ logger,
+ browser,
+ server_config,
+ screenshot_cache=screenshot_cache,
+ timeout_multiplier=timeout_multiplier,
+ debug_info=debug_info)
+ self.protocol = self.protocol_cls(self,
+ browser,
+ capabilities=capabilities)
+
+ with open(os.path.join(here, "test-wait.js")) as f:
+ self.wait_script = f.read() % {"classname": "test-wait"}
+
+ def do_test(self, test):
+ timeout = (test.timeout * self.timeout_multiplier if self.debug_info is None
+ else None)
+
+ success, data = WebDriverRun(self.logger,
+ self.do_crashtest,
+ self.protocol,
+ self.test_url(test),
+ timeout,
+ self.extra_timeout).run()
+
+ if success:
+ return self.convert_result(test, data)
+
+ return (test.make_result(*data), [])
+
+ def do_crashtest(self, protocol, url, timeout):
+ protocol.base.load(url)
+ protocol.base.execute_script(self.wait_script, asynchronous=True)
+
+ return {"status": "PASS",
+ "message": None}
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorwktr.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorwktr.py
new file mode 100644
index 0000000000..3c02fa799e
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorwktr.py
@@ -0,0 +1,268 @@
+# mypy: allow-untyped-defs
+
+from .base import RefTestExecutor, RefTestImplementation, CrashtestExecutor, TestharnessExecutor
+from .protocol import Protocol, ProtocolPart
+from time import time
+from queue import Empty
+from base64 import b64encode
+import json
+
+
+class CrashError(BaseException):
+ pass
+
+
+def _read_line(io_queue, deadline=None, encoding=None, errors="strict", raise_crash=True, logger=None):
+ """Reads a single line from the io queue. The read must succeed before `deadline` or
+ a TimeoutError is raised. The line is returned as a bytestring, or decoded using the
+ specified `encoding` if one is given. If `raise_crash` is set, a CrashError is raised
+ if the line happens to be a crash message.
+ """
+ current_time = time()
+
+ if deadline and current_time > deadline:
+ raise TimeoutError()
+
+ try:
+ line = io_queue.get(True, deadline - current_time if deadline else None)
+ if raise_crash and line.startswith(b"#CRASHED"):
+ raise CrashError()
+ except Empty as e:
+ logger.debug(f"got empty line with {time() - deadline} remaining")
+ raise TimeoutError() from e
+
+ return line.decode(encoding, errors) if encoding else line
+
+
+class WKTRTestPart(ProtocolPart):
+ """This protocol part is responsible for running tests via WebKitTestRunner's protocol mode.
+ """
+ name = "wktr_test"
+ eof_marker = '#EOF\n' # Marker sent by wktr after blocks.
+
+ def __init__(self, parent):
+ super().__init__(parent)
+ self.stdout_queue = parent.browser.stdout_queue
+ self.stdin_queue = parent.browser.stdin_queue
+
+ def do_test(self, command, timeout=None):
+ """Send a command to wktr and return the resulting outputs.
+
+ A command consists of a URL to navigate to, followed by optional options; see
+ https://github.com/WebKit/WebKit/blob/main/Tools/TestRunnerShared/TestCommand.cpp.
+
+ """
+ self._send_command(command + "'--timeout'%d" % (timeout * 1000))
+
+ deadline = time() + timeout if timeout else None
+ # The first block can also contain audio data but not in WPT.
+ text = self._read_block(deadline)
+ image = self._read_block(deadline)
+
+ return text, image
+
+ def _send_command(self, command):
+ """Sends a single `command`, i.e. a URL to open, to wktr.
+ """
+ self.stdin_queue.put((command + "\n").encode("utf-8"))
+
+ def _read_block(self, deadline=None):
+ """Tries to read a single block of content from stdout before the `deadline`.
+ """
+ while True:
+ line = _read_line(self.stdout_queue, deadline, "latin-1", logger=self.logger).rstrip()
+
+ if line == "Content-Type: text/plain":
+ return self._read_text_block(deadline)
+
+ if line == "Content-Type: image/png":
+ return self._read_image_block(deadline)
+
+ if line == "#EOF":
+ return None
+
+ def _read_text_block(self, deadline=None):
+ """Tries to read a plain text block in utf-8 encoding before the `deadline`.
+ """
+ result = ""
+
+ while True:
+ line = _read_line(self.stdout_queue, deadline, "utf-8", "replace", False, logger=self.logger)
+
+ if line.endswith(self.eof_marker):
+ result += line[:-len(self.eof_marker)]
+ break
+ elif line.endswith('#EOF\r\n'):
+ result += line[:-len('#EOF\r\n')]
+ self.logger.warning('Got a CRLF-terminated #EOF - this is a driver bug.')
+ break
+
+ result += line
+
+ return result
+
+ def _read_image_block(self, deadline=None):
+ """Tries to read an image block (as a binary png) before the `deadline`.
+ """
+ content_length_line = _read_line(self.stdout_queue, deadline, "utf-8", logger=self.logger)
+ assert content_length_line.startswith("Content-Length:")
+ content_length = int(content_length_line[15:])
+
+ result = bytearray()
+
+ while True:
+ line = _read_line(self.stdout_queue, deadline, raise_crash=False, logger=self.logger)
+ excess = len(line) + len(result) - content_length
+
+ if excess > 0:
+ # This is the line that contains the EOF marker.
+ assert excess == len(self.eof_marker)
+ result += line[:-excess]
+ break
+
+ result += line
+
+ return result
+
+
+class WKTRErrorsPart(ProtocolPart):
+ """This protocol part is responsible for collecting the errors reported by wktr.
+ """
+ name = "wktr_errors"
+
+ def __init__(self, parent):
+ super().__init__(parent)
+ self.stderr_queue = parent.browser.stderr_queue
+
+ def read_errors(self):
+ """Reads the entire content of the stderr queue as is available right now (no blocking).
+ """
+ result = ""
+
+ while not self.stderr_queue.empty():
+ # There is no potential for race conditions here because this is the only place
+ # where we read from the stderr queue.
+ result += _read_line(self.stderr_queue, None, "utf-8", "replace", False, logger=self.logger)
+
+ return result
+
+
+class WKTRProtocol(Protocol):
+ implements = [WKTRTestPart, WKTRErrorsPart]
+
+ def connect(self):
+ pass
+
+ def after_connect(self):
+ pass
+
+ def teardown(self):
+ # Close the queue properly to avoid broken pipe spam in the log.
+ self.browser.stdin_queue.close()
+ self.browser.stdin_queue.join_thread()
+
+ def is_alive(self):
+ """Checks if wktr is alive by determining if the IO pipes are still
+ open. This does not guarantee that the process is responsive.
+ """
+ return not self.browser.io_stopped.is_set()
+
+
+def _convert_exception(test, exception, errors):
+ """Converts our TimeoutError and CrashError exceptions into test results.
+ """
+ if isinstance(exception, TimeoutError):
+ return (test.make_result("EXTERNAL-TIMEOUT", errors), [])
+ if isinstance(exception, CrashError):
+ return (test.make_result("CRASH", errors), [])
+ raise exception
+
+
+class WKTRRefTestExecutor(RefTestExecutor):
+ def __init__(self, logger, browser, server_config, timeout_multiplier=1, screenshot_cache=None,
+ debug_info=None, reftest_screenshot="unexpected", **kwargs):
+ super().__init__(logger, browser, server_config, timeout_multiplier, screenshot_cache,
+ debug_info, reftest_screenshot, **kwargs)
+ self.implementation = RefTestImplementation(self)
+ self.protocol = WKTRProtocol(self, browser)
+
+ def reset(self):
+ self.implementation.reset()
+
+ def do_test(self, test):
+ try:
+ result = self.implementation.run_test(test)
+ self.protocol.wktr_errors.read_errors()
+ return self.convert_result(test, result)
+ except BaseException as exception:
+ return _convert_exception(test, exception, self.protocol.wktr_errors.read_errors())
+
+ def screenshot(self, test, viewport_size, dpi, page_ranges):
+ assert dpi is None
+ command = self.test_url(test)
+ command += "'--pixel-test'"
+ assert not self.is_print
+ _, image = self.protocol.wktr_test.do_test(
+ command, test.timeout * self.timeout_multiplier)
+
+ if not image:
+ return False, ("ERROR", self.protocol.wktr_errors.read_errors())
+
+ return True, b64encode(image).decode()
+
+ def wait(self):
+ return
+
+
+class WKTRCrashtestExecutor(CrashtestExecutor):
+ def __init__(self, logger, browser, server_config, timeout_multiplier=1, debug_info=None,
+ **kwargs):
+ super().__init__(logger, browser, server_config, timeout_multiplier, debug_info, **kwargs)
+ self.protocol = WKTRProtocol(self, browser)
+
+ def do_test(self, test):
+ try:
+ _ = self.protocol.wktr_test.do_test(self.test_url(test), test.timeout * self.timeout_multiplier)
+ self.protocol.wktr_errors.read_errors()
+ return self.convert_result(test, {"status": "PASS", "message": None})
+ except BaseException as exception:
+ return _convert_exception(test, exception, self.protocol.wktr_errors.read_errors())
+
+ def wait(self):
+ return
+
+
+class WKTRTestharnessExecutor(TestharnessExecutor):
+ def __init__(self, logger, browser, server_config, timeout_multiplier=1, debug_info=None,
+ **kwargs):
+ super().__init__(logger, browser, server_config, timeout_multiplier, debug_info, **kwargs)
+ self.protocol = WKTRProtocol(self, browser)
+
+ def do_test(self, test):
+ try:
+ text, _ = self.protocol.wktr_test.do_test(self.test_url(test),
+ test.timeout * self.timeout_multiplier)
+
+ errors = self.protocol.wktr_errors.read_errors()
+ if not text:
+ return (test.make_result("ERROR", errors), [])
+
+ output = None
+ output_prefix = "CONSOLE MESSAGE: WPTRUNNER OUTPUT:"
+
+ for line in text.split("\n"):
+ if line.startswith(output_prefix):
+ if output is None:
+ output = line[len(output_prefix):]
+ else:
+ return (test.make_result("ERROR", "multiple wptrunner outputs"), [])
+
+ if output is None:
+ return (test.make_result("ERROR", "no wptrunner output"), [])
+
+ return self.convert_result(test, json.loads(output))
+ except BaseException as exception:
+ return _convert_exception(test, exception, self.protocol.wktr_errors.read_errors())
+
+ def wait(self):
+ return
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/process.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/process.py
new file mode 100644
index 0000000000..4a2c01372e
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/process.py
@@ -0,0 +1,22 @@
+# mypy: allow-untyped-defs
+
+from .base import TestExecutor
+
+
+class ProcessTestExecutor(TestExecutor):
+ def __init__(self, *args, **kwargs):
+ TestExecutor.__init__(self, *args, **kwargs)
+ self.binary = self.browser.binary
+ self.interactive = (False if self.debug_info is None
+ else self.debug_info.interactive)
+
+ def setup(self, runner):
+ self.runner = runner
+ self.runner.send_message("init_succeeded")
+ return True
+
+ def is_alive(self):
+ return True
+
+ def do_test(self, test):
+ raise NotImplementedError
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/protocol.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/protocol.py
new file mode 100644
index 0000000000..e44d1a7666
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/protocol.py
@@ -0,0 +1,804 @@
+# mypy: allow-untyped-defs
+
+import traceback
+from http.client import HTTPConnection
+
+from abc import ABCMeta, abstractmethod
+from typing import ClassVar, List, Type
+
+
+def merge_dicts(target, source):
+ if not (isinstance(target, dict) and isinstance(source, dict)):
+ raise TypeError
+ for (key, source_value) in source.items():
+ if key not in target:
+ target[key] = source_value
+ else:
+ if isinstance(source_value, dict) and isinstance(target[key], dict):
+ merge_dicts(target[key], source_value)
+ else:
+ target[key] = source_value
+
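+# Illustrative behaviour (added commentary, not part of the upstream patch):
+# merge_dicts performs a recursive merge in which values from `source` win,
+# except that nested dicts are merged rather than replaced.
+def _example_merge_dicts():
+    target = {"prefs": {"a": 1}, "timeout": 10}
+    merge_dicts(target, {"prefs": {"b": 2}, "timeout": 20})
+    assert target == {"prefs": {"a": 1, "b": 2}, "timeout": 20}
+
+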
+class Protocol:
+ """Backend for a specific browser-control protocol.
+
+ Each Protocol is composed of a set of ProtocolParts that implement
+ the APIs required for specific interactions. This reflects the fact
+ that not all implementations will support exactly the same feature set.
+ Each ProtocolPart is exposed directly on the protocol through an accessor
+ attribute with a name given by its `name` property.
+
+ :param Executor executor: The Executor instance that's using this Protocol
+ :param Browser browser: The Browser using this protocol"""
+ __metaclass__ = ABCMeta
+
+ implements: ClassVar[List[Type["ProtocolPart"]]] = []
+
+ def __init__(self, executor, browser):
+ self.executor = executor
+ self.browser = browser
+
+ for cls in self.implements:
+ name = cls.name
+ assert not hasattr(self, name)
+ setattr(self, name, cls(self))
+
+ @property
+ def logger(self):
+ """:returns: Current logger"""
+ return self.executor.logger
+
+ def is_alive(self):
+ """Is the browser connection still active
+
+ :returns: A boolean indicating whether the connection is still active."""
+ return True
+
+ def setup(self, runner):
+ """Handle protocol setup, and send a message to the runner to indicate
+ success or failure."""
+ msg = None
+ try:
+ msg = "Failed to start protocol connection"
+ self.connect()
+
+ msg = None
+
+ for cls in self.implements:
+ getattr(self, cls.name).setup()
+
+ msg = "Post-connection steps failed"
+ self.after_connect()
+ except Exception:
+ message = "Protocol.setup caught an exception:\n"
+ message += f"{msg}\n" if msg is not None else ""
+ message += traceback.format_exc()
+ self.logger.warning(message)
+ raise
+
+ @abstractmethod
+ def connect(self):
+ """Make a connection to the remote browser"""
+ pass
+
+ @abstractmethod
+ def after_connect(self):
+ """Run any post-connection steps. This happens after the ProtocolParts are
+ initialized, so it can depend on a fully-populated object."""
+ pass
+
+ def teardown(self):
+ """Run cleanup steps after the tests are finished."""
+ for cls in self.implements:
+ getattr(self, cls.name).teardown()
+
+
+class ProtocolPart:
+ """Base class for all ProtocolParts.
+
+ :param Protocol parent: The parent protocol"""
+ __metaclass__ = ABCMeta
+
+ name: ClassVar[str]
+
+ def __init__(self, parent):
+ self.parent = parent
+
+ @property
+ def logger(self):
+ """:returns: Current logger"""
+ return self.parent.logger
+
+ def setup(self):
+ """Run any setup steps required for the ProtocolPart."""
+ pass
+
+ def teardown(self):
+ """Run any teardown steps required for the ProtocolPart."""
+ pass
+
+
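+# A minimal sketch (added commentary, not part of the upstream patch) of how a
+# Protocol composes ProtocolParts: each class listed in `implements` is
+# instantiated and exposed on the protocol under its `name` attribute.
+class _ExamplePingPart(ProtocolPart):
+    name = "ping"
+
+    def ping(self):
+        return "pong"
+
+
+class _ExampleProtocol(Protocol):
+    implements = [_ExamplePingPart]
+
+    def connect(self):
+        pass
+
+    def after_connect(self):
+        pass
+
+
+# _ExampleProtocol(executor, browser).ping.ping() would return "pong".
+
+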
+class BaseProtocolPart(ProtocolPart):
+ """Generic bits of protocol that are required for multiple test types"""
+ __metaclass__ = ABCMeta
+
+ name = "base"
+
+ @abstractmethod
+ def execute_script(self, script, asynchronous=False):
+ """Execute javascript in the current Window.
+
+ :param str script: The js source to execute. This is implicitly wrapped in a function.
+ :param bool asynchronous: Whether the script is asynchronous in the webdriver
+ sense i.e. whether the return value is the result of
+ the initial function call or if it waits for some callback.
+ :returns: The result of the script execution.
+ """
+ pass
+
+ @abstractmethod
+ def set_timeout(self, timeout):
+ """Set the timeout for script execution.
+
+ :param timeout: Script timeout in seconds"""
+ pass
+
+ @abstractmethod
+ def wait(self):
+ """Wait indefinitely for the browser to close.
+
+ :returns: True to re-run the test, or False to continue with the next test"""
+ pass
+
+ @property
+ def current_window(self):
+ """Return a handle identifying the current top level browsing context
+
+ :returns: A protocol-specific handle"""
+ pass
+
+ @abstractmethod
+ def set_window(self, handle):
+ """Set the top level browsing context to one specified by a given handle.
+
+ :param handle: A protocol-specific handle identifying a top level browsing
+ context."""
+ pass
+
+ @abstractmethod
+ def window_handles(self):
+ """Get a list of handles to top-level browsing contexts"""
+ pass
+
+ @abstractmethod
+ def load(self, url):
+ """Load a url in the current browsing context
+
+ :param url: The url to load"""
+ pass
+
+
+class TestharnessProtocolPart(ProtocolPart):
+ """Protocol part required to run testharness tests."""
+ __metaclass__ = ABCMeta
+
+ name = "testharness"
+
+ @abstractmethod
+ def load_runner(self, url_protocol):
+ """Load the initial page used to control the tests.
+
+ :param str url_protocol: "https" or "http" depending on the test metadata.
+ """
+ pass
+
+ @abstractmethod
+ def close_old_windows(self, url_protocol):
+ """Close existing windows except for the initial runner window.
+ After calling this method there must be exactly one open window that
+ contains the initial runner page.
+
+ :param str url_protocol: "https" or "http" depending on the test metadata.
+ """
+ pass
+
+ @abstractmethod
+ def get_test_window(self, window_id, parent):
+ """Get the window handle dorresponding to the window containing the
+ currently active test.
+
+ :param window_id: A string containing the DOM name of the Window that
+ contains the test, or None.
+ :param parent: The handle of the runner window.
+ :returns: A protocol-specific window handle.
+ """
+ pass
+
+ @abstractmethod
+ def test_window_loaded(self):
+ """Wait until the newly opened test window has been loaded."""
+
+
+class PrefsProtocolPart(ProtocolPart):
+ """Protocol part that allows getting and setting browser prefs."""
+ __metaclass__ = ABCMeta
+
+ name = "prefs"
+
+ @abstractmethod
+ def set(self, name, value):
+ """Set the named pref to value.
+
+ :param name: A pref name of browser-specific type
+ :param value: A pref value of browser-specific type"""
+ pass
+
+ @abstractmethod
+ def get(self, name):
+ """Get the current value of a named pref
+
+ :param name: A pref name of browser-specific type
+ :returns: A pref value of browser-specific type"""
+ pass
+
+ @abstractmethod
+ def clear(self, name):
+ """Reset the value of a named pref back to the default.
+
+ :param name: A pref name of browser-specific type"""
+ pass
+
+
+class StorageProtocolPart(ProtocolPart):
+ """Protocol part for manipulating browser storage."""
+ __metaclass__ = ABCMeta
+
+ name = "storage"
+
+ @abstractmethod
+ def clear_origin(self, url):
+ """Clear all the storage for a specified origin.
+
+ :param url: A url belonging to the origin"""
+ pass
+
+
+class SelectorProtocolPart(ProtocolPart):
+ """Protocol part for selecting elements on the page."""
+ __metaclass__ = ABCMeta
+
+ name = "select"
+
+ def element_by_selector(self, element_selector):
+ elements = self.elements_by_selector(element_selector)
+ if len(elements) == 0:
+ raise ValueError(f"Selector '{element_selector}' matches no elements")
+ elif len(elements) > 1:
+ raise ValueError(f"Selector '{element_selector}' matches multiple elements")
+ return elements[0]
+
+ @abstractmethod
+ def elements_by_selector(self, selector):
+ """Select elements matching a CSS selector
+
+ :param str selector: The CSS selector
+ :returns: A list of protocol-specific handles to elements"""
+ pass
+
+
+class ClickProtocolPart(ProtocolPart):
+ """Protocol part for performing trusted clicks"""
+ __metaclass__ = ABCMeta
+
+ name = "click"
+
+ @abstractmethod
+ def element(self, element):
+ """Perform a trusted click somewhere on a specific element.
+
+ :param element: A protocol-specific handle to an element."""
+ pass
+
+
+class AccessibilityProtocolPart(ProtocolPart):
+ """Protocol part for accessibility introspection"""
+ __metaclass__ = ABCMeta
+
+ name = "accessibility"
+
+ @abstractmethod
+ def get_computed_label(self, element):
+ """Return the computed accessibility label for a specific element.
+
+ :param element: A protocol-specific handle to an element."""
+ pass
+
+ def get_computed_role(self, element):
+ """Return the computed accessibility role for a specific element.
+
+ :param element: A protocol-specific handle to an element."""
+ pass
+
+
+class CookiesProtocolPart(ProtocolPart):
+ """Protocol part for managing cookies"""
+ __metaclass__ = ABCMeta
+
+ name = "cookies"
+
+ @abstractmethod
+ def delete_all_cookies(self):
+ """Delete all cookies."""
+ pass
+
+ @abstractmethod
+ def get_all_cookies(self):
+ """Get all cookies."""
+ pass
+
+ @abstractmethod
+ def get_named_cookie(self, name):
+ """Get named cookie.
+
+ :param name: The name of the cookie to get."""
+ pass
+
+
+class SendKeysProtocolPart(ProtocolPart):
+ """Protocol part for performing trusted clicks"""
+ __metaclass__ = ABCMeta
+
+ name = "send_keys"
+
+ @abstractmethod
+ def send_keys(self, element, keys):
+ """Send keys to a specific element.
+
+ :param element: A protocol-specific handle to an element.
+ :param keys: A protocol-specific handle to a string of input keys."""
+ pass
+
+class WindowProtocolPart(ProtocolPart):
+ """Protocol part for manipulating the window"""
+ __metaclass__ = ABCMeta
+
+ name = "window"
+
+ @abstractmethod
+ def set_rect(self, rect):
+ """Restores the window to the given rect."""
+ pass
+
+ @abstractmethod
+ def get_rect(self):
+ """Gets the current window rect."""
+ pass
+
+ @abstractmethod
+ def minimize(self):
+ """Minimizes the window and returns the previous rect."""
+ pass
+
+class GenerateTestReportProtocolPart(ProtocolPart):
+ """Protocol part for generating test reports"""
+ __metaclass__ = ABCMeta
+
+ name = "generate_test_report"
+
+ @abstractmethod
+ def generate_test_report(self, message):
+ """Generate a test report.
+
+ :param message: The message to be contained in the report."""
+ pass
+
+
+class SetPermissionProtocolPart(ProtocolPart):
+ """Protocol part for setting permissions"""
+ __metaclass__ = ABCMeta
+
+ name = "set_permission"
+
+ @abstractmethod
+ def set_permission(self, descriptor, state):
+ """Set permission state.
+
+ :param descriptor: A PermissionDescriptor object.
+ :param state: The state to set the permission to."""
+ pass
+
+
+class ActionSequenceProtocolPart(ProtocolPart):
+ """Protocol part for performing trusted clicks"""
+ __metaclass__ = ABCMeta
+
+ name = "action_sequence"
+
+ @abstractmethod
+ def send_actions(self, actions):
+ """Send a sequence of actions to the window.
+
+ :param actions: A protocol-specific handle to an array of actions."""
+ pass
+
+ def release(self):
+ pass
+
+
+class TestDriverProtocolPart(ProtocolPart):
+ """Protocol part that implements the basic functionality required for
+ all testdriver-based tests."""
+ __metaclass__ = ABCMeta
+
+ name = "testdriver"
+
+ @abstractmethod
+ def send_message(self, cmd_id, message_type, status, message=None):
+ """Send a testdriver message to the browser.
+
+ :param int cmd_id: The id of the command to which we're responding
+ :param str message_type: The kind of the message.
+ :param str status: Either "failure" or "success" depending on whether the
+ previous command succeeded.
+ :param str message: Additional data to add to the message."""
+ pass
+
+ def switch_to_window(self, wptrunner_id, initial_window=None):
+ """Switch to a window given a wptrunner window id
+
+ :param str wptrunner_id: Testdriver-specific id for the target window
+ :param str initial_window: WebDriver window id for the test window"""
+ if wptrunner_id is None:
+ return
+
+ if initial_window is None:
+ initial_window = self.parent.base.current_window
+
+ stack = [str(item) for item in self.parent.base.window_handles()]
+ first = True
+ while stack:
+ item = stack.pop()
+
+ if item is None:
+ assert first is False
+ self._switch_to_parent_frame()
+ continue
+
+ if isinstance(item, str):
+ if not first or item != initial_window:
+ self.parent.base.set_window(item)
+ first = False
+ else:
+ assert first is False
+ try:
+ self._switch_to_frame(item)
+ except ValueError:
+ # The frame no longer exists, or doesn't have a nested browsing context, so continue
+ continue
+
+ try:
+ # Get the window id and a list of elements containing nested browsing contexts.
+ # For embed we can't tell for sure if there's a nested browsing context, so always return it
+ # and fail later if there isn't
+ result = self.parent.base.execute_script("""
+ let contextParents = Array.from(document.querySelectorAll("frame, iframe, embed, object"))
+ .filter(elem => elem.localName !== "embed" ? (elem.contentWindow !== null) : true);
+ return [window.__wptrunner_id, contextParents]""")
+ except Exception:
+ continue
+
+ if result is None:
+ # With marionette at least this is possible if the content process crashed. Not quite
+ # sure how we want to handle that case.
+ continue
+
+ handle_window_id, nested_context_containers = result
+
+ if handle_window_id and str(handle_window_id) == wptrunner_id:
+ return
+
+ for elem in reversed(nested_context_containers):
+ # None here makes us switch back to the parent after we've processed the frame
+ stack.append(None)
+ stack.append(elem)
+
+ raise Exception("Window with id %s not found" % wptrunner_id)
+
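+    # Added commentary (not part of the upstream patch): a worked example of
+    # the traversal above. With top-level handles ["w1", "w2"] and the target
+    # test window living inside an iframe F of w1, the stack evolves roughly
+    # as ["w1", "w2"] -> pop "w2" (switch, no match, no frames) -> ["w1"] ->
+    # pop "w1" (switch, no match, one frame) -> [None, F] -> pop F (switch
+    # into the frame, id matches, return). Had F not matched, the None
+    # sentinel would switch back to w1 before continuing with any remaining
+    # handles.
+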
+ @abstractmethod
+ def _switch_to_frame(self, index_or_elem):
+ """Switch to a frame in the current window
+
+ :param int index_or_elem: Frame id or container element"""
+ pass
+
+ @abstractmethod
+ def _switch_to_parent_frame(self):
+ """Switch to the parent of the current frame"""
+ pass
+
+
+class AssertsProtocolPart(ProtocolPart):
+ """ProtocolPart that implements the functionality required to get a count of non-fatal
+ assertions triggered"""
+ __metaclass__ = ABCMeta
+
+ name = "asserts"
+
+ @abstractmethod
+ def get(self):
+ """Get a count of assertions since the last browser start"""
+ pass
+
+
+class CoverageProtocolPart(ProtocolPart):
+ """Protocol part for collecting per-test coverage data."""
+ __metaclass__ = ABCMeta
+
+ name = "coverage"
+
+ @abstractmethod
+ def reset(self):
+ """Reset coverage counters"""
+ pass
+
+ @abstractmethod
+ def dump(self):
+ """Dump coverage counters"""
+ pass
+
+
+class VirtualAuthenticatorProtocolPart(ProtocolPart):
+ """Protocol part for creating and manipulating virtual authenticators"""
+ __metaclass__ = ABCMeta
+
+ name = "virtual_authenticator"
+
+ @abstractmethod
+ def add_virtual_authenticator(self, config):
+ """Add a virtual authenticator
+
+ :param config: The Authenticator Configuration"""
+ pass
+
+ @abstractmethod
+ def remove_virtual_authenticator(self, authenticator_id):
+ """Remove a virtual authenticator
+
+ :param str authenticator_id: The ID of the authenticator to remove"""
+ pass
+
+ @abstractmethod
+ def add_credential(self, authenticator_id, credential):
+ """Inject a credential onto an authenticator
+
+ :param str authenticator_id: The ID of the authenticator to add the credential to
+ :param credential: The credential to inject"""
+ pass
+
+ @abstractmethod
+ def get_credentials(self, authenticator_id):
+ """Get the credentials stored in an authenticator
+
+ :param str authenticator_id: The ID of the authenticator
+ :returns: An array with the credentials stored on the authenticator"""
+ pass
+
+ @abstractmethod
+ def remove_credential(self, authenticator_id, credential_id):
+ """Remove a credential stored in an authenticator
+
+ :param str authenticator_id: The ID of the authenticator
+ :param str credential_id: The ID of the credential"""
+ pass
+
+ @abstractmethod
+ def remove_all_credentials(self, authenticator_id):
+ """Remove all the credentials stored in an authenticator
+
+ :param str authenticator_id: The ID of the authenticator"""
+ pass
+
+ @abstractmethod
+ def set_user_verified(self, authenticator_id, uv):
+ """Sets the user verified flag on an authenticator
+
+ :param str authenticator_id: The ID of the authenticator
+ :param bool uv: the user verified flag"""
+ pass
+
+
+class SPCTransactionsProtocolPart(ProtocolPart):
+ """Protocol part for Secure Payment Confirmation transactions"""
+ __metaclass__ = ABCMeta
+
+ name = "spc_transactions"
+
+ @abstractmethod
+ def set_spc_transaction_mode(self, mode):
+ """Set the SPC transaction automation mode
+
+ :param str mode: The automation mode to set"""
+ pass
+
+class RPHRegistrationsProtocolPart(ProtocolPart):
+ """Protocol part for Custom Handlers registrations"""
+ __metaclass__ = ABCMeta
+
+ name = "rph_registrations"
+
+ @abstractmethod
+ def set_rph_registration_mode(self, mode):
+ """Set the RPH registration automation mode
+
+ :param str mode: The automation mode to set"""
+ pass
+
+class FedCMProtocolPart(ProtocolPart):
+ """Protocol part for Federated Credential Management"""
+ __metaclass__ = ABCMeta
+
+ name = "fedcm"
+
+ @abstractmethod
+ def cancel_fedcm_dialog(self):
+ """Cancel the FedCM dialog"""
+ pass
+
+ @abstractmethod
+ def click_fedcm_dialog_button(self, dialog_button):
+ """Click a button on the FedCM dialog
+
+ :param str dialog_button: The dialog button to click"""
+ pass
+
+ @abstractmethod
+ def select_fedcm_account(self, account_index):
+ """Select a FedCM account
+
+ :param int account_index: The index of the account to select"""
+ pass
+
+ @abstractmethod
+ def get_fedcm_account_list(self):
+ """Get the FedCM account list"""
+ pass
+
+ @abstractmethod
+ def get_fedcm_dialog_title(self):
+ """Get the FedCM dialog title"""
+ pass
+
+ @abstractmethod
+ def get_fedcm_dialog_type(self):
+ """Get the FedCM dialog type"""
+ pass
+
+ @abstractmethod
+ def set_fedcm_delay_enabled(self, enabled):
+ """Sets the FedCM delay as enabled or disabled
+
+ :param bool enabled: The delay to set"""
+ pass
+
+ @abstractmethod
+ def reset_fedcm_cooldown(self):
+ """Set the FedCM cooldown"""
+ pass
+
+
+class PrintProtocolPart(ProtocolPart):
+ """Protocol part for rendering to a PDF."""
+ __metaclass__ = ABCMeta
+
+ name = "pdf_print"
+
+ @abstractmethod
+ def render_as_pdf(self, width, height):
+ """Output document as PDF"""
+ pass
+
+
+class DebugProtocolPart(ProtocolPart):
+ """Protocol part for debugging test failures."""
+ __metaclass__ = ABCMeta
+
+ name = "debug"
+
+ @abstractmethod
+ def load_devtools(self):
+ """Load devtools in the current window"""
+ pass
+
+ def load_reftest_analyzer(self, test, result):
+ import io
+ import mozlog
+ from urllib.parse import quote, urljoin
+
+ debug_test_logger = mozlog.structuredlog.StructuredLogger("debug_test")
+ output = io.StringIO()
+ debug_test_logger.suite_start([])
+ debug_test_logger.add_handler(mozlog.handlers.StreamHandler(output, formatter=mozlog.formatters.TbplFormatter()))
+ debug_test_logger.test_start(test.id)
+ # Always use PASS as the expected value so we get output even for expected failures
+ debug_test_logger.test_end(test.id, result["status"], "PASS", extra=result.get("extra"))
+
+ self.parent.base.load(urljoin(self.parent.executor.server_url("https"),
+ "/common/third_party/reftest-analyzer.xhtml#log=%s" %
+ quote(output.getvalue())))
+
+
+class ConnectionlessBaseProtocolPart(BaseProtocolPart):
+ def load(self, url):
+ pass
+
+ def execute_script(self, script, asynchronous=False):
+ pass
+
+ def set_timeout(self, timeout):
+ pass
+
+ def wait(self):
+ return False
+
+ def set_window(self, handle):
+ pass
+
+ def window_handles(self):
+ return []
+
+
+class ConnectionlessProtocol(Protocol):
+ implements = [ConnectionlessBaseProtocolPart]
+
+ def connect(self):
+ pass
+
+ def after_connect(self):
+ pass
+
+
+class WdspecProtocol(ConnectionlessProtocol):
+ implements = [ConnectionlessBaseProtocolPart]
+
+ def __init__(self, executor, browser):
+ super().__init__(executor, browser)
+
+ def is_alive(self):
+ """Test that the connection is still alive.
+
+ Because the remote communication happens over HTTP we need to
+ make an explicit request to the remote. It is allowed for
+ WebDriver spec tests to not have a WebDriver session, since this
+ may be what is tested.
+
+ An HTTP request to an invalid path that results in a 404 is
+ proof enough to us that the server is alive and kicking.
+ """
+ conn = HTTPConnection(self.browser.host, self.browser.port)
+ conn.request("HEAD", "/invalid")
+ res = conn.getresponse()
+ return res.status == 404
+
+
+class VirtualSensorProtocolPart(ProtocolPart):
+ """Protocol part for Sensors"""
+ __metaclass__ = ABCMeta
+
+ name = "virtual_sensor"
+
+ @abstractmethod
+ def create_virtual_sensor(self, sensor_type, sensor_params):
+ pass
+
+ @abstractmethod
+ def update_virtual_sensor(self, sensor_type, reading):
+ pass
+
+ @abstractmethod
+ def remove_virtual_sensor(self, sensor_type):
+ pass
+
+ @abstractmethod
+ def get_virtual_sensor_information(self, sensor_type):
+ pass
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/pytestrunner/__init__.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/pytestrunner/__init__.py
new file mode 100644
index 0000000000..1baaf9573a
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/pytestrunner/__init__.py
@@ -0,0 +1 @@
+from .runner import run # noqa: F401
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/pytestrunner/runner.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/pytestrunner/runner.py
new file mode 100644
index 0000000000..df7b7c4013
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/pytestrunner/runner.py
@@ -0,0 +1,179 @@
+# mypy: allow-untyped-defs
+
+"""
+Provides interface to deal with pytest.
+
+Usage::
+
+ session = webdriver.client.Session("127.0.0.1", "4444", "/")
+ harness_result = ("OK", None)
+ subtest_results = pytestrunner.run("/path/to/test", session.url)
+ return (harness_result, subtest_results)
+"""
+
+import errno
+import json
+import os
+import shutil
+import tempfile
+from collections import OrderedDict
+
+
+pytest = None
+
+
+def do_delayed_imports():
+ global pytest
+ import pytest
+
+
+def run(path, server_config, session_config, timeout=0):
+ """
+ Run the Python test at ``path`` under pytest. The WebDriver session
+ described by ``session_config`` is exposed as a fixture available in the
+ scope of the test functions.
+
+ :param path: Path to the test file.
+ :param server_config: wptserve configuration; written alongside
+ ``session_config`` to the config file exposed to the tests
+ :param session_config: dictionary of host, port and capabilities parameters
+ to pass through to the webdriver session
+ :param timeout: Duration before interrupting potentially hanging
+ tests. If 0, there is no timeout.
+
+ :returns: (<harness result>, [<subtest result>, ...]),
+ where <subtest result> is (test id, status, message, stacktrace).
+ """
+ if pytest is None:
+ do_delayed_imports()
+
+ old_environ = os.environ.copy()
+ try:
+ with TemporaryDirectory() as cache:
+ config_path = os.path.join(cache, "wd_config.json")
+ os.environ["WDSPEC_CONFIG_FILE"] = config_path
+
+ config = session_config.copy()
+ config["wptserve"] = server_config.as_dict()
+
+ with open(config_path, "w") as f:
+ json.dump(config, f)
+
+ harness = HarnessResultRecorder()
+ subtests = SubtestResultRecorder()
+
+ try:
+ basetemp = os.path.join(cache, "pytest")
+ pytest.main(
+ [
+ "--strict-markers", # turn function marker warnings into errors
+ "-vv", # show each individual subtest and full failure logs
+ "--capture",
+ "no", # enable stdout/stderr from tests
+ "--basetemp",
+ basetemp, # temporary directory
+ "--showlocals", # display contents of variables in local scope
+ "-p",
+ "no:mozlog", # use the WPT result recorder
+ "-p",
+ "no:cacheprovider", # disable state preservation across invocations
+ "-o=console_output_style=classic", # disable test progress bar
+ path,
+ ],
+ plugins=[harness, subtests],
+ )
+ except Exception as e:
+ harness.outcome = ("INTERNAL-ERROR", str(e))
+
+ finally:
+ os.environ = old_environ
+
+ subtests_results = [(key,) + value for (key, value) in subtests.results.items()]
+ return (harness.outcome, subtests_results)
+
+
+class HarnessResultRecorder:
+ outcomes = {
+ "failed": "ERROR",
+ "passed": "OK",
+ "skipped": "SKIP",
+ }
+
+ def __init__(self):
+ # we are ok unless told otherwise
+ self.outcome = ("OK", None)
+
+ def pytest_collectreport(self, report):
+ harness_result = self.outcomes[report.outcome]
+ self.outcome = (harness_result, None)
+
+
+class SubtestResultRecorder:
+ def __init__(self):
+ self.results = OrderedDict()
+
+ def pytest_runtest_logreport(self, report):
+ if report.passed and report.when == "call":
+ self.record_pass(report)
+ elif report.failed:
+ # pytest outputs the stacktrace followed by an error message prefixed
+ # with "E ", e.g.
+ #
+ # def test_example():
+ # > assert "fuu" in "foobar"
+ # E AssertionError: assert 'fuu' in 'foobar'
+ message = ""
+ for line in report.longreprtext.splitlines():
+ if line.startswith("E "):
+ message = line[1:].strip()
+ break
+
+ if report.when != "call":
+ self.record_error(report, message)
+ else:
+ self.record_fail(report, message)
+ elif report.skipped:
+ self.record_skip(report)
+
+ def record_pass(self, report):
+ self.record(report.nodeid, "PASS")
+
+ def record_fail(self, report, message):
+ self.record(report.nodeid, "FAIL", message=message, stack=report.longrepr)
+
+ def record_error(self, report, message):
+ # error in setup/teardown
+ message = f"{report.when} error: {message}"
+ self.record(report.nodeid, "ERROR", message, report.longrepr)
+
+ def record_skip(self, report):
+ self.record(
+ report.nodeid,
+ "ERROR",
+ "In-test skip decorators are disallowed, "
+ "please use WPT metadata to ignore tests.",
+ )
+
+ def record(self, test, status, message=None, stack=None):
+ if stack is not None:
+ stack = str(stack)
+ # Ensure we get a single result per subtest; pytest will sometimes
+ # call pytest_runtest_logreport more than once per test e.g. if
+ # it fails and then there's an error during teardown.
+ subtest_id = test.split("::")[-1]
+ if subtest_id in self.results and status == "PASS":
+ # This shouldn't happen, but never overwrite an existing result with PASS
+ return
+ new_result = (status, message, stack)
+ self.results[subtest_id] = new_result
+
+
+class TemporaryDirectory:
+ def __enter__(self):
+ self.path = tempfile.mkdtemp(prefix="wdspec-")
+ return self.path
+
+ def __exit__(self, *args):
+ try:
+ shutil.rmtree(self.path)
+ except OSError as e:
+ # no such file or directory
+ if e.errno != errno.ENOENT:
+ raise
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/reftest.js b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/reftest.js
new file mode 100644
index 0000000000..1ba98c686f
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/reftest.js
@@ -0,0 +1 @@
+var win = window.open("about:blank", "test", "left=0,top=0,width=800,height=600");
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/runner.js b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/runner.js
new file mode 100644
index 0000000000..171e6febd9
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/runner.js
@@ -0,0 +1 @@
+document.title = '%(title)s';
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/test-wait.js b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/test-wait.js
new file mode 100644
index 0000000000..ad08ad7d76
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/test-wait.js
@@ -0,0 +1,55 @@
+var callback = arguments[arguments.length - 1];
+var observer = null;
+var root = document.documentElement;
+
+function wait_load() {
+ if (Document.prototype.hasOwnProperty("fonts")) {
+ document.fonts.ready.then(wait_paints);
+ } else {
+ // This might take the screenshot too early, depending on whether the
+ // load event is blocked on fonts being loaded. See:
+ // https://github.com/w3c/csswg-drafts/issues/1088
+ wait_paints();
+ }
+}
+
+
+function wait_paints() {
+ // As of 2017-04-05, the Chromium web browser exhibits a rendering bug
+ // (https://bugs.chromium.org/p/chromium/issues/detail?id=708757) that
+ // produces instability during screen capture. The following use of
+ // `requestAnimationFrame` is intended as a short-term workaround, though
+ // it is not guaranteed to resolve the issue.
+ //
+ // For further detail, see:
+ // https://github.com/jugglinmike/chrome-screenshot-race/issues/1
+
+ requestAnimationFrame(function() {
+ requestAnimationFrame(function() {
+ screenshot_if_ready();
+ });
+ });
+}
+
+function screenshot_if_ready() {
+ if (root &&
+ root.classList.contains("%(classname)s") &&
+ observer === null) {
+ observer = new MutationObserver(wait_paints);
+ observer.observe(root, {attributes: true});
+ var event = new Event("TestRendered", {bubbles: true});
+ root.dispatchEvent(event);
+ return;
+ }
+ if (observer !== null) {
+ observer.disconnect();
+ }
+ callback();
+}
+
+
+if (document.readyState != "complete") {
+ addEventListener('load', wait_load);
+} else {
+ wait_load();
+}
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/testharness_servodriver.js b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/testharness_servodriver.js
new file mode 100644
index 0000000000..d731cc04d7
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/testharness_servodriver.js
@@ -0,0 +1,2 @@
+window.__wd_results_callback__ = arguments[arguments.length - 1];
+window.__wd_results_timer__ = setTimeout(timeout, %(timeout)s);
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/testharness_webdriver_resume.js b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/testharness_webdriver_resume.js
new file mode 100644
index 0000000000..4e7b63090e
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/testharness_webdriver_resume.js
@@ -0,0 +1,5 @@
+// We have to set the url here to ensure we get the same escaping as in the harness
+// and also to handle the case where the test changes the fragment
+window.__wptrunner_url = arguments[0];
+window.__wptrunner_testdriver_callback = arguments[arguments.length - 1];
+window.__wptrunner_process_next_event();
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/window-loaded.js b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/window-loaded.js
new file mode 100644
index 0000000000..78d73285a4
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/window-loaded.js
@@ -0,0 +1,9 @@
+const [resolve] = arguments;
+
+if (document.readyState != "complete") {
+ window.addEventListener("load", () => {
+ resolve();
+ }, { once: true });
+} else {
+ resolve();
+}
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/expected.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/expected.py
new file mode 100644
index 0000000000..72607ea25f
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/expected.py
@@ -0,0 +1,16 @@
+# mypy: allow-untyped-defs
+
+import os
+
+
+def expected_path(metadata_path, test_path):
+ """Path to the expectation data file for a given test path.
+
+ This is defined as metadata_path + relative_test_path + .ini
+
+ :param metadata_path: Path to the root of the metadata directory
+ :param test_path: Relative path to the test file from the test root
+ """
+ args = list(test_path.split("/"))
+ args[-1] += ".ini"
+ return os.path.join(metadata_path, *args)
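+
+
+def _example_expected_path():
+    # Added, illustrative only; the paths below are hypothetical.
+    assert expected_path("meta", "dom/nodes/Node.html") == os.path.join(
+        "meta", "dom", "nodes", "Node.html.ini")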
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/expectedtree.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/expectedtree.py
new file mode 100644
index 0000000000..88cf40ad94
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/expectedtree.py
@@ -0,0 +1,132 @@
+# mypy: allow-untyped-defs
+
+from math import log
+from collections import defaultdict
+
+class Node:
+ def __init__(self, prop, value):
+ self.prop = prop
+ self.value = value
+ self.parent = None
+
+ self.children = set()
+
+ # Populated for leaf nodes
+ self.run_info = set()
+ self.result_values = defaultdict(int)
+
+ def add(self, node):
+ self.children.add(node)
+ node.parent = self
+
+ def __iter__(self):
+ yield self
+ for node in self.children:
+ yield from node
+
+ def __len__(self):
+ return 1 + sum(len(item) for item in self.children)
+
+
+def entropy(results):
+ """This is basically a measure of the uniformity of the values in results
+ based on the shannon entropy"""
+
+ result_counts = defaultdict(int)
+ total = float(len(results))
+ for values in results.values():
+ # Not sure this is right, possibly want to treat multiple values as
+ # distinct from multiple of the same value?
+ for value in values:
+ result_counts[value] += 1
+
+ entropy_sum = 0
+
+ for count in result_counts.values():
+ prop = float(count) / total
+ entropy_sum -= prop * log(prop, 2)
+
+ return entropy_sum
+
+
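+def _example_entropy():
+    # Added, illustrative only: four runs split evenly between two result
+    # values give one bit of entropy; a uniform outcome would give zero.
+    results = {"linux": {"PASS"}, "mac": {"PASS"},
+               "win": {"FAIL"}, "android": {"FAIL"}}
+    assert abs(entropy(results) - 1.0) < 1e-9
+
+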
+def split_results(prop, results):
+ """Split a dictionary of results into a dictionary of dictionaries where
+ each sub-dictionary has a specific value of the given property"""
+ by_prop = defaultdict(dict)
+ for run_info, value in results.items():
+ by_prop[run_info[prop]][run_info] = value
+
+ return by_prop
+
+
+def build_tree(properties, dependent_props, results, tree=None):
+ """Build a decision tree mapping properties to results
+
+ :param properties: A list of run_info properties to consider
+ in the tree
+ :param dependent_props: A dictionary mapping property name
+ to properties that should only be considered
+ after the properties in the key. For example
+ {"os": ["version"]} means that "version" won't
+ be used until after os.
+ :param results: Dictionary mapping run_info to a dict of result values
+ and their counts
+ :param tree: A Node object to use as the root of the (sub)tree"""
+
+ if tree is None:
+ tree = Node(None, None)
+
+ prop_index = {prop: i for i, prop in enumerate(properties)}
+
+ all_results = defaultdict(int)
+ for result_values in results.values():
+ for result_value, count in result_values.items():
+ all_results[result_value] += count
+
+ # If there is only one result we are done
+ if not properties or len(all_results) == 1:
+ for value, count in all_results.items():
+ tree.result_values[value] += count
+ tree.run_info |= set(results.keys())
+ return tree
+
+ results_partitions = []
+ remove_properties = set()
+ for prop in properties:
+ result_sets = split_results(prop, results)
+ if len(result_sets) == 1:
+ # If this property doesn't partition the space then just remove it
+ # from the set to consider
+ remove_properties.add(prop)
+ continue
+ new_entropy = 0.
+ results_sets_entropy = []
+ for prop_value, result_set in result_sets.items():
+ results_sets_entropy.append((entropy(result_set), prop_value, result_set))
+ new_entropy += (float(len(result_set)) / len(results)) * results_sets_entropy[-1][0]
+
+ results_partitions.append((new_entropy,
+ prop,
+ results_sets_entropy))
+
+ # In the case that no properties partition the space
+ if not results_partitions:
+ for value, count in all_results.items():
+ tree.result_values[value] += count
+ tree.run_info |= set(results.keys())
+ return tree
+
+ # Split on the property whose partition has the lowest weighted entropy, i.e. the most informative split
+ results_partitions.sort(key=lambda x: (x[0], prop_index[x[1]]))
+ _, best_prop, sub_results = results_partitions[0]
+
+ # Create a new set of properties that can be used
+ new_props = properties[:prop_index[best_prop]] + properties[prop_index[best_prop] + 1:]
+ new_props.extend(dependent_props.get(best_prop, []))
+ if remove_properties:
+ new_props = [item for item in new_props if item not in remove_properties]
+
+ for _, prop_value, results_sets in sub_results:
+ node = Node(best_prop, prop_value)
+ tree.add(node)
+ build_tree(new_props, dependent_props, results_sets, node)
+ return tree
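+
+
+class _ExampleRunInfo(dict):
+    # Added, illustrative only: a minimal hashable mapping standing in for
+    # wptrunner's RunInfo so the sketch below is self-contained.
+    def __hash__(self):
+        return hash(frozenset(self.items()))
+
+
+def _example_build_tree():
+    # Two run_info values that disagree on their results should produce a
+    # split on the "os" property, with one leaf per OS value.
+    results = {_ExampleRunInfo(os="linux"): {"PASS": 2},
+               _ExampleRunInfo(os="win"): {"FAIL": 1}}
+    tree = build_tree(["os"], {}, results)
+    assert {node.value for node in tree if node.prop == "os"} == {"linux", "win"}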
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/font.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/font.py
new file mode 100644
index 0000000000..c533d70df7
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/font.py
@@ -0,0 +1,144 @@
+# mypy: allow-untyped-defs
+
+import ctypes
+import os
+import platform
+import plistlib
+
+from shutil import copy2, rmtree
+from subprocess import call, check_output
+
+HERE = os.path.dirname(__file__)
+SYSTEM = platform.system().lower()
+
+
+class FontInstaller:
+ def __init__(self, logger, font_dir=None, **fonts):
+ self.logger = logger
+ self.font_dir = font_dir
+ self.installed_fonts = False
+ self.created_dir = False
+ self.fonts = fonts
+
+ def __call__(self, env_options=None, env_config=None):
+ return self
+
+ def __enter__(self):
+ for _, font_path in self.fonts.items():
+ font_name = font_path.split('/')[-1]
+ install = getattr(self, 'install_%s_font' % SYSTEM, None)
+ if not install:
+ self.logger.warning('Font installation not supported on %s' % SYSTEM)
+ return False
+ if install(font_name, font_path):
+ self.installed_fonts = True
+ self.logger.info('Installed font: %s' % font_name)
+ else:
+ self.logger.warning('Unable to install font: %s' % font_name)
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ if not self.installed_fonts:
+ return False
+
+ for _, font_path in self.fonts.items():
+ font_name = font_path.split('/')[-1]
+ remove = getattr(self, 'remove_%s_font' % SYSTEM, None)
+ if not remove:
+ self.logger.warning('Font removal not supported on %s' % SYSTEM)
+ return False
+ if remove(font_name, font_path):
+ self.logger.info('Removed font: %s' % font_name)
+ else:
+ self.logger.warning('Unable to remove font: %s' % font_name)
+
+ def install_linux_font(self, font_name, font_path):
+ if not self.font_dir:
+ self.font_dir = os.path.join(os.path.expanduser('~'), '.fonts')
+ if not os.path.exists(self.font_dir):
+ os.makedirs(self.font_dir)
+ self.created_dir = True
+ if not os.path.exists(os.path.join(self.font_dir, font_name)):
+ copy2(font_path, self.font_dir)
+ try:
+ fc_cache_returncode = call('fc-cache')
+ return not fc_cache_returncode
+ except OSError: # If fontconfig doesn't exist, return False
+ self.logger.error('fontconfig not available on this Linux system.')
+ return False
+
+ def install_darwin_font(self, font_name, font_path):
+ if not self.font_dir:
+ self.font_dir = os.path.join(os.path.expanduser('~'),
+ 'Library/Fonts')
+ if not os.path.exists(self.font_dir):
+ os.makedirs(self.font_dir)
+ self.created_dir = True
+ installed_font_path = os.path.join(self.font_dir, font_name)
+ if not os.path.exists(installed_font_path):
+ copy2(font_path, self.font_dir)
+
+ # Per https://github.com/web-platform-tests/results-collection/issues/218
+ # installing Ahem on macOS is flaky, so check if it actually installed
+ with open(os.devnull, 'w') as f:
+ fonts = check_output(['/usr/sbin/system_profiler', '-xml', 'SPFontsDataType'], stderr=f)
+
+ try:
+ # if py3
+ load_plist = plistlib.loads
+ except AttributeError:
+ load_plist = plistlib.readPlistFromString
+ fonts = load_plist(fonts)
+ assert len(fonts) == 1
+ for font in fonts[0]['_items']:
+ if font['path'] == installed_font_path:
+ return True
+ return False
+
+ def install_windows_font(self, _, font_path):
+ hwnd_broadcast = 0xFFFF
+ wm_fontchange = 0x001D
+
+ gdi32 = ctypes.WinDLL('gdi32')
+ if gdi32.AddFontResourceW(font_path):
+ from ctypes import wintypes
+ wparam = 0
+ lparam = 0
+ SendNotifyMessageW = ctypes.windll.user32.SendNotifyMessageW
+ SendNotifyMessageW.argtypes = [wintypes.HANDLE, wintypes.UINT,
+ wintypes.WPARAM, wintypes.LPARAM]
+ return bool(SendNotifyMessageW(hwnd_broadcast, wm_fontchange,
+ wparam, lparam))
+
+ def remove_linux_font(self, font_name, _):
+ if self.created_dir:
+ rmtree(self.font_dir)
+ else:
+ os.remove(f'{self.font_dir}/{font_name}')
+ try:
+ fc_cache_returncode = call('fc-cache')
+ return not fc_cache_returncode
+ except OSError: # If fontconfig doesn't exist, return False
+ self.logger.error('fontconfig not available on this Linux system.')
+ return False
+
+ def remove_darwin_font(self, font_name, _):
+ if self.created_dir:
+ rmtree(self.font_dir)
+ else:
+ os.remove(os.path.join(self.font_dir, font_name))
+ return True
+
+ def remove_windows_font(self, _, font_path):
+ hwnd_broadcast = 0xFFFF
+ wm_fontchange = 0x001D
+
+ gdi32 = ctypes.WinDLL('gdi32')
+ if gdi32.RemoveFontResourceW(font_path):
+ from ctypes import wintypes
+ wparam = 0
+ lparam = 0
+ SendNotifyMessageW = ctypes.windll.user32.SendNotifyMessageW
+ SendNotifyMessageW.argtypes = [wintypes.HANDLE, wintypes.UINT,
+ wintypes.WPARAM, wintypes.LPARAM]
+ return bool(SendNotifyMessageW(hwnd_broadcast, wm_fontchange,
+ wparam, lparam))
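+
+
+def _example_font_installer(logger):
+    # Added, illustrative only: font keyword arguments map an arbitrary label
+    # to a font file path (the path below is hypothetical). Installation and
+    # removal happen on entering and leaving the context manager.
+    with FontInstaller(logger, font_dir=None, ahem="/tmp/Ahem.ttf"):
+        pass  # Tests would run here with the font available.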
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/formatters/__init__.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/formatters/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/formatters/__init__.py
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/formatters/chromium.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/formatters/chromium.py
new file mode 100644
index 0000000000..95f53011bf
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/formatters/chromium.py
@@ -0,0 +1,338 @@
+# mypy: allow-untyped-defs
+
+import functools
+import json
+import time
+
+from collections import defaultdict
+from mozlog.formatters import base
+
+from wptrunner.wptmanifest import serializer
+
+_escape_heading = functools.partial(serializer.escape, extras="]")
+
+
+class ChromiumFormatter(base.BaseFormatter): # type: ignore
+ """Formatter to produce results matching the Chromium JSON Test Results format.
+ https://chromium.googlesource.com/chromium/src/+/master/docs/testing/json_test_results_format.md
+
+ Notably, each test has an "artifacts" field that is a dict consisting of
+ "log": a list of strings (one per subtest + one for harness status, see
+ _append_test_message for the format)
+ "screenshots": a list of strings in the format of "url: base64"
+
+ """
+
+ def __init__(self):
+ # Whether the run was interrupted, either by the test runner or user.
+ self.interrupted = False
+
+ # A map of test status to the number of tests that had that status.
+ self.num_failures_by_status = defaultdict(int)
+
+ # Start time, expressed as offset since UNIX epoch in seconds. Measured
+ # from the first `suite_start` event.
+ self.start_timestamp_seconds = None
+
+ # A map of test names to test start timestamps, expressed in seconds
+ # since UNIX epoch. Only contains tests that are currently running
+ # (i.e., have not received the `test_end` event).
+ self.test_starts = {}
+
+ # Trie of test results. Each directory in the test name is a node in
+ # the trie and the leaf contains the dict of per-test data.
+ self.tests = {}
+
+ # Two dictionaries keyed by test name. Values are lists of strings:
+ # actual metadata content and other messages, respectively.
+ # See _append_test_message for examples.
+ self.actual_metadata = defaultdict(list)
+ self.messages = defaultdict(list)
+
+ # List of tests that have failing subtests.
+ self.tests_with_subtest_fails = set()
+
+ # Browser log for the current test under execution.
+ # These logs are from ChromeDriver's stdout/err, so we cannot say for
+ # sure which test a message is from, but instead we correlate them based
+ # on timing.
+ self.browser_log = []
+
+ def _append_test_message(self, test, subtest, wpt_actual_status, message):
+ r"""
+ Appends the message data for a test or subtest.
+
+ :param str test: the name of the test
+ :param str subtest: the name of the subtest with the message. Will be
+ None if this is called for a test.
+ :param str wpt_actual_status: the test status as reported by WPT
+ :param str message: the string to append to the message for this test
+
+ Example actual_metadata of a test with a subtest:
+ "[test_name]\n expected: OK\n"
+ " [subtest_name]\n expected: FAIL\n"
+
+ NOTE: throughout this function we output a key called "expected" but
+ fill it in with the actual status. This is by design. The goal of this
+ output is to look exactly like WPT's expectation metadata so that it
+ can be easily diff-ed.
+
+ Messages are appended verbatim to self.messages[test].
+ """
+ if subtest:
+ result = " [%s]\n expected: %s\n" % (_escape_heading(subtest),
+ wpt_actual_status)
+ self.actual_metadata[test].append(result)
+ if message:
+ self.messages[test].append("%s: %s\n" % (subtest, message))
+ else:
+ # No subtest, so this is the top-level test. The result must be
+ # prepended to the list, so that it comes before any subtest.
+ test_name_last_part = test.split("/")[-1]
+ result = "[%s]\n expected: %s\n" % (
+ _escape_heading(test_name_last_part), wpt_actual_status)
+ self.actual_metadata[test].insert(0, result)
+ if message:
+ self.messages[test].insert(0, "Harness: %s\n" % message)
+
+ def _append_artifact(self, cur_dict, artifact_name, artifact_value):
+ """
+ Appends artifacts to the specified dictionary.
+ :param dict cur_dict: the test leaf dictionary to append to
+ :param str artifact_name: the name of the artifact
+ :param str artifact_value: the value of the artifact
+ """
+ assert isinstance(artifact_value, str), "artifact_value must be a str"
+ if "artifacts" not in cur_dict.keys():
+ cur_dict["artifacts"] = defaultdict(list)
+ cur_dict["artifacts"][artifact_name].append(artifact_value)
+
+ def _store_test_result(self, name, actual, expected, actual_metadata,
+ messages, wpt_actual, subtest_failure,
+ duration=None, reftest_screenshots=None):
+ """
+ Stores the result of a single test in |self.tests|
+
+ :param str name: name of the test.
+ :param str actual: actual status of the test.
+ :param str expected: expected statuses of the test.
+ :param list actual_metadata: a list of metadata items.
+ :param list messages: a list of test messages.
+ :param str wpt_actual: actual status reported by wpt, may differ from |actual|.
+ :param bool subtest_failure: whether this test failed because of subtests.
+ :param Optional[float] duration: time it took in seconds to run this test.
+ :param Optional[list] reftest_screenshots: see executors/base.py for definition.
+ """
+ # The test name can contain a leading / which will produce an empty
+ # string in the first position of the list returned by split. We use
+ # filter(None) to remove such entries.
+ name_parts = filter(None, name.split("/"))
+ cur_dict = self.tests
+ for name_part in name_parts:
+ cur_dict = cur_dict.setdefault(name_part, {})
+ # Splitting and joining the list of statuses here avoids the need for
+ # recursively postprocessing the |tests| trie at shutdown. We assume the
+ # number of repetitions is typically small enough for the quadratic
+ # runtime to not matter.
+ statuses = cur_dict.get("actual", "").split()
+ statuses.append(actual)
+ cur_dict["actual"] = " ".join(statuses)
+ cur_dict["expected"] = expected
+ if duration is not None:
+ # Record the time to run the first invocation only.
+ cur_dict.setdefault("time", duration)
+ durations = cur_dict.setdefault("times", [])
+ durations.append(duration)
+ if subtest_failure:
+ self._append_artifact(cur_dict, "wpt_subtest_failure", "true")
+ if wpt_actual != actual:
+ self._append_artifact(cur_dict, "wpt_actual_status", wpt_actual)
+ if wpt_actual == 'CRASH':
+ for line in self.browser_log:
+ self._append_artifact(cur_dict, "wpt_crash_log", line)
+ for metadata in actual_metadata:
+ self._append_artifact(cur_dict, "wpt_actual_metadata", metadata)
+ for message in messages:
+ self._append_artifact(cur_dict, "wpt_log", message)
+
+ # Store screenshots (if any).
+ for item in reftest_screenshots or []:
+ if not isinstance(item, dict):
+ # Skip the relation string.
+ continue
+ data = "%s: %s" % (item["url"], item["screenshot"])
+ self._append_artifact(cur_dict, "screenshots", data)
+
+ # Figure out if there was a regression, unexpected status, or flake.
+ # This only happens for tests that were run
+ if actual != "SKIP":
+ if actual not in expected:
+ cur_dict["is_unexpected"] = True
+ if actual != "PASS":
+ cur_dict["is_regression"] = True
+ if len(set(statuses)) > 1:
+ cur_dict["is_flaky"] = True
+
+ # Update the count of how many tests ran with each status. Only includes
+ # the first invocation's result in the totals.
+ if len(statuses) == 1:
+ self.num_failures_by_status[actual] += 1
+
+ def _map_status_name(self, status):
+ """
+ Maps a WPT status to a Chromium status.
+
+ Chromium has five main statuses that we have to map to:
+ CRASH: the test harness crashed
+ FAIL: the test did not run as expected
+ PASS: the test ran as expected
+ SKIP: the test was not run
+ TIMEOUT: the test did not finish in time and was aborted
+
+ :param str status: the string status of a test from WPT
+ :return: a corresponding string status for Chromium
+ """
+ if status == "OK":
+ return "PASS"
+ if status == "NOTRUN":
+ return "SKIP"
+ if status == "EXTERNAL-TIMEOUT":
+ return "TIMEOUT"
+ if status in ("ERROR", "PRECONDITION_FAILED"):
+ return "FAIL"
+ if status == "INTERNAL-ERROR":
+ return "CRASH"
+ # Any other status just gets returned as-is.
+ return status
+
+ def _get_expected_status_from_data(self, actual_status, data):
+ """
+ Gets the expected statuses from a |data| dictionary.
+
+ If there is no expected status in data, the actual status is returned.
+ This is because mozlog will delete "expected" from |data| if it is the
+ same as "status". So the presence of "expected" implies that "status" is
+ unexpected. Conversely, the absence of "expected" implies the "status"
+ is expected. So we use the "expected" status if it's there or fall back
+ to the actual status if it's not.
+
+ If the test has multiple statuses, it will have other statuses listed as
+ "known_intermittent" in |data|. If these exist, they will be added to
+ the returned status with spaces in between.
+
+ :param str actual_status: the actual status of the test
+ :param data: a data dictionary to extract expected status from
+ :return str: the expected statuses as a string
+ """
+ expected_statuses = self._map_status_name(data["expected"]) if "expected" in data else actual_status
+ if data.get("known_intermittent"):
+ all_statuses = {self._map_status_name(other_status) for other_status in data["known_intermittent"]}
+ all_statuses.add(expected_statuses)
+ expected_statuses = " ".join(sorted(all_statuses))
+ return expected_statuses
+
+ def _get_time(self, data):
+ """Get the timestamp of a message in seconds since the UNIX epoch."""
+ maybe_timestamp_millis = data.get("time")
+ if maybe_timestamp_millis is not None:
+ return float(maybe_timestamp_millis) / 1000
+ return time.time()
+
+ def _time_test(self, test_name, data):
+ """Time how long a test took to run.
+
+ :param str test_name: the name of the test to time
+ :param data: a data dictionary to extract the test end timestamp from
+ :return Optional[float]: a nonnegative duration in seconds or None if
+ the measurement is unavailable or invalid
+ """
+ test_start = self.test_starts.pop(test_name, None)
+ if test_start is not None:
+ # The |data| dictionary only provides millisecond resolution
+ # anyway, so further nonzero digits are unlikely to be meaningful.
+ duration = round(self._get_time(data) - test_start, 3)
+ if duration >= 0:
+ return duration
+ return None
+
+ def suite_start(self, data):
+ if self.start_timestamp_seconds is None:
+ self.start_timestamp_seconds = self._get_time(data)
+ self.flag_specific = data.get('run_info', {}).get('flag_specific', '')
+
+ def test_start(self, data):
+ test_name = data["test"]
+ self.test_starts[test_name] = self._get_time(data)
+
+ def test_status(self, data):
+ test_name = data["test"]
+ wpt_actual_status = data["status"]
+ actual_status = self._map_status_name(wpt_actual_status)
+ expected_statuses = self._get_expected_status_from_data(actual_status, data)
+
+ is_unexpected = actual_status not in expected_statuses
+ if is_unexpected and test_name not in self.tests_with_subtest_fails:
+ self.tests_with_subtest_fails.add(test_name)
+ # We should always get a subtest in the data dict, but it's technically
+ # possible that it's missing. Be resilient here.
+ subtest_name = data.get("subtest", "UNKNOWN SUBTEST")
+ self._append_test_message(test_name, subtest_name,
+ wpt_actual_status, data.get("message", ""))
+
+ def test_end(self, data):
+ test_name = data["test"]
+ # Save the status reported by WPT since we might change it when
+ # reporting to Chromium.
+ wpt_actual_status = data["status"]
+ actual_status = self._map_status_name(wpt_actual_status)
+ expected_statuses = self._get_expected_status_from_data(actual_status, data)
+ duration = self._time_test(test_name, data)
+ subtest_failure = False
+ if test_name in self.tests_with_subtest_fails:
+ subtest_failure = True
+ # Clean up the test list to avoid accumulating too many.
+ self.tests_with_subtest_fails.remove(test_name)
+ # This test passed but it has failing subtests. Since we can only
+ # report a single status to Chromium, we choose FAIL to indicate
+ # that something about this test did not run correctly.
+ if actual_status == "PASS":
+ actual_status = "FAIL"
+
+ self._append_test_message(test_name, None, wpt_actual_status,
+ data.get("message", ""))
+ self._store_test_result(test_name,
+ actual_status,
+ expected_statuses,
+ self.actual_metadata[test_name],
+ self.messages[test_name],
+ wpt_actual_status,
+ subtest_failure,
+ duration,
+ data.get("extra", {}).get("reftest_screenshots"))
+
+ # Remove the test from dicts to avoid accumulating too many.
+ self.actual_metadata.pop(test_name)
+ self.messages.pop(test_name)
+
+ # New test, new browser logs.
+ self.browser_log = []
+
+ def shutdown(self, data):
+ # Create the final result dictionary
+ final_result = {
+ # There are some required fields that we just hard-code.
+ "interrupted": False,
+ "path_delimiter": "/",
+ "version": 3,
+ "seconds_since_epoch": self.start_timestamp_seconds,
+ "num_failures_by_type": self.num_failures_by_status,
+ "flag_name": self.flag_specific,
+ "tests": self.tests
+ }
+ return json.dumps(final_result)
+
+ def process_output(self, data):
+ cmd = data.get("command", "")
+ if any(c in cmd for c in ["chromedriver", "logcat"]):
+ self.browser_log.append(data['data'])
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/formatters/tests/__init__.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/formatters/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/formatters/tests/__init__.py
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/formatters/tests/test_chromium.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/formatters/tests/test_chromium.py
new file mode 100644
index 0000000000..bf815d5dc7
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/formatters/tests/test_chromium.py
@@ -0,0 +1,828 @@
+# mypy: ignore-errors
+
+import json
+import sys
+from os.path import dirname, join
+from io import StringIO
+
+from mozlog import handlers, structuredlog
+import pytest
+
+sys.path.insert(0, join(dirname(__file__), "..", ".."))
+from formatters.chromium import ChromiumFormatter
+
+
+@pytest.fixture
+def logger():
+ test_logger = structuredlog.StructuredLogger("test_a")
+ try:
+ yield test_logger
+ finally:
+ # Loggers of the same name share state globally:
+ # https://searchfox.org/mozilla-central/rev/1c54648c082efdeb08cf6a5e3a8187e83f7549b9/testing/mozbase/mozlog/mozlog/structuredlog.py#195-196
+ #
+ # Resetting the state here ensures the logger will not be shut down in
+ # the next test.
+ test_logger.reset_state()
+
+
+def test_chromium_required_fields(logger, capfd):
+ # Test that the test results contain a handful of required fields.
+
+ # Set up the handler.
+ output = StringIO()
+ logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
+
+ # output a bunch of stuff
+ logger.suite_start(["test-id-1"], run_info={}, time=123)
+ logger.test_start("test-id-1")
+ logger.test_end("test-id-1", status="PASS", expected="PASS")
+ logger.suite_end()
+ logger.shutdown()
+
+ # check nothing got output to stdout/stderr
+ # (note that mozlog outputs exceptions during handling to stderr!)
+ captured = capfd.readouterr()
+ assert captured.out == ""
+ assert captured.err == ""
+
+ # check the actual output of the formatter
+ output.seek(0)
+ output_obj = json.load(output)
+
+ # Check for existence of required fields
+ assert "interrupted" in output_obj
+ assert "path_delimiter" in output_obj
+ assert "version" in output_obj
+ assert "num_failures_by_type" in output_obj
+ assert "tests" in output_obj
+
+ test_obj = output_obj["tests"]["test-id-1"]
+ assert "actual" in test_obj
+ assert "expected" in test_obj
+
+
+def test_time_per_test(logger, capfd):
+ # Test that the formatter measures time per test correctly.
+
+ # Set up the handler.
+ output = StringIO()
+ logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
+
+ logger.suite_start(["test-id-1", "test-id-2"], run_info={}, time=50)
+ logger.test_start("test-id-1", time=100)
+ logger.test_start("test-id-2", time=200)
+ logger.test_end("test-id-1", status="PASS", expected="PASS", time=300)
+ logger.test_end("test-id-2", status="PASS", expected="PASS", time=199)
+ logger.suite_end()
+
+ logger.suite_start(["test-id-1"], run_info={}, time=400)
+ logger.test_start("test-id-1", time=500)
+ logger.test_end("test-id-1", status="PASS", expected="PASS", time=600)
+ logger.suite_end()
+
+ # Write the final results.
+ logger.shutdown()
+
+ # check nothing got output to stdout/stderr
+ # (note that mozlog outputs exceptions during handling to stderr!)
+ captured = capfd.readouterr()
+ assert captured.out == ""
+ assert captured.err == ""
+
+ # check the actual output of the formatter
+ output.seek(0)
+ output_obj = json.load(output)
+
+ test1_obj = output_obj["tests"]["test-id-1"]
+ test2_obj = output_obj["tests"]["test-id-2"]
+ # Test 1 run 1: 300ms - 100ms = 0.2s
+ # Test 1 run 2: 600ms - 500ms = 0.1s
+ assert test1_obj["time"] == pytest.approx(0.2)
+ assert len(test1_obj["times"]) == 2
+ assert test1_obj["times"][0] == pytest.approx(0.2)
+ assert test1_obj["times"][1] == pytest.approx(0.1)
+ assert "time" not in test2_obj
+ assert "times" not in test2_obj
+
+
+def test_chromium_test_name_trie(logger, capfd):
+ # Ensure test names are broken into directories and stored in a trie with
+ # test results at the leaves.
+
+ # Set up the handler.
+ output = StringIO()
+ logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
+
+ # output a bunch of stuff
+ logger.suite_start(["/foo/bar/test-id-1", "/foo/test-id-2"], run_info={},
+ time=123)
+ logger.test_start("/foo/bar/test-id-1")
+ logger.test_end("/foo/bar/test-id-1", status="TIMEOUT", expected="FAIL")
+ logger.test_start("/foo/test-id-2")
+ logger.test_end("/foo/test-id-2", status="ERROR", expected="TIMEOUT")
+ logger.suite_end()
+ logger.shutdown()
+
+ # check nothing got output to stdout/stderr
+ # (note that mozlog outputs exceptions during handling to stderr!)
+ captured = capfd.readouterr()
+ assert captured.out == ""
+ assert captured.err == ""
+
+ # check the actual output of the formatter
+ output.seek(0)
+ output_obj = json.load(output)
+
+ # Ensure that the test names are broken up by directory name and that the
+ # results are stored at the leaves.
+ test_obj = output_obj["tests"]["foo"]["bar"]["test-id-1"]
+ assert test_obj["actual"] == "TIMEOUT"
+ assert test_obj["expected"] == "FAIL"
+
+ test_obj = output_obj["tests"]["foo"]["test-id-2"]
+ # The ERROR status is mapped to FAIL for Chromium
+ assert test_obj["actual"] == "FAIL"
+ assert test_obj["expected"] == "TIMEOUT"
+
+
+def test_num_failures_by_type(logger, capfd):
+ # Test that the number of failures by status type is correctly calculated.
+
+ # Set up the handler.
+ output = StringIO()
+ logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
+
+ # Run some tests with different statuses: 3 passes, 1 timeout
+ logger.suite_start(["t1", "t2", "t3", "t4"], run_info={}, time=123)
+ logger.test_start("t1")
+ logger.test_end("t1", status="PASS", expected="PASS")
+ logger.test_start("t2")
+ logger.test_end("t2", status="PASS", expected="PASS")
+ logger.test_start("t3")
+ logger.test_end("t3", status="PASS", expected="FAIL")
+ logger.test_start("t4")
+ logger.test_end("t4", status="TIMEOUT", expected="CRASH")
+ logger.suite_end()
+ logger.shutdown()
+
+ # check nothing got output to stdout/stderr
+ # (note that mozlog outputs exceptions during handling to stderr!)
+ captured = capfd.readouterr()
+ assert captured.out == ""
+ assert captured.err == ""
+
+ # check the actual output of the formatter
+ output.seek(0)
+ num_failures_by_type = json.load(output)["num_failures_by_type"]
+
+ # We expect 3 passes and 1 timeout, nothing else.
+ assert sorted(num_failures_by_type.keys()) == ["PASS", "TIMEOUT"]
+ assert num_failures_by_type["PASS"] == 3
+ assert num_failures_by_type["TIMEOUT"] == 1
+
+
+def test_subtest_messages(logger, capfd):
+ # Tests accumulation of test output
+
+ # Set up the handler.
+ output = StringIO()
+ logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
+
+ # Run two tests with subtest messages. The subtest name should be included
+ # in the output. We should also tolerate missing messages and subtest names
+ # with unusual characters.
+ logger.suite_start(["t1", "t2"], run_info={}, time=123)
+ logger.test_start("t1")
+ logger.test_status("t1", status="FAIL", subtest="t1_a",
+ message="t1_a_message")
+ # Subtest name includes a backslash and two closing square brackets.
+ logger.test_status("t1", status="PASS", subtest=r"t1_\[]]b",
+ message="t1_b_message")
+ logger.test_end("t1", status="PASS", expected="PASS")
+ logger.test_start("t2")
+ # Subtests with empty messages should not be ignored.
+ logger.test_status("t2", status="PASS", subtest="t2_a")
+ # A test-level message will also be appended
+ logger.test_end("t2", status="TIMEOUT", expected="PASS",
+ message="t2_message")
+ logger.suite_end()
+ logger.shutdown()
+
+ # check nothing got output to stdout/stderr
+ # (note that mozlog outputs exceptions during handling to stderr!)
+ captured = capfd.readouterr()
+ assert captured.out == ""
+ assert captured.err == ""
+
+ # check the actual output of the formatter
+ output.seek(0)
+ output_json = json.load(output)
+
+ t1_artifacts = output_json["tests"]["t1"]["artifacts"]
+ assert t1_artifacts["wpt_actual_metadata"] == [
+ "[t1]\n expected: PASS\n",
+ " [t1_a]\n expected: FAIL\n",
+ " [t1_\\\\[\\]\\]b]\n expected: PASS\n",
+ ]
+ assert t1_artifacts["wpt_log"] == [
+ "t1_a: t1_a_message\n",
+ # Only humans will read the log, so there's no need to escape
+ # characters here.
+ "t1_\\[]]b: t1_b_message\n",
+ ]
+ assert t1_artifacts["wpt_subtest_failure"] == ["true"]
+ t2_artifacts = output_json["tests"]["t2"]["artifacts"]
+ assert t2_artifacts["wpt_actual_metadata"] == [
+ "[t2]\n expected: TIMEOUT\n",
+ " [t2_a]\n expected: PASS\n",
+ ]
+ assert t2_artifacts["wpt_log"] == [
+ "Harness: t2_message\n"
+ ]
+ assert "wpt_subtest_failure" not in t2_artifacts.keys()
+
+
+def test_subtest_failure(logger, capfd):
+ # Tests that a test fails if a subtest fails
+
+ # Set up the handler.
+ output = StringIO()
+ formatter = ChromiumFormatter()
+ logger.add_handler(handlers.StreamHandler(output, formatter))
+
+ # Run a test with some subtest failures.
+ logger.suite_start(["t1"], run_info={}, time=123)
+ logger.test_start("t1")
+ logger.test_status("t1", status="FAIL", subtest="t1_a",
+ message="t1_a_message")
+ logger.test_status("t1", status="PASS", subtest="t1_b",
+ message="t1_b_message")
+ logger.test_status("t1", status="TIMEOUT", subtest="t1_c",
+ message="t1_c_message")
+
+ # Make sure the test name was added to the set of tests with subtest fails
+ assert "t1" in formatter.tests_with_subtest_fails
+
+ # The test status is reported as a pass here because the harness was able to
+ # run the test to completion.
+ logger.test_end("t1", status="PASS", expected="PASS", message="top_message")
+ logger.suite_end()
+ logger.shutdown()
+
+ # check nothing got output to stdout/stderr
+ # (note that mozlog outputs exceptions during handling to stderr!)
+ captured = capfd.readouterr()
+ assert captured.out == ""
+ assert captured.err == ""
+
+ # check the actual output of the formatter
+ output.seek(0)
+ output_json = json.load(output)
+
+ test_obj = output_json["tests"]["t1"]
+ t1_artifacts = test_obj["artifacts"]
+ assert t1_artifacts["wpt_actual_metadata"] == [
+ "[t1]\n expected: PASS\n",
+ " [t1_a]\n expected: FAIL\n",
+ " [t1_b]\n expected: PASS\n",
+ " [t1_c]\n expected: TIMEOUT\n",
+ ]
+ assert t1_artifacts["wpt_log"] == [
+ "Harness: top_message\n",
+ "t1_a: t1_a_message\n",
+ "t1_b: t1_b_message\n",
+ "t1_c: t1_c_message\n",
+ ]
+ assert t1_artifacts["wpt_subtest_failure"] == ["true"]
+ # The status of the test in the output is a failure because subtests failed,
+ # despite the harness reporting that the test passed. But the harness status
+ # is logged as an artifact.
+ assert t1_artifacts["wpt_actual_status"] == ["PASS"]
+ assert test_obj["actual"] == "FAIL"
+ assert test_obj["expected"] == "PASS"
+ # Also ensure that the formatter cleaned up its internal state
+ assert "t1" not in formatter.tests_with_subtest_fails
+
+
+def test_expected_subtest_failure(logger, capfd):
+ # Tests that an expected subtest failure does not cause the test to fail
+
+ # Set up the handler.
+ output = StringIO()
+ formatter = ChromiumFormatter()
+ logger.add_handler(handlers.StreamHandler(output, formatter))
+
+ # Run a test with some expected subtest failures.
+ logger.suite_start(["t1"], run_info={}, time=123)
+ logger.test_start("t1")
+ logger.test_status("t1", status="FAIL", expected="FAIL", subtest="t1_a",
+ message="t1_a_message")
+ logger.test_status("t1", status="PASS", subtest="t1_b",
+ message="t1_b_message")
+ logger.test_status("t1", status="TIMEOUT", expected="TIMEOUT", subtest="t1_c",
+ message="t1_c_message")
+
+ # The subtest failures are all expected so this test should not be added to
+ # the set of tests with subtest failures.
+ assert "t1" not in formatter.tests_with_subtest_fails
+
+ # The test status is reported as a pass here because the harness was able to
+ # run the test to completion.
+ logger.test_end("t1", status="OK", expected="OK")
+ logger.suite_end()
+ logger.shutdown()
+
+ # check nothing got output to stdout/stderr
+ # (note that mozlog outputs exceptions during handling to stderr!)
+ captured = capfd.readouterr()
+ assert captured.out == ""
+ assert captured.err == ""
+
+ # check the actual output of the formatter
+ output.seek(0)
+ output_json = json.load(output)
+
+ test_obj = output_json["tests"]["t1"]
+ assert test_obj["artifacts"]["wpt_actual_metadata"] == [
+ "[t1]\n expected: OK\n",
+ " [t1_a]\n expected: FAIL\n",
+ " [t1_b]\n expected: PASS\n",
+ " [t1_c]\n expected: TIMEOUT\n",
+ ]
+ assert test_obj["artifacts"]["wpt_log"] == [
+ "t1_a: t1_a_message\n",
+ "t1_b: t1_b_message\n",
+ "t1_c: t1_c_message\n",
+ ]
+ # The status of the test in the output is a pass because the subtest
+ # failures were all expected.
+ assert test_obj["actual"] == "PASS"
+ assert test_obj["expected"] == "PASS"
+
+
+def test_unexpected_subtest_pass(logger, capfd):
+ # A subtest that unexpectedly passes is considered a failure condition.
+
+ # Set up the handler.
+ output = StringIO()
+ formatter = ChromiumFormatter()
+ logger.add_handler(handlers.StreamHandler(output, formatter))
+
+ # Run a test with a subtest that is expected to fail but passes.
+ logger.suite_start(["t1"], run_info={}, time=123)
+ logger.test_start("t1")
+ logger.test_status("t1", status="PASS", expected="FAIL", subtest="t1_a",
+ message="t1_a_message")
+
+ # Since the subtest behaviour is unexpected, it's considered a failure, so
+ # the test should be added to the set of tests with subtest failures.
+ assert "t1" in formatter.tests_with_subtest_fails
+
+ # The test status is reported as a pass here because the harness was able to
+ # run the test to completion.
+ logger.test_end("t1", status="PASS", expected="PASS")
+ logger.suite_end()
+ logger.shutdown()
+
+ # check nothing got output to stdout/stderr
+ # (note that mozlog outputs exceptions during handling to stderr!)
+ captured = capfd.readouterr()
+ assert captured.out == ""
+ assert captured.err == ""
+
+ # check the actual output of the formatter
+ output.seek(0)
+ output_json = json.load(output)
+
+ test_obj = output_json["tests"]["t1"]
+ t1_artifacts = test_obj["artifacts"]
+ assert t1_artifacts["wpt_actual_metadata"] == [
+ "[t1]\n expected: PASS\n",
+ " [t1_a]\n expected: PASS\n",
+ ]
+ assert t1_artifacts["wpt_log"] == [
+ "t1_a: t1_a_message\n",
+ ]
+ assert t1_artifacts["wpt_subtest_failure"] == ["true"]
+ # Since the subtest status is unexpected, we fail the test. But we report
+ # wpt_actual_status as an artifact
+ assert t1_artifacts["wpt_actual_status"] == ["PASS"]
+ assert test_obj["actual"] == "FAIL"
+ assert test_obj["expected"] == "PASS"
+ # Also ensure that the formatter cleaned up its internal state
+ assert "t1" not in formatter.tests_with_subtest_fails
+
+
+def test_expected_test_fail(logger, capfd):
+ # Check that an expected test-level failure is treated as a Pass
+
+ # Set up the handler.
+ output = StringIO()
+ logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
+
+ # Run a single test with an expected test-level failure.
+ logger.suite_start(["t1"], run_info={}, time=123)
+ logger.test_start("t1")
+ logger.test_end("t1", status="ERROR", expected="ERROR")
+ logger.suite_end()
+ logger.shutdown()
+
+ # check nothing got output to stdout/stderr
+ # (note that mozlog outputs exceptions during handling to stderr!)
+ captured = capfd.readouterr()
+ assert captured.out == ""
+ assert captured.err == ""
+
+ # check the actual output of the formatter
+ output.seek(0)
+ output_json = json.load(output)
+
+ test_obj = output_json["tests"]["t1"]
+ # The test's actual and expected status should map from "ERROR" to "FAIL"
+ assert test_obj["actual"] == "FAIL"
+ assert test_obj["expected"] == "FAIL"
+ # ..and this test should not be a regression nor unexpected
+ assert "is_regression" not in test_obj
+ assert "is_unexpected" not in test_obj
+
+
+def test_unexpected_test_fail(logger, capfd):
+ # Check that an unexpected test-level failure is marked as unexpected and
+ # as a regression.
+
+ # Set up the handler.
+ output = StringIO()
+ logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
+
+ # Run a single test with an unexpected test-level failure.
+ logger.suite_start(["t1"], run_info={}, time=123)
+ logger.test_start("t1")
+ logger.test_end("t1", status="ERROR", expected="OK")
+ logger.suite_end()
+ logger.shutdown()
+
+ # check nothing got output to stdout/stderr
+ # (note that mozlog outputs exceptions during handling to stderr!)
+ captured = capfd.readouterr()
+ assert captured.out == ""
+ assert captured.err == ""
+
+ # check the actual output of the formatter
+ output.seek(0)
+ output_json = json.load(output)
+
+ test_obj = output_json["tests"]["t1"]
+ # The test's actual and expected status should be mapped, ERROR->FAIL and
+ # OK->PASS
+ assert test_obj["actual"] == "FAIL"
+ assert test_obj["expected"] == "PASS"
+ # ..and this test should be a regression and unexpected
+ assert test_obj["is_regression"] is True
+ assert test_obj["is_unexpected"] is True
+
+
+def test_flaky_test_expected(logger, capfd):
+ # Check that a flaky test with multiple possible statuses is seen as
+ # expected if its actual status is one of the possible ones.
+
+ # set up the handler.
+ output = StringIO()
+ logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
+
+ # Run a test that is known to be flaky
+ logger.suite_start(["t1"], run_info={}, time=123)
+ logger.test_start("t1")
+ logger.test_end("t1", status="ERROR", expected="OK", known_intermittent=["ERROR", "TIMEOUT"])
+ logger.suite_end()
+ logger.shutdown()
+
+ # check nothing got output to stdout/stderr
+ # (note that mozlog outputs exceptions during handling to stderr!)
+ captured = capfd.readouterr()
+ assert captured.out == ""
+ assert captured.err == ""
+
+ # check the actual output of the formatter
+ output.seek(0)
+ output_json = json.load(output)
+
+ test_obj = output_json["tests"]["t1"]
+ # The test's statuses are all mapped, changing ERROR->FAIL and OK->PASS
+ assert test_obj["actual"] == "FAIL"
+ # All the possible statuses are merged and sorted together into expected.
+ assert test_obj["expected"] == "FAIL PASS TIMEOUT"
+ # ...this is not a regression or unexpected because the actual status is one
+ # of the expected ones
+ assert "is_regression" not in test_obj
+ assert "is_unexpected" not in test_obj
+
+
+def test_flaky_test_unexpected(logger, capfd):
+ # Check that a flaky test with multiple possible statuses is seen as
+ # unexpected if its actual status is NOT one of the possible ones.
+
+ # set up the handler.
+ output = StringIO()
+ logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
+
+ # Run a test that is known to be flaky
+ logger.suite_start(["t1"], run_info={}, time=123)
+ logger.test_start("t1")
+ logger.test_end("t1", status="ERROR", expected="OK", known_intermittent=["TIMEOUT"])
+ logger.suite_end()
+ logger.shutdown()
+
+ # check nothing got output to stdout/stderr
+ # (note that mozlog outputs exceptions during handling to stderr!)
+ captured = capfd.readouterr()
+ assert captured.out == ""
+ assert captured.err == ""
+
+ # check the actual output of the formatter
+ output.seek(0)
+ output_json = json.load(output)
+
+ test_obj = output_json["tests"]["t1"]
+ # The test's statuses are all mapped, changing ERROR->FAIL and OK->PASS
+ assert test_obj["actual"] == "FAIL"
+ # All the possible statuses are merged and sorted together into expected.
+ assert test_obj["expected"] == "PASS TIMEOUT"
+ # ...this is a regression and unexpected because the actual status is not
+ # one of the expected ones
+ assert test_obj["is_regression"] is True
+ assert test_obj["is_unexpected"] is True
+
+
+def test_precondition_failed(logger, capfd):
+ # Check that a failed precondition gets properly handled.
+
+ # set up the handler.
+ output = StringIO()
+ logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
+
+ # Run a test with a precondition failure
+ logger.suite_start(["t1"], run_info={}, time=123)
+ logger.test_start("t1")
+ logger.test_end("t1", status="PRECONDITION_FAILED", expected="OK")
+ logger.suite_end()
+ logger.shutdown()
+
+ # check nothing got output to stdout/stderr
+ # (note that mozlog outputs exceptions during handling to stderr!)
+ captured = capfd.readouterr()
+ assert captured.out == ""
+ assert captured.err == ""
+
+ # check the actual output of the formatter
+ output.seek(0)
+ output_json = json.load(output)
+
+ test_obj = output_json["tests"]["t1"]
+ # The precondition failure should map to FAIL status, but we should also
+ # have an artifact containing the original PRECONDITION_FAILED status.
+ assert test_obj["actual"] == "FAIL"
+ assert test_obj["artifacts"]["wpt_actual_status"] == ["PRECONDITION_FAILED"]
+ # ...this is an unexpected regression because we expected a pass but failed
+ assert test_obj["is_regression"] is True
+ assert test_obj["is_unexpected"] is True
+
+
+def test_repeated_test_statuses(logger, capfd):
+ # Check that the logger outputs all statuses from multiple runs of a test.
+
+ # Set up the handler.
+ output = StringIO()
+ logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
+
+ # Run a test suite for the first time.
+ logger.suite_start(["t1"], run_info={}, time=123)
+ logger.test_start("t1")
+ logger.test_end("t1", status="PASS", expected="PASS", known_intermittent=[])
+ logger.suite_end()
+
+ # Run the test suite for the second time.
+ logger.suite_start(["t1"], run_info={}, time=456)
+ logger.test_start("t1")
+ logger.test_end("t1", status="FAIL", expected="PASS", known_intermittent=[])
+ logger.suite_end()
+
+ # Write the final results.
+ logger.shutdown()
+
+ # check nothing got output to stdout/stderr
+ # (note that mozlog outputs exceptions during handling to stderr!)
+ captured = capfd.readouterr()
+ assert captured.out == ""
+ assert captured.err == ""
+
+ # check the actual output of the formatter
+ output.seek(0)
+ output_json = json.load(output)
+
+ status_totals = output_json["num_failures_by_type"]
+ assert status_totals["PASS"] == 1
+ # A missing result type is the same as being present and set to zero (0).
+ assert status_totals.get("FAIL", 0) == 0
+
+ # The actual statuses are accumulated in an ordered space-separated list.
+ test_obj = output_json["tests"]["t1"]
+ assert test_obj["actual"] == "PASS FAIL"
+ assert test_obj["expected"] == "PASS"
+
+
+def test_flaky_test_detection(logger, capfd):
+ # Check that the logger detects flakiness for a test run multiple times.
+
+ # Set up the handler.
+ output = StringIO()
+ logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
+
+ logger.suite_start(["t1", "t2"], run_info={})
+ logger.test_start("t1")
+ logger.test_start("t2")
+ logger.test_end("t1", status="FAIL", expected="PASS")
+ logger.test_end("t2", status="FAIL", expected="FAIL")
+ logger.suite_end()
+
+ logger.suite_start(["t1", "t2"], run_info={})
+ logger.test_start("t1")
+ logger.test_start("t2")
+ logger.test_end("t1", status="PASS", expected="PASS")
+ logger.test_end("t2", status="FAIL", expected="FAIL")
+ logger.suite_end()
+
+ # Write the final results.
+ logger.shutdown()
+
+ # check nothing got output to stdout/stderr
+ # (note that mozlog outputs exceptions during handling to stderr!)
+ captured = capfd.readouterr()
+ assert captured.out == ""
+ assert captured.err == ""
+
+ # check the actual output of the formatter
+ output.seek(0)
+ output_json = json.load(output)
+
+ # We consider a test flaky if it runs multiple times and produces more than
+ # one kind of result.
+ test1_obj = output_json["tests"]["t1"]
+ test2_obj = output_json["tests"]["t2"]
+ assert test1_obj["is_flaky"] is True
+ assert "is_flaky" not in test2_obj
+
+
+def test_known_intermittent_empty(logger, capfd):
+ # If the known_intermittent list is empty, we want to ensure we don't append
+ # any extraneous characters to the output.
+
+ # set up the handler.
+ output = StringIO()
+ logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
+
+ # Run a test and include an empty known_intermittent list
+ logger.suite_start(["t1"], run_info={}, time=123)
+ logger.test_start("t1")
+ logger.test_end("t1", status="OK", expected="OK", known_intermittent=[])
+ logger.suite_end()
+ logger.shutdown()
+
+ # check nothing got output to stdout/stderr
+ # (note that mozlog outputs exceptions during handling to stderr!)
+ captured = capfd.readouterr()
+ assert captured.out == ""
+ assert captured.err == ""
+
+ # check the actual output of the formatter
+ output.seek(0)
+ output_json = json.load(output)
+
+ test_obj = output_json["tests"]["t1"]
+ # Both actual and expected statuses get mapped to Pass. No extra whitespace
+ # anywhere.
+ assert test_obj["actual"] == "PASS"
+ assert test_obj["expected"] == "PASS"
+
+
+def test_known_intermittent_duplicate(logger, capfd):
+ # We don't want to have duplicate statuses in the final "expected" field.
+
+ # Set up the handler.
+ output = StringIO()
+ logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
+
+ # There are two duplications in this input:
+ # 1. known_intermittent already contains expected;
+ # 2. both statuses in known_intermittent map to FAIL in Chromium.
+ # In the end, we should only get one FAIL in Chromium "expected".
+ logger.suite_start(["t1"], run_info={}, time=123)
+ logger.test_start("t1")
+ logger.test_end("t1", status="ERROR", expected="ERROR", known_intermittent=["FAIL", "ERROR"])
+ logger.suite_end()
+ logger.shutdown()
+
+ # Check nothing got output to stdout/stderr.
+ # (Note that mozlog outputs exceptions during handling to stderr!)
+ captured = capfd.readouterr()
+ assert captured.out == ""
+ assert captured.err == ""
+
+ # Check the actual output of the formatter.
+ output.seek(0)
+ output_json = json.load(output)
+
+ test_obj = output_json["tests"]["t1"]
+ assert test_obj["actual"] == "FAIL"
+ # No duplicate "FAIL" in "expected".
+ assert test_obj["expected"] == "FAIL"
+
+
+def test_reftest_screenshots(logger, capfd):
+ # reftest_screenshots, if present, should be plumbed into artifacts.
+
+ # Set up the handler.
+ output = StringIO()
+ logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
+
+ # Run a reftest with reftest_screenshots.
+ logger.suite_start(["t1"], run_info={}, time=123)
+ logger.test_start("t1")
+ logger.test_end("t1", status="FAIL", expected="PASS", extra={
+ "reftest_screenshots": [
+ {"url": "foo.html", "hash": "HASH1", "screenshot": "DATA1"},
+ "!=",
+ {"url": "foo-ref.html", "hash": "HASH2", "screenshot": "DATA2"},
+ ]
+ })
+ logger.suite_end()
+ logger.shutdown()
+
+ # check nothing got output to stdout/stderr
+ # (note that mozlog outputs exceptions during handling to stderr!)
+ captured = capfd.readouterr()
+ assert captured.out == ""
+ assert captured.err == ""
+
+ # check the actual output of the formatter
+ output.seek(0)
+ output_json = json.load(output)
+
+ test_obj = output_json["tests"]["t1"]
+ assert test_obj["artifacts"]["screenshots"] == [
+ "foo.html: DATA1",
+ "foo-ref.html: DATA2",
+ ]
+
+
+def test_process_output_crashing_test(logger, capfd):
+ """Test that chromedriver logs are preserved for crashing tests"""
+
+ # Set up the handler.
+ output = StringIO()
+ logger.add_handler(handlers.StreamHandler(output, ChromiumFormatter()))
+
+ logger.suite_start(["t1", "t2", "t3"], run_info={}, time=123)
+
+ logger.test_start("t1")
+ logger.process_output(100, "This message should be recorded", "/some/path/to/chromedriver --some-flag")
+ logger.process_output(101, "This message should not be recorded", "/some/other/process --another-flag")
+ logger.process_output(100, "This message should also be recorded", "/some/path/to/chromedriver --some-flag")
+ logger.test_end("t1", status="CRASH", expected="CRASH")
+
+ logger.test_start("t2")
+ logger.process_output(100, "Another message for the second test", "/some/path/to/chromedriver --some-flag")
+ logger.test_end("t2", status="CRASH", expected="PASS")
+
+ logger.test_start("t3")
+ logger.process_output(100, "This test fails", "/some/path/to/chromedriver --some-flag")
+ logger.process_output(100, "But the output should not be captured", "/some/path/to/chromedriver --some-flag")
+ logger.process_output(100, "Because it does not crash", "/some/path/to/chromedriver --some-flag")
+ logger.test_end("t3", status="FAIL", expected="PASS")
+
+ logger.suite_end()
+ logger.shutdown()
+
+ # check nothing got output to stdout/stderr
+ # (note that mozlog outputs exceptions during handling to stderr!)
+ captured = capfd.readouterr()
+ assert captured.out == ""
+ assert captured.err == ""
+
+ # check the actual output of the formatter
+ output.seek(0)
+ output_json = json.load(output)
+
+ test_obj = output_json["tests"]["t1"]
+ assert test_obj["artifacts"]["wpt_crash_log"] == [
+ "This message should be recorded",
+ "This message should also be recorded"
+ ]
+
+ test_obj = output_json["tests"]["t2"]
+ assert test_obj["artifacts"]["wpt_crash_log"] == [
+ "Another message for the second test"
+ ]
+
+ test_obj = output_json["tests"]["t3"]
+ assert "wpt_crash_log" not in test_obj["artifacts"]
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/formatters/wptreport.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/formatters/wptreport.py
new file mode 100644
index 0000000000..5919631ab7
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/formatters/wptreport.py
@@ -0,0 +1,144 @@
+# mypy: allow-untyped-defs
+
+import json
+import re
+
+from mozlog.structured.formatters.base import BaseFormatter
+from ..executors.base import strip_server
+
+
+LONE_SURROGATE_RE = re.compile("[\uD800-\uDFFF]")
+
+
+def surrogate_replacement(match):
+ return "U+" + hex(ord(match.group()))[2:]
+
+
+def replace_lone_surrogate(data):
+ return LONE_SURROGATE_RE.subn(surrogate_replacement, data)[0]
+
+
+class WptreportFormatter(BaseFormatter): # type: ignore
+ """Formatter that produces results in the format that wptreport expects."""
+
+ def __init__(self):
+ self.raw_results = {}
+ self.results = {}
+
+ def suite_start(self, data):
+ self.results['run_info'] = data.get('run_info', {})
+ self.results['time_start'] = data['time']
+ self.results["results"] = []
+ self.results["subsuites"] = {}
+
+ def add_subsuite(self, data):
+ self.results["subsuites"][data["name"]] = data.get("run_info", {})
+
+ def suite_end(self, data):
+ self.results['time_end'] = data['time']
+ for subsuite, results in self.raw_results.items():
+ for test_name, result in results.items():
+ result["test"] = test_name
+ result["subsuite"] = subsuite
+ self.results["results"].append(result)
+ return json.dumps(self.results) + "\n"
+
+ def find_or_create_test(self, data):
+ subsuite = data.get("subsuite", "")
+ test_name = data["test"]
+ subsuite_results = self.raw_results.setdefault(subsuite, {})
+ return subsuite_results.setdefault(test_name, {"subtests": [],
+ "status": "",
+ "message": None})
+
+ def test_start(self, data):
+ test = self.find_or_create_test(data)
+ test["start_time"] = data["time"]
+
+ def create_subtest(self, data):
+ test = self.find_or_create_test(data)
+ subtest_name = replace_lone_surrogate(data["subtest"])
+
+ subtest = {
+ "name": subtest_name,
+ "status": "",
+ "message": None
+ }
+ test["subtests"].append(subtest)
+
+ return subtest
+
+ def test_status(self, data):
+ subtest = self.create_subtest(data)
+ subtest["status"] = data["status"]
+ if "expected" in data:
+ subtest["expected"] = data["expected"]
+ if "known_intermittent" in data:
+ subtest["known_intermittent"] = data["known_intermittent"]
+ if "message" in data:
+ subtest["message"] = replace_lone_surrogate(data["message"])
+
+ def test_end(self, data):
+ test = self.find_or_create_test(data)
+ start_time = test.pop("start_time")
+ test["duration"] = data["time"] - start_time
+ test["status"] = data["status"]
+ if "expected" in data:
+ test["expected"] = data["expected"]
+ if "known_intermittent" in data:
+ test["known_intermittent"] = data["known_intermittent"]
+ if "message" in data:
+ test["message"] = replace_lone_surrogate(data["message"])
+ if "reftest_screenshots" in data.get("extra", {}):
+ test["screenshots"] = {
+ strip_server(item["url"]): "sha1:" + item["hash"]
+ for item in data["extra"]["reftest_screenshots"]
+ if isinstance(item, dict)
+ }
+ test_name = data["test"]
+ subsuite = data.get("subsuite", "")
+ result = {"test": test_name,
+ "subsuite": subsuite}
+ result.update(self.raw_results[subsuite][test_name])
+ self.results["results"].append(result)
+ self.raw_results[subsuite].pop(test_name)
+
+ def assertion_count(self, data):
+ test = self.find_or_create_test(data)
+ test["asserts"] = {
+ "count": data["count"],
+ "min": data["min_expected"],
+ "max": data["max_expected"]
+ }
+
+ def lsan_leak(self, data):
+ if "lsan_leaks" not in self.results:
+ self.results["lsan_leaks"] = []
+ lsan_leaks = self.results["lsan_leaks"]
+ lsan_leaks.append({"frames": data["frames"],
+ "scope": data["scope"],
+ "allowed_match": data.get("allowed_match"),
+ "subsuite": data.get("subsuite", "")})
+
+ def find_or_create_mozleak(self, data):
+ if "mozleak" not in self.results:
+ self.results["mozleak"] = {}
+ scope = data["scope"]
+ if scope not in self.results["mozleak"]:
+ self.results["mozleak"][scope] = {"objects": [], "total": []}
+ return self.results["mozleak"][scope]
+
+ def mozleak_object(self, data):
+ scope_data = self.find_or_create_mozleak(data)
+ scope_data["objects"].append({"process": data["process"],
+ "name": data["name"],
+ "allowed": data.get("allowed", False),
+ "bytes": data["bytes"],
+ "subsuite": data.get("subsuite", "")})
+
+ def mozleak_total(self, data):
+ scope_data = self.find_or_create_mozleak(data)
+ scope_data["total"].append({"bytes": data["bytes"],
+ "threshold": data.get("threshold", 0),
+ "process": data["process"],
+ "subsuite": data.get("subsuite", "")})
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/formatters/wptscreenshot.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/formatters/wptscreenshot.py
new file mode 100644
index 0000000000..989559701a
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/formatters/wptscreenshot.py
@@ -0,0 +1,49 @@
+# mypy: allow-untyped-defs
+
+import requests
+from mozlog.structured.formatters.base import BaseFormatter
+
+DEFAULT_API = "https://wpt.fyi/api/screenshots/hashes"
+
+
+class WptscreenshotFormatter(BaseFormatter): # type: ignore
+ """Formatter that outputs screenshots in the format expected by wpt.fyi."""
+
+ def __init__(self, api=None):
+ self.api = api or DEFAULT_API
+ self.cache = set()
+
+ def suite_start(self, data):
+ # TODO(Hexcles): We might want to move the request into a different
+ # place, make it non-blocking, and handle errors better.
+ params = {}
+ run_info = data.get("run_info", {})
+ if "product" in run_info:
+ params["browser"] = run_info["product"]
+ if "browser_version" in run_info:
+ params["browser_version"] = run_info["browser_version"]
+ if "os" in run_info:
+ params["os"] = run_info["os"]
+ if "os_version" in run_info:
+ params["os_version"] = run_info["os_version"]
+ try:
+ r = requests.get(self.api, params=params)
+ r.raise_for_status()
+ self.cache = set(r.json())
+ except (requests.exceptions.RequestException, ValueError):
+ pass
+
+ def test_end(self, data):
+ if "reftest_screenshots" not in data.get("extra", {}):
+ return
+ output = ""
+ for item in data["extra"]["reftest_screenshots"]:
+ if not isinstance(item, dict):
+ # Skip the relation string.
+ continue
+ checksum = "sha1:" + item["hash"]
+ if checksum in self.cache:
+ continue
+ self.cache.add(checksum)
+ output += "data:image/png;base64,{}\n".format(item["screenshot"])
+ return output if output else None
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/instruments.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/instruments.py
new file mode 100644
index 0000000000..26df5fa29b
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/instruments.py
@@ -0,0 +1,121 @@
+# mypy: allow-untyped-defs
+
+import time
+import threading
+
+from . import mpcontext
+
+"""Instrumentation for measuring high-level time spent on various tasks inside the runner.
+
+This is lower fidelity than an actual profile, but allows custom data to be considered,
+so that we can see the time spent in specific tests and test directories.
+
+
+Instruments are intended to be used as context managers with the return value of __enter__
+containing the user-facing API e.g.
+
+with Instrument(*args) as recording:
+ recording.set(["init"])
+ do_init()
+ recording.pause()
+ for thread in test_threads:
+ thread.start(recording, *args)
+ for thread in test_threads:
+ thread.join()
+ recording.set(["teardown"]) # un-pauses the Instrument
+ do_teardown()
+"""
+
+class NullInstrument:
+ def set(self, stack):
+ """Set the current task to stack
+
+ :param stack: A list of strings defining the current task.
+ These are interpreted like a stack trace so that ["foo"] and
+ ["foo", "bar"] both show up as descendants of "foo"
+ """
+ pass
+
+ def pause(self):
+ """Stop recording a task on the current thread. This is useful if the thread
+ is purely waiting on the results of other threads"""
+ pass
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args, **kwargs):
+ return
+
+
+class InstrumentWriter:
+ def __init__(self, queue):
+ self.queue = queue
+
+ def set(self, stack):
+ stack.insert(0, threading.current_thread().name)
+ stack = self._check_stack(stack)
+ self.queue.put(("set", threading.current_thread().ident, time.time(), stack))
+
+ def pause(self):
+ self.queue.put(("pause", threading.current_thread().ident, time.time(), None))
+
+ def _check_stack(self, stack):
+ assert isinstance(stack, (tuple, list))
+ return [item.replace(" ", "_") for item in stack]
+
+
+class Instrument:
+ def __init__(self, file_path):
+ """Instrument that collects data from multiple threads and sums the time in each
+ thread. The output is in the format required by flamegraph.pl to enable visualisation
+ of the time spent in each task.
+
+ :param file_path: The path on which to write instrument output. Any existing file
+ at the path will be overwritten
+ """
+ self.path = file_path
+ self.queue = None
+ self.current = None
+ self.start_time = None
+ self.instrument_proc = None
+
+ def __enter__(self):
+ assert self.instrument_proc is None
+ assert self.queue is None
+ mp = mpcontext.get_context()
+ self.queue = mp.Queue()
+ self.instrument_proc = mp.Process(target=self.run)
+ self.instrument_proc.start()
+ return InstrumentWriter(self.queue)
+
+ def __exit__(self, *args, **kwargs):
+ self.queue.put(("stop", None, time.time(), None))
+ self.instrument_proc.join()
+ self.instrument_proc = None
+ self.queue = None
+
+ def run(self):
+ known_commands = {"stop", "pause", "set"}
+ with open(self.path, "w") as f:
+ thread_data = {}
+ while True:
+ command, thread, time_stamp, stack = self.queue.get()
+ assert command in known_commands
+
+ # If we are done recording, dump the information from all threads to the file
+ # before exiting. Otherwise for either 'set' or 'pause' we only need to dump
+ # information from the current stack (if any) that was recording on the reporting
+ # thread (as that stack is no longer active).
+ items = []
+ if command == "stop":
+ items = thread_data.values()
+ elif thread in thread_data:
+ items.append(thread_data.pop(thread))
+ for output_stack, start_time in items:
+ f.write("%s %d\n" % (";".join(output_stack), int(1000 * (time_stamp - start_time))))
+
+ if command == "set":
+ thread_data[thread] = (stack, time_stamp)
+ elif command == "stop":
+ break
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/manifestexpected.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/manifestexpected.py
new file mode 100644
index 0000000000..6c2f5c96c2
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/manifestexpected.py
@@ -0,0 +1,529 @@
+# mypy: allow-untyped-defs
+
+from collections import deque
+
+from .wptmanifest.backends import static
+from .wptmanifest.backends.base import ManifestItem
+
+from . import expected
+
+"""Manifest structure used to store expected results of a test.
+
+Each manifest file is represented by an ExpectedManifest that
+has one or more TestNode children, one per test in the manifest.
+Each TestNode has zero or more SubtestNode children, one for each
+known subtest of the test.
+"""
+
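+# A metadata file handled by this module looks roughly like this
+# (illustrative):
+#
+# [test.html]
+#   expected: FAIL
+#   [subtest name]
+#     expected: TIMEOUT
+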
+
+def data_cls_getter(output_node, visited_node):
+ # visited_node is intentionally unused
+ if output_node is None:
+ return ExpectedManifest
+ if isinstance(output_node, ExpectedManifest):
+ return TestNode
+ if isinstance(output_node, TestNode):
+ return SubtestNode
+ raise ValueError
+
+
+def bool_prop(name, node):
+ """Boolean property"""
+ try:
+ return bool(node.get(name))
+ except KeyError:
+ return None
+
+
+def int_prop(name, node):
+ """Boolean property"""
+ try:
+ return int(node.get(name))
+ except KeyError:
+ return None
+
+
+def list_prop(name, node):
+ """List property"""
+ try:
+ list_prop = node.get(name)
+ if isinstance(list_prop, str):
+ return [list_prop]
+ return list(list_prop)
+ except KeyError:
+ return []
+
+
+def str_prop(name, node):
+ try:
+ prop = node.get(name)
+ if not isinstance(prop, str):
+ raise ValueError
+ return prop
+ except KeyError:
+ return None
+
+
+def tags(node):
+ """Set of tags that have been applied to the test"""
+ try:
+ value = node.get("tags")
+ if isinstance(value, str):
+ return {value}
+ return set(value)
+ except KeyError:
+ return set()
+
+
+def prefs(node):
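+ # Pref values are "name:value" strings, e.g. (illustrative)
+ # "dom.webgpu.enabled:true" parses to ("dom.webgpu.enabled", "true").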
+ def value(ini_value):
+ if isinstance(ini_value, str):
+ return tuple(pref_piece.strip() for pref_piece in ini_value.split(':', 1))
+ else:
+ # this should be things like @Reset, which are apparently type 'object'
+ return (ini_value, None)
+
+ try:
+ node_prefs = node.get("prefs")
+ if isinstance(node_prefs, str):
+ rv = dict(value(node_prefs))
+ else:
+ rv = dict(value(item) for item in node_prefs)
+ except KeyError:
+ rv = {}
+ return rv
+
+
+def set_prop(name, node):
+ try:
+ node_items = node.get(name)
+ if isinstance(node_items, str):
+ rv = {node_items}
+ else:
+ rv = set(node_items)
+ except KeyError:
+ rv = set()
+ return rv
+
+
+def leak_threshold(node):
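+ # Threshold entries are "process:bytes" strings, e.g. (illustrative)
+ # "default:102400" producing {"default": 102400}.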
+ rv = {}
+ try:
+ node_items = node.get("leak-threshold")
+ if isinstance(node_items, str):
+ node_items = [node_items]
+ for item in node_items:
+ process, value = item.rsplit(":", 1)
+ rv[process.strip()] = int(value.strip())
+ except KeyError:
+ pass
+ return rv
+
+
+def fuzzy_prop(node):
+ """Fuzzy reftest match
+
+ This can either be a list of strings or a single string. When a list is
+ supplied, the format of each item matches the description below.
+
+ The general format is
+ fuzzy = [key ":"] <prop> ";" <prop>
+ key = <test name> [reftype <reference name>]
+ reftype = "==" | "!="
+ prop = [propName "=" ] range
+ propName = "maxDifference" | "totalPixels"
+ range = <digits> ["-" <digits>]
+
+ So for example:
+ maxDifference=10;totalPixels=10-20
+
+ specifies that for any test/ref pair for which no other rule is supplied,
+ there must be a maximum pixel difference of exactly 10, and between 10 and
+ 20 total pixels different.
+
+ test.html==ref.htm:10;20
+
+ specifies that for an equality comparison between test.html and ref.htm,
+ resolved relative to the test path, there can be a maximum difference
+ of 10 in the pixel value for any channel and 20 pixels total difference.
+
+ ref.html:10;20
+
+ is just like the above but applies to any comparison involving ref.html
+ on the right hand side.
+
+ The return format is [(key, (maxDifferenceRange, totalPixelsRange))], where
+ the key is either None where no specific reference is specified, the reference
+ name where there is only one component or a tuple (test, ref, reftype) when the
+ exact comparison is specified. maxDifferenceRange and totalPixelsRange are tuples
+ of integers indicating the inclusive range of allowed values.
+"""
+ rv = []
+ args = ["maxDifference", "totalPixels"]
+ try:
+ value = node.get("fuzzy")
+ except KeyError:
+ return rv
+ if not isinstance(value, list):
+ value = [value]
+ for item in value:
+ if not isinstance(item, str):
+ rv.append(item)
+ continue
+ parts = item.rsplit(":", 1)
+ if len(parts) == 1:
+ key = None
+ fuzzy_values = parts[0]
+ else:
+ key, fuzzy_values = parts
+ for reftype in ["==", "!="]:
+ if reftype in key:
+ key = key.split(reftype)
+ key.append(reftype)
+ key = tuple(key)
+ ranges = fuzzy_values.split(";")
+ if len(ranges) != 2:
+ raise ValueError("Malformed fuzzy value %s" % item)
+ arg_values = {None: deque()}
+ for range_str_value in ranges:
+ if "=" in range_str_value:
+ name, range_str_value = (part.strip()
+ for part in range_str_value.split("=", 1))
+ if name not in args:
+ raise ValueError("%s is not a valid fuzzy property" % name)
+ if arg_values.get(name):
+ raise ValueError("Got multiple values for argument %s" % name)
+ else:
+ name = None
+ if "-" in range_str_value:
+ range_min, range_max = range_str_value.split("-")
+ else:
+ range_min = range_str_value
+ range_max = range_str_value
+ try:
+ range_value = tuple(int(item.strip()) for item in (range_min, range_max))
+ except ValueError:
+ raise ValueError("Fuzzy value %s must be a range of integers" % range_str_value)
+ if name is None:
+ arg_values[None].append(range_value)
+ else:
+ arg_values[name] = range_value
+ range_values = []
+ for arg_name in args:
+ if arg_values.get(arg_name):
+ value = arg_values.pop(arg_name)
+ else:
+ value = arg_values[None].popleft()
+ range_values.append(value)
+ rv.append((key, tuple(range_values)))
+ return rv
+
+
+class ExpectedManifest(ManifestItem):
+ def __init__(self, node, test_path):
+ """Object representing all the tests in a particular manifest
+
+ :param node: AST node associated with this object. Its data (the name)
+ should always be None, since this object should always be associated
+ with the root node of the AST.
+ :param test_path: Path of the test file associated with this manifest.
+ """
+ name = node.data
+ if name is not None:
+ raise ValueError("ExpectedManifest should represent the root node")
+ if test_path is None:
+ raise ValueError("ExpectedManifest requires a test path")
+ ManifestItem.__init__(self, node)
+ self.child_map = {}
+ self.test_path = test_path
+
+ def append(self, child):
+ """Add a test to the manifest"""
+ ManifestItem.append(self, child)
+ self.child_map[child.id] = child
+
+ def _remove_child(self, child):
+ del self.child_map[child.id]
+ ManifestItem.remove_child(self, child)
+ assert len(self.child_map) == len(self.children)
+
+ def get_test(self, test_id):
+ """Get a test from the manifest by ID
+
+ :param test_id: ID of the test to return."""
+ return self.child_map.get(test_id)
+
+ @property
+ def disabled(self):
+ return bool_prop("disabled", self)
+
+ @property
+ def restart_after(self):
+ return bool_prop("restart-after", self)
+
+ @property
+ def leaks(self):
+ return bool_prop("leaks", self)
+
+ @property
+ def min_assertion_count(self):
+ return int_prop("min-asserts", self)
+
+ @property
+ def max_assertion_count(self):
+ return int_prop("max-asserts", self)
+
+ @property
+ def tags(self):
+ return tags(self)
+
+ @property
+ def prefs(self):
+ return prefs(self)
+
+ @property
+ def lsan_disabled(self):
+ return bool_prop("lsan-disabled", self)
+
+ @property
+ def lsan_allowed(self):
+ return set_prop("lsan-allowed", self)
+
+ @property
+ def leak_allowed(self):
+ return set_prop("leak-allowed", self)
+
+ @property
+ def leak_threshold(self):
+ return leak_threshold(self)
+
+ @property
+ def lsan_max_stack_depth(self):
+ return int_prop("lsan-max-stack-depth", self)
+
+ @property
+ def fuzzy(self):
+ return fuzzy_prop(self)
+
+ @property
+ def expected(self):
+ return list_prop("expected", self)[0]
+
+ @property
+ def known_intermittent(self):
+ return list_prop("expected", self)[1:]
+
+ @property
+ def implementation_status(self):
+ return str_prop("implementation-status", self)
+
+
+class DirectoryManifest(ManifestItem):
+ @property
+ def disabled(self):
+ return bool_prop("disabled", self)
+
+ @property
+ def restart_after(self):
+ return bool_prop("restart-after", self)
+
+ @property
+ def leaks(self):
+ return bool_prop("leaks", self)
+
+ @property
+ def min_assertion_count(self):
+ return int_prop("min-asserts", self)
+
+ @property
+ def max_assertion_count(self):
+ return int_prop("max-asserts", self)
+
+ @property
+ def tags(self):
+ return tags(self)
+
+ @property
+ def prefs(self):
+ return prefs(self)
+
+ @property
+ def lsan_disabled(self):
+ return bool_prop("lsan-disabled", self)
+
+ @property
+ def lsan_allowed(self):
+ return set_prop("lsan-allowed", self)
+
+ @property
+ def leak_allowed(self):
+ return set_prop("leak-allowed", self)
+
+ @property
+ def leak_threshold(self):
+ return leak_threshold(self)
+
+ @property
+ def lsan_max_stack_depth(self):
+ return int_prop("lsan-max-stack-depth", self)
+
+ @property
+ def fuzzy(self):
+ return fuzzy_prop(self)
+
+ @property
+ def implementation_status(self):
+ return str_prop("implementation-status", self)
+
+
+class TestNode(ManifestItem):
+ def __init__(self, node, **kwargs):
+ """Tree node associated with a particular test in a manifest
+
+ :param node: AST node associated with this test"""
+ assert node.data is not None
+ ManifestItem.__init__(self, node, **kwargs)
+ self.updated_expected = []
+ self.new_expected = []
+ self.subtests = {}
+ self.default_status = None
+ self._from_file = True
+
+ @property
+ def is_empty(self):
+ required_keys = {"type"}
+ if set(self._data.keys()) != required_keys:
+ return False
+ return all(child.is_empty for child in self.children)
+
+ @property
+ def test_type(self):
+ return self.get("type")
+
+ @property
+ def id(self):
+ return self.name
+
+ @property
+ def disabled(self):
+ return bool_prop("disabled", self)
+
+ @property
+ def restart_after(self):
+ return bool_prop("restart-after", self)
+
+ @property
+ def leaks(self):
+ return bool_prop("leaks", self)
+
+ @property
+ def min_assertion_count(self):
+ return int_prop("min-asserts", self)
+
+ @property
+ def max_assertion_count(self):
+ return int_prop("max-asserts", self)
+
+ @property
+ def tags(self):
+ return tags(self)
+
+ @property
+ def prefs(self):
+ return prefs(self)
+
+ @property
+ def lsan_disabled(self):
+ return bool_prop("lsan-disabled", self)
+
+ @property
+ def lsan_allowed(self):
+ return set_prop("lsan-allowed", self)
+
+ @property
+ def leak_allowed(self):
+ return set_prop("leak-allowed", self)
+
+ @property
+ def leak_threshold(self):
+ return leak_threshold(self)
+
+ @property
+ def lsan_max_stack_depth(self):
+ return int_prop("lsan-max-stack-depth", self)
+
+ @property
+ def fuzzy(self):
+ return fuzzy_prop(self)
+
+ @property
+ def expected(self):
+ return list_prop("expected", self)[0]
+
+ @property
+ def known_intermittent(self):
+ return list_prop("expected", self)[1:]
+
+ @property
+ def implementation_status(self):
+ return str_prop("implementation-status", self)
+
+ def append(self, node):
+ """Add a subtest to the current test
+
+ :param node: AST Node associated with the subtest"""
+ child = ManifestItem.append(self, node)
+ self.subtests[child.name] = child
+
+ def get_subtest(self, name):
+ """Get the SubtestNode corresponding to a particular subtest, by name
+
+ :param name: Name of the node to return"""
+ if name in self.subtests:
+ return self.subtests[name]
+ return None
+
+
+class SubtestNode(TestNode):
+ @property
+ def is_empty(self):
+ if self._data:
+ return False
+ return True
+
+
+def get_manifest(metadata_root, test_path, run_info):
+ """Get the ExpectedManifest for a particular test path, or None if there is no
+ metadata stored for that test path.
+
+ :param metadata_root: Absolute path to the root of the metadata directory
+ :param test_path: Path to the test(s) relative to the test root
+ :param run_info: Dictionary of properties of the test run for which the expectation
+ values should be computed.
+ """
+ manifest_path = expected.expected_path(metadata_root, test_path)
+ try:
+ with open(manifest_path, "rb") as f:
+ return static.compile(f,
+ run_info,
+ data_cls_getter=data_cls_getter,
+ test_path=test_path)
+ except OSError:
+ return None
+
+
+def get_dir_manifest(path, run_info):
+ """Get the ExpectedManifest for a particular test path, or None if there is no
+ metadata stored for that test path.
+
+ :param path: Full path to the ini file
+ :param run_info: Dictionary of properties of the test run for which the expectation
+ values should be computed.
+ """
+ try:
+ with open(path, "rb") as f:
+ return static.compile(f,
+ run_info,
+ data_cls_getter=lambda x,y: DirectoryManifest)
+ except OSError:
+ return None
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/manifestinclude.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/manifestinclude.py
new file mode 100644
index 0000000000..89031d8fb0
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/manifestinclude.py
@@ -0,0 +1,156 @@
+# mypy: allow-untyped-defs
+
+"""Manifest structure used to store paths that should be included in a test run.
+
+The manifest is represented by a tree of IncludeManifest objects, the root
+representing the file and each subnode representing a subdirectory that should
+be included or excluded.
+"""
+import glob
+import os
+from urllib.parse import urlparse, urlsplit
+
+from .wptmanifest.node import DataNode
+from .wptmanifest.backends import conditional
+from .wptmanifest.backends.conditional import ManifestItem
+
+
+class IncludeManifest(ManifestItem):
+ def __init__(self, node):
+ """Node in a tree structure representing the paths
+ that should be included or excluded from the test run.
+
+ :param node: AST Node corresponding to this Node.
+ """
+ ManifestItem.__init__(self, node)
+ self.child_map = {}
+
+ @classmethod
+ def create(cls):
+ """Create an empty IncludeManifest tree"""
+ node = DataNode(None)
+ return cls(node)
+
+ def set_defaults(self):
+ if not self.has_key("skip"):
+ self.set("skip", "False")
+
+ def append(self, child):
+ ManifestItem.append(self, child)
+ self.child_map[child.name] = child
+ assert len(self.child_map) == len(self.children)
+
+ def include(self, test):
+ """Return a boolean indicating whether a particular test should be
+ included in a test run, based on the IncludeManifest tree rooted on
+ this object.
+
+ :param test: The test object"""
+ path_components = self._get_components(test.url)
+ return self._include(test, path_components)
+
+ def _include(self, test, path_components):
+ if path_components:
+ next_path_part = path_components.pop()
+ if next_path_part in self.child_map:
+ return self.child_map[next_path_part]._include(test, path_components)
+
+ node = self
+ while node:
+ try:
+                skip_value = node.get("skip", {"test_type": test.item_type}).lower()
+ assert skip_value in ("true", "false")
+ return skip_value != "true"
+ except KeyError:
+ if node.parent is not None:
+ node = node.parent
+ else:
+ # Include by default
+ return True
+
+ def _get_components(self, url):
+ rv = []
+ url_parts = urlsplit(url)
+ variant = ""
+ if url_parts.query:
+ variant += "?" + url_parts.query
+ if url_parts.fragment:
+ variant += "#" + url_parts.fragment
+ if variant:
+ rv.append(variant)
+ rv.extend([item for item in reversed(url_parts.path.split("/")) if item])
+ return rv
+
+ def _add_rule(self, test_manifests, url, direction):
+ maybe_path = os.path.join(os.path.abspath(os.curdir), url)
+ rest, last = os.path.split(maybe_path)
+ fragment = query = None
+ if "#" in last:
+ last, fragment = last.rsplit("#", 1)
+ if "?" in last:
+ last, query = last.rsplit("?", 1)
+
+ maybe_path = os.path.join(rest, last)
+ paths = glob.glob(maybe_path)
+
+ if paths:
+ urls = []
+ for path in paths:
+ for manifest, data in test_manifests.items():
+ found = False
+ rel_path = os.path.relpath(path, data["tests_path"])
+ iterator = manifest.iterpath if os.path.isfile(path) else manifest.iterdir
+ for test in iterator(rel_path):
+ if not hasattr(test, "url"):
+ continue
+ url = test.url
+ if query or fragment:
+ parsed = urlparse(url)
+ if ((query and query != parsed.query) or
+ (fragment and fragment != parsed.fragment)):
+ continue
+ urls.append(url)
+ found = True
+ if found:
+ break
+ else:
+ urls = [url]
+
+ assert direction in ("include", "exclude")
+
+ for url in urls:
+ components = self._get_components(url)
+
+ node = self
+ while components:
+ component = components.pop()
+ if component not in node.child_map:
+ new_node = IncludeManifest(DataNode(component))
+ node.append(new_node)
+ new_node.set("skip", node.get("skip", {}))
+
+ node = node.child_map[component]
+
+ skip = False if direction == "include" else True
+ node.set("skip", str(skip))
+
+ def add_include(self, test_manifests, url_prefix):
+ """Add a rule indicating that tests under a url path
+ should be included in test runs
+
+ :param url_prefix: The url prefix to include
+ """
+ return self._add_rule(test_manifests, url_prefix, "include")
+
+ def add_exclude(self, test_manifests, url_prefix):
+ """Add a rule indicating that tests under a url path
+ should be excluded from test runs
+
+ :param url_prefix: The url prefix to exclude
+ """
+ return self._add_rule(test_manifests, url_prefix, "exclude")
+
+
+def get_manifest(manifest_path):
+ with open(manifest_path, "rb") as f:
+ return conditional.compile(f, data_cls_getter=lambda x, y: IncludeManifest)
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/manifestupdate.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/manifestupdate.py
new file mode 100644
index 0000000000..ebab2493a7
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/manifestupdate.py
@@ -0,0 +1,992 @@
+# mypy: allow-untyped-defs
+
+import os
+from urllib.parse import urljoin, urlsplit
+from collections import Counter, namedtuple, defaultdict, deque
+from math import ceil
+from typing import Any, Callable, ClassVar, Dict, List, Optional
+
+from .wptmanifest import serialize
+from .wptmanifest.node import (DataNode, ConditionalNode, BinaryExpressionNode,
+ BinaryOperatorNode, NumberNode, StringNode, VariableNode,
+ ValueNode, UnaryExpressionNode, UnaryOperatorNode,
+ ListNode)
+from .wptmanifest.backends import conditional
+from .wptmanifest.backends.conditional import ManifestItem
+
+from . import expected
+from . import expectedtree
+
+"""Manifest structure used to update the expected results of a test
+
+Each manifest file is represented by an ExpectedManifest that has one
+or more TestNode children, one per test in the manifest. Each
+TestNode has zero or more SubtestNode children, one for each known
+subtest of the test.
+
+In these representations, conditional expressions in the manifest are
+not evaluated upfront but stored as Python functions to be evaluated
+at runtime.
+
+When a result for a test is to be updated, set_result on the
+[Sub]TestNode is called to store the new result, alongside the
+existing conditional that result's run info matched, if any. Once all
+new results are known, update is called to compute the new
+set of results and conditionals. The AST of the underlying parsed manifest
+is updated with the changes, and the result is serialised to a file.
+"""
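+
+# Rough sketch of how this module is typically driven (see metadata.py for the
+# real call sites); the run_info dict, the result objects and the property
+# configuration below are assumptions used purely for illustration, and
+# get_manifest returns None when no metadata file exists yet:
+#
+#     expected = get_manifest(metadata_root, test_path, url_base,
+#                             run_info_properties=(["os"], {}),
+#                             update_intermittent=False,
+#                             remove_intermittent=False)
+#     if not expected.has_test(test_id):
+#         expected.append(TestNode.create(test_id))
+#     test = expected.get_test(test_id)
+#     test.set_result(run_info, result)  # once per observed result
+#     expected.update(full_update=False, disable_intermittent=None)
+#     updated_ini = serialize(expected.node)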
+
+
+class ConditionError(Exception):
+ def __init__(self, cond=None):
+ self.cond = cond
+
+
+class UpdateError(Exception):
+ pass
+
+
+Value = namedtuple("Value", ["run_info", "value"])
+
+
+def data_cls_getter(output_node, visited_node):
+ # visited_node is intentionally unused
+ if output_node is None:
+ return ExpectedManifest
+ elif isinstance(output_node, ExpectedManifest):
+ return TestNode
+ elif isinstance(output_node, TestNode):
+ return SubtestNode
+ else:
+ raise ValueError
+
+def get_test_name(test_id):
+    # test name is base name of test path + query string + fragment
+ return test_id[len(urlsplit(test_id).path.rsplit("/", 1)[0]) + 1:]
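+# For example (illustrative id): get_test_name("/dom/idlharness.any.html?exclude=Node")
+# returns "idlharness.any.html?exclude=Node".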
+
+
+class UpdateProperties:
+ def __init__(self, manifest, **kwargs):
+ self._manifest = manifest
+ self._classes = kwargs
+
+ def __getattr__(self, name):
+ if name in self._classes:
+ rv = self._classes[name](self._manifest)
+ setattr(self, name, rv)
+ return rv
+ raise AttributeError
+
+ def __contains__(self, name):
+ return name in self._classes
+
+ def __iter__(self):
+ for name in self._classes.keys():
+ yield getattr(self, name)
+
+
+class ExpectedManifest(ManifestItem):
+ def __init__(self, node, test_path, url_base, run_info_properties,
+ update_intermittent=False, remove_intermittent=False):
+ """Object representing all the tests in a particular manifest
+
+ :param node: AST Node associated with this object. If this is None,
+ a new AST is created to associate with this manifest.
+ :param test_path: Path of the test file associated with this manifest.
+ :param url_base: Base url for serving the tests in this manifest.
+ :param run_info_properties: Tuple of ([property name],
+ {property_name: [dependent property]})
+ The first part lists run_info properties
+ that are always used in the update, the second
+ maps property names to additional properties that
+ can be considered if we already have a condition on
+ the key property e.g. {"foo": ["bar"]} means that
+ we consider making conditions on bar only after we
+ already made one on foo.
+ :param update_intermittent: When True, intermittent statuses will be recorded
+ as `expected` in the test metadata.
+        :param remove_intermittent: When True, old intermittent statuses will be removed
+                                    if no longer intermittent. This is only relevant if
+                                    `update_intermittent` is also True, because if False,
+                                    the metadata will simply update one `expected` status.
+ """
+ if node is None:
+ node = DataNode(None)
+ ManifestItem.__init__(self, node)
+ self.child_map = {}
+ self.test_path = test_path
+ self.url_base = url_base
+ assert self.url_base is not None
+ self._modified = False
+ self.run_info_properties = run_info_properties
+ self.update_intermittent = update_intermittent
+ self.remove_intermittent = remove_intermittent
+ self.update_properties = UpdateProperties(self, **{
+ "lsan": LsanUpdate,
+ "leak_object": LeakObjectUpdate,
+ "leak_threshold": LeakThresholdUpdate,
+ })
+
+ @property
+ def modified(self):
+ if self._modified:
+ return True
+ return any(item.modified for item in self.children)
+
+ @modified.setter
+ def modified(self, value):
+ self._modified = value
+
+ def append(self, child):
+ ManifestItem.append(self, child)
+ if child.id in self.child_map:
+ print("Warning: Duplicate heading %s" % child.id)
+ self.child_map[child.id] = child
+
+ def _remove_child(self, child):
+ del self.child_map[child.id]
+ ManifestItem._remove_child(self, child)
+
+ def get_test(self, test_id):
+ """Return a TestNode by test id, or None if no test matches
+
+ :param test_id: The id of the test to look up"""
+
+ return self.child_map.get(test_id)
+
+ def has_test(self, test_id):
+ """Boolean indicating whether the current test has a known child test
+ with id test id
+
+ :param test_id: The id of the test to look up"""
+
+ return test_id in self.child_map
+
+ @property
+ def url(self):
+ return urljoin(self.url_base,
+ "/".join(self.test_path.split(os.path.sep)))
+
+ def set_lsan(self, run_info, result):
+ """Set the result of the test in a particular run
+
+ :param run_info: Dictionary of run_info parameters corresponding
+ to this run
+ :param result: Lsan violations detected"""
+ self.update_properties.lsan.set(run_info, result)
+
+ def set_leak_object(self, run_info, result):
+ """Set the result of the test in a particular run
+
+ :param run_info: Dictionary of run_info parameters corresponding
+ to this run
+        :param result: Leaked objects detected"""
+ self.update_properties.leak_object.set(run_info, result)
+
+ def set_leak_threshold(self, run_info, result):
+ """Set the result of the test in a particular run
+
+ :param run_info: Dictionary of run_info parameters corresponding
+ to this run
+ :param result: Total number of bytes leaked"""
+ self.update_properties.leak_threshold.set(run_info, result)
+
+ def update(self, full_update, disable_intermittent):
+ for prop_update in self.update_properties:
+ prop_update.update(full_update,
+ disable_intermittent)
+
+
+class TestNode(ManifestItem):
+ def __init__(self, node):
+ """Tree node associated with a particular test in a manifest
+
+ :param node: AST node associated with the test"""
+
+ ManifestItem.__init__(self, node)
+ self.subtests = {}
+ self._from_file = True
+ self.new_disabled = False
+ self.has_result = False
+ self._modified = False
+ self.update_properties = UpdateProperties(
+ self,
+ expected=ExpectedUpdate,
+ max_asserts=MaxAssertsUpdate,
+ min_asserts=MinAssertsUpdate
+ )
+
+ @classmethod
+ def create(cls, test_id):
+ """Create a TestNode corresponding to a given test
+
+        :param test_id: The id of the test"""
+ name = get_test_name(test_id)
+ node = DataNode(name)
+ self = cls(node)
+
+ self._from_file = False
+ return self
+
+ @property
+ def is_empty(self):
+ ignore_keys = {"type"}
+ if set(self._data.keys()) - ignore_keys:
+ return False
+ return all(child.is_empty for child in self.children)
+
+ @property
+ def test_type(self):
+ """The type of the test represented by this TestNode"""
+ return self.get("type", None)
+
+ @property
+ def id(self):
+ """The id of the test represented by this TestNode"""
+ return urljoin(self.parent.url, self.name)
+
+ @property
+ def modified(self):
+ if self._modified:
+ return self._modified
+ return any(child.modified for child in self.children)
+
+ @modified.setter
+ def modified(self, value):
+ self._modified = value
+
+ def disabled(self, run_info):
+ """Boolean indicating whether this test is disabled when run in an
+ environment with the given run_info
+
+ :param run_info: Dictionary of run_info parameters"""
+
+ return self.get("disabled", run_info) is not None
+
+ def set_result(self, run_info, result):
+ """Set the result of the test in a particular run
+
+ :param run_info: Dictionary of run_info parameters corresponding
+ to this run
+ :param result: Status of the test in this run"""
+ self.update_properties.expected.set(run_info, result)
+
+    def set_asserts(self, run_info, count):
+        """Set the assert count of a test in a particular run
+
+        :param run_info: Dictionary of run_info parameters corresponding
+            to this run
+        :param count: Number of asserts recorded for the test in this run"""
+ self.update_properties.min_asserts.set(run_info, count)
+ self.update_properties.max_asserts.set(run_info, count)
+
+ def append(self, node):
+ child = ManifestItem.append(self, node)
+ self.subtests[child.name] = child
+
+ def get_subtest(self, name):
+ """Return a SubtestNode corresponding to a particular subtest of
+ the current test, creating a new one if no subtest with that name
+ already exists.
+
+ :param name: Name of the subtest"""
+
+ if name in self.subtests:
+ return self.subtests[name]
+ else:
+ subtest = SubtestNode.create(name)
+ self.append(subtest)
+ return subtest
+
+ def update(self, full_update, disable_intermittent):
+ for prop_update in self.update_properties:
+ prop_update.update(full_update,
+ disable_intermittent)
+
+
+class SubtestNode(TestNode):
+ def __init__(self, node):
+ assert isinstance(node, DataNode)
+ TestNode.__init__(self, node)
+
+ @classmethod
+ def create(cls, name):
+ node = DataNode(name)
+ self = cls(node)
+ return self
+
+ @property
+ def is_empty(self):
+ if self._data:
+ return False
+ return True
+
+
+def build_conditional_tree(_, run_info_properties, results):
+ properties, dependent_props = run_info_properties
+ return expectedtree.build_tree(properties, dependent_props, results)
+
+
+def build_unconditional_tree(_, run_info_properties, results):
+ root = expectedtree.Node(None, None)
+ for run_info, values in results.items():
+ for value, count in values.items():
+ root.result_values[value] += count
+ root.run_info.add(run_info)
+ return root
+
+
+class PropertyUpdate:
+ property_name: ClassVar[str]
+ cls_default_value: ClassVar[Optional[Any]] = None
+ value_type: ClassVar[Optional[type]] = None
+ # property_builder is a class variable set to either build_conditional_tree
+ # or build_unconditional_tree. TODO: Make this type stricter when those
+ # methods are annotated.
+ property_builder: ClassVar[Callable[..., Any]]
+
+ def __init__(self, node):
+ self.node = node
+ self.default_value = self.cls_default_value
+ self.has_result = False
+ self.results = defaultdict(lambda: defaultdict(int))
+ self.update_intermittent = self.node.root.update_intermittent
+ self.remove_intermittent = self.node.root.remove_intermittent
+
+ def run_info_by_condition(self, run_info_index, conditions):
+ run_info_by_condition = defaultdict(list)
+ # A condition might match 0 or more run_info values
+ run_infos = run_info_index.keys()
+ for cond in conditions:
+ for run_info in run_infos:
+ if cond(run_info):
+ run_info_by_condition[cond].append(run_info)
+
+ return run_info_by_condition
+
+ def set(self, run_info, value):
+ self.has_result = True
+ self.node.has_result = True
+ self.check_default(value)
+ value = self.from_result_value(value)
+ self.results[run_info][value] += 1
+
+ def check_default(self, result):
+ return
+
+ def from_result_value(self, value):
+ """Convert a value from a test result into the internal format"""
+ return value
+
+ def from_ini_value(self, value):
+ """Convert a value from an ini file into the internal format"""
+ if self.value_type:
+ return self.value_type(value)
+ return value
+
+ def to_ini_value(self, value):
+ """Convert a value from the internal format to the ini file format"""
+ return str(value)
+
+ def updated_value(self, current, new):
+ """Given a single current value and a set of observed new values,
+ compute an updated value for the property"""
+ return new
+
+ @property
+ def unconditional_value(self):
+ try:
+ unconditional_value = self.from_ini_value(
+ self.node.get(self.property_name))
+ except KeyError:
+ unconditional_value = self.default_value
+ return unconditional_value
+
+ def update(self,
+ full_update=False,
+ disable_intermittent=None):
+ """Update the underlying manifest AST for this test based on all the
+ added results.
+
+ This will update existing conditionals if they got the same result in
+ all matching runs in the updated results, will delete existing conditionals
+ that get more than one different result in the updated run, and add new
+ conditionals for anything that doesn't match an existing conditional.
+
+ Conditionals not matched by any added result are not changed.
+
+ When `disable_intermittent` is not None, disable any test that shows multiple
+ unexpected results for the same set of parameters.
+ """
+ if not self.has_result:
+ return
+
+ property_tree = self.property_builder(self.node.root.run_info_properties,
+ self.results)
+
+ conditions, errors = self.update_conditions(property_tree,
+ full_update)
+
+ for e in errors:
+ if disable_intermittent:
+ condition = e.cond.children[0] if e.cond else None
+ msg = disable_intermittent if isinstance(disable_intermittent, str) else "unstable"
+ self.node.set("disabled", msg, condition)
+ self.node.new_disabled = True
+ else:
+ msg = "Conflicting metadata values for %s" % (
+ self.node.root.test_path)
+ if e.cond:
+ msg += ": %s" % serialize(e.cond).strip()
+ print(msg)
+
+ # If all the values match remove all conditionals
+ # This handles the case where we update a number of existing conditions and they
+ # all end up looking like the post-update default.
+ new_default = self.default_value
+ if conditions and conditions[-1][0] is None:
+ new_default = conditions[-1][1]
+ if all(condition[1] == new_default for condition in conditions):
+ conditions = [(None, new_default)]
+
+ # Don't set the default to the class default
+ if (conditions and
+ conditions[-1][0] is None and
+ conditions[-1][1] == self.default_value):
+ self.node.modified = True
+ conditions = conditions[:-1]
+
+ if self.node.modified:
+ self.node.clear(self.property_name)
+
+ for condition, value in conditions:
+ self.node.set(self.property_name,
+ self.to_ini_value(value),
+ condition)
+
+ def update_conditions(self,
+ property_tree,
+ full_update):
+ # This is complicated because the expected behaviour is complex
+ # The complexity arises from the fact that there are two ways of running
+ # the tool, with a full set of runs (full_update=True) or with partial metadata
+ # (full_update=False). In the case of a full update things are relatively simple:
+ # * All existing conditionals are ignored, with the exception of conditionals that
+ # depend on variables not used by the updater, which are retained as-is
+ # * All created conditionals are independent of each other (i.e. order isn't
+ # important in the created conditionals)
+ # In the case where we don't have a full set of runs, the expected behaviour
+ # is much less clear. This is of course the common case for when a developer
+ # runs the test on their own machine. In this case the assumptions above are untrue
+ # * The existing conditions may be required to handle other platforms
+ # * The order of the conditions may be important, since we don't know if they overlap
+ # e.g. `if os == linux and version == 18.04` overlaps with `if (os != win)`.
+ # So in the case we have a full set of runs, the process is pretty simple:
+ # * Generate the conditionals for the property_tree
+ # * Pick the most common value as the default and add only those conditions
+ # not matching the default
+ # In the case where we have a partial set of runs, things are more complex
+ # and more best-effort
+ # * For each existing conditional, see if it matches any of the run info we
+ # have. In cases where it does match, record the new results
+ # * Where all the new results match, update the right hand side of that
+ # conditional, otherwise remove it
+ # * If this leaves nothing existing, then proceed as with the full update
+ # * Otherwise add conditionals for the run_info that doesn't match any
+ # remaining conditions
+ prev_default = None
+
+ current_conditions = self.node.get_conditions(self.property_name)
+
+ # Ignore the current default value
+ if current_conditions and current_conditions[-1].condition_node is None:
+ self.node.modified = True
+ prev_default = current_conditions[-1].value
+ current_conditions = current_conditions[:-1]
+
+ # If there aren't any current conditions, or there is just a default
+ # value for all run_info, proceed as for a full update
+ if not current_conditions:
+ return self._update_conditions_full(property_tree,
+ prev_default=prev_default)
+
+ conditions = []
+ errors = []
+
+ run_info_index = {run_info: node
+ for node in property_tree
+ for run_info in node.run_info}
+
+ node_by_run_info = {run_info: node
+ for (run_info, node) in run_info_index.items()
+ if node.result_values}
+
+ run_info_by_condition = self.run_info_by_condition(run_info_index,
+ current_conditions)
+
+ run_info_with_condition = set()
+
+ if full_update:
+ # Even for a full update we need to keep hand-written conditions not
+ # using the properties we've specified and not matching any run_info
+ top_level_props, dependent_props = self.node.root.run_info_properties
+ update_properties = set(top_level_props)
+ for item in dependent_props.values():
+ update_properties |= set(item)
+ for condition in current_conditions:
+ if (not condition.variables.issubset(update_properties) and
+ not run_info_by_condition[condition]):
+ conditions.append((condition.condition_node,
+ self.from_ini_value(condition.value)))
+
+ new_conditions, errors = self._update_conditions_full(property_tree,
+ prev_default=prev_default)
+ conditions.extend(new_conditions)
+ return conditions, errors
+
+ # Retain existing conditions if they match the updated values
+ for condition in current_conditions:
+ # All run_info that isn't handled by some previous condition
+ all_run_infos_condition = run_info_by_condition[condition]
+ run_infos = {item for item in all_run_infos_condition
+ if item not in run_info_with_condition}
+
+ if not run_infos:
+ # Retain existing conditions that don't match anything in the update
+ conditions.append((condition.condition_node,
+ self.from_ini_value(condition.value)))
+ continue
+
+ # Set of nodes in the updated tree that match the same run_info values as the
+ # current existing node
+ nodes = [node_by_run_info[run_info] for run_info in run_infos
+ if run_info in node_by_run_info]
+
+ updated_value = None
+ current_values = set(condition.value)
+ if all(set(result).issubset(current_values)
+ for node in nodes
+ for result in node.result_values.keys()):
+ # If all the values are subsets of the current value, retain the condition as-is
+ updated_value = self.from_ini_value(condition.value)
+ elif nodes and all(set(node.result_values.keys()) ==
+ set(nodes[0].result_values.keys()) for node in nodes):
+ # If the condition doesn't need to change, update the value
+ current_value = self.from_ini_value(condition.value)
+ try:
+ updated_value = self.updated_value(current_value,
+ nodes[0].result_values)
+ except ConditionError as e:
+ errors.append(e)
+ continue
+ if updated_value != current_value:
+ self.node.modified = True
+
+ if updated_value is not None:
+ # Reuse the existing condition with an updated value
+ conditions.append((condition.condition_node, updated_value))
+ run_info_with_condition |= set(run_infos)
+ else:
+ # Don't reuse this condition
+ self.node.modified = True
+
+ new_conditions, new_errors = self.build_tree_conditions(property_tree,
+ run_info_with_condition,
+ prev_default)
+ if new_conditions:
+ self.node.modified = True
+
+ conditions.extend(new_conditions)
+ errors.extend(new_errors)
+
+ return conditions, errors
+
+ def _update_conditions_full(self,
+ property_tree,
+ prev_default=None):
+ self.node.modified = True
+ conditions, errors = self.build_tree_conditions(property_tree,
+ set(),
+ prev_default)
+
+ return conditions, errors
+
+ def build_tree_conditions(self,
+ property_tree,
+ run_info_with_condition,
+ prev_default=None):
+ conditions = []
+ errors = []
+
+ def to_count_value(v):
+ if v is None:
+ return v
+ # Need to count the values in a hashable type
+ count_value = self.to_ini_value(v)
+ if isinstance(count_value, list):
+ count_value = tuple(count_value)
+ return count_value
+
+
+ queue = deque([(property_tree, [])])
+ while queue:
+ node, parents = queue.popleft()
+ parents_and_self = parents + [node]
+ if node.result_values and any(run_info not in run_info_with_condition
+ for run_info in node.run_info):
+ prop_set = [(item.prop, item.value) for item in parents_and_self if item.prop]
+ value = node.result_values
+ error = None
+ if parents:
+ try:
+ value = self.updated_value(None, value)
+ except ConditionError:
+ expr = make_expr(prop_set, value)
+ error = ConditionError(expr)
+ else:
+ expr = make_expr(prop_set, value)
+ else:
+ # The root node needs special handling
+ expr = None
+ try:
+ value = self.updated_value(self.unconditional_value,
+ value)
+ except ConditionError:
+ error = ConditionError(expr)
+ # If we got an error for the root node, re-add the previous
+ # default value
+ if prev_default:
+ conditions.append((None, prev_default))
+ if error is None:
+ conditions.append((expr, value))
+ else:
+ errors.append(error)
+
+ try:
+ # Attempt to stably order the next group of conditions by their
+ # values, which are typically string/numeric types that have an
+ # order defined.
+ children = sorted(node.children, key=lambda child: child.value)
+ except TypeError:
+ children = node.children
+ for child in children:
+ queue.append((child, parents_and_self))
+
+ conditions = conditions[::-1]
+ value_count = Counter(to_count_value(value) for _, value in conditions)
+
+ # If we haven't set a default condition, add one and remove all the conditions
+ # with the same value
+ if value_count and (not conditions or conditions[-1][0] is not None):
+            # Sort in order of occurrence, prioritising values that match the class default
+ # or the previous default
+ cls_default = to_count_value(self.default_value)
+ prev_default = to_count_value(prev_default)
+ commonest_value = max(value_count, key=lambda x: (value_count[x],
+ x == cls_default,
+ x == prev_default))
+ if isinstance(commonest_value, tuple):
+ commonest_value = list(commonest_value)
+ commonest_value = self.from_ini_value(commonest_value)
+ conditions = [item for item in conditions if item[1] != commonest_value]
+ conditions.append((None, commonest_value))
+
+ return conditions, errors
+
+
+class ExpectedUpdate(PropertyUpdate):
+ property_name = "expected"
+ property_builder = build_conditional_tree
+
+ def check_default(self, result):
+ if self.default_value is not None:
+ assert self.default_value == result.default_expected
+ else:
+ self.default_value = result.default_expected
+
+ def from_result_value(self, result):
+ # When we are updating intermittents, we need to keep a record of any existing
+ # intermittents to pass on when building the property tree and matching statuses and
+ # intermittents to the correct run info - this is so we can add them back into the
+ # metadata aligned with the right conditions, unless specified not to with
+ # self.remove_intermittent.
+ # The (status, known_intermittent) tuple is counted when the property tree is built, but
+ # the count value only applies to the first item in the tuple, the status from that run,
+ # when passed to `updated_value`.
+ if (not self.update_intermittent or
+ self.remove_intermittent or
+ not result.known_intermittent):
+ return result.status
+ return result.status + result.known_intermittent
+
+ def to_ini_value(self, value):
+ if isinstance(value, (list, tuple)):
+ return [str(item) for item in value]
+ return str(value)
+
+ def updated_value(self, current, new):
+ if len(new) > 1 and not self.update_intermittent and not isinstance(current, list):
+ raise ConditionError
+
+ counts = {}
+ for status, count in new.items():
+ if isinstance(status, tuple):
+ counts[status[0]] = count
+ counts.update({intermittent: 0 for intermittent in status[1:] if intermittent not in counts})
+ else:
+ counts[status] = count
+
+ if not (self.update_intermittent or isinstance(current, list)):
+ return list(counts)[0]
+
+ # Reorder statuses first based on counts, then based on status priority if there are ties.
+ # Counts with 0 are considered intermittent.
+ statuses = ["OK", "PASS", "FAIL", "ERROR", "TIMEOUT", "CRASH"]
+ status_priority = {value: i for i, value in enumerate(statuses)}
+ sorted_new = sorted(counts.items(), key=lambda x:(-1 * x[1],
+ status_priority.get(x[0],
+ len(status_priority))))
+ expected = []
+ for status, count in sorted_new:
+ # If we are not removing existing recorded intermittents, with a count of 0,
+ # add them in to expected.
+ if count > 0 or not self.remove_intermittent:
+ expected.append(status)
+
+ # If the new intermittent is a subset of the existing one, just use the existing one
+ # This prevents frequent flip-flopping of results between e.g. [OK, TIMEOUT] and
+ # [TIMEOUT, OK]
+ if current is not None:
+ if not isinstance(current, list):
+ current_set = {current}
+ else:
+ current_set = set(current)
+ if set(expected).issubset(current_set):
+ return current
+
+ if self.update_intermittent:
+ if len(expected) == 1:
+ return expected[0]
+ return expected
+
+        # If we are not updating intermittents, return the status with the highest occurrence.
+ return expected[0]
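+
+# Illustrative behaviour of ExpectedUpdate.updated_value (inputs are assumed):
+# with update_intermittent=True, current=None and new={"PASS": 3, "TIMEOUT": 1}
+# it returns ["PASS", "TIMEOUT"]; with update_intermittent=False the same input
+# raises ConditionError, because two different statuses were seen for a single
+# condition and intermittents are not being recorded.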
+
+
+class MaxAssertsUpdate(PropertyUpdate):
+ """For asserts we always update the default value and never add new conditionals.
+    The value we set as the default is the maximum of the current default and one more than the
+    number of asserts we saw in any configuration."""
+
+ property_name = "max-asserts"
+ cls_default_value = 0
+ value_type = int
+ property_builder = build_unconditional_tree
+
+ def updated_value(self, current, new):
+ if any(item > current for item in new):
+ return max(new) + 1
+ return current
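+
+# For example (assumed counts): updated_value(4, {6: 2, 3: 1}) returns 7, one more
+# than the largest observed assert count, while updated_value(4, {2: 5}) leaves
+# the default at 4.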
+
+
+class MinAssertsUpdate(PropertyUpdate):
+ property_name = "min-asserts"
+ cls_default_value = 0
+ value_type = int
+ property_builder = build_unconditional_tree
+
+ def updated_value(self, current, new):
+ if any(item < current for item in new):
+ rv = min(new) - 1
+ else:
+ rv = current
+ return max(rv, 0)
+
+
+class AppendOnlyListUpdate(PropertyUpdate):
+ cls_default_value: ClassVar[List[str]] = []
+ property_builder = build_unconditional_tree
+
+ def updated_value(self, current, new):
+ if current is None:
+ rv = set()
+ else:
+ rv = set(current)
+
+ for item in new:
+ if item is None:
+ continue
+ elif isinstance(item, str):
+ rv.add(item)
+ else:
+ rv |= item
+
+ return sorted(rv)
+
+
+class LsanUpdate(AppendOnlyListUpdate):
+ property_name = "lsan-allowed"
+ property_builder = build_unconditional_tree
+
+ def from_result_value(self, result):
+ # If we have an allowed_match that matched, return None
+ # This value is ignored later (because it matches the default)
+ # We do that because then if we allow a failure in foo/__dir__.ini
+ # we don't want to update foo/bar/__dir__.ini with the same rule
+ if result[1]:
+ return None
+ # Otherwise return the topmost stack frame
+ # TODO: there is probably some improvement to be made by looking for a "better" stack frame
+ return result[0][0]
+
+ def to_ini_value(self, value):
+ return value
+
+
+class LeakObjectUpdate(AppendOnlyListUpdate):
+ property_name = "leak-allowed"
+ property_builder = build_unconditional_tree
+
+ def from_result_value(self, result):
+ # If we have an allowed_match that matched, return None
+ if result[1]:
+ return None
+ # Otherwise return the process/object name
+ return result[0]
+
+
+class LeakThresholdUpdate(PropertyUpdate):
+ property_name = "leak-threshold"
+ cls_default_value: ClassVar[Dict[str, int]] = {}
+ property_builder = build_unconditional_tree
+
+ def from_result_value(self, result):
+ return result
+
+ def to_ini_value(self, data):
+ return ["%s:%s" % item for item in sorted(data.items())]
+
+ def from_ini_value(self, data):
+ rv = {}
+ for item in data:
+ key, value = item.split(":", 1)
+ rv[key] = int(float(value))
+ return rv
+
+ def updated_value(self, current, new):
+ if current:
+ rv = current.copy()
+ else:
+ rv = {}
+ for process, leaked_bytes, threshold in new:
+ # If the value is less than the threshold but there isn't
+ # an old value we must have inherited the threshold from
+            # a parent ini file so don't add anything to this one
+ if process not in rv and leaked_bytes < threshold:
+ continue
+ if leaked_bytes > rv.get(process, 0):
+ # Round up to nearest 50 kb
+ boundary = 50 * 1024
+ rv[process] = int(boundary * ceil(float(leaked_bytes) / boundary))
+ return rv
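+
+# For example (assumed numbers): a stored threshold of {"default": 102400} bytes
+# serialises to ["default:102400"] and parses back to the same dict; a new leak of
+# (process="default", leaked_bytes=120000, threshold=51200) would be recorded as
+# 153600, i.e. 120000 rounded up to the next 50 KiB boundary.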
+
+
+def make_expr(prop_set, rhs):
+    """Create an AST that evaluates to the value ``rhs`` given that all the
+    properties in prop_set match.
+
+    :param prop_set: tuple of (property name, value) pairs for each
+                     property in this expression and the value it must match
+    :param rhs: Value placed on the right-hand side when all the given properties match
+    """
+ root = ConditionalNode()
+
+ assert len(prop_set) > 0
+
+ expressions = []
+ for prop, value in prop_set:
+ if value not in (True, False):
+ expressions.append(
+ BinaryExpressionNode(
+ BinaryOperatorNode("=="),
+ VariableNode(prop),
+ make_node(value)))
+ else:
+ if value:
+ expressions.append(VariableNode(prop))
+ else:
+ expressions.append(
+ UnaryExpressionNode(
+ UnaryOperatorNode("not"),
+ VariableNode(prop)
+ ))
+ if len(expressions) > 1:
+ prev = expressions[-1]
+ for curr in reversed(expressions[:-1]):
+ node = BinaryExpressionNode(
+ BinaryOperatorNode("and"),
+ curr,
+ prev)
+ prev = node
+ else:
+ node = expressions[0]
+
+ root.append(node)
+ rhs_node = make_value_node(rhs)
+ root.append(rhs_node)
+
+ return root
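+
+# For illustration (assumed inputs): prop_set = [("os", "linux"), ("debug", True)]
+# with rhs = "FAIL" produces an AST that serialises to roughly
+#
+#     if os == "linux" and debug: FAIL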
+
+
+def make_node(value):
+ if isinstance(value, (int, float,)):
+ node = NumberNode(value)
+ elif isinstance(value, str):
+ node = StringNode(str(value))
+ elif hasattr(value, "__iter__"):
+ node = ListNode()
+ for item in value:
+ node.append(make_node(item))
+ else:
+        raise ValueError(f"Unrecognised data type {type(value)}")
+ return node
+
+
+def make_value_node(value):
+ if isinstance(value, (int, float,)):
+ node = ValueNode(value)
+ elif isinstance(value, str):
+ node = ValueNode(str(value))
+ elif hasattr(value, "__iter__"):
+ node = ListNode()
+ for item in value:
+ node.append(make_value_node(item))
+ else:
+ raise ValueError("Don't know how to convert %s into node" % type(value))
+ return node
+
+
+def get_manifest(metadata_root, test_path, url_base, run_info_properties, update_intermittent, remove_intermittent):
+ """Get the ExpectedManifest for a particular test path, or None if there is no
+ metadata stored for that test path.
+
+ :param metadata_root: Absolute path to the root of the metadata directory
+ :param test_path: Path to the test(s) relative to the test root
+    :param url_base: Base url for serving the tests in this manifest
+    :param run_info_properties: Properties to use when constructing conditions, in the
+        format described on ExpectedManifest
+    :param update_intermittent: When True, record intermittent statuses as expected
+    :param remove_intermittent: When True, drop intermittent statuses that no longer occur"""
+ manifest_path = expected.expected_path(metadata_root, test_path)
+ try:
+ with open(manifest_path, "rb") as f:
+ rv = compile(f, test_path, url_base,
+ run_info_properties, update_intermittent, remove_intermittent)
+ except OSError:
+ return None
+ return rv
+
+
+def compile(manifest_file, test_path, url_base, run_info_properties, update_intermittent, remove_intermittent):
+ return conditional.compile(manifest_file,
+ data_cls_getter=data_cls_getter,
+ test_path=test_path,
+ url_base=url_base,
+ run_info_properties=run_info_properties,
+ update_intermittent=update_intermittent,
+ remove_intermittent=remove_intermittent)
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/metadata.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/metadata.py
new file mode 100644
index 0000000000..b9cb61eb07
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/metadata.py
@@ -0,0 +1,859 @@
+# mypy: allow-untyped-defs
+
+import array
+import os
+from collections import defaultdict, namedtuple
+from typing import Dict, List, Tuple
+
+from mozlog import structuredlog
+from six import ensure_str, ensure_text
+from sys import intern
+
+from . import manifestupdate
+from . import products
+from . import testloader
+from . import wptmanifest
+from . import wpttest
+from .expected import expected_path
+manifest = None # Module that will be imported relative to test_root
+manifestitem = None
+
+logger = structuredlog.StructuredLogger("web-platform-tests")
+
+try:
+ import ujson as json
+except ImportError:
+ import json # type: ignore
+
+
+class RunInfo:
+ """A wrapper around RunInfo dicts so that they can be hashed by identity"""
+
+ def __init__(self, dict_value):
+ self.data = dict_value
+ self.canonical_repr = tuple(tuple(item) for item in sorted(dict_value.items()))
+
+ def __getitem__(self, key):
+ return self.data[key]
+
+ def __setitem__(self, key, value):
+ raise TypeError
+
+ def __hash__(self):
+ return hash(self.canonical_repr)
+
+ def __eq__(self, other):
+ return self.canonical_repr == other.canonical_repr
+
+ def iteritems(self):
+ yield from self.data.items()
+
+ def items(self):
+        return list(self.data.items())
+
+
+def get_properties(properties_file=None, extra_properties=None, config=None, product=None):
+ """Read the list of properties to use for updating metadata.
+
+ :param properties_file: Path to a JSON file containing properties.
+ :param extra_properties: List of extra properties to use
+ :param config: (deprecated) wptrunner config
+    :param product: (deprecated) product name (requires a config argument to be used)
+ """
+ properties = []
+ dependents = {}
+
+ if properties_file is not None:
+ logger.debug(f"Reading update properties from {properties_file}")
+ try:
+ with open(properties_file) as f:
+ data = json.load(f)
+ msg = None
+ if "properties" not in data:
+ msg = "Properties file missing 'properties' key"
+ elif not isinstance(data["properties"], list):
+ msg = "Properties file 'properties' value must be a list"
+ elif not all(isinstance(item, str) for item in data["properties"]):
+ msg = "Properties file 'properties' value must be a list of strings"
+ elif "dependents" in data:
+ dependents = data["dependents"]
+ if not isinstance(dependents, dict):
+                    msg = "Properties file 'dependents' value must be an object"
+ elif (not all(isinstance(dependents[item], list) and
+ all(isinstance(item_value, str)
+ for item_value in dependents[item])
+ for item in dependents)):
+                    msg = ("Properties file 'dependents' values must be lists of" +
+                           " strings")
+ if msg is not None:
+ logger.error(msg)
+ raise ValueError(msg)
+
+ properties = data["properties"]
+ except OSError:
+ logger.critical(f"Error opening properties file {properties_file}")
+ raise
+ except ValueError:
+ logger.critical(f"Error parsing properties file {properties_file}")
+ raise
+ elif product is not None:
+ logger.warning("Falling back to getting metadata update properties from wptrunner browser "
+ "product file, this will be removed")
+ if config is None:
+ msg = "Must provide a config together with a product"
+ logger.critical(msg)
+ raise ValueError(msg)
+
+ properties, dependents = products.load_product_update(config, product)
+
+ if extra_properties is not None:
+ properties.extend(extra_properties)
+
+ properties_set = set(properties)
+ if any(item not in properties_set for item in dependents.keys()):
+ msg = "All 'dependent' keys must be in 'properties'"
+ logger.critical(msg)
+ raise ValueError(msg)
+
+ return properties, dependents
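+
+# A properties file accepted above might look like this (values are illustrative):
+#
+#     {
+#         "properties": ["product", "os", "version"],
+#         "dependents": {"os": ["version"]}
+#     }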
+
+
+def update_expected(test_paths, log_file_names,
+ update_properties, full_update=False, disable_intermittent=None,
+ update_intermittent=False, remove_intermittent=False, **kwargs):
+ """Update the metadata files for web-platform-tests based on
+ the results obtained in a previous run or runs
+
+ If `disable_intermittent` is not None, assume log_file_names refers to logs from repeated
+ test jobs, disable tests that don't behave as expected on all runs
+
+ If `update_intermittent` is True, intermittent statuses will be recorded as `expected` in
+ the metadata.
+
+ If `remove_intermittent` is True and used in conjunction with `update_intermittent`, any
+ intermittent statuses which are not present in the current run will be removed from the
+ metadata, else they are left in."""
+
+ do_delayed_imports()
+
+ id_test_map = load_test_data(test_paths)
+
+ msg = f"Updating metadata using properties: {','.join(update_properties[0])}"
+ if update_properties[1]:
+ dependent_strs = [f"{item}: {','.join(values)}"
+ for item, values in update_properties[1].items()]
+ msg += f", and dependent properties: {' '.join(dependent_strs)}"
+ logger.info(msg)
+
+ for metadata_path, updated_ini in update_from_logs(id_test_map,
+ update_properties,
+ disable_intermittent,
+ update_intermittent,
+ remove_intermittent,
+ full_update,
+ *log_file_names):
+
+ write_new_expected(metadata_path, updated_ini)
+ if disable_intermittent:
+ for test in updated_ini.iterchildren():
+ for subtest in test.iterchildren():
+ if subtest.new_disabled:
+ logger.info("disabled: %s" % os.path.dirname(subtest.root.test_path) + "/" + subtest.name)
+ if test.new_disabled:
+ logger.info("disabled: %s" % test.root.test_path)
+
+
+def do_delayed_imports():
+ global manifest, manifestitem
+ from manifest import manifest, item as manifestitem # type: ignore
+
+
+# For each testrun
+# Load all files and scan for the suite_start entry
+# Build a hash of filename: properties
+# For each different set of properties, gather all chunks
+# For each chunk in the set of chunks, go through all tests
+# for each test, make a map of {conditionals: [(platform, new_value)]}
+# Repeat for each platform
+# For each test in the list of tests:
+# for each conditional:
+# If all the new values match (or there aren't any) retain that conditional
+# If any new values mismatch:
+# If disable_intermittent and any repeated values don't match, disable the test
+# else mark the test as needing human attention
+# Check if all the RHS values are the same; if so collapse the conditionals
+
+
+class InternedData:
+ """Class for interning data of any (hashable) type.
+
+ This class is intended for building a mapping of int <=> value, such
+ that the integer may be stored as a proxy for the real value, and then
+ the real value obtained later from the proxy value.
+
+ In order to support the use case of packing the integer value as binary,
+ it is possible to specify a maximum bitsize of the data; adding more items
+    than this allows will result in a ValueError exception.
+
+    The zero value is reserved for use as a sentinel."""
+
+ type_conv = None
+ rev_type_conv = None
+
+ def __init__(self, max_bits: int = 8):
+ self.max_idx = 2**max_bits - 2
+        # Reserve 0 as a sentinel
+        self._data: Tuple[List[object], Dict[object, int]]
+ self._data = [None], {}
+
+ def clear(self):
+ self.__init__()
+
+ def store(self, obj):
+ if self.type_conv is not None:
+ obj = self.type_conv(obj)
+
+ objs, obj_to_idx = self._data
+ if obj not in obj_to_idx:
+ value = len(objs)
+ objs.append(obj)
+ obj_to_idx[obj] = value
+ if value > self.max_idx:
+ raise ValueError
+ else:
+ value = obj_to_idx[obj]
+ return value
+
+ def get(self, idx):
+ obj = self._data[0][idx]
+ if self.rev_type_conv is not None:
+ obj = self.rev_type_conv(obj)
+ return obj
+
+ def __iter__(self):
+ for i in range(1, len(self._data[0])):
+ yield self.get(i)
+
+
+class RunInfoInterned(InternedData):
+ def type_conv(self, value):
+ return tuple(value.items())
+
+ def rev_type_conv(self, value):
+ return dict(value)
+
+
+prop_intern = InternedData(4)
+run_info_intern = InternedData(16)
+status_intern = InternedData(4)
+
+
+def pack_result(data):
+ # As `status_intern` normally handles one status, if `known_intermittent` is present in
+ # the test logs, intern and store this with the `status` in an array until needed.
+ if not data.get("known_intermittent"):
+ return status_intern.store(data.get("status"))
+ result = array.array("B")
+ expected = data.get("expected")
+ if expected is None:
+ expected = data["status"]
+ result_parts = [data["status"], expected] + data["known_intermittent"]
+ for i, part in enumerate(result_parts):
+ value = status_intern.store(part)
+ if i % 2 == 0:
+ assert value < 16
+ result.append(value << 4)
+ else:
+ result[-1] += value
+ return result
+
+
+def unpack_result(data):
+ if isinstance(data, int):
+ return (status_intern.get(data), None)
+ if isinstance(data, str):
+ return (data, None)
+ # Unpack multiple statuses into a tuple to be used in the Results named tuple below,
+ # separating `status` and `known_intermittent`.
+ results = []
+ for packed_value in data:
+ first = status_intern.get(packed_value >> 4)
+ second = status_intern.get(packed_value & 0x0F)
+ results.append(first)
+ if second:
+ results.append(second)
+ return ((results[0],), tuple(results[1:]))
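+
+# Worked example (the interned indices are assumptions): packing
+# {"status": "FAIL", "expected": "PASS", "known_intermittent": ["TIMEOUT"]}
+# with FAIL/PASS/TIMEOUT interned as 1/2/3 yields array('B', [0x12, 0x30]),
+# and unpack_result() recovers (("FAIL",), ("PASS", "TIMEOUT")).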
+
+
+def load_test_data(test_paths):
+ manifest_loader = testloader.ManifestLoader(test_paths, False)
+ manifests = manifest_loader.load()
+
+ id_test_map = {}
+ for test_manifest, paths in manifests.items():
+ id_test_map.update(create_test_tree(paths["metadata_path"],
+ test_manifest))
+ return id_test_map
+
+
+def update_from_logs(id_test_map, update_properties, disable_intermittent, update_intermittent,
+ remove_intermittent, full_update, *log_filenames):
+
+ updater = ExpectedUpdater(id_test_map)
+
+ for i, log_filename in enumerate(log_filenames):
+ logger.info("Processing log %d/%d" % (i + 1, len(log_filenames)))
+ with open(log_filename) as f:
+ updater.update_from_log(f)
+
+ yield from update_results(id_test_map, update_properties, full_update,
+ disable_intermittent, update_intermittent=update_intermittent,
+ remove_intermittent=remove_intermittent)
+
+
+def update_results(id_test_map,
+ update_properties,
+ full_update,
+ disable_intermittent,
+ update_intermittent,
+ remove_intermittent):
+ test_file_items = set(id_test_map.values())
+
+ default_expected_by_type = {}
+ for test_type, test_cls in wpttest.manifest_test_cls.items():
+ if test_cls.result_cls:
+ default_expected_by_type[(test_type, False)] = test_cls.result_cls.default_expected
+ if test_cls.subtest_result_cls:
+ default_expected_by_type[(test_type, True)] = test_cls.subtest_result_cls.default_expected
+
+ for test_file in test_file_items:
+ updated_expected = test_file.update(default_expected_by_type, update_properties,
+ full_update, disable_intermittent, update_intermittent,
+ remove_intermittent)
+ if updated_expected is not None and updated_expected.modified:
+ yield test_file.metadata_path, updated_expected
+
+
+def directory_manifests(metadata_path):
+ rv = []
+    for dirpath, dirnames, filenames in os.walk(metadata_path):
+ if "__dir__.ini" in filenames:
+ rel_path = os.path.relpath(dirpath, metadata_path)
+ rv.append(os.path.join(rel_path, "__dir__.ini"))
+ return rv
+
+
+def write_new_expected(metadata_path, expected):
+ # Serialize the data back to a file
+ path = expected_path(metadata_path, expected.test_path)
+ if not expected.is_empty:
+ manifest_str = wptmanifest.serialize(expected.node,
+ skip_empty_data=True)
+ assert manifest_str != ""
+ dir = os.path.dirname(path)
+ if not os.path.exists(dir):
+ os.makedirs(dir)
+ tmp_path = path + ".tmp"
+ try:
+ with open(tmp_path, "wb") as f:
+ f.write(manifest_str.encode("utf8"))
+ os.replace(tmp_path, path)
+ except (Exception, KeyboardInterrupt):
+ try:
+ os.unlink(tmp_path)
+ except OSError:
+ pass
+ else:
+ try:
+ os.unlink(path)
+ except OSError:
+ pass
+
+
+class ExpectedUpdater:
+ def __init__(self, id_test_map):
+ self.id_test_map = id_test_map
+ self.base_run_info = None
+ self.run_info_by_subsuite = {}
+ self.action_map = {"suite_start": self.suite_start,
+ "add_subsuite": self.add_subsuite,
+ "test_start": self.test_start,
+ "test_status": self.test_status,
+ "test_end": self.test_end,
+ "assertion_count": self.assertion_count,
+ "lsan_leak": self.lsan_leak,
+ "mozleak_object": self.mozleak_object,
+ "mozleak_total": self.mozleak_total}
+ self.tests_visited = {}
+
+ def update_from_log(self, log_file):
+ # We support three possible formats:
+ # * wptreport format; one json object in the file, possibly pretty-printed
+ # * wptreport format; one run per line
+ # * raw log format
+
+ # Try reading a single json object in wptreport format
+ self.base_run_info = None
+ self.run_info_by_subsuite = {}
+ success = self.get_wptreport_data(log_file.read())
+
+ if success:
+ return
+
+ # Try line-separated json objects in wptreport format
+ log_file.seek(0)
+ for line in log_file:
+ success = self.get_wptreport_data(line)
+ if not success:
+ break
+ else:
+ return
+
+ # Assume the file is a raw log
+ log_file.seek(0)
+ self.update_from_raw_log(log_file)
+
+ def get_wptreport_data(self, input_str):
+ try:
+ data = json.loads(input_str)
+ except Exception:
+ pass
+ else:
+ if "action" not in data and "results" in data:
+ self.update_from_wptreport_log(data)
+ return True
+ return False
+
+ def update_from_raw_log(self, log_file):
+ action_map = self.action_map
+ for line in log_file:
+ try:
+ data = json.loads(line)
+ except ValueError:
+ # Just skip lines that aren't json
+ continue
+ action = data["action"]
+ if action in action_map:
+ action_map[action](data)
+
+ def update_from_wptreport_log(self, data):
+ action_map = self.action_map
+ action_map["suite_start"]({"run_info": data["run_info"]})
+ for subsuite, run_info in data.get("subsuites", {}).items():
+ action_map["add_subsuite"]({"name": subsuite, "run_info": run_info})
+ for test in data["results"]:
+ action_map["test_start"]({"test": test["test"],
+ "subsuite": test.get("subsuite", "")})
+ for subtest in test["subtests"]:
+ action_map["test_status"]({"test": test["test"],
+ "subsuite": test.get("subsuite", ""),
+ "subtest": subtest["name"],
+ "status": subtest["status"],
+ "expected": subtest.get("expected"),
+ "known_intermittent": subtest.get("known_intermittent", [])})
+ action_map["test_end"]({"test": test["test"],
+ "subsuite": test.get("subsuite", ""),
+ "status": test["status"],
+ "expected": test.get("expected"),
+ "known_intermittent": test.get("known_intermittent", [])})
+ if "asserts" in test:
+ asserts = test["asserts"]
+ action_map["assertion_count"]({"test": test["test"],
+ "subsuite": data.get("subsuite", ""),
+ "count": asserts["count"],
+ "min_expected": asserts["min"],
+ "max_expected": asserts["max"]})
+ for item in data.get("lsan_leaks", []):
+ action_map["lsan_leak"](item)
+
+ mozleak_data = data.get("mozleak", {})
+ for scope, scope_data in mozleak_data.items():
+ for key, action in [("objects", "mozleak_object"),
+ ("total", "mozleak_total")]:
+ for item in scope_data.get(key, []):
+ item_data = {"scope": scope}
+ item_data.update(item)
+ action_map[action](item_data)
+
+ def suite_start(self, data):
+ self.base_run_info = data["run_info"]
+ run_info = RunInfo(data["run_info"])
+ self.run_info_by_subsuite[""] = run_info_intern.store(run_info)
+
+ def add_subsuite(self, data):
+ run_info_data = self.base_run_info.copy()
+ run_info_data.update(data["run_info"])
+ run_info = RunInfo(run_info_data)
+ name = data["name"]
+ self.run_info_by_subsuite[name] = run_info_intern.store(run_info)
+
+ def test_start(self, data):
+ test_id = intern(ensure_str(data["test"]))
+ try:
+ self.id_test_map[test_id]
+ except KeyError:
+ logger.warning("Test not found %s, skipping" % test_id)
+ return
+
+ self.tests_visited[test_id] = set()
+
+ def test_status(self, data):
+ test_id = intern(ensure_str(data["test"]))
+ subtest = intern(ensure_str(data["subtest"]))
+ test_data = self.id_test_map.get(test_id)
+ if test_data is None:
+ return
+
+ self.tests_visited[test_id].add(subtest)
+
+ result = pack_result(data)
+
+ test_data.set(test_id, subtest, "status", self.run_info_by_subsuite[data.get("subsuite", "")], result)
+ status = data["status"]
+ expected = data.get("expected")
+ if expected and expected != status and status not in data.get("known_intermittent", []):
+ test_data.set_requires_update()
+
+ def test_end(self, data):
+ if data["status"] == "SKIP":
+ return
+
+ test_id = intern(ensure_str(data["test"]))
+ test_data = self.id_test_map.get(test_id)
+ if test_data is None:
+ return
+
+ result = pack_result(data)
+
+ test_data.set(test_id, None, "status", self.run_info_by_subsuite[data.get("subsuite", "")], result)
+ status = data["status"]
+ expected = data.get("expected")
+ if expected and expected != status and status not in data.get("known_intermittent", []):
+ test_data.set_requires_update()
+ del self.tests_visited[test_id]
+
+ def assertion_count(self, data):
+ test_id = intern(ensure_str(data["test"]))
+ test_data = self.id_test_map.get(test_id)
+ if test_data is None:
+ return
+
+ test_data.set(test_id, None, "asserts", self.run_info_by_subsuite[data.get("subsuite", "")], data["count"])
+ if data["count"] < data["min_expected"] or data["count"] > data["max_expected"]:
+ test_data.set_requires_update()
+
+ def test_for_scope(self, data):
+ dir_path = data.get("scope", "/")
+ dir_id = intern(ensure_str(os.path.join(dir_path, "__dir__").replace(os.path.sep, "/")))
+ if dir_id.startswith("/"):
+ dir_id = dir_id[1:]
+ return dir_id, self.id_test_map[dir_id]
+
+ def lsan_leak(self, data):
+ if data["scope"] == "/":
+ logger.warning("Not updating lsan annotations for root scope")
+ return
+ dir_id, test_data = self.test_for_scope(data)
+ test_data.set(dir_id, None, "lsan",
+ self.run_info_by_subsuite[data.get("subsuite", "")], (data["frames"], data.get("allowed_match")))
+ if not data.get("allowed_match"):
+ test_data.set_requires_update()
+
+ def mozleak_object(self, data):
+ if data["scope"] == "/":
+ logger.warning("Not updating mozleak annotations for root scope")
+ return
+ dir_id, test_data = self.test_for_scope(data)
+        test_data.set(dir_id, None, "leak-object",
+                      self.run_info_by_subsuite[data.get("subsuite", "")],
+                      ("%s:%s" % (data["process"], data["name"]),
+                       data.get("allowed")))
+ if not data.get("allowed"):
+ test_data.set_requires_update()
+
+ def mozleak_total(self, data):
+ if data["scope"] == "/":
+ logger.warning("Not updating mozleak annotations for root scope")
+ return
+ if data["bytes"]:
+ dir_id, test_data = self.test_for_scope(data)
+ test_data.set(dir_id, None, "leak-threshold",
+ self.run_info_by_subsuite[data.get("subsuite", "")], (data["process"], data["bytes"], data["threshold"]))
+ if data["bytes"] > data["threshold"] or data["bytes"] < 0:
+ test_data.set_requires_update()
+
+
+def create_test_tree(metadata_path, test_manifest):
+ """Create a map of test_id to TestFileData for that test.
+ """
+ do_delayed_imports()
+ id_test_map = {}
+ exclude_types = frozenset(["manual", "support", "conformancechecker"])
+ all_types = set(manifestitem.item_types.keys())
+ assert all_types > exclude_types
+ include_types = all_types - exclude_types
+ for item_type, test_path, tests in test_manifest.itertypes(*include_types):
+ test_file_data = TestFileData(intern(ensure_str(test_manifest.url_base)),
+ intern(ensure_str(item_type)),
+ metadata_path,
+ test_path,
+ tests)
+ for test in tests:
+ id_test_map[intern(ensure_str(test.id))] = test_file_data
+
+ dir_path = os.path.dirname(test_path)
+ while True:
+ dir_meta_path = os.path.join(dir_path, "__dir__")
+ dir_id = (test_manifest.url_base + dir_meta_path.replace(os.path.sep, "/")).lstrip("/")
+ if dir_id in id_test_map:
+ break
+
+ test_file_data = TestFileData(intern(ensure_str(test_manifest.url_base)),
+ None,
+ metadata_path,
+ dir_meta_path,
+ [])
+ id_test_map[dir_id] = test_file_data
+ dir_path = os.path.dirname(dir_path)
+ if not dir_path:
+ break
+
+ return id_test_map
+
+
+class PackedResultList:
+ """Class for storing test results.
+
+ Results are stored as an array of 4-byte integers for compactness
+ with the first 8 bits reserved. In the remaining 24 bits,
+ the first 4 bits represent the property name, the second 4 bits
+ represent the test status (if it's a result with a status code), and
+ the final 16 bits represent the run_info. If the result doesn't have a
+ simple status code but instead a richer type, we place that richer type
+ in a dictionary and set the status part of the result type to 0.
+
+ This class depends on the global prop_intern, run_info_intern and
+    status_intern InternedData objects to convert between the bit values
+ and corresponding Python objects."""
+
+ def __init__(self):
+ self.data = array.array("L")
+
+ __slots__ = ("data", "raw_data")
+
+ def append(self, prop, run_info, value):
+ out_val = (prop << 20) + run_info
+ if prop == prop_intern.store("status") and isinstance(value, int):
+ out_val += value << 16
+ else:
+ if not hasattr(self, "raw_data"):
+ self.raw_data = {}
+ self.raw_data[len(self.data)] = value
+ self.data.append(out_val)
+
+ def unpack(self, idx, packed):
+ prop = prop_intern.get((packed & 0xF00000) >> 20)
+
+ value_idx = (packed & 0x0F0000) >> 16
+ if value_idx == 0:
+ value = self.raw_data[idx]
+ else:
+ value = status_intern.get(value_idx)
+
+ run_info = run_info_intern.get(packed & 0x00FFFF)
+
+ return prop, run_info, value
+
+ def __iter__(self):
+ for i, item in enumerate(self.data):
+ yield self.unpack(i, item)
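+
+# Worked example of the packing above (interned indices are assumptions): if
+# "status" interns to property index 1, the run_info to index 5 and the FAIL
+# status to index 3, append() stores (1 << 20) + (3 << 16) + 5 == 0x130005 and
+# unpack() recovers ("status", <run_info 5>, "FAIL") from that integer.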
+
+
+class TestFileData:
+ __slots__ = ("url_base", "item_type", "test_path", "metadata_path", "tests",
+ "_requires_update", "data")
+
+ def __init__(self, url_base, item_type, metadata_path, test_path, tests):
+ self.url_base = url_base
+ self.item_type = item_type
+ self.test_path = test_path
+ self.metadata_path = metadata_path
+ self.tests = {intern(ensure_str(item.id)) for item in tests}
+ self._requires_update = False
+ self.data = defaultdict(lambda: defaultdict(PackedResultList))
+
+ def set_requires_update(self):
+ self._requires_update = True
+
+ @property
+ def requires_update(self):
+ return self._requires_update
+
+ def set(self, test_id, subtest_id, prop, run_info, value):
+ self.data[test_id][subtest_id].append(prop_intern.store(prop),
+ run_info,
+ value)
+
+ def expected(self, update_properties, update_intermittent, remove_intermittent):
+ expected_data = load_expected(self.url_base,
+ self.metadata_path,
+ self.test_path,
+ self.tests,
+ update_properties,
+ update_intermittent,
+ remove_intermittent)
+ if expected_data is None:
+ expected_data = create_expected(self.url_base,
+ self.test_path,
+ update_properties,
+ update_intermittent,
+ remove_intermittent)
+ return expected_data
+
+ def is_disabled(self, test):
+        # This conservatively assumes that anything that was disabled remains disabled;
+        # we could probably do better by checking if it's in the full set of run infos.
+ return test.has_key("disabled")
+
+ def orphan_subtests(self, expected):
+ # Return subtest nodes present in the expected file, but missing from the data
+ rv = []
+
+ for test_id, subtests in self.data.items():
+ test = expected.get_test(ensure_text(test_id))
+ if not test:
+ continue
+ seen_subtests = {ensure_text(item) for item in subtests.keys() if item is not None}
+ missing_subtests = set(test.subtests.keys()) - seen_subtests
+ for item in missing_subtests:
+ expected_subtest = test.get_subtest(item)
+ if not self.is_disabled(expected_subtest):
+ rv.append(expected_subtest)
+ for name in seen_subtests:
+ subtest = test.get_subtest(name)
+            # If any of the items have children (i.e. subsubtests) we want to prune these
+ if subtest.children:
+ rv.extend(subtest.children)
+
+ return rv
+
+ def filter_unknown_props(self, update_properties, subtests):
+ # Remove subtests which have some conditions that aren't in update_properties
+ # since removing these may be inappropriate
+ top_level_props, dependent_props = update_properties
+ all_properties = set(top_level_props)
+ for item in dependent_props.values():
+ all_properties |= set(item)
+
+ filtered = []
+ for subtest in subtests:
+ include = True
+ for key, _ in subtest.iter_properties():
+ conditions = subtest.get_conditions(key)
+ for condition in conditions:
+ if not condition.variables.issubset(all_properties):
+ include = False
+ break
+ if not include:
+ break
+ if include:
+ filtered.append(subtest)
+ return filtered
+
+ def update(self, default_expected_by_type, update_properties,
+ full_update=False, disable_intermittent=None, update_intermittent=False,
+ remove_intermittent=False):
+ # If we are doing a full update, we may need to prune missing nodes
+ # even if the expectations didn't change
+ if not self.requires_update and not full_update:
+ return
+
+ logger.debug("Updating %s", self.metadata_path)
+
+ expected = self.expected(update_properties,
+ update_intermittent=update_intermittent,
+ remove_intermittent=remove_intermittent)
+
+ if full_update:
+ orphans = self.orphan_subtests(expected)
+ orphans = self.filter_unknown_props(update_properties, orphans)
+
+ if not self.requires_update and not orphans:
+ return
+
+ if orphans:
+ expected.modified = True
+ for item in orphans:
+ item.remove()
+
+ expected_by_test = {}
+
+ for test_id in self.tests:
+ if not expected.has_test(test_id):
+ expected.append(manifestupdate.TestNode.create(test_id))
+ test_expected = expected.get_test(test_id)
+ expected_by_test[test_id] = test_expected
+
+ for test_id, test_data in self.data.items():
+ test_id = ensure_str(test_id)
+ for subtest_id, results_list in test_data.items():
+ for prop, run_info, value in results_list:
+ # Special case directory metadata
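+                    # (directory ids end in "__dir__", e.g. "css/__dir__"; see create_test_tree)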
+ if subtest_id is None and test_id.endswith("__dir__"):
+ if prop == "lsan":
+ expected.set_lsan(run_info, value)
+ elif prop == "leak-object":
+ expected.set_leak_object(run_info, value)
+ elif prop == "leak-threshold":
+ expected.set_leak_threshold(run_info, value)
+ continue
+
+ test_expected = expected_by_test[test_id]
+ if subtest_id is None:
+ item_expected = test_expected
+ else:
+ subtest_id = ensure_text(subtest_id)
+ item_expected = test_expected.get_subtest(subtest_id)
+
+ if prop == "status":
+ status, known_intermittent = unpack_result(value)
+ value = Result(status,
+ known_intermittent,
+ default_expected_by_type[self.item_type,
+ subtest_id is not None])
+ item_expected.set_result(run_info, value)
+ elif prop == "asserts":
+ item_expected.set_asserts(run_info, value)
+
+ expected.update(full_update=full_update,
+ disable_intermittent=disable_intermittent)
+ for test in expected.iterchildren():
+ for subtest in test.iterchildren():
+ subtest.update(full_update=full_update,
+ disable_intermittent=disable_intermittent)
+ test.update(full_update=full_update,
+ disable_intermittent=disable_intermittent)
+
+ return expected
+
+
+Result = namedtuple("Result", ["status", "known_intermittent", "default_expected"])
+
+
+def create_expected(url_base, test_path, run_info_properties, update_intermittent, remove_intermittent):
+ expected = manifestupdate.ExpectedManifest(None,
+ test_path,
+ url_base,
+ run_info_properties,
+ update_intermittent,
+ remove_intermittent)
+ return expected
+
+
+def load_expected(url_base, metadata_path, test_path, tests, run_info_properties, update_intermittent, remove_intermittent):
+ expected_manifest = manifestupdate.get_manifest(metadata_path,
+ test_path,
+ url_base,
+ run_info_properties,
+ update_intermittent,
+ remove_intermittent)
+ return expected_manifest
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/mpcontext.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/mpcontext.py
new file mode 100644
index 0000000000..d423d9b9a1
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/mpcontext.py
@@ -0,0 +1,13 @@
+# mypy: allow-untyped-defs
+
+import multiprocessing
+
+_context = None
+
+
+def get_context():
+ global _context
+
+ if _context is None:
+ _context = multiprocessing.get_context("spawn")
+ return _context
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/print_pdf_runner.html b/testing/web-platform/tests/tools/wptrunner/wptrunner/print_pdf_runner.html
new file mode 100644
index 0000000000..fbe09bab98
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/print_pdf_runner.html
@@ -0,0 +1,33 @@
+<!doctype html>
+<title></title>
+<script src="/_pdf_js/pdf.js"></script>
+<canvas></canvas>
+<script>
+function render(pdfData) {
+ return _render(pdfData);
+}
+
+async function _render(pdfData) {
+ let loadingTask = pdfjsLib.getDocument({data: atob(pdfData)});
+ let pdf = await loadingTask.promise;
+ let rendered = [];
+ for (let pageNumber = 1; pageNumber <= pdf.numPages; pageNumber++) {
+ let page = await pdf.getPage(pageNumber);
+ const viewport = page.getViewport({ scale: 96. / 72. });
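+    // The 96/72 scale converts PDF points (1/72 inch) to CSS pixels (1/96 inch).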
+ // Prepare canvas using PDF page dimensions
+ const canvas = document.getElementsByTagName('canvas')[0];
+ const context = canvas.getContext('2d');
+ canvas.height = viewport.height;
+ canvas.width = viewport.width;
+
+ // Render PDF page into canvas context
+ const renderContext = {
+ canvasContext: context,
+ viewport
+ };
+ await page.render(renderContext).promise;
+ rendered.push(canvas.toDataURL());
+ }
+ return rendered;
+}
+</script>
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/products.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/products.py
new file mode 100644
index 0000000000..c81396f3dd
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/products.py
@@ -0,0 +1,60 @@
+# mypy: allow-untyped-defs
+import importlib
+
+from .browsers import product_list
+
+
+def product_module(config, product):
+ if product not in product_list:
+ raise ValueError("Unknown product %s" % product)
+
+ module = importlib.import_module("wptrunner.browsers." + product)
+ if not hasattr(module, "__wptrunner__"):
+ raise ValueError("Product module does not define __wptrunner__ variable")
+
+ return module
+
+
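+# Each product module exposes a __wptrunner__ dict mapping well-known keys
+# ("browser", "check_args", "browser_kwargs", "executor_kwargs", "env_options",
+# "env_extras", "executor", "timeout_multiplier" and, optionally,
+# "run_info_extras" and "update_properties") to the names of objects defined in
+# that module; Product resolves those names with getattr below.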
+class Product:
+ def __init__(self, config, product):
+ module = product_module(config, product)
+ data = module.__wptrunner__
+ self.name = product
+ if isinstance(data["browser"], str):
+ self._browser_cls = {None: getattr(module, data["browser"])}
+ else:
+ self._browser_cls = {key: getattr(module, value)
+ for key, value in data["browser"].items()}
+ self.check_args = getattr(module, data["check_args"])
+ self.get_browser_kwargs = getattr(module, data["browser_kwargs"])
+ self.get_executor_kwargs = getattr(module, data["executor_kwargs"])
+ self.env_options = getattr(module, data["env_options"])()
+ self.get_env_extras = getattr(module, data["env_extras"])
+ self.run_info_extras = (getattr(module, data["run_info_extras"])
+ if "run_info_extras" in data else lambda product, **kwargs:{})
+ self.get_timeout_multiplier = getattr(module, data["timeout_multiplier"])
+
+ self.executor_classes = {}
+ for test_type, cls_name in data["executor"].items():
+ cls = getattr(module, cls_name)
+ self.executor_classes[test_type] = cls
+
+ def get_browser_cls(self, test_type):
+ if test_type in self._browser_cls:
+ return self._browser_cls[test_type]
+ return self._browser_cls[None]
+
+
+def load_product_update(config, product):
+ """Return tuple of (property_order, boolean_properties) indicating the
+ run_info properties to use when constructing the expectation data for
+ this product. None for either key indicates that the default keys
+ appropriate for distinguishing based on platform will be used."""
+
+ module = product_module(config, product)
+ data = module.__wptrunner__
+
+ update_properties = (getattr(module, data["update_properties"])()
+ if "update_properties" in data else (["product"], {}))
+
+ return update_properties
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/stability.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/stability.py
new file mode 100644
index 0000000000..029b237f98
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/stability.py
@@ -0,0 +1,417 @@
+# mypy: allow-untyped-defs
+
+import copy
+import functools
+import io
+import os
+from collections import OrderedDict, defaultdict
+from datetime import datetime
+
+from mozlog import reader
+from mozlog.formatters import JSONFormatter
+from mozlog.handlers import BaseHandler, LogLevelFilter, StreamHandler
+
+from tools.wpt.utils import load_source
+
+from . import wptrunner
+
+here = os.path.dirname(__file__)
+localpaths = load_source("localpaths", os.path.abspath(os.path.join(here, os.pardir, os.pardir, "localpaths.py"))) # type: ignore
+from ci.tc.github_checks_output import get_gh_checks_outputter # type: ignore
+from wpt.markdown import markdown_adjust, table # type: ignore
+
+# If a test takes more than (FLAKY_THRESHOLD*timeout) and does not consistently
+# time out, it is considered slow (potentially flaky).
+FLAKY_THRESHOLD = 0.8
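+# e.g. with a 10 s timeout, a PASS/FAIL/OK run longer than 8 s is reported as slow.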
+
+
+class LogActionFilter(BaseHandler): # type: ignore
+
+ """Handler that filters out messages not of a given set of actions.
+
+ Subclasses BaseHandler.
+
+ :param inner: Handler to use for messages that pass this filter
+ :param actions: List of actions for which to fire the handler
+ """
+
+ def __init__(self, inner, actions):
+ """Extend BaseHandler and set inner and actions props on self."""
+ BaseHandler.__init__(self, inner)
+ self.inner = inner
+ self.actions = actions
+
+ def __call__(self, item):
+ """Invoke handler if action is in list passed as constructor param."""
+ if item["action"] in self.actions:
+ return self.inner(item)
+
+
+class LogHandler(reader.LogHandler): # type: ignore
+
+ """Handle updating test and subtest status in log.
+
+ Subclasses reader.LogHandler.
+ """
+ def __init__(self):
+ self.results = OrderedDict()
+
+ def find_or_create_test(self, data):
+ test_name = data["test"]
+ if self.results.get(test_name):
+ return self.results[test_name]
+
+ test = {
+ "subtests": OrderedDict(),
+ "status": defaultdict(int),
+ "longest_duration": defaultdict(float),
+ }
+ self.results[test_name] = test
+ return test
+
+ def find_or_create_subtest(self, data):
+ test = self.find_or_create_test(data)
+ subtest_name = data["subtest"]
+
+ if test["subtests"].get(subtest_name):
+ return test["subtests"][subtest_name]
+
+ subtest = {
+ "status": defaultdict(int),
+ "messages": set()
+ }
+ test["subtests"][subtest_name] = subtest
+
+ return subtest
+
+ def test_start(self, data):
+ test = self.find_or_create_test(data)
+ test["start_time"] = data["time"]
+
+ def test_status(self, data):
+ subtest = self.find_or_create_subtest(data)
+ subtest["status"][data["status"]] += 1
+ if data.get("message"):
+ subtest["messages"].add(data["message"])
+
+ def test_end(self, data):
+ test = self.find_or_create_test(data)
+ test["status"][data["status"]] += 1
+ # Timestamps are in ms since epoch.
+ duration = data["time"] - test.pop("start_time")
+ test["longest_duration"][data["status"]] = max(
+ duration, test["longest_duration"][data["status"]])
+ try:
+ # test_timeout is in seconds; convert it to ms.
+ test["timeout"] = data["extra"]["test_timeout"] * 1000
+ except KeyError:
+ # If a test is skipped, it won't have extra info.
+ pass
+
+
+def is_inconsistent(results_dict, iterations):
+ """Return whether or not a single test is inconsistent."""
+ if 'SKIP' in results_dict:
+ return False
+ return len(results_dict) > 1 or sum(results_dict.values()) != iterations
+
+
+def find_slow_status(test):
+ """Check if a single test almost times out.
+
+ We are interested in tests that almost time out (i.e. likely to be flaky).
+ Therefore, timeout statuses are ignored, including (EXTERNAL-)TIMEOUT.
+ CRASH & ERROR are also ignored because the they override TIMEOUT; a test
+ that both crashes and times out is marked as CRASH, so it won't be flaky.
+
+ Returns:
+ A result status produced by a run that almost times out; None, if no
+ runs almost time out.
+ """
+ if "timeout" not in test:
+ return None
+ threshold = test["timeout"] * FLAKY_THRESHOLD
+ for status in ['PASS', 'FAIL', 'OK']:
+ if (status in test["longest_duration"] and
+ test["longest_duration"][status] > threshold):
+ return status
+ return None
+
+
+def process_results(log, iterations):
+ """Process test log and return overall results and list of inconsistent tests."""
+ inconsistent = []
+ slow = []
+ handler = LogHandler()
+ reader.handle_log(reader.read(log), handler)
+ results = handler.results
+ for test_name, test in results.items():
+ if is_inconsistent(test["status"], iterations):
+ inconsistent.append((test_name, None, test["status"], []))
+ for subtest_name, subtest in test["subtests"].items():
+ if is_inconsistent(subtest["status"], iterations):
+ inconsistent.append((test_name, subtest_name, subtest["status"], subtest["messages"]))
+
+ slow_status = find_slow_status(test)
+ if slow_status is not None:
+ slow.append((
+ test_name,
+ slow_status,
+ test["longest_duration"][slow_status],
+ test["timeout"]
+ ))
+
+ return results, inconsistent, slow
+
+
+def err_string(results_dict, iterations):
+ """Create and return string with errors from test run."""
+ rv = []
+ total_results = sum(results_dict.values())
+ if total_results > iterations:
+ rv.append("Duplicate subtest name")
+ else:
+ for key, value in sorted(results_dict.items()):
+ rv.append("%s%s" %
+ (key, ": %s/%s" % (value, iterations) if value != iterations else ""))
+ if total_results < iterations:
+ rv.append("MISSING: %s/%s" % (iterations - total_results, iterations))
+ rv = ", ".join(rv)
+ if is_inconsistent(results_dict, iterations):
+ rv = "**%s**" % rv
+ return rv
+
+
+def write_github_checks_summary_inconsistent(log, inconsistent, iterations):
+ """Outputs a summary of inconsistent tests for GitHub Checks."""
+ log("Some affected tests had inconsistent (flaky) results:\n")
+ write_inconsistent(log, inconsistent, iterations)
+ log("\n")
+ log("These may be pre-existing or new flakes. Please try to reproduce (see "
+ "the above WPT command, though some flags may not be needed when "
+ "running locally) and determine if your change introduced the flake. "
+ "If you are unable to reproduce the problem, please tag "
+ "`@web-platform-tests/wpt-core-team` in a comment for help.\n")
+
+
+def write_github_checks_summary_slow_tests(log, slow):
+ """Outputs a summary of slow tests for GitHub Checks."""
+ log("Some affected tests had slow results:\n")
+ write_slow_tests(log, slow)
+ log("\n")
+ log("These may be pre-existing or newly slow tests. Slow tests indicate "
+ "that a test ran very close to the test timeout limit and so may "
+ "become TIMEOUT-flaky in the future. Consider speeding up the test or "
+ "breaking it into multiple tests. For help, please tag "
+ "`@web-platform-tests/wpt-core-team` in a comment.\n")
+
+
+def write_inconsistent(log, inconsistent, iterations):
+ """Output inconsistent tests to the passed in logging function."""
+ log("## Unstable results ##\n")
+ strings = [(
+ "`%s`" % markdown_adjust(test),
+ ("`%s`" % markdown_adjust(subtest)) if subtest else "",
+ err_string(results, iterations),
+ ("`%s`" % markdown_adjust(";".join(messages))) if len(messages) else "")
+ for test, subtest, results, messages in inconsistent]
+ table(["Test", "Subtest", "Results", "Messages"], strings, log)
+
+
+def write_slow_tests(log, slow):
+ """Output slow tests to the passed in logging function."""
+ log("## Slow tests ##\n")
+ strings = [(
+ "`%s`" % markdown_adjust(test),
+ "`%s`" % status,
+ "`%.0f`" % duration,
+ "`%.0f`" % timeout)
+ for test, status, duration, timeout in slow]
+ table(["Test", "Result", "Longest duration (ms)", "Timeout (ms)"], strings, log)
+
+
+def write_results(log, results, iterations, pr_number=None, use_details=False):
+ log("## All results ##\n")
+ if use_details:
+ log("<details>\n")
+ log("<summary>%i %s ran</summary>\n\n" % (len(results),
+ "tests" if len(results) > 1
+ else "test"))
+
+ for test_name, test in results.items():
+ baseurl = "http://w3c-test.org/submissions"
+ if "https" in os.path.splitext(test_name)[0].split(".")[1:]:
+ baseurl = "https://w3c-test.org/submissions"
+ title = test_name
+ if use_details:
+ log("<details>\n")
+ if pr_number:
+ title = "<a href=\"%s/%s%s\">%s</a>" % (baseurl, pr_number, test_name, title)
+ log('<summary>%s</summary>\n\n' % title)
+ else:
+ log("### %s ###" % title)
+ strings = [("", err_string(test["status"], iterations), "")]
+
+ strings.extend(((
+ ("`%s`" % markdown_adjust(subtest_name)) if subtest else "",
+ err_string(subtest["status"], iterations),
+ ("`%s`" % markdown_adjust(';'.join(subtest["messages"]))) if len(subtest["messages"]) else "")
+ for subtest_name, subtest in test["subtests"].items()))
+ table(["Subtest", "Results", "Messages"], strings, log)
+ if use_details:
+ log("</details>\n")
+
+ if use_details:
+ log("</details>\n")
+
+
+def run_step(logger, iterations, restart_after_iteration, kwargs_extras, **kwargs):
+ kwargs = copy.deepcopy(kwargs)
+
+ if restart_after_iteration:
+ kwargs["repeat"] = iterations
+ else:
+ kwargs["rerun"] = iterations
+
+ kwargs["pause_after_test"] = False
+ kwargs.update(kwargs_extras)
+
+ def wrap_handler(x):
+ if not kwargs.get("verify_log_full", False):
+ x = LogLevelFilter(x, "WARNING")
+ x = LogActionFilter(x, ["log", "process_output"])
+ return x
+
+ initial_handlers = logger._state.handlers
+ logger._state.handlers = [wrap_handler(handler)
+ for handler in initial_handlers]
+
+ log = io.BytesIO()
+ # Setup logging for wptrunner that keeps process output and
+ # warning+ level logs only
+ logger.add_handler(StreamHandler(log, JSONFormatter()))
+
+ _, test_status = wptrunner.run_tests(**kwargs)
+
+ logger._state.handlers = initial_handlers
+ logger._state.running_tests = set()
+ logger._state.suite_started = False
+
+ log.seek(0)
+ total_iterations = test_status.repeated_runs * kwargs.get("rerun", 1)
+ all_skipped = test_status.all_skipped
+ results, inconsistent, slow = process_results(log, total_iterations)
+ return total_iterations, all_skipped, results, inconsistent, slow
+
+
+def get_steps(logger, repeat_loop, repeat_restart, kwargs_extras):
+ steps = []
+ for kwargs_extra in kwargs_extras:
+ if kwargs_extra:
+ flags_string = " with flags %s" % " ".join(
+ "%s=%s" % item for item in kwargs_extra.items())
+ else:
+ flags_string = ""
+
+ if repeat_loop:
+ desc = "Running tests in a loop %d times%s" % (repeat_loop,
+ flags_string)
+ steps.append((desc,
+ functools.partial(run_step,
+ logger,
+ repeat_loop,
+ False,
+ kwargs_extra),
+ repeat_loop))
+
+ if repeat_restart:
+ desc = "Running tests in a loop with restarts %s times%s" % (repeat_restart,
+ flags_string)
+ steps.append((desc,
+ functools.partial(run_step,
+ logger,
+ repeat_restart,
+ True,
+ kwargs_extra),
+ repeat_restart))
+
+ return steps
+
+
+def write_summary(logger, step_results, final_result):
+ for desc, result in step_results:
+ logger.info('::: %s : %s' % (desc, result))
+ logger.info(':::')
+ if final_result == "PASS":
+ log = logger.info
+ elif final_result == "TIMEOUT":
+ log = logger.warning
+ else:
+ log = logger.error
+ log('::: Test verification %s' % final_result)
+
+ logger.info(':::')
+
+
+def check_stability(logger, repeat_loop=10, repeat_restart=5, chaos_mode=True, max_time=None,
+ output_results=True, **kwargs):
+ kwargs_extras = [{}]
+ if chaos_mode and kwargs["product"] == "firefox":
+ kwargs_extras.append({"chaos_mode_flags": int("0xfb", base=16)})
+
+ steps = get_steps(logger, repeat_loop, repeat_restart, kwargs_extras)
+
+ start_time = datetime.now()
+ step_results = []
+
+ github_checks_outputter = get_gh_checks_outputter(kwargs.get("github_checks_text_file"))
+
+ for desc, step_func, expected_iterations in steps:
+ if max_time and datetime.now() - start_time > max_time:
+ logger.info("::: Test verification is taking too long: Giving up!")
+ logger.info("::: So far, all checks passed, but not all checks were run.")
+ write_summary(logger, step_results, "TIMEOUT")
+ return 2
+
+ logger.info(':::')
+ logger.info('::: Running test verification step "%s"...' % desc)
+ logger.info(':::')
+ total_iterations, all_skipped, results, inconsistent, slow = step_func(**kwargs)
+
+ logger.info(f"::: Ran {total_iterations} of expected {expected_iterations} iterations.")
+ if total_iterations <= 1 and expected_iterations > 1 and not all_skipped:
+ step_results.append((desc, "FAIL"))
+ logger.info("::: Reached iteration timeout before finishing 2 or more repeat runs.")
+ logger.info("::: At least 2 successful repeat runs are required to validate stability.")
+ write_summary(logger, step_results, "TIMEOUT")
+ return 1
+
+ if output_results:
+ write_results(logger.info, results, total_iterations)
+
+ if inconsistent:
+ step_results.append((desc, "FAIL"))
+ if github_checks_outputter:
+ write_github_checks_summary_inconsistent(github_checks_outputter.output,
+ inconsistent, total_iterations)
+ write_inconsistent(logger.info, inconsistent, total_iterations)
+ write_summary(logger, step_results, "FAIL")
+ return 1
+
+ if slow:
+ step_results.append((desc, "FAIL"))
+ if github_checks_outputter:
+ write_github_checks_summary_slow_tests(github_checks_outputter.output, slow)
+ write_slow_tests(logger.info, slow)
+ write_summary(logger, step_results, "FAIL")
+ return 1
+
+ # If the tests passed but the number of iterations didn't match the number expected to run,
+ # it is likely that the runs were stopped early to avoid a timeout.
+ if total_iterations != expected_iterations:
+ result = f"PASS * {total_iterations}/{expected_iterations} repeats completed"
+ step_results.append((desc, result))
+ else:
+ step_results.append((desc, "PASS"))
+
+ write_summary(logger, step_results, "PASS")
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/testdriver-extra.js b/testing/web-platform/tests/tools/wptrunner/wptrunner/testdriver-extra.js
new file mode 100644
index 0000000000..af25bf4111
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/testdriver-extra.js
@@ -0,0 +1,330 @@
+"use strict";
+
+(function() {
+ const pending = new Map();
+
+ let result = null;
+ let ctx_cmd_id = 0;
+ let testharness_context = null;
+
+ window.addEventListener("message", function(event) {
+ const data = event.data;
+
+        if (typeof data !== "object" || data === null) {
+ return;
+ }
+
+ if (is_test_context() && data.type === "testdriver-command") {
+ const command = data.message;
+ const ctx_id = command.cmd_id;
+ delete command.cmd_id;
+ const cmd_id = window.__wptrunner_message_queue.push(command);
+ let on_success = (data) => {
+ data.type = "testdriver-complete";
+ data.cmd_id = ctx_id;
+ event.source.postMessage(data, "*");
+ };
+ let on_failure = (data) => {
+ data.type = "testdriver-complete";
+ data.cmd_id = ctx_id;
+ event.source.postMessage(data, "*");
+ };
+ pending.set(cmd_id, [on_success, on_failure]);
+ } else if (data.type === "testdriver-complete") {
+ const cmd_id = data.cmd_id;
+ const [on_success, on_failure] = pending.get(cmd_id);
+ pending.delete(cmd_id);
+ const resolver = data.status === "success" ? on_success : on_failure;
+ resolver(data);
+ if (is_test_context()) {
+ window.__wptrunner_process_next_event();
+ }
+ }
+ });
+
+ function is_test_context() {
+ return window.__wptrunner_message_queue !== undefined;
+ }
+
+ // Code copied from /common/utils.js
+ function rand_int(bits) {
+ if (bits < 1 || bits > 53) {
+ throw new TypeError();
+ } else {
+ if (bits >= 1 && bits <= 30) {
+ return 0 | ((1 << bits) * Math.random());
+ } else {
+ var high = (0 | ((1 << (bits - 30)) * Math.random())) * (1 << 30);
+ var low = 0 | ((1 << 30) * Math.random());
+ return high + low;
+ }
+ }
+ }
+
+ function to_hex(x, length) {
+ var rv = x.toString(16);
+ while (rv.length < length) {
+ rv = "0" + rv;
+ }
+ return rv;
+ }
+
+ function get_window_id(win) {
+ if (win == window && is_test_context()) {
+ return null;
+ }
+ if (!win.__wptrunner_id) {
+ // generate a uuid
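+        // (version-4 style, e.g. "xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx", built from rand_int)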
+ win.__wptrunner_id = [to_hex(rand_int(32), 8),
+ to_hex(rand_int(16), 4),
+ to_hex(0x4000 | rand_int(12), 4),
+ to_hex(0x8000 | rand_int(14), 4),
+ to_hex(rand_int(48), 12)].join("-");
+ }
+ return win.__wptrunner_id;
+ }
+
+ const get_context = function(element) {
+ if (!element) {
+ return null;
+ }
+ let elementWindow = element.ownerDocument.defaultView;
+ if (!elementWindow) {
+ throw new Error("Browsing context for element was detached");
+ }
+ return elementWindow;
+ };
+
+ const get_selector = function(element) {
+ let selector;
+
+ if (element.id) {
+ const id = element.id;
+
+ selector = "#";
+ // escape everything, because it's easy to implement
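+            // e.g. an id of "foo" yields the selector "#\66 \6f \6f "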
+ for (let i = 0, len = id.length; i < len; i++) {
+ selector += '\\' + id.charCodeAt(i).toString(16) + ' ';
+ }
+ } else {
+ // push and then reverse to avoid O(n) unshift in the loop
+ let segments = [];
+ for (let node = element;
+ node.parentElement;
+ node = node.parentElement) {
+ let segment = "*|" + node.localName;
+ let nth = Array.prototype.indexOf.call(node.parentElement.children, node) + 1;
+ segments.push(segment + ":nth-child(" + nth + ")");
+ }
+ segments.push(":root");
+ segments.reverse();
+
+ selector = segments.join(" > ");
+ }
+
+ return selector;
+ };
+
+ const create_action = function(name, props) {
+ let cmd_id;
+ const action_msg = {type: "action",
+ action: name,
+ ...props};
+ if (action_msg.context) {
+ action_msg.context = get_window_id(action_msg.context);
+ }
+ if (is_test_context()) {
+ cmd_id = window.__wptrunner_message_queue.push(action_msg);
+ } else {
+ if (testharness_context === null) {
+ throw new Error("Tried to run in a non-testharness window without a call to set_test_context");
+ }
+ if (action_msg.context === null) {
+ action_msg.context = get_window_id(window);
+ }
+ cmd_id = ctx_cmd_id++;
+ action_msg.cmd_id = cmd_id;
+ window.test_driver.message_test({type: "testdriver-command",
+ message: action_msg});
+ }
+ const pending_promise = new Promise(function(resolve, reject) {
+ const on_success = data => {
+ result = JSON.parse(data.message).result;
+ resolve(result);
+ };
+ const on_failure = data => {
+ reject(`${data.status}: ${data.message}`);
+ };
+ pending.set(cmd_id, [on_success, on_failure]);
+ });
+ return pending_promise;
+ };
+
+ window.test_driver_internal.in_automation = true;
+
+ window.test_driver_internal.set_test_context = function(context) {
+ if (window.__wptrunner_message_queue) {
+ throw new Error("Tried to set testharness context in a window containing testharness.js");
+ }
+ testharness_context = context;
+ };
+
+ window.test_driver_internal.click = function(element) {
+ const selector = get_selector(element);
+ const context = get_context(element);
+ return create_action("click", {selector, context});
+ };
+
+ window.test_driver_internal.delete_all_cookies = function(context=null) {
+ return create_action("delete_all_cookies", {context});
+ };
+
+ window.test_driver_internal.get_all_cookies = function(context=null) {
+ return create_action("get_all_cookies", {context});
+ };
+
+ window.test_driver_internal.get_computed_label = function(element) {
+ const selector = get_selector(element);
+ const context = get_context(element);
+ return create_action("get_computed_label", {selector, context});
+ };
+
+ window.test_driver_internal.get_computed_role = function(element) {
+ const selector = get_selector(element);
+ const context = get_context(element);
+ return create_action("get_computed_role", {selector, context});
+ };
+
+ window.test_driver_internal.get_named_cookie = function(name, context=null) {
+ return create_action("get_named_cookie", {name, context});
+ };
+
+ window.test_driver_internal.minimize_window = function(context=null) {
+ return create_action("minimize_window", {context});
+ };
+
+ window.test_driver_internal.set_window_rect = function(rect, context=null) {
+ return create_action("set_window_rect", {rect, context});
+ };
+
+ window.test_driver_internal.get_window_rect = function(context=null) {
+ return create_action("get_window_rect", {context});
+ };
+
+ window.test_driver_internal.send_keys = function(element, keys) {
+ const selector = get_selector(element);
+ const context = get_context(element);
+ return create_action("send_keys", {selector, keys, context});
+ };
+
+ window.test_driver_internal.action_sequence = function(actions, context=null) {
+ for (let actionSequence of actions) {
+ if (actionSequence.type == "pointer") {
+ for (let action of actionSequence.actions) {
+                // The origin of each action can only be an element or one of the strings "viewport" or "pointer".
+ if (action.type == "pointerMove" && typeof(action.origin) != 'string') {
+ let action_context = get_context(action.origin);
+ action.origin = {selector: get_selector(action.origin)};
+ if (context !== null && action_context !== context) {
+ throw new Error("Actions must be in a single context");
+ }
+ context = action_context;
+ }
+ }
+ }
+ }
+ return create_action("action_sequence", {actions, context});
+ };
+
+ window.test_driver_internal.generate_test_report = function(message, context=null) {
+ return create_action("generate_test_report", {message, context});
+ };
+
+ window.test_driver_internal.set_permission = function(permission_params, context=null) {
+ return create_action("set_permission", {permission_params, context});
+ };
+
+ window.test_driver_internal.add_virtual_authenticator = function(config, context=null) {
+ return create_action("add_virtual_authenticator", {config, context});
+ };
+
+ window.test_driver_internal.remove_virtual_authenticator = function(authenticator_id, context=null) {
+ return create_action("remove_virtual_authenticator", {authenticator_id, context});
+ };
+
+ window.test_driver_internal.add_credential = function(authenticator_id, credential, context=null) {
+ return create_action("add_credential", {authenticator_id, credential, context});
+ };
+
+ window.test_driver_internal.get_credentials = function(authenticator_id, context=null) {
+ return create_action("get_credentials", {authenticator_id, context});
+ };
+
+ window.test_driver_internal.remove_credential = function(authenticator_id, credential_id, context=null) {
+ return create_action("remove_credential", {authenticator_id, credential_id, context});
+ };
+
+ window.test_driver_internal.remove_all_credentials = function(authenticator_id, context=null) {
+ return create_action("remove_all_credentials", {authenticator_id, context});
+ };
+
+ window.test_driver_internal.set_user_verified = function(authenticator_id, uv, context=null) {
+ return create_action("set_user_verified", {authenticator_id, uv, context});
+ };
+
+ window.test_driver_internal.set_spc_transaction_mode = function(mode, context = null) {
+ return create_action("set_spc_transaction_mode", {mode, context});
+ };
+
+ window.test_driver_internal.set_rph_registration_mode = function(mode, context = null) {
+ return create_action("set_rph_registration_mode", {mode, context});
+ };
+
+ window.test_driver_internal.cancel_fedcm_dialog = function(context = null) {
+ return create_action("cancel_fedcm_dialog", {context});
+ };
+
+ window.test_driver_internal.click_fedcm_dialog_button = function(dialog_button, context = null) {
+ return create_action("click_fedcm_dialog_button", {dialog_button, context});
+ };
+
+ window.test_driver_internal.select_fedcm_account = function(account_index, context = null) {
+ return create_action("select_fedcm_account", {account_index, context});
+ };
+
+ window.test_driver_internal.get_fedcm_account_list = function(context = null) {
+ return create_action("get_fedcm_account_list", {context});
+ };
+
+ window.test_driver_internal.get_fedcm_dialog_title = function(context = null) {
+ return create_action("get_fedcm_dialog_title", {context});
+ };
+
+ window.test_driver_internal.get_fedcm_dialog_type = function(context = null) {
+ return create_action("get_fedcm_dialog_type", {context});
+ };
+
+ window.test_driver_internal.set_fedcm_delay_enabled = function(enabled, context = null) {
+ return create_action("set_fedcm_delay_enabled", {enabled, context});
+ };
+
+ window.test_driver_internal.reset_fedcm_cooldown = function(context = null) {
+ return create_action("reset_fedcm_cooldown", {context});
+ };
+
+ window.test_driver_internal.create_virtual_sensor = function(sensor_type, sensor_params={}, context=null) {
+ return create_action("create_virtual_sensor", {sensor_type, sensor_params, context});
+ };
+
+ window.test_driver_internal.update_virtual_sensor = function(sensor_type, reading, context=null) {
+ return create_action("update_virtual_sensor", {sensor_type, reading, context});
+ };
+
+ window.test_driver_internal.remove_virtual_sensor = function(sensor_type, context=null) {
+ return create_action("remove_virtual_sensor", {sensor_type, context});
+ };
+
+ window.test_driver_internal.get_virtual_sensor_information = function(sensor_type, context=null) {
+ return create_action("get_virtual_sensor_information", {sensor_type, context});
+ };
+})();
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/testdriver-vendor.js b/testing/web-platform/tests/tools/wptrunner/wptrunner/testdriver-vendor.js
new file mode 100644
index 0000000000..3e88403636
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/testdriver-vendor.js
@@ -0,0 +1 @@
+// This file intentionally left blank
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/testharness_runner.html b/testing/web-platform/tests/tools/wptrunner/wptrunner/testharness_runner.html
new file mode 100644
index 0000000000..1cc80a270e
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/testharness_runner.html
@@ -0,0 +1,6 @@
+<!doctype html>
+<title></title>
+<script>
+var timeout_multiplier = 1;
+var win = null;
+</script>
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/testharnessreport-content-shell.js b/testing/web-platform/tests/tools/wptrunner/wptrunner/testharnessreport-content-shell.js
new file mode 100644
index 0000000000..1e24fdbcfb
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/testharnessreport-content-shell.js
@@ -0,0 +1,45 @@
+(function() {
+ var props = {output:%(output)d, debug: %(debug)s};
+ setup(props);
+
+ // Some tests navigate away from the original URL as part of the
+ // functionality they exercise. In that case, `add_completion_callback(...)`
+ // uses the final `window.location` to report the test ID, which may not be
+ // correct [1].
+ //
+ // Persisting the original `window.location` with standard web platform APIs
+  // (e.g., `localStorage`) could interfere with the tests themselves, so this must
+ // be avoided. Unfortunately, there doesn't appear to be anything in content
+ // shell's protocol mode or Blink-specific `window.testRunner` or
+ // `window.internals` [2] that could help with this. As such, the driver
+ // simply downgrades a mismatched test ID to a logged warning instead of a
+ // harness error.
+ //
+ // [1] crbug.com/1418753
+ // [2] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/docs/testing/writing_web_tests.md#Relying-on-Blink_Specific-Testing-APIs
+ const url = new URL(location.href);
+
+ testRunner.dumpAsText();
+ testRunner.waitUntilDone();
+ testRunner.setPopupBlockingEnabled(false);
+ testRunner.setDumpJavaScriptDialogs(false);
+ // Show `CONSOLE MESSAGE:` and `CONSOLE ERROR:` in stderr.
+ if (props.debug) {
+ testRunner.setDumpConsoleMessages(true);
+ }
+
+ add_completion_callback(function (tests, harness_status) {
+ const test_id = decodeURIComponent(url.pathname) + decodeURIComponent(url.search) + decodeURIComponent(url.hash);
+ const result_string = JSON.stringify([
+ test_id,
+ harness_status.status,
+ harness_status.message,
+ harness_status.stack,
+ tests.map(function(t) {
+ return [t.name, t.status, t.message, t.stack]
+ }),
+ ]);
+ testRunner.setCustomTextOutput(result_string);
+ testRunner.notifyDone();
+ });
+})();
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/testharnessreport-servo.js b/testing/web-platform/tests/tools/wptrunner/wptrunner/testharnessreport-servo.js
new file mode 100644
index 0000000000..4a27dc27ef
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/testharnessreport-servo.js
@@ -0,0 +1,17 @@
+var props = {output:%(output)d, debug: %(debug)s};
+var start_loc = document.createElement('a');
+start_loc.href = location.href;
+setup(props);
+
+add_completion_callback(function (tests, harness_status) {
+ var id = decodeURIComponent(start_loc.pathname) + decodeURIComponent(start_loc.search) + decodeURIComponent(start_loc.hash);
+ console.log("ALERT: RESULT: " + JSON.stringify([
+ id,
+ harness_status.status,
+ harness_status.message,
+ harness_status.stack,
+ tests.map(function(t) {
+ return [t.name, t.status, t.message, t.stack]
+ }),
+ ]));
+});
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/testharnessreport-servodriver.js b/testing/web-platform/tests/tools/wptrunner/wptrunner/testharnessreport-servodriver.js
new file mode 100644
index 0000000000..7819538dbb
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/testharnessreport-servodriver.js
@@ -0,0 +1,23 @@
+setup({output:%(output)d, debug: %(debug)s});
+
+add_completion_callback(function() {
+ add_completion_callback(function (tests, status) {
+ var subtest_results = tests.map(function(x) {
+ return [x.name, x.status, x.message, x.stack]
+ });
+ var id = location.pathname + location.search + location.hash;
+ var results = JSON.stringify([id,
+ status.status,
+ status.message,
+ status.stack,
+ subtest_results]);
+ (function done() {
+ if (window.__wd_results_callback__) {
+ clearTimeout(__wd_results_timer__);
+ __wd_results_callback__(results)
+ } else {
+ setTimeout(done, 20);
+ }
+ })()
+ })
+});
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/testharnessreport-wktr.js b/testing/web-platform/tests/tools/wptrunner/wptrunner/testharnessreport-wktr.js
new file mode 100644
index 0000000000..b7d350a426
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/testharnessreport-wktr.js
@@ -0,0 +1,23 @@
+var props = {output:%(output)d, debug: %(debug)s};
+var start_loc = document.createElement('a');
+start_loc.href = location.href;
+setup(props);
+
+testRunner.dumpAsText();
+testRunner.waitUntilDone();
+
+add_completion_callback(function (tests, harness_status) {
+ var id = decodeURIComponent(start_loc.pathname) + decodeURIComponent(start_loc.search) + decodeURIComponent(start_loc.hash);
+ var result_string = JSON.stringify([
+ id,
+ harness_status.status,
+ harness_status.message,
+ harness_status.stack,
+ tests.map(function(t) {
+ return [t.name, t.status, t.message, t.stack]
+ }),
+ ]);
+
+ console.log("WPTRUNNER OUTPUT:" + result_string);
+ testRunner.notifyDone();
+});
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/testharnessreport.js b/testing/web-platform/tests/tools/wptrunner/wptrunner/testharnessreport.js
new file mode 100644
index 0000000000..d385692445
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/testharnessreport.js
@@ -0,0 +1,88 @@
+class MessageQueue {
+ constructor() {
+ this.item_id = 0;
+ this._queue = [];
+ }
+
+ push(item) {
+ let cmd_id = this.item_id++;
+ item.id = cmd_id;
+ this._queue.push(item);
+ __wptrunner_process_next_event();
+ return cmd_id;
+ }
+
+ shift() {
+ return this._queue.shift();
+ }
+}
+
+window.__wptrunner_testdriver_callback = null;
+window.__wptrunner_message_queue = new MessageQueue();
+window.__wptrunner_url = null;
+
+window.__wptrunner_process_next_event = function() {
+ /* This function handles the next testdriver event. The presence of
+    window.__wptrunner_testdriver_callback is used as a switch; when that function
+    is present we are able to handle the next event and when it is not
+ present we must wait. Therefore to drive the event processing, this
+ function must be called in two circumstances:
+ * Every time there is a new event that we may be able to handle
+ * Every time we set the callback function
+ This function unsets the callback, so no further testdriver actions
+ will be run until it is reset, which wptrunner does after it has
+ completed handling the current action.
+ */
+
+ if (!window.__wptrunner_testdriver_callback) {
+ return;
+ }
+ var data = window.__wptrunner_message_queue.shift();
+ if (!data) {
+ return;
+ }
+
+ var payload = undefined;
+
+ switch(data.type) {
+ case "complete":
+ var tests = data.tests;
+ var status = data.status;
+
+ var subtest_results = tests.map(function(x) {
+ return [x.name, x.status, x.message, x.stack];
+ });
+ payload = [status.status,
+ status.message,
+ status.stack,
+ subtest_results];
+ clearTimeout(window.__wptrunner_timer);
+ break;
+ case "action":
+ payload = data;
+ break;
+ default:
+ return;
+ }
+ var callback = window.__wptrunner_testdriver_callback;
+ window.__wptrunner_testdriver_callback = null;
+ callback([__wptrunner_url, data.type, payload]);
+};
+
+(function() {
+ var props = {output: %(output)d,
+ timeout_multiplier: %(timeout_multiplier)s,
+ explicit_timeout: %(explicit_timeout)s,
+ debug: %(debug)s,
+ message_events: ["completion"]};
+
+ add_completion_callback(function(tests, harness_status) {
+ __wptrunner_message_queue.push({
+ "type": "complete",
+ "tests": tests,
+ "status": harness_status});
+ __wptrunner_process_next_event();
+ });
+ setup(props);
+})();
+
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/testloader.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/testloader.py
new file mode 100644
index 0000000000..aa266548d7
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/testloader.py
@@ -0,0 +1,708 @@
+# mypy: allow-untyped-calls, allow-untyped-defs
+from __future__ import annotations
+
+import abc
+import hashlib
+import itertools
+import json
+import os
+import queue
+from urllib.parse import urlsplit
+from abc import ABCMeta, abstractmethod
+from collections import defaultdict, deque, namedtuple
+from typing import (cast, Any, Callable, Dict, Deque, List, Mapping, MutableMapping, Optional, Set,
+ Tuple, Type)
+
+from . import manifestinclude
+from . import manifestexpected
+from . import manifestupdate
+from . import wpttest
+from mozlog import structured
+
+manifest = None
+manifest_update = None
+download_from_github = None
+
+# Mapping from (subsuite, test_type) to [Test]
+TestsByType = Mapping[Tuple[str, str], List[wpttest.Test]]
+
+
+def do_delayed_imports():
+ # This relies on an already loaded module having set the sys.path correctly :(
+ global manifest, manifest_update, download_from_github
+ from manifest import manifest # type: ignore
+ from manifest import update as manifest_update
+ from manifest.download import download_from_github # type: ignore
+
+
+class WriteQueue:
+ def __init__(self, queue_cls: Callable[[], Any] = queue.SimpleQueue):
+ """Queue wrapper that is only used for writing items to a queue.
+
+ Once all items are enqueued, call to_read() to get a reader for the queue.
+ This will also prevent further writes using this writer."""
+ self._raw_queue = queue_cls()
+
+ def put(self, item: Any) -> None:
+ if self._raw_queue is None:
+ raise ValueError("Tried to write to closed queue")
+ self._raw_queue.put(item)
+
+ def to_read(self) -> ReadQueue:
+ reader = ReadQueue(self._raw_queue)
+ self._raw_queue = None
+ return reader
+
+
+class ReadQueue:
+    """Queue wrapper that is only used for reading items from a queue."""
+
+    def __init__(self, raw_queue: Any):
+        self._raw_queue = raw_queue
+
+    def get(self) -> Any:
+        return self._raw_queue.get(False)
+
+
+class TestGroups:
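+    # The groups file is JSON mapping group names, optionally prefixed with a
+    # subsuite (e.g. "subsuite1:groupA"), to lists of test ids; illustrative:
+    # {"groupA": ["/dir/test.html"], "subsuite1:groupB": ["/other/test.html"]}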
+ def __init__(self, logger, path, subsuites):
+ try:
+ with open(path) as f:
+ data = json.load(f)
+ except ValueError:
+ logger.critical("test groups file %s not valid json" % path)
+ raise
+
+ self.tests_by_group = defaultdict(set)
+ self.group_by_test = {}
+ for group, test_ids in data.items():
+ id_parts = group.split(":", 1)
+ if len(id_parts) == 1:
+ group_name = id_parts[0]
+ subsuite = ""
+ else:
+ subsuite, group_name = id_parts
+ if subsuite not in subsuites:
+ raise ValueError(f"Unknown subsuite {subsuite} in group data {group}")
+ for test_id in test_ids:
+ self.group_by_test[(subsuite, test_id)] = group_name
+ self.tests_by_group[group_name].add(test_id)
+
+
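+# The subsuites file is JSON mapping each subsuite name to its configuration;
+# illustrative shape:
+# {"name": {"config": {...}, "run_info": {...}, "include": [...], "tags": [...]}}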
+def load_subsuites(logger: Any,
+ base_run_info: wpttest.RunInfo,
+ path: Optional[str],
+ include_subsuites: Set[str]) -> Dict[str, Subsuite]:
+ subsuites: Dict[str, Subsuite] = {}
+ run_all_subsuites = not include_subsuites
+ include_subsuites.add("")
+
+ def maybe_add_subsuite(name: str, data: Dict[str, Any]) -> None:
+ if run_all_subsuites or name in include_subsuites:
+ subsuites[name] = Subsuite(name,
+ data.get("config", {}),
+ base_run_info,
+ run_info_extras=data.get("run_info", {}),
+ include=data.get("include"),
+ tags=set(data["tags"]) if "tags" in data else None)
+ if name in include_subsuites:
+ include_subsuites.remove(name)
+
+ maybe_add_subsuite("", {})
+
+ if path is None:
+ if include_subsuites:
+ raise ValueError("Unrecognised subsuites {','.join(include_subsuites)}, missing --subsuite-file?")
+ return subsuites
+
+ try:
+ with open(path) as f:
+ data = json.load(f)
+ except ValueError:
+ logger.critical("subsuites file %s not valid json" % path)
+ raise
+
+ for key, subsuite in data.items():
+ if key == "":
+ raise ValueError("Subsuites must have a non-empty name")
+ maybe_add_subsuite(key, subsuite)
+
+ if include_subsuites:
+ raise ValueError(f"Unrecognised subsuites {','.join(include_subsuites)}")
+
+ return subsuites
+
+
+class Subsuite:
+ def __init__(self,
+ name: str,
+ config: Dict[str, Any],
+ base_run_info: Optional[wpttest.RunInfo] = None,
+ run_info_extras: Optional[Dict[str, Any]] = None,
+ include: Optional[List[str]] = None,
+ tags: Optional[Set[str]] = None):
+ self.name = name
+ self.config = config
+ self.run_info_extras = run_info_extras or {}
+ self.run_info_extras["subsuite"] = name
+ self.include = include
+ self.tags = tags
+
+ run_info = base_run_info.copy() if base_run_info is not None else {}
+ run_info.update(self.run_info_extras)
+ self.run_info = run_info
+
+ def manifest_filters(self, manifests):
+ if self.name:
+ manifest_filters = [TestFilter(manifests,
+ include=self.include,
+ explicit=True)]
+ return manifest_filters
+
+ # use base manifest_filters for default subsuite
+ return []
+
+ def __repr__(self):
+ return "Subsuite('%s', config:%s, run_info:%s)" % (self.name or 'default',
+ str(self.config),
+ str(self.run_info))
+
+
+def read_include_from_file(file):
+ new_include = []
+ with open(file) as f:
+ for line in f:
+ line = line.strip()
+            # Allow whole-line comments;
+            # URL fragments mean we can't support partial-line #-based comments
+ if len(line) > 0 and not line.startswith("#"):
+ new_include.append(line)
+ return new_include
+
+
+def update_include_for_groups(test_groups, include):
+ new_include = []
+ if include is None:
+ # We're just running everything
+ for tests in test_groups.tests_by_group.values():
+ new_include.extend(tests)
+ else:
+ for item in include:
+ if item in test_groups.tests_by_group:
+ new_include.extend(test_groups.tests_by_group[item])
+ else:
+ new_include.append(item)
+ return new_include
+
+
+class TestChunker(abc.ABC):
+ def __init__(self, total_chunks: int, chunk_number: int, **kwargs: Any):
+ self.total_chunks = total_chunks
+ self.chunk_number = chunk_number
+ assert self.chunk_number <= self.total_chunks
+ self.logger = structured.get_default_logger()
+ assert self.logger
+ self.kwargs = kwargs
+
+ @abstractmethod
+ def __call__(self, manifest):
+ ...
+
+
+class Unchunked(TestChunker):
+ def __init__(self, *args, **kwargs):
+ TestChunker.__init__(self, *args, **kwargs)
+ assert self.total_chunks == 1
+
+ def __call__(self, manifest, **kwargs):
+ yield from manifest
+
+
+class HashChunker(TestChunker):
+ def __call__(self, manifest):
+ for test_type, test_path, tests in manifest:
+ tests_for_chunk = {
+ test for test in tests
+ if self._key_in_chunk(self.chunk_key(test_type, test_path, test))
+ }
+ if tests_for_chunk:
+ yield test_type, test_path, tests_for_chunk
+
+ def _key_in_chunk(self, key: str) -> bool:
+ chunk_index = self.chunk_number - 1
+ digest = hashlib.md5(key.encode()).hexdigest()
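+        # e.g. with total_chunks=4 and chunk_number=2, keep keys whose digest (as an int) % 4 == 1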
+ return int(digest, 16) % self.total_chunks == chunk_index
+
+ @abstractmethod
+ def chunk_key(self, test_type: str, test_path: str,
+ test: wpttest.Test) -> str:
+ ...
+
+
+class PathHashChunker(HashChunker):
+ def chunk_key(self, test_type: str, test_path: str,
+ test: wpttest.Test) -> str:
+ return test_path
+
+
+class IDHashChunker(HashChunker):
+ def chunk_key(self, test_type: str, test_path: str,
+ test: wpttest.Test) -> str:
+ return cast(str, test.id)
+
+
+class DirectoryHashChunker(HashChunker):
+ """Like HashChunker except the directory is hashed.
+
+ This ensures that all tests in the same directory end up in the same
+ chunk.
+ """
+ def chunk_key(self, test_type: str, test_path: str,
+ test: wpttest.Test) -> str:
+ depth = self.kwargs.get("depth")
+ if depth:
+ return os.path.sep.join(os.path.dirname(test_path).split(os.path.sep, depth)[:depth])
+ else:
+ return os.path.dirname(test_path)
+
+
+class TestFilter:
+ """Callable that restricts the set of tests in a given manifest according
+ to initial criteria"""
+ def __init__(self, test_manifests, include=None, exclude=None, manifest_path=None, explicit=False):
+ if manifest_path is None or include or explicit:
+ self.manifest = manifestinclude.IncludeManifest.create()
+ self.manifest.set_defaults()
+ else:
+ self.manifest = manifestinclude.get_manifest(manifest_path)
+
+ if include or explicit:
+ self.manifest.set("skip", "true")
+
+ if include:
+ for item in include:
+ self.manifest.add_include(test_manifests, item)
+
+ if exclude:
+ for item in exclude:
+ self.manifest.add_exclude(test_manifests, item)
+
+ def __call__(self, manifest_iter):
+ for test_type, test_path, tests in manifest_iter:
+ include_tests = set()
+ for test in tests:
+ if self.manifest.include(test):
+ include_tests.add(test)
+
+ if include_tests:
+ yield test_type, test_path, include_tests
+
+
+class TagFilter:
+ def __init__(self, include_tags, exclude_tags):
+ self.include_tags = set(include_tags) if include_tags else None
+ self.exclude_tags = set(exclude_tags) if exclude_tags else None
+
+ def __call__(self, test):
+ does_match = True
+ if self.include_tags:
+ does_match &= bool(test.tags & self.include_tags)
+ if self.exclude_tags:
+ does_match &= not (test.tags & self.exclude_tags)
+ return does_match
+
+
+class ManifestLoader:
+ def __init__(self, test_paths, force_manifest_update=False, manifest_download=False,
+ types=None):
+ do_delayed_imports()
+ self.test_paths = test_paths
+ self.force_manifest_update = force_manifest_update
+ self.manifest_download = manifest_download
+ self.types = types
+ self.logger = structured.get_default_logger()
+ if self.logger is None:
+ self.logger = structured.structuredlog.StructuredLogger("ManifestLoader")
+
+ def load(self):
+ rv = {}
+ for url_base, test_root in self.test_paths.items():
+ manifest_file = self.load_manifest(url_base, test_root)
+ path_data = {"url_base": url_base,
+ "tests_path": test_root.tests_path,
+ "metadata_path": test_root.metadata_path,
+ "manifest_path": test_root.manifest_path}
+ rv[manifest_file] = path_data
+ return rv
+
+ def load_manifest(self, url_base, test_root):
+ cache_root = os.path.join(test_root.metadata_path, ".cache")
+ if self.manifest_download:
+ download_from_github(test_root.manifest_path, test_root.tests_path)
+ return manifest.load_and_update(test_root.tests_path, test_root.manifest_path, url_base,
+ cache_root=cache_root, update=self.force_manifest_update,
+ types=self.types)
+
+
+def iterfilter(filters, iter):
+ for f in filters:
+ iter = f(iter)
+ yield from iter
+
+
+class TestLoader:
+ """Loads tests according to a WPT manifest and any associated expectation files"""
+ def __init__(self,
+ test_manifests,
+ test_types,
+ base_run_info,
+ subsuites=None,
+ manifest_filters=None,
+ test_filters=None,
+ chunk_type="none",
+ total_chunks=1,
+ chunk_number=1,
+ include_https=True,
+ include_h2=True,
+ include_webtransport_h3=False,
+ skip_timeout=False,
+ skip_crash=False,
+ skip_implementation_status=None,
+ chunker_kwargs=None):
+
+ self.test_types = test_types
+ self.base_run_info = base_run_info
+ self.subsuites = subsuites or {}
+
+ self.manifest_filters = manifest_filters if manifest_filters is not None else []
+ self.test_filters = test_filters if test_filters is not None else []
+
+ self.manifests = test_manifests
+ self.tests = None
+ self.disabled_tests = None
+ self.include_https = include_https
+ self.include_h2 = include_h2
+ self.include_webtransport_h3 = include_webtransport_h3
+ self.skip_timeout = skip_timeout
+ self.skip_crash = skip_crash
+ self.skip_implementation_status = skip_implementation_status
+
+ self.chunk_type = chunk_type
+ self.total_chunks = total_chunks
+ self.chunk_number = chunk_number
+
+ if chunker_kwargs is None:
+ chunker_kwargs = {}
+ self.chunker = {"none": Unchunked,
+ "hash": PathHashChunker,
+ "id_hash": IDHashChunker,
+ "dir_hash": DirectoryHashChunker}[chunk_type](total_chunks,
+ chunk_number,
+ **chunker_kwargs)
+
+ self._test_ids = None
+
+ self.directory_manifests = {}
+ self._load_tests()
+
+ @property
+ def test_ids(self):
+ if self._test_ids is None:
+ self._test_ids = []
+ for test_dict in [self.disabled_tests, self.tests]:
+ for subsuite in self.subsuites:
+ for test_type in self.test_types:
+ self._test_ids += [item.id for item in test_dict[subsuite][test_type]]
+ return self._test_ids
+
+ def get_test(self, manifest_file, manifest_test, inherit_metadata, test_metadata):
+ if test_metadata is not None:
+ inherit_metadata.append(test_metadata)
+ test_metadata = test_metadata.get_test(manifestupdate.get_test_name(manifest_test.id))
+
+ return wpttest.from_manifest(manifest_file, manifest_test, inherit_metadata, test_metadata)
+
+ def load_dir_metadata(self, run_info, test_manifest, metadata_path, test_path):
+ rv = []
+ path_parts = os.path.dirname(test_path).split(os.path.sep)
+ for i in range(len(path_parts) + 1):
+ path = os.path.join(metadata_path, os.path.sep.join(path_parts[:i]), "__dir__.ini")
+ if path not in self.directory_manifests:
+ self.directory_manifests[path] = manifestexpected.get_dir_manifest(path,
+ run_info)
+ manifest = self.directory_manifests[path]
+ if manifest is not None:
+ rv.append(manifest)
+ return rv
+
+ def load_metadata(self, run_info, test_manifest, metadata_path, test_path):
+ inherit_metadata = self.load_dir_metadata(run_info, test_manifest, metadata_path, test_path)
+ test_metadata = manifestexpected.get_manifest(
+ metadata_path, test_path, run_info)
+ return inherit_metadata, test_metadata
+
+ def iter_tests(self, run_info, manifest_filters):
+ manifest_items = []
+ manifests_by_url_base = {}
+
+ for manifest in sorted(self.manifests.keys(), key=lambda x:x.url_base):
+ manifest_iter = iterfilter(manifest_filters,
+ manifest.itertypes(*self.test_types))
+ manifest_items.extend(manifest_iter)
+ manifests_by_url_base[manifest.url_base] = manifest
+
+ if self.chunker is not None:
+ manifest_items = self.chunker(manifest_items)
+
+ for test_type, test_path, tests in manifest_items:
+ manifest_file = manifests_by_url_base[next(iter(tests)).url_base]
+ metadata_path = self.manifests[manifest_file]["metadata_path"]
+
+ inherit_metadata, test_metadata = self.load_metadata(run_info, manifest_file, metadata_path, test_path)
+ for test in tests:
+ wpt_test = self.get_test(manifest_file, test, inherit_metadata, test_metadata)
+ if all(f(wpt_test) for f in self.test_filters):
+ yield test_path, test_type, wpt_test
+
+ def _load_tests(self):
+ """Read in the tests from the manifest file"""
+ tests_enabled = {}
+ tests_disabled = {}
+
+ for subsuite_name, subsuite in self.subsuites.items():
+ tests_enabled[subsuite_name] = defaultdict(list)
+ tests_disabled[subsuite_name] = defaultdict(list)
+ run_info = subsuite.run_info
+ if not subsuite_name:
+ manifest_filters = self.manifest_filters
+ else:
+ manifest_filters = subsuite.manifest_filters(self.manifests)
+ for test_path, test_type, test in self.iter_tests(run_info, manifest_filters):
+ enabled = not test.disabled()
+ if not self.include_https and test.environment["protocol"] == "https":
+ enabled = False
+ if not self.include_h2 and test.environment["protocol"] == "h2":
+ enabled = False
+ if self.skip_timeout and test.expected() == "TIMEOUT":
+ enabled = False
+ if self.skip_crash and test.expected() == "CRASH":
+ enabled = False
+ if self.skip_implementation_status and test.implementation_status() in self.skip_implementation_status:
+ # for backlog, we want to run timeout/crash:
+ if not (test.implementation_status() == "implementing" and test.expected() in ["TIMEOUT", "CRASH"]):
+ enabled = False
+ target = tests_enabled if enabled else tests_disabled
+ target[subsuite_name][test_type].append(test)
+
+ self.tests = tests_enabled
+ self.disabled_tests = tests_disabled
+
+ def groups(self, test_types, chunk_type="none", total_chunks=1, chunk_number=1):
+ groups = set()
+
+ for subsuite in self.tests:
+ for test_type in test_types:
+ for test in self.tests[subsuite][test_type]:
+ group = test.url.split("/")[1]
+ groups.add(group)
+
+ return groups
+
+
+def get_test_queue_builder(**kwargs: Any) -> Tuple[TestQueueBuilder, Mapping[str, Any]]:
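+ """Select a TestQueueBuilder for the requested scheduling strategy and return it together with any extra chunker kwargs."""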
+ builder_kwargs = {"processes": kwargs["processes"],
+ "logger": kwargs["logger"]}
+ chunker_kwargs = {}
+ builder_cls: Type[TestQueueBuilder]
+ if kwargs["fully_parallel"]:
+ builder_cls = FullyParallelGroupedSource
+ elif kwargs["run_by_dir"] is not False:
+ # A value of None indicates infinite depth
+ builder_cls = PathGroupedSource
+ builder_kwargs["depth"] = kwargs["run_by_dir"]
+ chunker_kwargs["depth"] = kwargs["run_by_dir"]
+ elif kwargs["test_groups"]:
+ builder_cls = GroupFileTestSource
+ builder_kwargs["test_groups"] = kwargs["test_groups"]
+ else:
+ builder_cls = SingleTestSource
+ return builder_cls(**builder_kwargs), chunker_kwargs
+
+
+TestGroup = namedtuple("TestGroup", ["group", "subsuite", "test_type", "metadata"])
+
+
+class TestQueueBuilder:
+ __metaclass__ = ABCMeta
+
+ def __init__(self, **kwargs: Any):
+ """Class for building a queue of groups of tests to run.
+
+ Each item in the queue is a TestGroup, which consists of an iterable of
+ tests to run, the name of the subsuite, the name of the test type, and
+ a dictionary containing group-specific metadata.
+
+ Tests in the same group are run in the same TestRunner in the
+ provided order."""
+ self.kwargs = kwargs
+
+ def make_queue(self, tests_by_type: TestsByType) -> Tuple[ReadQueue, int]:
+ test_queue = WriteQueue()
+ groups = self.make_groups(tests_by_type)
+ processes = self.process_count(self.kwargs["processes"], len(groups))
+ if processes > 1:
+ groups.sort(key=lambda group: (
+ # Place groups of the same test type together to minimize
+ # browser restarts.
+ group.test_type,
+ # Next, run larger groups first to avoid straggler runners. Use
+ # timeout to give slow tests greater relative weight.
+ -sum(test.timeout for test in group.group),
+ ))
+ for item in groups:
+ test_queue.put(item)
+
+ return test_queue.to_read(), processes
+
+ @abstractmethod
+ def make_groups(self, tests_by_type: TestsByType) -> List[TestGroup]:
+ """Divide a given set of tests into groups that will be run together."""
+ pass
+
+ @abstractmethod
+ def tests_by_group(self, tests_by_type: TestsByType) -> Mapping[str, List[str]]:
+ pass
+
+ def group_metadata(self, state: Mapping[str, Any]) -> Mapping[str, Any]:
+ return {"scope": "/"}
+
+ def process_count(self, requested_processes: int, num_test_groups: int) -> int:
+ """Get the number of processes to use.
+
+ This must always be at least one, but otherwise not more than the number of test groups"""
+ return max(1, min(requested_processes, num_test_groups))
+
+
+class SingleTestSource(TestQueueBuilder):
+ def make_groups(self, tests_by_type: TestsByType) -> List[TestGroup]:
+ groups = []
+ for (subsuite, test_type), tests in tests_by_type.items():
+ processes = self.kwargs["processes"]
+ queues: List[Deque[TestGroup]] = [deque([]) for _ in range(processes)]
+ metadatas = [self.group_metadata({}) for _ in range(processes)]
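+ # Spread tests across the per-process queues by hashing their ids; each queue shares one group metadata dict.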
+ for test in tests:
+ idx = hash(test.id) % processes
+ group = queues[idx]
+ metadata = metadatas[idx]
+ group.append(test)
+ test.update_metadata(metadata)
+
+ for item in zip(queues,
+ itertools.repeat(subsuite),
+ itertools.repeat(test_type),
+ metadatas):
+ if len(item[0]) > 0:
+ groups.append(TestGroup(*item))
+ return groups
+
+ def tests_by_group(self, tests_by_type: TestsByType) -> Mapping[str, List[str]]:
+ groups: MutableMapping[str, List[str]] = defaultdict(list)
+ for (subsuite, test_type), tests in tests_by_type.items():
+ group_name = f"{subsuite}:{self.group_metadata({})['scope']}"
+ groups[group_name].extend(test.id for test in tests)
+ return groups
+
+
+class PathGroupedSource(TestQueueBuilder):
+ def new_group(self,
+ state: MutableMapping[str, Any],
+ subsuite: str,
+ test_type: str,
+ test: wpttest.Test) -> bool:
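+ # A new group starts whenever the (subsuite, test type, path prefix up to depth) differs from the previous test's key.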
+ depth = self.kwargs.get("depth")
+ if depth is True or depth == 0:
+ depth = None
+ path = urlsplit(test.url).path.split("/")[1:-1][:depth]
+ rv = (subsuite, test_type, path) != state.get("prev_group_key")
+ state["prev_group_key"] = (subsuite, test_type, path)
+ return rv
+
+ def make_groups(self, tests_by_type: TestsByType) -> List[TestGroup]:
+ groups = []
+ state: MutableMapping[str, Any] = {}
+ for (subsuite, test_type), tests in tests_by_type.items():
+ for test in tests:
+ if self.new_group(state, subsuite, test_type, test):
+ group_metadata = self.group_metadata(state)
+ groups.append(TestGroup(deque(), subsuite, test_type, group_metadata))
+ group, _, _, metadata = groups[-1]
+ group.append(test)
+ test.update_metadata(metadata)
+ return groups
+
+ def tests_by_group(self, tests_by_type: TestsByType) -> Mapping[str, List[str]]:
+ groups = defaultdict(list)
+ state: MutableMapping[str, Any] = {}
+ for (subsuite, test_type), tests in tests_by_type.items():
+ for test in tests:
+ if self.new_group(state, subsuite, test_type, test):
+ group = self.group_metadata(state)['scope']
+ if subsuite:
+ group_name = f"{subsuite}:{group}"
+ else:
+ group_name = group
+ groups[group_name].append(test.id)
+ return groups
+
+ def group_metadata(self, state: Mapping[str, Any]) -> Mapping[str, Any]:
+ return {"scope": "/%s" % "/".join(state["prev_group_key"][2])}
+
+
+class FullyParallelGroupedSource(PathGroupedSource):
+ # Chuck every test into a different group, so that they can run
+ # fully parallel with each other. Useful to run a lot of tests
+ # clustered in a few directories.
+ def new_group(self,
+ state: MutableMapping[str, Any],
+ subsuite: str,
+ test_type: str,
+ test: wpttest.Test) -> bool:
+ path = urlsplit(test.url).path.split("/")[1:-1]
+ state["prev_group_key"] = (subsuite, test_type, path)
+ return True
+
+
+class GroupFileTestSource(TestQueueBuilder):
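+ # Build groups from an explicit test-groups file that maps each test id to a named group.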
+ def make_groups(self, tests_by_type: TestsByType) -> List[TestGroup]:
+ groups = []
+ for (subsuite, test_type), tests in tests_by_type.items():
+ tests_by_group = self.tests_by_group({(subsuite, test_type): tests})
+ ids_to_tests = {test.id: test for test in tests}
+ for group_name, test_ids in tests_by_group.items():
+ group_metadata = {"scope": group_name}
+ group: Deque[wpttest.Test] = deque()
+ for test_id in test_ids:
+ test = ids_to_tests[test_id]
+ group.append(test)
+ test.update_metadata(group_metadata)
+ groups.append(TestGroup(group, subsuite, test_type, group_metadata))
+ return groups
+
+ def tests_by_group(self, tests_by_type: TestsByType) -> Mapping[str, List[str]]:
+ test_groups = self.kwargs["test_groups"]
+
+ tests_by_group = defaultdict(list)
+ for (subsuite, test_type), tests in tests_by_type.items():
+ for test in tests:
+ try:
+ group = test_groups.group_by_test[(subsuite, test.id)]
+ except KeyError:
+ print(f"{test.id} is missing from test groups file")
+ raise
+ if subsuite:
+ group_name = f"{subsuite}:{group}"
+ else:
+ group_name = group
+ tests_by_group[group_name].append(test.id)
+
+ return tests_by_group
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/testrunner.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/testrunner.py
new file mode 100644
index 0000000000..28d06f88ee
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/testrunner.py
@@ -0,0 +1,1049 @@
+# mypy: allow-untyped-defs
+
+import threading
+import time
+import traceback
+from queue import Empty
+from collections import namedtuple, defaultdict
+from typing import Any, Mapping, Optional
+
+from mozlog import structuredlog, capture
+
+from . import mpcontext, testloader
+
+# Special value used as a sentinel in various commands
+Stop = object()
+
+
+def release_mozlog_lock():
+ try:
+ from mozlog.structuredlog import StructuredLogger
+ try:
+ StructuredLogger._lock.release()
+ except threading.ThreadError:
+ pass
+ except ImportError:
+ pass
+
+
+TestImplementation = namedtuple('TestImplementation',
+ ['executor_cls', 'executor_kwargs',
+ 'browser_cls', 'browser_kwargs'])
+
+
+class LogMessageHandler:
+ def __init__(self, send_message):
+ self.send_message = send_message
+
+ def __call__(self, data):
+ self.send_message("log", data)
+
+
+class TestRunner:
+ """Class implementing the main loop for running tests.
+
+ This class delegates the job of actually running a test to the executor
+ that is passed in.
+
+ :param logger: Structured logger
+ :param command_queue: multiprocessing.Queue used to send commands to the
+ process
+ :param result_queue: multiprocessing.Queue used to send results to the
+ parent TestRunnerManager process
+ :param executor: TestExecutor object that will actually run a test.
+ """
+ def __init__(self, logger, command_queue, result_queue, executor, recording):
+ self.command_queue = command_queue
+ self.result_queue = result_queue
+
+ self.executor = executor
+ self.name = mpcontext.get_context().current_process().name
+ self.logger = logger
+ self.recording = recording
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.teardown()
+
+ def setup(self):
+ self.logger.debug("Executor setup")
+ try:
+ self.executor.setup(self)
+ except Exception:
+ # The caller is responsible for logging the exception if required
+ self.send_message("init_failed")
+ else:
+ self.send_message("init_succeeded")
+ self.logger.debug("Executor setup done")
+
+ def teardown(self):
+ self.executor.teardown()
+ self.send_message("runner_teardown")
+ self.result_queue = None
+ self.command_queue = None
+ self.browser = None
+
+ def run(self):
+ """Main loop accepting commands over the pipe and triggering
+ the associated methods"""
+ try:
+ self.setup()
+ except Exception:
+ self.logger.warning("An error occurred during executor setup:\n%s" %
+ traceback.format_exc())
+ raise
+ commands = {"run_test": self.run_test,
+ "reset": self.reset,
+ "stop": self.stop,
+ "wait": self.wait}
+ while True:
+ command, args = self.command_queue.get()
+ try:
+ rv = commands[command](*args)
+ except Exception:
+ self.send_message("error",
+ "Error running command %s with arguments %r:\n%s" %
+ (command, args, traceback.format_exc()))
+ else:
+ if rv is Stop:
+ break
+
+ def stop(self):
+ return Stop
+
+ def reset(self):
+ self.executor.reset()
+
+ def run_test(self, test):
+ try:
+ return self.executor.run_test(test)
+ except Exception:
+ message = "TestRunner.run_test caught an exception:\n"
+ message += traceback.format_exc()
+ self.logger.error(message)
+ raise
+
+ def wait(self):
+ rerun = self.executor.wait()
+ self.send_message("wait_finished", rerun)
+
+ def send_message(self, command, *args):
+ self.result_queue.put((command, args))
+
+
+def start_runner(runner_command_queue, runner_result_queue,
+ executor_cls, executor_kwargs,
+ executor_browser_cls, executor_browser_kwargs,
+ capture_stdio, stop_flag, recording):
+ """Launch a TestRunner in a new process"""
+
+ def send_message(command, *args):
+ runner_result_queue.put((command, args))
+
+ def handle_error(e):
+ logger.critical(traceback.format_exc())
+ stop_flag.set()
+
+ # Ensure that when we start this in a new process we have the global lock
+ # in the logging module unlocked
+ release_mozlog_lock()
+
+ proc_name = mpcontext.get_context().current_process().name
+ logger = structuredlog.StructuredLogger(proc_name)
+ logger.add_handler(LogMessageHandler(send_message))
+
+ with capture.CaptureIO(logger, capture_stdio):
+ try:
+ browser = executor_browser_cls(**executor_browser_kwargs)
+ executor = executor_cls(logger, browser, **executor_kwargs)
+ with TestRunner(logger, runner_command_queue, runner_result_queue, executor, recording) as runner:
+ try:
+ runner.run()
+ except KeyboardInterrupt:
+ stop_flag.set()
+ except Exception as e:
+ handle_error(e)
+ except Exception as e:
+ handle_error(e)
+
+
+class BrowserManager:
+ def __init__(self, logger, browser, command_queue, no_timeout=False):
+ self.logger = logger
+ self.browser = browser
+ self.no_timeout = no_timeout
+ self.browser_settings = None
+ self.last_test = None
+
+ self.started = False
+
+ self.browser_pid = None
+ self.init_timer = None
+ self.command_queue = command_queue
+
+ def update_settings(self, test):
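+ """Record settings for the upcoming test and return True if the browser must be restarted (settings changed, or a new test expected to CRASH)."""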
+ browser_settings = self.browser.settings(test)
+ restart_required = ((self.browser_settings is not None and
+ self.browser_settings != browser_settings) or
+ (self.last_test != test and test.expected() == "CRASH"))
+ self.browser_settings = browser_settings
+ self.last_test = test
+ return restart_required
+
+ def init(self, group_metadata):
+ """Launch the browser that is being tested,
+ and the TestRunner process that will run the tests."""
+ # It seems that this lock is helpful to prevent some race that otherwise
+ # sometimes stops the spawned processes initialising correctly, and
+ # leaves this thread hung
+ if self.init_timer is not None:
+ self.init_timer.cancel()
+
+ self.logger.debug("Init called, starting browser and runner")
+
+ if not self.no_timeout:
+ self.init_timer = threading.Timer(self.browser.init_timeout,
+ self.init_timeout)
+ try:
+ if self.init_timer is not None:
+ self.init_timer.start()
+ self.logger.debug("Starting browser with settings %r" % self.browser_settings)
+ self.browser.start(group_metadata=group_metadata, **self.browser_settings)
+ self.browser_pid = self.browser.pid
+ except Exception:
+ self.logger.error(f"Failure during init:\n{traceback.format_exc()}")
+ if self.init_timer is not None:
+ self.init_timer.cancel()
+ succeeded = False
+ else:
+ succeeded = True
+ self.started = True
+
+ return succeeded
+
+ def send_message(self, command, *args):
+ self.command_queue.put((command, args))
+
+ def init_timeout(self):
+ # This is called from a separate thread, so we send a message to the
+ # main loop so we get back onto the manager thread
+ self.logger.debug("init_failed called from timer")
+ self.send_message("init_failed")
+
+ def after_init(self):
+ """Callback when we have started the browser, started the remote
+ control connection, and we are ready to start testing."""
+ if self.init_timer is not None:
+ self.init_timer.cancel()
+
+ def stop(self, force=False):
+ self.browser.stop(force=force)
+ self.started = False
+
+ def cleanup(self):
+ if self.init_timer is not None:
+ self.init_timer.cancel()
+
+ def check_crash(self, test_id):
+ return self.browser.check_crash(process=self.browser_pid, test=test_id)
+
+ def is_alive(self):
+ return self.browser.is_alive()
+
+
+class TestSource:
+ def __init__(self, logger: structuredlog.StructuredLogger, test_queue: testloader.ReadQueue):
+ self.logger = logger
+ self.test_queue = test_queue
+ self.current_group = testloader.TestGroup(None, None, None, None)
+
+ def group(self) -> testloader.TestGroup:
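+ """Return the current group, fetching the next one from the queue when the current group is exhausted; a group of Nones signals an empty queue."""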
+ if not self.current_group.group or len(self.current_group.group) == 0:
+ try:
+ self.current_group = self.test_queue.get()
+ self.logger.debug(f"Got new test group subsuite:{self.current_group[1]} "
+ f"test_type:{self.current_group[2]}")
+ except Empty:
+ return testloader.TestGroup(None, None, None, None)
+ return self.current_group
+
+
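+# States of the TestRunnerManager state machine; each carries the data needed to run, restart, or stop work on the current test group.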
+class _RunnerManagerState:
+ before_init = namedtuple("before_init", [])
+ initializing = namedtuple("initializing",
+ ["subsuite", "test_type", "test", "test_group",
+ "group_metadata", "failure_count"])
+ running = namedtuple("running", ["subsuite", "test_type", "test", "test_group", "group_metadata"])
+ restarting = namedtuple("restarting", ["subsuite", "test_type", "test", "test_group",
+ "group_metadata", "force_stop"])
+ error = namedtuple("error", [])
+ stop = namedtuple("stop", ["force_stop"])
+
+
+RunnerManagerState = _RunnerManagerState()
+
+
+class TestRunnerManager(threading.Thread):
+ def __init__(self, suite_name, index, test_queue,
+ test_implementations, stop_flag, retry_index=0, rerun=1,
+ pause_after_test=False, pause_on_unexpected=False,
+ restart_on_unexpected=True, debug_info=None,
+ capture_stdio=True, restart_on_new_group=True, recording=None, max_restarts=5):
+ """Thread that owns a single TestRunner process and any processes required
+ by the TestRunner (e.g. the Firefox binary).
+
+ TestRunnerManagers are responsible for launching the browser process and the
+ runner process, and for logging the test progress. The actual test running
+ is done by the TestRunner. In particular they:
+
+ * Start the binary of the program under test
+ * Start the TestRunner
+ * Tell the TestRunner to start a test, if any
+ * Log that the test started
+ * Log the test results
+ * Take any remedial action required e.g. restart crashed or hung
+ processes
+ """
+ self.suite_name = suite_name
+ self.manager_number = index
+ self.test_implementation_key = None
+
+ self.test_implementations = {}
+ for key, test_implementation in test_implementations.items():
+ browser_kwargs = test_implementation.browser_kwargs
+ if browser_kwargs.get("device_serial"):
+ browser_kwargs = browser_kwargs.copy()
+ # Assign Android device to runner according to current manager index
+ browser_kwargs["device_serial"] = browser_kwargs["device_serial"][index]
+ self.test_implementations[key] = TestImplementation(
+ test_implementation.executor_cls,
+ test_implementation.executor_kwargs,
+ test_implementation.browser_cls,
+ browser_kwargs)
+ else:
+ self.test_implementations[key] = test_implementation
+
+ # Flags used to shut down this thread if we get a sigint
+ self.parent_stop_flag = stop_flag
+ self.child_stop_flag = mpcontext.get_context().Event()
+
+ # Keep track of the current retry index. The retries are meant to handle
+ # flakiness, so on retry rounds we should restart the browser after each test.
+ self.retry_index = retry_index
+ self.rerun = rerun
+ self.run_count = 0
+ self.pause_after_test = pause_after_test
+ self.pause_on_unexpected = pause_on_unexpected
+ self.restart_on_unexpected = restart_on_unexpected
+ self.debug_info = debug_info
+ self.capture_stdio = capture_stdio
+ self.restart_on_new_group = restart_on_new_group
+ self.max_restarts = max_restarts
+
+ assert recording is not None
+ self.recording = recording
+
+ self.test_count = 0
+ self.unexpected_fail_tests = defaultdict(list)
+ self.unexpected_pass_tests = defaultdict(list)
+
+ # Properties we initialize right after the thread is started
+ self.logger = None
+ self.test_source = None
+ self.command_queue = None
+ self.remote_queue = None
+
+ # Properties we initialize later in the lifecycle
+ self.timer = None
+ self.test_runner_proc = None
+ self.browser = None
+
+ super().__init__(name=f"TestRunnerManager-{index}", target=self.run_loop, args=[test_queue], daemon=True)
+
+ def run_loop(self, test_queue):
+ """Main loop for the TestRunnerManager.
+
+ TestRunnerManagers generally receive commands from their
+ TestRunner updating them on the status of a test. They
+ may also have a stop flag set by the main thread indicating
+ that the manager should shut down the next time the event loop
+ spins."""
+ self.recording.set(["testrunner", "startup"])
+ self.logger = structuredlog.StructuredLogger(self.suite_name)
+
+ self.test_source = TestSource(self.logger, test_queue)
+
+ mp = mpcontext.get_context()
+ self.command_queue = mp.Queue()
+ self.remote_queue = mp.Queue()
+
+ dispatch = {
+ RunnerManagerState.before_init: self.start_init,
+ RunnerManagerState.initializing: self.init,
+ RunnerManagerState.running: self.run_test,
+ RunnerManagerState.restarting: self.restart_runner,
+ }
+
+ self.state = RunnerManagerState.before_init()
+ end_states = (RunnerManagerState.stop,
+ RunnerManagerState.error)
+
+ try:
+ while not isinstance(self.state, end_states):
+ f = dispatch.get(self.state.__class__)
+ while f:
+ self.logger.debug(f"Dispatch {f.__name__}")
+ if self.should_stop():
+ return
+ new_state = f()
+ if new_state is None:
+ break
+ self.state = new_state
+ self.logger.debug(f"new state: {self.state.__class__.__name__}")
+ if isinstance(self.state, end_states):
+ return
+ f = dispatch.get(self.state.__class__)
+
+ new_state = None
+ while new_state is None:
+ new_state = self.wait_event()
+ if self.should_stop():
+ return
+ self.state = new_state
+ self.logger.debug(f"new state: {self.state.__class__.__name__}")
+ except Exception:
+ message = "Uncaught exception in TestRunnerManager.run:\n"
+ message += traceback.format_exc()
+ self.logger.critical(message)
+ raise
+ finally:
+ self.logger.debug("TestRunnerManager main loop terminating, starting cleanup")
+
+ skipped_tests = []
+ while True:
+ _, _, test, _, _ = self.get_next_test()
+ if test is None:
+ break
+ skipped_tests.append(test)
+
+ if skipped_tests:
+ self.logger.critical(
+ f"Tests left in the queue: {skipped_tests[0].id!r} "
+ f"and {len(skipped_tests) - 1} others"
+ )
+ for test in skipped_tests[1:]:
+ self.logger.debug(f"Test left in the queue: {test.id!r}")
+
+ force_stop = (not isinstance(self.state, RunnerManagerState.stop) or
+ self.state.force_stop)
+ self.stop_runner(force=force_stop)
+ self.teardown()
+ if self.browser is not None:
+ assert self.browser.browser is not None
+ self.browser.browser.cleanup()
+ self.logger.debug("TestRunnerManager main loop terminated")
+
+ def wait_event(self):
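+ """Wait briefly for a message from the TestRunner and dispatch it based on the current state; if no message arrives, check whether the runner process has died."""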
+ dispatch = {
+ RunnerManagerState.before_init: {},
+ RunnerManagerState.initializing:
+ {
+ "init_succeeded": self.init_succeeded,
+ "init_failed": self.init_failed,
+ },
+ RunnerManagerState.running:
+ {
+ "test_ended": self.test_ended,
+ "wait_finished": self.wait_finished,
+ },
+ RunnerManagerState.restarting: {},
+ RunnerManagerState.error: {},
+ RunnerManagerState.stop: {},
+ None: {
+ "runner_teardown": self.runner_teardown,
+ "log": self.log,
+ "error": self.error
+ }
+ }
+ try:
+ command, data = self.command_queue.get(True, 1)
+ self.logger.debug("Got command: %r" % command)
+ except OSError:
+ self.logger.error("Got OSError from poll")
+ return RunnerManagerState.restarting(self.state.subsuite,
+ self.state.test_type,
+ self.state.test,
+ self.state.test_group,
+ self.state.group_metadata,
+ False)
+ except Empty:
+ if (self.debug_info and self.debug_info.interactive and
+ self.browser.started and not self.browser.is_alive()):
+ self.logger.debug("Debugger exited")
+ return RunnerManagerState.stop(False)
+
+ if (isinstance(self.state, RunnerManagerState.running) and
+ not self.test_runner_proc.is_alive()):
+ if not self.command_queue.empty():
+ # We got a new message so process that
+ return
+
+ # If we got to here the runner presumably shut down
+ # unexpectedly
+ self.logger.info("Test runner process shut down")
+
+ if self.state.test is not None:
+ # This could happen if the test runner crashed for some other
+ # reason
+ # Need to consider the unlikely case where one test causes the
+ # runner process to repeatedly die
+ self.logger.critical("Last test did not complete")
+ return RunnerManagerState.error()
+ self.logger.warning("More tests found, but runner process died, restarting")
+ return RunnerManagerState.restarting(self.state.subsuite,
+ self.state.test_type,
+ self.state.test,
+ self.state.test_group,
+ self.state.group_metadata,
+ False)
+ else:
+ f = (dispatch.get(self.state.__class__, {}).get(command) or
+ dispatch.get(None, {}).get(command))
+ if not f:
+ self.logger.warning("Got command %s in state %s" %
+ (command, self.state.__class__.__name__))
+ return
+ return f(*data)
+
+ def should_stop(self):
+ return self.child_stop_flag.is_set() or self.parent_stop_flag.is_set()
+
+ def start_init(self):
+ subsuite, test_type, test, test_group, group_metadata = self.get_next_test()
+ self.recording.set(["testrunner", "init"])
+ if test is None:
+ return RunnerManagerState.stop(True)
+ else:
+ return RunnerManagerState.initializing(subsuite, test_type, test, test_group, group_metadata, 0)
+
+ def init(self):
+ assert isinstance(self.state, RunnerManagerState.initializing)
+ if self.state.failure_count > self.max_restarts:
+ self.logger.critical("Max restarts exceeded")
+ return RunnerManagerState.error()
+
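+ # Tear down the old browser and build the one registered for the new (subsuite, test type) when the implementation changes.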
+ if (self.state.subsuite, self.state.test_type) != self.test_implementation_key:
+ if self.browser is not None:
+ assert self.browser.browser is not None
+ self.browser.browser.cleanup()
+ impl = self.test_implementations[(self.state.subsuite, self.state.test_type)]
+ browser = impl.browser_cls(self.logger, remote_queue=self.command_queue,
+ **impl.browser_kwargs)
+ browser.setup()
+ self.browser = BrowserManager(self.logger,
+ browser,
+ self.command_queue,
+ no_timeout=self.debug_info is not None)
+ self.test_implementation_key = (self.state.subsuite, self.state.test_type)
+
+ assert self.browser is not None
+ self.browser.update_settings(self.state.test)
+
+ result = self.browser.init(self.state.group_metadata)
+ if not result:
+ return self.init_failed()
+
+ self.start_test_runner()
+
+ def start_test_runner(self):
+ # Note that we need to be careful to start the browser before the
+ # test runner to ensure that any state set when the browser is started
+ # can be passed in to the test runner.
+ assert isinstance(self.state, RunnerManagerState.initializing)
+ assert self.command_queue is not None
+ assert self.remote_queue is not None
+ self.logger.info("Starting runner")
+ impl = self.test_implementations[(self.state.subsuite, self.state.test_type)]
+ self.executor_cls = impl.executor_cls
+ self.executor_kwargs = impl.executor_kwargs
+ self.executor_kwargs["group_metadata"] = self.state.group_metadata
+ self.executor_kwargs["browser_settings"] = self.browser.browser_settings
+ executor_browser_cls, executor_browser_kwargs = self.browser.browser.executor_browser()
+
+ args = (self.remote_queue,
+ self.command_queue,
+ self.executor_cls,
+ self.executor_kwargs,
+ executor_browser_cls,
+ executor_browser_kwargs,
+ self.capture_stdio,
+ self.child_stop_flag,
+ self.recording)
+
+ mp = mpcontext.get_context()
+ self.test_runner_proc = mp.Process(target=start_runner,
+ args=args,
+ name="TestRunner-%i" % self.manager_number)
+ self.test_runner_proc.start()
+ self.logger.debug("Test runner started")
+ # Now we wait for either an init_succeeded event or an init_failed event
+
+ def init_succeeded(self):
+ assert isinstance(self.state, RunnerManagerState.initializing)
+ self.browser.after_init()
+ return RunnerManagerState.running(self.state.subsuite,
+ self.state.test_type,
+ self.state.test,
+ self.state.test_group,
+ self.state.group_metadata)
+
+ def init_failed(self):
+ assert isinstance(self.state, RunnerManagerState.initializing)
+ self.browser.check_crash(None)
+ self.browser.after_init()
+ self.stop_runner(force=True)
+ return RunnerManagerState.initializing(self.state.subsuite,
+ self.state.test_type,
+ self.state.test,
+ self.state.test_group,
+ self.state.group_metadata,
+ self.state.failure_count + 1)
+
+ def get_next_test(self):
+ # returns subsuite, test_type, test, test_group, group_metadata
+ test = None
+ test_group = None
+ while test is None:
+ while test_group is None or len(test_group) == 0:
+ test_group, subsuite, test_type, group_metadata = self.test_source.group()
+ if test_group is None:
+ self.logger.info("No more tests")
+ return None, None, None, None, None
+ test = test_group.popleft()
+ self.run_count = 0
+ return subsuite, test_type, test, test_group, group_metadata
+
+ def run_test(self):
+ assert isinstance(self.state, RunnerManagerState.running)
+ assert self.state.test is not None
+
+ if self.browser.update_settings(self.state.test):
+ self.logger.info("Restarting browser for new test environment")
+ return RunnerManagerState.restarting(self.state.subsuite,
+ self.state.test_type,
+ self.state.test,
+ self.state.test_group,
+ self.state.group_metadata,
+ False)
+
+ self.recording.set(["testrunner", "test"] + self.state.test.id.split("/")[1:])
+ self.logger.test_start(self.state.test.id, subsuite=self.state.subsuite)
+ if self.rerun > 1:
+ self.logger.info(f"Run {self.run_count + 1}/{self.rerun}")
+ self.send_message("reset")
+ self.run_count += 1
+ if self.debug_info is None:
+ # Factor of 3 on the extra timeout here is based on allowing the executor
+ # at least test.timeout + 2 * extra_timeout to complete,
+ # which in turn is based on having several layers of timeout inside the executor
+ wait_timeout = (self.state.test.timeout * self.executor_kwargs['timeout_multiplier'] +
+ 3 * self.executor_cls.extra_timeout)
+ self.timer = threading.Timer(wait_timeout, self._timeout)
+ self.timer.name = f"{self.name}-timeout"
+
+ self.send_message("run_test", self.state.test)
+ if self.timer:
+ self.timer.start()
+
+ def _timeout(self):
+ # This is executed in a different thread (threading.Timer).
+ self.logger.info("Got timeout in harness")
+ test = self.state.test
+ self.inject_message(
+ "test_ended",
+ test,
+ (test.make_result("EXTERNAL-TIMEOUT",
+ "TestRunner hit external timeout "
+ "(this may indicate a hang)"), []),
+ )
+
+ def test_ended(self, test, results):
+ """Handle the end of a test.
+
+ Output the result of each subtest, and the result of the overall
+ harness to the logs.
+ """
+ if ((not isinstance(self.state, RunnerManagerState.running)) or
+ (test != self.state.test)):
+ # Due to inherent race conditions in EXTERNAL-TIMEOUT, we might
+ # receive multiple test_ended for a test (e.g. from both Executor
+ # and TestRunner), in which case we ignore the duplicate message.
+ self.logger.warning("Received unexpected test_ended for %s" % test)
+ return
+ if self.timer is not None:
+ self.timer.cancel()
+
+ # Write the result of each subtest
+ file_result, test_results = results
+ subtest_unexpected = False
+ subtest_all_pass_or_expected = True
+ for result in test_results:
+ if test.disabled(result.name):
+ continue
+ expected = result.expected
+ known_intermittent = result.known_intermittent
+ is_unexpected = expected != result.status and result.status not in known_intermittent
+ is_expected_notrun = (expected == "NOTRUN" or "NOTRUN" in known_intermittent)
+
+ if not is_unexpected and result.status in ["FAIL", "PRECONDITION_FAILED"]:
+ # subtest is expected FAIL or expected PRECONDITION_FAILED,
+ # change result to unexpected if expected_fail_message does not
+ # match
+ expected_fail_message = test.expected_fail_message(result.name)
+ if expected_fail_message is not None and result.message.strip() != expected_fail_message:
+ is_unexpected = True
+ if result.status in known_intermittent:
+ known_intermittent.remove(result.status)
+ elif len(known_intermittent) > 0:
+ expected = known_intermittent[0]
+ known_intermittent = known_intermittent[1:]
+ else:
+ expected = "PASS"
+
+ if is_unexpected:
+ subtest_unexpected = True
+
+ if result.status != "PASS" and not is_expected_notrun:
+ # Any result against an expected "NOTRUN" should be treated
+ # as unexpected pass.
+ subtest_all_pass_or_expected = False
+
+ self.logger.test_status(test.id,
+ result.name,
+ result.status,
+ message=result.message,
+ expected=expected,
+ known_intermittent=known_intermittent,
+ stack=result.stack,
+ subsuite=self.state.subsuite)
+
+ expected = file_result.expected
+ known_intermittent = file_result.known_intermittent
+ status = file_result.status
+
+ if self.browser.check_crash(test.id) and status != "CRASH":
+ if test.test_type in ["crashtest", "wdspec"] or status == "EXTERNAL-TIMEOUT":
+ self.logger.info("Found a crash dump file; changing status to CRASH")
+ status = "CRASH"
+ else:
+ self.logger.warning(f"Found a crash dump; should change status from {status} to CRASH but this causes instability")
+
+ # We have a couple of status codes that are used internally, but not exposed to the
+ # user. These are used to indicate that some possibly-broken state was reached
+ # and we should restart the runner before the next test.
+ # INTERNAL-ERROR indicates a Python exception was caught in the harness
+ # EXTERNAL-TIMEOUT indicates we had to forcibly kill the browser from the harness
+ # because the test didn't return a result after reaching the test-internal timeout
+ status_subns = {"INTERNAL-ERROR": "ERROR",
+ "EXTERNAL-TIMEOUT": "TIMEOUT"}
+ status = status_subns.get(status, status)
+
+ self.test_count += 1
+ is_unexpected = expected != status and status not in known_intermittent
+ is_pass_or_expected = status in ["OK", "PASS"] or (not is_unexpected)
+
+ # A result is unexpected pass if the test or any subtest run
+ # unexpectedly, and the overall status is expected or passing (OK for test
+ # harness test, or PASS for reftest), and all unexpected results for
+ # subtests (if any) are unexpected pass.
+ is_unexpected_pass = ((is_unexpected or subtest_unexpected) and
+ is_pass_or_expected and subtest_all_pass_or_expected)
+ if is_unexpected_pass:
+ self.unexpected_pass_tests[self.state.subsuite, test.test_type].append(test)
+ elif is_unexpected or subtest_unexpected:
+ self.unexpected_fail_tests[self.state.subsuite, test.test_type].append(test)
+
+ if "assertion_count" in file_result.extra:
+ assertion_count = file_result.extra["assertion_count"]
+ if assertion_count is not None and assertion_count > 0:
+ self.logger.assertion_count(test.id,
+ int(assertion_count),
+ test.min_assertion_count,
+ test.max_assertion_count)
+
+ file_result.extra["test_timeout"] = test.timeout * self.executor_kwargs['timeout_multiplier']
+
+ self.logger.test_end(test.id,
+ status,
+ message=file_result.message,
+ expected=expected,
+ known_intermittent=known_intermittent,
+ extra=file_result.extra,
+ stack=file_result.stack,
+ subsuite=self.state.subsuite)
+
+ restart_before_next = (self.retry_index > 0 or test.restart_after or
+ file_result.status in ("CRASH", "EXTERNAL-TIMEOUT", "INTERNAL-ERROR") or
+ ((subtest_unexpected or is_unexpected) and
+ self.restart_on_unexpected))
+ force_stop = test.test_type == "wdspec" and file_result.status == "EXTERNAL-TIMEOUT"
+
+ self.recording.set(["testrunner", "after-test"])
+ if (not file_result.status == "CRASH" and
+ self.pause_after_test or
+ (self.pause_on_unexpected and (subtest_unexpected or is_unexpected))):
+ self.logger.info("Pausing until the browser exits")
+ self.send_message("wait")
+ else:
+ return self.after_test_end(test, restart_before_next, force_stop=force_stop)
+
+ def wait_finished(self, rerun=False):
+ assert isinstance(self.state, RunnerManagerState.running)
+ self.logger.debug("Wait finished")
+
+ # The browser should be stopped already, but this ensures we do any
+ # post-stop processing
+ return self.after_test_end(self.state.test, not rerun, force_rerun=rerun)
+
+ def after_test_end(self, test, restart, force_rerun=False, force_stop=False):
+ assert isinstance(self.state, RunnerManagerState.running)
+ # Mixing manual reruns and automatic reruns is confusing; we currently assume
+ # that as long as we've done at least the automatic run count in total we can
+ # continue with the next test.
+ if not force_rerun and self.run_count >= self.rerun:
+ subsuite, test_type, test, test_group, group_metadata = self.get_next_test()
+ if test is None:
+ return RunnerManagerState.stop(force_stop)
+ if subsuite != self.state.subsuite:
+ self.logger.info(f"Restarting browser for new subsuite:{subsuite}")
+ restart = True
+ elif test_type != self.state.test_type:
+ self.logger.info(f"Restarting browser for new test type:{test_type}")
+ restart = True
+ elif self.restart_on_new_group and test_group is not self.state.test_group:
+ self.logger.info("Restarting browser for new test group")
+ restart = True
+ else:
+ subsuite = self.state.subsuite
+ test_type = self.state.test_type
+ test_group = self.state.test_group
+ group_metadata = self.state.group_metadata
+
+ if restart:
+ return RunnerManagerState.restarting(
+ subsuite, test_type, test, test_group, group_metadata, force_stop)
+ else:
+ return RunnerManagerState.running(
+ subsuite, test_type, test, test_group, group_metadata)
+
+ def restart_runner(self):
+ """Stop and restart the TestRunner"""
+ assert isinstance(self.state, RunnerManagerState.restarting)
+ self.stop_runner(force=self.state.force_stop)
+ return RunnerManagerState.initializing(
+ self.state.subsuite, self.state.test_type, self.state.test,
+ self.state.test_group, self.state.group_metadata, 0)
+
+ def log(self, data: Mapping[str, Any]) -> None:
+ self.logger.log_raw(data)
+
+ def error(self, message):
+ self.logger.error(message)
+ self.restart_runner()
+
+ def stop_runner(self, force=False):
+ """Stop the TestRunner and the browser binary."""
+ self.recording.set(["testrunner", "stop_runner"])
+ if self.test_runner_proc is None:
+ return
+
+ if self.test_runner_proc.is_alive():
+ self.send_message("stop")
+ try:
+ self.browser.stop(force=force)
+ self.ensure_runner_stopped()
+ finally:
+ self.cleanup()
+
+ def teardown(self):
+ self.logger.debug("TestRunnerManager teardown")
+ self.test_runner_proc = None
+ self.command_queue.close()
+ self.remote_queue.close()
+ self.command_queue = None
+ self.remote_queue = None
+ self.recording.pause()
+
+ def ensure_runner_stopped(self):
+ self.logger.debug("ensure_runner_stopped")
+ if self.test_runner_proc is None:
+ return
+
+ self.browser.stop(force=True)
+ self.test_runner_proc.join(10)
+ mp = mpcontext.get_context()
+ if self.test_runner_proc.is_alive():
+ # This might leak a file handle from the queue
+ self.logger.warning("Forcibly terminating runner process")
+ self.test_runner_proc.terminate()
+ self.logger.debug("After terminating runner process")
+
+ # Multiprocessing queues are backed by operating system pipes. If
+ # the pipe in the child process had buffered data at the time of
+ # forced termination, the queue is no longer in a usable state
+ # (subsequent attempts to retrieve items may block indefinitely).
+ # Discard the potentially-corrupted queue and create a new one.
+ self.logger.debug("Recreating command queue")
+ self.command_queue.cancel_join_thread()
+ self.command_queue.close()
+ self.command_queue = mp.Queue()
+ self.logger.debug("Recreating remote queue")
+ self.remote_queue.cancel_join_thread()
+ self.remote_queue.close()
+ self.remote_queue = mp.Queue()
+ else:
+ self.logger.debug("Runner process exited with code %i" % self.test_runner_proc.exitcode)
+
+ def runner_teardown(self):
+ self.ensure_runner_stopped()
+ return RunnerManagerState.stop(False)
+
+ def send_message(self, command, *args):
+ """Send a message to the remote queue (to Executor)."""
+ self.remote_queue.put((command, args))
+
+ def inject_message(self, command, *args):
+ """Inject a message to the command queue (from Executor)."""
+ self.command_queue.put((command, args))
+
+ def cleanup(self):
+ self.logger.debug("TestRunnerManager cleanup")
+ if self.browser:
+ self.browser.cleanup()
+ if self.timer:
+ self.timer.cancel()
+ while True:
+ try:
+ cmd, data = self.command_queue.get_nowait()
+ except Empty:
+ break
+ else:
+ if cmd == "log":
+ self.log(*data)
+ elif cmd == "runner_teardown":
+ # It's OK for the "runner_teardown" message to be left in
+ # the queue during cleanup, as we will already have tried
+ # to stop the TestRunner in `stop_runner`.
+ pass
+ else:
+ self.logger.warning(f"Command left in command_queue during cleanup: {cmd!r}, {data!r}")
+ while True:
+ try:
+ cmd, data = self.remote_queue.get_nowait()
+ self.logger.warning(f"Command left in remote_queue during cleanup: {cmd!r}, {data!r}")
+ except Empty:
+ break
+
+
+class ManagerGroup:
+ """Main thread object that owns all the TestRunnerManager threads."""
+ def __init__(self, suite_name, test_queue_builder, test_implementations,
+ retry_index=0,
+ rerun=1,
+ pause_after_test=False,
+ pause_on_unexpected=False,
+ restart_on_unexpected=True,
+ debug_info=None,
+ capture_stdio=True,
+ restart_on_new_group=True,
+ recording=None,
+ max_restarts=5):
+ self.suite_name = suite_name
+ self.test_queue_builder = test_queue_builder
+ self.test_implementations = test_implementations
+ self.pause_after_test = pause_after_test
+ self.pause_on_unexpected = pause_on_unexpected
+ self.restart_on_unexpected = restart_on_unexpected
+ self.debug_info = debug_info
+ self.retry_index = retry_index
+ self.rerun = rerun
+ self.capture_stdio = capture_stdio
+ self.restart_on_new_group = restart_on_new_group
+ self.recording = recording
+ assert recording is not None
+ self.max_restarts = max_restarts
+
+ self.pool = set()
+ # Event that is polled by threads so that they can gracefully exit in the face
+ # of sigint
+ self.stop_flag = threading.Event()
+ self.logger = structuredlog.StructuredLogger(suite_name)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.stop()
+
+ def run(self, tests):
+ """Start all managers in the group"""
+ test_queue, size = self.test_queue_builder.make_queue(tests)
+ self.logger.info("Using %i child processes" % size)
+
+ for idx in range(size):
+ manager = TestRunnerManager(self.suite_name,
+ idx,
+ test_queue,
+ self.test_implementations,
+ self.stop_flag,
+ self.retry_index,
+ self.rerun,
+ self.pause_after_test,
+ self.pause_on_unexpected,
+ self.restart_on_unexpected,
+ self.debug_info,
+ self.capture_stdio,
+ self.restart_on_new_group,
+ recording=self.recording,
+ max_restarts=self.max_restarts)
+ manager.start()
+ self.pool.add(manager)
+ self.wait()
+
+ def wait(self, timeout: Optional[float] = None) -> None:
+ """Wait for all the managers in the group to finish.
+
+ Arguments:
+ timeout: Overall timeout (in seconds) for all threads to join. The
+ default value indicates an indefinite timeout.
+ """
+ deadline = None if timeout is None else time.time() + timeout
+ for manager in self.pool:
+ manager_timeout = None
+ if deadline is not None:
+ manager_timeout = max(0, deadline - time.time())
+ manager.join(manager_timeout)
+
+ def stop(self):
+ """Set the stop flag so that all managers in the group stop as soon
+ as possible"""
+ self.stop_flag.set()
+ self.logger.debug("Stop flag set in ManagerGroup")
+
+ def test_count(self):
+ return sum(manager.test_count for manager in self.pool)
+
+ def unexpected_fail_tests(self):
+ rv = defaultdict(list)
+ for manager in self.pool:
+ for (subsuite, test_type), tests in manager.unexpected_fail_tests.items():
+ rv[(subsuite, test_type)].extend(tests)
+ return rv
+
+ def unexpected_pass_tests(self):
+ rv = defaultdict(list)
+ for manager in self.pool:
+ for (subsuite, test_type), tests in manager.unexpected_pass_tests.items():
+ rv[(subsuite, test_type)].extend(tests)
+ return rv
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/__init__.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/__init__.py
new file mode 100644
index 0000000000..b4a26cee9b
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/__init__.py
@@ -0,0 +1,9 @@
+# mypy: ignore-errors
+
+import os
+import sys
+
+here = os.path.abspath(os.path.dirname(__file__))
+sys.path.insert(0, os.path.join(here, os.pardir, os.pardir, os.pardir))
+
+import localpaths as _localpaths # noqa: F401
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/base.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/base.py
new file mode 100644
index 0000000000..8e71aba812
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/base.py
@@ -0,0 +1,62 @@
+# mypy: allow-untyped-defs
+
+import os
+import sys
+
+from os.path import dirname, join
+
+import pytest
+
+sys.path.insert(0, join(dirname(__file__), "..", ".."))
+
+from .. import browsers
+
+
+_products = browsers.product_list
+_active_products = set()
+
+if "CURRENT_TOX_ENV" in os.environ:
+ current_tox_env_split = os.environ["CURRENT_TOX_ENV"].split("-")
+
+ tox_env_extra_browsers = {
+ "chrome": {"chrome_android"},
+ "servo": {"servodriver"},
+ }
+
+ _active_products = set(_products) & set(current_tox_env_split)
+ for product in frozenset(_active_products):
+ _active_products |= tox_env_extra_browsers.get(product, set())
+else:
+ _active_products = set(_products)
+
+
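+# Decorator that parametrizes the given argument over every known product, applying any per-product pytest marks.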
+class all_products:
+ def __init__(self, arg, marks={}):
+ self.arg = arg
+ self.marks = marks
+
+ def __call__(self, f):
+ params = []
+ for product in _products:
+ if product in self.marks:
+ params.append(pytest.param(product, marks=self.marks[product]))
+ else:
+ params.append(product)
+ return pytest.mark.parametrize(self.arg, params)(f)
+
+
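+# Like all_products, but marks products outside the current tox environment as skipped.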
+class active_products:
+ def __init__(self, arg, marks={}):
+ self.arg = arg
+ self.marks = marks
+
+ def __call__(self, f):
+ params = []
+ for product in _products:
+ if product not in _active_products:
+ params.append(pytest.param(product, marks=pytest.mark.skip(reason="wrong toxenv")))
+ elif product in self.marks:
+ params.append(pytest.param(product, marks=self.marks[product]))
+ else:
+ params.append(product)
+ return pytest.mark.parametrize(self.arg, params)(f)
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/browsers/__init__.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/browsers/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/browsers/__init__.py
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/browsers/test_base.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/browsers/test_base.py
new file mode 100644
index 0000000000..a3d804336e
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/browsers/test_base.py
@@ -0,0 +1,52 @@
+# mypy: allow-untyped-defs
+
+import sys
+from os.path import dirname, join
+from unittest import mock
+
+import pytest
+
+sys.path.insert(0, join(dirname(__file__), "..", "..", ".."))
+
+from mozlog import structuredlog
+
+from wptrunner.browsers import base
+
+
+class MozLogTestHandler(object):
+ def __init__(self):
+ self.items = []
+
+ def __call__(self, data):
+ self.items.append(data)
+
+
+@pytest.mark.skipif(
+ sys.platform == "win32",
+ reason="Relies on echo, which isn't an executable on Windows",
+)
+def test_logging_immediate_exit():
+ logger = structuredlog.StructuredLogger("test")
+ handler = MozLogTestHandler()
+ logger.add_handler(handler)
+
+ class CustomException(Exception):
+ pass
+
+ with mock.patch.object(base, "wait_for_service", side_effect=CustomException):
+ browser = base.WebDriverBrowser(
+ logger, webdriver_binary="echo", webdriver_args=["sample output"]
+ )
+ try:
+ with pytest.raises(CustomException):
+ browser.start(group_metadata={})
+ finally:
+ # Ensure the `echo` process actually exits
+ browser._proc.wait()
+
+ process_output_actions = [
+ data for data in handler.items if data["action"] == "process_output"
+ ]
+
+ assert len(process_output_actions) == 1
+ assert process_output_actions[0]["data"] == "sample output"
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/browsers/test_sauce.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/browsers/test_sauce.py
new file mode 100644
index 0000000000..bfb8e53b74
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/browsers/test_sauce.py
@@ -0,0 +1,177 @@
+# mypy: allow-untyped-defs
+
+import gc
+import logging
+import sys
+from unittest import mock
+
+import pytest
+
+from os.path import join, dirname
+
+sys.path.insert(0, join(dirname(__file__), "..", "..", ".."))
+
+sauce = pytest.importorskip("wptrunner.browsers.sauce")
+
+from wptserve.config import ConfigBuilder
+
+logger = logging.getLogger()
+
+
+def setup_module(module):
+ # Do a full GC collection, as mozprocess often creates garbage which then
+ # breaks when its __del__ is called while subprocess.Popen is mocked below.
+ gc.collect()
+
+
+def test_sauceconnect_success():
+ with mock.patch.object(sauce.SauceConnect, "upload_prerun_exec"),\
+ mock.patch.object(sauce.subprocess, "Popen") as Popen,\
+ mock.patch.object(sauce.os.path, "exists") as exists:
+ # Act as if it's still running
+ Popen.return_value.poll.return_value = None
+ Popen.return_value.returncode = None
+ # Act as if file created
+ exists.return_value = True
+
+ sauce_connect = sauce.SauceConnect(
+ sauce_user="aaa",
+ sauce_key="bbb",
+ sauce_tunnel_id="ccc",
+ sauce_connect_binary="ddd",
+ sauce_connect_args=[])
+
+ with ConfigBuilder(logger, browser_host="example.net") as env_config:
+ sauce_connect(None, env_config)
+ with sauce_connect:
+ pass
+
+
+@pytest.mark.parametrize("readyfile,returncode", [
+ (True, 0),
+ (True, 1),
+ (True, 2),
+ (False, 0),
+ (False, 1),
+ (False, 2),
+])
+def test_sauceconnect_failure_exit(readyfile, returncode):
+ with mock.patch.object(sauce.SauceConnect, "upload_prerun_exec"),\
+ mock.patch.object(sauce.subprocess, "Popen") as Popen,\
+ mock.patch.object(sauce.os.path, "exists") as exists,\
+ mock.patch.object(sauce.time, "sleep") as sleep:
+ Popen.return_value.poll.return_value = returncode
+ Popen.return_value.returncode = returncode
+ exists.return_value = readyfile
+
+ sauce_connect = sauce.SauceConnect(
+ sauce_user="aaa",
+ sauce_key="bbb",
+ sauce_tunnel_id="ccc",
+ sauce_connect_binary="ddd",
+ sauce_connect_args=[])
+
+ with ConfigBuilder(logger, browser_host="example.net") as env_config:
+ sauce_connect(None, env_config)
+ with pytest.raises(sauce.SauceException):
+ with sauce_connect:
+ pass
+
+ # Given we appear to exit immediately with these mocks, sleep shouldn't be called
+ sleep.assert_not_called()
+
+
+def test_sauceconnect_cleanup():
+ """Ensure that execution pauses when the process is closed while exiting
+ the context manager. This allows Sauce Connect to close any active
+ tunnels."""
+ with mock.patch.object(sauce.SauceConnect, "upload_prerun_exec"),\
+ mock.patch.object(sauce.subprocess, "Popen") as Popen,\
+ mock.patch.object(sauce.os.path, "exists") as exists,\
+ mock.patch.object(sauce.time, "sleep") as sleep:
+ Popen.return_value.poll.return_value = True
+ Popen.return_value.returncode = None
+ exists.return_value = True
+
+ sauce_connect = sauce.SauceConnect(
+ sauce_user="aaa",
+ sauce_key="bbb",
+ sauce_tunnel_id="ccc",
+ sauce_connect_binary="ddd",
+ sauce_connect_args=[])
+
+ with ConfigBuilder(logger, browser_host="example.net") as env_config:
+ sauce_connect(None, env_config)
+ with sauce_connect:
+ Popen.return_value.poll.return_value = None
+ sleep.assert_not_called()
+
+ sleep.assert_called()
+
+
+def test_sauceconnect_failure_never_ready():
+ with mock.patch.object(sauce.SauceConnect, "upload_prerun_exec"),\
+ mock.patch.object(sauce.subprocess, "Popen") as Popen,\
+ mock.patch.object(sauce.os.path, "exists") as exists,\
+ mock.patch.object(sauce.time, "sleep") as sleep:
+ Popen.return_value.poll.return_value = None
+ Popen.return_value.returncode = None
+ exists.return_value = False
+
+ sauce_connect = sauce.SauceConnect(
+ sauce_user="aaa",
+ sauce_key="bbb",
+ sauce_tunnel_id="ccc",
+ sauce_connect_binary="ddd",
+ sauce_connect_args=[])
+
+ with ConfigBuilder(logger, browser_host="example.net") as env_config:
+ sauce_connect(None, env_config)
+ with pytest.raises(sauce.SauceException):
+ with sauce_connect:
+ pass
+
+ # We should sleep while waiting for it to create the readyfile
+ sleep.assert_called()
+
+ # Check we actually kill it after termination fails
+ Popen.return_value.terminate.assert_called()
+ Popen.return_value.kill.assert_called()
+
+
+def test_sauceconnect_tunnel_domains():
+ with mock.patch.object(sauce.SauceConnect, "upload_prerun_exec"),\
+ mock.patch.object(sauce.subprocess, "Popen") as Popen,\
+ mock.patch.object(sauce.os.path, "exists") as exists:
+ Popen.return_value.poll.return_value = None
+ Popen.return_value.returncode = None
+ exists.return_value = True
+
+ sauce_connect = sauce.SauceConnect(
+ sauce_user="aaa",
+ sauce_key="bbb",
+ sauce_tunnel_id="ccc",
+ sauce_connect_binary="ddd",
+ sauce_connect_args=[])
+
+ with ConfigBuilder(logger,
+ browser_host="example.net",
+ alternate_hosts={"alt": "example.org"},
+ subdomains={"a", "b"},
+ not_subdomains={"x", "y"}) as env_config:
+ sauce_connect(None, env_config)
+ with sauce_connect:
+ Popen.assert_called_once()
+ args, kwargs = Popen.call_args
+ cmd = args[0]
+ assert "--tunnel-domains" in cmd
+ i = cmd.index("--tunnel-domains")
+ rest = cmd[i+1:]
+ assert len(rest) >= 1
+ if len(rest) > 1:
+ assert rest[1].startswith("-"), "--tunnel-domains takes a comma separated list (not a space separated list)"
+ assert set(rest[0].split(",")) == {'example.net',
+ 'a.example.net',
+ 'b.example.net',
+ 'example.org',
+ 'a.example.org',
+ 'b.example.org'}
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/browsers/test_webkitgtk.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/browsers/test_webkitgtk.py
new file mode 100644
index 0000000000..b5fc5fce5f
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/browsers/test_webkitgtk.py
@@ -0,0 +1,77 @@
+# mypy: allow-untyped-defs, allow-untyped-calls
+
+import logging
+from os.path import join, dirname
+
+import pytest
+
+from wptserve.config import ConfigBuilder
+from ..base import active_products
+from wptrunner import environment, products, testloader, wptcommandline
+
+wpt_root = join(dirname(__file__), "..", "..", "..", "..")
+
+test_paths = {"/": wptcommandline.TestRoot(wpt_root, wpt_root)}
+environment.do_delayed_imports(None, test_paths)
+
+logger = logging.getLogger()
+
+
+@active_products("product")
+def test_webkitgtk_certificate_domain_list(product):
+
+ def domain_is_inside_certificate_list_cert(domain_to_find, webkitgtk_certificate_list, cert_file):
+ for domain in webkitgtk_certificate_list:
+ if domain["host"] == domain_to_find and domain["certificateFile"] == cert_file:
+ return True
+ return False
+
+ if product not in ["epiphany", "webkit", "webkitgtk_minibrowser"]:
+ pytest.skip("%s doesn't support certificate_domain_list" % product)
+
+ product_data = products.Product({}, product)
+
+ cert_file = "/home/user/wpt/tools/certs/cacert.pem"
+ valid_domains_test = ["a.example.org", "b.example.org", "example.org",
+ "a.example.net", "b.example.net", "example.net"]
+ invalid_domains_test = ["x.example.org", "y.example.org", "example.it",
+ "x.example.net", "y.example.net", "z.example.net"]
+ kwargs = {}
+ kwargs["timeout_multiplier"] = 1
+ kwargs["debug_info"] = None
+ kwargs["host_cert_path"] = cert_file
+ kwargs["webkit_port"] = "gtk"
+ kwargs["binary"] = None
+ kwargs["webdriver_binary"] = None
+ kwargs["pause_after_test"] = False
+ kwargs["pause_on_unexpected"] = False
+ kwargs["debug_test"] = False
+ kwargs["subsuite"] = testloader.Subsuite("", config={})
+ with ConfigBuilder(logger,
+ browser_host="example.net",
+ alternate_hosts={"alt": "example.org"},
+ subdomains={"a", "b"},
+ not_subdomains={"x", "y"}) as env_config:
+
+ # We don't want to actually create a test environment; the get_executor_kwargs
+ # function only really wants an object with the config key
+
+ class MockEnvironment:
+ def __init__(self, config):
+ self.config = config
+
+ executor_args = product_data.get_executor_kwargs(None,
+ None,
+ MockEnvironment(env_config),
+ {},
+ **kwargs)
+ assert 'capabilities' in executor_args
+ assert 'webkitgtk:browserOptions' in executor_args['capabilities']
+ assert 'certificates' in executor_args['capabilities']['webkitgtk:browserOptions']
+ cert_list = executor_args['capabilities']['webkitgtk:browserOptions']['certificates']
+ for valid_domain in valid_domains_test:
+ assert domain_is_inside_certificate_list_cert(valid_domain, cert_list, cert_file)
+ assert not domain_is_inside_certificate_list_cert(valid_domain, cert_list, cert_file + ".backup_non_existent")
+ for invalid_domain in invalid_domains_test:
+ assert not domain_is_inside_certificate_list_cert(invalid_domain, cert_list, cert_file)
+ assert not domain_is_inside_certificate_list_cert(invalid_domain, cert_list, cert_file + ".backup_non_existent")
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_executors.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_executors.py
new file mode 100644
index 0000000000..682a34e5df
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_executors.py
@@ -0,0 +1,17 @@
+# mypy: allow-untyped-defs
+
+import pytest
+
+from ..executors import base
+
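+# The cases below cover the supported page-range forms: an empty list selects every
+# page, one-element ranges select a single page, two-element ranges are inclusive,
+# None leaves an end open, and pages beyond total_pages are dropped.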
+@pytest.mark.parametrize("ranges_value, total_pages, expected", [
+ ([], 3, {1, 2, 3}),
+ ([[1, 2]], 3, {1, 2}),
+ ([[1], [3, 4]], 5, {1, 3, 4}),
+ ([[1],[3]], 5, {1, 3}),
+ ([[2, None]], 5, {2, 3, 4, 5}),
+ ([[None, 2]], 5, {1, 2}),
+ ([[None, 2], [2, None]], 5, {1, 2, 3, 4, 5}),
+ ([[1], [6, 7], [8]], 5, {1})])
+def test_get_pages_valid(ranges_value, total_pages, expected):
+ assert base.get_pages(ranges_value, total_pages) == expected
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_expectedtree.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_expectedtree.py
new file mode 100644
index 0000000000..b8a1120246
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_expectedtree.py
@@ -0,0 +1,120 @@
+# mypy: allow-untyped-defs
+
+from .. import expectedtree, metadata
+from collections import defaultdict
+
+
+def dump_tree(tree):
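+ # Serialise the tree into indented "<prop:value result_values:...>" lines
+ # so the tests can compare it against a literal string.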
+ rv = []
+
+ def dump_node(node, indent=0):
+ prefix = " " * indent
+ if not node.prop:
+ data = "root"
+ else:
+ data = f"{node.prop}:{node.value}"
+ if node.result_values:
+ data += " result_values:%s" % (",".join(sorted(node.result_values)))
+ rv.append(f"{prefix}<{data}>")
+ for child in sorted(node.children, key=lambda x:x.value):
+ dump_node(child, indent + 2)
+ dump_node(tree)
+ return "\n".join(rv)
+
+
+def results_object(results):
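+ # Convert (run_info, status) pairs into the nested {RunInfo: {status: count}}
+ # mapping that build_tree consumes.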
+ results_obj = defaultdict(lambda: defaultdict(int))
+ for run_info, status in results:
+ run_info = metadata.RunInfo(run_info)
+ results_obj[run_info][status] += 1
+ return results_obj
+
+
+def test_build_tree_0():
+ # Pass if debug
+ results = [({"os": "linux", "version": "18.04", "debug": True}, "FAIL"),
+ ({"os": "linux", "version": "18.04", "debug": False}, "PASS"),
+ ({"os": "linux", "version": "16.04", "debug": False}, "PASS"),
+ ({"os": "mac", "version": "10.12", "debug": True}, "FAIL"),
+ ({"os": "mac", "version": "10.12", "debug": False}, "PASS"),
+ ({"os": "win", "version": "7", "debug": False}, "PASS"),
+ ({"os": "win", "version": "10", "debug": False}, "PASS")]
+ results_obj = results_object(results)
+ tree = expectedtree.build_tree(["os", "version", "debug"], {}, results_obj)
+
+ expected = """<root>
+ <debug:False result_values:PASS>
+ <debug:True result_values:FAIL>"""
+
+ assert dump_tree(tree) == expected
+
+
+def test_build_tree_1():
+ # Pass if linux or windows 10
+ results = [({"os": "linux", "version": "18.04", "debug": True}, "PASS"),
+ ({"os": "linux", "version": "18.04", "debug": False}, "PASS"),
+ ({"os": "linux", "version": "16.04", "debug": False}, "PASS"),
+ ({"os": "mac", "version": "10.12", "debug": True}, "FAIL"),
+ ({"os": "mac", "version": "10.12", "debug": False}, "FAIL"),
+ ({"os": "win", "version": "7", "debug": False}, "FAIL"),
+ ({"os": "win", "version": "10", "debug": False}, "PASS")]
+ results_obj = results_object(results)
+ tree = expectedtree.build_tree(["os", "debug"], {"os": ["version"]}, results_obj)
+
+ expected = """<root>
+ <os:linux result_values:PASS>
+ <os:mac result_values:FAIL>
+ <os:win>
+ <version:10 result_values:PASS>
+ <version:7 result_values:FAIL>"""
+
+ assert dump_tree(tree) == expected
+
+
+def test_build_tree_2():
+ # Fails in a specific configuration
+ results = [({"os": "linux", "version": "18.04", "debug": True}, "PASS"),
+ ({"os": "linux", "version": "18.04", "debug": False}, "FAIL"),
+ ({"os": "linux", "version": "16.04", "debug": False}, "PASS"),
+ ({"os": "linux", "version": "16.04", "debug": True}, "PASS"),
+ ({"os": "mac", "version": "10.12", "debug": True}, "PASS"),
+ ({"os": "mac", "version": "10.12", "debug": False}, "PASS"),
+ ({"os": "win", "version": "7", "debug": False}, "PASS"),
+ ({"os": "win", "version": "10", "debug": False}, "PASS")]
+ results_obj = results_object(results)
+ tree = expectedtree.build_tree(["os", "debug"], {"os": ["version"]}, results_obj)
+
+ expected = """<root>
+ <os:linux>
+ <debug:False>
+ <version:16.04 result_values:PASS>
+ <version:18.04 result_values:FAIL>
+ <debug:True result_values:PASS>
+ <os:mac result_values:PASS>
+ <os:win result_values:PASS>"""
+
+ assert dump_tree(tree) == expected
+
+
+def test_build_tree_3():
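+ # "unused" is not a property build_tree may split on, and the remaining
+ # properties are identical, so both results collapse onto the root node.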
+ results = [({"os": "linux", "version": "18.04", "debug": True, "unused": False}, "PASS"),
+ ({"os": "linux", "version": "18.04", "debug": True, "unused": True}, "FAIL")]
+ results_obj = results_object(results)
+ tree = expectedtree.build_tree(["os", "debug"], {"os": ["version"]}, results_obj)
+
+ expected = """<root result_values:FAIL,PASS>"""
+
+ assert dump_tree(tree) == expected
+
+
+def test_build_tree_4():
+ # Check counts for multiple statuses
+ results = [({"os": "linux", "version": "18.04", "debug": False}, "FAIL"),
+ ({"os": "linux", "version": "18.04", "debug": False}, "PASS"),
+ ({"os": "linux", "version": "18.04", "debug": False}, "PASS")]
+ results_obj = results_object(results)
+ tree = expectedtree.build_tree(["os", "version", "debug"], {}, results_obj)
+
+ assert tree.result_values["PASS"] == 2
+ assert tree.result_values["FAIL"] == 1
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_formatters.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_formatters.py
new file mode 100644
index 0000000000..3f66f77bea
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_formatters.py
@@ -0,0 +1,152 @@
+# mypy: allow-untyped-defs
+
+import json
+import time
+from io import StringIO
+
+from mozlog import handlers, structuredlog
+
+from ..formatters.wptscreenshot import WptscreenshotFormatter
+from ..formatters.wptreport import WptreportFormatter
+
+
+def test_wptreport_runtime(capfd):
+ # setup the logger
+ output = StringIO()
+ logger = structuredlog.StructuredLogger("test_a")
+ logger.add_handler(handlers.StreamHandler(output, WptreportFormatter()))
+
+ # log a minimal suite containing a single test
+ logger.suite_start(["test-id-1"], run_info={})
+ logger.test_start("test-id-1")
+ time.sleep(0.125)
+ logger.test_end("test-id-1", "PASS")
+ logger.suite_end()
+
+ # check nothing got output to stdout/stderr
+ # (note that mozlog outputs exceptions during handling to stderr!)
+ captured = capfd.readouterr()
+ assert captured.out == ""
+ assert captured.err == ""
+
+ # check the actual output of the formatter
+ output.seek(0)
+ output_obj = json.load(output)
+ # be relatively lax in case of low-resolution timers:
+ # the 0.125s sleep is 125ms, and half of that (with integer maths) is 62ms,
+ # leaving a 62ms margin, enough even for the DOS-era 55ms timer
+ assert output_obj["results"][0]["duration"] >= 62
+
+
+def test_wptreport_run_info_optional(capfd):
+ """per the mozlog docs, run_info is optional; check we work without it"""
+ # setup the logger
+ output = StringIO()
+ logger = structuredlog.StructuredLogger("test_a")
+ logger.add_handler(handlers.StreamHandler(output, WptreportFormatter()))
+
+ # log a minimal suite containing a single test
+ logger.suite_start(["test-id-1"]) # no run_info arg!
+ logger.test_start("test-id-1")
+ logger.test_end("test-id-1", "PASS")
+ logger.suite_end()
+
+ # check nothing got output to stdout/stderr
+ # (note that mozlog outputs exceptions during handling to stderr!)
+ captured = capfd.readouterr()
+ assert captured.out == ""
+ assert captured.err == ""
+
+ # check the actual output of the formatter
+ output.seek(0)
+ output_obj = json.load(output)
+ assert "run_info" not in output_obj or output_obj["run_info"] == {}
+
+
+def test_wptreport_lone_surrogate(capfd):
+ output = StringIO()
+ logger = structuredlog.StructuredLogger("test_a")
+ logger.add_handler(handlers.StreamHandler(output, WptreportFormatter()))
+
+ # log a minimal suite containing a single test
+ logger.suite_start(["test-id-1"]) # no run_info arg!
+ logger.test_start("test-id-1")
+ logger.test_status("test-id-1",
+ subtest="Name with surrogate\uD800",
+ status="FAIL",
+ message="\U0001F601 \uDE0A\uD83D")
+ logger.test_end("test-id-1",
+ status="PASS",
+ message="\uDE0A\uD83D \U0001F601")
+ logger.suite_end()
+
+ # check nothing got output to stdout/stderr
+ # (note that mozlog outputs exceptions during handling to stderr!)
+ captured = capfd.readouterr()
+ assert captured.out == ""
+ assert captured.err == ""
+
+ # check the actual output of the formatter
+ output.seek(0)
+ output_obj = json.load(output)
+ test = output_obj["results"][0]
+ assert test["message"] == "U+de0aU+d83d \U0001F601"
+ subtest = test["subtests"][0]
+ assert subtest["name"] == "Name with surrogateU+d800"
+ assert subtest["message"] == "\U0001F601 U+de0aU+d83d"
+
+
+def test_wptreport_known_intermittent(capfd):
+ output = StringIO()
+ logger = structuredlog.StructuredLogger("test_a")
+ logger.add_handler(handlers.StreamHandler(output, WptreportFormatter()))
+
+ # log a minimal suite containing a single test
+ logger.suite_start(["test-id-1"]) # no run_info arg!
+ logger.test_start("test-id-1")
+ logger.test_status("test-id-1",
+ "a-subtest",
+ status="FAIL",
+ expected="PASS",
+ known_intermittent=["FAIL"])
+ logger.test_end("test-id-1",
+ status="OK",)
+ logger.suite_end()
+
+ # check nothing got output to stdout/stderr
+ # (note that mozlog outputs exceptions during handling to stderr!)
+ captured = capfd.readouterr()
+ assert captured.out == ""
+ assert captured.err == ""
+
+ # check the actual output of the formatter
+ output.seek(0)
+ output_obj = json.load(output)
+ test = output_obj["results"][0]
+ assert test["status"] == "OK"
+ subtest = test["subtests"][0]
+ assert subtest["expected"] == "PASS"
+ assert subtest["known_intermittent"] == ['FAIL']
+
+
+def test_wptscreenshot_test_end(capfd):
+ formatter = WptscreenshotFormatter()
+
+ # Empty
+ data = {}
+ assert formatter.test_end(data) is None
+
+ # No items
+ data['extra'] = {"reftest_screenshots": []}
+ assert formatter.test_end(data) is None
+
+ # Invalid item
+ data['extra']['reftest_screenshots'] = ["no dict item"]
+ assert formatter.test_end(data) is None
+
+ # Random hash
+ data['extra']['reftest_screenshots'] = [{"hash": "HASH", "screenshot": "DATA"}]
+ assert 'data:image/png;base64,DATA\n' == formatter.test_end(data)
+
+ # Already cached hash
+ assert formatter.test_end(data) is None
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_manifestexpected.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_manifestexpected.py
new file mode 100644
index 0000000000..e47a3470d8
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_manifestexpected.py
@@ -0,0 +1,35 @@
+# mypy: allow-untyped-defs
+
+from io import BytesIO
+
+import pytest
+
+from .. import manifestexpected
+
+
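+# The cases below cover the fuzzy syntax variants: bare "maxDifference;totalPixels"
+# values, ranges, keyword forms in either order, reference-qualified values, and
+# bracketed lists of several comparisons.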
+@pytest.mark.parametrize("fuzzy, expected", [
+ (b"ref.html:1;200", [("ref.html", ((1, 1), (200, 200)))]),
+ (b"ref.html:0-1;100-200", [("ref.html", ((0, 1), (100, 200)))]),
+ (b"0-1;100-200", [(None, ((0, 1), (100, 200)))]),
+ (b"maxDifference=1;totalPixels=200", [(None, ((1, 1), (200, 200)))]),
+ (b"totalPixels=200;maxDifference=1", [(None, ((1, 1), (200, 200)))]),
+ (b"totalPixels=200;1", [(None, ((1, 1), (200, 200)))]),
+ (b"maxDifference=1;200", [(None, ((1, 1), (200, 200)))]),
+ (b"test.html==ref.html:maxDifference=1;totalPixels=200",
+ [(("test.html", "ref.html", "=="), ((1, 1), (200, 200)))]),
+ (b"test.html!=ref.html:maxDifference=1;totalPixels=200",
+ [(("test.html", "ref.html", "!="), ((1, 1), (200, 200)))]),
+ (b"[test.html!=ref.html:maxDifference=1;totalPixels=200, test.html==ref1.html:maxDifference=5-10;100]",
+ [(("test.html", "ref.html", "!="), ((1, 1), (200, 200))),
+ (("test.html", "ref1.html", "=="), ((5,10), (100, 100)))]),
+])
+def test_fuzzy(fuzzy, expected):
+ data = b"""
+[test.html]
+ fuzzy: %s""" % fuzzy
+ f = BytesIO(data)
+ manifest = manifestexpected.static.compile(f,
+ {},
+ data_cls_getter=manifestexpected.data_cls_getter,
+ test_path="test/test.html")
+ assert manifest.get_test("test.html").fuzzy == expected
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_manifestupdate.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_manifestupdate.py
new file mode 100644
index 0000000000..12e379615d
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_manifestupdate.py
@@ -0,0 +1,58 @@
+# mypy: allow-untyped-defs
+
+import io
+import textwrap
+
+from .. import metadata
+from .. import manifestupdate
+from .. import wptmanifest
+
+
+def test_unconditional_default_promotion():
+ contents_before = io.BytesIO(
+ textwrap.dedent(
+ """\
+ [b.html]
+ """).encode())
+ manifest = manifestupdate.compile(
+ contents_before,
+ test_path='a/b.html',
+ url_base='/',
+ run_info_properties=(['os'], {'os': ['version']}),
+ update_intermittent=True,
+ remove_intermittent=False)
+ test = manifest.get_test('/a/b.html')
+ test.set_result(
+ metadata.RunInfo({'os': 'linux', 'version': 'jammy'}),
+ metadata.Result('TIMEOUT', [], 'PASS'))
+ test.set_result(
+ metadata.RunInfo({'os': 'win', 'version': '10'}),
+ metadata.Result('TIMEOUT', [], 'PASS'))
+ test.set_result(
+ metadata.RunInfo({'os': 'mac', 'version': '11'}),
+ metadata.Result('FAIL', [], 'PASS'))
+ test.set_result(
+ metadata.RunInfo({'os': 'mac', 'version': '12'}),
+ metadata.Result('FAIL', [], 'PASS'))
+ test.set_result(
+ metadata.RunInfo({'os': 'mac', 'version': '13'}),
+ metadata.Result('FAIL', [], 'PASS'))
+ test.update(full_update=True, disable_intermittent=False)
+
+ # The conditions before the default is created will look like:
+ # expected:
+ # if os == "linux": TIMEOUT
+ # if os == "win": TIMEOUT
+ # if os == "mac": FAIL
+ #
+ # The update should prefer promoting `TIMEOUT` over `FAIL`, since the former
+ # eliminates more conditions (both non-mac ones).
+ contents_after = io.BytesIO(
+ textwrap.dedent(
+ """\
+ [b.html]
+ expected:
+ if os == "mac": FAIL
+ TIMEOUT
+ """).encode())
+ assert manifest.node == wptmanifest.parse(contents_after)
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_metadata.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_metadata.py
new file mode 100644
index 0000000000..ee3d90915d
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_metadata.py
@@ -0,0 +1,47 @@
+import json
+import os
+
+import pytest
+
+from .. import metadata
+
+
+def write_properties(tmp_path, data): # type: ignore
+ path = os.path.join(tmp_path, "update_properties.json")
+ with open(path, "w") as f:
+ json.dump(data, f)
+ return path
+
+@pytest.mark.parametrize("data",
+ [{"properties": ["prop1"]}, # type: ignore
+ {"properties": ["prop1"], "dependents": {"prop1": ["prop2"]}},
+ ])
+def test_get_properties_file_valid(tmp_path, data):
+ path = write_properties(tmp_path, data)
+ expected = data["properties"], data.get("dependents", {})
+ actual = metadata.get_properties(properties_file=path)
+ assert actual == expected
+
+@pytest.mark.parametrize("data",
+ [{}, # type: ignore
+ {"properties": "prop1"},
+ {"properties": None},
+ {"properties": ["prop1", 1]},
+ {"dependents": {"prop1": ["prop1"]}},
+ {"properties": "prop1", "dependents": ["prop1"]},
+ {"properties": "prop1", "dependents": None},
+ {"properties": "prop1", "dependents": {"prop1": ["prop2", 2]}},
+ {"properties": ["prop1"], "dependents": {"prop2": ["prop3"]}},
+ ])
+def test_get_properties_file_invalid(tmp_path, data):
+ path = write_properties(tmp_path, data)
+ with pytest.raises(ValueError):
+ metadata.get_properties(properties_file=path)
+
+
+def test_extra_properties(tmp_path): # type: ignore
+ data = {"properties": ["prop1"], "dependents": {"prop1": ["prop2"]}}
+ path = write_properties(tmp_path, data)
+ actual = metadata.get_properties(properties_file=path, extra_properties=["prop4"])
+ expected = ["prop1", "prop4"], {"prop1": ["prop2"]}
+ assert actual == expected
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_products.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_products.py
new file mode 100644
index 0000000000..c225958e78
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_products.py
@@ -0,0 +1,60 @@
+# mypy: allow-untyped-defs, allow-untyped-calls
+
+from os.path import join, dirname
+from unittest import mock
+
+import pytest
+
+from .base import all_products, active_products
+from .. import environment
+from .. import products
+from .. import wptcommandline
+
+wpt_root = join(dirname(__file__), "..", "..", "..", "..")
+
+test_paths = {"/": wptcommandline.TestRoot(wpt_root, wpt_root)}
+environment.do_delayed_imports(None, test_paths)
+
+
+@active_products("product")
+def test_load_active_product(product):
+ """test we can successfully load the product of the current testenv"""
+ products.Product({}, product)
+ # test passes if it doesn't throw
+
+
+@all_products("product")
+def test_load_all_products(product):
+ """test every product either loads or throws ImportError"""
+ try:
+ products.Product({}, product)
+ except ImportError:
+ pass
+
+
+@active_products("product", marks={
+ "sauce": pytest.mark.skip("needs env extras kwargs"),
+})
+def test_server_start_config(product):
+ product_data = products.Product({}, product)
+
+ env_extras = product_data.get_env_extras()
+
+ with mock.patch.object(environment.serve, "start") as start:
+ with environment.TestEnvironment(test_paths,
+ 1,
+ False,
+ False,
+ None,
+ product_data.env_options,
+ {"type": "none"},
+ env_extras):
+ start.assert_called_once()
+ args = start.call_args
+ config = args[0][1]
+ if "server_host" in product_data.env_options:
+ assert config["server_host"] == product_data.env_options["server_host"]
+
+ else:
+ assert config["server_host"] == config["browser_host"]
+ assert isinstance(config["bind_address"], bool)
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_stability.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_stability.py
new file mode 100644
index 0000000000..d6e7cc8f70
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_stability.py
@@ -0,0 +1,186 @@
+# mypy: allow-untyped-defs
+
+import sys
+from collections import OrderedDict, defaultdict
+from unittest import mock
+
+from mozlog.structuredlog import StructuredLogger
+from mozlog.formatters import TbplFormatter
+from mozlog.handlers import StreamHandler
+
+from .. import stability, wptrunner
+
+def test_is_inconsistent():
+ assert stability.is_inconsistent({"PASS": 10}, 10) is False
+ assert stability.is_inconsistent({"PASS": 9}, 10) is True
+ assert stability.is_inconsistent({"PASS": 9, "FAIL": 1}, 10) is True
+ assert stability.is_inconsistent({"PASS": 8, "FAIL": 1}, 10) is True
+
+
+def test_find_slow_status():
+ assert stability.find_slow_status({
+ "longest_duration": {"TIMEOUT": 10},
+ "timeout": 10}) is None
+ assert stability.find_slow_status({
+ "longest_duration": {"CRASH": 10},
+ "timeout": 10}) is None
+ assert stability.find_slow_status({
+ "longest_duration": {"ERROR": 10},
+ "timeout": 10}) is None
+ assert stability.find_slow_status({
+ "longest_duration": {"PASS": 1},
+ "timeout": 10}) is None
+ assert stability.find_slow_status({
+ "longest_duration": {"PASS": 81},
+ "timeout": 100}) == "PASS"
+ assert stability.find_slow_status({
+ "longest_duration": {"TIMEOUT": 10, "FAIL": 81},
+ "timeout": 100}) == "FAIL"
+ assert stability.find_slow_status({
+ "longest_duration": {"SKIP": 0}}) is None
+
+
+def test_get_steps():
+ logger = None
+
+ steps = stability.get_steps(logger, 0, 0, [])
+ assert len(steps) == 0
+
+ steps = stability.get_steps(logger, 0, 0, [{}])
+ assert len(steps) == 0
+
+ repeat_loop = 1
+ flag_name = 'flag'
+ flag_value = 'y'
+ steps = stability.get_steps(logger, repeat_loop, 0, [
+ {flag_name: flag_value}])
+ assert len(steps) == 1
+ assert steps[0][0] == "Running tests in a loop %d times with flags %s=%s" % (
+ repeat_loop, flag_name, flag_value)
+
+ repeat_loop = 0
+ repeat_restart = 1
+ flag_name = 'flag'
+ flag_value = 'n'
+ steps = stability.get_steps(logger, repeat_loop, repeat_restart, [
+ {flag_name: flag_value}])
+ assert len(steps) == 1
+ assert steps[0][0] == "Running tests in a loop with restarts %d times with flags %s=%s" % (
+ repeat_restart, flag_name, flag_value)
+
+ repeat_loop = 10
+ repeat_restart = 5
+ steps = stability.get_steps(logger, repeat_loop, repeat_restart, [{}])
+ assert len(steps) == 2
+ assert steps[0][0] == "Running tests in a loop %d times" % repeat_loop
+ assert steps[1][0] == (
+ "Running tests in a loop with restarts %d times" % repeat_restart)
+
+
+def test_log_handler():
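+ # Walk LogHandler through a test lifecycle and check its bookkeeping:
+ # test/subtest creation, status counts, messages, durations and timeouts.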
+ handler = stability.LogHandler()
+ data = OrderedDict()
+ data["test"] = "test_name"
+ test = handler.find_or_create_test(data)
+ assert test["subtests"] == OrderedDict()
+ assert test["status"] == defaultdict(int)
+ assert test["longest_duration"] == defaultdict(float)
+ assert test == handler.find_or_create_test(data)
+
+ start_time = 100
+ data["time"] = start_time
+ handler.test_start(data)
+ assert test["start_time"] == start_time
+
+ data["subtest"] = "subtest_name"
+ subtest = handler.find_or_create_subtest(data)
+ assert subtest["status"] == defaultdict(int)
+ assert subtest["messages"] == set()
+ assert subtest == handler.find_or_create_subtest(data)
+
+ data["status"] = 0
+ assert subtest["status"][data["status"]] == 0
+ handler.test_status(data)
+ assert subtest["status"][data["status"]] == 1
+ handler.test_status(data)
+ assert subtest["status"][data["status"]] == 2
+ data["status"] = 1
+ assert subtest["status"][data["status"]] == 0
+ message = "test message"
+ data["message"] = message
+ handler.test_status(data)
+ assert subtest["status"][data["status"]] == 1
+ assert len(subtest["messages"]) == 1
+ assert message in subtest["messages"]
+
+ test_duration = 10
+ data["time"] = data["time"] + test_duration
+ handler.test_end(data)
+ assert test["longest_duration"][data["status"]] == test_duration
+ assert "timeout" not in test
+
+ data["test2"] = "test_name_2"
+ timeout = 5
+ data["extra"] = {}
+ data["extra"]["test_timeout"] = timeout
+ handler.test_start(data)
+ handler.test_end(data)
+ assert test["timeout"] == timeout * 1000
+
+
+def test_err_string():
+ assert stability.err_string(
+ {'OK': 1, 'FAIL': 1}, 1) == "**Duplicate subtest name**"
+ assert stability.err_string(
+ {'OK': 2, 'FAIL': 1}, 2) == "**Duplicate subtest name**"
+ assert stability.err_string({'SKIP': 1}, 0) == "Duplicate subtest name"
+ assert stability.err_string(
+ {'SKIP': 1, 'OK': 1}, 1) == "Duplicate subtest name"
+
+ assert stability.err_string(
+ {'FAIL': 1}, 2) == "**FAIL: 1/2, MISSING: 1/2**"
+ assert stability.err_string(
+ {'FAIL': 1, 'OK': 1}, 3) == "**FAIL: 1/3, OK: 1/3, MISSING: 1/3**"
+
+ assert stability.err_string(
+ {'OK': 1, 'FAIL': 1}, 2) == "**FAIL: 1/2, OK: 1/2**"
+
+ assert stability.err_string(
+ {'OK': 2, 'FAIL': 1, 'SKIP': 1}, 4) == "FAIL: 1/4, OK: 2/4, SKIP: 1/4"
+ assert stability.err_string(
+ {'FAIL': 1, 'SKIP': 1, 'OK': 2}, 4) == "FAIL: 1/4, OK: 2/4, SKIP: 1/4"
+
+
+def test_check_stability_iterations():
+ logger = StructuredLogger("test-stability")
+ logger.add_handler(StreamHandler(sys.stdout, TbplFormatter()))
+
+ kwargs = {"verify_log_full": False}
+
+ def mock_run_tests(**kwargs):
+ repeats = kwargs.get("repeat", 1)
+ for _ in range(repeats):
+ logger.suite_start(tests=[], name="test")
+ for _ in range(kwargs.get("rerun", 1)):
+ logger.test_start("/example/test.html")
+ logger.test_status("/example/test.html", subtest="test1", status="PASS")
+ logger.test_end("/example/test.html", status="OK")
+ logger.suite_end()
+
+ status = wptrunner.TestStatus()
+ status.total_tests = 1
+ status.repeated_runs = repeats
+ status.expected_repeated_runs = repeats
+
+ return (None, status)
+
+ # Don't actually call wptrunner.run_tests, because that would end up starting
+ # a browser, which we don't want to do in this test.
+ with mock.patch("wptrunner.stability.wptrunner.run_tests") as mock_run:
+ mock_run.side_effect = mock_run_tests
+ assert stability.check_stability(logger,
+ repeat_loop=10,
+ repeat_restart=5,
+ chaos_mode=False,
+ output_results=False,
+ **kwargs) is None
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_testloader.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_testloader.py
new file mode 100644
index 0000000000..0915f42381
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_testloader.py
@@ -0,0 +1,349 @@
+# mypy: ignore-errors
+
+import os
+import sys
+import tempfile
+
+import pytest
+
+from mozlog import structured
+from ..testloader import (
+ DirectoryHashChunker,
+ IDHashChunker,
+ PathHashChunker,
+ Subsuite,
+ TestFilter,
+ TestLoader,
+ TagFilter,
+ read_include_from_file,
+)
+from .test_wpttest import make_mock_manifest
+
+here = os.path.dirname(__file__)
+sys.path.insert(0, os.path.join(here, os.pardir, os.pardir, os.pardir))
+from manifest.manifest import Manifest as WPTManifest
+
+structured.set_default_logger(structured.structuredlog.StructuredLogger("TestLoader"))
+
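+# Stop pytest from collecting these imported classes as test classes.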
+TestFilter.__test__ = False
+TestLoader.__test__ = False
+
+include_ini = """\
+skip: true
+[test_\u53F0]
+ skip: false
+"""
+
+
+@pytest.fixture
+def manifest():
+ manifest_json = {
+ "items": {
+ "testharness": {
+ "a": {
+ "foo.html": [
+ "abcdef123456",
+ ["a/foo.html?b", {}],
+ ["a/foo.html?c", {}],
+ ],
+ "bar.html": [
+ "uvwxyz987654",
+ [None, {}],
+ ],
+ }
+ }
+ },
+ "url_base": "/",
+ "version": 8,
+ }
+ return WPTManifest.from_json("/", manifest_json)
+
+
+def test_loader_h2_tests():
+ manifest_json = {
+ "items": {
+ "testharness": {
+ "a": {
+ "foo.html": [
+ "abcdef123456",
+ [None, {}],
+ ],
+ "bar.h2.html": [
+ "uvwxyz987654",
+ [None, {}],
+ ],
+ }
+ }
+ },
+ "url_base": "/",
+ "version": 8,
+ }
+ manifest = WPTManifest.from_json("/", manifest_json)
+ subsuites = {}
+ subsuites[""] = Subsuite("", config={})
+
+ # By default, the loader should include the h2 test.
+ loader = TestLoader({manifest: {"metadata_path": ""}}, ["testharness"], None, subsuites)
+ assert "testharness" in loader.tests[""]
+ assert len(loader.tests[""]["testharness"]) == 2
+ assert len(loader.disabled_tests[""]) == 0
+
+ # We can also instruct it to skip them.
+ loader = TestLoader({manifest: {"metadata_path": ""}}, ["testharness"], None, subsuites, include_h2=False)
+ assert "testharness" in loader.tests[""]
+ assert len(loader.tests[""]["testharness"]) == 1
+ assert "testharness" in loader.disabled_tests[""]
+ assert len(loader.disabled_tests[""]["testharness"]) == 1
+ assert loader.disabled_tests[""]["testharness"][0].url == "/a/bar.h2.html"
+
+
+@pytest.mark.xfail(sys.platform == "win32",
+ reason="NamedTemporaryFile cannot be reopened on Win32")
+def test_include_file():
+ test_cases = """
+# This is a comment
+/foo/bar-error.https.html
+/foo/bar-success.https.html
+/foo/idlharness.https.any.html
+/foo/idlharness.https.any.worker.html
+ """
+
+ with tempfile.NamedTemporaryFile(mode="wt") as f:
+ f.write(test_cases)
+ f.flush()
+
+ include = read_include_from_file(f.name)
+
+ assert len(include) == 4
+ assert "/foo/bar-error.https.html" in include
+ assert "/foo/bar-success.https.html" in include
+ assert "/foo/idlharness.https.any.html" in include
+ assert "/foo/idlharness.https.any.worker.html" in include
+
+
+@pytest.mark.xfail(sys.platform == "win32",
+ reason="NamedTemporaryFile cannot be reopened on Win32")
+def test_filter_unicode():
+ tests = make_mock_manifest(("test", "a", 10), ("test", "a/b", 10),
+ ("test", "c", 10))
+
+ with tempfile.NamedTemporaryFile("wb", suffix=".ini") as f:
+ f.write(include_ini.encode('utf-8'))
+ f.flush()
+
+ TestFilter(manifest_path=f.name, test_manifests=tests)
+
+
+def test_tag_filter():
+ # Mock a structure with what `TagFilter` actually uses
+ class Tagged:
+ def __init__(self, tags):
+ self.tags = set(tags)
+
+ # Case: empty filter (allow anything)
+ filter = TagFilter({}, {})
+ assert filter(Tagged({}))
+ assert filter(Tagged({'a'}))
+ assert filter(Tagged({'a', 'b'}))
+
+ # Case: only inclusion specified, single tag
+ filter = TagFilter({'a'}, {})
+ assert not filter(Tagged({})) # no 'a' tag, so the test is not included
+ assert filter(Tagged({'a'}))
+ assert not filter(Tagged({'b'}))
+ assert filter(Tagged({'a', 'b'}))
+
+ # Case: only inclusion specified, multiple tags
+ filter = TagFilter({'a', 'b'}, {})
+ assert not filter(Tagged({}))
+ assert filter(Tagged({'a'}))
+ assert filter(Tagged({'a', 'b'}))
+ assert filter(Tagged({'b'}))
+ assert not filter(Tagged({'c'}))
+
+ # Case: only exclusion specified, single tag
+ filter = TagFilter({}, {'a'})
+ assert filter(Tagged({})) # no 'a' tag, so nothing excludes the test
+ assert not filter(Tagged({'a'}))
+ assert not filter(Tagged({'a', 'b'}))
+ assert filter(Tagged({'b'}))
+
+ # Case: only exclusion specified, multiple tags
+ filter = TagFilter({}, {'a', 'b'})
+ assert filter(Tagged({}))
+ assert not filter(Tagged({'a'}))
+ assert not filter(Tagged({'b'}))
+ assert filter(Tagged({'c'}))
+
+ # Case: disjoint inclusion and exclusion
+ filter = TagFilter({'a'}, {'b'})
+ assert not filter(Tagged({}))
+ assert filter(Tagged({'a'}))
+ assert not filter(Tagged({'b'}))
+ assert not filter(Tagged({'a', 'b'})) # `exclude` overrides `include`
+
+ # Case: intersecting inclusion and exclusion
+ filter = TagFilter({'a'}, {'a'})
+ assert not filter(Tagged({}))
+ assert not filter(Tagged({'a'}))
+ assert not filter(Tagged({'a', 'b'})) # exclusion takes precedence
+ assert not filter(Tagged({'b'}))
+ filter = TagFilter({'a', 'b'}, {'a'})
+ assert not filter(Tagged({}))
+ assert not filter(Tagged({'a'}))
+ assert not filter(Tagged({'a', 'b'}))
+ assert filter(Tagged({'b'}))
+ filter = TagFilter({'a'}, {'a', 'b'})
+ assert not filter(Tagged({}))
+ assert not filter(Tagged({'a'}))
+ assert not filter(Tagged({'a', 'b'})) # exclusion takes precedence
+ assert not filter(Tagged({'b'}))
+
+
+def test_loader_filter_tags():
+ manifest_json = {
+ "items": {
+ "testharness": {
+ "a": {
+ "foo.html": [
+ "abcdef123456",
+ [None, {}],
+ ],
+ "bar.html": [ # will have `test-include` tag
+ "uvwxyz987654",
+ [None, {}],
+ ],
+ },
+ "b": {
+ "baz.html": [ # will have `test-include`, `test-exclude` tags
+ "quertyuiop@!",
+ [None, {}],
+ ],
+ "quux.html": [
+ "asdfghjkl_-'",
+ [None, {}],
+ ],
+ },
+ }
+ },
+ "url_base": "/",
+ "version": 8,
+ }
+ manifest = WPTManifest.from_json("/", manifest_json)
+
+ tmpdir_kwargs = {}
+ if sys.version_info >= (3, 10):
+ tmpdir_kwargs["ignore_cleanup_errors"] = True
+ with tempfile.TemporaryDirectory(**tmpdir_kwargs) as metadata_path:
+ a_path = os.path.join(metadata_path, "a")
+ os.makedirs(a_path)
+ with open(os.path.join(a_path, "bar.html.ini"), "w") as f:
+ f.write("tags: [test-include]\n")
+
+ subsuites = {}
+ subsuites[""] = Subsuite("", config={})
+
+ b_path = os.path.join(metadata_path, "b")
+ os.makedirs(b_path)
+ with open(os.path.join(b_path, "baz.html.ini"), "w") as f:
+ f.write("tags: [test-include, test-exclude]\n")
+
+
+ # Check: no filter loads all tests
+ loader = TestLoader({manifest: {"metadata_path": metadata_path}}, ["testharness"], None, subsuites)
+ assert len(loader.tests[""]["testharness"]) == 4
+
+ # Check: specifying a single `test-include` inclusion yields `/a/bar` and `/b/baz`
+ loader = TestLoader({manifest: {"metadata_path": metadata_path}}, ["testharness"], None, subsuites,
+ test_filters=[TagFilter({"test-include"}, {})])
+ assert len(loader.tests[""]["testharness"]) == 2
+ assert loader.tests[""]["testharness"][0].id == "/a/bar.html"
+ assert loader.tests[""]["testharness"][0].tags == {"dir:a", "test-include"}
+ assert loader.tests[""]["testharness"][1].id == "/b/baz.html"
+ assert loader.tests[""]["testharness"][1].tags == {"dir:b", "test-include", "test-exclude"}
+
+ # Check: specifying a single `test-exclude` exclusion rejects only `/b/baz`
+ loader = TestLoader({manifest: {"metadata_path": metadata_path}}, ["testharness"], None, subsuites,
+ test_filters=[TagFilter({}, {"test-exclude"})])
+ assert len(loader.tests[""]["testharness"]) == 3
+ assert all(test.id != "/b/baz.html" for test in loader.tests[""]["testharness"])
+
+ # Check: including `test-include` and excluding `test-exclude` yields only `/a/bar`
+ loader = TestLoader({manifest: {"metadata_path": metadata_path}}, ["testharness"], None, subsuites,
+ test_filters=[TagFilter({"test-include"}, {"test-exclude"})])
+ assert len(loader.tests[""]["testharness"]) == 1
+ assert loader.tests[""]["testharness"][0].id == "/a/bar.html"
+ assert loader.tests[""]["testharness"][0].tags == {"dir:a", "test-include"}
+
+ # Check: non-empty intersection of inclusion and exclusion yield zero tests
+
+ loader = TestLoader({manifest: {"metadata_path": metadata_path}}, ["testharness"], None, subsuites,
+ test_filters=[TagFilter({"test-include"}, {"test-include"})])
+ assert len(loader.tests[""]["testharness"]) == 0
+
+ loader = TestLoader({manifest: {"metadata_path": metadata_path}}, ["testharness"], None, subsuites,
+ test_filters=[TagFilter({"test-include", "test-exclude"}, {"test-include"})])
+ assert len(loader.tests[""]["testharness"]) == 0
+
+
+def test_chunk_hash(manifest):
+ chunker1 = PathHashChunker(total_chunks=2, chunk_number=1)
+ chunker2 = PathHashChunker(total_chunks=2, chunk_number=2)
+ # Check that the chunkers partition the manifest (i.e., each item is
+ # assigned to exactly one chunk).
+ items = sorted([*chunker1(manifest), *chunker2(manifest)],
+ key=lambda item: item[1])
+ assert len(items) == 2
+ test_type, test_path, tests = items[0]
+ assert test_type == "testharness"
+ assert test_path == os.path.join("a", "bar.html")
+ assert {test.id for test in tests} == {"/a/bar.html"}
+ test_type, test_path, tests = items[1]
+ assert test_type == "testharness"
+ assert test_path == os.path.join("a", "foo.html")
+ assert {test.id for test in tests} == {"/a/foo.html?b", "/a/foo.html?c"}
+
+
+def test_chunk_id_hash(manifest):
+ chunker1 = IDHashChunker(total_chunks=2, chunk_number=1)
+ chunker2 = IDHashChunker(total_chunks=2, chunk_number=2)
+ items = []
+ for test_type, test_path, tests in [*chunker1(manifest), *chunker2(manifest)]:
+ assert len(tests) > 0
+ items.extend((test_type, test_path, test) for test in tests)
+ assert len(items) == 3
+ items.sort(key=lambda item: item[2].id)
+ test_type, test_path, test = items[0]
+ assert test_type == "testharness"
+ assert test_path == os.path.join("a", "bar.html")
+ assert test.id == "/a/bar.html"
+ test_type, test_path, test = items[1]
+ assert test_type == "testharness"
+ assert test_path == os.path.join("a", "foo.html")
+ assert test.id == "/a/foo.html?b"
+ test_type, test_path, test = items[2]
+ assert test_type == "testharness"
+ assert test_path == os.path.join("a", "foo.html")
+ assert test.id == "/a/foo.html?c"
+
+
+def test_chunk_dir_hash(manifest):
+ chunker1 = DirectoryHashChunker(total_chunks=2, chunk_number=1)
+ chunker2 = DirectoryHashChunker(total_chunks=2, chunk_number=2)
+ # Check that tests in the same directory are located in the same chunk
+ # (which particular chunk is irrelevant).
+ empty_chunk, chunk_a = sorted([
+ list(chunker1(manifest)),
+ list(chunker2(manifest)),
+ ], key=len)
+ assert len(empty_chunk) == 0
+ assert len(chunk_a) == 2
+ test_type, test_path, tests = chunk_a[0]
+ assert test_type == "testharness"
+ assert test_path == os.path.join("a", "bar.html")
+ assert {test.id for test in tests} == {"/a/bar.html"}
+ test_type, test_path, tests = chunk_a[1]
+ assert test_type == "testharness"
+ assert test_path == os.path.join("a", "foo.html")
+ assert {test.id for test in tests} == {"/a/foo.html?b", "/a/foo.html?c"}
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_update.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_update.py
new file mode 100644
index 0000000000..59aaaeadff
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_update.py
@@ -0,0 +1,1893 @@
+# mypy: ignore-errors
+
+import json
+import os
+import sys
+from io import BytesIO, StringIO
+from unittest import mock
+
+import pytest
+
+from .. import metadata, manifestupdate, wptcommandline, wptmanifest
+from ..update.update import WPTUpdate
+from ..update.base import StepRunner, Step
+from mozlog import structuredlog, handlers, formatters
+
+here = os.path.dirname(__file__)
+sys.path.insert(0, os.path.join(here, os.pardir, os.pardir, os.pardir))
+from manifest import manifest, item as manifest_item, utils
+
+
+def rel_path_to_test_url(rel_path):
+ assert not os.path.isabs(rel_path)
+ return rel_path.replace(os.sep, "/")
+
+
+def SourceFileWithTest(path, hash, cls, *args):
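+ # Build a mock SourceFile whose manifest_items() reports a single test of the
+ # given manifest item class.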
+ path_parts = tuple(path.split("/"))
+ path = utils.to_os_path(path)
+ s = mock.Mock(rel_path=path, rel_path_parts=path_parts, hash=hash)
+ test = cls("/foobar", path, "/", rel_path_to_test_url(path), *args)
+ s.manifest_items = mock.Mock(return_value=(cls.item_type, [test]))
+ return s
+
+
+def tree_and_sourcefile_mocks(source_files):
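+ # Produce the (rel_path, hash, updated) rows passed to Manifest.update, plus a
+ # SourceFile factory that returns the prepared mocks.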
+ paths_dict = {}
+ tree = []
+ for source_file, file_hash, updated in source_files:
+ paths_dict[source_file.rel_path] = source_file
+ tree.append([source_file.rel_path, file_hash, updated])
+
+ def MockSourceFile(tests_root, path, url_base, file_hash):
+ return paths_dict[path]
+
+ return tree, MockSourceFile
+
+
+item_classes = {"testharness": manifest_item.TestharnessTest,
+ "reftest": manifest_item.RefTest,
+ "manual": manifest_item.ManualTest,
+ "wdspec": manifest_item.WebDriverSpecTest,
+ "conformancechecker": manifest_item.ConformanceCheckerTest,
+ "visual": manifest_item.VisualTest,
+ "support": manifest_item.SupportFile}
+
+
+default_run_info = {"debug": False, "os": "linux", "version": "18.04", "processor": "x86_64", "bits": 64}
+test_id = "/path/to/test.htm"
+dir_id = "path/to/__dir__"
+
+
+def reset_globals():
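+ # Clear the metadata interning caches so state does not leak between test cases.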
+ metadata.prop_intern.clear()
+ metadata.run_info_intern.clear()
+ metadata.status_intern.clear()
+
+
+def get_run_info(overrides):
+ run_info = default_run_info.copy()
+ run_info.update(overrides)
+ return run_info
+
+
+def update(tests, *logs, **kwargs):
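+ # Compile the expected metadata for each test, replay the given logs through an
+ # ExpectedUpdater, and return the resulting manifest updates.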
+ full_update = kwargs.pop("full_update", False)
+ disable_intermittent = kwargs.pop("disable_intermittent", False)
+ update_intermittent = kwargs.pop("update_intermittent", False)
+ remove_intermittent = kwargs.pop("remove_intermittent", False)
+ assert not kwargs
+ id_test_map, updater = create_updater(tests)
+
+ for log in logs:
+ log = create_log(log)
+ updater.update_from_log(log)
+
+ update_properties = (["debug", "os", "version", "processor"],
+ {"os": ["version"], "processor": ["bits"]})
+
+ expected_data = {}
+ metadata.load_expected = lambda _, __, test_path, *args: expected_data.get(test_path)
+ for test_path, test_ids, test_type, manifest_str in tests:
+ test_path = utils.to_os_path(test_path)
+ expected_data[test_path] = manifestupdate.compile(BytesIO(manifest_str),
+ test_path,
+ "/",
+ update_properties,
+ update_intermittent,
+ remove_intermittent)
+
+ return list(metadata.update_results(id_test_map,
+ update_properties,
+ full_update,
+ disable_intermittent,
+ update_intermittent,
+ remove_intermittent))
+
+
+def create_updater(tests, url_base="/", **kwargs):
+ id_test_map = {}
+ m = create_test_manifest(tests, url_base)
+
+ reset_globals()
+ id_test_map = metadata.create_test_tree(None, m)
+
+ return id_test_map, metadata.ExpectedUpdater(id_test_map, **kwargs)
+
+
+def create_log(entries):
+ data = StringIO()
+ if isinstance(entries, list):
+ logger = structuredlog.StructuredLogger("expected_test")
+ handler = handlers.StreamHandler(data, formatters.JSONFormatter())
+ logger.add_handler(handler)
+
+ for item in entries:
+ action, kwargs = item
+ getattr(logger, action)(**kwargs)
+ logger.remove_handler(handler)
+ else:
+ data.write(json.dumps(entries))
+ data.seek(0)
+ return data
+
+
+def suite_log(entries, run_info=None):
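+ # Wrap the entries in suite_start/suite_end events, merging any run_info
+ # overrides into the default run info.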
+ _run_info = default_run_info.copy()
+ if run_info:
+ _run_info.update(run_info)
+ return ([("suite_start", {"tests": [], "run_info": _run_info})] +
+ entries +
+ [("suite_end", {})])
+
+
+def create_test_manifest(tests, url_base="/"):
+ source_files = []
+ for i, (test, _, test_type, _) in enumerate(tests):
+ if test_type:
+ source_files.append(SourceFileWithTest(test, str(i) * 40, item_classes[test_type]))
+ m = manifest.Manifest("")
+ tree, sourcefile_mock = tree_and_sourcefile_mocks((item, None, True) for item in source_files)
+ with mock.patch("manifest.manifest.SourceFile", side_effect=sourcefile_mock):
+ m.update(tree)
+ return m
+
+
+def test_update_0():
+ tests = [("path/to/test.htm", [test_id], "testharness",
+ b"""[test.htm]
+ [test1]
+ expected: FAIL""")]
+
+ log = suite_log([("test_start", {"test": "/path/to/test.htm"}),
+ ("test_status", {"test": "/path/to/test.htm",
+ "subtest": "test1",
+ "status": "PASS",
+ "expected": "FAIL"}),
+ ("test_end", {"test": "/path/to/test.htm",
+ "status": "OK"})])
+
+ updated = update(tests, log)
+
+ assert len(updated) == 1
+ assert updated[0][1].is_empty
+
+
+def test_update_1():
+ tests = [("path/to/test.htm", [test_id], "testharness",
+ b"""[test.htm]
+ [test1]
+ expected: ERROR""")]
+
+ log = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "ERROR"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ updated = update(tests, log)
+
+ new_manifest = updated[0][1]
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get_test(test_id).children[0].get("expected", default_run_info) == "FAIL"
+
+
+def test_update_known_intermittent_1():
+ tests = [("path/to/test.htm", [test_id], "testharness",
+ b"""[test.htm]
+ [test1]
+ expected: PASS""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "PASS"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "PASS",
+ "expected": "PASS"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ log_2 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "PASS",
+ "expected": "PASS"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ updated = update(tests, log_0, log_1, log_2, update_intermittent=True)
+
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", default_run_info) == ["PASS", "FAIL"]
+
+
+def test_update_known_intermittent_2():
+ tests = [("path/to/test.htm", [test_id], "testharness",
+ b"""[test.htm]
+ [test1]
+ expected: PASS""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "PASS"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ updated = update(tests, log_0, update_intermittent=True)
+
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", default_run_info) == "FAIL"
+
+
+def test_update_existing_known_intermittent():
+ tests = [("path/to/test.htm", [test_id], "testharness",
+ b"""[test.htm]
+ [test1]
+ expected: [PASS, FAIL]""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "ERROR",
+ "expected": "PASS",
+ "known_intermittent": ["FAIL"]}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "PASS",
+ "expected": "PASS",
+ "known_intermittent": ["FAIL"]}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ log_2 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "PASS",
+ "expected": "PASS",
+ "known_intermittent": ["FAIL"]}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ updated = update(tests, log_0, log_1, log_2, update_intermittent=True)
+
+ new_manifest = updated[0][1]
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", default_run_info) == ["PASS", "ERROR", "FAIL"]
+
+
+def test_update_remove_previous_intermittent():
+ tests = [("path/to/test.htm", [test_id], "testharness",
+ b"""[test.htm]
+ [test1]
+ expected: [PASS, FAIL]""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "ERROR",
+ "expected": "PASS",
+ "known_intermittent": ["FAIL"]}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "PASS",
+ "expected": "PASS",
+ "known_intermittent": ["FAIL"]}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ log_2 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "PASS",
+ "expected": "PASS",
+ "known_intermittent": ["FAIL"]}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ updated = update(tests,
+ log_0,
+ log_1,
+ log_2,
+ update_intermittent=True,
+ remove_intermittent=True)
+
+ new_manifest = updated[0][1]
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", default_run_info) == ["PASS", "ERROR"]
+
+
+def test_update_new_test_with_intermittent():
+ tests = [("path/to/test.htm", [test_id], "testharness", None)]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "PASS",
+ "expected": "PASS"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "PASS",
+ "expected": "PASS"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ log_2 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "PASS"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ updated = update(tests, log_0, log_1, log_2, update_intermittent=True)
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get_test("test.htm") is None
+ assert len(new_manifest.get_test(test_id).children) == 1
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", default_run_info) == ["PASS", "FAIL"]
+
+
+def test_update_expected_tie_resolution():
+ tests = [("path/to/test.htm", [test_id], "testharness", None)]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "PASS",
+ "expected": "PASS"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "PASS"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ updated = update(tests, log_0, log_1, update_intermittent=True)
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", default_run_info) == ["PASS", "FAIL"]
+
+
+def test_update_no_reorder_expected():
+ tests = [("path/to/test.htm", [test_id], "testharness",
+ b"""[test.htm]
+ [test1]
+ expected: [PASS, FAIL]""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "PASS",
+ "known_intermittent": ["FAIL"]}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "PASS",
+ "known_intermittent": ["FAIL"]}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ log_2 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "PASS",
+ "expected": "PASS",
+ "known_intermittent": ["FAIL"]}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ updated = update(tests, log_0, log_1, log_2, update_intermittent=True)
+ assert not updated
+
+
+def test_update_and_preserve_unchanged_expected_intermittent():
+ tests = [("path/to/test.htm", [test_id], "testharness", b"""
+[test.htm]
+ expected:
+ if os == "android": [PASS, FAIL]
+ FAIL""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_end", {"test": test_id,
+ "status": "FAIL",
+ "expected": "PASS",
+ "known_intermittent": ["FAIL"]})],
+ run_info={"os": "android"})
+
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_end", {"test": test_id,
+ "status": "PASS",
+ "expected": "PASS",
+ "known_intermittent": ["FAIL"]})],
+ run_info={"os": "android"})
+
+ log_2 = suite_log([("test_start", {"test": test_id}),
+ ("test_end", {"test": test_id,
+ "status": "PASS",
+ "expected": "FAIL"})])
+
+ updated = update(tests, log_0, log_1, log_2)
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+
+ run_info_1 = default_run_info.copy()
+ run_info_1.update({"os": "android"})
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get_test(test_id).get(
+ "expected", run_info_1) == ["PASS", "FAIL"]
+ assert new_manifest.get_test(test_id).get(
+ "expected", default_run_info) == "PASS"
+
+
+def test_update_intermittent():
+ tests = [("path/to/test.htm", [test_id], "testharness",
+ b"""[test.htm]
+ [test1]
+ expected:
+ if os == "linux" or os == "android": [PASS, FAIL, ERROR]""")]
+
+ # Logs where the test requires an update, but we don't want to update the
+ # intermittent status
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "PASS",
+ "known_intermittent": ["FAIL", "ERROR"]}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "PASS",
+ "known_intermittent": ["FAIL", "ERROR"]}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ log_2 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "PASS",
+ "expected": "PASS",
+ "known_intermittent": ["FAIL", "ERROR"]}),
+ ("test_end", {"test": test_id,
+ "status": "TIMEOUT"})],
+ run_info={"os": "android"})
+
+ updated = update(tests, log_0, log_1, log_2, update_intermittent=True)
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+
+ run_info_1 = default_run_info.copy()
+ run_info_1.update({"os": "android"})
+ assert new_manifest.get_test(test_id).get(
+ "expected", run_info_1) == "TIMEOUT"
+
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", default_run_info) == ["PASS", "FAIL", "ERROR"]
+
+
+def test_update_test_with_intermittent_to_one_expected_status():
+ tests = [("path/to/test.htm", [test_id], "testharness",
+ b"""[test.htm]
+ [test1]
+ expected: [PASS, FAIL]""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "ERROR",
+ "expected": "PASS",
+ "known_intermittent": ["FAIL"]}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ updated = update(tests, log_0)
+
+ new_manifest = updated[0][1]
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", default_run_info) == "ERROR"
+
+
+def test_update_intermittent_with_conditions():
+ tests = [("path/to/test.htm", [test_id], "testharness", b"""
+[test.htm]
+ expected:
+ if os == "android": [PASS, FAIL]""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_end", {"test": test_id,
+ "status": "TIMEOUT",
+ "expected": "PASS",
+ "known_intermittent": ["FAIL"]})],
+ run_info={"os": "android"})
+
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_end", {"test": test_id,
+ "status": "PASS",
+ "expected": "PASS",
+ "known_intermittent": ["FAIL"]})],
+ run_info={"os": "android"})
+
+ updated = update(tests, log_0, log_1, update_intermittent=True)
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+
+ run_info_1 = default_run_info.copy()
+ run_info_1.update({"os": "android"})
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get_test(test_id).get(
+ "expected", run_info_1) == ["PASS", "TIMEOUT", "FAIL"]
+
+
+def test_update_and_remove_intermittent_with_conditions():
+ tests = [("path/to/test.htm", [test_id], "testharness", b"""
+[test.htm]
+ expected:
+ if os == "android": [PASS, FAIL]""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_end", {"test": test_id,
+ "status": "TIMEOUT",
+ "expected": "PASS",
+ "known_intermittent": ["FAIL"]})],
+ run_info={"os": "android"})
+
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_end", {"test": test_id,
+ "status": "PASS",
+ "expected": "PASS",
+ "known_intermittent": ["FAIL"]})],
+ run_info={"os": "android"})
+
+ updated = update(tests, log_0, log_1, update_intermittent=True, remove_intermittent=True)
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+
+ run_info_1 = default_run_info.copy()
+ run_info_1.update({"os": "android"})
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get_test(test_id).get(
+ "expected", run_info_1) == ["PASS", "TIMEOUT"]
+
+
+def test_update_intermittent_full():
+ tests = [("path/to/test.htm", [test_id], "testharness",
+ b"""[test.htm]
+ [test1]
+ expected:
+ if os == "mac": [FAIL, TIMEOUT]
+ FAIL""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "FAIL",
+ "known_intermittent": ["TIMEOUT"]}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"os": "mac"})
+
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ updated = update(tests, log_0, log_1, update_intermittent=True, full_update=True)
+
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ run_info_1 = default_run_info.copy()
+ run_info_1.update({"os": "mac"})
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", run_info_1) == ["FAIL", "TIMEOUT"]
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", default_run_info) == "FAIL"
+
+
+def test_update_intermittent_full_remove():
+ tests = [("path/to/test.htm", [test_id], "testharness",
+ b"""[test.htm]
+ [test1]
+ expected:
+ if os == "mac": [FAIL, TIMEOUT, PASS]
+ FAIL""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "FAIL",
+ "known_intermittent": ["TIMEOUT", "PASS"]}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"os": "mac"})
+
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "TIMEOUT",
+ "expected": "FAIL",
+ "known_intermittent": ["TIMEOUT", "PASS"]}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"os": "mac"})
+
+ log_2 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ updated = update(tests, log_0, log_1, log_2, update_intermittent=True,
+ full_update=True, remove_intermittent=True)
+
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ run_info_1 = default_run_info.copy()
+ run_info_1.update({"os": "mac"})
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", run_info_1) == ["FAIL", "TIMEOUT"]
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", default_run_info) == "FAIL"
+
+
+def test_full_update():
+ tests = [("path/to/test.htm", [test_id], "testharness",
+ b"""[test.htm]
+ [test1]
+ expected:
+ if os == "mac": [FAIL, TIMEOUT]
+ FAIL""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "FAIL",
+ "known_intermittent": ["TIMEOUT"]}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"os": "mac"})
+
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ updated = update(tests, log_0, log_1, full_update=True)
+
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ run_info_1 = default_run_info.copy()
+ run_info_1.update({"os": "mac"})
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", run_info_1) == "FAIL"
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", default_run_info) == "FAIL"
+
+
+def test_full_orphan():
+ tests = [("path/to/test.htm", [test_id], "testharness",
+ b"""[test.htm]
+ [test1]
+ expected: FAIL
+ [subsub test]
+ expected: TIMEOUT
+ [test2]
+ expected: ERROR
+""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "FAIL"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+
+ updated = update(tests, log_0, full_update=True)
+
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert len(new_manifest.get_test(test_id).children[0].children) == 0
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", default_run_info) == "FAIL"
+ assert len(new_manifest.get_test(test_id).children) == 1
+
+
+def test_update_no_reorder_expected_full_conditions():
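+    # Every observed status is already listed in the existing conditional
+    # expectations, so the updater should not rewrite (or reorder) them and
+    # no update is produced at all.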
+ tests = [("path/to/test.htm", [test_id], "testharness",
+ b"""[test.htm]
+ [test1]
+ expected:
+ if os == "mac": [FAIL, TIMEOUT]
+ [FAIL, PASS]""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "TIMEOUT",
+ "expected": "FAIL",
+ "known_intermittent": ["TIMEOUT"]}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"os": "mac"})
+
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "TIMEOUT",
+ "expected": "FAIL",
+ "known_intermittent": ["TIMEOUT"]}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"os": "mac"})
+
+ log_2 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "PASS",
+ "expected": "FAIL",
+ "known_intermittent": ["PASS"]}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ log_3 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "PASS",
+ "expected": "FAIL",
+ "known_intermittent": ["PASS"]}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ updated = update(tests, log_0, log_1, log_2, log_3, update_intermittent=True, full_update=True)
+ assert not updated
+
+
+def test_skip_0():
+ tests = [("path/to/test.htm", [test_id], "testharness",
+ b"""[test.htm]
+ [test1]
+ expected: FAIL""")]
+
+ log = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "FAIL"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ updated = update(tests, log)
+ assert not updated
+
+
+def test_new_subtest():
+ tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]
+ [test1]
+ expected: FAIL""")]
+
+ log = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "FAIL"}),
+ ("test_status", {"test": test_id,
+ "subtest": "test2",
+ "status": "FAIL",
+ "expected": "PASS"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+ updated = update(tests, log)
+ new_manifest = updated[0][1]
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get_test(test_id).children[0].get("expected", default_run_info) == "FAIL"
+ assert new_manifest.get_test(test_id).children[1].get("expected", default_run_info) == "FAIL"
+
+
+def test_update_subtest():
+ tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]
+ expected:
+ if os == "linux": [OK, ERROR]
+ [test1]
+ expected: FAIL""")]
+
+ log = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "known_intermittent": []}),
+ ("test_status", {"test": test_id,
+ "subtest": "test2",
+ "status": "FAIL",
+ "expected": "PASS",
+ "known_intermittent": []}),
+ ("test_end", {"test": test_id,
+ "status": "OK",
+ "known_intermittent": ["ERROR"]})])
+ updated = update(tests, log)
+ new_manifest = updated[0][1]
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get_test(test_id).children[0].get("expected", default_run_info) == "FAIL"
+
+
+def test_update_multiple_0():
+ tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]
+ [test1]
+ expected: FAIL""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "FAIL"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"debug": False, "os": "osx"})
+
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "TIMEOUT",
+ "expected": "FAIL"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"debug": False, "os": "linux"})
+
+ updated = update(tests, log_0, log_1)
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ run_info_1 = default_run_info.copy()
+ run_info_1.update({"debug": False, "os": "osx"})
+ run_info_2 = default_run_info.copy()
+ run_info_2.update({"debug": False, "os": "linux"})
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", run_info_1) == "FAIL"
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", {"debug": False, "os": "linux"}) == "TIMEOUT"
+
+
+def test_update_multiple_1():
+ tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]
+ [test1]
+ expected: FAIL""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "FAIL"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"os": "osx"})
+
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "TIMEOUT",
+ "expected": "FAIL"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"os": "linux"})
+
+ updated = update(tests, log_0, log_1)
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ run_info_1 = default_run_info.copy()
+ run_info_1.update({"os": "osx"})
+ run_info_2 = default_run_info.copy()
+ run_info_2.update({"os": "linux"})
+ run_info_3 = default_run_info.copy()
+ run_info_3.update({"os": "win"})
+
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", run_info_1) == "FAIL"
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", run_info_2) == "TIMEOUT"
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", run_info_3) == "FAIL"
+
+
+def test_update_multiple_2():
+ tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]
+ [test1]
+ expected: FAIL""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "FAIL"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"debug": False, "os": "osx"})
+
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "TIMEOUT",
+ "expected": "FAIL"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"debug": True, "os": "osx"})
+
+ updated = update(tests, log_0, log_1)
+ new_manifest = updated[0][1]
+
+ run_info_1 = default_run_info.copy()
+ run_info_1.update({"debug": False, "os": "osx"})
+ run_info_2 = default_run_info.copy()
+ run_info_2.update({"debug": True, "os": "osx"})
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", run_info_1) == "FAIL"
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", run_info_2) == "TIMEOUT"
+
+
+def test_update_multiple_3():
+ tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]
+ [test1]
+ expected:
+ if debug: FAIL
+ if not debug and os == "osx": TIMEOUT""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "FAIL"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"debug": False, "os": "osx"})
+
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "TIMEOUT",
+ "expected": "FAIL"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"debug": True, "os": "osx"})
+
+ updated = update(tests, log_0, log_1)
+ new_manifest = updated[0][1]
+
+ run_info_1 = default_run_info.copy()
+ run_info_1.update({"debug": False, "os": "osx"})
+ run_info_2 = default_run_info.copy()
+ run_info_2.update({"debug": True, "os": "osx"})
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", run_info_1) == "FAIL"
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", run_info_2) == "TIMEOUT"
+
+
+def test_update_ignore_existing():
+ tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]
+ [test1]
+ expected:
+ if debug: TIMEOUT
+ if not debug and os == "osx": NOTRUN""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "PASS"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"debug": False, "os": "linux"})
+
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "PASS"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"debug": True, "os": "windows"})
+
+ updated = update(tests, log_0, log_1)
+ new_manifest = updated[0][1]
+
+ run_info_1 = default_run_info.copy()
+ run_info_1.update({"debug": False, "os": "linux"})
+ run_info_2 = default_run_info.copy()
+ run_info_2.update({"debug": False, "os": "osx"})
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", run_info_1) == "FAIL"
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", run_info_2) == "NOTRUN"
+
+
+def test_update_new_test():
+ tests = [("path/to/test.htm", [test_id], "testharness", None)]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "PASS"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+ updated = update(tests, log_0)
+ new_manifest = updated[0][1]
+
+ run_info_1 = default_run_info.copy()
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get_test("test.htm") is None
+ assert len(new_manifest.get_test(test_id).children) == 1
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", run_info_1) == "FAIL"
+
+
+def test_update_duplicate():
+ tests = [("path/to/test.htm", [test_id], "testharness", b"""
+[test.htm]
+ expected: ERROR""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_end", {"test": test_id,
+ "status": "PASS"})])
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_end", {"test": test_id,
+ "status": "FAIL"})])
+
+ updated = update(tests, log_0, log_1)
+ new_manifest = updated[0][1]
+ run_info_1 = default_run_info.copy()
+
+ assert new_manifest.get_test(test_id).get(
+ "expected", run_info_1) == "ERROR"
+
+
+def test_update_disable_intermittent():
+ tests = [("path/to/test.htm", [test_id], "testharness", b"""
+[test.htm]
+ expected: ERROR""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_end", {"test": test_id,
+ "status": "PASS"})])
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_end", {"test": test_id,
+ "status": "FAIL"})])
+
+ updated = update(tests, log_0, log_1, disable_intermittent="Some message")
+ new_manifest = updated[0][1]
+ run_info_1 = default_run_info.copy()
+
+ assert new_manifest.get_test(test_id).get(
+ "disabled", run_info_1) == "Some message"
+
+
+def test_update_stability_conditional_instability():
+ tests = [("path/to/test.htm", [test_id], "testharness", b"""
+[test.htm]
+ expected: ERROR""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_end", {"test": test_id,
+ "status": "PASS"})],
+ run_info={"os": "linux"})
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_end", {"test": test_id,
+ "status": "FAIL"})],
+ run_info={"os": "linux"})
+ log_2 = suite_log([("test_start", {"test": test_id}),
+ ("test_end", {"test": test_id,
+ "status": "FAIL"})],
+ run_info={"os": "mac"})
+
+ updated = update(tests, log_0, log_1, log_2, disable_intermittent="Some message")
+ new_manifest = updated[0][1]
+ run_info_1 = default_run_info.copy()
+ run_info_1.update({"os": "linux"})
+ run_info_2 = default_run_info.copy()
+ run_info_2.update({"os": "mac"})
+
+ assert new_manifest.get_test(test_id).get(
+ "disabled", run_info_1) == "Some message"
+ with pytest.raises(KeyError):
+ assert new_manifest.get_test(test_id).get(
+ "disabled", run_info_2)
+ assert new_manifest.get_test(test_id).get(
+ "expected", run_info_2) == "FAIL"
+
+
+def test_update_full():
+ tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]
+ [test1]
+ expected:
+ if debug: TIMEOUT
+ if not debug and os == "osx": NOTRUN
+
+ [test2]
+ expected: FAIL
+
+[test.js]
+ [test1]
+ expected: FAIL
+""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "PASS"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"debug": False})
+
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "ERROR",
+ "expected": "PASS"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"debug": True})
+
+ updated = update(tests, log_0, log_1, full_update=True)
+ new_manifest = updated[0][1]
+
+ run_info_1 = default_run_info.copy()
+ run_info_1.update({"debug": False, "os": "win"})
+ run_info_2 = default_run_info.copy()
+ run_info_2.update({"debug": True, "os": "osx"})
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get_test("test.js") is None
+ assert len(new_manifest.get_test(test_id).children) == 1
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", run_info_1) == "FAIL"
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", run_info_2) == "ERROR"
+
+
+def test_update_full_unknown():
+ tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]
+ [test1]
+ expected:
+ if release_or_beta: ERROR
+ if not debug and os == "osx": NOTRUN
+""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "PASS"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"debug": False, "release_or_beta": False})
+
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "PASS"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"debug": True, "release_or_beta": False})
+
+ updated = update(tests, log_0, log_1, full_update=True)
+ new_manifest = updated[0][1]
+
+ run_info_1 = default_run_info.copy()
+ run_info_1.update({"release_or_beta": False})
+ run_info_2 = default_run_info.copy()
+ run_info_2.update({"release_or_beta": True})
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", run_info_1) == "FAIL"
+ assert new_manifest.get_test(test_id).children[0].get(
+ "expected", run_info_2) == "ERROR"
+
+
+def test_update_full_unknown_missing():
+ tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]
+ [subtest_deleted]
+ expected:
+ if release_or_beta: ERROR
+ FAIL
+""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "PASS",
+ "expected": "PASS"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"debug": False, "release_or_beta": False})
+
+ updated = update(tests, log_0, full_update=True)
+ assert len(updated) == 0
+
+
+def test_update_default():
+ tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]
+ [test1]
+ expected:
+ if os == "mac": FAIL
+ ERROR""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "PASS",
+ "expected": "FAIL"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"os": "mac"})
+
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "PASS",
+ "expected": "ERROR"}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"os": "linux"})
+
+ updated = update(tests, log_0, log_1)
+ new_manifest = updated[0][1]
+
+ assert new_manifest.is_empty
+ assert new_manifest.modified
+
+
+def test_update_default_1():
+ tests = [("path/to/test.htm", [test_id], "testharness", b"""
+[test.htm]
+ expected:
+ if os == "mac": TIMEOUT
+ ERROR""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_end", {"test": test_id,
+ "expected": "ERROR",
+ "status": "FAIL"})],
+ run_info={"os": "linux"})
+
+ updated = update(tests, log_0)
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+
+ run_info_1 = default_run_info.copy()
+ run_info_1.update({"os": "mac"})
+ run_info_2 = default_run_info.copy()
+ run_info_2.update({"os": "win"})
+
+ assert new_manifest.get_test(test_id).get(
+ "expected", run_info_1) == "TIMEOUT"
+ assert new_manifest.get_test(test_id).get(
+ "expected", run_info_2) == "FAIL"
+
+
+def test_update_default_2():
+ tests = [("path/to/test.htm", [test_id], "testharness", b"""
+[test.htm]
+ expected:
+ if os == "mac": TIMEOUT
+ ERROR""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_end", {"test": test_id,
+ "expected": "ERROR",
+ "status": "TIMEOUT"})],
+ run_info={"os": "linux"})
+
+ updated = update(tests, log_0)
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+
+ run_info_1 = default_run_info.copy()
+ run_info_1.update({"os": "mac"})
+ run_info_2 = default_run_info.copy()
+ run_info_2.update({"os": "win"})
+
+ assert new_manifest.get_test(test_id).get(
+ "expected", run_info_1) == "TIMEOUT"
+ assert new_manifest.get_test(test_id).get(
+ "expected", run_info_2) == "TIMEOUT"
+
+
+def test_update_assertion_count_0():
+ tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]
+ max-asserts: 4
+ min-asserts: 2
+""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("assertion_count", {"test": test_id,
+ "count": 6,
+ "min_expected": 2,
+ "max_expected": 4}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ updated = update(tests, log_0)
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get_test(test_id).get("max-asserts") == "7"
+ assert new_manifest.get_test(test_id).get("min-asserts") == "2"
+
+
+def test_update_assertion_count_1():
+ tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]
+ max-asserts: 4
+ min-asserts: 2
+""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("assertion_count", {"test": test_id,
+ "count": 1,
+ "min_expected": 2,
+ "max_expected": 4}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ updated = update(tests, log_0)
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get_test(test_id).get("max-asserts") == "4"
+ assert new_manifest.get_test(test_id).has_key("min-asserts") is False
+
+
+def test_update_assertion_count_2():
+ tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]
+ max-asserts: 4
+ min-asserts: 2
+""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("assertion_count", {"test": test_id,
+ "count": 3,
+ "min_expected": 2,
+ "max_expected": 4}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})])
+
+ updated = update(tests, log_0)
+ assert not updated
+
+
+def test_update_assertion_count_3():
+ tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]
+ max-asserts: 4
+ min-asserts: 2
+""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("assertion_count", {"test": test_id,
+ "count": 6,
+ "min_expected": 2,
+ "max_expected": 4}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"os": "windows"})
+
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("assertion_count", {"test": test_id,
+ "count": 7,
+ "min_expected": 2,
+ "max_expected": 4}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"os": "linux"})
+
+ updated = update(tests, log_0, log_1)
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get_test(test_id).get("max-asserts") == "8"
+ assert new_manifest.get_test(test_id).get("min-asserts") == "2"
+
+
+def test_update_assertion_count_4():
+ tests = [("path/to/test.htm", [test_id], "testharness", b"""[test.htm]""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("assertion_count", {"test": test_id,
+ "count": 6,
+ "min_expected": 0,
+ "max_expected": 0}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"os": "windows"})
+
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("assertion_count", {"test": test_id,
+ "count": 7,
+ "min_expected": 0,
+ "max_expected": 0}),
+ ("test_end", {"test": test_id,
+ "status": "OK"})],
+ run_info={"os": "linux"})
+
+ updated = update(tests, log_0, log_1)
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get_test(test_id).get("max-asserts") == "8"
+ assert new_manifest.get_test(test_id).has_key("min-asserts") is False
+
+
+def test_update_lsan_0():
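+    # The updater records a representative frame from the reported leak stack
+    # (here the first one, "foo") in the directory's lsan-allowed list.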
+ tests = [("path/to/test.htm", [test_id], "testharness", b""),
+ ("path/to/__dir__", [dir_id], None, b"")]
+
+ log_0 = suite_log([("lsan_leak", {"scope": "path/to/",
+ "frames": ["foo", "bar"]})])
+
+
+ updated = update(tests, log_0)
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get("lsan-allowed") == ["foo"]
+
+
+def test_update_lsan_1():
+ tests = [("path/to/test.htm", [test_id], "testharness", b""),
+ ("path/to/__dir__", [dir_id], None, b"""
+lsan-allowed: [foo]""")]
+
+ log_0 = suite_log([("lsan_leak", {"scope": "path/to/",
+ "frames": ["foo", "bar"]}),
+ ("lsan_leak", {"scope": "path/to/",
+ "frames": ["baz", "foobar"]})])
+
+
+ updated = update(tests, log_0)
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get("lsan-allowed") == ["baz", "foo"]
+
+
+def test_update_lsan_2():
+ tests = [("path/to/test.htm", [test_id], "testharness", b""),
+ ("path/__dir__", ["path/__dir__"], None, b"""
+lsan-allowed: [foo]"""),
+ ("path/to/__dir__", [dir_id], None, b"")]
+
+ log_0 = suite_log([("lsan_leak", {"scope": "path/to/",
+ "frames": ["foo", "bar"],
+ "allowed_match": ["foo"]}),
+ ("lsan_leak", {"scope": "path/to/",
+ "frames": ["baz", "foobar"]})])
+
+
+ updated = update(tests, log_0)
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get("lsan-allowed") == ["baz"]
+
+
+def test_update_lsan_3():
+ tests = [("path/to/test.htm", [test_id], "testharness", b""),
+ ("path/to/__dir__", [dir_id], None, b"")]
+
+ log_0 = suite_log([("lsan_leak", {"scope": "path/to/",
+ "frames": ["foo", "bar"]})],
+ run_info={"os": "win"})
+
+ log_1 = suite_log([("lsan_leak", {"scope": "path/to/",
+ "frames": ["baz", "foobar"]})],
+ run_info={"os": "linux"})
+
+
+ updated = update(tests, log_0, log_1)
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get("lsan-allowed") == ["baz", "foo"]
+
+
+def test_update_wptreport_0():
+ tests = [("path/to/test.htm", [test_id], "testharness",
+ b"""[test.htm]
+ [test1]
+ expected: FAIL""")]
+
+ log = {"run_info": default_run_info.copy(),
+ "results": [
+ {"test": "/path/to/test.htm",
+ "subtests": [{"name": "test1",
+ "status": "PASS",
+ "expected": "FAIL"}],
+ "status": "OK"}]}
+
+ updated = update(tests, log)
+
+ assert len(updated) == 1
+ assert updated[0][1].is_empty
+
+
+def test_update_wptreport_1():
+ tests = [("path/to/test.htm", [test_id], "testharness", b""),
+ ("path/to/__dir__", [dir_id], None, b"")]
+
+ log = {"run_info": default_run_info.copy(),
+ "results": [],
+ "lsan_leaks": [{"scope": "path/to/",
+ "frames": ["baz", "foobar"]}]}
+
+ updated = update(tests, log)
+
+ assert len(updated) == 1
+ assert updated[0][1].get("lsan-allowed") == ["baz"]
+
+
+def test_update_leak_total_0():
+ tests = [("path/to/test.htm", [test_id], "testharness", b""),
+ ("path/to/__dir__", [dir_id], None, b"")]
+
+ log_0 = suite_log([("mozleak_total", {"scope": "path/to/",
+ "process": "default",
+ "bytes": 100,
+ "threshold": 0,
+ "objects": []})])
+
+ updated = update(tests, log_0)
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get("leak-threshold") == ['default:51200']
+
+
+def test_update_leak_total_1():
+ tests = [("path/to/test.htm", [test_id], "testharness", b""),
+ ("path/to/__dir__", [dir_id], None, b"")]
+
+ log_0 = suite_log([("mozleak_total", {"scope": "path/to/",
+ "process": "default",
+ "bytes": 100,
+ "threshold": 1000,
+ "objects": []})])
+
+ updated = update(tests, log_0)
+ assert not updated
+
+
+def test_update_leak_total_2():
+ tests = [("path/to/test.htm", [test_id], "testharness", b""),
+ ("path/to/__dir__", [dir_id], None, b"""
+leak-total: 110""")]
+
+ log_0 = suite_log([("mozleak_total", {"scope": "path/to/",
+ "process": "default",
+ "bytes": 100,
+ "threshold": 110,
+ "objects": []})])
+
+ updated = update(tests, log_0)
+ assert not updated
+
+
+def test_update_leak_total_3():
+ tests = [("path/to/test.htm", [test_id], "testharness", b""),
+ ("path/to/__dir__", [dir_id], None, b"""
+leak-total: 100""")]
+
+ log_0 = suite_log([("mozleak_total", {"scope": "path/to/",
+ "process": "default",
+ "bytes": 1000,
+ "threshold": 100,
+ "objects": []})])
+
+ updated = update(tests, log_0)
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.get("leak-threshold") == ['default:51200']
+
+
+def test_update_leak_total_4():
+ tests = [("path/to/test.htm", [test_id], "testharness", b""),
+ ("path/to/__dir__", [dir_id], None, b"""
+leak-total: 110""")]
+
+ log_0 = suite_log([
+ ("lsan_leak", {"scope": "path/to/",
+ "frames": ["foo", "bar"]}),
+ ("mozleak_total", {"scope": "path/to/",
+ "process": "default",
+ "bytes": 100,
+ "threshold": 110,
+ "objects": []})])
+
+ updated = update(tests, log_0)
+ new_manifest = updated[0][1]
+
+ assert not new_manifest.is_empty
+ assert new_manifest.modified
+ assert new_manifest.has_key("leak-threshold") is False
+
+
+class TestStep(Step):
+ __test__ = False
+
+ def create(self, state):
+ tests = [("path/to/test.htm", [test_id], "testharness", "")]
+ state.foo = create_test_manifest(tests)
+
+
+class UpdateRunner(StepRunner):
+ steps = [TestStep]
+
+
+def test_update_pickle():
+ logger = structuredlog.StructuredLogger("expected_test")
+ wpt_root = os.path.abspath(os.path.join(here,
+ os.pardir,
+ os.pardir,
+ os.pardir,
+ os.pardir))
+ args = {
+ "test_paths": {
+ "/": wptcommandline.TestRoot(wpt_root, wpt_root),
+ },
+ "abort": False,
+ "continue": False,
+ "sync": False,
+ }
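+    # The first WPTUpdate below is constructed with abort=True and never run;
+    # the second performs the actual update using the minimal UpdateRunner
+    # defined above.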
+ args2 = args.copy()
+ args2["abort"] = True
+ wptupdate = WPTUpdate(logger, **args2)
+ wptupdate = WPTUpdate(logger, runner_cls=UpdateRunner, **args)
+ wptupdate.run()
+
+
+def test_update_serialize_quoted():
+ tests = [("path/to/test.htm", [test_id], "testharness",
+ b"""[test.htm]
+ expected: "ERROR"
+ [test1]
+ expected:
+ if os == "linux": ["PASS", "FAIL"]
+ "ERROR"
+""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "PASS",
+ "known_intermittent": ["FAIL"]}),
+ ("test_end", {"test": test_id,
+ "expected": "ERROR",
+ "status": "OK"})],
+ run_info={"os": "linux"})
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "PASS",
+ "known_intermittent": ["FAIL"]}),
+ ("test_end", {"test": test_id,
+ "expected": "ERROR",
+ "status": "OK"})],
+ run_info={"os": "linux"})
+ log_2 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "ERROR"}),
+ ("test_end", {"test": test_id,
+ "expected": "ERROR",
+ "status": "OK"})],
+ run_info={"os": "win"})
+
+ updated = update(tests, log_0, log_1, log_2, full_update=True, update_intermittent=True)
+
+
+ manifest_str = wptmanifest.serialize(updated[0][1].node,
+ skip_empty_data=True)
+ assert manifest_str == """[test.htm]
+ [test1]
+ expected:
+ if os == "linux": [PASS, FAIL]
+ ERROR
+"""
+
+
+def test_update_serialize_unquoted():
+ tests = [("path/to/test.htm", [test_id], "testharness",
+ b"""[test.htm]
+ expected: ERROR
+ [test1]
+ expected:
+ if os == "linux": [PASS, FAIL]
+ ERROR
+""")]
+
+ log_0 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "PASS",
+ "known_intermittent": ["FAIL"]}),
+ ("test_end", {"test": test_id,
+ "expected": "ERROR",
+ "status": "OK"})],
+ run_info={"os": "linux"})
+ log_1 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "FAIL",
+ "expected": "PASS",
+ "known_intermittent": ["FAIL"]}),
+ ("test_end", {"test": test_id,
+ "expected": "ERROR",
+ "status": "OK"})],
+ run_info={"os": "linux"})
+ log_2 = suite_log([("test_start", {"test": test_id}),
+ ("test_status", {"test": test_id,
+ "subtest": "test1",
+ "status": "ERROR"}),
+ ("test_end", {"test": test_id,
+ "expected": "ERROR",
+ "status": "OK"})],
+ run_info={"os": "win"})
+
+ updated = update(tests, log_0, log_1, log_2, full_update=True, update_intermittent=True)
+
+
+ manifest_str = wptmanifest.serialize(updated[0][1].node,
+ skip_empty_data=True)
+ assert manifest_str == """[test.htm]
+ [test1]
+ expected:
+ if os == "linux": [PASS, FAIL]
+ ERROR
+"""
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_wptrunner.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_wptrunner.py
new file mode 100644
index 0000000000..3c9a0bc1fd
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_wptrunner.py
@@ -0,0 +1,79 @@
+from ..wptrunner import get_pause_after_test
+from .test_testloader import Subsuite, TestFilter, TestLoader, WPTManifest
+
+def test_get_pause_after_test(): # type: ignore
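+    # Hand-written MANIFEST.json contents (format version 8): each file maps
+    # to its source hash followed by one entry per test, roughly
+    # [url-or-None, ...references for reftests..., extras-dict].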
+ manifest_json = {
+ "items": {
+ "testharness": {
+ "a": {
+ "foo.html": [
+ "abcdef123456",
+ [None, {}],
+ ],
+ "bar.h2.html": [
+ "uvwxyz987654",
+ [None, {}],
+ ],
+ }
+ },
+ "reftest": {
+ "a": {
+ "reffoo.html": [
+ "abcdef654321",
+ [None, [["/common/some-ref.html", "=="]], {}]
+ ],
+ }
+ }
+ },
+ "url_base": "/",
+ "version": 8,
+ }
+
+ kwargs = {
+ "pause_after_test": None,
+ "repeat_until_unexpected": False,
+ "headless": False,
+ "debug_test": False,
+ "repeat": 1,
+ "rerun": 1
+ }
+
+ manifest = WPTManifest.from_json("/", manifest_json)
+ test_manifests = {manifest: {"metadata_path": ""}}
+
+ manifest_filters = [TestFilter(test_manifests, include=["/a/foo.html", "/a/reffoo.html"])]
+
+ subsuites = {}
+ subsuites[""] = Subsuite("", config={})
+
+ # This has two testharness tests, so shouldn't set pause_after_test
+ loader = TestLoader(test_manifests, ["testharness"], None, subsuites)
+
+ assert get_pause_after_test(loader, **kwargs) is False
+
+ # This has one testharness test, so should set pause_after_test
+ loader = TestLoader(test_manifests, ["testharness"], None, subsuites,
+ manifest_filters=manifest_filters)
+
+ assert get_pause_after_test(loader, **kwargs) is True
+
+    # This has one testharness test and one reftest, so shouldn't set pause_after_test
+ loader = TestLoader(test_manifests, ["testharness", "reftest"], None, subsuites,
+ manifest_filters=manifest_filters)
+
+ assert get_pause_after_test(loader, **kwargs) is False
+
+ # This has one reftest so shouldn't set pause_after_test
+ loader = TestLoader(test_manifests, ["reftest"], None, subsuites)
+
+ assert get_pause_after_test(loader, **kwargs) is False
+
+ multi_subsuites = {}
+ multi_subsuites[""] = Subsuite("", config={})
+ multi_subsuites["extra"] = Subsuite("extra", config={}, include=["/a/foo.html"])
+
+ # This has one testharness test per subsuite, so shouldn't set pause_after_test
+ loader = TestLoader(test_manifests, ["testharness"], None, multi_subsuites,
+ manifest_filters=manifest_filters)
+ print(loader.tests)
+ assert get_pause_after_test(loader, **kwargs) is False
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_wpttest.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_wpttest.py
new file mode 100644
index 0000000000..d2f68361bf
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_wpttest.py
@@ -0,0 +1,234 @@
+# mypy: ignore-errors
+
+from io import BytesIO
+from unittest import mock
+
+from manifest import manifest as wptmanifest
+from manifest.item import TestharnessTest, RefTest
+from manifest.utils import to_os_path
+from . test_update import tree_and_sourcefile_mocks
+from .. import manifestexpected, manifestupdate, wpttest
+
+
+TestharnessTest.__test__ = False
+
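+# The byte strings below are wpt expectation-metadata snippets that the tests
+# feed to manifestexpected.static.compile when building test objects.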
+dir_ini_0 = b"""\
+prefs: [a:b]
+"""
+
+dir_ini_1 = b"""\
+prefs: [@Reset, b:c]
+max-asserts: 2
+min-asserts: 1
+tags: [b, c]
+"""
+
+dir_ini_2 = b"""\
+lsan-max-stack-depth: 42
+"""
+
+test_0 = b"""\
+[0.html]
+ prefs: [c:d]
+ max-asserts: 3
+ tags: [a, @Reset]
+"""
+
+test_1 = b"""\
+[1.html]
+ prefs:
+ if os == 'win': [a:b, c:d]
+ expected:
+ if os == 'win': FAIL
+"""
+
+test_2 = b"""\
+[2.html]
+ lsan-max-stack-depth: 42
+"""
+
+test_3 = b"""\
+[3.html]
+ [subtest1]
+ expected: [PASS, FAIL]
+
+ [subtest2]
+ disabled: reason
+
+ [subtest3]
+ expected: FAIL
+"""
+
+test_4 = b"""\
+[4.html]
+ expected: FAIL
+"""
+
+test_5 = b"""\
+[5.html]
+"""
+
+test_6 = b"""\
+[6.html]
+ expected: [OK, FAIL]
+"""
+
+test_fuzzy = b"""\
+[fuzzy.html]
+ fuzzy: fuzzy-ref.html:1;200
+"""
+
+
+testharness_test = b"""<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>"""
+
+
+def make_mock_manifest(*items):
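+    # Build a Mock standing in for a wpt manifest: iterating or indexing it
+    # yields (test_type, filename, {TestharnessTest}) tuples.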
+ rv = mock.Mock(tests_root="/foobar")
+ tests = []
+ rv.__iter__ = lambda self: iter(tests)
+ rv.__getitem__ = lambda self, k: tests[k]
+ for test_type, dir_path, num_tests in items:
+ for i in range(num_tests):
+ filename = dir_path + "/%i.html" % i
+ tests.append((test_type,
+ filename,
+ {TestharnessTest("/foo.bar", filename, "/", filename)}))
+ return rv
+
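+# Test-only helper: compiles the given metadata bytes against `condition` and
+# pairs them with a mock manifest entry to build a wpttest test object.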
+def make_test_object(test_name,
+ test_path,
+ index,
+ items,
+ inherit_metadata=None,
+ iterate=False,
+ condition=None):
+ inherit_metadata = inherit_metadata if inherit_metadata is not None else []
+ condition = condition if condition is not None else {}
+ tests = make_mock_manifest(*items) if isinstance(items, list) else make_mock_manifest(items)
+
+ test_metadata = manifestexpected.static.compile(BytesIO(test_name),
+ condition,
+ data_cls_getter=manifestexpected.data_cls_getter,
+ test_path=test_path)
+
+ test = next(iter(tests[index][2])) if iterate else tests[index][2].pop()
+ return wpttest.from_manifest(tests, test, inherit_metadata,
+ test_metadata.get_test(manifestupdate.get_test_name(test.id)))
+
+
+def test_run_info():
+ run_info = wpttest.get_run_info("/", "fake-product", debug=False)
+ assert isinstance(run_info["bits"], int)
+ assert isinstance(run_info["os"], str)
+ assert isinstance(run_info["os_version"], str)
+ assert isinstance(run_info["processor"], str)
+ assert isinstance(run_info["product"], str)
+ assert isinstance(run_info["python_version"], int)
+
+
+def test_metadata_inherit():
+ items = [("test", "a", 10), ("test", "a/b", 10), ("test", "c", 10)]
+ inherit_metadata = [
+ manifestexpected.static.compile(
+ BytesIO(item),
+ {},
+ data_cls_getter=lambda x,y: manifestexpected.DirectoryManifest)
+ for item in [dir_ini_0, dir_ini_1]]
+
+ test_obj = make_test_object(test_0, "a/0.html", 0, items, inherit_metadata, True)
+
+ assert test_obj.max_assertion_count == 3
+ assert test_obj.min_assertion_count == 1
+ assert test_obj.prefs == {"b": "c", "c": "d"}
+ assert test_obj.tags == {"a", "dir:a"}
+
+
+def test_conditional():
+ items = [("test", "a", 10), ("test", "a/b", 10), ("test", "c", 10)]
+
+ test_obj = make_test_object(test_1, "a/1.html", 1, items, None, True, {"os": "win"})
+
+ assert test_obj.prefs == {"a": "b", "c": "d"}
+ assert test_obj.expected() == "FAIL"
+
+
+def test_metadata_lsan_stack_depth():
+ items = [("test", "a", 10), ("test", "a/b", 10)]
+
+ test_obj = make_test_object(test_2, "a/2.html", 2, items, None, True)
+
+ assert test_obj.lsan_max_stack_depth == 42
+
+ test_obj = make_test_object(test_2, "a/2.html", 1, items, None, True)
+
+ assert test_obj.lsan_max_stack_depth is None
+
+ inherit_metadata = [
+ manifestexpected.static.compile(
+ BytesIO(dir_ini_2),
+ {},
+ data_cls_getter=lambda x,y: manifestexpected.DirectoryManifest)
+ ]
+
+    test_obj = make_test_object(test_0, "a/0.html", 0, items, inherit_metadata, False)
+
+ assert test_obj.lsan_max_stack_depth == 42
+
+
+def test_subtests():
+ test_obj = make_test_object(test_3, "a/3.html", 3, ("test", "a", 4), None, False)
+ assert test_obj.expected("subtest1") == "PASS"
+ assert test_obj.known_intermittent("subtest1") == ["FAIL"]
+ assert test_obj.expected("subtest2") == "PASS"
+ assert test_obj.known_intermittent("subtest2") == []
+ assert test_obj.expected("subtest3") == "FAIL"
+ assert test_obj.known_intermittent("subtest3") == []
+
+
+def test_expected_fail():
+ test_obj = make_test_object(test_4, "a/4.html", 4, ("test", "a", 5), None, False)
+ assert test_obj.expected() == "FAIL"
+ assert test_obj.known_intermittent() == []
+
+
+def test_no_expected():
+ test_obj = make_test_object(test_5, "a/5.html", 5, ("test", "a", 6), None, False)
+ assert test_obj.expected() == "OK"
+ assert test_obj.known_intermittent() == []
+
+
+def test_known_intermittent():
+ test_obj = make_test_object(test_6, "a/6.html", 6, ("test", "a", 7), None, False)
+ assert test_obj.expected() == "OK"
+ assert test_obj.known_intermittent() == ["FAIL"]
+
+
+def test_metadata_fuzzy():
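+    # The metadata line "fuzzy: fuzzy-ref.html:1;200" uses the
+    # maxDifference;totalPixels syntax, which should parse to the range pairs
+    # ((1, 1), (200, 200)) asserted at the end of this test.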
+ item = RefTest(tests_root=".",
+ path="a/fuzzy.html",
+ url_base="/",
+ url="a/fuzzy.html",
+ references=[["/a/fuzzy-ref.html", "=="]],
+ fuzzy=[[["/a/fuzzy.html", '/a/fuzzy-ref.html', '=='],
+ [[2, 3], [10, 15]]]])
+ s = mock.Mock(rel_path="a/fuzzy.html", rel_path_parts=("a", "fuzzy.html"), hash="0"*40)
+ s.manifest_items = mock.Mock(return_value=(item.item_type, [item]))
+
+ manifest = wptmanifest.Manifest("")
+
+ tree, sourcefile_mock = tree_and_sourcefile_mocks([(s, None, True)])
+ with mock.patch("manifest.manifest.SourceFile", side_effect=sourcefile_mock):
+ assert manifest.update(tree) is True
+
+ test_metadata = manifestexpected.static.compile(BytesIO(test_fuzzy),
+ {},
+ data_cls_getter=manifestexpected.data_cls_getter,
+ test_path="a/fuzzy.html")
+
+ test = next(manifest.iterpath(to_os_path("a/fuzzy.html")))
+ test_obj = wpttest.from_manifest(manifest, test, [],
+ test_metadata.get_test(manifestupdate.get_test_name(test.id)))
+
+ assert test_obj.fuzzy == {('/a/fuzzy.html', '/a/fuzzy-ref.html', '=='): [[2, 3], [10, 15]]}
+ assert test_obj.fuzzy_override == {'/a/fuzzy-ref.html': ((1, 1), (200, 200))}
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/update/__init__.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/update/__init__.py
new file mode 100644
index 0000000000..1a58837f8d
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/update/__init__.py
@@ -0,0 +1,47 @@
+# mypy: allow-untyped-defs
+
+import sys
+
+from mozlog.structured import structuredlog, commandline
+
+from .. import wptcommandline
+
+from .update import WPTUpdate
+
+def remove_logging_args(args):
+ """Take logging args out of the dictionary of command line arguments so
+ they are not passed in as kwargs to the update code. This is particularly
+    necessary here because the arguments are often open file objects, which
+    cannot be serialized.
+
+ :param args: Dictionary of command line arguments.
+ """
+ for name in list(args.keys()):
+ if name.startswith("log_"):
+ args.pop(name)
+
+
+def setup_logging(args, defaults):
+ """Use the command line arguments to set up the logger.
+
+ :param args: Dictionary of command line arguments.
+ :param defaults: Dictionary of {formatter_name: stream} to use if
+ no command line logging is specified"""
+ logger = commandline.setup_logging("web-platform-tests-update", args, defaults)
+
+ remove_logging_args(args)
+
+ return logger
+
+
+def run_update(logger, **kwargs):
+ updater = WPTUpdate(logger, **kwargs)
+ return updater.run()
+
+
+def main():
+ args = wptcommandline.parse_args_update()
+ logger = setup_logging(args, {"mach": sys.stdout})
+ assert structuredlog.get_default_logger() is not None
+ success = run_update(logger, **args)
+ sys.exit(0 if success else 1)
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/update/base.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/update/base.py
new file mode 100644
index 0000000000..22ccde746f
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/update/base.py
@@ -0,0 +1,69 @@
+# mypy: allow-untyped-defs
+
+from typing import ClassVar, List, Type
+
+exit_unclean = object()
+exit_clean = object()
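+# Sentinel values: when a step's run() returns one of these, StepRunner.run()
+# stops iterating over the remaining steps.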
+
+
+class Step:
+    """Base class for state-creating steps."""
+
+    provides: ClassVar[List[str]] = []
+
+ def __init__(self, logger):
+ self.logger = logger
+
+ def run(self, step_index, state):
+ """Base class for state-creating steps.
+
+ When a Step is run() the current state is checked to see
+ if the state from this step has already been created. If it
+ has the restore() method is invoked. Otherwise the create()
+ method is invoked with the state object. This is expected to
+ add items with all the keys in __class__.provides to the state
+ object.
+ """
+
+ name = self.__class__.__name__
+
+ try:
+ stored_step = state.steps[step_index]
+ except IndexError:
+ stored_step = None
+
+ if stored_step == name:
+ self.restore(state)
+ elif stored_step is None:
+ self.create(state)
+ assert set(self.provides).issubset(set(state.keys()))
+ state.steps = state.steps + [name]
+ else:
+ raise ValueError(f"Expected a {name} step, got a {stored_step} step")
+
+ def create(self, data):
+ raise NotImplementedError
+
+ def restore(self, state):
+ self.logger.debug(f"Step {self.__class__.__name__} using stored state")
+ for key in self.provides:
+ assert key in state
+
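+# A minimal sketch of a concrete Step (names are illustrative; compare
+# TestStep in wptrunner/tests/test_update.py):
+#
+#     class LoadConfig(Step):
+#         provides = ["config"]
+#
+#         def create(self, state):
+#             state.config = {"product": "firefox"}  # hypothetical value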
+
+class StepRunner:
+    """Runs a specified series of Steps with a common State."""
+
+    steps: ClassVar[List[Type[Step]]] = []
+
+    def __init__(self, logger, state):
+ self.state = state
+ self.logger = logger
+ if "steps" not in state:
+ state.steps = []
+
+ def run(self):
+ rv = None
+ for step_index, step in enumerate(self.steps):
+ self.logger.debug("Starting step %s" % step.__name__)
+ rv = step(self.logger).run(step_index, self.state)
+ if rv in (exit_clean, exit_unclean):
+ break
+
+ return rv
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/update/metadata.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/update/metadata.py
new file mode 100644
index 0000000000..48519900e7
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/update/metadata.py
@@ -0,0 +1,62 @@
+# mypy: allow-untyped-defs
+
+import os
+
+from .. import metadata, products
+
+from .base import Step, StepRunner
+
+
+class GetUpdatePropertyList(Step):
+ provides = ["update_properties"]
+
+ def create(self, state):
+ state.update_properties = products.load_product_update(state.config, state.product.name)
+
+
+class UpdateExpected(Step):
+ """Do the metadata update on the local checkout"""
+
+ def create(self, state):
+ metadata.update_expected(state.paths,
+ state.run_log,
+ update_properties=state.update_properties,
+ full_update=state.full_update,
+ disable_intermittent=state.disable_intermittent,
+ update_intermittent=state.update_intermittent,
+ remove_intermittent=state.remove_intermittent)
+
+
+class CreateMetadataPatch(Step):
+ """Create a patch/commit for the metadata checkout"""
+
+ def create(self, state):
+ if not state.patch:
+ return
+
+ local_tree = state.local_tree
+ sync_tree = state.sync_tree
+
+ if sync_tree is not None:
+ name = "web-platform-tests_update_%s_metadata" % sync_tree.rev
+ message = f"Update {state.suite_name} expected data to revision {sync_tree.rev}"
+ else:
+ name = "web-platform-tests_update_metadata"
+ message = "Update %s expected data" % state.suite_name
+
+ local_tree.create_patch(name, message)
+
+ if not local_tree.is_clean:
+            metadata_paths = [manifest_path["metadata_path"]
+                              for manifest_path in state.paths.values()]
+ for path in metadata_paths:
+ local_tree.add_new(os.path.relpath(path, local_tree.root))
+ local_tree.update_patch(include=metadata_paths)
+ local_tree.commit_patch()
+
+
+class MetadataUpdateRunner(StepRunner):
+ """(Sub)Runner for updating metadata"""
+ steps = [GetUpdatePropertyList,
+ UpdateExpected,
+ CreateMetadataPatch]
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/update/state.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/update/state.py
new file mode 100644
index 0000000000..e187d411a2
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/update/state.py
@@ -0,0 +1,159 @@
+# mypy: allow-untyped-defs
+
+import os
+import pickle
+
+here = os.path.abspath(os.path.dirname(__file__))
+
+class BaseState:
+ def __new__(cls, logger):
+ rv = cls.load(logger)
+ if rv is not None:
+ logger.debug("Existing state found")
+ return rv
+
+ logger.debug("No existing state found")
+ return super().__new__(cls)
+
+ def __init__(self, logger):
+ """Object containing state variables created when running Steps.
+
+ Variables are set and get as attributes e.g. state_obj.spam = "eggs".
+
+        :param logger: Structured logger used for debug output.
+ """
+
+ if hasattr(self, "_data"):
+ return
+
+ self._data = [{}]
+ self._logger = logger
+ self._index = 0
+
+ def __getstate__(self):
+ rv = self.__dict__.copy()
+ del rv["_logger"]
+ return rv
+
+
+ def push(self, init_values):
+ """Push a new clean state dictionary
+
+ :param init_values: List of variable names in the current state dict to copy
+ into the new state dict."""
+
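+        # Intended use (key names are illustrative):
+        #
+        #     with state.push(["run_log"]):
+        #         state.scratch = "tmp"   # discarded when the block exits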
+ return StateContext(self, init_values)
+
+ def is_empty(self):
+ return len(self._data) == 1 and self._data[0] == {}
+
+ def clear(self):
+ """Remove all state and delete the stored copy."""
+ self._data = [{}]
+
+ def __setattr__(self, key, value):
+ if key.startswith("_"):
+ object.__setattr__(self, key, value)
+ else:
+ self._data[self._index][key] = value
+ self.save()
+
+ def __getattr__(self, key):
+ if key.startswith("_"):
+ raise AttributeError
+ try:
+ return self._data[self._index][key]
+ except KeyError as e:
+ raise AttributeError from e
+
+ def __contains__(self, key):
+ return key in self._data[self._index]
+
+ def update(self, items):
+ """Add a dictionary of {name: value} pairs to the state"""
+ self._data[self._index].update(items)
+ self.save()
+
+ def keys(self):
+ return self._data[self._index].keys()
+
+
+ @classmethod
+    def load(cls, logger):
+ raise NotImplementedError
+
+ def save(self):
+ raise NotImplementedError
+
+
+class SavedState(BaseState):
+ """On write the state is serialized to disk, such that it can be restored in
+ the event that the program is interrupted before all steps are complete.
+ Note that this only works well if the values are immutable; mutating an
+ existing value will not cause the data to be serialized."""
+ filename = os.path.join(here, ".wpt-update.lock")
+
+ @classmethod
+ def load(cls, logger):
+ """Load saved state from a file"""
+ try:
+ if not os.path.isfile(cls.filename):
+ return None
+ with open(cls.filename, "rb") as f:
+ try:
+ rv = pickle.load(f)
+ logger.debug(f"Loading data {rv._data!r}")
+ rv._logger = logger
+ rv._index = 0
+ return rv
+ except EOFError:
+ logger.warning("Found empty state file")
+ except OSError:
+ logger.debug("IOError loading stored state")
+
+ def save(self):
+ """Write the state to disk"""
+ with open(self.filename, "wb") as f:
+ pickle.dump(self, f)
+
+ def clear(self):
+ super().clear()
+ try:
+ os.unlink(self.filename)
+ except OSError:
+ pass
+
+
+class UnsavedState(BaseState):
+ @classmethod
+ def load(cls, logger):
+ return None
+
+ def save(self):
+ return
+
+
+class StateContext:
+ def __init__(self, state, init_values):
+ self.state = state
+ self.init_values = init_values
+
+ def __enter__(self):
+ if len(self.state._data) == self.state._index + 1:
+ # This is the case where there is no stored state
+ new_state = {}
+ for key in self.init_values:
+ new_state[key] = self.state._data[self.state._index][key]
+ self.state._data.append(new_state)
+ self.state._index += 1
+ self.state._logger.debug("Incremented index to %s" % self.state._index)
+
+ def __exit__(self, *args, **kwargs):
+ if len(self.state._data) > 1:
+ assert self.state._index == len(self.state._data) - 1
+ self.state._data.pop()
+ self.state._index -= 1
+ self.state._logger.debug("Decremented index to %s" % self.state._index)
+ assert self.state._index >= 0
+ else:
+ raise ValueError("Tried to pop the top state")
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/update/sync.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/update/sync.py
new file mode 100644
index 0000000000..b1dcf2d6c2
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/update/sync.py
@@ -0,0 +1,150 @@
+# mypy: allow-untyped-defs
+
+import fnmatch
+import os
+import re
+import shutil
+import sys
+import uuid
+
+from .base import Step, StepRunner
+from .tree import Commit
+
+here = os.path.abspath(os.path.dirname(__file__))
+
+
+def copy_wpt_tree(tree, dest, excludes=None, includes=None):
+ """Copy the working copy of a Tree to a destination directory.
+
+    :param tree: The Tree to copy.
+    :param dest: The destination directory.
+    :param excludes: Optional list of glob patterns for paths to skip.
+    :param includes: Optional list of glob patterns copied even if excluded."""
+    if os.path.exists(dest):
+        assert os.path.isdir(dest)
+        shutil.rmtree(dest)
+
+    os.mkdir(dest)
+
+ if excludes is None:
+ excludes = []
+
+ excludes = [re.compile(fnmatch.translate(item)) for item in excludes]
+
+ if includes is None:
+ includes = []
+
+ includes = [re.compile(fnmatch.translate(item)) for item in includes]
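+    # Both lists are shell-style globs (fnmatch); a path is skipped only when
+    # it matches an exclude pattern and no include pattern.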
+
+ for tree_path in tree.paths():
+ if (any(item.match(tree_path) for item in excludes) and
+ not any(item.match(tree_path) for item in includes)):
+ continue
+
+ source_path = os.path.join(tree.root, tree_path)
+ dest_path = os.path.join(dest, tree_path)
+
+ dest_dir = os.path.dirname(dest_path)
+ if not os.path.isdir(source_path):
+ if not os.path.exists(dest_dir):
+ os.makedirs(dest_dir)
+ shutil.copy2(source_path, dest_path)
+
+ for source, destination in [("testharness_runner.html", ""),
+ ("testdriver-vendor.js", "resources/")]:
+ source_path = os.path.join(here, os.pardir, source)
+ dest_path = os.path.join(dest, destination, os.path.basename(source))
+ shutil.copy2(source_path, dest_path)
+
+
+class UpdateCheckout(Step):
+ """Pull changes from upstream into the local sync tree."""
+
+ provides = ["local_branch"]
+
+ def create(self, state):
+ sync_tree = state.sync_tree
+ state.local_branch = uuid.uuid4().hex
+ sync_tree.update(state.sync["remote_url"],
+ state.sync["branch"],
+ state.local_branch)
+ sync_path = os.path.abspath(sync_tree.root)
+ if sync_path not in sys.path:
+ from .update import setup_paths
+ setup_paths(sync_path)
+
+ def restore(self, state):
+ assert os.path.abspath(state.sync_tree.root) in sys.path
+ Step.restore(self, state)
+
+
+class GetSyncTargetCommit(Step):
+ """Find the commit that we will sync to."""
+
+ provides = ["sync_commit"]
+
+ def create(self, state):
+ if state.target_rev is None:
+            # Use upstream branch HEAD as the base commit
+ state.sync_commit = state.sync_tree.get_remote_sha1(state.sync["remote_url"],
+ state.sync["branch"])
+ else:
+ state.sync_commit = Commit(state.sync_tree, state.rev)
+
+ state.sync_tree.checkout(state.sync_commit.sha1, state.local_branch, force=True)
+ self.logger.debug("New base commit is %s" % state.sync_commit.sha1)
+
+
+class UpdateManifest(Step):
+ """Update the manifest to match the tests in the sync tree checkout"""
+
+ provides = ["manifest_path", "test_manifest"]
+
+ def create(self, state):
+ from manifest import manifest # type: ignore
+ state.manifest_path = os.path.join(state.metadata_path, "MANIFEST.json")
+ state.test_manifest = manifest.load_and_update(state.sync["path"],
+ state.manifest_path,
+ "/",
+ write_manifest=True)
+
+
+class CopyWorkTree(Step):
+ """Copy the sync tree over to the destination in the local tree"""
+
+ def create(self, state):
+ copy_wpt_tree(state.sync_tree,
+ state.tests_path,
+ excludes=state.path_excludes,
+ includes=state.path_includes)
+
+
+class CreateSyncPatch(Step):
+ """Add the updated test files to a commit/patch in the local tree."""
+
+ def create(self, state):
+ if not state.patch:
+ return
+
+ local_tree = state.local_tree
+ sync_tree = state.sync_tree
+
+ local_tree.create_patch("web-platform-tests_update_%s" % sync_tree.rev,
+ f"Update {state.suite_name} to revision {sync_tree.rev}")
+ test_prefix = os.path.relpath(state.tests_path, local_tree.root)
+ local_tree.add_new(test_prefix)
+ local_tree.add_ignored(sync_tree, test_prefix)
+ updated = local_tree.update_patch(include=[state.tests_path,
+ state.metadata_path])
+ local_tree.commit_patch()
+
+ if not updated:
+ self.logger.info("Nothing to sync")
+
+
+class SyncFromUpstreamRunner(StepRunner):
+ """(Sub)Runner for doing an upstream sync"""
+ steps = [UpdateCheckout,
+ GetSyncTargetCommit,
+ UpdateManifest,
+ CopyWorkTree,
+ CreateSyncPatch]
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/update/tree.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/update/tree.py
new file mode 100644
index 0000000000..8c1b6a5f1b
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/update/tree.py
@@ -0,0 +1,407 @@
+# mypy: allow-untyped-defs
+
+import os
+import re
+import subprocess
+import tempfile
+
+from .. import vcs
+from ..vcs import git, hg
+
+
+def get_unique_name(existing, initial):
+ """Get a name either equal to initial or of the form initial_N, for some
+ integer N, that is not in the set existing.
+
+    :param existing: Set of names that must not be chosen.
+    :param initial: Name, or name prefix, to use."""
+ if initial not in existing:
+ return initial
+ for i in range(len(existing) + 1):
+ test = f"{initial}_{i + 1}"
+ if test not in existing:
+ return test
+ assert False
+
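+# Illustrative example (not part of the module): get_unique_name({"sync", "sync_1"}, "sync")
+# returns "sync_2", while get_unique_name(set(), "sync") returns "sync" unchanged.
+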
+class NoVCSTree:
+ name = "non-vcs"
+
+ def __init__(self, root=None):
+ if root is None:
+ root = os.path.abspath(os.curdir)
+ self.root = root
+
+ @classmethod
+ def is_type(cls, path=None):
+ return True
+
+ @property
+ def is_clean(self):
+ return True
+
+ def add_new(self, prefix=None):
+ pass
+
+ def add_ignored(self, sync_tree, prefix):
+ pass
+
+ def create_patch(self, patch_name, message):
+ pass
+
+ def update_patch(self, include=None):
+ pass
+
+ def commit_patch(self):
+ pass
+
+
+class HgTree:
+ name = "mercurial"
+
+ def __init__(self, root=None):
+ if root is None:
+ root = hg("root").strip()
+ self.root = root
+ self.hg = vcs.bind_to_repo(hg, self.root)
+
+ def __getstate__(self):
+ rv = self.__dict__.copy()
+ del rv['hg']
+ return rv
+
+    def __setstate__(self, state):
+        self.__dict__.update(state)
+ self.hg = vcs.bind_to_repo(vcs.hg, self.root)
+
+ @classmethod
+ def is_type(cls, path=None):
+ kwargs = {"log_error": False}
+ if path is not None:
+ kwargs["repo"] = path
+ try:
+ hg("root", **kwargs)
+ except Exception:
+ return False
+ return True
+
+ @property
+ def is_clean(self):
+ return self.hg("status").strip() == b""
+
+ def add_new(self, prefix=None):
+ if prefix is not None:
+ args = ("-I", prefix)
+ else:
+ args = ()
+ self.hg("add", *args)
+
+ def add_ignored(self, sync_tree, prefix):
+ pass
+
+ def create_patch(self, patch_name, message):
+ try:
+ self.hg("qinit", log_error=False)
+ except subprocess.CalledProcessError:
+ pass
+
+        patch_names = [item.strip().decode("utf-8")
+                       for item in self.hg("qseries").split(b"\n")
+                       if item.strip()]
+
+ suffix = 0
+ test_name = patch_name
+ while test_name in patch_names:
+ suffix += 1
+ test_name = "%s-%i" % (patch_name, suffix)
+
+ self.hg("qnew", test_name, "-X", self.root, "-m", message)
+
+ def update_patch(self, include=None):
+ if include is not None:
+ args = []
+ for item in include:
+ args.extend(["-I", item])
+ else:
+ args = ()
+
+ self.hg("qrefresh", *args)
+ return True
+
+ def commit_patch(self):
+ self.hg("qfinish")
+
+ def contains_commit(self, commit):
+ try:
+ self.hg("identify", "-r", commit.sha1)
+ return True
+ except subprocess.CalledProcessError:
+ return False
+
+
+class GitTree:
+ name = "git"
+
+ def __init__(self, root=None, log_error=True):
+ if root is None:
+ root = git("rev-parse", "--show-toplevel", log_error=log_error).strip().decode('utf-8')
+ self.root = root
+ self.git = vcs.bind_to_repo(git, self.root, log_error=log_error)
+ self.message = None
+ self.commit_cls = Commit
+
+ def __getstate__(self):
+ rv = self.__dict__.copy()
+ del rv['git']
+ return rv
+
+    def __setstate__(self, state):
+        self.__dict__.update(state)
+ self.git = vcs.bind_to_repo(vcs.git, self.root)
+
+ @classmethod
+ def is_type(cls, path=None):
+ kwargs = {"log_error": False}
+ if path is not None:
+ kwargs["repo"] = path
+ try:
+ git("rev-parse", "--show-toplevel", **kwargs)
+ except Exception:
+ return False
+ return True
+
+ @property
+ def rev(self):
+ """Current HEAD revision"""
+ if vcs.is_git_root(self.root):
+ return self.git("rev-parse", "HEAD").strip()
+ else:
+ return None
+
+ @property
+ def is_clean(self):
+ return self.git("status").strip() == b""
+
+ def add_new(self, prefix=None):
+ """Add files to the staging area.
+
+ :param prefix: None to include all files or a path prefix to
+ add all files under that path.
+ """
+ if prefix is None:
+ args = ["-a"]
+ else:
+ args = ["--no-ignore-removal", prefix]
+ self.git("add", *args)
+
+ def add_ignored(self, sync_tree, prefix):
+ """Add files to the staging area that are explicitly ignored by git.
+
+ :param prefix: None to include all files or a path prefix to
+ add all files under that path.
+ """
+ with tempfile.TemporaryFile() as f:
+ sync_tree.git("ls-tree", "-z", "-r", "--name-only", "HEAD", stdout=f)
+ f.seek(0)
+ ignored_files = sync_tree.git("check-ignore", "--no-index", "--stdin", "-z", stdin=f)
+ args = []
+ for entry in ignored_files.decode('utf-8').split('\0'):
+ args.append(os.path.join(prefix, entry))
+ if args:
+ self.git("add", "--force", *args)
+
+ def list_refs(self, ref_filter=None):
+ """Get a list of sha1, name tuples for references in a repository.
+
+        :param ref_filter: Pattern that the reference name must match (from the
+                           end, matching whole /-delimited segments only).
+        """
+ args = []
+ if ref_filter is not None:
+ args.append(ref_filter)
+ data = self.git("show-ref", *args)
+ rv = []
+ for line in data.split(b"\n"):
+ if not line.strip():
+ continue
+ sha1, ref = line.split()
+ rv.append((sha1, ref))
+ return rv
+
+ def list_remote(self, remote, ref_filter=None):
+ """Return a list of (sha1, name) tupes for references in a remote.
+
+ :param remote: URL of the remote to list.
+ :param ref_filter: Pattern that the reference name must match.
+ """
+ args = []
+ if ref_filter is not None:
+ args.append(ref_filter)
+ data = self.git("ls-remote", remote, *args)
+ rv = []
+ for line in data.split(b"\n"):
+ if not line.strip():
+ continue
+ sha1, ref = line.split()
+ rv.append((sha1, ref))
+ return rv
+
+ def get_remote_sha1(self, remote, branch):
+ """Return the SHA1 of a particular branch in a remote.
+
+ :param remote: the remote URL
+ :param branch: the branch name"""
+ for sha1, ref in self.list_remote(remote, branch):
+ if ref.decode('utf-8') == "refs/heads/%s" % branch:
+ return self.commit_cls(self, sha1.decode('utf-8'))
+ assert False
+
+ def create_patch(self, patch_name, message):
+ # In git a patch is actually a commit
+ self.message = message
+
+ def update_patch(self, include=None):
+ """Commit the staged changes, or changes to listed files.
+
+ :param include: Either None, to commit staged changes, or a list
+ of filenames (which must already be in the repo)
+ to commit
+ """
+ if include is not None:
+ args = tuple(include)
+ else:
+ args = ()
+
+ if self.git("status", "-uno", "-z", *args).strip():
+ self.git("add", *args)
+ return True
+ return False
+
+ def commit_patch(self):
+ assert self.message is not None
+
+ if self.git("diff", "--name-only", "--staged", "-z").strip():
+ self.git("commit", "-m", self.message)
+ return True
+
+ return False
+
+ def init(self):
+ self.git("init")
+ assert vcs.is_git_root(self.root)
+
+ def checkout(self, rev, branch=None, force=False):
+ """Checkout a particular revision, optionally into a named branch.
+
+ :param rev: Revision identifier (e.g. SHA1) to checkout
+ :param branch: Branch name to use
+ :param force: Force-checkout
+ """
+ assert rev is not None
+
+ args = []
+ if branch:
+ branches = [ref[len("refs/heads/"):].decode('utf-8') for sha1, ref in self.list_refs()
+ if ref.startswith(b"refs/heads/")]
+ branch = get_unique_name(branches, branch)
+
+ args += ["-b", branch]
+
+ if force:
+ args.append("-f")
+ args.append(rev)
+ self.git("checkout", *args)
+
+ def update(self, remote, remote_branch, local_branch):
+ """Fetch from the remote and checkout into a local branch.
+
+ :param remote: URL to the remote repository
+ :param remote_branch: Branch on the remote repository to check out
+ :param local_branch: Local branch name to check out into
+ """
+ if not vcs.is_git_root(self.root):
+ self.init()
+ self.git("clean", "-xdf")
+ self.git("fetch", remote, f"{remote_branch}:{local_branch}")
+ self.checkout(local_branch)
+ self.git("submodule", "update", "--init", "--recursive")
+
+ def clean(self):
+ self.git("checkout", self.rev)
+ self.git("branch", "-D", self.local_branch)
+
+ def paths(self):
+ """List paths in the tree"""
+ repo_paths = [self.root] + [os.path.join(self.root, path)
+ for path in self.submodules()]
+
+ rv = []
+
+ for repo_path in repo_paths:
+ paths = vcs.git("ls-tree", "-r", "--name-only", "HEAD", repo=repo_path).split(b"\n")
+ rv.extend(os.path.relpath(os.path.join(repo_path, item.decode('utf-8')), self.root) for item in paths
+ if item.strip())
+ return rv
+
+ def submodules(self):
+ """List submodule directories"""
+ output = self.git("submodule", "status", "--recursive")
+ rv = []
+ for line in output.split(b"\n"):
+ line = line.strip()
+ if not line:
+ continue
+ parts = line.split(b" ")
+ rv.append(parts[1])
+ return rv
+
+ def contains_commit(self, commit):
+ try:
+ self.git("rev-parse", "--verify", commit.sha1)
+ return True
+ except subprocess.CalledProcessError:
+ return False
+
+
+class CommitMessage:
+ def __init__(self, text):
+ self.text = text
+ self._parse_message()
+
+ def __str__(self):
+ return self.text
+
+ def _parse_message(self):
+ lines = self.text.splitlines()
+ self.full_summary = lines[0]
+ self.body = "\n".join(lines[1:])
+
+
+class Commit:
+ msg_cls = CommitMessage
+
+ _sha1_re = re.compile("^[0-9a-f]{40}$")
+
+ def __init__(self, tree, sha1):
+ """Object representing a commit in a specific GitTree.
+
+ :param tree: GitTree to which this commit belongs.
+ :param sha1: Full sha1 string for the commit
+ """
+ assert self._sha1_re.match(sha1)
+
+ self.tree = tree
+ self.git = tree.git
+ self.sha1 = sha1
+ self.author, self.email, self.message = self._get_meta()
+
+ def __getstate__(self):
+ rv = self.__dict__.copy()
+ del rv['git']
+ return rv
+
+    def __setstate__(self, state):
+        self.__dict__.update(state)
+ self.git = self.tree.git
+
+ def _get_meta(self):
+ author, email, message = self.git("show", "-s", "--format=format:%an\n%ae\n%B", self.sha1).decode('utf-8').split("\n", 2)
+ return author, email, self.msg_cls(message)
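+
+
+# Illustrative usage (assumed caller, not part of this module): a GitTree
+# normally constructs these objects itself, e.g.
+#   tree = GitTree("/path/to/checkout")
+#   head = Commit(tree, tree.rev.decode("utf-8"))
+#   print(head.author, head.message.full_summary)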
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/update/update.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/update/update.py
new file mode 100644
index 0000000000..d519c20bea
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/update/update.py
@@ -0,0 +1,191 @@
+# mypy: allow-untyped-defs
+
+import os
+import sys
+
+from .metadata import MetadataUpdateRunner
+from .sync import SyncFromUpstreamRunner
+from .tree import GitTree, HgTree, NoVCSTree
+
+from .base import Step, StepRunner, exit_clean, exit_unclean
+from .state import SavedState, UnsavedState
+
+def setup_paths(sync_path):
+ sys.path.insert(0, os.path.abspath(sync_path))
+ from tools import localpaths # noqa: F401
+
+class LoadConfig(Step):
+ """Step for loading configuration from the ini file and kwargs."""
+
+ provides = ["sync", "paths", "metadata_path", "tests_path"]
+
+ def create(self, state):
+ state.sync = {"remote_url": state.kwargs["remote_url"],
+ "branch": state.kwargs["branch"],
+ "path": state.kwargs["sync_path"]}
+
+ state.paths = state.kwargs["test_paths"]
+ state.tests_path = state.paths["/"].tests_path
+ state.metadata_path = state.paths["/"].metadata_path
+
+ assert os.path.isabs(state.tests_path)
+
+
+class LoadTrees(Step):
+ """Step for creating a Tree for the local copy and a GitTree for the
+ upstream sync."""
+
+ provides = ["local_tree", "sync_tree"]
+
+ def create(self, state):
+ if os.path.exists(state.sync["path"]):
+ sync_tree = GitTree(root=state.sync["path"])
+ else:
+ sync_tree = None
+
+ if GitTree.is_type():
+ local_tree = GitTree()
+ elif HgTree.is_type():
+ local_tree = HgTree()
+ else:
+ local_tree = NoVCSTree()
+
+ state.update({"local_tree": local_tree,
+ "sync_tree": sync_tree})
+
+
+class SyncFromUpstream(Step):
+ """Step that synchronises a local copy of the code with upstream."""
+
+ def create(self, state):
+ if not state.kwargs["sync"]:
+ return
+
+ if not state.sync_tree:
+ os.mkdir(state.sync["path"])
+ state.sync_tree = GitTree(root=state.sync["path"])
+
+ kwargs = state.kwargs
+ with state.push(["sync", "paths", "metadata_path", "tests_path", "local_tree",
+ "sync_tree"]):
+ state.target_rev = kwargs["rev"]
+ state.patch = kwargs["patch"]
+ state.suite_name = kwargs["suite_name"]
+ state.path_excludes = kwargs["exclude"]
+ state.path_includes = kwargs["include"]
+ runner = SyncFromUpstreamRunner(self.logger, state)
+ runner.run()
+
+
+class UpdateMetadata(Step):
+ """Update the expectation metadata from a set of run logs"""
+
+ def create(self, state):
+ if not state.kwargs["run_log"]:
+ return
+
+ kwargs = state.kwargs
+ with state.push(["local_tree", "sync_tree", "paths", "serve_root"]):
+ state.run_log = kwargs["run_log"]
+ state.disable_intermittent = kwargs["disable_intermittent"]
+ state.update_intermittent = kwargs["update_intermittent"]
+ state.remove_intermittent = kwargs["remove_intermittent"]
+ state.patch = kwargs["patch"]
+ state.suite_name = kwargs["suite_name"]
+ state.product = kwargs["product"]
+ state.config = kwargs["config"]
+ state.full_update = kwargs["full"]
+ state.extra_properties = kwargs["extra_property"]
+ runner = MetadataUpdateRunner(self.logger, state)
+ runner.run()
+
+
+class RemoveObsolete(Step):
+ """Remove metadata files that don't corespond to an existing test file"""
+
+ def create(self, state):
+ if not state.kwargs["remove_obsolete"]:
+ return
+
+ paths = state.kwargs["test_paths"]
+ state.tests_path = state.paths["/"].tests_path
+ state.metadata_path = state.paths["/"].metadata_path
+
+ for url_paths in paths.values():
+ tests_path = url_paths.tests_path
+ metadata_path = url_paths.metadata_path
+ for dirpath, dirnames, filenames in os.walk(metadata_path):
+ for filename in filenames:
+ if filename == "__dir__.ini":
+ continue
+ if filename.endswith(".ini"):
+ full_path = os.path.join(dirpath, filename)
+ rel_path = os.path.relpath(full_path, metadata_path)
+ test_path = os.path.join(tests_path, rel_path[:-4])
+ if not os.path.exists(test_path):
+ os.unlink(full_path)
+
+
+class UpdateRunner(StepRunner):
+ """Runner for doing an overall update."""
+ steps = [LoadConfig,
+ LoadTrees,
+ SyncFromUpstream,
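+# Illustrative call (assumed defaults; the actual caller may pass different
+# formatter defaults):
+#   logger = setup(kwargs, {"mach": sys.stdout})
+# This pops every log_* entry from kwargs and returns the structured logger.
+
+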
+ RemoveObsolete,
+ UpdateMetadata]
+
+
+class WPTUpdate:
+ def __init__(self, logger, runner_cls=UpdateRunner, **kwargs):
+ """Object that controls the running of a whole wptupdate.
+
+ :param runner_cls: Runner subclass holding the overall list of
+ steps to run.
+ :param kwargs: Command line arguments
+ """
+ self.runner_cls = runner_cls
+ self.serve_root = kwargs["test_paths"]["/"].tests_path
+
+ if not kwargs["sync"]:
+ setup_paths(self.serve_root)
+ else:
+            # If the sync path doesn't exist yet, defer this until it does
+            if os.path.exists(kwargs["sync_path"]):
+                setup_paths(kwargs["sync_path"])
+
+ if kwargs.get("store_state", False):
+ self.state = SavedState(logger)
+ else:
+ self.state = UnsavedState(logger)
+ self.kwargs = kwargs
+ self.logger = logger
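+# Minimal usage sketch (assumed wiring, not taken from this module): downgrade
+# ERROR messages from a noisy component to WARNING before they reach the real
+# handler.
+#   handler = LogLevelRewriter(inner=real_handler,
+#                              from_levels=["error"],
+#                              to_level="warning")
+#   logger.add_handler(handler)
+
+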
+
+ def run(self, **kwargs):
+ if self.kwargs["abort"]:
+ self.abort()
+ return exit_clean
+
+ if not self.kwargs["continue"] and not self.state.is_empty():
+ self.logger.critical("Found existing state. Run with --continue to resume or --abort to clear state")
+ return exit_unclean
+
+ if self.kwargs["continue"]:
+ if self.state.is_empty():
+ self.logger.error("No sync in progress?")
+ return exit_clean
+
+ self.kwargs = self.state.kwargs
+ else:
+ self.state.kwargs = self.kwargs
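+# Minimal usage sketch (assumed wiring): record whether anything at CRITICAL
+# severity was logged during a run.
+#   watcher = LoggedAboveLevelHandler("CRITICAL")
+#   logger.add_handler(watcher)
+#   ...
+#   if watcher.has_log:
+#       handle_critical_failure()  # hypothetical caller
+
+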
+
+ self.state.serve_root = self.serve_root
+
+ update_runner = self.runner_cls(self.logger, self.state)
+ rv = update_runner.run()
+ if rv in (exit_clean, None):
+ self.state.clear()
+
+ return rv
+
+ def abort(self):
+ self.state.clear()
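+
+
+# Illustrative entry point (assumed caller, not defined in this file):
+#   from wptrunner import wptcommandline, wptlogging
+#   kwargs = wptcommandline.parse_args_update()
+#   logger = wptlogging.setup(kwargs, {"mach": sys.stdout})
+#   rv = WPTUpdate(logger, **kwargs).run()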
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/vcs.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/vcs.py
new file mode 100644
index 0000000000..790fdc9833
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/vcs.py
@@ -0,0 +1,67 @@
+# mypy: allow-untyped-defs
+
+import subprocess
+from functools import partial
+from typing import Callable
+
+from mozlog import get_default_logger
+
+from wptserve.utils import isomorphic_decode
+
+logger = None
+
+def vcs(bin_name: str) -> Callable[..., None]:
+ def inner(command, *args, **kwargs):
+ global logger
+
+ if logger is None:
+ logger = get_default_logger("vcs")
+
+ repo = kwargs.pop("repo", None)
+ log_error = kwargs.pop("log_error", True)
+ stdout = kwargs.pop("stdout", None)
+ stdin = kwargs.pop("stdin", None)
+ if kwargs:
+ raise TypeError(kwargs)
+
+ args = list(args)
+
+ proc_kwargs = {}
+ if repo is not None:
+            # Make sure `cwd` is a str so this works across Python 3 versions.
+ # Before 3.8, bytes were not accepted on Windows for `cwd`.
+ proc_kwargs["cwd"] = isomorphic_decode(repo)
+ if stdout is not None:
+ proc_kwargs["stdout"] = stdout
+ if stdin is not None:
+ proc_kwargs["stdin"] = stdin
+
+ command_line = [bin_name, command] + args
+ logger.debug(" ".join(command_line))
+ try:
+ func = subprocess.check_output if not stdout else subprocess.check_call
+ return func(command_line, stderr=subprocess.STDOUT, **proc_kwargs)
+ except OSError as e:
+ if log_error:
+ logger.error(e)
+ raise
+ except subprocess.CalledProcessError as e:
+ if log_error:
+ logger.error(e.output)
+ raise
+ return inner
+
+git = vcs("git")
+hg = vcs("hg")
+
+
+def bind_to_repo(vcs_func, repo, log_error=True):
+ return partial(vcs_func, repo=repo, log_error=log_error)
+
+
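+# Illustrative usage (assumed caller): binding fixes the repository and the
+# error-logging option so later calls only pass the git/hg arguments, e.g.
+#   repo_git = bind_to_repo(git, "/path/to/checkout")
+#   status = repo_git("status", "-z")  # stdout is returned as bytes
+
+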
+def is_git_root(path, log_error=True):
+ try:
+ rv = git("rev-parse", "--show-cdup", repo=path, log_error=log_error)
+ except subprocess.CalledProcessError:
+ return False
+ return rv == b"\n"
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptcommandline.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptcommandline.py
new file mode 100644
index 0000000000..ded5243113
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptcommandline.py
@@ -0,0 +1,831 @@
+# mypy: allow-untyped-defs
+
+import argparse
+import os
+import sys
+from collections import OrderedDict
+from shutil import which
+from datetime import timedelta
+from typing import Mapping, Optional
+
+from . import config
+from . import products
+from . import wpttest
+from .formatters import chromium, wptreport, wptscreenshot
+
+
+def abs_path(path):
+ return os.path.abspath(os.path.expanduser(path))
+
+
+def url_or_path(path):
+ from urllib.parse import urlparse
+
+ parsed = urlparse(path)
+ if len(parsed.scheme) > 2:
+ return path
+ else:
+ return abs_path(path)
+
+
+def require_arg(kwargs, name, value_func=None):
+ if value_func is None:
+ value_func = lambda x: x is not None
+
+ if name not in kwargs or not value_func(kwargs[name]):
+ print("Missing required argument %s" % name, file=sys.stderr)
+ sys.exit(1)
+
+
+def create_parser(product_choices=None):
+ from mozlog import commandline
+
+ if product_choices is None:
+ product_choices = products.product_list
+
+ parser = argparse.ArgumentParser(description="""Runner for web-platform-tests tests.""",
+ usage="""%(prog)s [OPTION]... [TEST]...
+
+TEST is either the full path to a test file to run, or the URL of a test excluding
+scheme host and port.""")
+ parser.add_argument("--manifest-update", action="store_true", default=None,
+ help="Regenerate the test manifest.")
+ parser.add_argument("--no-manifest-update", action="store_false", dest="manifest_update",
+ help="Prevent regeneration of the test manifest.")
+ parser.add_argument("--manifest-download", action="store_true", default=None,
+ help="Attempt to download a preexisting manifest when updating.")
+ parser.add_argument("--no-manifest-download", action="store_false", dest="manifest_download",
+ help="Prevent download of the test manifest.")
+
+ parser.add_argument("--timeout-multiplier", action="store", type=float, default=None,
+ help="Multiplier relative to standard test timeout to use")
+ parser.add_argument("--run-by-dir", type=int, nargs="?", default=False,
+                        help="Split run into groups by directories. With a parameter, "
+                        "limit the depth of splits, e.g. --run-by-dir=1 to split by top-level "
+                        "directory")
+ parser.add_argument("-f", "--fully-parallel", action='store_true',
+                        help='Run every test in a separate group for full parallelism.')
+ parser.add_argument("--processes", action="store", type=int, default=None,
+ help="Number of simultaneous processes to use")
+ parser.add_argument("--max-restarts", action="store", type=int, default=5,
+ help="Maximum number of browser restart retries")
+
+ parser.add_argument("--no-capture-stdio", action="store_true", default=False,
+ help="Don't capture stdio and write to logging")
+ parser.add_argument("--no-fail-on-unexpected", action="store_false",
+ default=True,
+ dest="fail_on_unexpected",
+ help="Exit with status code 0 when test expectations are violated")
+ parser.add_argument("--no-fail-on-unexpected-pass", action="store_false",
+ default=True,
+ dest="fail_on_unexpected_pass",
+ help="Exit with status code 0 when all unexpected results are PASS")
+ parser.add_argument("--no-restart-on-new-group", action="store_false",
+ default=True,
+ dest="restart_on_new_group",
+                        help="Don't restart the test runner when starting a new test group")
+
+ mode_group = parser.add_argument_group("Mode")
+ mode_group.add_argument("--list-test-groups", action="store_true",
+ default=False,
+ help="List the top level directories containing tests that will run.")
+ mode_group.add_argument("--list-disabled", action="store_true",
+ default=False,
+ help="List the tests that are disabled on the current platform")
+ mode_group.add_argument("--list-tests", action="store_true",
+ default=False,
+ help="List all tests that will run")
+ stability_group = mode_group.add_mutually_exclusive_group()
+ stability_group.add_argument("--verify", action="store_true",
+ default=False,
+ help="Run a stability check on the selected tests")
+ stability_group.add_argument("--stability", action="store_true",
+ default=False,
+ help=argparse.SUPPRESS)
+ mode_group.add_argument("--verify-log-full", action="store_true",
+ default=False,
+ help="Output per-iteration test results when running verify")
+ mode_group.add_argument("--verify-repeat-loop", action="store",
+ default=10,
+ help="Number of iterations for a run that reloads each test without restart.",
+ type=int)
+ mode_group.add_argument("--verify-repeat-restart", action="store",
+ default=5,
+                            help="Number of iterations for a run that restarts the runner between each iteration.",
+ type=int)
+ chaos_mode_group = mode_group.add_mutually_exclusive_group()
+ chaos_mode_group.add_argument("--verify-no-chaos-mode", action="store_false",
+ default=True,
+ dest="verify_chaos_mode",
+ help="Disable chaos mode when running on Firefox")
+ chaos_mode_group.add_argument("--verify-chaos-mode", action="store_true",
+ default=True,
+ dest="verify_chaos_mode",
+ help="Enable chaos mode when running on Firefox")
+ mode_group.add_argument("--verify-max-time", action="store",
+ default=None,
+ help="The maximum number of minutes for the job to run",
+ type=lambda x: timedelta(minutes=float(x)))
+ mode_group.add_argument("--repeat-max-time", action="store",
+ default=100,
+ help="The maximum number of minutes for the test suite to attempt repeat runs",
+ type=int)
+ output_results_group = mode_group.add_mutually_exclusive_group()
+ output_results_group.add_argument("--verify-no-output-results", action="store_false",
+ dest="verify_output_results",
+ default=True,
+                                      help="Don't print individual test results and messages")
+ output_results_group.add_argument("--verify-output-results", action="store_true",
+ dest="verify_output_results",
+ default=True,
+                                      help="Print individual test results and messages")
+
+ test_selection_group = parser.add_argument_group("Test Selection")
+ test_selection_group.add_argument("--test-types", action="store",
+ nargs="*", default=wpttest.enabled_tests,
+ choices=wpttest.enabled_tests,
+ help="Test types to run")
+ test_selection_group.add_argument("--subsuite-file", action="store",
+ help="Path to JSON file containing subsuite configuration")
+ # TODO use an empty string argument for the default subsuite
+ test_selection_group.add_argument("--subsuite", action="append", dest="subsuites",
+ help="Subsuite names to run. Runs all subsuites when omitted.")
+ test_selection_group.add_argument("--include", action="append",
+ help="URL prefix to include")
+ test_selection_group.add_argument("--include-file", action="store",
+ help="A file listing URL prefix for tests")
+ test_selection_group.add_argument("--exclude", action="append",
+ help="URL prefix to exclude")
+ test_selection_group.add_argument("--include-manifest", type=abs_path,
+ help="Path to manifest listing tests to include")
+ test_selection_group.add_argument("--test-groups", dest="test_groups_file", type=abs_path,
+ help="Path to json file containing a mapping {group_name: [test_ids]}")
+ test_selection_group.add_argument("--skip-timeout", action="store_true",
+ help="Skip tests that are expected to time out")
+ test_selection_group.add_argument("--skip-crash", action="store_true",
+ help="Skip tests that are expected to crash")
+ test_selection_group.add_argument("--skip-implementation-status",
+ action="append",
+ choices=["not-implementing", "backlog", "implementing"],
+ help="Skip tests that have the given implementation status")
+ # TODO(bashi): Remove this when WebTransport over HTTP/3 server is enabled by default.
+ test_selection_group.add_argument("--enable-webtransport-h3",
+ action="store_true",
+ dest="enable_webtransport_h3",
+ default=True,
+ help="Enable tests that require WebTransport over HTTP/3 server (default: true)")
+ test_selection_group.add_argument("--no-enable-webtransport-h3", action="store_false", dest="enable_webtransport_h3",
+ help="Do not enable WebTransport tests on experimental channels")
+ test_selection_group.add_argument("--tag", action="append", dest="tags",
+ help="Labels applied to tests to include in the run. "
+                                      "Labels starting with dir: are equivalent to top-level directories.")
+ test_selection_group.add_argument("--exclude-tag", action="append", dest="exclude_tags",
+                                      help="Labels applied to tests to exclude from the run. Takes precedence over `--tag`. "
+                                      "Labels starting with dir: are equivalent to top-level directories.")
+ test_selection_group.add_argument("--default-exclude", action="store_true",
+ default=False,
+ help="Only run the tests explicitly given in arguments. "
+ "No tests will run if the list is empty, and the "
+ "program will exit with status code 0.")
+
+ debugging_group = parser.add_argument_group("Debugging")
+ debugging_group.add_argument('--debugger', const="__default__", nargs="?",
+ help="run under a debugger, e.g. gdb or valgrind")
+ debugging_group.add_argument('--debugger-args', help="arguments to the debugger")
+ debugging_group.add_argument("--rerun", action="store", type=int, default=1,
+                                 help="Number of times to re-run each test without restarts")
+ debugging_group.add_argument("--repeat", action="store", type=int, default=1,
+ help="Number of times to run the tests, restarting between each run")
+ debugging_group.add_argument("--repeat-until-unexpected", action="store_true", default=None,
+ help="Run tests in a loop until one returns an unexpected result")
+ debugging_group.add_argument('--retry-unexpected', type=int, default=0,
+ help=('Maximum number of times to retry unexpected tests. '
+                                       'A test is retried until it gets one of its expected statuses, '
+ 'or until it exhausts the maximum number of retries.'))
+ debugging_group.add_argument('--pause-after-test', action="store_true", default=None,
+ help="Halt the test runner after each test (this happens by default if only a single test is run)")
+ debugging_group.add_argument('--no-pause-after-test', dest="pause_after_test", action="store_false",
+ help="Don't halt the test runner irrespective of the number of tests run")
+ debugging_group.add_argument('--debug-test', dest="debug_test", action="store_true",
+ help="Run tests with additional debugging features enabled")
+
+ debugging_group.add_argument('--pause-on-unexpected', action="store_true",
+ help="Halt the test runner when an unexpected result is encountered")
+ debugging_group.add_argument('--no-restart-on-unexpected', dest="restart_on_unexpected",
+ default=True, action="store_false",
+ help="Don't restart on an unexpected result")
+
+ debugging_group.add_argument("--symbols-path", action="store", type=url_or_path,
+ help="Path or url to symbols file used to analyse crash minidumps.")
+ debugging_group.add_argument("--stackwalk-binary", action="store", type=abs_path,
+ help="Path to stackwalker program used to analyse minidumps.")
+ debugging_group.add_argument("--pdb", action="store_true",
+ help="Drop into pdb on python exception")
+
+ android_group = parser.add_argument_group("Android specific arguments")
+ android_group.add_argument("--adb-binary", action="store",
+ help="Path to adb binary to use")
+ android_group.add_argument("--package-name", action="store",
+ help="Android package name to run tests against")
+ android_group.add_argument("--keep-app-data-directory", action="store_true",
+ help="Don't delete the app data directory")
+ android_group.add_argument("--device-serial", action="append", default=[],
+ help="Running Android instances to connect to, if not emulator-5554")
+
+ config_group = parser.add_argument_group("Configuration")
+ config_group.add_argument("--binary", action="store",
+ type=abs_path, help="Desktop binary to run tests against")
+ config_group.add_argument('--binary-arg',
+ default=[], action="append", dest="binary_args",
+ help="Extra argument for the binary")
+ config_group.add_argument("--webdriver-binary", action="store", metavar="BINARY",
+ type=abs_path, help="WebDriver server binary to use")
+ config_group.add_argument('--webdriver-arg',
+ default=[], action="append", dest="webdriver_args",
+ help="Extra argument for the WebDriver binary")
+ config_group.add_argument("--metadata", action="store", type=abs_path, dest="metadata_root",
+                              help="Path to root directory containing test metadata")
+ config_group.add_argument("--tests", action="store", type=abs_path, dest="tests_root",
+                              help="Path to root directory containing test files")
+ config_group.add_argument("--manifest", action="store", type=abs_path, dest="manifest_path",
+ help="Path to test manifest (default is ${metadata_root}/MANIFEST.json)")
+ config_group.add_argument("--run-info", action="store", type=abs_path,
+ help="Path to directory containing extra json files to add to run info")
+ config_group.add_argument("--product", action="store", choices=product_choices,
+ default=None, help="Browser against which to run tests")
+ config_group.add_argument("--browser-version", action="store",
+ default=None, help="Informative string detailing the browser "
+ "release version. This is included in the run_info data.")
+ config_group.add_argument("--browser-channel", action="store",
+ default=None, help="Informative string detailing the browser "
+ "release channel. This is included in the run_info data.")
+ config_group.add_argument("--config", action="store", type=abs_path, dest="config",
+ help="Path to config file")
+ config_group.add_argument("--install-fonts", action="store_true",
+ default=None,
+ help="Install additional system fonts on your system")
+ config_group.add_argument("--no-install-fonts", dest="install_fonts", action="store_false",
+ help="Do not install additional system fonts on your system")
+ config_group.add_argument("--font-dir", action="store", type=abs_path, dest="font_dir",
+ help="Path to local font installation directory", default=None)
+ config_group.add_argument("--inject-script", action="store", dest="inject_script", default=None,
+ help="Path to script file to inject, useful for testing polyfills.")
+ config_group.add_argument("--headless", action="store_true",
+ help="Run browser in headless mode", default=None)
+ config_group.add_argument("--no-headless", action="store_false", dest="headless",
+ help="Don't run browser in headless mode")
+ config_group.add_argument("--instrument-to-file", action="store",
+ help="Path to write instrumentation logs to")
+ config_group.add_argument("--suppress-handler-traceback", action="store_true", default=None,
+ help="Don't write the stacktrace for exceptions in server handlers")
+ config_group.add_argument("--no-suppress-handler-traceback", action="store_false",
+                              dest="suppress_handler_traceback",
+ help="Write the stacktrace for exceptions in server handlers")
+
+ build_type = parser.add_mutually_exclusive_group()
+ build_type.add_argument("--debug-build", dest="debug", action="store_true",
+ default=None,
+ help="Build is a debug build (overrides any mozinfo file)")
+ build_type.add_argument("--release-build", dest="debug", action="store_false",
+ default=None,
+ help="Build is a release (overrides any mozinfo file)")
+
+ chunking_group = parser.add_argument_group("Test Chunking")
+ chunking_group.add_argument("--total-chunks", action="store", type=int, default=1,
+ help="Total number of chunks to use")
+ chunking_group.add_argument("--this-chunk", action="store", type=int, default=1,
+ help="Chunk number to run")
+ chunking_group.add_argument("--chunk-type", action="store",
+ choices=["none", "hash", "id_hash", "dir_hash"],
+ default=None, help="Chunking type to use")
+
+ ssl_group = parser.add_argument_group("SSL/TLS")
+ ssl_group.add_argument("--ssl-type", action="store", default=None,
+ choices=["openssl", "pregenerated", "none"],
+ help="Type of ssl support to enable (running without ssl may lead to spurious errors)")
+
+ ssl_group.add_argument("--openssl-binary", action="store",
+ help="Path to openssl binary", default="openssl")
+ ssl_group.add_argument("--certutil-binary", action="store",
+ help="Path to certutil binary for use with Firefox + ssl")
+
+ ssl_group.add_argument("--ca-cert-path", action="store", type=abs_path,
+ help="Path to ca certificate when using pregenerated ssl certificates")
+ ssl_group.add_argument("--host-key-path", action="store", type=abs_path,
+ help="Path to host private key when using pregenerated ssl certificates")
+ ssl_group.add_argument("--host-cert-path", action="store", type=abs_path,
+ help="Path to host certificate when using pregenerated ssl certificates")
+
+ gecko_group = parser.add_argument_group("Gecko-specific")
+ gecko_group.add_argument("--prefs-root", dest="prefs_root", action="store", type=abs_path,
+ help="Path to the folder containing browser prefs")
+ gecko_group.add_argument("--preload-browser", dest="preload_browser", action="store_true",
+ default=None, help="Preload a gecko instance for faster restarts")
+ gecko_group.add_argument("--no-preload-browser", dest="preload_browser", action="store_false",
+ default=None, help="Don't preload a gecko instance for faster restarts")
+ gecko_group.add_argument("--disable-e10s", dest="gecko_e10s", action="store_false", default=True,
+ help="Run tests without electrolysis preferences")
+ gecko_group.add_argument("--disable-fission", dest="disable_fission", action="store_true", default=False,
+ help="Disable fission in Gecko.")
+ gecko_group.add_argument("--stackfix-dir", dest="stackfix_dir", action="store",
+ help="Path to directory containing assertion stack fixing scripts")
+ gecko_group.add_argument("--specialpowers-path", action="store",
+ help="Path to specialPowers extension xpi file")
+ gecko_group.add_argument("--setpref", dest="extra_prefs", action='append',
+ default=[], metavar="PREF=VALUE",
+ help="Defines an extra user preference (overrides those in prefs_root)")
+ gecko_group.add_argument("--leak-check", dest="leak_check", action="store_true", default=None,
+ help="Enable leak checking (enabled by default for debug builds, "
+ "silently ignored for opt, mobile)")
+ gecko_group.add_argument("--no-leak-check", dest="leak_check", action="store_false", default=None,
+ help="Disable leak checking")
+ gecko_group.add_argument("--reftest-internal", dest="reftest_internal", action="store_true",
+ default=None, help="Enable reftest runner implemented inside Marionette")
+ gecko_group.add_argument("--reftest-external", dest="reftest_internal", action="store_false",
+ help="Disable reftest runner implemented inside Marionette")
+ gecko_group.add_argument("--reftest-screenshot", dest="reftest_screenshot", action="store",
+ choices=["always", "fail", "unexpected"], default=None,
+ help="With --reftest-internal, when to take a screenshot")
+ gecko_group.add_argument("--chaos", dest="chaos_mode_flags", action="store",
+ nargs="?", const=0xFFFFFFFF, type=lambda x: int(x, 16),
+ help="Enable chaos mode with the specified feature flag "
+ "(see http://searchfox.org/mozilla-central/source/mfbt/ChaosMode.h for "
+ "details). If no value is supplied, all features are activated")
+
+ gecko_view_group = parser.add_argument_group("GeckoView-specific")
+ gecko_view_group.add_argument("--setenv", dest="env", action="append", default=[],
+ help="Set target environment variable, like FOO=BAR")
+
+ servo_group = parser.add_argument_group("Servo-specific")
+ servo_group.add_argument("--user-stylesheet",
+ default=[], action="append", dest="user_stylesheets",
+ help="Inject a user CSS stylesheet into every test.")
+
+ chrome_group = parser.add_argument_group("Chrome-specific")
+ chrome_group.add_argument("--enable-mojojs", action="store_true", default=False,
+                              help="Enable MojoJS for testing. Note that this flag is usually "
+ "enabled automatically by `wpt run`, if it succeeds in downloading "
+ "the right version of mojojs.zip or if --mojojs-path is specified.")
+ chrome_group.add_argument("--mojojs-path",
+ help="Path to mojojs gen/ directory. If it is not specified, `wpt run` "
+ "will download and extract mojojs.zip into _venv2/mojojs/gen.")
+ chrome_group.add_argument("--enable-swiftshader", action="store_true", default=False,
+ help="Enable SwiftShader for CPU-based 3D graphics. This can be used "
+ "in environments with no hardware GPU available.")
+ chrome_group.add_argument("--enable-experimental", action="store_true", dest="enable_experimental",
+ help="Enable --enable-experimental-web-platform-features flag", default=None)
+ chrome_group.add_argument("--no-enable-experimental", action="store_false", dest="enable_experimental",
+ help="Do not enable --enable-experimental-web-platform-features flag "
+ "on experimental channels")
+ chrome_group.add_argument(
+ "--enable-sanitizer",
+ action="store_true",
+ dest="sanitizer_enabled",
+ help="Only alert on sanitizer-related errors and crashes.")
+ chrome_group.add_argument(
+ "--reuse-window",
+ action="store_true",
+ help=("Reuse a window across `testharness.js` tests where possible, "
+ "which can speed up testing. Also useful for ensuring that the "
+ "renderer process has a stable PID for a debugger to attach to."))
+
+ sauce_group = parser.add_argument_group("Sauce Labs-specific")
+ sauce_group.add_argument("--sauce-browser", dest="sauce_browser",
+ help="Sauce Labs browser name")
+ sauce_group.add_argument("--sauce-platform", dest="sauce_platform",
+ help="Sauce Labs OS platform")
+ sauce_group.add_argument("--sauce-version", dest="sauce_version",
+ help="Sauce Labs browser version")
+ sauce_group.add_argument("--sauce-build", dest="sauce_build",
+ help="Sauce Labs build identifier")
+ sauce_group.add_argument("--sauce-tags", dest="sauce_tags", nargs="*",
+ help="Sauce Labs identifying tag", default=[])
+ sauce_group.add_argument("--sauce-tunnel-id", dest="sauce_tunnel_id",
+ help="Sauce Connect tunnel identifier")
+ sauce_group.add_argument("--sauce-user", dest="sauce_user",
+ help="Sauce Labs user name")
+ sauce_group.add_argument("--sauce-key", dest="sauce_key",
+ default=os.environ.get("SAUCE_ACCESS_KEY"),
+ help="Sauce Labs access key")
+ sauce_group.add_argument("--sauce-connect-binary",
+ dest="sauce_connect_binary",
+ help="Path to Sauce Connect binary")
+ sauce_group.add_argument("--sauce-init-timeout", action="store",
+ type=int, default=30,
+ help="Number of seconds to wait for Sauce "
+ "Connect tunnel to be available before "
+ "aborting")
+ sauce_group.add_argument("--sauce-connect-arg", action="append",
+ default=[], dest="sauce_connect_args",
+ help="Command-line argument to forward to the "
+ "Sauce Connect binary (repeatable)")
+
+ taskcluster_group = parser.add_argument_group("Taskcluster-specific")
+ taskcluster_group.add_argument("--github-checks-text-file",
+ type=str,
+ help="Path to GitHub checks output file")
+
+ webkit_group = parser.add_argument_group("WebKit-specific")
+ webkit_group.add_argument("--webkit-port", dest="webkit_port",
+ help="WebKit port")
+
+ safari_group = parser.add_argument_group("Safari-specific")
+ safari_group.add_argument("--kill-safari", dest="kill_safari", action="store_true", default=False,
+ help="Kill Safari when stopping the browser")
+
+ parser.add_argument("test_list", nargs="*",
+ help="List of URLs for tests to run, or paths including tests to run. "
+ "(equivalent to --include)")
+
+ def screenshot_api_wrapper(formatter, api):
+ formatter.api = api
+ return formatter
+
+ commandline.fmt_options["api"] = (screenshot_api_wrapper,
+ "Cache API (default: %s)" % wptscreenshot.DEFAULT_API,
+ {"wptscreenshot"}, "store")
+
+ commandline.log_formatters["chromium"] = (chromium.ChromiumFormatter, "Chromium Layout Tests format")
+ commandline.log_formatters["wptreport"] = (wptreport.WptreportFormatter, "wptreport format")
+ commandline.log_formatters["wptscreenshot"] = (wptscreenshot.WptscreenshotFormatter, "wpt.fyi screenshots")
+
+ commandline.add_logging_group(parser)
+ return parser
+
+
+def set_from_config(kwargs):
+ if kwargs["config"] is None:
+ config_path = config.path()
+ else:
+ config_path = kwargs["config"]
+
+ kwargs["config_path"] = config_path
+
+ kwargs["config"] = config.read(kwargs["config_path"])
+
+ kwargs["product"] = products.Product(kwargs["config"], kwargs["product"])
+
+ keys = {"paths": [("prefs", "prefs_root", True),
+ ("run_info", "run_info", True)],
+ "web-platform-tests": [("remote_url", "remote_url", False),
+ ("branch", "branch", False),
+ ("sync_path", "sync_path", True)],
+ "SSL": [("openssl_binary", "openssl_binary", True),
+ ("certutil_binary", "certutil_binary", True),
+ ("ca_cert_path", "ca_cert_path", True),
+ ("host_cert_path", "host_cert_path", True),
+ ("host_key_path", "host_key_path", True)]}
+
+ for section, values in keys.items():
+ for config_value, kw_value, is_path in values:
+ if kw_value in kwargs and kwargs[kw_value] is None:
+ if not is_path:
+ new_value = kwargs["config"].get(section, config.ConfigDict({})).get(config_value)
+ else:
+ new_value = kwargs["config"].get(section, config.ConfigDict({})).get_path(config_value)
+ kwargs[kw_value] = new_value
+
+ test_paths = get_test_paths(kwargs["config"],
+ kwargs["tests_root"],
+ kwargs["metadata_root"],
+ kwargs["manifest_path"])
+ check_paths(test_paths)
+ kwargs["test_paths"] = test_paths
+
+ kwargs["suite_name"] = kwargs["config"].get("web-platform-tests", {}).get("name", "web-platform-tests")
+
+
+class TestRoot:
+ def __init__(self, tests_path: str, metadata_path: str, manifest_path: Optional[str] = None):
+ self.tests_path = tests_path
+ self.metadata_path = metadata_path
+ if manifest_path is None:
+ manifest_path = os.path.join(metadata_path, "MANIFEST.json")
+
+ self.manifest_path = manifest_path
+
+
+TestPaths = Mapping[str, TestRoot]
+
+
+def get_test_paths(config: Mapping[str, config.ConfigDict],
+ tests_path_override: Optional[str] = None,
+ metadata_path_override: Optional[str] = None,
+ manifest_path_override: Optional[str] = None) -> TestPaths:
+ # Set up test_paths
+ test_paths = OrderedDict()
+
+ for section in config.keys():
+ if section.startswith("manifest:"):
+ manifest_opts = config[section]
+ url_base = manifest_opts.get("url_base", "/")
+ tests_path = manifest_opts.get_path("tests")
+ if tests_path is None:
+ raise ValueError(f"Missing `tests` key in configuration with url_base {url_base}")
+ metadata_path = manifest_opts.get_path("metadata")
+ if metadata_path is None:
+ raise ValueError(f"Missing `metadata` key in configuration with url_base {url_base}")
+ manifest_path = manifest_opts.get_path("manifest")
+
+ if url_base == "/":
+ if tests_path_override is not None:
+ tests_path = tests_path_override
+ if metadata_path_override is not None:
+ metadata_path = metadata_path_override
+ if manifest_path_override is not None:
+ manifest_path = manifest_path_override
+
+ test_paths[url_base] = TestRoot(tests_path, metadata_path, manifest_path)
+
+ if "/" not in test_paths:
+ if tests_path_override is None or metadata_path_override is None:
+ raise ValueError("No ini file configures the root url, "
+ "so --tests and --metadata arguments are mandatory")
+ test_paths["/"] = TestRoot(tests_path_override,
+ metadata_path_override,
+ manifest_path_override)
+
+ return test_paths
+
+
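+# Illustrative ini layout (assumed; section and path names are placeholders):
+# each "manifest:" section defines one test root that get_test_paths() turns
+# into a TestRoot keyed by its url_base, e.g.
+#   [manifest:default]
+#   tests = /path/to/web-platform-tests
+#   metadata = /path/to/metadata
+#   url_base = /
+
+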
+def exe_path(name: Optional[str]) -> Optional[str]:
+ if name is None:
+ return None
+
+ return which(name)
+
+
+def check_paths(test_paths: TestPaths) -> None:
+ for test_root in test_paths.values():
+ for key in ["tests_path", "metadata_path", "manifest_path"]:
+ name = key.split("_", 1)[0]
+ path = getattr(test_root, key)
+
+ if name == "manifest":
+ # For the manifest we can create it later, so just check the path
+ # actually exists
+ path = os.path.dirname(path)
+
+ if not os.path.exists(path):
+ print(f"Fatal: {name} path {path} does not exist")
+ sys.exit(1)
+
+ if not os.path.isdir(path):
+ print(f"Fatal: {name} path {path} is not a directory")
+ sys.exit(1)
+
+
+def check_args(kwargs):
+ set_from_config(kwargs)
+
+ if kwargs["manifest_update"] is None:
+ kwargs["manifest_update"] = True
+
+ if "sauce" in kwargs["product"].name:
+ kwargs["pause_after_test"] = False
+
+ if kwargs["test_list"]:
+ if kwargs["include"] is not None:
+ kwargs["include"].extend(kwargs["test_list"])
+ else:
+ kwargs["include"] = kwargs["test_list"]
+
+ if kwargs["run_info"] is None:
+ kwargs["run_info"] = kwargs["config_path"]
+
+ if kwargs["this_chunk"] > 1:
+ require_arg(kwargs, "total_chunks", lambda x: x >= kwargs["this_chunk"])
+
+ if kwargs["chunk_type"] is None:
+ if kwargs["total_chunks"] > 1:
+ kwargs["chunk_type"] = "dir_hash"
+ else:
+ kwargs["chunk_type"] = "none"
+
+ if sum([
+ kwargs["test_groups_file"] is not None,
+ kwargs["run_by_dir"] is not False,
+ kwargs["fully_parallel"],
+ ]) > 1:
+        print('At most one of --test-groups, --run-by-dir, --fully-parallel may be passed')
+ sys.exit(1)
+
+ if (kwargs["test_groups_file"] is not None and
+ not os.path.exists(kwargs["test_groups_file"])):
+ print("--test-groups file %s not found" % kwargs["test_groups_file"])
+ sys.exit(1)
+
+ # When running on Android, the number of workers is decided by the number of
+ # emulators. Each worker will use one emulator to run the Android browser.
+ if kwargs["device_serial"]:
+ if kwargs["processes"] is None:
+ kwargs["processes"] = len(kwargs["device_serial"])
+ elif len(kwargs["device_serial"]) != kwargs["processes"]:
+ print("--processes does not match number of devices")
+ sys.exit(1)
+ elif len(set(kwargs["device_serial"])) != len(kwargs["device_serial"]):
+ print("Got duplicate --device-serial value")
+ sys.exit(1)
+
+ if kwargs["processes"] is None:
+ from manifest import mputil # type: ignore
+ kwargs["processes"] = mputil.max_parallelism() if kwargs["fully_parallel"] else 1
+
+ if kwargs["debugger"] is not None:
+ import mozdebug
+ if kwargs["debugger"] == "__default__":
+ kwargs["debugger"] = mozdebug.get_default_debugger_name()
+ debug_info = mozdebug.get_debugger_info(kwargs["debugger"],
+ kwargs["debugger_args"])
+ if debug_info and debug_info.interactive:
+ if kwargs["processes"] != 1:
+ kwargs["processes"] = 1
+ kwargs["no_capture_stdio"] = True
+ kwargs["debug_info"] = debug_info
+ else:
+ kwargs["debug_info"] = None
+
+ if kwargs["binary"] is not None:
+ if not os.path.exists(kwargs["binary"]):
+ print("Binary path %s does not exist" % kwargs["binary"], file=sys.stderr)
+ sys.exit(1)
+
+ if kwargs["ssl_type"] is None:
+ if None not in (kwargs["ca_cert_path"], kwargs["host_cert_path"], kwargs["host_key_path"]):
+ kwargs["ssl_type"] = "pregenerated"
+ elif exe_path(kwargs["openssl_binary"]) is not None:
+ kwargs["ssl_type"] = "openssl"
+ else:
+ kwargs["ssl_type"] = "none"
+
+ if kwargs["ssl_type"] == "pregenerated":
+ require_arg(kwargs, "ca_cert_path", lambda x:os.path.exists(x))
+ require_arg(kwargs, "host_cert_path", lambda x:os.path.exists(x))
+ require_arg(kwargs, "host_key_path", lambda x:os.path.exists(x))
+
+ elif kwargs["ssl_type"] == "openssl":
+ path = exe_path(kwargs["openssl_binary"])
+ if path is None:
+ print("openssl-binary argument missing or not a valid executable", file=sys.stderr)
+ sys.exit(1)
+ kwargs["openssl_binary"] = path
+
+ if kwargs["ssl_type"] != "none" and kwargs["product"].name == "firefox" and kwargs["certutil_binary"]:
+ path = exe_path(kwargs["certutil_binary"])
+ if path is None:
+ print("certutil-binary argument missing or not a valid executable", file=sys.stderr)
+ sys.exit(1)
+ kwargs["certutil_binary"] = path
+
+ if kwargs['extra_prefs']:
+ missing = any('=' not in prefarg for prefarg in kwargs['extra_prefs'])
+ if missing:
+ print("Preferences via --setpref must be in key=value format", file=sys.stderr)
+ sys.exit(1)
+ kwargs['extra_prefs'] = [tuple(prefarg.split('=', 1)) for prefarg in
+ kwargs['extra_prefs']]
+
+ if kwargs["reftest_internal"] is None:
+ kwargs["reftest_internal"] = True
+
+ if kwargs["reftest_screenshot"] is None:
+ kwargs["reftest_screenshot"] = "unexpected" if not kwargs["debug_test"] else "always"
+
+ if kwargs["preload_browser"] is None:
+ # Default to preloading a gecko instance if we're only running a single process
+ kwargs["preload_browser"] = kwargs["processes"] == 1
+
+ if kwargs["tags"] and kwargs["exclude_tags"]:
+ contradictory = set(kwargs["tags"]) & set(kwargs["exclude_tags"])
+ if contradictory:
+ print("contradictory tags found; exclusion will take precedence:", contradictory)
+
+ return kwargs
+
+
+def check_args_metadata_update(kwargs):
+ set_from_config(kwargs)
+
+ for item in kwargs["run_log"]:
+ if os.path.isdir(item):
+ print("Log file %s is a directory" % item, file=sys.stderr)
+ sys.exit(1)
+
+ if kwargs["properties_file"] is None and not kwargs["no_properties_file"]:
+ default_file = os.path.join(kwargs["test_paths"]["/"].metadata_path,
+ "update_properties.json")
+ if os.path.exists(default_file):
+ kwargs["properties_file"] = default_file
+
+ return kwargs
+
+
+def check_args_update(kwargs):
+ kwargs = check_args_metadata_update(kwargs)
+
+ if kwargs["patch"] is None:
+ kwargs["patch"] = kwargs["sync"]
+
+ return kwargs
+
+
+def create_parser_metadata_update(product_choices=None):
+ from mozlog.structured import commandline
+
+ from . import products
+
+ if product_choices is None:
+ product_choices = products.product_list
+
+ parser = argparse.ArgumentParser("web-platform-tests-update",
+ description="Update script for web-platform-tests tests.")
+ # This will be removed once all consumers are updated to the properties-file based system
+ parser.add_argument("--product", action="store", choices=product_choices,
+ default="firefox", help=argparse.SUPPRESS)
+ parser.add_argument("--config", action="store", type=abs_path, help="Path to config file")
+ parser.add_argument("--metadata", action="store", type=abs_path, dest="metadata_root",
+                        help="Path to the folder containing test metadata")
+ parser.add_argument("--tests", action="store", type=abs_path, dest="tests_root",
+                        help="Path to web-platform-tests")
+ parser.add_argument("--manifest", action="store", type=abs_path, dest="manifest_path",
+ help="Path to test manifest (default is ${metadata_root}/MANIFEST.json)")
+ parser.add_argument("--full", action="store_true", default=False,
+ help="For all tests that are updated, remove any existing conditions and missing subtests")
+ parser.add_argument("--disable-intermittent", nargs="?", action="store", const="unstable", default=None,
+ help=("Reason for disabling tests. When updating test results, disable tests that have "
+ "inconsistent results across many runs with the given reason."))
+ parser.add_argument("--update-intermittent", action="store_true", default=False,
+ help="Update test metadata with expected intermittent statuses.")
+ parser.add_argument("--remove-intermittent", action="store_true", default=False,
+ help="Remove obsolete intermittent statuses from expected statuses.")
+ parser.add_argument("--no-remove-obsolete", action="store_false", dest="remove_obsolete", default=True,
+ help="Don't remove metadata files that no longer correspond to a test file")
+ parser.add_argument("--properties-file",
+ help="""Path to a JSON file containing run_info properties to use in update. This must be of the form
+ {"properties": [<name>], "dependents": {<property name>: [<name>]}}""")
+ parser.add_argument("--no-properties-file", action="store_true",
+ help="Don't use the default properties file at "
+ "${metadata_root}/update_properties.json, even if it exists.")
+ parser.add_argument("--extra-property", action="append", default=[],
+ help="Extra property from run_info.json to use in metadata update.")
+ # TODO: Should make this required iff run=logfile
+ parser.add_argument("run_log", nargs="*", type=abs_path,
+ help="Log file from run of tests")
+ commandline.add_logging_group(parser)
+ return parser
+
+
+def create_parser_update(product_choices=None):
+ parser = create_parser_metadata_update(product_choices)
+ parser.add_argument("--sync-path", action="store", type=abs_path,
+                        help="Path to store git checkout of web-platform-tests during update")
+ parser.add_argument("--remote_url", action="store",
+                        help="URL of web-platform-tests repository to sync against")
+ parser.add_argument("--branch", action="store", type=abs_path,
+ help="Remote branch to sync against")
+ parser.add_argument("--rev", action="store", help="Revision to sync to")
+ parser.add_argument("--patch", action="store_true", dest="patch", default=None,
+ help="Create a VCS commit containing the changes.")
+ parser.add_argument("--no-patch", action="store_false", dest="patch",
+ help="Don't create a VCS commit containing the changes.")
+ parser.add_argument("--sync", dest="sync", action="store_true", default=False,
+ help="Sync the tests with the latest from upstream (implies --patch)")
+ parser.add_argument("--no-store-state", action="store_false", dest="store_state",
+                        help="Don't store state, so an interrupted update cannot be resumed after failure")
+ parser.add_argument("--continue", action="store_true",
+ help="Continue a previously started run of the update script")
+ parser.add_argument("--abort", action="store_true",
+ help="Clear state from a previous incomplete run of the update script")
+ parser.add_argument("--exclude", action="store", nargs="*",
+ help="List of glob-style paths to exclude when syncing tests")
+ parser.add_argument("--include", action="store", nargs="*",
+ help="List of glob-style paths to include which would otherwise be excluded when syncing tests")
+ return parser
+
+
+def create_parser_reduce(product_choices=None):
+ parser = create_parser(product_choices)
+ parser.add_argument("target", action="store", help="Test id that is unstable")
+ return parser
+
+
+def parse_args():
+ parser = create_parser()
+ rv = vars(parser.parse_args())
+ check_args(rv)
+ return rv
+
+
+def parse_args_update():
+ parser = create_parser_update()
+ rv = vars(parser.parse_args())
+ check_args_update(rv)
+ return rv
+
+
+def parse_args_reduce():
+ parser = create_parser_reduce()
+ rv = vars(parser.parse_args())
+ check_args(rv)
+ return rv
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptlogging.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptlogging.py
new file mode 100644
index 0000000000..06b34dabdb
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptlogging.py
@@ -0,0 +1,109 @@
+# mypy: allow-untyped-defs
+
+import logging
+from threading import Thread
+
+from mozlog import commandline, stdadapter, set_default_logger
+from mozlog.structuredlog import StructuredLogger, log_levels
+
+
+def setup(args, defaults, formatter_defaults=None):
+ logger = args.pop('log', None)
+ if logger:
+ set_default_logger(logger)
+ StructuredLogger._logger_states["web-platform-tests"] = logger._state
+ else:
+ logger = commandline.setup_logging("web-platform-tests", args, defaults,
+ formatter_defaults=formatter_defaults)
+ setup_stdlib_logger()
+
+ for name in list(args.keys()):
+ if name.startswith("log_"):
+ args.pop(name)
+
+ return logger
+
+
+def setup_stdlib_logger():
+ logging.root.handlers = []
+ logging.root = stdadapter.std_logging_adapter(logging.root)
+
+
+class LogLevelRewriter:
+ """Filter that replaces log messages at specified levels with messages
+ at a different level.
+
+ This can be used to e.g. downgrade log messages from ERROR to WARNING
+ in some component where ERRORs are not critical.
+
+ :param inner: Handler to use for messages that pass this filter
+ :param from_levels: List of levels which should be affected
+ :param to_level: Log level to set for the affected messages
+ """
+ def __init__(self, inner, from_levels, to_level):
+ self.inner = inner
+ self.from_levels = [item.upper() for item in from_levels]
+ self.to_level = to_level.upper()
+
+ def __call__(self, data):
+ if data["action"] == "log" and data["level"].upper() in self.from_levels:
+ data = data.copy()
+ data["level"] = self.to_level
+ return self.inner(data)
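+
+# Illustrative use of LogLevelRewriter (a sketch; the handler, formatter and
+# `logger` names below are assumed, not defined in this module):
+#
+#     handler = mozlog.handlers.StreamHandler(sys.stdout, formatter)
+#     logger.add_handler(LogLevelRewriter(handler, ["error"], "warning"))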
+
+
+class LoggedAboveLevelHandler:
+ """Filter that records whether any log message above a certain level has been
+ seen.
+
+ :param min_level: Minimum level to record as a str (e.g., "CRITICAL")
+
+ """
+ def __init__(self, min_level):
+ self.min_level = log_levels[min_level.upper()]
+ self.has_log = False
+
+ def __call__(self, data):
+ if (data["action"] == "log" and
+ not self.has_log and
+ log_levels[data["level"]] <= self.min_level):
+ self.has_log = True
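+
+# Illustrative wiring (a sketch; `logger` is assumed to be an existing mozlog
+# StructuredLogger): attach the handler, run, then inspect the flag.
+#
+#     error_check = LoggedAboveLevelHandler("ERROR")
+#     logger.add_handler(error_check)
+#     ...  # run the tests
+#     if error_check.has_log:
+#         sys.exit(1)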
+
+
+class QueueHandler(logging.Handler):
+ def __init__(self, queue, level=logging.NOTSET):
+ self.queue = queue
+ logging.Handler.__init__(self, level=level)
+
+ def createLock(self):
+ # The queue provides its own locking
+ self.lock = None
+
+ def emit(self, record):
+ msg = self.format(record)
+ data = {"action": "log",
+ "level": record.levelname,
+ "thread": record.threadName,
+ "pid": record.process,
+ "source": self.name,
+ "message": msg}
+ self.queue.put(data)
+
+
+class LogQueueThread(Thread):
+ """Thread for handling log messages from a queue"""
+ def __init__(self, queue, logger):
+ self.queue = queue
+ self.logger = logger
+ super().__init__(name="Thread-Log")
+
+ def run(self):
+ while True:
+ try:
+ data = self.queue.get()
+ except (EOFError, OSError):
+ break
+ if data is None:
+ # A None message is used to shut down the logging thread
+ break
+ self.logger.log_raw(data)
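+
+# Illustrative pairing of QueueHandler and LogQueueThread (a sketch; the queue
+# and `logger` are assumed to be created by the caller):
+#
+#     queue = multiprocessing.Queue()
+#     log_thread = LogQueueThread(queue, logger)
+#     log_thread.start()
+#     logging.getLogger("child").addHandler(QueueHandler(queue))
+#     ...  # stdlib log records are forwarded to `logger` via the queue
+#     queue.put(None)   # a None message tells the thread to shut down
+#     log_thread.join()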
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/__init__.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/__init__.py
new file mode 100644
index 0000000000..e354d5ff4f
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/__init__.py
@@ -0,0 +1,5 @@
+# flake8: noqa (not ideal, but nicer than adding noqa: F401 to every line!)
+from .serializer import serialize
+from .parser import parse
+from .backends.static import compile as compile_static
+from .backends.conditional import compile as compile_condition
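+
+# Rough usage sketch of the names re-exported above (the manifest text and
+# run-info values are illustrative only):
+#
+#     tree = parse(b"[example.html]\n  expected: FAIL\n")
+#     text = serialize(tree)
+#     static_manifest = compile_static(b"key: value\n", {"os": "linux"})
+#     cond_manifest = compile_condition(b"key: value\n")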
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/backends/__init__.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/backends/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/backends/__init__.py
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/backends/base.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/backends/base.py
new file mode 100644
index 0000000000..c1ec206b75
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/backends/base.py
@@ -0,0 +1,221 @@
+# mypy: allow-untyped-defs
+
+import abc
+
+from ..node import NodeVisitor
+from ..parser import parse
+
+
+class Compiler(NodeVisitor, metaclass=abc.ABCMeta):
+
+ def compile(self, tree, data_cls_getter=None, **kwargs):
+ self._kwargs = kwargs
+ return self._compile(tree, data_cls_getter, **kwargs)
+
+ def _compile(self, tree, data_cls_getter=None, **kwargs):
+ """Compile a raw AST into a form where conditional expressions
+ are represented by ConditionalValue objects that can be evaluated
+ at runtime.
+
+ tree - The root node of the wptmanifest AST to compile
+
+ data_cls_getter - A function taking two parameters; the previous
+ output node and the current ast node and returning
+ the class of the output node to use for the current
+ ast node
+ """
+ if data_cls_getter is None:
+ self.data_cls_getter = lambda x, y: ManifestItem
+ else:
+ self.data_cls_getter = data_cls_getter
+
+ self.tree = tree
+ self.output_node = self._initial_output_node(tree, **kwargs)
+ self.visit(tree)
+ if hasattr(self.output_node, "set_defaults"):
+ self.output_node.set_defaults()
+ assert self.output_node is not None
+ return self.output_node
+
+ def _initial_output_node(self, node, **kwargs):
+ return self.data_cls_getter(None, None)(node, **kwargs)
+
+ def visit_DataNode(self, node):
+ if node != self.tree:
+ output_parent = self.output_node
+ self.output_node = self.data_cls_getter(self.output_node, node)(node, **self._kwargs)
+ else:
+ output_parent = None
+
+ assert self.output_node is not None
+
+ for child in node.children:
+ self.visit(child)
+
+ if output_parent is not None:
+ # Append to the parent *after* processing all the node data
+ output_parent.append(self.output_node)
+ self.output_node = self.output_node.parent
+
+ assert self.output_node is not None
+
+ @abc.abstractmethod
+ def visit_KeyValueNode(self, node):
+ pass
+
+ def visit_ListNode(self, node):
+ return [self.visit(child) for child in node.children]
+
+ def visit_ValueNode(self, node):
+ return node.data
+
+ def visit_AtomNode(self, node):
+ return node.data
+
+ @abc.abstractmethod
+ def visit_ConditionalNode(self, node):
+ pass
+
+ def visit_StringNode(self, node):
+ indexes = [self.visit(child) for child in node.children]
+
+ def value(x):
+ rv = node.data
+ for index in indexes:
+ rv = rv[index(x)]
+ return rv
+ return value
+
+ def visit_NumberNode(self, node):
+ if "." in node.data:
+ return float(node.data)
+ else:
+ return int(node.data)
+
+ def visit_VariableNode(self, node):
+ indexes = [self.visit(child) for child in node.children]
+
+ def value(x):
+ data = x[node.data]
+ for index in indexes:
+ data = data[index(x)]
+ return data
+ return value
+
+ def visit_IndexNode(self, node):
+ assert len(node.children) == 1
+ return self.visit(node.children[0])
+
+ @abc.abstractmethod
+ def visit_UnaryExpressionNode(self, node):
+ pass
+
+ @abc.abstractmethod
+ def visit_BinaryExpressionNode(self, node):
+ pass
+
+ @abc.abstractmethod
+ def visit_UnaryOperatorNode(self, node):
+ pass
+
+ @abc.abstractmethod
+ def visit_BinaryOperatorNode(self, node):
+ pass
+
+
+class ManifestItem:
+ def __init__(self, node, **kwargs):
+ self.parent = None
+ self.node = node
+ self.children = []
+ self._data = {}
+
+ def __repr__(self):
+ return f"<{self.__class__} {self.node.data}>"
+
+ def __str__(self):
+ rv = [repr(self)]
+ for item in self.children:
+ rv.extend(" %s" % line for line in str(item).split("\n"))
+ return "\n".join(rv)
+
+ def set_defaults(self):
+ pass
+
+ @property
+ def is_empty(self):
+ if self._data:
+ return False
+ return all(child.is_empty for child in self.children)
+
+ @property
+ def root(self):
+ node = self
+ while node.parent is not None:
+ node = node.parent
+ return node
+
+ @property
+ def name(self):
+ return self.node.data
+
+ def get(self, key):
+ for node in [self, self.root]:
+ if key in node._data:
+ return node._data[key]
+ raise KeyError
+
+ def set(self, name, value):
+ self._data[name] = value
+
+ def remove(self):
+ if self.parent:
+ self.parent.children.remove(self)
+ self.parent = None
+
+ def iterchildren(self, name=None):
+ for item in self.children:
+ if item.name == name or name is None:
+ yield item
+
+ def has_key(self, key):
+ for node in [self, self.root]:
+ if key in node._data:
+ return True
+ return False
+
+ def _flatten(self):
+ rv = {}
+ for node in [self, self.root]:
+ for name, value in node._data.items():
+ if name not in rv:
+ rv[name] = value
+ return rv
+
+ def iteritems(self):
+ yield from self._flatten().items()
+
+ def iterkeys(self):
+ yield from self._flatten().keys()
+
+ def itervalues(self):
+ yield from self._flatten().values()
+
+ def append(self, child):
+ child.parent = self
+ self.children.append(child)
+ return child
+
+
+def compile_ast(compiler, ast, data_cls_getter=None, **kwargs):
+ return compiler().compile(ast,
+ data_cls_getter=data_cls_getter,
+ **kwargs)
+
+
+def compile(compiler, stream, data_cls_getter=None, **kwargs):
+ return compile_ast(compiler,
+ parse(stream),
+ data_cls_getter=data_cls_getter,
+ **kwargs)
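+
+# Sketch of a data_cls_getter as consumed by compile()/compile_ast() above.
+# RootData, SectionData and SomeConcreteCompiler are hypothetical names;
+# concrete getters and compilers live in the modules that use these backends.
+#
+#     def data_cls_getter(output_node, ast_node):
+#         # The root of the output tree gets a different class from sections
+#         return RootData if output_node is None else SectionData
+#
+#     result = compile(SomeConcreteCompiler, open(path, "rb"), data_cls_getter)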
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/backends/conditional.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/backends/conditional.py
new file mode 100644
index 0000000000..695d6a3dcc
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/backends/conditional.py
@@ -0,0 +1,402 @@
+# mypy: allow-untyped-defs
+
+import operator
+
+from ..node import NodeVisitor, DataNode, ConditionalNode, KeyValueNode, ListNode, ValueNode, BinaryExpressionNode, VariableNode
+from ..parser import parse
+
+
+class ConditionalValue:
+ def __init__(self, node, condition_func):
+ self.node = node
+ assert callable(condition_func)
+ self.condition_func = condition_func
+ if isinstance(node, ConditionalNode):
+ assert len(node.children) == 2
+ self.condition_node = self.node.children[0]
+ assert isinstance(node.children[1], (ValueNode, ListNode))
+ self.value_node = self.node.children[1]
+ else:
+ assert isinstance(node, (ValueNode, ListNode))
+ self.condition_node = None
+ self.value_node = self.node
+
+ @property
+ def value(self):
+ if isinstance(self.value_node, ValueNode):
+ return self.value_node.data
+ else:
+ return [item.data for item in self.value_node.children]
+
+ @value.setter
+ def value(self, value):
+ if isinstance(self.value_node, ValueNode):
+ self.value_node.data = value
+ else:
+ assert isinstance(self.value_node, ListNode)
+ while self.value_node.children:
+ self.value_node.children[0].remove()
+ assert len(self.value_node.children) == 0
+ for list_value in value:
+ self.value_node.append(ValueNode(list_value))
+
+ def __call__(self, run_info):
+ return self.condition_func(run_info)
+
+ def value_as(self, type_func):
+ """Get value and convert to a given type.
+
+ This is unfortunate, but we don't currently have a good way to specify that
+ specific properties should have their data returned as specific types"""
+ value = self.value
+ if type_func is not None:
+ value = type_func(value)
+ return value
+
+ def remove(self):
+ if len(self.node.parent.children) == 1:
+ self.node.parent.remove()
+ self.node.remove()
+
+ @property
+ def variables(self):
+ rv = set()
+ if self.condition_node is None:
+ return rv
+ stack = [self.condition_node]
+ while stack:
+ node = stack.pop()
+ if isinstance(node, VariableNode):
+ rv.add(node.data)
+ for child in reversed(node.children):
+ stack.append(child)
+ return rv
+
+
+class Compiler(NodeVisitor):
+ def compile(self, tree, data_cls_getter=None, **kwargs):
+ """Compile a raw AST into a form where conditional expressions
+ are represented by ConditionalValue objects that can be evaluated
+ at runtime.
+
+ tree - The root node of the wptmanifest AST to compile
+
+ data_cls_getter - A function taking two parameters; the previous
+ output node and the current ast node and returning
+ the class of the output node to use for the current
+ ast node
+ """
+ if data_cls_getter is None:
+ self.data_cls_getter = lambda x, y: ManifestItem
+ else:
+ self.data_cls_getter = data_cls_getter
+
+ self.tree = tree
+ self.output_node = self._initial_output_node(tree, **kwargs)
+ self.visit(tree)
+ if hasattr(self.output_node, "set_defaults"):
+ self.output_node.set_defaults()
+ assert self.output_node is not None
+ return self.output_node
+
+ def compile_condition(self, condition):
+ """Compile a ConditionalNode into a ConditionalValue.
+
+ condition: A ConditionalNode"""
+ data_node = DataNode()
+ key_value_node = KeyValueNode()
+ key_value_node.append(condition.copy())
+ data_node.append(key_value_node)
+ manifest_item = self.compile(data_node)
+ return manifest_item._data[None][0]
+
+ def _initial_output_node(self, node, **kwargs):
+ return self.data_cls_getter(None, None)(node, **kwargs)
+
+ def visit_DataNode(self, node):
+ if node != self.tree:
+ output_parent = self.output_node
+ self.output_node = self.data_cls_getter(self.output_node, node)(node)
+ else:
+ output_parent = None
+
+ assert self.output_node is not None
+
+ for child in node.children:
+ self.visit(child)
+
+ if output_parent is not None:
+ # Append to the parent *after* processing all the node data
+ output_parent.append(self.output_node)
+ self.output_node = self.output_node.parent
+
+ assert self.output_node is not None
+
+ def visit_KeyValueNode(self, node):
+ key_values = []
+ for child in node.children:
+ condition, value = self.visit(child)
+ key_values.append(ConditionalValue(child, condition))
+
+ self.output_node._add_key_value(node, key_values)
+
+ def visit_ListNode(self, node):
+ return (lambda x: True, [self.visit(child) for child in node.children])
+
+ def visit_ValueNode(self, node):
+ return (lambda x: True, node.data)
+
+ def visit_AtomNode(self, node):
+ return (lambda x: True, node.data)
+
+ def visit_ConditionalNode(self, node):
+ return self.visit(node.children[0]), self.visit(node.children[1])
+
+ def visit_StringNode(self, node):
+ indexes = [self.visit(child) for child in node.children]
+
+ def value(x):
+ rv = node.data
+ for index in indexes:
+ rv = rv[index(x)]
+ return rv
+ return value
+
+ def visit_NumberNode(self, node):
+ if "." in node.data:
+ return lambda x: float(node.data)
+ else:
+ return lambda x: int(node.data)
+
+ def visit_VariableNode(self, node):
+ indexes = [self.visit(child) for child in node.children]
+
+ def value(x):
+ data = x[node.data]
+ for index in indexes:
+ data = data[index(x)]
+ return data
+ return value
+
+ def visit_IndexNode(self, node):
+ assert len(node.children) == 1
+ return self.visit(node.children[0])
+
+ def visit_UnaryExpressionNode(self, node):
+ assert len(node.children) == 2
+ operator = self.visit(node.children[0])
+ operand = self.visit(node.children[1])
+
+ return lambda x: operator(operand(x))
+
+ def visit_BinaryExpressionNode(self, node):
+ assert len(node.children) == 3
+ operator = self.visit(node.children[0])
+ operand_0 = self.visit(node.children[1])
+ operand_1 = self.visit(node.children[2])
+
+ assert operand_0 is not None
+ assert operand_1 is not None
+
+ return lambda x: operator(operand_0(x), operand_1(x))
+
+ def visit_UnaryOperatorNode(self, node):
+ return {"not": operator.not_}[node.data]
+
+ def visit_BinaryOperatorNode(self, node):
+ assert isinstance(node.parent, BinaryExpressionNode)
+ return {"and": operator.and_,
+ "or": operator.or_,
+ "==": operator.eq,
+ "!=": operator.ne}[node.data]
+
+
+class ManifestItem:
+ def __init__(self, node=None, **kwargs):
+ self.node = node
+ self.parent = None
+ self.children = []
+ self._data = {}
+
+ def __repr__(self):
+ return "<conditional.ManifestItem %s>" % (self.node.data)
+
+ def __str__(self):
+ rv = [repr(self)]
+ for item in self.children:
+ rv.extend(" %s" % line for line in str(item).split("\n"))
+ return "\n".join(rv)
+
+ def __contains__(self, key):
+ return key in self._data
+
+ def __iter__(self):
+ yield self
+ for child in self.children:
+ yield from child
+
+ @property
+ def is_empty(self):
+ if self._data:
+ return False
+ return all(child.is_empty for child in self.children)
+
+ @property
+ def root(self):
+ node = self
+ while node.parent is not None:
+ node = node.parent
+ return node
+
+ @property
+ def name(self):
+ return self.node.data
+
+ def has_key(self, key):
+ for node in [self, self.root]:
+ if key in node._data:
+ return True
+ return False
+
+ def get(self, key, run_info=None):
+ if run_info is None:
+ run_info = {}
+
+ for node in [self, self.root]:
+ if key in node._data:
+ for cond_value in node._data[key]:
+ try:
+ matches = cond_value(run_info)
+ except KeyError:
+ matches = False
+ if matches:
+ return cond_value.value
+ raise KeyError
+
+ def set(self, key, value, condition=None):
+ # First try to update the existing value
+ if key in self._data:
+ cond_values = self._data[key]
+ for cond_value in cond_values:
+ if cond_value.condition_node == condition:
+ cond_value.value = value
+ return
+ # If there isn't a matching condition, reuse the existing KeyValueNode
+ # as the parent
+ node = None
+ for child in self.node.children:
+ if child.data == key:
+ node = child
+ break
+ assert node is not None
+
+ else:
+ node = KeyValueNode(key)
+ self.node.append(node)
+
+ if isinstance(value, list):
+ value_node = ListNode()
+ for item in value:
+ value_node.append(ValueNode(str(item)))
+ else:
+ value_node = ValueNode(str(value))
+ if condition is not None:
+ if not isinstance(condition, ConditionalNode):
+ conditional_node = ConditionalNode()
+ conditional_node.append(condition)
+ conditional_node.append(value_node)
+ else:
+ conditional_node = condition
+ node.append(conditional_node)
+ cond_value = Compiler().compile_condition(conditional_node)
+ else:
+ node.append(value_node)
+ cond_value = ConditionalValue(value_node, lambda x: True)
+
+ # Update the cache of child values. This is pretty annoying and maybe
+ # it should just work directly on the tree
+ if key not in self._data:
+ self._data[key] = []
+ if self._data[key] and self._data[key][-1].condition_node is None:
+ self._data[key].insert(len(self._data[key]) - 1, cond_value)
+ else:
+ self._data[key].append(cond_value)
+
+ def clear(self, key):
+ """Clear all the expected data for this node"""
+ if key in self._data:
+ for child in self.node.children:
+ if (isinstance(child, KeyValueNode) and
+ child.data == key):
+ child.remove()
+ del self._data[key]
+ break
+
+ def get_conditions(self, property_name):
+ if property_name in self._data:
+ return self._data[property_name]
+ return []
+
+ def _add_key_value(self, node, values):
+ """Called during construction to set a key-value node"""
+ self._data[node.data] = values
+
+ def append(self, child):
+ self.children.append(child)
+ child.parent = self
+ if child.node.parent != self.node:
+ self.node.append(child.node)
+ return child
+
+ def remove(self):
+ if self.parent:
+ self.parent._remove_child(self)
+
+ def _remove_child(self, child):
+ self.children.remove(child)
+ child.parent = None
+ child.node.remove()
+
+ def iterchildren(self, name=None):
+ for item in self.children:
+ if item.name == name or name is None:
+ yield item
+
+ def _flatten(self):
+ rv = {}
+ for node in [self, self.root]:
+ for name, value in node._data.items():
+ if name not in rv:
+ rv[name] = value
+ return rv
+
+ def iteritems(self):
+ yield from self._flatten().items()
+
+ def iterkeys(self):
+ yield from self._flatten().keys()
+
+ def iter_properties(self):
+ for item in self._data:
+ yield item, self._data[item]
+
+ def remove_value(self, key, value):
+ if key not in self._data:
+ return
+ try:
+ self._data[key].remove(value)
+ except ValueError:
+ return
+ if not self._data[key]:
+ del self._data[key]
+ value.remove()
+
+
+def compile_ast(ast, data_cls_getter=None, **kwargs):
+ return Compiler().compile(ast, data_cls_getter=data_cls_getter, **kwargs)
+
+
+def compile(stream, data_cls_getter=None, **kwargs):
+ return compile_ast(parse(stream),
+ data_cls_getter=data_cls_getter,
+ **kwargs)
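+
+# Minimal usage sketch of this backend (the manifest text and run_info values
+# are illustrative):
+#
+#     manifest = compile(b"key:\n  if os == 'linux': a\n  b\n")
+#     manifest.get("key", {"os": "linux"})   # -> "a"
+#     manifest.get("key", {"os": "mac"})     # -> "b"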
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/backends/static.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/backends/static.py
new file mode 100644
index 0000000000..5bec942e0b
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/backends/static.py
@@ -0,0 +1,102 @@
+# mypy: allow-untyped-defs
+
+import operator
+
+from . import base
+from ..parser import parse
+
+
+class Compiler(base.Compiler):
+ """Compiler backend that evaluates conditional expressions
+ to give static output"""
+
+ def compile(self, tree, expr_data, data_cls_getter=None, **kwargs):
+ """Compile a raw AST into a form with conditional expressions
+ evaluated.
+
+ tree - The root node of the wptmanifest AST to compile
+
+ expr_data - A dictionary of key / value pairs to use when
+ evaluating conditional expressions
+
+ data_cls_getter - A function taking two parameters; the previous
+ output node and the current ast node and returning
+ the class of the output node to use for the current
+ ast node
+ """
+
+ self._kwargs = kwargs
+ self.expr_data = expr_data
+
+ return self._compile(tree, data_cls_getter, **kwargs)
+
+ def visit_KeyValueNode(self, node):
+ key_name = node.data
+ key_value = None
+ for child in node.children:
+ value = self.visit(child)
+ if value is not None:
+ key_value = value
+ break
+ if key_value is not None:
+ self.output_node.set(key_name, key_value)
+
+ def visit_ConditionalNode(self, node):
+ assert len(node.children) == 2
+ if self.visit(node.children[0]):
+ return self.visit(node.children[1])
+
+ def visit_StringNode(self, node):
+ value = node.data
+ for child in node.children:
+ value = self.visit(child)(value)
+ return value
+
+ def visit_VariableNode(self, node):
+ value = self.expr_data[node.data]
+ for child in node.children:
+ value = self.visit(child)(value)
+ return value
+
+ def visit_IndexNode(self, node):
+ assert len(node.children) == 1
+ index = self.visit(node.children[0])
+ return lambda x: x[index]
+
+ def visit_UnaryExpressionNode(self, node):
+ assert len(node.children) == 2
+ operator = self.visit(node.children[0])
+ operand = self.visit(node.children[1])
+
+ return operator(operand)
+
+ def visit_BinaryExpressionNode(self, node):
+ assert len(node.children) == 3
+ operator = self.visit(node.children[0])
+ operand_0 = self.visit(node.children[1])
+ operand_1 = self.visit(node.children[2])
+
+ return operator(operand_0, operand_1)
+
+ def visit_UnaryOperatorNode(self, node):
+ return {"not": operator.not_}[node.data]
+
+ def visit_BinaryOperatorNode(self, node):
+ return {"and": operator.and_,
+ "or": operator.or_,
+ "==": operator.eq,
+ "!=": operator.ne}[node.data]
+
+
+def compile_ast(ast, expr_data, data_cls_getter=None, **kwargs):
+ return Compiler().compile(ast,
+ expr_data,
+ data_cls_getter=data_cls_getter,
+ **kwargs)
+
+
+def compile(stream, expr_data, data_cls_getter=None, **kwargs):
+ return compile_ast(parse(stream),
+ expr_data,
+ data_cls_getter=data_cls_getter,
+ **kwargs)
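+
+# Minimal usage sketch of the static backend: conditions are evaluated against
+# expr_data once, at compile time (manifest text and values are illustrative):
+#
+#     manifest = compile(b"key:\n  if os == 'linux': a\n  b\n", {"os": "linux"})
+#     manifest.get("key")   # -> "a"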
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/node.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/node.py
new file mode 100644
index 0000000000..437de54f5b
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/node.py
@@ -0,0 +1,173 @@
+# mypy: allow-untyped-defs
+
+class NodeVisitor:
+ def visit(self, node):
+ # This is ugly as hell, but we don't have multimethods and
+ # they aren't trivial to fake without access to the class
+ # object from the class body
+ func = getattr(self, "visit_%s" % (node.__class__.__name__))
+ return func(node)
+
+
+class Node:
+ def __init__(self, data=None, comments=None):
+ self.data = data
+ self.parent = None
+ self.children = []
+ self.comments = comments or []
+
+ def append(self, other):
+ other.parent = self
+ self.children.append(other)
+
+ def remove(self):
+ self.parent.children.remove(self)
+
+ def __repr__(self):
+ return f"<{self.__class__.__name__} {self.data}>"
+
+ def __str__(self):
+ rv = [repr(self)]
+ for item in self.children:
+ rv.extend(" %s" % line for line in str(item).split("\n"))
+ return "\n".join(rv)
+
+ def __eq__(self, other):
+ if not (self.__class__ == other.__class__ and
+ self.data == other.data and
+ len(self.children) == len(other.children)):
+ return False
+ for child, other_child in zip(self.children, other.children):
+ if not child == other_child:
+ return False
+ return True
+
+ def copy(self):
+ new = self.__class__(self.data, self.comments)
+ for item in self.children:
+ new.append(item.copy())
+ return new
+
+
+class DataNode(Node):
+ def append(self, other):
+ # Append that retains the invariant that child data nodes
+ # come after child nodes of other types
+ other.parent = self
+ if isinstance(other, DataNode):
+ self.children.append(other)
+ else:
+ index = len(self.children)
+ while index > 0 and isinstance(self.children[index - 1], DataNode):
+ index -= 1
+ for i in range(index):
+ if other.data == self.children[i].data:
+ raise ValueError("Duplicate key %s" % self.children[i].data)
+ self.children.insert(index, other)
+
+
+class KeyValueNode(Node):
+ def append(self, other):
+ # Append that retains the invariant that conditional nodes
+ # come before unconditional nodes
+ other.parent = self
+ if not isinstance(other, (ListNode, ValueNode, ConditionalNode)):
+ raise TypeError
+ if isinstance(other, (ListNode, ValueNode)):
+ if self.children:
+ assert not isinstance(self.children[-1], (ListNode, ValueNode))
+ self.children.append(other)
+ else:
+ if self.children and isinstance(self.children[-1], ValueNode):
+ self.children.insert(len(self.children) - 1, other)
+ else:
+ self.children.append(other)
+
+
+class ListNode(Node):
+ def append(self, other):
+ other.parent = self
+ self.children.append(other)
+
+
+class ValueNode(Node):
+ def append(self, other):
+ raise TypeError
+
+
+class AtomNode(ValueNode):
+ pass
+
+
+class ConditionalNode(Node):
+ def append(self, other):
+ if not len(self.children):
+ if not isinstance(other, (BinaryExpressionNode, UnaryExpressionNode, VariableNode)):
+ raise TypeError
+ else:
+ if len(self.children) > 1:
+ raise ValueError
+ if not isinstance(other, (ListNode, ValueNode)):
+ raise TypeError
+ other.parent = self
+ self.children.append(other)
+
+
+class UnaryExpressionNode(Node):
+ def __init__(self, operator, operand):
+ Node.__init__(self)
+ self.append(operator)
+ self.append(operand)
+
+ def append(self, other):
+ Node.append(self, other)
+ assert len(self.children) <= 2
+
+ def copy(self):
+ new = self.__class__(self.children[0].copy(),
+ self.children[1].copy())
+ return new
+
+
+class BinaryExpressionNode(Node):
+ def __init__(self, operator, operand_0, operand_1):
+ Node.__init__(self)
+ self.append(operator)
+ self.append(operand_0)
+ self.append(operand_1)
+
+ def append(self, other):
+ Node.append(self, other)
+ assert len(self.children) <= 3
+
+ def copy(self):
+ new = self.__class__(self.children[0].copy(),
+ self.children[1].copy(),
+ self.children[2].copy())
+ return new
+
+
+class UnaryOperatorNode(Node):
+ def append(self, other):
+ raise TypeError
+
+
+class BinaryOperatorNode(Node):
+ def append(self, other):
+ raise TypeError
+
+
+class IndexNode(Node):
+ pass
+
+
+class VariableNode(Node):
+ pass
+
+
+class StringNode(Node):
+ pass
+
+
+class NumberNode(ValueNode):
+ pass
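+
+# Building a small tree by hand (illustrative), mirroring what the parser
+# produces for "[example.html]\n  expected: FAIL":
+#
+#     root = DataNode()
+#     section = DataNode("example.html")
+#     kv = KeyValueNode("expected")
+#     kv.append(ValueNode("FAIL"))
+#     section.append(kv)
+#     root.append(section)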
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/parser.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/parser.py
new file mode 100644
index 0000000000..c778895ed2
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/parser.py
@@ -0,0 +1,873 @@
+# mypy: allow-untyped-defs
+
+#default_value:foo
+#include: other.manifest
+#
+#[test_name.js]
+# expected: ERROR
+#
+# [subtest 1]
+# expected:
+# os == win: FAIL #This is a comment
+# PASS
+#
+
+
+from io import BytesIO
+
+from .node import (Node, AtomNode, BinaryExpressionNode, BinaryOperatorNode,
+ ConditionalNode, DataNode, IndexNode, KeyValueNode, ListNode,
+ NumberNode, StringNode, UnaryExpressionNode,
+ UnaryOperatorNode, ValueNode, VariableNode)
+
+
+class ParseError(Exception):
+ def __init__(self, filename, line, detail):
+ self.line = line
+ self.filename = filename
+ self.detail = detail
+ self.message = f"{self.detail}: {self.filename} line {self.line}"
+ Exception.__init__(self, self.message)
+
+eol = object
+group_start = object
+group_end = object
+digits = "0123456789"
+open_parens = "[("
+close_parens = "])"
+parens = open_parens + close_parens
+operator_chars = "=!"
+
+unary_operators = ["not"]
+binary_operators = ["==", "!=", "and", "or"]
+
+operators = ["==", "!=", "not", "and", "or"]
+
+atoms = {"True": True,
+ "False": False,
+ "Reset": object()}
+
+def decode(s):
+ assert isinstance(s, str)
+ return s
+
+
+def precedence(operator_node):
+ return len(operators) - operators.index(operator_node.data)
+
+
+class TokenTypes:
+ def __init__(self) -> None:
+ for type in [
+ "group_start",
+ "group_end",
+ "paren",
+ "list_start",
+ "list_end",
+ "separator",
+ "ident",
+ "string",
+ "number",
+ "atom",
+ # Without an end-of-line token type, we need two different comment
+ # token types to distinguish between:
+ # [heading1] # Comment attached to heading 1
+ # [heading2]
+ #
+ # and
+ # [heading1]
+ # # Comment attached to heading 2
+ # [heading2]
+ "comment",
+ "inline_comment",
+ "eof",
+ ]:
+ setattr(self, type, type)
+
+token_types = TokenTypes()
+
+
+class Tokenizer:
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.indent_levels = [0]
+ self.state = self.line_start_state
+ self.next_state = self.data_line_state
+ self.line_number = 0
+ self.filename = ""
+
+ def tokenize(self, stream):
+ self.reset()
+ assert not isinstance(stream, str)
+ if isinstance(stream, bytes):
+ stream = BytesIO(stream)
+ if not hasattr(stream, "name"):
+ self.filename = ""
+ else:
+ self.filename = stream.name
+
+ self.next_line_state = self.line_start_state
+ for i, line in enumerate(stream):
+ assert isinstance(line, bytes)
+ self.state = self.next_line_state
+ assert self.state is not None
+ states = []
+ self.next_line_state = None
+ self.line_number = i + 1
+ self.index = 0
+ self.line = line.decode('utf-8').rstrip()
+ assert isinstance(self.line, str)
+ while self.state != self.eol_state:
+ states.append(self.state)
+ tokens = self.state()
+ if tokens:
+ yield from tokens
+ self.state()
+ while True:
+ yield (token_types.eof, None)
+
+ def char(self):
+ if self.index == len(self.line):
+ return eol
+ return self.line[self.index]
+
+ def consume(self):
+ if self.index < len(self.line):
+ self.index += 1
+
+ def peek(self, length):
+ return self.line[self.index:self.index + length]
+
+ def skip_whitespace(self):
+ while self.char() == " ":
+ self.consume()
+
+ def eol_state(self):
+ if self.next_line_state is None:
+ self.next_line_state = self.line_start_state
+
+ def line_start_state(self):
+ self.skip_whitespace()
+ if self.char() == eol:
+ self.state = self.eol_state
+ return
+ if self.char() == "#":
+ self.state = self.comment_state
+ return
+ if self.index > self.indent_levels[-1]:
+ self.indent_levels.append(self.index)
+ yield (token_types.group_start, None)
+ else:
+ if self.index < self.indent_levels[-1]:
+ while self.index < self.indent_levels[-1]:
+ self.indent_levels.pop()
+ yield (token_types.group_end, None)
+ # This is terrible; if we were parsing an expression then
+ # next_state will be expr_or_value_state, but after a dedent the
+ # next line must be a heading or a key, so go back to data_line_state
+ self.next_state = self.data_line_state
+ if self.index != self.indent_levels[-1]:
+ raise ParseError(self.filename, self.line_number, "Unexpected indent")
+
+ self.state = self.next_state
+
+ def data_line_state(self):
+ if self.char() == "[":
+ yield (token_types.paren, self.char())
+ self.consume()
+ self.state = self.heading_state
+ else:
+ self.state = self.key_state
+
+ def heading_state(self):
+ rv = ""
+ while True:
+ c = self.char()
+ if c == "\\":
+ rv += self.consume_escape()
+ elif c == "]":
+ break
+ elif c == eol:
+ raise ParseError(self.filename, self.line_number, "EOL in heading")
+ else:
+ rv += c
+ self.consume()
+
+ yield (token_types.string, decode(rv))
+ yield (token_types.paren, "]")
+ self.consume()
+ self.state = self.line_end_state
+ self.next_state = self.data_line_state
+
+ def key_state(self):
+ rv = ""
+ while True:
+ c = self.char()
+ if c == " ":
+ self.skip_whitespace()
+ if self.char() != ":":
+ raise ParseError(self.filename, self.line_number, "Space in key name")
+ break
+ elif c == ":":
+ break
+ elif c == eol:
+ raise ParseError(self.filename, self.line_number, "EOL in key name (missing ':'?)")
+ elif c == "\\":
+ rv += self.consume_escape()
+ else:
+ rv += c
+ self.consume()
+ yield (token_types.string, decode(rv))
+ yield (token_types.separator, ":")
+ self.consume()
+ self.state = self.after_key_state
+
+ def after_key_state(self):
+ self.skip_whitespace()
+ c = self.char()
+ if c in {"#", eol}:
+ self.next_state = self.expr_or_value_state
+ self.state = self.line_end_state
+ elif c == "[":
+ self.state = self.list_start_state
+ else:
+ self.state = self.value_state
+
+ def after_expr_state(self):
+ self.skip_whitespace()
+ c = self.char()
+ if c in {"#", eol}:
+ self.next_state = self.after_expr_state
+ self.state = self.line_end_state
+ elif c == "[":
+ self.state = self.list_start_state
+ else:
+ self.state = self.value_state
+
+ def list_start_state(self):
+ yield (token_types.list_start, "[")
+ self.consume()
+ self.state = self.list_value_start_state
+
+ def list_value_start_state(self):
+ self.skip_whitespace()
+ if self.char() == "]":
+ self.state = self.list_end_state
+ elif self.char() in ("'", '"'):
+ quote_char = self.char()
+ self.consume()
+ yield (token_types.string, self.consume_string(quote_char))
+ self.skip_whitespace()
+ if self.char() == "]":
+ self.state = self.list_end_state
+ elif self.char() != ",":
+ raise ParseError(self.filename, self.line_number, "Junk after quoted string")
+ self.consume()
+ elif self.char() in {"#", eol}:
+ self.state = self.line_end_state
+ self.next_line_state = self.list_value_start_state
+ elif self.char() == ",":
+ raise ParseError(self.filename, self.line_number, "List item started with separator")
+ elif self.char() == "@":
+ self.state = self.list_value_atom_state
+ else:
+ self.state = self.list_value_state
+
+ def list_value_state(self):
+ rv = ""
+ spaces = 0
+ while True:
+ c = self.char()
+ if c == "\\":
+ escape = self.consume_escape()
+ rv += escape
+ elif c == eol:
+ raise ParseError(self.filename, self.line_number, "EOL in list value")
+ elif c == "#":
+ raise ParseError(self.filename, self.line_number, "EOL in list value (comment)")
+ elif c == ",":
+ self.state = self.list_value_start_state
+ self.consume()
+ break
+ elif c == " ":
+ spaces += 1
+ self.consume()
+ elif c == "]":
+ self.state = self.list_end_state
+ self.consume()
+ break
+ else:
+ rv += " " * spaces
+ spaces = 0
+ rv += c
+ self.consume()
+
+ if rv:
+ yield (token_types.string, decode(rv))
+
+ def list_value_atom_state(self):
+ self.consume()
+ for _, value in self.list_value_state():
+ yield token_types.atom, value
+
+ def list_end_state(self):
+ self.consume()
+ yield (token_types.list_end, "]")
+ self.state = self.line_end_state
+
+ def value_state(self):
+ self.skip_whitespace()
+ c = self.char()
+ if c in ("'", '"'):
+ quote_char = self.char()
+ self.consume()
+ yield (token_types.string, self.consume_string(quote_char))
+ self.state = self.line_end_state
+ elif c == "@":
+ self.consume()
+ for _, value in self.value_inner_state():
+ yield token_types.atom, value
+ elif c == "[":
+ self.state = self.list_start_state
+ else:
+ self.state = self.value_inner_state
+
+ def value_inner_state(self):
+ rv = ""
+ spaces = 0
+ while True:
+ c = self.char()
+ if c == "\\":
+ rv += self.consume_escape()
+ elif c in {"#", eol}:
+ self.state = self.line_end_state
+ break
+ elif c == " ":
+ # prevent whitespace before comments from being included in the value
+ spaces += 1
+ self.consume()
+ else:
+ rv += " " * spaces
+ spaces = 0
+ rv += c
+ self.consume()
+ rv = decode(rv)
+ if rv.startswith("if "):
+ # Hack to avoid a problem where people write
+ # disabled: if foo
+ # and expect that to disable conditionally
+ raise ParseError(self.filename, self.line_number, "Strings starting 'if ' must be quoted "
+ "(expressions must start on a newline and be indented)")
+ yield (token_types.string, rv)
+
+ def _consume_comment(self):
+ assert self.char() == "#"
+ self.consume()
+ comment = ''
+ while self.char() is not eol:
+ comment += self.char()
+ self.consume()
+ return comment
+
+ def comment_state(self):
+ yield (token_types.comment, self._consume_comment())
+ self.state = self.eol_state
+
+ def inline_comment_state(self):
+ yield (token_types.inline_comment, self._consume_comment())
+ self.state = self.eol_state
+
+ def line_end_state(self):
+ self.skip_whitespace()
+ c = self.char()
+ if c == "#":
+ self.state = self.inline_comment_state
+ elif c == eol:
+ self.state = self.eol_state
+ else:
+ raise ParseError(self.filename, self.line_number, "Junk before EOL %s" % c)
+
+ def consume_string(self, quote_char):
+ rv = ""
+ while True:
+ c = self.char()
+ if c == "\\":
+ rv += self.consume_escape()
+ elif c == quote_char:
+ self.consume()
+ break
+ elif c == eol:
+ raise ParseError(self.filename, self.line_number, "EOL in quoted string")
+ else:
+ rv += c
+ self.consume()
+
+ return decode(rv)
+
+ def expr_or_value_state(self):
+ if self.peek(3) == "if ":
+ self.state = self.expr_state
+ else:
+ self.state = self.value_state
+
+ def expr_state(self):
+ self.skip_whitespace()
+ c = self.char()
+ if c == eol:
+ raise ParseError(self.filename, self.line_number, "EOL in expression")
+ elif c in "'\"":
+ self.consume()
+ yield (token_types.string, self.consume_string(c))
+ elif c == "#":
+ raise ParseError(self.filename, self.line_number, "Comment before end of expression")
+ elif c == ":":
+ yield (token_types.separator, c)
+ self.consume()
+ self.state = self.after_expr_state
+ elif c in parens:
+ self.consume()
+ yield (token_types.paren, c)
+ elif c in ("!", "="):
+ self.state = self.operator_state
+ elif c in digits:
+ self.state = self.digit_state
+ else:
+ self.state = self.ident_state
+
+ def operator_state(self):
+ # Only symbolic operators
+ index_0 = self.index
+ while True:
+ c = self.char()
+ if c == eol:
+ break
+ elif c in operator_chars:
+ self.consume()
+ else:
+ self.state = self.expr_state
+ break
+ yield (token_types.ident, self.line[index_0:self.index])
+
+ def digit_state(self):
+ index_0 = self.index
+ seen_dot = False
+ while True:
+ c = self.char()
+ if c == eol:
+ break
+ elif c in digits:
+ self.consume()
+ elif c == ".":
+ if seen_dot:
+ raise ParseError(self.filename, self.line_number, "Invalid number")
+ self.consume()
+ seen_dot = True
+ elif c in parens:
+ break
+ elif c in operator_chars:
+ break
+ elif c == " ":
+ break
+ elif c == ":":
+ break
+ else:
+ raise ParseError(self.filename, self.line_number, "Invalid character in number")
+
+ self.state = self.expr_state
+ yield (token_types.number, self.line[index_0:self.index])
+
+ def ident_state(self):
+ index_0 = self.index
+ while True:
+ c = self.char()
+ if c == eol:
+ break
+ elif c == ".":
+ break
+ elif c in parens:
+ break
+ elif c in operator_chars:
+ break
+ elif c == " ":
+ break
+ elif c == ":":
+ break
+ else:
+ self.consume()
+ self.state = self.expr_state
+ yield (token_types.ident, self.line[index_0:self.index])
+
+ def consume_escape(self):
+ assert self.char() == "\\"
+ self.consume()
+ c = self.char()
+ self.consume()
+ if c == "x":
+ return self.decode_escape(2)
+ elif c == "u":
+ return self.decode_escape(4)
+ elif c == "U":
+ return self.decode_escape(6)
+ elif c in ["a", "b", "f", "n", "r", "t", "v"]:
+ return eval(r"'\%s'" % c)
+ elif c is eol:
+ raise ParseError(self.filename, self.line_number, "EOL in escape")
+ else:
+ return c
+
+ def decode_escape(self, length):
+ value = 0
+ for i in range(length):
+ c = self.char()
+ value *= 16
+ value += self.escape_value(c)
+ self.consume()
+
+ return chr(value)
+
+ def escape_value(self, c):
+ if '0' <= c <= '9':
+ return ord(c) - ord('0')
+ elif 'a' <= c <= 'f':
+ return ord(c) - ord('a') + 10
+ elif 'A' <= c <= 'F':
+ return ord(c) - ord('A') + 10
+ else:
+ raise ParseError(self.filename, self.line_number, "Invalid character escape")
+
+
+class Parser:
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.token = None
+ self.unary_operators = "!"
+ self.binary_operators = frozenset(["&&", "||", "=="])
+ self.tokenizer = Tokenizer()
+ self.token_generator = None
+ self.tree = Treebuilder(DataNode(None))
+ self.expr_builder = None
+ self.expr_builders = []
+ self.comments = []
+
+ def parse(self, input):
+ try:
+ self.reset()
+ self.token_generator = self.tokenizer.tokenize(input)
+ self.consume()
+ self.manifest()
+ return self.tree.node
+ except Exception as e:
+ if not isinstance(e, ParseError):
+ raise ParseError(self.tokenizer.filename,
+ self.tokenizer.line_number,
+ str(e))
+ raise
+
+ def consume(self):
+ self.token = next(self.token_generator)
+
+ def expect(self, type, value=None):
+ if self.token[0] != type:
+ raise ParseError(self.tokenizer.filename, self.tokenizer.line_number,
+ f"Token '{self.token[0]}' doesn't equal expected type '{type}'")
+ if value is not None:
+ if self.token[1] != value:
+ raise ParseError(self.tokenizer.filename, self.tokenizer.line_number,
+ f"Token '{self.token[1]}' doesn't equal expected value '{value}'")
+
+ self.consume()
+
+ def maybe_consume_inline_comment(self):
+ if self.token[0] == token_types.inline_comment:
+ self.comments.append(self.token)
+ self.consume()
+
+ def consume_comments(self):
+ while self.token[0] == token_types.comment:
+ self.comments.append(self.token)
+ self.consume()
+
+ def flush_comments(self, target_node=None):
+ """Transfer comments from the parser's buffer to a parse tree node.
+
+ Use the tree's current node if no target node is explicitly specified.
+
+ The comments are buffered because the target node they should belong to
+ may not exist yet. For example:
+
+ [heading]
+ # comment to be attached to the subheading
+ [subheading]
+ """
+ (target_node or self.tree.node).comments.extend(self.comments)
+ self.comments.clear()
+
+ def manifest(self):
+ self.data_block()
+ self.expect(token_types.eof)
+
+ def data_block(self):
+ while self.token[0] in {token_types.comment, token_types.string,
+ token_types.paren}:
+ if self.token[0] == token_types.comment:
+ self.consume_comments()
+ elif self.token[0] == token_types.string:
+ self.tree.append(KeyValueNode(self.token[1]))
+ self.consume()
+ self.expect(token_types.separator)
+ self.maybe_consume_inline_comment()
+ self.flush_comments()
+ self.consume_comments()
+ self.value_block()
+ self.flush_comments()
+ self.tree.pop()
+ else:
+ self.expect(token_types.paren, "[")
+ if self.token[0] != token_types.string:
+ raise ParseError(self.tokenizer.filename,
+ self.tokenizer.line_number,
+ f"Token '{self.token[0]}' is not a string")
+ self.tree.append(DataNode(self.token[1]))
+ self.consume()
+ self.expect(token_types.paren, "]")
+ self.maybe_consume_inline_comment()
+ self.flush_comments()
+ self.consume_comments()
+ if self.token[0] == token_types.group_start:
+ self.consume()
+ self.data_block()
+ self.eof_or_end_group()
+ self.tree.pop()
+
+ def eof_or_end_group(self):
+ if self.token[0] != token_types.eof:
+ self.expect(token_types.group_end)
+
+ def value_block(self):
+ if self.token[0] == token_types.list_start:
+ self.consume()
+ self.list_value()
+ elif self.token[0] == token_types.string:
+ self.value()
+ elif self.token[0] == token_types.group_start:
+ self.consume()
+ self.expression_values()
+ default_value = None
+ if self.token[0] == token_types.string:
+ default_value = self.value
+ elif self.token[0] == token_types.atom:
+ default_value = self.atom
+ elif self.token[0] == token_types.list_start:
+ self.consume()
+ default_value = self.list_value
+ if default_value:
+ default_value()
+ # For this special case where a group exists, attach comments to
+ # the string/list value, not the key-value node. That is,
+ # key:
+ # ...
+ # # comment attached to condition default
+ # value
+ #
+ # should not read
+ # # comment attached to condition default
+ # key:
+ # ...
+ # value
+ self.consume_comments()
+ self.flush_comments(
+ self.tree.node.children[-1] if default_value else None)
+ self.eof_or_end_group()
+ elif self.token[0] == token_types.atom:
+ self.atom()
+ else:
+ raise ParseError(self.tokenizer.filename, self.tokenizer.line_number,
+ f"Token '{self.token[0]}' is not a known type")
+
+ def list_value(self):
+ self.tree.append(ListNode())
+ self.maybe_consume_inline_comment()
+ while self.token[0] in (token_types.atom, token_types.string):
+ if self.token[0] == token_types.atom:
+ self.atom()
+ else:
+ self.value()
+ self.expect(token_types.list_end)
+ self.maybe_consume_inline_comment()
+ self.tree.pop()
+
+ def expression_values(self):
+ self.consume_comments()
+ while self.token == (token_types.ident, "if"):
+ self.consume()
+ self.tree.append(ConditionalNode())
+ self.expr_start()
+ self.expect(token_types.separator)
+ self.value_block()
+ self.flush_comments()
+ self.tree.pop()
+ self.consume_comments()
+
+ def value(self):
+ self.tree.append(ValueNode(self.token[1]))
+ self.consume()
+ self.maybe_consume_inline_comment()
+ self.tree.pop()
+
+ def atom(self):
+ if self.token[1] not in atoms:
+ raise ParseError(self.tokenizer.filename, self.tokenizer.line_number, "Unrecognised symbol @%s" % self.token[1])
+ self.tree.append(AtomNode(atoms[self.token[1]]))
+ self.consume()
+ self.maybe_consume_inline_comment()
+ self.tree.pop()
+
+ def expr_start(self):
+ self.expr_builder = ExpressionBuilder(self.tokenizer)
+ self.expr_builders.append(self.expr_builder)
+ self.expr()
+ expression = self.expr_builder.finish()
+ self.expr_builders.pop()
+ self.expr_builder = self.expr_builders[-1] if self.expr_builders else None
+ if self.expr_builder:
+ self.expr_builder.operands[-1].children[-1].append(expression)
+ else:
+ self.tree.append(expression)
+ self.tree.pop()
+
+ def expr(self):
+ self.expr_operand()
+ while (self.token[0] == token_types.ident and self.token[1] in binary_operators):
+ self.expr_bin_op()
+ self.expr_operand()
+
+ def expr_operand(self):
+ if self.token == (token_types.paren, "("):
+ self.consume()
+ self.expr_builder.left_paren()
+ self.expr()
+ self.expect(token_types.paren, ")")
+ self.expr_builder.right_paren()
+ elif self.token[0] == token_types.ident and self.token[1] in unary_operators:
+ self.expr_unary_op()
+ self.expr_operand()
+ elif self.token[0] in [token_types.string, token_types.ident]:
+ self.expr_value()
+ elif self.token[0] == token_types.number:
+ self.expr_number()
+ else:
+ raise ParseError(self.tokenizer.filename, self.tokenizer.line_number, "Unrecognised operand")
+
+ def expr_unary_op(self):
+ if self.token[1] in unary_operators:
+ self.expr_builder.push_operator(UnaryOperatorNode(self.token[1]))
+ self.consume()
+ else:
+ raise ParseError(self.tokenizer.filename, self.tokenizer.line_number, "Expected unary operator")
+
+ def expr_bin_op(self):
+ if self.token[1] in binary_operators:
+ self.expr_builder.push_operator(BinaryOperatorNode(self.token[1]))
+ self.consume()
+ else:
+ raise ParseError(self.tokenizer.filename, self.tokenizer.line_number, "Expected binary operator")
+
+ def expr_value(self):
+ node_type = {token_types.string: StringNode,
+ token_types.ident: VariableNode}[self.token[0]]
+ self.expr_builder.push_operand(node_type(self.token[1]))
+ self.consume()
+ if self.token == (token_types.paren, "["):
+ self.consume()
+ self.expr_builder.operands[-1].append(IndexNode())
+ self.expr_start()
+ self.expect(token_types.paren, "]")
+
+ def expr_number(self):
+ self.expr_builder.push_operand(NumberNode(self.token[1]))
+ self.consume()
+
+
+class Treebuilder:
+ def __init__(self, root):
+ self.root = root
+ self.node = root
+
+ def append(self, node):
+ assert isinstance(node, Node)
+ self.node.append(node)
+ self.node = node
+ assert self.node is not None
+ return node
+
+ def pop(self):
+ node = self.node
+ self.node = self.node.parent
+ assert self.node is not None
+ return node
+
+
+class ExpressionBuilder:
+ def __init__(self, tokenizer):
+ self.operands = []
+ self.operators = [None]
+ self.tokenizer = tokenizer
+
+ def finish(self):
+ while self.operators[-1] is not None:
+ self.pop_operator()
+ rv = self.pop_operand()
+ assert self.is_empty()
+ return rv
+
+ def left_paren(self):
+ self.operators.append(None)
+
+ def right_paren(self):
+ while self.operators[-1] is not None:
+ self.pop_operator()
+ if not self.operators:
+ raise ParseError(self.tokenizer.filename, self.tokenizer.line_number,
+ "Unbalanced parens")
+
+ assert self.operators.pop() is None
+
+ def push_operator(self, operator):
+ assert operator is not None
+ while self.precedence(self.operators[-1]) > self.precedence(operator):
+ self.pop_operator()
+
+ self.operators.append(operator)
+
+ def pop_operator(self):
+ operator = self.operators.pop()
+ if isinstance(operator, BinaryOperatorNode):
+ operand_1 = self.operands.pop()
+ operand_0 = self.operands.pop()
+ self.operands.append(BinaryExpressionNode(operator, operand_0, operand_1))
+ else:
+ operand_0 = self.operands.pop()
+ self.operands.append(UnaryExpressionNode(operator, operand_0))
+
+ def push_operand(self, node):
+ self.operands.append(node)
+
+ def pop_operand(self):
+ return self.operands.pop()
+
+ def is_empty(self):
+ return len(self.operands) == 0 and all(item is None for item in self.operators)
+
+ def precedence(self, operator):
+ if operator is None:
+ return 0
+ return precedence(operator)
+
+
+def parse(stream):
+ p = Parser()
+ return p.parse(stream)
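+
+# Quick sketch (input bytes are illustrative): parse() returns the root
+# DataNode of the AST, which the backends in ..backends turn into usable
+# objects.
+#
+#     tree = parse(b"[example.html]\n  expected: FAIL\n")
+#     tree.children[0]              # <DataNode example.html>
+#     tree.children[0].children[0]  # <KeyValueNode expected>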
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/serializer.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/serializer.py
new file mode 100644
index 0000000000..e749add74e
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/serializer.py
@@ -0,0 +1,160 @@
+# mypy: allow-untyped-defs
+
+from six import ensure_text
+
+from .node import NodeVisitor, ValueNode, ListNode, BinaryExpressionNode
+from .parser import atoms, precedence, token_types
+
+atom_names = {v: "@%s" % k for (k,v) in atoms.items()}
+
+named_escapes = {"\a", "\b", "\f", "\n", "\r", "\t", "\v"}
+
+def escape(string, extras=""):
+ # Assumes the input is either UTF-8-encoded bytes or already-decoded text.
+ rv = ""
+ for c in string:
+ if c in named_escapes:
+ rv += c.encode("unicode_escape").decode()
+ elif c == "\\":
+ rv += "\\\\"
+ elif c < '\x20':
+ rv += "\\x%02x" % ord(c)
+ elif c in extras:
+ rv += "\\" + c
+ else:
+ rv += c
+ return ensure_text(rv)
+
+
+class ManifestSerializer(NodeVisitor):
+ def __init__(self, skip_empty_data=False):
+ self.skip_empty_data = skip_empty_data
+
+ def serialize(self, root):
+ self.indent = 2
+ rv = "\n".join(self.visit(root))
+ if not rv:
+ return rv
+ rv = rv.strip()
+ if rv[-1] != "\n":
+ rv = rv + "\n"
+ return rv
+
+ def visit(self, node):
+ lines = super().visit(node)
+ comments = [f"#{comment}" for _, comment in node.comments]
+ # Simply checking if the first line contains '#' is less than ideal; the
+ # character might be escaped or within a string.
+ if lines and "#" not in lines[0]:
+ for i, (token_type, comment) in enumerate(node.comments):
+ if token_type == token_types.inline_comment:
+ lines[0] += f" #{comment}"
+ comments.pop(i)
+ break
+ return comments + lines
+
+ def visit_DataNode(self, node):
+ rv = []
+ if not self.skip_empty_data or node.children:
+ if node.data:
+ rv.append("[%s]" % escape(node.data, extras="]"))
+ indent = self.indent * " "
+ else:
+ indent = ""
+
+ for child in node.children:
+ rv.extend("%s%s" % (indent if item else "", item) for item in self.visit(child))
+
+ if node.parent:
+ rv.append("")
+
+ return rv
+
+ def visit_KeyValueNode(self, node):
+ rv = [escape(node.data, ":") + ":"]
+ indent = " " * self.indent
+
+ if len(node.children) == 1 and isinstance(node.children[0], (ValueNode, ListNode)):
+ rv[0] += " %s" % self.visit(node.children[0])[0]
+ else:
+ for child in node.children:
+ rv.extend(indent + line for line in self.visit(child))
+
+ return rv
+
+ def visit_ListNode(self, node):
+ rv = ["["]
+ rv.extend(", ".join(self.visit(child)[0] for child in node.children))
+ rv.append("]")
+ return ["".join(rv)]
+
+ def visit_ValueNode(self, node):
+ data = ensure_text(node.data)
+ if ("#" in data or
+ data.startswith("if ") or
+ (isinstance(node.parent, ListNode) and
+ ("," in data or "]" in data))):
+ if "\"" in data:
+ quote = "'"
+ else:
+ quote = "\""
+ else:
+ quote = ""
+ return [quote + escape(data, extras=quote) + quote]
+
+ def visit_AtomNode(self, node):
+ return [atom_names[node.data]]
+
+ def visit_ConditionalNode(self, node):
+ return ["if %s: %s" % tuple(self.visit(item)[0] for item in node.children)]
+
+ def visit_StringNode(self, node):
+ rv = ["\"%s\"" % escape(node.data, extras="\"")]
+ for child in node.children:
+ rv[0] += self.visit(child)[0]
+ return rv
+
+ def visit_NumberNode(self, node):
+ return [ensure_text(node.data)]
+
+ def visit_VariableNode(self, node):
+ rv = escape(node.data)
+ for child in node.children:
+ rv += self.visit(child)
+ return [rv]
+
+ def visit_IndexNode(self, node):
+ assert len(node.children) == 1
+ return ["[%s]" % self.visit(node.children[0])[0]]
+
+ def visit_UnaryExpressionNode(self, node):
+ children = []
+ for child in node.children:
+ child_str = self.visit(child)[0]
+ if isinstance(child, BinaryExpressionNode):
+ child_str = "(%s)" % child_str
+ children.append(child_str)
+ return [" ".join(children)]
+
+ def visit_BinaryExpressionNode(self, node):
+ assert len(node.children) == 3
+ children = []
+ for child_index in [1, 0, 2]:
+ child = node.children[child_index]
+ child_str = self.visit(child)[0]
+ if (isinstance(child, BinaryExpressionNode) and
+ precedence(node.children[0]) < precedence(child.children[0])):
+ child_str = "(%s)" % child_str
+ children.append(child_str)
+ return [" ".join(children)]
+
+ def visit_UnaryOperatorNode(self, node):
+ return [ensure_text(node.data)]
+
+ def visit_BinaryOperatorNode(self, node):
+ return [ensure_text(node.data)]
+
+
+def serialize(tree, *args, **kwargs):
+ s = ManifestSerializer(*args, **kwargs)
+ return s.serialize(tree)
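+
+# Round-trip sketch (input bytes are illustrative): serializing a parsed tree
+# reproduces an equivalent manifest.
+#
+#     from .parser import parse
+#     tree = parse(b"[example.html]\n  expected: FAIL\n")
+#     serialize(tree)   # -> "[example.html]\n  expected: FAIL\n"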
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/__init__.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/__init__.py
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_conditional.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_conditional.py
new file mode 100644
index 0000000000..0059b98556
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_conditional.py
@@ -0,0 +1,143 @@
+# mypy: allow-untyped-defs
+
+import unittest
+
+from ..backends import conditional
+from ..node import BinaryExpressionNode, BinaryOperatorNode, VariableNode, NumberNode
+
+
+class TestConditional(unittest.TestCase):
+ def compile(self, input_text):
+ return conditional.compile(input_text)
+
+ def test_get_0(self):
+ data = b"""
+key: value
+
+[Heading 1]
+ other_key:
+ if a == 1: value_1
+ if a == 2: value_2
+ value_3
+"""
+
+ manifest = self.compile(data)
+
+ self.assertEqual(manifest.get("key"), "value")
+ children = list(item for item in manifest.iterchildren())
+ self.assertEqual(len(children), 1)
+ section = children[0]
+ self.assertEqual(section.name, "Heading 1")
+
+ self.assertEqual(section.get("other_key", {"a": 1}), "value_1")
+ self.assertEqual(section.get("other_key", {"a": 2}), "value_2")
+ self.assertEqual(section.get("other_key", {"a": 7}), "value_3")
+ self.assertEqual(section.get("key"), "value")
+
+ def test_get_1(self):
+ data = b"""
+key: value
+
+[Heading 1]
+ other_key:
+ if a == "1": value_1
+ if a == 2: value_2
+ value_3
+"""
+
+ manifest = self.compile(data)
+
+ children = list(item for item in manifest.iterchildren())
+ section = children[0]
+
+ self.assertEqual(section.get("other_key", {"a": "1"}), "value_1")
+ self.assertEqual(section.get("other_key", {"a": 1}), "value_3")
+
+ def test_get_2(self):
+ data = b"""
+key:
+ if a[1] == "b": value_1
+ if a[1] == 2: value_2
+ value_3
+"""
+
+ manifest = self.compile(data)
+
+ self.assertEqual(manifest.get("key", {"a": "ab"}), "value_1")
+ self.assertEqual(manifest.get("key", {"a": [1, 2]}), "value_2")
+
+ def test_get_3(self):
+ data = b"""
+key:
+ if a[1] == "ab"[1]: value_1
+ if a[1] == 2: value_2
+ value_3
+"""
+
+ manifest = self.compile(data)
+
+ self.assertEqual(manifest.get("key", {"a": "ab"}), "value_1")
+ self.assertEqual(manifest.get("key", {"a": [1, 2]}), "value_2")
+
+ def test_set_0(self):
+ data = b"""
+key:
+ if a == "a": value_1
+ if a == "b": value_2
+ value_3
+"""
+ manifest = self.compile(data)
+
+ manifest.set("new_key", "value_new")
+
+ self.assertEqual(manifest.get("new_key"), "value_new")
+
+ def test_set_1(self):
+ data = b"""
+key:
+ if a == "a": value_1
+ if a == "b": value_2
+ value_3
+"""
+
+ manifest = self.compile(data)
+
+ manifest.set("key", "value_new")
+
+ self.assertEqual(manifest.get("key"), "value_new")
+ self.assertEqual(manifest.get("key", {"a": "a"}), "value_1")
+
+ def test_set_2(self):
+ data = b"""
+key:
+ if a == "a": value_1
+ if a == "b": value_2
+ value_3
+"""
+
+ manifest = self.compile(data)
+
+ expr = BinaryExpressionNode(BinaryOperatorNode("=="),
+ VariableNode("a"),
+ NumberNode("1"))
+
+ manifest.set("key", "value_new", expr)
+
+ self.assertEqual(manifest.get("key", {"a": 1}), "value_new")
+ self.assertEqual(manifest.get("key", {"a": "a"}), "value_1")
+
+ def test_api_0(self):
+ data = b"""
+key:
+ if a == 1.5: value_1
+ value_2
+key_1: other_value
+"""
+ manifest = self.compile(data)
+
+ self.assertFalse(manifest.is_empty)
+ self.assertEqual(manifest.root, manifest)
+ self.assertTrue(manifest.has_key("key_1"))
+ self.assertFalse(manifest.has_key("key_2"))
+
+ self.assertEqual(set(manifest.iterkeys()), {"key", "key_1"})
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_parser.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_parser.py
new file mode 100644
index 0000000000..a220307088
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_parser.py
@@ -0,0 +1,155 @@
+# mypy: allow-untyped-defs
+
+import unittest
+
+from .. import parser
+
+# There aren't many tests here because it turns out to be way more convenient to
+# use test_serializer for the majority of cases
+
+
+class TestExpression(unittest.TestCase):
+ def setUp(self):
+ self.parser = parser.Parser()
+
+ def parse(self, input_str):
+ return self.parser.parse(input_str)
+
+ def compare(self, input_text, expected):
+ actual = self.parse(input_text)
+ self.match(expected, actual)
+
+ def match(self, expected_node, actual_node):
+ self.assertEqual(expected_node[0], actual_node.__class__.__name__)
+ self.assertEqual(expected_node[1], actual_node.data)
+ self.assertEqual(len(expected_node[2]), len(actual_node.children))
+ for expected_child, actual_child in zip(expected_node[2], actual_node.children):
+ self.match(expected_child, actual_child)
+
+ def test_expr_0(self):
+ self.compare(
+ b"""
+key:
+ if x == 1 : value""",
+ ["DataNode", None,
+ [["KeyValueNode", "key",
+ [["ConditionalNode", None,
+ [["BinaryExpressionNode", None,
+ [["BinaryOperatorNode", "==", []],
+ ["VariableNode", "x", []],
+ ["NumberNode", "1", []]
+ ]],
+ ["ValueNode", "value", []],
+ ]]]]]]
+ )
+
+ def test_expr_1(self):
+ self.compare(
+ b"""
+key:
+ if not x and y : value""",
+ ["DataNode", None,
+ [["KeyValueNode", "key",
+ [["ConditionalNode", None,
+ [["BinaryExpressionNode", None,
+ [["BinaryOperatorNode", "and", []],
+ ["UnaryExpressionNode", None,
+ [["UnaryOperatorNode", "not", []],
+ ["VariableNode", "x", []]
+ ]],
+ ["VariableNode", "y", []]
+ ]],
+ ["ValueNode", "value", []],
+ ]]]]]]
+ )
+
+ def test_expr_2(self):
+ self.compare(
+ b"""
+key:
+ if x == 1 : [value1, value2]""",
+ ["DataNode", None,
+ [["KeyValueNode", "key",
+ [["ConditionalNode", None,
+ [["BinaryExpressionNode", None,
+ [["BinaryOperatorNode", "==", []],
+ ["VariableNode", "x", []],
+ ["NumberNode", "1", []]
+ ]],
+ ["ListNode", None,
+ [["ValueNode", "value1", []],
+ ["ValueNode", "value2", []]]],
+ ]]]]]]
+ )
+
+ def test_expr_3(self):
+ self.compare(
+ b"""
+key:
+ if x == 1: 'if b: value'""",
+ ["DataNode", None,
+ [["KeyValueNode", "key",
+ [["ConditionalNode", None,
+ [["BinaryExpressionNode", None,
+ [["BinaryOperatorNode", "==", []],
+ ["VariableNode", "x", []],
+ ["NumberNode", "1", []]
+ ]],
+ ["ValueNode", "if b: value", []],
+ ]]]]]]
+ )
+
+ def test_atom_0(self):
+ with self.assertRaises(parser.ParseError):
+ self.parse(b"key: @Unknown")
+
+ def test_atom_1(self):
+ with self.assertRaises(parser.ParseError):
+ self.parse(b"key: @true")
+
+ def test_list_expr(self):
+ self.compare(
+ b"""
+key:
+ if x == 1: [a]
+ [b]""",
+ ["DataNode", None,
+ [["KeyValueNode", "key",
+ [["ConditionalNode", None,
+ [["BinaryExpressionNode", None,
+ [["BinaryOperatorNode", "==", []],
+ ["VariableNode", "x", []],
+ ["NumberNode", "1", []]
+ ]],
+ ["ListNode", None,
+ [["ValueNode", "a", []]]],
+ ]],
+ ["ListNode", None,
+ [["ValueNode", "b", []]]]]]]])
+
+ def test_list_heading(self):
+ self.compare(
+ b"""
+key:
+ if x == 1: [a]
+[b]""",
+ ["DataNode", None,
+ [["KeyValueNode", "key",
+ [["ConditionalNode", None,
+ [["BinaryExpressionNode", None,
+ [["BinaryOperatorNode", "==", []],
+ ["VariableNode", "x", []],
+ ["NumberNode", "1", []]
+ ]],
+ ["ListNode", None,
+ [["ValueNode", "a", []]]],
+ ]]]],
+ ["DataNode", "b", []]]])
+
+ def test_if_1(self):
+ with self.assertRaises(parser.ParseError):
+ self.parse(b"key: if foo")
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_serializer.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_serializer.py
new file mode 100644
index 0000000000..d73668ac64
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_serializer.py
@@ -0,0 +1,356 @@
+# mypy: allow-untyped-defs
+
+import textwrap
+import unittest
+
+from .. import parser, serializer
+
+
+class SerializerTest(unittest.TestCase):
+ def setUp(self):
+ self.serializer = serializer.ManifestSerializer()
+ self.parser = parser.Parser()
+
+ def serialize(self, input_str):
+ return self.serializer.serialize(self.parser.parse(input_str))
+
+ def compare(self, input_str, expected=None):
+ if expected is None:
+ expected = input_str.decode("utf-8")
+ actual = self.serialize(input_str)
+ self.assertEqual(actual, expected)
+
+ def test_0(self):
+ self.compare(b"""key: value
+[Heading 1]
+ other_key: other_value
+""")
+
+ def test_1(self):
+ self.compare(b"""key: value
+[Heading 1]
+ other_key:
+ if a or b: other_value
+""")
+
+ def test_2(self):
+ self.compare(b"""key: value
+[Heading 1]
+ other_key:
+ if a or b: other_value
+ fallback_value
+""")
+
+ def test_3(self):
+ self.compare(b"""key: value
+[Heading 1]
+ other_key:
+ if a == 1: other_value
+ fallback_value
+""")
+
+ def test_4(self):
+ self.compare(b"""key: value
+[Heading 1]
+ other_key:
+ if a == "1": other_value
+ fallback_value
+""")
+
+ def test_5(self):
+ self.compare(b"""key: value
+[Heading 1]
+ other_key:
+ if a == "abc"[1]: other_value
+ fallback_value
+""")
+
+ def test_6(self):
+ self.compare(b"""key: value
+[Heading 1]
+ other_key:
+ if a == "abc"[c]: other_value
+ fallback_value
+""")
+
+ def test_7(self):
+ self.compare(b"""key: value
+[Heading 1]
+ other_key:
+ if (a or b) and c: other_value
+ fallback_value
+""",
+"""key: value
+[Heading 1]
+ other_key:
+ if a or b and c: other_value
+ fallback_value
+""")
+
+ def test_8(self):
+ self.compare(b"""key: value
+[Heading 1]
+ other_key:
+ if a or (b and c): other_value
+ fallback_value
+""")
+
+ def test_9(self):
+ self.compare(b"""key: value
+[Heading 1]
+ other_key:
+ if not (a and b): other_value
+ fallback_value
+""")
+
+ def test_10(self):
+ self.compare(b"""key: value
+[Heading 1]
+ some_key: some_value
+
+[Heading 2]
+ other_key: other_value
+""")
+
+ def test_11(self):
+ self.compare(b"""key:
+ if not a and b and c and d: true
+""")
+
+ def test_12(self):
+ self.compare(b"""[Heading 1]
+ key: [a:1, b:2]
+""")
+
+ def test_13(self):
+ self.compare(b"""key: [a:1, "b:#"]
+""")
+
+ def test_14(self):
+ self.compare(b"""key: [","]
+""")
+
+ def test_15(self):
+ self.compare(b"""key: ,
+""")
+
+ def test_16(self):
+ self.compare(b"""key: ["]", b]
+""")
+
+ def test_17(self):
+ self.compare(b"""key: ]
+""")
+
+ def test_18(self):
+ self.compare(br"""key: \]
+ """, """key: ]
+""")
+
+ def test_atom_as_default(self):
+ self.compare(
+ textwrap.dedent(
+ """\
+ key:
+ if a == 1: @True
+ @False
+ """).encode())
+
+ def test_escape_0(self):
+ self.compare(br"""k\t\:y: \a\b\f\n\r\t\v""",
+ r"""k\t\:y: \x07\x08\x0c\n\r\t\x0b
+""")
+
+ def test_escape_1(self):
+ self.compare(br"""k\x00: \x12A\x45""",
+ r"""k\x00: \x12AE
+""")
+
+ def test_escape_2(self):
+ self.compare(br"""k\u0045y: \u1234A\uABc6""",
+ """kEy: \u1234A\uabc6
+""")
+
+ def test_escape_3(self):
+ self.compare(br"""k\u0045y: \u1234A\uABc6""",
+ """kEy: \u1234A\uabc6
+""")
+
+ def test_escape_4(self):
+ self.compare(br"""key: '\u1234A\uABc6'""",
+ """key: \u1234A\uabc6
+""")
+
+ def test_escape_5(self):
+ self.compare(br"""key: [\u1234A\uABc6]""",
+ """key: [\u1234A\uabc6]
+""")
+
+ def test_escape_6(self):
+ self.compare(br"""key: [\u1234A\uABc6\,]""",
+ """key: ["\u1234A\uabc6,"]
+""")
+
+ def test_escape_7(self):
+ self.compare(br"""key: [\,\]\#]""",
+ r"""key: [",]#"]
+""")
+
+ def test_escape_8(self):
+ self.compare(br"""key: \#""",
+ r"""key: "#"
+""")
+
+ def test_escape_9(self):
+ self.compare(br"""key: \U10FFFFabc""",
+ """key: \U0010FFFFabc
+""")
+
+ def test_escape_10(self):
+ self.compare(br"""key: \u10FFab""",
+ """key: \u10FFab
+""")
+
+ def test_escape_11(self):
+ self.compare(br"""key: \\ab
+""")
+
+ def test_atom_1(self):
+ self.compare(br"""key: @True
+""")
+
+ def test_atom_2(self):
+ self.compare(br"""key: @False
+""")
+
+ def test_atom_3(self):
+ self.compare(br"""key: @Reset
+""")
+
+ def test_atom_4(self):
+ self.compare(br"""key: [a, @Reset, b]
+""")
+
+ def test_conditional_1(self):
+ self.compare(b"""foo:
+ if a or b: [1, 2]
+""")
+
+ def test_if_string_0(self):
+ self.compare(b"""foo: "if bar"
+""")
+
+ def test_non_ascii_1(self):
+ self.compare(b"""[\xf0\x9f\x99\x84]
+""")
+
+ def test_comments_preceding_kv_pair(self):
+ self.compare(
+ textwrap.dedent(
+ """\
+ # These two comments should be attached
+ # to the first key-value pair.
+ key1: value
+ # Attached to the second pair.
+ key2: value
+ """).encode())
+
+ def test_comments_preceding_headings(self):
+ self.compare(
+ textwrap.dedent(
+ """\
+ # Attached to the first heading.
+ [test1.html]
+
+ # Attached to the second heading.
+ [test2.html]
+ # Attached to subheading.
+ # Also attached to subheading.
+ [subheading] # Also attached to subheading (inline).
+ """).encode(),
+ textwrap.dedent(
+ """\
+ # Attached to the first heading.
+ [test1.html]
+
+ # Attached to the second heading.
+ [test2.html]
+ # Attached to subheading.
+ # Also attached to subheading.
+ [subheading] # Also attached to subheading (inline).
+ """))
+
+ def test_comments_inline(self):
+ self.compare(
+ textwrap.dedent(
+ """\
+ key1: # inline after key
+ value # inline after string value
+ key2:
+ [value] # inline after list in group
+ [test.html] # inline after heading
+ key1: @True # inline after atom
+ key2: [ # inline after list start
+ @False, # inline after atom in list
+ value1, # inline after value in list
+ value2] # inline after list end
+ """).encode(),
+ textwrap.dedent(
+ """\
+ # inline after key
+ key1: value # inline after string value
+ key2: [value] # inline after list in group
+ [test.html] # inline after heading
+ key1: @True # inline after atom
+ # inline after atom in list
+ # inline after value in list
+ # inline after list end
+ key2: [@False, value1, value2] # inline after list start
+ """))
+
+ def test_comments_conditions(self):
+ self.compare(
+ textwrap.dedent(
+ """\
+ key1:
+ # cond 1
+ if cond == 1: value
+ # cond 2
+ if cond == 2: value # cond 2
+ # cond 3
+ # cond 3
+ if cond == 3: value
+ # default 0
+ default # default 1
+ # default 2
+ # default 3
+ key2:
+ if cond == 1: value
+ [value]
+ # list default
+ key3:
+ if cond == 1: value
+ # no default
+ """).encode(),
+ textwrap.dedent(
+ """\
+ key1:
+ # cond 1
+ if cond == 1: value
+ # cond 2
+ if cond == 2: value # cond 2
+ # cond 3
+ # cond 3
+ if cond == 3: value
+ # default 0
+ # default 2
+ # default 3
+ default # default 1
+ key2:
+ if cond == 1: value
+ # list default
+ [value]
+ # no default
+ key3:
+ if cond == 1: value
+ """))
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_static.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_static.py
new file mode 100644
index 0000000000..0ded07f42d
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_static.py
@@ -0,0 +1,98 @@
+# mypy: allow-untyped-defs
+
+import unittest
+
+from ..backends import static
+
+# There aren't many tests here because it turns out to be way more convenient to
+# use test_serializer for the majority of cases
+
+
+class TestStatic(unittest.TestCase):
+ def compile(self, input_text, input_data):
+ return static.compile(input_text, input_data)
+
+ def test_get_0(self):
+ data = b"""
+key: value
+
+[Heading 1]
+ other_key:
+ if a == 1: value_1
+ if a == 2: value_2
+ value_3
+"""
+
+ manifest = self.compile(data, {"a": 2})
+
+ self.assertEqual(manifest.get("key"), "value")
+ children = list(item for item in manifest.iterchildren())
+ self.assertEqual(len(children), 1)
+ section = children[0]
+ self.assertEqual(section.name, "Heading 1")
+
+ self.assertEqual(section.get("other_key"), "value_2")
+ self.assertEqual(section.get("key"), "value")
+
+ def test_get_1(self):
+ data = b"""
+key: value
+
+[Heading 1]
+ other_key:
+ if a == 1: value_1
+ if a == 2: value_2
+ value_3
+"""
+ manifest = self.compile(data, {"a": 3})
+
+ children = list(item for item in manifest.iterchildren())
+ section = children[0]
+ self.assertEqual(section.get("other_key"), "value_3")
+
+ def test_get_3(self):
+ data = b"""key:
+ if a == "1": value_1
+ if a[0] == "ab"[0]: value_2
+"""
+ manifest = self.compile(data, {"a": "1"})
+ self.assertEqual(manifest.get("key"), "value_1")
+
+ manifest = self.compile(data, {"a": "ac"})
+ self.assertEqual(manifest.get("key"), "value_2")
+
+ def test_get_4(self):
+ data = b"""key:
+ if not a: value_1
+ value_2
+"""
+ manifest = self.compile(data, {"a": True})
+ self.assertEqual(manifest.get("key"), "value_2")
+
+ manifest = self.compile(data, {"a": False})
+ self.assertEqual(manifest.get("key"), "value_1")
+
+ def test_api(self):
+ data = b"""key:
+ if a == 1.5: value_1
+ value_2
+key_1: other_value
+"""
+ manifest = self.compile(data, {"a": 1.5})
+
+ self.assertFalse(manifest.is_empty)
+ self.assertEqual(manifest.root, manifest)
+ self.assertTrue(manifest.has_key("key_1"))
+ self.assertFalse(manifest.has_key("key_2"))
+
+ self.assertEqual(set(manifest.iterkeys()), {"key", "key_1"})
+ self.assertEqual(set(manifest.itervalues()), {"value_1", "other_value"})
+
+ def test_is_empty_1(self):
+ data = b"""
+[Section]
+ [Subsection]
+"""
+ manifest = self.compile(data, {})
+
+ self.assertTrue(manifest.is_empty)
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_tokenizer.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_tokenizer.py
new file mode 100644
index 0000000000..6b9d052560
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_tokenizer.py
@@ -0,0 +1,385 @@
+# mypy: allow-untyped-defs
+
+import textwrap
+import unittest
+
+from .. import parser
+from ..parser import token_types
+
+
+class TokenizerTest(unittest.TestCase):
+ def setUp(self):
+ self.tokenizer = parser.Tokenizer()
+
+ def tokenize(self, input_str):
+ rv = []
+ for item in self.tokenizer.tokenize(input_str):
+ rv.append(item)
+ if item[0] == token_types.eof:
+ break
+ return rv
+
+ def compare(self, input_text, expected):
+ expected = expected + [(token_types.eof, None)]
+ actual = self.tokenize(input_text)
+ self.assertEqual(actual, expected)
+
+ def test_heading_0(self):
+ self.compare(b"""[Heading text]""",
+ [(token_types.paren, "["),
+ (token_types.string, "Heading text"),
+ (token_types.paren, "]")])
+
+ def test_heading_1(self):
+ self.compare(br"""[Heading [text\]]""",
+ [(token_types.paren, "["),
+ (token_types.string, "Heading [text]"),
+ (token_types.paren, "]")])
+
+ def test_heading_2(self):
+ self.compare(b"""[Heading #text]""",
+ [(token_types.paren, "["),
+ (token_types.string, "Heading #text"),
+ (token_types.paren, "]")])
+
+ def test_heading_3(self):
+ self.compare(br"""[Heading [\]text]""",
+ [(token_types.paren, "["),
+ (token_types.string, "Heading []text"),
+ (token_types.paren, "]")])
+
+ def test_heading_4(self):
+ with self.assertRaises(parser.ParseError):
+ self.tokenize(b"[Heading")
+
+ def test_heading_5(self):
+ self.compare(br"""[Heading [\]text] #comment""",
+ [(token_types.paren, "["),
+ (token_types.string, "Heading []text"),
+ (token_types.paren, "]"),
+ (token_types.inline_comment, "comment")])
+
+ def test_heading_6(self):
+ self.compare(br"""[Heading \ttext]""",
+ [(token_types.paren, "["),
+ (token_types.string, "Heading \ttext"),
+ (token_types.paren, "]")])
+
+ def test_key_0(self):
+ self.compare(b"""key:value""",
+ [(token_types.string, "key"),
+ (token_types.separator, ":"),
+ (token_types.string, "value")])
+
+ def test_key_1(self):
+ self.compare(b"""key : value""",
+ [(token_types.string, "key"),
+ (token_types.separator, ":"),
+ (token_types.string, "value")])
+
+ def test_key_2(self):
+ self.compare(b"""key : val ue""",
+ [(token_types.string, "key"),
+ (token_types.separator, ":"),
+ (token_types.string, "val ue")])
+
+ def test_key_3(self):
+ self.compare(b"""key: value#comment""",
+ [(token_types.string, "key"),
+ (token_types.separator, ":"),
+ (token_types.string, "value"),
+ (token_types.inline_comment, "comment")])
+
+ def test_key_4(self):
+ with self.assertRaises(parser.ParseError):
+ self.tokenize(b"""ke y: value""")
+
+ def test_key_5(self):
+ with self.assertRaises(parser.ParseError):
+ self.tokenize(b"""key""")
+
+ def test_key_6(self):
+ self.compare(b"""key: "value\"""",
+ [(token_types.string, "key"),
+ (token_types.separator, ":"),
+ (token_types.string, "value")])
+
+ def test_key_7(self):
+ self.compare(b"""key: 'value'""",
+ [(token_types.string, "key"),
+ (token_types.separator, ":"),
+ (token_types.string, "value")])
+
+ def test_key_8(self):
+ self.compare(b"""key: "#value\"""",
+ [(token_types.string, "key"),
+ (token_types.separator, ":"),
+ (token_types.string, "#value")])
+
+ def test_key_9(self):
+ self.compare(b"""key: '#value\'""",
+ [(token_types.string, "key"),
+ (token_types.separator, ":"),
+ (token_types.string, "#value")])
+
+ def test_key_10(self):
+ with self.assertRaises(parser.ParseError):
+ self.tokenize(b"""key: "value""")
+
+ def test_key_11(self):
+ with self.assertRaises(parser.ParseError):
+ self.tokenize(b"""key: 'value""")
+
+ def test_key_12(self):
+ with self.assertRaises(parser.ParseError):
+ self.tokenize(b"""key: 'value""")
+
+ def test_key_13(self):
+ with self.assertRaises(parser.ParseError):
+ self.tokenize(b"""key: 'value' abc""")
+
+ def test_key_14(self):
+ self.compare(br"""key: \\nb""",
+ [(token_types.string, "key"),
+ (token_types.separator, ":"),
+ (token_types.string, r"\nb")])
+
+ def test_list_0(self):
+ self.compare(b"""
+key: []""",
+ [(token_types.string, "key"),
+ (token_types.separator, ":"),
+ (token_types.list_start, "["),
+ (token_types.list_end, "]")])
+
+ def test_list_1(self):
+ self.compare(b"""
+key: [a, "b"]""",
+ [(token_types.string, "key"),
+ (token_types.separator, ":"),
+ (token_types.list_start, "["),
+ (token_types.string, "a"),
+ (token_types.string, "b"),
+ (token_types.list_end, "]")])
+
+ def test_list_2(self):
+ self.compare(b"""
+key: [a,
+ b]""",
+ [(token_types.string, "key"),
+ (token_types.separator, ":"),
+ (token_types.list_start, "["),
+ (token_types.string, "a"),
+ (token_types.string, "b"),
+ (token_types.list_end, "]")])
+
+ def test_list_3(self):
+ self.compare(b"""
+key: [a, #b]
+ c]""",
+ [(token_types.string, "key"),
+ (token_types.separator, ":"),
+ (token_types.list_start, "["),
+ (token_types.string, "a"),
+ (token_types.inline_comment, "b]"),
+ (token_types.string, "c"),
+ (token_types.list_end, "]")])
+
+ def test_list_4(self):
+ with self.assertRaises(parser.ParseError):
+ self.tokenize(b"""key: [a #b]
+ c]""")
+
+ def test_list_5(self):
+ with self.assertRaises(parser.ParseError):
+ self.tokenize(b"""key: [a \\
+ c]""")
+
+ def test_list_6(self):
+ self.compare(b"""key: [a , b]""",
+ [(token_types.string, "key"),
+ (token_types.separator, ":"),
+ (token_types.list_start, "["),
+ (token_types.string, "a"),
+ (token_types.string, "b"),
+ (token_types.list_end, "]")])
+
+ def test_expr_0(self):
+ self.compare(b"""
+key:
+ if cond == 1: value""",
+ [(token_types.string, "key"),
+ (token_types.separator, ":"),
+ (token_types.group_start, None),
+ (token_types.ident, "if"),
+ (token_types.ident, "cond"),
+ (token_types.ident, "=="),
+ (token_types.number, "1"),
+ (token_types.separator, ":"),
+ (token_types.string, "value")])
+
+ def test_expr_1(self):
+ self.compare(b"""
+key:
+ if cond == 1: value1
+ value2""",
+ [(token_types.string, "key"),
+ (token_types.separator, ":"),
+ (token_types.group_start, None),
+ (token_types.ident, "if"),
+ (token_types.ident, "cond"),
+ (token_types.ident, "=="),
+ (token_types.number, "1"),
+ (token_types.separator, ":"),
+ (token_types.string, "value1"),
+ (token_types.string, "value2")])
+
+ def test_expr_2(self):
+ self.compare(b"""
+key:
+ if cond=="1": value""",
+ [(token_types.string, "key"),
+ (token_types.separator, ":"),
+ (token_types.group_start, None),
+ (token_types.ident, "if"),
+ (token_types.ident, "cond"),
+ (token_types.ident, "=="),
+ (token_types.string, "1"),
+ (token_types.separator, ":"),
+ (token_types.string, "value")])
+
+ def test_expr_3(self):
+ self.compare(b"""
+key:
+ if cond==1.1: value""",
+ [(token_types.string, "key"),
+ (token_types.separator, ":"),
+ (token_types.group_start, None),
+ (token_types.ident, "if"),
+ (token_types.ident, "cond"),
+ (token_types.ident, "=="),
+ (token_types.number, "1.1"),
+ (token_types.separator, ":"),
+ (token_types.string, "value")])
+
+ def test_expr_4(self):
+ self.compare(b"""
+key:
+ if cond==1.1 and cond2 == "a": value""",
+ [(token_types.string, "key"),
+ (token_types.separator, ":"),
+ (token_types.group_start, None),
+ (token_types.ident, "if"),
+ (token_types.ident, "cond"),
+ (token_types.ident, "=="),
+ (token_types.number, "1.1"),
+ (token_types.ident, "and"),
+ (token_types.ident, "cond2"),
+ (token_types.ident, "=="),
+ (token_types.string, "a"),
+ (token_types.separator, ":"),
+ (token_types.string, "value")])
+
+ def test_expr_5(self):
+ self.compare(b"""
+key:
+ if (cond==1.1 ): value""",
+ [(token_types.string, "key"),
+ (token_types.separator, ":"),
+ (token_types.group_start, None),
+ (token_types.ident, "if"),
+ (token_types.paren, "("),
+ (token_types.ident, "cond"),
+ (token_types.ident, "=="),
+ (token_types.number, "1.1"),
+ (token_types.paren, ")"),
+ (token_types.separator, ":"),
+ (token_types.string, "value")])
+
+ def test_expr_6(self):
+ self.compare(b"""
+key:
+ if "\\ttest": value""",
+ [(token_types.string, "key"),
+ (token_types.separator, ":"),
+ (token_types.group_start, None),
+ (token_types.ident, "if"),
+ (token_types.string, "\ttest"),
+ (token_types.separator, ":"),
+ (token_types.string, "value")])
+
+ def test_expr_7(self):
+ with self.assertRaises(parser.ParseError):
+ self.tokenize(b"""
+key:
+ if 1A: value""")
+
+ def test_expr_8(self):
+ with self.assertRaises(parser.ParseError):
+ self.tokenize(b"""
+key:
+ if 1a: value""")
+
+ def test_expr_9(self):
+ with self.assertRaises(parser.ParseError):
+ self.tokenize(b"""
+key:
+ if 1.1.1: value""")
+
+ def test_expr_10(self):
+ self.compare(b"""
+key:
+ if 1.: value""",
+ [(token_types.string, "key"),
+ (token_types.separator, ":"),
+ (token_types.group_start, None),
+ (token_types.ident, "if"),
+ (token_types.number, "1."),
+ (token_types.separator, ":"),
+ (token_types.string, "value")])
+
+ def test_comment_with_indents(self):
+ self.compare(
+ textwrap.dedent(
+ """\
+ # comment 0
+ [Heading]
+ # comment 1
+ # comment 2
+ """).encode(),
+ [(token_types.comment, " comment 0"),
+ (token_types.paren, "["),
+ (token_types.string, "Heading"),
+ (token_types.paren, "]"),
+ (token_types.comment, " comment 1"),
+ (token_types.comment, " comment 2")])
+
+ def test_comment_inline(self):
+ self.compare(
+ textwrap.dedent(
+ """\
+ [Heading] # after heading
+ key: # after key
+ # before group start
+ if cond: value1 # after value1
+ value2 # after value2
+ """).encode(),
+ [(token_types.paren, "["),
+ (token_types.string, "Heading"),
+ (token_types.paren, "]"),
+ (token_types.inline_comment, " after heading"),
+ (token_types.string, "key"),
+ (token_types.separator, ":"),
+ (token_types.inline_comment, " after key"),
+ (token_types.comment, " before group start"),
+ (token_types.group_start, None),
+ (token_types.ident, "if"),
+ (token_types.ident, "cond"),
+ (token_types.separator, ":"),
+ (token_types.string, "value1"),
+ (token_types.inline_comment, " after value1"),
+ (token_types.string, "value2"),
+ (token_types.inline_comment, " after value2")])
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptrunner.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptrunner.py
new file mode 100644
index 0000000000..d65369b380
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptrunner.py
@@ -0,0 +1,581 @@
+# mypy: allow-untyped-calls, allow-untyped-defs
+
+import json
+import os
+import signal
+import sys
+from collections import defaultdict
+from datetime import datetime, timedelta
+from typing import Any, Tuple
+
+import wptserve
+from wptserve import sslutils
+
+from . import environment as env
+from . import instruments
+from . import mpcontext
+from . import products
+from . import testloader
+from . import wptcommandline
+from . import wptlogging
+from . import wpttest
+from mozlog import capture, handlers
+from .font import FontInstaller
+from .testrunner import ManagerGroup, TestImplementation
+
+here = os.path.dirname(__file__)
+
+logger = None
+
+"""Runner for web-platform-tests
+
+The runner has several design goals:
+
+* Tests should run with no modification from upstream.
+
+* Tests should be regarded as "untrusted" so that errors, timeouts and even
+ crashes in the tests can be handled without failing the entire test run.
+
+* For performance, tests can be run in multiple browsers in parallel.
+
+The upstream repository has the facility for creating a test manifest in JSON
+format. This manifest is used directly to determine which tests exist. Local
+metadata files are used to store the expected test results.
+"""
+
+def setup_logging(*args, **kwargs):
+ global logger
+ logger = wptlogging.setup(*args, **kwargs)
+ return logger
+
+
+def get_loader(test_paths: wptcommandline.TestPaths,
+ product: products.Product,
+ **kwargs: Any) -> Tuple[testloader.TestQueueBuilder, testloader.TestLoader]:
+ run_info_extras = product.run_info_extras(logger, **kwargs)
+ base_run_info = wpttest.get_run_info(kwargs["run_info"],
+ product.name,
+ browser_version=kwargs.get("browser_version"),
+ browser_channel=kwargs.get("browser_channel"),
+ verify=kwargs.get("verify"),
+ debug=kwargs["debug"],
+ extras=run_info_extras,
+ device_serials=kwargs.get("device_serial"),
+ adb_binary=kwargs.get("adb_binary"))
+
+ subsuites = testloader.load_subsuites(logger,
+ base_run_info,
+ kwargs["subsuite_file"],
+ set(kwargs["subsuites"] or []))
+
+ if kwargs["test_groups_file"] is not None:
+ test_groups = testloader.TestGroups(logger,
+ kwargs["test_groups_file"],
+ subsuites)
+ else:
+ test_groups = None
+
+ test_manifests = testloader.ManifestLoader(test_paths,
+ force_manifest_update=kwargs["manifest_update"],
+ manifest_download=kwargs["manifest_download"]).load()
+
+ manifest_filters = []
+ test_filters = []
+
+ include = kwargs["include"]
+ if kwargs["include_file"]:
+ include = include or []
+ include.extend(testloader.read_include_from_file(kwargs["include_file"]))
+ if test_groups:
+ include = testloader.update_include_for_groups(test_groups, include)
+
+ if kwargs["tags"] or kwargs["exclude_tags"]:
+ test_filters.append(testloader.TagFilter(kwargs["tags"], kwargs["exclude_tags"]))
+
+ if include or kwargs["exclude"] or kwargs["include_manifest"] or kwargs["default_exclude"]:
+ manifest_filters.append(testloader.TestFilter(include=include,
+ exclude=kwargs["exclude"],
+ manifest_path=kwargs["include_manifest"],
+ test_manifests=test_manifests,
+ explicit=kwargs["default_exclude"]))
+
+ ssl_enabled = sslutils.get_cls(kwargs["ssl_type"]).ssl_enabled
+ h2_enabled = wptserve.utils.http2_compatible()
+
+ test_queue_builder, chunker_kwargs = testloader.get_test_queue_builder(logger=logger,
+ test_groups=test_groups,
+ **kwargs)
+
+ test_loader = testloader.TestLoader(test_manifests=test_manifests,
+ test_types=kwargs["test_types"],
+ base_run_info=base_run_info,
+ subsuites=subsuites,
+ manifest_filters=manifest_filters,
+ test_filters=test_filters,
+ chunk_type=kwargs["chunk_type"],
+ total_chunks=kwargs["total_chunks"],
+ chunk_number=kwargs["this_chunk"],
+ include_https=ssl_enabled,
+ include_h2=h2_enabled,
+ include_webtransport_h3=kwargs["enable_webtransport_h3"],
+ skip_timeout=kwargs["skip_timeout"],
+ skip_crash=kwargs["skip_crash"],
+ skip_implementation_status=kwargs["skip_implementation_status"],
+ chunker_kwargs=chunker_kwargs)
+ return test_queue_builder, test_loader
+
+
+def list_test_groups(test_paths, product, **kwargs):
+ env.do_delayed_imports(logger, test_paths)
+
+ _, test_loader = get_loader(test_paths,
+ product,
+ **kwargs)
+
+ for item in sorted(test_loader.groups(kwargs["test_types"])):
+ print(item)
+
+
+def list_disabled(test_paths, product, **kwargs):
+ env.do_delayed_imports(logger, test_paths)
+
+ rv = []
+
+ _, test_loader = get_loader(test_paths, product, **kwargs)
+
+ for test_type, tests in test_loader.disabled_tests.items():
+ for test in tests:
+ rv.append({"test": test.id, "reason": test.disabled()})
+ print(json.dumps(rv, indent=2))
+
+
+def list_tests(test_paths, product, **kwargs):
+ env.do_delayed_imports(logger, test_paths)
+
+ _, test_loader = get_loader(test_paths, product, **kwargs)
+
+ for test in test_loader.test_ids:
+ print(test)
+
+
+def get_pause_after_test(test_loader, **kwargs):
+ if kwargs["pause_after_test"] is not None:
+ return kwargs["pause_after_test"]
+ if kwargs["repeat_until_unexpected"]:
+ return False
+ if kwargs["headless"]:
+ return False
+ if kwargs["debug_test"]:
+ return True
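+ # Otherwise, pause only when a single testharness test is being run exactly
+ # once (no repeats or reruns), as computed below.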
+ tests = test_loader.tests
+ is_single_testharness = True
+ testharness_count = 0
+ for tests_by_type in tests.values():
+ for test_type, tests in tests_by_type.items():
+ if test_type != "testharness" and len(tests):
+ is_single_testharness = False
+ break
+ elif test_type == "testharness":
+ testharness_count += len(tests)
+ if testharness_count > 1:
+ is_single_testharness = False
+ break
+ return kwargs["repeat"] == 1 and kwargs["rerun"] == 1 and is_single_testharness
+
+
+def log_suite_start(tests_by_group, base_run_info, subsuites, run_by_dir):
+ logger.suite_start(tests_by_group,
+ name='web-platform-test',
+ run_info=base_run_info,
+ extra={"run_by_dir": run_by_dir})
+
+ for name, subsuite in subsuites.items():
+ logger.add_subsuite(name=name, run_info=subsuite.run_info_extras)
+
+
+def run_test_iteration(test_status, test_loader, test_queue_builder,
+ recording, test_environment, product, kwargs):
+ """Runs the entire test suite.
+ This is called for each repeat run requested."""
+ tests_by_type = defaultdict(list)
+
+ for test_type in test_loader.test_types:
+ for subsuite_name, subsuite in test_loader.subsuites.items():
+ type_tests_active = test_loader.tests[subsuite_name][test_type]
+ type_tests_disabled = test_loader.disabled_tests[subsuite_name][test_type]
+ if type_tests_active or type_tests_disabled:
+ tests_by_type[(subsuite_name, test_type)].extend(type_tests_active)
+ tests_by_type[(subsuite_name, test_type)].extend(type_tests_disabled)
+
+ tests_by_group = test_queue_builder.tests_by_group(tests_by_type)
+
+ log_suite_start(tests_by_group,
+ test_loader.base_run_info,
+ test_loader.subsuites,
+ kwargs["run_by_dir"])
+
+ test_implementations = {}
+ tests_to_run = defaultdict(list)
+
+ for test_type in test_loader.test_types:
+ executor_cls = product.executor_classes.get(test_type)
+ if executor_cls is None:
+ logger.warning(f"Unsupported test type {test_type} for product {product.name}")
+ continue
+ browser_cls = product.get_browser_cls(test_type)
+
+ for subsuite_name, subsuite in test_loader.subsuites.items():
+ if (subsuite_name, test_type) not in tests_by_type:
+ continue
+ run_info = subsuite.run_info
+ executor_kwargs = product.get_executor_kwargs(logger,
+ test_type,
+ test_environment,
+ run_info,
+ subsuite=subsuite,
+ **kwargs)
+ browser_kwargs = product.get_browser_kwargs(logger,
+ test_type,
+ run_info,
+ config=test_environment.config,
+ num_test_groups=len(tests_by_group),
+ subsuite=subsuite,
+ **kwargs)
+
+ test_implementations[(subsuite_name, test_type)] = TestImplementation(executor_cls,
+ executor_kwargs,
+ browser_cls,
+ browser_kwargs)
+
+ for test in test_loader.disabled_tests[subsuite_name][test_type]:
+ logger.test_start(test.id, subsuite=subsuite_name)
+ logger.test_end(test.id, status="SKIP", subsuite=subsuite_name)
+ test_status.skipped += 1
+
+ if test_type == "testharness":
+ for test in test_loader.tests[subsuite_name][test_type]:
+ skip_reason = None
+ if test.testdriver and not executor_cls.supports_testdriver:
+ skip_reason = "Executor does not support testdriver.js"
+ elif test.jsshell and not executor_cls.supports_jsshell:
+ skip_reason = "Executor does not support jsshell"
+ if skip_reason:
+ logger.test_start(test.id, subsuite=subsuite_name)
+ logger.test_end(test.id,
+ status="SKIP",
+ subsuite=subsuite_name,
+ message=skip_reason)
+ test_status.skipped += 1
+ else:
+ tests_to_run[(subsuite_name, test_type)].append(test)
+ else:
+ tests_to_run[(subsuite_name, test_type)] = test_loader.tests[subsuite_name][test_type]
+
+ unexpected_fail_tests = defaultdict(list)
+ unexpected_pass_tests = defaultdict(list)
+ recording.pause()
+ retry_counts = kwargs["retry_unexpected"]
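+ # Run the full selection once, then re-run only the unexpectedly failing tests
+ # (plus unexpected passes when kwargs["fail_on_unexpected_pass"] is set) for
+ # each of the requested retries.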
+ for retry_index in range(retry_counts + 1):
+ if retry_index > 0:
+ if kwargs["fail_on_unexpected_pass"]:
+ for (subtests, test_type), tests in unexpected_pass_tests.items():
+ unexpected_fail_tests[(subtests, test_type)].extend(tests)
+ tests_to_run = unexpected_fail_tests
+ if sum(len(tests) for tests in tests_to_run.values()) == 0:
+ break
+ tests_by_group = test_queue_builder.tests_by_group(tests_to_run)
+
+ logger.suite_end()
+
+ log_suite_start(tests_by_group,
+ test_loader.base_run_info,
+ test_loader.subsuites,
+ kwargs["run_by_dir"])
+
+ with ManagerGroup("web-platform-tests",
+ test_queue_builder,
+ test_implementations,
+ retry_index,
+ kwargs["rerun"],
+ kwargs["pause_after_test"],
+ kwargs["pause_on_unexpected"],
+ kwargs["restart_on_unexpected"],
+ kwargs["debug_info"],
+ not kwargs["no_capture_stdio"],
+ kwargs["restart_on_new_group"],
+ recording=recording,
+ max_restarts=kwargs["max_restarts"],
+ ) as manager_group:
+ try:
+ handle_interrupt_signals()
+ manager_group.run(tests_to_run)
+ except KeyboardInterrupt:
+ logger.critical(
+ "Main thread got signal; "
+ "waiting for TestRunnerManager threads to exit.")
+ manager_group.stop()
+ manager_group.wait(timeout=10)
+ raise
+
+ test_status.total_tests += manager_group.test_count()
+ unexpected_fail_tests = manager_group.unexpected_fail_tests()
+ unexpected_pass_tests = manager_group.unexpected_pass_tests()
+
+ test_status.unexpected_pass += sum(len(tests) for tests in unexpected_pass_tests.values())
+ test_status.unexpected += sum(len(tests) for tests in unexpected_pass_tests.values())
+ test_status.unexpected += sum(len(tests) for tests in unexpected_fail_tests.values())
+ logger.suite_end()
+ return True
+
+
+def handle_interrupt_signals():
+ def termination_handler(_signum, _unused_frame):
+ raise KeyboardInterrupt()
+ if sys.platform == "win32":
+ signal.signal(signal.SIGBREAK, termination_handler)
+ else:
+ signal.signal(signal.SIGTERM, termination_handler)
+
+
+def evaluate_runs(test_status, **kwargs):
+ """Evaluates the test counts after the given number of repeat runs has finished"""
+ if test_status.total_tests == 0:
+ if test_status.skipped > 0:
+ logger.warning("All requested tests were skipped")
+ else:
+ if kwargs["default_exclude"]:
+ logger.info("No tests ran")
+ return True
+ else:
+ logger.critical("No tests ran")
+ return False
+
+ if test_status.unexpected and not kwargs["fail_on_unexpected"]:
+ logger.info(f"Tolerating {test_status.unexpected} unexpected results")
+ return True
+
+ all_unexpected_passed = (test_status.unexpected and
+ test_status.unexpected == test_status.unexpected_pass)
+ if all_unexpected_passed and not kwargs["fail_on_unexpected_pass"]:
+ logger.info(f"Tolerating {test_status.unexpected_pass} unexpected results "
+ "because they all PASS")
+ return True
+
+ return test_status.unexpected == 0
+
+
+class TestStatus:
+ """Class that stores information on the results of test runs for later reference"""
+ def __init__(self):
+ self.total_tests = 0
+ self.skipped = 0
+ self.unexpected = 0
+ self.unexpected_pass = 0
+ self.repeated_runs = 0
+ self.expected_repeated_runs = 0
+ self.all_skipped = False
+
+
+def run_tests(config, product, test_paths, **kwargs):
+ """Set up the test environment, load the list of tests to be executed, and
+ invoke the remainder of the code to execute tests"""
+ mp = mpcontext.get_context()
+ if kwargs["instrument_to_file"] is None:
+ recorder = instruments.NullInstrument()
+ else:
+ recorder = instruments.Instrument(kwargs["instrument_to_file"])
+ with recorder as recording, capture.CaptureIO(logger,
+ not kwargs["no_capture_stdio"],
+ mp_context=mp):
+ recording.set(["startup"])
+ env.do_delayed_imports(logger, test_paths)
+
+ env_extras = product.get_env_extras(**kwargs)
+
+ product.check_args(**kwargs)
+
+ if kwargs["install_fonts"]:
+ env_extras.append(FontInstaller(
+ logger,
+ font_dir=kwargs["font_dir"],
+ ahem=os.path.join(test_paths["/"].tests_path, "fonts/Ahem.ttf")
+ ))
+
+ recording.set(["startup", "load_tests"])
+
+ test_queue_builder, test_loader = get_loader(test_paths,
+ product,
+ **kwargs)
+
+ test_status = TestStatus()
+ repeat = kwargs["repeat"]
+ test_status.expected_repeated_runs = repeat
+
+ if len(test_loader.test_ids) == 0 and kwargs["test_list"]:
+ logger.critical("Unable to find any tests at the path(s):")
+ for path in kwargs["test_list"]:
+ logger.critical(" %s" % path)
+ logger.critical("Please check spelling and make sure there are tests in the specified path(s).")
+ return False, test_status
+ kwargs["pause_after_test"] = get_pause_after_test(test_loader, **kwargs)
+
+ ssl_config = {"type": kwargs["ssl_type"],
+ "openssl": {"openssl_binary": kwargs["openssl_binary"]},
+ "pregenerated": {"host_key_path": kwargs["host_key_path"],
+ "host_cert_path": kwargs["host_cert_path"],
+ "ca_cert_path": kwargs["ca_cert_path"]}}
+
+ # testharness.js is global so we can't set the timeout multiplier in that file by subsuite
+ testharness_timeout_multiplier = product.get_timeout_multiplier("testharness",
+ test_loader.base_run_info,
+ **kwargs)
+
+ mojojs_path = kwargs["mojojs_path"] if kwargs["enable_mojojs"] else None
+ inject_script = kwargs["inject_script"] if kwargs["inject_script"] else None
+
+ recording.set(["startup", "start_environment"])
+ with env.TestEnvironment(test_paths,
+ testharness_timeout_multiplier,
+ kwargs["pause_after_test"],
+ kwargs["debug_test"],
+ kwargs["debug_info"],
+ product.env_options,
+ ssl_config,
+ env_extras,
+ kwargs["enable_webtransport_h3"],
+ mojojs_path,
+ inject_script,
+ kwargs["suppress_handler_traceback"]) as test_environment:
+ recording.set(["startup", "ensure_environment"])
+ try:
+ test_environment.ensure_started()
+ start_time = datetime.now()
+ except env.TestEnvironmentError as e:
+ logger.critical("Error starting test environment: %s" % e)
+ raise
+
+ recording.set(["startup"])
+
+ max_time = None
+ if "repeat_max_time" in kwargs:
+ max_time = timedelta(minutes=kwargs["repeat_max_time"])
+
+ repeat_until_unexpected = kwargs["repeat_until_unexpected"]
+
+ # keep track of longest time taken to complete a test suite iteration
+ # so that the runs can be stopped to avoid a possible Taskcluster (TC) timeout.
+ longest_iteration_time = timedelta()
+
+ while test_status.repeated_runs < repeat or repeat_until_unexpected:
+ # if the next repeat run could cause the TC timeout to be reached,
+ # stop now and use the test results we have.
+ # Pad the total time by 10% to ensure ample time for the next iteration(s).
+ estimate = (datetime.now() +
+ timedelta(seconds=(longest_iteration_time.total_seconds() * 1.1)))
+ if not repeat_until_unexpected and max_time and estimate >= start_time + max_time:
+ logger.info(f"Ran {test_status.repeated_runs} of {repeat} iterations.")
+ break
+
+ # begin tracking runtime of the test suite
+ iteration_start = datetime.now()
+ test_status.repeated_runs += 1
+ if repeat_until_unexpected:
+ logger.info(f"Repetition {test_status.repeated_runs}")
+ elif repeat > 1:
+ logger.info(f"Repetition {test_status.repeated_runs} / {repeat}")
+
+ iter_success = run_test_iteration(test_status,
+ test_loader,
+ test_queue_builder,
+ recording,
+ test_environment,
+ product,
+ kwargs)
+ # if there were issues with the suite run (tests not loaded, etc.), return
+ if not iter_success:
+ return False, test_status
+ recording.set(["after-end"])
+ logger.info(f"Got {test_status.unexpected} unexpected results, "
+ f"with {test_status.unexpected_pass} unexpected passes")
+
+ # Note this iteration's runtime
+ iteration_runtime = datetime.now() - iteration_start
+ # determine the longest test suite runtime seen.
+ longest_iteration_time = max(longest_iteration_time,
+ iteration_runtime)
+
+ if repeat_until_unexpected and test_status.unexpected > 0:
+ break
+ if test_status.repeated_runs == 1 and len(test_loader.test_ids) == test_status.skipped:
+ test_status.all_skipped = True
+ break
+
+ # Return the evaluation of the runs and the number of repeated iterations that were run.
+ return evaluate_runs(test_status, **kwargs), test_status
+
+
+def check_stability(**kwargs):
+ from . import stability
+ if kwargs["stability"]:
+ logger.warning("--stability is deprecated; please use --verify instead!")
+ kwargs['verify_max_time'] = None
+ kwargs['verify_chaos_mode'] = False
+ kwargs['verify_repeat_loop'] = 0
+ kwargs['verify_repeat_restart'] = 10 if kwargs['repeat'] == 1 else kwargs['repeat']
+ kwargs['verify_output_results'] = True
+
+ return stability.check_stability(logger,
+ max_time=kwargs['verify_max_time'],
+ chaos_mode=kwargs['verify_chaos_mode'],
+ repeat_loop=kwargs['verify_repeat_loop'],
+ repeat_restart=kwargs['verify_repeat_restart'],
+ output_results=kwargs['verify_output_results'],
+ **kwargs)
+
+
+def start(**kwargs):
+ assert logger is not None
+
+ logged_critical = wptlogging.LoggedAboveLevelHandler("CRITICAL")
+ handler = handlers.LogLevelFilter(logged_critical, "CRITICAL")
+ logger.add_handler(handler)
+
+ rv = False
+ try:
+ if kwargs["list_test_groups"]:
+ list_test_groups(**kwargs)
+ elif kwargs["list_disabled"]:
+ list_disabled(**kwargs)
+ elif kwargs["list_tests"]:
+ list_tests(**kwargs)
+ elif kwargs["verify"] or kwargs["stability"]:
+ rv = check_stability(**kwargs) or logged_critical.has_log
+ else:
+ rv = not run_tests(**kwargs)[0] or logged_critical.has_log
+ finally:
+ logger.shutdown()
+ logger.remove_handler(handler)
+ return rv
+
+
+def main():
+ """Main entry point when calling from the command line"""
+ kwargs = wptcommandline.parse_args()
+
+ try:
+ if kwargs["prefs_root"] is None:
+ kwargs["prefs_root"] = os.path.abspath(os.path.join(here, "prefs"))
+
+ setup_logging(kwargs, {"raw": sys.stdout})
+
+ return start(**kwargs)
+ except Exception:
+ if kwargs["pdb"]:
+ import pdb
+ import traceback
+ print(traceback.format_exc())
+ pdb.post_mortem()
+ else:
+ raise
diff --git a/testing/web-platform/tests/tools/wptrunner/wptrunner/wpttest.py b/testing/web-platform/tests/tools/wptrunner/wptrunner/wpttest.py
new file mode 100644
index 0000000000..44e6878970
--- /dev/null
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wpttest.py
@@ -0,0 +1,740 @@
+# mypy: allow-untyped-defs
+import os
+import subprocess
+import sys
+from abc import ABC
+from collections import defaultdict
+from typing import Any, ClassVar, Dict, Optional, Set, Type
+from urllib.parse import urljoin
+
+from .wptmanifest.parser import atoms
+
+atom_reset = atoms["Reset"]
+enabled_tests = {"testharness", "reftest", "wdspec", "crashtest", "print-reftest"}
+
+
+class Result(ABC):
+ default_expected: ClassVar[str]
+ statuses: Set[str]
+
+ def __init__(self,
+ status,
+ message,
+ expected=None,
+ extra=None,
+ stack=None,
+ known_intermittent=None):
+ if status not in self.statuses:
+ raise ValueError("Unrecognised status %s" % status)
+ self.status = status
+ self.message = message
+ self.expected = expected if expected is not None else self.default_expected
+ self.known_intermittent = known_intermittent if known_intermittent is not None else []
+ self.extra = extra if extra is not None else {}
+ self.stack = stack
+
+ def __repr__(self):
+ return f"<{self.__module__}.{self.__class__.__name__} {self.status}>"
+
+
+class SubtestResult(ABC):
+ def __init__(self, name, status, message, stack=None, expected=None, known_intermittent=None):
+ self.name = name
+ if status not in self.statuses:
+ raise ValueError("Unrecognised status %s" % status)
+ self.status = status
+ self.message = message
+ self.stack = stack
+ self.expected = expected
+ self.known_intermittent = known_intermittent if known_intermittent is not None else []
+
+ def __repr__(self):
+ return f"<{self.__module__}.{self.__class__.__name__} {self.name} {self.status}>"
+
+
+class TestharnessResult(Result):
+ default_expected = "OK"
+ statuses = {"OK", "ERROR", "INTERNAL-ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT", "CRASH", "PRECONDITION_FAILED"}
+
+
+class TestharnessSubtestResult(SubtestResult):
+ default_expected = "PASS"
+ statuses = {"PASS", "FAIL", "TIMEOUT", "NOTRUN", "PRECONDITION_FAILED"}
+
+
+class ReftestResult(Result):
+ default_expected = "PASS"
+ statuses = {"PASS", "FAIL", "ERROR", "INTERNAL-ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT",
+ "CRASH"}
+
+
+class WdspecResult(Result):
+ default_expected = "OK"
+ statuses = {"OK", "ERROR", "INTERNAL-ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT", "CRASH"}
+
+
+class WdspecSubtestResult(SubtestResult):
+ default_expected = "PASS"
+ statuses = {"PASS", "FAIL", "ERROR"}
+
+
+class CrashtestResult(Result):
+ default_expected = "PASS"
+ statuses = {"PASS", "ERROR", "INTERNAL-ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT",
+ "CRASH"}
+
+
+def get_run_info(metadata_root, product, **kwargs):
+ return RunInfo(metadata_root, product, **kwargs)
+
+
+class RunInfo(Dict[str, Any]):
+ def __init__(self, metadata_root, product_name, debug,
+ browser_version=None,
+ browser_channel=None,
+ verify=None,
+ extras=None,
+ device_serials=None,
+ adb_binary=None):
+ import mozinfo
+ self._update_mozinfo(metadata_root)
+ self.update(mozinfo.info)
+
+ from .update.tree import GitTree
+ try:
+ # GitTree.__init__ throws if we are not in a git tree.
+ rev = GitTree(log_error=False).rev
+ except (OSError, subprocess.CalledProcessError):
+ rev = None
+ if rev:
+ self["revision"] = rev.decode("utf-8")
+
+ self["python_version"] = sys.version_info.major
+ self["product"] = product_name
+ if debug is not None:
+ self["debug"] = debug
+ elif "debug" not in self:
+ # Default to release
+ self["debug"] = False
+ if browser_version:
+ self["browser_version"] = browser_version
+ if browser_channel:
+ self["browser_channel"] = browser_channel
+
+ self["verify"] = verify
+ if "wasm" not in self:
+ self["wasm"] = False
+ if extras is not None:
+ self.update(extras)
+ if "headless" not in self:
+ self["headless"] = False
+
+ if adb_binary:
+ self["adb_binary"] = adb_binary
+ if device_serials:
+ # Assume all emulators are identical, so query an arbitrary one.
+ self._update_with_emulator_info(device_serials[0])
+ self.pop("linux_distro", None)
+
+ def _adb_run(self, device_serial, args, **kwargs):
+ adb_binary = self.get("adb_binary", "adb")
+ cmd = [adb_binary, "-s", device_serial, *args]
+ return subprocess.check_output(cmd, **kwargs)
+
+ def _adb_get_property(self, device_serial, prop, **kwargs):
+ args = ["shell", "getprop", prop]
+ value = self._adb_run(device_serial, args, **kwargs)
+ return value.strip()
+
+ def _update_with_emulator_info(self, device_serial):
+ """Override system info taken from the host if using an Android
+ emulator."""
+ try:
+ self._adb_run(device_serial, ["wait-for-device"])
+ emulator_info = {
+ "os": "android",
+ "os_version": self._adb_get_property(
+ device_serial,
+ "ro.build.version.release",
+ encoding="utf-8",
+ ),
+ }
+ emulator_info["version"] = emulator_info["os_version"]
+
+ # Detect CPU info (https://developer.android.com/ndk/guides/abis#sa)
+ abi64, *_ = self._adb_get_property(
+ device_serial,
+ "ro.product.cpu.abilist64",
+ encoding="utf-8",
+ ).split(',')
+ if abi64:
+ emulator_info["processor"] = abi64
+ emulator_info["bits"] = 64
+ else:
+ emulator_info["processor"], *_ = self._adb_get_property(
+ device_serial,
+ "ro.product.cpu.abilist32",
+ encoding="utf-8",
+ ).split(',')
+ emulator_info["bits"] = 32
+
+ self.update(emulator_info)
+ except (OSError, subprocess.CalledProcessError):
+ pass
+
+ def _update_mozinfo(self, metadata_root):
+ """Add extra build information from a mozinfo.json file in a parent
+ directory"""
+ import mozinfo
+
+ path = metadata_root
+ dirs = set()
+ while path != os.path.expanduser('~'):
+ if path in dirs:
+ break
+ dirs.add(str(path))
+ path = os.path.dirname(path)
+
+ mozinfo.find_and_update_from_json(*dirs)
+
+
+def server_protocol(manifest_item):
+ if hasattr(manifest_item, "h2") and manifest_item.h2:
+ return "h2"
+ if hasattr(manifest_item, "https") and manifest_item.https:
+ return "https"
+ return "http"
+
+
+class Test(ABC):
+ result_cls: ClassVar[Type[Result]]
+ subtest_result_cls: ClassVar[Optional[Type[SubtestResult]]] = None
+ test_type: ClassVar[str]
+ pac = None
+
+ default_timeout = 10 # seconds
+ long_timeout = 60 # seconds
+
+ def __init__(self, url_base, tests_root, url, inherit_metadata, test_metadata,
+ timeout=None, path=None, protocol="http", subdomain=False, pac=None):
+ self.url_base = url_base
+ self.tests_root = tests_root
+ self.url = url
+ self._inherit_metadata = inherit_metadata
+ self._test_metadata = test_metadata
+ self.timeout = timeout if timeout is not None else self.default_timeout
+ self.path = path
+
+ self.subdomain = subdomain
+ self.environment = {"url_base": url_base,
+ "protocol": protocol,
+ "prefs": self.prefs}
+
+ if pac is not None:
+ self.environment["pac"] = urljoin(self.url, pac)
+
+ def __eq__(self, other):
+ if not isinstance(other, Test):
+ return False
+ return self.id == other.id
+
+ # Python 2 does not have this delegation, while Python 3 does.
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def make_result(self,
+ status,
+ message,
+ expected=None,
+ extra=None,
+ stack=None,
+ known_intermittent=None):
+ if expected is None:
+ expected = self.expected()
+ known_intermittent = self.known_intermittent()
+ return self.result_cls(status, message, expected, extra, stack, known_intermittent)
+
+ def make_subtest_result(self, name, status, message, stack=None, expected=None,
+ known_intermittent=None):
+ if expected is None:
+ expected = self.expected(name)
+ known_intermittent = self.known_intermittent(name)
+ return self.subtest_result_cls(name, status, message, stack, expected, known_intermittent)
+
+ def update_metadata(self, metadata=None):
+ if metadata is None:
+ metadata = {}
+ return metadata
+
+ @classmethod
+ def from_manifest(cls, manifest_file, manifest_item, inherit_metadata, test_metadata):
+ timeout = cls.long_timeout if manifest_item.timeout == "long" else cls.default_timeout
+ return cls(manifest_file.url_base,
+ manifest_file.tests_root,
+ manifest_item.url,
+ inherit_metadata,
+ test_metadata,
+ timeout=timeout,
+ path=os.path.join(manifest_file.tests_root, manifest_item.path),
+ protocol=server_protocol(manifest_item),
+ subdomain=manifest_item.subdomain)
+
+ @property
+ def id(self):
+ return self.url
+
+ @property
+ def keys(self):
+ return tuple()
+
+ @property
+ def abs_path(self):
+ return os.path.join(self.tests_root, self.path)
+
+ def _get_metadata(self, subtest=None):
+ if self._test_metadata is not None and subtest is not None:
+ return self._test_metadata.get_subtest(subtest)
+ else:
+ return self._test_metadata
+
+ def itermeta(self, subtest=None):
+ if self._test_metadata is not None:
+ if subtest is not None:
+ subtest_meta = self._get_metadata(subtest)
+ if subtest_meta is not None:
+ yield subtest_meta
+ yield self._get_metadata()
+ yield from reversed(self._inherit_metadata)
+
+ def disabled(self, subtest=None):
+ for meta in self.itermeta(subtest):
+ disabled = meta.disabled
+ if disabled is not None:
+ return disabled
+ return None
+
+ @property
+ def restart_after(self):
+ for meta in self.itermeta(None):
+ restart_after = meta.restart_after
+ if restart_after is not None:
+ return True
+ return False
+
+ @property
+ def leaks(self):
+ for meta in self.itermeta(None):
+ leaks = meta.leaks
+ if leaks is not None:
+ return leaks
+ return False
+
+ @property
+ def min_assertion_count(self):
+ for meta in self.itermeta(None):
+ count = meta.min_assertion_count
+ if count is not None:
+ return count
+ return 0
+
+ @property
+ def max_assertion_count(self):
+ for meta in self.itermeta(None):
+ count = meta.max_assertion_count
+ if count is not None:
+ return count
+ return 0
+
+ @property
+ def lsan_disabled(self):
+ for meta in self.itermeta():
+ if meta.lsan_disabled is not None:
+ return meta.lsan_disabled
+ return False
+
+ @property
+ def lsan_allowed(self):
+ lsan_allowed = set()
+ for meta in self.itermeta():
+ lsan_allowed |= meta.lsan_allowed
+ if atom_reset in lsan_allowed:
+ lsan_allowed.remove(atom_reset)
+ break
+ return lsan_allowed
+
+ @property
+ def lsan_max_stack_depth(self):
+ for meta in self.itermeta(None):
+ depth = meta.lsan_max_stack_depth
+ if depth is not None:
+ return depth
+ return None
+
+ @property
+ def mozleak_allowed(self):
+ mozleak_allowed = set()
+ for meta in self.itermeta():
+ mozleak_allowed |= meta.leak_allowed
+ if atom_reset in mozleak_allowed:
+ mozleak_allowed.remove(atom_reset)
+ break
+ return mozleak_allowed
+
+ @property
+ def mozleak_threshold(self):
+ rv = {}
+ for meta in self.itermeta(None):
+ threshold = meta.leak_threshold
+ for key, value in threshold.items():
+ if key not in rv:
+ rv[key] = value
+ return rv
+
+ @property
+ def tags(self):
+ tags = set()
+ for meta in self.itermeta():
+ meta_tags = meta.tags
+ tags |= meta_tags
+ if atom_reset in meta_tags:
+ tags.remove(atom_reset)
+ break
+
+ tags.add("dir:%s" % self.id.lstrip("/").split("/")[0])
+
+ return tags
+
+ @property
+ def prefs(self):
+ prefs = {}
+ for meta in reversed(list(self.itermeta())):
+ meta_prefs = meta.prefs
+ if atom_reset in meta_prefs:
+ del meta_prefs[atom_reset]
+ prefs = {}
+ prefs.update(meta_prefs)
+ return prefs
+
+ def expected_fail_message(self, subtest):
+ if subtest is None:
+ return None
+
+ metadata = self._get_metadata(subtest)
+ if metadata is None:
+ return None
+
+ try:
+ return metadata.get("expected-fail-message")
+ except KeyError:
+ return None
+
+ def expected(self, subtest=None):
+ if subtest is None:
+ default = self.result_cls.default_expected
+ else:
+ default = self.subtest_result_cls.default_expected
+
+ metadata = self._get_metadata(subtest)
+ if metadata is None:
+ return default
+
+ try:
+ expected = metadata.get("expected")
+ if isinstance(expected, str):
+ return expected
+ elif isinstance(expected, list):
+ return expected[0]
+ elif expected is None:
+ return default
+ except KeyError:
+ return default
+
+ def implementation_status(self):
+ implementation_status = None
+ for meta in self.itermeta():
+ implementation_status = meta.implementation_status
+ if implementation_status:
+ return implementation_status
+
+ # if no explicit status is given, assume the feature is being implemented
+ return "implementing"
+
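+ # Editorial note: an "expected" metadata value may be a single status or a
+ # list such as [OK, TIMEOUT]; expected() above returns the first entry as the
+ # primary expectation and known_intermittent() returns the remaining entries.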
+ def known_intermittent(self, subtest=None):
+ metadata = self._get_metadata(subtest)
+ if metadata is None:
+ return []
+
+ try:
+ expected = metadata.get("expected")
+ if isinstance(expected, list):
+ return expected[1:]
+ return []
+ except KeyError:
+ return []
+
+ def __repr__(self):
+ return f"<{self.__module__}.{self.__class__.__name__} {self.id}>"
+
+
+class TestharnessTest(Test):
+ result_cls = TestharnessResult
+ subtest_result_cls = TestharnessSubtestResult
+ test_type = "testharness"
+
+ def __init__(self, url_base, tests_root, url, inherit_metadata, test_metadata,
+ timeout=None, path=None, protocol="http", testdriver=False,
+ jsshell=False, scripts=None, subdomain=False, pac=None):
+ Test.__init__(self, url_base, tests_root, url, inherit_metadata, test_metadata, timeout,
+ path, protocol, subdomain, pac)
+
+ self.testdriver = testdriver
+ self.jsshell = jsshell
+ self.scripts = scripts or []
+
+ @classmethod
+ def from_manifest(cls, manifest_file, manifest_item, inherit_metadata, test_metadata):
+ timeout = cls.long_timeout if manifest_item.timeout == "long" else cls.default_timeout
+ pac = manifest_item.pac
+ testdriver = manifest_item.testdriver if hasattr(manifest_item, "testdriver") else False
+ jsshell = manifest_item.jsshell if hasattr(manifest_item, "jsshell") else False
+ script_metadata = manifest_item.script_metadata or []
+ scripts = [v for (k, v) in script_metadata
+ if k == "script"]
+ return cls(manifest_file.url_base,
+ manifest_file.tests_root,
+ manifest_item.url,
+ inherit_metadata,
+ test_metadata,
+ timeout=timeout,
+ pac=pac,
+ path=os.path.join(manifest_file.tests_root, manifest_item.path),
+ protocol=server_protocol(manifest_item),
+ testdriver=testdriver,
+ jsshell=jsshell,
+ scripts=scripts,
+ subdomain=manifest_item.subdomain)
+
+ @property
+ def id(self):
+ return self.url
+
+
+class ReftestTest(Test):
+ """A reftest
+
+ A reftest should be considered to pass if one of its references matches (see below) *and* the
+ reference passes if it has any references recursively.
+
+ Attributes:
+ references (List[Tuple[str, str]]): a list of alternate references, where one must match for the test to pass
+ viewport_size (Optional[Tuple[int, int]]): size of the viewport for this test, if not default
+ dpi (Optional[int]): dpi to use when rendering this test, if not default
+
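+ For example (illustrative only), a test whose references are
+ [(A, "=="), (B, "==")] passes if it matches either A or B; if A in turn has
+ a "!=" reference, that mismatch must also hold for the branch through A to
+ pass.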
+ """
+ result_cls = ReftestResult
+ test_type = "reftest"
+
+ def __init__(self, url_base, tests_root, url, inherit_metadata, test_metadata, references,
+ timeout=None, path=None, viewport_size=None, dpi=None, fuzzy=None,
+ protocol="http", subdomain=False):
+ Test.__init__(self, url_base, tests_root, url, inherit_metadata, test_metadata, timeout,
+ path, protocol, subdomain)
+
+ for _, ref_type in references:
+ if ref_type not in ("==", "!="):
+ raise ValueError
+
+ self.references = references
+ self.viewport_size = self.get_viewport_size(viewport_size)
+ self.dpi = dpi
+ self._fuzzy = fuzzy or {}
+
+ @classmethod
+ def cls_kwargs(cls, manifest_test):
+ return {"viewport_size": manifest_test.viewport_size,
+ "dpi": manifest_test.dpi,
+ "protocol": server_protocol(manifest_test),
+ "fuzzy": manifest_test.fuzzy}
+
+ @classmethod
+ def from_manifest(cls,
+ manifest_file,
+ manifest_test,
+ inherit_metadata,
+ test_metadata):
+
+ timeout = cls.long_timeout if manifest_test.timeout == "long" else cls.default_timeout
+
+ url = manifest_test.url
+
+ node = cls(manifest_file.url_base,
+ manifest_file.tests_root,
+ manifest_test.url,
+ inherit_metadata,
+ test_metadata,
+ [],
+ timeout=timeout,
+ path=manifest_test.path,
+ subdomain=manifest_test.subdomain,
+ **cls.cls_kwargs(manifest_test))
+
+ refs_by_type = defaultdict(list)
+
+ for ref_url, ref_type in manifest_test.references:
+ refs_by_type[ref_type].append(ref_url)
+
+ # Construct a list of all the mismatches, where we end up with mismatch_1 != url !=
+ # mismatch_2 != url != mismatch_3 etc.
+ #
+ # Per the logic documented above, this means that, for the test to pass,
+ # none of the provided mismatches may match it.
+ mismatch_walk = None
+ if refs_by_type["!="]:
+ mismatch_walk = ReftestTest(manifest_file.url_base,
+ manifest_file.tests_root,
+ refs_by_type["!="][0],
+ [],
+ None,
+ [])
+ cmp_ref = mismatch_walk
+ for ref_url in refs_by_type["!="][1:]:
+ cmp_self = ReftestTest(manifest_file.url_base,
+ manifest_file.tests_root,
+ url,
+ [],
+ None,
+ [])
+ cmp_ref.references.append((cmp_self, "!="))
+ cmp_ref = ReftestTest(manifest_file.url_base,
+ manifest_file.tests_root,
+ ref_url,
+ [],
+ None,
+ [])
+ cmp_self.references.append((cmp_ref, "!="))
+
+ if mismatch_walk is None:
+ mismatch_refs = []
+ else:
+ mismatch_refs = [(mismatch_walk, "!=")]
+
+ if refs_by_type["=="]:
+ # For each == ref, add a reference to this node whose tail is the mismatch list.
+ # Per the logic documented above, this means any one of the matches must
+ # pass, and all of the mismatches must hold as well.
+ for ref_url in refs_by_type["=="]:
+ ref = ReftestTest(manifest_file.url_base,
+ manifest_file.tests_root,
+ ref_url,
+ [],
+ None,
+ mismatch_refs)
+ node.references.append((ref, "=="))
+ else:
+ # Otherwise, we just add the mismatches directly as we are immediately into the
+ # mismatch chain with no alternates.
+ node.references.extend(mismatch_refs)
+
+ return node
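+
+ # Illustrative sketch (hypothetical names) of the graph built above for a
+ # test T whose manifest references are
+ # [(A, "=="), (B, "=="), (M1, "!="), (M2, "!=")]:
+ #
+ #   T == A, T == B      either match branch may satisfy the test
+ #   A != M1, B != M1    each match branch carries the mismatch chain
+ #   M1 != T, T != M2    the chain M1 != T != M2 ensures T matches neither
+ #                       M1 nor M2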
+
+ def update_metadata(self, metadata):
+ if "url_count" not in metadata:
+ metadata["url_count"] = defaultdict(int)
+ for reference, _ in self.references:
+ # We assume a naive implementation in which a url with multiple
+ # possible screenshots will need to take both the lhs and rhs screenshots
+ # for each possible match
+ metadata["url_count"][(self.environment["protocol"], reference.url)] += 1
+ reference.update_metadata(metadata)
+ return metadata
+
+ def get_viewport_size(self, override):
+ return override
+
+ @property
+ def id(self):
+ return self.url
+
+ @property
+ def keys(self):
+ return ("reftype", "refurl")
+
+ @property
+ def fuzzy(self):
+ return self._fuzzy
+
+ @property
+ def fuzzy_override(self):
+ values = {}
+ for meta in reversed(list(self.itermeta(None))):
+ value = meta.fuzzy
+ if not value:
+ continue
+ if atom_reset in value:
+ value.remove(atom_reset)
+ values = {}
+ for key, data in value:
+ if isinstance(key, (tuple, list)):
+ key = list(key)
+ key[0] = urljoin(self.url, key[0])
+ key[1] = urljoin(self.url, key[1])
+ key = tuple(key)
+ elif key:
+ # Key is just a relative url to a ref
+ key = urljoin(self.url, key)
+ values[key] = data
+ return values
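+
+ # Illustrative note: fuzzy keys written in metadata as relative reference
+ # URLs (e.g. a hypothetical "green-ref.html") are resolved against the test
+ # URL above, so later lookups can use absolute (test, ref) URL pairs.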
+
+ @property
+ def page_ranges(self):
+ return {}
+
+
+class PrintReftestTest(ReftestTest):
+ test_type = "print-reftest"
+
+ def __init__(self, url_base, tests_root, url, inherit_metadata, test_metadata, references,
+ timeout=None, path=None, viewport_size=None, dpi=None, fuzzy=None,
+ page_ranges=None, protocol="http", subdomain=False):
+ super().__init__(url_base, tests_root, url, inherit_metadata, test_metadata,
+ references, timeout, path, viewport_size, dpi,
+ fuzzy, protocol, subdomain=subdomain)
+ self._page_ranges = page_ranges
+
+ @classmethod
+ def cls_kwargs(cls, manifest_test):
+ rv = super().cls_kwargs(manifest_test)
+ rv["page_ranges"] = manifest_test.page_ranges
+ return rv
+
+ def get_viewport_size(self, override):
+ assert override is None
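+ # Print reftests render to a fixed page size; these values are presumably
+ # 5in x 3in expressed in centimetres (2.54 cm per inch), and the assert
+ # above rejects any viewport override from metadata.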
+ return (5*2.54, 3*2.54)
+
+ @property
+ def page_ranges(self):
+ return self._page_ranges
+
+
+class WdspecTest(Test):
+ result_cls = WdspecResult
+ subtest_result_cls = WdspecSubtestResult
+ test_type = "wdspec"
+
+ default_timeout = 25
+ long_timeout = 180 # 3 minutes
+
+
+class CrashTest(Test):
+ result_cls = CrashtestResult
+ test_type = "crashtest"
+
+
+manifest_test_cls = {"reftest": ReftestTest,
+ "print-reftest": PrintReftestTest,
+ "testharness": TestharnessTest,
+ "wdspec": WdspecTest,
+ "crashtest": CrashTest}
+
+
+def from_manifest(manifest_file, manifest_test, inherit_metadata, test_metadata):
+ test_cls = manifest_test_cls[manifest_test.item_type]
+ return test_cls.from_manifest(manifest_file, manifest_test, inherit_metadata, test_metadata)
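+
+
+ # Illustrative usage (hypothetical objects): for a manifest item whose
+ # item_type is "reftest", from_manifest(manifest_file, item, [], None) would
+ # dispatch to ReftestTest.from_manifest and return the constructed
+ # ReftestTest instance.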