From 09a16e2a8151bccee444ddc33846b3d02437e838 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Tue, 16 Apr 2024 22:04:57 +0200 Subject: Adding upstream version 85. Signed-off-by: Daniel Baumann --- .eslintignore | 2 + .eslintrc.json | 54 + .flake8 | 2 + .fmf/version | 1 + .github/ISSUE_TEMPLATE/bug_report.md | 31 + .github/ISSUE_TEMPLATE/enhancement.md | 11 + .github/codeql-config.yml | 2 + .github/dependabot.yml | 38 + .github/workflows/cockpit-lib-update.yml | 33 + .github/workflows/codeql.yml | 33 + .github/workflows/dependabot.yml | 82 + .github/workflows/nightly.yml | 22 + .github/workflows/release.yml | 68 + .github/workflows/reposchutz.yml | 66 + .github/workflows/weblate-sync-po.yml | 46 + .github/workflows/weblate-sync-pot.yml | 42 + .gitignore | 34 + .gitmodules | 7 + .stylelintrc.json | 39 + HACKING.md | 107 + LICENSE | 502 + Makefile | 217 + README.md | 78 + build.js | 107 + cockpit-podman.spec | 88 + dist/index.css.LEGAL.txt | 35 + dist/index.css.gz | Bin 0 -> 144167 bytes dist/index.html | 36 + dist/index.js.LEGAL.txt | 46 + dist/index.js.gz | Bin 0 -> 428668 bytes dist/manifest.json | 16 + dist/po.cs.js.gz | Bin 0 -> 6153 bytes dist/po.de.js.gz | Bin 0 -> 5644 bytes dist/po.es.js.gz | Bin 0 -> 5763 bytes dist/po.fi.js.gz | Bin 0 -> 5778 bytes dist/po.fr.js.gz | Bin 0 -> 5673 bytes dist/po.ja.js.gz | Bin 0 -> 6135 bytes dist/po.ka.js.gz | Bin 0 -> 6373 bytes dist/po.ko.js.gz | Bin 0 -> 5988 bytes dist/po.manifest.cs.js.gz | Bin 0 -> 197 bytes dist/po.manifest.de.js.gz | Bin 0 -> 175 bytes dist/po.manifest.es.js.gz | Bin 0 -> 180 bytes dist/po.manifest.fi.js.gz | Bin 0 -> 179 bytes dist/po.manifest.fr.js.gz | Bin 0 -> 177 bytes dist/po.manifest.ja.js.gz | Bin 0 -> 193 bytes dist/po.manifest.ka.js.gz | Bin 0 -> 228 bytes dist/po.manifest.ko.js.gz | Bin 0 -> 199 bytes dist/po.manifest.pl.js.gz | Bin 0 -> 216 bytes dist/po.manifest.sk.js.gz | Bin 0 -> 196 bytes dist/po.manifest.sv.js.gz | Bin 0 -> 183 bytes dist/po.manifest.tr.js.gz | Bin 0 -> 186 
bytes dist/po.manifest.uk.js.gz | Bin 0 -> 251 bytes dist/po.manifest.zh_CN.js.gz | Bin 0 -> 187 bytes dist/po.pl.js.gz | Bin 0 -> 6050 bytes dist/po.sk.js.gz | Bin 0 -> 4099 bytes dist/po.sv.js.gz | Bin 0 -> 5613 bytes dist/po.tr.js.gz | Bin 0 -> 5704 bytes dist/po.uk.js.gz | Bin 0 -> 6809 bytes dist/po.zh_CN.js.gz | Bin 0 -> 5687 bytes node_modules/chrome-remote-interface/LICENSE | 18 + node_modules/chrome-remote-interface/README.md | 992 + node_modules/chrome-remote-interface/bin/client.js | 311 + .../chrome-remote-interface.js | 1 + node_modules/chrome-remote-interface/index.js | 44 + node_modules/chrome-remote-interface/lib/api.js | 92 + node_modules/chrome-remote-interface/lib/chrome.js | 314 + .../chrome-remote-interface/lib/defaults.js | 4 + .../chrome-remote-interface/lib/devtools.js | 127 + .../lib/external-request.js | 44 + .../chrome-remote-interface/lib/protocol.json | 27862 +++++++++++++++++++ .../lib/websocket-wrapper.js | 39 + node_modules/chrome-remote-interface/package.json | 64 + .../chrome-remote-interface/webpack.config.js | 48 + node_modules/commander/History.md | 298 + node_modules/commander/LICENSE | 22 + node_modules/commander/Readme.md | 351 + node_modules/commander/index.js | 1137 + node_modules/commander/package.json | 29 + node_modules/sizzle/AUTHORS.txt | 67 + node_modules/sizzle/LICENSE.txt | 36 + node_modules/sizzle/README.md | 55 + node_modules/sizzle/dist/sizzle.js | 2514 ++ node_modules/sizzle/dist/sizzle.min.js | 3 + node_modules/sizzle/dist/sizzle.min.map | 1 + node_modules/sizzle/package.json | 85 + node_modules/ws/LICENSE | 21 + node_modules/ws/README.md | 495 + node_modules/ws/browser.js | 8 + node_modules/ws/index.js | 10 + node_modules/ws/lib/buffer-util.js | 129 + node_modules/ws/lib/constants.js | 10 + node_modules/ws/lib/event-target.js | 184 + node_modules/ws/lib/extension.js | 223 + node_modules/ws/lib/limiter.js | 55 + node_modules/ws/lib/permessage-deflate.js | 518 + node_modules/ws/lib/receiver.js | 607 + 
node_modules/ws/lib/sender.js | 409 + node_modules/ws/lib/stream.js | 180 + node_modules/ws/lib/validation.js | 104 + node_modules/ws/lib/websocket-server.js | 447 + node_modules/ws/lib/websocket.js | 1195 + node_modules/ws/package.json | 56 + org.cockpit-project.podman.metainfo.xml | 16 + package-lock.json | 5427 ++++ package.json | 61 + packaging/arch/PKGBUILD | 15 + packaging/debian/changelog | 5 + packaging/debian/control | 23 + packaging/debian/copyright | 38 + packaging/debian/rules | 13 + packaging/debian/source/format | 1 + packaging/debian/source/lintian-overrides | 2 + packaging/debian/upstream/metadata | 4 + packaging/debian/watch | 5 + packit.yaml | 80 + pkg/lib/README | 5 + pkg/lib/_global-variables.scss | 14 + pkg/lib/cockpit-components-context-menu.jsx | 110 + pkg/lib/cockpit-components-dialog.jsx | 372 + pkg/lib/cockpit-components-dialog.scss | 8 + pkg/lib/cockpit-components-dropdown.jsx | 76 + pkg/lib/cockpit-components-dynamic-list.jsx | 143 + pkg/lib/cockpit-components-dynamic-list.scss | 39 + pkg/lib/cockpit-components-empty-state.css | 3 + pkg/lib/cockpit-components-empty-state.jsx | 62 + pkg/lib/cockpit-components-file-autocomplete.jsx | 212 + pkg/lib/cockpit-components-firewalld-request.jsx | 167 + pkg/lib/cockpit-components-firewalld-request.scss | 12 + pkg/lib/cockpit-components-form-helper.jsx | 43 + pkg/lib/cockpit-components-inline-notification.css | 7 + pkg/lib/cockpit-components-inline-notification.jsx | 96 + pkg/lib/cockpit-components-install-dialog.css | 43 + pkg/lib/cockpit-components-install-dialog.jsx | 211 + pkg/lib/cockpit-components-listing-panel.jsx | 87 + pkg/lib/cockpit-components-listing-panel.scss | 93 + pkg/lib/cockpit-components-logs-panel.jsx | 185 + pkg/lib/cockpit-components-logs-panel.scss | 15 + pkg/lib/cockpit-components-modifications.css | 28 + pkg/lib/cockpit-components-modifications.jsx | 182 + pkg/lib/cockpit-components-password.jsx | 188 + pkg/lib/cockpit-components-password.scss | 11 + 
pkg/lib/cockpit-components-plot.jsx | 513 + pkg/lib/cockpit-components-plot.scss | 119 + pkg/lib/cockpit-components-privileged.jsx | 77 + pkg/lib/cockpit-components-shutdown.jsx | 248 + pkg/lib/cockpit-components-shutdown.scss | 17 + pkg/lib/cockpit-components-table.jsx | 297 + pkg/lib/cockpit-components-table.scss | 106 + pkg/lib/cockpit-components-terminal.jsx | 362 + pkg/lib/cockpit-components-truncate.jsx | 42 + pkg/lib/cockpit-components-truncate.scss | 4 + pkg/lib/cockpit-dark-theme.js | 70 + pkg/lib/cockpit-po-plugin.js | 133 + pkg/lib/cockpit-rsync-plugin.js | 49 + pkg/lib/cockpit.js | 4444 +++ pkg/lib/console.css | 16 + pkg/lib/context-menu.scss | 20 + pkg/lib/credentials-ssh-private-keys.sh | 34 + pkg/lib/credentials-ssh-remove-key.sh | 10 + pkg/lib/credentials.js | 344 + pkg/lib/ct-card.scss | 63 + pkg/lib/dialogs.jsx | 131 + pkg/lib/esbuild-cleanup-plugin.js | 17 + pkg/lib/esbuild-common.js | 33 + pkg/lib/esbuild-compress-plugin.js | 50 + pkg/lib/esbuild-test-html-plugin.js | 28 + pkg/lib/get-timesync-backend.py | 61 + pkg/lib/hooks.js | 326 + pkg/lib/html2po.js | 235 + pkg/lib/inotify.py | 72 + pkg/lib/journal.css | 161 + pkg/lib/journal.js | 453 + pkg/lib/long-running-process.js | 166 + pkg/lib/machine-info.js | 259 + pkg/lib/manifest2po.js | 179 + pkg/lib/menu-select-widget.scss | 35 + pkg/lib/notifications.js | 167 + pkg/lib/os-release.js | 38 + pkg/lib/packagekit.js | 528 + pkg/lib/page.scss | 197 + pkg/lib/pam_user_parser.js | 95 + pkg/lib/patternfly/_fonts.scss | 38 + pkg/lib/patternfly/patternfly-5-cockpit.scss | 9 + pkg/lib/patternfly/patternfly-5-overrides.scss | 391 + pkg/lib/plot.js | 574 + pkg/lib/polyfills.js | 25 + pkg/lib/python.js | 30 + pkg/lib/qunit-tests.js | 90 + pkg/lib/serverTime.js | 768 + pkg/lib/serverTime.scss | 7 + pkg/lib/service.js | 344 + pkg/lib/superuser.js | 126 + pkg/lib/table.css | 138 + pkg/lib/timeformat.js | 66 + pkg/lib/utils.jsx | 33 + plans/all.fmf | 23 + po/cs.po | 1511 + po/de.po | 1506 + po/es.po | 1490 + 
po/fi.po | 1485 + po/fr.po | 1530 + po/ja.po | 1468 + po/ka.po | 1409 + po/ko.po | 1462 + po/pl.po | 1509 + po/sk.po | 1554 ++ po/sv.po | 1485 + po/tr.po | 1502 + po/uk.po | 1523 + po/zh_CN.po | 1480 + pyproject.toml | 50 + src/ContainerCheckpointModal.jsx | 68 + src/ContainerCommitModal.jsx | 166 + src/ContainerDeleteModal.jsx | 42 + src/ContainerDetails.jsx | 80 + src/ContainerHeader.jsx | 37 + src/ContainerHealthLogs.jsx | 131 + src/ContainerIntegration.jsx | 121 + src/ContainerLogs.jsx | 175 + src/ContainerRenameModal.jsx | 107 + src/ContainerRestoreModal.jsx | 74 + src/ContainerTerminal.css | 7 + src/ContainerTerminal.jsx | 278 + src/Containers.jsx | 877 + src/Containers.scss | 93 + src/Env.jsx | 96 + src/ForceRemoveModal.jsx | 33 + src/ImageDeleteModal.jsx | 121 + src/ImageDetails.jsx | 47 + src/ImageHistory.jsx | 65 + src/ImageRunModal.jsx | 1183 + src/ImageRunModal.scss | 47 + src/ImageSearchModal.css | 59 + src/ImageSearchModal.jsx | 218 + src/ImageUsedBy.jsx | 44 + src/Images.css | 23 + src/Images.jsx | 429 + src/Notification.jsx | 45 + src/PodActions.jsx | 195 + src/PodCreateModal.jsx | 220 + src/PruneUnusedContainersModal.jsx | 110 + src/PruneUnusedImagesModal.jsx | 123 + src/PublishPort.jsx | 142 + src/Volume.jsx | 90 + src/app.jsx | 791 + src/client.js | 183 + src/index.html | 36 + src/index.js | 30 + src/manifest.json | 16 + src/podman.scss | 149 + src/rest.js | 89 + src/util.js | 185 + test/browser/browser.sh | 88 + test/browser/main.fmf | 24 + test/browser/run-test.sh | 48 + test/check-application | 2871 ++ test/common/__init__.py | 0 test/common/cdp.py | 381 + test/common/chromium-cdp-driver.js | 332 + test/common/firefox-cdp-driver.js | 392 + test/common/git-utils.sh | 150 + test/common/lcov.py | 505 + test/common/link-patterns.json | 39 + test/common/make-bots | 28 + test/common/netlib.py | 214 + test/common/packagelib.py | 428 + test/common/pixel-tests | 261 + test/common/pixeldiff.html | 623 + test/common/pywrap | 30 + test/common/ruff.toml | 
11 + test/common/run-tests | 585 + test/common/storagelib.py | 669 + test/common/tap-cdp | 119 + test/common/test-functions.js | 360 + test/common/testlib.py | 2535 ++ test/reference-image | 1 + test/run | 17 + test/static-code | 200 + test/vm.install | 38 + tools/node-modules | 198 + 280 files changed, 101153 insertions(+) create mode 100644 .eslintignore create mode 100644 .eslintrc.json create mode 100644 .flake8 create mode 100644 .fmf/version create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md create mode 100644 .github/ISSUE_TEMPLATE/enhancement.md create mode 100644 .github/codeql-config.yml create mode 100644 .github/dependabot.yml create mode 100644 .github/workflows/cockpit-lib-update.yml create mode 100644 .github/workflows/codeql.yml create mode 100644 .github/workflows/dependabot.yml create mode 100644 .github/workflows/nightly.yml create mode 100644 .github/workflows/release.yml create mode 100644 .github/workflows/reposchutz.yml create mode 100644 .github/workflows/weblate-sync-po.yml create mode 100644 .github/workflows/weblate-sync-pot.yml create mode 100644 .gitignore create mode 100644 .gitmodules create mode 100644 .stylelintrc.json create mode 100644 HACKING.md create mode 100644 LICENSE create mode 100644 Makefile create mode 100644 README.md create mode 100755 build.js create mode 100644 cockpit-podman.spec create mode 100644 dist/index.css.LEGAL.txt create mode 100644 dist/index.css.gz create mode 100644 dist/index.html create mode 100644 dist/index.js.LEGAL.txt create mode 100644 dist/index.js.gz create mode 100644 dist/manifest.json create mode 100644 dist/po.cs.js.gz create mode 100644 dist/po.de.js.gz create mode 100644 dist/po.es.js.gz create mode 100644 dist/po.fi.js.gz create mode 100644 dist/po.fr.js.gz create mode 100644 dist/po.ja.js.gz create mode 100644 dist/po.ka.js.gz create mode 100644 dist/po.ko.js.gz create mode 100644 dist/po.manifest.cs.js.gz create mode 100644 dist/po.manifest.de.js.gz create mode 100644 
dist/po.manifest.es.js.gz create mode 100644 dist/po.manifest.fi.js.gz create mode 100644 dist/po.manifest.fr.js.gz create mode 100644 dist/po.manifest.ja.js.gz create mode 100644 dist/po.manifest.ka.js.gz create mode 100644 dist/po.manifest.ko.js.gz create mode 100644 dist/po.manifest.pl.js.gz create mode 100644 dist/po.manifest.sk.js.gz create mode 100644 dist/po.manifest.sv.js.gz create mode 100644 dist/po.manifest.tr.js.gz create mode 100644 dist/po.manifest.uk.js.gz create mode 100644 dist/po.manifest.zh_CN.js.gz create mode 100644 dist/po.pl.js.gz create mode 100644 dist/po.sk.js.gz create mode 100644 dist/po.sv.js.gz create mode 100644 dist/po.tr.js.gz create mode 100644 dist/po.uk.js.gz create mode 100644 dist/po.zh_CN.js.gz create mode 100644 node_modules/chrome-remote-interface/LICENSE create mode 100644 node_modules/chrome-remote-interface/README.md create mode 100755 node_modules/chrome-remote-interface/bin/client.js create mode 100644 node_modules/chrome-remote-interface/chrome-remote-interface.js create mode 100644 node_modules/chrome-remote-interface/index.js create mode 100644 node_modules/chrome-remote-interface/lib/api.js create mode 100644 node_modules/chrome-remote-interface/lib/chrome.js create mode 100644 node_modules/chrome-remote-interface/lib/defaults.js create mode 100644 node_modules/chrome-remote-interface/lib/devtools.js create mode 100644 node_modules/chrome-remote-interface/lib/external-request.js create mode 100644 node_modules/chrome-remote-interface/lib/protocol.json create mode 100644 node_modules/chrome-remote-interface/lib/websocket-wrapper.js create mode 100644 node_modules/chrome-remote-interface/package.json create mode 100644 node_modules/chrome-remote-interface/webpack.config.js create mode 100644 node_modules/commander/History.md create mode 100644 node_modules/commander/LICENSE create mode 100644 node_modules/commander/Readme.md create mode 100644 node_modules/commander/index.js create mode 100644 
node_modules/commander/package.json create mode 100644 node_modules/sizzle/AUTHORS.txt create mode 100644 node_modules/sizzle/LICENSE.txt create mode 100644 node_modules/sizzle/README.md create mode 100644 node_modules/sizzle/dist/sizzle.js create mode 100644 node_modules/sizzle/dist/sizzle.min.js create mode 100644 node_modules/sizzle/dist/sizzle.min.map create mode 100644 node_modules/sizzle/package.json create mode 100644 node_modules/ws/LICENSE create mode 100644 node_modules/ws/README.md create mode 100644 node_modules/ws/browser.js create mode 100644 node_modules/ws/index.js create mode 100644 node_modules/ws/lib/buffer-util.js create mode 100644 node_modules/ws/lib/constants.js create mode 100644 node_modules/ws/lib/event-target.js create mode 100644 node_modules/ws/lib/extension.js create mode 100644 node_modules/ws/lib/limiter.js create mode 100644 node_modules/ws/lib/permessage-deflate.js create mode 100644 node_modules/ws/lib/receiver.js create mode 100644 node_modules/ws/lib/sender.js create mode 100644 node_modules/ws/lib/stream.js create mode 100644 node_modules/ws/lib/validation.js create mode 100644 node_modules/ws/lib/websocket-server.js create mode 100644 node_modules/ws/lib/websocket.js create mode 100644 node_modules/ws/package.json create mode 100644 org.cockpit-project.podman.metainfo.xml create mode 100644 package-lock.json create mode 100644 package.json create mode 100644 packaging/arch/PKGBUILD create mode 100644 packaging/debian/changelog create mode 100644 packaging/debian/control create mode 100644 packaging/debian/copyright create mode 100755 packaging/debian/rules create mode 100644 packaging/debian/source/format create mode 100644 packaging/debian/source/lintian-overrides create mode 100644 packaging/debian/upstream/metadata create mode 100644 packaging/debian/watch create mode 100644 packit.yaml create mode 100644 pkg/lib/README create mode 100644 pkg/lib/_global-variables.scss create mode 100644 
pkg/lib/cockpit-components-context-menu.jsx create mode 100644 pkg/lib/cockpit-components-dialog.jsx create mode 100644 pkg/lib/cockpit-components-dialog.scss create mode 100644 pkg/lib/cockpit-components-dropdown.jsx create mode 100644 pkg/lib/cockpit-components-dynamic-list.jsx create mode 100644 pkg/lib/cockpit-components-dynamic-list.scss create mode 100644 pkg/lib/cockpit-components-empty-state.css create mode 100644 pkg/lib/cockpit-components-empty-state.jsx create mode 100644 pkg/lib/cockpit-components-file-autocomplete.jsx create mode 100644 pkg/lib/cockpit-components-firewalld-request.jsx create mode 100644 pkg/lib/cockpit-components-firewalld-request.scss create mode 100644 pkg/lib/cockpit-components-form-helper.jsx create mode 100644 pkg/lib/cockpit-components-inline-notification.css create mode 100644 pkg/lib/cockpit-components-inline-notification.jsx create mode 100644 pkg/lib/cockpit-components-install-dialog.css create mode 100644 pkg/lib/cockpit-components-install-dialog.jsx create mode 100644 pkg/lib/cockpit-components-listing-panel.jsx create mode 100644 pkg/lib/cockpit-components-listing-panel.scss create mode 100644 pkg/lib/cockpit-components-logs-panel.jsx create mode 100644 pkg/lib/cockpit-components-logs-panel.scss create mode 100644 pkg/lib/cockpit-components-modifications.css create mode 100644 pkg/lib/cockpit-components-modifications.jsx create mode 100644 pkg/lib/cockpit-components-password.jsx create mode 100644 pkg/lib/cockpit-components-password.scss create mode 100644 pkg/lib/cockpit-components-plot.jsx create mode 100644 pkg/lib/cockpit-components-plot.scss create mode 100644 pkg/lib/cockpit-components-privileged.jsx create mode 100644 pkg/lib/cockpit-components-shutdown.jsx create mode 100644 pkg/lib/cockpit-components-shutdown.scss create mode 100644 pkg/lib/cockpit-components-table.jsx create mode 100644 pkg/lib/cockpit-components-table.scss create mode 100644 pkg/lib/cockpit-components-terminal.jsx create mode 100644 
pkg/lib/cockpit-components-truncate.jsx create mode 100644 pkg/lib/cockpit-components-truncate.scss create mode 100644 pkg/lib/cockpit-dark-theme.js create mode 100644 pkg/lib/cockpit-po-plugin.js create mode 100644 pkg/lib/cockpit-rsync-plugin.js create mode 100644 pkg/lib/cockpit.js create mode 100644 pkg/lib/console.css create mode 100644 pkg/lib/context-menu.scss create mode 100644 pkg/lib/credentials-ssh-private-keys.sh create mode 100644 pkg/lib/credentials-ssh-remove-key.sh create mode 100644 pkg/lib/credentials.js create mode 100644 pkg/lib/ct-card.scss create mode 100644 pkg/lib/dialogs.jsx create mode 100644 pkg/lib/esbuild-cleanup-plugin.js create mode 100644 pkg/lib/esbuild-common.js create mode 100644 pkg/lib/esbuild-compress-plugin.js create mode 100644 pkg/lib/esbuild-test-html-plugin.js create mode 100644 pkg/lib/get-timesync-backend.py create mode 100644 pkg/lib/hooks.js create mode 100755 pkg/lib/html2po.js create mode 100644 pkg/lib/inotify.py create mode 100644 pkg/lib/journal.css create mode 100644 pkg/lib/journal.js create mode 100644 pkg/lib/long-running-process.js create mode 100644 pkg/lib/machine-info.js create mode 100755 pkg/lib/manifest2po.js create mode 100644 pkg/lib/menu-select-widget.scss create mode 100644 pkg/lib/notifications.js create mode 100644 pkg/lib/os-release.js create mode 100644 pkg/lib/packagekit.js create mode 100644 pkg/lib/page.scss create mode 100644 pkg/lib/pam_user_parser.js create mode 100644 pkg/lib/patternfly/_fonts.scss create mode 100644 pkg/lib/patternfly/patternfly-5-cockpit.scss create mode 100644 pkg/lib/patternfly/patternfly-5-overrides.scss create mode 100644 pkg/lib/plot.js create mode 100644 pkg/lib/polyfills.js create mode 100644 pkg/lib/python.js create mode 100644 pkg/lib/qunit-tests.js create mode 100644 pkg/lib/serverTime.js create mode 100644 pkg/lib/serverTime.scss create mode 100644 pkg/lib/service.js create mode 100644 pkg/lib/superuser.js create mode 100644 pkg/lib/table.css create mode 
100644 pkg/lib/timeformat.js create mode 100644 pkg/lib/utils.jsx create mode 100644 plans/all.fmf create mode 100644 po/cs.po create mode 100644 po/de.po create mode 100644 po/es.po create mode 100644 po/fi.po create mode 100644 po/fr.po create mode 100644 po/ja.po create mode 100644 po/ka.po create mode 100644 po/ko.po create mode 100644 po/pl.po create mode 100644 po/sk.po create mode 100644 po/sv.po create mode 100644 po/tr.po create mode 100644 po/uk.po create mode 100644 po/zh_CN.po create mode 100644 pyproject.toml create mode 100644 src/ContainerCheckpointModal.jsx create mode 100644 src/ContainerCommitModal.jsx create mode 100644 src/ContainerDeleteModal.jsx create mode 100644 src/ContainerDetails.jsx create mode 100644 src/ContainerHeader.jsx create mode 100644 src/ContainerHealthLogs.jsx create mode 100644 src/ContainerIntegration.jsx create mode 100644 src/ContainerLogs.jsx create mode 100644 src/ContainerRenameModal.jsx create mode 100644 src/ContainerRestoreModal.jsx create mode 100644 src/ContainerTerminal.css create mode 100644 src/ContainerTerminal.jsx create mode 100644 src/Containers.jsx create mode 100644 src/Containers.scss create mode 100644 src/Env.jsx create mode 100644 src/ForceRemoveModal.jsx create mode 100644 src/ImageDeleteModal.jsx create mode 100644 src/ImageDetails.jsx create mode 100644 src/ImageHistory.jsx create mode 100644 src/ImageRunModal.jsx create mode 100644 src/ImageRunModal.scss create mode 100644 src/ImageSearchModal.css create mode 100644 src/ImageSearchModal.jsx create mode 100644 src/ImageUsedBy.jsx create mode 100644 src/Images.css create mode 100644 src/Images.jsx create mode 100644 src/Notification.jsx create mode 100644 src/PodActions.jsx create mode 100644 src/PodCreateModal.jsx create mode 100644 src/PruneUnusedContainersModal.jsx create mode 100644 src/PruneUnusedImagesModal.jsx create mode 100644 src/PublishPort.jsx create mode 100644 src/Volume.jsx create mode 100644 src/app.jsx create mode 100644 
src/client.js create mode 100644 src/index.html create mode 100644 src/index.js create mode 100644 src/manifest.json create mode 100644 src/podman.scss create mode 100644 src/rest.js create mode 100644 src/util.js create mode 100755 test/browser/browser.sh create mode 100644 test/browser/main.fmf create mode 100755 test/browser/run-test.sh create mode 100755 test/check-application create mode 100644 test/common/__init__.py create mode 100644 test/common/cdp.py create mode 100755 test/common/chromium-cdp-driver.js create mode 100755 test/common/firefox-cdp-driver.js create mode 100644 test/common/git-utils.sh create mode 100755 test/common/lcov.py create mode 100644 test/common/link-patterns.json create mode 100755 test/common/make-bots create mode 100644 test/common/netlib.py create mode 100644 test/common/packagelib.py create mode 100755 test/common/pixel-tests create mode 100644 test/common/pixeldiff.html create mode 100755 test/common/pywrap create mode 100644 test/common/ruff.toml create mode 100755 test/common/run-tests create mode 100644 test/common/storagelib.py create mode 100755 test/common/tap-cdp create mode 100644 test/common/test-functions.js create mode 100644 test/common/testlib.py create mode 100644 test/reference-image create mode 100755 test/run create mode 100755 test/static-code create mode 100755 test/vm.install create mode 100755 tools/node-modules diff --git a/.eslintignore b/.eslintignore new file mode 100644 index 0000000..85f5a45 --- /dev/null +++ b/.eslintignore @@ -0,0 +1,2 @@ +node_modules/* +pkg/lib/* diff --git a/.eslintrc.json b/.eslintrc.json new file mode 100644 index 0000000..5850feb --- /dev/null +++ b/.eslintrc.json @@ -0,0 +1,54 @@ +{ + "root": true, + "env": { + "browser": true, + "es2022": true + }, + "extends": ["eslint:recommended", "standard", "standard-jsx", "standard-react", "plugin:jsx-a11y/recommended"], + "parserOptions": { + "ecmaVersion": 2022 + }, + "plugins": ["react", "react-hooks", "jsx-a11y"], + "rules": { + 
"indent": ["error", 4, + { + "ObjectExpression": "first", + "CallExpression": {"arguments": "first"}, + "MemberExpression": 2, + "ignoredNodes": [ "JSXAttribute" ] + }], + "newline-per-chained-call": ["error", { "ignoreChainWithDepth": 2 }], + "no-var": "error", + "lines-between-class-members": ["error", "always", { "exceptAfterSingleLine": true }], + "prefer-promise-reject-errors": ["error", { "allowEmptyReject": true }], + "react/jsx-indent": ["error", 4], + "semi": ["error", "always", { "omitLastInOneLineBlock": true }], + + "camelcase": "off", + "comma-dangle": "off", + "curly": "off", + "jsx-quotes": "off", + "no-console": "off", + "no-undef": "error", + "quotes": "off", + "react/jsx-curly-spacing": "off", + "react/jsx-indent-props": "off", + "react/jsx-closing-bracket-location": "off", + "react/jsx-closing-tag-location": "off", + "react/jsx-first-prop-new-line": "off", + "react/jsx-curly-newline": "off", + "react/jsx-handler-names": "off", + "react/prop-types": "off", + "space-before-function-paren": "off", + "standard/no-callback-literal": "off", + + "jsx-a11y/anchor-is-valid": "off", + + "eqeqeq": "off", + "react/jsx-no-bind": "off" + }, + "globals": { + "require": false, + "module": false + } +} diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..cf4c387 --- /dev/null +++ b/.flake8 @@ -0,0 +1,2 @@ +[flake8] +max-line-length = 118 diff --git a/.fmf/version b/.fmf/version new file mode 100644 index 0000000..d00491f --- /dev/null +++ b/.fmf/version @@ -0,0 +1 @@ +1 diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..629f4fc --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,31 @@ +--- +name: Bug report +about: For bugs and general problems +title: +labels: 'bug' +assignees: '' + +--- + +Cockpit version: xxx +Cockpit-podman version: xxx +Podman version: xxx +OS: + + + + + +Steps to reproduce + +1. +2. +3. 
+ + diff --git a/.github/ISSUE_TEMPLATE/enhancement.md b/.github/ISSUE_TEMPLATE/enhancement.md new file mode 100644 index 0000000..4978991 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/enhancement.md @@ -0,0 +1,11 @@ +--- +name: Enhancement +about: For feature requests and discussion of ideas +title: +labels: 'enhancement' +assignees: '' + +--- + + + diff --git a/.github/codeql-config.yml b/.github/codeql-config.yml new file mode 100644 index 0000000..bd11a70 --- /dev/null +++ b/.github/codeql-config.yml @@ -0,0 +1,2 @@ +paths: + - src diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..a6ca0d0 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,38 @@ +version: 2 +updates: + - package-ecosystem: "npm" + directory: "/" + schedule: + interval: "daily" + # run these when most of our developers don't work, don't DoS our CI over the day + time: "22:00" + timezone: "Europe/Berlin" + open-pull-requests-limit: 3 + commit-message: + prefix: "[no-test]" + labels: + - "node_modules" + groups: + eslint: + patterns: + - "eslint*" + esbuild: + patterns: + - "esbuild*" + stylelint: + patterns: + - "stylelint*" + xterm: + patterns: + - "xterm*" + patternfly: + patterns: + - "@patternfly*" + + - package-ecosystem: "github-actions" + directory: "/" + open-pull-requests-limit: 3 + labels: + - "no-test" + schedule: + interval: "weekly" diff --git a/.github/workflows/cockpit-lib-update.yml b/.github/workflows/cockpit-lib-update.yml new file mode 100644 index 0000000..37c398a --- /dev/null +++ b/.github/workflows/cockpit-lib-update.yml @@ -0,0 +1,33 @@ +name: cockpit-lib-update +on: + schedule: + - cron: '0 2 * * 4' + # can be run manually on https://github.com/cockpit-project/starter-kit/actions + workflow_dispatch: +jobs: + cockpit-lib-update: + environment: self + runs-on: ubuntu-latest + permissions: + pull-requests: write + statuses: write + steps: + - name: Set up dependencies + run: | + sudo apt update + sudo apt install -y make + + - 
name: Set up configuration and secrets + run: | + printf '[user]\n\tname = Cockpit Project\n\temail=cockpituous@gmail.com\n' > ~/.gitconfig + echo '${{ secrets.GITHUB_TOKEN }}' > ~/.config/github-token + + - name: Clone repository + uses: actions/checkout@v4 + with: + ssh-key: ${{ secrets.DEPLOY_KEY }} + + - name: Run cockpit-lib-update + run: | + make bots + bots/cockpit-lib-update diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 0000000..ad050d0 --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,33 @@ +name: CodeQL +on: [push, pull_request] + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-22.04 + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: + - javascript + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + queries: +security-and-quality + config-file: ./.github/codeql-config.yml + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 + with: + category: "/language:${{ matrix.language }}" diff --git a/.github/workflows/dependabot.yml b/.github/workflows/dependabot.yml new file mode 100644 index 0000000..0ab8975 --- /dev/null +++ b/.github/workflows/dependabot.yml @@ -0,0 +1,82 @@ +name: update node_modules +on: + pull_request_target: + types: [opened, reopened, synchronize, labeled] + +jobs: + dependabot: + environment: npm-update + permissions: + contents: read + pull-requests: write + timeout-minutes: 5 + # 22.04's podman has issues with piping and causes tar errors + runs-on: ubuntu-20.04 + if: ${{ contains(github.event.pull_request.labels.*.name, 'node_modules') }} + + steps: + - name: Clone repository + uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.ref }} + repository: ${{ github.event.pull_request.head.repo.full_name }} + fetch-depth: 0 + + - name: 
Clear node_modules label + uses: actions/github-script@v7 + with: + script: | + try { + await github.rest.issues.removeLabel({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + name: 'node_modules' + }); + } catch (e) { + if (e.name == 'HttpError' && e.status == 404) { + /* expected: 404 if label is unset */ + } else { + throw e; + } + } + + - name: Update node_modules for package.json changes + run: | + make tools/node-modules + git config --global user.name "GitHub Workflow" + git config --global user.email "cockpituous@cockpit-project.org" + eval $(ssh-agent) + ssh-add - <<< '${{ secrets.NODE_CACHE_DEPLOY_KEY }}' + ./tools/node-modules install + ./tools/node-modules push + git add node_modules + ssh-add -D + ssh-agent -k + + - name: Clear [no-test] prefix from PR title + if: ${{ contains(github.event.pull_request.title, '[no-test]') }} + uses: actions/github-script@v7 + env: + TITLE: '${{ github.event.pull_request.title }}' + with: + script: | + const title = process.env['TITLE'].replace(/\[no-test\]\W+ /, '') + await github.rest.pulls.update({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: context.issue.number, + title, + }); + + - name: Force push node_modules update + run: | + # Dependabot prefixes the commit with [no-test] which we don't want to keep in the commit + title=$(git show --pretty="%s" -s | sed -E "s/\[no-test\]\W+ //") + body=$(git show -s --pretty="%b") + git commit --amend -m "${title}" -m "${body}" --no-edit node_modules + eval $(ssh-agent) + ssh-add - <<< '${{ secrets.SELF_DEPLOY_KEY }}' + git push --force 'git@github.com:${{ github.repository }}' '${{ github.head_ref }}' + ssh-add -D + ssh-agent -k diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml new file mode 100644 index 0000000..9f7de13 --- /dev/null +++ b/.github/workflows/nightly.yml @@ -0,0 +1,22 @@ +name: nightly +on: + schedule: + - cron: '0 1 * * *' + # can be run manually on 
https://github.com/cockpit-project/cockpit-podman/actions + workflow_dispatch: +jobs: + trigger: + permissions: + statuses: write + runs-on: ubuntu-22.04 + steps: + - name: Clone repository + uses: actions/checkout@v4 + + - name: Trigger updates-testing scenario + run: | + make bots + mkdir -p ~/.config/cockpit-dev + echo "${{ github.token }}" >> ~/.config/cockpit-dev/github-token + TEST_OS=$(PYTHONPATH=bots python3 -c 'from lib.constants import TEST_OS_DEFAULT; print(TEST_OS_DEFAULT)') + bots/tests-trigger --force "-" "${TEST_OS}/updates-testing" "${TEST_OS}/podman-next" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..807860b --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,68 @@ +name: release +on: + push: + tags: + # this is a glob, not a regexp + - '[0-9]*' +jobs: + source: + runs-on: ubuntu-latest + container: + image: quay.io/cockpit/tasks:latest + options: --user root + permissions: + # create GitHub release + contents: write + steps: + - name: Clone repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + # https://github.blog/2022-04-12-git-security-vulnerability-announced/ + - name: Pacify git's permission check + run: git config --global --add safe.directory /__w/cockpit-podman/cockpit-podman + + - name: Workaround for https://github.com/actions/checkout/pull/697 + run: git fetch --force origin $(git describe --tags):refs/tags/$(git describe --tags) + + - name: Build release + run: make dist + + - name: Publish GitHub release + uses: cockpit-project/action-release@7d2e2657382e8d34f88a24b5987f2b81ea165785 + with: + filename: "cockpit-podman-${{ github.ref_name }}.tar.xz" + + node-cache: + # doesn't depend on it, but let's make sure the build passes before we do this + needs: [source] + runs-on: ubuntu-latest + environment: node-cache + # done via deploy key, token needs no write permissions at all + permissions: {} + steps: + - name: Clone repository + uses: 
actions/checkout@v4 + + - name: Set up git + run: | + git config user.name "GitHub Workflow" + git config user.email "cockpituous@cockpit-project.org" + + - name: Tag node-cache + run: | + set -eux + # this is a shared repo, prefix with project name + TAG="${GITHUB_REPOSITORY#*/}-$(basename $GITHUB_REF)" + make tools/node-modules + tools/node-modules checkout + cd node_modules + git tag "$TAG" + git remote add cache "ssh://git@github.com/${GITHUB_REPOSITORY%/*}/node-cache" + eval $(ssh-agent) + ssh-add - <<< '${{ secrets.DEPLOY_KEY }}' + # make this idempotent: delete an existing tag + git push cache :"$TAG" || true + git push cache tag "$TAG" + ssh-add -D diff --git a/.github/workflows/reposchutz.yml b/.github/workflows/reposchutz.yml new file mode 100644 index 0000000..bdadab6 --- /dev/null +++ b/.github/workflows/reposchutz.yml @@ -0,0 +1,66 @@ +name: repository +on: + pull_request_target: + types: [opened, reopened, synchronize, labeled, unlabeled] + +jobs: + check: + name: Protection checks + # 22.04's podman has issues with piping and causes tar errors + runs-on: ubuntu-20.04 + permissions: + contents: read + pull-requests: write + timeout-minutes: 5 + env: + HEAD_SHA: ${{ github.event.pull_request.head.sha }} + BASE_SHA: ${{ github.event.pull_request.base.sha }} + + steps: + - name: Clone target branch + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Fetch PR commits + run: git fetch origin "${BASE_SHA}" "${HEAD_SHA}" + + - name: Clear .github-changes label + if: ${{ !endsWith(github.event.action, 'labeled') }} + uses: actions/github-script@v7 + with: + script: | + try { + await github.rest.issues.removeLabel({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + name: '.github-changes' + }); + } catch (e) { + if (e.name == 'HttpError' && e.status == 404) { + /* expected: 404 if label is unset */ + } else { + throw e; + } + } + + - name: Check for .github changes + # We want to run this check any 
time the .github-changes label is not + # set, which needs to include the case where we just unset it above. + if: ${{ !endsWith(github.event.action, 'labeled') || + !contains(github.event.pull_request.labels.*.name, '.github-changes') }} + run: | + set -x + git log --full-history --exit-code --patch "${HEAD_SHA}" --not "${BASE_SHA}" -- .github >&2 + + - name: Check for node_modules availability and package.json consistency + run: | + # Make tools/node-modules available + make tools/node-modules + # for each commit in the PR which modifies package.json or node_modules... + for commit in $(git log --reverse --full-history --format=%H \ + "${HEAD_SHA}" --not "${BASE_SHA}" -- package.json node_modules); do + # ... check that package.json and node_modules/.package.json are in sync + tools/node-modules verify "${commit}" + done diff --git a/.github/workflows/weblate-sync-po.yml b/.github/workflows/weblate-sync-po.yml new file mode 100644 index 0000000..a74d300 --- /dev/null +++ b/.github/workflows/weblate-sync-po.yml @@ -0,0 +1,46 @@ +name: weblate-sync-po +on: + schedule: + # Run this on Tuesday evening (UTC), so that it's ready for release on + # Wednesday, with some spare time + - cron: '0 18 * * 2' + # can be run manually on https://github.com/cockpit-project/cockpit/actions + workflow_dispatch: + +jobs: + po-refresh: + environment: self + permissions: + pull-requests: write + statuses: write + runs-on: ubuntu-latest + steps: + - name: Set up dependencies + run: | + sudo apt update + sudo apt install -y --no-install-recommends gettext + + - name: Clone source repository + uses: actions/checkout@v4 + with: + ssh-key: ${{ secrets.DEPLOY_KEY }} + path: src + + - name: Clone weblate repository + uses: actions/checkout@v4 + with: + repository: ${{ github.repository }}-weblate + path: weblate + + - name: Copy .po files from weblate repository + run: cp weblate/*.po src/po/ + + - name: Run po-refresh bot + run: | + cd src + make bots + git config --global user.name "GitHub 
Workflow" + git config --global user.email "cockpituous@cockpit-project.org" + mkdir -p ~/.config/cockpit-dev + echo ${{ github.token }} >> ~/.config/cockpit-dev/github-token + PO_REFRESH_NO_SYNC=1 bots/po-refresh diff --git a/.github/workflows/weblate-sync-pot.yml b/.github/workflows/weblate-sync-pot.yml new file mode 100644 index 0000000..8221884 --- /dev/null +++ b/.github/workflows/weblate-sync-pot.yml @@ -0,0 +1,42 @@ +name: weblate-sync-pot +on: + schedule: + # Run this every morning + - cron: '45 2 * * *' + # can be run manually on https://github.com/cockpit-project/cockpit-podman/actions + workflow_dispatch: + +jobs: + pot-upload: + environment: cockpit-podman-weblate + permissions: + pull-requests: write + runs-on: ubuntu-latest + steps: + - name: Set up dependencies + run: | + sudo apt update + sudo apt install -y --no-install-recommends npm make gettext appstream + + - name: Clone source repository + uses: actions/checkout@v4 + with: + path: src + + - name: Generate .pot file + run: make -C src po/podman.pot + + - name: Clone weblate repository + uses: actions/checkout@v4 + with: + path: weblate + repository: ${{ github.repository }}-weblate + ssh-key: ${{ secrets.DEPLOY_KEY }} + + - name: Commit .pot to weblate repo + run: | + cp src/po/podman.pot weblate/podman.pot + git config --global user.name "GitHub Workflow" + git config --global user.email "cockpituous@cockpit-project.org" + git -C weblate commit -m "Update source file" -- podman.pot + git -C weblate push diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..07d6855 --- /dev/null +++ b/.gitignore @@ -0,0 +1,34 @@ +# Please keep this file sorted (LC_COLLATE=C.UTF-8), +# grouped into the 3 categories below: +# - general patterns (match in all directories) +# - patterns to match files at the toplevel +# - patterns to match files in subdirs + +# general patterns +*.pyc +*.rpm + +# toplevel (/...) 
+/Test*.html +/Test*.json +/Test*.log +/Test*.log.gz +/Test*.png +/*.whl +/bots +/cockpit-*.tar.xz +/cockpit-podman.spec +/dist/ +/package-lock.json +/pkg/ +/tmp/ +/tools/ + +# subdirs (/subdir/...) +/packaging/arch/PKGBUILD +/packaging/debian/changelog +/po/*.pot + /po/LINGUAS +/test/common/ +/test/images/ +/test/static-code diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..2ca4fcf --- /dev/null +++ b/.gitmodules @@ -0,0 +1,7 @@ +[submodule "test/reference"] + path = test/reference + url = https://github.com/cockpit-project/pixel-test-reference + branch = empty +[submodule "node_modules"] + path = node_modules + url = https://github.com/cockpit-project/node-cache.git diff --git a/.stylelintrc.json b/.stylelintrc.json new file mode 100644 index 0000000..67a29f0 --- /dev/null +++ b/.stylelintrc.json @@ -0,0 +1,39 @@ +{ + "extends": "stylelint-config-standard-scss", + "plugins": [ + "stylelint-use-logical-spec" + ], + "rules": { + "at-rule-empty-line-before": null, + "declaration-empty-line-before": null, + "custom-property-empty-line-before": null, + "comment-empty-line-before": null, + "scss/double-slash-comment-empty-line-before": null, + "scss/dollar-variable-colon-space-after": null, + + "custom-property-pattern": null, + "declaration-block-no-duplicate-properties": null, + "declaration-block-no-redundant-longhand-properties": null, + "declaration-block-no-shorthand-property-overrides": null, + "declaration-block-single-line-max-declarations": null, + "font-family-no-duplicate-names": null, + "function-url-quotes": null, + "keyframes-name-pattern": null, + "media-feature-range-notation": "prefix", + "no-descending-specificity": null, + "no-duplicate-selectors": null, + "scss/at-extend-no-missing-placeholder": null, + "scss/at-import-partial-extension": null, + "scss/at-import-no-partial-leading-underscore": null, + "scss/load-no-partial-leading-underscore": true, + "scss/at-mixin-pattern": null, + "scss/comment-no-empty": null, + 
"scss/dollar-variable-pattern": null, + "scss/double-slash-comment-whitespace-inside": null, + "scss/no-global-function-names": null, + "scss/operator-no-unspaced": null, + "selector-class-pattern": null, + "selector-id-pattern": null, + "liberty/use-logical-spec": "always" + } +} diff --git a/HACKING.md b/HACKING.md new file mode 100644 index 0000000..9b03381 --- /dev/null +++ b/HACKING.md @@ -0,0 +1,107 @@ +# Hacking on Cockpit Podman + +The commands here assume you're in the top level of the Cockpit Podman git +repository checkout. + +## Running out of git checkout + +For development, you usually want to run your module straight out of the git +tree. To do that, run `make devel-install`, which links your checkout to the +location were `cockpit-bridge` looks for packages. If you prefer to do this +manually: + +``` +mkdir -p ~/.local/share/cockpit +ln -s `pwd`/dist ~/.local/share/cockpit/podman +``` + +After changing the code and running `make` again, reload the Cockpit page in +your browser. + +You can also use +[watch mode](https://esbuild.github.io/api/#watch) to +automatically update the bundle on every code change with + + $ make watch + +When developing against a virtual machine, watch mode can also automatically upload +the code changes by setting the `RSYNC` environment variable to +the remote hostname. + + $ RSYNC=c make watch + +When developing against a remote host as a normal user, `RSYNC_DEVEL` can be +set to upload code changes to `~/.local/share/cockpit/` instead of +`/usr/local`. + + $ RSYNC_DEVEL=example.com make watch + +## Running eslint + +Cockpit Podman uses [ESLint](https://eslint.org/) to automatically check +JavaScript code style in `.jsx` and `.js` files. + +eslint is executed as part of `test/static-code`, aka. `make codecheck`. 
+ For developer convenience, the ESLint can be started explicitly by: + + $ npm run eslint + +Violations of some rules can be fixed automatically by: + + $ npm run eslint:fix + +Rules configuration can be found in the `.eslintrc.json` file. + +## Running stylelint + +Cockpit uses [Stylelint](https://stylelint.io/) to automatically check CSS code +style in `.css` and `.scss` files. + +stylelint is executed as part of `test/static-code`, aka. `make codecheck`. + +For developer convenience, the Stylelint can be started explicitly by: + + $ npm run stylelint + +Violations of some rules can be fixed automatically by: + + $ npm run stylelint:fix + +Rules configuration can be found in the `.stylelintrc.json` file. + +# Running tests locally + +Run `make vm` to build an RPM and install it into a standard Cockpit test VM. +This will be `fedora-39` by default. You can set `$TEST_OS` to use a different +image, for example + + TEST_OS=centos-8-stream make vm + +Then run + + make test/common + +to pull in [Cockpit's shared test API](https://github.com/cockpit-project/cockpit/tree/main/test/common) +for running Chrome DevTools Protocol based browser tests. + +With this preparation, you can manually run a single test without +rebuilding the VM, possibly with extra options for tracing and halting on test +failures (for interactive debugging): + + TEST_OS=... test/check-application TestApplication.testRunImageSystem -stv + +Use this command to list all known tests: + + test/check-application -l + +You can also run all of the tests: + + TEST_OS=centos-8-stream make check + +However, this is rather expensive, and most of the time it's better to let the +CI machinery do this on a draft pull request. + +Please see [Cockpit's test documentation](https://github.com/cockpit-project/cockpit/blob/main/test/README.md) +for details how to run against existing VMs, interactive browser window, +interacting with the test VM, and more. 
diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..4362b49 --- /dev/null +++ b/LICENSE @@ -0,0 +1,502 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. 
These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. 
+ + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. 
+ + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. 
The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. 
+ + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. 
+ + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. 
+Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. 
Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. 
However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. 
However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. 
+ +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. 
If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! 
diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..77b3184 --- /dev/null +++ b/Makefile @@ -0,0 +1,217 @@ +# extract name from package.json +PACKAGE_NAME := $(shell awk '/"name":/ {gsub(/[",]/, "", $$2); print $$2}' package.json) +RPM_NAME := cockpit-$(PACKAGE_NAME) +VERSION := $(shell T=$$(git describe 2>/dev/null) || T=1; echo $$T | tr '-' '.') +ifeq ($(TEST_OS),) +TEST_OS = fedora-39 +endif +export TEST_OS +TARFILE=$(RPM_NAME)-$(VERSION).tar.xz +NODE_CACHE=$(RPM_NAME)-node-$(VERSION).tar.xz +SPEC=$(RPM_NAME).spec +PREFIX ?= /usr/local +APPSTREAMFILE=org.cockpit-project.$(PACKAGE_NAME).metainfo.xml +VM_IMAGE=$(CURDIR)/test/images/$(TEST_OS) +# stamp file to check for node_modules/ +NODE_MODULES_TEST=package-lock.json +# one example file in dist/ from bundler to check if that already ran +DIST_TEST=dist/manifest.json +# one example file in pkg/lib to check if it was already checked out +COCKPIT_REPO_STAMP=pkg/lib/cockpit-po-plugin.js +# common arguments for tar, mostly to make the generated tarballs reproducible +TAR_ARGS = --sort=name --mtime "@$(shell git show --no-patch --format='%at')" --mode=go=rX,u+rw,a-s --numeric-owner --owner=0 --group=0 + +VM_CUSTOMIZE_FLAGS = + +# HACK: https://github.com/containers/podman/issues/21896 +VM_CUSTOMIZE_FLAGS += --run-command 'nmcli con add type dummy con-name fake ifname fake0 ip4 1.2.3.4/24 gw4 1.2.3.1 >&2' + +# the following scenarios need network access +ifeq ("$(TEST_SCENARIO)","updates-testing") +VM_CUSTOMIZE_FLAGS += --run-command 'dnf -y update --setopt=install_weak_deps=False --enablerepo=updates-testing >&2' +else ifeq ("$(TEST_SCENARIO)","podman-next") +VM_CUSTOMIZE_FLAGS += --run-command 'dnf -y copr enable rhcontainerbot/podman-next >&2; dnf -y update --repo "copr*" >&2' +else +# default scenario does not install packages +VM_CUSTOMIZE_FLAGS += --no-network +endif + +ifeq ($(TEST_COVERAGE),yes) +RUN_TESTS_OPTIONS+=--coverage +NODE_ENV=development +endif + +all: $(DIST_TEST) + +# checkout 
common files from Cockpit repository required to build this project; +# this has no API stability guarantee, so check out a stable tag when you start +# a new project, use the latest release, and update it from time to time +COCKPIT_REPO_FILES = \ + pkg/lib \ + test/common \ + test/static-code \ + tools/node-modules \ + $(NULL) + +COCKPIT_REPO_URL = https://github.com/cockpit-project/cockpit.git +COCKPIT_REPO_COMMIT = 50fc1b962eeefefc9926ece8e4891477a88a4454 # 312 + 22 commits + +$(COCKPIT_REPO_FILES): $(COCKPIT_REPO_STAMP) +COCKPIT_REPO_TREE = '$(strip $(COCKPIT_REPO_COMMIT))^{tree}' +$(COCKPIT_REPO_STAMP): Makefile + @git rev-list --quiet --objects $(COCKPIT_REPO_TREE) -- 2>/dev/null || \ + git fetch --no-tags --no-write-fetch-head --depth=1 $(COCKPIT_REPO_URL) $(COCKPIT_REPO_COMMIT) + git archive $(COCKPIT_REPO_TREE) -- $(COCKPIT_REPO_FILES) | tar x + +# +# i18n +# + +LINGUAS=$(basename $(notdir $(wildcard po/*.po))) + +po/$(PACKAGE_NAME).js.pot: + xgettext --default-domain=$(PACKAGE_NAME) --output=$@ --language=C --keyword= \ + --keyword=_:1,1t --keyword=_:1c,2,2t --keyword=C_:1c,2 \ + --keyword=N_ --keyword=NC_:1c,2 \ + --keyword=gettext:1,1t --keyword=gettext:1c,2,2t \ + --keyword=ngettext:1,2,3t --keyword=ngettext:1c,2,3,4t \ + --keyword=gettextCatalog.getString:1,3c --keyword=gettextCatalog.getPlural:2,3,4c \ + --from-code=UTF-8 $$(find src/ -name '*.js' -o -name '*.jsx') + +po/$(PACKAGE_NAME).html.pot: $(NODE_MODULES_TEST) $(COCKPIT_REPO_STAMP) + pkg/lib/html2po.js -o $@ $$(find src -name '*.html') + +po/$(PACKAGE_NAME).manifest.pot: $(NODE_MODULES_TEST) $(COCKPIT_REPO_STAMP) + pkg/lib/manifest2po.js src/manifest.json -o $@ + +po/$(PACKAGE_NAME).metainfo.pot: $(APPSTREAMFILE) + xgettext --default-domain=$(PACKAGE_NAME) --output=$@ $< + +po/$(PACKAGE_NAME).pot: po/$(PACKAGE_NAME).html.pot po/$(PACKAGE_NAME).js.pot po/$(PACKAGE_NAME).manifest.pot po/$(PACKAGE_NAME).metainfo.pot + msgcat --sort-output --output-file=$@ $^ + +po/LINGUAS: + echo $(LINGUAS) | tr 
' ' '\n' > $@ + +# +# Build/Install/dist +# +$(SPEC): packaging/$(SPEC).in $(NODE_MODULES_TEST) + provides=$$(npm ls --omit dev --package-lock-only --depth=Infinity | grep -Eo '[^[:space:]]+@[^[:space:]]+' | sort -u | sed 's/^/Provides: bundled(npm(/; s/\(.*\)@/\1)) = /'); \ + awk -v p="$$provides" '{gsub(/%{VERSION}/, "$(VERSION)"); gsub(/%{NPM_PROVIDES}/, p)}1' $< > $@ + +packaging/arch/PKGBUILD: packaging/arch/PKGBUILD.in + sed 's/VERSION/$(VERSION)/; s/SOURCE/$(TARFILE)/' $< > $@ + +packaging/debian/changelog: packaging/debian/changelog.in + sed 's/VERSION/$(VERSION)/' $< > $@ + +$(DIST_TEST): $(COCKPIT_REPO_STAMP) $(shell find src/ -type f) package.json build.js + $(MAKE) package-lock.json && NODE_ENV=$(NODE_ENV) ./build.js + +watch: $(NODE_MODULES_TEST) + NODE_ENV=$(NODE_ENV) ./build.js -w + +clean: + rm -rf dist/ + rm -f $(SPEC) packaging/arch/PKGBUILD packaging/debian/changelog + rm -f po/LINGUAS + +install: $(DIST_TEST) po/LINGUAS + mkdir -p $(DESTDIR)$(PREFIX)/share/cockpit/$(PACKAGE_NAME) + cp -r dist/* $(DESTDIR)$(PREFIX)/share/cockpit/$(PACKAGE_NAME) + mkdir -p $(DESTDIR)$(PREFIX)/share/metainfo/ + msgfmt --xml -d po \ + --template $(APPSTREAMFILE) \ + -o $(DESTDIR)$(PREFIX)/share/metainfo/$(APPSTREAMFILE) + +# this requires a built source tree and avoids having to install anything system-wide +devel-install: $(DIST_TEST) + mkdir -p ~/.local/share/cockpit + ln -s `pwd`/dist ~/.local/share/cockpit/$(PACKAGE_NAME) + +# assumes that there was symlink set up using the above devel-install target, +# and removes it +devel-uninstall: + rm -f ~/.local/share/cockpit/$(PACKAGE_NAME) + +print-version: + @echo "$(VERSION)" + +# required for running integration tests; commander and ws are deps of chrome-remote-interface +TEST_NPMS = \ + node_modules/chrome-remote-interface \ + node_modules/commander \ + node_modules/sizzle \ + node_modules/ws \ + $(NULL) + +dist: $(TARFILE) + @ls -1 $(TARFILE) + +# when building a distribution tarball, call bundler with a 
'production' environment by default +# we don't ship most node_modules for license and compactness reasons, only the ones necessary for running tests +# we ship a pre-built dist/ (so it's not necessary) and ship package-lock.json (so that node_modules/ can be reconstructed if necessary) +$(TARFILE): export NODE_ENV ?= production +$(TARFILE): $(DIST_TEST) $(SPEC) packaging/arch/PKGBUILD packaging/debian/changelog + if type appstream-util >/dev/null 2>&1; then appstream-util validate-relax --nonet *.metainfo.xml; fi + tar --xz $(TAR_ARGS) -cf $(TARFILE) --transform 's,^,$(RPM_NAME)/,' \ + --exclude '*.in' --exclude test/reference \ + $$(git ls-files | grep -v node_modules) \ + $(COCKPIT_REPO_FILES) $(NODE_MODULES_TEST) $(SPEC) $(TEST_NPMS) \ + packaging/arch/PKGBUILD packaging/debian/changelog dist/ + +# convenience target for developers +rpm: $(TARFILE) + rpmbuild -tb --define "_topdir $(CURDIR)/tmp/rpmbuild" $(TARFILE) + find tmp/rpmbuild -name '*.rpm' -printf '%f\n' -exec mv {} . \; + rm -r tmp/rpmbuild + +# build a VM with locally built distro pkgs installed +$(VM_IMAGE): $(TARFILE) packaging/debian/rules packaging/debian/control packaging/arch/PKGBUILD bots + # HACK for ostree images: skip the rpm build/install + if [ "$$TEST_OS" = "fedora-coreos" ] || [ "$$TEST_OS" = "rhel4edge" ]; then \ + bots/image-customize --verbose --fresh --no-network --run-command 'mkdir -p /usr/local/share/cockpit' \ + --upload dist/:/usr/local/share/cockpit/podman \ + --script $(CURDIR)/test/vm.install $(TEST_OS); \ + else \ + bots/image-customize --verbose --fresh $(VM_CUSTOMIZE_FLAGS) --build $(TARFILE) \ + --script $(CURDIR)/test/vm.install $(TEST_OS); \ + fi + +# convenience target for the above +vm: $(VM_IMAGE) + @echo $(VM_IMAGE) + +# convenience target to print the filename of the test image +print-vm: + @echo $(VM_IMAGE) + +# run static code checks for python code +PYEXEFILES=$(shell git grep -lI '^#!.*python') + +codecheck: test/static-code $(NODE_MODULES_TEST) + 
test/static-code + +# convenience target to setup all the bits needed for the integration tests +# without actually running them +prepare-check: $(NODE_MODULES_TEST) $(VM_IMAGE) test/common test/reference + +# run the browser integration tests; skip check for SELinux denials +# this will run all tests/check-* and format them as TAP +check: prepare-check + TEST_AUDIT_NO_SELINUX=1 test/common/run-tests ${RUN_TESTS_OPTIONS} + +bots: $(COCKPIT_REPO_STAMP) + test/common/make-bots + +test/reference: test/common + test/common/pixel-tests pull + +# We want tools/node-modules to run every time package-lock.json is requested +# See https://www.gnu.org/software/make/manual/html_node/Force-Targets.html +FORCE: +$(NODE_MODULES_TEST): FORCE tools/node-modules + tools/node-modules make_package_lock_json + +.PHONY: all clean install devel-install devel-uninstall print-version dist rpm prepare-check check vm print-vm diff --git a/README.md b/README.md new file mode 100644 index 0000000..e14ec47 --- /dev/null +++ b/README.md @@ -0,0 +1,78 @@ +# cockpit-podman + +This is the [Cockpit](https://cockpit-project.org/) user interface for [podman +containers](https://podman.io/). + +## Technologies + + - cockpit-podman communicates to podman through its [REST API](https://podman.readthedocs.io/en/latest/_static/api.html). + + - This project is based on the [Cockpit Starter Kit](https://github.com/cockpit-project/starter-kit). + See [Starter Kit Intro](http://cockpit-project.org/blog/cockpit-starter-kit.html) for details. + +# Development dependencies + +On Debian/Ubuntu: + + $ sudo apt install gettext nodejs make + +On Fedora: + + $ sudo dnf install gettext nodejs make + +# Getting and building the source + +These commands check out the source and build it into the `dist/` directory: + +``` +git clone https://github.com/cockpit-project/cockpit-podman +cd cockpit-podman +make +``` + +# Installing + +`sudo make install` installs the package in `/usr/local/share/cockpit/`. 
This depends +on the `dist` target, which generates the distribution tarball. + +You can also run `make rpm` to build RPMs for local installation. + +In `production` mode, source files are automatically minified and compressed. +Set `NODE_ENV=production` if you want to duplicate this behavior. + +# Development instructions + +See [HACKING.md](./HACKING.md) for details about how to efficiently change the +code, run, and test it. + +# Automated release + +The intention is that the only manual step for releasing a project is to create +a signed tag for the version number, which includes a summary of the noteworthy +changes: + +``` +123 + +- this new feature +- fix bug #123 +``` + +Pushing the release tag triggers the [release.yml](.github/workflows/release.yml) +[GitHub action](https://github.com/features/actions) workflow. This creates the +official release tarball and publishes as upstream release to GitHub. + +The Fedora and COPR releases are done with [Packit](https://packit.dev/), +see the [packit.yaml](./packit.yaml) control file. + +# Automated maintenance + +It is important to keep your [NPM modules](./package.json) up to date, to keep +up with security updates and bug fixes. This happens with +[dependabot](https://github.com/dependabot), +see [configuration file](.github/dependabot.yml). + +Translations are refreshed every Tuesday evening (or manually) through the +[weblate-sync-po.yml](.github/workflows/weblate-sync-po.yml) action. +Conversely, the PO template is uploaded to weblate every day through the +[weblate-sync-pot.yml](.github/workflows/weblate-sync-pot.yml) action. 
diff --git a/build.js b/build.js new file mode 100755 index 0000000..1966bdc --- /dev/null +++ b/build.js @@ -0,0 +1,107 @@ +#!/usr/bin/env node + +import fs from 'node:fs'; +import os from 'node:os'; + +import copy from 'esbuild-plugin-copy'; + +import { cockpitCompressPlugin } from './pkg/lib/esbuild-compress-plugin.js'; +import { cockpitPoEsbuildPlugin } from './pkg/lib/cockpit-po-plugin.js'; +import { cockpitRsyncEsbuildPlugin } from './pkg/lib/cockpit-rsync-plugin.js'; +import { cleanPlugin } from './pkg/lib/esbuild-cleanup-plugin.js'; +import { esbuildStylesPlugins } from './pkg/lib/esbuild-common.js'; + +const useWasm = os.arch() !== 'x64'; +const esbuild = (await import(useWasm ? 'esbuild-wasm' : 'esbuild')); + +const production = process.env.NODE_ENV === 'production'; +/* List of directories to use when resolving import statements */ +const nodePaths = ['pkg/lib']; +const outdir = 'dist'; + +// Obtain package name from package.json +const packageJson = JSON.parse(fs.readFileSync('package.json')); + +const parser = (await import('argparse')).default.ArgumentParser(); +parser.add_argument('-r', '--rsync', { help: "rsync bundles to ssh target after build", metavar: "HOST" }); +parser.add_argument('-w', '--watch', { action: 'store_true', help: "Enable watch mode", default: process.env.ESBUILD_WATCH === "true" }); +const args = parser.parse_args(); + +if (args.rsync) + process.env.RSYNC = args.rsync; + +function notifyEndPlugin() { + return { + name: 'notify-end', + setup(build) { + let startTime; + + build.onStart(() => { + startTime = new Date(); + }); + + build.onEnd(() => { + const endTime = new Date(); + const timeStamp = endTime.toTimeString().split(' ')[0]; + console.log(`${timeStamp}: Build finished in ${endTime - startTime} ms`); + }); + } + }; +} + +const context = await esbuild.context({ + ...!production ? 
{ sourcemap: "linked" } : {}, + bundle: true, + entryPoints: ["./src/index.js"], + external: ['*.woff', '*.woff2', '*.jpg', '*.svg', '../../assets*'], // Allow external font files which live in ../../static/fonts + legalComments: 'external', // Move all legal comments to a .LEGAL.txt file + loader: { ".js": "jsx" }, + minify: production, + nodePaths, + outdir, + target: ['es2020'], + plugins: [ + cleanPlugin(), + // Esbuild will only copy assets that are explicitly imported and used + // in the code. This is a problem for index.html and manifest.json which are not imported + copy({ + assets: [ + { from: ['./src/manifest.json'], to: ['./manifest.json'] }, + { from: ['./src/index.html'], to: ['./index.html'] }, + ] + }), + ...esbuildStylesPlugins, + cockpitPoEsbuildPlugin(), + + ...production ? [cockpitCompressPlugin()] : [], + cockpitRsyncEsbuildPlugin({ dest: packageJson.name }), + + notifyEndPlugin(), + ] +}); + +try { + await context.rebuild(); +} catch (e) { + if (!args.watch) + process.exit(1); + // ignore errors in watch mode +} + +if (args.watch) { + // Attention: this does not watch subdirectories -- if you ever introduce one, need to set up one watch per subdir + fs.watch('src', {}, async (ev, path) => { + // only listen for "change" events, as renames are noisy + if (ev !== "change") + return; + console.log("change detected:", path); + await context.cancel(); + try { + await context.rebuild(); + } catch (e) {} // ignore in watch mode + }); + // wait forever until Control-C + await new Promise(() => {}); +} + +context.dispose(); diff --git a/cockpit-podman.spec b/cockpit-podman.spec new file mode 100644 index 0000000..7cb7262 --- /dev/null +++ b/cockpit-podman.spec @@ -0,0 +1,88 @@ +# +# Copyright (C) 2017-2020 Red Hat, Inc. 
+# +# Cockpit is free software; you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. +# +# Cockpit is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with Cockpit; If not, see . +# + +Name: cockpit-podman +Version: 85 +Release: 1%{?dist} +Summary: Cockpit component for Podman containers +License: LGPL-2.1-or-later +URL: https://github.com/cockpit-project/cockpit-podman + +Source0: https://github.com/cockpit-project/%{name}/releases/download/%{version}/%{name}-%{version}.tar.xz +BuildArch: noarch +BuildRequires: libappstream-glib +BuildRequires: make +BuildRequires: gettext +%if 0%{?rhel} && 0%{?rhel} <= 8 +BuildRequires: libappstream-glib-devel +%endif + +Requires: cockpit-bridge +Requires: podman >= 2.0.4 +# HACK https://github.com/containers/crun/issues/1091 +%if 0%{?centos} == 9 +Requires: criu-libs +%endif + +Provides: bundled(npm(@patternfly/patternfly)) = 5.2.1 +Provides: bundled(npm(@patternfly/react-core)) = 5.2.1 +Provides: bundled(npm(@patternfly/react-icons)) = 5.2.1 +Provides: bundled(npm(@patternfly/react-styles)) = 5.2.1 +Provides: bundled(npm(@patternfly/react-table)) = 5.2.1 +Provides: bundled(npm(@patternfly/react-tokens)) = 5.2.1 +Provides: bundled(npm(attr-accept)) = 2.2.2 +Provides: bundled(npm(date-fns)) = 3.4.0 +Provides: bundled(npm(docker-names)) = 1.2.1 +Provides: bundled(npm(file-selector)) = 0.6.0 +Provides: bundled(npm(focus-trap)) = 7.5.2 +Provides: bundled(npm(ipaddr.js)) = 2.1.0 +Provides: bundled(npm(js-tokens)) = 4.0.0 +Provides: bundled(npm(lodash)) = 4.17.21 +Provides: 
bundled(npm(loose-envify)) = 1.4.0 +Provides: bundled(npm(object-assign)) = 4.1.1 +Provides: bundled(npm(prop-types)) = 15.8.1 +Provides: bundled(npm(react-dom)) = 18.2.0 +Provides: bundled(npm(react-dropzone)) = 14.2.3 +Provides: bundled(npm(react-is)) = 16.13.1 +Provides: bundled(npm(react)) = 18.2.0 +Provides: bundled(npm(scheduler)) = 0.23.0 +Provides: bundled(npm(tabbable)) = 6.2.0 +Provides: bundled(npm(throttle-debounce)) = 5.0.0 +Provides: bundled(npm(tslib)) = 2.6.2 +Provides: bundled(npm(xterm-addon-canvas)) = 0.4.0 +Provides: bundled(npm(xterm)) = 5.1.0 + +%description +The Cockpit user interface for Podman containers. + +%prep +%setup -q -n %{name} + +%build +# Nothing to build + +%install +%make_install PREFIX=/usr +appstream-util validate-relax --nonet %{buildroot}/%{_datadir}/metainfo/* + +%files +%doc README.md +%license LICENSE dist/index.js.LEGAL.txt dist/index.css.LEGAL.txt +%{_datadir}/cockpit/* +%{_datadir}/metainfo/* + +%changelog diff --git a/dist/index.css.LEGAL.txt b/dist/index.css.LEGAL.txt new file mode 100644 index 0000000..f1c05c0 --- /dev/null +++ b/dist/index.css.LEGAL.txt @@ -0,0 +1,35 @@ +Bundled license information: + +xterm/css/xterm.css: + /** + * Copyright (c) 2014 The xterm.js authors. All rights reserved. + * Copyright (c) 2012-2013, Christopher Jeffrey (MIT License) + * https://github.com/chjj/term.js + * @license MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + * + * Originally forked from (with the author's permission): + * Fabrice Bellard's javascript vt100 for jslinux: + * http://bellard.org/jslinux/ + * Copyright (c) 2011 Fabrice Bellard + * The original design remains. The terminal itself + * has been extended to include xterm CSI codes, among + * other features. + */ diff --git a/dist/index.css.gz b/dist/index.css.gz new file mode 100644 index 0000000..321bff0 Binary files /dev/null and b/dist/index.css.gz differ diff --git a/dist/index.html b/dist/index.html new file mode 100644 index 0000000..c4e1856 --- /dev/null +++ b/dist/index.html @@ -0,0 +1,36 @@ + + + + + Podman containers + + + + + + + + + + + + +
+
+ + diff --git a/dist/index.js.LEGAL.txt b/dist/index.js.LEGAL.txt new file mode 100644 index 0000000..eb4acfb --- /dev/null +++ b/dist/index.js.LEGAL.txt @@ -0,0 +1,46 @@ +Bundled license information: + +react/cjs/react.production.min.js: + /** + * @license React + * react.production.min.js + * + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + +scheduler/cjs/scheduler.production.min.js: + /** + * @license React + * scheduler.production.min.js + * + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + +react-dom/cjs/react-dom.production.min.js: + /** + * @license React + * react-dom.production.min.js + * + * Copyright (c) Facebook, Inc. and its affiliates. + * + * This source code is licensed under the MIT license found in the + * LICENSE file in the root directory of this source tree. + */ + +tabbable/dist/index.esm.js: + /*! + * tabbable 6.2.0 + * @license MIT, https://github.com/focus-trap/tabbable/blob/master/LICENSE + */ + +focus-trap/dist/focus-trap.esm.js: + /*! 
+ * focus-trap 7.5.2 + * @license MIT, https://github.com/focus-trap/focus-trap/blob/master/LICENSE + */ diff --git a/dist/index.js.gz b/dist/index.js.gz new file mode 100644 index 0000000..4d2c5cc Binary files /dev/null and b/dist/index.js.gz differ diff --git a/dist/manifest.json b/dist/manifest.json new file mode 100644 index 0000000..530aa6d --- /dev/null +++ b/dist/manifest.json @@ -0,0 +1,16 @@ +{ + "conditions": [ + {"path-exists": "/lib/systemd/system/podman.socket"} + ], + "menu": { + "index": { + "label": "Podman containers", + "order": 50, + "keywords": [ + { + "matches": ["podman", "container", "image"] + } + ] + } + } +} diff --git a/dist/po.cs.js.gz b/dist/po.cs.js.gz new file mode 100644 index 0000000..8751107 Binary files /dev/null and b/dist/po.cs.js.gz differ diff --git a/dist/po.de.js.gz b/dist/po.de.js.gz new file mode 100644 index 0000000..990476a Binary files /dev/null and b/dist/po.de.js.gz differ diff --git a/dist/po.es.js.gz b/dist/po.es.js.gz new file mode 100644 index 0000000..bd8df16 Binary files /dev/null and b/dist/po.es.js.gz differ diff --git a/dist/po.fi.js.gz b/dist/po.fi.js.gz new file mode 100644 index 0000000..49734cb Binary files /dev/null and b/dist/po.fi.js.gz differ diff --git a/dist/po.fr.js.gz b/dist/po.fr.js.gz new file mode 100644 index 0000000..d09fde6 Binary files /dev/null and b/dist/po.fr.js.gz differ diff --git a/dist/po.ja.js.gz b/dist/po.ja.js.gz new file mode 100644 index 0000000..82eb352 Binary files /dev/null and b/dist/po.ja.js.gz differ diff --git a/dist/po.ka.js.gz b/dist/po.ka.js.gz new file mode 100644 index 0000000..998ddd5 Binary files /dev/null and b/dist/po.ka.js.gz differ diff --git a/dist/po.ko.js.gz b/dist/po.ko.js.gz new file mode 100644 index 0000000..1eb7323 Binary files /dev/null and b/dist/po.ko.js.gz differ diff --git a/dist/po.manifest.cs.js.gz b/dist/po.manifest.cs.js.gz new file mode 100644 index 0000000..670a628 Binary files /dev/null and b/dist/po.manifest.cs.js.gz differ diff --git 
a/dist/po.manifest.de.js.gz b/dist/po.manifest.de.js.gz new file mode 100644 index 0000000..68ccc99 Binary files /dev/null and b/dist/po.manifest.de.js.gz differ diff --git a/dist/po.manifest.es.js.gz b/dist/po.manifest.es.js.gz new file mode 100644 index 0000000..604dbc0 Binary files /dev/null and b/dist/po.manifest.es.js.gz differ diff --git a/dist/po.manifest.fi.js.gz b/dist/po.manifest.fi.js.gz new file mode 100644 index 0000000..e67d6df Binary files /dev/null and b/dist/po.manifest.fi.js.gz differ diff --git a/dist/po.manifest.fr.js.gz b/dist/po.manifest.fr.js.gz new file mode 100644 index 0000000..e02b181 Binary files /dev/null and b/dist/po.manifest.fr.js.gz differ diff --git a/dist/po.manifest.ja.js.gz b/dist/po.manifest.ja.js.gz new file mode 100644 index 0000000..a99264a Binary files /dev/null and b/dist/po.manifest.ja.js.gz differ diff --git a/dist/po.manifest.ka.js.gz b/dist/po.manifest.ka.js.gz new file mode 100644 index 0000000..e6c892f Binary files /dev/null and b/dist/po.manifest.ka.js.gz differ diff --git a/dist/po.manifest.ko.js.gz b/dist/po.manifest.ko.js.gz new file mode 100644 index 0000000..865b27d Binary files /dev/null and b/dist/po.manifest.ko.js.gz differ diff --git a/dist/po.manifest.pl.js.gz b/dist/po.manifest.pl.js.gz new file mode 100644 index 0000000..494ba26 Binary files /dev/null and b/dist/po.manifest.pl.js.gz differ diff --git a/dist/po.manifest.sk.js.gz b/dist/po.manifest.sk.js.gz new file mode 100644 index 0000000..7e079e9 Binary files /dev/null and b/dist/po.manifest.sk.js.gz differ diff --git a/dist/po.manifest.sv.js.gz b/dist/po.manifest.sv.js.gz new file mode 100644 index 0000000..414cbf7 Binary files /dev/null and b/dist/po.manifest.sv.js.gz differ diff --git a/dist/po.manifest.tr.js.gz b/dist/po.manifest.tr.js.gz new file mode 100644 index 0000000..bfdf5e1 Binary files /dev/null and b/dist/po.manifest.tr.js.gz differ diff --git a/dist/po.manifest.uk.js.gz b/dist/po.manifest.uk.js.gz new file mode 100644 index 
0000000..ec75de3 Binary files /dev/null and b/dist/po.manifest.uk.js.gz differ diff --git a/dist/po.manifest.zh_CN.js.gz b/dist/po.manifest.zh_CN.js.gz new file mode 100644 index 0000000..0d2d466 Binary files /dev/null and b/dist/po.manifest.zh_CN.js.gz differ diff --git a/dist/po.pl.js.gz b/dist/po.pl.js.gz new file mode 100644 index 0000000..d485ae8 Binary files /dev/null and b/dist/po.pl.js.gz differ diff --git a/dist/po.sk.js.gz b/dist/po.sk.js.gz new file mode 100644 index 0000000..a197bb4 Binary files /dev/null and b/dist/po.sk.js.gz differ diff --git a/dist/po.sv.js.gz b/dist/po.sv.js.gz new file mode 100644 index 0000000..093e070 Binary files /dev/null and b/dist/po.sv.js.gz differ diff --git a/dist/po.tr.js.gz b/dist/po.tr.js.gz new file mode 100644 index 0000000..cdb6f65 Binary files /dev/null and b/dist/po.tr.js.gz differ diff --git a/dist/po.uk.js.gz b/dist/po.uk.js.gz new file mode 100644 index 0000000..72ffc9a Binary files /dev/null and b/dist/po.uk.js.gz differ diff --git a/dist/po.zh_CN.js.gz b/dist/po.zh_CN.js.gz new file mode 100644 index 0000000..c886b0a Binary files /dev/null and b/dist/po.zh_CN.js.gz differ diff --git a/node_modules/chrome-remote-interface/LICENSE b/node_modules/chrome-remote-interface/LICENSE new file mode 100644 index 0000000..91137a0 --- /dev/null +++ b/node_modules/chrome-remote-interface/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2023 Andrea Cardaci + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/node_modules/chrome-remote-interface/README.md b/node_modules/chrome-remote-interface/README.md new file mode 100644 index 0000000..9800abb --- /dev/null +++ b/node_modules/chrome-remote-interface/README.md @@ -0,0 +1,992 @@ +# chrome-remote-interface + +[![CI status](https://github.com/cyrus-and/chrome-remote-interface/actions/workflows/ci.yml/badge.svg)](https://github.com/cyrus-and/chrome-remote-interface/actions?query=workflow:CI) + +[Build Status]: https://app.travis-ci.com/cyrus-and/chrome-remote-interface.svg?branch=master +[travis]: https://app.travis-ci.com/cyrus-and/chrome-remote-interface + +[Chrome Debugging Protocol] interface that helps to instrument Chrome (or any +other suitable [implementation](#implementations)) by providing a simple +abstraction of commands and notifications using a straightforward JavaScript +API. + +This module is one of the many [third-party protocol clients][3rd-party]. + +[3rd-party]: https://developer.chrome.com/devtools/docs/debugging-clients#chrome-remote-interface + +## Sample API usage + +The following snippet loads `https://github.com` and dumps every request made: + +```js +const CDP = require('chrome-remote-interface'); + +async function example() { + let client; + try { + // connect to endpoint + client = await CDP(); + // extract domains + const {Network, Page} = client; + // setup handlers + Network.requestWillBeSent((params) => { + console.log(params.request.url); + }); + // enable events then start! 
+ await Network.enable(); + await Page.enable(); + await Page.navigate({url: 'https://github.com'}); + await Page.loadEventFired(); + } catch (err) { + console.error(err); + } finally { + if (client) { + await client.close(); + } + } +} + +example(); +``` + +Find more examples in the [wiki]. You may also want to take a look at the [FAQ]. + +[wiki]: https://github.com/cyrus-and/chrome-remote-interface/wiki +[async-await-example]: https://github.com/cyrus-and/chrome-remote-interface/wiki/Async-await-example +[FAQ]: https://github.com/cyrus-and/chrome-remote-interface#faq + +## Installation + + npm install chrome-remote-interface + +Install globally (`-g`) to just use the [bundled client](#bundled-client). + +## Implementations + +This module should work with every application implementing the +[Chrome Debugging Protocol]. In particular, it has been tested against the +following implementations: + +Implementation | Protocol version | [Protocol] | [List] | [New] | [Activate] | [Close] | [Version] +---------------------------|--------------------|------------|--------|-------|------------|---------|----------- +[Chrome][1.1] | [tip-of-tree][1.2] | yes¹ | yes | yes | yes | yes | yes +[Opera][2.1] | [tip-of-tree][2.2] | yes | yes | yes | yes | yes | yes +[Node.js][3.1] ([v6.3.0]+) | [node][3.2] | yes | no | no | no | no | yes +[Safari (iOS)][4.1] | [*partial*][4.2] | no | yes | no | no | no | no +[Edge][5.1] | [*partial*][5.2] | yes | yes | no | no | no | yes +[Firefox (Nightly)][6.1] | [*partial*][6.2] | yes | yes | no | yes | yes | yes + +¹ Not available on [Chrome for Android][chrome-mobile-protocol], hence a local version of the protocol must be used. 
+ +[chrome-mobile-protocol]: https://bugs.chromium.org/p/chromium/issues/detail?id=824626#c4 + +[1.1]: #chromechromium +[1.2]: https://chromedevtools.github.io/devtools-protocol/tot/ + +[2.1]: #opera +[2.2]: https://chromedevtools.github.io/devtools-protocol/tot/ + +[3.1]: #nodejs +[3.2]: https://chromedevtools.github.io/devtools-protocol/v8/ + +[4.1]: #safari-ios +[4.2]: http://trac.webkit.org/browser/trunk/Source/JavaScriptCore/inspector/protocol + +[5.1]: #edge +[5.2]: https://docs.microsoft.com/en-us/microsoft-edge/devtools-protocol/0.1/domains/ + +[6.1]: #firefox-nightly +[6.2]: https://firefox-source-docs.mozilla.org/remote/index.html + +[v6.3.0]: https://nodejs.org/en/blog/release/v6.3.0/ + +[Protocol]: #cdpprotocoloptions-callback +[List]: #cdplistoptions-callback +[New]: #cdpnewoptions-callback +[Activate]: #cdpactivateoptions-callback +[Close]: #cdpcloseoptions-callback +[Version]: #cdpversionoptions-callback + +The meaning of *target* varies according to the implementation, for example, +each Chrome tab represents a target whereas for Node.js a target is the +currently inspected script. + +## Setup + +An instance of either Chrome itself or another implementation needs to be +running on a known port in order to use this module (defaults to +`localhost:9222`). + +### Chrome/Chromium + +#### Desktop + +Start Chrome with the `--remote-debugging-port` option, for example: + + google-chrome --remote-debugging-port=9222 + +##### Headless + +Since version 59, additionally use the `--headless` option, for example: + + google-chrome --headless --remote-debugging-port=9222 + +#### Android + +Plug the device and enable the [port forwarding][adb], for example: + + adb forward tcp:9222 localabstract:chrome_devtools_remote + +Note that in Android, Chrome does not have its own protocol available, a local +version must be used. See [here](#chrome-debugging-protocol-versions) for more information. 
+ +[adb]: https://developer.chrome.com/devtools/docs/remote-debugging-legacy + +##### WebView + +In order to be inspectable, a WebView must +be [configured for debugging][webview] and the corresponding process ID must be +known. There are several ways to obtain it, for example: + + adb shell grep -a webview_devtools_remote /proc/net/unix + +Finally, port forwarding can be enabled as follows: + + adb forward tcp:9222 localabstract:webview_devtools_remote_ + +[webview]: https://developers.google.com/web/tools/chrome-devtools/remote-debugging/webviews#configure_webviews_for_debugging + +### Opera + +Start Opera with the `--remote-debugging-port` option, for example: + + opera --remote-debugging-port=9222 + +### Node.js + +Start Node.js with the `--inspect` option, for example: + + node --inspect=9222 script.js + +### Safari (iOS) + +Install and run the [iOS WebKit Debug Proxy][iwdp]. Then use it with the `local` +option set to `true` to use the local version of the protocol or pass a custom +descriptor upon connection (`protocol` option). + +[iwdp]: https://github.com/google/ios-webkit-debug-proxy + +### Edge + +Start Edge with the `--devtools-server-port` option, for example: + + MicrosoftEdge.exe --devtools-server-port 9222 about:blank + +Please find more information [here][edge-devtools]. + +[edge-devtools]: https://docs.microsoft.com/en-us/microsoft-edge/devtools-protocol/ + +### Firefox (Nightly) + +Start Firefox with the `--remote-debugging-port` option, for example: + + firefox --remote-debugging-port 9222 + +Bear in mind that this is an experimental feature of Firefox. + +## Bundled client + +This module comes with a bundled client application that can be used to +interactively control a remote instance. + +### Target management + +The bundled client exposes subcommands to interact with the HTTP frontend +(e.g., [List](#cdplistoptions-callback), [New](#cdpnewoptions-callback), etc.), +run with `--help` to display the list of available options. 
+ +Here are some examples: + +```js +$ chrome-remote-interface new 'http://example.com' +{ + "description": "", + "devtoolsFrontendUrl": "/devtools/inspector.html?ws=localhost:9222/devtools/page/b049bb56-de7d-424c-a331-6ae44cf7ae01", + "id": "b049bb56-de7d-424c-a331-6ae44cf7ae01", + "thumbnailUrl": "/thumb/b049bb56-de7d-424c-a331-6ae44cf7ae01", + "title": "", + "type": "page", + "url": "http://example.com/", + "webSocketDebuggerUrl": "ws://localhost:9222/devtools/page/b049bb56-de7d-424c-a331-6ae44cf7ae01" +} +$ chrome-remote-interface close 'b049bb56-de7d-424c-a331-6ae44cf7ae01' +``` + +### Inspection + +Using the `inspect` subcommand it is possible to perform [command execution](#clientdomainmethodparams-callback) +and [event binding](#clientdomaineventcallback) in a REPL fashion that provides completion. + +Here is a sample session: + +```js +$ chrome-remote-interface inspect +>>> Runtime.evaluate({expression: 'window.location.toString()'}) +{ result: { type: 'string', value: 'about:blank' } } +>>> Page.enable() +{} +>>> Page.loadEventFired(console.log) +[Function] +>>> Page.navigate({url: 'https://github.com'}) +{ frameId: 'E1657E22F06E6E0BE13DFA8130C20298', + loaderId: '439236ADE39978F98C20E8939A32D3A5' } +>>> { timestamp: 7454.721299 } // from Page.loadEventFired +>>> Runtime.evaluate({expression: 'window.location.toString()'}) +{ result: { type: 'string', value: 'https://github.com/' } } +``` + +Additionally there are some custom commands available: + +```js +>>> .help +[...] +.reset Remove all the registered event handlers +.target Display the current target +``` + +## Embedded documentation + +In both the REPL and the regular API every object of the protocol is *decorated* +with the meta information found within the descriptor. In addition The +`category` field is added, which determines if the member is a `command`, an +`event` or a `type`. 
+ +For example to learn how to call `Page.navigate`: + +```js +>>> Page.navigate +{ [Function] + category: 'command', + parameters: { url: { type: 'string', description: 'URL to navigate the page to.' } }, + returns: + [ { name: 'frameId', + '$ref': 'FrameId', + hidden: true, + description: 'Frame id that will be navigated.' } ], + description: 'Navigates current page to the given URL.', + handlers: [ 'browser', 'renderer' ] } +``` + +To learn about the parameters returned by the `Network.requestWillBeSent` event: + +```js +>>> Network.requestWillBeSent +{ [Function] + category: 'event', + description: 'Fired when page is about to send HTTP request.', + parameters: + { requestId: { '$ref': 'RequestId', description: 'Request identifier.' }, + frameId: + { '$ref': 'Page.FrameId', + description: 'Frame identifier.', + hidden: true }, + loaderId: { '$ref': 'LoaderId', description: 'Loader identifier.' }, + documentURL: + { type: 'string', + description: 'URL of the document this request is loaded for.' }, + request: { '$ref': 'Request', description: 'Request data.' }, + timestamp: { '$ref': 'Timestamp', description: 'Timestamp.' }, + wallTime: + { '$ref': 'Timestamp', + hidden: true, + description: 'UTC Timestamp.' }, + initiator: { '$ref': 'Initiator', description: 'Request initiator.' }, + redirectResponse: + { optional: true, + '$ref': 'Response', + description: 'Redirect response data.' }, + type: + { '$ref': 'Page.ResourceType', + optional: true, + hidden: true, + description: 'Type of this resource.' } } } +``` + +To inspect the `Network.Request` (note that unlike commands and events, types +are named in upper camel case) type: + +```js +>>> Network.Request +{ category: 'type', + id: 'Request', + type: 'object', + description: 'HTTP request data.', + properties: + { url: { type: 'string', description: 'Request URL.' }, + method: { type: 'string', description: 'HTTP request method.' }, + headers: { '$ref': 'Headers', description: 'HTTP request headers.' 
}, + postData: + { type: 'string', + optional: true, + description: 'HTTP POST request data.' }, + mixedContentType: + { optional: true, + type: 'string', + enum: [Object], + description: 'The mixed content status of the request, as defined in http://www.w3.org/TR/mixed-content/' }, + initialPriority: + { '$ref': 'ResourcePriority', + description: 'Priority of the resource request at the time request is sent.' } } } +``` + +## Chrome Debugging Protocol versions + +By default `chrome-remote-interface` *asks* the remote instance to provide its +own protocol. + +This behavior can be changed by setting the `local` option to `true` +upon [connection](#cdpoptions-callback), in which case the [local version] of +the protocol descriptor is used. This file is manually updated from time to time +using `scripts/update-protocol.sh` and pushed to this repository. + +To further override the above behavior there are basically two options: + +- pass a custom protocol descriptor upon [connection](#cdpoptions-callback) + (`protocol` option); + +- use the *raw* version of the [commands](#clientsendmethod-params-callback) + and [events](#event-domainmethod) interface to use bleeding-edge features that + do not appear in the [local version] of the protocol descriptor; + +[local version]: lib/protocol.json + +## Browser usage + +This module is able to run within a web context, with obvious limitations +though, namely external HTTP requests +([List](#cdplistoptions-callback), [New](#cdpnewoptions-callback), etc.) cannot +be performed directly, for this reason the user must provide a global +`criRequest` in order to use them: + +```js +function criRequest(options, callback) {} +``` + +`options` is the same object used by the Node.js `http` module and `callback` is +a function taking two arguments: `err` (JavaScript `Error` object or `null`) and +`data` (string result). 
+ +### Using [webpack](https://webpack.github.io/) + +It just works, simply require this module: + +```js +const CDP = require('chrome-remote-interface'); +``` + +### Using *vanilla* JavaScript + +To generate a JavaScript file that can be used with a ` + + ``` + +## TypeScript Support + +[TypeScript][] definitions are kindly provided by [Khairul Azhar Kasmiran][] and [Seth Westphal][], and can be installed from [DefinitelyTyped][]: + +``` +npm install --save-dev @types/chrome-remote-interface +``` + +Note that the TypeScript definitions are automatically generated from the npm package `devtools-protocol@0.0.927104`. For other versions of devtools-protocol: + +1. Install patch-package using [the instructions given](https://github.com/ds300/patch-package#set-up). +2. Copy the contents of the corresponding https://github.com/ChromeDevTools/devtools-protocol/tree/master/types folder (according to commit) into `node_modules/devtools-protocol/types`. +3. Run `npx patch-package devtools-protocol` so that the changes persist across an `npm install`. + +[TypeScript]: https://www.typescriptlang.org/ +[Khairul Azhar Kasmiran]: https://github.com/kazarmy +[Seth Westphal]: https://github.com/westy92 +[DefinitelyTyped]: https://github.com/DefinitelyTyped/DefinitelyTyped/tree/master/types/chrome-remote-interface + +## API + +The API consists of three parts: + +- *DevTools* methods (for those [implementations](#implementations) that support + them, e.g., [List](#cdplistoptions-callback), [New](#cdpnewoptions-callback), + etc.); + +- [connection](#cdpoptions-callback) establishment; + +- the actual [protocol interaction](#class-cdp). + +### CDP([options], [callback]) + +Connects to a remote instance using the [Chrome Debugging Protocol]. + +`options` is an object with the following optional properties: + +- `host`: HTTP frontend host. Defaults to `localhost`; +- `port`: HTTP frontend port. Defaults to `9222`; +- `secure`: HTTPS/WSS frontend. 
Defaults to `false`; +- `useHostName`: do not perform a DNS lookup of the host. Defaults to `false`; +- `alterPath`: a `function` taking and returning the path fragment of a URL + before that a request happens. Defaults to the identity function; +- `target`: determines which target this client should attach to. The behavior + changes according to the type: + + - a `function` that takes the array returned by the `List` method and returns + a target or its numeric index relative to the array; + - a target `object` like those returned by the `New` and `List` methods; + - a `string` representing the raw WebSocket URL, in this case `host` and + `port` are not used to fetch the target list, yet they are used to complete + the URL if relative; + - a `string` representing the target id. + + Defaults to a function which returns the first available target according to + the implementation (note that at most one connection can be established to the + same target); +- `protocol`: [Chrome Debugging Protocol] descriptor object. Defaults to use the + protocol chosen according to the `local` option; +- `local`: a boolean indicating whether the protocol must be fetched *remotely* + or if the local version must be used. It has no effect if the `protocol` + option is set. Defaults to `false`. + +These options are also valid properties of all the instances of the `CDP` +class. In addition to that, the `webSocketUrl` field contains the currently used +WebSocket URL. + +`callback` is a listener automatically added to the `connect` event of the +returned `EventEmitter`. When `callback` is omitted a `Promise` object is +returned which becomes fulfilled if the `connect` event is triggered and +rejected if the `error` event is triggered. + +The `EventEmitter` supports the following events: + +#### Event: 'connect' + +```js +function (client) {} +``` + +Emitted when the connection to the WebSocket is established. + +`client` is an instance of the `CDP` class. 
+ +#### Event: 'error' + +```js +function (err) {} +``` + +Emitted when `http://host:port/json` cannot be reached or if it is not possible +to connect to the WebSocket. + +`err` is an instance of `Error`. + +### CDP.Protocol([options], [callback]) + +Fetch the [Chrome Debugging Protocol] descriptor. + +`options` is an object with the following optional properties: + +- `host`: HTTP frontend host. Defaults to `localhost`; +- `port`: HTTP frontend port. Defaults to `9222`; +- `secure`: HTTPS/WSS frontend. Defaults to `false`; +- `useHostName`: do not perform a DNS lookup of the host. Defaults to `false`; +- `alterPath`: a `function` taking and returning the path fragment of a URL + before that a request happens. Defaults to the identity function; +- `local`: a boolean indicating whether the protocol must be fetched *remotely* + or if the local version must be returned. Defaults to `false`. + +`callback` is executed when the protocol is fetched, it gets the following +arguments: + +- `err`: a `Error` object indicating the success status; +- `protocol`: the [Chrome Debugging Protocol] descriptor. + +When `callback` is omitted a `Promise` object is returned. + +For example: + +```js +const CDP = require('chrome-remote-interface'); +CDP.Protocol((err, protocol) => { + if (!err) { + console.log(JSON.stringify(protocol, null, 4)); + } +}); +``` + +### CDP.List([options], [callback]) + +Request the list of the available open targets/tabs of the remote instance. + +`options` is an object with the following optional properties: + +- `host`: HTTP frontend host. Defaults to `localhost`; +- `port`: HTTP frontend port. Defaults to `9222`; +- `secure`: HTTPS/WSS frontend. Defaults to `false`; +- `useHostName`: do not perform a DNS lookup of the host. Defaults to `false`; +- `alterPath`: a `function` taking and returning the path fragment of a URL + before that a request happens. Defaults to the identity function. 
+ +`callback` is executed when the list is correctly received, it gets the +following arguments: + +- `err`: a `Error` object indicating the success status; +- `targets`: the array returned by `http://host:port/json/list` containing the + target list. + +When `callback` is omitted a `Promise` object is returned. + +For example: + +```js +const CDP = require('chrome-remote-interface'); +CDP.List((err, targets) => { + if (!err) { + console.log(targets); + } +}); +``` + +### CDP.New([options], [callback]) + +Create a new target/tab in the remote instance. + +`options` is an object with the following optional properties: + +- `host`: HTTP frontend host. Defaults to `localhost`; +- `port`: HTTP frontend port. Defaults to `9222`; +- `secure`: HTTPS/WSS frontend. Defaults to `false`; +- `useHostName`: do not perform a DNS lookup of the host. Defaults to `false`; +- `alterPath`: a `function` taking and returning the path fragment of a URL + before that a request happens. Defaults to the identity function; +- `url`: URL to load in the new target/tab. Defaults to `about:blank`. + +`callback` is executed when the target is created, it gets the following +arguments: + +- `err`: a `Error` object indicating the success status; +- `target`: the object returned by `http://host:port/json/new` containing the + target. + +When `callback` is omitted a `Promise` object is returned. + +For example: + +```js +const CDP = require('chrome-remote-interface'); +CDP.New((err, target) => { + if (!err) { + console.log(target); + } +}); +``` + +### CDP.Activate([options], [callback]) + +Activate an open target/tab of the remote instance. + +`options` is an object with the following properties: + +- `host`: HTTP frontend host. Defaults to `localhost`; +- `port`: HTTP frontend port. Defaults to `9222`; +- `secure`: HTTPS/WSS frontend. Defaults to `false`; +- `useHostName`: do not perform a DNS lookup of the host. 
Defaults to `false`; +- `alterPath`: a `function` taking and returning the path fragment of a URL + before that a request happens. Defaults to the identity function; +- `id`: Target id. Required, no default. + +`callback` is executed when the response to the activation request is +received. It gets the following arguments: + +- `err`: a `Error` object indicating the success status; + +When `callback` is omitted a `Promise` object is returned. + +For example: + +```js +const CDP = require('chrome-remote-interface'); +CDP.Activate({id: 'CC46FBFA-3BDA-493B-B2E4-2BE6EB0D97EC'}, (err) => { + if (!err) { + console.log('target is activated'); + } +}); +``` + +### CDP.Close([options], [callback]) + +Close an open target/tab of the remote instance. + +`options` is an object with the following properties: + +- `host`: HTTP frontend host. Defaults to `localhost`; +- `port`: HTTP frontend port. Defaults to `9222`; +- `secure`: HTTPS/WSS frontend. Defaults to `false`; +- `useHostName`: do not perform a DNS lookup of the host. Defaults to `false`; +- `alterPath`: a `function` taking and returning the path fragment of a URL + before that a request happens. Defaults to the identity function; +- `id`: Target id. Required, no default. + +`callback` is executed when the response to the close request is received. It +gets the following arguments: + +- `err`: a `Error` object indicating the success status; + +When `callback` is omitted a `Promise` object is returned. + +For example: + +```js +const CDP = require('chrome-remote-interface'); +CDP.Close({id: 'CC46FBFA-3BDA-493B-B2E4-2BE6EB0D97EC'}, (err) => { + if (!err) { + console.log('target is closing'); + } +}); +``` + +Note that the callback is fired when the target is *queued* for removal, but the +actual removal will occur asynchronously. + +### CDP.Version([options], [callback]) + +Request version information from the remote instance. 
+ +`options` is an object with the following optional properties: + +- `host`: HTTP frontend host. Defaults to `localhost`; +- `port`: HTTP frontend port. Defaults to `9222`; +- `secure`: HTTPS/WSS frontend. Defaults to `false`; +- `useHostName`: do not perform a DNS lookup of the host. Defaults to `false`; +- `alterPath`: a `function` taking and returning the path fragment of a URL + before that a request happens. Defaults to the identity function. + +`callback` is executed when the version information is correctly received, it +gets the following arguments: + +- `err`: a `Error` object indicating the success status; +- `info`: a JSON object returned by `http://host:port/json/version` containing + the version information. + +When `callback` is omitted a `Promise` object is returned. + +For example: + +```js +const CDP = require('chrome-remote-interface'); +CDP.Version((err, info) => { + if (!err) { + console.log(info); + } +}); +``` + +### Class: CDP + +#### Event: 'event' + +```js +function (message) {} +``` + +Emitted when the remote instance sends any notification through the WebSocket. + +`message` is the object received, it has the following properties: + +- `method`: a string describing the notification (e.g., + `'Network.requestWillBeSent'`); +- `params`: an object containing the payload; +- `sessionId`: an optional string representing the session identifier. + +Refer to the [Chrome Debugging Protocol] specification for more information. + +For example: + +```js +client.on('event', (message) => { + if (message.method === 'Network.requestWillBeSent') { + console.log(message.params); + } +}); +``` + +#### Event: '``.``' + +```js +function (params, sessionId) {} +``` + +Emitted when the remote instance sends a notification for `.` +through the WebSocket. + +`params` is an object containing the payload. + +`sessionId` is an optional string representing the session identifier. 
+ +This is just a utility event which allows to easily listen for specific +notifications (see [`'event'`](#event-event)), for example: + +```js +client.on('Network.requestWillBeSent', console.log); +``` + +Additionally, the equivalent `.on('', ...)` syntax is available, for example: + +```js +client.Network.on('requestWillBeSent', console.log); +``` + +#### Event: '``.``.``' + +```js +function (params, sessionId) {} +``` + +Equivalent to the following but only for those events belonging to the given `session`: + +```js +client.on('.', callback); +``` + +#### Event: 'ready' + +```js +function () {} +``` + +Emitted every time that there are no more pending commands waiting for a +response from the remote instance. The interaction is asynchronous so the only +way to serialize a sequence of commands is to use the callback provided by +the [`send`](#clientsendmethod-params-callback) method. This event acts as a +barrier and it is useful to avoid the *callback hell* in certain simple +situations. + +Users are encouraged to extensively check the response of each method and should +prefer the promises API when dealing with complex asynchronous program flows. + +For example to load a URL only after having enabled the notifications of both +`Network` and `Page` domains: + +```js +client.Network.enable(); +client.Page.enable(); +client.once('ready', () => { + client.Page.navigate({url: 'https://github.com'}); +}); +``` + +In this particular case, not enforcing this kind of serialization may cause that +the remote instance does not properly deliver the desired notifications the +client. + + +#### Event: 'disconnect' + +```js +function () {} +``` + +Emitted when the instance closes the WebSocket connection. + +This may happen for example when the user opens DevTools or when the tab is +closed. + +#### client.send(method, [params], [sessionId], [callback]) + +Issue a command to the remote instance. + +`method` is a string describing the command. 
+ +`params` is an object containing the payload. + +`sessionId` is a string representing the session identifier. + +`callback` is executed when the remote instance sends a response to this +command, it gets the following arguments: + +- `error`: a boolean value indicating the success status, as reported by the + remote instance; +- `response`: an object containing either the response (`result` field, if + `error === false`) or the indication of the error (`error` field, if `error + === true`). + +When `callback` is omitted a `Promise` object is returned instead, with the +fulfilled/rejected states implemented according to the `error` parameter. The +`Error` object returned contains two additional parameters: `request` and +`response` which contain the raw massages, useful for debugging purposes. In +case of low-level WebSocket errors, the `error` parameter contains the +originating `Error` object and no `response` is returned. + +Note that the field `id` mentioned in the [Chrome Debugging Protocol] +specification is managed internally and it is not exposed to the user. + +For example: + +```js +client.send('Page.navigate', {url: 'https://github.com'}, console.log); +``` + +#### client.``.``([params], [sessionId], [callback]) + +Just a shorthand for: + +```js +client.send('.', params, sessionId, callback); +``` + +For example: + +```js +client.Page.navigate({url: 'https://github.com'}, console.log); +``` + +#### client.``.``([sessionId], [callback]) + +Just a shorthand for: + +```js +client.on('.[.]', callback); +``` + +When `callback` is omitted the event is registered only once and a `Promise` +object is returned. Notice though that in this case the optional `sessionId` usually passed to `callback` is not returned. + +When `callback` is provided, it returns a function that can be used to +unsubscribe `callback` from the event, it can be useful when anonymous functions +are used as callbacks. 
+ +For example: + +```js +const unsubscribe = client.Network.requestWillBeSent((params, sessionId) => { + console.log(params.request.url); +}); +unsubscribe(); +``` + +#### client.close([callback]) + +Close the connection to the remote instance. + +`callback` is executed when the WebSocket is successfully closed. + +When `callback` is omitted a `Promise` object is returned. + +#### client['``.``'] + +Just a shorthand for: + +```js +client.. +``` + +Where `` can be a command, an event, or a type. + +## FAQ + +### Invoking `Domain.methodOrEvent` I obtain `Domain.methodOrEvent is not a function` + +This means that you are trying to use a method or an event that are not present +in the protocol descriptor that you are using. + +If the protocol is fetched from Chrome directly, then it means that this version +of Chrome does not support that feature. The solution is to update it. + +If you are using a local or custom version of the protocol, then it means that +the version is obsolete. The solution is to provide an up-to-date one, or if you +are using the protocol embedded in chrome-remote-interface, make sure to be +running the latest version of this module. In case the embedded protocol is +obsolete, please [file an issue](https://github.com/cyrus-and/chrome-remote-interface/issues/new). + +See [here](#chrome-debugging-protocol-versions) for more information. + +### Invoking `Domain.method` I obtain `Domain.method wasn't found` + +This means that you are providing a custom or local protocol descriptor +(`CDP({protocol: customProtocol})`) which declares `Domain.method` while the +Chrome version that you are using does not support it. + +To inspect the currently available protocol descriptor use: + +``` +$ chrome-remote-interface inspect +``` + +See [here](#chrome-debugging-protocol-versions) for more information. + +### Why my program stalls or behave unexpectedly if I run Chrome in a Docker container? 
+ +This happens because the size of `/dev/shm` is set to 64MB by default in Docker +and may not be enough for Chrome to navigate certain web pages. + +You can change this value by running your container with, say, +`--shm-size=256m`. + +### Using `Runtime.evaluate` with `awaitPromise: true` I sometimes obtain `Error: Promise was collected` + +This is thrown by `Runtime.evaluate` when the browser-side promise gets +*collected* by the Chrome's garbage collector, this happens when the whole +JavaScript execution environment is invalidated, e.g., a when page is navigated +or reloaded while a promise is still waiting to be resolved. + +Here is an example: + +``` +$ chrome-remote-interface inspect +>>> Runtime.evaluate({expression: `new Promise(() => {})`, awaitPromise: true}) +>>> Page.reload() // then wait several seconds +{ result: {} } +{ error: { code: -32000, message: 'Promise was collected' } } +``` + +To fix this, just make sure there are no pending promises before closing, +reloading, etc. a page. + +### How does this compare to Puppeteer? + +[Puppeteer] is an additional high-level API built upon the [Chrome Debugging +Protocol] which, among the other things, may start and use a bundled version of +Chromium instead of the one installed on your system. Use it if its API meets +your needs as it would probably be easier to work with. + +chrome-remote-interface instead is just a general purpose 1:1 Node.js binding +for the [Chrome Debugging Protocol]. Use it if you need all the power of the raw +protocol, e.g., to implement your own high-level API. + +See [#240] for a more thorough discussion. 
+ +[Puppeteer]: https://github.com/GoogleChrome/puppeteer +[#240]: https://github.com/cyrus-and/chrome-remote-interface/issues/240 + +## Contributors + +- [Andrey Sidorov](https://github.com/sidorares) +- [Greg Cochard](https://github.com/gcochard) + +## Resources + +- [Chrome Debugging Protocol] +- [Chrome Debugging Protocol Google group](https://groups.google.com/forum/#!forum/chrome-debugging-protocol) +- [devtools-protocol official repo](https://github.com/ChromeDevTools/devtools-protocol) +- [Showcase Chrome Debugging Protocol Clients](https://developer.chrome.com/devtools/docs/debugging-clients) +- [Awesome chrome-devtools](https://github.com/ChromeDevTools/awesome-chrome-devtools) + +[Chrome Debugging Protocol]: https://chromedevtools.github.io/devtools-protocol/ diff --git a/node_modules/chrome-remote-interface/bin/client.js b/node_modules/chrome-remote-interface/bin/client.js new file mode 100755 index 0000000..80b2d4d --- /dev/null +++ b/node_modules/chrome-remote-interface/bin/client.js @@ -0,0 +1,311 @@ +#!/usr/bin/env node + +'use strict'; + +const repl = require('repl'); +const util = require('util'); +const fs = require('fs'); +const path = require('path'); + +const program = require('commander'); + +const CDP = require('../'); +const packageInfo = require('../package.json'); + +function display(object) { + return util.inspect(object, { + colors: process.stdout.isTTY, + depth: null + }); +} + +function toJSON(object) { + return JSON.stringify(object, null, 4); +} + +/// + +function inspect(target, args, options) { + options.local = args.local; + // otherwise the active target + if (target) { + if (args.webSocket) { + // by WebSocket URL + options.target = target; + } else { + // by target id + options.target = (targets) => { + return targets.findIndex((_target) => { + return _target.id === target; + }); + }; + } + } + + if (args.protocol) { + options.protocol = JSON.parse(fs.readFileSync(args.protocol)); + } + + CDP(options, (client) => { + const 
cdpRepl = repl.start({ + prompt: process.stdin.isTTY ? '\x1b[32m>>>\x1b[0m ' : '', + ignoreUndefined: true, + writer: display + }); + + // XXX always await promises on the REPL + const defaultEval = cdpRepl.eval; + cdpRepl.eval = (cmd, context, filename, callback) => { + defaultEval(cmd, context, filename, async (err, result) => { + if (err) { + // propagate errors from the eval + callback(err); + } else { + // awaits the promise and either return result or error + try { + callback(null, await Promise.resolve(result)); + } catch (err) { + callback(err); + } + } + }); + }; + + const homePath = process.env.HOME || process.env.USERPROFILE; + const historyFile = path.join(homePath, '.cri_history'); + const historySize = 10000; + + function loadHistory() { + // only if run from a terminal + if (!process.stdin.isTTY) { + return; + } + // attempt to open the history file + let fd; + try { + fd = fs.openSync(historyFile, 'r'); + } catch (err) { + return; // no history file present + } + // populate the REPL history + fs.readFileSync(fd, 'utf8') + .split('\n') + .filter((entry) => { + return entry.trim(); + }) + .reverse() // to be compatible with repl.history files + .forEach((entry) => { + cdpRepl.history.push(entry); + }); + } + + function saveHistory() { + // only if run from a terminal + if (!process.stdin.isTTY) { + return; + } + // only store the last chunk + const entries = cdpRepl.history.slice(0, historySize).reverse().join('\n'); + fs.writeFileSync(historyFile, entries + '\n'); + } + + // utility custom command + cdpRepl.defineCommand('target', { + help: 'Display the current target', + action: () => { + console.log(client.webSocketUrl); + cdpRepl.displayPrompt(); + } + }); + + // utility to purge all the event handlers + cdpRepl.defineCommand('reset', { + help: 'Remove all the registered event handlers', + action: () => { + client.removeAllListeners(); + cdpRepl.displayPrompt(); + } + }); + + // enable history + loadHistory(); + + // disconnect on exit + 
cdpRepl.on('exit', () => { + if (process.stdin.isTTY) { + console.log(); + } + client.close(); + saveHistory(); + }); + + // exit on disconnection + client.on('disconnect', () => { + console.error('Disconnected.'); + saveHistory(); + process.exit(1); + }); + + // add protocol API + for (const domainObject of client.protocol.domains) { + // walk the domain names + const domainName = domainObject.domain; + cdpRepl.context[domainName] = {}; + // walk the items in the domain + for (const itemName in client[domainName]) { + // add CDP object to the REPL context + const cdpObject = client[domainName][itemName]; + cdpRepl.context[domainName][itemName] = cdpObject; + } + } + }).on('error', (err) => { + console.error('Cannot connect to remote endpoint:', err.toString()); + }); +} + +function list(options) { + CDP.List(options, (err, targets) => { + if (err) { + console.error(err.toString()); + process.exit(1); + } + console.log(toJSON(targets)); + }); +} + +function _new(url, options) { + options.url = url; + CDP.New(options, (err, target) => { + if (err) { + console.error(err.toString()); + process.exit(1); + } + console.log(toJSON(target)); + }); +} + +function activate(args, options) { + options.id = args; + CDP.Activate(options, (err) => { + if (err) { + console.error(err.toString()); + process.exit(1); + } + }); +} + +function close(args, options) { + options.id = args; + CDP.Close(options, (err) => { + if (err) { + console.error(err.toString()); + process.exit(1); + } + }); +} + +function version(options) { + CDP.Version(options, (err, info) => { + if (err) { + console.error(err.toString()); + process.exit(1); + } + console.log(toJSON(info)); + }); +} + +function protocol(args, options) { + options.local = args.local; + CDP.Protocol(options, (err, protocol) => { + if (err) { + console.error(err.toString()); + process.exit(1); + } + console.log(toJSON(protocol)); + }); +} + +/// + +let action; + +program + .option('-v, --v', 'Show this module version') + .option('-t, 
--host ', 'HTTP frontend host') + .option('-p, --port ', 'HTTP frontend port') + .option('-s, --secure', 'HTTPS/WSS frontend') + .option('-n, --use-host-name', 'Do not perform a DNS lookup of the host'); + +program + .command('inspect []') + .description('inspect a target (defaults to the first available target)') + .option('-w, --web-socket', 'interpret as a WebSocket URL instead of a target id') + .option('-j, --protocol ', 'Chrome Debugging Protocol descriptor (overrides `--local`)') + .option('-l, --local', 'Use the local protocol descriptor') + .action((target, args) => { + action = inspect.bind(null, target, args); + }); + +program + .command('list') + .description('list all the available targets/tabs') + .action(() => { + action = list; + }); + +program + .command('new []') + .description('create a new target/tab') + .action((url) => { + action = _new.bind(null, url); + }); + +program + .command('activate ') + .description('activate a target/tab by id') + .action((id) => { + action = activate.bind(null, id); + }); + +program + .command('close ') + .description('close a target/tab by id') + .action((id) => { + action = close.bind(null, id); + }); + +program + .command('version') + .description('show the browser version') + .action(() => { + action = version; + }); + +program + .command('protocol') + .description('show the currently available protocol descriptor') + .option('-l, --local', 'Return the local protocol descriptor') + .action((args) => { + action = protocol.bind(null, args); + }); + +program.parse(process.argv); + +// common options +const options = { + host: program.host, + port: program.port, + secure: program.secure, + useHostName: program.useHostName +}; + +if (action) { + action(options); +} else { + if (program.v) { + console.log(packageInfo.version); + } else { + program.outputHelp(); + process.exit(1); + } +} diff --git a/node_modules/chrome-remote-interface/chrome-remote-interface.js 
b/node_modules/chrome-remote-interface/chrome-remote-interface.js new file mode 100644 index 0000000..6835723 --- /dev/null +++ b/node_modules/chrome-remote-interface/chrome-remote-interface.js @@ -0,0 +1 @@ +(()=>{var e={6010:(e,t,r)=>{"use strict";var n=r(4155);const i=r(7187),o=r(4782),a=r(7996),s=r(8855);o.setDefaultResultOrder&&o.setDefaultResultOrder("ipv4first"),e.exports=function(e,t){"function"==typeof e&&(t=e,e=void 0);const r=new i;return"function"==typeof t?(n.nextTick((()=>{new s(e,r)})),r.once("connect",t)):new Promise(((t,n)=>{r.once("connect",t),r.once("error",n),new s(e,r)}))},e.exports.Protocol=a.Protocol,e.exports.List=a.List,e.exports.New=a.New,e.exports.Activate=a.Activate,e.exports.Close=a.Close,e.exports.Version=a.Version},7249:e=>{"use strict";function t(e,t,r){e.category=t,Object.keys(r).forEach((n=>{"name"!==n&&(e[n]="type"===t&&"properties"===n||"parameters"===n?function(e){const t={};return e.forEach((e=>{const r=e.name;delete e.name,t[r]=e})),t}(r[n]):r[n])}))}e.exports.prepare=function(e,r){e.protocol=r,r.domains.forEach((r=>{const n=r.domain;e[n]={},(r.commands||[]).forEach((r=>{!function(e,r,n){const i=`${r}.${n.name}`,o=(t,r,n)=>e.send(i,t,r,n);t(o,"command",n),e[i]=e[r][n.name]=o}(e,n,r)})),(r.events||[]).forEach((r=>{!function(e,r,n){const i=`${r}.${n.name}`,o=(t,r)=>{"function"==typeof t&&(r=t,t=void 0);const n=t?`${i}.${t}`:i;return"function"==typeof r?(e.on(n,r),()=>e.removeListener(n,r)):new Promise(((t,r)=>{e.once(n,t)}))};t(o,"event",n),e[i]=e[r][n.name]=o}(e,n,r)})),(r.types||[]).forEach((r=>{!function(e,r,n){const i=`${r}.${n.id}`,o={};t(o,"type",n),e[i]=e[r][n.id]=o}(e,n,r)})),e[n].on=(t,r)=>e[n][t](r)}))}},8855:(e,t,r)=>{"use strict";var n=r(4155);const i=r(7187),o=r(1588),a=r(8575).WU,s=r(8575).Qc,p=r(5529),d=r(7249),c=r(5372),l=r(7996);class u extends Error{constructor(e,t){let{message:r}=t;t.data&&(r+=` (${t.data})`),super(r),this.request=e,this.response=t}}e.exports=class extends 
i{constructor(e,t){super();e=e||{},this.host=e.host||c.HOST,this.port=e.port||c.PORT,this.secure=!!e.secure,this.useHostName=!!e.useHostName,this.alterPath=e.alterPath||(e=>e),this.protocol=e.protocol,this.local=!!e.local,this.target=e.target||(e=>{let t,r=e.find((e=>!!e.webSocketDebuggerUrl&&(t=t||e,"page"===e.type)));if(r=r||t,r)return r;throw new Error("No inspectable targets")}),this._notifier=t,this._callbacks={},this._nextCommandId=1,this.webSocketUrl=void 0,this._start()}inspect(e,t){return t.customInspect=!1,o.inspect(this,t)}send(e,t,r,n){const i=Array.from(arguments).slice(1);return t=i.find((e=>"object"==typeof e)),r=i.find((e=>"string"==typeof e)),"function"==typeof(n=i.find((e=>"function"==typeof e)))?void this._enqueueCommand(e,t,r,n):new Promise(((n,i)=>{this._enqueueCommand(e,t,r,((o,a)=>{if(o){const n={method:e,params:t,sessionId:r};i(o instanceof Error?o:new u(n,a))}else n(a)}))}))}close(e){const t=e=>{3===this._ws.readyState?e():(this._ws.removeAllListeners("close"),this._ws.once("close",(()=>{this._ws.removeAllListeners(),this._handleConnectionClose(),e()})),this._ws.close())};return"function"==typeof e?void t(e):new Promise(((e,r)=>{t(e)}))}async _start(){const e={host:this.host,port:this.port,secure:this.secure,useHostName:this.useHostName,alterPath:this.alterPath};try{const t=await this._fetchDebuggerURL(e),r=s(t);r.pathname=e.alterPath(r.pathname),this.webSocketUrl=a(r),e.host=r.hostname,e.port=r.port||e.port;const i=await this._fetchProtocol(e);d.prepare(this,i),await this._connectToWebSocket(),n.nextTick((()=>{this._notifier.emit("connect",this)}))}catch(e){this._notifier.emit("error",e)}}async _fetchDebuggerURL(e){const t=this.target;switch(typeof t){case"string":{let r=t;if(r.startsWith("/")&&(r=`ws://${this.host}:${this.port}${r}`),r.match(/^wss?:/i))return r;return(await l.List(e)).find((e=>e.id===r)).webSocketDebuggerUrl}case"object":return t.webSocketDebuggerUrl;case"function":{const r=t,n=await 
l.List(e),i=r(n);return("number"==typeof i?n[i]:i).webSocketDebuggerUrl}default:throw new Error(`Invalid target argument "${this.target}"`)}}async _fetchProtocol(e){return this.protocol?this.protocol:(e.local=this.local,await l.Protocol(e))}_connectToWebSocket(){return new Promise(((e,t)=>{try{this.secure&&(this.webSocketUrl=this.webSocketUrl.replace(/^ws:/i,"wss:")),this._ws=new p(this.webSocketUrl,[],{maxPayload:268435456,perMessageDeflate:!1,followRedirects:!0})}catch(e){return void t(e)}this._ws.on("open",(()=>{e()})),this._ws.on("message",(e=>{const t=JSON.parse(e);this._handleMessage(t)})),this._ws.on("close",(e=>{this._handleConnectionClose(),this.emit("disconnect")})),this._ws.on("error",(e=>{t(e)}))}))}_handleConnectionClose(){const e=new Error("WebSocket connection closed");for(const t of Object.values(this._callbacks))t(e);this._callbacks={}}_handleMessage(e){if(e.id){const t=this._callbacks[e.id];if(!t)return;e.error?t(!0,e.error):t(!1,e.result||{}),delete this._callbacks[e.id],0===Object.keys(this._callbacks).length&&this.emit("ready")}else if(e.method){const{method:t,params:r,sessionId:n}=e;this.emit("event",e),this.emit(t,r,n),this.emit(`${t}.${n}`,r,n)}}_enqueueCommand(e,t,r,n){const i=this._nextCommandId++,o={id:i,method:e,sessionId:r,params:t||{}};this._ws.send(JSON.stringify(o),(e=>{e?"function"==typeof n&&n(e):this._callbacks[i]=n}))}}},5372:e=>{"use strict";e.exports.HOST="localhost",e.exports.PORT=9222},7996:(e,t,r)=>{"use strict";const n=r(3423),i=r(8532),o=r(5372),a=r(4162);function s(e,t,r){const s=t.secure?i:n,p={method:t.method,host:t.host||o.HOST,port:t.port||o.PORT,useHostName:t.useHostName,path:t.alterPath?t.alterPath(e):e};a(s,p,r)}function p(e){return(t,r)=>("function"==typeof t&&(r=t,t=void 0),t=t||{},"function"==typeof r?void e(t,r):new Promise(((r,n)=>{e(t,((e,t)=>{e?n(e):r(t)}))})))}e.exports.Protocol=p((function(e,t){if(e.local){const e=r(4203);t(null,e)}else 
s("/json/protocol",e,((e,r)=>{e?t(e):t(null,JSON.parse(r))}))})),e.exports.List=p((function(e,t){s("/json/list",e,((e,r)=>{e?t(e):t(null,JSON.parse(r))}))})),e.exports.New=p((function(e,t){let r="/json/new";Object.prototype.hasOwnProperty.call(e,"url")&&(r+=`?${e.url}`),e.method=e.method||"PUT",s(r,e,((e,r)=>{e?t(e):t(null,JSON.parse(r))}))})),e.exports.Activate=p((function(e,t){s("/json/activate/"+e.id,e,(e=>{t(e||null)}))})),e.exports.Close=p((function(e,t){s("/json/close/"+e.id,e,(e=>{t(e||null)}))})),e.exports.Version=p((function(e,t){s("/json/version",e,((e,r)=>{e?t(e):t(null,JSON.parse(r))}))}))},5529:(e,t,r)=>{"use strict";const n=r(7187);e.exports=class extends n{constructor(e){super(),this._ws=new WebSocket(e),this._ws.onopen=()=>{this.emit("open")},this._ws.onclose=()=>{this.emit("close")},this._ws.onmessage=e=>{this.emit("message",e.data)},this._ws.onerror=()=>{this.emit("error",new Error("WebSocket error"))}}close(){this._ws.close()}send(e,t){try{this._ws.send(e),t()}catch(e){t(e)}}}},6124:(e,t,r)=>{"use strict";if(r(1934),r(5666),r(7694),r.g._babelPolyfill)throw new Error("only one instance of babel-polyfill is allowed");r.g._babelPolyfill=!0;function n(e,t,r){e[t]||Object.defineProperty(e,t,{writable:!0,configurable:!0,value:r})}n(String.prototype,"padLeft","".padStart),n(String.prototype,"padRight","".padEnd),"pop,reverse,shift,keys,values,entries,indexOf,every,some,forEach,map,filter,find,findIndex,includes,join,slice,concat,push,splice,unshift,sort,lastIndexOf,reduce,reduceRight,copyWithin,fill".split(",").forEach((function(e){[][e]&&n(Array,e,Function.call.bind([][e]))}))},1924:(e,t,r)=>{"use strict";var n=r(210),i=r(5559),o=i(n("String.prototype.indexOf"));e.exports=function(e,t){var r=n(e,!!t);return"function"==typeof r&&o(e,".prototype.")>-1?i(r):r}},5559:(e,t,r)=>{"use strict";var 
n=r(8612),i=r(210),o=i("%Function.prototype.apply%"),a=i("%Function.prototype.call%"),s=i("%Reflect.apply%",!0)||n.call(a,o),p=i("%Object.getOwnPropertyDescriptor%",!0),d=i("%Object.defineProperty%",!0),c=i("%Math.max%");if(d)try{d({},"a",{value:1})}catch(e){d=null}e.exports=function(e){var t=s(n,a,arguments);if(p&&d){var r=p(t,"length");r.configurable&&d(t,"length",{value:1+c(0,e.length-(arguments.length-1))})}return t};var l=function(){return s(n,o,arguments)};d?d(e.exports,"apply",{value:l}):e.exports.apply=l},7694:(e,t,r)=>{r(1761),e.exports=r(5645).RegExp.escape},4963:e=>{e.exports=function(e){if("function"!=typeof e)throw TypeError(e+" is not a function!");return e}},3365:(e,t,r)=>{var n=r(2032);e.exports=function(e,t){if("number"!=typeof e&&"Number"!=n(e))throw TypeError(t);return+e}},7722:(e,t,r)=>{var n=r(6314)("unscopables"),i=Array.prototype;null==i[n]&&r(7728)(i,n,{}),e.exports=function(e){i[n][e]=!0}},6793:(e,t,r)=>{"use strict";var n=r(4496)(!0);e.exports=function(e,t,r){return t+(r?n(e,t).length:1)}},3328:e=>{e.exports=function(e,t,r,n){if(!(e instanceof t)||void 0!==n&&n in e)throw TypeError(r+": incorrect invocation!");return e}},7007:(e,t,r)=>{var n=r(5286);e.exports=function(e){if(!n(e))throw TypeError(e+" is not an object!");return e}},5216:(e,t,r)=>{"use strict";var n=r(508),i=r(2337),o=r(875);e.exports=[].copyWithin||function(e,t){var r=n(this),a=o(r.length),s=i(e,a),p=i(t,a),d=arguments.length>2?arguments[2]:void 0,c=Math.min((void 0===d?a:i(d,a))-p,a-s),l=1;for(p0;)p in r?r[s]=r[p]:delete r[s],s+=l,p+=l;return r}},6852:(e,t,r)=>{"use strict";var n=r(508),i=r(2337),o=r(875);e.exports=function(e){for(var t=n(this),r=o(t.length),a=arguments.length,s=i(a>1?arguments[1]:void 0,r),p=a>2?arguments[2]:void 0,d=void 0===p?r:i(p,r);d>s;)t[s++]=e;return t}},9490:(e,t,r)=>{var n=r(3531);e.exports=function(e,t){var r=[];return n(e,!1,r.push,r,t),r}},9315:(e,t,r)=>{var n=r(2110),i=r(875),o=r(2337);e.exports=function(e){return function(t,r,a){var 
s,p=n(t),d=i(p.length),c=o(a,d);if(e&&r!=r){for(;d>c;)if((s=p[c++])!=s)return!0}else for(;d>c;c++)if((e||c in p)&&p[c]===r)return e||c||0;return!e&&-1}}},50:(e,t,r)=>{var n=r(741),i=r(9797),o=r(508),a=r(875),s=r(6886);e.exports=function(e,t){var r=1==e,p=2==e,d=3==e,c=4==e,l=6==e,u=5==e||l,m=t||s;return function(t,s,h){for(var f,y,g=o(t),b=i(g),v=n(s,h,3),w=a(b.length),S=0,I=r?m(t,w):p?m(t,0):void 0;w>S;S++)if((u||S in b)&&(y=v(f=b[S],S,g),e))if(r)I[S]=y;else if(y)switch(e){case 3:return!0;case 5:return f;case 6:return S;case 2:I.push(f)}else if(c)return!1;return l?-1:d||c?c:I}}},7628:(e,t,r)=>{var n=r(4963),i=r(508),o=r(9797),a=r(875);e.exports=function(e,t,r,s,p){n(t);var d=i(e),c=o(d),l=a(d.length),u=p?l-1:0,m=p?-1:1;if(r<2)for(;;){if(u in c){s=c[u],u+=m;break}if(u+=m,p?u<0:l<=u)throw TypeError("Reduce of empty array with no initial value")}for(;p?u>=0:l>u;u+=m)u in c&&(s=t(s,c[u],u,d));return s}},2736:(e,t,r)=>{var n=r(5286),i=r(4302),o=r(6314)("species");e.exports=function(e){var t;return i(e)&&("function"!=typeof(t=e.constructor)||t!==Array&&!i(t.prototype)||(t=void 0),n(t)&&null===(t=t[o])&&(t=void 0)),void 0===t?Array:t}},6886:(e,t,r)=>{var n=r(2736);e.exports=function(e,t){return new(n(e))(t)}},4398:(e,t,r)=>{"use strict";var n=r(4963),i=r(5286),o=r(7242),a=[].slice,s={},p=function(e,t,r){if(!(t in s)){for(var n=[],i=0;i{var n=r(2032),i=r(6314)("toStringTag"),o="Arguments"==n(function(){return arguments}());e.exports=function(e){var t,r,a;return void 0===e?"Undefined":null===e?"Null":"string"==typeof(r=function(e,t){try{return e[t]}catch(e){}}(t=Object(e),i))?r:o?n(t):"Object"==(a=n(t))&&"function"==typeof t.callee?"Arguments":a}},2032:e=>{var t={}.toString;e.exports=function(e){return t.call(e).slice(8,-1)}},9824:(e,t,r)=>{"use strict";var n=r(9275).f,i=r(2503),o=r(4408),a=r(741),s=r(3328),p=r(3531),d=r(2923),c=r(5436),l=r(2974),u=r(7057),m=r(4728).fastKey,h=r(1616),f=u?"_s":"size",y=function(e,t){var r,n=m(t);if("F"!==n)return 
e._i[n];for(r=e._f;r;r=r.n)if(r.k==t)return r};e.exports={getConstructor:function(e,t,r,d){var c=e((function(e,n){s(e,c,t,"_i"),e._t=t,e._i=i(null),e._f=void 0,e._l=void 0,e[f]=0,null!=n&&p(n,r,e[d],e)}));return o(c.prototype,{clear:function(){for(var e=h(this,t),r=e._i,n=e._f;n;n=n.n)n.r=!0,n.p&&(n.p=n.p.n=void 0),delete r[n.i];e._f=e._l=void 0,e[f]=0},delete:function(e){var r=h(this,t),n=y(r,e);if(n){var i=n.n,o=n.p;delete r._i[n.i],n.r=!0,o&&(o.n=i),i&&(i.p=o),r._f==n&&(r._f=i),r._l==n&&(r._l=o),r[f]--}return!!n},forEach:function(e){h(this,t);for(var r,n=a(e,arguments.length>1?arguments[1]:void 0,3);r=r?r.n:this._f;)for(n(r.v,r.k,this);r&&r.r;)r=r.p},has:function(e){return!!y(h(this,t),e)}}),u&&n(c.prototype,"size",{get:function(){return h(this,t)[f]}}),c},def:function(e,t,r){var n,i,o=y(e,t);return o?o.v=r:(e._l=o={i:i=m(t,!0),k:t,v:r,p:n=e._l,n:void 0,r:!1},e._f||(e._f=o),n&&(n.n=o),e[f]++,"F"!==i&&(e._i[i]=o)),e},getEntry:y,setStrong:function(e,t,r){d(e,t,(function(e,r){this._t=h(e,t),this._k=r,this._l=void 0}),(function(){for(var e=this,t=e._k,r=e._l;r&&r.r;)r=r.p;return e._t&&(e._l=r=r?r.n:e._t._f)?c(0,"keys"==t?r.k:"values"==t?r.v:[r.k,r.v]):(e._t=void 0,c(1))}),r?"entries":"values",!r,!0),l(t)}}},6132:(e,t,r)=>{var n=r(1488),i=r(9490);e.exports=function(e){return function(){if(n(this)!=e)throw TypeError(e+"#toJSON isn't generic");return i(this)}}},3657:(e,t,r)=>{"use strict";var n=r(4408),i=r(4728).getWeak,o=r(7007),a=r(5286),s=r(3328),p=r(3531),d=r(50),c=r(9181),l=r(1616),u=d(5),m=d(6),h=0,f=function(e){return e._l||(e._l=new y)},y=function(){this.a=[]},g=function(e,t){return u(e.a,(function(e){return e[0]===t}))};y.prototype={get:function(e){var t=g(this,e);if(t)return t[1]},has:function(e){return!!g(this,e)},set:function(e,t){var r=g(this,e);r?r[1]=t:this.a.push([e,t])},delete:function(e){var t=m(this.a,(function(t){return t[0]===e}));return~t&&this.a.splice(t,1),!!~t}},e.exports={getConstructor:function(e,t,r,o){var 
d=e((function(e,n){s(e,d,t,"_i"),e._t=t,e._i=h++,e._l=void 0,null!=n&&p(n,r,e[o],e)}));return n(d.prototype,{delete:function(e){if(!a(e))return!1;var r=i(e);return!0===r?f(l(this,t)).delete(e):r&&c(r,this._i)&&delete r[this._i]},has:function(e){if(!a(e))return!1;var r=i(e);return!0===r?f(l(this,t)).has(e):r&&c(r,this._i)}}),d},def:function(e,t,r){var n=i(o(t),!0);return!0===n?f(e).set(t,r):n[e._i]=r,e},ufstore:f}},5795:(e,t,r)=>{"use strict";var n=r(3816),i=r(2985),o=r(7234),a=r(4408),s=r(4728),p=r(3531),d=r(3328),c=r(5286),l=r(4253),u=r(7462),m=r(2943),h=r(266);e.exports=function(e,t,r,f,y,g){var b=n[e],v=b,w=y?"set":"add",S=v&&v.prototype,I={},x=function(e){var t=S[e];o(S,e,"delete"==e||"has"==e?function(e){return!(g&&!c(e))&&t.call(this,0===e?0:e)}:"get"==e?function(e){return g&&!c(e)?void 0:t.call(this,0===e?0:e)}:"add"==e?function(e){return t.call(this,0===e?0:e),this}:function(e,r){return t.call(this,0===e?0:e,r),this})};if("function"==typeof v&&(g||S.forEach&&!l((function(){(new v).entries().next()})))){var k=new v,T=k[w](g?{}:-0,1)!=k,R=l((function(){k.has(1)})),C=u((function(e){new v(e)})),$=!g&&l((function(){for(var e=new v,t=5;t--;)e[w](t,t);return!e.has(-0)}));C||((v=t((function(t,r){d(t,v,e);var n=h(new b,t,v);return null!=r&&p(r,y,n[w],n),n}))).prototype=S,S.constructor=v),(R||$)&&(x("delete"),x("has"),y&&x("get")),($||T)&&x(w),g&&S.clear&&delete S.clear}else v=f.getConstructor(t,e,y,w),a(v.prototype,r),s.NEED=!0;return m(v,e),I[e]=v,i(i.G+i.W+i.F*(v!=b),I),g||f.setStrong(v,e,y),v}},5645:e=>{var t=e.exports={version:"2.6.12"};"number"==typeof __e&&(__e=t)},2811:(e,t,r)=>{"use strict";var n=r(9275),i=r(681);e.exports=function(e,t,r){t in e?n.f(e,t,i(0,r)):e[t]=r}},741:(e,t,r)=>{var n=r(4963);e.exports=function(e,t,r){if(n(e),void 0===t)return e;switch(r){case 1:return function(r){return e.call(t,r)};case 2:return function(r,n){return e.call(t,r,n)};case 3:return function(r,n,i){return e.call(t,r,n,i)}}return function(){return 
e.apply(t,arguments)}}},3537:(e,t,r)=>{"use strict";var n=r(4253),i=Date.prototype.getTime,o=Date.prototype.toISOString,a=function(e){return e>9?e:"0"+e};e.exports=n((function(){return"0385-07-25T07:06:39.999Z"!=o.call(new Date(-50000000000001))}))||!n((function(){o.call(new Date(NaN))}))?function(){if(!isFinite(i.call(this)))throw RangeError("Invalid time value");var e=this,t=e.getUTCFullYear(),r=e.getUTCMilliseconds(),n=t<0?"-":t>9999?"+":"";return n+("00000"+Math.abs(t)).slice(n?-6:-4)+"-"+a(e.getUTCMonth()+1)+"-"+a(e.getUTCDate())+"T"+a(e.getUTCHours())+":"+a(e.getUTCMinutes())+":"+a(e.getUTCSeconds())+"."+(r>99?r:"0"+a(r))+"Z"}:o},870:(e,t,r)=>{"use strict";var n=r(7007),i=r(1689),o="number";e.exports=function(e){if("string"!==e&&e!==o&&"default"!==e)throw TypeError("Incorrect hint");return i(n(this),e!=o)}},1355:e=>{e.exports=function(e){if(null==e)throw TypeError("Can't call method on "+e);return e}},7057:(e,t,r)=>{e.exports=!r(4253)((function(){return 7!=Object.defineProperty({},"a",{get:function(){return 7}}).a}))},2457:(e,t,r)=>{var n=r(5286),i=r(3816).document,o=n(i)&&n(i.createElement);e.exports=function(e){return o?i.createElement(e):{}}},4430:e=>{e.exports="constructor,hasOwnProperty,isPrototypeOf,propertyIsEnumerable,toLocaleString,toString,valueOf".split(",")},5541:(e,t,r)=>{var n=r(7184),i=r(4548),o=r(4682);e.exports=function(e){var t=n(e),r=i.f;if(r)for(var a,s=r(e),p=o.f,d=0;s.length>d;)p.call(e,a=s[d++])&&t.push(a);return t}},2985:(e,t,r)=>{var n=r(3816),i=r(5645),o=r(7728),a=r(7234),s=r(741),p=function(e,t,r){var d,c,l,u,m=e&p.F,h=e&p.G,f=e&p.S,y=e&p.P,g=e&p.B,b=h?n:f?n[t]||(n[t]={}):(n[t]||{}).prototype,v=h?i:i[t]||(i[t]={}),w=v.prototype||(v.prototype={});for(d in h&&(r=t),r)l=((c=!m&&b&&void 0!==b[d])?b:r)[d],u=g&&c?s(l,n):y&&"function"==typeof l?s(Function.call,l):l,b&&a(b,d,l,e&p.U),v[d]!=l&&o(v,d,u),y&&w[d]!=l&&(w[d]=l)};n.core=i,p.F=1,p.G=2,p.S=4,p.P=8,p.B=16,p.W=32,p.U=64,p.R=128,e.exports=p},8852:(e,t,r)=>{var 
n=r(6314)("match");e.exports=function(e){var t=/./;try{"/./"[e](t)}catch(r){try{return t[n]=!1,!"/./"[e](t)}catch(e){}}return!0}},4253:e=>{e.exports=function(e){try{return!!e()}catch(e){return!0}}},8082:(e,t,r)=>{"use strict";r(8269);var n=r(7234),i=r(7728),o=r(4253),a=r(1355),s=r(6314),p=r(1165),d=s("species"),c=!o((function(){var e=/./;return e.exec=function(){var e=[];return e.groups={a:"7"},e},"7"!=="".replace(e,"$")})),l=function(){var e=/(?:)/,t=e.exec;e.exec=function(){return t.apply(this,arguments)};var r="ab".split(e);return 2===r.length&&"a"===r[0]&&"b"===r[1]}();e.exports=function(e,t,r){var u=s(e),m=!o((function(){var t={};return t[u]=function(){return 7},7!=""[e](t)})),h=m?!o((function(){var t=!1,r=/a/;return r.exec=function(){return t=!0,null},"split"===e&&(r.constructor={},r.constructor[d]=function(){return r}),r[u](""),!t})):void 0;if(!m||!h||"replace"===e&&!c||"split"===e&&!l){var f=/./[u],y=r(a,u,""[e],(function(e,t,r,n,i){return t.exec===p?m&&!i?{done:!0,value:f.call(t,r,n)}:{done:!0,value:e.call(r,t,n)}:{done:!1}})),g=y[0],b=y[1];n(String.prototype,e,g),i(RegExp.prototype,u,2==t?function(e,t){return b.call(e,this,t)}:function(e){return b.call(e,this)})}}},3218:(e,t,r)=>{"use strict";var n=r(7007);e.exports=function(){var e=n(this),t="";return e.global&&(t+="g"),e.ignoreCase&&(t+="i"),e.multiline&&(t+="m"),e.unicode&&(t+="u"),e.sticky&&(t+="y"),t}},3325:(e,t,r)=>{"use strict";var n=r(4302),i=r(5286),o=r(875),a=r(741),s=r(6314)("isConcatSpreadable");e.exports=function e(t,r,p,d,c,l,u,m){for(var h,f,y=c,g=0,b=!!u&&a(u,m,3);g0)y=e(t,r,h,o(h.length),y,l-1)-1;else{if(y>=9007199254740991)throw TypeError();t[y]=h}y++}g++}return y}},3531:(e,t,r)=>{var n=r(741),i=r(8851),o=r(6555),a=r(7007),s=r(875),p=r(9002),d={},c={},l=e.exports=function(e,t,r,l,u){var m,h,f,y,g=u?function(){return e}:p(e),b=n(r,l,t?2:1),v=0;if("function"!=typeof g)throw TypeError(e+" is not 
iterable!");if(o(g)){for(m=s(e.length);m>v;v++)if((y=t?b(a(h=e[v])[0],h[1]):b(e[v]))===d||y===c)return y}else for(f=g.call(e);!(h=f.next()).done;)if((y=i(f,b,h.value,t))===d||y===c)return y};l.BREAK=d,l.RETURN=c},18:(e,t,r)=>{e.exports=r(3825)("native-function-to-string",Function.toString)},3816:e=>{var t=e.exports="undefined"!=typeof window&&window.Math==Math?window:"undefined"!=typeof self&&self.Math==Math?self:Function("return this")();"number"==typeof __g&&(__g=t)},9181:e=>{var t={}.hasOwnProperty;e.exports=function(e,r){return t.call(e,r)}},7728:(e,t,r)=>{var n=r(9275),i=r(681);e.exports=r(7057)?function(e,t,r){return n.f(e,t,i(1,r))}:function(e,t,r){return e[t]=r,e}},639:(e,t,r)=>{var n=r(3816).document;e.exports=n&&n.documentElement},1734:(e,t,r)=>{e.exports=!r(7057)&&!r(4253)((function(){return 7!=Object.defineProperty(r(2457)("div"),"a",{get:function(){return 7}}).a}))},266:(e,t,r)=>{var n=r(5286),i=r(7375).set;e.exports=function(e,t,r){var o,a=t.constructor;return a!==r&&"function"==typeof a&&(o=a.prototype)!==r.prototype&&n(o)&&i&&i(e,o),e}},7242:e=>{e.exports=function(e,t,r){var n=void 0===r;switch(t.length){case 0:return n?e():e.call(r);case 1:return n?e(t[0]):e.call(r,t[0]);case 2:return n?e(t[0],t[1]):e.call(r,t[0],t[1]);case 3:return n?e(t[0],t[1],t[2]):e.call(r,t[0],t[1],t[2]);case 4:return n?e(t[0],t[1],t[2],t[3]):e.call(r,t[0],t[1],t[2],t[3])}return e.apply(r,t)}},9797:(e,t,r)=>{var n=r(2032);e.exports=Object("z").propertyIsEnumerable(0)?Object:function(e){return"String"==n(e)?e.split(""):Object(e)}},6555:(e,t,r)=>{var n=r(2803),i=r(6314)("iterator"),o=Array.prototype;e.exports=function(e){return void 0!==e&&(n.Array===e||o[i]===e)}},4302:(e,t,r)=>{var n=r(2032);e.exports=Array.isArray||function(e){return"Array"==n(e)}},8367:(e,t,r)=>{var n=r(5286),i=Math.floor;e.exports=function(e){return!n(e)&&isFinite(e)&&i(e)===e}},5286:e=>{e.exports=function(e){return"object"==typeof e?null!==e:"function"==typeof e}},5364:(e,t,r)=>{var 
n=r(5286),i=r(2032),o=r(6314)("match");e.exports=function(e){var t;return n(e)&&(void 0!==(t=e[o])?!!t:"RegExp"==i(e))}},8851:(e,t,r)=>{var n=r(7007);e.exports=function(e,t,r,i){try{return i?t(n(r)[0],r[1]):t(r)}catch(t){var o=e.return;throw void 0!==o&&n(o.call(e)),t}}},9988:(e,t,r)=>{"use strict";var n=r(2503),i=r(681),o=r(2943),a={};r(7728)(a,r(6314)("iterator"),(function(){return this})),e.exports=function(e,t,r){e.prototype=n(a,{next:i(1,r)}),o(e,t+" Iterator")}},2923:(e,t,r)=>{"use strict";var n=r(4461),i=r(2985),o=r(7234),a=r(7728),s=r(2803),p=r(9988),d=r(2943),c=r(468),l=r(6314)("iterator"),u=!([].keys&&"next"in[].keys()),m="keys",h="values",f=function(){return this};e.exports=function(e,t,r,y,g,b,v){p(r,t,y);var w,S,I,x=function(e){if(!u&&e in C)return C[e];switch(e){case m:case h:return function(){return new r(this,e)}}return function(){return new r(this,e)}},k=t+" Iterator",T=g==h,R=!1,C=e.prototype,$=C[l]||C["@@iterator"]||g&&C[g],A=$||x(g),O=g?T?x("entries"):A:void 0,P="Array"==t&&C.entries||$;if(P&&(I=c(P.call(new e)))!==Object.prototype&&I.next&&(d(I,k,!0),n||"function"==typeof I[l]||a(I,l,f)),T&&$&&$.name!==h&&(R=!0,A=function(){return $.call(this)}),n&&!v||!u&&!R&&C[l]||a(C,l,A),s[t]=A,s[k]=f,g)if(w={values:T?A:x(h),keys:b?A:x(m),entries:O},v)for(S in w)S in C||o(C,S,w[S]);else i(i.P+i.F*(u||R),t,w);return w}},7462:(e,t,r)=>{var n=r(6314)("iterator"),i=!1;try{var o=[7][n]();o.return=function(){i=!0},Array.from(o,(function(){throw 2}))}catch(e){}e.exports=function(e,t){if(!t&&!i)return!1;var r=!1;try{var o=[7],a=o[n]();a.next=function(){return{done:r=!0}},o[n]=function(){return a},e(o)}catch(e){}return r}},5436:e=>{e.exports=function(e,t){return{value:t,done:!!e}}},2803:e=>{e.exports={}},4461:e=>{e.exports=!1},3086:e=>{var t=Math.expm1;e.exports=!t||t(10)>22025.465794806718||t(10)<22025.465794806718||-2e-17!=t(-2e-17)?function(e){return 0==(e=+e)?e:e>-1e-6&&e<1e-6?e+e*e/2:Math.exp(e)-1}:t},4934:(e,t,r)=>{var 
n=r(1801),i=Math.pow,o=i(2,-52),a=i(2,-23),s=i(2,127)*(2-a),p=i(2,-126);e.exports=Math.fround||function(e){var t,r,i=Math.abs(e),d=n(e);return is||r!=r?d*(1/0):d*r}},6206:e=>{e.exports=Math.log1p||function(e){return(e=+e)>-1e-8&&e<1e-8?e-e*e/2:Math.log(1+e)}},8757:e=>{e.exports=Math.scale||function(e,t,r,n,i){return 0===arguments.length||e!=e||t!=t||r!=r||n!=n||i!=i?NaN:e===1/0||e===-1/0?e:(e-t)*(i-n)/(r-t)+n}},1801:e=>{e.exports=Math.sign||function(e){return 0==(e=+e)||e!=e?e:e<0?-1:1}},4728:(e,t,r)=>{var n=r(3953)("meta"),i=r(5286),o=r(9181),a=r(9275).f,s=0,p=Object.isExtensible||function(){return!0},d=!r(4253)((function(){return p(Object.preventExtensions({}))})),c=function(e){a(e,n,{value:{i:"O"+ ++s,w:{}}})},l=e.exports={KEY:n,NEED:!1,fastKey:function(e,t){if(!i(e))return"symbol"==typeof e?e:("string"==typeof e?"S":"P")+e;if(!o(e,n)){if(!p(e))return"F";if(!t)return"E";c(e)}return e[n].i},getWeak:function(e,t){if(!o(e,n)){if(!p(e))return!0;if(!t)return!1;c(e)}return e[n].w},onFreeze:function(e){return d&&l.NEED&&p(e)&&!o(e,n)&&c(e),e}}},133:(e,t,r)=>{var n=r(8416),i=r(2985),o=r(3825)("metadata"),a=o.store||(o.store=new(r(147))),s=function(e,t,r){var i=a.get(e);if(!i){if(!r)return;a.set(e,i=new n)}var o=i.get(t);if(!o){if(!r)return;i.set(t,o=new n)}return o};e.exports={store:a,map:s,has:function(e,t,r){var n=s(t,r,!1);return void 0!==n&&n.has(e)},get:function(e,t,r){var n=s(t,r,!1);return void 0===n?void 0:n.get(e)},set:function(e,t,r,n){s(r,n,!0).set(e,t)},keys:function(e,t){var r=s(e,t,!1),n=[];return r&&r.forEach((function(e,t){n.push(t)})),n},key:function(e){return void 0===e||"symbol"==typeof e?e:String(e)},exp:function(e){i(i.S,"Reflect",e)}}},4351:(e,t,r)=>{var n=r(3816),i=r(4193).set,o=n.MutationObserver||n.WebKitMutationObserver,a=n.process,s=n.Promise,p="process"==r(2032)(a);e.exports=function(){var e,t,r,d=function(){var n,i;for(p&&(n=a.domain)&&n.exit();e;){i=e.fn,e=e.next;try{i()}catch(n){throw e?r():t=void 0,n}}t=void 
0,n&&n.enter()};if(p)r=function(){a.nextTick(d)};else if(!o||n.navigator&&n.navigator.standalone)if(s&&s.resolve){var c=s.resolve(void 0);r=function(){c.then(d)}}else r=function(){i.call(n,d)};else{var l=!0,u=document.createTextNode("");new o(d).observe(u,{characterData:!0}),r=function(){u.data=l=!l}}return function(n){var i={fn:n,next:void 0};t&&(t.next=i),e||(e=i,r()),t=i}}},3499:(e,t,r)=>{"use strict";var n=r(4963);function i(e){var t,r;this.promise=new e((function(e,n){if(void 0!==t||void 0!==r)throw TypeError("Bad Promise constructor");t=e,r=n})),this.resolve=n(t),this.reject=n(r)}e.exports.f=function(e){return new i(e)}},5345:(e,t,r)=>{"use strict";var n=r(7057),i=r(7184),o=r(4548),a=r(4682),s=r(508),p=r(9797),d=Object.assign;e.exports=!d||r(4253)((function(){var e={},t={},r=Symbol(),n="abcdefghijklmnopqrst";return e[r]=7,n.split("").forEach((function(e){t[e]=e})),7!=d({},e)[r]||Object.keys(d({},t)).join("")!=n}))?function(e,t){for(var r=s(e),d=arguments.length,c=1,l=o.f,u=a.f;d>c;)for(var m,h=p(arguments[c++]),f=l?i(h).concat(l(h)):i(h),y=f.length,g=0;y>g;)m=f[g++],n&&!u.call(h,m)||(r[m]=h[m]);return r}:d},2503:(e,t,r)=>{var n=r(7007),i=r(5588),o=r(4430),a=r(9335)("IE_PROTO"),s=function(){},p=function(){var e,t=r(2457)("iframe"),n=o.length;for(t.style.display="none",r(639).appendChild(t),t.src="javascript:",(e=t.contentWindow.document).open(),e.write(" + + + + + +
+
+ + diff --git a/src/index.js b/src/index.js new file mode 100644 index 0000000..90c7eb3 --- /dev/null +++ b/src/index.js @@ -0,0 +1,30 @@ +/* + * This file is part of Cockpit. + * + * Copyright (C) 2017 Red Hat, Inc. + * + * Cockpit is free software; you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation; either version 2.1 of the License, or + * (at your option) any later version. + * + * Cockpit is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with Cockpit; If not, see . + */ + +import "cockpit-dark-theme"; +import React from 'react'; +import { createRoot } from 'react-dom/client'; +import 'patternfly/patternfly-5-cockpit.scss'; +import Application from './app.jsx'; +import './podman.scss'; + +document.addEventListener("DOMContentLoaded", function () { + const root = createRoot(document.getElementById('app')); + root.render(); +}); diff --git a/src/manifest.json b/src/manifest.json new file mode 100644 index 0000000..530aa6d --- /dev/null +++ b/src/manifest.json @@ -0,0 +1,16 @@ +{ + "conditions": [ + {"path-exists": "/lib/systemd/system/podman.socket"} + ], + "menu": { + "index": { + "label": "Podman containers", + "order": 50, + "keywords": [ + { + "matches": ["podman", "container", "image"] + } + ] + } + } +} diff --git a/src/podman.scss b/src/podman.scss new file mode 100644 index 0000000..6db864c --- /dev/null +++ b/src/podman.scss @@ -0,0 +1,149 @@ +@use "ct-card.scss"; +@use "page.scss"; +@import "global-variables"; +// For pf-v5-line-clamp +@import "@patternfly/patternfly/sass-utilities/mixins.scss"; +// For pf-u-disabled-color-100 +@import 
"@patternfly/patternfly/utilities/Text/text.css"; + +#app .pf-v5-c-card.containers-containers, #app .pf-v5-c-card.containers-images { + @extend .ct-card; +} + +.pf-v5-c-modal-box__title-text { + white-space: break-spaces; +} + +#containers-images, #containers-containers { + // Decrease padding for the image/container toggle button list + .pf-v5-c-table.pf-m-compact .pf-v5-c-table__toggle { + padding-inline-start: 0; + } +} + +@media screen and (max-width: 768px) { + // Badges should not stretch in mobile mode + .pf-v5-c-table [data-label] > .pf-v5-c-badge { + justify-self: start; + } +} + +.container-block { + display: flex; + flex-direction: column; + word-break: break-all; +} + +.container-block small { + @include pf-v5-line-clamp("1"); + color: var(--pf-v5-global--Color--200); +} + +.container-name { + font-size: var(--pf-v5-global--FontSize--lg); + font-weight: 400; +} + +.containers-run-onbuildvarclaim input { + max-inline-size: 15em; +} + +.pf-v5-c-alert__description { + overflow-wrap: anywhere; +} + +.listing-action { + inline-size: 100%; + display: flex; + justify-content: space-around; +} + +.ct-badge-container-running, .ct-badge-pod-running { + background-color: var(--pf-v5-global--info-color--100); + color: white; +} + +.ct-badge-container-healthy { + background-color: var(--pf-v5-global--success-color--100); + color: white; +} + +.ct-badge-container-unhealthy { + background-color: var(--pf-v5-global--danger-color--100); + color: white; +} + +.ct-badge-toolbox { + background-color: var(--pf-v5-global--palette--purple-100); + color: var(--pf-v5-global--palette--purple-600); + + .pf-v5-theme-dark & { + background-color: var(--pf-v5-global--palette--purple-500); + color: white; + } +} + +.ct-badge-distrobox { + background-color: var(--pf-v5-global--palette--gold-100); + color: var(--pf-v5-global--palette--gold-600); + + .pf-v5-theme-dark & { + background-color: var(--pf-v5-global--palette--gold-500); + color: white; + } +} + +.green { + color: 
var(--pf-v5-global--success-color--100); +} + +.red { + color: var(--pf-v5-global--danger-color--100); +} + +// Hide the header nav from the expandable rows - this should be better done with JS but the current cockpit-listing-panel implementation does not support this variant +#containers-images .ct-listing-panel-head { + display: none; +} + +.ct-grey-text { + color: var(--pf-v5-global--Color--200); +} + +.content-action { + text-align: end; + white-space: nowrap !important; +} + +// Remove doubled-up padding and borders on nested tables in mobile +.ct-listing-panel-body .ct-table tr { + --pf-v5-c-table-tr--responsive--PaddingTop: 0; + --pf-v5-c-table-tr--responsive--PaddingRight: 0; + --pf-v5-c-table-tr--responsive--PaddingBottom: 0; + --pf-v5-c-table-tr--responsive--PaddingLeft: 0; +} + +@media (max-width: $pf-v5-global--breakpoint--md - 1) { + .show-only-when-wide { + display: none; + } +} + +@media (min-width: $pf-v5-global--breakpoint--md) { + .show-only-when-narrow { + display: none; + } + + // Add borders to no pod containers list and images list + .container-pod.pf-m-plain tbody, + .containers-images tbody { + border: var(--pf-v5-c-card--m-flat--BorderWidth) solid var(--pf-v5-c-card--m-flat--BorderColor); + } +} + +// Override table padding on mobile +@media (max-width: $pf-v5-global--breakpoint--md) { + .health-logs.pf-m-grid-md.pf-v5-c-table tr:where(.pf-v5-c-table__tr):not(.pf-v5-c-table__expandable-row) { + padding: 0; + } +} diff --git a/src/rest.js b/src/rest.js new file mode 100644 index 0000000..30a5dfa --- /dev/null +++ b/src/rest.js @@ -0,0 +1,89 @@ +import cockpit from "cockpit"; +import { debug } from "./util.js"; + +function manage_error(reject, error, content) { + let content_o = {}; + if (content) { + try { + content_o = JSON.parse(content); + } catch { + content_o.message = content; + } + } + const c = { ...error, ...content_o }; + reject(c); +} + +// calls are async, so keep track of a call counter to associate a result with a call +let 
call_id = 0; + +function connect(address, system) { + /* This doesn't create a channel until a request */ + const http = cockpit.http(address, { superuser: system ? "require" : null }); + const connection = {}; + + connection.monitor = function(options, callback, system, return_raw) { + return new Promise((resolve, reject) => { + let buffer = ""; + + http.request(options) + .stream(data => { + if (return_raw) + callback(data); + else { + buffer += data; + const chunks = buffer.split("\n"); + buffer = chunks.pop(); + + chunks.forEach(chunk => { + debug(system, "monitor", chunk); + callback(JSON.parse(chunk)); + }); + } + }) + .catch((error, content) => { + manage_error(reject, error, content); + }) + .then(resolve); + }); + }; + + connection.call = function (options) { + const id = call_id++; + debug(system, `call ${id}:`, JSON.stringify(options)); + return new Promise((resolve, reject) => { + options = options || {}; + http.request(options) + .then(result => { + debug(system, `call ${id} result:`, JSON.stringify(result)); + resolve(result); + }) + .catch((error, content) => { + debug(system, `call ${id} error:`, JSON.stringify(error), "content", JSON.stringify(content)); + manage_error(reject, error, content); + }); + }); + }; + + connection.close = function () { + http.close(); + }; + + return connection; +} + +/* + * Connects to the podman service, performs a single call, and closes the + * connection. 
+ */ +async function call (address, system, parameters) { + const connection = connect(address, system); + const result = await connection.call(parameters); + connection.close(); + return result; +} + +export default { + connect, + call +}; diff --git a/src/util.js b/src/util.js new file mode 100644 index 0000000..688194a --- /dev/null +++ b/src/util.js @@ -0,0 +1,185 @@ +import React, { useContext } from "react"; + +import cockpit from 'cockpit'; + +import { debounce } from 'throttle-debounce'; +import * as dfnlocales from 'date-fns/locale'; +import { formatRelative } from 'date-fns'; +const _ = cockpit.gettext; + +export const PodmanInfoContext = React.createContext(); +export const usePodmanInfo = () => useContext(PodmanInfoContext); + +export const WithPodmanInfo = ({ value, children }) => { + return ( + + {children} + + ); +}; + +// https://github.com/containers/podman/blob/main/libpod/define/containerstate.go +// "Restarting" comes from special handling of restart case in Application.updateContainer() +export const states = [_("Exited"), _("Paused"), _("Stopped"), _("Removing"), _("Configured"), _("Created"), _("Restart"), _("Running")]; + +// https://github.com/containers/podman/blob/main/libpod/define/podstate.go +export const podStates = [_("Created"), _("Running"), _("Stopped"), _("Paused"), _("Exited"), _("Error")]; + +export const fallbackRegistries = ["docker.io", "quay.io"]; + +export function debug(system, ...args) { + if (window.debugging === "all" || window.debugging?.includes("podman")) + console.debug("podman", system ? "system" : "user", ...args); +} + +export function truncate_id(id) { + if (!id) { + return ""; + } + return id.substr(0, 12); +} + +export function localize_time(unix_timestamp) { + const locale = (cockpit.language == "en") ? 
dfnlocales.enUS : dfnlocales[cockpit.language.replace('_', '')]; + return formatRelative(unix_timestamp * 1000, Date.now(), { locale }); +} + +export function format_memory_and_limit(usage, limit) { + if (usage === undefined || isNaN(usage)) + return ""; + + let mtext = ""; + let units = 1000; + let parts; + if (limit) { + parts = cockpit.format_bytes(limit, units, true); + mtext = " / " + parts.join(" "); + units = parts[1]; + } + + if (usage) { + parts = cockpit.format_bytes(usage, units, true); + if (mtext) + return _(parts[0] + mtext); + else + return _(parts.join(" ")); + } else { + return ""; + } +} + +/* + * The functions quote_cmdline and unquote_cmdline implement + * a simple shell-like quoting syntax. They are used when letting the + * user edit a sequence of words as a single string. + * + * When parsing, words are separated by whitespace. Single and double + * quotes can be used to protect a sequence of characters that + * contains whitespace or the other quote character. A backslash can + * be used to protect any character. Quotes can appear in the middle + * of a word. 
+ */ + +export function quote_cmdline(words) { + words = words || []; + + function is_whitespace(c) { + return c == ' '; + } + + function quote(word) { + let text = ""; + let quote_char = ""; + let i; + for (i = 0; i < word.length; i++) { + if (word[i] == '\\' || word[i] == quote_char) + text += '\\'; + else if (quote_char === "") { + if (word[i] == "'" || is_whitespace(word[i])) + quote_char = '"'; + else if (word[i] == '"') + quote_char = "'"; + } + text += word[i]; + } + + return quote_char + text + quote_char; + } + + return words.map(quote).join(' '); +} + +export function unquote_cmdline(text) { + const words = []; + let next; + + function is_whitespace(c) { + return c == ' '; + } + + function skip_whitespace() { + while (next < text.length && is_whitespace(text[next])) + next++; + } + + function parse_word() { + let word = ""; + let quote_char = null; + + while (next < text.length) { + if (text[next] == '\\') { + next++; + if (next < text.length) { + word += text[next]; + } + } else if (text[next] == quote_char) { + quote_char = null; + } else if (quote_char) { + word += text[next]; + } else if (text[next] == '"' || text[next] == "'") { + quote_char = text[next]; + } else if (is_whitespace(text[next])) { + break; + } else + word += text[next]; + next++; + } + return word; + } + + next = 0; + skip_whitespace(); + while (next < text.length) { + words.push(parse_word()); + skip_whitespace(); + } + + return words; +} + +export function image_name(image) { + return image.RepoTags ? image.RepoTags[0] : ":"; +} + +export function is_valid_container_name(name) { + return /^[a-zA-Z0-9][a-zA-Z0-9_\\.-]*$/.test(name); +} + +/* Clears a single field in validationFailed object. 
+ * + * Arguments: + * - validationFailed (object): Object containing list of fields with validation error + * - key (string): Specified which field from validationFailed object is clear + * - onValidationChange (func) + */ +export const validationClear = (validationFailed, key, onValidationChange) => { + if (!validationFailed) + return; + + const delta = { ...validationFailed }; + delete delta[key]; + onValidationChange(delta); +}; + +// This method needs to be outside of component as re-render would create a new instance of debounce +export const validationDebounce = debounce(500, (validationHandler) => validationHandler()); diff --git a/test/browser/browser.sh b/test/browser/browser.sh new file mode 100755 index 0000000..0d5c581 --- /dev/null +++ b/test/browser/browser.sh @@ -0,0 +1,88 @@ +#!/bin/sh +set -eux + +# test plan name, passed on to run-test.sh +PLAN="$1" + +export TEST_BROWSER=${TEST_BROWSER:-firefox} + +TESTS="$(realpath $(dirname "$0"))" +export SOURCE="$(realpath $TESTS/../..)" + +# https://tmt.readthedocs.io/en/stable/overview.html#variables +export LOGS="${TMT_TEST_DATA:-$(pwd)/logs}" +mkdir -p "$LOGS" +chmod a+w "$LOGS" + +# install firefox (available everywhere in Fedora and RHEL) +# we don't need the H.264 codec, and it is sometimes not available (rhbz#2005760) +dnf install --disablerepo=fedora-cisco-openh264 -y --setopt=install_weak_deps=False firefox + +# nodejs 10 is too old for current Cockpit test API +if grep -q platform:el8 /etc/os-release; then + dnf module switch-to -y nodejs:16 +fi + +# HACK: ensure that critical components are up to date: https://github.com/psss/tmt/issues/682 +dnf update -y podman crun conmon criu + +# if we run during cross-project testing against our main-builds COPR, then let that win +# even if Fedora has a newer revision +main_builds_repo="$(ls /etc/yum.repos.d/*cockpit*main-builds* 2>/dev/null || true)" +if [ -n "$main_builds_repo" ]; then + echo 'priority=0' >> "$main_builds_repo" + dnf distro-sync -y --repo 
'copr*' cockpit-podman +fi + +# Show critical package versions +rpm -q runc crun podman criu kernel-core selinux-policy cockpit-podman cockpit-bridge || true + +# create user account for logging in +if ! id admin 2>/dev/null; then + useradd -c Administrator -G wheel admin + echo admin:foobar | chpasswd +fi + +# set root's password +echo root:foobar | chpasswd + +# avoid sudo lecture during tests +su -c 'echo foobar | sudo --stdin whoami' - admin + +# create user account for running the test +if ! id runtest 2>/dev/null; then + useradd -c 'Test runner' runtest + # allow test to set up things on the machine + mkdir -p /root/.ssh + curl https://raw.githubusercontent.com/cockpit-project/bots/main/machine/identity.pub >> /root/.ssh/authorized_keys + chmod 600 /root/.ssh/authorized_keys +fi +chown -R runtest "$SOURCE" + +# disable core dumps, we rather investigate them upstream where test VMs are accessible +echo core > /proc/sys/kernel/core_pattern + +# grab a few images to play with; tests run offline, so they cannot download images +podman rmi --all + +# set up our expected images, in the same way that we do for upstream CI +# this sometimes runs into network issues, so retry a few times +for retry in $(seq 5); do + if curl https://raw.githubusercontent.com/cockpit-project/bots/main/images/scripts/lib/podman-images.setup | sh -eux; then + break + fi + sleep $((5 * retry * retry)) +done + +# image setup, shared with upstream tests +$TESTS/../vm.install + +systemctl enable --now cockpit.socket podman.socket + +# Run tests as unprivileged user +# once we drop support for RHEL 8, use this: +# runuser -u runtest --whitelist-environment=TEST_BROWSER,TEST_ALLOW_JOURNAL_MESSAGES,TEST_AUDIT_NO_SELINUX,SOURCE,LOGS $TESTS/run-test.sh $PLAN +runuser -u runtest --preserve-environment env USER=runtest HOME=$(getent passwd runtest | cut -f6 -d:) $TESTS/run-test.sh $PLAN + +RC=$(cat $LOGS/exitcode) +exit ${RC:-1} diff --git a/test/browser/main.fmf b/test/browser/main.fmf new file 
mode 100644 index 0000000..dfad2c3 --- /dev/null +++ b/test/browser/main.fmf @@ -0,0 +1,24 @@ +require: + - cockpit-podman + - cockpit-ws + - cockpit-system + - bzip2 + - criu + - git-core + - libvirt-python3 + - make + - nodejs + - python3 +duration: 30m + +/system: + test: ./browser.sh system + summary: Run *System tests + +/user: + test: ./browser.sh user + summary: Run *User tests + +/other: + test: ./browser.sh other + summary: Run all other tests diff --git a/test/browser/run-test.sh b/test/browser/run-test.sh new file mode 100755 index 0000000..9b64cde --- /dev/null +++ b/test/browser/run-test.sh @@ -0,0 +1,48 @@ +#!/bin/sh +set -eux + +PLAN="$1" + +# tests need cockpit's bots/ libraries and test infrastructure +cd $SOURCE +rm -f bots # common local case: existing bots symlink +make bots test/common + +if [ -e .git ]; then + tools/node-modules checkout + # disable detection of affected tests; testing takes too long as there is no parallelization + mv .git dot-git +else + # upstream tarballs ship test dependencies; print version for debugging + grep '"version"' node_modules/chrome-remote-interface/package.json +fi + +. /etc/os-release +export TEST_OS="${ID}-${VERSION_ID/./-}" + +if [ "${TEST_OS#centos-}" != "$TEST_OS" ]; then + TEST_OS="${TEST_OS}-stream" +fi + +# select subset of tests according to plan +TESTS="$(test/common/run-tests -l)" +case "$PLAN" in + system) TESTS="$(echo "$TESTS" | grep 'System$')" ;; + user) TESTS="$(echo "$TESTS" | grep 'User$')" ;; + other) TESTS="$(echo "$TESTS" | grep -vE '(System|User)$')" ;; + *) echo "Unknown test plan: $PLAN" >&2; exit 1 ;; +esac + +EXCLUDES="" + +# make it easy to check in logs +echo "TEST_ALLOW_JOURNAL_MESSAGES: ${TEST_ALLOW_JOURNAL_MESSAGES:-}" +echo "TEST_AUDIT_NO_SELINUX: ${TEST_AUDIT_NO_SELINUX:-}" + +RC=0 +test/common/run-tests --nondestructive --machine 127.0.0.1:22 --browser 127.0.0.1:9090 $TESTS $EXCLUDES || RC=$? 
+ +echo $RC > "$LOGS/exitcode" +cp --verbose Test* "$LOGS" || true +# deliver test result via exitcode file +exit 0 diff --git a/test/check-application b/test/check-application new file mode 100755 index 0000000..41109d4 --- /dev/null +++ b/test/check-application @@ -0,0 +1,2871 @@ +#!/usr/bin/python3 -cimport os, sys; os.execv(os.path.dirname(sys.argv[1]) + "/common/pywrap", sys.argv) +# Run this with --help to see available options for tracing and debugging +# See https://github.com/cockpit-project/cockpit/blob/main/test/common/testlib.py +# "class Browser" and "class MachineCase" for the available API. + +import os +import sys +import time + +import testlib +from machine_core import ssh_connection + +REGISTRIES_CONF = """ +[registries.search] +registries = ['localhost:5000', 'localhost:6000'] + +[registries.insecure] +registries = ['localhost:5000', 'localhost:6000'] +""" + +NOT_RUNNING = ["Exited", "Stopped"] + +# image names used in tests +IMG_ALPINE = "localhost/test-alpine" +IMG_ALPINE_LATEST = IMG_ALPINE + ":latest" +IMG_BUSYBOX = "localhost/test-busybox" +IMG_BUSYBOX_LATEST = IMG_BUSYBOX + ":latest" +IMG_REGISTRY = "localhost/test-registry" +IMG_REGISTRY_LATEST = IMG_REGISTRY + ":latest" + + +def podman_version(cls): + version = cls.execute(False, "podman -v").strip().split(' ')[-1] + # HACK: handle possible rc versions such as 4.4.0-rc2 + return tuple(int(v.split('-')[0]) for v in version.split('.')) + + +def showImages(browser): + if browser.attr("#containers-images button.pf-v5-c-expandable-section__toggle", "aria-expanded") == 'false': + browser.click("#containers-images button.pf-v5-c-expandable-section__toggle") + + +def checkImage(browser, name, owner): + showImages(browser) + browser.wait_visible("#containers-images table") + browser.wait_js_func("""(function (first, last) { + let items = ph_select("#containers-images table tbody"); + for (i = 0; i < items.length; i++) + if (items[i].innerText.trim().startsWith(first) && 
items[i].innerText.trim().includes(last)) + return true; + return false; + })""", name, owner) + + +# HACK: temporary workaround till we bump testlib +def become_superuser(browser, user=None, password=None, passwordless=False): + cur_frame = browser.cdp.cur_frame + browser.switch_to_top() + + browser.open_superuser_dialog() + + pf_prefix = "" if browser.machine.system_before(293) else "-v5" + + if passwordless: + browser.wait_in_text(f".pf{pf_prefix}-c-modal-box:contains('Administrative access')", + "You now have administrative access.") + browser.click(f".pf{pf_prefix}-c-modal-box button:contains('Close')") + browser.wait_not_present(f".pf{pf_prefix}-c-modal-box:contains('You now have administrative access.')") + else: + browser.wait_in_text(f".pf{pf_prefix}-c-modal-box:contains('Switch to administrative access')", + f"Password for {user or 'admin'}:") + browser.set_input_text(f".pf{pf_prefix}-c-modal-box:contains('Switch to administrative access') input", + password or "foobar") + browser.click(f".pf{pf_prefix}-c-modal-box button:contains('Authenticate')") + browser.wait_not_present(f".pf{pf_prefix}-c-modal-box:contains('Switch to administrative access')") + + browser.check_superuser_indicator("Administrative access") + browser.switch_to_frame(cur_frame) + + +# HACK: temporary workaround till we bump testlib +def drop_superuser(browser): + cur_frame = browser.cdp.cur_frame + browser.switch_to_top() + + pf_prefix = "" if browser.machine.system_before(293) else "-v5" + + browser.open_superuser_dialog() + browser.click(f".pf{pf_prefix}-c-modal-box:contains('Switch to limited access') button:contains('Limit access')") + browser.wait_not_present(f".pf{pf_prefix}-c-modal-box:contains('Switch to limited access')") + browser.check_superuser_indicator("Limited access") + + browser.switch_to_frame(cur_frame) + + +@testlib.nondestructive +class TestApplication(testlib.MachineCase): + + def setUp(self): + super().setUp() + m = self.machine + m.execute(""" + systemctl stop 
podman.service; systemctl --now enable podman.socket + # Ensure podman is really stopped, otherwise it keeps the containers/ directory busy + pkill -e -9 podman || true + while pgrep podman; do sleep 0.1; done + pkill -e -9 conmon || true + while pgrep conmon; do sleep 0.1; done + findmnt --list -otarget | grep /var/lib/containers/. | xargs -r umount + sync + """) + + # backup/restore pristine podman state, so that tests can run on existing testbeds + self.restore_dir("/var/lib/containers") + + self.addCleanup(m.execute, """ + systemctl stop podman.service podman.socket + + # HACK: system reset has 10s timeout, make that faster with an extra `stop` + # https://github.com/containers/podman/issues/21874 + podman stop --time 0 --all + podman pod stop --time 0 --all + + systemctl reset-failed podman.service podman.socket + podman system reset --force + pkill -e -9 podman || true + while pgrep podman; do sleep 0.1; done + pkill -e -9 conmon || true + while pgrep conmon; do sleep 0.1; done + + # HACK: sometimes podman leaks mounts + findmnt --list -otarget | grep /var/lib/containers/. | xargs -r umount + sync + """) + + # Create admin session + m.execute(""" + if [ ! 
-d /home/admin/.ssh ]; then + mkdir /home/admin/.ssh + cp /root/.ssh/* /home/admin/.ssh + chown -R admin:admin /home/admin/.ssh + chmod -R go-wx /home/admin/.ssh + fi + """) + self.admin_s = ssh_connection.SSHConnection(user="admin", + address=m.ssh_address, + ssh_port=m.ssh_port, + identity_file=m.identity_file) + + # Enable user service as well; copy our images (except cockpit/ws) from system + self.admin_s.execute(""" + systemctl --user stop podman.service + for img in $(ls /var/lib/test-images/*.tar | grep -v cockpitws); do podman load < "$img"; done + systemctl --now --user enable podman.socket + """) + self.addCleanup(self.admin_s.execute, """ + systemctl --user stop podman.service podman.socket + podman system reset --force + """) + # HACK: system reset has 10s timeout, make that faster with an extra `stop` + # https://github.com/containers/podman/issues/21874 + # Ubuntu 22.04 has old podman that does not know about rm --time + if m.image == 'ubuntu-2204': + self.addCleanup(self.admin_s.execute, "podman rm --force --all", timeout=300) + self.addCleanup(self.admin_s.execute, "podman pod rm --force --all", timeout=300) + else: + self.addCleanup(self.admin_s.execute, "podman rm --force --time 0 --all") + self.addCleanup(self.admin_s.execute, "podman pod rm --force --time 0 --all") + + # But disable it globally so that "systemctl --user disable" does what we expect + m.execute("systemctl --global disable podman.socket") + + self.allow_journal_messages("/run.*/podman/podman: couldn't connect.*") + self.allow_journal_messages(".*/run.*/podman/podman.*Connection reset by peer") + + # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1008249 + self.has_criu = "debian" not in m.image and "ubuntu" not in m.image + self.has_selinux = "arch" not in m.image and "debian" not in m.image and "ubuntu" not in m.image + self.has_cgroupsV2 = m.image not in ["centos-8-stream"] and not m.image.startswith('rhel-8') + + self.system_images_count = int(self.execute(True, "podman 
images -n | wc -l").strip()) + self.user_images_count = int(self.execute(False, "podman images -n | wc -l").strip()) + + # allow console.error + self.allow_browser_errors( + ".*couldn't search registry \".*\": pinging container registry .*", + ".*Error occurred while connecting console: cannot resize session: cannot resize container.*", + ) + + def tearDown(self): + if self.getError(): + # dump container logs for debugging + for auth in [False, True]: + print(f"----- {auth and 'system' or 'user'} containers -----", file=sys.stderr) + self.execute(auth, "podman ps -a >&2") + self.execute(auth, 'for c in $(podman ps -aq); do echo "---- $c ----" >&2; podman logs $c >&2; done') + + super().tearDown() + + def getRestartPolicy(self, auth, container_name): + cmd = f"podman inspect --format '{{{{.HostConfig.RestartPolicy}}}}' {container_name}" + return self.execute(auth, cmd).strip() + + def waitNumImages(self, expected): + self.browser.wait_js_func("ph_count_check", "#containers-images table[aria-label='Images'] > tbody", expected) + + def waitNumContainers(self, expected, auth): + if auth and self.machine.ostree_image: + extra = 1 # cockpit/ws + else: + extra = 0 + + self.browser.wait_js_func("ph_count_check", "#containers-containers tbody", expected + extra) + + def performContainerAction(self, container, cmd): + b = self.browser + b.click(f"#containers-containers tbody tr:contains('{container}') .pf-v5-c-menu-toggle") + b.click(f"#containers-containers tbody tr:contains('{container}') button.pf-v5-c-menu__item:contains({cmd})") + + def getContainerAction(self, container, cmd): + return f"#containers-containers tbody tr:contains('{container}') button.pf-v5-c-menu__item:contains({cmd})" + + def toggleExpandedContainer(self, container): + b = self.browser + b.click(f"#containers-containers tbody tr:contains('{container}') .pf-v5-c-table__toggle button") + + def getContainerAttr(self, container, key, selector=""): + b = self.browser + return b.text(f"#containers-containers 
tbody tr:contains('{container}') > td[data-label={key}] {selector}") + + def execute(self, system, cmd): + if system: + return self.machine.execute(cmd) + else: + return self.admin_s.execute(cmd) + + def login(self, system=True): + # HACK: The first rootless call often gets stuck or fails + # In such case we have alert banner to start the service (or just empty state) + # A real user would just hit the button so lets do the same as this is always getting + # back to us and we waste too much time reporting to podman with mixed results. + # Examples: + # https://github.com/containers/podman/issues/8762 + # https://github.com/containers/podman/issues/9251 + # https://github.com/containers/podman/issues/6660 + + b = self.browser + + self.login_and_go("/podman", superuser=system) + b.wait_visible("#app") + + with self.browser.wait_timeout(30): + try: + b.wait_visible("#containers-containers") + b.wait_not_in_text("#containers-containers", "Loading") + b.wait_not_present("#overview div.pf-v5-c-alert") + except testlib.Error: + if system: + b.click("#overview div.pf-v5-c-alert .pf-v5-c-alert__action > button:contains(Start)") + b.wait_not_present("#overview div.pf-v5-c-alert") + else: + b.click("#app .pf-v5-c-empty-state button.pf-m-primary") + b.wait_not_present("#app .pf-v5-c-empty-state button") + + def waitPodRow(self, podName, present=False): + if present: + self.browser.wait_visible("#table-" + podName) + else: + self.browser.wait_not_present("#table-" + podName) + + def waitPodContainer(self, podName, containerList, system=True): + if len(containerList): + for container in containerList: + self.waitContainer(container["id"], system, name=container["name"], image=container["image"], + cmd=container["command"], state=container["state"], pod=podName) + else: + if self.browser.val("#containers-containers-filter") == "all": + self.browser.wait_in_text("#table-" + podName + " .pf-v5-c-empty-state", "No containers in this pod") + else: + 
self.browser.wait_in_text("#table-" + podName + " .pf-v5-c-empty-state", + "No running containers in this pod") + + def waitContainerRow(self, container, present=True): + b = self.browser + if present: + b.wait_visible(f'#containers-containers td[data-label="Container"]:contains("{container}")') + else: + b.wait_not_present(f'#containers-containers td[data-label="Container"]:contains("{container}")') + + def performPodAction(self, podName, podOwner, action): + b = self.browser + + b.click(f"#pod-{podName}-{podOwner}-action-toggle") + b.click(f"ul.pf-v5-c-menu__list li > button.pod-action-{action.lower()}") + b.wait_not_present("ul.pf-v5-c-menu__list") + + def getStartTime(self, container: str, *, auth: bool) -> str: + # don't format the raw time strings from the API, force json format + out = self.execute(auth, "podman inspect --format '{{json .State.StartedAt}}' " + container) + return out.strip().replace('"', '') + + def waitRestart(self, container: str, old_start: str, *, auth: bool) -> int: + for _ in range(10): + new_start = self.getStartTime(container, auth=auth) + if new_start > old_start: + return new_start + time.sleep(1) + else: + self.fail("Timed out waiting for StartedAt change") + + def testPods(self): + b = self.browser + + self.login() + + self.filter_containers("running") + if not self.machine.ostree_image: + b.wait_in_text("#containers-containers", "No running containers") + + # Run a pods as system + self.machine.execute("podman pod create --infra=false --name pod-1") + + self.waitPodRow("pod-1", False) + self.filter_containers("all") + self.waitPodContainer("pod-1", []) + + def get_pod_cpu_usage(pod_name): + cpu = self.browser.text(f"#table-{pod_name}-title .pod-cpu") + self.assertIn('%', cpu) + return float(cpu[:-1]) + + def get_pod_memory(pod_name): + memory = self.browser.text(f"#table-{pod_name}-title .pod-memory") + memory, unit = memory.split(' ') + self.assertIn(unit, ["GB", "MB", "KB"]) + return float(memory) + + run_cmd = f"podman run -d 
--pod pod-1 --name test-pod-1-system --stop-timeout 0 {IMG_ALPINE} sleep 100" + containerId = self.machine.execute(run_cmd).strip() + self.waitPodContainer("pod-1", [{"name": "test-pod-1-system", "image": IMG_ALPINE, + "command": "sleep 100", "state": "Running", "id": containerId}]) + cpu = get_pod_cpu_usage("pod-1") + b.wait(lambda: get_pod_memory("pod-1") > 0) + + # Test that cpu usage increases + self.machine.execute("podman exec -di test-pod-1-system sh -c 'dd bs=1024 < /dev/urandom > /dev/null'") + b.wait(lambda: get_pod_cpu_usage("pod-1") > cpu) + + self.machine.execute("podman pod stop -t0 pod-1") # disable timeout, so test doesn't wait endlessly + self.waitPodContainer("pod-1", [{"name": "test-pod-1-system", "image": IMG_ALPINE, + "command": "sleep 100", "state": NOT_RUNNING, "id": containerId}]) + self.filter_containers("running") + self.waitPodRow("pod-1", False) + + self.filter_containers("all") + b.set_input_text('#containers-filter', 'pod-1') + self.waitPodContainer("pod-1", [{"name": "test-pod-1-system", "image": IMG_ALPINE, + "command": "sleep 100", "state": NOT_RUNNING, "id": containerId}]) + b.set_input_text('#containers-filter', 'test-pod-1-system') + self.waitPodContainer("pod-1", [{"name": "test-pod-1-system", "image": IMG_ALPINE, + "command": "sleep 100", "state": NOT_RUNNING, "id": containerId}]) + # TODO add pixel test when this is again reachable - https://github.com/cockpit-project/bots/issues/2463 + + # Check Pod Actions + self.performPodAction("pod-1", "system", "Start") + self.waitPodContainer("pod-1", [{"name": "test-pod-1-system", "image": IMG_ALPINE, + "command": "sleep 100", "state": "Running", "id": containerId}]) + + self.performPodAction("pod-1", "system", "Pause") + self.waitPodContainer("pod-1", [{"name": "test-pod-1-system", "image": IMG_ALPINE, + "command": "sleep 100", "state": "Paused", "id": containerId}]) + + self.performPodAction("pod-1", "system", "Unpause") + self.waitPodContainer("pod-1", [{"name": "test-pod-1-system", 
"image": IMG_ALPINE, + "command": "sleep 100", "state": "Running", "id": containerId}]) + + self.performPodAction("pod-1", "system", "Stop") + self.waitPodContainer("pod-1", [{"name": "test-pod-1-system", "image": IMG_ALPINE, + "command": "sleep 100", "state": NOT_RUNNING, "id": containerId}]) + + self.machine.execute("podman pod start pod-1") + self.waitPodContainer("pod-1", [{"name": "test-pod-1-system", "image": IMG_ALPINE, + "command": "sleep 100", "state": "Running", "id": containerId}]) + + old_start = self.getStartTime("test-pod-1-system", auth=True) + self.performPodAction("pod-1", "system", "Restart") + self.waitRestart("test-pod-1-system", old_start, auth=True) + + self.performPodAction("pod-1", "system", "Delete") + b.click(".pf-v5-c-modal-box button:contains(Delete)") + # Alert should be shown, that running pods need to be force deleted. + b.wait_visible(".pf-v5-c-modal-box__body .pf-v5-c-alert") + b.wait_in_text(".pf-v5-c-modal-box__body .pf-v5-c-list", "test-pod-1-system") + b.click(".pf-v5-c-modal-box button:contains('Force delete')") + self.waitPodRow("pod-1", False) + + # HACK: there is some race here which steals the focus from the filter input and selects the page text instead + for _ in range(3): + b.focus('#containers-filter') + time.sleep(1) + if b.eval_js('document.activeElement == document.querySelector("#containers-filter")'): + break + b.set_input_text('#containers-filter', '') + self.machine.execute("podman pod create --infra=false --name pod-2") + self.waitPodContainer("pod-2", []) + run_cmd = f"podman run -d --pod pod-2 --name test-pod-2-system --stop-timeout 0 {IMG_ALPINE} sleep 100" + containerId = self.machine.execute(run_cmd).strip() + self.waitPodContainer("pod-2", [{"name": "test-pod-2-system", "image": IMG_ALPINE, + "command": "sleep 100", "state": "Running", "id": containerId}]) + self.machine.execute("podman rm --force -t0 test-pod-2-system") + self.waitPodContainer("pod-2", []) + self.performPodAction("pod-2", "system", 
"Delete") + b.wait_not_in_text(".pf-v5-c-modal-box__body", "test-pod-2-system") + b.click(".pf-v5-c-modal-box button:contains('Delete')") + self.waitPodRow("pod-2", False) + + # Volumes / mounts + self.machine.execute("podman pod create -p 9999:9999 -v /tmp:/app --name pod-3") + self.machine.execute("podman pod start pod-3") + + self.waitPodContainer("pod-3", []) + # Verify 1 port mapping + b.wait_in_text("#table-pod-3-title .pod-details-ports-btn", "1") + b.click("#table-pod-3-title .pod-details-ports-btn") + b.wait_in_text(".pf-v5-c-popover__content", "0.0.0.0:9999 → 9999/tcp") + # Verify 1 mount + b.wait_in_text("#table-pod-3-title .pod-details-volumes-btn", "1") + b.click("#table-pod-3-title .pod-details-volumes-btn") + b.wait_in_text(".pf-v5-c-popover__content", "/tmp ↔ /app") + + def testBasicSystem(self): + self._testBasic(True) + + b = self.browser + + # Test dropping and gaining privileges + b.set_val("#containers-containers-owner", "all") + self.filter_containers("all") + self.execute(False, "podman pod create --infra=false --name pod_user") + self.execute(True, "podman pod create --infra=false --name pod_system") + + checkImage(b, IMG_REGISTRY, "system") + checkImage(b, IMG_REGISTRY, "admin") + b.wait_visible("#containers-containers .pod-name:contains('pod_user')") + b.wait_visible("#containers-containers .pod-name:contains('pod_system')") + b.wait_visible("#containers-containers .container-name:contains('a')") + b.wait_visible("#containers-containers .container-name:contains('b')") + + # Drop privileges - all system things should disappear + if b.machine.system_before(293): + b.drop_superuser() + else: + drop_superuser(b) + b.wait_not_present("#containers-containers .pod-name:contains('pod_system')") + b.wait_not_present("#containers-containers .container-name:contains('a')") + b.wait_visible("#containers-containers .pod-name:contains('pod_user')") + b.wait_visible("#containers-containers .container-name:contains('b')") + # Checking images is harder but 
if there would be more than one this would fail + b.wait_visible(f"#containers-images:contains('{IMG_REGISTRY}')") + + # Owner select should disappear + b.wait_not_present("#containers-containers-owner") + + # Also user selection in image download should not be visible + b.click("#image-actions-dropdown") + b.click("button:contains(Download new image)") + + b.wait_visible('div.pf-v5-c-modal-box header:contains("Search for an image")') + b.wait_visible("div.pf-v5-c-modal-box footer button:contains(Download):disabled") + b.wait_not_present("#as-user") + b.click(".pf-v5-c-modal-box button:contains('Cancel')") + b.wait_not_present('div.pf-v5-c-modal-box header:contains("Search for an image")') + + # Gain privileges + if b.machine.system_before(293): + b.become_superuser(passwordless=self.machine.image == "rhel4edge") + else: + become_superuser(b, passwordless=self.machine.image == "rhel4edge") + + # We are notified that we can also start the system one + b.wait_in_text("#overview div.pf-v5-c-alert .pf-v5-c-alert__title", "System Podman service is also available") + b.click("#overview div.pf-v5-c-alert .pf-v5-c-alert__action > button:contains(Start)") + b.wait_not_present("#overview div.pf-v5-c-alert .pf-v5-c-alert__title") + + checkImage(b, IMG_REGISTRY, "system") + checkImage(b, IMG_REGISTRY, "admin") + b.wait_visible("#containers-containers .pod-name:contains('pod_user')") + b.wait_visible("#containers-containers .pod-name:contains('pod_system')") + b.wait_visible("#containers-containers .container-name:contains('a')") + b.wait_visible("#containers-containers .container-name:contains('b')") + + # Owner select should appear + b.wait_visible("#containers-containers-owner") + + # Also user selection in image download should be visible + b.click("#image-actions-dropdown") + b.click("button:contains(Download new image)") + b.wait_visible('div.pf-v5-c-modal-box header:contains("Search for an image")') + b.wait_visible("div.pf-v5-c-modal-box footer 
button:contains(Download):disabled") + b.wait_visible("#as-user") + b.click(".pf-v5-c-modal-box button:contains('Cancel')") + b.wait_not_present('div.pf-v5-c-modal-box header:contains("Search for an image")') + + # Check that when we filter only system stuff an then drop privileges that we show user stuff + b.set_val("#containers-containers-owner", "system") + b.wait_not_present("#containers-containers .pod-name:contains('pod_user')") + b.wait_not_present("#containers-containers .container-name:contains('b')") + # Checking images is harder but if there would be more than one this would fail + b.wait_visible(f"#containers-images:contains('{IMG_REGISTRY}')") + + if b.machine.system_before(293): + b.drop_superuser() + else: + drop_superuser(b) + + b.wait_visible("#containers-containers .pod-name:contains('pod_user')") + b.wait_visible("#containers-containers .container-name:contains('b')") + # Checking images is harder but if there would be more than one this would fail + b.wait_visible(f"#containers-images:contains('{IMG_REGISTRY}')") + + # Check showing of entrypoint + b.click("#containers-containers-create-container-btn") + b.click("#create-image-image-select-typeahead") + b.click(f'button.pf-v5-c-select__menu-item:contains("{IMG_REGISTRY}")') + b.wait_val("#run-image-dialog-command", '/etc/docker/registry/config.yml') + b.wait_text("#run-image-dialog-entrypoint", '/entrypoint.sh') + + # Deleting image will cleanup both command and entrypoint + b.click("button.pf-v5-c-select__toggle-clear") + b.wait_val("#run-image-dialog-command", '') + b.wait_not_present("#run-image-dialog-entrypoint") + + # Edited command will not be cleared + b.click("#create-image-image-select-typeahead") + b.click(f'button.pf-v5-c-select__menu-item:contains("{IMG_REGISTRY}")') + b.wait_val("#run-image-dialog-command", '/etc/docker/registry/config.yml') + b.set_input_text("#run-image-dialog-command", '/etc/docker/registry/config.yaml') + b.click("button.pf-v5-c-select__toggle-clear") + 
b.wait_not_present("#run-image-dialog-entrypoint") + b.wait_val("#run-image-dialog-command", '/etc/docker/registry/config.yaml') + + # Setting a new image will still keep the old command and not prefill it + b.click("#create-image-image-select-typeahead") + b.click(f'button.pf-v5-c-select__menu-item:contains({IMG_ALPINE})') + b.wait_visible("#run-image-dialog-pull-latest-image") + b.wait_val("#run-image-dialog-command", '/etc/docker/registry/config.yaml') + + b.logout() + + if self.machine.ostree_image: + self.machine.execute("echo foobar | passwd --stdin root") + self.write_file("/etc/ssh/sshd_config.d/99-root-password.conf", "PermitRootLogin yes", + post_restore_action="systemctl try-restart sshd") + self.machine.execute("systemctl try-restart sshd") + + # Test that when root is logged in we don't present "user" and "system" + self.login_and_go("/podman", user="root", enable_root_login=True) + b.wait_visible("#app") + + # `User Service is also available` banner should not be present + b.wait_not_present("#overview div.pf-v5-c-alert") + # There should not be any duplicate images listed + # The "busybox" and "alpine" images have been deleted by _testBasic. 
+ showImages(b) + self.waitNumImages(self.system_images_count - 2) + # There should not be 'owner' selector + b.wait_not_present("#containers-containers-owner") + + # Test the isSystem boolean for searching + # https://github.com/cockpit-project/cockpit-podman/pull/891 + b.click("#containers-containers-create-container-btn") + b.set_input_text("#create-image-image-select-typeahead", "registry") + b.wait_visible('button.pf-v5-c-select__menu-item:contains("registry")') + + def testBasicUser(self): + self._testBasic(False) + + def _testBasic(self, auth): + b = self.browser + + def clickDeleteImage(image_sel): + b.click(f'{image_sel} .pf-v5-c-menu-toggle') + b.click(image_sel + " button.btn-delete") + + if not auth: + self.allow_browser_errors("Failed to start system podman.socket.*") + + expected_ws = "" + if auth and self.machine.ostree_image: + expected_ws += "ws" + + self.login(auth) + + # Check all containers + if auth: + checkImage(b, IMG_ALPINE, "system") + checkImage(b, IMG_BUSYBOX, "system") + checkImage(b, IMG_REGISTRY, "system") + + checkImage(b, IMG_ALPINE, "admin") + checkImage(b, IMG_BUSYBOX, "admin") + checkImage(b, IMG_REGISTRY, "admin") + + # Check order of images + text = b.text("#containers-images table") + if auth: + # all user images before all system images + self.assertRegex(text, ".*admin.*system.*") + self.assertNotRegex(text, ".*system.*admin.*") + else: + self.assertNotIn("system", text) + # images are sorted alphabetically + self.assertRegex(text, ".*/test-alpine.*/test-busybox.*/test-registry") + + # build a dummy image so that the timestamp is "today" (for predictable pixel tests) + # ensure that CMD neither comes first (podman rmi leaves that layer otherwise) + # nor last (then the topmost layer does not match the image ID) + IMG_HELLO_LATEST = "localhost/test-hello:latest" + self.machine.execute(f"""set -eu; D={self.vm_tmpdir}/hello; + mkdir $D + printf 'FROM scratch\\nCOPY test.txt /\\nCMD ["/run.sh"]\\nCOPY test.txt /test2.txt\\n' > 
$D/Containerfile + echo hello > $D/test.txt""") + self.execute(auth, f"podman build -t {IMG_HELLO_LATEST} {self.vm_tmpdir}/hello") + + # prepare image ids - much easier to pick a specific container + images = {} + for image in self.execute(auth, "podman images --noheading --no-trunc").strip().split("\n"): + # sha256: + items = image.split() + images[f"{items[0]}:{items[1]}"] = items[2].split(":")[-1] + + # show image listing toggle + hello_sel = f"#containers-images tbody tr[data-row-id=\"{images[IMG_HELLO_LATEST]}{auth}\"]".lower() + b.wait_visible(hello_sel) + b.click(hello_sel + " td.pf-v5-c-table__toggle button") + b.click(hello_sel + " .pf-v5-c-menu-toggle") + b.wait_visible(hello_sel + " button.btn-delete") + b.wait_in_text("#containers-images tbody.pf-m-expanded tr .image-details:first-child", "Command/run.sh") + # Show history + b.click("#containers-images tbody.pf-m-expanded .pf-v5-c-tabs__list li:nth-child(2) button") + first_row_sel = "#containers-images .pf-v5-c-table__expandable-row.pf-m-expanded tbody:first-of-type" + b.wait_in_text(f"{first_row_sel} td[data-label=\"ID\"]", + images[IMG_HELLO_LATEST][:12]) + created_sel = f"{first_row_sel} td[data-label=\"Created\"]" + b.wait_in_text(f"{created_sel}", "today at") + # topmost (last) layer + created_sel = f"{first_row_sel} td[data-label=\"Created by\"]" + b.wait_in_text(f"{created_sel}", "COPY") + b.wait_in_text(f"{created_sel}", "in /test2.txt") + # initial (first) layer + last_row_sel = "#containers-images .pf-v5-c-table__expandable-row.pf-m-expanded tbody:last-of-type" + b.wait_in_text(f"{last_row_sel} td[data-label=\"Created by\"]", "COPY") + + self.execute(auth, f"podman rmi {IMG_HELLO_LATEST}") + b.wait_not_present(hello_sel) + + # make sure no running containers shown; on CoreOS there's the cockpit/ws container + self.filter_containers('running') + if auth and self.machine.ostree_image: + self.waitContainerRow("ws") + else: + b.wait_in_text("#containers-containers", "No running containers") + + 
if auth: + # Run two containers as system (first exits immediately) + self.execute(auth, f"podman run -d --name test-sh-system --stop-timeout 0 {IMG_ALPINE} sh") + self.execute(auth, f"podman run -d --name swamped-crate-system --stop-timeout 0 {IMG_BUSYBOX} sleep 1000") + + # Run two containers as admin (first exits immediately) + self.execute(False, f"podman run -d --name test-sh-user --stop-timeout 0 {IMG_ALPINE} sh") + self.execute(False, f"podman run -d --name swamped-crate-user --stop-timeout 0 {IMG_BUSYBOX} sleep 1000") + + # Test owner filtering + if auth: + self.waitNumImages(self.user_images_count + self.system_images_count) + self.waitNumContainers(2, True) + + def verify_system(): + self.waitNumImages(self.system_images_count) + b.wait_in_text("#containers-images", "system") + self.waitNumContainers(1, True) + b.wait_in_text("#containers-containers", "system") + + b.set_val("#containers-containers-owner", "system") + verify_system() + b.set_val("#containers-containers-owner", "all") + b.go("#/?owner=system") + verify_system() + + def verify_user(): + self.waitNumImages(self.user_images_count) + b.wait_in_text("#containers-images", "admin") + self.waitNumContainers(1, False) + b.wait_in_text("#containers-containers", "admin") + + b.set_val("#containers-containers-owner", "user") + verify_user() + b.set_val("#containers-containers-owner", "all") + b.go("#/?owner=user") + verify_user() + + b.set_val("#containers-containers-owner", "all") + self.waitNumImages(self.user_images_count + self.system_images_count) + self.waitNumContainers(2, True) + else: # No 'owner' selector when not privileged + b.wait_not_present("#containers-containers-owner") + + user_containers = {} + system_containers = {} + for container in self.execute(True, "podman ps --all --no-trunc").strip().split("\n")[1:]: + # + items = container.split() + system_containers[items[-1]] = items[0] + for container in self.execute(False, "podman ps --all --no-trunc").strip().split("\n")[1:]: + # + 
items = container.split() + user_containers[items[-1]] = items[0] + + # running busybox shown + if auth: + self.waitContainerRow("swamped-crate-system") + self.waitContainer(system_containers["swamped-crate-system"], True, name='swamped-crate-system', + image=IMG_BUSYBOX, cmd="sleep 1000", state='Running') + + self.waitContainerRow("swamped-crate-user") + self.waitContainer(user_containers["swamped-crate-user"], False, name='swamped-crate-user', + image=IMG_BUSYBOX, cmd="sleep 1000", state='Running') + + # exited alpine not shown + b.wait_not_in_text("#containers-containers", "alpine") + + # show all containers and check status + b.go("#/?container=all") + + # exited alpine under everything list + b.wait_visible("#containers-containers") + if auth: + self.waitContainer(system_containers["test-sh-system"], True, name='test-sh-system', image=IMG_ALPINE, + cmd='sh', state=NOT_RUNNING) + + self.waitContainer(user_containers["test-sh-user"], False, name='test-sh-user', image=IMG_ALPINE, + cmd='sh', state=NOT_RUNNING) + + self.performContainerAction("swamped-crate-user", "Delete") + self.confirm_modal("Cancel") + + if auth: + self.performContainerAction("swamped-crate-system", "Delete") + self.confirm_modal("Cancel") + + # Checked order of containers + expected = ["swamped-crate-user", "test-sh-user"] + if auth: + expected.extend(["swamped-crate-system", "test-sh-system"]) + expected.extend([expected_ws]) + b.wait_collected_text("#containers-containers .container-name", ''.join(sorted(expected))) + + # show running container + self.filter_containers('running') + if auth: + self.waitContainer(system_containers["swamped-crate-system"], True, name='swamped-crate-system', + image=IMG_BUSYBOX, cmd="sleep 1000", state='Running') + self.waitContainer(user_containers["swamped-crate-user"], False, name='swamped-crate-user', + image=IMG_BUSYBOX, cmd="sleep 1000", state='Running') + # check exited alpine not in running list + b.wait_not_in_text("#containers-containers", "alpine") + 
+ # delete running container busybox using force delete + if auth: + self.performContainerAction("swamped-crate-system", "Delete") + self.confirm_modal("Force delete") + self.waitContainerRow("swamped-crate-system", False) + + self.filter_containers("all") + + self.performContainerAction("swamped-crate-user", "Delete") + self.confirm_modal("Force delete") + self.waitContainerRow("swamped-crate-user", False) + + self.waitContainerRow("test-sh-user") + self.performContainerAction("test-sh-user", "Delete") + self.confirm_modal("Delete") + b.wait_not_in_text("#containers-containers", "test-sh-user") + + if auth: + self.waitContainerRow("test-sh-system") + self.performContainerAction("test-sh-system", "Delete") + self.confirm_modal("Delete") + b.wait_not_in_text("#containers-containers", "test-sh-system") + + # delete image busybox that hasn't been used + # First try to just untag and then remove with more tags + self.execute(auth, f"podman tag {IMG_BUSYBOX} {IMG_BUSYBOX}:1") + self.execute(auth, f"podman tag {IMG_BUSYBOX} {IMG_BUSYBOX}:2") + self.execute(auth, f"podman tag {IMG_BUSYBOX} {IMG_BUSYBOX}:3") + self.execute(auth, f"podman tag {IMG_BUSYBOX} {IMG_BUSYBOX}:4") + + busybox_sel = f"#containers-images tbody tr[data-row-id=\"{images[IMG_BUSYBOX_LATEST]}{auth}\"]".lower() + b.click(busybox_sel + " td.pf-v5-c-table__toggle button") + + b.wait_in_text(busybox_sel + " + tr", f"{IMG_BUSYBOX}:1") + b.wait_in_text(busybox_sel + " + tr", f"{IMG_BUSYBOX}:2") + b.wait_in_text(busybox_sel + " + tr", f"{IMG_BUSYBOX}:3") + b.wait_in_text(busybox_sel + " + tr", f"{IMG_BUSYBOX}:4") + + clickDeleteImage(busybox_sel) + self.assertTrue(b.get_checked(f".pf-v5-c-check__input[aria-label='{IMG_BUSYBOX_LATEST}']")) + b.set_checked(f".pf-v5-c-check__input[aria-label='{IMG_BUSYBOX}:1']", True) + b.set_checked(f".pf-v5-c-check__input[aria-label='{IMG_BUSYBOX}:3']", True) + b.set_checked(f".pf-v5-c-check__input[aria-label='{IMG_BUSYBOX_LATEST}']", False) + self.confirm_modal("Delete") + 
b.wait_in_text(busybox_sel + " + tr", f"{IMG_BUSYBOX_LATEST}") + b.wait_in_text(busybox_sel + " + tr", f"{IMG_BUSYBOX}:2") + b.wait_not_in_text(busybox_sel + " + tr", f"{IMG_BUSYBOX}:1") + b.wait_not_in_text(busybox_sel + " + tr", f"{IMG_BUSYBOX}:3") + + clickDeleteImage(busybox_sel) + b.click("#delete-all") + self.assertTrue(b.get_checked(f".pf-v5-c-check__input[aria-label='{IMG_BUSYBOX_LATEST}']")) + self.assertTrue(b.get_checked(f".pf-v5-c-check__input[aria-label='{IMG_BUSYBOX}:2']")) + self.assertTrue(b.get_checked(f".pf-v5-c-check__input[aria-label='{IMG_BUSYBOX}:4']")) + self.confirm_modal("Delete") + self.confirm_modal("Force delete") + b.wait_not_present(busybox_sel) + + # Check that we correctly show networking information + # Rootless don't have this info + if auth: + self.execute(auth, f"podman run -dt --name net_check --stop-timeout 0 {IMG_ALPINE}") + self.toggleExpandedContainer("net_check") + b.wait_in_text(".pf-m-expanded .container-details-networking", + self.execute(auth, """ + podman inspect --format '{{.NetworkSettings.Gateway}}' net_check""").strip()) + b.wait_in_text(".pf-m-expanded .container-details-networking", + self.execute(auth, """ + podman inspect --format '{{.NetworkSettings.IPAddress}}' net_check""").strip()) + b.wait_in_text(".pf-m-expanded .container-details-networking", + self.execute(auth, """ + podman inspect --format '{{.NetworkSettings.MacAddress}}' net_check""").strip()) + self.execute(auth, "podman stop net_check") + b.wait(lambda: self.execute(True, "podman ps --all | grep -e net_check -e Exited")) + self.toggleExpandedContainer("net_check") + sha = self.execute(auth, "podman inspect --format '{{.Id}}' net_check").strip() + self.waitContainer(sha, auth, state='Exited') + + # delete image alpine that has been used by a container + self.execute(auth, f"podman run -d --name test-sh4 --stop-timeout 0 {IMG_ALPINE} sh") + # our pixel test expects both containers to be in state "Exited" + sha = self.execute(auth, "podman inspect 
--format '{{.Id}}' test-sh4").strip() + self.waitContainer(sha, auth, name="test-sh4", state='Exited') + if auth: + b.assert_pixels('#app', "overview", ignore=[".ignore-pixels"], skip_layouts=["rtl", "mobile"]) + alpine_sel = f"#containers-images tbody tr[data-row-id=\"{images[IMG_ALPINE_LATEST]}{auth}\"]".lower() + b.wait_visible(alpine_sel) + b.click(alpine_sel + " td.pf-v5-c-table__toggle button") + clickDeleteImage(alpine_sel) + self.confirm_modal("Delete") + self.confirm_modal("Force delete") + b.wait_not_present(alpine_sel) + + b.wait_collected_text("#containers-containers .container-name", expected_ws) + self.execute(auth, f"podman run -d --name c --stop-timeout 0 {IMG_REGISTRY} sh") + b.wait_collected_text("#containers-containers .container-name", "c" + expected_ws) + self.execute(auth, f"podman run -d --name a --stop-timeout 0 {IMG_REGISTRY} sh") + b.wait_collected_text("#containers-containers .container-name", "ac" + expected_ws) + + self.execute(False, f"podman run -d --name b --stop-timeout 0 {IMG_REGISTRY} sh") + if auth: + b.wait_collected_text("#containers-containers .container-name", "abc" + expected_ws) + self.execute(False, f"podman run -d --name doremi --stop-timeout 0 {IMG_REGISTRY} sh") + b.wait_collected_text("#containers-containers .container-name", "abcdoremi" + expected_ws) + b.wait(lambda: self.getContainerAttr("doremi", "State") in NOT_RUNNING) + else: + b.wait_collected_text("#containers-containers .container-name", "abc") + + # Test intermediate images + b.wait_not_present(".listing-action") + tmpdir = self.execute(auth, "mktemp -d").strip() + self.execute(auth, f"echo 'FROM {IMG_REGISTRY}\nRUN ls' > {tmpdir}/Dockerfile") + self.execute(auth, f"podman build {tmpdir}") + + b.wait_not_in_text("#containers-images", ":") + b.click(".listing-action button:contains('Show intermediate images')") + b.wait_in_text("#containers-images", ":") + b.wait_in_text("#containers-images tbody:last-child td[data-label=Created]", "today at") + + 
b.click(".listing-action button:contains('Hide intermediate images')") + b.wait_not_in_text("#containers-images", ":") + + # Intermediate images are not shown in create container dialog + b.click("#containers-containers-create-container-btn") + b.wait_visible('div.pf-v5-c-modal-box header:contains("Create container")') + b.click("#create-image-image-select-typeahead") + b.wait_visible(f".pf-v5-c-select__menu-item:contains('{IMG_REGISTRY}')") + b.wait_not_present(".pf-v5-c-select__menu-item:contains('none')") + b.click(".pf-v5-c-modal-box .btn-cancel") + b.wait_not_present(".pf-v5-c-modal-box") + + # Delete intermediate images + intermediate_image_sel = "#containers-images tbody:last-child:contains(':')" + b.click(".listing-action button:contains('Show intermediate images')") + clickDeleteImage(intermediate_image_sel) + self.confirm_modal("Delete") + b.wait_not_present(intermediate_image_sel) + + # Create intermediate image and use it in a container + tmpdir = self.execute(auth, "mktemp -d").strip() + self.execute(auth, f"echo 'FROM {IMG_REGISTRY}\nRUN ls' > {tmpdir}/Dockerfile") + IMG_INTERMEDIATE = 'localhost/test-intermediate' + self.execute(auth, f"podman build -t {IMG_INTERMEDIATE} {tmpdir}") + b.click(f'#containers-images tbody tr:contains("{IMG_INTERMEDIATE}") .ct-container-create') + b.wait_visible('div.pf-v5-c-modal-box header:contains("Create container")') + b.click("#create-image-create-btn") + b.wait_not_present("div.pf-v5-c-modal-box") + self.waitContainerRow(IMG_INTERMEDIATE) + + # Delete intermediate image which is in use + self.execute(auth, f"podman untag {IMG_INTERMEDIATE}") + clickDeleteImage(intermediate_image_sel) + self.confirm_modal("Delete") + self.confirm_modal("Force delete") + b.wait_not_in_text("#containers-images", ":") + b.wait_not_in_text("#containers-containers", IMG_INTERMEDIATE) + + def testCommitUser(self): + self._testCommit(False) + + def testCommitSystem(self): + self._testCommit(True) + + def _testCommit(self, auth): + b = 
self.browser + self.allow_browser_errors("Failed to commit container .* repository name must be lowercase") + + self.login(auth) + + # run a container (will exit immediately) and test the display of commit modal + self.execute(auth, f"podman run -d --name test-sh0 --stop-timeout 0 {IMG_ALPINE} sh -c 'ls -a'") + + self.filter_containers("all") + self.waitContainerRow("test-sh0") + self.toggleExpandedContainer("test-sh0") + + self.performContainerAction("test-sh0", "Commit") + b.wait_visible(".pf-v5-c-modal-box") + + b.wait_in_text(".pf-v5-c-modal-box__description", "state of the test-sh0 container") + + # Empty name yields warning + b.click("button:contains(Commit)") + b.wait_text("#commit-dialog-image-name-helper", "Image name is required") + b.wait_visible("button:contains(Commit):disabled") + b.wait_visible("button:contains('Force commit')") + # Warning should be cleaned when updating name + b.set_input_text("#commit-dialog-image-name", "foobar") + b.wait_not_present("button:contains('Force commit')") + b.wait_not_present("#commit-dialog-image-name-helper") + + # Existing name yields warning + b.set_input_text("#commit-dialog-image-name", IMG_ALPINE) + b.click("button:contains(Commit)") + b.wait_text("#commit-dialog-image-name-helper", "Image name is not unique") + b.wait_visible("button:contains(Commit):disabled") + b.wait_visible("button:contains('Force commit')") + # Warning should be cleaned when updating tag + b.set_input_text("#commit-dialog-image-tag", "foobar") + b.wait_not_present("button:contains('Force commit')") + b.wait_not_present("#commit-dialog-image-name-helper") + + # Check failing commit + b.set_input_text("#commit-dialog-image-name", "TEST") + b.click("button:contains(Commit)") + b.wait_in_text(".pf-v5-c-alert", "Failed to commit container test-sh0") + b.wait_in_text(".pf-v5-c-alert", "repository name must be lowercase") + + # Test cancel + self.confirm_modal("Cancel") + + # Force commit empty container + 
self.performContainerAction("test-sh0", "Commit") + b.wait_visible(".pf-v5-c-modal-box") + # We prefill command + b.wait_val("#commit-dialog-command", 'sh -c "ls -a"') + # Test docker format + b.set_checked("#commit-dialog-docker", True) + b.click("button:contains(Commit)") + self.confirm_modal("Force commit") + + # don't use waitNumImages() here, as we want to include anonymous images + def waitImageCount(expected): + if auth: + expected += self.system_images_count + + b.wait_in_text("#containers-images", f"{expected} images") + + waitImageCount(self.user_images_count + 1) + image_id = self.execute(auth, "podman images --sort created --format '{{.Id}}' | head -n 1").strip() + manifest_type = self.execute(auth, "podman inspect --format '{{.ManifestType}}' " + image_id).strip() + cmd = self.execute(auth, "podman inspect --format '{{.Config.Cmd}}' " + image_id).strip() + self.assertIn("docker.distribution.manifest", manifest_type) + self.assertEqual("[sh -c ls -a]", cmd) + + # Commit with name, tag, author and edited command + self.performContainerAction("test-sh0", "Commit") + b.wait_visible(".pf-v5-c-modal-box") + b.set_input_text("#commit-dialog-image-name", "newname") + b.set_input_text("#commit-dialog-image-tag", "24") + b.set_input_text("#commit-dialog-author", "MM") + b.set_input_text("#commit-dialog-command", "sh -c 'ps'") + + if auth: + b.assert_pixels(".pf-v5-c-modal-box", "commit", skip_layouts=["rtl"]) + + self.confirm_modal("Commit") + + waitImageCount(self.user_images_count + 2) + self.assertEqual(self.execute(auth, "podman inspect --format '{{.Author}}' newname:24").strip(), "MM") + self.assertEqual(self.execute(auth, "podman inspect --format '{{.Config.Cmd}}' newname:24").strip(), + "[sh -c ps]") + self.assertIn("vnd.oci.image.manifest", + self.execute(auth, "podman inspect --format '{{.ManifestType}}' newname:24").strip()) + + # Test commit of running container + self.execute(auth, f"podman run -d --name test-sh2 --stop-timeout 0 {IMG_BUSYBOX} sleep 
1000") + self.performContainerAction("test-sh2", "Commit") + b.wait_visible(".pf-v5-c-modal-box") + b.set_input_text("#commit-dialog-image-name", "newname") + self.confirm_modal("Commit") + waitImageCount(self.user_images_count + 3) + self.assertEqual(self.execute(auth, + "podman inspect --format '{{.Config.Cmd}}' newname:latest").strip(), + "[sleep 1000]") + + # Test commit of running container with pause (also conflicting name through :latest) + # This only works on rootless with cgroupsv2 + if auth or self.has_cgroupsV2: + self.performContainerAction("test-sh2", "Commit") + b.wait_visible(".pf-v5-c-modal-box") + b.set_input_text("#commit-dialog-image-name", "newname") + b.set_checked("#commit-dialog-pause", True) + b.click("button:contains(Commit)") + self.confirm_modal("Force commit") + waitImageCount(self.user_images_count + 4) + + def testDownloadImage(self): + b = self.browser + execute = self.execute + + def prepare(): + # Create and start registry containers + self.execute(True, f"podman run -d -p 5000:5000 --name registry --stop-timeout 0 {IMG_REGISTRY}") + self.execute(True, f"podman run -d -p 6000:5000 --name registry_alt --stop-timeout 0 {IMG_REGISTRY}") + # Add local insecure registry into registries conf + self.machine.write("/etc/containers/registries.conf", REGISTRIES_CONF) + self.execute(True, "systemctl stop podman.service") + # Push busybox image to the local registries + self.execute(True, + f"podman tag {IMG_BUSYBOX} localhost:5000/my-busybox; podman push localhost:5000/my-busybox") + self.execute(True, + f"podman tag {IMG_BUSYBOX} localhost:6000/my-busybox; podman push localhost:6000/my-busybox") + # Untag busybox image which duplicates the image we are about to download + self.execute(True, f"podman rmi -f {IMG_BUSYBOX} localhost:5000/my-busybox localhost:6000/my-busybox") + self.execute(False, f"podman rmi -f {IMG_BUSYBOX}") + + class DownloadImageDialog(): + def __init__(self, test_obj, imageName, imageTag=None, user="system"): + 
self.imageName = imageName + self.imageTag = imageTag + self.user = user + self.imageSha = "" + self.assertTrue = test_obj.assertTrue + + def openDialog(self): + # Open get new image modal + b.click("#image-actions-dropdown") + b.click("button:contains(Download new image)") + b.wait_visible('div.pf-v5-c-modal-box header:contains("Search for an image")') + b.wait_visible("div.pf-v5-c-modal-box footer button:contains(Download):disabled") + + return self + + def fillDialog(self): + # Search for image specified with self.imageName and self.imageTag + b.click(f"#{self.user}") + b.set_val('#registry-select', "localhost:5000") + # HACK: Sometimes the value is not shown fully. FIXME + b.set_input_text("#search-image-dialog-name", self.imageName, value_check=False) + if self.imageTag: + b.set_input_text(".image-tag-entry input", self.imageTag) + + return self + + def selectImageAndDownload(self): + # Select and download the self.imageName image + b.wait_visible(f".pf-v5-c-data-list .image-name:contains({self.imageName})") + b.click(f".pf-v5-c-data-list .image-name:contains({self.imageName})") + b.wait_visible("div.pf-v5-c-modal-box footer button:contains(Download):not([disabled])") + b.click("div.pf-v5-c-modal-box footer button:contains(Download)") + b.wait_not_present("div.pf-v5-c-modal-box") + + return self + + def expectDownloadErrorForNonExistingTag(self): + title = f"Danger alert:Failed to download image localhost:5000/{self.imageName}:{self.imageTag}" + b.wait_visible(f'h4.pf-v5-c-alert__title:contains("{title}")') + + return self + + def expectSearchErrorForNotExistingImage(self): + b.wait_visible(f".pf-v5-c-modal-box__body:contains(No results for {self.imageName})") + b.click(".pf-v5-c-modal-box button.btn-cancel") + b.wait_not_present(".pf-v5-c-modal-box__body") + + return self + + def expectDownloadSuccess(self): + # Confirm that the modal dialog is not open anymore + b.wait_not_present('div.pf-v5-c-modal-box') + # Confirm that the image got downloaded + 
checkImage(b, + f"localhost:5000/{self.imageName}:{self.imageTag or 'latest'}", + "system" if self.user == "system" else "admin") + + # Confirm that no error has happened + b.wait_not_present('h4.pf-v5-c-alert__title:contains("Failed to download image")') + + # Find out this image ID + container_name = f"localhost:5000/{self.imageName}:{self.imageTag or 'latest'}" + self.imageSha = execute(self.user == "system", + f"podman inspect --format '{{{{.Id}}}}' {container_name}").strip() + + return self + + def deleteImage(self, force=False, another=None): + imageTagSuffix = ":" + (self.imageTag or 'latest') + + # Select the image row + + # show image listing toggle + imageId = f"{self.imageSha}{'true' if self.user == 'system' else 'false'}" + sel = f"#containers-images tbody tr[data-row-id=\"{imageId}\"]" + b.wait_visible(sel) + b.click(sel + " td.pf-v5-c-table__toggle button") + + # Click the delete icon on the image row + b.click(sel + " .pf-v5-c-menu-toggle") + b.click(sel + ' button.btn-delete') + + if another: + b.click("#delete-all") + sel = f".pf-v5-c-check__input[aria-label='localhost:5000/{self.imageName}{imageTagSuffix}']" + self.assertTrue(b.get_checked(sel)) + self.assertTrue(b.get_checked(f".pf-v5-c-check__input[aria-label='{another}']")) + b.click("#delete-all") + b.wait_visible("#btn-img-delete:disabled") + + b.set_checked( + f".pf-v5-c-check__input[aria-label='localhost:5000/{self.imageName}{imageTagSuffix}']", True) + b.set_checked(f".pf-v5-c-check__input[aria-label='{another}']", True) + + # Confirm deletion in the delete dialog + b.click(".pf-v5-c-modal-box #btn-img-delete") + + if force: + # Confirm force delete + b.click(".pf-v5-c-modal-box button:contains('Force delete')") + + b.wait_not_present(sel) + + return self + + prepare() + + self.login() + + # Test registries + b.click("#image-actions-dropdown") + b.click("button:contains(Download new image)") + b.wait_visible('div.pf-v5-c-modal-box header:contains("Search for an image")') + # HACK: 
Sometimes the value is not shown fully. FIXME + b.set_input_text("#search-image-dialog-name", "my-busybox", value_check=False) + + b.wait_visible(".pf-v5-c-data-list .image-name:contains('localhost:5000/my-busybox')") + b.wait_visible(".pf-v5-c-data-list .image-name:contains('localhost:6000/my-busybox')") + b.assert_pixels(".podman-search", "download", skip_layouts=["rtl"]) + + b.set_val('#registry-select', "localhost:6000") + b.wait_not_present(".pf-v5-c-data-list .image-name:contains('localhost:5000/my-busybox')") + b.wait_visible(".pf-v5-c-data-list .image-name:contains('localhost:6000/my-busybox')") + b.click(".pf-v5-c-modal-box button:contains('Cancel')") + b.wait_not_present('div.pf-v5-c-modal-box') + + dialog0 = DownloadImageDialog(self, imageName='my-busybox', user="system") + dialog0.openDialog() \ + .fillDialog() \ + .selectImageAndDownload() \ + .expectDownloadSuccess() + dialog0.deleteImage() + + dialog1 = DownloadImageDialog(self, imageName='my-busybox', user="user") + dialog1.openDialog() \ + .fillDialog() \ + .selectImageAndDownload() \ + .expectDownloadSuccess() + # test recognition/deletion of multiple image tags + second_tag = "localhost/copybox:latest" + self.execute(False, f"podman tag localhost:5000/my-busybox {second_tag}") + # expand details + b.click("#containers-images tr:contains('my-busybox') td.pf-v5-c-table__toggle button") + b.wait_in_text("#containers-images tbody.pf-m-expanded tr .image-details", second_tag) + dialog1.deleteImage(True, another=second_tag) + + dialog = DownloadImageDialog(self, imageName='my-busybox', imageTag='latest', user="system") + dialog.openDialog() \ + .fillDialog() \ + .selectImageAndDownload() \ + .expectDownloadSuccess() \ + .deleteImage() + + dialog = DownloadImageDialog(self, imageName='foobar') + dialog.openDialog() \ + .fillDialog() \ + .expectSearchErrorForNotExistingImage() + + dialog = DownloadImageDialog(self, imageName='my-busybox', imageTag='foobar') + dialog.openDialog() \ + .fillDialog() \ + 
.selectImageAndDownload() \ + .expectDownloadErrorForNonExistingTag() + + def testLifecycleOperationsUser(self): + self._testLifecycleOperations(False) + + def testLifecycleOperationsSystem(self): + self._testLifecycleOperations(True) + + def _testLifecycleOperations(self, auth): + b = self.browser + + if not auth: + self.allow_browser_errors("Failed to start system podman.socket.*") + + self.login() + self.filter_containers('all') + + # run a container + self.execute(auth, f""" + podman run -d --name swamped-crate --stop-timeout 0 {IMG_BUSYBOX} sh -c 'echo 123; sleep infinity'; + podman stop swamped-crate""") + b.wait(lambda: self.execute(auth, "podman ps --all | grep -e swamped-crate -e Exited")) + + b.wait_visible("#containers-containers") + container_sha = self.execute(auth, "podman inspect --format '{{.Id}}' swamped-crate").strip() + self.waitContainer(container_sha, auth, name='swamped-crate', image=IMG_BUSYBOX, + state='Exited', owner="system" if auth else "admin") + b.click("#containers-containers tbody tr:contains('swamped-crate') .pf-v5-c-menu-toggle") + + if not auth: + # Checkpoint/restore is not supported on user containers yet - the related buttons should not be shown + # Check that the restore option is not present + b.wait_not_present(self.getContainerAction('swamped-crate', 'Restore')) + + # Health check is not set up + b.wait_not_present(self.getContainerAction('swamped-crate', 'Run health check')) + + b.click("#containers-containers tbody tr:contains('swamped-crate') .pf-v5-c-menu-toggle") + + # Start the container + self.performContainerAction(IMG_BUSYBOX, "Start") + + self.waitContainer(container_sha, auth, name='swamped-crate', image=IMG_BUSYBOX, + state='Running', owner="system" if auth else "admin") + + def get_cpu_usage(sel): + cpu = self.getContainerAttr(sel, "CPU") + self.assertIn('%', cpu) + # If it not a number it will raise ValueError which is what we want to know + return float(cpu[:-1]) + + # Check we show usage + b.wait(lambda: 
self.getContainerAttr(IMG_BUSYBOX, "CPU") != "") + b.wait(lambda: self.getContainerAttr(IMG_BUSYBOX, "Memory") != "") + memory = self.getContainerAttr(IMG_BUSYBOX, "Memory") + if auth or self.has_cgroupsV2: + cpu = get_cpu_usage(IMG_BUSYBOX) + + self.assertIn('/', memory) + numbers = memory.split('/') + self.assertTrue(numbers[0].strip().replace('.', '', 1).isdigit()) + full = numbers[1].strip().split() + self.assertTrue(full[0].replace('.', '', 1).isdigit()) + self.assertIn(full[1], ["GB", "MB"]) + + # Test that the value is updated dynamically + self.execute(auth, "podman exec -i swamped-crate sh -c 'dd bs=1024 < /dev/urandom > /dev/null &'") + b.wait(lambda: get_cpu_usage(IMG_BUSYBOX) > cpu) + self.execute(auth, "podman exec swamped-crate sh -c 'pkill dd'") + else: + # No support for CGroupsV2 + self.assertEqual(self.getContainerAttr(IMG_BUSYBOX, "CPU"), "n/a") + self.assertEqual(memory, "n/a") + + # Restart the container; there is no steady state change in the visible UI, so look for + # a changed data-started-at attribute + old_start = self.getStartTime("swamped-crate", auth=auth) + b.wait_in_text(f'#containers-containers tr[data-started-at="{old_start}"]', "swamped-crate") + self.performContainerAction(IMG_BUSYBOX, "Force restart") + new_start = self.waitRestart("swamped-crate", old_start, auth=auth) + b.wait_in_text(f'#containers-containers tr[data-started-at="{new_start}"]', "swamped-crate") + self.waitContainer(container_sha, auth, name='swamped-crate', image=IMG_BUSYBOX, state='Running') + + self.waitContainerRow(IMG_BUSYBOX) + if not auth: + # Check that the checkpoint option is not present for rootless + b.click(f"#containers-containers tbody tr:contains('{IMG_BUSYBOX}') .pf-v5-c-menu-toggle") + b.wait_visible(self.getContainerAction(IMG_BUSYBOX, 'Force stop')) + b.wait_not_present(self.getContainerAction(IMG_BUSYBOX, 'Checkpoint')) + b.click(f"#containers-containers tbody tr:contains('{IMG_BUSYBOX}') .pf-v5-c-menu-toggle") + # Stop the container + 
self.performContainerAction(IMG_BUSYBOX, "Force stop") + + self.waitContainer(container_sha, auth, name='swamped-crate', image=IMG_BUSYBOX) + b.wait(lambda: self.getContainerAttr("swamped-crate", "State") in NOT_RUNNING) + b.wait(lambda: self.getContainerAttr("swamped-crate", "CPU") == "") + b.wait(lambda: self.getContainerAttr("swamped-crate", "Memory") == "") + + # Check that container details are not lost when the container is stopped + self.toggleExpandedContainer("swamped-crate") + b.click(".pf-m-expanded button:contains('Integration')") + b.wait_visible(f'#containers-containers tr:contains("{IMG_BUSYBOX}") dt:contains("Environment variables")') + + # Check that console reconnects when container starts + b.click(".pf-m-expanded button:contains('Console')") + b.wait_text(".pf-m-expanded .pf-v5-c-empty-state", "Container is not running") + self.performContainerAction("swamped-crate", "Start") + b.wait_in_text(".pf-m-expanded .xterm-accessibility-tree", "/ # ") + b.focus(".pf-m-expanded .xterm-helper-textarea") + b.key_press('clear\r') + b.wait_not_in_text(".pf-m-expanded .xterm-accessibility-tree", "clear") + b.wait_text(".pf-m-expanded .xterm-accessibility-tree > div:nth-child(1)", "/ # ") + b.key_press('echo hello\r') + b.wait_text(".pf-m-expanded .xterm-accessibility-tree > div:nth-child(2)", "hello") + b.wait_text(".pf-m-expanded .xterm-accessibility-tree > div:nth-child(3)", "/ # ") + self.performContainerAction("swamped-crate", "Stop") + b.wait_text(".pf-m-expanded .xterm-accessibility-tree > div:nth-child(3)", "/ # disconnected ") + sha = self.execute(auth, "podman inspect --format '{{.Id}}' swamped-crate").strip() + self.waitContainer(sha, auth, name='swamped-crate', image=IMG_BUSYBOX, state=NOT_RUNNING) + self.performContainerAction("swamped-crate", "Start") + self.waitContainer(sha, auth, state='Running') + b.wait_text(".pf-m-expanded .xterm-accessibility-tree > div:nth-child(1)", "/ # ") + b.wait_not_in_text(".pf-m-expanded .xterm-accessibility-tree > 
div:nth-child(2)", "hello") + + # Check that logs reconnect when container starts + b.click(".pf-m-expanded button:contains('Logs')") + self.performContainerAction("swamped-crate", "Stop") + self.waitContainer(sha, auth, state=NOT_RUNNING) + b.wait_in_text(".pf-m-expanded .container-logs .xterm-accessibility-tree", "Streaming disconnected") + self.performContainerAction("swamped-crate", "Start") + b.wait_in_text(".pf-m-expanded .container-logs .xterm-accessibility-tree", "Streaming disconnected123") + + def testCheckpointRestore(self): + m = self.machine + b = self.browser + + self.login() + self.filter_containers('all') + + if not self.has_criu: + # On cgroupsv1 systems just check that we get expected error messages + + # Run a container + self.execute(True, f"podman run -dit --name swamped-crate --stop-timeout 0 {IMG_BUSYBOX} sh") + b.wait(lambda: self.execute(True, "podman ps --all | grep -e swamped-crate")) + + # Checkpoint the container + self.performContainerAction(IMG_BUSYBOX, "Checkpoint") + b.set_checked('.pf-v5-c-modal-box input#checkpoint-dialog-keep', True) + b.set_checked('.pf-v5-c-modal-box input#checkpoint-dialog-tcpEstablished', True) + b.click('.pf-v5-c-modal-box button:contains(Checkpoint)') + b.wait_not_present('.modal_dialog') + + def criu_alert(): + text = b.text(".pf-v5-c-alert.pf-m-danger > .pf-v5-c-alert__description").lower() + return "checkpoint/restore requires at least criu" in text or "failed to check for criu" in text + b.wait(criu_alert) + return + + # Run a container + mac_address = '92:d0:c6:0a:29:38' + self.execute(True, f""" + podman run -dit --mac-address {mac_address} --name swamped-crate --stop-timeout 0 {IMG_BUSYBOX} sh; + podman stop swamped-crate + """) + b.wait(lambda: self.execute(True, "podman ps --all | grep -e swamped-crate -e Exited")) + + # Check that the restore option is not present (i.e. 
start is a regular button) + b.click(f"#containers-containers tbody tr:contains('{IMG_BUSYBOX}') .pf-v5-c-menu-toggle") + b.wait_not_present(self.getContainerAction(IMG_BUSYBOX, 'Restore')) + b.click(f"#containers-containers tbody tr:contains('{IMG_BUSYBOX}') .pf-v5-c-menu-toggle") + + # Start the container + self.performContainerAction("swamped-crate", "Start") + b.wait(lambda: self.getContainerAttr("swamped-crate", "State") in 'Running') + + self.toggleExpandedContainer("swamped-crate") + b.wait_visible(".pf-m-expanded button:contains('Details')") + b.wait_not_present(f'#containers-containers tr:contains("{IMG_BUSYBOX}") dt:contains("Latest checkpoint")') + + # Checkpoint the container + self.performContainerAction("swamped-crate", "Checkpoint") + b.set_checked('.pf-v5-c-modal-box input#checkpoint-dialog-keep', True) + b.set_checked('.pf-v5-c-modal-box input#checkpoint-dialog-tcpEstablished', True) + b.click('.pf-v5-c-modal-box button:contains(Checkpoint)') + + with b.wait_timeout(300): + b.wait_not_present(".pf-v5-c-modal-box") + + if self.has_criu: + b.wait(lambda: self.getContainerAttr("swamped-crate", "State") in NOT_RUNNING) + b.wait_in_text( + f'#containers-containers tr:contains("{IMG_BUSYBOX}") dt:contains("Latest checkpoint") + dd', + 'today at' + ) + else: + # expect proper error message + b.wait_in_text(".pf-v5-c-alert.pf-m-danger", "Failed to checkpoint container swamped-crate") + b.wait(lambda: "checkpoint/restore requires at least criu" in + b.text(".pf-v5-c-alert.pf-m-danger > .pf-v5-c-alert__description").lower()) + return + + # Restore the container + self.waitContainerRow("swamped-crate") + self.performContainerAction("swamped-crate", "Restore") + b.set_checked('.pf-v5-c-modal-box input#restore-dialog-keep', True) + b.set_checked('.pf-v5-c-modal-box input#restore-dialog-tcpEstablished', True) + b.set_checked('.pf-v5-c-modal-box input#restore-dialog-ignoreStaticIP', True) + b.set_checked('.pf-v5-c-modal-box input#restore-dialog-ignoreStaticMAC', 
True) + b.click('.pf-v5-c-modal-box button:contains(Restore)') + b.wait(lambda: self.getContainerAttr("swamped-crate", "State") in 'Running') + + # A new MAC address should have been generated + # Fixed in podman 4.4.0 https://github.com/containers/podman/issues/16666 + cmd = "podman inspect --format '{{.NetworkSettings.MacAddress}}' swamped-crate" + new_mac_address = self.execute(True, cmd).strip() + if podman_version(self) >= (4, 4, 0): + self.assertNotEqual(new_mac_address, mac_address) + else: + self.assertEqual(new_mac_address, mac_address) + + # Checkpoint the container without stopping + self.waitContainerRow("swamped-crate") + self.performContainerAction("swamped-crate", "Checkpoint") + b.set_checked('.pf-v5-c-modal-box input#checkpoint-dialog-leaveRunning', True) + b.click('.pf-v5-c-modal-box button:contains(Checkpoint)') + b.wait_not_present('.modal_dialog') + + # Stop the container + m.execute("podman stop swamped-crate") + b.wait(lambda: self.getContainerAttr("swamped-crate", "State") in NOT_RUNNING) + + # Restore the container + self.performContainerAction("swamped-crate", "Restore") + b.click('.pf-v5-c-modal-box button:contains(Restore)') + b.wait(lambda: self.getContainerAttr("swamped-crate", "State") in 'Running') + + def testNotRunning(self): + b = self.browser + + def disable_system(): + self.execute(True, "systemctl disable --now podman.socket; systemctl stop podman.service") + + def enable_system(): + self.execute(True, "systemctl enable --now podman.socket") + + def enable_user(): + self.execute(False, "systemctl --user enable --now podman.socket") + + def disable_user(): + self.execute(False, "systemctl --user disable --now podman.socket") + + def is_active_system(string): + b.wait(lambda: self.execute(True, "systemctl is-active podman.socket || true").strip() == string) + + def is_enabled_system(string): + b.wait(lambda: self.execute(True, "systemctl is-enabled podman.socket || true").strip() == string) + + def is_active_user(string): + 
b.wait(lambda: self.execute(False, "systemctl --user is-active podman.socket || true").strip() == string) + + def is_enabled_user(string): + b.wait(lambda: self.execute(False, "systemctl --user is-enabled podman.socket || true").strip() == string) + + disable_system() + disable_user() + self.login_and_go("/podman") + + # Troubleshoot action + b.click("#app .pf-v5-c-empty-state button.pf-m-link") + b.enter_page("/system/services") + # services page is too slow + with b.wait_timeout(60): + b.wait_in_text("#service-details", "podman.socket") + + # Start action, with enabling (by default) + b.go("/podman") + b.enter_page("/podman") + b.click("#app .pf-v5-c-empty-state button.pf-m-primary") + + b.wait_visible("#containers-containers") + b.wait_not_present("#overview div.pf-v5-c-alert.pf-m-info") + + is_active_system("active") + is_active_user("active") + is_enabled_system("enabled") + is_enabled_user("enabled") + + # Start action, without enabling + disable_system() + disable_user() + b.click("#app .pf-v5-c-empty-state input[type=checkbox]") + b.assert_pixels("#app .pf-v5-c-empty-state", "podman-service-disabled", skip_layouts=["medium", "mobile"]) + b.click("#app .pf-v5-c-empty-state button.pf-m-primary") + + b.wait_visible("#containers-containers") + is_enabled_system("disabled") + is_enabled_user("disabled") + is_active_system("active") + is_active_user("active") + + b.logout() + disable_system() + # HACK: Due to https://github.com/containers/podman/issues/7180, avoid + # user podman.service to time out; make sure to start it afresh + disable_user() + enable_user() + self.login_and_go("/podman") + b.wait_in_text("#overview div.pf-v5-c-alert .pf-v5-c-alert__title", "System Podman service is also available") + b.click("#overview div.pf-v5-c-alert .pf-v5-c-alert__action > button:contains(Start)") + b.wait_not_present("#overview div.pf-v5-c-alert") + is_active_system("active") + is_active_user("active") + is_enabled_user("enabled") + is_enabled_system("enabled") + + 
b.logout() + disable_user() + enable_system() + self.login_and_go("/podman") + b.wait_in_text("#overview div.pf-v5-c-alert .pf-v5-c-alert__title", "User Podman service is also available") + b.click("#overview div.pf-v5-c-alert .pf-v5-c-alert__action > button:contains(Start)") + b.wait_not_present("#overview div.pf-v5-c-alert") + is_active_system("active") + is_active_user("active") + is_enabled_user("enabled") + is_enabled_system("enabled") + + b.logout() + disable_user() + disable_system() + self.login_and_go("/podman", superuser=False) + b.click("#app .pf-v5-c-empty-state button.pf-m-primary") + b.wait_visible("#containers-containers") + b.wait_not_present("#overview div.pf-v5-c-alert") + + is_active_system("inactive") + is_active_user("active") + is_enabled_user("enabled") + is_enabled_system("disabled") + b.logout() + + # no Troubleshoot action without cockpit-system package + disable_system() + disable_user() + self.restore_dir("/usr/share/cockpit/systemd") + self.machine.execute("rm /usr/share/cockpit/systemd/manifest.json") + self.login_and_go("/podman") + b.wait_visible("#app .pf-v5-c-empty-state button.pf-m-primary") + self.assertFalse(b.is_present("#app .pf-v5-c-empty-state button.pf-m-link")) + # starting still works + b.click("#app .pf-v5-c-empty-state button.pf-m-primary") + b.wait_visible("#containers-containers") + + self.allow_restart_journal_messages() + self.allow_journal_messages(".*podman/podman.sock/.*: couldn't connect:.*") + self.allow_journal_messages(".*podman/podman.sock: .*Connection.*Error.*") + self.allow_journal_messages(".*podman/podman.sock/.*/events.*: received truncated HTTP response.*") + + def testCreateContainerSystem(self): + self._testCreateContainer(True) + + def testCreateContainerUser(self): + self._testCreateContainer(False) + + def _testCreateContainer(self, auth): + new_container = 'new-container' + self.execute(True, f"podman run -d --name {new_container} --stop-timeout 0 {IMG_BUSYBOX} touch /latest") + 
self.execute(True, f"podman commit {new_container} newimage") + new_image_sha = self.execute(True, "podman inspect --format '{{.Id}}' newimage").strip() + + self.execute(True, f"podman run -d -p 5000:5000 --name registry --stop-timeout 0 {IMG_REGISTRY}") + self.execute(True, f"podman run -d -p 6000:5000 --name registry_alt --stop-timeout 0 {IMG_REGISTRY}") + # Add local insecure registry into registries conf + self.machine.write("/etc/containers/registries.conf", REGISTRIES_CONF) + self.execute(True, "systemctl stop podman.service") + # Push busybox image to the local registries + self.execute(True, + f"podman tag {IMG_BUSYBOX} localhost:5000/my-busybox; podman push localhost:5000/my-busybox") + self.execute(True, + f"podman tag {IMG_BUSYBOX} localhost:6000/my-busybox; podman push localhost:6000/my-busybox") + # Untag busybox image which duplicates the image we are about to download + self.execute(True, f"podman rmi -f {IMG_BUSYBOX} localhost:5000/my-busybox localhost:6000/my-busybox") + + self.login(auth) + + b = self.browser + container_name = "busybox-downloaded" + + b.click("#containers-containers button.pf-v5-c-button.pf-m-primary") + b.set_input_text("#run-image-dialog-name", container_name) + + # Test invalid input + b.set_input_text("#create-image-image-select-typeahead", "|alpi*ne?\\") + b.wait_text("button.pf-v5-c-select__menu-item:not(.pf-m-disabled)", "localhost/test-alpine:latest") + + # No local results found + b.set_input_text("#create-image-image-select-typeahead", "notfound") + + b.click('button.pf-v5-c-toggle-group__button:contains("Local")') + b.wait_text("button.pf-v5-c-select__menu-item.pf-m-disabled", "No images found") + + # Local results found + b.set_input_text("#create-image-image-select-typeahead", "registry") + if auth: + b.assert_pixels(".pf-v5-c-modal-box", "image-select", skip_layouts=["rtl"]) + b.click('button.pf-v5-c-toggle-group__button:contains("Local")') + b.wait_text("button.pf-v5-c-select__menu-item", IMG_REGISTRY_LATEST) + + # 
Local registry + b.set_input_text("#create-image-image-select-typeahead", "my-busybox") + b.click('button.pf-v5-c-toggle-group__button:contains("localhost:5000")') + b.wait_text("button.pf-v5-c-select__menu-item:not(.pf-m-disabled)", "localhost:5000/my-busybox") + + # Select image + b.click('button.pf-v5-c-select__menu-item:contains("localhost:5000/my-busybox")') + + # Remote image, no pull latest image option + b.wait_not_present("#run-image-dialog-pull-latest-image") + + # Create Container, image is pulled and should end up being "running" + b.click('.pf-v5-c-modal-box__footer #create-image-create-run-btn') + sel = " span:not(.downloading)" + b.wait(lambda: self.getContainerAttr(container_name, "State", sel) in 'Running') + self.execute(auth, f"podman exec {container_name} test ! -e /latest") + + # Now that we have downloaded an image, verify that selecting download latest image + # downloads the latest image we now push to the registry. Note this image has a /latest file + # to differnatiate it from the other local image. 
+ self.execute(True, f"podman push {new_image_sha} localhost:5000/my-busybox") + self.execute(True, f"podman push {new_image_sha} localhost:6000/my-busybox") + self.execute(True, f"podman rmi {new_image_sha}") + + container_name = "busybox-latest" + + b.click("#containers-containers button.pf-v5-c-button.pf-m-primary") + b.set_input_text("#run-image-dialog-name", container_name) + + # Local registry + b.set_input_text("#create-image-image-select-typeahead", "my-busybox") + b.click('button.pf-v5-c-toggle-group__button:contains("Local")') + + # Select image + b.click('button.pf-v5-c-select__menu-item:contains("localhost:5000/my-busybox")') + + # Pull the latest image + b.set_checked("#run-image-dialog-pull-latest-image", True) + + # Create Container, image is pulled and should end up being "running" + b.click('.pf-v5-c-modal-box__footer #create-image-create-run-btn') + sel = " span:not(.downloading)" + b.wait(lambda: self.getContainerAttr(container_name, "State", sel) in 'Running') + # Verify that the latest file exists + output = self.execute(auth, f"podman exec {container_name} ls -lh /latest").strip() + self.assertNotIn("No such file or directory", output) + + # Test creating a container with + if auth: + container_name = "busybox-download-admin" + b.click("#containers-containers button.pf-v5-c-button.pf-m-primary") + + # Start container as admin + b.click('#run-image-dialog-owner-user') + + # Create Container, image is pulled and should end up being "Running" + b.set_input_text("#run-image-dialog-name", container_name) + + b.set_input_text("#create-image-image-select-typeahead", IMG_BUSYBOX) + b.click('button.pf-v5-c-toggle-group__button:contains("Local")') + b.click(f'button.pf-v5-c-select__menu-item:contains("{IMG_BUSYBOX}")') + + b.click('.pf-v5-c-modal-box__footer #create-image-create-run-btn') + b.wait(lambda: self.getContainerAttr(container_name, "State", sel) in 'Running') + + def testRunImageSystem(self): + self._testRunImage(True) + + def 
testRunImageUser(self): + self._testRunImage(False) + + def _testRunImage(self, auth): + b = self.browser + m = self.machine + + # Just drop user images so we can use simpler selectors + if auth: + self.execute(False, "podman rmi --all") + + self.login(auth) + + b.click("#containers-images button.pf-v5-c-expandable-section__toggle") + + b.wait_in_text("#containers-images", IMG_BUSYBOX) + b.wait_in_text("#containers-images", IMG_ALPINE) + if auth: + b.wait_not_in_text("#containers-images", "admin") + + # Check command in alpine + b.wait_visible(f'#containers-images td[data-label="Image"]:contains("{IMG_ALPINE}")') + b.click(f'#containers-images tbody tr:contains("{IMG_ALPINE}") .ct-container-create') + b.wait_visible('div.pf-v5-c-modal-box header:contains("Create container")') + # depending on the precise container, this can be /bin/sh or /bin/ash + cmd = self.execute(auth, 'podman image inspect --format "{{.Config.Cmd}}" ' + IMG_ALPINE) + cmd = cmd.strip().replace('[', '').replace(']', '') + b.wait_attr("#run-image-dialog-command", "value", cmd) + b.click(".btn-cancel") + + # Open run image dialog + b.wait_visible(f'#containers-images td[data-label="Image"]:contains("{IMG_BUSYBOX}")') + b.click(f'#containers-images tbody tr:contains("{IMG_BUSYBOX}") .ct-container-create') + b.wait_visible('div.pf-v5-c-modal-box header:contains("Create container")') + + # Inspect and fill modal dialog + b.wait_val("#create-image-image-select-typeahead", IMG_BUSYBOX_LATEST) + + # Check that there is autogenerated name and then overwrite it + b.wait_not_val("#run-image-dialog-name", "") + b.set_input_text("#run-image-dialog-name", "busybox-with-tty") + + b.wait_visible("#run-image-dialog-command[value='sh']") + + # Check memory configuration + # Only works with CGroupsV2 + if auth or self.has_cgroupsV2: + b.set_checked("#run-image-dialog-memory-limit-checkbox", True) + b.wait_visible("#run-image-dialog-memory-limit-checkbox:checked") + b.wait_visible('#run-image-dialog-memory-limit 
input[value="512"]') + b.set_input_text("#run-image-dialog-memory-limit input[type=number]", "0.5") + b.set_val('#memory-unit-select', "GB") + + # CPU shares work only with system containers + if auth: + # Check that the checkbox is enabled when clicked on the field + b.wait_visible("#run-image-dialog-cpu-priority-checkbox:not(:checked)") + b.click('#run-image-cpu-priority') + b.wait_visible("#run-image-dialog-cpu-priority-checkbox:checked") + b.set_checked("#run-image-dialog-cpu-priority-checkbox", False) + + b.set_checked("#run-image-dialog-cpu-priority-checkbox", True) + b.wait_visible("#run-image-dialog-cpu-priority-checkbox:checked") + b.wait_visible('#run-image-dialog-cpu-priority input[value="1024"]') + b.set_input_text("#run-image-dialog-cpu-priority input[type=number]", "512") + else: + b.wait_not_present("#run-image-dialog-cpu-priority-checkbox") + + # Enable tty + b.set_checked("#run-image-dialog-tty", True) + + # Set up command line + b.set_input_text('#run-image-dialog-command', + "sh -c 'for i in $(seq 20); do sleep 1; echo $i; done; sleep infinity'") + + if auth: + # Set restart policy to 3 retries + b.set_val("#run-image-dialog-restart-policy", "on-failure") + b.set_input_text('#run-image-dialog-restart-retries input', '3') + else: # no lingering enabled so it's disabled + b.wait_not_present("#run-image-dialog-restart-policy") + + # Switch to Integration tab + b.click("#pf-tab-1-create-image-dialog-tab-integration") + + # Configure published ports + b.click('.publish-port-form .btn-add') + b.set_input_text('#run-image-dialog-publish-0-host-port', '6000') + b.set_input_text('#run-image-dialog-publish-0-container-port', '5000') + b.click('.publish-port-form .btn-add') + b.set_input_text('#run-image-dialog-publish-1-ip-address', '127.0.0.1') + b.set_input_text('#run-image-dialog-publish-1-host-port', '6001') + b.set_input_text('#run-image-dialog-publish-1-container-port', '5001') + b.set_val('#run-image-dialog-publish-1-protocol', "udp") + 
b.click('.publish-port-form .btn-add') + b.set_input_text('#run-image-dialog-publish-2-ip-address', '7001') + b.set_input_text('#run-image-dialog-publish-2-host-port', '7001') + b.click('#run-image-dialog-publish-2-btn-close') + b.click('.publish-port-form .btn-add') + b.set_input_text('#run-image-dialog-publish-3-container-port', '8001') + b.click('.publish-port-form .btn-add') + b.set_input_text('#run-image-dialog-publish-4-ip-address', '127.0.0.2') + b.set_input_text('#run-image-dialog-publish-4-container-port', '9001') + + # Configure env + b.click('.env-form .btn-add') + b.set_input_text('#run-image-dialog-env-0-key', 'APPLE') + b.set_input_text('#run-image-dialog-env-0-value', 'ORANGE') + b.click('.env-form .btn-add') + b.set_input_text('#run-image-dialog-env-1-key', 'PEAR') + b.set_input_text('#run-image-dialog-env-1-value', 'BANANA') + b.click('.env-form .btn-add') + b.set_input_text('#run-image-dialog-env-2-key', 'MELON') + b.set_input_text('#run-image-dialog-env-2-value', 'GRAPE') + b.click('#run-image-dialog-env-2-btn-close') + b.click('.env-form .btn-add') + # Test inputting an key=var entry + b.set_val('#run-image-dialog-env-3-value', + "RHUBARB=STRAWBERRY DURIAN=LEMON TEST_URL=wss://cockpit/?start=1&stop=0") + # set_val does not trigger onChange so append a space. 
+ b.set_input_text('#run-image-dialog-env-3-value', ' ', append=True, value_check=False) + + b.click('.env-form .btn-add') + b.set_input_text('#run-image-dialog-env-6-key', 'HOSTNAME') + b.set_input_text('#run-image-dialog-env-6-value', 'busybox') + + # Test inputting a var with = in it doesn't reset key + b.click('.env-form .btn-add') + b.set_input_text('#run-image-dialog-env-7-key', 'TEST') + b.set_input_text('#run-image-dialog-env-7-value', 'REBASE=1') + + # Configure volumes + b.click('.volume-form .btn-add') + rodir, rwdir = m.execute("mktemp; mktemp").split('\n')[:2] + m.execute(f"chown admin:admin {rodir}") + m.execute(f"chown admin:admin {rwdir}") + b.set_checked("#run-image-dialog-volume-0-mode", False) + + if self.has_selinux: + b.set_val('#run-image-dialog-volume-0-selinux', "z") + else: + b.wait_not_present('#run-image-dialog-volume-0-selinux') + + b.set_file_autocomplete_val("#run-image-dialog-volume-0 .pf-v5-c-select", rodir) + b.key_press(["\r"]) + b.set_input_text('#run-image-dialog-volume-0-container-path', '/tmp/ro') + ro_label = m.execute(f"ls -dZ {rodir}").split(" ")[0] + b.key_press(["\r"]) + b.click('.volume-form .btn-add') + b.wait_visible('#run-image-dialog-volume-1') + b.click('#run-image-dialog-volume-1-btn-close') + b.wait_not_present('#run-image-dialog-volume-1') + b.click('.volume-form .btn-add') + + if auth: + b.assert_pixels(".pf-v5-c-modal-box", "integration", + ignore=["#run-image-dialog-volume-0 .pf-v5-c-select__toggle-typeahead"], + skip_layouts=["rtl"]) + + if self.has_selinux: + b.set_val('#run-image-dialog-volume-2-selinux', "Z") + else: + b.wait_not_present('#run-image-dialog-volume-2-selinux') + + b.set_file_autocomplete_val("#run-image-dialog-volume-2 .pf-v5-c-select", rwdir) + b.key_press(["\r"]) + b.set_input_text('#run-image-dialog-volume-2-container-path', '/tmp/rw') + rw_label = m.execute(f"ls -dZ {rwdir}").split(" ")[0] + + b.click('.pf-v5-c-modal-box__footer #create-image-create-run-btn') + 
b.wait_not_present("div.pf-v5-c-modal-box") + self.waitContainerRow(IMG_BUSYBOX) + sha = self.execute(auth, "podman inspect --format '{{.Id}}' busybox-with-tty").strip() + self.waitContainer(sha, auth, name='busybox-with-tty', image=IMG_BUSYBOX, + cmd='sh -c "for i in $(seq 20); do sleep 1; echo $i; done; sleep infinity"', + state='Running', owner="system" if auth else "admin") + hasTTY = self.execute(auth, "podman inspect --format '{{.Config.Tty}}' busybox-with-tty").strip() + self.assertEqual(hasTTY, 'true') + # Only works with CGroupsV2 + if auth or self.has_cgroupsV2: + memory = self.execute(auth, "podman inspect --format '{{.HostConfig.Memory}}' busybox-with-tty").strip() + self.assertEqual(memory, '500000000') + + if auth: + cpuShares = self.execute(auth, + "podman inspect --format '{{.HostConfig.CpuShares}}' busybox-with-tty").strip() + self.assertEqual(cpuShares, '512') + + restartPolicy = self.getRestartPolicy(auth, "busybox-with-tty") + if auth: + self.assertEqual(restartPolicy, '{on-failure 3}') + else: + # No restart policy + self.assertEqual(restartPolicy, '{ 0}') + + b.wait(lambda: "3" in self.execute(auth, "podman logs busybox-with-tty")) + + self.toggleExpandedContainer(IMG_BUSYBOX) + + b.wait_in_text(f'#containers-containers tr:contains("{IMG_BUSYBOX}") dt:contains("Created") + dd', 'today at') + + b.click(".pf-m-expanded button:contains('Integration')") + + b.wait_in_text(f'#containers-containers tr:contains("{IMG_BUSYBOX}") dt:contains("Ports") + dd', + '0.0.0.0:6000 \u2192 5000/tcp') + b.wait_in_text(f'#containers-containers tr:contains("{IMG_BUSYBOX}") dt:contains("Ports") + dd', + '127.0.0.1:6001 \u2192 5001/udp') + b.wait_in_text(f'#containers-containers tr:contains("{IMG_BUSYBOX}") dt:contains("Ports") + dd', + '127.0.0.2:') + b.wait_in_text(f'#containers-containers tr:contains("{IMG_BUSYBOX}") dt:contains("Ports") + dd', + ' \u2192 8001/tcp') + b.wait_not_in_text(f'#containers-containers tr:contains("{IMG_BUSYBOX}") dt:contains("Ports") + 
dd', + '7001/tcp') + + ports = self.execute(auth, "podman inspect --format '{{.NetworkSettings.Ports}}' busybox-with-tty") + self.assertRegex(ports, r'5000/tcp:\[{(0.0.0.0)? 6000}\]') + self.assertIn('5001/udp:[{127.0.0.1 6001}]', ports) + self.assertIn('8001/tcp:[{', ports) + self.assertIn('9001/tcp:[{127.0.0.2 ', ports) + self.assertNotIn('7001/tcp', ports) + + env_select = f'#containers-containers tr:contains("{IMG_BUSYBOX}") dt:contains("Environment variables") + dd' + b.wait_in_text(env_select, 'APPLE=ORANGE') + b.wait_in_text(env_select, 'PEAR=BANANA') + b.wait_in_text(env_select, 'RHUBARB=STRAWBERRY') + b.wait_in_text(env_select, 'DURIAN=LEMON') + b.wait_in_text(env_select, 'TEST_URL=wss://cockpit/?start=1&stop=0') + b.wait_in_text(env_select, 'HOSTNAME=busybox') + b.wait_in_text(env_select, 'TEST=REBASE=1') + # variables are present in env but are not displayed in the UI + b.wait_not_in_text(env_select, 'container=podman') + b.wait_not_in_text(env_select, 'TERM=xterm') + b.wait_not_in_text(env_select, 'HOME=/root') + b.wait_not_in_text(env_select, 'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin') + + b.click(".container-integration button:contains('Show more')") + # previously hidden variables are now visible + b.wait_in_text(env_select, 'container=podman') + b.wait_in_text(env_select, 'TERM=xterm') + b.wait_in_text(env_select, 'HOME=/root') + b.wait_in_text(env_select, 'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin') + + env = self.execute(auth, "podman exec busybox-with-tty env") + self.assertIn('APPLE=ORANGE', env) + self.assertIn('PEAR=BANANA', env) + self.assertIn('RHUBARB=STRAWBERRY', env) + self.assertIn('DURIAN=LEMON', env) + self.assertIn('HOSTNAME=busybox', env) + self.assertIn('TEST=REBASE=1', env) + self.assertIn('container=podman', env) + self.assertIn('TERM=xterm', env) + self.assertIn('HOME=/root', env) + self.assertIn('PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin', env) + 
self.assertNotIn('MELON=GRAPE', env) + + vol_select = f'#containers-containers tr:contains("{IMG_BUSYBOX}") dt:contains("Volumes") + dd' + b.wait_in_text(vol_select, f"{rodir} \u2192 /tmp/ro") + b.wait_in_text(vol_select, f"{rwdir} \u2194 /tmp/rw") + + romnt = self.execute(auth, "podman exec busybox-with-tty cat /proc/self/mountinfo | grep /tmp/ro") + self.assertIn('ro', romnt) + self.assertIn(rodir[4:], romnt) + rwmnt = self.execute(auth, "podman exec busybox-with-tty cat /proc/self/mountinfo | grep /tmp/rw") + self.assertIn('rw', rwmnt) + self.assertIn(rwdir[4:], rwmnt) + + if self.has_selinux: + # rw was set to :Z so it should change, but not be shared + rw_label_new = m.execute(f"ls -dZ {rwdir}").split(" ")[0] + self.assertNotEqual(rw_label, rw_label_new) + self.assertRegex(rw_label_new, r"container_file_t:s0:c\d*,c\d*$") + + # ro was set to :z to it should change and be shared + ro_label_new = m.execute(f"ls -dZ {rodir}").split(" ")[0] + self.assertNotEqual(ro_label, ro_label_new) + self.assertRegex(ro_label_new, "container_file_t:s0$") + + def get_int(n): + try: + return int(n) + except ValueError: + return 0 + + b.wait_not_present("button:contains('Health check logs')") + b.click(".pf-m-expanded button:contains('Logs')") + b.wait_text(".pf-m-expanded .container-logs .xterm-accessibility-tree > div:nth-child(1)", "1") + + # firefox optimizes these out when not visible + b.eval_js(""" + document.querySelector('.pf-m-expanded .container-logs .xterm-accessibility-tree').scrollIntoView() + """) + b.wait_in_text(".pf-m-expanded .container-logs .xterm-accessibility-tree", "6") + + b.click(".pf-m-expanded button:contains('Console')") + b.wait(lambda: + get_int(b.text(".pf-m-expanded .container-terminal .xterm-accessibility-tree > div:nth-child(3)")) > 7) + + # Create another instance without port publishing + b.wait_visible(f'#containers-images td[data-label="Image"]:contains("{IMG_BUSYBOX}")') + self.toggleExpandedContainer(IMG_BUSYBOX) + 
b.click(f'#containers-images tbody tr:contains("{IMG_BUSYBOX}") .ct-container-create') + b.wait_visible('div.pf-v5-c-modal-box header:contains("Create container")') + + b.wait_val("#create-image-image-select-typeahead", IMG_BUSYBOX_LATEST) + b.set_input_text("#run-image-dialog-name", "busybox-without-publish") + + # Set up command line + b.set_input_text('#run-image-dialog-command', + "sh -c 'for i in $(seq 20); do echo $i; sleep 3; done; sleep infinity'") + + # Run without tty, console should be able to `exec` + b.set_checked("#run-image-dialog-tty", False) + + b.click('.pf-v5-c-modal-box__footer #create-image-create-run-btn') + b.wait_not_present("div.pf-v5-c-modal-box") + + self.waitContainerRow("busybox-without-publish") + self.toggleExpandedContainer("busybox-without-publish") + b.wait_not_present(""" + #containers-containers tbody tr:contains("busybox-without-publish") + tr dt:contains("Ports") + """) + + # Rootless only works with CGroupsV2 + if auth or self.has_cgroupsV2: + cpuShares = self.execute(auth, """ + podman inspect --format '{{.HostConfig.CpuShares}}' busybox-without-publish + """).strip() + # podman ≥ 1.8 translates 0 default into actual value + self.assertIn(cpuShares, ['0', '1024']) + + b.set_val("#containers-containers-filter", "all") + + b.click(".pf-m-expanded button:contains('Console')") + b.wait_in_text(".pf-m-expanded .xterm-accessibility-tree", "/ # ") + b.focus(".pf-m-expanded .xterm-helper-textarea") + b.key_press('clear\r') + b.wait_not_in_text(".pf-m-expanded .xterm-accessibility-tree", "clear") + b.wait_text(".pf-m-expanded .xterm-accessibility-tree > div:nth-child(1)", "/ # ") + b.key_press('echo hello\r') + b.wait_text(".pf-m-expanded .xterm-accessibility-tree > div:nth-child(2)", "hello") + b.wait_text(".pf-m-expanded .xterm-accessibility-tree > div:nth-child(3)", "/ # ") + b.wait_text(".pf-m-expanded .xterm-accessibility-tree > div:nth-child(1)", "/ # echo hello") + + b.go("#/?name=tty") + 
self.check_containers(["busybox-with-tty"], ["busybox-without-publish"]) + b.go("#/?name=busy") + self.check_containers(["busybox-with-tty", "busybox-without-publish"], []) + + b.set_input_text('#containers-filter', 'tty') + self.check_containers(["busybox-with-tty"], ["busybox-without-publish"]) + self.check_images([], [IMG_ALPINE, IMG_BUSYBOX, IMG_REGISTRY]) + b.set_input_text('#containers-filter', 'busy') + b.wait_js_cond('window.location.hash === "#/?name=busy"') + self.check_containers(["busybox-with-tty", "busybox-without-publish"], []) + self.check_images([IMG_BUSYBOX], [IMG_ALPINE, IMG_REGISTRY]) + b.set_input_text('#containers-filter', 'alpine') + b.wait_js_cond('window.location.hash === "#/?name=alpine"') + self.check_containers([], ["busybox-with-tty", "busybox-without-publish"]) + self.check_images([IMG_ALPINE], [IMG_BUSYBOX, IMG_REGISTRY]) + b.set_input_text('#containers-filter', '') + self.check_containers(["busybox-with-tty", "busybox-without-publish"], []) + self.check_images([IMG_ALPINE, IMG_BUSYBOX, IMG_REGISTRY], []) + b.wait_js_cond('window.location.hash === "#/"') + + self.filter_containers("running") + id_with_tty = self.execute(auth, "podman inspect --format '{{.Id}}' busybox-with-tty").strip() + + container_sel = f'#containers-images tbody tr:contains("{IMG_BUSYBOX}")' + b.click(f'{container_sel} td.pf-v5-c-table__toggle button') + # running container, just selects it, but leaves "Only running" alone + b.click(f"{container_sel} + tr div.ct-listing-panel-body dt:contains('Used by') + dd button:contains('busybox-with-tty')") # noqa: E501 + b.wait_js_cond('window.location.hash === "#' + id_with_tty + '"') + b.wait_val("#containers-containers-filter", "running") + # FIXME: expanding running container details does not actually work right now + # b.wait_in_text("#containers-containers tr.pf-m-expanded .container-details", "sleep infinity") + # stopped container, switches to showing all containers + + # Create a container without starting it + 
self.filter_containers("all") + container_name = "busybox-not-started" + b.wait_visible(f'#containers-images td[data-label="Image"]:contains("{IMG_BUSYBOX}")') + b.click(f'#containers-images tbody tr:contains("{IMG_BUSYBOX}") .ct-container-create') + b.wait_visible('div.pf-v5-c-modal-box header:contains("Create container")') + + b.wait_val("#create-image-image-select-typeahead", IMG_BUSYBOX_LATEST) + b.set_input_text("#run-image-dialog-name", container_name) + b.set_input_text("#run-image-dialog-command", "sh -c sleep infinity") + + b.click('.pf-v5-c-modal-box__footer #create-image-create-btn') + b.wait_not_present("div.pf-v5-c-modal-box") + + sha = self.execute(auth, "podman inspect --format '{{.Id}}' " + container_name).strip() + self.waitContainer(sha, auth, name=container_name, image=IMG_BUSYBOX, state=['Configured', 'Created']) + + self.filter_containers("running") + b.wait_not_in_text("#containers-containers", "busybox-not-started") + container_sel = f"#containers-images tbody tr:contains('{IMG_BUSYBOX}') + tr div.ct-listing-panel-body" + b.click(f"{container_sel} dt:contains('Used by') + dd button:contains('busybox-not-started')") + b.wait_js_cond(f"window.location.hash === '#{sha}'") + b.wait_val("#containers-containers-filter", "all") + b.wait_in_text("#containers-containers", "busybox-not-started") + # auto-expands container details + b.wait_in_text("#containers-containers tbody tr:contains('busybox-not-started') + tr", "sleep infinity") + + b.click(f'#containers-images tbody tr:contains("{IMG_ALPINE}") td.pf-v5-c-table__toggle button') + b.wait_in_text(f"#containers-images tbody tr:contains('{IMG_ALPINE}') td[data-label='Used by']", 'unused') + + b.set_input_text('#containers-filter', 'foobar') + b.wait_in_text('#containers-containers .pf-v5-c-empty-state', 'No containers that match the current filter') + b.wait_in_text('#containers-images .pf-v5-c-empty-state', 'No images that match the current filter') + b.set_input_text('#containers-filter', '') + + 
if not auth or not self.machine.ostree_image: # don't kill ws container + # Ubuntu 22.04 has old podman that does not know about --time + if m.image != 'ubuntu-2204': + # Remove all containers first as it is not possible to set --time 0 to rmi command + self.execute(auth, "podman rm --all --force --time 0") + self.execute(auth, "podman rmi -af") + b.wait_in_text('#containers-containers .pf-v5-c-empty-state', 'No containers') + b.set_val("#containers-containers-filter", "running") + b.wait_in_text('#containers-containers .pf-v5-c-empty-state', 'No running containers') + b.wait_in_text('#containers-images .pf-v5-c-empty-state', 'No images') + + def check_content(self, kind, present, not_present): + b = self.browser + for item in present: + b.wait_visible(f'#containers-{kind} tbody tr:first-child:contains({item})') + for item in not_present: + b.wait_not_present(f'#containers-{kind} tbody tr:first-child:contains({item})') + + def check_containers(self, present, not_present): + self.check_content("containers", present, not_present) + + def check_images(self, present, not_present): + self.check_content("images", present, not_present) + + def waitContainer(self, row_id, auth, name="", image="", cmd="", owner="", state=None, pod="no-pod"): + """Check the container with row_name has the expected values + "image" can be substring, "state" might be string or array of possible states, other are + checked for exact match. 
+ """ + sel = "#containers-containers #table-" + pod + f" tbody tr[data-row-id=\"{row_id}{auth}\"]".lower() + b = self.browser + if name: + b.wait_text(sel + " .container-name", name) + if image: + b.wait_in_text(sel + " .container-block small:nth-child(2)", image) + if cmd: + b.wait_text(sel + " .container-block small:last-child", cmd) + if owner: + if owner == "system": + b.wait_text(sel + " td[data-label=Owner]", owner) + else: + b.wait_text(sel + " td[data-label=Owner]", "user: " + owner) + if state is not None: + if not isinstance(state, list): + state = [state] + b.wait(lambda: b.text(sel + " td[data-label=State]") in state) + + def filter_containers(self, value): + """Use dropdown menu in the header to filter containers""" + b = self.browser + b.set_val("#containers-containers-filter", value) + + def confirm_modal(self, text): + """Wait for the pop up window and click the button with text""" + b = self.browser + b.click(f".pf-v5-c-modal-box footer button:contains({text})") + b.wait_not_present(f".pf-v5-c-modal-box footer button:contains({text})") + + def testPruneUnusedImagesSystem(self): + self._testPruneUnusedImagesSystem(True) + + def testPruneUnusedImagesUser(self): + self._testPruneUnusedImagesSystem(False) + + @testlib.skipOstree("no root login available on ostree") + def testPruneUnusedImagesRoot(self): + self._testPruneUnusedImagesSystem(False, True) + + def _testPruneUnusedImagesSystem(self, auth, root=False): + b = self.browser + if root: + self.login_and_go("/podman", user="root", enable_root_login=True) + b.wait_visible("#app") + else: + self.login(auth) + + leftover_images = 1 + # cockpit-ws image + if self.machine.ostree_image and auth: + leftover_images += 1 + + # By default we have 3 unused images, start one. 
+ self.execute(auth or root, f"podman run -d --name used_image --stop-timeout 0 {IMG_ALPINE} sh") + b.click("#image-actions-dropdown") + b.click("#prune-unused-images-button") + + if auth: + b.wait_js_func("ph_count_check", ".pf-v5-c-modal-box__body .pf-v5-c-list li", + (self.user_images_count + self.system_images_count) - leftover_images) + elif root: + b.wait_js_func("ph_count_check", ".pf-v5-c-modal-box__body .pf-v5-c-list li", + self.system_images_count - leftover_images) + else: + b.wait_js_func("ph_count_check", ".pf-v5-c-modal-box__body .pf-v5-c-list li", + self.user_images_count - leftover_images) + b.click(".pf-v5-c-modal-box button:contains(Prune)") + + # When being superuser, admin images are also removed + if auth: + self.waitNumImages(leftover_images) + checkImage(b, IMG_ALPINE, "system") + else: + self.waitNumImages(leftover_images) + # Two images removed, one in use kept + b.wait_not_present(f"#containers-images:contains('{IMG_BUSYBOX}')") + b.wait_not_present(f"#containers-images:contains('{IMG_REGISTRY}')") + b.wait_visible(f"#containers-images:contains('{IMG_ALPINE}')") + + # Prune button should now be disabled + b.click("#image-actions-dropdown") + b.wait_visible(".pf-m-disabled.pf-v5-c-menu__list-item:contains(Prune unused images)") + + def testPruneUnusedImagesSystemSelections(self): + """ Test the prune unused images selection options""" + b = self.browser + self.login(True) + + b.click("#image-actions-dropdown") + b.click("button:contains(Prune unused images)") + + # Deselect both + b.click("#deleteSystemImages") + b.click("#deleteUserImages") + b.wait_visible(".pf-v5-c-modal-box button:contains(Prune):disabled") + + # Admin / user images are selected + expected_images = self.user_images_count + self.system_images_count + if self.machine.ostree_image: + expected_images -= 1 + b.wait_js_func("ph_count_check", ".pf-v5-c-modal-box__body .pf-v5-c-list li", expected_images) + # Select user images + b.click("#deleteUserImages") + 
b.click(".pf-v5-c-modal-box button:contains(Prune)") + + # System images are left over + self.waitNumImages(self.system_images_count) + checkImage(b, IMG_ALPINE, "system") + checkImage(b, IMG_BUSYBOX, "system") + checkImage(b, IMG_REGISTRY, "system") + + # Pruning again, should delete all system images + b.click("#image-actions-dropdown") + b.click("button:contains(Prune unused images)") + b.wait_js_func("ph_count_check", ".pf-v5-c-modal-box__body .pf-v5-c-list li", + self.system_images_count - 1 if self.machine.ostree_image else self.system_images_count) + b.click(".pf-v5-c-modal-box button:contains(Prune)") + self.waitNumImages(1 if self.machine.ostree_image else 0) + + # Prune button should now be disabled + b.click("#image-actions-dropdown") + b.wait_visible(".pf-v5-c-menu__list-item.pf-m-disabled:contains(Prune unused images)") + + def testPruneUnusedContainersSystem(self): + self._testPruneUnusedContainersSystem(True) + + def testPruneUnusedContainersUser(self): + self._testPruneUnusedContainersSystem(False) + + def _testPruneUnusedContainersSystem(self, auth): + """Test the prune unused container image dialog""" + + b = self.browser + self.login(auth) + + # Create running and non-running containers + self.execute(auth, "podman pod create --name pod") + notrunninginpodId = self.execute(auth, f""" + podman run --name inpod --pod pod -tid {IMG_BUSYBOX} sh -c 'exit 1'""").strip() + runninginpodId = self.execute(auth, f""" + podman run --name inpodrunning --pod pod -tid {IMG_BUSYBOX} sh -c 'sleep infinity'""").strip() + + self.execute(auth, f"podman run --name notrunning -tid {IMG_BUSYBOX} sh -c 'exit 1'") + self.execute(auth, f"podman run --name containerrunning -tid {IMG_BUSYBOX} sh -c 'sleep infinity'") + + # Create containers for the opposite of what we are, admin or super admin + if auth: + self.execute(False, f"podman run --name adminnotrunning -tid {IMG_BUSYBOX} sh 'exit 1'") + b.wait(lambda: self.getContainerAttr("adminnotrunning", "State") in 
NOT_RUNNING) + self.execute(False, f"podman run --name adminrunning -tid {IMG_BUSYBOX} sh -c 'sleep infinity'") + b.wait(lambda: self.getContainerAttr("adminrunning", "State") == "Running") + + b.click("#containers-actions-dropdown") + b.click("button:contains(Prune unused containers)") + + if auth: + b.wait_in_text(".pf-v5-c-modal-box__body tbody:nth-of-type(1) td[data-label=Name]", "adminnotrunning") + b.wait_in_text(".pf-v5-c-modal-box__body tbody:nth-of-type(2) td[data-label=Name]", "notrunning") + else: + b.wait_in_text(".pf-v5-c-modal-box__body tbody td[data-label=Name]", "notrunning") + + b.click(".pf-v5-c-modal-box button:contains(Prune)") + b.wait_not_present(".pf-v5-c-modal-box__body") + + if auth: + self.waitContainerRow("notrunning", False) + self.waitContainerRow("adminnotrunning", False) + else: + self.waitContainerRow("notrunning", False) + + # Verify running containers still exists + self.waitContainerRow("containerrunning") + pods = [{"name": "inpod", "state": "Exited", "id": notrunninginpodId, + "image": IMG_BUSYBOX, "command": 'sh -c "exit 1"'}, + {"name": "inpodrunning", "state": "Running", "id": runninginpodId, + "image": IMG_BUSYBOX, "command": 'sh -c "sleep infinity"'}] + self.waitPodContainer("pod", pods, auth) + + def testCreateContainerValidation(self): + def validateField(groupSelector, value, errorMessage, resetValue=""): + b.set_input_text(f"{groupSelector} input", value) + b.wait_visible(".pf-v5-c-modal-box__footer #create-image-create-run-btn:not(:disabled)") + b.wait_in_text(f"{groupSelector} .pf-v5-c-helper-text__item-text", errorMessage) + b.wait_visible(".pf-v5-c-modal-box__footer #create-image-create-run-btn[aria-disabled=true]") + # Reset to acceptable value and verify the validation message is not present + b.set_input_text(f"{groupSelector} input", resetValue) + b.wait_not_present(f"{groupSelector} .pf-v5-c-helper-text__item-text") + b.wait_visible(".pf-v5-c-modal-box__footer #create-image-create-run-btn:not(:disabled)") + + # 
Test the validation errors + + # complaint about port conflict + self.allow_browser_errors("error: Container failed to be started:.*") + self.allow_browser_errors("No routable interface.*") + self.allow_browser_errors(".*ddress already in use.*5000.*") + b = self.browser + self.login(False) + container_name = 'portused' + + # Start a podman container which uses a port + self.execute(False, f"podman run -d -p 5000:5000 --name registry --stop-timeout 0 {IMG_REGISTRY}") + b.click("#containers-images button.pf-v5-c-expandable-section__toggle") + + b.wait_visible(f'#containers-images td[data-label="Image"]:contains("{IMG_BUSYBOX}")') + b.click(f'#containers-images tbody tr:contains("{IMG_BUSYBOX}") .ct-container-create') + b.wait_visible('div.pf-v5-c-modal-box header:contains("Create container")') + + validateField("#image-name-group", "registry", "Name already in use") + + # Switch to Integration tab + b.click("#pf-tab-1-create-image-dialog-tab-integration") + + # Test validation of port mapping + b.click('.publish-port-form .btn-add') + b.set_input_text("#run-image-dialog-publish-0-container-port-group input", "1") + validateField("#run-image-dialog-publish-0-ip-address-group", "abcd", "valid IP address") + validateField("#run-image-dialog-publish-0-host-port-group", "-1", "1 to 65535") + validateField("#run-image-dialog-publish-0-host-port-group", "99999", "1 to 65535") + validateField("#run-image-dialog-publish-0-container-port-group", "-1", "1 to 65535", resetValue="1") + validateField("#run-image-dialog-publish-0-container-port-group", "", "must not be empty", resetValue="1") + validateField("#run-image-dialog-publish-0-container-port-group", "99999", "1 to 65535", resetValue="1") + + # Test validation of volumes + b.click('.volume-form .btn-add') + b.set_input_text("#run-image-dialog-volume-0-container-path-group input", "/somepath") + validateField("#run-image-dialog-volume-0-container-path-group", "", "not be empty", resetValue="/somepath") + + # Test 
validation of environment variables + b.click('.env-form .btn-add') + b.set_input_text("#run-image-dialog-env-0-key-group input", "sometext") + validateField("#run-image-dialog-env-0-key-group", "", "must not be empty", resetValue="sometext") + + b.set_input_text("#run-image-dialog-name", container_name) + + # Port address is already in use + b.set_input_text('#run-image-dialog-publish-0-host-port', '5000') + b.set_input_text('#run-image-dialog-publish-0-container-port', '5000') + b.click('.pf-v5-c-modal-box__footer #create-image-create-run-btn') + # Can be "[aA]ddress" + b.wait_in_text(".pf-v5-c-alert", "ddress already in use") + + # Changing the port should allow creation of container + b.set_input_text('#run-image-dialog-publish-0-host-port', '5001') + b.click('.pf-v5-c-modal-box__footer #create-image-create-run-btn') + self.waitContainerRow(container_name) + + def _testHealthcheck(self, auth): + b = self.browser + + # Just drop user images so we can use simpler selectors + if auth: + self.execute(False, f"podman rmi {IMG_BUSYBOX}") + + self.login(auth) + + b.click("#containers-images button.pf-v5-c-expandable-section__toggle") + + b.wait_visible(f'#containers-images td[data-label="Image"]:contains("{IMG_BUSYBOX}")') + b.click(f'#containers-images tbody tr:contains("{IMG_BUSYBOX}") .ct-container-create') + b.wait_visible('div.pf-v5-c-modal-box header:contains("Create container")') + + b.set_input_text("#run-image-dialog-name", "healthy") + + b.click("#pf-tab-2-create-image-dialog-tab-healthcheck") + b.set_input_text('#run-image-dialog-healthcheck-command', 'true') + b.set_input_text('#run-image-healthcheck-interval input', '325') + b.set_input_text('#run-image-healthcheck-timeout input', '35') + b.set_input_text('#run-image-healthcheck-start-period input', '5') + b.click('#run-image-healthcheck-retries .pf-v5-c-input-group__item:nth-child(1) button') + b.wait_val("#run-image-healthcheck-retries input", 2) + if auth: + b.assert_pixels('.pf-v5-c-modal-box', 
"healthcheck-modal", skip_layouts=["rtl"]) + # Test that the healthcheck option is not available before podman 4.3 + if podman_version(self) < (4, 3, 0): + b.wait_not_present("#run-image-healthcheck-action") + b.click('.pf-v5-c-modal-box__footer #create-image-create-run-btn') + + self.waitContainerRow("healthy") + b.click("#containers-images button.pf-v5-c-expandable-section__toggle") + + healthy_sha = self.execute(auth, "podman inspect --format '{{.Id}}' healthy").strip() + self.waitContainer(healthy_sha, auth, state='RunningHealthy') + + self.toggleExpandedContainer("healthy") + b.click(".pf-m-expanded button:contains('Health check')") + + b.wait_in_text('#container-details-healthcheck dt:contains("Command") + dd', 'true') + b.wait_in_text('#container-details-healthcheck dt:contains("Interval") + dd', '325 seconds') + b.wait_in_text('#container-details-healthcheck dt:contains("Retries") + dd', '2') + b.wait_in_text('#container-details-healthcheck dt:contains("Timeout") + dd', '35 seconds') + b.wait_in_text('#container-details-healthcheck dt:contains("Start period") + dd', '5 seconds') + b.wait_not_present('#container-details-healthcheck dt:contains("Failing streak")') + if podman_version(self) >= (4, 3, 0): + b.wait_in_text('#container-details-healthcheck dt:contains("When unhealthy") + dd', 'No action') + + self.assertEqual(self.execute(auth, "podman inspect --format '{{.Config.Healthcheck}}' healthy").strip(), + "{[true] 5s 5m25s 35s 2}") + + # single successful health check + b.wait_in_text(".ct-listing-panel-body tbody tr", "Passed health run") + b.wait_visible(".ct-listing-panel-body tbody:nth-of-type(1) svg.green") + b.wait_not_present(".ct-listing-panel-body tbody:nth-of-type(2)") + + # Trigger run manually, adds one more healthy run + self.performContainerAction("healthy", "Run health check") + b.wait_visible(".ct-listing-panel-body tbody:nth-of-type(2) svg.green") + b.wait_not_present(".ct-listing-panel-body tbody:nth-of-type(3)") + + 
self.toggleExpandedContainer("healthy") + + self.execute(auth, f"podman run --name sick -dt --health-cmd false --health-interval 5s {IMG_BUSYBOX}") + self.waitContainerRow("sick") + unhealthy_sha = self.execute(auth, "podman inspect --format '{{.Id}}' sick").strip() + self.waitContainer(unhealthy_sha, auth, state='RunningUnhealthy') + # Unhealthy should be first + expected_ws = "" + if auth and self.machine.ostree_image: + expected_ws = "ws" + b.wait_collected_text("#containers-containers .container-name", "healthysick" + expected_ws) + + self.toggleExpandedContainer("sick") + b.click(".pf-m-expanded button:contains('Health check')") + b.wait_visible(".pf-m-expanded .ct-listing-panel-body tbody:nth-of-type(1)") + b.wait_visible(".pf-m-expanded .ct-listing-panel-body tbody:nth-of-type(4)") + b.wait_visible(".pf-m-expanded .ct-listing-panel-body tbody:nth-of-type(2) svg.red") + b.wait_visible('.pf-m-expanded #container-details-healthcheck dt:contains("Failing streak")') + failures = int(b.text('.pf-m-expanded #container-details-healthcheck dt:contains("Failing streak") + dd')) + self.assertGreater(failures, 3) + if auth: + b.wait_js_func("ph_count_check", ".pf-m-expanded table[aria-label=Logs] tbody tr", 5) + b.assert_pixels(".pf-m-expanded .pf-v5-c-table__expandable-row-content", + "healthcheck-details", + ignore=["thead", "#container-details-healthcheck dt:contains('Failing streak') + dd", + "td[data-label='Started at']"], + skip_layouts=["rtl"]) + + self.toggleExpandedContainer("sick") + b.click("#containers-images button.pf-v5-c-expandable-section__toggle") + + b.wait_visible('#containers-images td[data-label="Image"]:contains("busybox:latest")') + b.click('#containers-images tbody tr:contains("busybox:latest") .ct-container-create') + b.wait_visible('div.pf-v5-c-modal-box header:contains("Create container")') + + # Test the health check action, only supported in podman 4.3 and later. 
+ # To test this we make a healthcheck which depends on a file, so when starting the + # container is healthy, after we remove the file the healthcheck should fail and our + # configured action should executed. + if podman_version(self) < (4, 3, 0): + return + + containername = "healthaction" + b.set_input_text("#run-image-dialog-name", containername) + b.set_input_text("#run-image-dialog-command", "/bin/sh -c 'echo 1 > /healthy && sleep infinity'") + + b.click("#pf-tab-2-create-image-dialog-tab-healthcheck") + b.set_input_text('#run-image-dialog-healthcheck-command', '/bin/test -f /healthy') + b.set_input_text('#run-image-healthcheck-interval input', '1') + b.set_input_text('#run-image-healthcheck-timeout input', '1') + b.click('#run-image-healthcheck-action-2') + b.click('.pf-v5-c-modal-box__footer #create-image-create-run-btn') + + self.waitContainerRow(containername) + self.toggleExpandedContainer(containername) + b.wait(lambda: self.getContainerAttr(containername, "State") == "RunningHealthy") + b.click(".pf-m-expanded button:contains('Health check')") + b.wait_in_text('.pf-m-expanded #container-details-healthcheck dt:contains("When unhealthy") + dd', + 'Force stop') + # Removing the file should kill the container + status = self.execute(auth, f"podman exec {containername} rm -f /healthy").strip() + b.wait(lambda: self.getContainerAttr(containername, + "State", "span:not(.ct-badge-container-unhealthy)") in NOT_RUNNING) + status = self.execute(auth, f"podman inspect --format '{{{{.State.Health.Status}}}}' {containername}").strip() + self.assertEqual(status, "unhealthy") + + def testHealthcheckSystem(self): + self._testHealthcheck(True) + + def testHealthcheckUser(self): + self._testHealthcheck(False) + + # Ubuntu 2204 lacks user systemd units + # https://github.com/containers/podman/commit/9312d458b4254b48e331d1ae40cb2f6d0fec9bd0 + @testlib.skipImage("podman-restart not available for user", "ubuntu-2204") + def testPodmanRestartEnabledUser(self): + 
self._testPodmanRestartEnabled(False) + + def testPodmanRestartEnabledSystem(self): + self._testPodmanRestartEnabled(True) + + def _testPodmanRestartEnabled(self, auth): + b = self.browser + if auth: + self.addCleanup(self.machine.execute, "systemctl disable podman-restart.service") + else: + self.addCleanup(self.machine.execute, "systemctl --user disable podman-restart.service") + self.machine.execute("loginctl enable-linger $(id -u admin)") + # HACK: verify that the file we watch exists + self.assertTrue(self.machine.execute(""" + if test -e /var/lib/systemd/linger/admin; then echo yes; fi + """).strip() != "") + self.addCleanup(self.machine.execute, "loginctl disable-linger $(id -u admin)") + + # Drop user images for easy selection + if auth: + self.execute(False, f"podman rmi {IMG_BUSYBOX}") + + self.login(auth) + b.click("#containers-images button.pf-v5-c-expandable-section__toggle") + + def create_container(name, policy=None): + b.wait_visible(f'#containers-images td[data-label="Image"]:contains("{IMG_BUSYBOX}")') + b.click(f'#containers-images tbody tr:contains("{IMG_BUSYBOX}") .ct-container-create') + b.wait_visible('div.pf-v5-c-modal-box header:contains("Create container")') + + b.set_input_text("#run-image-dialog-name", name) + if policy: + b.set_val("#run-image-dialog-restart-policy", "always") + b.click('.pf-v5-c-modal-box__footer #create-image-create-run-btn') + self.waitContainerRow(name) + + container_name = 'none' + create_container(container_name) + self.assertEqual(self.getRestartPolicy(auth, container_name), '{ 0}') + + container_name = 'restart' + create_container(container_name, 'always') + self.assertEqual(self.getRestartPolicy(auth, container_name), '{always 0}') + if auth: + podmanRestartEnabled = self.execute(True, "systemctl is-enabled podman-restart.service || true").strip() + else: + podmanRestartEnabled = self.execute(False, + "systemctl --user is-enabled podman-restart.service || true").strip() + self.assertEqual(podmanRestartEnabled, 
'enabled') + + def _testCreateContainerInPod(self, auth): + b = self.browser + + container_name = 'containerinpod' + podname = "pod1" + self.execute(auth, f"podman pod create --infra=false --name={podname}") + + self.login(auth) + + self.filter_containers('all') + b.click(".create-container-in-pod") + + # the podname should be in the "Create container" header + self.assertIn(podname, b.text("#pf-modal-part-1")) + + b.set_input_text("#run-image-dialog-name", container_name) + b.set_input_text("#create-image-image-select-typeahead", IMG_BUSYBOX_LATEST) + b.click('button.pf-v5-c-toggle-group__button:contains("Local")') + b.click(f'button.pf-v5-c-select__menu-item:contains("{IMG_BUSYBOX_LATEST}")') + b.click('.pf-v5-c-modal-box__footer #create-image-create-run-btn') + b.wait_not_present("#run-image-dialog-name") + + container_sha = self.execute(auth, f"podman inspect --format '{{{{.Id}}}}' {container_name}").strip() + self.waitContainer(container_sha, auth, name=container_name, image=IMG_BUSYBOX, cmd='sh', + state='Running', owner="system" if auth else "admin", pod=podname) + + # Check that we correctly preselect owner + if auth: + self.execute(False, "podman pod create --infra=false --name=system_pod") + b.click("#table-system_pod .create-container-in-pod") + b.wait_visible("#run-image-dialog-owner-user:checked") + b.wait_visible("#run-image-dialog-owner-user:disabled") + b.wait_visible("#run-image-dialog-owner-system:disabled") + + def testCreateContainerInPodSystem(self): + self._testCreateContainerInPod(True) + + def testCreateContainerInPodUser(self): + self._testCreateContainerInPod(False) + + def testPauseResumeContainerSystem(self): + self._testPauseResumeContainer(True) + + def testPauseResumeContainerUser(self): + # rootless cgroupv1 containers do not support pausing + if not self.has_cgroupsV2: + return + self._testPauseResumeContainer(False) + + def _testPauseResumeContainer(self, auth): + b = self.browser + container_name = "pauseresume" + + 
self.execute(auth, f"podman run -dt --name {container_name} --stop-timeout 0 {IMG_ALPINE}") + self.login(auth) + + self.waitContainerRow(container_name) + self.toggleExpandedContainer(container_name) + b.wait_not_present(self.getContainerAction(container_name, 'Resume')) + self.performContainerAction(container_name, "Pause") + + # show all containers and check status + self.filter_containers('all') + + # Check that container details are not lost when the container is paused + b.click(".pf-m-expanded button:contains('Integration')") + b.wait_visible(f'#containers-containers tr:contains("{IMG_ALPINE}") dt:contains("Environment variables")') + + b.wait(lambda: self.getContainerAttr(container_name, "State") == "Paused") + b.wait_not_present(self.getContainerAction(container_name, 'Pause')) + self.performContainerAction(container_name, "Resume") + b.wait(lambda: self.getContainerAttr(container_name, "State") == "Running") + + def testRenameContainerSystem(self): + self._testRenameContainer(True) + + def testRenameContainerUser(self): + self._testRenameContainer(False) + + def _testRenameContainer(self, auth): + b = self.browser + container_name = "rename" + container_name_new = "rename-new" + + self.execute(auth, f"podman container create -t --name {container_name} {IMG_BUSYBOX}") + self.login(auth) + + self.filter_containers('all') + + self.waitContainerRow(container_name) + self.toggleExpandedContainer(container_name) + self.performContainerAction(container_name, "Rename") + + # the container name should be in the "Rename container" header + b.wait_in_text("#pf-modal-part-1", container_name) + b.set_input_text("#rename-dialog-container-name", "") + b.wait_in_text("#commit-dialog-image-name-helper", "Container name is required") + b.set_input_text("#rename-dialog-container-name", "banana???") + b.wait_in_text("#commit-dialog-image-name-helper", "Name can only contain letters, numbers") + + b.set_input_text("#rename-dialog-container-name", container_name_new) + 
b.click('#btn-rename-dialog-container') + b.wait_not_present("#rename-dialog-container-name") + + self.execute(auth, f"podman inspect --format '{{{{.Id}}}}' {container_name_new}").strip() + self.waitContainerRow(container_name_new) + + # rename using the enter key + self.toggleExpandedContainer(container_name_new) + self.performContainerAction(container_name_new, "Rename") + + container_name_new = "rename-new-enter" + b.set_input_text("#rename-dialog-container-name", "") + b.focus("#rename-dialog-container-name") + b.key_press("\r") # Simulate enter key + b.wait_in_text("#commit-dialog-image-name-helper", "Container name is required") + b.set_input_text("#rename-dialog-container-name", container_name_new) + b.focus("#rename-dialog-container-name") + b.key_press("\r") # Simulate enter key + b.wait_not_present("#rename-dialog-container-name") + + self.execute(auth, f"podman inspect --format '{{{{.Id}}}}' {container_name_new}").strip() + self.waitContainerRow(container_name_new) + + def testMultipleContainers(self): + self.login() + + # Create 31 containers + for i in range(31): + self.execute(True, f"podman run -dt --name container{i} --stop-timeout 0 {IMG_BUSYBOX}") + + self.waitContainerRow("container30") + + # Generic cleanup takes too long and timeouts, so remove these container manually one by one + for i in range(31): + self.execute(True, f"podman rm -f container{i}") + + def testSpecialContainers(self): + m = self.machine + b = self.browser + + toolbox_label = "com.github.containers.toolbox=true" + distrobox_label = "manager=distrobox" + + container_1_id = m.execute(f"podman run -d --name container_1 -l {toolbox_label} {IMG_BUSYBOX}").strip() + container_2_id = m.execute(f"podman run -d --name container_2 -l {distrobox_label} {IMG_BUSYBOX}").strip() + + self.login() + + self.waitContainerRow('container_1') + self.waitContainerRow('container_2') + + container_1_sel = f"#containers-containers tbody tr[data-row-id=\"{container_1_id}{'true'}\"]" + container_2_sel 
= f"#containers-containers tbody tr[data-row-id=\"{container_2_id}{'true'}\"]" + + b.wait_visible(container_1_sel + " .ct-badge-toolbox:contains('toolbox')") + b.wait_visible(container_2_sel + " .ct-badge-distrobox:contains('distrobox')") + + def testCreatePodSystem(self): + self._createPod(True) + + def testCreatePodUser(self): + self._createPod(False) + + def _createPod(self, auth): + b = self.browser + m = self.machine + pod_name = "testpod1" + + self.login(auth) + + b.click("#containers-containers-create-pod-btn") + b.set_input_text("#create-pod-dialog-name", "") + b.wait_visible(".pf-v5-c-modal-box__footer #create-pod-create-btn:disabled") + b.wait_in_text("#pod-name-group .pf-v5-c-helper-text__item-text", "Invalid characters") + + b.set_input_text("#create-pod-dialog-name", pod_name) + b.wait_visible(".pf-v5-c-modal-box__footer #create-pod-create-btn:not(:disabled)") + + b.click('.publish-port-form .btn-add') + b.set_input_text("#create-pod-dialog-publish-0-container-port-group input", "-1") + b.click(".pf-v5-c-modal-box__footer #create-pod-create-btn") + b.wait_in_text("#create-pod-dialog-publish-0-container-port-group .pf-v5-c-helper-text__item-text", + "1 to 65535") + b.click("#create-pod-dialog-publish-0-btn-close") + + if auth: + b.wait_visible("#create-pod-dialog-owner-system:checked") + else: + b.wait_not_present("#create-pod-dialog-owner-system") + + # Ports + b.click('.publish-port-form .btn-add') + b.set_input_text('#create-pod-dialog-publish-1-host-port', '6000') + b.set_input_text('#create-pod-dialog-publish-1-container-port', '5000') + b.click('.publish-port-form .btn-add') + b.set_input_text('#create-pod-dialog-publish-2-ip-address', '127.0.0.1') + b.set_input_text('#create-pod-dialog-publish-2-host-port', '6001') + b.set_input_text('#create-pod-dialog-publish-2-container-port', '5001') + b.set_val('#create-pod-dialog-publish-2-protocol', "udp") + b.click('.publish-port-form .btn-add') + 
b.set_input_text('#create-pod-dialog-publish-3-ip-address', '127.0.0.2') + b.set_input_text('#create-pod-dialog-publish-3-container-port', '9001') + + # Volumes + if self.machine.image not in ["ubuntu-2204"]: + b.click('.volume-form .btn-add') + rodir, rwdir = m.execute("mktemp; mktemp").split('\n')[:2] + m.execute(f"chown admin:admin {rodir}") + m.execute(f"chown admin:admin {rwdir}") + + if self.has_selinux: + b.set_val('#create-pod-dialog-volume-0-selinux', "z") + else: + b.wait_not_present('#create-pod-dialog-volume-0-selinux') + + b.set_file_autocomplete_val("#create-pod-dialog-volume-0 .pf-v5-c-select", rodir) + b.set_input_text('#create-pod-dialog-volume-0-container-path', '/tmp/ro') + b.click('.volume-form .btn-add') + + b.set_file_autocomplete_val("#create-pod-dialog-volume-1 .pf-v5-c-select", rwdir) + b.set_input_text('#create-pod-dialog-volume-1-container-path', '/tmp/rw') + + b.click("#create-pod-create-btn") + b.set_val("#containers-containers-filter", "all") + self.waitPodContainer(pod_name, []) + + container_name = 'test-pod-1-system' if auth else 'test-pod-1' + cmd = f"podman run -d --pod {pod_name} --name {container_name} --stop-timeout 0 {IMG_ALPINE} sleep 500" + containerId = self.execute(auth, cmd).strip() + self.waitPodContainer(pod_name, + [{"name": container_name, "image": IMG_ALPINE, + "command": "sleep 500", "state": "Running", "id": containerId}], auth) + + self.toggleExpandedContainer(container_name) + b.click(".pf-m-expanded button:contains('Integration')") + if self.machine.image not in ["ubuntu-2204"]: + b.wait_in_text('#containers-containers tr:contains("alpine") dt:contains("Volumes") + dd', + f"{rodir} \u2194 /tmp/ro") + b.wait_in_text('#containers-containers tr:contains("alpine") dt:contains("Volumes") + dd', + f"{rwdir} \u2194 /tmp/rw") + + b.wait_in_text('#containers-containers tr:contains("alpine") dt:contains("Ports") + dd', + '0.0.0.0:6000 \u2192 5000/tcp') + b.wait_in_text('#containers-containers tr:contains("alpine") 
dt:contains("Ports") + dd', + '127.0.0.1:6001 \u2192 5001/udp') + b.wait_in_text('#containers-containers tr:contains("alpine") dt:contains("Ports") + dd', + ' \u2192 9001/tcp') + + # Create pod as admin + if auth: + pod_name = 'testpod2' + b.click("#containers-containers-create-pod-btn") + b.set_input_text("#create-pod-dialog-name", pod_name) + b.click("#create-pod-dialog-owner-user") + b.click("#create-pod-create-btn") + + b.set_val("#containers-containers-filter", "all") + self.waitPodContainer(pod_name, []) + + @testlib.skipImage("passthrough log driver not supported", "ubuntu-2204") + def testLogErrors(self): + b = self.browser + container_name = "logissue" + self.login() + + self.execute(False, + f"podman run --log-driver=passthrough --name {container_name} -d {IMG_ALPINE} false " + kwargs.get('trace', cmd)) + + # avoid having to write the "client." prefix everywhere + cmd = "client." + cmd + res = self.command(cmd) + if trace: + if res and "result" in res: + print("<- " + repr(res["result"])) + else: + print("<- " + repr(res)) + return res + + def command(self, cmd): + if not self._driver: + self.start() + self._driver.stdin.write(cmd.encode("UTF-8")) + self._driver.stdin.write(b"\n") + self._driver.stdin.flush() + line = self._driver.stdout.readline().decode("UTF-8") + if not line: + self.kill() + raise RuntimeError("CDP broken") + try: + res = json.loads(line) + except ValueError: + print(line.strip()) + raise + + if "error" in res: + if self.trace: + print("<- raise %s" % str(res["error"])) + raise RuntimeError(res["error"]) + return res["result"] + + def claim_port(self, port): + f = None + try: + f = open(os.path.join(tempfile.gettempdir(), ".cdp-%i.lock" % port), "w") + fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB) + self._cdp_port_lockfile = f + return True + except (IOError, OSError): + if f: + f.close() + return False + + def find_cdp_port(self): + """Find an unused port and claim it through lock file""" + + for _ in range(100): + # don't use the 
default CDP port 9222 to avoid interfering with running browsers + port = random.randint(9223, 10222) + if self.claim_port(port): + return port + + raise RuntimeError("unable to find free port") + + def start(self): + environ = os.environ.copy() + if self.lang: + environ["LC_ALL"] = self.lang + self.cur_frame = None + + # allow attaching to external browser + cdp_port = None + if "TEST_CDP_PORT" in os.environ: + p = int(os.environ["TEST_CDP_PORT"]) + if self.claim_port(p): + # can fail when a test starts multiple browsers; only show the first one + cdp_port = p + + if not cdp_port: + # start browser on a new port + cdp_port = self.find_cdp_port() + self._browser_home = tempfile.mkdtemp() + environ = os.environ.copy() + environ["HOME"] = self._browser_home + environ["LC_ALL"] = "C.UTF-8" + # this might be set for the tests themselves, but we must isolate caching between tests + try: + del environ["XDG_CACHE_HOME"] + except KeyError: + pass + + cmd = self.browser.cmd(cdp_port, environ, self.show_browser, + self._browser_home, self.download_dir) + + # sandboxing does not work in Docker container + self._browser = subprocess.Popen( + cmd, env=environ, close_fds=True, + preexec_fn=lambda: resource.setrlimit(resource.RLIMIT_CORE, (0, 0))) + if self.verbose: + sys.stderr.write("Started %s (pid %i) on port %i\n" % (cmd[0], self._browser.pid, cdp_port)) + + # wait for CDP to be up and have at least one target + for _ in range(120): + try: + res = urllib.request.urlopen(f"http://127.0.0.1:{cdp_port}/json/list", timeout=5) + if res.getcode() == 200 and json.loads(res.read()): + break + except URLError: + pass + time.sleep(0.5) + else: + raise RuntimeError('timed out waiting for browser to start') + + # now start the driver + if self.trace: + # enable frame/execution context debugging if tracing is on + environ["TEST_CDP_DEBUG"] = "1" + + self._driver = subprocess.Popen([self.browser.CDP_DRIVER_FILENAME, str(cdp_port)], + env=environ, + stdout=subprocess.PIPE, + 
stdin=subprocess.PIPE, + close_fds=True) + self.valid = True + + for inject in self.inject_helpers: + with open(inject) as f: + src = f.read() + # HACK: injecting sizzle fails on missing `document` in assert() + src = src.replace('function assert( fn ) {', 'function assert( fn ) { if (true) return true; else ') + # HACK: sizzle tracks document and when we switch frames, it sees the old document + # although we execute it in different context. + src = src.replace('context = context || document;', 'context = context || window.document;') + self.invoke("Page.addScriptToEvaluateOnNewDocument", source=src, no_trace=True) + + if self.start_profile: + self.invoke("Profiler.enable") + self.invoke("Profiler.startPreciseCoverage", callCount=False, detailed=True) + + def kill(self): + self.valid = False + self.cur_frame = None + if self._driver: + self._driver.stdin.close() + self._driver.wait() + self._driver = None + + shutil.rmtree(self.download_dir, ignore_errors=True) + + if self._browser: + if self.verbose: + sys.stderr.write("Killing browser (pid %i)\n" % self._browser.pid) + try: + self._browser.terminate() + except OSError: + pass # ignore if it crashed for some reason + self._browser.wait() + self._browser = None + shutil.rmtree(self._browser_home, ignore_errors=True) + os.remove(self._cdp_port_lockfile.name) + self._cdp_port_lockfile.close() + + def set_frame(self, frame): + self.cur_frame = frame + if self.trace: + print("-> switch to frame %s" % frame) + + def get_js_log(self): + """Return the current javascript console log""" + + if self.valid: + # needs to be wrapped in Promise + messages = self.command("Promise.resolve(messages)") + return ["%s: %s" % tuple(m) for m in messages] + return [] + + def read_log(self): + """Returns an iterator that produces log messages one by one. 
+ + Blocks if there are no new messages right now.""" + + if not self.valid: + yield [] + return + + while True: + messages = self.command("waitLog()") + for m in messages: + yield m diff --git a/test/common/chromium-cdp-driver.js b/test/common/chromium-cdp-driver.js new file mode 100755 index 0000000..0d7a597 --- /dev/null +++ b/test/common/chromium-cdp-driver.js @@ -0,0 +1,332 @@ +#!/usr/bin/env node + +/* + * This file is part of Cockpit. + * + * Copyright (C) 2017 Red Hat, Inc. + * + * Cockpit is free software; you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation; either version 2.1 of the License, or + * (at your option) any later version. + * + * Cockpit is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with Cockpit; If not, see . + */ + +/* chromium-cdp-driver -- A command-line JSON input/output wrapper around + * chrome-remote-interface (Chrome Debug Protocol). + * See https://chromedevtools.github.io/devtools-protocol/ + * This needs support for protocol version 1.3. + * + * Set $TEST_CDP_DEBUG environment variable to enable additional + * frame/execution context debugging. 
+ */ + +import * as readline from 'node:readline/promises'; +import CDP from 'chrome-remote-interface'; + +let enable_debug = false; + +function debug(msg) { + if (enable_debug) + process.stderr.write("CDP: " + msg + "\n"); +} + +/** + * Format response to the client + */ + +function fail(err) { + if (typeof err === 'undefined') + err = null; + process.stdout.write(JSON.stringify({ error: err }) + '\n'); +} + +function success(result) { + if (typeof result === 'undefined') + result = null; + process.stdout.write(JSON.stringify({ result }) + '\n'); +} + +/** + * Record console.*() calls and Log messages so that we can forward them to + * stderr and dump them on test failure + */ +const messages = []; +let logPromiseResolver; +let nReportedLogMessages = 0; +const unhandledExceptions = []; + +function clearExceptions() { + unhandledExceptions.length = 0; + return Promise.resolve(); +} + +function stringifyConsoleArg(arg) { + try { + if (arg.type === 'string') + return arg.value; + if (arg.type === 'number') + return arg.value; + if (arg.type === 'undefined') + return "undefined"; + if (arg.value === null) + return "null"; + if (arg.type === 'object' && arg.preview?.properties) { + const obj = {}; + arg.preview.properties.forEach(prop => { + obj[prop.name] = prop.value.toString(); + }); + return JSON.stringify(obj); + } + return JSON.stringify(arg); + } catch (error) { + return "[error stringifying argument: " + error.toString() + "]"; + } +} + +function setupLogging(client) { + client.Runtime.enable(); + + client.Runtime.consoleAPICalled(info => { + const msg = info.args.map(stringifyConsoleArg).join(" "); + messages.push([info.type, msg]); + process.stderr.write("> " + info.type + ": " + msg + "\n"); + + resolveLogPromise(); + }); + + client.Runtime.exceptionThrown(info => { + const details = info.exceptionDetails; + // don't log test timeouts, they already get handled + if (details.exception && details.exception.className === "PhWaitCondTimeout") + return; + + 
process.stderr.write(details.description || JSON.stringify(details) + "\n"); + + unhandledExceptions.push(details.exception.message || + details.exception.description || + details.exception.value || + JSON.stringify(details.exception)); + }); + + client.Log.enable(); + client.Log.entryAdded(entry => { + const msg = entry.entry; + + messages.push(["cdp", msg]); + /* Ignore authentication failure log lines that don't denote failures */ + if (!(msg.url || "").endsWith("/login") || (msg.text || "").indexOf("401") === -1) { + process.stderr.write("CDP: " + JSON.stringify(msg) + "\n"); + } + resolveLogPromise(); + }); +} + +/** + * Resolve the log promise created with waitLog(). + */ +function resolveLogPromise() { + if (logPromiseResolver) { + logPromiseResolver(messages.slice(nReportedLogMessages)); + nReportedLogMessages = messages.length; + logPromiseResolver = undefined; + } +} + +/** + * Returns a promise that resolves when log messages are available. If there + * are already some unreported ones in the global messages variable, resolves + * immediately. + * + * Only one such promise can be active at a given time. Once the promise is + * resolved, this function can be called again to wait for further messages. + */ +function waitLog() { // eslint-disable-line no-unused-vars + console.assert(logPromiseResolver === undefined); + + return new Promise((resolve, reject) => { + logPromiseResolver = resolve; + + if (nReportedLogMessages < messages.length) + resolveLogPromise(); + }); +} + +/** + * Frame tracking + * + * For tests to be able to select the current frame (by its name) and make + * subsequent queries apply to that, we need to track frame name → frameId → + * executionContextId. Frame and context IDs can even change through page + * operations (e. g. in systemd/logs.js when reporting a crash is complete), + * so we also need a helper function to explicitly wait for a particular frame + * to load. 
This is very laborious, see this issue for discussing improvements: + * https://github.com/ChromeDevTools/devtools-protocol/issues/72 + */ +const frameIdToContextId = {}; +const frameNameToFrameId = {}; + +let pageLoadHandler = null; + +function setupFrameTracking(client) { + client.Page.enable(); + + // map frame names to frame IDs; root frame has no name, no need to track that + client.Page.frameNavigated(info => { + debug("frameNavigated " + JSON.stringify(info)); + frameNameToFrameId[info.frame.name || "cockpit1"] = info.frame.id; + }); + + client.Page.loadEventFired(() => { + if (pageLoadHandler) { + debug("loadEventFired, resolving pageLoadHandler"); + pageLoadHandler(); + } + }); + + // track execution contexts so that we can map between context and frame IDs + client.Runtime.executionContextCreated(info => { + debug("executionContextCreated " + JSON.stringify(info)); + frameIdToContextId[info.context.auxData.frameId] = info.context.id; + }); + + client.Runtime.executionContextDestroyed(info => { + debug("executionContextDestroyed " + info.executionContextId); + for (const frameId in frameIdToContextId) { + if (frameIdToContextId[frameId] == info.executionContextId) { + delete frameIdToContextId[frameId]; + break; + } + } + }); +} + +function setupLocalFunctions(client) { + client.waitPageLoad = (args) => new Promise((resolve, reject) => { + const timeout = setTimeout(() => { + pageLoadHandler = null; + reject("Timeout waiting for page load"); // eslint-disable-line prefer-promise-reject-errors + }, (args.timeout ?? 
15) * 1000); + pageLoadHandler = () => { + clearTimeout(timeout); + pageLoadHandler = null; + resolve({}); + }; + }); + + client.reloadPageAndWait = (args) => new Promise((resolve, reject) => { + pageLoadHandler = () => { pageLoadHandler = null; resolve({}) }; + client.Page.reload(args); + }); + + async function setCSS({ text, frame }) { + await client.DOM.enable(); + await client.CSS.enable(); + const id = (await client.CSS.createStyleSheet({ frameId: frameNameToFrameId[frame] })).styleSheetId; + await client.CSS.setStyleSheetText({ + styleSheetId: id, + text + }); + } + + client.setCSS = setCSS; +} + +// helper functions for testlib.py which are too unwieldy to be poked in from Python + +// eslint-disable-next-line no-unused-vars +const getFrameExecId = frame => frameIdToContextId[frameNameToFrameId[frame ?? "cockpit1"]]; + +/** + * SSL handling + */ + +// secure by default; tests can override to "continue" +// https://chromedevtools.github.io/devtools-protocol/1-3/Security/#type-CertificateErrorAction +let ssl_bad_certificate_action = "cancel"; + +/** + * Change what happens when the browser opens a page with an invalid SSL certificate. + * Defaults to "cancel", can be set to "continue". 
+ */ +function setSSLBadCertificateAction(action) { // eslint-disable-line no-unused-vars + ssl_bad_certificate_action = action; + return Promise.resolve(); +} + +function setupSSLCertHandling(client) { + client.Security.enable(); + + client.Security.setOverrideCertificateErrors({ override: true }); + client.Security.certificateError(info => { + process.stderr.write(`CDP: Security.certificateError ${JSON.stringify(info)}; action: ${ssl_bad_certificate_action}\n`); + client.Security.handleCertificateError({ eventId: info.eventId, action: ssl_bad_certificate_action }) + .catch(ex => { + // some race condition in Chromium, ok if the event is already gone + if (ex.response && ex.response.message && ex.response.message.indexOf("Unknown event id") >= 0) + debug(`setupSSLCertHandling for event ${info.eventId} failed, ignoring: ${JSON.stringify(ex.response)}`); + else + throw ex; + }); + }); +} + +/** + * Main input/process loop + * + * Read one line with a JS expression, eval() it, and respond with the result: + * success + * fail + * EOF shuts down the client. 
+ */ +async function main() { + process.stdin.setEncoding('utf8'); + + if (process.env.TEST_CDP_DEBUG) + enable_debug = true; + + const options = { }; + if (process.argv.length >= 3) { + options.port = parseInt(process.argv[2]); + if (!options.port) { + process.stderr.write("Usage: chromium-cdp-driver.js [port]\n"); + process.exit(1); + } + } + + const target = await CDP.New(options); + target.port = options.port; + const client = await CDP({ target }); + setupLogging(client); + setupFrameTracking(client); + setupSSLCertHandling(client); + setupLocalFunctions(client); + + for await (const command of readline.createInterface(process.stdin)) { + try { + const reply = await eval(command); // eslint-disable-line no-eval + if (unhandledExceptions.length === 0) { + success(reply); + } else { + const message = unhandledExceptions[0]; + fail(message.split("\n")[0]); + clearExceptions(); + } + } catch (err) { + fail(err); + } + } + await CDP.Close(target); +} + +main().catch(err => { + console.error(err); + process.exit(1); +}); diff --git a/test/common/firefox-cdp-driver.js b/test/common/firefox-cdp-driver.js new file mode 100755 index 0000000..f5b2bf8 --- /dev/null +++ b/test/common/firefox-cdp-driver.js @@ -0,0 +1,392 @@ +#!/usr/bin/env node + +/* + * This file is part of Cockpit. + * + * Copyright (C) 2019 Red Hat, Inc. + * + * Cockpit is free software; you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation; either version 2.1 of the License, or + * (at your option) any later version. + * + * Cockpit is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with Cockpit; If not, see . 
+ */ + +/* firefox-cdp-driver -- A command-line JSON input/output wrapper around + * chrome-remote-interface (Chrome Debug Protocol). + * See https://chromedevtools.github.io/devtools-protocol/ + * This needs support for protocol version 1.3. + * + * Set $TEST_CDP_DEBUG environment variable to enable additional + * frame/execution context debugging. + */ + +import * as readline from 'readline'; +import CDP from 'chrome-remote-interface'; + +let enable_debug = false; + +function debug(msg) { + if (enable_debug) + process.stderr.write("CDP: " + msg + "\n"); +} + +/** + * Format response to the client + */ + +function fatal() { + console.error.apply(console.error, arguments); + process.exit(1); +} + +// We keep sequence numbers so that we never get the protocol out of +// synch with re-ordered or duplicate replies. This only matters for +// duplicate replies due to destroyed contexts, but that is already so +// hairy that this big hammer seems necessary. + +let cur_cmd_seq = 0; +let next_reply_seq = 1; + +function fail(seq, err) { + if (seq != next_reply_seq) + return; + next_reply_seq++; + + if (typeof err === 'undefined') + err = null; + process.stdout.write(JSON.stringify({ error: err }) + '\n'); +} + +function success(seq, result) { + if (seq != next_reply_seq) + return; + next_reply_seq++; + + if (typeof result === 'undefined') + result = null; + process.stdout.write(JSON.stringify({ result }) + '\n'); +} + +/** + * Record console.*() calls and Log messages so that we can forward them to + * stderr and dump them on test failure + */ +const messages = []; +let logPromiseResolver; +let nReportedLogMessages = 0; +const unhandledExceptions = []; + +function clearExceptions() { + unhandledExceptions.length = 0; + return Promise.resolve(); +} + +function stringifyConsoleArg(arg) { + if (arg.type === 'string') + return arg.value; + if (arg.type === 'object') + return JSON.stringify(arg.value); + return JSON.stringify(arg); +} + +function setupLogging(client) { + 
client.Runtime.enable(); + + client.Runtime.consoleAPICalled(info => { + const msg = info.args.map(stringifyConsoleArg).join(" "); + messages.push([info.type, msg]); + process.stderr.write("> " + info.type + ": " + msg + "\n"); + + resolveLogPromise(); + }); + + function processException(info) { + let details = info.exceptionDetails; + if (details.exception) + details = details.exception; + + // don't log test timeouts, they already get handled + if (details.className === "PhWaitCondTimeout") + return; + + process.stderr.write(details.description || details.text || JSON.stringify(details) + "\n"); + + unhandledExceptions.push(details.message || + details.description || + details.value || + JSON.stringify(details)); + } + + client.Runtime.exceptionThrown(info => processException(info)); + + client.Log.enable(); + client.Log.entryAdded(entry => { + // HACK: Firefox does not implement `Runtime.exceptionThrown` but logs it + // Lets parse it to have at least some basic check that code did not throw + // exception + // https://bugzilla.mozilla.org/show_bug.cgi?id=1549528 + + const msg = entry.entry; + let text = msg.text; + if (typeof text !== "string") + if (text[0] && typeof text[0] === "string") + text = text[0]; + + if (msg.stackTrace !== undefined && + typeof text === "string" && + text.indexOf("Error: ") !== -1) { + const trace = text.split(": ", 1); + processException({ + exceptionDetails: { + exception: { + className: trace[0], + message: trace.length > 1 ? trace[1] : "", + stacktrace: msg.stackTrace, + entry: msg, + }, + } + }); + } else { + messages.push(["cdp", msg]); + /* Ignore authentication failure log lines that don't denote failures */ + if (!(msg.url || "").endsWith("/login") || (text || "").indexOf("401") === -1) { + process.stderr.write("CDP: " + JSON.stringify(msg) + "\n"); + } + resolveLogPromise(); + } + }); +} + +/** + * Resolve the log promise created with waitLog(). 
+ */ +function resolveLogPromise() { + if (logPromiseResolver) { + logPromiseResolver(messages.slice(nReportedLogMessages)); + nReportedLogMessages = messages.length; + logPromiseResolver = undefined; + } +} + +/** + * Returns a promise that resolves when log messages are available. If there + * are already some unreported ones in the global messages variable, resolves + * immediately. + * + * Only one such promise can be active at a given time. Once the promise is + * resolved, this function can be called again to wait for further messages. + */ +function waitLog() { // eslint-disable-line no-unused-vars + console.assert(logPromiseResolver === undefined); + + return new Promise((resolve, reject) => { + logPromiseResolver = resolve; + + if (nReportedLogMessages < messages.length) + resolveLogPromise(); + }); +} + +/** + * Frame tracking + * + * For tests to be able to select the current frame (by its name) and make + * subsequent queries apply to that, we need to track frame name → frameId → + * executionContextId. Frame and context IDs can even change through page + * operations (e. g. in systemd/logs.js when reporting a crash is complete), + * so we also need a helper function to explicitly wait for a particular frame + * to load. 
This is very laborious, see this issue for discussing improvements: + * https://github.com/ChromeDevTools/devtools-protocol/issues/72 + */ +const scriptsOnNewContext = []; +const frameIdToContextId = {}; +const frameNameToFrameId = {}; + +let pageLoadHandler = null; +let currentExecId = null; + +function setupFrameTracking(client) { + client.Page.enable(); + + // map frame names to frame IDs; root frame has no name, no need to track that + client.Page.frameNavigated(info => { + if (info.frame?.url?.startsWith("about:")) { + debug("frameNavigated: ignoring about: frame " + JSON.stringify(info)); + return; + } + debug("frameNavigated " + JSON.stringify(info)); + frameNameToFrameId[info.frame.name || "cockpit1"] = info.frame.id; + }); + + client.Page.loadEventFired(() => { + if (pageLoadHandler) { + debug("loadEventFired, resolving pageLoadHandler"); + pageLoadHandler(); + } + }); + + // track execution contexts so that we can map between context and frame IDs + client.Runtime.executionContextCreated(info => { + debug("executionContextCreated " + JSON.stringify(info)); + frameIdToContextId[info.context.auxData.frameId] = info.context.id; + scriptsOnNewContext.forEach(s => { + client.Runtime.evaluate({ expression: s, contextId: info.context.id }) + .catch(ex => { + // race condition with short-lived frames -- OK if the frame is already gone + if (ex.response && ex.response.message && ex.response.message.indexOf("Cannot find context") >= 0) + debug(`scriptsOnNewContext for context ${info.context.id} failed, ignoring: ${JSON.stringify(ex.response)}`); + else + throw ex; + }); + }); + }); + + client.Runtime.executionContextDestroyed(info => { + debug("executionContextDestroyed " + info.executionContextId); + for (const frameId in frameIdToContextId) { + if (frameIdToContextId[frameId] == info.executionContextId) { + delete frameIdToContextId[frameId]; + break; + } + } + + // Firefox does not report an error when the execution context + // of a Runtime.evaluate call gets 
destroyed. It will never + // ever resolve or be rejected. So let's provide the failure + // reply from here. + // + // However, if the timing is just right, the context gets + // destroyed before Runtime.evaluate has started the real + // processing, and in that case it will return an error. Then + // we would send the reply here, and would also send the + // error. This would drive the protocol out of synch. Also, our driver + // might immediately send more commands after seeing the first reply, + // and the unwanted second reply might be triggered in the middle of one + // of the next commands. To reliably suppress the second reply we have + // the pretty general sequence number checks. + // + if (info.executionContextId == currentExecId) { + currentExecId = null; + fail(cur_cmd_seq, { response: { message: "Execution context was destroyed." } }); + } + }); +} + +function setupLocalFunctions(client) { + client.waitPageLoad = (args) => new Promise((resolve, reject) => { + const timeout = setTimeout(() => { + pageLoadHandler = null; + reject("Timeout waiting for page load"); // eslint-disable-line prefer-promise-reject-errors + }, 15000); + pageLoadHandler = () => { + clearTimeout(timeout); + pageLoadHandler = null; + resolve({}); + }; + }); + + client.reloadPageAndWait = (args) => new Promise((resolve, reject) => { + pageLoadHandler = () => { pageLoadHandler = null; resolve({}) }; + client.Page.reload(args); + }); +} + +// helper functions for testlib.py which are too unwieldy to be poked in from Python +function getFrameExecId(frame) { // eslint-disable-line no-unused-vars + const frameId = frameNameToFrameId[frame || "cockpit1"]; + const execId = frameIdToContextId[frameId]; + if (execId !== undefined) + currentExecId = execId; + else + debug(`WARNING: getFrameExecId: frame ${frame} ID ${frameId} has no known execution context`); + return execId; +} + +/** + * Main input/process loop + * + * Read one line with a JS expression, eval() it, and respond with the 
result: + * success + * fail + * EOF shuts down the client. + */ +process.stdin.setEncoding('utf8'); + +if (process.env.TEST_CDP_DEBUG) + enable_debug = true; + +const options = { }; +if (process.argv.length >= 3) { + options.port = parseInt(process.argv[2]); + if (!options.port) { + process.stderr.write("Usage: firefox-cdp-driver.js [port]\n"); + process.exit(1); + } +} + +// HACK: `addScriptToEvaluateOnNewDocument` is not implemented in Firefox +// thus save all scripts in array and on each new context just execute these +// scripts in them +// https://bugzilla.mozilla.org/show_bug.cgi?id=1549465 +function addScriptToEvaluateOnNewDocument(script) { // eslint-disable-line no-unused-vars + return new Promise((resolve, reject) => { + scriptsOnNewContext.push(script.source); + resolve(); + }); +} + +// This should work on different targets (meaning tabs) +// CDP takes {target:target} so we can pick target +// Problem is that CDP.New() which creates new target works only for chrome/ium +// But we should be able to use CPD.List to list all targets and then pick one +// Firefox just gives them ascending numbers, so we can pick the one with highest number +// and if we feel fancy we can check that url is `about:newtab`. +// That still though does not create new tab - but we can just call `firefox about:blank` +// from cdline and since firefox would open it in the same browser, it should work. +// This would work just fine in CI (as there would be only one browser) but on our machines it may +// pick a wrong window (no idea if they can be somehow distinguish and execute it in a specific +// one). 
But I guess we can live with it (and it seems it picks the last opened window anyway, +// so having your own browser running should not interfere) +// +// Just calling executable to open another tab in the same browser works also for chromium, so +// should be fine +CDP(options) + .then(client => { + setupLogging(client); + setupFrameTracking(client); + setupLocalFunctions(client); + // TODO: Security handling not yet supported in Firefox + + readline.createInterface(process.stdin) + .on('line', command => { + // HACKS: See description of related functions + if (command.startsWith("client.Page.addScriptToEvaluateOnNewDocument")) + command = command.substring(12); + + // run the command + const seq = ++cur_cmd_seq; + eval(command).then(reply => { // eslint-disable-line no-eval + currentExecId = null; + if (unhandledExceptions.length === 0) { + success(seq, reply); + } else { + const message = unhandledExceptions[0]; + fail(seq, message.split("\n")[0]); + clearExceptions(); + } + }, err => { + currentExecId = null; + fail(seq, err); + }); + }) + .on('close', () => process.exit(0)); + }) + .catch(fatal); diff --git a/test/common/git-utils.sh b/test/common/git-utils.sh new file mode 100644 index 0000000..dc0767d --- /dev/null +++ b/test/common/git-utils.sh @@ -0,0 +1,150 @@ +# shellcheck shell=sh +# doesn't do anything on its own. must be sourced. + +# The script which sources this script must set the following variables: +# GITHUB_REPO = the relative repo name of the submodule on github +# SUBDIR = the location in the working tree where the submodule goes +# We also expect `set -eu`, but set them ourselves for shellcheck. 
+set -eu +[ -n "${GITHUB_REPO}" ] +[ -n "${SUBDIR}" ] + +# Set by git-rebase for spawned actions +unset GIT_DIR GIT_EXEC_PATH GIT_PREFIX GIT_REFLOG_ACTION GIT_WORK_TREE + +GITHUB_BASE="${GITHUB_BASE:-cockpit-project/cockpit}" +GITHUB_REPOSITORY="${GITHUB_BASE%/*}/${GITHUB_REPO}" +HTTPS_REMOTE="https://github.com/${GITHUB_REPOSITORY}" +# shellcheck disable=SC2034 # used in other scripts +SSH_REMOTE="git@github.com:${GITHUB_REPOSITORY}" + +CACHE_DIR="${XDG_CACHE_HOME-${HOME}/.cache}/cockpit-dev/${GITHUB_REPOSITORY}.git" + +if [ "${V-}" = 0 ]; then + message() { printf " %-8s %s\n" "$1" "$2" >&2; } + quiet='--quiet' +else + message() { :; } + quiet='' +fi + +init_cache() { + if [ ! -d "${CACHE_DIR}" ]; then + message INIT "${CACHE_DIR}" + mkdir -p "${CACHE_DIR}" + git init --bare --template='' ${quiet} "${CACHE_DIR}" + git --git-dir "${CACHE_DIR}" remote add origin "${HTTPS_REMOTE}" + fi +} + +# runs a git command on the cache dir +git_cache() { + init_cache + git --git-dir "${CACHE_DIR}" "$@" +} + +# reads the named gitlink from the current state of the index +# returns (ie: prints) a 40-character commit ID +get_index_gitlink() { + if ! git ls-files -s "$1" | grep -E -o '\<[[:xdigit:]]{40}\>'; then + echo "*** couldn't read gitlink for file $1 from the index" >&2 + exit 1 + fi +} + +# This checks if the given argument "$1" (already) exists in the repository +# we use git rev-list --objects to to avoid problems with incomplete fetches: +# we want to make sure the complete commit is there +check_ref() { + git_cache rev-list --quiet --objects "$1" -- 2>/dev/null +} + +# Fetch a specific commit ID into the cache +# Either we have this commit available locally (in which case this function +# does nothing), or we need to fetch it. There's no chance that the object +# changed on the server, because we define it by its checksum. +fetch_sha_to_cache() { + sha="$1" + + # No "offline mode" here: we either have the commit, or we don't + if ! 
check_ref "${sha}"; then + message FETCH "${SUBDIR} [ref: ${sha}]" + git_cache fetch --no-tags ${quiet} origin "${sha}" + # tag it to keep it from being GC'd. + git_cache tag "sha-${sha}" "${sha}" + fi +} + +# General purpose "fetch" function to be used with tags, refs, or nothing at +# all (to fetch everything). This checks the server for updates, because all +# of those things might change at any given time. Supports an "offline" mode +# to skip the fetch and use the possibly-stale local version, if we have it. +fetch_to_cache() { + # We're fetching a named ref (or all refs), which means: + # - we should always do the fetch because it might have changed. but + # - we might be able to skip updating in case we already have it + if [ -z "${OFFLINE-}" ]; then + for retry in $(seq 3); do + message FETCH "${SUBDIR} ${1+[ref: $*]}" + if git_cache fetch --prune ${quiet} origin "$@"; then + return + fi + sleep $((retry * retry * 5)) + done + echo "repeated git fetch failure, giving up" >&2 + exit 1 + fi +} + +# Get the content of "$2" from cache commit "$1" +cat_from_cache() { + git_cache cat-file blob "$1:$2" +} + +# Consistency checking: for a given cache commit "$1", check if it contains a +# file "$2" which is equal to the file "$3" present in the working tree. +cmp_from_cache() { + cat_from_cache "$1" "$2" | cmp "$3" +} + +# Like `git clone` except that it uses the original origin url and supports +# checking out commit IDs as detached heads. The target directory must either +# be empty, or not exist. +clone_from_cache() { + message CLONE "${SUBDIR} [ref: $1]" + [ ! -e "${SUBDIR}" ] || rmdir "${SUBDIR}" + mkdir "${SUBDIR}" + cp -a --reflink=auto "${CACHE_DIR}" "${SUBDIR}/.git" + git --git-dir "${SUBDIR}/.git" config --unset core.bare + git -c advice.detachedHead=false -C "${SUBDIR}" checkout ${quiet} "$1" +} + +# This stores a .tar file from stdin into the cache as a tree object. +# Returns the ID. Opposite of `git archive`, basically. 
+tar_to_cache() { + # Need to do this before we set the GIT_* variables + init_cache + + # Use a sub-shell to enable cleanup of the temporary directory + ( + tmpdir="$(mktemp --tmpdir --directory cockpit-tar-to-git.XXXXXX)" + # shellcheck disable=SC2064 # we want ${tmpdir} expanded now + trap "rm -r '${tmpdir}'" EXIT + + export GIT_INDEX_FILE="${tmpdir}/tmp-index" + export GIT_WORK_TREE="${tmpdir}/work" + + mkdir "${GIT_WORK_TREE}" + cd "${GIT_WORK_TREE}" + + tar --extract --exclude '.git*' + message INDEX "${SUBDIR}" + git_cache add --all + git_cache write-tree + ) +} + + # Small helper to run a git command on the cache directory +cmd_git() { + git_cache "$@" +} diff --git a/test/common/lcov.py b/test/common/lcov.py new file mode 100755 index 0000000..7d2fec5 --- /dev/null +++ b/test/common/lcov.py @@ -0,0 +1,505 @@ +#!/usr/bin/python3 -cimport os, sys; os.execv(os.path.dirname(sys.argv[1]) + "/pywrap", sys.argv) + +# This file is part of Cockpit. +# +# Copyright (C) 2022 Red Hat, Inc. +# +# Cockpit is free software; you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. +# +# Cockpit is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with Cockpit; If not, see . + +# This module can convert profile data from CDP to LCOV, produce a +# HTML report, and post review comments. 
+# +# - write_lcov (coverage_data, outlabel) +# - create_coverage_report() + +import glob +import gzip +import itertools +import json +import os +import re +import shutil +import subprocess +import sys +from bisect import bisect_left + +from task import github + +BASE_DIR = os.path.realpath(f'{__file__}/../../..') + +debug = False + +# parse_vlq and parse_sourcemap are based on +# https://github.com/mattrobenolt/python-sourcemap, licensed with +# "BSD-2-Clause License" + +# Mapping of base64 letter -> integer value. +B64 = {c: i for i, c in + enumerate('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' + '0123456789+/')} + + +def parse_vlq(segment): + """Parse a string of VLQ-encoded data. + Returns: + a list of integers. + """ + + values = [] + + cur, shift = 0, 0 + for c in segment: + val = B64[c] + # Each character is 6 bits: + # 5 of value and the high bit is the continuation. + val, cont = val & 0b11111, val >> 5 + cur += val << shift + shift += 5 + + if not cont: + # The low bit of the unpacked value is the sign. 
+ cur, sign = cur >> 1, cur & 1 + if sign: + cur = -cur + values.append(cur) + cur, shift = 0, 0 + + if cur or shift: + raise Exception('leftover cur/shift in vlq decode') + + return values + + +def parse_sourcemap(f, line_starts, dir_name): + smap = json.load(f) + sources = smap['sources'] + mappings = smap['mappings'] + lines = mappings.split(';') + + our_map = [] + + our_sources = set() + for s in sources: + if "node_modules" not in s and (s.endswith(('.js', '.jsx'))): + our_sources.add(s) + + dst_col, src_id, src_line = 0, 0, 0 + for dst_line, line in enumerate(lines): + segments = line.split(',') + dst_col = 0 + for segment in segments: + if not segment: + continue + parse = parse_vlq(segment) + dst_col += parse[0] + + src = None + if len(parse) > 1: + src_id += parse[1] + src = sources[src_id] + src_line += parse[2] + + if src in our_sources: + norm_src = os.path.normpath(os.path.join(dir_name, src)) + our_map.append((line_starts[dst_line] + dst_col, norm_src, src_line)) + + return our_map + + +class DistFile: + def __init__(self, path): + line_starts = [0] + with open(path, newline='') as f: + for line in f.readlines(): + line_starts.append(line_starts[-1] + len(line)) + with open(path + ".map") as f: + self.smap = parse_sourcemap(f, line_starts, os.path.relpath(os.path.dirname(path), BASE_DIR)) + + def find_sources_slow(self, start, end): + res = [] + for m in self.smap: + if m[0] >= start and m[0] < end: + res.append(m) + return res + + def find_sources(self, start, end): + res = [] + i = bisect_left(self.smap, start, key=lambda m: m[0]) + while i < len(self.smap) and self.smap[i][0] < end: + res.append(self.smap[i]) + i += 1 + if debug and res != self.find_sources_slow(start, end): + raise RuntimeError("Bug in find_sources") + return res + + +def get_dist_map(package): + dmap = {} + for manifest_json in glob.glob(f"{BASE_DIR}/dist/*/manifest.json") + glob.glob(f"{BASE_DIR}/dist/manifest.json"): + with open(manifest_json) as f: + m = json.load(f) + if 
"name" in m: + dmap[m["name"]] = os.path.dirname(manifest_json) + elif manifest_json == f"{BASE_DIR}/dist/manifest.json": + if "name" in package: + dmap[package["name"]] = os.path.dirname(manifest_json) + return dmap + + +def get_distfile(url, dist_map): + parts = url.split("/") + if len(parts) < 3 or "cockpit" not in parts: + return None + + base = parts[-2] + file = parts[-1] + if file == "manifests.js": + return None + if base in dist_map: + path = dist_map[base] + "/" + file + else: + path = f"{BASE_DIR}/dist/" + base + "/" + file + if os.path.exists(path) and os.path.exists(path + ".map"): + return DistFile(path) + else: + sys.stderr.write(f"SKIP {url} -> {path}\n") + return None + + +def grow_array(arr, size, val): + if len(arr) < size: + arr.extend([val] * (size - len(arr))) + + +def record_covered(file_hits, src, line, hits): + if src in file_hits: + line_hits = file_hits[src] + else: + line_hits = [] + grow_array(line_hits, line + 1, None) + line_hits[line] = hits + file_hits[src] = line_hits + + +def record_range(file_hits, r, distfile): + sources = distfile.find_sources(r['startOffset'], r['endOffset']) + for src in sources: + record_covered(file_hits, src[1], src[2], r['count']) + + +def merge_hits(file_hits, hits): + for src in hits: + if src not in file_hits: + file_hits[src] = hits[src] + else: + lines = file_hits[src] + merge_lines = hits[src] + grow_array(lines, len(merge_lines), None) + for i in range(len(merge_lines)): + if lines[i] is None: + lines[i] = merge_lines[i] + elif merge_lines[i] is not None: + lines[i] += merge_lines[i] + + +def print_file_coverage(path, line_hits, out): + lines_found = 0 + lines_hit = 0 + src = f"{BASE_DIR}/{path}" + out.write(f"SF:{src}\n") + for i in range(len(line_hits)): + if line_hits[i] is not None: + lines_found += 1 + out.write(f"DA:{i + 1},{line_hits[i]}\n") + if line_hits[i] > 0: + lines_hit += 1 + out.write(f"LH:{lines_hit}\n") + out.write(f"LF:{lines_found}\n") + out.write("end_of_record\n") + + +class 
DiffMap: + # Parse a unified diff and make a index for the added lines + def __init__(self, diff): + self.map = {} + self.source_map = {} + plus_name = None + diff_line = 0 + with open(diff) as f: + for line in f.readlines(): + diff_line += 1 + if line.startswith("+++ /dev/null"): + # removed file, only `^-` following after that until the next hunk + continue + elif line.startswith("+++ b/"): + plus_name = os.path.normpath(line[6:].strip()) + plus_line = 1 + self.map[plus_name] = {} + elif line.startswith("@@ "): + plus_line = int(line.split(" ")[2].split(",")[0]) + elif line.startswith(" "): + plus_line += 1 + elif line.startswith("+"): + self.map[plus_name][plus_line] = diff_line + self.source_map[diff_line] = (plus_name, plus_line, line[1:]) + plus_line += 1 + + def find_line(self, file, line): + if file in self.map and line in self.map[file]: + return self.map[file][line] + return None + + def find_source(self, diff_line): + return self.source_map.get(diff_line) + + +def print_diff_coverage(path, file_hits, out): + if not os.path.exists(path): + return + dm = DiffMap(path) + src = f"{BASE_DIR}/{path}" + lines_found = 0 + lines_hit = 0 + out.write(f"SF:{src}\n") + for f in file_hits: + line_hits = file_hits[f] + for i in range(len(line_hits)): + if line_hits[i] is not None: + diff_line = dm.find_line(f, i + 1) + if diff_line: + lines_found += 1 + out.write(f"DA:{diff_line},{line_hits[i]}\n") + if line_hits[i] > 0: + lines_hit += 1 + out.write(f"LH:{lines_hit}\n") + out.write(f"LF:{lines_found}\n") + out.write("end_of_record\n") + + +def write_lcov(covdata, outlabel): + + with open(f"{BASE_DIR}/package.json") as f: + package = json.load(f) + dist_map = get_dist_map(package) + file_hits = {} + + def covranges(functions): + for f in functions: + for r in f['ranges']: + yield r + + # Coverage data is reported as a "count" value for a range of + # text. These ranges overlap when functions are nested. For + # example, take this source code: + # + # 1 . 
function foo(x) { + # 2 . function bar() { + # 3 . } + # 4 . if (x) + # 5 . bar(); + # 6 . } + # 7 . + # 8 . foo(0) + # + # There will be a range with count 1 for the whole source code + # (lines 1 to 8) since all code is executed when loading a file. + # Then there will be a range with count 1 for "foo" (lines 1 to 6) + # since it is called from the top-level, and there will be a range + # with count 0 for "bar" (lines 2 and 3), since it is never + # actually called. If block-level precision has been enabled + # while collecting the coverage data, there will also be a range + # with count 0 for line 5, since that branch if the "if" is not + # executed. + # + # We process ranges like this in order, from longest to shortest, + # and record their counts for each line they cover. The count of a + # range that is processed later will overwrite any count that has + # been recorded earlier. This makes the count correct for nested + # functions since they are processed last. + # + # In the example, first lines 1 to 8 are set to count 1, then + # lines 1 to 6 are set to count 1 again, then lines 2 and 3 are + # set to count 0, and finally line 5 is also set to count 0: + # + # 1 1 function foo(x) { + # 2 0 function bar() { + # 3 0 } + # 4 1 if (x) + # 5 0 bar(); + # 6 1 } + # 7 1 + # 8 1 foo(0) + # + # Thus, when processing ranges for a single file, we must + # prioritize the counts of smaller ranges over larger ones, and + # can't just add them all up. This doesn't work, however, when + # something like webpack is involved, and a source file is copied + # into multiple files in "dist/". + # + # The coverage data contains ranges for all files that are loaded + # into the browser during the whole session, such as when + # transitioning from the login page to the shell, and when loading + # multiple iframes for the individual pages. 
+ # + # For example, if both shell.js (loaded at the top-level) and + # overview.js (loaded into an iframe) include lib/button.js, then + # the coverage data might report that shell.js does execute line 5 + # of lib/button.js and also that overview.js does not execute it. + # We need to add the counts up for line 5 so that the combined + # report says that is has been executed. + # + # The same applies to reloading and navigating in the browser. If + # a page is reloaded, there will be separate coverage reports for + # its files. For example, if a reload happens, shell.js will be + # mentioned twice in the report, and we need to add up the counts + # from each mention. + + for script in covdata: + distfile = get_distfile(script['url'], dist_map) + if distfile: + ranges = sorted(covranges(script['functions']), + key=lambda r: r['endOffset'] - r['startOffset'], reverse=True) + hits = {} + for r in ranges: + record_range(hits, r, distfile) + merge_hits(file_hits, hits) + + if len(file_hits) > 0: + os.makedirs(f"{BASE_DIR}/lcov", exist_ok=True) + filename = f"{BASE_DIR}/lcov/{outlabel}.info.gz" + with gzip.open(filename, "wt") as out: + for f in file_hits: + print_file_coverage(f, file_hits[f], out) + print_diff_coverage("lcov/github-pr.diff", file_hits, out) + print("Wrote coverage data to " + filename) + + +def get_review_comments(diff_info_file): + comments = [] + cur_src = None + start_line = None + cur_line = None + + def is_interesting_line(text): + # Don't complain when being told to shut up + if "// not-covered: " in text: + return False + # Don't complain about lines that contain only punctuation, or + # nothing but "else". We don't seem to get reliable + # information for them. 
+ if not re.search('[a-zA-Z0-9]', text.replace("else", "")): + return False + return True + + def flush_cur_comment(): + nonlocal comments + if cur_src: + ta_url = os.environ.get("TEST_ATTACHMENTS_URL", None) + comment = {"path": cur_src, + "line": cur_line} + if start_line != cur_line: + comment["start_line"] = start_line + body = f"These {cur_line - start_line + 1} added lines are not executed by any test." + else: + body = "This added line is not executed by any test." + if ta_url: + body += f" [Details]({ta_url}/Coverage/lcov/github-pr.diff.gcov.html)" + comment["body"] = body + comments.append(comment) + + dm = DiffMap("lcov/github-pr.diff") + + with open(diff_info_file) as f: + for line in f.readlines(): + if line.startswith("DA:"): + parts = line[3:].split(",") + if int(parts[1]) == 0: + info = dm.find_source(int(parts[0])) + if not info: + continue + (src, line, text) = info + if not is_interesting_line(text): + continue + if src == cur_src and line == cur_line + 1: + cur_line = line + else: + flush_cur_comment() + cur_src = src + start_line = line + cur_line = line + flush_cur_comment() + + return comments + + +def prepare_for_code_coverage(): + # This gives us a convenient link at the top of the logs, see link-patterns.json + print("Code coverage report in Coverage/index.html") + if os.path.exists("lcov"): + shutil.rmtree("lcov") + os.makedirs("lcov") + # Detect the default branch to compare with, Anaconda still uses master as main. 
+ branch = "main" + try: + subprocess.check_call(["git", "rev-parse", "--quiet", "--verify", branch], stdout=subprocess.DEVNULL) + except subprocess.SubprocessError: + branch = "master" + with open("lcov/github-pr.diff", "w") as f: + subprocess.check_call(["git", "-c", "diff.noprefix=false", "diff", "--patience", branch], stdout=f) + + +def create_coverage_report(): + output = os.environ.get("TEST_ATTACHMENTS", BASE_DIR) + lcov_files = glob.glob(f"{BASE_DIR}/lcov/*.info.gz") + try: + title = os.path.basename(subprocess.check_output(["git", "remote", "get-url", "origin"])).decode().strip() + except subprocess.CalledProcessError: + title = "?" + if len(lcov_files) > 0: + all_file = f"{BASE_DIR}/lcov/all.info" + diff_file = f"{BASE_DIR}/lcov/diff.info" + excludes = [] + # Exclude pkg/lib in Cockpit projects such as podman/machines. + if title != "cockpit.git": + excludes = ["--exclude", "pkg/lib"] + subprocess.check_call(["lcov", "--quiet", "--output", all_file, *excludes, + *itertools.chain(*[["--add", f] for f in lcov_files])]) + subprocess.check_call(["lcov", "--quiet", "--ignore-errors", "empty,empty,unused,unused", "--output", diff_file, + "--extract", all_file, "*/github-pr.diff"]) + summary = subprocess.check_output(["genhtml", "--no-function-coverage", + "--prefix", os.getcwd(), + "--title", title, + "--output-dir", f"{output}/Coverage", all_file]).decode() + + coverage = summary.split("\n")[-2] + match = re.search(r".*lines\.*:\s*([\d\.]*%).*", coverage) + if match: + print("Overall line coverage:", match.group(1)) + + comments = get_review_comments(diff_file) + rev = os.environ.get("TEST_REVISION", None) + pull = os.environ.get("TEST_PULL", None) + if rev and pull: + api = github.GitHub() + old_comments = api.get(f"pulls/{pull}/comments?sort=created&direction=desc&per_page=100") or [] + for oc in old_comments: + if ("body" in oc and "path" in oc and "line" in oc and + "not executed by any test." 
in oc["body"]): + api.delete(f"pulls/comments/{oc['id']}") + if len(comments) > 0: + api.post(f"pulls/{pull}/reviews", + {"commit_id": rev, "event": "COMMENT", + "comments": comments}) + else: + sys.stderr.write("Error: no code coverage files generated\n") diff --git a/test/common/link-patterns.json b/test/common/link-patterns.json new file mode 100644 index 0000000..cef6c78 --- /dev/null +++ b/test/common/link-patterns.json @@ -0,0 +1,39 @@ +[ + { + "label": "screenshot", + "pattern": "Wrote screenshot to ([A-Za-z0-9.-]+.png)$", + "url": "$1", + "icon": "bi bi-camera-fill" + }, + { + "label": "new pixels", + "pattern": "New pixel test reference ([A-Za-z0-9.-]+.png)$", + "url": "$1" + }, + { + "label": "journal", + "pattern": "Journal extracted to ([A-Za-z0-9.-]+.log(?:.[gx]z)?)$", + "url": "$1", + "icon": "bi bi-card-text" + }, + { + "label": "changed pixels", + "pattern": "Differences in pixel test ([A-Za-z0-9.-]+)$", + "url": "pixeldiff.html#$1" + }, + { + "label": "coverage", + "pattern": "Code coverage report in ([A-Za-z0-9.-]+)$", + "url": "$1/" + }, + { + "label": "vm xml", + "pattern": "Wrote ([A-Za-z0-9.-]+) XML to ([A-Za-z0-9.-]+.xml)$", + "url": "$2" + }, + { + "label": "vm log", + "pattern": "Wrote ([A-Za-z0-9.-]+) log to ([A-Za-z0-9.-]+.log)$", + "url": "$2" + } +] diff --git a/test/common/make-bots b/test/common/make-bots new file mode 100755 index 0000000..5160e90 --- /dev/null +++ b/test/common/make-bots @@ -0,0 +1,28 @@ +#!/bin/sh + +# Prepare bots by creating ./bots directory +# Specify $COCKPIT_BOTS_REF to checkout non-main branch + +GITHUB_REPO='bots' +SUBDIR='bots' + +V="${V-0}" # default to friendly messages + +set -eu +cd "${0%/*}/../.." +# shellcheck source-path=SCRIPTDIR/../.. +. test/common/git-utils.sh + +if [ ! -e bots ]; then + [ -n "${quiet}" ] || set -x + if [ -h ~/.config/cockpit-dev/bots ]; then + message SYMLINK "bots → $(realpath --relative-to=. ~/.config/cockpit-dev/bots)" + ln -sfT "$(realpath --relative-to=. 
~/.config/cockpit-dev)/bots" bots + else + # it's small, so keep everything cached + fetch_to_cache ${COCKPIT_BOTS_REF+"${COCKPIT_BOTS_REF}"} + clone_from_cache "${COCKPIT_BOTS_REF-main}" + fi +else + echo "bots/ already exists, skipping" +fi diff --git a/test/common/netlib.py b/test/common/netlib.py new file mode 100644 index 0000000..e9e868f --- /dev/null +++ b/test/common/netlib.py @@ -0,0 +1,214 @@ +# This file is part of Cockpit. +# +# Copyright (C) 2017 Red Hat, Inc. +# +# Cockpit is free software; you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. +# +# Cockpit is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with Cockpit; If not, see . + +import re +import subprocess + +from testlib import Error, MachineCase, wait + + +class NetworkHelpers: + """Mix-in class for tests that require network setup""" + + def add_veth(self, name, dhcp_cidr=None, dhcp_range=None): + """Add a veth device that is manageable with NetworkManager + + This is safe for @nondestructive tests, the interface gets cleaned up automatically. 
+ """ + if dhcp_range is None: + dhcp_range = ['10.111.112.2', '10.111.127.254'] + self.machine.execute(r""" + mkdir -p /run/udev/rules.d/ + echo 'ENV{ID_NET_DRIVER}=="veth", ENV{INTERFACE}=="%(name)s", ENV{NM_UNMANAGED}="0"' > /run/udev/rules.d/99-nm-veth-%(name)s-test.rules + udevadm control --reload + ip link add name %(name)s type veth peer name v_%(name)s + # Trigger udev to make sure that it has been renamed to its final name + udevadm trigger --subsystem-match=net + udevadm settle + """ % {"name": name}) + self.addCleanup(self.machine.execute, f"rm /run/udev/rules.d/99-nm-veth-{name}-test.rules; ip link del dev {name}") + if dhcp_cidr: + # up the remote end, give it an IP, and start DHCP server + self.machine.execute(f"ip a add {dhcp_cidr} dev v_{name}; ip link set v_{name} up") + server = self.machine.spawn("dnsmasq --keep-in-foreground --log-queries --log-facility=- " + f"--conf-file=/dev/null --dhcp-leasefile=/tmp/leases.{name} --no-resolv " + f"--bind-interfaces --except-interface=lo --interface=v_{name} --dhcp-range={dhcp_range[0]},{dhcp_range[1]},4h", + f"dhcp-{name}.log") + self.addCleanup(self.machine.execute, "kill %i" % server) + self.machine.execute("if firewall-cmd --state >/dev/null 2>&1; then firewall-cmd --add-service=dhcp; fi") + + def nm_activate_eth(self, iface): + """Create an NM connection for a given interface""" + + m = self.machine + wait(lambda: m.execute(f'nmcli device | grep "{iface}.*disconnected"')) + m.execute(f"nmcli con add type ethernet ifname {iface} con-name {iface}") + m.execute(f"nmcli con up {iface} ifname {iface}") + self.addCleanup(m.execute, f"nmcli con delete {iface}") + + def nm_checkpoints_disable(self): + self.browser.eval_js("window.cockpit_tests_disable_checkpoints = true;") + + def nm_checkpoints_enable(self, settle_time=3.0): + self.browser.eval_js("window.cockpit_tests_disable_checkpoints = false;") + self.browser.eval_js(f"window.cockpit_tests_checkpoint_settle_time = {settle_time};") + + +class 
NetworkCase(MachineCase, NetworkHelpers): + def setUp(self): + super().setUp() + + m = self.machine + + # clean up after nondestructive tests + if self.is_nondestructive(): + def devs(): + return set(self.machine.execute("ls /sys/class/net/ | grep -v bonding_masters").strip().split()) + + def cleanupDevs(): + new = devs() - self.orig_devs + self.machine.execute(f"for d in {' '.join(new)}; do nmcli dev del $d; done") + + self.orig_devs = devs() + self.restore_dir("/etc/NetworkManager", restart_unit="NetworkManager") + self.restore_dir("/etc/sysconfig/network-scripts") + self.restore_dir("/etc/netplan") + self.restore_dir("/run/NetworkManager/system-connections") + self.addCleanup(cleanupDevs) + + m.execute("systemctl start NetworkManager") + + # Ensure a clean and consistent state. We remove rogue + # connections that might still be here from the time of + # creating the image and we prevent NM from automatically + # creating new connections. + # if the command fails, try again + failures_allowed = 3 + while True: + try: + print(m.execute("nmcli con show")) + m.execute( + """nmcli -f UUID,DEVICE connection show | awk '$2 == "--" { print $1 }' | xargs -r nmcli con del""") + break + except subprocess.CalledProcessError: + failures_allowed -= 1 + if failures_allowed == 0: + raise + + m.write("/etc/NetworkManager/conf.d/99-test.conf", "[main]\nno-auto-default=*\n") + m.execute("systemctl reload-or-restart NetworkManager") + + # our assertions and pixel tests assume that virbr0 is absent + m.execute('[ -z "$(systemctl --legend=false list-unit-files libvirtd.service)" ] || ' + 'systemctl try-restart libvirtd.service') + if 'default' in m.execute("virsh net-list --name || true"): + m.execute("virsh net-autostart --disable default; virsh net-destroy default") + + ver = self.machine.execute( + "busctl --system get-property org.freedesktop.NetworkManager /org/freedesktop/NetworkManager org.freedesktop.NetworkManager Version || true") + ver_match = re.match('s "(.*)"', ver) + 
if ver_match: + self.networkmanager_version = [int(x) for x in ver_match.group(1).split(".")] + else: + self.networkmanager_version = [0] + + # Something unknown sometimes goes wrong with PCP, see #15625 + self.allow_journal_messages("pcp-archive: no such metric: network.interface.* Unknown metric name", + "direct: instance name lookup failed: network.*") + + def get_iface(self, m, mac): + def getit(): + path = m.execute(f"grep -li '{mac}' /sys/class/net/*/address") + return path.split("/")[-2] + iface = wait(getit).strip() + print(f"{mac} -> {iface}") + return iface + + def add_iface(self, activate=True): + m = self.machine + mac = m.add_netiface(networking=self.network.interface()) + # Wait for the interface to show up + self.get_iface(m, mac) + # Trigger udev to make sure that it has been renamed to its final name + m.execute("udevadm trigger; udevadm settle") + iface = self.get_iface(m, mac) + if activate: + self.nm_activate_eth(iface) + return iface + + def wait_for_iface(self, iface, active=True, state=None, prefix="10.111."): + sel = f"#networking-interfaces tr[data-interface='{iface}']" + + if state: + text = state + elif active: + text = prefix + else: + text = "Inactive" + + try: + with self.browser.wait_timeout(30): + self.browser.wait_in_text(sel, text) + except Error as e: + print(f"Interface {iface} didn't show up.") + print(self.machine.execute(f"grep . 
/sys/class/net/*/address; nmcli con; nmcli dev; nmcli dev show {iface} || true")) + raise e + + def select_iface(self, iface): + b = self.browser + b.click(f"#networking-interfaces tr[data-interface='{iface}'] button") + + def iface_con_id(self, iface): + con_id = self.machine.execute(f"nmcli -m tabular -t -f GENERAL.CONNECTION device show {iface}").strip() + if con_id == "" or con_id == "--": + return None + else: + return con_id + + def wait_for_iface_setting(self, setting_title, setting_value): + b = self.browser + b.wait_in_text(f"dt:contains('{setting_title}') + dd", setting_value) + + def configure_iface_setting(self, setting_title): + b = self.browser + b.click(f"dt:contains('{setting_title}') + dd button") + + def ensure_nm_uses_dhclient(self): + m = self.machine + m.write("/etc/NetworkManager/conf.d/99-dhcp.conf", "[main]\ndhcp=dhclient\n") + m.execute("systemctl restart NetworkManager") + + def slow_down_dhclient(self, delay): + self.machine.execute(f""" + mkdir -p {self.vm_tmpdir} + cp -a /usr/sbin/dhclient {self.vm_tmpdir}/dhclient.real + printf '#!/bin/sh\\nsleep {delay}\\nexec {self.vm_tmpdir}/dhclient.real "$@"' > {self.vm_tmpdir}/dhclient + chmod a+x {self.vm_tmpdir}/dhclient + if selinuxenabled 2>&1; then chcon --reference /usr/sbin/dhclient {self.vm_tmpdir}/dhclient; fi + mount -o bind {self.vm_tmpdir}/dhclient /usr/sbin/dhclient + """) + self.addCleanup(self.machine.execute, "umount /usr/sbin/dhclient") + + def wait_onoff(self, sel, val): + self.browser.wait_visible(sel + " input[type=checkbox]" + (":checked" if val else ":not(:checked)")) + + def toggle_onoff(self, sel): + self.browser.click(sel + " input[type=checkbox]") + + def login_and_go(self, *args, **kwargs): + super().login_and_go(*args, **kwargs) + self.nm_checkpoints_disable() diff --git a/test/common/packagelib.py b/test/common/packagelib.py new file mode 100644 index 0000000..ed2fe89 --- /dev/null +++ b/test/common/packagelib.py @@ -0,0 +1,428 @@ +# This file is part of Cockpit. 
+# +# Copyright (C) 2017 Red Hat, Inc. +# +# Cockpit is free software; you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. +# +# Cockpit is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with Cockpit; If not, see . + +import logging +import os +import textwrap + +from testlib import MachineCase + + +class PackageCase(MachineCase): + def setUp(self): + super().setUp() + + self.repo_dir = os.path.join(self.vm_tmpdir, "repo") + + if self.machine.ostree_image: + logging.warning("PackageCase: OSTree images can't install additional packages") + return + + # expected backend; hardcode this on image names to check the auto-detection + if self.machine.image.startswith("debian") or self.machine.image.startswith("ubuntu"): + self.backend = "apt" + self.primary_arch = "all" + self.secondary_arch = "amd64" + elif self.machine.image.startswith("fedora") or self.machine.image.startswith("rhel-") or self.machine.image.startswith("centos-"): + self.backend = "dnf" + self.primary_arch = "noarch" + self.secondary_arch = "x86_64" + elif self.machine.image == "arch": + self.backend = "alpm" + self.primary_arch = "any" + self.secondary_arch = "x86_64" + else: + raise NotImplementedError("unknown image " + self.machine.image) + + if "debian" in self.image or "ubuntu" in self.image: + # PackageKit refuses to work when offline, and main interface is not managed by NM on these images + self.machine.execute("nmcli con add type dummy con-name fake ifname fake0 ip4 1.2.3.4/24 gw4 1.2.3.1") + self.addCleanup(self.machine.execute, "nmcli con delete fake") 
+ + # HACK: packagekit often hangs on shutdown; https://bugzilla.redhat.com/show_bug.cgi?id=1717185 + self.write_file("/etc/systemd/system/packagekit.service.d/timeout.conf", "[Service]\nTimeoutStopSec=5\n") + self.addCleanup(self.machine.execute, "systemctl stop packagekit; systemctl reset-failed packagekit || true") + + # disable all existing repositories to avoid hitting the network + if self.backend == "apt": + self.restore_dir("/var/lib/apt", reboot_safe=True) + self.restore_dir("/var/cache/apt", reboot_safe=True) + self.restore_dir("/etc/apt", reboot_safe=True) + self.machine.execute("echo > /etc/apt/sources.list; rm -f /etc/apt/sources.list.d/*; apt-get clean; apt-get update") + elif self.backend == "alpm": + self.restore_dir("/var/lib/pacman", reboot_safe=True) + self.restore_dir("/var/cache/pacman", reboot_safe=True) + self.restore_dir("/etc/pacman.d", reboot_safe=True) + self.restore_dir("/var/lib/PackageKit/alpm", reboot_safe=True) + self.restore_file("/etc/pacman.conf") + self.restore_file("/etc/pacman.d/mirrorlist") + self.restore_file("/usr/share/libalpm/hooks/90-packagekit-refresh.hook") + + self.machine.execute("rm /etc/pacman.conf /etc/pacman.d/mirrorlist /var/lib/pacman/sync/* /usr/share/libalpm/hooks/90-packagekit-refresh.hook") + self.machine.execute("test -d /var/lib/PackageKit/alpm && rm -r /var/lib/PackageKit/alpm || true") # Drop alpm state directory as it interferes with running offline + # Initial config for installation + empty_repo_dir = '/var/lib/cockpittest/empty' + config = f""" +[options] +Architecture = auto +HoldPkg = pacman glibc + +[empty] +SigLevel = Never +Server = file://{empty_repo_dir} +""" + # HACK: Setup empty repo for packagekit + self.machine.execute(f"mkdir -p {empty_repo_dir} || true") + self.machine.execute(f"repo-add {empty_repo_dir}/empty.db.tar.gz") + self.machine.write("/etc/pacman.conf", config) + # Clean up possible leftover lockfile + self.machine.execute(""" + if [ -f /var/lib/pacman/db.lck ]; then + fuser -k 
/var/lib/pacman/db.lck || true; + rm /var/lib/pacman/db.lck; + fi + """) + self.machine.execute("pacman -Sy") + else: + self.restore_dir("/etc/yum.repos.d", reboot_safe=True) + self.restore_dir("/var/cache/dnf", reboot_safe=True) + self.machine.execute("rm -rf /etc/yum.repos.d/* /var/cache/dnf/*") + + # have PackageKit start from a clean slate + self.machine.execute("systemctl stop packagekit") + self.machine.execute("systemctl kill --signal=SIGKILL packagekit || true; rm -rf /var/cache/PackageKit") + self.machine.execute("systemctl reset-failed packagekit || true") + self.restore_file("/var/lib/PackageKit/transactions.db") + + if self.image in ["debian-stable", "debian-testing"]: + # PackageKit tries to resolve some DNS names, but our test VM is offline; temporarily disable the name server to fail quickly + self.machine.execute("mv /etc/resolv.conf /etc/resolv.conf.test") + self.addCleanup(self.machine.execute, "mv /etc/resolv.conf.test /etc/resolv.conf") + + # reset automatic updates + if self.backend == 'dnf': + self.machine.execute("systemctl disable --now dnf-automatic dnf-automatic-install " + "dnf-automatic.service dnf-automatic-install.timer") + self.machine.execute("rm -r /etc/systemd/system/dnf-automatic* && systemctl daemon-reload || true") + + self.updateInfo = {} + + # HACK: kpatch check sometimes complains that we don't set up a full repo in unrelated tests + self.allow_browser_errors("Could not determine kpatch packages:.*repodata updates was not complete") + + # + # Helper functions for creating packages/repository + # + + def createPackage(self, name, version, release, install=False, + postinst=None, depends="", content=None, arch=None, provides=None, **updateinfo): + """Create a dummy package in repo_dir on self.machine + + If install is True, install the package. Otherwise, update the package + index in repo_dir. 
+ """ + if provides: + provides = f"Provides: {provides}" + else: + provides = "" + + if self.backend == "apt": + self.createDeb(name, version + '-' + release, depends, postinst, install, content, arch, provides) + elif self.backend == "alpm": + self.createPacmanPkg(name, version, release, depends, postinst, install, content, arch, provides) + else: + self.createRpm(name, version, release, depends, postinst, install, content, arch, provides) + if updateinfo: + self.updateInfo[(name, version, release)] = updateinfo + + def createDeb(self, name, version, depends, postinst, install, content, arch, provides): + """Create a dummy deb in repo_dir on self.machine + + If install is True, install the package. Otherwise, update the package + index in repo_dir. + """ + m = self.machine + + if arch is None: + arch = self.primary_arch + deb = f"{self.repo_dir}/{name}_{version}_{arch}.deb" + if postinst: + postinstcode = f"printf '#!/bin/sh\n{postinst}' > /tmp/b/DEBIAN/postinst; chmod 755 /tmp/b/DEBIAN/postinst" + else: + postinstcode = '' + if content is not None: + for path, data in content.items(): + dest = "/tmp/b/" + path + m.execute(f"mkdir -p '{os.path.dirname(dest)}'") + if isinstance(data, dict): + m.execute(f"cp '{data['path']}' '{dest}'") + else: + m.write(dest, data) + m.execute(f"mkdir -p {self.repo_dir}") + m.write("/tmp/b/DEBIAN/control", textwrap.dedent(f""" + Package: {name} + Version: {version} + Priority: optional + Section: test + Maintainer: foo + Depends: {depends} + Architecture: {arch} + Description: dummy {name} + {provides} + """)) + + cmd = f"""set -e + {postinstcode} + touch /tmp/b/stamp-{name}-{version} + dpkg -b /tmp/b {deb} + rm -r /tmp/b + """ + if install: + cmd += "dpkg -i " + deb + m.execute(cmd) + self.addCleanup(m.execute, f"dpkg -P --force-depends --force-remove-reinstreq {name} 2>/dev/null || true") + + def createRpm(self, name, version, release, requires, post, install, content, arch, provides): + """Create a dummy rpm in repo_dir on 
self.machine + + If install is True, install the package. Otherwise, update the package + index in repo_dir. + """ + if post: + postcode = '\n%%post\n' + post + else: + postcode = '' + if requires: + requires = f"Requires: {requires}\n" + if arch is None: + arch = self.primary_arch + installcmds = f"touch $RPM_BUILD_ROOT/stamp-{name}-{version}-{release}\n" + installedfiles = f"/stamp-{name}-{version}-{release}\n" + if content is not None: + for path, data in content.items(): + installcmds += f'mkdir -p $(dirname "$RPM_BUILD_ROOT/{path}")\n' + if isinstance(data, dict): + installcmds += f"cp {data['path']} \"$RPM_BUILD_ROOT/{path}\"" + else: + installcmds += f'cat >"$RPM_BUILD_ROOT/{path}" <<\'EOF\'\n' + data + '\nEOF\n' + installedfiles += f"{path}\n" + + architecture = "" + if arch == self.primary_arch: + architecture = f"BuildArch: {self.primary_arch}" + spec = f""" +Summary: dummy {name} +Name: {name} +Version: {version} +Release: {release} +License: BSD +{provides} +{architecture} +{requires} + +%%install +{installcmds} + +%%description +Test package. + +%%files +{installedfiles} + +{postcode} +""" + self.machine.write("/tmp/spec", spec) + cmd = """ +rpmbuild --quiet -bb /tmp/spec +mkdir -p {0} +cp ~/rpmbuild/RPMS/{4}/*.rpm {0} +rm -rf ~/rpmbuild +""" + if install: + cmd += "rpm -i {0}/{1}-{2}-{3}.*.rpm" + self.machine.execute(cmd.format(self.repo_dir, name, version, release, arch)) + self.addCleanup(self.machine.execute, f"rpm -e --nodeps {name} 2>/dev/null || true") + + def createPacmanPkg(self, name, version, release, requires, postinst, install, content, arch, provides): + """Create a dummy pacman package in repo_dir on self.machine + + If install is True, install the package. Otherwise, update the package + index in repo_dir. 
+ """ + + if arch is None: + arch = 'any' + + sources = "" + installcmds = 'package() {\n' + if content is not None: + sources = "source=(" + files = 0 + for path, data in content.items(): + p = os.path.dirname(path) + installcmds += f'mkdir -p $pkgdir{p}\n' + if isinstance(data, dict): + dpath = data["path"] + + file = os.path.basename(dpath) + sources += file + files += 1 + # TODO: hardcoded /tmp + self.machine.execute(f'cp {data["path"]} /tmp/{file}') + installcmds += f'cp {file} $pkgdir{path}\n' + else: + installcmds += f'cat >"$pkgdir{path}" <<\'EOF\'\n' + data + '\nEOF\n' + + sources += ")" + + # Always stamp a file + installcmds += f"touch $pkgdir/stamp-{name}-{version}-{release}\n" + installcmds += '}' + + pkgbuild = f""" +pkgname={name} +pkgver={version} +pkgdesc="dummy {name}" +pkgrel={release} +arch=({arch}) +depends=({requires}) +{sources} + +{installcmds} +""" + + if postinst: + postinstcode = f""" +post_install() {{ + {postinst} +}} + +post_upgrade() {{ + post_install $* +}} +""" + self.machine.write(f"/tmp/{name}.install", postinstcode) + pkgbuild += f"\ninstall={name}.install\n" + + self.machine.write("/tmp/PKGBUILD", pkgbuild) + + cmd = """ + cd /tmp/ + su builder -c "makepkg --cleanbuild --clean --force --nodeps --skipinteg --noconfirm" +""" + + if install: + cmd += f"pacman -U --overwrite '*' --noconfirm {name}-{version}-{release}-{arch}.pkg.tar.zst\n" + + cmd += f"mkdir -p {self.repo_dir}\n" + cmd += f"mv *.pkg.tar.zst {self.repo_dir}\n" + # Clean up packaging files + cmd += "rm PKGBUILD\n" + if postinst: + cmd += f"rm /tmp/{name}.install" + self.machine.execute(cmd) + self.addCleanup(self.machine.execute, f"pacman -Rdd --noconfirm {name} 2>/dev/null || true") + + def createAptChangelogs(self): + # apt metadata has no formal field for bugs/CVEs, they are parsed from the changelog + for ((pkg, ver, rel), info) in self.updateInfo.items(): + changes = info.get("changes", "some changes") + if info.get("bugs"): + changes += f" (Closes: {', 
'.join([('#' + str(b)) for b in info['bugs']])})" + if info.get("cves"): + changes += "\n * " + ", ".join(info["cves"]) + + path = f"{self.repo_dir}/changelogs/{pkg[0]}/{pkg}/{pkg}_{ver}-{rel}" + contents = f"""{pkg} ({ver}-{rel}) unstable; urgency=medium + + * {changes} + + -- Joe Developer Wed, 31 May 2017 14:52:25 +0200 +""" + self.machine.execute(f"mkdir -p $(dirname {path}); echo '{contents}' > {path}") + + def createYumUpdateInfo(self): + xml = '\n\n' + for ((pkg, ver, rel), info) in self.updateInfo.items(): + refs = "" + for b in info.get("bugs", []): + refs += f' \n' + for c in info.get("cves", []): + refs += f' \n' + if info.get("securitySeverity"): + refs += ' \n'.format(info[ + "securitySeverity"]) + for e in info.get("errata", []): + refs += f' \n' + + xml += """ + UPDATE-{pkg}-{ver}-{rel} + {pkg} {ver}-{rel} update + + {desc} + +{refs} + + + + + {pkg}-{ver}-{rel}.noarch.rpm + + + + +""".format(pkg=pkg, ver=ver, rel=rel, refs=refs, + desc=info.get("changes", ""), severity=info.get("severity", "bugfix")) + + xml += '\n' + return xml + + def addPackageSet(self, name): + self.machine.execute(f"mkdir -p {self.repo_dir}; cp /var/lib/package-sets/{name}/* {self.repo_dir}") + + def enableRepo(self): + if self.backend == "apt": + self.createAptChangelogs() + self.machine.execute(f"""echo 'deb [trusted=yes] file://{self.repo_dir} /' > /etc/apt/sources.list.d/test.list + cd {self.repo_dir}; apt-ftparchive packages . 
> Packages + xz -c Packages > Packages.xz + O=$(apt-ftparchive -o APT::FTPArchive::Release::Origin=cockpittest release .); echo "$O" > Release + echo 'Changelogs: http://localhost:12345/changelogs/@CHANGEPATH@' >> Release + """) + pid = self.machine.spawn(f"cd {self.repo_dir}; exec python3 -m http.server 12345", "changelog") + # pid will not be present for rebooting tests + self.addCleanup(self.machine.execute, "kill %i || true" % pid) + self.machine.wait_for_cockpit_running(port=12345) # wait for changelog HTTP server to start up + elif self.backend == "alpm": + self.machine.execute(f"""cd {self.repo_dir} + repo-add {self.repo_dir}/testrepo.db.tar.gz *.pkg.tar.zst + """) + + config = f""" +[testrepo] +SigLevel = Never +Server = file://{self.repo_dir} + """ + if 'testrepo' not in self.machine.execute('grep testrepo /etc/pacman.conf || true'): + self.machine.write("/etc/pacman.conf", config, append=True) + + else: + self.machine.execute("""printf '[updates]\nname=cockpittest\nbaseurl=file://{0}\nenabled=1\ngpgcheck=0\n' > /etc/yum.repos.d/cockpittest.repo + echo '{1}' > /tmp/updateinfo.xml + createrepo_c {0} + modifyrepo_c /tmp/updateinfo.xml {0}/repodata + dnf clean all""".format(self.repo_dir, self.createYumUpdateInfo())) diff --git a/test/common/pixel-tests b/test/common/pixel-tests new file mode 100755 index 0000000..0c70dc6 --- /dev/null +++ b/test/common/pixel-tests @@ -0,0 +1,261 @@ +#!/bin/bash + +set -eu + +TEST_REFERENCE_SUBDIR="${TEST_REFERENCE_SUBDIR:-test/reference}" +REPO=pixel-test-reference + +GITHUB_BASE="${GITHUB_BASE:-cockpit-project/cockpit}" +GITHUB_REPOSITORY="${GITHUB_BASE%/*}/${REPO}" +CLONE_REMOTE="https://github.com/${GITHUB_REPOSITORY}" +PUSH_REMOTE="git@github.com:${GITHUB_REPOSITORY}" + +message() { + [ "${V-}" != 0 ] || printf " %-8s %s\n" "$1" "$2" +} + +cmd_init() { + git submodule add -b empty "$CLONE_REMOTE" "$TEST_REFERENCE_SUBDIR" +} + +cmd_update() { + git submodule update --init -- "$TEST_REFERENCE_SUBDIR" || ( + echo "" + echo 
"Updating test/reference has failed, maybe because of" + echo "local changes that have been accidentally made while" + echo "it was out of date." + echo "" + echo "If you want to throw away these local changes, run" + echo "" + echo " $ ./test/common/pixel-tests reset" + echo "" + exit 1 + ) +} + +cmd_pull() { + cmd_update +} + +cmd_status() { + cmd_update + ( cd "$TEST_REFERENCE_SUBDIR" + git rm --force --cached --quiet '*.png' + git add *.png + if git diff-index --name-status --cached --exit-code HEAD; then + echo No changes + fi + ) +} + +cmd_push() { + cmd_update + ( cd "$TEST_REFERENCE_SUBDIR" + git rm --force --cached --quiet '*.png' + git add *.png + if ! git diff-index --name-status --cached --exit-code HEAD; then + git fetch origin empty:empty + git reset --soft empty + git commit --quiet -m "$(date)" + else + echo No changes + fi + tag="sha-$(git rev-parse HEAD)" + [ $(git tag -l "$tag") ] || git tag "$tag" HEAD + git push "$PUSH_REMOTE" "$tag" + ) + git add "$TEST_REFERENCE_SUBDIR" + if [ -n "$(git status --porcelain "$TEST_REFERENCE_SUBDIR")" ]; then + echo "" + echo "The test/reference link has changed. The next step is to commit and" + echo "push this change, just like any other change to a file." + echo "" + echo "The change has already been added with 'git add', so you could now" + echo "amend your current HEAD commit with it like this:" + echo "" + echo " $ git commit --amend" + echo "" + echo "Then the HEAD commit can be pushed like normally. There is nothing" + echo "special about committing and pushing a change to test/reference." 
+ fi +} + +cmd_reset() { + rm -rf "$TEST_REFERENCE_SUBDIR" + cmd_update +} + +pixel_test_logs_urls() { + arg=${1:-} + + if [[ "$arg" == http* ]]; then + echo $arg + return + fi + + repo=$(git remote get-url origin | sed -re 's,git@github.com:|https://github.com/,,' -e 's,\.git$,,') + + if [ -n "$arg" ]; then + revision=$(curl -s "https://api.github.com/repos/$repo/pulls/$arg" | python3 -c " +import json +import sys + +print(json.load(sys.stdin)['head']['sha']) +") + else + revision=$(git rev-parse @{upstream}) + fi + + context=$(cat test/reference-image) + curl -s "https://api.github.com/repos/$repo/statuses/$revision?per_page=100" | python3 -c " +import json +import sys +import os + +seen = set() +for s in json.load(sys.stdin): + c = s['context'] + if 'pybridge' in c or 'firefox' in c or 'devel' in c: + continue + if c.split('/')[0] == sys.argv[1] and s['target_url']: + url=os.path.dirname(s['target_url']) + if url not in seen: + seen.add(url) + print(url) +" "$context" +} + +cmd_fetch() { + urls=$(pixel_test_logs_urls ${1:-}) + if [ -z "$urls" ]; then + echo >&2 "Can't find test results for $(cat test/reference-image), sorry." + exit 1 + fi + cmd_update + for url in ${urls}; do + url=${url/\/log.html/} + echo "Fetching new pixel test references from $url" + pixels=$(curl -s "$url/index.html" | grep '[^=><"]*-pixels.png' -o | uniq) + for f in ${pixels}; do + echo "$f" + curl -s --output-dir test/reference/ -O "$url/$f" + done + done +} + +cmd_help() { + cat < + + + Cockpit Integration Tests - Pixel diffs + + + + + +

Pixel comparison for

+

New on the left, reference on the right.

+
+
+

Changed pixels in red, ignored changes in green

+
+
+ + diff --git a/test/common/pywrap b/test/common/pywrap new file mode 100755 index 0000000..c5e78b3 --- /dev/null +++ b/test/common/pywrap @@ -0,0 +1,30 @@ +#!/bin/sh + +# Run a Python script, setting up PYTHONPATH for access to test/common and the +# python libraries in bots/. Checks out the bots first, if necessary. + +# This is intended to be used from the interpreter line of executable Python +# scripts, referring to it with a relative path. The interpreter line should +# look something like so: + + #!/usr/bin/python3 -cimport os, sys; os.execv(os.path.dirname(sys.argv[1]) + "/../test/common/pywrap", sys.argv) + +# with the `/../test/common/` part determined by the location of the script +# relative to this script. + +set -eu + +realpath="$(realpath "$0")" +top_srcdir="${realpath%/*}/../.." + +# Check out the bots if required +test -d "${top_srcdir}/bots" || "${top_srcdir}/test/common/make-bots" + +# Prepend the path +PYTHONPATH="${top_srcdir}/test/common:${top_srcdir}/bots:${top_srcdir}/bots/machine${PYTHONPATH:+:${PYTHONPATH}}" +export PYTHONPATH + +# Run the script +# -B : don't write .pyc files on import; also PYTHONDONTWRITEBYTECODE=x +# -P : don't prepend a potentially unsafe path to sys.path -- but not available in RHEL 8/9 yet, use once we can +exec python3 -B "$@" diff --git a/test/common/ruff.toml b/test/common/ruff.toml new file mode 100644 index 0000000..6e46ee9 --- /dev/null +++ b/test/common/ruff.toml @@ -0,0 +1,11 @@ +extend = "../../pyproject.toml" + +[lint] +ignore = [ + "E501", # https://github.com/charliermarsh/ruff/issues/3206#issuecomment-1562681390 + + "B010", # Do not call `setattr` with a constant attribute value. It is not any safer than normal property access. 
+ "FBT001", # Boolean positional arg in function definition + "FBT002", # Boolean default value in function definition + "PT009", # Use a regular `assert` instead of unittest-style `assertEqual` +] diff --git a/test/common/run-tests b/test/common/run-tests new file mode 100755 index 0000000..58c2962 --- /dev/null +++ b/test/common/run-tests @@ -0,0 +1,585 @@ +#!/usr/bin/python3 -cimport os, sys; os.execv(os.path.dirname(sys.argv[1]) + "/pywrap", sys.argv) + +import argparse +import binascii +import errno +import glob +import importlib.machinery +import importlib.util +import logging +import os +import socket +import string +import subprocess +import sys +import tempfile +import time +import unittest +from typing import List, Optional, Tuple + +import testlib +import testvm +from lcov import create_coverage_report, prepare_for_code_coverage + +os.environ['PYTHONUNBUFFERED'] = '1' + + +def flush_stdout(): + while True: + try: + sys.stdout.flush() + break + except BlockingIOError: + time.sleep(0.1) + + +class Test: + def __init__(self, test_id, command, timeout, nondestructive, retry_when_affected, todo, cost=1): + self.process = None + self.retries = 0 + self.test_id = test_id + self.command = command + self.timeout = timeout + self.nondestructive = nondestructive + self.machine_id = None + self.retry_when_affected = retry_when_affected + self.todo = todo + self.cost = cost + self.returncode = None + + def assign_machine(self, machine_id, ssh_address, web_address): + assert self.nondestructive, "assigning a machine only works for nondestructive test" + self.machine_id = machine_id + self.command.insert(-2, "--machine") + self.command.insert(-2, ssh_address) + self.command.insert(-2, "--browser") + self.command.insert(-2, web_address) + + def start(self): + if self.nondestructive: + assert self.machine_id is not None, f"need to assign nondestructive test {self} {self.command} to a machine" + self.outfile = tempfile.TemporaryFile() + self.process = 
subprocess.Popen(["timeout", "-v", str(self.timeout), *self.command], + stdout=self.outfile, stderr=subprocess.STDOUT) + + def poll(self): + poll_result = self.process.poll() + if poll_result is not None: + self.outfile.flush() + self.outfile.seek(0) + self.output = self.outfile.read() + self.outfile.close() + self.outfile = None + self.returncode = self.process.returncode + + return poll_result + + def finish(self, affected_tests: List[str], opts: argparse.Namespace) -> Tuple[Optional[str], int]: + """Returns if a test should retry or not + + Call test-failure-policy on the test's output, print if needed. + + Return (retry_reason, exit_code). retry_reason can be None or a string. + """ + + print_tap = not opts.list + affected = any(self.command[0].endswith(t) for t in affected_tests) + retry_reason = "" + + # Try affected tests 3 times + if self.returncode == 0 and affected and self.retry_when_affected and self.retries < 2: + retry_reason = "test affected tests 3 times" + self.retries += 1 + self._print_test(print_tap, f"# RETRY {self.retries} ({retry_reason})") + return retry_reason, 0 + + # If test is being skipped pick up the reason + if self.returncode == 77: + lines = self.output.splitlines() + skip_reason = lines[-1].strip().decode("utf-8") + self.output = b"\n".join(lines[:-1]) + self._print_test(print_tap, skip_reason=skip_reason) + return None, 0 + + # If the test was marked with @todo then... + if self.todo is not None: + if self.returncode == 0: + # The test passed, but it shouldn't have. + self.returncode = 1 # that's a fail + self._print_test(print_tap, todo_reason=f'# expected failure: {self.todo}') + return None, 1 + else: + # The test failed as expected + # Outputs 'not ok 1 test # TODO ...' 
+ self._print_test(print_tap, todo_reason=f'# TODO {self.todo}') + return None, 0 + + if self.returncode == 0: + self._print_test(print_tap) + return None, 0 + + if not opts.thorough: + cmd = ["test-failure-policy", "--all"] + if not opts.track_naughties: + cmd.append("--offline") + cmd.append(testvm.DEFAULT_IMAGE) + try: + proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE) + reason = proc.communicate(self.output + ("not ok " + str(self)).encode())[0].strip() + + if proc.returncode == 77: + self.returncode = proc.returncode + self._print_test(skip_reason="# SKIP {0}".format(reason.decode("utf-8"))) + return None, 0 + + if proc.returncode == 78: + self.returncode = proc.returncode + self._print_test(skip_reason="# NOTE {0}".format(reason.decode("utf-8"))) + return None, 1 + + if proc.returncode == 1: + retry_reason = reason.decode("utf-8") + + except OSError as ex: + if ex.errno != errno.ENOENT: + sys.stderr.write(f"\nCouldn't run test-failure-policy: {ex!s}\n") + + # HACK: many tests are unstable, always retry them 3 times unless affected + if not affected and not retry_reason and not opts.no_retry_fail: + retry_reason = "be robust against unstable tests" + + has_unexpected_message = testlib.UNEXPECTED_MESSAGE.encode() in self.output + has_pixel_test_message = testlib.PIXEL_TEST_MESSAGE.encode() in self.output + if self.retries < 2 and not (has_unexpected_message or has_pixel_test_message) and retry_reason: + self.retries += 1 + self._print_test(retry_reason=f"# RETRY {self.retries} ({retry_reason})") + return retry_reason, 0 + + self.output += b"\n" + self._print_test() + self.machine_id = None + return None, 1 + + # internal methods + + def __str__(self): + cost = "" if self.cost == 1 else f" ${self.cost}" + nd = f" [ND@{self.machine_id}]" if self.nondestructive else "" + return f"{self.test_id} {self.command[0]} {self.command[-1]}{cost}{nd}" + + def _print_test(self, print_tap=True, retry_reason="", skip_reason="", todo_reason=""): + def 
write_line(line): + while line: + try: + sys.stdout.buffer.write(line) + break + except BlockingIOError as e: + line = line[e.characters_written:] + time.sleep(0.1) + + # be quiet in TAP mode for successful tests + lines = self.output.strip().splitlines(keepends=True) + if print_tap and self.returncode == 0 and len(lines) > 0: + for line in lines[:-1]: + if line.startswith(b"WARNING:"): + write_line(line) + write_line(lines[-1]) + else: + for line in lines: + write_line(line) + + if retry_reason: + retry_reason = " " + retry_reason + if skip_reason: + skip_reason = " " + skip_reason + if todo_reason: + todo_reason = " " + todo_reason + + if not print_tap: + print(retry_reason + skip_reason + todo_reason) + flush_stdout() + return + + print() # Tap needs to start on a separate line + status = 'ok' if self.returncode in [0, 77] else 'not ok' + print(f"{status} {self}{retry_reason}{skip_reason}{todo_reason}") + flush_stdout() + + +class GlobalMachine: + def __init__(self, restrict=True, cpus=None, memory_mb=None, machine_class=testvm.VirtMachine): + self.image = testvm.DEFAULT_IMAGE + self.network = testvm.VirtNetwork(image=self.image) + self.networking = self.network.host(restrict=restrict) + # provide enough RAM for cryptsetup's PBKDF, as long as that is not configurable: + # https://bugzilla.redhat.com/show_bug.cgi?id=1881829 + self.machine = machine_class(verbose=True, networking=self.networking, image=self.image, cpus=cpus, + memory_mb=memory_mb or 1400) + self.machine_class = machine_class + if not os.path.exists(self.machine.image_file): + self.machine.pull(self.machine.image_file) + self.machine.start() + self.start_time = time.time() + self.duration = None + self.ssh_address = f"{self.machine.ssh_address}:{self.machine.ssh_port}" + self.web_address = f"{self.machine.web_address}:{self.machine.web_port}" + self.running_test = None + + def reset(self): + # It is important to re-use self.networking here, so that the + # machine keeps its browser and control 
def check_valid(filename):
    """Return the importable module name for a test file, or None.

    Only [A-Za-z0-9_-] is allowed in the basename; dashes are mapped to
    underscores so the result is a valid Python identifier.
    """
    name = os.path.basename(filename)
    allowed = set(string.ascii_letters + string.digits + '-_')
    if any(c not in allowed for c in name):
        return None
    return name.replace("-", "_")


def build_command(filename, test, opts):
    """Build the argv for running a single test method from a test file."""
    option_flags = (
        ("-t", opts.trace),
        ("-v", opts.verbosity),
        ("--nonet", not opts.fetch),
        ("-l", opts.list),
        ("--coverage", opts.coverage),
    )
    cmd = [filename]
    cmd.extend(flag for flag, wanted in option_flags if wanted)
    cmd.append(test)
    return cmd


def get_affected_tests(test_dir, base_branch, test_files):
    """Detect tests affected by changes relative to origin/<base_branch>.

    Considers both changed check-* test files and changed pkg/* sources
    (the latter only yields results in the cockpit project itself).
    """
    if not base_branch:
        return []

    # Changed test files are themselves affected.
    # Never consider 'test/verify/check-example' to be affected - our tests for tests count on that.
    # That file provides only examples, there is no place for it being flaky, no need to retry.
    diff_out = subprocess.check_output(["git", "diff", "--name-only", "origin/" + base_branch, test_dir])
    changed_tests = [
        changed.decode("utf-8")
        for changed in diff_out.strip().splitlines()
        if not changed.endswith(b"check-example")
    ]

    # More than 3 changed test files is likely an unrelated sweep; ignore them all,
    # except keep 'test/verify/check-testlib' alone if present - our tests for tests count on that.
    if len(changed_tests) > 3:
        if "test/verify/check-testlib" in changed_tests:
            changed_tests = ["test/verify/check-testlib"]
        else:
            changed_tests = []

    # Changed pkg/* subdirectories in cockpit map onto check-<pkg> tests.
    # The "max. 3 check-* changes" limit above deliberately does not apply here
    # (even if the PR also changes ≥ 3 check-* files).
    diff_out = subprocess.check_output(["git", "diff", "--name-only", "origin/" + base_branch, "--", "pkg/"])

    # css-only changes cannot affect tests, so no reason to retry for them
    changed_files = [f.decode("utf-8") for f in diff_out.strip().splitlines() if not f.endswith(b"css")]

    changed_pkgs = {"check-" + f.split('/')[1] for f in changed_files}
    changed_tests.extend(test for test in test_files if any(pkg in test for pkg in changed_pkgs))

    return changed_tests
def list_tests(opts):
    """Print the sorted, de-duplicated names of all tests selected by opts."""
    files = glob.glob(os.path.join(opts.test_dir, opts.test_glob))
    # the image name only influences test ordering in detect_tests, which is
    # irrelevant for listing, so any placeholder will do
    nondestructive, destructive, _machine_class = detect_tests(files, "dummy", opts)
    for name in sorted({t.command[-1] for t in nondestructive + destructive}):
        print(name)
+ num_global = min(nondestructive_tests_len, opts.jobs) + + for _ in range(num_global): + global_machines.append(GlobalMachine(restrict=not opts.enable_network, cpus=opts.nondestructive_cpus, + memory_mb=opts.nondestructive_memory_mb, + machine_class=machine_class or testvm.VirtMachine)) + + # test scheduling loop + while True: + made_progress = False + + # mop up finished tests + logging.debug("test loop: %d running tests", len(running_tests)) + for test in running_tests.copy(): + poll_result = test.poll() + if poll_result is not None: + made_progress = True + running_tests.remove(test) + test_machine = test.machine_id # test_finish() resets it + retry_reason, test_result = test.finish(changed_tests, opts) + fail_count += test_result + logging.debug("test %s finished; result %s retry reason %s", test, test_result, retry_reason) + + if test_machine is not None and not opts.machine: + # unassign from global machine + global_machines[test_machine].running_test = None + + # sometimes our global machine gets messed up; also, tests that time out don't run cleanup handlers + # restart it to avoid an unbounded number of test retries and follow-up errors + if not opts.machine and (poll_result == 124 or (retry_reason and "test harness" in retry_reason)): + # try hard to keep the test output consistent + sys.stderr.write("\nRestarting global machine %s\n" % test_machine) + sys.stderr.flush() + global_machines[test_machine].reset() + + # run again if needed + if retry_reason: + if test.nondestructive: + nondestructive_tests.insert(0, test) + else: + destructive_tests.insert(0, test) + + if opts.machine: + if not running_tests and nondestructive_tests: + test = nondestructive_tests.pop(0) + logging.debug("Static machine is free, assigning next test %s", test) + test.assign_machine(-1, opts.machine, opts.browser) + test.start() + running_tests.append(test) + made_progress = True + else: + # find free global machines, and either assign a new non destructive test, or kill them to 
free resources + for (idx, machine) in enumerate(global_machines): + if machine.is_available(): + if nondestructive_tests: + test = nondestructive_tests.pop(0) + logging.debug("Global machine %s is free, assigning next test %s", idx, test) + machine.running_test = test + test.assign_machine(idx, machine.ssh_address, machine.web_address) + test.start() + running_tests.append(test) + else: + logging.debug("Global machine %s is free, and no more non destructive tests; killing", idx) + machine.kill() + + made_progress = True + + def running_cost(): + return sum(test.cost for test in running_tests) + + # fill the remaining available job slots with destructive tests; run tests with a cost higher than #jobs by themselves + while destructive_tests and (running_cost() + destructive_tests[0].cost <= opts.jobs or len(running_tests) == 0): + test = destructive_tests.pop(0) + logging.debug("%d running tests with total cost %d, starting next destructive test %s", + len(running_tests), running_cost(), test) + test.start() + running_tests.append(test) + made_progress = True + + # are we done? 
def main():
    """Parse CLI options, set up the test environment, then list or run tests."""
    parser = testlib.arg_parser(enable_sit=False)
    parser.add_argument('-j', '--jobs', type=int,
                        default=int(os.environ.get("TEST_JOBS", 1)), help="Number of concurrent jobs")
    parser.add_argument('--thorough', action='store_true',
                        help='Thorough mode, no skipping known issues')
    parser.add_argument('-n', '--nondestructive', action='store_true',
                        help='Only consider @nondestructive tests')
    parser.add_argument('--machine', metavar="hostname[:port]",
                        default=None, help="Run tests against an already running machine; implies --nondestructive")
    parser.add_argument('--browser', metavar="hostname[:port]",
                        default=None, help="When using --machine, use this cockpit web address")
    parser.add_argument('--test-dir', default=os.environ.get("TEST_DIR", testvm.TEST_DIR),
                        help="Directory in which to glob for test files; default: %(default)s")
    parser.add_argument('--test-glob', default="check-*",
                        help="Pattern with which to glob in the test directory; default: %(default)s")
    parser.add_argument('--exclude', action="append", default=[], metavar="TestClass.testName",
                        help="Exclude test (exact match only); can be specified multiple times")
    parser.add_argument('--nondestructive-cpus', type=int, default=None,
                        help="Number of CPUs for nondestructive test global machines")
    parser.add_argument('--nondestructive-memory-mb', type=int, default=None,
                        help="RAM size for nondestructive test global machines")
    parser.add_argument('--base', default=os.environ.get("BASE_BRANCH"),
                        help="Retry affected tests compared to given base branch; default: %(default)s")
    parser.add_argument('--track-naughties', action='store_true',
                        help='Update the occurrence of naughties on cockpit-project/bots')
    parser.add_argument('--no-retry-fail', action='store_true',
                        help="Don't retry failed tests")
    opts = parser.parse_args()

    if opts.machine:
        # a single static machine cannot host parallel tests, and needs a web endpoint
        if opts.jobs > 1:
            parser.error("--machine cannot be used with concurrent jobs")
        if not opts.browser:
            parser.error("--browser must be specified together with --machine")
        opts.nondestructive = True

    # Tell any subprocesses what we are testing
    if "TEST_REVISION" not in os.environ:
        result = subprocess.run(["git", "rev-parse", "HEAD"],
                                universal_newlines=True, check=False, stdout=subprocess.PIPE)
        if result.returncode == 0:
            os.environ["TEST_REVISION"] = result.stdout.strip()

    os.environ["TEST_BROWSER"] = os.environ.get("TEST_BROWSER", "chromium")

    image = testvm.DEFAULT_IMAGE
    testvm.DEFAULT_IMAGE = image
    os.environ["TEST_OS"] = image

    # Make sure tests can make relative imports
    sys.path.append(os.path.realpath(opts.test_dir))

    if opts.list:
        list_tests(opts)
        return 0

    return run(opts, image)


if __name__ == '__main__':
    # logging.basicConfig(level=logging.DEBUG)
    sys.exit(main())
b/test/common/storagelib.py @@ -0,0 +1,669 @@ +# This file is part of Cockpit. +# +# Copyright (C) 2015 Red Hat, Inc. +# +# Cockpit is free software; you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. +# +# Cockpit is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with Cockpit; If not, see . + +import json +import os.path +import re +import textwrap + +from testlib import Error, MachineCase, wait + + +def from_udisks_ascii(codepoints): + return ''.join(map(chr, codepoints[:-1])) + + +class StorageHelpers: + """Mix-in class for using in tests that derive from something else than MachineCase or StorageCase""" + + def inode(self, f): + return self.machine.execute("stat -L '%s' -c %%i" % f) + + def retry(self, setup, check, teardown): + def step(): + if setup: + setup() + if check(): + return True + if teardown: + teardown() + return False + + self.browser.wait(step) + + def add_ram_disk(self, size=50): + """Add per-test RAM disk + + The disk gets removed automatically when the test ends. This is safe for @nondestructive tests. + + Return the device name. + """ + # sanity test: should not yet be loaded + self.machine.execute("test ! 
-e /sys/module/scsi_debug") + self.machine.execute(f"modprobe scsi_debug dev_size_mb={size}") + dev = self.machine.execute('while true; do O=$(ls /sys/bus/pseudo/drivers/scsi_debug/adapter*/host*/target*/*:*/block 2>/dev/null || true); ' + '[ -n "$O" ] && break || sleep 0.1; done; echo "/dev/$O"').strip() + # don't use addCleanup() here, this is often busy and needs to be cleaned up late; done in MachineCase.nonDestructiveSetup() + + return dev + + def add_loopback_disk(self, size=50, name=None): + """Add per-test loopback disk + + The disk gets removed automatically when the test ends. This is safe for @nondestructive tests. + + Unlike add_ram_disk(), this can be called multiple times, and + is less size constrained. The backing file starts out sparse, + so this can be used to create massive block devices, as long + as you are careful to not actually use much of it. + + However, loopback devices look quite special to the OS, so + they are not a very good simulation of a "real" disk. + + Return the device name. + + """ + # HACK: https://bugzilla.redhat.com/show_bug.cgi?id=1969408 + # It would be nicer to remove $F immediately after the call to + # losetup, but that will break some versions of lvm2. + backf = self.machine.execute("mktemp /var/tmp/loop.XXXX").strip() + dev = self.machine.execute(f"truncate --size={size}MB {backf}; " + f"losetup -P --show {name if name else '--find'} {backf}").strip() + # If this device had partions in its last incarnation on this + # machine, they might come back for unknown reasons, in a + # non-functional state. Running partprobe will get rid of + # them. 
+ self.machine.execute("partprobe '%s'" % dev) + # right after unmounting the device is often still busy, so retry a few times + self.addCleanup(self.machine.execute, f"until losetup -d {dev}; do sleep 1; done; rm {backf}", timeout=10) + self.addCleanup(self.machine.execute, f"findmnt -n -o TARGET {dev} | xargs --no-run-if-empty umount;") + + return dev + + def add_targetd_loopback_disk(self, index, size=50): + """Add per-test loopback device that can be forcefully removed. + """ + + m = self.machine + model = f"disk{index}" + wwn = f"naa.5000{index:012x}" + + m.execute(f"rm -f /var/tmp/targetd.{model}") + m.execute(f"targetcli /backstores/fileio create name={model} size={size}M file_or_dev=/var/tmp/targetd.{model}") + m.execute(f"targetcli /loopback create {wwn}") + m.execute(f"targetcli /loopback/{wwn}/luns create /backstores/fileio/{model}") + + self.addCleanup(m.execute, f"targetcli /loopback delete {wwn}") + self.addCleanup(m.execute, f"targetcli /backstores/fileio delete {model}") + self.addCleanup(m.execute, f"rm -f /var/tmp/targetd.{model}") + + dev = m.execute(f'for dev in /sys/block/*; do if [ -f $dev/device/model ] && [ "$(cat $dev/device/model | tr -d [:space:])" == "{model}" ]; then echo /dev/$(basename $dev); fi; done').strip() + if dev == "": + raise Error("Device not found") + return dev + + def force_remove_disk(self, device): + """Act like the given device gets physically removed. + + This circumvents all the normal EBUSY failures, and thus can be used for testing + the cleanup after a forceful removal. 
+ """ + self.machine.execute(f'echo 1 > /sys/block/{os.path.basename(device)}/device/delete') + # the removal trips up PCP and our usage graphs + self.allow_browser_errors("direct: instance name lookup failed.*") + + def addCleanupVG(self, vgname): + """Ensure the given VG is removed after the test""" + + self.addCleanup(self.machine.execute, f"if [ -d /dev/{vgname} ]; then vgremove --force {vgname}; fi") + + # Dialogs + + def dialog_wait_open(self): + self.browser.wait_visible('#dialog') + + def dialog_wait_alert(self, text): + self.browser.wait_in_text('#dialog .pf-v5-c-alert__title', text) + + def dialog_wait_title(self, text): + self.browser.wait_in_text('#dialog .pf-v5-c-modal-box__title', text) + + def dialog_field(self, field): + return f'#dialog [data-field="{field}"]' + + def dialog_val(self, field): + sel = self.dialog_field(field) + ftype = self.browser.attr(sel, "data-field-type") + if ftype == "text-input-checked": + if self.browser.is_present(sel + " input[type=checkbox]:not(:checked)"): + return False + else: + return self.browser.val(sel + " input[type=text]") + elif ftype == "select": + return self.browser.attr(sel, "data-value") + else: + return self.browser.val(sel) + + def dialog_set_val(self, field, val): + sel = self.dialog_field(field) + ftype = self.browser.attr(sel, "data-field-type") + if ftype == "checkbox": + self.browser.set_checked(sel, val) + elif ftype == "select-spaces": + for label in val: + self.browser.set_checked(f'{sel} :contains("{label}") input', val) + elif ftype == "size-slider": + self.browser.set_val(sel + " .size-unit select", "1000000") + self.browser.set_input_text(sel + " .size-text input", str(val)) + elif ftype == "select": + self.browser._wait_present(sel + f" select option[value='{val}']:not([disabled])") + self.browser.set_val(sel + " select", val) + elif ftype == "select-radio": + self.browser.click(sel + f" input[data-data='{val}']") + elif ftype == "text-input": + self.browser.set_input_text(sel, val) + elif 
ftype == "text-input-checked": + if not val: + self.browser.set_checked(sel + " input[type=checkbox]", val=False) + else: + self.browser.set_checked(sel + " input[type=checkbox]", val=True) + self.browser.set_input_text(sel + " [type=text]", val) + elif ftype == "combobox": + self.browser.click(sel + " button.pf-v5-c-select__toggle-button") + self.browser.click(sel + f" .pf-v5-c-select__menu li:contains('{val}') button") + else: + self.browser.set_val(sel, val) + + def dialog_combobox_choices(self, field): + return self.browser.call_js_func("""(function (sel) { + var lis = ph_find(sel).querySelectorAll('li'); + var result = []; + for (i = 0; i < lis.length; ++i) + result.push(lis[i].textContent); + return result; + })""", self.dialog_field(field)) + + def dialog_is_present(self, field, label): + return self.browser.is_present(f'{self.dialog_field(field)} :contains("{label}") input') + + def dialog_wait_val(self, field, val, unit=None): + if unit is None: + unit = "1000000" + + sel = self.dialog_field(field) + ftype = self.browser.attr(sel, "data-field-type") + if ftype == "size-slider": + self.browser.wait_val(sel + " .size-unit select", unit) + self.browser.wait_val(sel + " .size-text input", str(val)) + elif ftype == "select": + self.browser.wait_attr(sel, "data-value", val) + else: + self.browser.wait_val(sel, val) + + def dialog_wait_error(self, field, val): + # XXX - allow for more than one error + self.browser.wait_in_text('#dialog .pf-v5-c-form__helper-text .pf-m-error', val) + + def dialog_wait_not_present(self, field): + self.browser.wait_not_present(self.dialog_field(field)) + + def dialog_wait_apply_enabled(self): + self.browser.wait_attr('#dialog button.apply:nth-of-type(1)', "disabled", None) + + def dialog_wait_apply_disabled(self): + self.browser.wait_visible('#dialog button.apply:nth-of-type(1)[disabled]') + + def dialog_apply(self): + self.browser.click('#dialog button.apply:nth-of-type(1)') + + def dialog_apply_secondary(self): + 
self.browser.click('#dialog button.apply:nth-of-type(2)') + + def dialog_cancel(self): + self.browser.click('#dialog button.cancel') + + def dialog_wait_close(self): + # file system operations often take longer than 10s + with self.browser.wait_timeout(max(self.browser.cdp.timeout, 60)): + self.browser.wait_not_present('#dialog') + + def dialog_check(self, expect): + for f in expect: + if not self.dialog_val(f) == expect[f]: + return False + return True + + def dialog_set_vals(self, values): + # Sometimes a certain field needs to be set before other + # fields come into existence and thus the order matters that + # we set the fields in. The tests however just give us a + # unordered 'dict'. Instead of changing the tests, we figure + # out the right order dynamically here by just setting what we + # can and then starting over. As long as we make progress in + # each iteration, everything is good. + failed = {} + last_error = Exception + for f in values: + try: + self.dialog_set_val(f, values[f]) + except Error as e: + failed[f] = values[f] + last_error = e + if failed: + if len(failed) < len(values): + self.dialog_set_vals(failed) + else: + raise last_error + + def dialog(self, values, expect=None, secondary=False): + if expect is None: + expect = {} + self.dialog_wait_open() + for f in expect: + self.dialog_wait_val(f, expect[f]) + self.dialog_set_vals(values) + if secondary: + self.dialog_apply_secondary() + else: + self.dialog_apply() + self.dialog_wait_close() + + def confirm(self): + self.dialog({}) + + # There is some asynchronous activity in the storage stack. (It + # used to be much worse, but it has improved over the years, yay!) + # + # The tests deal with that by waiting for the right conditions, + # which sometimes means opening a dialog a couple of times until + # it has the right contents, or applying it a couple of times + # until it works. 
+ + def dialog_open_with_retry(self, trigger, expect): + def setup(): + trigger() + self.dialog_wait_open() + + def check(): + if callable(expect): + return expect() + else: + return self.dialog_check(expect) + + def teardown(): + self.dialog_cancel() + self.dialog_wait_close() + self.retry(setup, check, teardown) + + def dialog_apply_with_retry(self, expected_errors=None): + def step(): + try: + self.dialog_apply() + self.dialog_wait_close() + except Error: + if expected_errors is None: + return False + err = self.browser.text('#dialog') + print(err) + for exp in expected_errors: + if exp in err: + return False + raise + return True + self.browser.wait(step) + + def dialog_with_retry(self, trigger, values, expect): + self.dialog_open_with_retry(trigger, expect) + if values: + for f in values: + self.dialog_set_val(f, values[f]) + self.dialog_apply() + else: + self.dialog_cancel() + self.dialog_wait_close() + + def dialog_with_error_retry(self, trigger, errors, values=None, first_setup=None, retry_setup=None, setup=None): + def doit(): + nonlocal first_setup + trigger() + self.dialog_wait_open() + if values: + self.dialog_set_vals(values) + if first_setup: + first_setup() + first_setup = None + elif retry_setup: + retry_setup() + elif setup: + setup() + self.dialog_apply() + try: + self.dialog_wait_close() + return True + except Exception: + dialog_text = self.browser.text('#dialog .pf-v5-c-alert__title') + for err in errors: + if err in dialog_text: + print("WARNING: retrying dialog") + self.dialog_cancel() + self.dialog_wait_close() + return False + raise + self.browser.wait(doit) + + def udisks_objects(self): + return json.loads(self.machine.execute(["python3", "-c", textwrap.dedent(""" + import dbus, json + print(json.dumps(dbus.SystemBus().call_blocking( + "org.freedesktop.UDisks2", + "/org/freedesktop/UDisks2", + "org.freedesktop.DBus.ObjectManager", + "GetManagedObjects", "", [])))""")])) + + def configuration_field(self, dev, tab, field): + managerObjects = 
self.udisks_objects() + for path in managerObjects: + if "org.freedesktop.UDisks2.Block" in managerObjects[path]: + iface = managerObjects[path]["org.freedesktop.UDisks2.Block"] + if from_udisks_ascii(iface["Device"]) == dev or from_udisks_ascii(iface["PreferredDevice"]) == dev: + for entry in iface["Configuration"]: + if entry[0] == tab: + if field in entry[1]: + print(f"{path}/{tab}/{field} = {from_udisks_ascii(entry[1][field])}") + return from_udisks_ascii(entry[1][field]) + return "" + + def assert_in_configuration(self, dev, tab, field, text): + self.assertIn(text, self.configuration_field(dev, tab, field)) + + def assert_not_in_configuration(self, dev, tab, field, text): + self.assertNotIn(text, self.configuration_field(dev, tab, field)) + + def child_configuration_field(self, dev, tab, field): + udisks_objects = self.udisks_objects() + for path in udisks_objects: + if "org.freedesktop.UDisks2.Encrypted" in udisks_objects[path]: + block_iface = udisks_objects[path]["org.freedesktop.UDisks2.Block"] + crypto_iface = udisks_objects[path]["org.freedesktop.UDisks2.Encrypted"] + if from_udisks_ascii(block_iface["Device"]) == dev or from_udisks_ascii(block_iface["PreferredDevice"]) == dev: + for entry in crypto_iface["ChildConfiguration"]: + if entry[0] == tab: + if field in entry[1]: + print("%s/child/%s/%s = %s" % (path, tab, field, + from_udisks_ascii(entry[1][field]))) + return from_udisks_ascii(entry[1][field]) + return "" + + def assert_in_child_configuration(self, dev, tab, field, text): + self.assertIn(text, self.child_configuration_field(dev, tab, field)) + + def lvol_child_configuration_field(self, lvol, tab, field): + udisk_objects = self.udisks_objects() + for path in udisk_objects: + if "org.freedesktop.UDisks2.LogicalVolume" in udisk_objects[path]: + iface = udisk_objects[path]["org.freedesktop.UDisks2.LogicalVolume"] + if iface["Name"] == lvol: + for entry in iface["ChildConfiguration"]: + if entry[0] == tab: + if field in entry[1]: + 
print("%s/child/%s/%s = %s" % (path, tab, field, + from_udisks_ascii(entry[1][field]))) + return from_udisks_ascii(entry[1][field]) + return "" + + def assert_in_lvol_child_configuration(self, lvol, tab, field, text): + self.assertIn(text, self.lvol_child_configuration_field(lvol, tab, field)) + + def setup_systemd_password_agent(self, password): + # This sets up a systemd password agent that replies to all + # queries with the given password. + + self.write_file("/usr/local/bin/test-password-agent", + f"""#!/bin/sh +# Sleep a bit to avoid starting this agent too quickly over and over, +# and so that other agents get a chance as well. +sleep 30 + +for s in $(grep -h ^Socket= /run/systemd/ask-password/ask.* | sed 's/^Socket=//'); do + printf '%s' '{password}' | /usr/lib/systemd/systemd-reply-password 1 $s +done +""", perm="0755") + + self.write_file("/etc/systemd/system/test-password-agent.service", + """ +[Unit] +Description=Test Password Agent +DefaultDependencies=no +Conflicts=shutdown.target emergency.service +Before=shutdown.target +[Service] +ExecStart=/usr/local/bin/test-password-agent +""") + + self.write_file("/etc/systemd/system/test-password-agent.path", + """ +[Unit] +Description=Test Password Agent Directory Watch +DefaultDependencies=no +Conflicts=shutdown.target emergency.service +Before=paths.target shutdown.target cryptsetup.target +[Path] +DirectoryNotEmpty=/run/systemd/ask-password +MakeDirectory=yes +""") + self.machine.execute("ln -s ../test-password-agent.path /etc/systemd/system/sysinit.target.wants/") + + def encrypt_root(self, passphrase): + m = self.machine + + # Set up a password agent in the old root and then arrange for + # it to be included in the initrd. This will unlock the new + # encrypted root during boot. + # + # The password agent and its initrd configuration will be + # copied to the new root, so it will stay in place also when + # the initrd is regenerated again from within the new root. 
+ + self.setup_systemd_password_agent(passphrase) + install_items = [ + '/etc/systemd/system/sysinit.target.wants/test-password-agent.path', + '/etc/systemd/system/test-password-agent.path', + '/etc/systemd/system/test-password-agent.service', + '/usr/local/bin/test-password-agent', + ] + m.write("/etc/dracut.conf.d/01-askpass.conf", + f'install_items+=" {" ".join(install_items)} "') + + # The first step is to move /boot to a new unencrypted + # partition on the new disk but keep it mounted at /boot. + # This helps when running grub2-install and grub2-mkconfig, + # which will look at /boot and do the right thing. + # + # Then we copy (most of) the old root to the new disk, into a + # logical volume sitting on top of a LUKS container. + # + # The kernel command line is changed to use the new root + # filesystem, and grub is installed on the new disk. The boot + # configuration of the VM has been changed to boot from the + # new disk. + # + # At that point the new root can be booted by the existing + # initrd, but the initrd will prompt for the passphrase (as + # expected). Thus, the initrd is regenerated to include the + # password agent from above. + # + # Before the reboot, we destroy the original disk to make + # really sure that it wont be used anymore. 
+ + info = m.add_disk("6G", serial="NEWROOT", boot_disk=True) + dev = "/dev/" + info["dev"] + wait(lambda: m.execute(f"test -b {dev} && echo present").strip() == "present") + m.execute(f""" +set -x +parted -s {dev} mktable msdos +parted -s {dev} mkpart primary ext4 1M 500M +parted -s {dev} mkpart primary ext4 500M 100% +echo {passphrase} | cryptsetup luksFormat --pbkdf-memory=300 {dev}2 +luks_uuid=$(blkid -p {dev}2 -s UUID -o value) +echo {passphrase} | cryptsetup luksOpen --pbkdf-memory=300 {dev}2 luks-$luks_uuid +vgcreate root /dev/mapper/luks-$luks_uuid +lvcreate root -n root -l100%VG +mkfs.ext4 /dev/root/root +mkdir /new-root +mount /dev/root/root /new-root +mkfs.ext4 {dev}1 +# don't move the EFI partition +if mountpoint /boot/efi; then umount /boot/efi; fi +mkdir /new-root/boot +mount {dev}1 /new-root/boot +tar --selinux --one-file-system -cf - --exclude /boot --exclude='/var/tmp/*' --exclude='/var/cache/*' \ + --exclude='/var/lib/mock/*' --exclude='/var/lib/containers/*' --exclude='/new-root/*' \ + / | tar --selinux -C /new-root -xf - +tar --one-file-system -C /boot -cf - . | tar -C /new-root/boot -xf - +umount /new-root/boot +mount {dev}1 /boot +echo "(hd0) {dev}" >/boot/grub2/device.map +sed -i -e 's,/boot/,/,' /boot/loader/entries/* +uuid=$(blkid -p /dev/root/root -s UUID -o value) +buuid=$(blkid -p {dev}1 -s UUID -o value) +echo "UUID=$uuid / auto defaults 0 0" >/new-root/etc/fstab +echo "UUID=$buuid /boot auto defaults 0 0" >>/new-root/etc/fstab +dracut --regenerate-all --force +grub2-install {dev} +( # HACK - grub2-mkconfig messes with /boot/loader/entries/ and /etc/kernel/cmdline + mv /boot/loader/entries /boot/loader/entries.stowed + ! test -f /etc/kernel/cmdline || mv /etc/kernel/cmdline /etc/kernel/cmdline.stowed + grub2-mkconfig -o /boot/grub2/grub.cfg + mv /boot/loader/entries.stowed /boot/loader/entries + ! 
test -f /etc/kernel/cmdline.stowed || mv /etc/kernel/cmdline.stowed /etc/kernel/cmdline +) +grubby --update-kernel=ALL --args="root=UUID=$uuid rootflags=defaults rd.luks.uuid=$luks_uuid rd.lvm.lv=root/root" +! test -f /etc/kernel/cmdline || cp /etc/kernel/cmdline /new-root/etc/kernel/cmdline +""", timeout=300) + m.spawn("dd if=/dev/zero of=/dev/vda bs=1M count=100; reboot", "reboot", check=False) + m.wait_reboot(300) + self.assertEqual(m.execute("findmnt -n -o SOURCE /").strip(), "/dev/mapper/root-root") + + # Cards and tables + + def card(self, title): + return f"[data-test-card-title='{title}']" + + def card_parent_link(self): + return ".pf-v5-c-breadcrumb__item:nth-last-child(2) > a" + + def card_header(self, title): + return self.card(title) + " .pf-v5-c-card__header" + + def card_row(self, title, index=None, name=None, location=None): + if index is not None: + return self.card(title) + f" tbody tr:nth-child({index})" + elif name is not None: + name = name.replace("/dev/", "") + return self.card(title) + f" tbody [data-test-row-name='{name}']" + else: + return self.card(title) + f" tbody [data-test-row-location='{location}']" + + def click_card_row(self, title, index=None, name=None, location=None): + # We need to click on a element since that's where the handlers are... 
+ self.browser.click(self.card_row(title, index, name, location) + " td:nth-child(1)") + + def card_row_col(self, title, row_index=None, col_index=None, row_name=None, row_location=None): + return self.card_row(title, row_index, row_name, row_location) + f" td:nth-child({col_index})" + + def card_desc(self, card_title, desc_title): + return self.card(card_title) + f" [data-test-desc-title='{desc_title}'] [data-test-value=true]" + + def card_desc_action(self, card_title, desc_title): + return self.card(card_title) + f" [data-test-desc-title='{desc_title}'] [data-test-action=true] button" + + def card_button(self, card_title, button_title): + return self.card(card_title) + f" button:contains('{button_title}')" + + def dropdown_toggle(self, parent): + return parent + " .pf-v5-c-menu-toggle" + + def dropdown_action(self, parent, title): + return parent + f" .pf-v5-c-menu button:contains('{title}')" + + def dropdown_description(self, parent, title): + return parent + f" .pf-v5-c-menu button:contains('{title}') .pf-v5-c-menu__item-description" + + def click_dropdown(self, parent, title): + self.browser.click(self.dropdown_toggle(parent)) + self.browser.click(self.dropdown_action(parent, title)) + + def click_card_dropdown(self, card_title, button_title): + self.click_dropdown(self.card_header(card_title), button_title) + + def click_devices_dropdown(self, title): + self.click_card_dropdown("Storage", title) + + def check_dropdown_action_disabled(self, parent, title, expected_text): + self.browser.click(self.dropdown_toggle(parent)) + self.browser.wait_visible(self.dropdown_action(parent, title) + "[disabled]") + self.browser.wait_text(self.dropdown_description(parent, title), expected_text) + self.browser.click(self.dropdown_toggle(parent)) + + def wait_mounted(self, card_title): + with self.browser.wait_timeout(30): + self.browser.wait_not_in_text(self.card_desc(card_title, "Mount point"), + "The filesystem is not mounted.") + + def wait_not_mounted(self, card_title): + 
with self.browser.wait_timeout(30): + self.browser.wait_in_text(self.card_desc(card_title, "Mount point"), + "The filesystem is not mounted.") + + def wait_card_button_disabled(self, card_title, button_title): + with self.browser.wait_timeout(30): + self.browser.wait_visible(self.card_button(card_title, button_title) + ":disabled") + + +class StorageCase(MachineCase, StorageHelpers): + + def setUp(self): + + if self.image in ["fedora-coreos", "rhel4edge"]: + self.skipTest("No udisks/cockpit-storaged on OSTree images") + + super().setUp() + + ver = self.machine.execute("busctl --system get-property org.freedesktop.UDisks2 /org/freedesktop/UDisks2/Manager org.freedesktop.UDisks2.Manager Version || true") + m = re.match('s "(.*)"', ver) + if m: + self.storaged_version = list(map(int, m.group(1).split("."))) + else: + self.storaged_version = [0] + + crypto_types = self.machine.execute("busctl --system get-property org.freedesktop.UDisks2 /org/freedesktop/UDisks2/Manager org.freedesktop.UDisks2.Manager SupportedEncryptionTypes || true") + if "luks2" in crypto_types: + self.default_crypto_type = "luks2" + else: + self.default_crypto_type = "luks1" + + if self.image.startswith("rhel-8") or self.image.startswith("centos-8"): + # HACK: missing /etc/crypttab file upsets udisks: https://github.com/storaged-project/udisks/pull/835 + self.machine.write("/etc/crypttab", "") + + # starting out with empty PCP logs and pmlogger not running causes these metrics channel messages + self.allow_journal_messages("pcp-archive: no such metric: disk.*") + + # UDisks2 invalidates the Size property and cockpit-bridge + # gets it immediately. But sometimes the interface is already + # gone. 
+ self.allow_journal_messages("org.freedesktop.UDisks2: couldn't get property org.freedesktop.UDisks2.Filesystem Size .* No such interface.*") diff --git a/test/common/tap-cdp b/test/common/tap-cdp new file mode 100755 index 0000000..a70d530 --- /dev/null +++ b/test/common/tap-cdp @@ -0,0 +1,119 @@ +#!/usr/bin/python3 + +# +# Copyright (C) 2017 Red Hat, Inc. +# +# Cockpit is free software; you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. +# +# Cockpit is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with Cockpit; If not, see . +# + +import argparse +import os +import re +import subprocess +import sys + +import cdp + +tap_line_re = re.compile(r'^(ok [0-9]+|not ok [0-9]+|bail out!|[0-9]+\.\.[0-9]+|# )', re.IGNORECASE) + +parser = argparse.ArgumentParser(description="A CDP driver for QUnit which outputs TAP") +parser.add_argument("server", help="path to the test-server and the test page to run", nargs=argparse.REMAINDER) + +# Strip prefix from url +# We need this to compensate for automake test generation behavior: +# The tests are called with the path (relative to the build directory) of the testfile, +# but from the build directory. Some tests make assumptions regarding the structure of the +# filename. 
In order to make sure that they receive the same name, regardless of actual +# build directory location, we need to strip that prefix (path from build to source directory) +# from the filename +parser.add_argument("--strip", dest="strip", help="strip prefix from test file paths") + +opts = parser.parse_args() + +# argparse sometimes forgets to remove this on argparse.REMAINDER args +if opts.server[0] == '--': + opts.server = opts.server[1:] + +# The test file is the last argument, but 'server' might contain arbitrary +# amount of options. We cannot express this with argparse, so take it apart +# manually. +opts.test = opts.server[-1] +opts.server = opts.server[:-1] + +if opts.strip and opts.test.startswith(opts.strip): + opts.test = opts.test[len(opts.strip):] + +cdp = cdp.CDP("C.utf8") + +try: + cdp.browser.path(cdp.show_browser) +except SystemError: + print('1..0 # skip web browser not found') + sys.exit(0) + +# pass the address through a separate fd, so that we can see g_debug() messages (which go to stdout) +(addr_r, addr_w) = os.pipe() +env = os.environ.copy() +env["TEST_SERVER_ADDRESS_FD"] = str(addr_w) + +server = subprocess.Popen(opts.server, + stdin=subprocess.DEVNULL, + pass_fds=(addr_w,), + close_fds=True, + env=env) +os.close(addr_w) +address = os.read(addr_r, 1000).decode() +os.close(addr_r) + +cdp.invoke("Page.navigate", url=address + '/' + opts.test) + +success = True +ignore_resource_errors = False + +for t, message in cdp.read_log(): + + # fail on browser level errors + if t == 'cdp': + if message['level'] == "error": + if ignore_resource_errors and "Failed to load resource" in message["text"]: + continue + success = False + break + else: + continue + + if message == 'cockpittest-tap-done': + break + elif message == 'cockpittest-tap-error': + success = False + break + elif message == 'cockpittest-tap-expect-resource-error': + ignore_resource_errors = True + continue + + # TAP lines go to stdout, everything else to stderr + if 
tap_line_re.match(message): + if message.startswith('not ok'): + success = False + print(message) + else: + print(message, file=sys.stderr) + + +server.terminate() +server.wait() +cdp.kill() + +if not success: + sys.exit(1) diff --git a/test/common/test-functions.js b/test/common/test-functions.js new file mode 100644 index 0000000..c2c28bd --- /dev/null +++ b/test/common/test-functions.js @@ -0,0 +1,360 @@ +/* eslint no-unused-vars: 0 */ + +/* + * These are routines used by our testing code. + * + * jQuery is not necessarily present. Don't rely on it + * for routine operations. + */ + +function ph_select(sel) { + if (!window.Sizzle) { + return Array.from(document.querySelectorAll(sel)); + } + + if (sel.includes(":contains(")) { + if (!window.Sizzle) { + throw new Error("Using ':contains' when window.Sizzle is not available."); + } + return window.Sizzle(sel); + } else { + return Array.from(document.querySelectorAll(sel)); + } +} + +function ph_only(els, sel) { + if (els.length === 0) + throw new Error(sel + " not found"); + if (els.length > 1) + throw new Error(sel + " is ambiguous"); + return els[0]; +} + +function ph_find (sel) { + const els = ph_select(sel); + return ph_only(els, sel); +} + +function ph_count(sel) { + const els = ph_select(sel); + return els.length; +} + +function ph_count_check(sel, expected_num) { + return (ph_count(sel) == expected_num); +} + +function ph_val (sel) { + const el = ph_find(sel); + if (el.value === undefined) + throw new Error(sel + " does not have a value"); + return el.value; +} + +function ph_set_val (sel, val) { + const el = ph_find(sel); + if (el.value === undefined) + throw new Error(sel + " does not have a value"); + el.value = val; + const ev = new Event("change", { bubbles: true, cancelable: false }); + el.dispatchEvent(ev); +} + +function ph_has_val (sel, val) { + return ph_val(sel) == val; +} + +function ph_collected_text_is (sel, val) { + const els = ph_select(sel); + const rest = els.map(el => { + if 
(el.textContent === undefined) + throw new Error(sel + " can not have text"); + return el.textContent.replaceAll("\xa0", " "); + }).join(""); + return rest === val; +} + +function ph_text (sel) { + const el = ph_find(sel); + if (el.textContent === undefined) + throw new Error(sel + " can not have text"); + // 0xa0 is a non-breakable space, which is a rendering detail of Chromium + // and awkward to handle in tests; turn it into normal spaces + return el.textContent.replaceAll("\xa0", " "); +} + +function ph_attr (sel, attr) { + return ph_find(sel).getAttribute(attr); +} + +function ph_set_attr (sel, attr, val) { + const el = ph_find(sel); + if (val === null || val === undefined) + el.removeAttribute(attr); + else + el.setAttribute(attr, val); + + const ev = new Event("change", { bubbles: true, cancelable: false }); + el.dispatchEvent(ev); +} + +function ph_has_attr (sel, attr, val) { + return ph_attr(sel, attr) == val; +} + +function ph_attr_contains (sel, attr, val) { + const a = ph_attr(sel, attr); + return a && a.indexOf(val) > -1; +} + +function ph_mouse(sel, type, x, y, btn, ctrlKey, shiftKey, altKey, metaKey) { + const el = ph_find(sel); + + /* The element has to be visible, and not collapsed */ + if (el.offsetWidth <= 0 && el.offsetHeight <= 0 && el.tagName != 'svg') + throw new Error(sel + " is not visible"); + + /* The event has to actually work */ + let processed = false; + function handler() { + processed = true; + } + + el.addEventListener(type, handler, true); + + let elp = el; + let left = elp.offsetLeft || 0; + let top = elp.offsetTop || 0; + while (elp.offsetParent) { + elp = elp.offsetParent; + left += elp.offsetLeft; + top += elp.offsetTop; + } + + let detail = 0; + if (["click", "mousedown", "mouseup"].indexOf(type) > -1) + detail = 1; + else if (type === "dblclick") + detail = 2; + + const ev = new MouseEvent(type, { + bubbles: true, + cancelable: true, + view: window, + detail, + screenX: left + x, + screenY: top + y, + clientX: left + x, + 
clientY: top + y, + button: btn, + ctrlKey: ctrlKey || false, + shiftKey: shiftKey || false, + altKey: altKey || false, + metaKey: metaKey || false + }); + + el.dispatchEvent(ev); + + el.removeEventListener(type, handler, true); + + /* It really had to work */ + if (!processed) + throw new Error(sel + " is disabled or somehow doesn't process events"); +} + +function ph_get_checked (sel) { + const el = ph_find(sel); + if (el.checked === undefined) + throw new Error(sel + " is not checkable"); + + return el.checked; +} + +function ph_set_checked (sel, val) { + const el = ph_find(sel); + if (el.checked === undefined) + throw new Error(sel + " is not checkable"); + + if (el.checked != val) + ph_mouse(sel, "click", 0, 0, 0); +} + +function ph_is_visible (sel) { + const el = ph_find(sel); + return el.tagName == "svg" || ((el.offsetWidth > 0 || el.offsetHeight > 0) && !(el.style.visibility == "hidden" || el.style.display == "none")); +} + +function ph_is_present(sel) { + const els = ph_select(sel); + return els.length > 0; +} + +function ph_in_text (sel, text) { + return ph_text(sel).indexOf(text) != -1; +} + +function ph_text_is (sel, text) { + return ph_text(sel) == text; +} + +function ph_text_matches (sel, pattern) { + return ph_text(sel).match(pattern); +} + +function ph_go(href) { + if (href.indexOf("#") === 0) { + window.location.hash = href; + } else { + if (window.name.indexOf("cockpit1") !== 0) + throw new Error("ph_go() called in non cockpit window"); + const control = { + command: "jump", + location: href + }; + window.parent.postMessage("\n" + JSON.stringify(control), "*"); + } +} + +function ph_focus(sel) { + ph_find(sel).focus(); +} + +function ph_scrollIntoViewIfNeeded(sel) { + ph_find(sel).scrollIntoViewIfNeeded(); +} + +function ph_blur(sel) { + ph_find(sel).blur(); +} + +function ph_blur_active() { + const elt = window.document.activeElement; + if (elt) + elt.blur(); +} + +class PhWaitCondTimeout extends Error { + constructor(description) { + if 
(description && description.apply) + description = description.apply(); + if (description) + super(description); + else + super("condition did not become true"); + } +} + +function ph_wait_cond(cond, timeout, error_description) { + return new Promise((resolve, reject) => { + // poll every 100 ms for now; FIXME: poll less often and re-check on mutations using + // https://developer.mozilla.org/en-US/docs/Web/API/MutationObserver + let stepTimer = null; + let last_err = null; + const tm = window.setTimeout(() => { + if (stepTimer) + window.clearTimeout(stepTimer); + reject(last_err || new PhWaitCondTimeout(error_description)); + }, timeout); + function step() { + try { + if (cond()) { + window.clearTimeout(tm); + resolve(); + return; + } + } catch (err) { + last_err = err; + } + stepTimer = window.setTimeout(step, 100); + } + step(); + }); +} + +function currentFrameAbsolutePosition() { + let currentWindow = window; + let currentParentWindow; + const positions = []; + let rect; + + while (currentWindow !== window.top) { + currentParentWindow = currentWindow.parent; + for (let idx = 0; idx < currentParentWindow.frames.length; idx++) + if (currentParentWindow.frames[idx] === currentWindow) { + for (const frameElement of currentParentWindow.document.getElementsByTagName('iframe')) { + if (frameElement.contentWindow === currentWindow) { + rect = frameElement.getBoundingClientRect(); + positions.push({ x: rect.x, y: rect.y }); + } + } + currentWindow = currentParentWindow; + break; + } + } + + return positions.reduce((accumulator, currentValue) => { + return { + x: accumulator.x + currentValue.x, + y: accumulator.y + currentValue.y + }; + }, { x: 0, y: 0 }); +} + +function flatten(array_of_arrays) { + if (array_of_arrays.length > 0) + return Array.prototype.concat.apply([], array_of_arrays); + else + return []; +} + +function ph_selector_clips(sels) { + const f = currentFrameAbsolutePosition(); + const elts = flatten(sels.map(ph_select)); + return elts.map(e => { + const 
r = e.getBoundingClientRect(); + return { x: r.x + f.x, y: r.y + f.y, width: r.width, height: r.height, scale: 1 }; + }); +} + +function ph_element_clip(sel) { + ph_find(sel); // just to make sure it is not ambiguous + return ph_selector_clips([sel])[0]; +} + +function ph_count_animations(sel) { + return ph_find(sel).getAnimations({ subtree: true }).length; +} + +function ph_set_texts(new_texts) { + for (const sel in new_texts) { + const elts = ph_select(sel); + if (elts.length == 0) + throw new Error(sel + " not found"); + for (let elt of elts) { + // We have to be careful to not replace any actual nodes + // in the DOM since that would cause React to fail later + // when it tries to remove some of its nodes that are no + // longer in the DOM. This means that setting the + // "textContent" property is out, for example. + // + // Instead, we insist on finding an actual "Text" node + // that we then modify. If the given selector results in + // elements that have other elements in them, we refuse to + // mock them. + // + // However, for convenience, this function digs into + // elements that have exactly one other child element. + while (elt.children.length == 1) + elt = elt.children[0]; + if (elt.children.length != 0) + throw new Error(sel + " can not be mocked since it contains more than text"); + let subst = new_texts[sel]; + for (const n of elt.childNodes) { + if (n.nodeType == 3) { // 3 == TEXT + n.data = subst; + subst = ""; + } + } + } + } +} diff --git a/test/common/testlib.py b/test/common/testlib.py new file mode 100644 index 0000000..cbb9bb7 --- /dev/null +++ b/test/common/testlib.py @@ -0,0 +1,2535 @@ +# This file is part of Cockpit. +# +# Copyright (C) 2013 Red Hat, Inc. +# +# Cockpit is free software; you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. 
+# +# Cockpit is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with Cockpit; If not, see . + +"""Tools for writing Cockpit test cases.""" + +import argparse +import base64 +import errno +import fnmatch +import functools +import glob +import io +import json +import os +import re +import shutil +import socket +import subprocess +import sys +import tempfile +import time +import traceback +import unittest +from time import sleep +from typing import Any, Callable, Dict, List, Optional, Union + +import cdp +import testvm +from lcov import write_lcov +from lib.constants import OSTREE_IMAGES + +try: + from PIL import Image +except ImportError: + Image = None + +BASE_DIR = os.path.realpath(f'{__file__}/../../..') +TEST_DIR = f'{BASE_DIR}/test' +BOTS_DIR = f'{BASE_DIR}/bots' + +os.environ["PATH"] = "{0}:{1}:{2}".format(os.environ.get("PATH"), BOTS_DIR, TEST_DIR) + +# Be careful when changing this string, check in cockpit-project/bots where it is being used +UNEXPECTED_MESSAGE = "FAIL: Test completed, but found unexpected " +PIXEL_TEST_MESSAGE = "Some pixel tests have failed" + +__all__ = ( + # Test definitions + 'test_main', + 'arg_parser', + 'Browser', + 'MachineCase', + 'nondestructive', + 'no_retry_when_changed', + 'onlyImage', + 'skipImage', + 'skipDistroPackage', + 'skipOstree', + 'skipBrowser', + 'todo', + 'todoPybridge', + 'todoPybridgeRHEL8', + 'timeout', + 'Error', + + 'sit', + 'wait', + 'opts', + 'TEST_DIR', + 'UNEXPECTED_MESSAGE', + 'PIXEL_TEST_MESSAGE' +) + +# Command line options +opts = argparse.Namespace() +opts.sit = False +opts.trace = False +opts.attachments = None +opts.revision = None +opts.address = None +opts.jobs = 1 +opts.fetch = True +opts.coverage = False + +# Browser layouts +# 
+# A browser can be switched into a number of different layouts, such +# as "desktop" and "mobile". A default set of layouts is defined +# here, but projects can override this with a file called +# "test/browser-layouts.json". +# +# Each layout defines the size of the shell (where the main navigation +# is) and also the size of the content iframe (where the actual page +# like "Networking" or "Overview" is displayed). +# +# When the browser layout is switched (by calling Browset.set_layout), +# this will either set the shell size or the content size, depending +# on which frame is current (as set by Browser.enter_page or +# Browser.leave_page). +# +# This makes sure that pixel tests for the whole content iframe are +# always the exact size as specified in the layout definition, and +# don't change size when the navigation stuff in the shell changes. +# +# The browser starts out in the first layout of this list, which is +# "desktop" by default. + +default_layouts = [ + { + "name": "desktop", + "theme": "light", + "shell_size": [1920, 1200], + "content_size": [1680, 1130] + }, + { + "name": "medium", + "theme": "light", + "is_mobile": False, + "shell_size": [1280, 768], + "content_size": [1040, 698] + }, + { + "name": "mobile", + "theme": "light", + "shell_size": [414, 1920], + "content_size": [414, 1856] + }, + { + "name": "dark", + "theme": "dark", + "shell_size": [1920, 1200], + "content_size": [1680, 1130] + }, + { + "name": "rtl", + "theme": "light", + "shell_size": [1920, 1200], + "content_size": [1680, 1130] + }, +] + + +def attach(filename: str, move: bool = False): + """Put a file into the attachments directory. + + :param filename: file to put in attachments directory + :param move: set this to true to move dynamically generated files which + are not touched by destructive tests. 
(default False) + """ + if not opts.attachments: + return + dest = os.path.join(opts.attachments, os.path.basename(filename)) + if os.path.exists(filename) and not os.path.exists(dest): + if move: + shutil.move(filename, dest) + else: + shutil.copy(filename, dest) + + +def unique_filename(base, ext): + for i in range(20): + if i == 0: + f = f"{base}.{ext}" + else: + f = f"{base}-{i}.{ext}" + if not os.path.exists(f): + return f + return f"{base}.{ext}" + + +class Browser: + def __init__(self, address, label, machine, pixels_label=None, coverage_label=None, port=None): + if ":" in address: + self.address, _, self.port = address.rpartition(":") + else: + self.address = address + self.port = 9090 + if port is not None: + self.port = port + self.default_user = "admin" + self.label = label + self.pixels_label = pixels_label + self.used_pixel_references = set() + self.coverage_label = coverage_label + self.machine = machine + path = os.path.dirname(__file__) + sizzle_js = os.path.join(path, "../../node_modules/sizzle/dist/sizzle.js") + helpers = [os.path.join(path, "test-functions.js")] + if os.path.exists(sizzle_js): + helpers.append(sizzle_js) + self.cdp = cdp.CDP("C.utf8", verbose=opts.trace, trace=opts.trace, + inject_helpers=helpers, + start_profile=coverage_label is not None) + self.password = "foobar" + self.timeout_factor = int(os.getenv("TEST_TIMEOUT_FACTOR", "1")) + self.failed_pixel_tests = 0 + self.allow_oops = False + self.body_clip = None + try: + with open(f'{TEST_DIR}/browser-layouts.json') as fp: + self.layouts = json.load(fp) + except FileNotFoundError: + self.layouts = default_layouts + # Firefox CDP does not support setting EmulatedMedia + # https://bugzilla.mozilla.org/show_bug.cgi?id=1549434 + if self.cdp.browser.name != "chromium": + self.layouts = [layout for layout in self.layouts if layout["theme"] != "dark"] + self.current_layout = None + + def allow_download(self) -> None: + """Allow browser downloads""" + if self.cdp.browser.name == 
"chromium": + self.cdp.invoke("Page.setDownloadBehavior", behavior="allow", downloadPath=self.cdp.download_dir) + + def open(self, href: str, cookie: Optional[Dict[str, str]] = None, tls: bool = False): + """Load a page into the browser. + + :param href: the path of the Cockpit page to load, such as "/users". Either PAGE or URL needs to be given. + :param cookie: a dictionary object representing a cookie. + :param tls: load the page using https (default False) + + Raises: + Error: When a timeout occurs waiting for the page to load. + """ + if href.startswith("/"): + schema = tls and "https" or "http" + href = "%s://%s:%s%s" % (schema, self.address, self.port, href) + + if not self.current_layout and os.environ.get("TEST_SHOW_BROWSER") in [None, "pixels"]: + self.current_layout = self.layouts[0] + size = self.current_layout["shell_size"] + self._set_window_size(size[0], size[1]) + if cookie: + self.cdp.invoke("Network.setCookie", **cookie) + + self.switch_to_top() + opts = {} + if self.cdp.browser.name == "firefox": + # by default, Firefox optimizes this away if the current and the given href URL + # are the same (Like in TestKeys.testAuthorizedKeys). 
+ # Force a reload in this case, to make tests and the waitPageLoad below predictable + # But that option has the inverse effect with Chromium (argh) + opts["transitionType"] = "reload" + elif self.cdp.browser.name == 'chromium': + # Chromium also optimizes this away, but doesn't have a knob to force loading + # so load the blank page first + self.cdp.invoke("Page.navigate", url="about:blank") + self.cdp.invoke("waitPageLoad", timeout=5) + self.cdp.invoke("Page.navigate", url=href, **opts) + self.cdp.invoke("waitPageLoad", timeout=self.cdp.timeout) + + def set_user_agent(self, ua: str): + """Set the user agent of the browser + + :param ua: user agent string + :type ua: str + """ + self.cdp.invoke("Emulation.setUserAgentOverride", userAgent=ua) + + def reload(self, ignore_cache: bool = False): + """Reload the current page + + :param ignore_cache: if true browser cache is ignored (default False) + :type ignore_cache: bool + """ + + self.switch_to_top() + self.wait_js_cond("ph_select('iframe.container-frame').every(function (e) { return e.getAttribute('data-loaded'); })") + self.cdp.invoke("reloadPageAndWait", ignoreCache=ignore_cache) + + self.machine.allow_restart_journal_messages() + + def switch_to_frame(self, name: str): + """Switch to frame in browser tab + + Each page has a main frame and can have multiple subframes, usually + iframes. + + :param name: frame name + """ + self.cdp.set_frame(name) + + def switch_to_top(self): + """Switch to the main frame + + Switch to the main frame from for example an iframe. 
+ """ + self.cdp.set_frame(None) + + def upload_file(self, selector: str, file: str): + r = self.cdp.invoke("Runtime.evaluate", expression='document.querySelector(%s)' % jsquote(selector)) + objectId = r["result"]["objectId"] + self.cdp.invoke("DOM.setFileInputFiles", files=[file], objectId=objectId) + + def raise_cdp_exception(self, func, arg, details, trailer=None): + # unwrap a typical error string + if details.get("exception", {}).get("type") == "string": + msg = details["exception"]["value"] + elif details.get("text", None): + msg = details.get("text", None) + else: + msg = str(details) + if trailer: + msg += "\n" + trailer + raise Error("%s(%s): %s" % (func, arg, msg)) + + def inject_js(self, code: str): + """Execute JS code that does not return anything + + :param code: a string containing JavaScript code + :type code: str + """ + self.cdp.invoke("Runtime.evaluate", expression=code, trace=code, + silent=False, awaitPromise=True, returnByValue=False, no_trace=True) + + def eval_js(self, code: str, no_trace: bool = False) -> Optional[Any]: + """Execute JS code that returns something + + :param code: a string containing JavaScript code + :param no_trace: do not print information about unknown return values (default False) + """ + result = self.cdp.invoke("Runtime.evaluate", expression=code, trace=code, + silent=False, awaitPromise=True, returnByValue=True, no_trace=no_trace) + if "exceptionDetails" in result: + self.raise_cdp_exception("eval_js", code, result["exceptionDetails"]) + _type = result.get("result", {}).get("type") + if _type == 'object' and result["result"].get("subtype", "") == "error": + raise Error(result["result"]["description"]) + if _type == "undefined": + return None + if _type and "value" in result["result"]: + return result["result"]["value"] + + if opts.trace: + print("eval_js(%s): cannot interpret return value %s" % (code, result)) + return None + + def call_js_func(self, func: str, *args: Any) -> Optional[Any]: + """Call a JavaScript 
function + + :param func: JavaScript function to call + :param args: arguments for the JavaScript function + """ + return self.eval_js("%s(%s)" % (func, ','.join(map(jsquote, args)))) + + def set_mock(self, mock: Dict[str, str], base: Optional[str] = ""): + """Replace some DOM elements with mock text + + The 'mock' parameter is a dictionary from CSS selectors to the + text that the elements matching the selector should be + replaced with. + + XXX - There is no way to easily undo the effects of this + function. There is no coordination with React. This + will improve as necessary. + + :param mock: the mock data, see above + :param base: if given, all selectors are relative to this one + """ + self.call_js_func('ph_set_texts', {base + " " + k: v for k, v in mock.items()}) + + def cookie(self, name: str): + """Retrieve a browser cookie by name + + :param name: the name of the cookie + :type name: str + """ + cookies = self.cdp.invoke("Network.getCookies") + for c in cookies["cookies"]: + if c["name"] == name: + return c + return None + + def go(self, url_hash: str): + self.call_js_func('ph_go', url_hash) + + def mouse(self, selector: str, event: str, x: int = 0, y: int = 0, btn: int = 0, ctrlKey: bool = False, shiftKey: bool = False, altKey: bool = False, metaKey: bool = False): + """Simulate a browser mouse event + + :param selector: the element to interact with + :param type: the mouse event to simulate, for example mouseenter, mouseleave, mousemove, click + :param x: the x coordinate + :param y: the y coordinate + :param btn: mouse button to click https://developer.mozilla.org/en-US/docs/Web/API/MouseEvent/buttons + :param crtlKey: press the ctrl key + :param shiftKey: press the shift key + :param altKey: press the alt key + :param metaKey: press the meta key + """ + self.wait_visible(selector) + self.call_js_func('ph_mouse', selector, event, x, y, btn, ctrlKey, shiftKey, altKey, metaKey) + + def click(self, selector: str): + """Click on a ui element + + :param 
selector: the selector to click on + """ + self.mouse(selector + ":not([disabled]):not([aria-disabled=true])", "click", 0, 0, 0) + + def val(self, selector: str): + """Get the value attribute of a selector. + + :param selector: the selector to get the value of + """ + self.wait_visible(selector) + return self.call_js_func('ph_val', selector) + + def set_val(self, selector: str, val): + """Set the value attribute of a non disabled DOM element. + + This also emits a change DOM change event. + + :param selector: the selector to set the value of + :param val: the value to set + """ + self.wait_visible(selector + ':not([disabled]):not([aria-disabled=true])') + self.call_js_func('ph_set_val', selector, val) + + def text(self, selector: str): + """Get an element's textContent value. + + :param selector: the selector to get the value of + """ + self.wait_visible(selector) + return self.call_js_func('ph_text', selector) + + def attr(self, selector: str, attr): + """Get the value of a given attribute of an element. + + :param selector: the selector to get the attribute of + :param attr: the DOM element attribute + """ + self._wait_present(selector) + return self.call_js_func('ph_attr', selector, attr) + + def set_attr(self, selector, attr, val): + """Set an attribute value of an element. + + :param selector: the selector + :param attr: the element attribute + :param val: the value of the attribute + """ + self._wait_present(selector + ':not([disabled]):not([aria-disabled=true])') + self.call_js_func('ph_set_attr', selector, attr, val) + + def get_checked(self, selector: str): + """Get checked state of a given selector. + + :param selector: the selector + :return: the checked state + """ + self.wait_visible(selector + ':not([disabled]):not([aria-disabled=true])') + return self.call_js_func('ph_get_checked', selector) + + def set_checked(self, selector: str, val): + """Set checked state of a given selector. 
+ + :param selector: the selector + :param val: boolean value to enable or disable checkbox + """ + self.wait_visible(selector + ':not([disabled]):not([aria-disabled=true])') + self.call_js_func('ph_set_checked', selector, val) + + def focus(self, selector: str): + """Set focus on selected element. + + :param selector: the selector + """ + self.wait_visible(selector + ':not([disabled]):not([aria-disabled=true])') + self.call_js_func('ph_focus', selector) + + def blur(self, selector: str): + """Remove keyboard focus from selected element. + + :param selector: the selector + """ + self.wait_visible(selector + ':not([disabled]):not([aria-disabled=true])') + self.call_js_func('ph_blur', selector) + + # TODO: Unify them so we can have only one + def key_press(self, keys: str, modifiers: int = 0, use_ord: bool = False): + if self.cdp.browser.name == "chromium": + self._key_press_chromium(keys, modifiers, use_ord) + else: + self._key_press_firefox(keys, modifiers, use_ord) + + def _key_press_chromium(self, keys: str, modifiers: int = 0, use_ord=False): + for key in keys: + args = {"type": "keyDown", "modifiers": modifiers} + + # If modifiers are used we need to pass windowsVirtualKeyCode which is + # basically the asci decimal representation of the key + args["text"] = key + if use_ord: + args["windowsVirtualKeyCode"] = ord(key) + elif (not key.isalnum() and ord(key) < 32) or modifiers != 0: + args["windowsVirtualKeyCode"] = ord(key.upper()) + else: + args["key"] = key + + self.cdp.invoke("Input.dispatchKeyEvent", **args) + args["type"] = "keyUp" + self.cdp.invoke("Input.dispatchKeyEvent", **args) + + def _key_press_firefox(self, keys: str, modifiers: int = 0, use_ord: bool = False): + # https://python-reference.readthedocs.io/en/latest/docs/str/ASCII.html + # Both line feed and carriage return are normalized to Enter (https://html.spec.whatwg.org/multipage/form-elements.html) + keyMap = { + 8: "Backspace", # Backspace key + 9: "Tab", # Tab key + 10: "Enter", # Enter key 
(normalized from line feed) + 13: "Enter", # Enter key (normalized from carriage return) + 27: "Escape", # Escape key + 37: "ArrowLeft", # Arrow key left + 40: "ArrowDown", # Arrow key down + 45: "Insert", # Insert key + } + for key in keys: + args = {"type": "keyDown", "modifiers": modifiers} + + args["key"] = key + if ord(key) < 32 or use_ord: + args["key"] = keyMap[ord(key)] + + self.cdp.invoke("Input.dispatchKeyEvent", **args) + args["type"] = "keyUp" + self.cdp.invoke("Input.dispatchKeyEvent", **args) + + def select_from_dropdown(self, selector: str, value): + self.wait_visible(selector + ':not([disabled]):not([aria-disabled=true])') + text_selector = f"{selector} option[value='{value}']" + self._wait_present(text_selector) + self.set_val(selector, value) + self.wait_val(selector, value) + + def select_PF4(self, selector: str, value): + self.click(f"{selector}:not([disabled]):not([aria-disabled=true])") + select_entry = f"{selector} + ul button:contains('{value}')" + self.click(select_entry) + if self.is_present(f"{selector}.pf-m-typeahead"): + self.wait_val(f"{selector} > div input[type=text]", value) + else: + self.wait_text(f"{selector} .pf-v5-c-select__toggle-text", value) + + def set_input_text(self, selector: str, val: str, append: bool = False, value_check: bool = True, blur: bool = True): + self.focus(selector) + if not append: + self.key_press("a", 2) # Ctrl + a + if val == "": + self.key_press("\b") # Backspace + else: + self.key_press(val) + if blur: + self.blur(selector) + + if value_check: + self.wait_val(selector, val) + + def set_file_autocomplete_val(self, group_identifier: str, location: str): + self.set_input_text(f"{group_identifier} .pf-v5-c-select__toggle-typeahead input", location) + # click away the selection list, to force a state update + self.click(f"{group_identifier} .pf-v5-c-select__toggle-typeahead") + self.wait_not_present(f"{group_identifier} .pf-v5-c-select__menu") + + def wait_timeout(self, timeout: int): + browser = self + + 
class WaitParamsRestorer(): + def __init__(self, timeout): + self.timeout = timeout + + def __enter__(self): + pass + + def __exit__(self, type_, value, traceback): + browser.cdp.timeout = self.timeout + r = WaitParamsRestorer(self.cdp.timeout) + self.cdp.timeout = timeout + return r + + def wait(self, predicate: Callable): + for _ in range(self.cdp.timeout * self.timeout_factor * 5): + val = predicate() + if val: + return val + time.sleep(0.2) + raise Error('timed out waiting for predicate to become true') + + def wait_js_cond(self, cond: str, error_description: str = "null"): + count = 0 + timeout = self.cdp.timeout * self.timeout_factor + start = time.time() + while True: + count += 1 + try: + result = self.cdp.invoke("Runtime.evaluate", + expression="ph_wait_cond(() => %s, %i, %s)" % (cond, timeout * 1000, error_description), + silent=False, awaitPromise=True, trace="wait: " + cond) + if "exceptionDetails" in result: + if self.cdp.browser.name == "firefox" and count < 20 and "ph_wait_cond is not defined" in result["exceptionDetails"].get("text", ""): + time.sleep(0.1) + continue + trailer = "\n".join(self.cdp.get_js_log()) + self.raise_cdp_exception("timeout\nwait_js_cond", cond, result["exceptionDetails"], trailer) + if timeout > 0: + duration = time.time() - start + percent = int(duration / timeout * 100) + if percent >= 50: + print(f"WARNING: Waiting for {cond} took {duration:.1f} seconds, which is {percent}% of the timeout.") + return + except RuntimeError as e: + data = e.args[0] + if count < 20 and isinstance(data, dict) and "response" in data and data["response"].get("message") in ["Execution context was destroyed.", "Cannot find context with specified id"]: + time.sleep(1) + else: + raise e + + def wait_js_func(self, func: str, *args: Any): + self.wait_js_cond("%s(%s)" % (func, ','.join(map(jsquote, args)))) + + def is_present(self, selector: str) -> Optional[bool]: + return self.call_js_func('ph_is_present', selector) + + def _wait_present(self, 
selector: str): + self.wait_js_func('ph_is_present', selector) + + def wait_not_present(self, selector: str): + self.wait_js_func('!ph_is_present', selector) + + def is_visible(self, selector: str) -> Optional[bool]: + return self.call_js_func('ph_is_visible', selector) + + def wait_visible(self, selector: str): + self._wait_present(selector) + self.wait_js_func('ph_is_visible', selector) + + def wait_val(self, selector: str, val: str): + self.wait_visible(selector) + self.wait_js_func('ph_has_val', selector, val) + + def wait_not_val(self, selector: str, val: str): + self.wait_visible(selector) + self.wait_js_func('!ph_has_val', selector, val) + + def wait_attr(self, selector, attr, val): + self._wait_present(selector) + self.wait_js_func('ph_has_attr', selector, attr, val) + + def wait_attr_contains(self, selector, attr, val): + self._wait_present(selector) + self.wait_js_func('ph_attr_contains', selector, attr, val) + + def wait_attr_not_contains(self, selector, attr, val): + self._wait_present(selector) + self.wait_js_func('!ph_attr_contains', selector, attr, val) + + def wait_not_attr(self, selector, attr, val): + self._wait_present(selector) + self.wait_js_func('!ph_has_attr', selector, attr, val) + + def wait_not_visible(self, selector: str): + self.wait_js_func('!ph_is_visible', selector) + + def wait_in_text(self, selector: str, text: str): + self.wait_visible(selector) + self.wait_js_cond("ph_in_text(%s,%s)" % (jsquote(selector), jsquote(text)), + error_description="() => 'actual text: ' + ph_text(%s)" % jsquote(selector)) + + def wait_not_in_text(self, selector: str, text: str): + self.wait_visible(selector) + self.wait_js_func('!ph_in_text', selector, text) + + def wait_collected_text(self, selector: str, text: str): + self.wait_js_func('ph_collected_text_is', selector, text) + + def wait_text(self, selector: str, text: str): + self.wait_visible(selector) + self.wait_js_cond("ph_text_is(%s,%s)" % (jsquote(selector), jsquote(text)), + 
error_description="() => 'actual text: ' + ph_text(%s)" % jsquote(selector)) + + def wait_text_not(self, selector: str, text: str): + self.wait_visible(selector) + self.wait_js_func('!ph_text_is', selector, text) + + def wait_text_matches(self, selector: str, pattern: str): + self.wait_visible(selector) + self.wait_js_func('ph_text_matches', selector, pattern) + + def wait_popup(self, elem_id: str): + """Wait for a popup to open. + + :param id: the 'id' attribute of the popup. + """ + self.wait_visible('#' + elem_id) + + def wait_popdown(self, elem_id: str): + """Wait for a popup to close. + + :param id: the 'id' attribute of the popup. + """ + self.wait_not_visible('#' + elem_id) + + def wait_language(self, lang: str): + parts = lang.split("-") + code_1 = parts[0] + code_2 = parts[0] + if len(parts) > 1: + code_2 += "_" + parts[1].upper() + self.wait_js_cond("cockpit.language == '%s' || cockpit.language == '%s'" % (code_1, code_2)) + + def dialog_cancel(self, sel: str, button: str = "button[data-dismiss='modal']"): + self.click(sel + " " + button) + self.wait_not_visible(sel) + + def enter_page(self, path: str, host: Optional[str] = None, reconnect: bool = True): + """Wait for a page to become current. + + :param path: The identifier the page. 
This is a string starting with "/" + :type path: str + :param host: The host to connect too + :type host: str + :param reconnect: Try to reconnect + :type reconnect: bool + """ + assert path.startswith("/") + if host: + frame = host + path + else: + frame = "localhost" + path + frame = "cockpit1:" + frame + + self.switch_to_top() + + while True: + try: + self._wait_present("iframe.container-frame[name='%s'][data-loaded]" % frame) + self.wait_not_visible(".curtains-ct") + self.wait_visible("iframe.container-frame[name='%s']" % frame) + break + except Error as ex: + if reconnect and ex.msg.startswith('timeout'): + reconnect = False + if self.is_present("#machine-reconnect"): + self.click("#machine-reconnect") + self.wait_not_visible(".curtains-ct") + continue + raise + + self.switch_to_frame(frame) + self._wait_present("body") + self.wait_visible("body") + + def leave_page(self): + self.switch_to_top() + + def try_login(self, user: Optional[str] = None, password: Optional[str] = None, superuser: Optional[bool] = True, legacy_authorized: Optional[bool] = None): + """Fills in the login dialog and clicks the button. + + This differs from login_and_go() by not expecting any particular result. + + :param user: the username to login with + :type user: str + :param password: the password of the user + :type password: str + :param superuser: determines whether the new session will try to get Administrative Access (default true) + :type superuser: bool + :param legacy_authorized: old versions of the login dialog that still + have the "[ ] Reuse my password for magic things" checkbox. Such a + dialog is encountered when testing against old bastion hosts, for + example. 
+ """ + if user is None: + user = self.default_user + if password is None: + password = self.password + self.wait_visible("#login") + self.set_val('#login-user-input', user) + self.set_val('#login-password-input', password) + if legacy_authorized is not None: + self.set_checked('#authorized-input', legacy_authorized) + if superuser is not None: + self.eval_js('window.localStorage.setItem("superuser:%s", "%s");' % (user, "any" if superuser else "none")) + self.click('#login-button') + + def login_and_go(self, path: Optional[str] = None, user: Optional[str] = None, host: Optional[str] = None, + superuser: bool = True, urlroot: Optional[str] = None, tls: bool = False, password: Optional[str] = None, + legacy_authorized: Optional[bool] = None): + """Fills in the login dialog, clicks the button and navigates to the given path + + :param user: the username to login with + :type user: str + :param password: the password of the user + :type password: str + :param superuser: determines whether the new session will try to get Administrative Access (default true) + :type superuser: bool + :param legacy_authorized: old versions of the login dialog that still + have the "[ ] Reuse my password for magic things" checkbox. Such a + dialog is encountered when testing against old bastion hosts, for + example. 
+ """ + href = path + if not href: + href = "/" + if urlroot: + href = urlroot + href + if host: + href = "/@" + host + href + self.open(href, tls=tls) + + self.try_login(user, password, superuser=superuser, legacy_authorized=legacy_authorized) + + self._wait_present('#content') + self.wait_visible('#content') + if path: + self.enter_page(path.split("#")[0], host=host) + + def logout(self): + self.assert_no_oops() + self.switch_to_top() + + self.wait_visible("#toggle-menu") + if self.is_present("button#machine-reconnect") and self.is_visible("button#machine-reconnect"): + # happens when shutting down cockpit or rebooting machine + self.click("button#machine-reconnect") + else: + # happens when cockpit is still running + self.open_session_menu() + try: + self.click('#logout') + except RuntimeError as e: + # logging out does destroy the current frame context, it races with the CDP driver finishing the command + if "Execution context was destroyed" not in str(e): + raise + self.wait_visible('#login') + + self.machine.allow_restart_journal_messages() + + def relogin(self, path: Optional[str] = None, user: Optional[str] = None, password: Optional[str] = None, + superuser: Optional[bool] = None, wait_remote_session_machine: Optional[testvm.Machine] = None): + self.logout() + if wait_remote_session_machine: + wait_remote_session_machine.execute("while pgrep -a cockpit-ssh; do sleep 1; done") + self.try_login(user, password=password, superuser=superuser) + self._wait_present('#content') + self.wait_visible('#content') + if path: + if path.startswith("/@"): + host = path[2:].split("/")[0] + else: + host = None + self.enter_page(path.split("#")[0], host=host) + + def open_session_menu(self): + self.wait_visible("#toggle-menu") + if (self.attr("#toggle-menu", "aria-expanded") != "true"): + self.click("#toggle-menu") + + def layout_is_mobile(self): + return self.current_layout and self.current_layout["shell_size"][0] < 420 + + def open_superuser_dialog(self): + if 
self.layout_is_mobile(): + self.open_session_menu() + self.click("#super-user-indicator-mobile button") + else: + self.click("#super-user-indicator button") + + def check_superuser_indicator(self, expected: str): + if self.layout_is_mobile(): + self.open_session_menu() + self.wait_text("#super-user-indicator-mobile", expected) + self.click("#toggle-menu") + else: + self.wait_text("#super-user-indicator", expected) + + def become_superuser(self, user: Optional[str] = None, password: Optional[str] = None, passwordless: Optional[bool] = False): + cur_frame = self.cdp.cur_frame + self.switch_to_top() + + self.open_superuser_dialog() + + if passwordless: + self.wait_in_text("div[role=dialog]:contains('Administrative access')", "You now have administrative access.") + self.click("div[role=dialog] button:contains('Close')") + self.wait_not_present("div[role=dialog]:contains('You now have administrative access.')") + else: + self.wait_in_text("div[role=dialog]:contains('Switch to administrative access')", f"Password for {user or 'admin'}:") + self.set_input_text("div[role=dialog]:contains('Switch to administrative access') input", password or "foobar") + self.click("div[role=dialog] button:contains('Authenticate')") + self.wait_not_present("div[role=dialog]:contains('Switch to administrative access')") + + self.check_superuser_indicator("Administrative access") + self.switch_to_frame(cur_frame) + + def drop_superuser(self): + cur_frame = self.cdp.cur_frame + self.switch_to_top() + + self.open_superuser_dialog() + self.click("div[role=dialog]:contains('Switch to limited access') button:contains('Limit access')") + self.wait_not_present("div[role=dialog]:contains('Switch to limited access')") + self.check_superuser_indicator("Limited access") + + self.switch_to_frame(cur_frame) + + def click_system_menu(self, path: str, enter: bool = True): + """Click on a "System" menu entry with given URL path + + Enters the given target frame afterwards, unless enter=False is given + 
(useful for remote hosts). + """ + self.switch_to_top() + self.click(f"#host-apps a[href='{path}']") + if enter: + # strip off parameters after hash + self.enter_page(path.split('#')[0].rstrip('/')) + + def get_pf_progress_value(self, progress_bar_sel): + """Get numeric value of a PatternFly component""" + sel = progress_bar_sel + " .pf-v5-c-progress__indicator" + self.wait_visible(sel) + self.wait_attr_contains(sel, "style", "width:") + style = self.attr(sel, "style") + m = re.search(r"width: (\d+)%;", style) + return int(m.group(1)) + + def ignore_ssl_certificate_errors(self, ignore: bool): + action = ignore and "continue" or "cancel" + if opts.trace: + print("-> Setting SSL certificate error policy to %s" % action) + self.cdp.command(f"setSSLBadCertificateAction('{action}')") + + def grant_permissions(self, *args: str): + """Grant permissions to the browser""" + # https://chromedevtools.github.io/devtools-protocol/tot/Browser/#method-grantPermissions + self.cdp.invoke("Browser.grantPermissions", + origin="http://%s:%s" % (self.address, self.port), + permissions=args) + + def snapshot(self, title: str, label: Optional[str] = None): + """Take a snapshot of the current screen and save it as a PNG and HTML. + + Arguments: + title: Used for the filename. 
+ """ + if self.cdp and self.cdp.valid: + self.cdp.command("clearExceptions()") + + filename = unique_filename(f"{label or self.label}-{title}", "png") + if self.body_clip: + ret = self.cdp.invoke("Page.captureScreenshot", clip=self.body_clip, no_trace=True) + else: + ret = self.cdp.invoke("Page.captureScreenshot", no_trace=True) + if "data" in ret: + with open(filename, 'wb') as f: + f.write(base64.standard_b64decode(ret["data"])) + attach(filename, move=True) + print("Wrote screenshot to " + filename) + else: + print("Screenshot not available") + + filename = unique_filename(f"{label or self.label}-{title}", "html") + html = self.cdp.invoke("Runtime.evaluate", expression="document.documentElement.outerHTML", + no_trace=True)["result"]["value"] + with open(filename, 'wb') as f: + f.write(html.encode('UTF-8')) + attach(filename, move=True) + print("Wrote HTML dump to " + filename) + + def _set_window_size(self, width: int, height: int): + self.cdp.invoke("Emulation.setDeviceMetricsOverride", + width=width, height=height, + deviceScaleFactor=0, mobile=False) + + def _set_emulated_media_theme(self, name: str): + # https://bugzilla.mozilla.org/show_bug.cgi?id=1549434 + if self.cdp.browser.name == "chromium": + self.cdp.invoke("Emulation.setEmulatedMedia", features=[{'name': 'prefers-color-scheme', 'value': name}]) + + def _set_direction(self, direction: str): + cur_frame = self.cdp.cur_frame + if self.is_present("#shell-page"): + self.switch_to_top() + self.set_attr("#shell-page", "dir", direction) + self.switch_to_frame(cur_frame) + self.set_attr("html", "dir", direction) + + def set_layout(self, name: str): + layout = next(lo for lo in self.layouts if lo["name"] == name) + if layout != self.current_layout: + if layout["name"] == "rtl": + self._set_direction("rtl") + elif layout["name"] != "rtl" and self.current_layout and self.current_layout["name"] == "rtl": + self._set_direction("ltr") + + self.current_layout = layout + size = layout["shell_size"] + 
self._set_window_size(size[0], size[1]) + self._adjust_window_for_fixed_content_size() + self._set_emulated_media_theme(layout["theme"]) + + def _adjust_window_for_fixed_content_size(self): + if self.eval_js("window.name").startswith("cockpit1:"): + # Adjust the window size further so that the content is + # exactly the expected size. This will make sure that + # pixel tests of the content will not be affected by + # changes in shell navigation elements around it. It is + # important that we do this only after getting the shell + # into about the right size so that it switches into the + # right layout mode. + shell_size = self.current_layout["shell_size"] + want_size = self.current_layout["content_size"] + have_size = self.eval_js("[ document.body.offsetWidth, document.body.offsetHeight ]") + delta = (want_size[0] - have_size[0], want_size[1] - have_size[1]) + if delta[0] != 0 or delta[1] != 0: + self._set_window_size(shell_size[0] + delta[0], shell_size[1] + delta[1]) + + def assert_pixels_in_current_layout(self, selector: str, key: str, + ignore: Optional[List[str]] = None, + mock: Optional[Dict[str, str]] = None, + sit_after_mock: bool = False, + scroll_into_view: Optional[str] = None, + wait_animations: bool = True, + wait_delay: float = 0.5): + """Compare the given element with its reference in the current layout""" + + if ignore is None: + ignore = [] + + if not (Image and self.pixels_label): + return + + self._adjust_window_for_fixed_content_size() + self.call_js_func('ph_scrollIntoViewIfNeeded', scroll_into_view or selector) + self.call_js_func('ph_blur_active') + + # Wait for all animations to be over. This is done by + # counting them all over and over again until there are zero. + # Calling `.finish()` on all animations would miss those that + # are created while we wait, and would also fail with an + # exception if any unlimited animations are present, like + # spinners. + # + # There is another complication with tooltips. 
They are shown + # on top of certain elements, but are not DOM children of + # these elements. Also, Patternfly sometimes creates tooltips + # on dialog titles that are too long for the dialog, but only + # a little bit after the dialog has appeared. + # + # We don't want to predict whether tooltips will appear, and + # thus we can't wait for them to be present before waiting for + # their fade-in animation to be over. + # + # But we know that tooltips fade in within 300ms, so we just + # wait half a second to and side-step all that complexity. + + if wait_animations: + time.sleep(wait_delay) + self.wait_js_cond('ph_count_animations(%s) == 0' % jsquote(selector)) + + if mock is not None: + self.set_mock(mock, base=selector) + if sit_after_mock: + sit() + + rect = self.call_js_func('ph_element_clip', selector) + + def relative_clips(sels): + return [( + r['x'] - rect['x'], + r['y'] - rect['y'], + r['x'] - rect['x'] + r['width'], + r['y'] - rect['y'] + r['height']) + for r in self.call_js_func('ph_selector_clips', sels)] + + reference_dir = os.path.join(TEST_DIR, 'reference') + if not os.path.exists(os.path.join(reference_dir, '.git')): + raise SystemError("Pixel test references are missing, please run: test/common/pixel-tests pull") + + ignore_rects = relative_clips([f"{selector} {item}" for item in ignore]) + base = self.pixels_label + "-" + key + if self.current_layout != self.layouts[0]: + base += "-" + self.current_layout["name"] + filename = base + "-pixels.png" + ref_filename = os.path.join(reference_dir, filename) + self.used_pixel_references.add(ref_filename) + ret = self.cdp.invoke("Page.captureScreenshot", clip=rect, no_trace=True) + png_now = base64.standard_b64decode(ret["data"]) + png_ref = os.path.exists(ref_filename) and open(ref_filename, "rb").read() + if not png_ref: + with open(filename, 'wb') as f: + f.write(png_now) + attach(filename, move=True) + print("New pixel test reference " + filename) + self.failed_pixel_tests += 1 + else: + img_now = 
Image.open(io.BytesIO(png_now)).convert("RGBA") + img_ref = Image.open(io.BytesIO(png_ref)).convert("RGBA") + img_delta = Image.new("RGBA", + (max(img_now.size[0], img_ref.size[0]), max(img_now.size[1], img_ref.size[1])), + (255, 0, 0, 255)) + + # The current snapshot and the reference don't need to + # be perfectly identical. They might differ in the + # following ways: + # + # - A pixel in the reference image might be + # transparent. These pixels are ignored. + # + # - The call to assert_pixels specifies a list of + # rectangles (via CSS selectors). Pixels within those + # rectangles (and slightly outside) are ignored. Pixels + # just outside the rectangles are also ignored to avoid + # issues with rounding coordinates. + # + # - The RGB values of pixels can differ by up to 2. + # + # - There can be up to 20 different pixels + # + # Pixels that are different but have been ignored are + # marked in the delta image in green. + + def masked(ref): + return ref[3] != 255 + + def ignorable_coord(x, y): + for (x0, y0, x1, y1) in ignore_rects: + if x >= x0 - 2 and x < x1 + 2 and y >= y0 - 2 and y < y1 + 2: + return True + return False + + def ignorable_change(a, b): + return abs(a[0] - b[0]) <= 2 and abs(a[1] - b[1]) <= 2 and abs(a[1] - b[1]) <= 2 + + def img_eq(ref, now, delta): + # This is slow but exactly what we want. + # ImageMath might be able to speed this up. 
+ data_ref = ref.load() + data_now = now.load() + data_delta = delta.load() + result = True + count = 0 + width, height = delta.size + for y in range(height): + for x in range(width): + if x >= ref.size[0] or x >= now.size[0] or y >= ref.size[1] or y >= now.size[1]: + result = False + elif data_ref[x, y] != data_now[x, y]: + if masked(data_ref[x, y]) or ignorable_coord(x, y) or ignorable_change(data_ref[x, y], data_now[x, y]): + data_delta[x, y] = (0, 255, 0, 255) + else: + data_delta[x, y] = (255, 0, 0, 255) + count += 1 + if count > 20: + result = False + else: + data_delta[x, y] = data_ref[x, y] + return result + + if not img_eq(img_ref, img_now, img_delta): + if img_now.size == img_ref.size: + # Preserve alpha channel so that the 'now' + # image can be used as the new reference image + # without further changes + img_now.putalpha(img_ref.getchannel("A")) + img_now.save(filename) + attach(filename, move=True) + ref_filename_for_attach = base + "-reference.png" + img_ref.save(ref_filename_for_attach) + attach(ref_filename_for_attach, move=True) + delta_filename = base + "-delta.png" + img_delta.save(delta_filename) + attach(delta_filename, move=True) + print("Differences in pixel test " + base) + self.failed_pixel_tests += 1 + + def assert_pixels(self, selector: str, key: str, + ignore: Optional[List[str]] = None, + mock: Optional[Dict[str, str]] = None, + sit_after_mock: bool = False, + skip_layouts: Optional[List[str]] = None, + scroll_into_view: Optional[str] = None, + wait_animations: bool = True, + wait_after_layout_change: bool = False, + wait_delay: float = 0.5): + """Compare the given element with its reference in all layouts""" + + if ignore is None: + ignore = [] + + if skip_layouts is None: + skip_layouts = [] + + if not (Image and self.pixels_label): + return + + # If the page overflows make sure to not show a scrollbar + # Don't apply this hack for login and terminal and shell as they don't use PF Page + if not self.is_present("#shell-page") and not 
self.is_present("#login-details") and not self.is_present("#system-terminal-page"): + self.switch_to_frame(self.cdp.cur_frame) + classes = self.attr("main", "class") + if "pf-v5-c-page__main" in classes: + self.set_attr("main.pf-v5-c-page__main", "class", f"{classes} pixel-test") + + if self.current_layout: + previous_layout = self.current_layout["name"] + for layout in self.layouts: + if layout["name"] not in skip_layouts: + self.set_layout(layout["name"]) + if wait_after_layout_change: + time.sleep(wait_delay) + self.assert_pixels_in_current_layout(selector, key, ignore=ignore, + mock=mock, sit_after_mock=sit_after_mock, + scroll_into_view=scroll_into_view, + wait_animations=wait_animations, + wait_delay=wait_delay) + + self.set_layout(previous_layout) + + def assert_no_unused_pixel_test_references(self): + """Check whether all reference images in test/reference have been used.""" + + if not (Image and self.pixels_label): + return + + pixel_references = set(glob.glob(os.path.join(TEST_DIR, "reference", self.pixels_label + "*-pixels.png"))) + unused = pixel_references - self.used_pixel_references + for u in unused: + print("Unused reference image " + os.path.basename(u)) + self.failed_pixel_tests += 1 + + def get_js_log(self): + """Return the current javascript log""" + + if self.cdp: + return self.cdp.get_js_log() + return [] + + def copy_js_log(self, title: str, label: Optional[str] = None): + """Copy the current javascript log""" + + logs = list(self.get_js_log()) + if logs: + filename = unique_filename(f"{label or self.label}-{title}", "js.log") + with open(filename, 'wb') as f: + f.write('\n'.join(logs).encode('UTF-8')) + attach(filename, move=True) + print("Wrote JS log to " + filename) + + def kill(self): + self.cdp.kill() + + def write_coverage_data(self): + if self.coverage_label and self.cdp and self.cdp.valid: + coverage = self.cdp.invoke("Profiler.takePreciseCoverage") + write_lcov(coverage['result'], self.coverage_label) + + def assert_no_oops(self): 
+ if self.allow_oops: + return + + if self.cdp and self.cdp.valid: + self.switch_to_top() + if self.is_present("#navbar-oops"): + assert not self.is_visible("#navbar-oops"), "Cockpit shows an Oops" + + +class MachineCase(unittest.TestCase): + image = testvm.DEFAULT_IMAGE + libexecdir = None + runner = None + machine: testvm.Machine + machines = Dict[str, testvm.Machine] + machine_class = None + browser: Browser + network = None + journal_start = None + + # provision is a dictionary of dictionaries, one for each additional machine to be created, e.g.: + # provision = { 'openshift' : { 'image': 'openshift', 'memory_mb': 1024 } } + # These will be instantiated during setUp, and replaced with machine objects + provision: Optional[Dict[str, Dict[str, Union[str, int]]]] = None + + global_machine = None + + @classmethod + def get_global_machine(cls): + if cls.global_machine: + return cls.global_machine + cls.global_machine = cls.new_machine(cls, restrict=True, cleanup=False) + if opts.trace: + print(f"Starting global machine {cls.global_machine.label}") + cls.global_machine.start() + return cls.global_machine + + @classmethod + def kill_global_machine(cls): + if cls.global_machine: + cls.global_machine.kill() + cls.global_machine = None + + def label(self): + return self.__class__.__name__ + '-' + self._testMethodName + + def new_machine(self, image=None, forward=None, restrict=True, cleanup=True, inherit_machine_class=True, **kwargs): + machine_class = inherit_machine_class and self.machine_class or testvm.VirtMachine + + if opts.address: + if forward: + raise unittest.SkipTest("Cannot run this test when specific machine address is specified") + machine = testvm.Machine(address=opts.address, image=image or self.image, verbose=opts.trace, browser=opts.browser) + if cleanup: + self.addCleanup(machine.disconnect) + else: + if image is None: + image = os.path.join(TEST_DIR, "images", self.image) + if not os.path.exists(image): + raise FileNotFoundError("Can't run tests 
without a prepared image; use test/image-prepare") + if not self.network: + network = testvm.VirtNetwork(image=image) + if cleanup: + self.addCleanup(network.kill) + self.network = network + networking = self.network.host(restrict=restrict, forward=forward or {}) + machine = machine_class(verbose=opts.trace, networking=networking, image=image, **kwargs) + if opts.fetch and not os.path.exists(machine.image_file): + machine.pull(machine.image_file) + if cleanup: + self.addCleanup(machine.kill) + return machine + + def new_browser(self, machine=None, coverage=False): + if machine is None: + machine = self.machine + label = self.label() + "-" + machine.label + pixels_label = None + if os.environ.get("TEST_BROWSER", "chromium") == "chromium" and not self.is_devel_build(): + try: + with open(f'{TEST_DIR}/reference-image') as fp: + reference_image = fp.read().strip() + except FileNotFoundError: + # no "reference-image" file available; this most likely means that + # there are no pixel tests to execute + pass + else: + if machine.image == reference_image: + pixels_label = self.label() + browser = Browser(machine.web_address, + label=label, pixels_label=pixels_label, coverage_label=self.label() if coverage else None, + port=machine.web_port, machine=self) + self.addCleanup(browser.kill) + return browser + + def getError(self): + # errors is a list of (method, exception) calls (usually multiple + # per method); None exception means success + errors = [] + if hasattr(self._outcome, 'errors'): + # Python 3.4 - 3.10 (These two methods have no side effects) + result = self.defaultTestResult() + errors = result.errors + self._feedErrorsToResult(result, self._outcome.errors) + elif hasattr(self._outcome, 'result') and hasattr(self._outcome.result, '_excinfo'): + # pytest emulating unittest + return self._outcome.result._excinfo + else: + # Python 3.11+ now records errors and failures seperate + errors = self._outcome.result.errors + self._outcome.result.failures + + try: + return 
errors[0][1] + except IndexError: + return None + + def is_nondestructive(self): + test_method = getattr(self.__class__, self._testMethodName) + return get_decorator(test_method, self.__class__, "nondestructive") + + def is_devel_build(self) -> bool: + return os.environ.get('NODE_ENV') == 'development' + + def is_pybridge(self) -> bool: + # some tests start e.g. centos-7 as first machine, bridge may not exist there + return any('python' in m.execute('head -c 30 /usr/bin/cockpit-bridge || true') for m in self.machines.values()) + + def disable_preload(self, *packages, machine=None): + if machine is None: + machine = self.machine + for pkg in packages: + machine.write(f"/etc/cockpit/{pkg}.override.json", '{ "preload": [ ] }') + + def enable_preload(self, package: str, *pages: str): + pages_str = ', '.join(f'"{page}"' for page in pages) + self.machine.write(f"/etc/cockpit/{package}.override.json", f'{{ "preload": [ {pages_str} ] }}') + + def system_before(self, version): + try: + v = self.machine.execute("""rpm -q --qf '%{V}' cockpit-system || + dpkg-query -W -f '${source:Upstream-Version}' cockpit-system || + (pacman -Q cockpit | cut -f2 -d' ' | cut -f1 -d-) + """).split(".") + except subprocess.CalledProcessError: + return False + + return int(v[0]) < version + + def setUp(self, restrict=True): + self.allowed_messages = self.default_allowed_messages + self.allowed_console_errors = self.default_allowed_console_errors + self.allow_core_dumps = False + + if os.getenv("MACHINE"): + # apply env variable together if MACHINE envvar is set + opts.address = os.getenv("MACHINE") + if self.is_nondestructive(): + pass + elif os.getenv("DESTRUCTIVE") and not self.is_nondestructive(): + print("Run destructive test, be careful, may lead to upredictable state of machine") + else: + raise unittest.SkipTest("Skip destructive test by default") + if os.getenv("BROWSER"): + opts.browser = os.getenv("BROWSER") + if os.getenv("TRACE"): + opts.trace = True + if os.getenv("SIT"): + opts.sit 
= True + + if opts.address and self.provision is not None: + raise unittest.SkipTest("Cannot provision multiple machines if a specific machine address is specified") + + self.machines = {} + provision = self.provision or {'machine1': {}} + self.tmpdir = tempfile.mkdtemp() + # automatically cleaned up for @nondestructive tests, but you have to create it yourself + self.vm_tmpdir = "/var/lib/cockpittest" + + if self.is_nondestructive() and not opts.address: + if self.provision: + raise unittest.SkipTest("Cannot provision machines if test is marked as nondestructive") + self.machine = self.machines['machine1'] = MachineCase.get_global_machine() + else: + MachineCase.kill_global_machine() + first_machine = True + # First create all machines, wait for them later + for key in sorted(provision.keys()): + options = provision[key].copy() + if 'address' in options: + del options['address'] + if 'dns' in options: + del options['dns'] + if 'dhcp' in options: + del options['dhcp'] + if 'restrict' not in options: + options['restrict'] = restrict + machine = self.new_machine(**options) + self.machines[key] = machine + if first_machine: + first_machine = False + self.machine = machine + if opts.trace: + print(f"Starting {key} {machine.label}") + machine.start() + + self.danger_btn_class = '.pf-m-danger' + self.primary_btn_class = '.pf-m-primary' + self.default_btn_class = '.pf-m-secondary' + + # Now wait for the other machines to be up + for key in self.machines.keys(): + machine = self.machines[key] + machine.wait_boot() + address = provision[key].get("address") + if address is not None: + machine.set_address(address) + dns = provision[key].get("dns") + if address or dns: + machine.set_dns(dns) + dhcp = provision[key].get("dhcp", False) + if dhcp: + machine.dhcp_server() + + self.journal_start = self.machine.journal_cursor() + self.browser: Browser = self.new_browser(coverage=opts.coverage) + # fail tests on criticals + self.machine.write("/etc/cockpit/cockpit.conf", 
"[Log]\nFatal = criticals\n") + if self.is_nondestructive(): + self.nonDestructiveSetup() + + # Pages with debug enabled are huge and loading/executing them is heavy for browsers + # To make it easier for browsers and thus make tests quicker, disable packagekit and systemd preloads + if self.is_devel_build(): + self.disable_preload("packagekit", "systemd") + + if self.machine.image.startswith('debian') or self.machine.image.startswith('ubuntu') or self.machine.image == 'arch': + self.libexecdir = '/usr/lib/cockpit' + else: + self.libexecdir = '/usr/libexec' + + def nonDestructiveSetup(self): + """generic setUp/tearDown for @nondestructive tests""" + + m = self.machine + + # helps with mapping journal output to particular tests + name = "%s.%s" % (self.__class__.__name__, self._testMethodName) + m.execute("logger -p user.info 'COCKPITTEST: start %s'" % name) + self.addCleanup(m.execute, "logger -p user.info 'COCKPITTEST: end %s'" % name) + + # core dumps get copied per-test, don't clobber subsequent tests with them + self.addCleanup(m.execute, "find /var/lib/systemd/coredump -type f -delete") + + # temporary directory in the VM + self.addCleanup(m.execute, "if [ -d {0} ]; then findmnt --list --noheadings --output TARGET | grep ^{0} | xargs -r umount; rm -r {0}; fi".format(self.vm_tmpdir)) + + # users/groups/home dirs + self.restore_file("/etc/passwd") + self.restore_file("/etc/group") + self.restore_file("/etc/shadow") + self.restore_file("/etc/gshadow") + self.restore_file("/etc/subuid") + self.restore_file("/etc/subgid") + self.restore_file("/var/log/wtmp") + home_dirs = m.execute("ls /home").strip().split() + + def cleanup_home_dirs(): + for d in m.execute("ls /home").strip().split(): + if d not in home_dirs: + m.execute("rm -r /home/" + d) + self.addCleanup(cleanup_home_dirs) + + if m.image == "arch": + # arch configures pam_faillock by default + self.addCleanup(m.execute, "rm -rf /run/faillock") + + # cockpit configuration + self.restore_dir("/etc/cockpit") + + 
if not m.ostree_image: + # for storage tests + self.restore_file("/etc/fstab") + self.restore_file("/etc/crypttab") + + # tests expect cockpit.service to not run at start; also, avoid log leakage into the next test + self.addCleanup(m.execute, "systemctl stop --quiet cockpit") + + # The sssd daemon seems to get confused when we restore + # backups of /etc/group etc and stops following updates to it. + # Let's restart the daemon to reset that condition. + m.execute("systemctl try-restart sssd || true") + + # reset scsi_debug (see e. g. StorageHelpers.add_ram_disk() + # this needs to happen very late in the cleanup, so that test cases can clean up the users of that disk first + # right after unmounting the device is often still busy, so retry a few times + self.addCleanup(self.machine.execute, + "set -e; [ -e /sys/module/scsi_debug ] || exit 0; " + "for dev in $(ls /sys/bus/pseudo/drivers/scsi_debug/adapter*/host*/target*/*:*/block); do " + " for s in /sys/block/*/slaves/${dev}*; do [ -e $s ] || break; " + " d=/dev/$(dirname $(dirname ${s#/sys/block/})); " + " while fuser --mount $d --kill; do sleep 0.1; done; " + " umount $d || true; dmsetup remove --force $d || true; " + " done; " + " while fuser --mount /dev/$dev --kill; do sleep 0.1; done; " + " umount /dev/$dev || true; " + " swapon --show=NAME --noheadings | grep $dev | xargs -r swapoff; " + "done; until rmmod scsi_debug; do sleep 0.2; done", stdout=None) + + def terminate_sessions(): + # on OSTree we don't get "web console" sessions with the cockpit/ws container; just SSH; but also, some tests start + # admin sessions without Cockpit + self.machine.execute("""for u in $(loginctl --no-legend list-users | awk '{ if ($2 != "root") print $1 }'); do + loginctl terminate-user $u 2>/dev/null || true + loginctl kill-user $u 2>/dev/null || true + pkill -9 -u $u || true + while pgrep -u $u; do sleep 0.2; done + while mountpoint -q /run/user/$u && ! 
umount /run/user/$u; do sleep 0.2; done + rm -rf /run/user/$u + done""") + + # Terminate all other Cockpit sessions + sessions = self.machine.execute("loginctl --no-legend list-sessions | awk '/web console/ { print $1 }'").strip().split() + for s in sessions: + # Don't insist that terminating works, the session might be gone by now. + self.machine.execute(f"loginctl kill-session {s} || true; loginctl terminate-session {s} || true") + + # Restart logind to mop up empty "closing" sessions + self.machine.execute("systemctl stop systemd-logind") + + # Wait for sessions to be gone + sessions = self.machine.execute("loginctl --no-legend list-sessions | awk '/web console/ { print $1 }'").strip().split() + for s in sessions: + try: + m.execute(f"while loginctl show-session {s}; do sleep 0.2; done", timeout=30) + except RuntimeError: + # show the status in debug logs, to see what's wrong + m.execute(f"loginctl session-status {s}; systemd-cgls", stdout=None) + raise + + # terminate all systemd user services for users who are not logged in + self.machine.execute("systemctl stop user@*.service") + + # Clean up "closing" sessions again, and clean user id cache for non-system users + self.machine.execute("systemctl stop systemd-logind; cd /run/systemd/users/; " + "for f in $(ls); do [ $f -le 500 ] || rm $f; done") + + self.addCleanup(terminate_sessions) + + def tearDown(self): + error = self.getError() + + if error: + print(error, file=sys.stderr) + try: + self.snapshot("FAIL") + self.copy_js_log("FAIL") + self.copy_journal("FAIL") + self.copy_cores("FAIL") + except (OSError, RuntimeError): + # failures in these debug artifacts should not skip cleanup actions + sys.stderr.write("Failed to generate debug artifact:\n") + traceback.print_exc(file=sys.stderr) + + if opts.sit: + sit(self.machines) + + if self.browser: + self.browser.write_coverage_data() + + if self.machine.ssh_reachable: + self.check_journal_messages() + if not error: + self.check_browser_errors() + 
self.check_pixel_tests() + + shutil.rmtree(self.tmpdir, ignore_errors=True) + + def login_and_go(self, path: Optional[str] = None, user: Optional[str] = None, host: Optional[str] = None, + superuser: bool = True, urlroot: Optional[str] = None, tls: bool = False, + enable_root_login: bool = False): + if enable_root_login: + self.enable_root_login() + self.machine.start_cockpit(tls=tls) + # first load after starting cockpit tends to take longer, due to on-demand service start + with self.browser.wait_timeout(30): + self.browser.login_and_go(path, user=user, host=host, superuser=superuser, urlroot=urlroot, tls=tls) + + def start_machine_troubleshoot(self, new=False, known_host=False, password=None, expect_closed_dialog=True, browser=None): + b = browser or self.browser + + b.wait_visible("#machine-troubleshoot") + b.click('#machine-troubleshoot') + + b.wait_visible('#hosts_setup_server_dialog') + if new: + b.click('#hosts_setup_server_dialog button:contains(Add)') + if not known_host: + b.wait_in_text('#hosts_setup_server_dialog', "You are connecting to") + b.wait_in_text('#hosts_setup_server_dialog', "for the first time.") + b.click("#hosts_setup_server_dialog button:contains('Trust and add host')") + if password: + b.wait_in_text('#hosts_setup_server_dialog', "Unable to log in") + b.set_input_text('#login-custom-password', password) + b.click('#hosts_setup_server_dialog button:contains(Log in)') + if expect_closed_dialog: + b.wait_not_present('#hosts_setup_server_dialog') + + def add_machine(self, address, known_host=False, password="foobar", browser=None): + b = browser or self.browser + b.switch_to_top() + b.go(f"/@{address}") + self.start_machine_troubleshoot(new=True, known_host=known_host, password=password, browser=browser) + b.enter_page("/system", host=address) + + # List of allowed journal messages during tests; these need to match the *entire* message + default_allowed_messages = [ + # Reauth stuff + '.*Reauthorizing unix-user:.*', + '.*user .* was 
reauthorized.*', + + # Happens when the user logs out during reauthorization + "Error executing command as another user: Not authorized", + "This incident has been reported.", + + # Reboots are ok + "-- Reboot --", + + # Sometimes D-Bus goes away before us during shutdown + "Lost the name com.redhat.Cockpit on the session message bus", + "GLib-GIO:ERROR:gdbusobjectmanagerserver\\.c:.*:g_dbus_object_manager_server_emit_interfaces_.*: assertion failed \\(error == NULL\\): The connection is closed \\(g-io-error-quark, 18\\)", + "Error sending message: The connection is closed", + + # PAM noise + "cockpit-session: pam: Creating directory .*", + "cockpit-session: pam: Changing password for .*", + + # btmp tracking + "cockpit-session: pam: Last failed login:.*", + "cockpit-session: pam: There .* failed login attempts? since the last successful login.", + + # pam_lastlog complaints + ".*/var/log/lastlog: No such file or directory", + + # ssh messages may be dropped when closing + '10.*: dropping message while waiting for child to exit', + + # pkg/packagekit/autoupdates.jsx backend check often gets interrupted by logout + "xargs: basename: terminated by signal 13", + + # SELinux messages to ignore + "(audit: )?type=1403 audit.*", + "(audit: )?type=1404 audit.*", + "(audit: )?type=1405 audit.*", + + # apparmor loading + "(audit: )?type=1400.*apparmor=\"STATUS\".*", + + # apparmor noise + "(audit: )?type=1400.*apparmor=\"ALLOWED\".*", + + # Messages from systemd libraries when they are in debug mode + 'Successfully loaded SELinux database in.*', + 'calling: info', + 'Sent message type=method_call sender=.*', + 'Got message type=method_return sender=.*', + + # Various operating systems see this from time to time + "Journal file.*truncated, ignoring file.", + + # our core dump retrieval is not entirely reliable + "Failed to send coredump datagram:.*", + + # Something crashed, but we don't have more info. 
Don't fail on that + "Failed to get (COMM|EXE).*: No such process", + + # several tests change the host name + "sudo: unable to resolve host.*", + + # The usual sudo finger wagging + "We trust you have received the usual lecture from the local System", + "Administrator. It usually boils down to these three things:", + r"#1\) Respect the privacy of others.", + r"#2\) Think before you type.", + r"#3\) With great power comes great responsibility.", + "For security reasons, the password you type will not be visible", + + # starting out with empty PCP logs and pmlogger not running causes these metrics channel messages + "(direct|pcp-archive): no such metric: .*: Unknown metric name", + "(direct|pcp-archive): instance name lookup failed:.*", + "(direct|pcp-archive): couldn't create pcp archive context for.*", + + # timedatex.service shuts down after timeout, runs into race condition with property watching + ".*org.freedesktop.timedate1: couldn't get all properties.*Error:org.freedesktop.DBus.Error.NoReply.*", + + # https://github.com/cockpit-project/cockpit/issues/19235 + "invalid non-UTF8 @data passed as text to web_socket_connection_send.*", + ] + + default_allowed_messages += os.environ.get("TEST_ALLOW_JOURNAL_MESSAGES", "").split(",") + + # List of allowed console.error() messages during tests; these match substrings + default_allowed_console_errors = [ + # HACK: These should be fixed, but debugging these is not trivial, and the impact is very low + "Warning: .* setState.*on an unmounted component", + "Warning: Can't perform a React state update on an unmounted component", + "Warning: Cannot update a component.*while rendering a different component", + "Warning: A component is changing an uncontrolled input to be controlled", + "Warning: A component is changing a controlled input to be uncontrolled", + "Warning: Can't call.*on a component that is not yet mounted. 
This is a no-op", + "Warning: Cannot update during an existing state transition", + r"Warning: You are calling ReactDOMClient.createRoot\(\) on a container that has already been passed to createRoot", + + # FIXME: PatternFly complains about these, but https://www.a11y-collective.com/blog/the-first-rule-for-using-aria/ + # and https://www.accessibility-developer-guide.com/knowledge/aria/bad-practices/ + "aria-label", + + # PackageKit crashes a lot; let that not be the sole reason for failing a test + "error: Could not determine kpatch packages:.*PackageKit crashed", + ] + + if testvm.DEFAULT_IMAGE.startswith('rhel-8') or testvm.DEFAULT_IMAGE.startswith('centos-8'): + # old occasional bugs in tracer, don't happen in newer versions any more + default_allowed_console_errors.append('Tracer failed:.*Traceback') + + env_allow = os.environ.get("TEST_ALLOW_BROWSER_ERRORS") + if env_allow: + default_allowed_console_errors += env_allow.split(",") + + def allow_journal_messages(self, *patterns: str): + """Don't fail if the journal contains a entry completely matching the given regexp""" + for p in patterns: + self.allowed_messages.append(p) + + def allow_hostkey_messages(self): + self.allow_journal_messages('.*: .* host key for server is not known: .*', + '.*: refusing to connect to unknown host: .*', + '.*: .* host key for server has changed to: .*', + '.*: host key for this server changed key type: .*', + '.*: failed to retrieve resource: hostkey-unknown') + + def allow_restart_journal_messages(self): + self.allow_journal_messages(".*Connection reset by peer.*", + "connection unexpectedly closed by peer", + ".*Broken pipe.*", + "g_dbus_connection_real_closed: Remote peer vanished with error: Underlying GIOStream returned 0 bytes on an async read \\(g-io-error-quark, 0\\). 
Exiting.", + "cockpit-session: .*timed out.*", + "ignoring failure from session process:.*", + "peer did not close io when expected", + "request timed out, closing", + "PolicyKit daemon disconnected from the bus.", + ".*couldn't create polkit session subject: No session for pid.*", + "We are no longer a registered authentication agent.", + ".*: failed to retrieve resource: terminated", + ".*: external channel failed: (terminated|protocol-error)", + 'audit:.*denied.*comm="systemd-user-se".*nologin.*', + ".*No session for cookie", + + 'localhost: dropping message while waiting for child to exit', + '.*: GDBus.Error:org.freedesktop.PolicyKit1.Error.Failed: .*', + '.*g_dbus_connection_call_finish_internal.*G_IS_DBUS_CONNECTION.*', + '.*Message recipient disconnected from message bus without replying.*', + '.*Unable to shutdown socket: Transport endpoint is not connected.*', + + # If restarts or reloads happen really fast, the code in python.js + # that figures out which python to use crashes with SIGPIPE, + # and this is the resulting message + 'which: no python in .*' + ) + + def check_journal_messages(self, machine=None): + """Check for unexpected journal entries.""" + machine = machine or self.machine + # on main machine, only consider journal entries since test case start + cursor = (machine == self.machine) and self.journal_start or None + + # Journald does not always set trusted fields like + # _SYSTEMD_UNIT or _EXE correctly for the last few messages of + # a dying process, so we filter by the untrusted but reliable + # SYSLOG_IDENTIFIER instead. 
+ + matches = [ + "SYSLOG_IDENTIFIER=cockpit-ws", + "SYSLOG_IDENTIFIER=cockpit-bridge", + "SYSLOG_IDENTIFIER=cockpit/ssh", + # also catch GLIB_DOMAIN= which apply to cockpit-ws (but not to -bridge, too much random noise) + "_COMM=cockpit-ws", + "GLIB_DOMAIN=cockpit-ws", + "GLIB_DOMAIN=cockpit-bridge", + "GLIB_DOMAIN=cockpit-ssh", + "GLIB_DOMAIN=cockpit-pcp" + ] + + if not self.allow_core_dumps: + matches += ["SYSLOG_IDENTIFIER=systemd-coredump"] + self.allowed_messages.append("Resource limits disable core dumping for process.*") + # can happen on shutdown when /run/systemd/coredump is gone already + self.allowed_messages.append("Failed to connect to coredump service: No such file or directory") + self.allowed_messages.append("Failed to connect to coredump service: Connection refused") + + messages = machine.journal_messages(matches, 6, cursor=cursor) + + if "TEST_AUDIT_NO_SELINUX" not in os.environ: + messages += machine.audit_messages("14", cursor=cursor) # 14xx is selinux + + self.allowed_messages += self.machine.allowed_messages() + + all_found = True + first = None + for m in messages: + # remove leading/trailing whitespace + m = m.strip() + # Ignore empty lines + if not m: + continue + found = False + + # When coredump could not be generated, we cannot do much with info about there being a coredump + # Ignore this message and all subsequent core dumps + # If there is more than just one line about coredump, it will fail and show this messages + if m.startswith("Failed to generate stack trace"): + self.allowed_messages.append("Process .* of user .* dumped core.*") + continue + + for p in self.allowed_messages: + match = re.match(p, m) + if match and match.group(0) == m: + found = True + break + if not found: + all_found = False + if not first: + first = m + print(m) + if not all_found: + self.copy_js_log("FAIL") + self.copy_journal("FAIL") + self.copy_cores("FAIL") + if not self.getError(): + # fail test on the unexpected messages + raise 
Error(UNEXPECTED_MESSAGE + "journal messages:\n" + first) + + def allow_browser_errors(self, *patterns): + """Don't fail if the test caused a console error contains the given regexp""" + for p in patterns: + self.allowed_console_errors.append(p) + + def check_browser_errors(self): + if not self.browser: + return + for log in self.browser.get_js_log(): + if not log.startswith("error: "): + continue + # errors are fatal in general; they need to be explicitly whitelisted + for p in self.allowed_console_errors: + if re.search(p, log): + break + else: + raise Error(UNEXPECTED_MESSAGE + "browser errors:\n" + log) + + self.browser.assert_no_oops() + + def check_pixel_tests(self): + if self.browser: + self.browser.assert_no_unused_pixel_test_references() + if self.browser.failed_pixel_tests > 0: + raise Error(PIXEL_TEST_MESSAGE) + + def snapshot(self, title: str, label: Optional[str] = None): + """Take a snapshot of the current screen and save it as a PNG. + + Arguments: + title: Used for the filename. 
+ """ + if self.browser is not None: + try: + self.browser.snapshot(title, label) + except RuntimeError: + # this usually runs in exception handlers; raising an exception here skips cleanup handlers, so don't + sys.stderr.write("Unexpected exception in snapshot():\n") + sys.stderr.write(traceback.format_exc()) + + def copy_js_log(self, title, label=None): + if self.browser is not None: + try: + self.browser.copy_js_log(title, label) + except RuntimeError: + # this usually runs in exception handlers; raising an exception here skips cleanup handlers, so don't + sys.stderr.write("Unexpected exception in copy_js_log():\n") + sys.stderr.write(traceback.format_exc()) + + def copy_journal(self, title: str, label: Optional[str] = None): + for _, m in self.machines.items(): + if m.ssh_reachable: + log = unique_filename("%s-%s-%s" % (label or self.label(), m.label, title), "log.gz") + with open(log, "w") as fp: + m.execute("journalctl|gzip", stdout=fp) + print("Journal extracted to %s" % (log)) + attach(log, move=True) + + def copy_cores(self, title: str, label: Optional[str] = None): + if self.allow_core_dumps: + return + for _, m in self.machines.items(): + if m.ssh_reachable: + directory = "%s-%s-%s.core" % (label or self.label(), m.label, title) + dest = os.path.abspath(directory) + # overwrite core dumps from previous retries + if os.path.exists(dest): + shutil.rmtree(dest) + m.download_dir("/var/lib/systemd/coredump", dest) + try: + os.rmdir(dest) + except OSError as ex: + if ex.errno == errno.ENOTEMPTY: + print("Core dumps downloaded to %s" % (dest)) + # Enable this to temporarily(!) create artifacts for core dumps, if a crash is hard to reproduce + # attach(dest, move=True) + + def settle_cpu(self): + """Wait until CPU usage in the VM settles down + + Wait until the process with the highest CPU usage drops below 20% + usage. Wait for up to a minute, then return. There is no error if the + CPU stays busy, as usually a test then should just try to run anyway. 
+ """ + for _ in range(20): + # get the CPU percentage of the most busy process + busy_proc = self.machine.execute("ps --no-headers -eo pcpu,pid,args | sort -k 1 -n -r | head -n1") + if float(busy_proc.split()[0]) < 20.0: + break + time.sleep(3) + + def sed_file(self, expr: str, path: str, apply_change_action: Optional[str] = None): + """sed a file on primary machine + + This is safe for @nondestructive tests, the file will be restored during cleanup. + + The optional apply_change_action will be run both after sedding and after restoring the file. + """ + m = self.machine + m.execute(f"sed -i.cockpittest '{expr}' {path}") + if apply_change_action: + m.execute(apply_change_action) + + if self.is_nondestructive(): + if apply_change_action: + self.addCleanup(m.execute, apply_change_action) + self.addCleanup(m.execute, f"mv {path}.cockpittest {path}") + + def file_exists(self, path: str) -> bool: + """Check if file exists on test machine""" + + return self.machine.execute(f"if test -e {path}; then echo yes; fi").strip() != "" + + def restore_dir(self, path: str, post_restore_action: Optional[str] = None, reboot_safe: bool = False, + restart_unit: Optional[str] = None): + """Backup/restore a directory for a nondestructive test + + This takes care to not ever touch the original content on disk, but uses transient overlays. + As this uses a bind mount, it does not work for files that get changed atomically (with mv); + use restore_file() for these. + + `restart_unit` will be stopped before restoring path, and restarted afterwards if it was running. + The optional post_restore_action will run after restoring the original content. + + If the directory needs to survive reboot, `reboot_safe=True` needs to be specified; then this + will just backup/restore the directory instead of bind-mounting, which is less robust. 
+ """ + if not self.is_nondestructive() and not self.machine.ostree_image: + return # skip for efficiency reasons + + exe = self.machine.execute + + if not self.file_exists(path): + self.addCleanup(exe, f"rm -rf '{path}'") + return + + backup = os.path.join(self.vm_tmpdir, path.replace('/', '_')) + exe(f"mkdir -p {self.vm_tmpdir}; cp -a {path}/ {backup}/") + + if not reboot_safe: + exe(f"mount -o bind {backup} {path}") + + if restart_unit: + restart_stamp = f"/run/cockpit_restart_{restart_unit}" + self.addCleanup( + exe, + f"if [ -e {restart_stamp} ]; then systemctl start {restart_unit}; rm {restart_stamp}; fi" + ) + + if post_restore_action: + self.addCleanup(exe, post_restore_action) + + if reboot_safe: + self.addCleanup(exe, f"rm -rf {path}; mv {backup} {path}") + else: + # HACK: a lot of tests call this on /home/...; that restoration happens before killing all user + # processes in nonDestructiveSetup(), so we have to do it lazily + if path.startswith("/home"): + cmd = f"umount -lf {path}" + else: + cmd = f"umount {path} || {{ fuser -uvk {path} {path}/* >&2 || true; sleep 1; umount {path}; }}" + self.addCleanup(exe, cmd) + + if restart_unit: + self.addCleanup(exe, f"if systemctl --quiet is-active {restart_unit}; then touch {restart_stamp}; fi; " + f"systemctl stop {restart_unit}") + + def restore_file(self, path: str, post_restore_action: Optional[str] = None): + """Backup/restore a file for a nondestructive test + + This is less robust than restore_dir(), but works for files that need to get changed atomically. + + If path does not currently exist, it will be removed again on cleanup. 
+ """ + if not self.is_nondestructive(): + return # skip for efficiency reasons + + if post_restore_action: + self.addCleanup(self.machine.execute, post_restore_action) + + if self.file_exists(path): + backup = os.path.join(self.vm_tmpdir, path.replace('/', '_')) + self.machine.execute(f"mkdir -p {self.vm_tmpdir}; cp -a {path} {backup}") + self.addCleanup(self.machine.execute, f"mv {backup} {path}") + else: + self.addCleanup(self.machine.execute, f"rm -f {path}") + + def write_file(self, path: str, content: str, append: bool = False, owner: Optional[str] = None, perm: Optional[str] = None, + post_restore_action: Optional[str] = None): + """Write a file on primary machine + + This is safe for @nondestructive tests, the file will be removed during cleanup. + + If @append is True, append to existing file instead of replacing it. + @owner is the desired file owner as chown shell string (e.g. "admin:nogroup") + @perm is the desired file permission as chmod shell string (e.g. "0600") + """ + m = self.machine + self.restore_file(path, post_restore_action=post_restore_action) + m.write(path, content, append=append, owner=owner, perm=perm) + + def enable_root_login(self): + """Enable root login + + By default root login is disabled in cockpit, removing the root entry of /etc/cockpit/disallowed-users allows root to login. + """ + + # fedora-coreos runs cockpit-ws in a containter so does not install cockpit-ws on the host + disallowed_conf = '/etc/cockpit/disallowed-users' + if not self.machine.ostree_image and self.file_exists(disallowed_conf): + self.sed_file('/root/d', disallowed_conf) + + def setup_provisioned_hosts(self, disable_preload: bool = False): + """Setup provisioned hosts for testing + + This sets the hostname of all machines to the name given in the + provision dictionary and optionally disabled preload. 
+ """ + for name, m in self.machines.items(): + m.execute(f"hostnamectl set-hostname {name}") + if disable_preload: + self.disable_preload("packagekit", "playground", "systemd", machine=m) + + def authorize_pubkey(self, machine, account, pubkey): + machine.execute(f"a={account} d=/home/$a/.ssh; mkdir -p $d; chown $a:$a $d; chmod 700 $d") + machine.write(f"/home/{account}/.ssh/authorized_keys", pubkey) + machine.execute(f"a={account}; chown $a:$a /home/$a/.ssh/authorized_keys") + + def get_pubkey(self, machine, account): + return machine.execute(f"cat /home/{account}/.ssh/id_rsa.pub") + + def setup_ssh_auth(self): + self.machine.execute("d=/home/admin/.ssh; mkdir -p $d; chown admin:admin $d; chmod 700 $d") + self.machine.execute("test -f /home/admin/.ssh/id_rsa || ssh-keygen -f /home/admin/.ssh/id_rsa -t rsa -N ''") + self.machine.execute("chown admin:admin /home/admin/.ssh/id_rsa*") + pubkey = self.get_pubkey(self.machine, "admin") + + for m in self.machines: + self.authorize_pubkey(self.machines[m], "admin", pubkey) + + +########################### +# Global helper functions +# + + +def jsquote(js: str) -> str: + return json.dumps(js) + + +def get_decorator(method, _class, name, default=None): + """Get decorator value of a test method or its class + + Return None if the decorator was not set. + """ + attr = "_testlib__" + name + return getattr(method, attr, getattr(_class, attr, default)) + + +########################### +# Test decorators +# + +def skipBrowser(reason: str, *browsers: str): + """Decorator for skipping a test on given browser(s) + + Skips a test for provided *reason* on *browsers*. + """ + browser = os.environ.get("TEST_BROWSER", "chromium") + if browser in browsers: + return unittest.skip(f"{browser}: {reason}") + return lambda testEntity: testEntity + + +def skipImage(reason: str, *images: str): + """Decorator for skipping a test for given image(s) + + Skip a test for a provided *reason* for given *images*. 
These + support Unix shell style patterns via fnmatch.fnmatch. + + Example: @skipImage("no btrfs support on RHEL", "rhel-*") + """ + if any(fnmatch.fnmatch(testvm.DEFAULT_IMAGE, img) for img in images): + return unittest.skip(f"{testvm.DEFAULT_IMAGE}: {reason}") + return lambda testEntity: testEntity + + +def onlyImage(reason: str, *images: str): + """Decorator to only run a test on given image(s) + + Only run this test on provided *images* for *reason*. These + support Unix shell style patterns via fnmatch.fnmatch. + """ + if not any(fnmatch.fnmatch(testvm.DEFAULT_IMAGE, arg) for arg in images): + return unittest.skip(f"{testvm.DEFAULT_IMAGE}: {reason}") + return lambda testEntity: testEntity + + +def skipOstree(reason: str): + """Decorator for skipping a test on OSTree images + + Skip test for *reason* on OSTree images defined in OSTREE_IMAGES in bots/lib/constants.py. + """ + if testvm.DEFAULT_IMAGE in OSTREE_IMAGES: + return unittest.skip(f"{testvm.DEFAULT_IMAGE}: {reason}") + return lambda testEntity: testEntity + + +def skipDistroPackage(): + """For tests which apply to BaseOS packages + + With that, tests can evolve with latest code, without constantly breaking them when + running against older package versions in the -distropkg tests. + """ + if 'distropkg' in testvm.DEFAULT_IMAGE: + return unittest.skip(f"{testvm.DEFAULT_IMAGE}: Do not test BaseOS packages") + return lambda testEntity: testEntity + + +def nondestructive(testEntity): + """Tests decorated as nondestructive will all run against the same VM + + Can be used on test classes and individual test methods. + """ + setattr(testEntity, '_testlib__nondestructive', True) + return testEntity + + +def no_retry_when_changed(testEntity): + """Tests decorated with no_retry_when_changed will only run once if they've been changed + + Tests that have been changed are expected to succeed 3 times, if the test + takes a long time, this prevents timeouts. Can be used on test classes and + individual methods. 
+ """ + setattr(testEntity, '_testlib__no_retry_when_changed', True) + return testEntity + + +def todo(reason: str = ''): + """Tests decorated with @todo are expected to fail. + + An optional reason can be given, and will appear in the TAP output if run + via run-tests. + """ + def wrapper(testEntity): + setattr(testEntity, '_testlib__todo', reason) + return testEntity + return wrapper + + +def todoPybridge(reason: Optional[str] = None): + if not reason: + reason = 'still fails with python bridge' + + def wrap(test_method): + @functools.wraps(test_method) + def wrapped_test(self): + is_pybridge = self.is_pybridge() + try: + test_method(self) + if is_pybridge: + return self.fail(reason) + return None + # only accept our testlib Errors, plus RuntimeError for TestSuperuserDashboardOldMachine + except (Error, RuntimeError): + if is_pybridge: + traceback.print_exc() + return self.skipTest(reason) + raise + + return wrapped_test + + return wrap + + +def todoPybridgeRHEL8(reason: Optional[str] = None): + if testvm.DEFAULT_IMAGE.startswith('rhel-8') or testvm.DEFAULT_IMAGE.startswith('centos-8'): + return todoPybridge(reason or 'known fail on el8 with python bridge') + return lambda testEntity: testEntity + + +def timeout(seconds: int): + """Change default test timeout of 600s, for long running tests + + Can be applied to an individual test method or the entire class. This only + applies to test/common/run-tests, not to calling check-* directly. 
+ """ + def wrapper(testEntity): + setattr(testEntity, '_testlib__timeout', seconds) + return testEntity + return wrapper + + +class TapRunner: + def __init__(self, verbosity=1): + self.verbosity = verbosity + + def runOne(self, test): + result = unittest.TestResult() + print('# ----------------------------------------------------------------------') + print('#', test) + try: + unittest.TestSuite([test]).run(result) + except KeyboardInterrupt: + result.addError(test, sys.exc_info()) + return result + except Exception: + result.addError(test, sys.exc_info()) + sys.stderr.write(f"Unexpected exception while running {test}\n") + sys.stderr.write(traceback.format_exc()) + return result + else: + result.printErrors() + + if result.skipped: + print(f"# Result {test} skipped: {result.skipped[0][1]}") + elif result.wasSuccessful(): + print(f"# Result {test} succeeded") + else: + for failure in result.failures: + print(failure[1]) + for error in result.errors: + print(error[1]) + print(f"# Result {test} failed") + return result + + def run(self, testable): + tests = [] + + # The things to test + def collapse(test, tests): + if isinstance(test, unittest.TestCase): + tests.append(test) + else: + for t in test: + collapse(t, tests) + collapse(testable, tests) + test_count = len(tests) + + # For statistics + start = time.time() + failures = 0 + skips = [] + while tests: + # The next test to test + test = tests.pop(0) + result = self.runOne(test) + if not result.wasSuccessful(): + failures += 1 + skips += result.skipped + + # Report on the results + duration = int(time.time() - start) + hostname = socket.gethostname().split(".")[0] + details = f"[{duration}s on {hostname}]" + + MachineCase.kill_global_machine() + + # Return 77 if all tests were skipped + if len(skips) == test_count: + sys.stdout.write("# SKIP {0}\n".format(", ".join([f"{s[0]!s} {s[1]}" for s in skips]))) + return 77 + if failures: + sys.stdout.write("# {0} TEST{1} FAILED {2}\n".format(failures, "S" if failures > 
1 else "", details)) + return 1 + else: + sys.stdout.write("# {0} TEST{1} PASSED {2}\n".format(test_count, "S" if test_count > 1 else "", details)) + return 0 + + +def print_tests(tests): + for test in tests: + if isinstance(test, unittest.TestSuite): + print_tests(test) + elif isinstance(test, unittest.loader._FailedTest): + name = test.id().replace("unittest.loader._FailedTest.", "") + print(f"Error: '{name}' does not match a test", file=sys.stderr) + else: + print(test.id().replace("__main__.", "")) + + +def arg_parser(enable_sit=True): + parser = argparse.ArgumentParser(description='Run Cockpit test(s)') + parser.add_argument('-v', '--verbose', dest="verbosity", action='store_const', + const=2, help='Verbose output') + parser.add_argument('-t', "--trace", dest='trace', action='store_true', + help='Trace machine boot and commands') + parser.add_argument('-q', '--quiet', dest='verbosity', action='store_const', + const=0, help='Quiet output') + if enable_sit: + parser.add_argument('-s', "--sit", dest='sit', action='store_true', + help="Sit and wait after test failure") + parser.add_argument('--nonet', dest="fetch", action="store_false", + help="Don't go online to download images or data") + parser.add_argument('--enable-network', dest='enable_network', action='store_true', + help="Enable network access for tests") + parser.add_argument('--coverage', action='store_true', + help="Collect code coverage data") + parser.add_argument("-l", "--list", action="store_true", help="Print the list of tests that would be executed") + # TMT compatibility, pass testnames as whitespace separated list + parser.add_argument('tests', nargs='*', default=os.getenv("TEST_NAMES").split() if os.getenv("TEST_NAMES") else []) + + parser.set_defaults(verbosity=1, fetch=True) + return parser + + +def test_main(options=None, suite=None, attachments=None, **kwargs): + """ + Run all test cases, as indicated by arguments. 
+ + If no arguments are given on the command line, all test cases are + executed. Otherwise only the given test cases are run. + """ + + global opts + + # Turn off python stdout buffering + buf_arg = 0 + os.environ['PYTHONUNBUFFERED'] = '1' + buf_arg = 1 + sys.stdout.flush() + sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', buf_arg) + + standalone = options is None + parser = arg_parser() + parser.add_argument('--machine', metavar="hostname[:port]", dest="address", + default=None, help="Run this test against an already running machine") + parser.add_argument('--browser', metavar="hostname[:port]", dest="browser", + default=None, help="When using --machine, use this cockpit web address") + + if standalone: + options = parser.parse_args() + + # Sit should always imply verbose + if options.sit: + options.verbosity = 2 + + # Have to copy into opts due to python globals across modules + for (key, value) in vars(options).items(): + setattr(opts, key, value) + + opts.address = getattr(opts, "address", None) + opts.browser = getattr(opts, "browser", None) + opts.attachments = os.environ.get("TEST_ATTACHMENTS", attachments) + if opts.attachments: + os.makedirs(opts.attachments, exist_ok=True) + + import __main__ + if len(opts.tests) > 0: + if suite: + parser.error("tests may not be specified when running a predefined test suite") + suite = unittest.TestLoader().loadTestsFromNames(opts.tests, module=__main__) + elif not suite: + suite = unittest.TestLoader().loadTestsFromModule(__main__) + + if options.list: + print_tests(suite) + return 0 + + attach(os.path.join(TEST_DIR, "common/pixeldiff.html")) + attach(os.path.join(TEST_DIR, "common/link-patterns.json")) + + runner = TapRunner(verbosity=opts.verbosity) + ret = runner.run(suite) + if not standalone: + return ret + sys.exit(ret) + + +class Error(Exception): + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return self.msg + + +def wait(func: Callable, msg: Optional[str] = None, delay: int = 1, tries: 
int = 60): + """Wait for FUNC to return something truthy, and return that. + + FUNC is called repeatedly until it returns a true value or until a + timeout occurs. In the latter case, a exception is raised that + describes the situation. The exception is either the last one + thrown by FUNC, or includes MSG, or a default message. + + :param func: The function to call + :param msg: A error message to use when the timeout occurs. Defaults + to a generic message. + :param delay: How long to wait between calls to FUNC, in seconds. (default 1) + :param tries: How often to call FUNC. (defaults 60) + :raises Error: When a timeout occurs. + """ + + t = 0 + while t < tries: + try: + val = func() + if val: + return val + except Exception: + if t == tries - 1: + raise + else: + pass + t = t + 1 + sleep(delay) + raise Error(msg or "Condition did not become true.") + + +def sit(machines=None): + """ + Wait until the user confirms to continue. + + The current test case is suspended so that the user can inspect + the browser. + """ + + for (_, machine) in (machines or {}).items(): + sys.stderr.write(machine.diagnose()) + print("Press RET to continue...") + sys.stdin.readline() diff --git a/test/reference-image b/test/reference-image new file mode 100644 index 0000000..eb90814 --- /dev/null +++ b/test/reference-image @@ -0,0 +1 @@ +fedora-39 diff --git a/test/run b/test/run new file mode 100755 index 0000000..b50fe1e --- /dev/null +++ b/test/run @@ -0,0 +1,17 @@ +#! 
/bin/bash +# This is the expected entry point for Cockpit CI; will be called without +# arguments but with an appropriate $TEST_OS + +set -eu + +export RUN_TESTS_OPTIONS=--track-naughties + +TEST_SCENARIO=${TEST_SCENARIO:-} + +if [ "$TEST_SCENARIO" == "devel" ]; then + export TEST_COVERAGE=yes +fi + +make codecheck +make check +make po/podman.pot diff --git a/test/static-code b/test/static-code new file mode 100755 index 0000000..4734a7c --- /dev/null +++ b/test/static-code @@ -0,0 +1,200 @@ +#!/bin/bash +# run static code checks like eslint, flake8, mypy, ruff, vulture. + +set -eu + +# requires: .flake8 +# requires: pyproject.toml +# requires: containers/flatpak/test/ruff.toml +# requires: pkg/ruff.toml +# requires: test/common/ruff.toml +# requires: test/example/ruff.toml +# requires: test/verify/ruff.toml +# requires: tools/vulture-suppressions/ruff.toml + +# we consider any function named test_* to be a test case +# each test is considered to succeed if it exits with no output +# exit with status 77 is a skip, with the message in the output +# otherwise, any output is a failure, even if exit status is 0 + +# note: `set -e` is not active during the tests. 
+ +find_scripts() { + # Helper to find all scripts in the tree + ( + # Any non-binary file which contains a given shebang + git grep --cached -lIz '^#!.*'"$1" + shift + # Any file matching the provided globs + git ls-files -z "$@" + ) | sort -z | uniq -z +} + +find_python_files() { + find_scripts 'python3' '*.py' +} + +test_flake8() { + command -v flake8 >/dev/null || skip 'no flake8' + find_python_files | xargs -r -0 flake8 +} + +test_ruff() { + command -v ruff >/dev/null || skip 'no ruff' + find_python_files | xargs -r -0 ruff check --no-cache +} + +if [ "${WITH_PARTIAL_TREE:-0}" = 0 ]; then + mypy_strict_files=' + src/cockpit/__init__.py + src/cockpit/_version.py + src/cockpit/jsonutil.py + src/cockpit/protocol.py + src/cockpit/transports.py + ' + test_mypy() { + command -v mypy >/dev/null || skip 'no mypy' + for pkg in systemd_ctypes ferny bei; do + test -e "src/cockpit/_vendor/${pkg}/__init__.py" || skip "no ${pkg}" + done + mypy --no-error-summary src/cockpit test/pytest + # test scripts individually, to avoid clashing on `__main__` + # also skip integration tests, they are too big and not annotated + find_scripts 'python3' "*.none" | grep -zv 'test/' | xargs -r -0 -n1 mypy --no-error-summary + mypy --no-error-summary --strict $mypy_strict_files + } + + test_vulture() { + # vulture to find unused variables/functions + command -v vulture >/dev/null || skip 'no vulture' + find_python_files | xargs -r -0 vulture + } +fi + + +test_js_translatable_strings() { + # Translatable strings must be marked with _(""), not _('') + + ! 
git grep -n -E "(gettext|_)\(['\`]" -- {src,pkg}/'*'.{js,jsx}
+}
+
+if [ "${WITH_PARTIAL_TREE:-0}" = 0 ]; then
+    test_eslint() {
+        test -x node_modules/.bin/eslint -a -x /usr/bin/node || skip 'no eslint'
+        find_scripts 'node' '*.js' '*.jsx' | xargs -0 node_modules/.bin/eslint
+    }
+fi
+
+test_stylelint() {
+    test -x node_modules/.bin/stylelint -a -x /usr/bin/node || skip 'no stylelint'
+    git ls-files -z '*.css' '*.scss' | xargs -r -0 node_modules/.bin/stylelint
+}
+
+test_no_translatable_attr() {
+    # Use of translatable attribute in HTML: should be 'translate' instead
+
+    ! git grep -n 'translatable=["'\'']yes' -- pkg doc
+}
+
+test_unsafe_security_policy() {
+    # It's dangerous to have 'unsafe-inline' or 'unsafe-eval' in our
+    # content-security-policy entries.
+
+    git grep -lIz -E 'content-security-policy.*(\*|unsafe)' 'pkg/*/manifest.json' | while read -d '' filename; do
+        if test ! -f "$(dirname $filename)/content-security-policy.override"; then
+            echo "$filename contains unsafe content security policy"
+        fi
+    done
+}
+
+test_json_verify() {
+    # Check all JSON files for validity
+
+    git ls-files -z '*.json' | while read -d '' filename; do
+        python3 -m json.tool "$filename" /dev/null 2>&1 | sed "s@^@$filename: @"
+    done
+}
+
+test_html_verify() {
+    # Check all HTML files for syntactic validity
+
+    git ls-files -z 'pkg/*.html' | while read -d '' filename; do
+        if ! python3 -c "import xml.etree.ElementTree as ET; ET.parse('$filename')"; then
+            echo "$filename contains invalid XML"
+        fi
+    done
+}
+
+test_include_config_h() {
+    # Every C file should #include "config.h" at the top
+
+    git ls-files -cz '*.c' | while read -d '' filename; do
+        if sed -n '/^#include "config.h"$/q1; /^\s*#/q;' "$filename"; then
+            printf '%s: #include "config.h" is not the first line\n' "$filename"
+        fi
+    done
+}
+
+### end of tests. start of machinery.
+ +skip() { + printf "%s\n" "$*" + exit 77 +} + +main() { + if [ $# = 0 ]; then + tap='' + elif [ $# = 1 -a "$1" = "--tap" ]; then + tap='1' + else + printf "usage: %s [--tap]\n" "$0" >&2 + exit 1 + fi + + cd "${0%/*}/.." + if [ ! -e .git ]; then + echo '1..0 # SKIP not in a git checkout' + exit 0 + fi + + exit_status=0 + counter=0 + + tests=($(compgen -A function 'test_')) + [ -n "${tap}" ] && printf "1..%d\n" "${#tests[@]}" + + for test_function in "${tests[@]}"; do + path="/static-code/$(echo ${test_function} | tr '_' '-')" + counter=$((counter + 1)) + fail='' + skip='' + + # run the test, capturing its output and exit status + output="$(${test_function} 2>&1)" && test_status=0 || test_status=$? + + if [ "${test_status}" = 77 ]; then + if [ -z "${tap}" ]; then + printf >&2 "WARNING: skipping %s: %s\n" "${path}" "${output}" + fi + skip=" # SKIP ${output}" + output='' + elif [ "${test_status}" != 0 -o -n "${output}" ]; then + exit_status=1 + fail=1 + fi + + # Only print output on failures or --tap mode + [ -n "${tap}" -o -n "${fail}" ] || continue + + # excluding the plan, this is the only output that we ever generate + printf "%s %d %s%s\n" "${fail:+not }ok" "${counter}" "${path}" "${skip}" + if [ -n "${output}" ]; then + printf "%s\n" "${output}" | sed -e 's/^/# /' + fi + done + + exit "${exit_status}" +} + +main "$@" diff --git a/test/vm.install b/test/vm.install new file mode 100755 index 0000000..f6c52cf --- /dev/null +++ b/test/vm.install @@ -0,0 +1,38 @@ +#!/bin/sh +# image-customize script to prepare a bots VM for cockpit-podman testing +set -eu + +if grep -q ID.*debian /usr/lib/os-release; then + # Debian does not enable user namespaces by default + echo kernel.unprivileged_userns_clone = 1 > /etc/sysctl.d/00-local-userns.conf + systemctl restart systemd-sysctl + + # disable services that get in the way of /var/lib/containers + if systemctl is-enabled docker.service; then + systemctl disable docker.service + fi +fi + +# don't force https:// (self-signed 
cert) +printf "[WebService]\\nAllowUnencrypted=true\\n" > /etc/cockpit/cockpit.conf + +if type firewall-cmd >/dev/null 2>&1; then + firewall-cmd --add-service=cockpit --permanent +fi + +. /usr/lib/os-release + +# Remove extra images, tests assume our specific set +# Since 4.0 podman now ships the pause image +podman images --format '{{.Repository}}:{{.Tag}}' | grep -Ev 'localhost/test-|pause|cockpit/ws' | xargs -r podman rmi -f + +# tests reset podman, save the images +mkdir -p /var/lib/test-images +for img in $(podman images --format '{{.Repository}}:{{.Tag}}'); do + fname="$(echo "$img" | tr -dc '[a-zA-Z-]')" + podman save -o "/var/lib/test-images/${fname}.tar" "$img" +done + +# 15minutes after boot tmp files are removed and podman stores some tmp lock files +systemctl disable --now systemd-tmpfiles-clean.timer +systemctl --global disable systemd-tmpfiles-clean.timer diff --git a/tools/node-modules b/tools/node-modules new file mode 100755 index 0000000..a8519e1 --- /dev/null +++ b/tools/node-modules @@ -0,0 +1,198 @@ +#!/bin/sh + +# shellcheck disable=SC3043 # local is not POSIX, but every shell has it +# shellcheck disable=SC3013,SC3045 # ditto for test {-nt,-t} + +GITHUB_REPO='node-cache' +SUBDIR='node_modules' + +V="${V-0}" # default to friendly messages + +set -eu +cd "${0%/*}/.." +# shellcheck source-path=SCRIPTDIR/.. +. test/common/git-utils.sh + +cmd_remove() { + # if we did this for ourselves the rm is enough, but it might be the case + # that someone actually used git-submodule to fetch this, so clean up after + # that as well. NB: deinit nicely recreates the empty directory for us. 
+ message REMOVE node_modules + rm -rf node_modules + git submodule deinit node_modules + rm -rf -- "$(git rev-parse --absolute-git-dir)/modules/node_modules" +} + +cmd_checkout() { + # we default to check out the node_modules corresponding to the gitlink in the index + local force="" + if [ "${1-}" = "--force" ]; then + force="1" + shift + fi + + local sha="${1-$(get_index_gitlink node_modules)}" + + # fetch by sha to prevent us from downloading something we don't want + fetch_sha_to_cache "${sha}" + + # verify that our package.json is equal to the one the cached node_modules + # was created with, unless --force is given + if [ -z "$force" ]; then + if ! cmp_from_cache "${sha}" '.package.json' 'package.json'; then + cat >&2 <&2 + printf "\nCommit %s package.json and node_modules aren't in sync!\n\n" "$1" + git --no-pager show --stat "$1" + + printf "\nThe above commit refers to the following node_modules commit:\n\n" + git_cache --no-pager show --no-patch "${commit}" + + printf "\nOur attempt to recreate that commit differs as follows:\n\n" + git_cache --no-pager diff --stat "${commit}" "${expected_tree}" -- + git_cache --no-pager diff "${commit}" "${expected_tree}" -- .package-lock.json + exit 1 + fi +} + +# called from Makefile.am +cmd_make_package_lock_json() { + # Run from make to ensure package-lock.json is up to date + + # package-lock.json is used as the stamp file for all things that use + # node_modules, so this is the main bit of glue that drives the entire process + + # We try our best not to touch package-lock.json unless it actually changes + + # This isn't going to work for a tarball, but as long as + # package-lock.json is already there, and newer than package.json, + # we're OK + if [ ! 
-e .git ]; then + if [ package-lock.json -nt package.json ]; then + exit 0 + fi + + echo "*** Can't update node modules unless running from git" >&2 + exit 1 + fi + + # Otherwise, our main goal is to ensure that the node_modules from + # the index is the one that we actually have. + local sha + sha="$(get_index_gitlink node_modules)" + if [ ! -e node_modules/.git ]; then + # nothing there yet... + cmd_checkout + elif [ "$(git -C node_modules rev-parse HEAD)" != "${sha}" ]; then + # wrong thing there... + cmd_checkout + fi + + # This check is more about catching local changes to package.json than + # about validating something we just checked out: + if ! cmp -s node_modules/.package.json package.json; then + cat 2>&1 <